# -*- coding: utf-8 -*-
import requests
import time
import json
from datetime import datetime
import logging
import os
import sys
import threading
from threading import Lock

# Logging setup
def setup_logging():
    try:
        # Resolve the directory containing this script
        script_dir = os.path.dirname(os.path.abspath(__file__))
        log_file = os.path.join(script_dir, 'breeze_monitor.log')
        # Make sure the log file exists
        if not os.path.exists(log_file):
            with open(log_file, 'w', encoding='utf-8') as f:
                f.write('')
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(levelname)s - %(message)s',
            handlers=[
                logging.FileHandler(log_file, encoding='utf-8', mode='a'),
                logging.StreamHandler()
            ]
        )
        logging.info("Breeze monitor logging initialized")
    except Exception as e:
        print(f"Failed to initialize logging: {str(e)}")
        # Fall back to console-only logging if file logging fails
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(levelname)s - %(message)s',
            handlers=[logging.StreamHandler()]
        )

# Initialize the logging system
setup_logging()

# API configuration
API_BASE_URL = 'https://breeze.gameyw.netease.com/api/cms/issue/list'

# Default conversion coefficients for each ticket type
DEFAULT_COEFFICIENTS = {
    'NTES_GOD_IMAGES': 0.54,       # NetEase Dashen app images
    'NTES_GOD_VIDEOS': 3.8,        # NetEase Dashen app videos
    'NTES_GOD_CHAT_IMAGES': 0.54,  # NetEase Dashen app chat images
    'NTES_GOD_CHAT_VIDEOS': 3.8,   # NetEase Dashen app chat videos
    'NTES_DASONG': 139.19,         # Dashen Dasong videos
    'SPIDER_VIDEO': 3.8,           # Dashen regular feed videos
    'SPIDER_VIDEO_SP': 13.3,       # Dashen high-priority feed videos
    'NTES_GOD_AI': 0.54,           # Dashen AI images
    'NTES_GOD_TOP': 3.8,           # Dashen short videos
    'T_SPIDER_VIDEO': 3.8,         # Dashen TikTok regular videos
    'T_SPIDER_VIDEO_SP': 13.3,     # Dashen TikTok high-priority videos
    'V_SPIDER_VIDEO': 3.8,         # Dashen Instagram regular feed videos
    'V_SPIDER_VIDEO_SP': 13.3,     # Dashen Instagram high-priority feed videos
    'NTES_GOD_XHS': 0.54,          # Dashen Xiaohongshu images
    'XHS_SPIDER_VIDEO': 3.8,       # Xiaohongshu feed videos
    'Cupid': 0.54,                 # Dashen dating
    'CHAT_P2P': 0.55,              # Dashen chat / risky user, private chat / frequent private chat
    'CHAT_TEAM': 0.55,             # Dashen chat / risky user, group chat / frequent group chat
    'CHAT_ROOM': 0.55,             # Dashen chat, chat room
    'CHAT_ROOM_MSG': 0.55          # Risky user, frequent chat-room messages
}

# Path of the coefficient config file
COEFFICIENTS_CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'breeze_coefficients.json')

# Global state
user_credentials = {
    'cookie': None,
    'username': None
}
credentials_lock = Lock()
coefficients_lock = Lock()

# Global coefficient table
COEFFICIENTS = DEFAULT_COEFFICIENTS.copy()

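# Illustrative note (hypothetical values): the config file read and written by
# load_coefficients()/save_coefficients() below is a flat JSON object mapping
# the ticket-type keys above to float coefficients, e.g. breeze_coefficients.json
# might contain:
#
#     {
#         "NTES_GOD_IMAGES": 0.54,
#         "NTES_GOD_VIDEOS": 3.8,
#         "NTES_DASONG": 139.19
#     }
#
# Any subset of keys is accepted, since the loader applies it with
# COEFFICIENTS.update(...), leaving unlisted types at their defaults.
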
def fetch_and_save_coefficients(url):
    """Fetch the coefficient config from the remote server and save it locally"""
    max_retries = 3
    retry_delay = 5  # seconds between retries
    for attempt in range(max_retries):
        try:
            # First try a direct request without any proxy
            try:
                response = requests.get(url, timeout=10, proxies={'http': None, 'https': None}, verify=False)
            except Exception:
                # If the direct request fails, retry through the system proxy
                proxies = {
                    'http': os.environ.get('HTTP_PROXY', ''),
                    'https': os.environ.get('HTTPS_PROXY', '')
                }
                response = requests.get(url, timeout=10, proxies=proxies, verify=False)
            if response.status_code == 200:
                loaded_coefficients = response.json()
                # Update the in-memory coefficients
                COEFFICIENTS.update(loaded_coefficients)
                # Persist them to the local config file
                with open(COEFFICIENTS_CONFIG_FILE, 'w', encoding='utf-8') as f:
                    json.dump(loaded_coefficients, f, indent=4, ensure_ascii=False)
                log(f"Coefficients fetched from server: {loaded_coefficients}")
                return True
            else:
                log(f"Failed to fetch online coefficient config, HTTP status: {response.status_code}")
                if attempt < max_retries - 1:
                    log(f"Attempt {attempt + 1} failed, retrying in {retry_delay} seconds...")
                    time.sleep(retry_delay)
                    continue
                return False
        except Exception as e:
            log(f"Error while fetching online coefficient config: {str(e)}")
            if attempt < max_retries - 1:
                log(f"Attempt {attempt + 1} failed, retrying in {retry_delay} seconds...")
                time.sleep(retry_delay)
                continue
            return False

# Load coefficient config
def load_coefficients():
    """Fetch the coefficient config from the online URL, falling back to defaults on failure"""
    global COEFFICIENTS
    try:
        with coefficients_lock:
            url = "http://scripts.ui-beam.com:5000/NetEaseDSMonitor/config/breeze_coefficients.json"
            # Check whether a local config file exists
            local_config_exists = os.path.exists(COEFFICIENTS_CONFIG_FILE)
            # First run (or forced refresh): fetch from the server
            if not local_config_exists:
                log("First run, fetching coefficient config from the online server")
                if not fetch_and_save_coefficients(url):
                    log("Initial fetch of coefficient config failed, using defaults")
                    COEFFICIENTS = DEFAULT_COEFFICIENTS.copy()
                return
            # Read the local config
            try:
                with open(COEFFICIENTS_CONFIG_FILE, 'r', encoding='utf-8') as f:
                    local_coefficients = json.load(f)
            except Exception as e:
                log(f"Failed to read local coefficient config: {str(e)}")
                local_coefficients = None
            # Try to fetch the online config
            try:
                response = requests.get(url, timeout=10, proxies={'http': None, 'https': None}, verify=False)
                if response.status_code == 200:
                    online_coefficients = response.json()
                    # Compare the two configs for changes
                    if local_coefficients != online_coefficients:
                        log("Coefficient config changed, updating local copy")
                        if fetch_and_save_coefficients(url):
                            log("Coefficient config updated")
                        else:
                            log("Coefficient config update failed, keeping local config")
                    else:
                        log("Coefficient config unchanged, using local config")
                else:
                    log(f"Failed to fetch online coefficient config, HTTP status: {response.status_code}")
            except Exception as e:
                log(f"Error while checking online coefficient config: {str(e)}")
            # Apply the local config
            if local_coefficients:
                COEFFICIENTS.update(local_coefficients)
                log(f"Coefficients loaded from local file: {local_coefficients}")
            else:
                COEFFICIENTS = DEFAULT_COEFFICIENTS.copy()
    except Exception as e:
        log(f"Failed to load coefficient config: {str(e)}")
        COEFFICIENTS = DEFAULT_COEFFICIENTS.copy()

# Save coefficient config
def save_coefficients(coefficients=None):
    """Save coefficients to the config file"""
    try:
        if coefficients is None:
            coefficients = COEFFICIENTS
        with coefficients_lock:
            with open(COEFFICIENTS_CONFIG_FILE, 'w', encoding='utf-8') as f:
                json.dump(coefficients, f, indent=4, ensure_ascii=False)
            log("Coefficient config saved")
    except Exception as e:
        log(f"Failed to save coefficient config: {str(e)}")

# Initialize user credentials (read from environment variables)
def init_credentials():
    """Initialize user credentials from environment variables"""
    try:
        cookie = os.environ.get('BREEZE_COOKIE', '')
        username = os.environ.get('BREEZE_USERNAME', '')
        if cookie and username:
            with credentials_lock:
                user_credentials['cookie'] = cookie
                user_credentials['username'] = username
            log(f"Loaded user credentials from environment: {username}")
            return True
        else:
            log(f"Could not read credentials from environment, BREEZE_COOKIE length: {len(cookie)}, BREEZE_USERNAME: {username}")
            return False
    except Exception as e:
        log(f"Failed to initialize user credentials: {str(e)}")
        return False

def get_api_headers(cookie):
    """Build the API request headers"""
    return {
        'accept': 'application/json, text/plain, */*',
        'accept-language': 'zh-CN,zh;q=0.9',
        'cookie': cookie,
        'priority': 'u=1, i',
        'referer': 'https://breeze.opd.netease.com/',
        'sec-ch-ua': '"Google Chrome";v="131", "Chromium";v="131", "Not_A Brand";v="24"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-site',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
    }

def log(message, level='info'):
    """Write a log entry"""
    try:
        if level == 'warning':
            logging.warning(f"[Breeze] {message}")
        elif level == 'error':
            logging.error(f"[Breeze] {message}")
        else:
            logging.info(f"[Breeze] {message}")
    except Exception as e:
        print(f"Logging failed: {str(e)}")
        print(f"Original message: {message}")

def is_image_url(url):
    """Return True if the URL points to an image"""
    if not url:
        return False
    # Image file extensions
    image_extensions = ['.jpg', '.jpeg', '.png', '.gif', '.webp', '.svg', '.bmp', '.heif', '.heic']
    if any(ext in url.lower() for ext in image_extensions):
        return True
    # Image-hosting services; patterns must be lowercase because they are
    # matched against url.lower()
    image_patterns = [
        'fp.ps.netease.com',
        'cc.fp.ps.netease.com',
        'nos.netease.com',
        'imageview'  # special query parameter marking an image
    ]
    return any(pattern in url.lower() for pattern in image_patterns)

def is_video_url(url):
    """Return True if the URL points to a video"""
    if not url:
        return False
    # Video file extensions
    video_extensions = ['.mp4', '.avi', '.mov', '.wmv', '.mkv', '.flv', '.webm', '.m4v']
    if any(ext in url.lower() for ext in video_extensions):
        return True
    # Video-hosting services
    video_patterns = [
        'vod.cc.163.com',
        'my.fp.ps.netease.com',
        'vframe=1'  # special query parameter marking a video
    ]
    return any(pattern in url.lower() for pattern in video_patterns)

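# A small self-check for the two URL classifiers above (a sketch: the sample
# URLs are invented, and nothing calls this function; run it manually when
# debugging the patterns).
def _demo_url_classifiers():
    assert is_image_url('https://nos.netease.com/pic/abc.jpg')
    assert is_image_url('https://fp.ps.netease.com/file/xyz?imageView')
    assert is_video_url('https://vod.cc.163.com/clip/abc.mp4')
    assert is_video_url('https://my.fp.ps.netease.com/file/xyz?vframe=1')
    assert not is_video_url('')
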
def determine_media_type(issue):
    """Determine the media type of a ticket"""
    try:
        # Pull details out of extraInfo
        extra_info = json.loads(issue.get('extraInfo', '{}'))
        product = extra_info.get('product', '')
        msg_type = extra_info.get('msg_type', '')
        title = issue.get('title', '')
        # Chat tickets first. The CHAT_ROOM_MSG check must come before the
        # CHAT_ROOM check: a title containing "聊天室频繁" also contains
        # "聊天室", so the more specific pattern has to win.
        if msg_type == 'P2P' or '私聊频繁' in title:
            return 'CHAT_P2P'
        elif msg_type == 'TEAM' or '群聊频繁' in title:
            return 'CHAT_TEAM'
        elif 'CHAT_ROOM_MSG' in msg_type or '聊天室频繁' in title:
            return 'CHAT_ROOM_MSG'
        elif msg_type == 'ROOM' or '聊天室' in title:
            return 'CHAT_ROOM'
        # If product is missing, try to extract it from uniqueid
        if not product:
            uniqueid = issue.get('uniqueid', '')
            if '||' in uniqueid:
                product = uniqueid.split('||')[0]
        # NTES_GOD and NTES_GOD_CHAT tickets need the URL to tell image from video
        if product in ['NTES_GOD', 'NTES_GOD_CHAT']:
            url = extra_info.get('url', '').lower()
            # Video checks
            video_extensions = ['.mp4', '.avi', '.mov', '.wmv', '.mkv', '.flv', '.webm', '.m4v']
            video_domains = ['vod.cc.163.com']
            is_video = False
            # File extension in the URL
            for ext in video_extensions:
                if url.endswith(ext) or f"{ext}?" in url:
                    is_video = True
                    break
            # Known video domains
            for domain in video_domains:
                if domain in url:
                    is_video = True
                    break
            # Special query parameter
            if 'vframe=1' in url:
                is_video = True
            # Return the ticket type matching the media kind
            if is_video:
                return f"{product}_VIDEOS"
            else:
                return f"{product}_IMAGES"
        # All other tickets map directly to their product code
        return product
    except Exception as e:
        log(f"Error determining ticket type: {str(e)}", level='error')
        return None

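# Hypothetical example of the classification above (all field values invented):
# an issue whose extraInfo carries product=NTES_GOD and a vod.cc.163.com URL is
# classified as NTES_GOD_VIDEOS, while a P2P chat message maps to CHAT_P2P.
# Nothing calls this; it is only a sketch of the expected input shape.
def _demo_determine_media_type():
    video_issue = {
        'title': '',
        'extraInfo': json.dumps({'product': 'NTES_GOD', 'url': 'https://vod.cc.163.com/clip/abc.mp4'})
    }
    chat_issue = {
        'title': '',
        'extraInfo': json.dumps({'msg_type': 'P2P'})
    }
    assert determine_media_type(video_issue) == 'NTES_GOD_VIDEOS'
    assert determine_media_type(chat_issue) == 'CHAT_P2P'
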
def get_coefficient(issue_data):
    """Look up the conversion coefficient for a ticket"""
    media_type = determine_media_type(issue_data)
    # Always use the latest coefficient table
    with coefficients_lock:
        current_coefficients = COEFFICIENTS.copy()
    if media_type in current_coefficients:
        return current_coefficients[media_type]
    # Fall back to the default coefficient when the media type is unknown
    log(f"Unknown media type: {media_type}, using default coefficient")
    return current_coefficients['NTES_GOD_IMAGES']

def fetch_issue_data(cookie, username, create_start_time, create_end_time, close_start_time=None, close_end_time=None, max_pages=10000):
    """Fetch ticket data from the API"""
    issues = []
    headers = get_api_headers(cookie)
    page = 1
    stats = {
        'total_count': 0,
        'types': {}
    }
    # Fall back to the create-time window when no close-time window is given
    if close_start_time is None:
        close_start_time = create_start_time
    if close_end_time is None:
        close_end_time = create_end_time
    # Retry settings
    max_retries = 5
    retry_interval = 5
    current_retry = 0
    while page <= max_pages:
        try:
            params = {
                'pageNum': page,
                'pageSize': 500,  # 500 records per page
                'createTimeStart': create_start_time,
                'createTimeEnd': create_end_time,
                'closeTimeStart': close_start_time,
                'closeTimeEnd': close_end_time,
                'gameCode': 'a19',
                'handleUsername': username,
                'cold': 'false',
                'status': 'FINISH'
            }
            log(f"Fetching page {page}, create-time range: {create_start_time} to {create_end_time}")
            log(f"Close-time range: {close_start_time} to {close_end_time}")
            # Send the request
            response = requests.get(API_BASE_URL, headers=headers, params=params, timeout=15)
            # Check the response status
            if response.status_code == 200:
                try:
                    data = response.json()
                    # Validate the API response shape
                    if data.get('code') == 200 and 'data' in data and 'records' in data['data']:
                        items = data['data']['records']
                        total = data['data'].get('total', 0)
                        log(f"API call succeeded, {len(items)} records on this page, {total} in total")
                        # Retry when the API reports zero records
                        if total == 0 and current_retry < max_retries - 1:
                            current_retry += 1
                            log(f"API returned 0 records, retry attempt {current_retry}...")
                            time.sleep(retry_interval)
                            continue
                        elif total == 0 and current_retry >= max_retries - 1:
                            log(f"API returned 0 records and the retry limit ({max_retries}) was reached, giving up")
                            return None
                        # Log progress as a percentage of the API-reported total
                        if total > 0 and stats['total_count'] + len(items) <= total:
                            progress = ((stats['total_count'] + len(items)) / total) * 100
                            log(f"Progress: {progress:.2f}% ({stats['total_count'] + len(items)}/{total})")
                    elif data.get('code') != 200:
                        # Explicit failure response
                        error_msg = data.get('msg', 'unknown error')
                        log(f"API returned an error: {error_msg}")
                        log(f"API response body: {response.text[:500]}")
                        return None
                    else:
                        # Anything else: log the response and bail out
                        log(f"Unexpected API response format: {response.text[:500]}")
                        return None
                    # Process the ticket data
                    if not items:
                        log("Current page is empty, stopping")
                        break
                    issues.extend(items)
                    # Update per-type statistics for this page
                    for item in items:
                        media_type = determine_media_type(item)
                        if media_type not in stats['types']:
                            stats['types'][media_type] = 0
                        stats['types'][media_type] += 1
                    stats['total_count'] += len(items)
                    log(f"Page {page} has {len(items)} records, {stats['total_count']} processed so far")
                    # Check whether more pages remain
                    total_pages = data['data'].get('pages', 1)
                    if page >= total_pages or len(items) < params['pageSize']:
                        log(f"All data fetched: {total_pages} pages, {stats['total_count']} records processed")
                        break
                except ValueError as e:
                    log(f"Failed to parse JSON response: {str(e)}")
                    log(f"Raw response body: {response.text[:500]}")
                    return None
            else:
                log(f"API request failed: HTTP {response.status_code}")
                log(f"Response body: {response.text[:500]}")
                return None
            page += 1
        except requests.exceptions.Timeout:
            log("API request timed out")
            return None
        except requests.exceptions.ConnectionError:
            log("Network connection error, please check connectivity")
            return None
        except Exception as e:
            log(f"Failed to fetch data: {str(e)}")
            return None
    # Note whether we stopped because the page limit was hit
    if page > max_pages:
        log(f"Reached the page limit ({max_pages} pages), stopping. Increase max_pages to fetch more data.")
    # Always reload the latest coefficients from the config file
    load_coefficients()
    # Snapshot the global coefficient table
    with coefficients_lock:
        current_coefficients = COEFFICIENTS.copy()
    # Compute the weighted total across all ticket types
    weighted_total = 0
    for media_type, count in stats['types'].items():
        coefficient = current_coefficients.get(media_type, current_coefficients['NTES_GOD_IMAGES'])
        weighted_count = count * coefficient
        weighted_total += weighted_count
    log(f"Final stats: {stats['total_count']} tickets, weighted total {weighted_total:.2f}")
    # Shape the statistics for the frontend
    frontend_stats = {
        'total': stats['total_count'],
        'weighted_total': weighted_total,
        'categories': {},
        'coefficients': current_coefficients  # expose the coefficients in the result
    }
    # Human-readable display names for each ticket type
    type_names = {
        'NTES_GOD_IMAGES': "NetEase Dashen app images",
        'NTES_GOD_VIDEOS': "NetEase Dashen app videos",
        'NTES_GOD_CHAT_IMAGES': "NetEase Dashen app chat images",
        'NTES_GOD_CHAT_VIDEOS': "NetEase Dashen app chat videos",
        'NTES_DASONG': "Dashen Dasong videos",
        'SPIDER_VIDEO': "Dashen regular feed videos",
        'SPIDER_VIDEO_SP': "Dashen high-priority feed videos",
        'NTES_GOD_AI': "Dashen AI images",
        'NTES_GOD_TOP': "Dashen short videos",
        'T_SPIDER_VIDEO': "Dashen TikTok regular videos",
        'T_SPIDER_VIDEO_SP': "Dashen TikTok high-priority videos",
        'V_SPIDER_VIDEO': "Dashen Instagram regular feed videos",
        'V_SPIDER_VIDEO_SP': "Dashen Instagram high-priority feed videos",
        'NTES_GOD_XHS': "Dashen Xiaohongshu images",
        'XHS_SPIDER_VIDEO': "Xiaohongshu feed videos",
        'Cupid': "Dashen dating",
        'CHAT_P2P': "Dashen chat / risky user, private chat / frequent private chat",
        'CHAT_TEAM': "Dashen chat / risky user, group chat / frequent group chat",
        'CHAT_ROOM': "Dashen chat, chat room",
        'CHAT_ROOM_MSG': "Risky user, frequent chat-room messages"
    }
    # Build per-category stats, covering every known ticket type
    for media_type, coefficient in current_coefficients.items():
        count = stats['types'].get(media_type, 0)
        weighted_count = count * coefficient
        # Unknown types fall back to a name derived from the type code
        type_name = type_names.get(media_type, media_type.replace('_', ' ').replace('NTES', 'Dashen'))
        frontend_stats['categories'][type_name] = {
            'count': count,
            'weighted': weighted_count,
            'coefficient': coefficient
        }
    return {
        'stats': frontend_stats,
        'issues': issues
    }

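# For reference, a hypothetical sketch of the response shape fetch_issue_data()
# expects from the issue/list endpoint (field values invented; only code,
# data.records, data.total and data.pages are actually read):
#
#     {
#         "code": 200,
#         "data": {
#             "records": [{"title": "...", "uniqueid": "...", "extraInfo": "{...}"}],
#             "total": 1234,
#             "pages": 3
#         }
#     }
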
def switch_business(cookie, business_id):
    """Switch the active business line"""
    try:
        url = 'https://breeze.gameyw.netease.com/api/cms/user/switchBusiness'
        headers = {
            'accept': 'application/json, text/plain, */*',
            'accept-language': 'zh-CN,zh;q=0.9',
            'content-type': 'application/json',
            'cookie': cookie,
            'origin': 'https://breeze.opd.netease.com',
            'priority': 'u=1, i',
            'referer': 'https://breeze.opd.netease.com/',
            'sec-ch-ua': '"Google Chrome";v="131", "Chromium";v="131", "Not_A Brand";v="24"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-site',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
        }
        data = {"businessId": business_id}
        log(f"Switching business line, target business ID: {business_id}")
        response = requests.post(url, headers=headers, json=data, timeout=15)
        if response.status_code == 200:
            try:
                result = response.json()
                if result.get('code') == 200:
                    log(f"Business line switched: {business_id}")
                    time.sleep(1)  # wait for the switch to take effect
                    return True
                else:
                    log(f"Business line switch failed: {result.get('msg', 'unknown error')}")
                    return False
            except ValueError as e:
                log(f"Failed to parse business switch response: {str(e)}")
                return False
        else:
            log(f"Business line switch request failed: HTTP {response.status_code}")
            return False
    except Exception as e:
        log(f"Error switching business line: {str(e)}")
        return False

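# Usage note: elsewhere in this script switch_business() is only ever called
# with two business IDs, 7 (Qingfeng review - Dashen review) and 12 (Qingfeng
# review - image review), e.g.:
#
#     switch_business(cookie, 7)   # switch to Dashen review
#     switch_business(cookie, 12)  # switch to image review
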
def check_current_hour_counts(cookie, username):
    """Check the current hour's data"""
    try:
        # Build the time windows for the current hour
        now = datetime.now()
        # Create-time window still covers the whole day
        create_start_time = now.strftime('%Y-%m-%d') + " 00:00:00"
        create_end_time = now.strftime('%Y-%m-%d') + " 23:59:59"
        # Close-time window covers the current hour only
        close_start_time = now.strftime('%Y-%m-%d %H') + ":00:00"
        close_end_time = now.strftime('%Y-%m-%d %H') + ":59:59"
        log(f"Hourly query - create-time range: {create_start_time} to {create_end_time}")
        log(f"Hourly query - close-time range: {close_start_time} to {close_end_time}")
        # First switch to Qingfeng review - Dashen review (business ID 7)
        log("Switching to Qingfeng review - Dashen review...")
        switch_business(cookie, 7)
        # Fetch Dashen review data
        log("Fetching Dashen review data...")
        godResult = fetch_issue_data(cookie, username, create_start_time, create_end_time, close_start_time, close_end_time, max_pages=10000)
        if godResult is None:
            log("Failed to fetch Dashen review data")
            godStats = {
                'total': 0,
                'weighted_total': 0,
                'categories': {}
            }
        else:
            godStats = godResult['stats']
            log(f"Dashen review data fetched: {godStats['total']} records, weighted total {godStats['weighted_total']:.2f}")
        # Then switch to Qingfeng review - image review (business ID 12)
        log("Switching to Qingfeng review - image review...")
        switch_business(cookie, 12)
        # Fetch image review data
        log("Fetching image review data...")
        imageResult = fetch_issue_data(cookie, username, create_start_time, create_end_time, close_start_time, close_end_time, max_pages=10000)
        if imageResult is None:
            log("Failed to fetch image review data")
            imageStats = {
                'total': 0,
                'weighted_total': 0,
                'categories': {}
            }
        else:
            imageStats = imageResult['stats']
            log(f"Image review data fetched: {imageStats['total']} records, weighted total {imageStats['weighted_total']:.2f}")
        # Merge the two result sets
        mergedStats = {
            'total': godStats['total'] + imageStats['total'],
            'weighted_total': godStats['weighted_total'] + imageStats['weighted_total'],
            'categories': {}
        }
        # Merge the per-category stats
        allCategories = set(list(godStats['categories'].keys()) + list(imageStats['categories'].keys()))
        for category in allCategories:
            godCat = godStats['categories'].get(category, {'count': 0, 'coefficient': 0, 'weighted': 0})
            imageCat = imageStats['categories'].get(category, {'count': 0, 'coefficient': 0, 'weighted': 0})
            # Both sides should carry the same coefficient; use whichever is present
            coefficient = godCat['coefficient'] or imageCat['coefficient']
            mergedStats['categories'][category] = {
                'count': godCat['count'] + imageCat['count'],
                'coefficient': coefficient,
                'weighted': godCat['weighted'] + imageCat['weighted']
            }
        log(f"Merged stats: {mergedStats['total']} tickets, weighted total {mergedStats['weighted_total']:.2f}")
        # Write the stats to the shared data file
        try:
            data = {
                'type': 'breeze_hourly',
                'stats': mergedStats,
                'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            }
            with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'breeze_hourly.json'), 'w', encoding='utf-8') as f:
                json.dump(data, f, ensure_ascii=False)
            log("Hourly data written to the shared file")
        except Exception as e:
            log(f"Failed to write hourly data to the shared file: {str(e)}")
        return mergedStats
    except Exception as e:
        log(f"Failed to check current-hour data: {str(e)}")
        return None

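# Minimal sketch of a consumer for the shared file written above (a hypothetical
# helper, not used by this script): it reads breeze_hourly.json and returns the
# merged stats block.
def _read_hourly_stats():
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'breeze_hourly.json')
    with open(path, 'r', encoding='utf-8') as f:
        data = json.load(f)
    # data['type'] is 'breeze_hourly'; data['timestamp'] records the write time
    return data['stats']
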
def check_daily_counts(cookie, username):
    """Check the full day's data"""
    try:
        # Build today's start and end times
        today = datetime.now().strftime('%Y-%m-%d')
        create_start_time = f"{today} 00:00:00"
        create_end_time = f"{today} 23:59:59"
        # Close-time window also covers the whole day
        close_start_time = create_start_time
        close_end_time = create_end_time
        log(f"Daily query - create-time range: {create_start_time} to {create_end_time}")
        log(f"Daily query - close-time range: {close_start_time} to {close_end_time}")
        # First switch to Qingfeng review - Dashen review (business ID 7)
        log("Switching to Qingfeng review - Dashen review...")
        switch_business(cookie, 7)
        # Fetch Dashen review data
        log("Fetching Dashen review daily data...")
        godResult = fetch_issue_data(cookie, username, create_start_time, create_end_time, close_start_time, close_end_time, max_pages=10000)
        if godResult is None:
            log("Failed to fetch Dashen review daily data")
            godStats = {
                'total': 0,
                'weighted_total': 0,
                'categories': {}
            }
        else:
            godStats = godResult['stats']
            log(f"Dashen review daily data fetched: {godStats['total']} records, weighted total {godStats['weighted_total']:.2f}")
        # Then switch to Qingfeng review - image review (business ID 12)
        log("Switching to Qingfeng review - image review...")
        switch_business(cookie, 12)
        # Fetch image review data
        log("Fetching image review daily data...")
        imageResult = fetch_issue_data(cookie, username, create_start_time, create_end_time, close_start_time, close_end_time, max_pages=10000)
        if imageResult is None:
            log("Failed to fetch image review daily data")
            imageStats = {
                'total': 0,
                'weighted_total': 0,
                'categories': {}
            }
        else:
            imageStats = imageResult['stats']
            log(f"Image review daily data fetched: {imageStats['total']} records, weighted total {imageStats['weighted_total']:.2f}")
        # Merge the two result sets
        mergedStats = {
            'total': godStats['total'] + imageStats['total'],
            'weighted_total': godStats['weighted_total'] + imageStats['weighted_total'],
            'categories': {}
        }
        # Merge the per-category stats
        allCategories = set(list(godStats['categories'].keys()) + list(imageStats['categories'].keys()))
        for category in allCategories:
            godCat = godStats['categories'].get(category, {'count': 0, 'coefficient': 0, 'weighted': 0})
            imageCat = imageStats['categories'].get(category, {'count': 0, 'coefficient': 0, 'weighted': 0})
            # Both sides should carry the same coefficient; use whichever is present
            coefficient = godCat['coefficient'] or imageCat['coefficient']
            mergedStats['categories'][category] = {
                'count': godCat['count'] + imageCat['count'],
                'coefficient': coefficient,
                'weighted': godCat['weighted'] + imageCat['weighted']
            }
        log(f"Merged daily stats: {mergedStats['total']} tickets, weighted total {mergedStats['weighted_total']:.2f}")
        # Write the stats to the shared data file
        try:
            data = {
                'type': 'breeze_daily',
                'stats': mergedStats,
                'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            }
            with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'breeze_daily.json'), 'w', encoding='utf-8') as f:
                json.dump(data, f, ensure_ascii=False)
            log("Daily data written to the shared file")
        except Exception as e:
            log(f"Failed to write daily data to the shared file: {str(e)}")
        return mergedStats
    except Exception as e:
        log(f"Failed to check today's data: {str(e)}")
        return None

def monitor_hourly_thread():
    """Hourly monitoring thread"""
    log("Hourly monitoring thread started")
    while True:
        try:
            # Read credentials from the shared globals
            with credentials_lock:
                cookie = user_credentials['cookie']
                username = user_credentials['username']
            if cookie and username:
                # Check the current hour's data
                check_current_hour_counts(cookie, username)
                time.sleep(120)  # check every 2 minutes
            else:
                time.sleep(30)  # wait 30 seconds when not logged in
        except Exception as e:
            log(f"Hourly monitoring thread error: {str(e)}")
            time.sleep(60)  # wait 1 minute after an error

def monitor_daily_thread():
    """Daily monitoring thread"""
    log("Daily monitoring thread started")
    while True:
        try:
            # Read credentials from the shared globals
            with credentials_lock:
                cookie = user_credentials['cookie']
                username = user_credentials['username']
            if cookie and username:
                # Check the full day's data
                check_daily_counts(cookie, username)
                time.sleep(3600)  # check every 60 minutes
            else:
                time.sleep(30)  # wait 30 seconds when not logged in
        except Exception as e:
            log(f"Daily monitoring thread error: {str(e)}")
            time.sleep(60)  # wait 1 minute after an error

# Thread that watches the config file for changes
def monitor_config_thread():
    """Config-file monitoring thread"""
    log("Config monitoring thread started")
    last_modified_time = 0
    while True:
        try:
            if os.path.exists(COEFFICIENTS_CONFIG_FILE):
                current_modified_time = os.path.getmtime(COEFFICIENTS_CONFIG_FILE)
                # Reload when the file has been updated
                if current_modified_time > last_modified_time:
                    log("Config file changed, reloading coefficients")
                    load_coefficients()
                    # Recompute the data right away after a coefficient change
                    with credentials_lock:
                        cookie = user_credentials['cookie']
                        username = user_credentials['username']
                    if cookie and username:
                        log("Refreshing data after coefficient change...")
                        threading.Thread(target=lambda: check_current_hour_counts(cookie, username)).start()
                        threading.Thread(target=lambda: check_daily_counts(cookie, username)).start()
                    last_modified_time = current_modified_time
            time.sleep(120)  # check every 2 minutes to keep the frequency low
        except Exception as e:
            log(f"Config monitoring thread error: {str(e)}")
            time.sleep(60)  # wait 1 minute after an error

def main():
    """Entry point"""
    log("Breeze monitoring system starting")
    # Parse command-line arguments
    check_now = False
    force_mode = False
    update_coefficients = False
    no_config_check = False
    for arg in sys.argv:
        if arg == "--check-now":
            check_now = True
            log("Received --check-now")
        elif arg == "--force":
            force_mode = True
            log("Received --force")
        elif arg == "--update-coefficients":
            update_coefficients = True
            log("Received --update-coefficients")
        elif arg == "--no-config-check":
            no_config_check = True
            log("Received --no-config-check")
    # Load coefficients from the config file unless config checking is disabled
    if not no_config_check:
        load_coefficients()
    else:
        log("Skipping config check, using the coefficients already loaded")
    # Handle a coefficient update
    if update_coefficients:
        # A new coefficient requires 4 arguments:
        # script name + --update-coefficients + type + value
        if len(sys.argv) >= 4:
            try:
                coefficient_type = sys.argv[2]
                coefficient_value = float(sys.argv[3])
                # Validate the coefficient type
                if coefficient_type in COEFFICIENTS:
                    log(f"Updating coefficient: {coefficient_type}={coefficient_value}")
                    # Update the global table
                    with coefficients_lock:
                        COEFFICIENTS[coefficient_type] = coefficient_value
                    # Persist to the config file
                    save_coefficients()
                else:
                    log(f"Unknown coefficient type: {coefficient_type}")
            except ValueError as e:
                log(f"Coefficient update failed: {str(e)}")
                log("The coefficient must be a valid number")
            except IndexError:
                log("Not enough arguments to update a coefficient")
        log("Coefficient update finished, exiting")
        sys.exit(0)
    # Make sure the output directory (the script's own directory) exists
    script_dir = os.path.dirname(os.path.abspath(__file__))
    if not os.path.exists(script_dir):
        os.makedirs(script_dir)
    # Initialize credentials from environment variables
    init_credentials()
    # Handle --check-now
    if check_now:
        # Read credentials from the shared globals
        with credentials_lock:
            cookie = user_credentials['cookie']
            username = user_credentials['username']
        if cookie and username:
            log("Starting manual check")
            if force_mode:
                # In force mode, query with the current hour's close-time window
                now = datetime.now()
                # Create-time window still covers the whole day
                create_start_time = now.strftime('%Y-%m-%d') + " 00:00:00"
                create_end_time = now.strftime('%Y-%m-%d') + " 23:59:59"
                # Close-time window covers the current hour
                close_start_time = now.strftime('%Y-%m-%d %H') + ":00:00"
                close_end_time = now.strftime('%Y-%m-%d %H') + ":59:59"
                log(f"Force mode - create-time range: {create_start_time} to {create_end_time}")
                log(f"Force mode - close-time range: {close_start_time} to {close_end_time}")
                # First switch to Qingfeng review - Dashen review (business ID 7)
                log("Switching to Qingfeng review - Dashen review...")
                switch_business(cookie, 7)
                # Fetch Dashen review data
                log("Fetching Dashen review data...")
                godResult = fetch_issue_data(cookie, username, create_start_time, create_end_time, close_start_time, close_end_time, max_pages=10000)
                if godResult is None:
                    log("Failed to fetch Dashen review data")
                    godStats = {
                        'total': 0,
                        'weighted_total': 0,
                        'categories': {}
                    }
                else:
                    godStats = godResult['stats']
                    log(f"Dashen review data fetched: {godStats['total']} records, weighted total {godStats['weighted_total']:.2f}")
                # Then switch to Qingfeng review - image review (business ID 12)
                log("Switching to Qingfeng review - image review...")
                switch_business(cookie, 12)
                # Fetch image review data
                log("Fetching image review data...")
                imageResult = fetch_issue_data(cookie, username, create_start_time, create_end_time, close_start_time, close_end_time, max_pages=10000)
                if imageResult is None:
                    log("Failed to fetch image review data")
                    imageStats = {
                        'total': 0,
                        'weighted_total': 0,
                        'categories': {}
                    }
                else:
                    imageStats = imageResult['stats']
                    log(f"Image review data fetched: {imageStats['total']} records, weighted total {imageStats['weighted_total']:.2f}")
                # Merge the two result sets
                mergedStats = {
                    'total': godStats['total'] + imageStats['total'],
                    'weighted_total': godStats['weighted_total'] + imageStats['weighted_total'],
                    'categories': {}
                }
                # Merge the per-category stats
                allCategories = set(list(godStats['categories'].keys()) + list(imageStats['categories'].keys()))
                for category in allCategories:
                    godCat = godStats['categories'].get(category, {'count': 0, 'coefficient': 0, 'weighted': 0})
                    imageCat = imageStats['categories'].get(category, {'count': 0, 'coefficient': 0, 'weighted': 0})
                    # Both sides should carry the same coefficient; use whichever is present
                    coefficient = godCat['coefficient'] or imageCat['coefficient']
                    mergedStats['categories'][category] = {
                        'count': godCat['count'] + imageCat['count'],
                        'coefficient': coefficient,
                        'weighted': godCat['weighted'] + imageCat['weighted']
                    }
                log(f"Merged stats: {mergedStats['total']} tickets, weighted total {mergedStats['weighted_total']:.2f}")
                # Write the stats to the shared data file
                try:
                    data = {
                        'type': 'breeze_hourly',
                        'stats': mergedStats,
                        'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    }
                    with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'breeze_hourly.json'), 'w', encoding='utf-8') as f:
                        json.dump(data, f, ensure_ascii=False)
                    log("Hourly data written to the shared file")
                except Exception as e:
                    log(f"Failed to write hourly data to the shared file: {str(e)}")
            else:
                # Regular check
                check_current_hour_counts(cookie, username)
            log("Manual check finished")
        else:
            log("Cannot run manual check: credentials unavailable")
        # Exit after the one-shot check
        sys.exit(0)
    # Start the monitoring threads
    hourly_thread = threading.Thread(target=monitor_hourly_thread)
    hourly_thread.daemon = True
    hourly_thread.start()
    daily_thread = threading.Thread(target=monitor_daily_thread)
    daily_thread.daemon = True
    daily_thread.start()
    # Start the config monitoring thread
    config_thread = threading.Thread(target=monitor_config_thread)
    config_thread.daemon = True
    config_thread.start()
    # Keep the main thread alive
    try:
        while True:
            time.sleep(10)
    except KeyboardInterrupt:
        log("Interrupted by user")
    except Exception as e:
        log(f"Main thread error: {str(e)}")
    finally:
        log("Breeze monitoring system shut down")

if __name__ == '__main__':
    main()
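
# Typical invocations, based on the argument parsing in main() (BREEZE_COOKIE
# and BREEZE_USERNAME must be set in the environment; see init_credentials();
# the 4.0 value below is just an illustrative number):
#
#     python breeze_monitor_CHAT.py                      # run the monitor threads
#     python breeze_monitor_CHAT.py --check-now          # one-shot check, then exit
#     python breeze_monitor_CHAT.py --check-now --force  # one-shot, current-hour close-time window
#     python breeze_monitor_CHAT.py --update-coefficients NTES_GOD_VIDEOS 4.0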