NetEaseDSMonitor/breeze_monitor.py

1022 lines
41 KiB
Python
Raw Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

# -*- coding: utf-8 -*-
import requests
import time
import json
from datetime import datetime, timedelta
import logging
import os
import sys
import threading
from threading import Lock
import re
import subprocess
# Logging setup
def setup_logging():
    """Configure root logging to a file next to this script plus the console.

    On any failure (e.g. the directory is not writable) fall back to
    console-only logging so the process can still report problems.
    """
    try:
        script_dir = os.path.dirname(os.path.abspath(__file__))
        log_file = os.path.join(script_dir, 'breeze_monitor.log')
        # FileHandler in append mode creates the file when missing, so the
        # old explicit "touch" of the file was redundant (and racy).
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(levelname)s - %(message)s',
            handlers=[
                logging.FileHandler(log_file, encoding='utf-8', mode='a'),
                logging.StreamHandler()
            ]
        )
        logging.info("Breeze监控日志系统初始化成功")
    except Exception as e:
        print(f"日志系统初始化失败: {str(e)}")
        # If file logging fails, at least keep console logging.
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(levelname)s - %(message)s',
            handlers=[logging.StreamHandler()]
        )
# Initialise logging as soon as the module is imported.
setup_logging()
# API configuration: endpoint that lists finished issues (work orders).
API_BASE_URL = 'https://breeze.gameyw.netease.com/api/cms/issue/list'
# Default conversion coefficients per ticket type (ticket count * coefficient
# gives the weighted workload). Keys are internal media-type codes.
DEFAULT_COEFFICIENTS = {
    'NTES_GOD_IMAGES': 0.54,  # NetEase Dashen app images
    'NTES_GOD_VIDEOS': 3.8,  # NetEase Dashen app videos
    'NTES_GOD_CHAT_IMAGES': 0.54,  # NetEase Dashen app chat images
    'NTES_GOD_CHAT_VIDEOS': 3.8,  # NetEase Dashen app chat videos
    'NTES_DASONG': 139.19,  # Dashen "Dasong" videos
    'SPIDER_VIDEO': 3.8,  # Dashen ordinary supply videos
    'SPIDER_VIDEO_SP': 13.3,  # Dashen high-priority supply videos
    'NTES_GOD_AI': 0.54,  # Dashen AI images
    'NTES_GOD_TOP': 3.8,  # Dashen short videos
    'T_SPIDER_VIDEO': 3.8,  # Dashen TikTok ordinary videos
    'T_SPIDER_VIDEO_SP': 13.3,  # Dashen TikTok high-priority videos
    'V_SPIDER_VIDEO': 3.8,  # Dashen Instagram ordinary supply videos
    'V_SPIDER_VIDEO_SP': 13.3,  # Dashen Instagram high-priority supply videos
    'NTES_GOD_XHS': 0.54,  # Dashen Xiaohongshu images
    'XHS_SPIDER_VIDEO': 3.8,  # Xiaohongshu supply videos
    'Cupid': 0.54,  # Dashen dating
    'CHAT_P2P': 0.55,  # Dashen chat / risky user private chat (frequent)
    'CHAT_TEAM': 0.55,  # Dashen chat / risky user group chat (frequent)
    'CHAT_ROOM': 0.55,  # Dashen chat room
    'CHAT_ROOM_MSG': 0.55  # risky user chat-room flooding
}
# Path of the coefficient config file (stored next to this script).
COEFFICIENTS_CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'breeze_coefficients.json')
# Global state shared between the monitor threads.
# Session credentials; read/written only while holding credentials_lock.
user_credentials = {
    'cookie': None,
    'username': None
}
credentials_lock = Lock()
coefficients_lock = Lock()
# Live coefficient table; guarded by coefficients_lock.
COEFFICIENTS = DEFAULT_COEFFICIENTS.copy()
# Coefficient config loading
def load_coefficients():
    """Refresh the global coefficient table from the JSON config file.

    Creates the file with the defaults when it does not exist yet; on any
    error the defaults are restored.
    """
    global COEFFICIENTS
    try:
        with coefficients_lock:
            if not os.path.exists(COEFFICIENTS_CONFIG_FILE):
                # First run: materialise the default table on disk.
                with open(COEFFICIENTS_CONFIG_FILE, 'w', encoding='utf-8') as f:
                    json.dump(DEFAULT_COEFFICIENTS, f, indent=4, ensure_ascii=False)
                log("创建默认系数配置文件")
                COEFFICIENTS = DEFAULT_COEFFICIENTS.copy()
            else:
                with open(COEFFICIENTS_CONFIG_FILE, 'r', encoding='utf-8') as f:
                    loaded = json.load(f)
                log(f"从配置文件加载系数: {str(loaded)}")
                # Overlay the file's values on top of the current table.
                COEFFICIENTS.update(loaded)
        log(f"当前使用的系数已更新")
    except Exception as e:
        log(f"加载系数配置失败: {str(e)}")
        # Fall back to the built-in defaults on any failure.
        COEFFICIENTS = DEFAULT_COEFFICIENTS.copy()
# Coefficient config saving
def save_coefficients(coefficients=None):
    """Persist a coefficient table to the config file.

    When called without arguments the live global table is written.
    """
    try:
        table = COEFFICIENTS if coefficients is None else coefficients
        with coefficients_lock:
            with open(COEFFICIENTS_CONFIG_FILE, 'w', encoding='utf-8') as f:
                json.dump(table, f, indent=4, ensure_ascii=False)
            log(f"系数配置已保存")
    except Exception as e:
        log(f"保存系数配置失败: {str(e)}")
# Credential bootstrap (read from environment variables)
def init_credentials():
    """Seed the shared credential dict from BREEZE_COOKIE / BREEZE_USERNAME.

    Returns True when both values were present, False otherwise.
    """
    try:
        cookie = os.environ.get('BREEZE_COOKIE', '')
        username = os.environ.get('BREEZE_USERNAME', '')
        if not (cookie and username):
            log(f"未能从环境变量获取用户凭据BREEZE_COOKIE长度: {len(cookie)}, BREEZE_USERNAME: {username}")
            return False
        with credentials_lock:
            user_credentials['cookie'] = cookie
            user_credentials['username'] = username
        log(f"已从环境变量加载用户凭据: {username}")
        return True
    except Exception as e:
        log(f"初始化用户凭据失败: {str(e)}")
        return False
def get_api_headers(cookie):
    """Build the browser-like request headers used for every Breeze API call.

    Only the cookie varies between sessions; everything else mimics a
    Chrome 131 desktop browser.
    """
    browser_agent = ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
                     '(KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36')
    return {
        'accept': 'application/json, text/plain, */*',
        'accept-language': 'zh-CN,zh;q=0.9',
        'cookie': cookie,
        'priority': 'u=1, i',
        'referer': 'https://breeze.opd.netease.com/',
        'sec-ch-ua': '"Google Chrome";v="131", "Chromium";v="131", "Not_A Brand";v="24"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-site',
        'user-agent': browser_agent
    }
def log(message, level='info'):
    """Emit a log record tagged with the [Breeze] prefix.

    level selects warning/error; anything else logs at info.
    """
    try:
        emitters = {'warning': logging.warning, 'error': logging.error}
        emit = emitters.get(level, logging.info)
        emit(f"[Breeze] {message}")
    except Exception as e:
        # Last resort: never let logging itself take the process down.
        print(f"日志记录失败: {str(e)}")
        print(f"原始消息: {message}")
def is_image_url(url):
    """Return True when the URL appears to reference an image.

    Matches by file-extension substring first, then by known image-hosting
    services / query markers. Empty or None URLs return False.
    """
    if not url:
        return False
    lowered = url.lower()
    # Image file extensions (substring match anywhere in the URL).
    image_extensions = ['.jpg', '.jpeg', '.png', '.gif', '.webp', '.svg', '.bmp', '.heif', '.heic']
    if any(ext in lowered for ext in image_extensions):
        return True
    # Image service / marker check. BUG FIX: patterns are tested against the
    # lower-cased URL, so the marker must be lower-case too — the previous
    # 'imageView' (capital V) could never match.
    image_patterns = [
        'fp.ps.netease.com',
        'cc.fp.ps.netease.com',
        'nos.netease.com',
        'imageview'  # image-processing query parameter marks an image
    ]
    return any(pattern in lowered for pattern in image_patterns)
def is_video_url(url):
    """Return True when the URL appears to reference a video.

    Matches by file-extension substring or by known video services /
    query markers; empty or None URLs return False.
    """
    if not url:
        return False
    lowered = url.lower()
    # Video file extensions.
    suffixes = ('.mp4', '.avi', '.mov', '.wmv', '.mkv', '.flv', '.webm', '.m4v')
    # Video hosting services plus the 'vframe=1' query marker.
    services = ('vod.cc.163.com', 'my.fp.ps.netease.com', 'vframe=1')
    return any(token in lowered for token in suffixes + services)
def determine_media_type(issue):
    """Classify an issue dict into an internal media-type code.

    Priority: title keywords, then chat msg_type, then URL markers.
    Returns 'NTES_GOD_IMAGES' when nothing matches or on any error.
    """
    try:
        title = issue.get('title', '')
        product_code = issue.get('product_code', '')
        url = issue.get('url', '')
        msg_type = issue.get('msg_type', '')
        # Title keywords. BUG FIX: specific video keywords ('短视频', '抖音',
        # 'INS', '小红书') are checked BEFORE the generic '视频' — previously
        # '视频' matched first and made those branches unreachable.
        if '图片' in title:
            return product_code + "_IMAGES"
        elif 'AI' in title:
            return product_code + "_AI_IMAGES"
        elif '短视频' in title:
            return product_code + "_SHORT_VIDEOS"
        elif '抖音' in title:
            return product_code + "_TIKTOK_VIDEOS"
        elif 'INS' in title:
            return product_code + "_INS_VIDEOS"
        elif '小红书' in title:
            return product_code + "_XIAOHONGSHU_VIDEOS"
        elif '视频' in title:
            return product_code + "_VIDEOS"
        # Chat message types map to fixed codes.
        if msg_type == 'P2P':
            return 'CHAT_P2P'
        elif msg_type == 'TEAM':
            return 'CHAT_TEAM'
        elif msg_type == 'ROOM':
            return 'CHAT_ROOM'
        elif msg_type == 'CHAT_ROOM_MSG':
            return 'CHAT_ROOM_MSG'
        # URL markers — only for NetEase object-storage links.
        if url and 'nos.netease.com' in url:
            if 'dasong' in url:
                return product_code + "_DASONG_VIDEOS"
            elif 'audiozhurong' in url:
                return product_code + "_SUPPLY_VIDEOS"
            elif 'high' in url:
                return product_code + "_HIGH_VIDEOS"
            elif 'ai' in url:
                # NOTE(review): bare substring 'ai' is very loose (matches
                # e.g. 'detail', 'main') — kept for compatibility.
                return product_code + "_AI_IMAGES"
            elif 'shortvideo' in url:
                return product_code + "_SHORT_VIDEOS"
            elif 'tiktok' in url:
                return product_code + "_TIKTOK_VIDEOS"
            elif 'ins' in url:
                return product_code + "_INS_VIDEOS"
            elif 'xiaohongshu' in url:
                return product_code + "_XIAOHONGSHU_VIDEOS"
            else:
                return product_code + "_IMAGES"
        # Default classification.
        return 'NTES_GOD_IMAGES'
    except Exception as e:
        log(f"确定工单类型时出错: {str(e)}", level='error')
        return 'NTES_GOD_IMAGES'
def get_coefficient(issue_data):
    """Return the conversion coefficient for one issue.

    Unknown media types fall back to the plain-image coefficient.
    """
    media_type = determine_media_type(issue_data)
    # Snapshot the live table under the lock so lookups are consistent.
    with coefficients_lock:
        snapshot = COEFFICIENTS.copy()
    try:
        return snapshot[media_type]
    except KeyError:
        log(f"Unknown media type: {media_type}, using default coefficient")
        return snapshot['NTES_GOD_IMAGES']
# Mapping from internal media-type code to the Chinese display name shown
# to the frontend; types missing here get a generated fallback name.
_MEDIA_TYPE_DISPLAY_NAMES = {
    'NTES_GOD_IMAGES': "网易大神APP图片",
    'NTES_GOD_VIDEOS': "网易大神APP视频",
    'NTES_GOD_CHAT_IMAGES': "网易大神APP聊天图片",
    'NTES_GOD_CHAT_VIDEOS': "网易大神APP聊天视频",
    'NTES_DASONG': "大神大宋视频",
    'SPIDER_VIDEO': "大神普通供给视频",
    'SPIDER_VIDEO_SP': "大神高优供给视频",
    'NTES_GOD_AI': "大神AI图片",
    'NTES_GOD_TOP': "大神短视频",
    'T_SPIDER_VIDEO': "大神tiktok普通视频",
    'T_SPIDER_VIDEO_SP': "大神tiktok高优视频",
    'V_SPIDER_VIDEO': "大神ins普通供给视频",
    'V_SPIDER_VIDEO_SP': "大神ins高优供给视频",
    'NTES_GOD_XHS': "大神小红书图片",
    'XHS_SPIDER_VIDEO': "小红书供给视频",
    'Cupid': "大神交友",
    'CHAT_P2P': "大神聊天/风险用户_私聊/私聊频繁",
    'CHAT_TEAM': "大神聊天/风险用户_群聊/群聊频繁",
    'CHAT_ROOM': "大神聊天_聊天室",
    'CHAT_ROOM_MSG': "风险用户_聊天室频繁"
}


def fetch_issue_data(cookie, username, create_start_time, create_end_time, close_start_time=None, close_end_time=None, max_pages=10000):
    """Fetch finished issues page by page from the Breeze API.

    Filters by creation window and close window (the close window defaults
    to the creation window). Retries up to 5 times when the API reports a
    total of 0 records. Returns None on any error, otherwise a dict with
    'stats' (frontend-ready summary incl. weighted totals) and 'issues'
    (the raw records).
    """
    issues = []
    headers = get_api_headers(cookie)
    page = 1
    stats = {
        'total_count': 0,
        'types': {}
    }
    # Default the close window to the creation window.
    if close_start_time is None:
        close_start_time = create_start_time
    if close_end_time is None:
        close_end_time = create_end_time
    # Retry settings for empty (total == 0) API responses.
    max_retries = 5
    retry_interval = 5
    current_retry = 0
    while page <= max_pages:
        try:
            params = {
                'pageNum': page,
                'pageSize': 500,  # 500 records per page
                'createTimeStart': create_start_time,
                'createTimeEnd': create_end_time,
                'closeTimeStart': close_start_time,
                'closeTimeEnd': close_end_time,
                'gameCode': 'a19',
                'handleUsername': username,
                'cold': 'false',
                'status': 'FINISH'
            }
            log(f"正在获取第 {page} 页数据,创建时间范围:{create_start_time}{create_end_time}")
            log(f"关闭时间范围:{close_start_time}{close_end_time}")
            response = requests.get(API_BASE_URL, headers=headers, params=params, timeout=15)
            if response.status_code == 200:
                try:
                    data = response.json()
                    if data.get('code') == 200 and 'data' in data and 'records' in data['data']:
                        items = data['data']['records']
                        total = data['data'].get('total', 0)
                        log(f"API返回成功找到 {len(items)} 条记录,总计 {total}")
                        # Empty result: retry a few times before giving up.
                        if total == 0 and current_retry < max_retries - 1:
                            current_retry += 1
                            log(f"API返回0条记录正在进行第{current_retry}次重试...")
                            time.sleep(retry_interval)
                            continue
                        elif total == 0 and current_retry >= max_retries - 1:
                            log(f"API返回0条记录已达到最大重试次数({max_retries}次),停止重试")
                            return None
                        # Report progress against the API-reported total.
                        if total > 0 and stats['total_count'] + len(items) <= total:
                            progress = ((stats['total_count'] + len(items)) / total) * 100
                            log(f"当前进度: {progress:.2f}% ({stats['total_count'] + len(items)}/{total})")
                    elif data.get('code') != 200:
                        # Explicit API-level failure.
                        error_msg = data.get('msg', '未知错误')
                        log(f"API返回错误{error_msg}")
                        log(f"API响应内容{response.text[:500]}")
                        return None
                    else:
                        # Unrecognised payload shape.
                        log(f"未知的API响应格式{response.text[:500]}")
                        return None
                    if not items:
                        log("当前页没有数据,结束获取")
                        break
                    issues.extend(items)
                    # Tally per-media-type counts for this page.
                    for item in items:
                        media_type = determine_media_type(item)
                        stats['types'][media_type] = stats['types'].get(media_type, 0) + 1
                    stats['total_count'] += len(items)
                    log(f"第{page}页有 {len(items)} 条记录,累计处理 {stats['total_count']} 条")
                    # Stop when the last page has been reached.
                    total_pages = data['data'].get('pages', 1)
                    if page >= total_pages or len(items) < params['pageSize']:
                        log(f"已获取所有数据,共 {total_pages} 页,处理了 {stats['total_count']} 条记录")
                        break
                except ValueError as e:
                    log(f"解析JSON数据失败: {str(e)}")
                    log(f"原始响应内容:{response.text[:500]}")
                    return None
            else:
                log(f"API请求失败: HTTP {response.status_code}")
                log(f"响应内容: {response.text[:500]}")
                return None
            page += 1
        except requests.exceptions.Timeout:
            log("API请求超时")
            return None
        except requests.exceptions.ConnectionError:
            log("网络连接错误,请检查网络连接")
            return None
        except Exception as e:
            log(f"获取数据失败: {str(e)}")
            return None
    if page > max_pages:
        log(f"达到最大页数限制({max_pages}页)停止获取。如需获取更多数据请增加max_pages参数。")
    # Reload coefficients so the weighting reflects the latest config.
    load_coefficients()
    with coefficients_lock:
        current_coefficients = COEFFICIENTS.copy()
    # Weighted total: count * coefficient per media type.
    weighted_total = 0
    for media_type, count in stats['types'].items():
        coefficient = current_coefficients.get(media_type, current_coefficients['NTES_GOD_IMAGES'])
        weighted_total += count * coefficient
    log(f"最终统计结果:工单总数 {stats['total_count']},折算总计 {weighted_total:.2f}")
    # Shape the summary the way the frontend expects, covering every
    # configured type even when its count is zero.
    frontend_stats = {
        'total': stats['total_count'],
        'weighted_total': weighted_total,
        'categories': {},
        'coefficients': current_coefficients  # expose the coefficients too
    }
    for media_type, coefficient in current_coefficients.items():
        count = stats['types'].get(media_type, 0)
        # Dict lookup replaces the previous 40-line elif chain; unknown
        # types fall back to a generated readable name.
        type_name = _MEDIA_TYPE_DISPLAY_NAMES.get(
            media_type, media_type.replace('_', ' ').replace('NTES', '大神'))
        frontend_stats['categories'][type_name] = {
            'count': count,
            'weighted': count * coefficient,
            'coefficient': coefficient
        }
    return {
        'stats': frontend_stats,
        'issues': issues
    }
def switch_business(cookie, business_id):
    """Switch the Breeze session to another business line.

    Posts the target business id; returns True only when the API confirms
    the switch (code == 200). Every failure path logs and returns False.
    """
    try:
        url = 'https://breeze.gameyw.netease.com/api/cms/user/switchBusiness'
        headers = {
            'accept': 'application/json, text/plain, */*',
            'accept-language': 'zh-CN,zh;q=0.9',
            'content-type': 'application/json',
            'cookie': cookie,
            'origin': 'https://breeze.opd.netease.com',
            'priority': 'u=1, i',
            'referer': 'https://breeze.opd.netease.com/',
            'sec-ch-ua': '"Google Chrome";v="131", "Chromium";v="131", "Not_A Brand";v="24"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-site',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
        }
        payload = {"businessId": business_id}
        log(f"正在切换业务线目标业务ID: {business_id}")
        response = requests.post(url, headers=headers, json=payload, timeout=15)
        if response.status_code != 200:
            log(f"业务线切换请求失败: HTTP {response.status_code}")
            return False
        try:
            result = response.json()
        except ValueError as e:
            log(f"解析业务线切换响应失败: {str(e)}")
            return False
        if result.get('code') != 200:
            log(f"业务线切换失败: {result.get('msg', '未知错误')}")
            return False
        log(f"业务线切换成功: {business_id}")
        time.sleep(1)  # give the backend a moment to apply the switch
        return True
    except Exception as e:
        log(f"切换业务线时出错: {str(e)}")
        return False
def check_current_hour_counts(cookie, username):
    """Collect and merge the current hour's finished-ticket statistics.

    Queries two business lines (Dashen review, id 7; image review, id 12)
    for tickets created today and closed within the current clock hour,
    merges the per-category stats, writes the snapshot to
    breeze_hourly.json next to this script, and returns the merged stats
    dict. Returns None on an unexpected error.
    """
    try:
        # Query windows: creation spans the whole day, close time is
        # restricted to the current clock hour.
        now = datetime.now()
        current_hour = now.hour  # NOTE(review): unused — kept as-is
        create_start_time = now.strftime('%Y-%m-%d') + " 00:00:00"
        create_end_time = now.strftime('%Y-%m-%d') + " 23:59:59"
        close_start_time = now.strftime('%Y-%m-%d %H') + ":00:00"
        close_end_time = now.strftime('%Y-%m-%d %H') + ":59:59"
        log(f"当前小时查询 - 创建时间范围: {create_start_time}{create_end_time}")
        log(f"当前小时查询 - 关闭时间范围: {close_start_time}{close_end_time}")
        # Business line 7: Dashen review.
        log("正在切换到清风审核-大神审核...")
        switch_business(cookie, 7)
        log("正在获取大神审核数据...")
        godResult = fetch_issue_data(cookie, username, create_start_time, create_end_time, close_start_time, close_end_time, max_pages=10000)
        if godResult is None:
            log("获取大神审核数据失败")
            # Treat a failed fetch as zero activity so the merge still runs.
            godStats = {
                'total': 0,
                'weighted_total': 0,
                'categories': {}
            }
        else:
            godStats = godResult['stats']
            log(f"大神审核数据获取成功,共 {godStats['total']} 条记录,折算总计 {godStats['weighted_total']:.2f}")
        # Business line 12: image review.
        log("正在切换到清风审核-图片审核...")
        switch_business(cookie, 12)
        log("正在获取图片审核数据...")
        imageResult = fetch_issue_data(cookie, username, create_start_time, create_end_time, close_start_time, close_end_time, max_pages=10000)
        if imageResult is None:
            log("获取图片审核数据失败")
            imageStats = {
                'total': 0,
                'weighted_total': 0,
                'categories': {}
            }
        else:
            imageStats = imageResult['stats']
            log(f"图片审核数据获取成功,共 {imageStats['total']} 条记录,折算总计 {imageStats['weighted_total']:.2f}")
        # Merge the two result sets: totals add, category sets union.
        mergedStats = {
            'total': godStats['total'] + imageStats['total'],
            'weighted_total': godStats['weighted_total'] + imageStats['weighted_total'],
            'categories': {}
        }
        allCategories = set(list(godStats['categories'].keys()) + list(imageStats['categories'].keys()))
        for category in allCategories:
            godCat = godStats['categories'].get(category, {'count': 0, 'coefficient': 0, 'weighted': 0})
            imageCat = imageStats['categories'].get(category, {'count': 0, 'coefficient': 0, 'weighted': 0})
            # Both sides should carry the same coefficient; take whichever is non-zero.
            coefficient = godCat['coefficient'] or imageCat['coefficient']
            mergedStats['categories'][category] = {
                'count': godCat['count'] + imageCat['count'],
                'coefficient': coefficient,
                'weighted': godCat['weighted'] + imageCat['weighted']
            }
        log(f"合并后的统计结果:工单总数 {mergedStats['total']},折算总计 {mergedStats['weighted_total']:.2f}")
        # Persist the merged snapshot for other processes to read.
        try:
            data = {
                'type': 'breeze_hourly',
                'stats': mergedStats,
                'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            }
            with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'breeze_hourly.json'), 'w', encoding='utf-8') as f:
                json.dump(data, f, ensure_ascii=False)
            log("小时数据已更新到共享文件")
        except Exception as e:
            # Persisting is best-effort; the stats are still returned.
            log(f"写入小时数据到共享文件失败: {str(e)}")
        return mergedStats
    except Exception as e:
        log(f"检查当前小时数据失败: {str(e)}")
        return None
def check_daily_counts(cookie, username):
    """Collect and merge today's full-day finished-ticket statistics.

    Same flow as check_current_hour_counts() but both creation and close
    windows cover the entire day; the snapshot is written to
    breeze_daily.json. Returns the merged stats dict, or None on error.
    """
    try:
        # Full-day windows for both creation and close time.
        today = datetime.now().strftime('%Y-%m-%d')
        create_start_time = "%s 00:00:00" % today
        create_end_time = "%s 23:59:59" % today
        close_start_time = create_start_time
        close_end_time = create_end_time
        log(f"全天查询 - 创建时间范围: {create_start_time}{create_end_time}")
        log(f"全天查询 - 关闭时间范围: {close_start_time}{close_end_time}")
        # Business line 7: Dashen review.
        log("正在切换到清风审核-大神审核...")
        switch_business(cookie, 7)
        log("正在获取大神审核全天数据...")
        godResult = fetch_issue_data(cookie, username, create_start_time, create_end_time, close_start_time, close_end_time, max_pages=10000)
        if godResult is None:
            log("获取大神审核全天数据失败")
            # Treat a failed fetch as zero activity so the merge still runs.
            godStats = {
                'total': 0,
                'weighted_total': 0,
                'categories': {}
            }
        else:
            godStats = godResult['stats']
            log(f"大神审核全天数据获取成功,共 {godStats['total']} 条记录,折算总计 {godStats['weighted_total']:.2f}")
        # Business line 12: image review.
        log("正在切换到清风审核-图片审核...")
        switch_business(cookie, 12)
        log("正在获取图片审核全天数据...")
        imageResult = fetch_issue_data(cookie, username, create_start_time, create_end_time, close_start_time, close_end_time, max_pages=10000)
        if imageResult is None:
            log("获取图片审核全天数据失败")
            imageStats = {
                'total': 0,
                'weighted_total': 0,
                'categories': {}
            }
        else:
            imageStats = imageResult['stats']
            log(f"图片审核全天数据获取成功,共 {imageStats['total']} 条记录,折算总计 {imageStats['weighted_total']:.2f}")
        # Merge the two result sets: totals add, category sets union.
        mergedStats = {
            'total': godStats['total'] + imageStats['total'],
            'weighted_total': godStats['weighted_total'] + imageStats['weighted_total'],
            'categories': {}
        }
        allCategories = set(list(godStats['categories'].keys()) + list(imageStats['categories'].keys()))
        for category in allCategories:
            godCat = godStats['categories'].get(category, {'count': 0, 'coefficient': 0, 'weighted': 0})
            imageCat = imageStats['categories'].get(category, {'count': 0, 'coefficient': 0, 'weighted': 0})
            # Both sides should carry the same coefficient; take whichever is non-zero.
            coefficient = godCat['coefficient'] or imageCat['coefficient']
            mergedStats['categories'][category] = {
                'count': godCat['count'] + imageCat['count'],
                'coefficient': coefficient,
                'weighted': godCat['weighted'] + imageCat['weighted']
            }
        log(f"合并后的全天统计结果:工单总数 {mergedStats['total']},折算总计 {mergedStats['weighted_total']:.2f}")
        # Persist the merged snapshot for other processes to read.
        try:
            data = {
                'type': 'breeze_daily',
                'stats': mergedStats,
                'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            }
            with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'breeze_daily.json'), 'w', encoding='utf-8') as f:
                json.dump(data, f, ensure_ascii=False)
            log("全天数据已更新到共享文件")
        except Exception as e:
            # Persisting is best-effort; the stats are still returned.
            log(f"写入全天数据到共享文件失败: {str(e)}")
        return mergedStats
    except Exception as e:
        log(f"检查今日数据失败: {str(e)}")
        return None
def monitor_hourly_thread():
    """Background loop: refresh the current-hour stats every two minutes.

    Polls every 30 seconds while credentials are not yet available.
    """
    log("每小时监控线程启动")
    while True:
        try:
            # Snapshot the shared credentials under the lock.
            with credentials_lock:
                cookie = user_credentials['cookie']
                username = user_credentials['username']
            if not (cookie and username):
                time.sleep(30)  # not logged in yet, poll again shortly
                continue
            check_current_hour_counts(cookie, username)
            time.sleep(120)  # refresh every 2 minutes
        except Exception as e:
            log(f"每小时监控线程异常: {str(e)}")
            time.sleep(60)  # back off for a minute after an error
def monitor_daily_thread():
    """Background loop: refresh the full-day stats every hour.

    Polls every 30 seconds while credentials are not yet available.
    """
    log("每日监控线程启动")
    while True:
        try:
            # Snapshot the shared credentials under the lock.
            with credentials_lock:
                cookie = user_credentials['cookie']
                username = user_credentials['username']
            if not (cookie and username):
                time.sleep(30)  # not logged in yet, poll again shortly
                continue
            check_daily_counts(cookie, username)
            time.sleep(3600)  # refresh every 60 minutes
        except Exception as e:
            log(f"每日监控线程异常: {str(e)}")
            time.sleep(60)  # back off for a minute after an error
# Config-file watcher thread
def monitor_config_thread():
    """Watch the coefficient config file and reload it when its mtime changes.

    After a reload, kicks off immediate hourly and daily recomputations in
    background threads when credentials are available.
    """
    log("配置监控线程启动")
    last_modified_time = 0
    while True:
        try:
            if os.path.exists(COEFFICIENTS_CONFIG_FILE):
                mtime = os.path.getmtime(COEFFICIENTS_CONFIG_FILE)
                if mtime > last_modified_time:
                    log(f"检测到配置文件变化,重新加载系数")
                    load_coefficients()
                    # Recompute stats right away so the new coefficients show up.
                    with credentials_lock:
                        cookie = user_credentials['cookie']
                        username = user_credentials['username']
                    if cookie and username:
                        log("系数变更后立即更新数据...")
                        threading.Thread(target=check_current_hour_counts, args=(cookie, username)).start()
                        threading.Thread(target=check_daily_counts, args=(cookie, username)).start()
                    last_modified_time = mtime
            time.sleep(10)  # check every 10 seconds so updates land quickly
        except Exception as e:
            log(f"配置监控线程异常: {str(e)}")
            time.sleep(60)  # back off for a minute after an error
def _run_force_hourly_check(cookie, username):
    """Force-mode manual check: fetch the current hour's data from both
    business lines, merge it and write breeze_hourly.json.

    Mirrors check_current_hour_counts() but logs with the 强制模式 prefix.
    """
    now = datetime.now()
    # Creation window is the whole day; close window is the current hour.
    create_start_time = now.strftime('%Y-%m-%d') + " 00:00:00"
    create_end_time = now.strftime('%Y-%m-%d') + " 23:59:59"
    close_start_time = now.strftime('%Y-%m-%d %H') + ":00:00"
    close_end_time = now.strftime('%Y-%m-%d %H') + ":59:59"
    log(f"强制模式 - 创建时间范围: {create_start_time}{create_end_time}")
    log(f"强制模式 - 关闭时间范围: {close_start_time}{close_end_time}")
    # Business line 7: Dashen review.
    log("正在切换到清风审核-大神审核...")
    switch_business(cookie, 7)
    log("正在获取大神审核数据...")
    godResult = fetch_issue_data(cookie, username, create_start_time, create_end_time, close_start_time, close_end_time, max_pages=10000)
    if godResult is None:
        log("获取大神审核数据失败")
        godStats = {'total': 0, 'weighted_total': 0, 'categories': {}}
    else:
        godStats = godResult['stats']
        log(f"大神审核数据获取成功,共 {godStats['total']} 条记录,折算总计 {godStats['weighted_total']:.2f}")
    # Business line 12: image review.
    log("正在切换到清风审核-图片审核...")
    switch_business(cookie, 12)
    log("正在获取图片审核数据...")
    imageResult = fetch_issue_data(cookie, username, create_start_time, create_end_time, close_start_time, close_end_time, max_pages=10000)
    if imageResult is None:
        log("获取图片审核数据失败")
        imageStats = {'total': 0, 'weighted_total': 0, 'categories': {}}
    else:
        imageStats = imageResult['stats']
        log(f"图片审核数据获取成功,共 {imageStats['total']} 条记录,折算总计 {imageStats['weighted_total']:.2f}")
    # Merge the two result sets: totals add, category sets union.
    mergedStats = {
        'total': godStats['total'] + imageStats['total'],
        'weighted_total': godStats['weighted_total'] + imageStats['weighted_total'],
        'categories': {}
    }
    allCategories = set(list(godStats['categories'].keys()) + list(imageStats['categories'].keys()))
    for category in allCategories:
        godCat = godStats['categories'].get(category, {'count': 0, 'coefficient': 0, 'weighted': 0})
        imageCat = imageStats['categories'].get(category, {'count': 0, 'coefficient': 0, 'weighted': 0})
        # Both sides should carry the same coefficient; take whichever is non-zero.
        coefficient = godCat['coefficient'] or imageCat['coefficient']
        mergedStats['categories'][category] = {
            'count': godCat['count'] + imageCat['count'],
            'coefficient': coefficient,
            'weighted': godCat['weighted'] + imageCat['weighted']
        }
    log(f"合并后的统计结果:工单总数 {mergedStats['total']},折算总计 {mergedStats['weighted_total']:.2f}")
    # Persist the snapshot for other processes (best-effort).
    try:
        data = {
            'type': 'breeze_hourly',
            'stats': mergedStats,
            'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }
        with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'breeze_hourly.json'), 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False)
        log("小时数据已更新到共享文件")
    except Exception as e:
        log(f"写入小时数据到共享文件失败: {str(e)}")


def _apply_coefficient_update_from_argv():
    """Handle `--update-coefficients TYPE VALUE`: validate, update the live
    table and persist it. The caller exits afterwards."""
    # BUG FIX: the invocation is `script --update-coefficients TYPE VALUE`,
    # i.e. 4 argv entries — the old check required >= 5 and silently
    # skipped the update.
    if len(sys.argv) >= 4:
        try:
            coefficient_type = sys.argv[2]
            coefficient_value = float(sys.argv[3])
            if coefficient_type in COEFFICIENTS:
                log(f"更新系数:{coefficient_type}={coefficient_value}")
                with coefficients_lock:
                    COEFFICIENTS[coefficient_type] = coefficient_value
                save_coefficients()
            else:
                log(f"未知的系数类型: {coefficient_type}")
        except ValueError as e:
            log(f"系数更新失败: {str(e)}")
            log("系数必须是有效的数字")
        except IndexError:
            log("参数不足,无法更新系数")
    log("系数更新完成,退出程序")


def main():
    """Entry point: parse CLI flags, run a one-shot check or coefficient
    update, or start the background monitor threads."""
    log("Breeze监控系统启动")
    # Parse command-line flags.
    check_now = False
    force_mode = False
    update_coefficients = False
    no_config_check = False
    for arg in sys.argv:
        if arg == "--check-now":
            check_now = True
            log("收到立即检查参数")
        elif arg == "--force":
            force_mode = True
            log("收到强制模式参数")
        elif arg == "--update-coefficients":
            update_coefficients = True
            log("收到更新系数参数")
        elif arg == "--no-config-check":
            no_config_check = True
            log("收到禁用配置检查参数")
    # Load coefficients from the config file unless explicitly disabled.
    if not no_config_check:
        load_coefficients()
    else:
        log("跳过配置检查,使用当前已加载的系数")
    # One-shot coefficient update, then exit.
    if update_coefficients:
        _apply_coefficient_update_from_argv()
        sys.exit(0)
    # Ensure the output directory exists (it is the script's own directory,
    # so this is effectively a no-op safeguard).
    script_dir = os.path.dirname(os.path.abspath(__file__))
    if not os.path.exists(script_dir):
        os.makedirs(script_dir)
    # Load credentials from the environment.
    init_credentials()
    # One-shot manual check, then exit.
    if check_now:
        with credentials_lock:
            cookie = user_credentials['cookie']
            username = user_credentials['username']
        if cookie and username:
            log("开始执行手动检查")
            if force_mode:
                _run_force_hourly_check(cookie, username)
            else:
                check_current_hour_counts(cookie, username)
            log("手动检查完成")
        else:
            log("无法执行手动检查: 凭据不可用")
        sys.exit(0)
    # Start the background monitor threads (daemonised so Ctrl-C exits).
    hourly_thread = threading.Thread(target=monitor_hourly_thread)
    hourly_thread.daemon = True
    hourly_thread.start()
    daily_thread = threading.Thread(target=monitor_daily_thread)
    daily_thread.daemon = True
    daily_thread.start()
    config_thread = threading.Thread(target=monitor_config_thread)
    config_thread.daemon = True
    config_thread.start()
    # Keep the main thread alive.
    try:
        while True:
            time.sleep(10)
    except KeyboardInterrupt:
        log("程序被用户中断")
    except Exception as e:
        log(f"主线程异常: {str(e)}")
    finally:
        log("Breeze监控系统关闭")


if __name__ == '__main__':
    main()