From 5d471440e6a32a4a8ef63e25b6a174f6a00196ee Mon Sep 17 00:00:00 2001
From: UnCLAS-Prommer
Date: Sat, 19 Apr 2025 22:41:52 +0800
Subject: [PATCH 01/26] Split _execute_request
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/plugins/PFC/reply_generator.py | 2 +-
.../reasoning_chat/reasoning_chat.py | 2 +-
src/plugins/models/utils_model_new.py | 1231 +++++++++++++++++
3 files changed, 1233 insertions(+), 2 deletions(-)
create mode 100644 src/plugins/models/utils_model_new.py
diff --git a/src/plugins/PFC/reply_generator.py b/src/plugins/PFC/reply_generator.py
index bb471900..a27abecd 100644
--- a/src/plugins/PFC/reply_generator.py
+++ b/src/plugins/PFC/reply_generator.py
@@ -151,7 +151,7 @@ class ReplyGenerator:
return content
except Exception as e:
- logger.error(f"生成回复时出错: {e}")
+ logger.error(f"生成回复时出错: {str(e)}")
return "抱歉,我现在有点混乱,让我重新思考一下..."
async def check_reply(self, reply: str, goal: str, retry_count: int = 0) -> Tuple[bool, str, bool]:
diff --git a/src/plugins/chat_module/reasoning_chat/reasoning_chat.py b/src/plugins/chat_module/reasoning_chat/reasoning_chat.py
index 5809e31d..46eeb79f 100644
--- a/src/plugins/chat_module/reasoning_chat/reasoning_chat.py
+++ b/src/plugins/chat_module/reasoning_chat/reasoning_chat.py
@@ -255,7 +255,7 @@ class ReasoningChat:
info_catcher.catch_after_generate_response(timing_results["生成回复"])
except Exception as e:
- logger.error(f"回复生成出现错误:str{e}")
+ logger.error(f"回复生成出现错误:{str(e)}")
response_set = None
if not response_set:
diff --git a/src/plugins/models/utils_model_new.py b/src/plugins/models/utils_model_new.py
new file mode 100644
index 00000000..8535476c
--- /dev/null
+++ b/src/plugins/models/utils_model_new.py
@@ -0,0 +1,1231 @@
+import asyncio
+import json
+import re
+from datetime import datetime
+from typing import Tuple, Union, Dict, Any
+
+import aiohttp
+from aiohttp.client import ClientResponse
+
+from src.common.logger import get_module_logger
+import base64
+from PIL import Image
+import io
+import os
+from ...common.database import db
+from ...config.config import global_config
+
+logger = get_module_logger("model_utils")
+
+
+class PayLoadTooLargeError(Exception):
+ """自定义异常类,用于处理请求体过大错误"""
+
+ def __init__(self, message: str):
+ super().__init__(message)
+ self.message = message
+
+ def __str__(self):
+ return "请求体过大,请尝试压缩图片或减少输入内容。"
+
+
+class RequestAbortException(Exception):
+ """自定义异常类,用于处理请求中断异常"""
+
+ def __init__(self, message: str, response: ClientResponse):
+ super().__init__(message)
+ self.message = message
+ self.response = response
+
+ def __str__(self):
+ return self.message
+
+
+class PermissionDeniedException(Exception):
+ """自定义异常类,用于处理访问拒绝的异常"""
+
+ def __init__(self, message: str):
+ super().__init__(message)
+ self.message = message
+
+ def __str__(self):
+ return self.message
+
+
+# Mapping of common error codes
+error_code_mapping = {
+ 400: "Invalid parameters",
+ 401: "API key error; authentication failed. Please check the settings in /config/bot_config.toml and .env~",
+ 402: "Insufficient account balance",
+ 403: "Real-name verification required, or insufficient balance",
+ 404: "Not Found",
+ 429: "Too many requests, please try again later",
+ 500: "Internal server error",
+ 503: "Server overloaded",
+}
+
+
+class LLMRequest:
+ # Models whose parameters need transformation; a class variable to avoid duplication
+ MODELS_NEEDING_TRANSFORMATION = [
+ "o3-mini",
+ "o1-mini",
+ "o1-preview",
+ "o1-2024-12-17",
+ "o1-preview-2024-09-12",
+ "o3-mini-2025-01-31",
+ "o1-mini-2024-09-12",
+ ]
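+ # Note: these are OpenAI o-series reasoning models; as _transform_parameters
+ # below shows, they reject `temperature` and take `max_completion_tokens`
+ # instead of `max_tokens`.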
+
+ def __init__(self, model: dict, **kwargs):
+ # Look up the actual values for the configured environment variable names
+ try:
+ self.api_key = os.environ[model["key"]]
+ self.base_url = os.environ[model["base_url"]]
+ except KeyError as e:
+ logger.error(f"原始 model dict 信息:{model}")
+ logger.error(f"配置错误:找不到对应的配置项 - {str(e)}")
+ raise ValueError(f"配置错误:找不到对应的配置项 - {str(e)}") from e
+ self.model_name: str = model["name"]
+ self.params = kwargs
+
+ self.stream = model.get("stream", False)
+ self.pri_in = model.get("pri_in", 0)
+ self.pri_out = model.get("pri_out", 0)
+
+ # Initialize the database collections
+ self._init_database()
+
+ # Pop request_type from kwargs; defaults to "default" if not provided
+ self.request_type = kwargs.pop("request_type", "default")
+
+ @staticmethod
+ def _init_database():
+ """初始化数据库集合"""
+ try:
+ # 创建llm_usage集合的索引
+ db.llm_usage.create_index([("timestamp", 1)])
+ db.llm_usage.create_index([("model_name", 1)])
+ db.llm_usage.create_index([("user_id", 1)])
+ db.llm_usage.create_index([("request_type", 1)])
+ except Exception as e:
+ logger.error(f"创建数据库索引失败: {str(e)}")
+
+ def _record_usage(
+ self,
+ prompt_tokens: int,
+ completion_tokens: int,
+ total_tokens: int,
+ user_id: str = "system",
+ request_type: str = None,
+ endpoint: str = "/chat/completions",
+ ):
+ """记录模型使用情况到数据库
+ Args:
+ prompt_tokens: 输入token数
+ completion_tokens: 输出token数
+ total_tokens: 总token数
+ user_id: 用户ID,默认为system
+ request_type: 请求类型(chat/embedding/image/topic/schedule)
+ endpoint: API端点
+ """
+ # 如果 request_type 为 None,则使用实例变量中的值
+ if request_type is None:
+ request_type = self.request_type
+
+ try:
+ usage_data = {
+ "model_name": self.model_name,
+ "user_id": user_id,
+ "request_type": request_type,
+ "endpoint": endpoint,
+ "prompt_tokens": prompt_tokens,
+ "completion_tokens": completion_tokens,
+ "total_tokens": total_tokens,
+ "cost": self._calculate_cost(prompt_tokens, completion_tokens),
+ "status": "success",
+ "timestamp": datetime.now(),
+ }
+ db.llm_usage.insert_one(usage_data)
+ logger.trace(
+ f"Token usage - model: {self.model_name}, "
+ f"user: {user_id}, type: {request_type}, "
+ f"prompt: {prompt_tokens}, completion: {completion_tokens}, "
+ f"total: {total_tokens}"
+ )
+ except Exception as e:
+ logger.error(f"Failed to record token usage: {str(e)}")
+
+ def _calculate_cost(self, prompt_tokens: int, completion_tokens: int) -> float:
+ """计算API调用成本
+ 使用模型的pri_in和pri_out价格计算输入和输出的成本
+
+ Args:
+ prompt_tokens: 输入token数量
+ completion_tokens: 输出token数量
+
+ Returns:
+ float: 总成本(元)
+ """
+ # 使用模型的pri_in和pri_out计算成本
+ input_cost = (prompt_tokens / 1000000) * self.pri_in
+ output_cost = (completion_tokens / 1000000) * self.pri_out
+ return round(input_cost + output_cost, 6)
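+ # Worked example (illustrative prices, not real ones): with pri_in=2.0 and
+ # pri_out=8.0 CNY per million tokens, 1500 prompt tokens and 500 completion
+ # tokens cost (1500 / 1000000) * 2.0 + (500 / 1000000) * 8.0 = 0.007 CNY.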
+
+ '''
+ async def _execute_request(
+ self,
+ endpoint: str,
+ prompt: str = None,
+ image_base64: str = None,
+ image_format: str = None,
+ payload: dict = None,
+ retry_policy: dict = None,
+ response_handler: callable = None,
+ user_id: str = "system",
+ request_type: str = None,
+ ):
+ """统一请求执行入口
+ Args:
+ endpoint: API端点路径 (如 "chat/completions")
+ prompt: prompt文本
+ image_base64: 图片的base64编码
+ image_format: 图片格式
+ payload: 请求体数据
+ retry_policy: 自定义重试策略
+ response_handler: 自定义响应处理器
+ user_id: 用户ID
+ request_type: 请求类型
+ """
+
+ if request_type is None:
+ request_type = self.request_type
+
+ # Merge retry policies
+ default_retry = {
+ "max_retries": 3,
+ "base_wait": 10,
+ "retry_codes": [429, 413, 500, 503],
+ "abort_codes": [400, 401, 402, 403],
+ }
+ policy = {**default_retry, **(retry_policy or {})}
+
+ # Mapping of common error codes
+ error_code_mapping = {
+ 400: "Invalid parameters",
+ 401: "API key error; authentication failed. Please check the settings in /config/bot_config.toml and .env~",
+ 402: "Insufficient account balance",
+ 403: "Real-name verification required, or insufficient balance",
+ 404: "Not Found",
+ 429: "Too many requests, please try again later",
+ 500: "Internal server error",
+ 503: "Server overloaded",
+ }
+
+ api_url = f"{self.base_url.rstrip('/')}/{endpoint.lstrip('/')}"
+ # Check whether streaming is enabled
+ stream_mode = self.stream
+ # logger_msg = "Entering streaming mode, " if stream_mode else ""
+ # logger.debug(f"{logger_msg}sending request to URL: {api_url}")
+ # logger.info(f"Using model: {self.model_name}")
+
+ # Build the request body
+ if image_base64:
+ payload = await self._build_payload(prompt, image_base64, image_format)
+ elif payload is None:
+ payload = await self._build_payload(prompt)
+
+ # Streaming flag
+ # Build the payload first, then add the streaming flag
+ if stream_mode:
+ payload["stream"] = stream_mode
+
+ for retry in range(policy["max_retries"]):
+ try:
+ # Use a context manager for the session
+ headers = await self._build_headers()
+ # Apparently required for OpenAI streaming; adding it had no effect on Alibaba Cloud's qwq-plus
+ if stream_mode:
+ headers["Accept"] = "text/event-stream"
+
+ async with aiohttp.ClientSession() as session:
+ try:
+ async with session.post(api_url, headers=headers, json=payload) as response:
+ # Handle status codes that warrant a retry
+ if response.status in policy["retry_codes"]:
+ wait_time = policy["base_wait"] * (2**retry)
+ logger.warning(
+ f"Model {self.model_name} error code: {response.status}, retrying in {wait_time}s"
+ )
+ if response.status == 413:
+ logger.warning("Request body too large, trying to compress...")
+ image_base64 = compress_base64_image_by_scale(image_base64)
+ payload = await self._build_payload(prompt, image_base64, image_format)
+ elif response.status in [500, 503]:
+ logger.error(
+ f"Model {self.model_name} error code: {response.status} - {error_code_mapping.get(response.status)}"
+ )
+ raise RuntimeError("Server overloaded; the model failed to recover QAQ")
+ else:
+ logger.warning(f"Model {self.model_name} rate limited (429), retrying in {wait_time}s...")
+
+ await asyncio.sleep(wait_time)
+ continue
+ elif response.status in policy["abort_codes"]:
+ logger.error(
+ f"Model {self.model_name} error code: {response.status} - {error_code_mapping.get(response.status)}"
+ )
+ # Try to fetch and log the detailed error returned by the server
+ try:
+ error_json = await response.json()
+ if error_json and isinstance(error_json, list) and len(error_json) > 0:
+ for error_item in error_json:
+ if "error" in error_item and isinstance(error_item["error"], dict):
+ error_obj = error_item["error"]
+ error_code = error_obj.get("code")
+ error_message = error_obj.get("message")
+ error_status = error_obj.get("status")
+ logger.error(
+ f"Server error details: code={error_code}, status={error_status}, "
+ f"message={error_message}"
+ )
+ elif isinstance(error_json, dict) and "error" in error_json:
+ # Handle a single error object
+ error_obj = error_json.get("error", {})
+ error_code = error_obj.get("code")
+ error_message = error_obj.get("message")
+ error_status = error_obj.get("status")
+ logger.error(
+ f"Server error details: code={error_code}, status={error_status}, message={error_message}"
+ )
+ else:
+ # Log the raw error response
+ logger.error(f"Server error response: {error_json}")
+ except Exception as e:
+ logger.warning(f"Failed to parse the server error response: {str(e)}")
+
+ if response.status == 403:
+ # Downgrade handling applies only to SiliconFlow's V3 and R1
+ if (
+ self.model_name.startswith("Pro/deepseek-ai")
+ and self.base_url == "https://api.siliconflow.cn/v1/"
+ ):
+ old_model_name = self.model_name
+ self.model_name = self.model_name[4:] # Strip the "Pro/" prefix
+ logger.warning(
+ f"403 detected; model downgraded from {old_model_name} to {self.model_name}"
+ )
+
+ # Update the global config
+ if global_config.llm_normal.get("name") == old_model_name:
+ global_config.llm_normal["name"] = self.model_name
+ logger.warning(f"Temporarily downgraded the global llm_normal model to {self.model_name}")
+
+ if global_config.llm_reasoning.get("name") == old_model_name:
+ global_config.llm_reasoning["name"] = self.model_name
+ logger.warning(
+ f"Temporarily downgraded the global llm_reasoning model to {self.model_name}"
+ )
+
+ # Update the model name in the payload
+ if payload and "model" in payload:
+ payload["model"] = self.model_name
+
+ # Retry the request
+ retry -= 1 # Does not count toward the retry limit
+ continue
+
+ raise RuntimeError(f"Request rejected: {error_code_mapping.get(response.status)}")
+
+ response.raise_for_status()
+ reasoning_content = ""
+
+ # Convert streaming output into a non-streaming result
+ if stream_mode:
+ flag_delta_content_finished = False
+ accumulated_content = ""
+ usage = None # Initialize usage to avoid an undefined-variable error
+
+ async for line_bytes in response.content:
+ try:
+ line = line_bytes.decode("utf-8").strip()
+ if not line:
+ continue
+ if line.startswith("data:"):
+ data_str = line[5:].strip()
+ if data_str == "[DONE]":
+ break
+ try:
+ chunk = json.loads(data_str)
+ if flag_delta_content_finished:
+ chunk_usage = chunk.get("usage", None)
+ if chunk_usage:
+ usage = chunk_usage # Capture token usage
+ else:
+ delta = chunk["choices"][0]["delta"]
+ delta_content = delta.get("content")
+ if delta_content is None:
+ delta_content = ""
+ accumulated_content += delta_content
+ # Check whether the streamed text has finished
+ finish_reason = chunk["choices"][0].get("finish_reason")
+ if delta.get("reasoning_content", None):
+ reasoning_content += delta["reasoning_content"]
+ if finish_reason == "stop":
+ chunk_usage = chunk.get("usage", None)
+ if chunk_usage:
+ usage = chunk_usage
+ break
+ # Some platforms don't return token usage until after the text ends, so fetch one more chunk
+ flag_delta_content_finished = True
+
+ except Exception as e:
+ logger.exception(f"模型 {self.model_name} 解析流式输出错误: {str(e)}")
+ except GeneratorExit:
+ logger.warning("模型 {self.model_name} 流式输出被中断,正在清理资源...")
+ # 确保资源被正确清理
+ await response.release()
+ # 返回已经累积的内容
+ result = {
+ "choices": [
+ {
+ "message": {
+ "content": accumulated_content,
+ "reasoning_content": reasoning_content,
+ # Streaming output may have no tool calls, so no tool_calls field is needed here
+ }
+ }
+ ],
+ "usage": usage,
+ }
+ return (
+ response_handler(result)
+ if response_handler
+ else self._default_response_handler(result, user_id, request_type, endpoint)
+ )
+ except Exception as e:
+ logger.error(f"模型 {self.model_name} 处理流式输出时发生错误: {str(e)}")
+ # 确保在发生错误时也能正确清理资源
+ try:
+ await response.release()
+ except Exception as cleanup_error:
+ logger.error(f"清理资源时发生错误: {cleanup_error}")
+ # 返回已经累积的内容
+ result = {
+ "choices": [
+ {
+ "message": {
+ "content": accumulated_content,
+ "reasoning_content": reasoning_content,
+ # Streaming output may have no tool calls, so no tool_calls field is needed here
+ }
+ }
+ ],
+ "usage": usage,
+ }
+ return (
+ response_handler(result)
+ if response_handler
+ else self._default_response_handler(result, user_id, request_type, endpoint)
+ )
+ content = accumulated_content
+ think_match = re.search(r"(.*?)", content, re.DOTALL)
+ if think_match:
+ reasoning_content = think_match.group(1).strip()
+ content = re.sub(r".*?", "", content, flags=re.DOTALL).strip()
+ # 构造一个伪result以便调用自定义响应处理器或默认处理器
+ result = {
+ "choices": [
+ {
+ "message": {
+ "content": content,
+ "reasoning_content": reasoning_content,
+ # Streaming output may have no tool calls, so no tool_calls field is needed here
+ }
+ }
+ ],
+ "usage": usage,
+ }
+ return (
+ response_handler(result)
+ if response_handler
+ else self._default_response_handler(result, user_id, request_type, endpoint)
+ )
+ else:
+ result = await response.json()
+ # Use the custom handler, or fall back to the default
+ return (
+ response_handler(result)
+ if response_handler
+ else self._default_response_handler(result, user_id, request_type, endpoint)
+ )
+
+ except (aiohttp.ClientError, asyncio.TimeoutError) as e:
+ if retry < policy["max_retries"] - 1:
+ wait_time = policy["base_wait"] * (2**retry)
+ logger.error(f"模型 {self.model_name} 网络错误,等待{wait_time}秒后重试... 错误: {str(e)}")
+ await asyncio.sleep(wait_time)
+ continue
+ else:
+ logger.critical(f"模型 {self.model_name} 网络错误达到最大重试次数: {str(e)}")
+ raise RuntimeError(f"网络请求失败: {str(e)}") from e
+ except Exception as e:
+ logger.critical(f"模型 {self.model_name} 未预期的错误: {str(e)}")
+ raise RuntimeError(f"请求过程中发生错误: {str(e)}") from e
+
+ except aiohttp.ClientResponseError as e:
+ # Handle response errors raised by aiohttp
+ if retry < policy["max_retries"] - 1:
+ wait_time = policy["base_wait"] * (2**retry)
+ logger.error(
+ f"Model {self.model_name} HTTP response error, retrying in {wait_time}s... status: {e.status}, error: {e.message}"
+ )
+ try:
+ if hasattr(e, "response") and e.response and hasattr(e.response, "text"):
+ error_text = await e.response.text()
+ try:
+ error_json = json.loads(error_text)
+ if isinstance(error_json, list) and len(error_json) > 0:
+ for error_item in error_json:
+ if "error" in error_item and isinstance(error_item["error"], dict):
+ error_obj = error_item["error"]
+ logger.error(
+ f"模型 {self.model_name} 服务器错误详情: 代码={error_obj.get('code')}, "
+ f"状态={error_obj.get('status')}, "
+ f"消息={error_obj.get('message')}"
+ )
+ elif isinstance(error_json, dict) and "error" in error_json:
+ error_obj = error_json.get("error", {})
+ logger.error(
+ f"模型 {self.model_name} 服务器错误详情: 代码={error_obj.get('code')}, "
+ f"状态={error_obj.get('status')}, "
+ f"消息={error_obj.get('message')}"
+ )
+ else:
+ logger.error(f"模型 {self.model_name} 服务器错误响应: {error_json}")
+ except (json.JSONDecodeError, TypeError) as json_err:
+ logger.warning(
+ f"Model {self.model_name} response is not valid JSON: {str(json_err)}, raw content: {error_text[:200]}"
+ )
+ except (AttributeError, TypeError, ValueError) as parse_err:
+ logger.warning(f"Model {self.model_name} could not parse the error response content: {str(parse_err)}")
+
+ await asyncio.sleep(wait_time)
+ else:
+ logger.critical(
+ f"Model {self.model_name} HTTP response error hit the retry limit: status: {e.status}, error: {e.message}"
+ )
+ # Safely inspect and log request details
+ if (
+ image_base64
+ and payload
+ and isinstance(payload, dict)
+ and "messages" in payload
+ and len(payload["messages"]) > 0
+ ):
+ if isinstance(payload["messages"][0], dict) and "content" in payload["messages"][0]:
+ content = payload["messages"][0]["content"]
+ if isinstance(content, list) and len(content) > 1 and "image_url" in content[1]:
+ payload["messages"][0]["content"][1]["image_url"]["url"] = (
+ f"data:image/{image_format.lower() if image_format else 'jpeg'};base64,"
+ f"{image_base64[:10]}...{image_base64[-10:]}"
+ )
+ logger.critical(f"请求头: {await self._build_headers(no_key=True)} 请求体: {payload}")
+ raise RuntimeError(f"模型 {self.model_name} API请求失败: 状态码 {e.status}, {e.message}") from e
+ except Exception as e:
+ if retry < policy["max_retries"] - 1:
+ wait_time = policy["base_wait"] * (2**retry)
+ logger.error(f"模型 {self.model_name} 请求失败,等待{wait_time}秒后重试... 错误: {str(e)}")
+ await asyncio.sleep(wait_time)
+ else:
+ logger.critical(f"模型 {self.model_name} 请求失败: {str(e)}")
+ # 安全地检查和记录请求详情
+ if (
+ image_base64
+ and payload
+ and isinstance(payload, dict)
+ and "messages" in payload
+ and len(payload["messages"]) > 0
+ ):
+ if isinstance(payload["messages"][0], dict) and "content" in payload["messages"][0]:
+ content = payload["messages"][0]["content"]
+ if isinstance(content, list) and len(content) > 1 and "image_url" in content[1]:
+ payload["messages"][0]["content"][1]["image_url"]["url"] = (
+ f"data:image/{image_format.lower() if image_format else 'jpeg'};base64,"
+ f"{image_base64[:10]}...{image_base64[-10:]}"
+ )
+ logger.critical(f"请求头: {await self._build_headers(no_key=True)} 请求体: {payload}")
+ raise RuntimeError(f"模型 {self.model_name} API请求失败: {str(e)}") from e
+
+ logger.error(f"模型 {self.model_name} 达到最大重试次数,请求仍然失败")
+ raise RuntimeError(f"模型 {self.model_name} 达到最大重试次数,API请求仍然失败")
+ '''
+
+ async def _prepare_request(
+ self,
+ endpoint: str,
+ prompt: str = None,
+ image_base64: str = None,
+ image_format: str = None,
+ payload: dict = None,
+ retry_policy: dict = None,
+ ) -> Dict[str, Any]:
+ """配置请求参数
+ Args:
+ endpoint: API端点路径 (如 "chat/completions")
+ prompt: prompt文本
+ image_base64: 图片的base64编码
+ image_format: 图片格式
+ payload: 请求体数据
+ retry_policy: 自定义重试策略
+ request_type: 请求类型
+ """
+
+ # Merge retry policies
+ default_retry = {
+ "max_retries": 3,
+ "base_wait": 10,
+ "retry_codes": [429, 413, 500, 503],
+ "abort_codes": [400, 401, 402, 403],
+ }
+ policy = {**default_retry, **(retry_policy or {})}
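+ # With the defaults above, each retry waits base_wait * 2**retry seconds,
+ # i.e. 10s, 20s, 40s for retries 0, 1 and 2.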
+
+ api_url = f"{self.base_url.rstrip('/')}/{endpoint.lstrip('/')}"
+
+ stream_mode = self.stream
+
+ # Build the request body
+ if image_base64:
+ payload = await self._build_payload(prompt, image_base64, image_format)
+ elif payload is None:
+ payload = await self._build_payload(prompt)
+
+ if stream_mode:
+ payload["stream"] = stream_mode
+
+ return {
+ "policy": policy,
+ "payload": payload,
+ "api_url": api_url,
+ "stream_mode": stream_mode,
+ "image_base64": image_base64, # 保留必要的exception处理所需的原始数据
+ "image_format": image_format,
+ "prompt": prompt
+ }
+
+ async def _execute_request(
+ self,
+ endpoint: str,
+ prompt: str = None,
+ image_base64: str = None,
+ image_format: str = None,
+ payload: dict = None,
+ retry_policy: dict = None,
+ response_handler: callable = None,
+ user_id: str = "system",
+ request_type: str = None,
+ ):
+ """统一请求执行入口
+ Args:
+ endpoint: API端点路径 (如 "chat/completions")
+ prompt: prompt文本
+ image_base64: 图片的base64编码
+ image_format: 图片格式
+ payload: 请求体数据
+ retry_policy: 自定义重试策略
+ response_handler: 自定义响应处理器
+ user_id: 用户ID
+ request_type: 请求类型
+ """
+ # Get the request configuration
+ request_content = await self._prepare_request(
+ endpoint, prompt, image_base64, image_format, payload, retry_policy
+ )
+ if request_type is None:
+ request_type = self.request_type
+ for retry in range(request_content["policy"]["max_retries"]):
+ try:
+ # Use a context manager for the session
+ headers = await self._build_headers()
+ # Apparently required for OpenAI streaming; adding it had no effect on Alibaba Cloud's qwq-plus
+ if request_content["stream_mode"]:
+ headers["Accept"] = "text/event-stream"
+ async with aiohttp.ClientSession() as session:
+ async with session.post(
+ request_content["api_url"], headers=headers, json=request_content["payload"]
+ ) as response:
+ handled_result = await self._handle_response(
+ response, request_content, retry, response_handler, user_id, request_type, endpoint
+ )
+ return handled_result
+ except Exception as e:
+ handled_payload, count_delta = await self._handle_exception(e, retry, request_content)
+ retry += count_delta # Downgrades don't count toward the retry limit
+ if handled_payload:
+ # If the downgrade succeeded, use the rebuilt request body
+ request_content["payload"] = handled_payload
+ continue
+
+ logger.error(f"模型 {self.model_name} 达到最大重试次数,请求仍然失败")
+ raise RuntimeError(f"模型 {self.model_name} 达到最大重试次数,API请求仍然失败")
+
+ async def _handle_response(
+ self,
+ response: ClientResponse,
+ request_content: Dict[str, Any],
+ retry_count: int,
+ response_handler: callable,
+ user_id,
+ request_type,
+ endpoint,
+ ) -> Union[Dict[str, Any], None]:
+ policy = request_content["policy"]
+ stream_mode = request_content["stream_mode"]
+ if response.status in policy["retry_codes"] or response.status in policy["abort_codes"]:
+ await self._handle_error_response(response, retry_count, policy)
+ return
+
+ response.raise_for_status()
+ result = {}
+ if stream_mode:
+ # Convert streaming output into a non-streaming result
+ result = await self._handle_stream_output(response)
+ else:
+ result = await response.json()
+ return (
+ response_handler(result)
+ if response_handler
+ else self._default_response_handler(result, user_id, request_type, endpoint)
+ )
+
+ async def _handle_stream_output(self, response: ClientResponse) -> Dict[str, Any]:
+ flag_delta_content_finished = False
+ accumulated_content = ""
+ usage = None # Initialize usage to avoid an undefined-variable error
+ reasoning_content = ""
+ content = ""
+ async for line_bytes in response.content:
+ try:
+ line = line_bytes.decode("utf-8").strip()
+ if not line:
+ continue
+ if line.startswith("data:"):
+ data_str = line[5:].strip()
+ if data_str == "[DONE]":
+ break
+ try:
+ chunk = json.loads(data_str)
+ if flag_delta_content_finished:
+ chunk_usage = chunk.get("usage", None)
+ if chunk_usage:
+ usage = chunk_usage # Capture token usage
+ else:
+ delta = chunk["choices"][0]["delta"]
+ delta_content = delta.get("content")
+ if delta_content is None:
+ delta_content = ""
+ accumulated_content += delta_content
+ # Check whether the streamed text has finished
+ finish_reason = chunk["choices"][0].get("finish_reason")
+ if delta.get("reasoning_content", None):
+ reasoning_content += delta["reasoning_content"]
+ if finish_reason == "stop":
+ chunk_usage = chunk.get("usage", None)
+ if chunk_usage:
+ usage = chunk_usage
+ break
+ # Some platforms don't return token usage until after the text ends, so fetch one more chunk
+ flag_delta_content_finished = True
+ except Exception as e:
+ logger.exception(f"模型 {self.model_name} 解析流式输出错误: {str(e)}")
+ except Exception as e:
+ if isinstance(e, GeneratorExit):
+ log_content = f"模型 {self.model_name} 流式输出被中断,正在清理资源..."
+ else:
+ log_content = f"模型 {self.model_name} 处理流式输出时发生错误: {str(e)}"
+ logger.warning(log_content)
+ # 确保资源被正确清理
+ try:
+ await response.release()
+ except Exception as cleanup_error:
+ logger.error(f"清理资源时发生错误: {cleanup_error}")
+ # 返回已经累积的内容
+ content = accumulated_content
+ if not content:
+ content = accumulated_content
+ think_match = re.search(r"(.*?)", content, re.DOTALL)
+ if think_match:
+ reasoning_content = think_match.group(1).strip()
+ content = re.sub(r".*?", "", content, flags=re.DOTALL).strip()
+ result = {
+ "choices": [
+ {
+ "message": {
+ "content": content,
+ "reasoning_content": reasoning_content,
+ # Streaming output may have no tool calls, so no tool_calls field is needed here
+ }
+ }
+ ],
+ "usage": usage,
+ }
+ return result
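+ # The loop above consumes Server-Sent Events, e.g. lines of the form
+ # `data: {"choices": [{"delta": {"content": "Hi"}}]}` ending with `data: [DONE]`.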
+
+ async def _handle_error_response(
+ self, response: ClientResponse, retry_count: int, policy: Dict[str, Any]
+ ) -> None:
+ if response.status in policy["retry_codes"]:
+ wait_time = policy["base_wait"] * (2**retry_count)
+ logger.warning(f"Model {self.model_name} error code: {response.status}, retrying in {wait_time}s")
+ if response.status == 413:
+ logger.warning("Request body too large, trying to compress...")
+ raise PayLoadTooLargeError("Request body too large")
+ elif response.status in [500, 503]:
+ logger.error(
+ f"Model {self.model_name} error code: {response.status} - {error_code_mapping.get(response.status)}"
+ )
+ raise RuntimeError("Server overloaded; the model failed to recover QAQ")
+ else:
+ logger.warning(f"Model {self.model_name} rate limited (429), retrying in {wait_time}s...")
+ raise RuntimeError("Rate limited (429)")
+ elif response.status in policy["abort_codes"]:
+ if response.status != 403:
+ raise RequestAbortException("Request error, aborting", response)
+ else:
+ raise PermissionDeniedException("Model access denied")
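+ # Summary of the mapping above: 413 -> PayLoadTooLargeError (caller compresses
+ # the image and retries); 429/500/503 -> RuntimeError (retried by the generic
+ # handler); 403 -> PermissionDeniedException (may trigger a model downgrade);
+ # other abort codes -> RequestAbortException (request is aborted).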
+
+ async def _handle_exception(
+ self, exception, retry_count: int, request_content: Dict[str, Any]
+ ) -> Union[Tuple[Dict[str, Any], int], Tuple[None, int]]:
+ policy = request_content["policy"]
+ payload = request_content["payload"]
+ keep_request = False
+ if retry_count < policy["max_retries"] - 1:
+ wait_time = policy["base_wait"] * (2**retry_count)
+ keep_request = True
+ if isinstance(exception, RequestAbortException):
+ response = exception.response
+ logger.error(
+ f"模型 {self.model_name} 错误码: {response.status} - {error_code_mapping.get(response.status)}"
+ )
+ # 尝试获取并记录服务器返回的详细错误信息
+ try:
+ error_json = await response.json()
+ if error_json and isinstance(error_json, list) and len(error_json) > 0:
+ # 处理多个错误的情况
+ for error_item in error_json:
+ if "error" in error_item and isinstance(error_item["error"], dict):
+ error_obj: dict = error_item["error"]
+ error_code = error_obj.get("code")
+ error_message = error_obj.get("message")
+ error_status = error_obj.get("status")
+ logger.error(
+ f"服务器错误详情: 代码={error_code}, 状态={error_status}, 消息={error_message}"
+ )
+ elif isinstance(error_json, dict) and "error" in error_json:
+ # 处理单个错误对象的情况
+ error_obj = error_json.get("error", {})
+ error_code = error_obj.get("code")
+ error_message = error_obj.get("message")
+ error_status = error_obj.get("status")
+ logger.error(f"服务器错误详情: 代码={error_code}, 状态={error_status}, 消息={error_message}")
+ else:
+ # 记录原始错误响应内容
+ logger.error(f"服务器错误响应: {error_json}")
+ except Exception as e:
+ logger.warning(f"无法解析服务器错误响应: {str(e)}")
+ raise RuntimeError(f"请求被拒绝: {error_code_mapping.get(response.status)}")
+
+ elif isinstance(exception, PermissionDeniedException):
+ # Downgrade handling applies only to SiliconFlow's V3 and R1
+ if self.model_name.startswith("Pro/deepseek-ai") and self.base_url == "https://api.siliconflow.cn/v1/":
+ old_model_name = self.model_name
+ self.model_name = self.model_name[4:] # Strip the "Pro/" prefix
+ logger.warning(f"403 detected; model downgraded from {old_model_name} to {self.model_name}")
+
+ # Update the global config
+ if global_config.llm_normal.get("name") == old_model_name:
+ global_config.llm_normal["name"] = self.model_name
+ logger.warning(f"Temporarily downgraded the global llm_normal model to {self.model_name}")
+ if global_config.llm_reasoning.get("name") == old_model_name:
+ global_config.llm_reasoning["name"] = self.model_name
+ logger.warning(f"Temporarily downgraded the global llm_reasoning model to {self.model_name}")
+
+ if payload and "model" in payload:
+ payload["model"] = self.model_name
+
+ await asyncio.sleep(wait_time)
+ return payload, -1
+ raise RuntimeError(f"Request rejected: {error_code_mapping.get(403)}")
+
+ elif isinstance(exception, PayLoadTooLargeError):
+ if keep_request:
+ image_base64 = request_content["image_base64"]
+ compressed_image_base64 = compress_base64_image_by_scale(image_base64)
+ new_payload = await self._build_payload(request_content["prompt"], compressed_image_base64, request_content["image_format"])
+ return new_payload, 0
+ else:
+ return None, 0
+
+ elif isinstance(exception, aiohttp.ClientError) or isinstance(exception, asyncio.TimeoutError):
+ if keep_request:
+ logger.error(f"模型 {self.model_name} 网络错误,等待{wait_time}秒后重试... 错误: {str(exception)}")
+ await asyncio.sleep(wait_time)
+ return None, 0
+ else:
+ logger.critical(f"模型 {self.model_name} 网络错误达到最大重试次数: {str(exception)}")
+ raise RuntimeError(f"网络请求失败: {str(exception)}")
+
+ elif isinstance(exception, aiohttp.ClientResponseError):
+ # Handle response errors raised by aiohttp for statuses outside the policy lists
+ if keep_request:
+ logger.error(
+ f"Model {self.model_name} HTTP response error, retrying in {wait_time}s... status: {exception.status}, error: {exception.message}"
+ )
+ try:
+ error_text = await exception.response.text()
+ error_json = json.loads(error_text)
+ if isinstance(error_json, list) and len(error_json) > 0:
+ # Handle multiple errors
+ for error_item in error_json:
+ if "error" in error_item and isinstance(error_item["error"], dict):
+ error_obj = error_item["error"]
+ logger.error(
+ f"模型 {self.model_name} 服务器错误详情: 代码={error_obj.get('code')}, "
+ f"状态={error_obj.get('status')}, "
+ f"消息={error_obj.get('message')}"
+ )
+ elif isinstance(error_json, dict) and "error" in error_json:
+ error_obj = error_json.get("error", {})
+ logger.error(
+ f"模型 {self.model_name} 服务器错误详情: 代码={error_obj.get('code')}, "
+ f"状态={error_obj.get('status')}, "
+ f"消息={error_obj.get('message')}"
+ )
+ else:
+ logger.error(f"模型 {self.model_name} 服务器错误响应: {error_json}")
+ except (json.JSONDecodeError, TypeError) as json_err:
+ logger.warning(
+ f"Model {self.model_name} response is not valid JSON: {str(json_err)}, raw content: {error_text[:200]}"
+ )
+ except Exception as parse_err:
+ logger.warning(f"Model {self.model_name} could not parse the error response content: {str(parse_err)}")
+
+ await asyncio.sleep(wait_time)
+ return None, 0
+ else:
+ logger.critical(
+ f"Model {self.model_name} HTTP response error hit the retry limit: status: {exception.status}, error: {exception.message}"
+ )
+ # Safely inspect and log request details
+ handled_payload = await self._safely_record(request_content, payload)
+ logger.critical(f"请求头: {await self._build_headers(no_key=True)} 请求体: {handled_payload}")
+ raise RuntimeError(
+ f"模型 {self.model_name} API请求失败: 状态码 {exception.status}, {exception.message}"
+ )
+
+ else:
+ if keep_request:
+ logger.error(f"模型 {self.model_name} 请求失败,等待{wait_time}秒后重试... 错误: {str(exception)}")
+ await asyncio.sleep(wait_time)
+ return None, 0
+ else:
+ logger.critical(f"模型 {self.model_name} 请求失败: {str(exception)}")
+ # 安全地检查和记录请求详情
+ handled_payload = await self._safely_record(request_content, payload)
+ logger.critical(f"请求头: {await self._build_headers(no_key=True)} 请求体: {handled_payload}")
+ raise RuntimeError(f"模型 {self.model_name} API请求失败: {str(exception)}")
+
+ async def _safely_record(self, request_content: Dict[str, Any], payload: Dict[str, Any]):
+ image_base64: str = request_content.get("image_base64")
+ image_format: str = request_content.get("image_format")
+ if (
+ image_base64
+ and payload
+ and isinstance(payload, dict)
+ and "messages" in payload
+ and len(payload["messages"]) > 0
+ ):
+ if isinstance(payload["messages"][0], dict) and "content" in payload["messages"][0]:
+ content = payload["messages"][0]["content"]
+ if isinstance(content, list) and len(content) > 1 and "image_url" in content[1]:
+ payload["messages"][0]["content"][1]["image_url"]["url"] = (
+ f"data:image/{image_format.lower() if image_format else 'jpeg'};base64,"
+ f"{image_base64[:10]}...{image_base64[-10:]}"
+ )
+ # if isinstance(content, str) and len(content) > 100:
+ # payload["messages"][0]["content"] = content[:100]
+ return payload
+
+ async def _transform_parameters(self, params: dict) -> dict:
+ """
+ Transform parameters according to the model name:
+ - For OpenAI CoT-series models that need it (e.g. "o3-mini"), drop the 'temperature' parameter
+ and rename 'max_tokens' to 'max_completion_tokens'
+ """
+ # Copy the parameters to avoid mutating the original data
+ new_params = dict(params)
+
+ if self.model_name.lower() in self.MODELS_NEEDING_TRANSFORMATION:
+ # Drop the 'temperature' parameter (if present)
+ new_params.pop("temperature", None)
+ # If 'max_tokens' is present, rename it to 'max_completion_tokens'
+ if "max_tokens" in new_params:
+ new_params["max_completion_tokens"] = new_params.pop("max_tokens")
+ return new_params
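+ # For example (illustrative values): for "o3-mini" the params
+ # {"temperature": 0.7, "max_tokens": 256} become {"max_completion_tokens": 256}.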
+
+ async def _build_payload(self, prompt: str, image_base64: str = None, image_format: str = None) -> dict:
+ """构建请求体"""
+ # 复制一份参数,避免直接修改 self.params
+ params_copy = await self._transform_parameters(self.params)
+ if image_base64:
+ payload = {
+ "model": self.model_name,
+ "messages": [
+ {
+ "role": "user",
+ "content": [
+ {"type": "text", "text": prompt},
+ {
+ "type": "image_url",
+ "image_url": {"url": f"data:image/{image_format.lower()};base64,{image_base64}"},
+ },
+ ],
+ }
+ ],
+ "max_tokens": global_config.max_response_length,
+ **params_copy,
+ }
+ else:
+ payload = {
+ "model": self.model_name,
+ "messages": [{"role": "user", "content": prompt}],
+ "max_tokens": global_config.max_response_length,
+ **params_copy,
+ }
+ # If max_tokens is still present and the model needs conversion, check again here
+ if self.model_name.lower() in self.MODELS_NEEDING_TRANSFORMATION and "max_tokens" in payload:
+ payload["max_completion_tokens"] = payload.pop("max_tokens")
+ return payload
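+ # Illustrative shape of the text-only payload (field values are assumptions):
+ # {"model": "gpt-4o", "messages": [{"role": "user", "content": "..."}],
+ # "max_tokens": 2048, ...extra params}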
+
+ def _default_response_handler(
+ self, result: dict, user_id: str = "system", request_type: str = None, endpoint: str = "/chat/completions"
+ ) -> Tuple:
+ """默认响应解析"""
+ if "choices" in result and result["choices"]:
+ message = result["choices"][0]["message"]
+ content = message.get("content", "")
+ content, reasoning = self._extract_reasoning(content)
+ reasoning_content = message.get("model_extra", {}).get("reasoning_content", "")
+ if not reasoning_content:
+ reasoning_content = message.get("reasoning_content", "")
+ if not reasoning_content:
+ reasoning_content = reasoning
+
+ # Extract tool-call information
+ tool_calls = message.get("tool_calls", None)
+
+ # Record token usage
+ usage = result.get("usage", {})
+ if usage:
+ prompt_tokens = usage.get("prompt_tokens", 0)
+ completion_tokens = usage.get("completion_tokens", 0)
+ total_tokens = usage.get("total_tokens", 0)
+ self._record_usage(
+ prompt_tokens=prompt_tokens,
+ completion_tokens=completion_tokens,
+ total_tokens=total_tokens,
+ user_id=user_id,
+ request_type=request_type if request_type is not None else self.request_type,
+ endpoint=endpoint,
+ )
+
+ # Only return tool_calls when it exists and is non-empty
+ if tool_calls:
+ return content, reasoning_content, tool_calls
+ else:
+ return content, reasoning_content
+
+ return "没有返回结果", ""
+
+ @staticmethod
+ def _extract_reasoning(content: str) -> Tuple[str, str]:
+ """CoT思维链提取"""
+ match = re.search(r"(?:)?(.*?)", content, re.DOTALL)
+ content = re.sub(r"(?:)?.*?", "", content, flags=re.DOTALL, count=1).strip()
+ if match:
+ reasoning = match.group(1).strip()
+ else:
+ reasoning = ""
+ return content, reasoning
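+ # For example, _extract_reasoning("<think>step by step</think>answer")
+ # returns ("answer", "step by step").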
+
+ async def _build_headers(self, no_key: bool = False) -> dict:
+ """构建请求头"""
+ if no_key:
+ return {"Authorization": "Bearer **********", "Content-Type": "application/json"}
+ else:
+ return {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}
+ # Keeps people from screenshotting their own key
+
+ async def generate_response(self, prompt: str) -> Tuple:
+ """根据输入的提示生成模型的异步响应"""
+
+ response = await self._execute_request(endpoint="/chat/completions", prompt=prompt)
+ # Decide how to unpack based on the length of the result
+ if len(response) == 3:
+ content, reasoning_content, tool_calls = response
+ return content, reasoning_content, self.model_name, tool_calls
+ else:
+ content, reasoning_content = response
+ return content, reasoning_content, self.model_name
+
+ async def generate_response_for_image(self, prompt: str, image_base64: str, image_format: str) -> Tuple:
+ """根据输入的提示和图片生成模型的异步响应"""
+
+ response = await self._execute_request(
+ endpoint="/chat/completions", prompt=prompt, image_base64=image_base64, image_format=image_format
+ )
+ # Decide how to unpack based on the length of the result
+ if len(response) == 3:
+ content, reasoning_content, tool_calls = response
+ return content, reasoning_content, tool_calls
+ else:
+ content, reasoning_content = response
+ return content, reasoning_content
+
+ async def generate_response_async(self, prompt: str, **kwargs) -> Union[str, Tuple]:
+ """异步方式根据输入的提示生成模型的响应"""
+ # 构建请求体
+ data = {
+ "model": self.model_name,
+ "messages": [{"role": "user", "content": prompt}],
+ "max_tokens": global_config.max_response_length,
+ **self.params,
+ **kwargs,
+ }
+
+ response = await self._execute_request(endpoint="/chat/completions", payload=data, prompt=prompt)
+ # Return the response as-is, without processing
+ return response
+
+ async def get_embedding(self, text: str) -> Union[list, None]:
+ """异步方法:获取文本的embedding向量
+
+ Args:
+ text: 需要获取embedding的文本
+
+ Returns:
+ list: embedding向量,如果失败则返回None
+ """
+
+ if len(text) < 1:
+ logger.debug("该消息没有长度,不再发送获取embedding向量的请求")
+ return None
+
+ def embedding_handler(result):
+ """处理响应"""
+ if "data" in result and len(result["data"]) > 0:
+ # Extract token usage info
+ usage = result.get("usage", {})
+ if usage:
+ prompt_tokens = usage.get("prompt_tokens", 0)
+ completion_tokens = usage.get("completion_tokens", 0)
+ total_tokens = usage.get("total_tokens", 0)
+ # Record token usage
+ self._record_usage(
+ prompt_tokens=prompt_tokens,
+ completion_tokens=completion_tokens,
+ total_tokens=total_tokens,
+ user_id="system", # 可以根据需要修改 user_id
+ # request_type="embedding", # 请求类型为 embedding
+ request_type=self.request_type, # 请求类型为 text
+ endpoint="/embeddings", # API 端点
+ )
+ return result["data"][0].get("embedding", None)
+ return result["data"][0].get("embedding", None)
+ return None
+
+ embedding = await self._execute_request(
+ endpoint="/embeddings",
+ prompt=text,
+ payload={"model": self.model_name, "input": text, "encoding_format": "float"},
+ retry_policy={"max_retries": 2, "base_wait": 6},
+ response_handler=embedding_handler,
+ )
+ return embedding
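+ # Hypothetical usage sketch (names assumed): with llm = LLMRequest(model_cfg),
+ # vec = await llm.get_embedding("hello") yields a list of floats, or None.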
+
+
+def compress_base64_image_by_scale(base64_data: str, target_size: int = 0.8 * 1024 * 1024) -> str:
+ """压缩base64格式的图片到指定大小
+ Args:
+ base64_data: base64编码的图片数据
+ target_size: 目标文件大小(字节),默认0.8MB
+ Returns:
+ str: 压缩后的base64图片数据
+ """
+ try:
+ # Decode the base64 string into bytes
+ image_data = base64.b64decode(base64_data)
+
+ # If the image is already below the 2MB threshold, return it unchanged
+ if len(image_data) <= 2 * 1024 * 1024:
+ return base64_data
+
+ # Load the bytes into an image object
+ img = Image.open(io.BytesIO(image_data))
+
+ # Original dimensions
+ original_width, original_height = img.size
+
+ # Compute the scale factor
+ scale = min(1.0, (target_size / len(image_data)) ** 0.5)
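+ # File size scales roughly with pixel area, i.e. with scale**2, so the
+ # square root of the size ratio approximates the target size in one resize.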
+
+ # Compute the new dimensions
+ new_width = int(original_width * scale)
+ new_height = int(original_height * scale)
+
+ # Create an in-memory buffer
+ output_buffer = io.BytesIO()
+
+ # For GIFs, process every frame
+ if getattr(img, "is_animated", False):
+ frames = []
+ for frame_idx in range(img.n_frames):
+ img.seek(frame_idx)
+ new_frame = img.copy()
+ new_frame = new_frame.resize((new_width // 2, new_height // 2), Image.Resampling.LANCZOS) # Animated images get an extra halving
+ frames.append(new_frame)
+
+ # Save to the buffer
+ frames[0].save(
+ output_buffer,
+ format="GIF",
+ save_all=True,
+ append_images=frames[1:],
+ optimize=True,
+ duration=img.info.get("duration", 100),
+ loop=img.info.get("loop", 0),
+ )
+ else:
+ # Handle static images
+ resized_img = img.resize((new_width, new_height), Image.Resampling.LANCZOS)
+
+ # Save to the buffer, preserving the original format
+ if img.format == "PNG" and img.mode in ("RGBA", "LA"):
+ resized_img.save(output_buffer, format="PNG", optimize=True)
+ else:
+ resized_img.save(output_buffer, format="JPEG", quality=95, optimize=True)
+
+ # Get the compressed data and convert it back to base64
+ compressed_data = output_buffer.getvalue()
+ logger.success(f"压缩图片: {original_width}x{original_height} -> {new_width}x{new_height}")
+ logger.info(f"压缩前大小: {len(image_data) / 1024:.1f}KB, 压缩后大小: {len(compressed_data) / 1024:.1f}KB")
+
+ return base64.b64encode(compressed_data).decode("utf-8")
+
+ except Exception as e:
+ logger.error(f"压缩图片失败: {str(e)}")
+ import traceback
+
+ logger.error(traceback.format_exc())
+ return base64_data
From e6d770383662bd507dac7d030e792d1cda1d951b Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
Date: Sat, 19 Apr 2025 14:42:27 +0000
Subject: [PATCH 02/26] 🤖 Auto-format code [skip ci]
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/plugins/models/utils_model_new.py | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/src/plugins/models/utils_model_new.py b/src/plugins/models/utils_model_new.py
index 8535476c..1faebc3e 100644
--- a/src/plugins/models/utils_model_new.py
+++ b/src/plugins/models/utils_model_new.py
@@ -610,7 +610,7 @@ class LLMRequest:
"stream_mode": stream_mode,
"image_base64": image_base64, # 保留必要的exception处理所需的原始数据
"image_format": image_format,
- "prompt": prompt
+ "prompt": prompt,
}
async def _execute_request(
@@ -862,7 +862,9 @@ class LLMRequest:
if keep_request:
image_base64 = request_content["image_base64"]
compressed_image_base64 = compress_base64_image_by_scale(image_base64)
- new_payload = await self._build_payload(request_content["prompt"], compressed_image_base64, request_content["image_format"])
+ new_payload = await self._build_payload(
+ request_content["prompt"], compressed_image_base64, request_content["image_format"]
+ )
return new_payload, 0
else:
return None, 0
@@ -910,7 +912,7 @@ class LLMRequest:
)
except Exception as parse_err:
logger.warning(f"模型 {self.model_name} 无法解析响应错误内容: {str(parse_err)}")
-
+
await asyncio.sleep(wait_time)
return None, 0
else:
From 46a5b01a1355c7ecfd819dab19b2c30b12e37d12 Mon Sep 17 00:00:00 2001
From: UnCLAS-Prommer
Date: Sat, 19 Apr 2025 22:48:30 +0800
Subject: [PATCH 03/26] Split _execute_request
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/plugins/models/utils_model.py | 455 ++++++++-
src/plugins/models/utils_model_new.py | 1233 -------------------------
2 files changed, 452 insertions(+), 1236 deletions(-)
delete mode 100644 src/plugins/models/utils_model_new.py
diff --git a/src/plugins/models/utils_model.py b/src/plugins/models/utils_model.py
index 7930a035..1faebc3e 100644
--- a/src/plugins/models/utils_model.py
+++ b/src/plugins/models/utils_model.py
@@ -2,9 +2,11 @@ import asyncio
import json
import re
from datetime import datetime
-from typing import Tuple, Union
+from typing import Tuple, Union, Dict, Any
import aiohttp
+from aiohttp.client import ClientResponse
+
from src.common.logger import get_module_logger
import base64
from PIL import Image
@@ -16,6 +18,53 @@ from ...config.config import global_config
logger = get_module_logger("model_utils")
+class PayLoadTooLargeError(Exception):
+ """自定义异常类,用于处理请求体过大错误"""
+
+ def __init__(self, message: str):
+ super().__init__(message)
+ self.message = message
+
+ def __str__(self):
+ return "请求体过大,请尝试压缩图片或减少输入内容。"
+
+
+class RequestAbortException(Exception):
+ """自定义异常类,用于处理请求中断异常"""
+
+ def __init__(self, message: str, response: ClientResponse):
+ super().__init__(message)
+ self.message = message
+ self.response = response
+
+ def __str__(self):
+ return self.message
+
+
+class PermissionDeniedException(Exception):
+ """自定义异常类,用于处理访问拒绝的异常"""
+
+ def __init__(self, message: str):
+ super().__init__(message)
+ self.message = message
+
+ def __str__(self):
+ return self.message
+
+
+# Mapping of common error codes
+error_code_mapping = {
+ 400: "Invalid parameters",
+ 401: "API key error; authentication failed. Please check the settings in /config/bot_config.toml and .env~",
+ 402: "Insufficient account balance",
+ 403: "Real-name verification required, or insufficient balance",
+ 404: "Not Found",
+ 429: "Too many requests, please try again later",
+ 500: "Internal server error",
+ 503: "Server overloaded",
+}
+
+
class LLMRequest:
# Models whose parameters need transformation; a class variable to avoid duplication
MODELS_NEEDING_TRANSFORMATION = [
@@ -28,7 +77,7 @@ class LLMRequest:
"o1-mini-2024-09-12",
]
- def __init__(self, model, **kwargs):
+ def __init__(self, model: dict, **kwargs):
# Look up the actual values for the configured environment variable names
try:
self.api_key = os.environ[model["key"]]
@@ -37,7 +86,7 @@ class LLMRequest:
logger.error(f"原始 model dict 信息:{model}")
logger.error(f"配置错误:找不到对应的配置项 - {str(e)}")
raise ValueError(f"配置错误:找不到对应的配置项 - {str(e)}") from e
- self.model_name = model["name"]
+ self.model_name: str = model["name"]
self.params = kwargs
self.stream = model.get("stream", False)
@@ -123,6 +172,7 @@ class LLMRequest:
output_cost = (completion_tokens / 1000000) * self.pri_out
return round(input_cost + output_cost, 6)
+ '''
async def _execute_request(
self,
endpoint: str,
@@ -509,6 +559,405 @@ class LLMRequest:
logger.error(f"模型 {self.model_name} 达到最大重试次数,请求仍然失败")
raise RuntimeError(f"模型 {self.model_name} 达到最大重试次数,API请求仍然失败")
+ '''
+
+ async def _prepare_request(
+ self,
+ endpoint: str,
+ prompt: str = None,
+ image_base64: str = None,
+ image_format: str = None,
+ payload: dict = None,
+ retry_policy: dict = None,
+ ) -> Dict[str, Any]:
+ """配置请求参数
+ Args:
+ endpoint: API端点路径 (如 "chat/completions")
+ prompt: prompt文本
+ image_base64: 图片的base64编码
+ image_format: 图片格式
+ payload: 请求体数据
+ retry_policy: 自定义重试策略
+ request_type: 请求类型
+ """
+
+ # Merge retry policies
+ default_retry = {
+ "max_retries": 3,
+ "base_wait": 10,
+ "retry_codes": [429, 413, 500, 503],
+ "abort_codes": [400, 401, 402, 403],
+ }
+ policy = {**default_retry, **(retry_policy or {})}
+
+ api_url = f"{self.base_url.rstrip('/')}/{endpoint.lstrip('/')}"
+
+ stream_mode = self.stream
+
+ # Build the request body
+ if image_base64:
+ payload = await self._build_payload(prompt, image_base64, image_format)
+ elif payload is None:
+ payload = await self._build_payload(prompt)
+
+ if stream_mode:
+ payload["stream"] = stream_mode
+
+ return {
+ "policy": policy,
+ "payload": payload,
+ "api_url": api_url,
+ "stream_mode": stream_mode,
+ "image_base64": image_base64, # 保留必要的exception处理所需的原始数据
+ "image_format": image_format,
+ "prompt": prompt,
+ }
+
+ async def _execute_request(
+ self,
+ endpoint: str,
+ prompt: str = None,
+ image_base64: str = None,
+ image_format: str = None,
+ payload: dict = None,
+ retry_policy: dict = None,
+ response_handler: callable = None,
+ user_id: str = "system",
+ request_type: str = None,
+ ):
+ """统一请求执行入口
+ Args:
+ endpoint: API端点路径 (如 "chat/completions")
+ prompt: prompt文本
+ image_base64: 图片的base64编码
+ image_format: 图片格式
+ payload: 请求体数据
+ retry_policy: 自定义重试策略
+ response_handler: 自定义响应处理器
+ user_id: 用户ID
+ request_type: 请求类型
+ """
+ # Get the request configuration
+ request_content = await self._prepare_request(
+ endpoint, prompt, image_base64, image_format, payload, retry_policy
+ )
+ if request_type is None:
+ request_type = self.request_type
+ for retry in range(request_content["policy"]["max_retries"]):
+ try:
+ # Use a context manager for the session
+ headers = await self._build_headers()
+ # Apparently required for OpenAI streaming; adding it had no effect on Alibaba Cloud's qwq-plus
+ if request_content["stream_mode"]:
+ headers["Accept"] = "text/event-stream"
+ async with aiohttp.ClientSession() as session:
+ async with session.post(
+ request_content["api_url"], headers=headers, json=request_content["payload"]
+ ) as response:
+ handled_result = await self._handle_response(
+ response, request_content, retry, response_handler, user_id, request_type, endpoint
+ )
+ return handled_result
+ except Exception as e:
+ handled_payload, count_delta = await self._handle_exception(e, retry, request_content)
+ retry += count_delta # Downgrades don't count toward the retry limit
+ if handled_payload:
+ # If the downgrade succeeded, use the rebuilt request body
+ request_content["payload"] = handled_payload
+ continue
+
+ logger.error(f"模型 {self.model_name} 达到最大重试次数,请求仍然失败")
+ raise RuntimeError(f"模型 {self.model_name} 达到最大重试次数,API请求仍然失败")
+
+ async def _handle_response(
+ self,
+ response: ClientResponse,
+ request_content: Dict[str, Any],
+ retry_count: int,
+ response_handler: callable,
+ user_id,
+ request_type,
+ endpoint,
+ ) -> Union[Dict[str, Any], None]:
+ policy = request_content["policy"]
+ stream_mode = request_content["stream_mode"]
+ if response.status in policy["retry_codes"] or response.status in policy["abort_codes"]:
+ await self._handle_error_response(response, retry_count, policy)
+ return
+
+ response.raise_for_status()
+ result = {}
+ if stream_mode:
+ # Convert streaming output into a non-streaming result
+ result = await self._handle_stream_output(response)
+ else:
+ result = await response.json()
+ return (
+ response_handler(result)
+ if response_handler
+ else self._default_response_handler(result, user_id, request_type, endpoint)
+ )
+
+ async def _handle_stream_output(self, response: ClientResponse) -> Dict[str, Any]:
+ flag_delta_content_finished = False
+ accumulated_content = ""
+ usage = None # Initialize usage to avoid an undefined-variable error
+ reasoning_content = ""
+ content = ""
+ async for line_bytes in response.content:
+ try:
+ line = line_bytes.decode("utf-8").strip()
+ if not line:
+ continue
+ if line.startswith("data:"):
+ data_str = line[5:].strip()
+ if data_str == "[DONE]":
+ break
+ try:
+ chunk = json.loads(data_str)
+ if flag_delta_content_finished:
+ chunk_usage = chunk.get("usage", None)
+ if chunk_usage:
+ usage = chunk_usage # Capture token usage
+ else:
+ delta = chunk["choices"][0]["delta"]
+ delta_content = delta.get("content")
+ if delta_content is None:
+ delta_content = ""
+ accumulated_content += delta_content
+ # Check whether the streamed text has finished
+ finish_reason = chunk["choices"][0].get("finish_reason")
+ if delta.get("reasoning_content", None):
+ reasoning_content += delta["reasoning_content"]
+ if finish_reason == "stop":
+ chunk_usage = chunk.get("usage", None)
+ if chunk_usage:
+ usage = chunk_usage
+ break
+ # Some platforms don't return token usage until after the text ends, so fetch one more chunk
+ flag_delta_content_finished = True
+ except Exception as e:
+ logger.exception(f"模型 {self.model_name} 解析流式输出错误: {str(e)}")
+ except Exception as e:
+ if isinstance(e, GeneratorExit):
+ log_content = f"模型 {self.model_name} 流式输出被中断,正在清理资源..."
+ else:
+ log_content = f"模型 {self.model_name} 处理流式输出时发生错误: {str(e)}"
+ logger.warning(log_content)
+ # 确保资源被正确清理
+ try:
+ await response.release()
+ except Exception as cleanup_error:
+ logger.error(f"清理资源时发生错误: {cleanup_error}")
+ # 返回已经累积的内容
+ content = accumulated_content
+ if not content:
+ content = accumulated_content
+ think_match = re.search(r"(.*?)", content, re.DOTALL)
+ if think_match:
+ reasoning_content = think_match.group(1).strip()
+ content = re.sub(r".*?", "", content, flags=re.DOTALL).strip()
+ result = {
+ "choices": [
+ {
+ "message": {
+ "content": content,
+ "reasoning_content": reasoning_content,
+ # Streaming output may have no tool calls, so no tool_calls field is needed here
+ }
+ }
+ ],
+ "usage": usage,
+ }
+ return result
+
+ async def _handle_error_response(
+ self, response: ClientResponse, retry_count: int, policy: Dict[str, Any]
+ ) -> None:
+ if response.status in policy["retry_codes"]:
+ wait_time = policy["base_wait"] * (2**retry_count)
+ logger.warning(f"Model {self.model_name} error code: {response.status}, retrying in {wait_time}s")
+ if response.status == 413:
+ logger.warning("Request body too large, trying to compress...")
+ raise PayLoadTooLargeError("Request body too large")
+ elif response.status in [500, 503]:
+ logger.error(
+ f"Model {self.model_name} error code: {response.status} - {error_code_mapping.get(response.status)}"
+ )
+ raise RuntimeError("Server overloaded; the model failed to recover QAQ")
+ else:
+ logger.warning(f"Model {self.model_name} rate limited (429), retrying in {wait_time}s...")
+ raise RuntimeError("Rate limited (429)")
+ elif response.status in policy["abort_codes"]:
+ if response.status != 403:
+ raise RequestAbortException("Request error, aborting", response)
+ else:
+ raise PermissionDeniedException("Model access denied")
+
+ async def _handle_exception(
+ self, exception, retry_count: int, request_content: Dict[str, Any]
+ ) -> Union[Tuple[Dict[str, Any], int], Tuple[None, int]]:
+ policy = request_content["policy"]
+ payload = request_content["payload"]
+ keep_request = False
+ if retry_count < policy["max_retries"] - 1:
+ wait_time = policy["base_wait"] * (2**retry_count)
+ keep_request = True
+ if isinstance(exception, RequestAbortException):
+ response = exception.response
+ logger.error(
+ f"模型 {self.model_name} 错误码: {response.status} - {error_code_mapping.get(response.status)}"
+ )
+ # 尝试获取并记录服务器返回的详细错误信息
+ try:
+ error_json = await response.json()
+ if error_json and isinstance(error_json, list) and len(error_json) > 0:
+ # 处理多个错误的情况
+ for error_item in error_json:
+ if "error" in error_item and isinstance(error_item["error"], dict):
+ error_obj: dict = error_item["error"]
+ error_code = error_obj.get("code")
+ error_message = error_obj.get("message")
+ error_status = error_obj.get("status")
+ logger.error(
+ f"服务器错误详情: 代码={error_code}, 状态={error_status}, 消息={error_message}"
+ )
+ elif isinstance(error_json, dict) and "error" in error_json:
+ # 处理单个错误对象的情况
+ error_obj = error_json.get("error", {})
+ error_code = error_obj.get("code")
+ error_message = error_obj.get("message")
+ error_status = error_obj.get("status")
+ logger.error(f"服务器错误详情: 代码={error_code}, 状态={error_status}, 消息={error_message}")
+ else:
+ # 记录原始错误响应内容
+ logger.error(f"服务器错误响应: {error_json}")
+ except Exception as e:
+ logger.warning(f"无法解析服务器错误响应: {str(e)}")
+ raise RuntimeError(f"请求被拒绝: {error_code_mapping.get(response.status)}")
+
+ elif isinstance(exception, PermissionDeniedException):
+ # Downgrade handling applies only to SiliconFlow's V3 and R1
+ if self.model_name.startswith("Pro/deepseek-ai") and self.base_url == "https://api.siliconflow.cn/v1/":
+ old_model_name = self.model_name
+ self.model_name = self.model_name[4:] # Strip the "Pro/" prefix
+ logger.warning(f"403 detected; model downgraded from {old_model_name} to {self.model_name}")
+
+ # Update the global config
+ if global_config.llm_normal.get("name") == old_model_name:
+ global_config.llm_normal["name"] = self.model_name
+ logger.warning(f"Temporarily downgraded the global llm_normal model to {self.model_name}")
+ if global_config.llm_reasoning.get("name") == old_model_name:
+ global_config.llm_reasoning["name"] = self.model_name
+ logger.warning(f"Temporarily downgraded the global llm_reasoning model to {self.model_name}")
+
+ if payload and "model" in payload:
+ payload["model"] = self.model_name
+
+ await asyncio.sleep(wait_time)
+ return payload, -1
+ raise RuntimeError(f"Request rejected: {error_code_mapping.get(403)}")
+
+ elif isinstance(exception, PayLoadTooLargeError):
+ if keep_request:
+ image_base64 = request_content["image_base64"]
+ compressed_image_base64 = compress_base64_image_by_scale(image_base64)
+ new_payload = await self._build_payload(
+ request_content["prompt"], compressed_image_base64, request_content["image_format"]
+ )
+ return new_payload, 0
+ else:
+ return None, 0
+
+ elif isinstance(exception, aiohttp.ClientError) or isinstance(exception, asyncio.TimeoutError):
+ if keep_request:
+ logger.error(f"模型 {self.model_name} 网络错误,等待{wait_time}秒后重试... 错误: {str(exception)}")
+ await asyncio.sleep(wait_time)
+ return None, 0
+ else:
+ logger.critical(f"模型 {self.model_name} 网络错误达到最大重试次数: {str(exception)}")
+ raise RuntimeError(f"网络请求失败: {str(exception)}")
+
+ elif isinstance(exception, aiohttp.ClientResponseError):
+ # Handle response errors raised by aiohttp for statuses outside the policy lists
+ if keep_request:
+ logger.error(
+ f"Model {self.model_name} HTTP response error, retrying in {wait_time}s... status: {exception.status}, error: {exception.message}"
+ )
+ try:
+ error_text = await exception.response.text()
+ error_json = json.loads(error_text)
+ if isinstance(error_json, list) and len(error_json) > 0:
+ # Handle multiple errors
+ for error_item in error_json:
+ if "error" in error_item and isinstance(error_item["error"], dict):
+ error_obj = error_item["error"]
+ logger.error(
+ f"模型 {self.model_name} 服务器错误详情: 代码={error_obj.get('code')}, "
+ f"状态={error_obj.get('status')}, "
+ f"消息={error_obj.get('message')}"
+ )
+ elif isinstance(error_json, dict) and "error" in error_json:
+ error_obj = error_json.get("error", {})
+ logger.error(
+ f"模型 {self.model_name} 服务器错误详情: 代码={error_obj.get('code')}, "
+ f"状态={error_obj.get('status')}, "
+ f"消息={error_obj.get('message')}"
+ )
+ else:
+ logger.error(f"模型 {self.model_name} 服务器错误响应: {error_json}")
+ except (json.JSONDecodeError, TypeError) as json_err:
+ logger.warning(
+ f"Model {self.model_name} response is not valid JSON: {str(json_err)}, raw content: {error_text[:200]}"
+ )
+ except Exception as parse_err:
+ logger.warning(f"Model {self.model_name} could not parse the error response content: {str(parse_err)}")
+
+ await asyncio.sleep(wait_time)
+ return None, 0
+ else:
+ logger.critical(
+ f"Model {self.model_name} HTTP response error hit the retry limit: status: {exception.status}, error: {exception.message}"
+ )
+ # Safely inspect and log request details
+ handled_payload = await self._safely_record(request_content, payload)
+ logger.critical(f"请求头: {await self._build_headers(no_key=True)} 请求体: {handled_payload}")
+ raise RuntimeError(
+ f"模型 {self.model_name} API请求失败: 状态码 {exception.status}, {exception.message}"
+ )
+
+ else:
+ if keep_request:
+ logger.error(f"模型 {self.model_name} 请求失败,等待{wait_time}秒后重试... 错误: {str(exception)}")
+ await asyncio.sleep(wait_time)
+ return None, 0
+ else:
+ logger.critical(f"模型 {self.model_name} 请求失败: {str(exception)}")
+ # 安全地检查和记录请求详情
+ handled_payload = await self._safely_record(request_content, payload)
+ logger.critical(f"请求头: {await self._build_headers(no_key=True)} 请求体: {handled_payload}")
+ raise RuntimeError(f"模型 {self.model_name} API请求失败: {str(exception)}")
+
+ async def _safely_record(self, request_content: Dict[str, Any], payload: Dict[str, Any]):
+ image_base64: str = request_content.get("image_base64")
+ image_format: str = request_content.get("image_format")
+ if (
+ image_base64
+ and payload
+ and isinstance(payload, dict)
+ and "messages" in payload
+ and len(payload["messages"]) > 0
+ ):
+ if isinstance(payload["messages"][0], dict) and "content" in payload["messages"][0]:
+ content = payload["messages"][0]["content"]
+ if isinstance(content, list) and len(content) > 1 and "image_url" in content[1]:
+ payload["messages"][0]["content"][1]["image_url"]["url"] = (
+ f"data:image/{image_format.lower() if image_format else 'jpeg'};base64,"
+ f"{image_base64[:10]}...{image_base64[-10:]}"
+ )
+ # if isinstance(content, str) and len(content) > 100:
+ # payload["messages"][0]["content"] = content[:100]
+ return payload
async def _transform_parameters(self, params: dict) -> dict:
"""
diff --git a/src/plugins/models/utils_model_new.py b/src/plugins/models/utils_model_new.py
deleted file mode 100644
index 1faebc3e..00000000
--- a/src/plugins/models/utils_model_new.py
+++ /dev/null
@@ -1,1233 +0,0 @@
-import asyncio
-import json
-import re
-from datetime import datetime
-from typing import Tuple, Union, Dict, Any
-
-import aiohttp
-from aiohttp.client import ClientResponse
-
-from src.common.logger import get_module_logger
-import base64
-from PIL import Image
-import io
-import os
-from ...common.database import db
-from ...config.config import global_config
-
-logger = get_module_logger("model_utils")
-
-
-class PayLoadTooLargeError(Exception):
- """自定义异常类,用于处理请求体过大错误"""
-
- def __init__(self, message: str):
- super().__init__(message)
- self.message = message
-
- def __str__(self):
- return "请求体过大,请尝试压缩图片或减少输入内容。"
-
-
-class RequestAbortException(Exception):
- """自定义异常类,用于处理请求中断异常"""
-
- def __init__(self, message: str, response: ClientResponse):
- super().__init__(message)
- self.message = message
- self.response = response
-
- def __str__(self):
- return self.message
-
-
-class PermissionDeniedException(Exception):
- """自定义异常类,用于处理访问拒绝的异常"""
-
- def __init__(self, message: str):
- super().__init__(message)
- self.message = message
-
- def __str__(self):
- return self.message
-
-
-# 常见Error Code Mapping
-error_code_mapping = {
- 400: "参数不正确",
- 401: "API key 错误,认证失败,请检查/config/bot_config.toml和.env中的配置是否正确哦~",
- 402: "账号余额不足",
- 403: "需要实名,或余额不足",
- 404: "Not Found",
- 429: "请求过于频繁,请稍后再试",
- 500: "服务器内部故障",
- 503: "服务器负载过高",
-}
-
-
-class LLMRequest:
- # 定义需要转换的模型列表,作为类变量避免重复
- MODELS_NEEDING_TRANSFORMATION = [
- "o3-mini",
- "o1-mini",
- "o1-preview",
- "o1-2024-12-17",
- "o1-preview-2024-09-12",
- "o3-mini-2025-01-31",
- "o1-mini-2024-09-12",
- ]
-
- def __init__(self, model: dict, **kwargs):
- # 将大写的配置键转换为小写并从config中获取实际值
- try:
- self.api_key = os.environ[model["key"]]
- self.base_url = os.environ[model["base_url"]]
- except AttributeError as e:
- logger.error(f"原始 model dict 信息:{model}")
- logger.error(f"配置错误:找不到对应的配置项 - {str(e)}")
- raise ValueError(f"配置错误:找不到对应的配置项 - {str(e)}") from e
- self.model_name: str = model["name"]
- self.params = kwargs
-
- self.stream = model.get("stream", False)
- self.pri_in = model.get("pri_in", 0)
- self.pri_out = model.get("pri_out", 0)
-
- # 获取数据库实例
- self._init_database()
-
- # 从 kwargs 中提取 request_type,如果没有提供则默认为 "default"
- self.request_type = kwargs.pop("request_type", "default")
-
- @staticmethod
- def _init_database():
- """初始化数据库集合"""
- try:
- # 创建llm_usage集合的索引
- db.llm_usage.create_index([("timestamp", 1)])
- db.llm_usage.create_index([("model_name", 1)])
- db.llm_usage.create_index([("user_id", 1)])
- db.llm_usage.create_index([("request_type", 1)])
- except Exception as e:
- logger.error(f"创建数据库索引失败: {str(e)}")
-
- def _record_usage(
- self,
- prompt_tokens: int,
- completion_tokens: int,
- total_tokens: int,
- user_id: str = "system",
- request_type: str = None,
- endpoint: str = "/chat/completions",
- ):
- """记录模型使用情况到数据库
- Args:
- prompt_tokens: 输入token数
- completion_tokens: 输出token数
- total_tokens: 总token数
- user_id: 用户ID,默认为system
- request_type: 请求类型(chat/embedding/image/topic/schedule)
- endpoint: API端点
- """
- # 如果 request_type 为 None,则使用实例变量中的值
- if request_type is None:
- request_type = self.request_type
-
- try:
- usage_data = {
- "model_name": self.model_name,
- "user_id": user_id,
- "request_type": request_type,
- "endpoint": endpoint,
- "prompt_tokens": prompt_tokens,
- "completion_tokens": completion_tokens,
- "total_tokens": total_tokens,
- "cost": self._calculate_cost(prompt_tokens, completion_tokens),
- "status": "success",
- "timestamp": datetime.now(),
- }
- db.llm_usage.insert_one(usage_data)
- logger.trace(
- f"Token使用情况 - 模型: {self.model_name}, "
- f"用户: {user_id}, 类型: {request_type}, "
- f"提示词: {prompt_tokens}, 完成: {completion_tokens}, "
- f"总计: {total_tokens}"
- )
- except Exception as e:
- logger.error(f"记录token使用情况失败: {str(e)}")
-
- def _calculate_cost(self, prompt_tokens: int, completion_tokens: int) -> float:
- """计算API调用成本
- 使用模型的pri_in和pri_out价格计算输入和输出的成本
-
- Args:
- prompt_tokens: 输入token数量
- completion_tokens: 输出token数量
-
- Returns:
- float: 总成本(元)
- """
- # 使用模型的pri_in和pri_out计算成本
- input_cost = (prompt_tokens / 1000000) * self.pri_in
- output_cost = (completion_tokens / 1000000) * self.pri_out
- return round(input_cost + output_cost, 6)
-
- '''
- async def _execute_request(
- self,
- endpoint: str,
- prompt: str = None,
- image_base64: str = None,
- image_format: str = None,
- payload: dict = None,
- retry_policy: dict = None,
- response_handler: callable = None,
- user_id: str = "system",
- request_type: str = None,
- ):
- """统一请求执行入口
- Args:
- endpoint: API端点路径 (如 "chat/completions")
- prompt: prompt文本
- image_base64: 图片的base64编码
- image_format: 图片格式
- payload: 请求体数据
- retry_policy: 自定义重试策略
- response_handler: 自定义响应处理器
- user_id: 用户ID
- request_type: 请求类型
- """
-
- if request_type is None:
- request_type = self.request_type
-
- # 合并重试策略
- default_retry = {
- "max_retries": 3,
- "base_wait": 10,
- "retry_codes": [429, 413, 500, 503],
- "abort_codes": [400, 401, 402, 403],
- }
- policy = {**default_retry, **(retry_policy or {})}
-
- # 常见Error Code Mapping
- error_code_mapping = {
- 400: "参数不正确",
- 401: "API key 错误,认证失败,请检查/config/bot_config.toml和.env中的配置是否正确哦~",
- 402: "账号余额不足",
- 403: "需要实名,或余额不足",
- 404: "Not Found",
- 429: "请求过于频繁,请稍后再试",
- 500: "服务器内部故障",
- 503: "服务器负载过高",
- }
-
- api_url = f"{self.base_url.rstrip('/')}/{endpoint.lstrip('/')}"
- # 判断是否为流式
- stream_mode = self.stream
- # logger_msg = "进入流式输出模式," if stream_mode else ""
- # logger.debug(f"{logger_msg}发送请求到URL: {api_url}")
- # logger.info(f"使用模型: {self.model_name}")
-
- # 构建请求体
- if image_base64:
- payload = await self._build_payload(prompt, image_base64, image_format)
- elif payload is None:
- payload = await self._build_payload(prompt)
-
- # 流式输出标志
- # 先构建payload,再添加流式输出标志
- if stream_mode:
- payload["stream"] = stream_mode
-
- for retry in range(policy["max_retries"]):
- try:
- # 使用上下文管理器处理会话
- headers = await self._build_headers()
- # 似乎是openai流式必须要的东西,不过阿里云的qwq-plus加了这个没有影响
- if stream_mode:
- headers["Accept"] = "text/event-stream"
-
- async with aiohttp.ClientSession() as session:
- try:
- async with session.post(api_url, headers=headers, json=payload) as response:
- # 处理需要重试的状态码
- if response.status in policy["retry_codes"]:
- wait_time = policy["base_wait"] * (2**retry)
- logger.warning(
- f"模型 {self.model_name} 错误码: {response.status}, 等待 {wait_time}秒后重试"
- )
- if response.status == 413:
- logger.warning("请求体过大,尝试压缩...")
- image_base64 = compress_base64_image_by_scale(image_base64)
- payload = await self._build_payload(prompt, image_base64, image_format)
- elif response.status in [500, 503]:
- logger.error(
- f"模型 {self.model_name} 错误码: {response.status} - {error_code_mapping.get(response.status)}"
- )
- raise RuntimeError("服务器负载过高,模型恢复失败QAQ")
- else:
- logger.warning(f"模型 {self.model_name} 请求限制(429),等待{wait_time}秒后重试...")
-
- await asyncio.sleep(wait_time)
- continue
- elif response.status in policy["abort_codes"]:
- logger.error(
- f"模型 {self.model_name} 错误码: {response.status} - {error_code_mapping.get(response.status)}"
- )
- # 尝试获取并记录服务器返回的详细错误信息
- try:
- error_json = await response.json()
- if error_json and isinstance(error_json, list) and len(error_json) > 0:
- for error_item in error_json:
- if "error" in error_item and isinstance(error_item["error"], dict):
- error_obj = error_item["error"]
- error_code = error_obj.get("code")
- error_message = error_obj.get("message")
- error_status = error_obj.get("status")
- logger.error(
- f"服务器错误详情: 代码={error_code}, 状态={error_status}, "
- f"消息={error_message}"
- )
- elif isinstance(error_json, dict) and "error" in error_json:
- # 处理单个错误对象的情况
- error_obj = error_json.get("error", {})
- error_code = error_obj.get("code")
- error_message = error_obj.get("message")
- error_status = error_obj.get("status")
- logger.error(
- f"服务器错误详情: 代码={error_code}, 状态={error_status}, 消息={error_message}"
- )
- else:
- # 记录原始错误响应内容
- logger.error(f"服务器错误响应: {error_json}")
- except Exception as e:
- logger.warning(f"无法解析服务器错误响应: {str(e)}")
-
- if response.status == 403:
- # 只针对硅基流动的V3和R1进行降级处理
- if (
- self.model_name.startswith("Pro/deepseek-ai")
- and self.base_url == "https://api.siliconflow.cn/v1/"
- ):
- old_model_name = self.model_name
- self.model_name = self.model_name[4:] # 移除"Pro/"前缀
- logger.warning(
- f"检测到403错误,模型从 {old_model_name} 降级为 {self.model_name}"
- )
-
- # 对全局配置进行更新
- if global_config.llm_normal.get("name") == old_model_name:
- global_config.llm_normal["name"] = self.model_name
- logger.warning(f"将全局配置中的 llm_normal 模型临时降级至{self.model_name}")
-
- if global_config.llm_reasoning.get("name") == old_model_name:
- global_config.llm_reasoning["name"] = self.model_name
- logger.warning(
- f"将全局配置中的 llm_reasoning 模型临时降级至{self.model_name}"
- )
-
- # 更新payload中的模型名
- if payload and "model" in payload:
- payload["model"] = self.model_name
-
- # 重新尝试请求
- retry -= 1 # 不计入重试次数
- continue
-
- raise RuntimeError(f"请求被拒绝: {error_code_mapping.get(response.status)}")
-
- response.raise_for_status()
- reasoning_content = ""
-
- # 将流式输出转化为非流式输出
- if stream_mode:
- flag_delta_content_finished = False
- accumulated_content = ""
- usage = None # 初始化usage变量,避免未定义错误
-
- async for line_bytes in response.content:
- try:
- line = line_bytes.decode("utf-8").strip()
- if not line:
- continue
- if line.startswith("data:"):
- data_str = line[5:].strip()
- if data_str == "[DONE]":
- break
- try:
- chunk = json.loads(data_str)
- if flag_delta_content_finished:
- chunk_usage = chunk.get("usage", None)
- if chunk_usage:
- usage = chunk_usage # 获取token用量
- else:
- delta = chunk["choices"][0]["delta"]
- delta_content = delta.get("content")
- if delta_content is None:
- delta_content = ""
- accumulated_content += delta_content
- # 检测流式输出文本是否结束
- finish_reason = chunk["choices"][0].get("finish_reason")
- if delta.get("reasoning_content", None):
- reasoning_content += delta["reasoning_content"]
- if finish_reason == "stop":
- chunk_usage = chunk.get("usage", None)
- if chunk_usage:
- usage = chunk_usage
- break
- # 部分平台在文本输出结束前不会返回token用量,此时需要再获取一次chunk
- flag_delta_content_finished = True
-
- except Exception as e:
- logger.exception(f"模型 {self.model_name} 解析流式输出错误: {str(e)}")
- except GeneratorExit:
- logger.warning("模型 {self.model_name} 流式输出被中断,正在清理资源...")
- # 确保资源被正确清理
- await response.release()
- # 返回已经累积的内容
- result = {
- "choices": [
- {
- "message": {
- "content": accumulated_content,
- "reasoning_content": reasoning_content,
- # 流式输出可能没有工具调用,此处不需要添加tool_calls字段
- }
- }
- ],
- "usage": usage,
- }
- return (
- response_handler(result)
- if response_handler
- else self._default_response_handler(result, user_id, request_type, endpoint)
- )
- except Exception as e:
- logger.error(f"模型 {self.model_name} 处理流式输出时发生错误: {str(e)}")
- # 确保在发生错误时也能正确清理资源
- try:
- await response.release()
- except Exception as cleanup_error:
- logger.error(f"清理资源时发生错误: {cleanup_error}")
- # 返回已经累积的内容
- result = {
- "choices": [
- {
- "message": {
- "content": accumulated_content,
- "reasoning_content": reasoning_content,
- # 流式输出可能没有工具调用,此处不需要添加tool_calls字段
- }
- }
- ],
- "usage": usage,
- }
- return (
- response_handler(result)
- if response_handler
- else self._default_response_handler(result, user_id, request_type, endpoint)
- )
- content = accumulated_content
- think_match = re.search(r"<think>(.*?)</think>", content, re.DOTALL)
- if think_match:
- reasoning_content = think_match.group(1).strip()
- content = re.sub(r"<think>.*?</think>", "", content, flags=re.DOTALL).strip()
- # 构造一个伪result以便调用自定义响应处理器或默认处理器
- result = {
- "choices": [
- {
- "message": {
- "content": content,
- "reasoning_content": reasoning_content,
- # 流式输出可能没有工具调用,此处不需要添加tool_calls字段
- }
- }
- ],
- "usage": usage,
- }
- return (
- response_handler(result)
- if response_handler
- else self._default_response_handler(result, user_id, request_type, endpoint)
- )
- else:
- result = await response.json()
- # 使用自定义处理器或默认处理
- return (
- response_handler(result)
- if response_handler
- else self._default_response_handler(result, user_id, request_type, endpoint)
- )
-
- except (aiohttp.ClientError, asyncio.TimeoutError) as e:
- if retry < policy["max_retries"] - 1:
- wait_time = policy["base_wait"] * (2**retry)
- logger.error(f"模型 {self.model_name} 网络错误,等待{wait_time}秒后重试... 错误: {str(e)}")
- await asyncio.sleep(wait_time)
- continue
- else:
- logger.critical(f"模型 {self.model_name} 网络错误达到最大重试次数: {str(e)}")
- raise RuntimeError(f"网络请求失败: {str(e)}") from e
- except Exception as e:
- logger.critical(f"模型 {self.model_name} 未预期的错误: {str(e)}")
- raise RuntimeError(f"请求过程中发生错误: {str(e)}") from e
-
- except aiohttp.ClientResponseError as e:
- # 处理aiohttp抛出的响应错误
- if retry < policy["max_retries"] - 1:
- wait_time = policy["base_wait"] * (2**retry)
- logger.error(
- f"模型 {self.model_name} HTTP响应错误,等待{wait_time}秒后重试... 状态码: {e.status}, 错误: {e.message}"
- )
- try:
- if hasattr(e, "response") and e.response and hasattr(e.response, "text"):
- error_text = await e.response.text()
- try:
- error_json = json.loads(error_text)
- if isinstance(error_json, list) and len(error_json) > 0:
- for error_item in error_json:
- if "error" in error_item and isinstance(error_item["error"], dict):
- error_obj = error_item["error"]
- logger.error(
- f"模型 {self.model_name} 服务器错误详情: 代码={error_obj.get('code')}, "
- f"状态={error_obj.get('status')}, "
- f"消息={error_obj.get('message')}"
- )
- elif isinstance(error_json, dict) and "error" in error_json:
- error_obj = error_json.get("error", {})
- logger.error(
- f"模型 {self.model_name} 服务器错误详情: 代码={error_obj.get('code')}, "
- f"状态={error_obj.get('status')}, "
- f"消息={error_obj.get('message')}"
- )
- else:
- logger.error(f"模型 {self.model_name} 服务器错误响应: {error_json}")
- except (json.JSONDecodeError, TypeError) as json_err:
- logger.warning(
- f"模型 {self.model_name} 响应不是有效的JSON: {str(json_err)}, 原始内容: {error_text[:200]}"
- )
- except (AttributeError, TypeError, ValueError) as parse_err:
- logger.warning(f"模型 {self.model_name} 无法解析响应错误内容: {str(parse_err)}")
-
- await asyncio.sleep(wait_time)
- else:
- logger.critical(
- f"模型 {self.model_name} HTTP响应错误达到最大重试次数: 状态码: {e.status}, 错误: {e.message}"
- )
- # 安全地检查和记录请求详情
- if (
- image_base64
- and payload
- and isinstance(payload, dict)
- and "messages" in payload
- and len(payload["messages"]) > 0
- ):
- if isinstance(payload["messages"][0], dict) and "content" in payload["messages"][0]:
- content = payload["messages"][0]["content"]
- if isinstance(content, list) and len(content) > 1 and "image_url" in content[1]:
- payload["messages"][0]["content"][1]["image_url"]["url"] = (
- f"data:image/{image_format.lower() if image_format else 'jpeg'};base64,"
- f"{image_base64[:10]}...{image_base64[-10:]}"
- )
- logger.critical(f"请求头: {await self._build_headers(no_key=True)} 请求体: {payload}")
- raise RuntimeError(f"模型 {self.model_name} API请求失败: 状态码 {e.status}, {e.message}") from e
- except Exception as e:
- if retry < policy["max_retries"] - 1:
- wait_time = policy["base_wait"] * (2**retry)
- logger.error(f"模型 {self.model_name} 请求失败,等待{wait_time}秒后重试... 错误: {str(e)}")
- await asyncio.sleep(wait_time)
- else:
- logger.critical(f"模型 {self.model_name} 请求失败: {str(e)}")
- # 安全地检查和记录请求详情
- if (
- image_base64
- and payload
- and isinstance(payload, dict)
- and "messages" in payload
- and len(payload["messages"]) > 0
- ):
- if isinstance(payload["messages"][0], dict) and "content" in payload["messages"][0]:
- content = payload["messages"][0]["content"]
- if isinstance(content, list) and len(content) > 1 and "image_url" in content[1]:
- payload["messages"][0]["content"][1]["image_url"]["url"] = (
- f"data:image/{image_format.lower() if image_format else 'jpeg'};base64,"
- f"{image_base64[:10]}...{image_base64[-10:]}"
- )
- logger.critical(f"请求头: {await self._build_headers(no_key=True)} 请求体: {payload}")
- raise RuntimeError(f"模型 {self.model_name} API请求失败: {str(e)}") from e
-
- logger.error(f"模型 {self.model_name} 达到最大重试次数,请求仍然失败")
- raise RuntimeError(f"模型 {self.model_name} 达到最大重试次数,API请求仍然失败")
- '''
-
- async def _prepare_request(
- self,
- endpoint: str,
- prompt: str = None,
- image_base64: str = None,
- image_format: str = None,
- payload: dict = None,
- retry_policy: dict = None,
- ) -> Dict[str, Any]:
- """配置请求参数
- Args:
- endpoint: API端点路径 (如 "chat/completions")
- prompt: prompt文本
- image_base64: 图片的base64编码
- image_format: 图片格式
- payload: 请求体数据
- retry_policy: 自定义重试策略
- request_type: 请求类型
- """
-
- # 合并重试策略
- default_retry = {
- "max_retries": 3,
- "base_wait": 10,
- "retry_codes": [429, 413, 500, 503],
- "abort_codes": [400, 401, 402, 403],
- }
- policy = {**default_retry, **(retry_policy or {})}
-
- api_url = f"{self.base_url.rstrip('/')}/{endpoint.lstrip('/')}"
-
- stream_mode = self.stream
-
- # 构建请求体
- if image_base64:
- payload = await self._build_payload(prompt, image_base64, image_format)
- elif payload is None:
- payload = await self._build_payload(prompt)
-
- if stream_mode:
- payload["stream"] = stream_mode
-
- return {
- "policy": policy,
- "payload": payload,
- "api_url": api_url,
- "stream_mode": stream_mode,
- "image_base64": image_base64, # 保留必要的exception处理所需的原始数据
- "image_format": image_format,
- "prompt": prompt,
- }
-
- async def _execute_request(
- self,
- endpoint: str,
- prompt: str = None,
- image_base64: str = None,
- image_format: str = None,
- payload: dict = None,
- retry_policy: dict = None,
- response_handler: callable = None,
- user_id: str = "system",
- request_type: str = None,
- ):
- """统一请求执行入口
- Args:
- endpoint: API端点路径 (如 "chat/completions")
- prompt: prompt文本
- image_base64: 图片的base64编码
- image_format: 图片格式
- payload: 请求体数据
- retry_policy: 自定义重试策略
- response_handler: 自定义响应处理器
- user_id: 用户ID
- request_type: 请求类型
- """
- # 获取请求配置
- request_content = await self._prepare_request(
- endpoint, prompt, image_base64, image_format, payload, retry_policy
- )
- if request_type is None:
- request_type = self.request_type
- retry = 0
- while retry < request_content["policy"]["max_retries"]:
- try:
- # 使用上下文管理器处理会话
- headers = await self._build_headers()
- # 似乎是openai流式必须要的东西,不过阿里云的qwq-plus加了这个没有影响
- if request_content["stream_mode"]:
- headers["Accept"] = "text/event-stream"
- async with aiohttp.ClientSession() as session:
- async with session.post(
- request_content["api_url"], headers=headers, json=request_content["payload"]
- ) as response:
- handled_result = await self._handle_response(
- response, request_content, retry, response_handler, user_id, request_type, endpoint
- )
- return handled_result
- except Exception as e:
- handled_payload, count_delta = await self._handle_exception(e, retry, request_content)
- retry += 1 + count_delta # 降级时count_delta为-1,该次不计入重试次数
- if handled_payload:
- # 如果降级成功,重新构建请求体
- request_content["payload"] = handled_payload
- continue
-
- logger.error(f"模型 {self.model_name} 达到最大重试次数,请求仍然失败")
- raise RuntimeError(f"模型 {self.model_name} 达到最大重试次数,API请求仍然失败")
-
- async def _handle_response(
- self,
- response: ClientResponse,
- request_content: Dict[str, Any],
- retry_count: int,
- response_handler: callable,
- user_id,
- request_type,
- endpoint,
- ) -> Union[Dict[str, Any], None]:
- policy = request_content["policy"]
- stream_mode = request_content["stream_mode"]
- if response.status in policy["retry_codes"] or response.status in policy["abort_codes"]:
- await self._handle_error_response(response, retry_count, policy)
- return
-
- response.raise_for_status()
- result = {}
- if stream_mode:
- # 将流式输出转化为非流式输出
- result = await self._handle_stream_output(response)
- else:
- result = await response.json()
- return (
- response_handler(result)
- if response_handler
- else self._default_response_handler(result, user_id, request_type, endpoint)
- )
-
- async def _handle_stream_output(self, response: ClientResponse) -> Dict[str, Any]:
- flag_delta_content_finished = False
- accumulated_content = ""
- usage = None # 初始化usage变量,避免未定义错误
- reasoning_content = ""
- content = ""
- async for line_bytes in response.content:
- try:
- line = line_bytes.decode("utf-8").strip()
- if not line:
- continue
- if line.startswith("data:"):
- data_str = line[5:].strip()
- if data_str == "[DONE]":
- break
- try:
- chunk = json.loads(data_str)
- if flag_delta_content_finished:
- chunk_usage = chunk.get("usage", None)
- if chunk_usage:
- usage = chunk_usage # 获取token用量
- else:
- delta = chunk["choices"][0]["delta"]
- delta_content = delta.get("content")
- if delta_content is None:
- delta_content = ""
- accumulated_content += delta_content
- # 检测流式输出文本是否结束
- finish_reason = chunk["choices"][0].get("finish_reason")
- if delta.get("reasoning_content", None):
- reasoning_content += delta["reasoning_content"]
- if finish_reason == "stop":
- chunk_usage = chunk.get("usage", None)
- if chunk_usage:
- usage = chunk_usage
- break
- # 部分平台在文本输出结束前不会返回token用量,此时需要再获取一次chunk
- flag_delta_content_finished = True
- except Exception as e:
- logger.exception(f"模型 {self.model_name} 解析流式输出错误: {str(e)}")
- except Exception as e:
- if isinstance(e, GeneratorExit):
- log_content = f"模型 {self.model_name} 流式输出被中断,正在清理资源..."
- else:
- log_content = f"模型 {self.model_name} 处理流式输出时发生错误: {str(e)}"
- logger.warning(log_content)
- # 确保资源被正确清理
- try:
- await response.release()
- except Exception as cleanup_error:
- logger.error(f"清理资源时发生错误: {cleanup_error}")
- # 返回已经累积的内容
- content = accumulated_content
- if not content:
- content = accumulated_content
- think_match = re.search(r"<think>(.*?)</think>", content, re.DOTALL)
- if think_match:
- reasoning_content = think_match.group(1).strip()
- content = re.sub(r"<think>.*?</think>", "", content, flags=re.DOTALL).strip()
- result = {
- "choices": [
- {
- "message": {
- "content": content,
- "reasoning_content": reasoning_content,
- # 流式输出可能没有工具调用,此处不需要添加tool_calls字段
- }
- }
- ],
- "usage": usage,
- }
- return result
-
- async def _handle_error_response(
- self, response: ClientResponse, retry_count: int, policy: Dict[str, Any]
- ) -> None:
- if response.status in policy["retry_codes"]:
- wait_time = policy["base_wait"] * (2**retry_count)
- logger.warning(f"模型 {self.model_name} 错误码: {response.status}, 等待 {wait_time}秒后重试")
- if response.status == 413:
- logger.warning("请求体过大,尝试压缩...")
- raise PayLoadTooLargeError("请求体过大")
- elif response.status in [500, 503]:
- logger.error(
- f"模型 {self.model_name} 错误码: {response.status} - {error_code_mapping.get(response.status)}"
- )
- raise RuntimeError("服务器负载过高,模型恢复失败QAQ")
- else:
- logger.warning(f"模型 {self.model_name} 请求限制(429),等待{wait_time}秒后重试...")
- raise RuntimeError("请求限制(429)")
- elif response.status in policy["abort_codes"]:
- if response.status != 403:
- raise RequestAbortException("请求出现错误,中断处理", response)
- else:
- raise PermissionDeniedException("模型禁止访问")
-
- async def _handle_exception(
- self, exception, retry_count: int, request_content: Dict[str, Any]
- ) -> Union[Tuple[Dict[str, Any], int], Tuple[None, int]]:
- policy = request_content["policy"]
- payload = request_content["payload"]
- keep_request = False
- if retry_count < policy["max_retries"] - 1:
- wait_time = policy["base_wait"] * (2**retry_count)
- keep_request = True
- if isinstance(exception, RequestAbortException):
- response = exception.response
- logger.error(
- f"模型 {self.model_name} 错误码: {response.status} - {error_code_mapping.get(response.status)}"
- )
- # 尝试获取并记录服务器返回的详细错误信息
- try:
- error_json = await response.json()
- if error_json and isinstance(error_json, list) and len(error_json) > 0:
- # 处理多个错误的情况
- for error_item in error_json:
- if "error" in error_item and isinstance(error_item["error"], dict):
- error_obj: dict = error_item["error"]
- error_code = error_obj.get("code")
- error_message = error_obj.get("message")
- error_status = error_obj.get("status")
- logger.error(
- f"服务器错误详情: 代码={error_code}, 状态={error_status}, 消息={error_message}"
- )
- elif isinstance(error_json, dict) and "error" in error_json:
- # 处理单个错误对象的情况
- error_obj = error_json.get("error", {})
- error_code = error_obj.get("code")
- error_message = error_obj.get("message")
- error_status = error_obj.get("status")
- logger.error(f"服务器错误详情: 代码={error_code}, 状态={error_status}, 消息={error_message}")
- else:
- # 记录原始错误响应内容
- logger.error(f"服务器错误响应: {error_json}")
- except Exception as e:
- logger.warning(f"无法解析服务器错误响应: {str(e)}")
- raise RuntimeError(f"请求被拒绝: {error_code_mapping.get(response.status)}")
-
- elif isinstance(exception, PermissionDeniedException):
- # 只针对硅基流动的V3和R1进行降级处理
- if self.model_name.startswith("Pro/deepseek-ai") and self.base_url == "https://api.siliconflow.cn/v1/":
- old_model_name = self.model_name
- self.model_name = self.model_name[4:] # 移除"Pro/"前缀
- logger.warning(f"检测到403错误,模型从 {old_model_name} 降级为 {self.model_name}")
-
- # 对全局配置进行更新
- if global_config.llm_normal.get("name") == old_model_name:
- global_config.llm_normal["name"] = self.model_name
- logger.warning(f"将全局配置中的 llm_normal 模型临时降级至{self.model_name}")
- if global_config.llm_reasoning.get("name") == old_model_name:
- global_config.llm_reasoning["name"] = self.model_name
- logger.warning(f"将全局配置中的 llm_reasoning 模型临时降级至{self.model_name}")
-
- if payload and "model" in payload:
- payload["model"] = self.model_name
-
- await asyncio.sleep(wait_time)
- return payload, -1
- raise RuntimeError(f"请求被拒绝: {error_code_mapping.get(403)}")
-
- elif isinstance(exception, PayLoadTooLargeError):
- if keep_request:
- image_base64 = request_content["image_base64"]
- compressed_image_base64 = compress_base64_image_by_scale(image_base64)
- new_payload = await self._build_payload(
- request_content["prompt"], compressed_image_base64, request_content["image_format"]
- )
- return new_payload, 0
- else:
- return None, 0
-
- elif isinstance(exception, aiohttp.ClientError) or isinstance(exception, asyncio.TimeoutError):
- if keep_request:
- logger.error(f"模型 {self.model_name} 网络错误,等待{wait_time}秒后重试... 错误: {str(exception)}")
- await asyncio.sleep(wait_time)
- return None, 0
- else:
- logger.critical(f"模型 {self.model_name} 网络错误达到最大重试次数: {str(exception)}")
- raise RuntimeError(f"网络请求失败: {str(exception)}")
-
- elif isinstance(exception, aiohttp.ClientResponseError):
- # 处理aiohttp抛出的,除了policy中的status的响应错误
- if keep_request:
- logger.error(
- f"模型 {self.model_name} HTTP响应错误,等待{wait_time}秒后重试... 状态码: {exception.status}, 错误: {exception.message}"
- )
- try:
- # ClientResponseError 本身不携带响应对象,防御性获取,取不到则由下方兜底告警处理
- error_response = getattr(exception, "response", None)
- if error_response is None:
- raise AttributeError("异常对象上没有可读取的 response")
- error_text = await error_response.text()
- error_json = json.loads(error_text)
- if isinstance(error_json, list) and len(error_json) > 0:
- # 处理多个错误的情况
- for error_item in error_json:
- if "error" in error_item and isinstance(error_item["error"], dict):
- error_obj = error_item["error"]
- logger.error(
- f"模型 {self.model_name} 服务器错误详情: 代码={error_obj.get('code')}, "
- f"状态={error_obj.get('status')}, "
- f"消息={error_obj.get('message')}"
- )
- elif isinstance(error_json, dict) and "error" in error_json:
- error_obj = error_json.get("error", {})
- logger.error(
- f"模型 {self.model_name} 服务器错误详情: 代码={error_obj.get('code')}, "
- f"状态={error_obj.get('status')}, "
- f"消息={error_obj.get('message')}"
- )
- else:
- logger.error(f"模型 {self.model_name} 服务器错误响应: {error_json}")
- except (json.JSONDecodeError, TypeError) as json_err:
- logger.warning(
- f"模型 {self.model_name} 响应不是有效的JSON: {str(json_err)}, 原始内容: {error_text[:200]}"
- )
- except Exception as parse_err:
- logger.warning(f"模型 {self.model_name} 无法解析响应错误内容: {str(parse_err)}")
-
- await asyncio.sleep(wait_time)
- return None, 0
- else:
- logger.critical(
- f"模型 {self.model_name} HTTP响应错误达到最大重试次数: 状态码: {exception.status}, 错误: {exception.message}"
- )
- # 安全地检查和记录请求详情
- handled_payload = await self._safely_record(request_content, payload)
- logger.critical(f"请求头: {await self._build_headers(no_key=True)} 请求体: {handled_payload}")
- raise RuntimeError(
- f"模型 {self.model_name} API请求失败: 状态码 {exception.status}, {exception.message}"
- )
-
- else:
- if keep_request:
- logger.error(f"模型 {self.model_name} 请求失败,等待{wait_time}秒后重试... 错误: {str(exception)}")
- await asyncio.sleep(wait_time)
- return None, 0
- else:
- logger.critical(f"模型 {self.model_name} 请求失败: {str(exception)}")
- # 安全地检查和记录请求详情
- handled_payload = await self._safely_record(request_content, payload)
- logger.critical(f"请求头: {await self._build_headers(no_key=True)} 请求体: {handled_payload}")
- raise RuntimeError(f"模型 {self.model_name} API请求失败: {str(exception)}")
-
- async def _safely_record(self, request_content: Dict[str, Any], payload: Dict[str, Any]):
- image_base64: str = request_content.get("image_base64")
- image_format: str = request_content.get("image_format")
- if (
- image_base64
- and payload
- and isinstance(payload, dict)
- and "messages" in payload
- and len(payload["messages"]) > 0
- ):
- if isinstance(payload["messages"][0], dict) and "content" in payload["messages"][0]:
- content = payload["messages"][0]["content"]
- if isinstance(content, list) and len(content) > 1 and "image_url" in content[1]:
- payload["messages"][0]["content"][1]["image_url"]["url"] = (
- f"data:image/{image_format.lower() if image_format else 'jpeg'};base64,"
- f"{image_base64[:10]}...{image_base64[-10:]}"
- )
- # if isinstance(content, str) and len(content) > 100:
- # payload["messages"][0]["content"] = content[:100]
- return payload
-
- async def _transform_parameters(self, params: dict) -> dict:
- """
- 根据模型名称转换参数:
- - 对于需要转换的OpenAI CoT系列模型(例如 "o3-mini"),删除 'temperature' 参数,
- 并将 'max_tokens' 重命名为 'max_completion_tokens'
- """
- # 复制一份参数,避免直接修改原始数据
- new_params = dict(params)
-
- if self.model_name.lower() in self.MODELS_NEEDING_TRANSFORMATION:
- # 删除 'temperature' 参数(如果存在)
- new_params.pop("temperature", None)
- # 如果存在 'max_tokens',则重命名为 'max_completion_tokens'
- if "max_tokens" in new_params:
- new_params["max_completion_tokens"] = new_params.pop("max_tokens")
- return new_params
-
- async def _build_payload(self, prompt: str, image_base64: str = None, image_format: str = None) -> dict:
- """构建请求体"""
- # 复制一份参数,避免直接修改 self.params
- params_copy = await self._transform_parameters(self.params)
- if image_base64:
- payload = {
- "model": self.model_name,
- "messages": [
- {
- "role": "user",
- "content": [
- {"type": "text", "text": prompt},
- {
- "type": "image_url",
- "image_url": {"url": f"data:image/{image_format.lower()};base64,{image_base64}"},
- },
- ],
- }
- ],
- "max_tokens": global_config.max_response_length,
- **params_copy,
- }
- else:
- payload = {
- "model": self.model_name,
- "messages": [{"role": "user", "content": prompt}],
- "max_tokens": global_config.max_response_length,
- **params_copy,
- }
- # 如果 payload 中依然存在 max_tokens 且需要转换,在这里进行再次检查
- if self.model_name.lower() in self.MODELS_NEEDING_TRANSFORMATION and "max_tokens" in payload:
- payload["max_completion_tokens"] = payload.pop("max_tokens")
- return payload
-
- def _default_response_handler(
- self, result: dict, user_id: str = "system", request_type: str = None, endpoint: str = "/chat/completions"
- ) -> Tuple:
- """默认响应解析"""
- if "choices" in result and result["choices"]:
- message = result["choices"][0]["message"]
- content = message.get("content", "")
- content, reasoning = self._extract_reasoning(content)
- reasoning_content = message.get("model_extra", {}).get("reasoning_content", "")
- if not reasoning_content:
- reasoning_content = message.get("reasoning_content", "")
- if not reasoning_content:
- reasoning_content = reasoning
-
- # 提取工具调用信息
- tool_calls = message.get("tool_calls", None)
-
- # 记录token使用情况
- usage = result.get("usage", {})
- if usage:
- prompt_tokens = usage.get("prompt_tokens", 0)
- completion_tokens = usage.get("completion_tokens", 0)
- total_tokens = usage.get("total_tokens", 0)
- self._record_usage(
- prompt_tokens=prompt_tokens,
- completion_tokens=completion_tokens,
- total_tokens=total_tokens,
- user_id=user_id,
- request_type=request_type if request_type is not None else self.request_type,
- endpoint=endpoint,
- )
-
- # 只有当tool_calls存在且不为空时才返回
- if tool_calls:
- return content, reasoning_content, tool_calls
- else:
- return content, reasoning_content
-
- return "没有返回结果", ""
-
- @staticmethod
- def _extract_reasoning(content: str) -> Tuple[str, str]:
- """CoT思维链提取"""
- match = re.search(r"(?:<think>)?(.*?)</think>", content, re.DOTALL)
- content = re.sub(r"(?:<think>)?.*?</think>", "", content, flags=re.DOTALL, count=1).strip()
- if match:
- reasoning = match.group(1).strip()
- else:
- reasoning = ""
- return content, reasoning
-
- async def _build_headers(self, no_key: bool = False) -> dict:
- """构建请求头"""
- if no_key:
- return {"Authorization": "Bearer **********", "Content-Type": "application/json"}
- else:
- return {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}
- # 防止小朋友们截图自己的key
-
- async def generate_response(self, prompt: str) -> Tuple:
- """根据输入的提示生成模型的异步响应"""
-
- response = await self._execute_request(endpoint="/chat/completions", prompt=prompt)
- # 根据返回值的长度决定怎么处理
- if len(response) == 3:
- content, reasoning_content, tool_calls = response
- return content, reasoning_content, self.model_name, tool_calls
- else:
- content, reasoning_content = response
- return content, reasoning_content, self.model_name
-
- async def generate_response_for_image(self, prompt: str, image_base64: str, image_format: str) -> Tuple:
- """根据输入的提示和图片生成模型的异步响应"""
-
- response = await self._execute_request(
- endpoint="/chat/completions", prompt=prompt, image_base64=image_base64, image_format=image_format
- )
- # 根据返回值的长度决定怎么处理
- if len(response) == 3:
- content, reasoning_content, tool_calls = response
- return content, reasoning_content, tool_calls
- else:
- content, reasoning_content = response
- return content, reasoning_content
-
- async def generate_response_async(self, prompt: str, **kwargs) -> Union[str, Tuple]:
- """异步方式根据输入的提示生成模型的响应"""
- # 构建请求体
- data = {
- "model": self.model_name,
- "messages": [{"role": "user", "content": prompt}],
- "max_tokens": global_config.max_response_length,
- **self.params,
- **kwargs,
- }
-
- response = await self._execute_request(endpoint="/chat/completions", payload=data, prompt=prompt)
- # 原样返回响应,不做处理
- return response
-
- async def get_embedding(self, text: str) -> Union[list, None]:
- """异步方法:获取文本的embedding向量
-
- Args:
- text: 需要获取embedding的文本
-
- Returns:
- list: embedding向量,如果失败则返回None
- """
-
- if len(text) < 1:
- logger.debug("该消息没有长度,不再发送获取embedding向量的请求")
- return None
-
- def embedding_handler(result):
- """处理响应"""
- if "data" in result and len(result["data"]) > 0:
- # 提取 token 使用信息
- usage = result.get("usage", {})
- if usage:
- prompt_tokens = usage.get("prompt_tokens", 0)
- completion_tokens = usage.get("completion_tokens", 0)
- total_tokens = usage.get("total_tokens", 0)
- # 记录 token 使用情况
- self._record_usage(
- prompt_tokens=prompt_tokens,
- completion_tokens=completion_tokens,
- total_tokens=total_tokens,
- user_id="system", # 可以根据需要修改 user_id
- # request_type="embedding", # 请求类型为 embedding
- request_type=self.request_type, # 请求类型为 text
- endpoint="/embeddings", # API 端点
- )
- return result["data"][0].get("embedding", None)
- return result["data"][0].get("embedding", None)
- return None
-
- embedding = await self._execute_request(
- endpoint="/embeddings",
- prompt=text,
- payload={"model": self.model_name, "input": text, "encoding_format": "float"},
- retry_policy={"max_retries": 2, "base_wait": 6},
- response_handler=embedding_handler,
- )
- return embedding
-
-
-def compress_base64_image_by_scale(base64_data: str, target_size: int = 0.8 * 1024 * 1024) -> str:
- """压缩base64格式的图片到指定大小
- Args:
- base64_data: base64编码的图片数据
- target_size: 目标文件大小(字节),默认0.8MB
- Returns:
- str: 压缩后的base64图片数据
- """
- try:
- # 将base64转换为字节数据
- image_data = base64.b64decode(base64_data)
-
- # 不超过2MB的图片直接返回原图,不做压缩
- if len(image_data) <= 2 * 1024 * 1024:
- return base64_data
-
- # 将字节数据转换为图片对象
- img = Image.open(io.BytesIO(image_data))
-
- # 获取原始尺寸
- original_width, original_height = img.size
-
- # 计算缩放比例
- scale = min(1.0, (target_size / len(image_data)) ** 0.5)
-
- # 计算新的尺寸
- new_width = int(original_width * scale)
- new_height = int(original_height * scale)
-
- # 创建内存缓冲区
- output_buffer = io.BytesIO()
-
- # 如果是GIF,处理所有帧
- if getattr(img, "is_animated", False):
- frames = []
- for frame_idx in range(img.n_frames):
- img.seek(frame_idx)
- new_frame = img.copy()
- new_frame = new_frame.resize((new_width // 2, new_height // 2), Image.Resampling.LANCZOS) # 动图折上折
- frames.append(new_frame)
-
- # 保存到缓冲区
- frames[0].save(
- output_buffer,
- format="GIF",
- save_all=True,
- append_images=frames[1:],
- optimize=True,
- duration=img.info.get("duration", 100),
- loop=img.info.get("loop", 0),
- )
- else:
- # 处理静态图片
- resized_img = img.resize((new_width, new_height), Image.Resampling.LANCZOS)
-
- # 保存到缓冲区,保持原始格式
- if img.format == "PNG" and img.mode in ("RGBA", "LA"):
- resized_img.save(output_buffer, format="PNG", optimize=True)
- else:
- resized_img.save(output_buffer, format="JPEG", quality=95, optimize=True)
-
- # 获取压缩后的数据并转换为base64
- compressed_data = output_buffer.getvalue()
- logger.success(f"压缩图片: {original_width}x{original_height} -> {new_width}x{new_height}")
- logger.info(f"压缩前大小: {len(image_data) / 1024:.1f}KB, 压缩后大小: {len(compressed_data) / 1024:.1f}KB")
-
- return base64.b64encode(compressed_data).decode("utf-8")
-
- except Exception as e:
- logger.error(f"压缩图片失败: {str(e)}")
- import traceback
-
- logger.error(traceback.format_exc())
- return base64_data
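
A note on the scaling arithmetic in compress_base64_image_by_scale: encoded size grows roughly with pixel count, i.e. with the square of each linear dimension, so reaching a byte budget B from a current size S needs a linear scale of about sqrt(B/S). A quick check of that arithmetic with the function's own 0.8 MB target (the 4 MB input is a hypothetical example, and real compressed sizes also depend on content and codec):

    target_size = 0.8 * 1024 * 1024   # byte budget, as in the function above
    current_size = 4 * 1024 * 1024    # hypothetical 4 MB image

    scale = min(1.0, (target_size / current_size) ** 0.5)
    print(round(scale, 3))  # ~0.447 -- each side shrinks to ~45%

    # Pixel count (and roughly the byte size) shrinks with scale**2:
    estimated = scale ** 2 * current_size
    print(round(estimated / 1024 / 1024, 2))  # ~0.8 MB, back at the target
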
From 58c66c5c9b248de653f4a2f1d6f77d39e3fb8b26 Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Mon, 21 Apr 2025 10:49:13 +0800
Subject: [PATCH 04/26] =?UTF-8?q?better=EF=BC=9A=E4=BC=98=E5=8C=96?=
=?UTF-8?q?=E8=AE=B0=E5=BF=86=E6=8F=90=E5=8F=96=E5=8A=9F=E8=83=BD?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/heart_flow/heartflow.py | 4 ++--
src/heart_flow/observation.py | 2 +-
src/plugins/chat_module/heartFC_chat/messagesender.py | 3 +--
src/plugins/memory_system/Hippocampus.py | 5 ++---
template/bot_config_template.toml | 2 +-
5 files changed, 7 insertions(+), 9 deletions(-)
diff --git a/src/heart_flow/heartflow.py b/src/heart_flow/heartflow.py
index 793f406f..5d53a07d 100644
--- a/src/heart_flow/heartflow.py
+++ b/src/heart_flow/heartflow.py
@@ -96,7 +96,7 @@ class Heartflow:
continue
await self.do_a_thinking()
- await asyncio.sleep(global_config.heart_flow_update_interval) # 5分钟思考一次
+ await asyncio.sleep(global_config.heart_flow_update_interval * 3) # 间隔放大为3倍,降低主心流思考频率
async def heartflow_start_working(self):
# 启动清理任务
@@ -110,7 +110,7 @@ class Heartflow:
print("TODO")
async def do_a_thinking(self):
- logger.debug("麦麦大脑袋转起来了")
+ # logger.debug("麦麦大脑袋转起来了")
self.current_state.update_current_state_info()
# 开始构建prompt
diff --git a/src/heart_flow/observation.py b/src/heart_flow/observation.py
index 9903b184..49efe7eb 100644
--- a/src/heart_flow/observation.py
+++ b/src/heart_flow/observation.py
@@ -139,7 +139,7 @@ class ChattingObservation(Observation):
# traceback.print_exc() # 记录详细堆栈
# print(f"处理后self.talking_message:{self.talking_message}")
- self.talking_message_str = await build_readable_messages(self.talking_message)
+ self.talking_message_str = await build_readable_messages(messages=self.talking_message, timestamp_mode="normal")
logger.trace(
f"Chat {self.chat_id} - 压缩早期记忆:{self.mid_memory_info}\n现在聊天内容:{self.talking_message_str}"
diff --git a/src/plugins/chat_module/heartFC_chat/messagesender.py b/src/plugins/chat_module/heartFC_chat/messagesender.py
index fb295bed..897bc45f 100644
--- a/src/plugins/chat_module/heartFC_chat/messagesender.py
+++ b/src/plugins/chat_module/heartFC_chat/messagesender.py
@@ -220,9 +220,8 @@ class MessageManager:
await asyncio.sleep(typing_time)
logger.debug(f"\n{message_earliest.processed_plain_text},{typing_time},等待输入时间结束\n")
- await self.storage.store_message(message_earliest, message_earliest.chat_stream)
-
await MessageSender().send_message(message_earliest)
+ await self.storage.store_message(message_earliest, message_earliest.chat_stream)
container.remove_message(message_earliest)
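
The reorder above (send first, store after) means a message that fails to go out is never persisted as if it had been delivered. A small sketch of the effect, with hypothetical sender/storage stand-ins:

    import asyncio

    class SendError(Exception):
        pass

    async def send(msg: str) -> None:
        raise SendError("network down")  # simulate a failed send

    async def deliver(msg: str, store: list) -> None:
        await send(msg)     # patch ordering: send first...
        store.append(msg)   # ...persist only after the send succeeded

    store: list = []
    try:
        asyncio.run(deliver("hello", store))
    except SendError:
        pass
    print(store)  # [] -- the failed message was never recorded as sent
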
diff --git a/src/plugins/memory_system/Hippocampus.py b/src/plugins/memory_system/Hippocampus.py
index 557b42f2..4b40649d 100644
--- a/src/plugins/memory_system/Hippocampus.py
+++ b/src/plugins/memory_system/Hippocampus.py
@@ -1946,15 +1946,14 @@ class HippocampusManager:
valid_keywords: list[str],
max_memory_num: int = 3,
max_memory_length: int = 2,
- max_depth: int = 3,
- fast_retrieval: bool = False,
+ max_depth: int = 3
) -> list:
"""从文本中获取相关记忆的公共接口"""
if not self._initialized:
raise RuntimeError("HippocampusManager 尚未初始化,请先调用 initialize 方法")
try:
response = await self._hippocampus.get_memory_from_topic(
- valid_keywords, max_memory_num, max_memory_length, max_depth, fast_retrieval
+ valid_keywords, max_memory_num, max_memory_length, max_depth
)
except Exception as e:
logger.error(f"文本激活记忆失败: {e}")
diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml
index f0a52e76..3ebf1459 100644
--- a/template/bot_config_template.toml
+++ b/template/bot_config_template.toml
@@ -1,5 +1,5 @@
[inner]
-version = "1.3.1"
+version = "1.4.0"
#以下是给开发人员阅读的,一般用户不需要阅读
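
The config changes in the next patch gate newly added keys behind checks like config.INNER_VERSION in SpecifierSet(">=1.4.0"). A short sketch of that version-gating idiom using the packaging library (key names and defaults here are illustrative):

    from packaging.specifiers import SpecifierSet

    def load_heartflow(raw: dict, inner_version: str) -> dict:
        cfg = {"sub_heart_flow_stop_time": raw.get("sub_heart_flow_stop_time", 600)}
        # Keys introduced in 1.4.0 are only read from configs declared new enough:
        if inner_version in SpecifierSet(">=1.4.0"):
            cfg["reply_trigger_threshold"] = raw.get("reply_trigger_threshold", 3.0)
            cfg["initial_duration"] = raw.get("initial_duration", 60)
        return cfg

    print(load_heartflow({"reply_trigger_threshold": 2.5}, "1.4.0"))  # gated keys loaded
    print(load_heartflow({"reply_trigger_threshold": 2.5}, "1.3.1"))  # gated keys skipped
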
From 7e0f41c0395df7b897a534cf08fb8df21eefee30 Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Mon, 21 Apr 2025 12:34:00 +0800
Subject: [PATCH 05/26] =?UTF-8?q?fix=EF=BC=9A=E4=BF=AE=E6=94=B9config?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/config/config.py | 62 +++++++-----
src/heart_flow/heartflow.py | 99 ++++++++++++-------
src/heart_flow/sub_heartflow.py | 28 ++----
src/plugins/chat/bot.py | 63 ++++--------
.../chat_module/heartFC_chat/pf_chatting.py | 5 +-
template/bot_config_template.toml | 77 ++++++++-------
6 files changed, 179 insertions(+), 155 deletions(-)
diff --git a/src/config/config.py b/src/config/config.py
index 0dae0244..d2fe6f0f 100644
--- a/src/config/config.py
+++ b/src/config/config.py
@@ -186,12 +186,18 @@ class BotConfig:
ban_words = set()
ban_msgs_regex = set()
- # heartflow
- # enable_heartflow: bool = False # 是否启用心流
- sub_heart_flow_update_interval: int = 60 # 子心流更新频率,间隔 单位秒
- sub_heart_flow_freeze_time: int = 120 # 子心流冻结时间,超过这个时间没有回复,子心流会冻结,间隔 单位秒
+ # [heartflow] # 启用heart_flowC(心流聊天)模式时生效, 需要填写token消耗量巨大的相关模型
+ # 启用后麦麦会自主选择进入heart_flowC模式(持续一段时间), 进行长时间高质量的聊天
+ enable_heart_flowC: bool = True # 是否启用heart_flowC(心流聊天, HFC)模式
+ reply_trigger_threshold: float = 3.0 # 心流聊天触发阈值,越低越容易触发
+ probability_decay_factor_per_second: float = 0.2 # 概率衰减因子,越大衰减越快
+ default_decay_rate_per_second: float = 0.98 # 默认衰减率,越大衰减越慢
+ initial_duration: int = 60 # 初始持续时间,越大心流聊天持续的时间越长
+
+ # sub_heart_flow_update_interval: int = 60 # 子心流更新频率,间隔 单位秒
+ # sub_heart_flow_freeze_time: int = 120 # 子心流冻结时间,超过这个时间没有回复,子心流会冻结,间隔 单位秒
sub_heart_flow_stop_time: int = 600 # 子心流停止时间,超过这个时间没有回复,子心流会停止,间隔 单位秒
- heart_flow_update_interval: int = 300 # 心流更新频率,间隔 单位秒
+ # heart_flow_update_interval: int = 300 # 心流更新频率,间隔 单位秒
observation_context_size: int = 20 # 心流观察到的最长上下文大小,超过这个值的上下文会被压缩
compressed_length: int = 5 # 不能大于observation_context_size,心流上下文压缩的最短压缩长度,超过心流观察到的上下文长度,会压缩,最短压缩长度为5
compress_length_limit: int = 5 # 最多压缩份数,超过该数值的压缩上下文会被删除
@@ -207,8 +213,8 @@ class BotConfig:
# response
response_mode: str = "heart_flow" # 回复策略
- MODEL_R1_PROBABILITY: float = 0.8 # R1模型概率
- MODEL_V3_PROBABILITY: float = 0.1 # V3模型概率
+ model_reasoning_probability: float = 0.7 # 麦麦回答时选择推理模型(主要)模型概率
+ model_normal_probability: float = 0.3 # 麦麦回答时选择一般模型(次要)模型概率
# MODEL_R1_DISTILL_PROBABILITY: float = 0.1 # R1蒸馏模型概率
# emoji
@@ -401,29 +407,32 @@ class BotConfig:
def response(parent: dict):
response_config = parent["response"]
- config.MODEL_R1_PROBABILITY = response_config.get("model_r1_probability", config.MODEL_R1_PROBABILITY)
- config.MODEL_V3_PROBABILITY = response_config.get("model_v3_probability", config.MODEL_V3_PROBABILITY)
- # config.MODEL_R1_DISTILL_PROBABILITY = response_config.get(
- # "model_r1_distill_probability", config.MODEL_R1_DISTILL_PROBABILITY
- # )
- config.max_response_length = response_config.get("max_response_length", config.max_response_length)
- if config.INNER_VERSION in SpecifierSet(">=1.0.4"):
- config.response_mode = response_config.get("response_mode", config.response_mode)
+ config.model_reasoning_probability = response_config.get("model_reasoning_probability", config.model_reasoning_probability)
+ config.model_normal_probability = response_config.get("model_normal_probability", config.model_normal_probability)
+
+
+ # 添加 enable_heart_flowC 的加载逻辑 (假设它在 [response] 部分)
+ if config.INNER_VERSION in SpecifierSet(">=1.4.0"):
+ config.enable_heart_flowC = response_config.get("enable_heart_flowC", config.enable_heart_flowC)
def heartflow(parent: dict):
heartflow_config = parent["heartflow"]
- config.sub_heart_flow_update_interval = heartflow_config.get(
- "sub_heart_flow_update_interval", config.sub_heart_flow_update_interval
- )
- config.sub_heart_flow_freeze_time = heartflow_config.get(
- "sub_heart_flow_freeze_time", config.sub_heart_flow_freeze_time
- )
+ # 加载新增的 heartflowC 参数
+
+
+ # 加载原有的 heartflow 参数
+ # config.sub_heart_flow_update_interval = heartflow_config.get(
+ # "sub_heart_flow_update_interval", config.sub_heart_flow_update_interval
+ # )
+ # config.sub_heart_flow_freeze_time = heartflow_config.get(
+ # "sub_heart_flow_freeze_time", config.sub_heart_flow_freeze_time
+ # )
config.sub_heart_flow_stop_time = heartflow_config.get(
"sub_heart_flow_stop_time", config.sub_heart_flow_stop_time
)
- config.heart_flow_update_interval = heartflow_config.get(
- "heart_flow_update_interval", config.heart_flow_update_interval
- )
+ # config.heart_flow_update_interval = heartflow_config.get(
+ # "heart_flow_update_interval", config.heart_flow_update_interval
+ # )
if config.INNER_VERSION in SpecifierSet(">=1.3.0"):
config.observation_context_size = heartflow_config.get(
"observation_context_size", config.observation_context_size
@@ -432,6 +441,11 @@ class BotConfig:
config.compress_length_limit = heartflow_config.get(
"compress_length_limit", config.compress_length_limit
)
+ if config.INNER_VERSION in SpecifierSet(">=1.4.0"):
+ config.reply_trigger_threshold = heartflow_config.get("reply_trigger_threshold", config.reply_trigger_threshold)
+ config.probability_decay_factor_per_second = heartflow_config.get("probability_decay_factor_per_second", config.probability_decay_factor_per_second)
+ config.default_decay_rate_per_second = heartflow_config.get("default_decay_rate_per_second", config.default_decay_rate_per_second)
+ config.initial_duration = heartflow_config.get("initial_duration", config.initial_duration)
def willing(parent: dict):
willing_config = parent["willing"]
diff --git a/src/heart_flow/heartflow.py b/src/heart_flow/heartflow.py
index 5d53a07d..c2f922ff 100644
--- a/src/heart_flow/heartflow.py
+++ b/src/heart_flow/heartflow.py
@@ -1,5 +1,4 @@
-from .sub_heartflow import SubHeartflow
-from .observation import ChattingObservation
+from .sub_heartflow import SubHeartflow, ChattingObservation
from src.plugins.moods.moods import MoodManager
from src.plugins.models.utils_model import LLMRequest
from src.config.config import global_config
@@ -10,7 +9,8 @@ from src.common.logger import get_module_logger, LogConfig, HEARTFLOW_STYLE_CONF
from src.individuality.individuality import Individuality
import time
import random
-from typing import Dict, Any
+from typing import Dict, Any, Optional
+import traceback
heartflow_config = LogConfig(
# 使用海马体专用样式
@@ -70,20 +70,27 @@ class Heartflow:
"""定期清理不活跃的子心流"""
while True:
current_time = time.time()
- inactive_subheartflows = []
+ inactive_subheartflows_ids = [] # 修改变量名以清晰表示存储的是ID
# 检查所有子心流
- for subheartflow_id, subheartflow in self._subheartflows.items():
+ # 使用 list(self._subheartflows.items()) 避免在迭代时修改字典
+ for subheartflow_id, subheartflow in list(self._subheartflows.items()):
if (
current_time - subheartflow.last_active_time > global_config.sub_heart_flow_stop_time
): # 10分钟 = 600秒
- inactive_subheartflows.append(subheartflow_id)
- logger.info(f"发现不活跃的子心流: {subheartflow_id}")
+ logger.info(f"发现不活跃的子心流: {subheartflow_id}, 准备清理。")
+ # 1. 标记子心流让其后台任务停止
+ subheartflow.should_stop = True
+ # 2. 将ID添加到待清理列表
+ inactive_subheartflows_ids.append(subheartflow_id)
- # 清理不活跃的子心流
- for subheartflow_id in inactive_subheartflows:
- del self._subheartflows[subheartflow_id]
- logger.info(f"已清理不活跃的子心流: {subheartflow_id}")
+ # 清理不活跃的子心流 (从字典中移除)
+ for subheartflow_id in inactive_subheartflows_ids:
+ if subheartflow_id in self._subheartflows:
+ del self._subheartflows[subheartflow_id]
+ logger.info(f"已从主心流移除子心流: {subheartflow_id}")
+ else:
+ logger.warning(f"尝试移除子心流 {subheartflow_id} 时发现其已被移除。")
await asyncio.sleep(30) # 每30秒检查一次
@@ -95,8 +102,10 @@ class Heartflow:
await asyncio.sleep(30) # 每30秒检查一次是否有新的子心流
continue
- await self.do_a_thinking()
- await asyncio.sleep(global_config.heart_flow_update_interval * 3) # 5分钟思考一次
+ # await self.do_a_thinking()
+ # await asyncio.sleep(global_config.heart_flow_update_interval * 3) # 5分钟思考一次
+
+ await asyncio.sleep(300)
async def heartflow_start_working(self):
# 启动清理任务
@@ -216,33 +225,55 @@ class Heartflow:
return response
- async def create_subheartflow(self, subheartflow_id):
- """
- 创建一个新的SubHeartflow实例
- 添加一个SubHeartflow实例到self._subheartflows字典中
- 并根据subheartflow_id为子心流创建一个观察对象
+ async def create_subheartflow(self, subheartflow_id: Any) -> Optional[SubHeartflow]:
"""
+ 获取或创建一个新的SubHeartflow实例。
+ 如果实例已存在,则直接返回。
+ 如果不存在,则创建实例、观察对象、启动后台任务,并返回新实例。
+ 创建过程中发生任何错误将返回 None。
+
+ Args:
+ subheartflow_id: 用于标识子心流的ID (例如群聊ID)。
+
+ Returns:
+ 对应的 SubHeartflow 实例,如果创建失败则返回 None。
+ """
+ # 检查是否已存在
+ existing_subheartflow = self._subheartflows.get(subheartflow_id)
+ if existing_subheartflow:
+ logger.debug(f"返回已存在的 subheartflow: {subheartflow_id}")
+ return existing_subheartflow
+
+ # 如果不存在,则创建新的
+ logger.info(f"尝试创建新的 subheartflow: {subheartflow_id}")
try:
- if subheartflow_id not in self._subheartflows:
- subheartflow = SubHeartflow(subheartflow_id)
- # 创建一个观察对象,目前只可以用chat_id创建观察对象
- logger.debug(f"创建 observation: {subheartflow_id}")
- observation = ChattingObservation(subheartflow_id)
- await observation.initialize()
- subheartflow.add_observation(observation)
- logger.debug("添加 observation 成功")
- # 创建异步任务
- asyncio.create_task(subheartflow.subheartflow_start_working())
- logger.debug("创建异步任务 成功")
- self._subheartflows[subheartflow_id] = subheartflow
- logger.info("添加 subheartflow 成功")
- return self._subheartflows[subheartflow_id]
+ subheartflow = SubHeartflow(subheartflow_id)
+
+ # 创建并初始化观察对象
+ logger.debug(f"为 {subheartflow_id} 创建 observation")
+ observation = ChattingObservation(subheartflow_id)
+ await observation.initialize() # 等待初始化完成
+ subheartflow.add_observation(observation)
+ logger.debug(f"为 {subheartflow_id} 添加 observation 成功")
+
+ # 创建并存储后台任务
+ subheartflow.task = asyncio.create_task(subheartflow.subheartflow_start_working())
+ logger.debug(f"为 {subheartflow_id} 创建后台任务成功")
+
+ # 添加到管理字典
+ self._subheartflows[subheartflow_id] = subheartflow
+ logger.info(f"添加 subheartflow {subheartflow_id} 成功")
+ return subheartflow
+
except Exception as e:
- logger.error(f"创建 subheartflow 失败: {e}")
+ # 记录详细错误信息
+ logger.error(f"创建 subheartflow {subheartflow_id} 失败: {e}")
+ logger.error(traceback.format_exc()) # 记录完整的 traceback
+ # 考虑是否需要更具体的错误处理或资源清理逻辑
return None
- def get_subheartflow(self, observe_chat_id) -> SubHeartflow:
+ def get_subheartflow(self, observe_chat_id: Any) -> Optional[SubHeartflow]:
"""获取指定ID的SubHeartflow实例"""
return self._subheartflows.get(observe_chat_id)
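
The cleanup path above combines two idioms: iterating over list(self._subheartflows.items()) so removal never mutates the dict mid-iteration, and a should_stop flag that lets each subflow's background task exit on its own schedule instead of being cancelled abruptly. A self-contained sketch of that cooperative shutdown (class and field names are illustrative):

    import asyncio
    from typing import Optional

    class Worker:
        def __init__(self, wid: str):
            self.wid = wid
            self.should_stop = False          # flag polled by the background loop
            self.task: Optional[asyncio.Task] = None

        async def run(self) -> None:
            while not self.should_stop:
                await asyncio.sleep(0.05)     # stand-in for periodic work
            print(f"{self.wid} exited cleanly")

    async def main() -> None:
        workers = {i: Worker(f"w{i}") for i in range(3)}
        for w in workers.values():
            w.task = asyncio.create_task(w.run())
        await asyncio.sleep(0.1)
        # Snapshot the items so deletion does not mutate the dict mid-loop:
        for wid, w in list(workers.items()):
            w.should_stop = True              # 1. ask the loop to stop itself
            del workers[wid]                  # 2. drop the manager's reference
        await asyncio.sleep(0.2)              # give every loop time to see the flag

    asyncio.run(main())
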
diff --git a/src/heart_flow/sub_heartflow.py b/src/heart_flow/sub_heartflow.py
index 439b2a3f..c1a58dcd 100644
--- a/src/heart_flow/sub_heartflow.py
+++ b/src/heart_flow/sub_heartflow.py
@@ -4,7 +4,7 @@ from src.plugins.moods.moods import MoodManager
from src.plugins.models.utils_model import LLMRequest
from src.config.config import global_config
import time
-from typing import Optional
+from typing import Optional, List
from datetime import datetime
import traceback
from src.plugins.chat.utils import parse_text_timestamps
@@ -65,7 +65,7 @@ class SubHeartflow:
def __init__(self, subheartflow_id):
self.subheartflow_id = subheartflow_id
- self.current_mind = ""
+ self.current_mind = "你什么也没想"
self.past_mind = []
self.current_state: CurrentState = CurrentState()
self.llm_model = LLMRequest(
@@ -76,16 +76,14 @@ class SubHeartflow:
)
self.main_heartflow_info = ""
-
- self.last_reply_time = time.time()
+
self.last_active_time = time.time() # 添加最后激活时间
-
- if not self.current_mind:
- self.current_mind = "你什么也没想"
+ self.should_stop = False # 添加停止标志
+ self.task: Optional[asyncio.Task] = None # 添加 task 属性
self.is_active = False
- self.observations: list[ChattingObservation] = []
+ self.observations: List[ChattingObservation] = [] # 使用 List 类型提示
self.running_knowledges = []
@@ -93,20 +91,14 @@ class SubHeartflow:
async def subheartflow_start_working(self):
while True:
- current_time = time.time()
# --- 调整后台任务逻辑 --- #
# 这个后台循环现在主要负责检查是否需要自我销毁
# 不再主动进行思考或状态更新,这些由 HeartFC_Chat 驱动
- # 检查是否超过指定时间没有激活 (例如,没有被调用进行思考)
- if current_time - self.last_active_time > global_config.sub_heart_flow_stop_time: # 例如 5 分钟
- logger.info(
- f"子心流 {self.subheartflow_id} 超过 {global_config.sub_heart_flow_stop_time} 秒没有激活,正在销毁..."
- f" (Last active: {datetime.fromtimestamp(self.last_active_time).strftime('%Y-%m-%d %H:%M:%S')})"
- )
- # 在这里添加实际的销毁逻辑,例如从主 Heartflow 管理器中移除自身
- # heartflow.remove_subheartflow(self.subheartflow_id) # 假设有这样的方法
- break # 退出循环以停止任务
+ # 检查是否被主心流标记为停止
+ if self.should_stop:
+ logger.info(f"子心流 {self.subheartflow_id} 被标记为停止,正在退出后台任务...")
+ break # 退出循环以停止任务
await asyncio.sleep(global_config.sub_heart_flow_update_interval) # 定期检查销毁条件
diff --git a/src/plugins/chat/bot.py b/src/plugins/chat/bot.py
index 314d20ff..c3ba78b0 100644
--- a/src/plugins/chat/bot.py
+++ b/src/plugins/chat/bot.py
@@ -105,53 +105,26 @@ class ChatBot:
template_group_name = None
async def preprocess():
- if global_config.enable_pfc_chatting:
- try:
- if groupinfo is None:
- if global_config.enable_friend_chat:
- userinfo = message.message_info.user_info
- messageinfo = message.message_info
- # 创建聊天流
- chat = await chat_manager.get_or_create_stream(
- platform=messageinfo.platform,
- user_info=userinfo,
- group_info=groupinfo,
- )
- message.update_chat_stream(chat)
- await self.only_process_chat.process_message(message)
- await self._create_pfc_chat(message)
+ if groupinfo is None:
+ if global_config.enable_friend_chat:
+ if global_config.enable_pfc_chatting:
+ userinfo = message.message_info.user_info
+ messageinfo = message.message_info
+ # 创建聊天流
+ chat = await chat_manager.get_or_create_stream(
+ platform=messageinfo.platform,
+ user_info=userinfo,
+ group_info=groupinfo,
+ )
+ message.update_chat_stream(chat)
+ await self.only_process_chat.process_message(message)
+ await self._create_pfc_chat(message)
else:
- if groupinfo.group_id in global_config.talk_allowed_groups:
- # logger.debug(f"开始群聊模式{str(message_data)[:50]}...")
- if global_config.response_mode == "heart_flow":
- # logger.info(f"启动最新最好的思维流FC模式{str(message_data)[:50]}...")
- await self.heartFC_processor.process_message(message_data)
- elif global_config.response_mode == "reasoning":
- # logger.debug(f"开始推理模式{str(message_data)[:50]}...")
- await self.reasoning_chat.process_message(message_data)
- else:
- logger.error(f"未知的回复模式,请检查配置文件!!: {global_config.response_mode}")
- except Exception as e:
- logger.error(f"处理PFC消息失败: {e}")
+ await self.heartFC_processor.process_message(message_data)
else:
- if groupinfo is None:
- if global_config.enable_friend_chat:
- # 私聊处理流程
- # await self._handle_private_chat(message)
- if global_config.response_mode == "heart_flow":
- await self.heartFC_processor.process_message(message_data)
- elif global_config.response_mode == "reasoning":
- await self.reasoning_chat.process_message(message_data)
- else:
- logger.error(f"未知的回复模式,请检查配置文件!!: {global_config.response_mode}")
- else: # 群聊处理
- if groupinfo.group_id in global_config.talk_allowed_groups:
- if global_config.response_mode == "heart_flow":
- await self.heartFC_processor.process_message(message_data)
- elif global_config.response_mode == "reasoning":
- await self.reasoning_chat.process_message(message_data)
- else:
- logger.error(f"未知的回复模式,请检查配置文件!!: {global_config.response_mode}")
+ await self.heartFC_processor.process_message(message_data)
+
+
if template_group_name:
async with global_prompt_manager.async_message_scope(template_group_name):
diff --git a/src/plugins/chat_module/heartFC_chat/pf_chatting.py b/src/plugins/chat_module/heartFC_chat/pf_chatting.py
index 59472fd1..7e6acd53 100644
--- a/src/plugins/chat_module/heartFC_chat/pf_chatting.py
+++ b/src/plugins/chat_module/heartFC_chat/pf_chatting.py
@@ -15,6 +15,9 @@ from src.config.config import global_config
from src.plugins.chat.utils_image import image_path_to_base64 # Local import needed after move
from src.plugins.utils.timer_calculater import Timer # <--- Import Timer
+INITIAL_DURATION = 60.0
+
+
# 定义日志配置 (使用 loguru 格式)
interest_log_config = LogConfig(
console_format=PFC_STYLE_CONFIG["console_format"], # 使用默认控制台格式
@@ -91,7 +94,7 @@ class PFChatting:
self._loop_active: bool = False # Is the loop currently running?
self._loop_task: Optional[asyncio.Task] = None # Stores the main loop task
self._trigger_count_this_activation: int = 0 # Counts triggers within an active period
- self._initial_duration: float = 60.0 # 首次触发增加的时间
+ self._initial_duration: float = INITIAL_DURATION # 首次触发增加的时间
self._last_added_duration: float = self._initial_duration # <--- 新增:存储上次增加的时间
def _get_log_prefix(self) -> str:
diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml
index 3ebf1459..e4e2a2a8 100644
--- a/template/bot_config_template.toml
+++ b/template/bot_config_template.toml
@@ -1,8 +1,7 @@
[inner]
version = "1.4.0"
-
-#以下是给开发人员阅读的,一般用户不需要阅读
+#----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读----
#如果你想要修改配置文件,请在修改后将version的值进行变更
#如果新增项目,请在BotConfig类下新增相应的变量
#1.如果你修改的是[]层级项目,例如你新增了 [memory],那么请在config.py的 load_config函数中的include_configs字典中新增"内容":{
@@ -19,11 +18,12 @@ version = "1.4.0"
# 次版本号:当你做了向下兼容的功能性新增,
# 修订号:当你做了向下兼容的问题修正。
# 先行版本号及版本编译信息可以加到“主版本号.次版本号.修订号”的后面,作为延伸。
+#----以上是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读----
[bot]
-qq = 114514
+qq = 1145141919810
nickname = "麦麦"
-alias_names = ["麦叠", "牢麦"]
+alias_names = ["麦叠", "牢麦"] #该选项还在调试中,暂时未生效
[groups]
talk_allowed = [
@@ -41,23 +41,24 @@ personality_sides = [
"用一句话或几句话描述人格的一些细节",
"用一句话或几句话描述人格的一些细节",
"用一句话或几句话描述人格的一些细节",
-]# 条数任意
+]# 条数任意,不能为0,该选项还在调试中,可能未完全生效
[identity] #アイデンティティがない 生まれないらららら
# 兴趣爱好 未完善,有些条目未使用
identity_detail = [
"身份特点",
"身份特点",
-]# 条数任意
+]# 条数任意,不能为0,该选项还在调试中,可能未完全生效
#外貌特征
-height = 170 # 身高 单位厘米
-weight = 50 # 体重 单位千克
-age = 20 # 年龄 单位岁
-gender = "男" # 性别
-appearance = "用几句话描述外貌特征" # 外貌特征
+height = 170 # 身高 单位厘米 该选项还在调试中,暂时未生效
+weight = 50 # 体重 单位千克 该选项还在调试中,暂时未生效
+age = 20 # 年龄 单位岁 该选项还在调试中,暂时未生效
+gender = "男" # 性别 该选项还在调试中,暂时未生效
+appearance = "用几句话描述外貌特征" # 外貌特征 该选项还在调试中,暂时未生效
[schedule]
-enable_schedule_gen = true # 是否启用日程表(尚未完成)
+enable_schedule_gen = true # 是否启用日程表
+enable_schedule_interaction = true # 日程表是否影响回复模式
prompt_schedule_gen = "用几句话描述描述性格特点或行动规律,这个特征会用来生成日程表"
schedule_doing_update_interval = 900 # 日程表更新间隔 单位秒
schedule_temperature = 0.1 # 日程表温度,建议0.1-0.5
@@ -67,19 +68,25 @@ time_zone = "Asia/Shanghai" # 给你的机器人设置时区,可以解决运
nonebot-qq="http://127.0.0.1:18002/api/message"
[response] #群聊的回复策略
-#reasoning:推理模式,麦麦会根据上下文进行推理,并给出回复
-#heart_flow:结合了PFC模式和心流模式,麦麦会进行主动的观察和回复,并给出回复
-response_mode = "heart_flow" # 回复策略,可选值:heart_flow(心流),reasoning(推理)
+enable_heart_flowC = true
+# 该功能还在完善中
+# 是否启用heart_flowC(心流聊天,HFC)模式
+# 启用后麦麦会自主选择进入heart_flowC模式(持续一段时间),主动进行观察并给出回复,比较消耗token
-#推理回复参数
-model_r1_probability = 0.7 # 麦麦回答时选择主要回复模型1 模型的概率
-model_v3_probability = 0.3 # 麦麦回答时选择次要回复模型2 模型的概率
+#一般回复参数
+model_reasoning_probability = 0.7 # 麦麦回答时选择推理模型 模型的概率
+model_normal_probability = 0.3 # 麦麦回答时选择一般模型 模型的概率
+
+[heartflow] #启用heart_flowC(心流聊天)模式时生效,需要填写以下参数
+reply_trigger_threshold = 3.0 # 心流聊天触发阈值,越低越容易进入心流聊天
+probability_decay_factor_per_second = 0.2 # 概率衰减因子,越大衰减越快,也越容易退出心流聊天
+default_decay_rate_per_second = 0.98 # 默认衰减率,越大衰减越快,也越难进入心流聊天
+initial_duration = 60 # 初始持续时间,越大心流聊天持续的时间越长
-[heartflow] # 注意:可能会消耗大量token,请谨慎开启,仅会使用v3模型
-sub_heart_flow_update_interval = 60 # 子心流更新频率,间隔 单位秒
-sub_heart_flow_freeze_time = 100 # 子心流冻结时间,超过这个时间没有回复,子心流会冻结,间隔 单位秒
sub_heart_flow_stop_time = 500 # 子心流停止时间,超过这个时间没有回复,子心流会停止,间隔 单位秒
-heart_flow_update_interval = 600 # 心流更新频率,间隔 单位秒
+# sub_heart_flow_update_interval = 60
+# sub_heart_flow_freeze_time = 100
+# heart_flow_update_interval = 600
observation_context_size = 20 # 心流观察到的最长上下文大小,超过这个值的上下文会被压缩
compressed_length = 5 # 不能大于observation_context_size,心流上下文压缩的最短压缩长度,超过心流观察到的上下文长度,会压缩,最短压缩长度为5
@@ -87,11 +94,13 @@ compress_length_limit = 5 #最多压缩份数,超过该数值的压缩上下
[message]
-max_context_size = 12 # 麦麦获得的上文数量,建议12,太短太长都会导致脑袋尖尖
-emoji_chance = 0.2 # 麦麦使用表情包的概率,设置为1让麦麦自己决定发不发
-thinking_timeout = 60 # 麦麦最长思考时间,超过这个时间的思考会放弃
-max_response_length = 256 # 麦麦回答的最大token数
+max_context_size = 12 # 麦麦回复时获得的上文数量,建议12,太短太长都会导致脑袋尖尖
+emoji_chance = 0.2 # 麦麦一般回复时使用表情包的概率,设置为1让麦麦自己决定发不发
+thinking_timeout = 100 # 麦麦最长思考时间,超过这个时间的思考会放弃(往往是api反应太慢)
+max_response_length = 256 # 麦麦单次回答的最大token数
message_buffer = true # 启用消息缓冲器?启用此项以解决消息的拆分问题,但会使麦麦的回复延迟
+
+# 以下是消息过滤,可以根据规则过滤特定消息,将不会读取这些消息
ban_words = [
# "403","张三"
]
@@ -103,22 +112,23 @@ ban_msgs_regex = [
# "\\[CQ:at,qq=\\d+\\]" # 匹配@
]
-[willing]
+[willing] # 一般回复模式的回复意愿设置
willing_mode = "classical" # 回复意愿模式 —— 经典模式:classical,动态模式:dynamic,mxp模式:mxp,自定义模式:custom(需要你自己实现)
response_willing_amplifier = 1 # 麦麦回复意愿放大系数,一般为1
response_interested_rate_amplifier = 1 # 麦麦回复兴趣度放大系数,听到记忆里的内容时放大系数
down_frequency_rate = 3 # 降低回复频率的群组回复意愿降低系数 除法
-emoji_response_penalty = 0.1 # 表情包回复惩罚系数,设为0为不回复单个表情包,减少单独回复表情包的概率
+emoji_response_penalty = 0 # 表情包回复惩罚系数,设为0为不回复单个表情包,减少单独回复表情包的概率
mentioned_bot_inevitable_reply = false # 提及 bot 必然回复
at_bot_inevitable_reply = false # @bot 必然回复
[emoji]
-max_emoji_num = 120 # 表情包最大数量
+max_emoji_num = 90 # 表情包最大数量
max_reach_deletion = true # 开启则在达到最大数量时删除表情包,关闭则达到最大数量时不删除,只是不会继续收集表情包
check_interval = 30 # 检查表情包(注册,破损,删除)的时间间隔(分钟)
auto_save = true # 是否保存表情包和图片
-enable_check = false # 是否启用表情包过滤
-check_prompt = "符合公序良俗" # 表情包过滤要求
+
+enable_check = false # 是否启用表情包过滤,只有符合该要求的表情包才会被保存
+check_prompt = "符合公序良俗" # 表情包过滤要求,只有符合该要求的表情包才会被保存
[memory]
build_memory_interval = 2000 # 记忆构建间隔 单位秒 间隔越低,麦麦学习越多,但是冗余信息也会增多
@@ -131,7 +141,8 @@ forget_memory_interval = 1000 # 记忆遗忘间隔 单位秒 间隔越低,
memory_forget_time = 24 #多长时间后的记忆会被遗忘 单位小时
memory_forget_percentage = 0.01 # 记忆遗忘比例 控制记忆遗忘程度 越大遗忘越多 建议保持默认
-memory_ban_words = [ #不希望记忆的词
+#不希望记忆的词,已经记忆的不会受到影响
+memory_ban_words = [
# "403","张三"
]
@@ -167,7 +178,7 @@ word_replace_rate=0.006 # 整词替换概率
[response_splitter]
enable_response_splitter = true # 是否启用回复分割器
-response_max_length = 100 # 回复允许的最大长度
+response_max_length = 256 # 回复允许的最大长度
response_max_sentence_num = 4 # 回复允许的最大句子数
[remote] #发送统计信息,主要是看全球有多少只麦麦
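
The new [heartflow] keys describe a score that builds up and decays over time; the code that consumes them is not part of this diff. Purely as a sketch of how such knobs typically interact (every formula below is an assumption, not the project's actual update rule):

import math

REPLY_TRIGGER_THRESHOLD = 3.0          # reply_trigger_threshold
DECAY_FACTOR_PER_SECOND = 0.2          # probability_decay_factor_per_second
DEFAULT_DECAY_RATE_PER_SECOND = 0.98   # default_decay_rate_per_second

def decayed_interest(interest: float, idle_seconds: float) -> float:
    # Multiplicative per-second decay: interest shrinks while nobody talks.
    return interest * (DEFAULT_DECAY_RATE_PER_SECOND ** idle_seconds)

def should_enter_hfc(interest: float) -> bool:
    # Enter heart_flowC mode once accumulated interest crosses the threshold.
    return interest >= REPLY_TRIGGER_THRESHOLD

def exit_probability(idle_seconds: float) -> float:
    # Chance of leaving HFC mode compounds with the per-second decay factor.
    return 1.0 - math.exp(-DECAY_FACTOR_PER_SECOND * idle_seconds)

print(should_enter_hfc(decayed_interest(4.0, 10)))  # True: 4.0 * 0.98**10 ≈ 3.27
print(f"{exit_probability(5):.2f}")                 # ≈ 0.63 after 5 idle seconds
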
From 388392b9c515dcdfb1f3697ef8020a5b2574ff1e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=98=A5=E6=B2=B3=E6=99=B4?=
Date: Mon, 21 Apr 2025 14:16:13 +0900
Subject: [PATCH 06/26] =?UTF-8?q?fix:=20=E5=88=A0=E9=99=A4=E9=87=8D?=
=?UTF-8?q?=E5=A4=8D=E7=9A=84=E7=B1=BB=E5=AE=9A=E4=B9=89?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/plugins/memory_system/Hippocampus.py | 995 +++++++----------------
1 file changed, 281 insertions(+), 714 deletions(-)
diff --git a/src/plugins/memory_system/Hippocampus.py b/src/plugins/memory_system/Hippocampus.py
index 557b42f2..f25f1d45 100644
--- a/src/plugins/memory_system/Hippocampus.py
+++ b/src/plugins/memory_system/Hippocampus.py
@@ -342,720 +342,6 @@ class Hippocampus:
memories.sort(key=lambda x: x[2], reverse=True)
return memories
- async def get_memory_from_text(
- self,
- text: str,
- max_memory_num: int = 3,
- max_memory_length: int = 2,
- max_depth: int = 3,
- fast_retrieval: bool = False,
- ) -> list:
- """从文本中提取关键词并获取相关记忆。
-
- Args:
- text (str): 输入文本
- max_memory_num (int, optional): 记忆数量限制。默认为3。
- max_memory_length (int, optional): 记忆长度限制。默认为2。
- max_depth (int, optional): 记忆检索深度。默认为2。
- fast_retrieval (bool, optional): 是否使用快速检索。默认为False。
- 如果为True,使用jieba分词和TF-IDF提取关键词,速度更快但可能不够准确。
- 如果为False,使用LLM提取关键词,速度较慢但更准确。
-
- Returns:
- list: 记忆列表,每个元素是一个元组 (topic, memory_items, similarity)
- - topic: str, 记忆主题
- - memory_items: list, 该主题下的记忆项列表
- - similarity: float, 与文本的相似度
- """
- if not text:
- return []
-
- if fast_retrieval:
- # 使用jieba分词提取关键词
- words = jieba.cut(text)
- # 过滤掉停用词和单字词
- keywords = [word for word in words if len(word) > 1]
- # 去重
- keywords = list(set(keywords))
- # 限制关键词数量
- keywords = keywords[:5]
- else:
- # 使用LLM提取关键词
- topic_num = min(5, max(1, int(len(text) * 0.1))) # 根据文本长度动态调整关键词数量
- # logger.info(f"提取关键词数量: {topic_num}")
- topics_response = await self.llm_topic_judge.generate_response(self.find_topic_llm(text, topic_num))
-
- # 提取关键词
- keywords = re.findall(r"<([^>]+)>", topics_response[0])
- if not keywords:
- keywords = []
- else:
- keywords = [
- keyword.strip()
- for keyword in ",".join(keywords).replace(",", ",").replace("、", ",").replace(" ", ",").split(",")
- if keyword.strip()
- ]
-
- # logger.info(f"提取的关键词: {', '.join(keywords)}")
-
- # 过滤掉不存在于记忆图中的关键词
- valid_keywords = [keyword for keyword in keywords if keyword in self.memory_graph.G]
- if not valid_keywords:
- # logger.info("没有找到有效的关键词节点")
- return []
-
- logger.info(f"有效的关键词: {', '.join(valid_keywords)}")
-
- # 从每个关键词获取记忆
- all_memories = []
- activate_map = {} # 存储每个词的累计激活值
-
- # 对每个关键词进行扩散式检索
- for keyword in valid_keywords:
- logger.debug(f"开始以关键词 '{keyword}' 为中心进行扩散检索 (最大深度: {max_depth}):")
- # 初始化激活值
- activation_values = {keyword: 1.0}
- # 记录已访问的节点
- visited_nodes = {keyword}
- # 待处理的节点队列,每个元素是(节点, 激活值, 当前深度)
- nodes_to_process = [(keyword, 1.0, 0)]
-
- while nodes_to_process:
- current_node, current_activation, current_depth = nodes_to_process.pop(0)
-
- # 如果激活值小于0或超过最大深度,停止扩散
- if current_activation <= 0 or current_depth >= max_depth:
- continue
-
- # 获取当前节点的所有邻居
- neighbors = list(self.memory_graph.G.neighbors(current_node))
-
- for neighbor in neighbors:
- if neighbor in visited_nodes:
- continue
-
- # 获取连接强度
- edge_data = self.memory_graph.G[current_node][neighbor]
- strength = edge_data.get("strength", 1)
-
- # 计算新的激活值
- new_activation = current_activation - (1 / strength)
-
- if new_activation > 0:
- activation_values[neighbor] = new_activation
- visited_nodes.add(neighbor)
- nodes_to_process.append((neighbor, new_activation, current_depth + 1))
- logger.trace(
- f"节点 '{neighbor}' 被激活,激活值: {new_activation:.2f} (通过 '{current_node}' 连接,强度: {strength}, 深度: {current_depth + 1})"
- ) # noqa: E501
-
- # 更新激活映射
- for node, activation_value in activation_values.items():
- if activation_value > 0:
- if node in activate_map:
- activate_map[node] += activation_value
- else:
- activate_map[node] = activation_value
-
- # 输出激活映射
- # logger.info("激活映射统计:")
- # for node, total_activation in sorted(activate_map.items(), key=lambda x: x[1], reverse=True):
- # logger.info(f"节点 '{node}': 累计激活值 = {total_activation:.2f}")
-
- # 基于激活值平方的独立概率选择
- remember_map = {}
- # logger.info("基于激活值平方的归一化选择:")
-
- # 计算所有激活值的平方和
- total_squared_activation = sum(activation**2 for activation in activate_map.values())
- if total_squared_activation > 0:
- # 计算归一化的激活值
- normalized_activations = {
- node: (activation**2) / total_squared_activation for node, activation in activate_map.items()
- }
-
- # 按归一化激活值排序并选择前max_memory_num个
- sorted_nodes = sorted(normalized_activations.items(), key=lambda x: x[1], reverse=True)[:max_memory_num]
-
- # 将选中的节点添加到remember_map
- for node, normalized_activation in sorted_nodes:
- remember_map[node] = activate_map[node] # 使用原始激活值
- logger.debug(
- f"节点 '{node}' (归一化激活值: {normalized_activation:.2f}, 激活值: {activate_map[node]:.2f})"
- )
- else:
- logger.info("没有有效的激活值")
-
- # 从选中的节点中提取记忆
- all_memories = []
- # logger.info("开始从选中的节点中提取记忆:")
- for node, activation in remember_map.items():
- logger.debug(f"处理节点 '{node}' (激活值: {activation:.2f}):")
- node_data = self.memory_graph.G.nodes[node]
- memory_items = node_data.get("memory_items", [])
- if not isinstance(memory_items, list):
- memory_items = [memory_items] if memory_items else []
-
- if memory_items:
- logger.debug(f"节点包含 {len(memory_items)} 条记忆")
- # 计算每条记忆与输入文本的相似度
- memory_similarities = []
- for memory in memory_items:
- # 计算与输入文本的相似度
- memory_words = set(jieba.cut(memory))
- text_words = set(jieba.cut(text))
- all_words = memory_words | text_words
- v1 = [1 if word in memory_words else 0 for word in all_words]
- v2 = [1 if word in text_words else 0 for word in all_words]
- similarity = cosine_similarity(v1, v2)
- memory_similarities.append((memory, similarity))
-
- # 按相似度排序
- memory_similarities.sort(key=lambda x: x[1], reverse=True)
- # 获取最匹配的记忆
- top_memories = memory_similarities[:max_memory_length]
-
- # 添加到结果中
- for memory, similarity in top_memories:
- all_memories.append((node, [memory], similarity))
- # logger.info(f"选中记忆: {memory} (相似度: {similarity:.2f})")
- else:
- logger.info("节点没有记忆")
-
- # 去重(基于记忆内容)
- logger.debug("开始记忆去重:")
- seen_memories = set()
- unique_memories = []
- for topic, memory_items, activation_value in all_memories:
- memory = memory_items[0] # 因为每个topic只有一条记忆
- if memory not in seen_memories:
- seen_memories.add(memory)
- unique_memories.append((topic, memory_items, activation_value))
- logger.debug(f"保留记忆: {memory} (来自节点: {topic}, 激活值: {activation_value:.2f})")
- else:
- logger.debug(f"跳过重复记忆: {memory} (来自节点: {topic})")
-
- # 转换为(关键词, 记忆)格式
- result = []
- for topic, memory_items, _ in unique_memories:
- memory = memory_items[0] # 因为每个topic只有一条记忆
- result.append((topic, memory))
- logger.info(f"选中记忆: {memory} (来自节点: {topic})")
-
- return result
-
- async def get_activate_from_text(self, text: str, max_depth: int = 3, fast_retrieval: bool = False) -> float:
- """从文本中提取关键词并获取相关记忆。
-
- Args:
- text (str): 输入文本
- max_depth (int, optional): 记忆检索深度。默认为2。
- fast_retrieval (bool, optional): 是否使用快速检索。默认为False。
- 如果为True,使用jieba分词和TF-IDF提取关键词,速度更快但可能不够准确。
- 如果为False,使用LLM提取关键词,速度较慢但更准确。
-
- Returns:
- float: 激活节点数与总节点数的比值
- """
- if not text:
- return 0
-
- if fast_retrieval:
- # 使用jieba分词提取关键词
- words = jieba.cut(text)
- # 过滤掉停用词和单字词
- keywords = [word for word in words if len(word) > 1]
- # 去重
- keywords = list(set(keywords))
- # 限制关键词数量
- keywords = keywords[:5]
- else:
- # 使用LLM提取关键词
- topic_num = min(5, max(1, int(len(text) * 0.1))) # 根据文本长度动态调整关键词数量
- # logger.info(f"提取关键词数量: {topic_num}")
- topics_response = await self.llm_topic_judge.generate_response(self.find_topic_llm(text, topic_num))
-
- # 提取关键词
- keywords = re.findall(r"<([^>]+)>", topics_response[0])
- if not keywords:
- keywords = []
- else:
- keywords = [
- keyword.strip()
- for keyword in ",".join(keywords).replace(",", ",").replace("、", ",").replace(" ", ",").split(",")
- if keyword.strip()
- ]
-
- # logger.info(f"提取的关键词: {', '.join(keywords)}")
-
- # 过滤掉不存在于记忆图中的关键词
- valid_keywords = [keyword for keyword in keywords if keyword in self.memory_graph.G]
- if not valid_keywords:
- # logger.info("没有找到有效的关键词节点")
- return 0
-
- logger.info(f"有效的关键词: {', '.join(valid_keywords)}")
-
- # 从每个关键词获取记忆
- activate_map = {} # 存储每个词的累计激活值
-
- # 对每个关键词进行扩散式检索
- for keyword in valid_keywords:
- logger.debug(f"开始以关键词 '{keyword}' 为中心进行扩散检索 (最大深度: {max_depth}):")
- # 初始化激活值
- activation_values = {keyword: 1.0}
- # 记录已访问的节点
- visited_nodes = {keyword}
- # 待处理的节点队列,每个元素是(节点, 激活值, 当前深度)
- nodes_to_process = [(keyword, 1.0, 0)]
-
- while nodes_to_process:
- current_node, current_activation, current_depth = nodes_to_process.pop(0)
-
- # 如果激活值小于0或超过最大深度,停止扩散
- if current_activation <= 0 or current_depth >= max_depth:
- continue
-
- # 获取当前节点的所有邻居
- neighbors = list(self.memory_graph.G.neighbors(current_node))
-
- for neighbor in neighbors:
- if neighbor in visited_nodes:
- continue
-
- # 获取连接强度
- edge_data = self.memory_graph.G[current_node][neighbor]
- strength = edge_data.get("strength", 1)
-
- # 计算新的激活值
- new_activation = current_activation - (1 / strength)
-
- if new_activation > 0:
- activation_values[neighbor] = new_activation
- visited_nodes.add(neighbor)
- nodes_to_process.append((neighbor, new_activation, current_depth + 1))
- # logger.debug(
- # f"节点 '{neighbor}' 被激活,激活值: {new_activation:.2f} (通过 '{current_node}' 连接,强度: {strength}, 深度: {current_depth + 1})") # noqa: E501
-
- # 更新激活映射
- for node, activation_value in activation_values.items():
- if activation_value > 0:
- if node in activate_map:
- activate_map[node] += activation_value
- else:
- activate_map[node] = activation_value
-
- # 输出激活映射
- # logger.info("激活映射统计:")
- # for node, total_activation in sorted(activate_map.items(), key=lambda x: x[1], reverse=True):
- # logger.info(f"节点 '{node}': 累计激活值 = {total_activation:.2f}")
-
- # 计算激活节点数与总节点数的比值
- total_activation = sum(activate_map.values())
- logger.info(f"总激活值: {total_activation:.2f}")
- total_nodes = len(self.memory_graph.G.nodes())
- # activated_nodes = len(activate_map)
- activation_ratio = total_activation / total_nodes if total_nodes > 0 else 0
- activation_ratio = activation_ratio * 60
- logger.info(f"总激活值: {total_activation:.2f}, 总节点数: {total_nodes}, 激活: {activation_ratio}")
-
- return activation_ratio
-
-
-# 负责海马体与其他部分的交互
-class EntorhinalCortex:
- def __init__(self, hippocampus: Hippocampus):
- self.hippocampus = hippocampus
- self.memory_graph = hippocampus.memory_graph
- self.config = hippocampus.config
-
- def get_memory_sample(self):
- """从数据库获取记忆样本"""
- # 硬编码:每条消息最大记忆次数
- max_memorized_time_per_msg = 3
-
- # 创建双峰分布的记忆调度器
- sample_scheduler = MemoryBuildScheduler(
- n_hours1=self.config.memory_build_distribution[0],
- std_hours1=self.config.memory_build_distribution[1],
- weight1=self.config.memory_build_distribution[2],
- n_hours2=self.config.memory_build_distribution[3],
- std_hours2=self.config.memory_build_distribution[4],
- weight2=self.config.memory_build_distribution[5],
- total_samples=self.config.build_memory_sample_num,
- )
-
- timestamps = sample_scheduler.get_timestamp_array()
- logger.info(f"回忆往事: {[time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(ts)) for ts in timestamps]}")
- chat_samples = []
- for timestamp in timestamps:
- messages = self.random_get_msg_snippet(
- timestamp, self.config.build_memory_sample_length, max_memorized_time_per_msg
- )
- if messages:
- time_diff = (datetime.datetime.now().timestamp() - timestamp) / 3600
- logger.debug(f"成功抽取 {time_diff:.1f} 小时前的消息样本,共{len(messages)}条")
- chat_samples.append(messages)
- else:
- logger.debug(f"时间戳 {timestamp} 的消息样本抽取失败")
-
- return chat_samples
-
- @staticmethod
- def random_get_msg_snippet(target_timestamp: float, chat_size: int, max_memorized_time_per_msg: int) -> list:
- """从数据库中随机获取指定时间戳附近的消息片段"""
- try_count = 0
- while try_count < 3:
- messages = get_closest_chat_from_db(length=chat_size, timestamp=target_timestamp)
- if messages:
- for message in messages:
- if message["memorized_times"] >= max_memorized_time_per_msg:
- messages = None
- break
- if messages:
- for message in messages:
- db.messages.update_one(
- {"_id": message["_id"]}, {"$set": {"memorized_times": message["memorized_times"] + 1}}
- )
- return messages
- try_count += 1
- return None
-
- async def sync_memory_to_db(self):
- """将记忆图同步到数据库"""
- # 获取数据库中所有节点和内存中所有节点
- db_nodes = list(db.graph_data.nodes.find())
- memory_nodes = list(self.memory_graph.G.nodes(data=True))
-
- # 转换数据库节点为字典格式,方便查找
- db_nodes_dict = {node["concept"]: node for node in db_nodes}
-
- # 检查并更新节点
- for concept, data in memory_nodes:
- memory_items = data.get("memory_items", [])
- if not isinstance(memory_items, list):
- memory_items = [memory_items] if memory_items else []
-
- # 计算内存中节点的特征值
- memory_hash = self.hippocampus.calculate_node_hash(concept, memory_items)
-
- # 获取时间信息
- created_time = data.get("created_time", datetime.datetime.now().timestamp())
- last_modified = data.get("last_modified", datetime.datetime.now().timestamp())
-
- if concept not in db_nodes_dict:
- # 数据库中缺少的节点,添加
- node_data = {
- "concept": concept,
- "memory_items": memory_items,
- "hash": memory_hash,
- "created_time": created_time,
- "last_modified": last_modified,
- }
- db.graph_data.nodes.insert_one(node_data)
- else:
- # 获取数据库中节点的特征值
- db_node = db_nodes_dict[concept]
- db_hash = db_node.get("hash", None)
-
- # 如果特征值不同,则更新节点
- if db_hash != memory_hash:
- db.graph_data.nodes.update_one(
- {"concept": concept},
- {
- "$set": {
- "memory_items": memory_items,
- "hash": memory_hash,
- "created_time": created_time,
- "last_modified": last_modified,
- }
- },
- )
-
- # 处理边的信息
- db_edges = list(db.graph_data.edges.find())
- memory_edges = list(self.memory_graph.G.edges(data=True))
-
- # 创建边的哈希值字典
- db_edge_dict = {}
- for edge in db_edges:
- edge_hash = self.hippocampus.calculate_edge_hash(edge["source"], edge["target"])
- db_edge_dict[(edge["source"], edge["target"])] = {"hash": edge_hash, "strength": edge.get("strength", 1)}
-
- # 检查并更新边
- for source, target, data in memory_edges:
- edge_hash = self.hippocampus.calculate_edge_hash(source, target)
- edge_key = (source, target)
- strength = data.get("strength", 1)
-
- # 获取边的时间信息
- created_time = data.get("created_time", datetime.datetime.now().timestamp())
- last_modified = data.get("last_modified", datetime.datetime.now().timestamp())
-
- if edge_key not in db_edge_dict:
- # 添加新边
- edge_data = {
- "source": source,
- "target": target,
- "strength": strength,
- "hash": edge_hash,
- "created_time": created_time,
- "last_modified": last_modified,
- }
- db.graph_data.edges.insert_one(edge_data)
- else:
- # 检查边的特征值是否变化
- if db_edge_dict[edge_key]["hash"] != edge_hash:
- db.graph_data.edges.update_one(
- {"source": source, "target": target},
- {
- "$set": {
- "hash": edge_hash,
- "strength": strength,
- "created_time": created_time,
- "last_modified": last_modified,
- }
- },
- )
-
- def sync_memory_from_db(self):
- """从数据库同步数据到内存中的图结构"""
- current_time = datetime.datetime.now().timestamp()
- need_update = False
-
- # 清空当前图
- self.memory_graph.G.clear()
-
- # 从数据库加载所有节点
- nodes = list(db.graph_data.nodes.find())
- for node in nodes:
- concept = node["concept"]
- memory_items = node.get("memory_items", [])
- if not isinstance(memory_items, list):
- memory_items = [memory_items] if memory_items else []
-
- # 检查时间字段是否存在
- if "created_time" not in node or "last_modified" not in node:
- need_update = True
- # 更新数据库中的节点
- update_data = {}
- if "created_time" not in node:
- update_data["created_time"] = current_time
- if "last_modified" not in node:
- update_data["last_modified"] = current_time
-
- db.graph_data.nodes.update_one({"concept": concept}, {"$set": update_data})
- logger.info(f"[时间更新] 节点 {concept} 添加缺失的时间字段")
-
- # 获取时间信息(如果不存在则使用当前时间)
- created_time = node.get("created_time", current_time)
- last_modified = node.get("last_modified", current_time)
-
- # 添加节点到图中
- self.memory_graph.G.add_node(
- concept, memory_items=memory_items, created_time=created_time, last_modified=last_modified
- )
-
- # 从数据库加载所有边
- edges = list(db.graph_data.edges.find())
- for edge in edges:
- source = edge["source"]
- target = edge["target"]
- strength = edge.get("strength", 1)
-
- # 检查时间字段是否存在
- if "created_time" not in edge or "last_modified" not in edge:
- need_update = True
- # 更新数据库中的边
- update_data = {}
- if "created_time" not in edge:
- update_data["created_time"] = current_time
- if "last_modified" not in edge:
- update_data["last_modified"] = current_time
-
- db.graph_data.edges.update_one({"source": source, "target": target}, {"$set": update_data})
- logger.info(f"[时间更新] 边 {source} - {target} 添加缺失的时间字段")
-
- # 获取时间信息(如果不存在则使用当前时间)
- created_time = edge.get("created_time", current_time)
- last_modified = edge.get("last_modified", current_time)
-
- # 只有当源节点和目标节点都存在时才添加边
- if source in self.memory_graph.G and target in self.memory_graph.G:
- self.memory_graph.G.add_edge(
- source, target, strength=strength, created_time=created_time, last_modified=last_modified
- )
-
- if need_update:
- logger.success("[数据库] 已为缺失的时间字段进行补充")
-
- async def resync_memory_to_db(self):
- """清空数据库并重新同步所有记忆数据"""
- start_time = time.time()
- logger.info("[数据库] 开始重新同步所有记忆数据...")
-
- # 清空数据库
- clear_start = time.time()
- db.graph_data.nodes.delete_many({})
- db.graph_data.edges.delete_many({})
- clear_end = time.time()
- logger.info(f"[数据库] 清空数据库耗时: {clear_end - clear_start:.2f}秒")
-
- # 获取所有节点和边
- memory_nodes = list(self.memory_graph.G.nodes(data=True))
- memory_edges = list(self.memory_graph.G.edges(data=True))
-
- # 重新写入节点
- node_start = time.time()
- for concept, data in memory_nodes:
- memory_items = data.get("memory_items", [])
- if not isinstance(memory_items, list):
- memory_items = [memory_items] if memory_items else []
-
- node_data = {
- "concept": concept,
- "memory_items": memory_items,
- "hash": self.hippocampus.calculate_node_hash(concept, memory_items),
- "created_time": data.get("created_time", datetime.datetime.now().timestamp()),
- "last_modified": data.get("last_modified", datetime.datetime.now().timestamp()),
- }
- db.graph_data.nodes.insert_one(node_data)
- node_end = time.time()
- logger.info(f"[数据库] 写入 {len(memory_nodes)} 个节点耗时: {node_end - node_start:.2f}秒")
-
- # 重新写入边
- edge_start = time.time()
- for source, target, data in memory_edges:
- edge_data = {
- "source": source,
- "target": target,
- "strength": data.get("strength", 1),
- "hash": self.hippocampus.calculate_edge_hash(source, target),
- "created_time": data.get("created_time", datetime.datetime.now().timestamp()),
- "last_modified": data.get("last_modified", datetime.datetime.now().timestamp()),
- }
- db.graph_data.edges.insert_one(edge_data)
- edge_end = time.time()
- logger.info(f"[数据库] 写入 {len(memory_edges)} 条边耗时: {edge_end - edge_start:.2f}秒")
-
- end_time = time.time()
- logger.success(f"[数据库] 重新同步完成,总耗时: {end_time - start_time:.2f}秒")
- logger.success(f"[数据库] 同步了 {len(memory_nodes)} 个节点和 {len(memory_edges)} 条边")
-
-
-# 海马体
-class Hippocampus:
- def __init__(self):
- self.memory_graph = MemoryGraph()
- self.llm_topic_judge = None
- self.llm_summary_by_topic = None
- self.entorhinal_cortex = None
- self.parahippocampal_gyrus = None
- self.config = None
-
- def initialize(self, global_config):
- self.config = MemoryConfig.from_global_config(global_config)
- # 初始化子组件
- self.entorhinal_cortex = EntorhinalCortex(self)
- self.parahippocampal_gyrus = ParahippocampalGyrus(self)
- # 从数据库加载记忆图
- self.entorhinal_cortex.sync_memory_from_db()
- self.llm_topic_judge = LLMRequest(self.config.llm_topic_judge, request_type="memory")
- self.llm_summary_by_topic = LLMRequest(self.config.llm_summary_by_topic, request_type="memory")
-
- def get_all_node_names(self) -> list:
- """获取记忆图中所有节点的名字列表"""
- return list(self.memory_graph.G.nodes())
-
- @staticmethod
- def calculate_node_hash(concept, memory_items) -> int:
- """计算节点的特征值"""
- if not isinstance(memory_items, list):
- memory_items = [memory_items] if memory_items else []
- sorted_items = sorted(memory_items)
- content = f"{concept}:{'|'.join(sorted_items)}"
- return hash(content)
-
- @staticmethod
- def calculate_edge_hash(source, target) -> int:
- """计算边的特征值"""
- nodes = sorted([source, target])
- return hash(f"{nodes[0]}:{nodes[1]}")
-
- @staticmethod
- def find_topic_llm(text, topic_num):
- prompt = (
- f"这是一段文字:{text}。请你从这段话中总结出最多{topic_num}个关键的概念,可以是名词,动词,或者特定人物,帮我列出来,"
- f"将主题用逗号隔开,并加上<>,例如<主题1>,<主题2>......尽可能精简。只需要列举最多{topic_num}个话题就好,不要有序号,不要告诉我其他内容。"
- f"如果确定找不出主题或者没有明显主题,返回。"
- )
- return prompt
-
- @staticmethod
- def topic_what(text, topic, time_info):
- prompt = (
- f'这是一段文字,{time_info}:{text}。我想让你基于这段文字来概括"{topic}"这个概念,帮我总结成一句自然的话,'
- f"可以包含时间和人物,以及具体的观点。只输出这句话就好"
- )
- return prompt
-
- @staticmethod
- def calculate_topic_num(text, compress_rate):
- """计算文本的话题数量"""
- information_content = calculate_information_content(text)
- topic_by_length = text.count("\n") * compress_rate
- topic_by_information_content = max(1, min(5, int((information_content - 3) * 2)))
- topic_num = int((topic_by_length + topic_by_information_content) / 2)
- logger.debug(
- f"topic_by_length: {topic_by_length}, topic_by_information_content: {topic_by_information_content}, "
- f"topic_num: {topic_num}"
- )
- return topic_num
-
- def get_memory_from_keyword(self, keyword: str, max_depth: int = 2) -> list:
- """从关键词获取相关记忆。
-
- Args:
- keyword (str): 关键词
- max_depth (int, optional): 记忆检索深度,默认为2。1表示只获取直接相关的记忆,2表示获取间接相关的记忆。
-
- Returns:
- list: 记忆列表,每个元素是一个元组 (topic, memory_items, similarity)
- - topic: str, 记忆主题
- - memory_items: list, 该主题下的记忆项列表
- - similarity: float, 与关键词的相似度
- """
- if not keyword:
- return []
-
- # 获取所有节点
- all_nodes = list(self.memory_graph.G.nodes())
- memories = []
-
- # 计算关键词的词集合
- keyword_words = set(jieba.cut(keyword))
-
- # 遍历所有节点,计算相似度
- for node in all_nodes:
- node_words = set(jieba.cut(node))
- all_words = keyword_words | node_words
- v1 = [1 if word in keyword_words else 0 for word in all_words]
- v2 = [1 if word in node_words else 0 for word in all_words]
- similarity = cosine_similarity(v1, v2)
-
- # 如果相似度超过阈值,获取该节点的记忆
- if similarity >= 0.3: # 可以调整这个阈值
- node_data = self.memory_graph.G.nodes[node]
- memory_items = node_data.get("memory_items", [])
- if not isinstance(memory_items, list):
- memory_items = [memory_items] if memory_items else []
-
- memories.append((node, memory_items, similarity))
-
- # 按相似度降序排序
- memories.sort(key=lambda x: x[2], reverse=True)
- return memories
-
async def get_memory_from_text(
self,
text: str,
@@ -1543,6 +829,287 @@ class Hippocampus:
return activation_ratio
+# 负责海马体与其他部分的交互
+class EntorhinalCortex:
+ def __init__(self, hippocampus: Hippocampus):
+ self.hippocampus = hippocampus
+ self.memory_graph = hippocampus.memory_graph
+ self.config = hippocampus.config
+
+ def get_memory_sample(self):
+ """从数据库获取记忆样本"""
+ # 硬编码:每条消息最大记忆次数
+ max_memorized_time_per_msg = 3
+
+ # 创建双峰分布的记忆调度器
+ sample_scheduler = MemoryBuildScheduler(
+ n_hours1=self.config.memory_build_distribution[0],
+ std_hours1=self.config.memory_build_distribution[1],
+ weight1=self.config.memory_build_distribution[2],
+ n_hours2=self.config.memory_build_distribution[3],
+ std_hours2=self.config.memory_build_distribution[4],
+ weight2=self.config.memory_build_distribution[5],
+ total_samples=self.config.build_memory_sample_num,
+ )
+
+ timestamps = sample_scheduler.get_timestamp_array()
+ logger.info(f"回忆往事: {[time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(ts)) for ts in timestamps]}")
+ chat_samples = []
+ for timestamp in timestamps:
+ messages = self.random_get_msg_snippet(
+ timestamp, self.config.build_memory_sample_length, max_memorized_time_per_msg
+ )
+ if messages:
+ time_diff = (datetime.datetime.now().timestamp() - timestamp) / 3600
+ logger.debug(f"成功抽取 {time_diff:.1f} 小时前的消息样本,共{len(messages)}条")
+ chat_samples.append(messages)
+ else:
+ logger.debug(f"时间戳 {timestamp} 的消息样本抽取失败")
+
+ return chat_samples
+
+ @staticmethod
+ def random_get_msg_snippet(target_timestamp: float, chat_size: int, max_memorized_time_per_msg: int) -> list:
+ """从数据库中随机获取指定时间戳附近的消息片段"""
+ try_count = 0
+ while try_count < 3:
+ messages = get_closest_chat_from_db(length=chat_size, timestamp=target_timestamp)
+ if messages:
+ for message in messages:
+ if message["memorized_times"] >= max_memorized_time_per_msg:
+ messages = None
+ break
+ if messages:
+ for message in messages:
+ db.messages.update_one(
+ {"_id": message["_id"]}, {"$set": {"memorized_times": message["memorized_times"] + 1}}
+ )
+ return messages
+ try_count += 1
+ return None
+
+ async def sync_memory_to_db(self):
+ """将记忆图同步到数据库"""
+ # 获取数据库中所有节点和内存中所有节点
+ db_nodes = list(db.graph_data.nodes.find())
+ memory_nodes = list(self.memory_graph.G.nodes(data=True))
+
+ # 转换数据库节点为字典格式,方便查找
+ db_nodes_dict = {node["concept"]: node for node in db_nodes}
+
+ # 检查并更新节点
+ for concept, data in memory_nodes:
+ memory_items = data.get("memory_items", [])
+ if not isinstance(memory_items, list):
+ memory_items = [memory_items] if memory_items else []
+
+ # 计算内存中节点的特征值
+ memory_hash = self.hippocampus.calculate_node_hash(concept, memory_items)
+
+ # 获取时间信息
+ created_time = data.get("created_time", datetime.datetime.now().timestamp())
+ last_modified = data.get("last_modified", datetime.datetime.now().timestamp())
+
+ if concept not in db_nodes_dict:
+ # 数据库中缺少的节点,添加
+ node_data = {
+ "concept": concept,
+ "memory_items": memory_items,
+ "hash": memory_hash,
+ "created_time": created_time,
+ "last_modified": last_modified,
+ }
+ db.graph_data.nodes.insert_one(node_data)
+ else:
+ # 获取数据库中节点的特征值
+ db_node = db_nodes_dict[concept]
+ db_hash = db_node.get("hash", None)
+
+ # 如果特征值不同,则更新节点
+ if db_hash != memory_hash:
+ db.graph_data.nodes.update_one(
+ {"concept": concept},
+ {
+ "$set": {
+ "memory_items": memory_items,
+ "hash": memory_hash,
+ "created_time": created_time,
+ "last_modified": last_modified,
+ }
+ },
+ )
+
+ # 处理边的信息
+ db_edges = list(db.graph_data.edges.find())
+ memory_edges = list(self.memory_graph.G.edges(data=True))
+
+ # 创建边的哈希值字典
+ db_edge_dict = {}
+ for edge in db_edges:
+ edge_hash = self.hippocampus.calculate_edge_hash(edge["source"], edge["target"])
+ db_edge_dict[(edge["source"], edge["target"])] = {"hash": edge_hash, "strength": edge.get("strength", 1)}
+
+ # 检查并更新边
+ for source, target, data in memory_edges:
+ edge_hash = self.hippocampus.calculate_edge_hash(source, target)
+ edge_key = (source, target)
+ strength = data.get("strength", 1)
+
+ # 获取边的时间信息
+ created_time = data.get("created_time", datetime.datetime.now().timestamp())
+ last_modified = data.get("last_modified", datetime.datetime.now().timestamp())
+
+ if edge_key not in db_edge_dict:
+ # 添加新边
+ edge_data = {
+ "source": source,
+ "target": target,
+ "strength": strength,
+ "hash": edge_hash,
+ "created_time": created_time,
+ "last_modified": last_modified,
+ }
+ db.graph_data.edges.insert_one(edge_data)
+ else:
+ # 检查边的特征值是否变化
+ if db_edge_dict[edge_key]["hash"] != edge_hash:
+ db.graph_data.edges.update_one(
+ {"source": source, "target": target},
+ {
+ "$set": {
+ "hash": edge_hash,
+ "strength": strength,
+ "created_time": created_time,
+ "last_modified": last_modified,
+ }
+ },
+ )
+
+ def sync_memory_from_db(self):
+ """从数据库同步数据到内存中的图结构"""
+ current_time = datetime.datetime.now().timestamp()
+ need_update = False
+
+ # 清空当前图
+ self.memory_graph.G.clear()
+
+ # 从数据库加载所有节点
+ nodes = list(db.graph_data.nodes.find())
+ for node in nodes:
+ concept = node["concept"]
+ memory_items = node.get("memory_items", [])
+ if not isinstance(memory_items, list):
+ memory_items = [memory_items] if memory_items else []
+
+ # 检查时间字段是否存在
+ if "created_time" not in node or "last_modified" not in node:
+ need_update = True
+ # 更新数据库中的节点
+ update_data = {}
+ if "created_time" not in node:
+ update_data["created_time"] = current_time
+ if "last_modified" not in node:
+ update_data["last_modified"] = current_time
+
+ db.graph_data.nodes.update_one({"concept": concept}, {"$set": update_data})
+ logger.info(f"[时间更新] 节点 {concept} 添加缺失的时间字段")
+
+ # 获取时间信息(如果不存在则使用当前时间)
+ created_time = node.get("created_time", current_time)
+ last_modified = node.get("last_modified", current_time)
+
+ # 添加节点到图中
+ self.memory_graph.G.add_node(
+ concept, memory_items=memory_items, created_time=created_time, last_modified=last_modified
+ )
+
+ # 从数据库加载所有边
+ edges = list(db.graph_data.edges.find())
+ for edge in edges:
+ source = edge["source"]
+ target = edge["target"]
+ strength = edge.get("strength", 1)
+
+ # 检查时间字段是否存在
+ if "created_time" not in edge or "last_modified" not in edge:
+ need_update = True
+ # 更新数据库中的边
+ update_data = {}
+ if "created_time" not in edge:
+ update_data["created_time"] = current_time
+ if "last_modified" not in edge:
+ update_data["last_modified"] = current_time
+
+ db.graph_data.edges.update_one({"source": source, "target": target}, {"$set": update_data})
+ logger.info(f"[时间更新] 边 {source} - {target} 添加缺失的时间字段")
+
+ # 获取时间信息(如果不存在则使用当前时间)
+ created_time = edge.get("created_time", current_time)
+ last_modified = edge.get("last_modified", current_time)
+
+ # 只有当源节点和目标节点都存在时才添加边
+ if source in self.memory_graph.G and target in self.memory_graph.G:
+ self.memory_graph.G.add_edge(
+ source, target, strength=strength, created_time=created_time, last_modified=last_modified
+ )
+
+ if need_update:
+ logger.success("[数据库] 已为缺失的时间字段进行补充")
+
+ async def resync_memory_to_db(self):
+ """清空数据库并重新同步所有记忆数据"""
+ start_time = time.time()
+ logger.info("[数据库] 开始重新同步所有记忆数据...")
+
+ # 清空数据库
+ clear_start = time.time()
+ db.graph_data.nodes.delete_many({})
+ db.graph_data.edges.delete_many({})
+ clear_end = time.time()
+ logger.info(f"[数据库] 清空数据库耗时: {clear_end - clear_start:.2f}秒")
+
+ # 获取所有节点和边
+ memory_nodes = list(self.memory_graph.G.nodes(data=True))
+ memory_edges = list(self.memory_graph.G.edges(data=True))
+
+ # 重新写入节点
+ node_start = time.time()
+ for concept, data in memory_nodes:
+ memory_items = data.get("memory_items", [])
+ if not isinstance(memory_items, list):
+ memory_items = [memory_items] if memory_items else []
+
+ node_data = {
+ "concept": concept,
+ "memory_items": memory_items,
+ "hash": self.hippocampus.calculate_node_hash(concept, memory_items),
+ "created_time": data.get("created_time", datetime.datetime.now().timestamp()),
+ "last_modified": data.get("last_modified", datetime.datetime.now().timestamp()),
+ }
+ db.graph_data.nodes.insert_one(node_data)
+ node_end = time.time()
+ logger.info(f"[数据库] 写入 {len(memory_nodes)} 个节点耗时: {node_end - node_start:.2f}秒")
+
+ # 重新写入边
+ edge_start = time.time()
+ for source, target, data in memory_edges:
+ edge_data = {
+ "source": source,
+ "target": target,
+ "strength": data.get("strength", 1),
+ "hash": self.hippocampus.calculate_edge_hash(source, target),
+ "created_time": data.get("created_time", datetime.datetime.now().timestamp()),
+ "last_modified": data.get("last_modified", datetime.datetime.now().timestamp()),
+ }
+ db.graph_data.edges.insert_one(edge_data)
+ edge_end = time.time()
+ logger.info(f"[数据库] 写入 {len(memory_edges)} 条边耗时: {edge_end - edge_start:.2f}秒")
+
+ end_time = time.time()
+ logger.success(f"[数据库] 重新同步完成,总耗时: {end_time - start_time:.2f}秒")
+ logger.success(f"[数据库] 同步了 {len(memory_nodes)} 个节点和 {len(memory_edges)} 条边")
+
+
# 负责整合,遗忘,合并记忆
class ParahippocampalGyrus:
def __init__(self, hippocampus: Hippocampus):
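
For reference, the re-homed get_memory_from_text/get_activate_from_text pair is a spreading-activation search: each keyword seeds activation 1.0, a neighbour inherits the current activation minus 1/strength, and expansion stops at non-positive activation or max_depth. The core traversal, condensed into a self-contained function over a networkx graph (an illustrative extraction, not a drop-in replacement):

import networkx as nx
from typing import Dict

def spread_activation(G: nx.Graph, seed: str, max_depth: int = 3) -> Dict[str, float]:
    """Breadth-first spreading activation, as in Hippocampus: a neighbour's
    activation is the current activation minus 1/strength; expansion stops
    once activation is non-positive or max_depth is reached."""
    activation = {seed: 1.0}
    visited = {seed}
    queue = [(seed, 1.0, 0)]
    while queue:
        node, act, depth = queue.pop(0)
        if act <= 0 or depth >= max_depth:
            continue
        for neighbor in G.neighbors(node):
            if neighbor in visited:
                continue
            strength = G[node][neighbor].get("strength", 1)
            new_act = act - (1 / strength)
            if new_act > 0:
                activation[neighbor] = new_act
                visited.add(neighbor)
                queue.append((neighbor, new_act, depth + 1))
    return activation

G = nx.Graph()
G.add_edge("猫", "宠物", strength=4)
G.add_edge("宠物", "狗", strength=2)
print(spread_activation(G, "猫"))  # {'猫': 1.0, '宠物': 0.75, '狗': 0.25}
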
From 5d663347766c631e04d513d090cce4260d57e8e4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=98=A5=E6=B2=B3=E6=99=B4?=
Date: Mon, 21 Apr 2025 14:21:08 +0900
Subject: [PATCH 07/26] =?UTF-8?q?fix:=20=E4=BF=AE=E5=A4=8D=E6=9C=AA?=
 =?UTF-8?q?=E8=A7=A3=E6=9E=90=E7=9A=84=E5=BC=95=E7=94=A8?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../memory_system/manually_alter_memory.py | 18 ++++++++----------
1 file changed, 8 insertions(+), 10 deletions(-)
diff --git a/src/plugins/memory_system/manually_alter_memory.py b/src/plugins/memory_system/manually_alter_memory.py
index 81874211..1452d3d5 100644
--- a/src/plugins/memory_system/manually_alter_memory.py
+++ b/src/plugins/memory_system/manually_alter_memory.py
@@ -5,7 +5,8 @@ import time
from pathlib import Path
import datetime
from rich.console import Console
-from memory_manual_build import Memory_graph, Hippocampus # 海马体和记忆图
+from Hippocampus import Hippocampus # 海马体和记忆图
+
from dotenv import load_dotenv
@@ -45,13 +46,13 @@ else:
# 查询节点信息
-def query_mem_info(memory_graph: Memory_graph):
+def query_mem_info(hippocampus: Hippocampus):
while True:
query = input("\n请输入新的查询概念(输入'退出'以结束):")
if query.lower() == "退出":
break
- items_list = memory_graph.get_related_item(query)
+ items_list = hippocampus.memory_graph.get_related_item(query)
if items_list:
have_memory = False
first_layer, second_layer = items_list
@@ -312,14 +313,11 @@ def alter_mem_edge(hippocampus: Hippocampus):
async def main():
start_time = time.time()
- # 创建记忆图
- memory_graph = Memory_graph()
-
# 创建海马体
- hippocampus = Hippocampus(memory_graph)
+ hippocampus = Hippocampus()
# 从数据库同步数据
- hippocampus.sync_memory_from_db()
+ hippocampus.entorhinal_cortex.sync_memory_from_db()
end_time = time.time()
logger.info(f"\033[32m[加载海马体耗时: {end_time - start_time:.2f} 秒]\033[0m")
@@ -338,7 +336,7 @@ async def main():
query = -1
if query == 0:
- query_mem_info(memory_graph)
+ query_mem_info(hippocampus.memory_graph)
elif query == 1:
add_mem_node(hippocampus)
elif query == 2:
@@ -355,7 +353,7 @@ async def main():
print("已结束操作")
break
- hippocampus.sync_memory_to_db()
+ hippocampus.entorhinal_cortex.sync_memory_to_db()
if __name__ == "__main__":
From 1dd3c62c553df60d52137f6942ff2e60a015314e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=98=A5=E6=B2=B3=E6=99=B4?=
Date: Mon, 21 Apr 2025 14:32:32 +0900
Subject: [PATCH 08/26] chore: PEP8 naming
---
src/main.py | 6 +++---
src/plugins/chat/bot.py | 4 ++--
.../heartFC_chat/heartFC_controler.py | 16 ++++++++--------
.../heartFC_chat/heartFC_processor.py | 14 +++++++-------
.../chat_module/heartFC_chat/pf_chatting.py | 6 +++---
5 files changed, 23 insertions(+), 23 deletions(-)
diff --git a/src/main.py b/src/main.py
index aad08b90..929cff7d 100644
--- a/src/main.py
+++ b/src/main.py
@@ -18,7 +18,7 @@ from .plugins.remote import heartbeat_thread # noqa: F401
from .individuality.individuality import Individuality
from .common.server import global_server
from .plugins.chat_module.heartFC_chat.interest import InterestManager
-from .plugins.chat_module.heartFC_chat.heartFC_controler import HeartFC_Controller
+from .plugins.chat_module.heartFC_chat.heartFC_controler import HeartFCController
logger = get_module_logger("main")
@@ -118,8 +118,8 @@ class MainSystem:
logger.success("兴趣管理器后台任务启动成功")
# 初始化并独立启动 HeartFC_Chat
- HeartFC_Controller()
- heartfc_chat_instance = HeartFC_Controller.get_instance()
+ HeartFCController()
+ heartfc_chat_instance = HeartFCController.get_instance()
if heartfc_chat_instance:
await heartfc_chat_instance.start()
logger.success("HeartFC_Chat 模块独立启动成功")
diff --git a/src/plugins/chat/bot.py b/src/plugins/chat/bot.py
index 314d20ff..cfe4238e 100644
--- a/src/plugins/chat/bot.py
+++ b/src/plugins/chat/bot.py
@@ -7,7 +7,7 @@ from ..chat_module.only_process.only_message_process import MessageProcessor
from src.common.logger import get_module_logger, CHAT_STYLE_CONFIG, LogConfig
from ..chat_module.reasoning_chat.reasoning_chat import ReasoningChat
-from ..chat_module.heartFC_chat.heartFC_processor import HeartFC_Processor
+from ..chat_module.heartFC_chat.heartFC_processor import HeartFCProcessor
from ..utils.prompt_builder import Prompt, global_prompt_manager
import traceback
@@ -29,7 +29,7 @@ class ChatBot:
self.mood_manager = MoodManager.get_instance() # 获取情绪管理器单例
self.mood_manager.start_mood_update() # 启动情绪更新
self.reasoning_chat = ReasoningChat()
- self.heartFC_processor = HeartFC_Processor() # 新增
+ self.heartFC_processor = HeartFCProcessor() # 新增
# 创建初始化PFC管理器的任务,会在_ensure_started时执行
self.only_process_chat = MessageProcessor()
diff --git a/src/plugins/chat_module/heartFC_chat/heartFC_controler.py b/src/plugins/chat_module/heartFC_chat/heartFC_controler.py
index 389e030a..a217f978 100644
--- a/src/plugins/chat_module/heartFC_chat/heartFC_controler.py
+++ b/src/plugins/chat_module/heartFC_chat/heartFC_controler.py
@@ -20,18 +20,18 @@ chat_config = LogConfig(
file_format=CHAT_STYLE_CONFIG["file_format"],
)
-logger = get_module_logger("HeartFC_Controller", config=chat_config)
+logger = get_module_logger("HeartFCController", config=chat_config)
# 检测群聊兴趣的间隔时间
INTEREST_MONITOR_INTERVAL_SECONDS = 1
-class HeartFC_Controller:
+class HeartFCController:
_instance = None # For potential singleton access if needed by MessageManager
def __init__(self):
# --- Updated Init ---
- if HeartFC_Controller._instance is not None:
+ if HeartFCController._instance is not None:
# Prevent re-initialization if used as a singleton
return
self.gpt = ResponseGenerator()
@@ -44,7 +44,7 @@ class HeartFC_Controller:
self.pf_chatting_instances: Dict[str, PFChatting] = {}
self._pf_chatting_lock = Lock()
# --- End New PFChatting Management ---
- HeartFC_Controller._instance = self # Register instance
+ HeartFCController._instance = self # Register instance
# --- End Updated Init ---
# --- Make dependencies accessible for PFChatting ---
# These are accessed via the passed instance in PFChatting
@@ -58,7 +58,7 @@ class HeartFC_Controller:
def get_instance(cls):
if cls._instance is None:
# This might indicate an issue if called before initialization
- logger.warning("HeartFC_Controller get_instance called before initialization.")
+ logger.warning("HeartFCController get_instance called before initialization.")
# Optionally, initialize here if a strict singleton pattern is desired
# cls._instance = cls()
return cls._instance
@@ -67,9 +67,9 @@ class HeartFC_Controller:
async def start(self):
"""启动异步任务,如回复启动器"""
- logger.debug("HeartFC_Controller 正在启动异步任务...")
+ logger.debug("HeartFCController 正在启动异步任务...")
self._initialize_monitor_task()
- logger.info("HeartFC_Controller 异步任务启动完成")
+ logger.info("HeartFCController 异步任务启动完成")
def _initialize_monitor_task(self):
"""启动后台兴趣监控任务,可以检查兴趣是否足以开启心流对话"""
@@ -89,7 +89,7 @@ class HeartFC_Controller:
async with self._pf_chatting_lock:
if stream_id not in self.pf_chatting_instances:
logger.info(f"为流 {stream_id} 创建新的PFChatting实例")
- # 传递 self (HeartFC_Controller 实例) 进行依赖注入
+ # 传递 self (HeartFCController 实例) 进行依赖注入
instance = PFChatting(stream_id, self)
# 执行异步初始化
if not await instance._initialize():
diff --git a/src/plugins/chat_module/heartFC_chat/heartFC_processor.py b/src/plugins/chat_module/heartFC_chat/heartFC_processor.py
index 37708a94..44849f82 100644
--- a/src/plugins/chat_module/heartFC_chat/heartFC_processor.py
+++ b/src/plugins/chat_module/heartFC_chat/heartFC_processor.py
@@ -25,7 +25,7 @@ logger = get_module_logger("heartFC_processor", config=processor_config)
# INTEREST_INCREASE_THRESHOLD = 0.5
-class HeartFC_Processor:
+class HeartFCProcessor:
def __init__(self):
self.storage = MessageStorage()
self.interest_manager = InterestManager()
@@ -97,21 +97,21 @@ class HeartFC_Processor:
# 处理缓冲器结果 (Bombing logic)
if not buffer_result:
- F_type = "seglist"
+ f_type = "seglist"
if message.message_segment.type != "seglist":
- F_type = message.message_segment.type
+ f_type = message.message_segment.type
else:
if (
isinstance(message.message_segment.data, list)
and all(isinstance(x, Seg) for x in message.message_segment.data)
and len(message.message_segment.data) == 1
):
- F_type = message.message_segment.data[0].type
- if F_type == "text":
+ f_type = message.message_segment.data[0].type
+ if f_type == "text":
logger.debug(f"触发缓冲,消息:{message.processed_plain_text}")
- elif F_type == "image":
+ elif f_type == "image":
logger.debug("触发缓冲,表情包/图片等待中")
- elif F_type == "seglist":
+ elif f_type == "seglist":
logger.debug("触发缓冲,消息列表等待中")
return # 被缓冲器拦截,不生成回复
diff --git a/src/plugins/chat_module/heartFC_chat/pf_chatting.py b/src/plugins/chat_module/heartFC_chat/pf_chatting.py
index 59472fd1..620a9eea 100644
--- a/src/plugins/chat_module/heartFC_chat/pf_chatting.py
+++ b/src/plugins/chat_module/heartFC_chat/pf_chatting.py
@@ -25,7 +25,7 @@ logger = get_module_logger("PFCLoop", config=interest_log_config) # Logger Name
# Forward declaration for type hinting
if TYPE_CHECKING:
- from .heartFC_controler import HeartFC_Controller
+ from .heartFC_controler import HeartFCController
PLANNER_TOOL_DEFINITION = [
{
@@ -61,7 +61,7 @@ class PFChatting:
只要计时器>0,循环就会继续。
"""
- def __init__(self, chat_id: str, heartfc_controller_instance: "HeartFC_Controller"):
+ def __init__(self, chat_id: str, heartfc_controller_instance: "HeartFCController"):
"""
初始化PFChatting实例。
@@ -771,7 +771,7 @@ class PFChatting:
logger.error(traceback.format_exc())
return None
- # --- Methods moved from HeartFC_Controller start ---
+ # --- Methods moved from HeartFCController start ---
async def _create_thinking_message(self, anchor_message: Optional[MessageRecv]) -> Optional[str]:
"""创建思考消息 (尝试锚定到 anchor_message)"""
if not anchor_message or not anchor_message.chat_stream:
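
Beyond the rename itself, the controller hunks show the access pattern the new name participates in: a soft singleton that registers itself in __init__ and is fetched via get_instance(). Reduced to a skeleton (illustrative only, omitting the real dependencies):

from typing import Optional

class HeartFCController:
    _instance: Optional["HeartFCController"] = None

    def __init__(self) -> None:
        if HeartFCController._instance is not None:
            return  # prevent re-initialization when used as a singleton
        HeartFCController._instance = self

    @classmethod
    def get_instance(cls) -> Optional["HeartFCController"]:
        if cls._instance is None:
            print("warning: get_instance called before initialization")
        return cls._instance

HeartFCController()
assert HeartFCController.get_instance() is not None
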
From 13e05adf806806f90edac86f8dbe294f89113991 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=98=A5=E6=B2=B3=E6=99=B4?=
Date: Mon, 21 Apr 2025 14:42:33 +0900
Subject: [PATCH 09/26] =?UTF-8?q?chore:=20=E6=98=BE=E5=BC=8F=E8=BF=94?=
=?UTF-8?q?=E5=9B=9ENone?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/individuality/individuality.py | 1 +
src/plugins/PFC/conversation.py | 8 +++++
src/plugins/chat/message.py | 12 ++++----
src/plugins/chat/utils.py | 5 ++--
.../reasoning_chat/reasoning_chat.py | 29 ++++++++++---------
src/plugins/message/message_base.py | 1 -
src/plugins/person_info/person_info.py | 2 +-
src/plugins/remote/remote.py | 1 +
8 files changed, 35 insertions(+), 24 deletions(-)
diff --git a/src/individuality/individuality.py b/src/individuality/individuality.py
index e7616ec2..2a489338 100644
--- a/src/individuality/individuality.py
+++ b/src/individuality/individuality.py
@@ -105,3 +105,4 @@ class Individuality:
return self.personality.agreeableness
elif factor == "neuroticism":
return self.personality.neuroticism
+ return None
diff --git a/src/plugins/PFC/conversation.py b/src/plugins/PFC/conversation.py
index 598468e8..9502b755 100644
--- a/src/plugins/PFC/conversation.py
+++ b/src/plugins/PFC/conversation.py
@@ -180,6 +180,7 @@ class Conversation:
"time": datetime.datetime.now().strftime("%H:%M:%S"),
}
)
+ return None
elif action == "fetch_knowledge":
self.waiter.wait_accumulated_time = 0
@@ -193,28 +194,35 @@ class Conversation:
if knowledge:
if topic not in self.conversation_info.knowledge_list:
self.conversation_info.knowledge_list.append({"topic": topic, "knowledge": knowledge})
+ return None
else:
self.conversation_info.knowledge_list[topic] += knowledge
+ return None
+ return None
elif action == "rethink_goal":
self.waiter.wait_accumulated_time = 0
self.state = ConversationState.RETHINKING
await self.goal_analyzer.analyze_goal(conversation_info, observation_info)
+ return None
elif action == "listening":
self.state = ConversationState.LISTENING
logger.info("倾听对方发言...")
await self.waiter.wait_listening(conversation_info)
+ return None
elif action == "end_conversation":
self.should_continue = False
logger.info("决定结束对话...")
+ return None
else: # wait
self.state = ConversationState.WAITING
logger.info("等待更多信息...")
await self.waiter.wait(self.conversation_info)
+ return None
async def _send_timeout_message(self):
"""发送超时结束消息"""
diff --git a/src/plugins/chat/message.py b/src/plugins/chat/message.py
index cbea1fd9..87380e7c 100644
--- a/src/plugins/chat/message.py
+++ b/src/plugins/chat/message.py
@@ -1,14 +1,13 @@
import time
from dataclasses import dataclass
-from typing import Dict, List, Optional
+from typing import Dict, List, Optional, Union
import urllib3
-from .utils_image import image_manager
-
-from ..message.message_base import Seg, UserInfo, BaseMessageInfo, MessageBase
-from .chat_stream import ChatStream
from src.common.logger import get_module_logger
+from .chat_stream import ChatStream
+from .utils_image import image_manager
+from ..message.message_base import Seg, UserInfo, BaseMessageInfo, MessageBase
logger = get_module_logger("chat_message")
@@ -207,7 +206,7 @@ class MessageProcessBase(Message):
# 处理单个消息段
return await self._process_single_segment(segment)
- async def _process_single_segment(self, seg: Seg) -> str:
+ async def _process_single_segment(self, seg: Seg) -> Union[str, None]:
"""处理单个消息段
Args:
@@ -233,6 +232,7 @@ class MessageProcessBase(Message):
elif seg.type == "reply":
if self.reply and hasattr(self.reply, "processed_plain_text"):
return f"[回复:{self.reply.processed_plain_text}]"
+ return None
else:
return f"[{seg.type}:{str(seg.data)}]"
except Exception as e:
diff --git a/src/plugins/chat/utils.py b/src/plugins/chat/utils.py
index 9c98a16a..3e4cfa52 100644
--- a/src/plugins/chat/utils.py
+++ b/src/plugins/chat/utils.py
@@ -2,7 +2,7 @@ import random
import time
import re
from collections import Counter
-from typing import Dict, List
+from typing import Dict, List, Optional
import jieba
import numpy as np
@@ -688,7 +688,7 @@ def count_messages_between(start_time: float, end_time: float, stream_id: str) -
return 0, 0
-def translate_timestamp_to_human_readable(timestamp: float, mode: str = "normal") -> str:
+def translate_timestamp_to_human_readable(timestamp: float, mode: str = "normal") -> Optional[str]:
"""将时间戳转换为人类可读的时间格式
Args:
@@ -716,6 +716,7 @@ def translate_timestamp_to_human_readable(timestamp: float, mode: str = "normal"
return f"{int(diff / 86400)}天前:\n"
else:
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timestamp)) + ":\n"
+ return None
def parse_text_timestamps(text: str, mode: str = "normal") -> str:
diff --git a/src/plugins/chat_module/reasoning_chat/reasoning_chat.py b/src/plugins/chat_module/reasoning_chat/reasoning_chat.py
index d149f68b..2eb56c83 100644
--- a/src/plugins/chat_module/reasoning_chat/reasoning_chat.py
+++ b/src/plugins/chat_module/reasoning_chat/reasoning_chat.py
@@ -1,25 +1,26 @@
import time
-from random import random
import traceback
-from typing import List
-from ...memory_system.Hippocampus import HippocampusManager
-from ...moods.moods import MoodManager
-from ....config.config import global_config
-from ...chat.emoji_manager import emoji_manager
+from random import random
+from typing import List, Optional
+
+from src.common.logger import get_module_logger, CHAT_STYLE_CONFIG, LogConfig
+from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
from .reasoning_generator import ResponseGenerator
+from ...chat.chat_stream import chat_manager
+from ...chat.emoji_manager import emoji_manager
from ...chat.message import MessageSending, MessageRecv, MessageThinking, MessageSet
+from ...chat.message_buffer import message_buffer
from ...chat.messagesender import message_manager
-from ...storage.storage import MessageStorage
from ...chat.utils import is_mentioned_bot_in_message
from ...chat.utils_image import image_path_to_base64
-from ...willing.willing_manager import willing_manager
+from ...memory_system.Hippocampus import HippocampusManager
from ...message import UserInfo, Seg
-from src.common.logger import get_module_logger, CHAT_STYLE_CONFIG, LogConfig
-from ...chat.chat_stream import chat_manager
+from ...moods.moods import MoodManager
from ...person_info.relationship_manager import relationship_manager
-from ...chat.message_buffer import message_buffer
-from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
+from ...storage.storage import MessageStorage
from ...utils.timer_calculater import Timer
+from ...willing.willing_manager import willing_manager
+from ....config.config import global_config
# 定义日志配置
chat_config = LogConfig(
@@ -61,7 +62,7 @@ class ReasoningChat:
return thinking_id
@staticmethod
- async def _send_response_messages(message, chat, response_set: List[str], thinking_id) -> MessageSending:
+ async def _send_response_messages(message, chat, response_set: List[str], thinking_id) -> Optional[MessageSending]:
"""发送回复消息"""
container = message_manager.get_container(chat.stream_id)
thinking_message = None
@@ -74,7 +75,7 @@ class ReasoningChat:
if not thinking_message:
logger.warning("未找到对应的思考消息,可能已超时被移除")
- return
+ return None
thinking_start_time = thinking_message.thinking_start_time
message_set = MessageSet(chat, thinking_id)
diff --git a/src/plugins/message/message_base.py b/src/plugins/message/message_base.py
index 2f177670..b853d469 100644
--- a/src/plugins/message/message_base.py
+++ b/src/plugins/message/message_base.py
@@ -12,7 +12,6 @@ class Seg:
- 对于 text 类型,data 是字符串
- 对于 image 类型,data 是 base64 字符串
- 对于 seglist 类型,data 是 Seg 列表
- translated_data: 经过翻译处理的数据(可选)
"""
type: str
diff --git a/src/plugins/person_info/person_info.py b/src/plugins/person_info/person_info.py
index 8105b330..b4404988 100644
--- a/src/plugins/person_info/person_info.py
+++ b/src/plugins/person_info/person_info.py
@@ -169,7 +169,7 @@ class PersonInfoManager:
"""给某个用户取名"""
if not person_id:
logger.debug("取名失败:person_id不能为空")
- return
+ return None
old_name = await self.get_value(person_id, "person_name")
old_reason = await self.get_value(person_id, "name_reason")
diff --git a/src/plugins/remote/remote.py b/src/plugins/remote/remote.py
index 0d119a3e..5bc4dab1 100644
--- a/src/plugins/remote/remote.py
+++ b/src/plugins/remote/remote.py
@@ -134,3 +134,4 @@ def main():
heartbeat_thread.start()
return heartbeat_thread # 返回线程对象,便于外部控制
+ return None
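
The pattern across this commit is uniform: any function annotated Optional[...] (or that previously fell off the end of an if/elif chain) now states its None path explicitly, which satisfies linters such as pylint's inconsistent-return-statements check and makes call sites easier to audit. In miniature, with a hypothetical function name:

from typing import Optional

def personality_factor(factor: str, value: float = 0.5) -> Optional[float]:
    # Before: the final branch fell off the end, returning None implicitly.
    if factor == "openness":
        return value
    elif factor == "neuroticism":
        return value * 0.5
    return None  # explicit, so every code path states its result

print(personality_factor("openness"))  # 0.5
print(personality_factor("unknown"))  # None
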
From 1e481a7af1d3ca85a532efd8517d4b4c486a694d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=98=A5=E6=B2=B3=E6=99=B4?=
Date: Mon, 21 Apr 2025 14:49:14 +0900
Subject: [PATCH 10/26] fix: unreachable?
---
.../chat_module/heartFC_chat/pf_chatting.py | 31 ++++++++++---------
1 file changed, 16 insertions(+), 15 deletions(-)
diff --git a/src/plugins/chat_module/heartFC_chat/pf_chatting.py b/src/plugins/chat_module/heartFC_chat/pf_chatting.py
index 620a9eea..2bb89987 100644
--- a/src/plugins/chat_module/heartFC_chat/pf_chatting.py
+++ b/src/plugins/chat_module/heartFC_chat/pf_chatting.py
@@ -374,6 +374,22 @@ class PFChatting:
)
action_taken_this_cycle = False
+ # --- Print Timer Results --- #
+ if cycle_timers: # 先检查cycle_timers是否非空
+ timer_strings = []
+ for name, elapsed in cycle_timers.items():
+ # 直接格式化存储在字典中的浮点数 elapsed
+ formatted_time = f"{elapsed * 1000:.2f}毫秒" if elapsed < 1 else f"{elapsed:.2f}秒"
+ timer_strings.append(f"{name}: {formatted_time}")
+
+ if timer_strings: # 如果有有效计时器数据才打印
+ logger.debug(
+                    f"{log_prefix} Cycle Timers: {'; '.join(timer_strings)}"
+ )
+
+ # --- Timer Decrement --- #
+ cycle_duration = time.monotonic() - loop_cycle_start_time
+
except Exception as e_cycle:
logger.error(f"{log_prefix} 循环周期执行时发生错误: {e_cycle}")
logger.error(traceback.format_exc())
@@ -387,21 +403,6 @@ class PFChatting:
self._processing_lock.release()
logger.trace(f"{log_prefix} 循环释放了处理锁.")
- # --- Print Timer Results --- #
- if cycle_timers: # 先检查cycle_timers是否非空
- timer_strings = []
- for name, elapsed in cycle_timers.items():
- # 直接格式化存储在字典中的浮点数 elapsed
- formatted_time = f"{elapsed * 1000:.2f}毫秒" if elapsed < 1 else f"{elapsed:.2f}秒"
- timer_strings.append(f"{name}: {formatted_time}")
-
- if timer_strings: # 如果有有效计时器数据才打印
- logger.debug(
- f"{log_prefix} test testtesttesttesttesttesttesttesttesttest Cycle Timers: {'; '.join(timer_strings)}"
- )
-
- # --- Timer Decrement --- #
- cycle_duration = time.monotonic() - loop_cycle_start_time
async with self._timer_lock:
self._loop_timer -= cycle_duration
# Log timer decrement less aggressively
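Note on the hunk above: moving the timer report and the `cycle_duration` computation into the `try` block ensures `cycle_duration` is assigned on every cycle before the timer decrement that follows the lock release. A minimal sketch of the formatting rule, assuming `cycle_timers` maps step names to elapsed float seconds as in pf_chatting.py (names here are illustrative):

    # Values under one second are shown in milliseconds, otherwise in seconds.
    def format_cycle_timers(cycle_timers: dict) -> str:
        timer_strings = []
        for name, elapsed in cycle_timers.items():
            formatted = f"{elapsed * 1000:.2f}毫秒" if elapsed < 1 else f"{elapsed:.2f}秒"
            timer_strings.append(f"{name}: {formatted}")
        return "; ".join(timer_strings)

    print(format_cycle_timers({"思考": 0.042, "回复": 1.7}))  # 思考: 42.00毫秒; 回复: 1.70秒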
From 6cb317123d8e403e6f6a81b3b080042c492d2645 Mon Sep 17 00:00:00 2001
From: UnCLAS-Prommer
Date: Mon, 21 Apr 2025 14:50:25 +0800
Subject: [PATCH 11/26] =?UTF-8?q?ai=E8=AF=B4=E7=9A=84=E5=A5=BD?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/api/__init__.py | 7 ++
src/api/config_api.py | 153 ++++++++++++++++++++++++++++++
src/plugins/models/utils_model.py | 18 +++-
3 files changed, 173 insertions(+), 5 deletions(-)
create mode 100644 src/api/__init__.py
create mode 100644 src/api/config_api.py
diff --git a/src/api/__init__.py b/src/api/__init__.py
new file mode 100644
index 00000000..11c09148
--- /dev/null
+++ b/src/api/__init__.py
@@ -0,0 +1,7 @@
+from fastapi import FastAPI
+from strawberry.fastapi import GraphQLRouter
+app = FastAPI()
+
+graphql_router = GraphQLRouter(schema=None, path="/") # Replace `None` with your actual schema
+
+app.include_router(graphql_router, prefix="/graphql", tags=["GraphQL"])
\ No newline at end of file
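The `schema=None` placeholder above will fail at runtime, since strawberry's GraphQLRouter needs a real `strawberry.Schema`. A minimal sketch of wiring one in, assuming strawberry's documented API; the `Query` type here is hypothetical and would in practice expose the config types defined below:

    import strawberry
    from fastapi import FastAPI
    from strawberry.fastapi import GraphQLRouter

    @strawberry.type
    class Query:
        @strawberry.field
        def ping(self) -> str:
            return "pong"

    schema = strawberry.Schema(query=Query)
    app = FastAPI()
    app.include_router(GraphQLRouter(schema=schema, path="/"), prefix="/graphql", tags=["GraphQL"])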
diff --git a/src/api/config_api.py b/src/api/config_api.py
new file mode 100644
index 00000000..3f323ff8
--- /dev/null
+++ b/src/api/config_api.py
@@ -0,0 +1,153 @@
+from typing import Dict, List, Optional
+import strawberry
+from packaging.version import Version, InvalidVersion
+from packaging.specifiers import SpecifierSet, InvalidSpecifier
+from ..config.config import global_config
+import os
+
+
+@strawberry.type
+class BotConfig:
+ """机器人配置类"""
+
+ INNER_VERSION: Version
+ MAI_VERSION: str # 硬编码的版本信息
+
+ # bot
+ BOT_QQ: Optional[int]
+ BOT_NICKNAME: Optional[str]
+ BOT_ALIAS_NAMES: List[str] # 别名,可以通过这个叫它
+
+ # group
+ talk_allowed_groups: set
+ talk_frequency_down_groups: set
+ ban_user_id: set
+
+ # personality
+ personality_core: str # 建议20字以内,谁再写3000字小作文敲谁脑袋
+ personality_sides: List[str]
+ # identity
+ identity_detail: List[str]
+ height: int # 身高 单位厘米
+ weight: int # 体重 单位千克
+ age: int # 年龄 单位岁
+ gender: str # 性别
+ appearance: str # 外貌特征
+
+ # schedule
+ ENABLE_SCHEDULE_GEN: bool # 是否启用日程生成
+ PROMPT_SCHEDULE_GEN: str
+ SCHEDULE_DOING_UPDATE_INTERVAL: int # 日程表更新间隔 单位秒
+ SCHEDULE_TEMPERATURE: float # 日程表温度,建议0.5-1.0
+ TIME_ZONE: str # 时区
+
+ # message
+ MAX_CONTEXT_SIZE: int # 上下文最大消息数
+ emoji_chance: float # 发送表情包的基础概率
+ thinking_timeout: int # 思考时间
+ max_response_length: int # 最大回复长度
+ message_buffer: bool # 消息缓冲器
+
+ ban_words: set
+ ban_msgs_regex: set
+ # heartflow
+ # enable_heartflow: bool = False # 是否启用心流
+ sub_heart_flow_update_interval: int # 子心流更新频率,间隔 单位秒
+ sub_heart_flow_freeze_time: int # 子心流冻结时间,超过这个时间没有回复,子心流会冻结,间隔 单位秒
+ sub_heart_flow_stop_time: int # 子心流停止时间,超过这个时间没有回复,子心流会停止,间隔 单位秒
+ heart_flow_update_interval: int # 心流更新频率,间隔 单位秒
+ observation_context_size: int # 心流观察到的最长上下文大小,超过这个值的上下文会被压缩
+ compressed_length: int # 不能大于observation_context_size,心流上下文压缩的最短压缩长度,超过心流观察到的上下文长度,会压缩,最短压缩长度为5
+ compress_length_limit: int # 最多压缩份数,超过该数值的压缩上下文会被删除
+
+ # willing
+ willing_mode: str # 意愿模式
+ response_willing_amplifier: float # 回复意愿放大系数
+ response_interested_rate_amplifier: float # 回复兴趣度放大系数
+ down_frequency_rate: float # 降低回复频率的群组回复意愿降低系数
+ emoji_response_penalty: float # 表情包回复惩罚
+ mentioned_bot_inevitable_reply: bool # 提及 bot 必然回复
+ at_bot_inevitable_reply: bool # @bot 必然回复
+
+ # response
+ response_mode: str # 回复策略
+ MODEL_R1_PROBABILITY: float # R1模型概率
+ MODEL_V3_PROBABILITY: float # V3模型概率
+ # MODEL_R1_DISTILL_PROBABILITY: float # R1蒸馏模型概率
+
+ # emoji
+ max_emoji_num: int # 表情包最大数量
+ max_reach_deletion: bool # 开启则在达到最大数量时删除表情包,关闭则不会继续收集表情包
+ EMOJI_CHECK_INTERVAL: int # 表情包检查间隔(分钟)
+ EMOJI_REGISTER_INTERVAL: int # 表情包注册间隔(分钟)
+ EMOJI_SAVE: bool # 偷表情包
+ EMOJI_CHECK: bool # 是否开启过滤
+ EMOJI_CHECK_PROMPT: str # 表情包过滤要求
+
+ # memory
+ build_memory_interval: int # 记忆构建间隔(秒)
+ memory_build_distribution: list # 记忆构建分布,参数:分布1均值,标准差,权重,分布2均值,标准差,权重
+ build_memory_sample_num: int # 记忆构建采样数量
+ build_memory_sample_length: int # 记忆构建采样长度
+ memory_compress_rate: float # 记忆压缩率
+
+ forget_memory_interval: int # 记忆遗忘间隔(秒)
+ memory_forget_time: int # 记忆遗忘时间(小时)
+ memory_forget_percentage: float # 记忆遗忘比例
+
+ memory_ban_words: list # 添加新的配置项默认值
+
+ # mood
+ mood_update_interval: float # 情绪更新间隔 单位秒
+ mood_decay_rate: float # 情绪衰减率
+ mood_intensity_factor: float # 情绪强度因子
+
+ # keywords
+ keywords_reaction_rules: list # 关键词回复规则
+
+ # chinese_typo
+ chinese_typo_enable: bool # 是否启用中文错别字生成器
+ chinese_typo_error_rate: float # 单字替换概率
+ chinese_typo_min_freq: int # 最小字频阈值
+ chinese_typo_tone_error_rate: float # 声调错误概率
+ chinese_typo_word_replace_rate: float # 整词替换概率
+
+ # response_splitter
+ enable_response_splitter: bool # 是否启用回复分割器
+ response_max_length: int # 回复允许的最大长度
+ response_max_sentence_num: int # 回复允许的最大句子数
+
+ # remote
+ remote_enable: bool # 是否启用远程控制
+
+ # experimental
+ enable_friend_chat: bool # 是否启用好友聊天
+ # enable_think_flow: bool # 是否启用思考流程
+ enable_pfc_chatting: bool # 是否启用PFC聊天
+
+ # 模型配置
+ llm_reasoning: Dict[str, str] # LLM推理
+ # llm_reasoning_minor: Dict[str, str]
+ llm_normal: Dict[str, str] # LLM普通
+ llm_topic_judge: Dict[str, str] # LLM话题判断
+ llm_summary_by_topic: Dict[str, str] # LLM话题总结
+ llm_emotion_judge: Dict[str, str] # LLM情感判断
+ embedding: Dict[str, str] # 嵌入
+ vlm: Dict[str, str] # VLM
+ moderation: Dict[str, str] # 审核
+
+ # 实验性
+ llm_observation: Dict[str, str] # LLM观察
+ llm_sub_heartflow: Dict[str, str] # LLM子心流
+ llm_heartflow: Dict[str, str] # LLM心流
+
+ api_urls: Dict[str, str] # API URLs
+
+
+@strawberry.type
+class EnvConfig:
+
+ @strawberry.field
+ def get_env(self) -> str:
+ return "env"
diff --git a/src/plugins/models/utils_model.py b/src/plugins/models/utils_model.py
index 1faebc3e..5fd11692 100644
--- a/src/plugins/models/utils_model.py
+++ b/src/plugins/models/utils_model.py
@@ -68,13 +68,20 @@ error_code_mapping = {
class LLMRequest:
# 定义需要转换的模型列表,作为类变量避免重复
MODELS_NEEDING_TRANSFORMATION = [
- "o3-mini",
- "o1-mini",
- "o1-preview",
+ "o1",
"o1-2024-12-17",
- "o1-preview-2024-09-12",
- "o3-mini-2025-01-31",
+ "o1-mini",
"o1-mini-2024-09-12",
+ "o1-preview",
+ "o1-preview-2024-09-12",
+ "o1-pro",
+ "o1-pro-2025-03-19",
+ "o3",
+ "o3-2025-04-16",
+ "o3-mini",
+ "o3-mini-2025-01-31"
+ "o4-mini",
+ "o4-mini-2025-04-16",
]
def __init__(self, model: dict, **kwargs):
@@ -800,6 +807,7 @@ class LLMRequest:
policy = request_content["policy"]
payload = request_content["payload"]
keep_request = False
+ wait_time = 0.1
if retry_count < policy["max_retries"] - 1:
wait_time = policy["base_wait"] * (2**retry_count)
keep_request = True
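The hunk above hoists the exponential-backoff default so `wait_time` is defined on every path, not only when another retry remains. A minimal sketch of the policy, assuming `policy = {"max_retries": ..., "base_wait": ...}` and a 0-based `retry_count`:

    # Exponential backoff: wait base_wait * 2^retry_count; stop after the last retry.
    def backoff_decision(policy: dict, retry_count: int) -> tuple:
        wait_time = policy["base_wait"] * (2 ** retry_count)  # 指数退避
        keep_request = retry_count < policy["max_retries"] - 1
        return keep_request, wait_time

    # base_wait=1s, max_retries=3 -> (True, 1), (True, 2), (False, 4)
    print([backoff_decision({"max_retries": 3, "base_wait": 1}, i) for i in range(3)])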
From cbfa8508c6a32fcfd4bec1a19284d6a108456090 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=98=A5=E6=B2=B3=E6=99=B4?=
Date: Mon, 21 Apr 2025 15:56:17 +0900
Subject: [PATCH 12/26] chore: update README and template.env for better
formatting and clarity
---
README.md | 7 +++----
template/template.env | 20 +++++++++++++++-----
2 files changed, 18 insertions(+), 9 deletions(-)
diff --git a/README.md b/README.md
index 656f536a..26cd30f6 100644
--- a/README.md
+++ b/README.md
@@ -14,7 +14,7 @@
-
+
@@ -34,7 +34,6 @@
·
提出新特性
-
## 新版0.6.x部署前先阅读:https://docs.mai-mai.org/manual/usage/mmc_q_a
@@ -53,7 +52,7 @@
-
+
👆 点击观看麦麦演示视频 👆
@@ -186,7 +185,7 @@ MaiCore是一个开源项目,我们非常欢迎你的参与。你的贡献,
感谢各位大佬!
-
+
**也感谢每一位给麦麦发展提出宝贵意见与建议的用户,感谢陪伴麦麦走到现在的你们**
diff --git a/template/template.env b/template/template.env
index 06e9b07e..c1a6dd0d 100644
--- a/template/template.env
+++ b/template/template.env
@@ -29,8 +29,18 @@ CHAT_ANY_WHERE_KEY=
SILICONFLOW_KEY=
# 定义日志相关配置
-SIMPLE_OUTPUT=true # 精简控制台输出格式
-CONSOLE_LOG_LEVEL=INFO # 自定义日志的默认控制台输出日志级别
-FILE_LOG_LEVEL=DEBUG # 自定义日志的默认文件输出日志级别
-DEFAULT_CONSOLE_LOG_LEVEL=SUCCESS # 原生日志的控制台输出日志级别(nonebot就是这一类)
-DEFAULT_FILE_LOG_LEVEL=DEBUG # 原生日志的默认文件输出日志级别(nonebot就是这一类)
\ No newline at end of file
+
+# 精简控制台输出格式
+SIMPLE_OUTPUT=true
+
+# 自定义日志的默认控制台输出日志级别
+CONSOLE_LOG_LEVEL=INFO
+
+# 自定义日志的默认文件输出日志级别
+FILE_LOG_LEVEL=DEBUG
+
+# 原生日志的控制台输出日志级别(nonebot就是这一类)
+DEFAULT_CONSOLE_LOG_LEVEL=SUCCESS
+
+# 原生日志的默认文件输出日志级别(nonebot就是这一类)
+DEFAULT_FILE_LOG_LEVEL=DEBUG
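A minimal sketch of consuming these entries, assuming they are loaded into the process environment (for example via python-dotenv); the defaults mirror template.env:

    import os

    SIMPLE_OUTPUT = os.getenv("SIMPLE_OUTPUT", "true").lower() == "true"
    CONSOLE_LOG_LEVEL = os.getenv("CONSOLE_LOG_LEVEL", "INFO")
    FILE_LOG_LEVEL = os.getenv("FILE_LOG_LEVEL", "DEBUG")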
From 7d2f5b51a7f6af71c44ff2333f759e671ce1479b Mon Sep 17 00:00:00 2001
From: UnCLAS-Prommer
Date: Mon, 21 Apr 2025 15:25:29 +0800
Subject: [PATCH 13/26] =?UTF-8?q?=E5=90=88=E5=B9=B6openai=E5=85=BC?=
=?UTF-8?q?=E5=AE=B9=EF=BC=8C=E8=BF=87ruff?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/api/config_api.py | 10 +++---
src/plugins/models/utils_model.py | 53 ++++++++++++++-----------------
2 files changed, 28 insertions(+), 35 deletions(-)
diff --git a/src/api/config_api.py b/src/api/config_api.py
index 3f323ff8..650cbf63 100644
--- a/src/api/config_api.py
+++ b/src/api/config_api.py
@@ -1,10 +1,10 @@
from typing import Dict, List, Optional
import strawberry
-from packaging.version import Version, InvalidVersion
-from packaging.specifiers import SpecifierSet, InvalidSpecifier
-from ..config.config import global_config
-import os
-
+# from packaging.version import Version, InvalidVersion
+# from packaging.specifiers import SpecifierSet, InvalidSpecifier
+# from ..config.config import global_config
+# import os
+from packaging.version import Version
@strawberry.type
class BotConfig:
diff --git a/src/plugins/models/utils_model.py b/src/plugins/models/utils_model.py
index 5fd11692..365b15a6 100644
--- a/src/plugins/models/utils_model.py
+++ b/src/plugins/models/utils_model.py
@@ -79,8 +79,7 @@ class LLMRequest:
"o3",
"o3-2025-04-16",
"o3-mini",
- "o3-mini-2025-01-31"
- "o4-mini",
+ "o3-mini-2025-01-31o4-mini",
"o4-mini-2025-04-16",
]
@@ -806,10 +805,8 @@ class LLMRequest:
) -> Union[Tuple[Dict[str, Any], int], Tuple[None, int]]:
policy = request_content["policy"]
payload = request_content["payload"]
keep_request = False
- wait_time = 0.1
+ wait_time = policy["base_wait"] * (2**retry_count)
if retry_count < policy["max_retries"] - 1:
- wait_time = policy["base_wait"] * (2**retry_count)
keep_request = True
if isinstance(exception, RequestAbortException):
response = exception.response
@@ -989,30 +986,27 @@ class LLMRequest:
# 复制一份参数,避免直接修改 self.params
params_copy = await self._transform_parameters(self.params)
if image_base64:
- payload = {
- "model": self.model_name,
- "messages": [
- {
- "role": "user",
- "content": [
- {"type": "text", "text": prompt},
- {
- "type": "image_url",
- "image_url": {"url": f"data:image/{image_format.lower()};base64,{image_base64}"},
- },
- ],
- }
- ],
- "max_tokens": global_config.max_response_length,
- **params_copy,
- }
+ messages = [
+ {
+ "role": "user",
+ "content": [
+ {"type": "text", "text": prompt},
+ {
+ "type": "image_url",
+ "image_url": {"url": f"data:image/{image_format.lower()};base64,{image_base64}"},
+ },
+ ],
+ }
+ ]
else:
- payload = {
- "model": self.model_name,
- "messages": [{"role": "user", "content": prompt}],
- "max_tokens": global_config.max_response_length,
- **params_copy,
- }
+ messages = [{"role": "user", "content": prompt}]
+ payload = {
+ "model": self.model_name,
+ "messages": messages,
+ **params_copy,
+ }
+ if "max_tokens" not in payload and "max_completion_tokens" not in payload:
+ payload["max_tokens"] = global_config.max_response_length
# 如果 payload 中依然存在 max_tokens 且需要转换,在这里进行再次检查
if self.model_name.lower() in self.MODELS_NEEDING_TRANSFORMATION and "max_tokens" in payload:
payload["max_completion_tokens"] = payload.pop("max_tokens")
@@ -1105,11 +1099,10 @@ class LLMRequest:
async def generate_response_async(self, prompt: str, **kwargs) -> Union[str, Tuple]:
"""异步方式根据输入的提示生成模型的响应"""
- # 构建请求体
+ # 构建请求体,不硬编码max_tokens
data = {
"model": self.model_name,
"messages": [{"role": "user", "content": prompt}],
- "max_tokens": global_config.max_response_length,
**self.params,
**kwargs,
}
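The refactor above builds `messages` once, injects a default `max_tokens` only when the caller set neither token limit, and renames the field for o-series models. A sketch of the resulting payload rules; `MODELS_NEEDING_TRANSFORMATION` and `default_max` are stand-ins for the real class attribute and `global_config.max_response_length`:

    MODELS_NEEDING_TRANSFORMATION = {"o1", "o3-mini", "o4-mini"}

    def build_payload(model_name: str, messages: list, params: dict, default_max: int) -> dict:
        payload = {"model": model_name, "messages": messages, **params}
        if "max_tokens" not in payload and "max_completion_tokens" not in payload:
            payload["max_tokens"] = default_max  # 仅在调用方未指定时注入默认值
        if model_name.lower() in MODELS_NEEDING_TRANSFORMATION and "max_tokens" in payload:
            payload["max_completion_tokens"] = payload.pop("max_tokens")  # o系列模型改用新字段
        return payload

    print(build_payload("o3-mini", [{"role": "user", "content": "hi"}], {}, 256))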
From 6e0a3cf8cf183429c3cdb9aca45c7213604d5bef Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
Date: Mon, 21 Apr 2025 07:26:08 +0000
Subject: [PATCH 14/26] =?UTF-8?q?=F0=9F=A4=96=20=E8=87=AA=E5=8A=A8?=
=?UTF-8?q?=E6=A0=BC=E5=BC=8F=E5=8C=96=E4=BB=A3=E7=A0=81=20[skip=20ci]?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/api/__init__.py | 3 ++-
src/api/config_api.py | 2 ++
2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/src/api/__init__.py b/src/api/__init__.py
index 11c09148..f5bc08a6 100644
--- a/src/api/__init__.py
+++ b/src/api/__init__.py
@@ -1,7 +1,8 @@
from fastapi import FastAPI
from strawberry.fastapi import GraphQLRouter
+
app = FastAPI()
graphql_router = GraphQLRouter(schema=None, path="/") # Replace `None` with your actual schema
-app.include_router(graphql_router, prefix="/graphql", tags=["GraphQL"])
\ No newline at end of file
+app.include_router(graphql_router, prefix="/graphql", tags=["GraphQL"])
diff --git a/src/api/config_api.py b/src/api/config_api.py
index 650cbf63..e3934617 100644
--- a/src/api/config_api.py
+++ b/src/api/config_api.py
@@ -1,11 +1,13 @@
from typing import Dict, List, Optional
import strawberry
+
# from packaging.version import Version, InvalidVersion
# from packaging.specifiers import SpecifierSet, InvalidSpecifier
# from ..config.config import global_config
# import os
from packaging.version import Version
+
@strawberry.type
class BotConfig:
"""机器人配置类"""
From c10b7eea61b9f352475363f4c92883cd6f4e055b Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Mon, 21 Apr 2025 18:37:49 +0800
Subject: [PATCH 15/26] =?UTF-8?q?feat:=20=E6=95=B4=E5=90=88reasoning?=
=?UTF-8?q?=E6=A8=A1=E5=BC=8F=E5=92=8Chfc=E6=A8=A1=E5=BC=8F=EF=BC=8C?=
=?UTF-8?q?=E7=BB=9F=E4=B8=80=E8=B0=83=E6=8E=A7=EF=BC=88=E4=BD=86=E4=B8=8D?=
=?UTF-8?q?=E6=98=AF=E5=BE=88=E7=BB=9F=E4=B8=80=EF=BC=89?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/config/config.py | 28 +-
src/heart_flow/heartflow.py | 10 +-
src/heart_flow/sub_heartflow.py | 11 +-
src/main.py | 8 +-
src/plugins/chat/bot.py | 2 -
.../heartFC_chat/heartFC_controler.py | 82 ++--
.../heartFC_chat/heartFC_processor.py | 16 +-
.../chat_module/heartFC_chat/interest.py | 12 +
.../heartFC_chat/reasoning_chat.py | 412 ++++++++++++++++
.../heartFC_chat/reasoning_generator.py | 199 ++++++++
.../heartFC_chat/reasoning_prompt_builder.py | 445 ++++++++++++++++++
.../reasoning_chat/reasoning_chat.py | 43 +-
.../reasoning_chat/reasoning_generator.py | 2 +-
src/plugins/memory_system/Hippocampus.py | 6 +-
14 files changed, 1188 insertions(+), 88 deletions(-)
create mode 100644 src/plugins/chat_module/heartFC_chat/reasoning_chat.py
create mode 100644 src/plugins/chat_module/heartFC_chat/reasoning_generator.py
create mode 100644 src/plugins/chat_module/heartFC_chat/reasoning_prompt_builder.py
diff --git a/src/config/config.py b/src/config/config.py
index d2fe6f0f..83e47837 100644
--- a/src/config/config.py
+++ b/src/config/config.py
@@ -213,8 +213,8 @@ class BotConfig:
# response
response_mode: str = "heart_flow" # 回复策略
- model_reasoning_probability: float = 0.7 # 麦麦回答时选择推理模型(主要)模型概率
- model_normal_probability: float = 0.3 # 麦麦回答时选择一般模型(次要)模型概率
+ model_reasoning_probability: float = 0.7 # 麦麦回答时选择推理模型(主要)模型概率
+ model_normal_probability: float = 0.3 # 麦麦回答时选择一般模型(次要)模型概率
# MODEL_R1_DISTILL_PROBABILITY: float = 0.1 # R1蒸馏模型概率
# emoji
@@ -407,10 +407,13 @@ class BotConfig:
def response(parent: dict):
response_config = parent["response"]
- config.model_reasoning_probability = response_config.get("model_reasoning_probability", config.model_reasoning_probability)
- config.model_normal_probability = response_config.get("model_normal_probability", config.model_normal_probability)
-
-
+ config.model_reasoning_probability = response_config.get(
+ "model_reasoning_probability", config.model_reasoning_probability
+ )
+ config.model_normal_probability = response_config.get(
+ "model_normal_probability", config.model_normal_probability
+ )
+
# 添加 enable_heart_flowC 的加载逻辑 (假设它在 [response] 部分)
if config.INNER_VERSION in SpecifierSet(">=1.4.0"):
config.enable_heart_flowC = response_config.get("enable_heart_flowC", config.enable_heart_flowC)
@@ -418,7 +421,6 @@ class BotConfig:
def heartflow(parent: dict):
heartflow_config = parent["heartflow"]
# 加载新增的 heartflowC 参数
-
# 加载原有的 heartflow 参数
# config.sub_heart_flow_update_interval = heartflow_config.get(
@@ -442,9 +444,15 @@ class BotConfig:
"compress_length_limit", config.compress_length_limit
)
if config.INNER_VERSION in SpecifierSet(">=1.4.0"):
- config.reply_trigger_threshold = heartflow_config.get("reply_trigger_threshold", config.reply_trigger_threshold)
- config.probability_decay_factor_per_second = heartflow_config.get("probability_decay_factor_per_second", config.probability_decay_factor_per_second)
- config.default_decay_rate_per_second = heartflow_config.get("default_decay_rate_per_second", config.default_decay_rate_per_second)
+ config.reply_trigger_threshold = heartflow_config.get(
+ "reply_trigger_threshold", config.reply_trigger_threshold
+ )
+ config.probability_decay_factor_per_second = heartflow_config.get(
+ "probability_decay_factor_per_second", config.probability_decay_factor_per_second
+ )
+ config.default_decay_rate_per_second = heartflow_config.get(
+ "default_decay_rate_per_second", config.default_decay_rate_per_second
+ )
config.initial_duration = heartflow_config.get("initial_duration", config.initial_duration)
def willing(parent: dict):
diff --git a/src/heart_flow/heartflow.py b/src/heart_flow/heartflow.py
index c2f922ff..50f0a735 100644
--- a/src/heart_flow/heartflow.py
+++ b/src/heart_flow/heartflow.py
@@ -45,6 +45,8 @@ class CurrentState:
def __init__(self):
self.current_state_info = ""
+ self.chat_status = "IDLE"
+
self.mood_manager = MoodManager()
self.mood = self.mood_manager.get_prompt()
@@ -70,7 +72,7 @@ class Heartflow:
"""定期清理不活跃的子心流"""
while True:
current_time = time.time()
- inactive_subheartflows_ids = [] # 修改变量名以清晰表示存储的是ID
+ inactive_subheartflows_ids = [] # 修改变量名以清晰表示存储的是ID
# 检查所有子心流
# 使用 list(self._subheartflows.items()) 避免在迭代时修改字典
@@ -104,7 +106,7 @@ class Heartflow:
# await self.do_a_thinking()
# await asyncio.sleep(global_config.heart_flow_update_interval * 3) # 5分钟思考一次
-
+
await asyncio.sleep(300)
async def heartflow_start_working(self):
@@ -253,7 +255,7 @@ class Heartflow:
# 创建并初始化观察对象
logger.debug(f"为 {subheartflow_id} 创建 observation")
observation = ChattingObservation(subheartflow_id)
- await observation.initialize() # 等待初始化完成
+ await observation.initialize() # 等待初始化完成
subheartflow.add_observation(observation)
logger.debug(f"为 {subheartflow_id} 添加 observation 成功")
@@ -269,7 +271,7 @@ class Heartflow:
except Exception as e:
# 记录详细错误信息
logger.error(f"创建 subheartflow {subheartflow_id} 失败: {e}")
- logger.error(traceback.format_exc()) # 记录完整的 traceback
+ logger.error(traceback.format_exc()) # 记录完整的 traceback
# 考虑是否需要更具体的错误处理或资源清理逻辑
return None
diff --git a/src/heart_flow/sub_heartflow.py b/src/heart_flow/sub_heartflow.py
index c1a58dcd..9087b576 100644
--- a/src/heart_flow/sub_heartflow.py
+++ b/src/heart_flow/sub_heartflow.py
@@ -5,7 +5,6 @@ from src.plugins.models.utils_model import LLMRequest
from src.config.config import global_config
import time
from typing import Optional, List
-from datetime import datetime
import traceback
from src.plugins.chat.utils import parse_text_timestamps
@@ -76,14 +75,14 @@ class SubHeartflow:
)
self.main_heartflow_info = ""
-
+
self.last_active_time = time.time() # 添加最后激活时间
- self.should_stop = False # 添加停止标志
- self.task: Optional[asyncio.Task] = None # 添加 task 属性
+ self.should_stop = False # 添加停止标志
+ self.task: Optional[asyncio.Task] = None # 添加 task 属性
self.is_active = False
- self.observations: List[ChattingObservation] = [] # 使用 List 类型提示
+ self.observations: List[ChattingObservation] = [] # 使用 List 类型提示
self.running_knowledges = []
@@ -98,7 +97,7 @@ class SubHeartflow:
# 检查是否被主心流标记为停止
if self.should_stop:
logger.info(f"子心流 {self.subheartflow_id} 被标记为停止,正在退出后台任务...")
- break # 退出循环以停止任务
+ break # 退出循环以停止任务
await asyncio.sleep(global_config.sub_heart_flow_update_interval) # 定期检查销毁条件
diff --git a/src/main.py b/src/main.py
index aad08b90..f113a732 100644
--- a/src/main.py
+++ b/src/main.py
@@ -19,6 +19,7 @@ from .individuality.individuality import Individuality
from .common.server import global_server
from .plugins.chat_module.heartFC_chat.interest import InterestManager
from .plugins.chat_module.heartFC_chat.heartFC_controler import HeartFC_Controller
+from .plugins.chat_module.heartFC_chat.reasoning_chat import ReasoningChat
logger = get_module_logger("main")
@@ -117,8 +118,11 @@ class MainSystem:
await interest_manager.start_background_tasks()
logger.success("兴趣管理器后台任务启动成功")
- # 初始化并独立启动 HeartFC_Chat
- HeartFC_Controller()
+ # 初始化 ReasoningChat 单例 (确保它在需要之前被创建)
+ ReasoningChat.get_instance()
+ logger.success("ReasoningChat 单例初始化成功")
+
+ # 初始化并独立启动 HeartFC_Chat 控制器 (使用 get_instance 获取单例)
heartfc_chat_instance = HeartFC_Controller.get_instance()
if heartfc_chat_instance:
await heartfc_chat_instance.start()
diff --git a/src/plugins/chat/bot.py b/src/plugins/chat/bot.py
index c3ba78b0..eaf82997 100644
--- a/src/plugins/chat/bot.py
+++ b/src/plugins/chat/bot.py
@@ -123,8 +123,6 @@ class ChatBot:
await self.heartFC_processor.process_message(message_data)
else:
await self.heartFC_processor.process_message(message_data)
-
-
if template_group_name:
async with global_prompt_manager.async_message_scope(template_group_name):
diff --git a/src/plugins/chat_module/heartFC_chat/heartFC_controler.py b/src/plugins/chat_module/heartFC_chat/heartFC_controler.py
index 389e030a..55790eb4 100644
--- a/src/plugins/chat_module/heartFC_chat/heartFC_controler.py
+++ b/src/plugins/chat_module/heartFC_chat/heartFC_controler.py
@@ -13,6 +13,7 @@ from src.do_tool.tool_use import ToolUser
from .interest import InterestManager
from src.plugins.chat.chat_stream import chat_manager
from .pf_chatting import PFChatting
+import threading # 导入 threading
# 定义日志配置
chat_config = LogConfig(
@@ -27,43 +28,58 @@ INTEREST_MONITOR_INTERVAL_SECONDS = 1
class HeartFC_Controller:
- _instance = None # For potential singleton access if needed by MessageManager
+ _instance = None
+ _lock = threading.Lock() # 使用 threading.Lock 替代 asyncio.Lock 以兼容 __new__
+ _initialized = False
- def __init__(self):
- # --- Updated Init ---
- if HeartFC_Controller._instance is not None:
- # Prevent re-initialization if used as a singleton
- return
- self.gpt = ResponseGenerator()
- self.mood_manager = MoodManager.get_instance()
- self.mood_manager.start_mood_update()
- self.tool_user = ToolUser()
- self.interest_manager = InterestManager()
- self._interest_monitor_task: Optional[asyncio.Task] = None
- # --- New PFChatting Management ---
- self.pf_chatting_instances: Dict[str, PFChatting] = {}
- self._pf_chatting_lock = Lock()
- # --- End New PFChatting Management ---
- HeartFC_Controller._instance = self # Register instance
- # --- End Updated Init ---
- # --- Make dependencies accessible for PFChatting ---
- # These are accessed via the passed instance in PFChatting
- self.emoji_manager = emoji_manager
- self.relationship_manager = relationship_manager
- self.MessageManager = MessageManager # Pass the class/singleton access
- # --- End dependencies ---
-
- # --- Added Class Method for Singleton Access ---
- @classmethod
- def get_instance(cls):
+ def __new__(cls, *args, **kwargs):
if cls._instance is None:
- # This might indicate an issue if called before initialization
- logger.warning("HeartFC_Controller get_instance called before initialization.")
- # Optionally, initialize here if a strict singleton pattern is desired
- # cls._instance = cls()
+ with cls._lock:
+ if cls._instance is None:
+ cls._instance = super().__new__(cls)
return cls._instance
- # --- End Added Class Method ---
+ def __init__(self):
+ if self._initialized:
+ return
+ with self.__class__._lock: # 使用类锁确保初始化线程安全
+ if self._initialized:
+ return
+ logger.info("正在初始化 HeartFC_Controller 单例...")
+ self.gpt = ResponseGenerator()
+ self.mood_manager = MoodManager.get_instance()
+ self.mood_manager.start_mood_update()
+ self.tool_user = ToolUser()
+ self.interest_manager = InterestManager()
+ self._interest_monitor_task: Optional[asyncio.Task] = None
+ self.pf_chatting_instances: Dict[str, PFChatting] = {}
+ self._pf_chatting_lock = Lock() # 这个可以是 asyncio.Lock,用于异步上下文
+ self.emoji_manager = emoji_manager
+ self.relationship_manager = relationship_manager
+ self.MessageManager = MessageManager
+ self._initialized = True
+ logger.info("HeartFC_Controller 单例初始化完成。")
+
+ @classmethod
+ def get_instance(cls):
+ """获取 HeartFC_Controller 的单例实例。"""
+ if cls._instance is None:
+ logger.warning("HeartFC_Controller 实例在首次 get_instance 时创建,可能未在 main 中正确初始化。")
+ cls() # 调用构造函数创建
+ return cls._instance
+
+ # --- 新增:检查 PFChatting 状态的方法 --- #
+ def is_pf_chatting_active(self, stream_id: str) -> bool:
+ """检查指定 stream_id 的 PFChatting 循环是否处于活动状态。"""
+ # 注意:这里直接访问字典,不加锁,因为读取通常是安全的,
+ # 并且 PFChatting 实例的 _loop_active 状态由其自身的异步循环管理。
+ # 如果需要更强的保证,可以在访问 pf_instance 前获取 _pf_chatting_lock
+ pf_instance = self.pf_chatting_instances.get(stream_id)
+ if pf_instance and pf_instance._loop_active: # 直接检查 PFChatting 实例的 _loop_active 属性
+ return True
+ return False
+
+ # --- 结束新增 --- #
async def start(self):
"""启动异步任务,如回复启动器"""
diff --git a/src/plugins/chat_module/heartFC_chat/heartFC_processor.py b/src/plugins/chat_module/heartFC_chat/heartFC_processor.py
index 37708a94..38c68779 100644
--- a/src/plugins/chat_module/heartFC_chat/heartFC_processor.py
+++ b/src/plugins/chat_module/heartFC_chat/heartFC_processor.py
@@ -13,6 +13,7 @@ from ...chat.message_buffer import message_buffer
from ...utils.timer_calculater import Timer
from .interest import InterestManager
from src.plugins.person_info.relationship_manager import relationship_manager
+from .reasoning_chat import ReasoningChat
# 定义日志配置
processor_config = LogConfig(
@@ -29,7 +30,7 @@ class HeartFC_Processor:
def __init__(self):
self.storage = MessageStorage()
self.interest_manager = InterestManager()
- # self.chat_instance = chat_instance # 持有 HeartFC_Chat 实例
+ self.reasoning_chat = ReasoningChat.get_instance()
async def process_message(self, message_data: str) -> None:
"""处理接收到的原始消息数据,完成消息解析、缓冲、过滤、存储、兴趣度计算与更新等核心流程。
@@ -72,11 +73,11 @@ class HeartFC_Processor:
user_info=userinfo,
group_info=groupinfo,
)
- if not chat:
- logger.error(
- f"无法为消息创建或获取聊天流: user {userinfo.user_id}, group {groupinfo.group_id if groupinfo else 'None'}"
- )
- return
+
+ # --- 添加兴趣追踪启动 ---
+ # 在获取到 chat 对象后,启动对该聊天流的兴趣监控
+ await self.reasoning_chat.start_monitoring_interest(chat)
+ # --- 结束添加 ---
message.update_chat_stream(chat)
@@ -90,7 +91,6 @@ class HeartFC_Processor:
message.raw_message, chat, userinfo
):
return
- logger.trace(f"过滤词/正则表达式过滤成功: {message.processed_plain_text}")
# 查询缓冲器结果
buffer_result = await message_buffer.query_buffer_result(message)
@@ -152,6 +152,8 @@ class HeartFC_Processor:
f"使用激活率 {interested_rate:.2f} 更新后 (通过缓冲后),当前兴趣度: {current_interest:.2f}"
)
+ self.interest_manager.add_interest_dict(message, interested_rate, is_mentioned)
+
except Exception as e:
logger.error(f"更新兴趣度失败: {e}") # 调整日志消息
logger.error(traceback.format_exc())
diff --git a/src/plugins/chat_module/heartFC_chat/interest.py b/src/plugins/chat_module/heartFC_chat/interest.py
index 5a961e91..4ac5498a 100644
--- a/src/plugins/chat_module/heartFC_chat/interest.py
+++ b/src/plugins/chat_module/heartFC_chat/interest.py
@@ -6,6 +6,7 @@ import json # 引入 json
import os # 引入 os
from typing import Optional # <--- 添加导入
import random # <--- 添加导入 random
+from src.plugins.chat.message import MessageRecv
from src.common.logger import get_module_logger, LogConfig, DEFAULT_CONFIG # 引入 DEFAULT_CONFIG
from src.plugins.chat.chat_stream import chat_manager # *** Import ChatManager ***
@@ -66,6 +67,13 @@ class InterestChatting:
self.is_above_threshold: bool = False # 标记兴趣值是否高于阈值
# --- 结束:概率回复相关属性 ---
+ # 记录激发兴趣对(消息id,激活值)
+ self.interest_dict = {}
+
+ def add_interest_dict(self, message: MessageRecv, interest_value: float, is_mentioned: bool):
+ # Store the MessageRecv object and the interest value as a tuple
+ self.interest_dict[message.message_info.message_id] = (message, interest_value, is_mentioned)
+
def _calculate_decay(self, current_time: float):
"""计算从上次更新到现在的衰减"""
time_delta = current_time - self.last_update_time
@@ -445,6 +453,10 @@ class InterestManager:
stream_name = chat_manager.get_stream_name(stream_id) or stream_id # 获取流名称
logger.warning(f"尝试降低不存在的聊天流 {stream_name} 的兴趣度")
+ def add_interest_dict(self, message: MessageRecv, interest_value: float, is_mentioned: bool):
+ interest_chatting = self._get_or_create_interest_chatting(message.chat_stream.stream_id)
+ interest_chatting.add_interest_dict(message, interest_value, is_mentioned)
+
def cleanup_inactive_chats(self, max_age_seconds=INACTIVE_THRESHOLD_SECONDS):
"""
清理长时间不活跃的聊天流记录
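The `add_interest_dict` entries added above are consumed by iterating a snapshot of the dict and popping each id afterwards, so a message is handled at most once even if new entries arrive mid-loop. A sketch of that consume side; `process` is a stand-in for the actual reply handler:

    async def drain(interest_dict: dict, process) -> None:
        # list(...) 先做快照,避免迭代时字典被修改
        for msg_id, (message, interest_value, is_mentioned) in list(interest_dict.items()):
            try:
                await process(message, interest_value, is_mentioned)
            finally:
                interest_dict.pop(msg_id, None)  # 处理后移除,避免重复消费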
diff --git a/src/plugins/chat_module/heartFC_chat/reasoning_chat.py b/src/plugins/chat_module/heartFC_chat/reasoning_chat.py
new file mode 100644
index 00000000..95d3641d
--- /dev/null
+++ b/src/plugins/chat_module/heartFC_chat/reasoning_chat.py
@@ -0,0 +1,412 @@
+import time
+import threading # 导入 threading
+from random import random
+import traceback
+import asyncio
+from typing import Dict, List, Optional
+from ...moods.moods import MoodManager
+from ....config.config import global_config
+from ...chat.emoji_manager import emoji_manager
+from .reasoning_generator import ResponseGenerator
+from ...chat.message import MessageSending, MessageRecv, MessageThinking, MessageSet
+from ...chat.messagesender import message_manager
+from ...storage.storage import MessageStorage
+from ...chat.utils import is_mentioned_bot_in_message
+from ...chat.utils_image import image_path_to_base64
+from ...willing.willing_manager import willing_manager
+from ...message import UserInfo, Seg
+from src.common.logger import get_module_logger, CHAT_STYLE_CONFIG, LogConfig
+from src.plugins.chat.chat_stream import ChatStream
+from src.plugins.person_info.relationship_manager import relationship_manager
+from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
+from src.plugins.utils.timer_calculater import Timer
+from .interest import InterestManager
+from .heartFC_controler import HeartFC_Controller # 导入 HeartFC_Controller
+
+# 定义日志配置
+chat_config = LogConfig(
+ console_format=CHAT_STYLE_CONFIG["console_format"],
+ file_format=CHAT_STYLE_CONFIG["file_format"],
+)
+
+logger = get_module_logger("reasoning_chat", config=chat_config)
+
+
+class ReasoningChat:
+ _instance = None
+ _lock = threading.Lock()
+ _initialized = False
+
+ def __new__(cls, *args, **kwargs):
+ if cls._instance is None:
+ with cls._lock:
+ # Double-check locking
+ if cls._instance is None:
+ cls._instance = super().__new__(cls)
+ return cls._instance
+
+ def __init__(self):
+ # 防止重复初始化
+ if self._initialized:
+ return
+ with self.__class__._lock: # 使用类锁确保线程安全
+ if self._initialized:
+ return
+ logger.info("正在初始化 ReasoningChat 单例...") # 添加日志
+ self.storage = MessageStorage()
+ self.gpt = ResponseGenerator()
+ self.mood_manager = MoodManager.get_instance()
+ self.mood_manager.start_mood_update()
+ # 用于存储每个 chat stream 的兴趣监控任务
+ self._interest_monitoring_tasks: Dict[str, asyncio.Task] = {}
+ self._initialized = True
+ self.interest_manager = InterestManager()
+ logger.info("ReasoningChat 单例初始化完成。") # 添加日志
+
+ @classmethod
+ def get_instance(cls):
+ """获取 ReasoningChat 的单例实例。"""
+ if cls._instance is None:
+ # 如果实例还未创建(理论上应该在 main 中初始化,但作为备用)
+ logger.warning("ReasoningChat 实例在首次 get_instance 时创建。")
+ cls() # 调用构造函数来创建实例
+ return cls._instance
+
+ @staticmethod
+ async def _create_thinking_message(message, chat, userinfo, messageinfo):
+ """创建思考消息"""
+ bot_user_info = UserInfo(
+ user_id=global_config.BOT_QQ,
+ user_nickname=global_config.BOT_NICKNAME,
+ platform=messageinfo.platform,
+ )
+
+ thinking_time_point = round(time.time(), 2)
+ thinking_id = "mt" + str(thinking_time_point)
+ thinking_message = MessageThinking(
+ message_id=thinking_id,
+ chat_stream=chat,
+ bot_user_info=bot_user_info,
+ reply=message,
+ thinking_start_time=thinking_time_point,
+ )
+
+ message_manager.add_message(thinking_message)
+
+ return thinking_id
+
+ @staticmethod
+ async def _send_response_messages(message, chat, response_set: List[str], thinking_id) -> Optional[MessageSending]:
+ """发送回复消息"""
+ container = message_manager.get_container(chat.stream_id)
+ thinking_message = None
+
+ for msg in container.messages:
+ if isinstance(msg, MessageThinking) and msg.message_info.message_id == thinking_id:
+ thinking_message = msg
+ container.messages.remove(msg)
+ break
+
+ if not thinking_message:
+ logger.warning("未找到对应的思考消息,可能已超时被移除")
+ return None
+
+ thinking_start_time = thinking_message.thinking_start_time
+ message_set = MessageSet(chat, thinking_id)
+
+ mark_head = False
+ first_bot_msg = None
+ for msg in response_set:
+ message_segment = Seg(type="text", data=msg)
+ bot_message = MessageSending(
+ message_id=thinking_id,
+ chat_stream=chat,
+ bot_user_info=UserInfo(
+ user_id=global_config.BOT_QQ,
+ user_nickname=global_config.BOT_NICKNAME,
+ platform=message.message_info.platform,
+ ),
+ sender_info=message.message_info.user_info,
+ message_segment=message_segment,
+ reply=message,
+ is_head=not mark_head,
+ is_emoji=False,
+ thinking_start_time=thinking_start_time,
+ )
+ if not mark_head:
+ mark_head = True
+ first_bot_msg = bot_message
+ message_set.add_message(bot_message)
+ message_manager.add_message(message_set)
+
+ return first_bot_msg
+
+ @staticmethod
+ async def _handle_emoji(message, chat, response):
+ """处理表情包"""
+ if random() < global_config.emoji_chance:
+ emoji_raw = await emoji_manager.get_emoji_for_text(response)
+ if emoji_raw:
+ emoji_path, description = emoji_raw
+ emoji_cq = image_path_to_base64(emoji_path)
+
+ thinking_time_point = round(message.message_info.time, 2)
+
+ message_segment = Seg(type="emoji", data=emoji_cq)
+ bot_message = MessageSending(
+ message_id="mt" + str(thinking_time_point),
+ chat_stream=chat,
+ bot_user_info=UserInfo(
+ user_id=global_config.BOT_QQ,
+ user_nickname=global_config.BOT_NICKNAME,
+ platform=message.message_info.platform,
+ ),
+ sender_info=message.message_info.user_info,
+ message_segment=message_segment,
+ reply=message,
+ is_head=False,
+ is_emoji=True,
+ )
+ message_manager.add_message(bot_message)
+
+ async def _update_relationship(self, message: MessageRecv, response_set):
+ """更新关系情绪"""
+ ori_response = ",".join(response_set)
+ stance, emotion = await self.gpt._get_emotion_tags(ori_response, message.processed_plain_text)
+ await relationship_manager.calculate_update_relationship_value(
+ chat_stream=message.chat_stream, label=emotion, stance=stance
+ )
+ self.mood_manager.update_mood_from_emotion(emotion, global_config.mood_intensity_factor)
+
+ async def _find_interested_message(self, chat: ChatStream) -> None:
+ # 此函数设计为后台任务,轮询指定 chat 的兴趣消息。
+ # 它通常由外部代码在 chat 流活跃时启动。
+ controller = HeartFC_Controller.get_instance() # 获取控制器实例
+ if not controller:
+ logger.error(f"无法获取 HeartFC_Controller 实例,无法检查 PFChatting 状态。stream: {chat.stream_id}")
+ # 没有控制器时继续处理消息,仅跳过 PFChatting 活跃状态检查
+
+ while True:
+ await asyncio.sleep(1) # 每秒检查一次
+ interest_chatting = self.interest_manager.get_interest_chatting(chat.stream_id)
+
+ if not interest_chatting:
+ continue
+
+ interest_dict = interest_chatting.interest_dict if interest_chatting.interest_dict else {}
+ items_to_process = list(interest_dict.items())
+
+ if not items_to_process:
+ continue
+
+ for msg_id, (message, interest_value, is_mentioned) in items_to_process:
+ # --- 检查 PFChatting 是否活跃 --- #
+ pf_active = False
+ if controller:
+ pf_active = controller.is_pf_chatting_active(chat.stream_id)
+
+ if pf_active:
+ # 如果 PFChatting 活跃,则跳过处理,直接移除消息
+ removed_item = interest_dict.pop(msg_id, None)
+ if removed_item:
+ logger.debug(f"PFChatting 活跃,已跳过并移除兴趣消息 {msg_id} for stream: {chat.stream_id}")
+ continue # 处理下一条消息
+ # --- 结束检查 --- #
+
+ # 只有当 PFChatting 不活跃时才执行以下处理逻辑
+ try:
+ # logger.debug(f"正在处理消息 {msg_id} for stream: {chat.stream_id}") # 可选调试信息
+ await self.normal_reasoning_chat(
+ message=message,
+ chat=chat,
+ is_mentioned=is_mentioned,
+ interested_rate=interest_value,
+ )
+ # logger.debug(f"处理完成消息 {msg_id}") # 可选调试信息
+ except Exception as e:
+ logger.error(f"处理兴趣消息 {msg_id} 时出错: {e}\n{traceback.format_exc()}")
+ finally:
+ # 无论处理成功与否(且PFChatting不活跃),都尝试从原始字典中移除该消息
+ removed_item = interest_dict.pop(msg_id, None)
+ if removed_item:
+ logger.debug(f"已从兴趣字典中移除消息 {msg_id}")
+
+ async def normal_reasoning_chat(
+ self, message: MessageRecv, chat: ChatStream, is_mentioned: bool, interested_rate: float
+ ) -> None:
+ timing_results = {}
+ userinfo = message.message_info.user_info
+ messageinfo = message.message_info
+
+ is_mentioned, reply_probability = is_mentioned_bot_in_message(message)
+ # 意愿管理器:设置当前message信息
+ willing_manager.setup(message, chat, is_mentioned, interested_rate)
+
+ # 获取回复概率
+ is_willing = False
+ if reply_probability != 1:
+ is_willing = True
+ reply_probability = await willing_manager.get_reply_probability(message.message_info.message_id)
+
+ if message.message_info.additional_config:
+ if "maimcore_reply_probability_gain" in message.message_info.additional_config.keys():
+ reply_probability += message.message_info.additional_config["maimcore_reply_probability_gain"]
+
+ # 打印消息信息
+ mes_name = chat.group_info.group_name if chat.group_info else "私聊"
+ current_time = time.strftime("%H:%M:%S", time.localtime(message.message_info.time))
+ willing_log = f"[回复意愿:{await willing_manager.get_willing(chat.stream_id):.2f}]" if is_willing else ""
+ logger.info(
+ f"[{current_time}][{mes_name}]"
+ f"{chat.user_info.user_nickname}:"
+ f"{message.processed_plain_text}{willing_log}[概率:{reply_probability * 100:.1f}%]"
+ )
+ do_reply = False
+ if random() < reply_probability:
+ do_reply = True
+
+ # 回复前处理
+ await willing_manager.before_generate_reply_handle(message.message_info.message_id)
+
+ # 创建思考消息
+ with Timer("创建思考消息", timing_results):
+ thinking_id = await self._create_thinking_message(message, chat, userinfo, messageinfo)
+
+ logger.debug(f"创建捕捉器,thinking_id:{thinking_id}")
+
+ info_catcher = info_catcher_manager.get_info_catcher(thinking_id)
+ info_catcher.catch_decide_to_response(message)
+
+ # 生成回复
+ try:
+ with Timer("生成回复", timing_results):
+ response_set = await self.gpt.generate_response(message, thinking_id)
+
+ info_catcher.catch_after_generate_response(timing_results["生成回复"])
+ except Exception as e:
+ logger.error(f"回复生成出现错误:{str(e)} {traceback.format_exc()}")
+ response_set = None
+
+ if not response_set:
+ logger.info("为什么生成回复失败?")
+ return
+
+ # 发送消息
+ with Timer("发送消息", timing_results):
+ first_bot_msg = await self._send_response_messages(message, chat, response_set, thinking_id)
+
+ info_catcher.catch_after_response(timing_results["发送消息"], response_set, first_bot_msg)
+
+ info_catcher.done_catch()
+
+ # 处理表情包
+ with Timer("处理表情包", timing_results):
+ await self._handle_emoji(message, chat, response_set)
+
+ # 更新关系情绪
+ with Timer("更新关系情绪", timing_results):
+ await self._update_relationship(message, response_set)
+
+ # 回复后处理
+ await willing_manager.after_generate_reply_handle(message.message_info.message_id)
+
+ # 输出性能计时结果
+ if do_reply:
+ timing_str = " | ".join([f"{step}: {duration:.2f}秒" for step, duration in timing_results.items()])
+ trigger_msg = message.processed_plain_text
+ response_msg = " ".join(response_set) if response_set else "无回复"
+ logger.info(f"触发消息: {trigger_msg[:20]}... | 推理消息: {response_msg[:20]}... | 性能计时: {timing_str}")
+ else:
+ # 不回复处理
+ await willing_manager.not_reply_handle(message.message_info.message_id)
+
+ # 意愿管理器:注销当前message信息
+ willing_manager.delete(message.message_info.message_id)
+
+ @staticmethod
+ def _check_ban_words(text: str, chat, userinfo) -> bool:
+ """检查消息中是否包含过滤词"""
+ for word in global_config.ban_words:
+ if word in text:
+ logger.info(
+ f"[{chat.group_info.group_name if chat.group_info else '私聊'}]{userinfo.user_nickname}:{text}"
+ )
+ logger.info(f"[过滤词识别]消息中含有{word},filtered")
+ return True
+ return False
+
+ @staticmethod
+ def _check_ban_regex(text: str, chat, userinfo) -> bool:
+ """检查消息是否匹配过滤正则表达式"""
+ for pattern in global_config.ban_msgs_regex:
+ if pattern.search(text):
+ logger.info(
+ f"[{chat.group_info.group_name if chat.group_info else '私聊'}]{userinfo.user_nickname}:{text}"
+ )
+ logger.info(f"[正则表达式过滤]消息匹配到{pattern},filtered")
+ return True
+ return False
+
+ async def start_monitoring_interest(self, chat: ChatStream):
+ """为指定的 ChatStream 启动后台兴趣消息监控任务。"""
+ stream_id = chat.stream_id
+ # 检查任务是否已在运行
+ if stream_id in self._interest_monitoring_tasks and not self._interest_monitoring_tasks[stream_id].done():
+ task = self._interest_monitoring_tasks[stream_id]
+ if not task.cancelled(): # 确保任务未被取消
+ logger.info(f"兴趣监控任务已在运行 stream: {stream_id}")
+ return
+ else:
+ logger.info(f"发现已取消的任务,重新创建 stream: {stream_id}")
+ # 如果任务被取消了,允许重新创建
+
+ logger.info(f"启动兴趣监控任务 stream: {stream_id}...")
+ # 创建新的后台任务来运行 _find_interested_message
+ task = asyncio.create_task(self._find_interested_message(chat))
+ self._interest_monitoring_tasks[stream_id] = task
+
+ # 添加回调,当任务完成(或被取消)时,自动从字典中移除
+ task.add_done_callback(lambda t: self._handle_task_completion(stream_id, t))
+
+ def _handle_task_completion(self, stream_id: str, task: asyncio.Task):
+ """处理监控任务完成的回调。"""
+ try:
+ # 检查任务是否因异常而结束
+ exception = task.exception()
+ if exception:
+ logger.error(f"兴趣监控任务 stream {stream_id} 异常结束: {exception}", exc_info=exception)
+ elif task.cancelled():
+ logger.info(f"兴趣监控任务 stream {stream_id} 已被取消。")
+ else:
+ logger.info(f"兴趣监控任务 stream {stream_id} 正常结束。") # 理论上 while True 不会正常结束
+ except asyncio.CancelledError:
+ logger.info(f"兴趣监控任务 stream {stream_id} 在完成处理期间被取消。")
+ finally:
+ # 无论如何都从字典中移除
+ removed_task = self._interest_monitoring_tasks.pop(stream_id, None)
+ if removed_task:
+ logger.debug(f"已从监控任务字典移除 stream: {stream_id}")
+
+ async def stop_monitoring_interest(self, stream_id: str):
+ """停止指定 stream_id 的兴趣消息监控任务。"""
+ if stream_id in self._interest_monitoring_tasks:
+ task = self._interest_monitoring_tasks[stream_id]
+ if not task.done():
+ logger.info(f"正在停止兴趣监控任务 stream: {stream_id}...")
+ task.cancel() # 请求取消任务
+ try:
+ # 等待任务实际被取消(可选,提供更明确的停止)
+ # 设置超时以防万一
+ await asyncio.wait_for(task, timeout=5.0)
+ except asyncio.CancelledError:
+ logger.info(f"兴趣监控任务 stream {stream_id} 已确认取消。")
+ except asyncio.TimeoutError:
+ logger.warning(f"停止兴趣监控任务 stream {stream_id} 超时。任务可能仍在运行。")
+ except Exception as e:
+ # 捕获 task.exception() 可能在取消期间重新引发的错误
+ logger.error(f"停止兴趣监控任务 stream {stream_id} 时发生错误: {e}")
+ # 任务最终会由 done_callback 移除,或在这里再次确认移除
+ self._interest_monitoring_tasks.pop(stream_id, None)
+ else:
+ logger.warning(f"尝试停止不存在或已停止的监控任务 stream: {stream_id}")
diff --git a/src/plugins/chat_module/heartFC_chat/reasoning_generator.py b/src/plugins/chat_module/heartFC_chat/reasoning_generator.py
new file mode 100644
index 00000000..2f4ba06e
--- /dev/null
+++ b/src/plugins/chat_module/heartFC_chat/reasoning_generator.py
@@ -0,0 +1,199 @@
+from typing import List, Optional, Union
+import random
+
+from ...models.utils_model import LLMRequest
+from ....config.config import global_config
+from ...chat.message import MessageThinking
+from .reasoning_prompt_builder import prompt_builder
+from ...chat.utils import process_llm_response
+from ...utils.timer_calculater import Timer
+from src.common.logger import get_module_logger, LogConfig, LLM_STYLE_CONFIG
+from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
+
+# 定义日志配置
+llm_config = LogConfig(
+ # 使用消息发送专用样式
+ console_format=LLM_STYLE_CONFIG["console_format"],
+ file_format=LLM_STYLE_CONFIG["file_format"],
+)
+
+logger = get_module_logger("llm_generator", config=llm_config)
+
+
+class ResponseGenerator:
+ def __init__(self):
+ self.model_reasoning = LLMRequest(
+ model=global_config.llm_reasoning,
+ temperature=0.7,
+ max_tokens=3000,
+ request_type="response_reasoning",
+ )
+ self.model_normal = LLMRequest(
+ model=global_config.llm_normal,
+ temperature=global_config.llm_normal["temp"],
+ max_tokens=256,
+ request_type="response_reasoning",
+ )
+
+ self.model_sum = LLMRequest(
+ model=global_config.llm_summary_by_topic, temperature=0.7, max_tokens=3000, request_type="relation"
+ )
+ self.current_model_type = "r1" # 默认使用 R1
+ self.current_model_name = "unknown model"
+
+ async def generate_response(self, message: MessageThinking, thinking_id: str) -> Optional[Union[str, List[str]]]:
+ """根据当前模型类型选择对应的生成函数"""
+ # 从global_config中获取模型概率值并选择模型
+ if random.random() < global_config.model_reasoning_probability:
+ self.current_model_type = "深深地"
+ current_model = self.model_reasoning
+ else:
+ self.current_model_type = "浅浅的"
+ current_model = self.model_normal
+
+ logger.info(
+ f"{self.current_model_type}思考:{message.processed_plain_text[:30] + '...' if len(message.processed_plain_text) > 30 else message.processed_plain_text}"
+ ) # noqa: E501
+
+ model_response = await self._generate_response_with_model(message, current_model, thinking_id)
+
+ # print(f"raw_content: {model_response}")
+
+ if model_response:
+ logger.info(f"{global_config.BOT_NICKNAME}的回复是:{model_response}")
+ model_response = await self._process_response(model_response)
+
+ return model_response
+ else:
+ logger.info(f"{self.current_model_type}思考,失败")
+ return None
+
+ async def _generate_response_with_model(self, message: MessageThinking, model: LLMRequest, thinking_id: str):
+ info_catcher = info_catcher_manager.get_info_catcher(thinking_id)
+
+ if message.chat_stream.user_info.user_cardname and message.chat_stream.user_info.user_nickname:
+ sender_name = (
+ f"[({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}]"
+ f"{message.chat_stream.user_info.user_cardname}"
+ )
+ elif message.chat_stream.user_info.user_nickname:
+ sender_name = f"({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}"
+ else:
+ sender_name = f"用户({message.chat_stream.user_info.user_id})"
+
+ logger.debug("开始使用生成回复-2")
+ # 构建prompt
+ with Timer() as t_build_prompt:
+ prompt = await prompt_builder._build_prompt(
+ message.chat_stream,
+ message_txt=message.processed_plain_text,
+ sender_name=sender_name,
+ stream_id=message.chat_stream.stream_id,
+ )
+ logger.info(f"构建prompt时间: {t_build_prompt.human_readable}")
+
+ try:
+ content, reasoning_content, self.current_model_name = await model.generate_response(prompt)
+
+ info_catcher.catch_after_llm_generated(
+ prompt=prompt, response=content, reasoning_content=reasoning_content, model_name=self.current_model_name
+ )
+
+ except Exception:
+ logger.exception("生成回复时出错")
+ return None
+
+ # 保存到数据库
+ # self._save_to_db(
+ # message=message,
+ # sender_name=sender_name,
+ # prompt=prompt,
+ # content=content,
+ # reasoning_content=reasoning_content,
+ # # reasoning_content_check=reasoning_content_check if global_config.enable_kuuki_read else ""
+ # )
+
+ return content
+
+ # def _save_to_db(
+ # self,
+ # message: MessageRecv,
+ # sender_name: str,
+ # prompt: str,
+ # content: str,
+ # reasoning_content: str,
+ # ):
+ # """保存对话记录到数据库"""
+ # db.reasoning_logs.insert_one(
+ # {
+ # "time": time.time(),
+ # "chat_id": message.chat_stream.stream_id,
+ # "user": sender_name,
+ # "message": message.processed_plain_text,
+ # "model": self.current_model_name,
+ # "reasoning": reasoning_content,
+ # "response": content,
+ # "prompt": prompt,
+ # }
+ # )
+
+ async def _get_emotion_tags(self, content: str, processed_plain_text: str):
+ """提取情感标签,结合立场和情绪"""
+ try:
+ # 构建提示词,结合回复内容、被回复的内容以及立场分析
+ prompt = f"""
+ 请严格根据以下对话内容,完成以下任务:
+ 1. 判断回复者对被回复者观点的直接立场:
+ - "支持":明确同意或强化被回复者观点
+ - "反对":明确反驳或否定被回复者观点
+ - "中立":不表达明确立场或无关回应
+ 2. 从"开心,愤怒,悲伤,惊讶,平静,害羞,恐惧,厌恶,困惑"中选出最匹配的1个情感标签
+ 3. 按照"立场-情绪"的格式直接输出结果,例如:"反对-愤怒"
+ 4. 考虑回复者的人格设定为{global_config.personality_core}
+
+ 对话示例:
+ 被回复:「A就是笨」
+ 回复:「A明明很聪明」 → 反对-愤怒
+
+ 当前对话:
+ 被回复:「{processed_plain_text}」
+ 回复:「{content}」
+
+ 输出要求:
+ - 只需输出"立场-情绪"结果,不要解释
+ - 严格基于文字直接表达的对立关系判断
+ """
+
+ # 调用模型生成结果
+ result, _, _ = await self.model_sum.generate_response(prompt)
+ result = result.strip()
+
+ # 解析模型输出的结果
+ if "-" in result:
+ stance, emotion = result.split("-", 1)
+ valid_stances = ["支持", "反对", "中立"]
+ valid_emotions = ["开心", "愤怒", "悲伤", "惊讶", "害羞", "平静", "恐惧", "厌恶", "困惑"]
+ if stance in valid_stances and emotion in valid_emotions:
+ return stance, emotion # 返回有效的立场-情绪组合
+ else:
+ logger.debug(f"无效立场-情感组合:{result}")
+ return "中立", "平静" # 默认返回中立-平静
+ else:
+ logger.debug(f"立场-情感格式错误:{result}")
+ return "中立", "平静" # 格式错误时返回默认值
+
+ except Exception as e:
+ logger.debug(f"获取情感标签时出错: {e}")
+ return "中立", "平静" # 出错时返回默认值
+
+ @staticmethod
+ async def _process_response(content: str) -> Optional[List[str]]:
+ """处理响应内容,返回处理后的回复内容列表"""
+ if not content:
+ return None
+
+ processed_response = process_llm_response(content)
+
+ # print(f"得到了处理后的llm返回{processed_response}")
+
+ return processed_response
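`generate_response` above routes each message with a single uniform draw against `model_reasoning_probability`. A sketch of that selection in isolation; the probability value is illustrative:

    import random

    def pick_model(model_reasoning_probability: float = 0.7) -> str:
        # 单次均匀采样决定走推理模型还是普通模型
        return "llm_reasoning" if random.random() < model_reasoning_probability else "llm_normal"

    counts = {"llm_reasoning": 0, "llm_normal": 0}
    for _ in range(10_000):
        counts[pick_model()] += 1
    print(counts)  # 比例约为 7:3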
diff --git a/src/plugins/chat_module/heartFC_chat/reasoning_prompt_builder.py b/src/plugins/chat_module/heartFC_chat/reasoning_prompt_builder.py
new file mode 100644
index 00000000..d37d6545
--- /dev/null
+++ b/src/plugins/chat_module/heartFC_chat/reasoning_prompt_builder.py
@@ -0,0 +1,445 @@
+import random
+import time
+from typing import Optional, Union
+
+from ....common.database import db
+from ...chat.utils import get_embedding, get_recent_group_detailed_plain_text, get_recent_group_speaker
+from ...chat.chat_stream import chat_manager
+from ...moods.moods import MoodManager
+from ....individuality.individuality import Individuality
+from ...memory_system.Hippocampus import HippocampusManager
+from ...schedule.schedule_generator import bot_schedule
+from ....config.config import global_config
+from ...person_info.relationship_manager import relationship_manager
+from src.common.logger import get_module_logger
+from src.plugins.utils.prompt_builder import Prompt, global_prompt_manager
+
+logger = get_module_logger("prompt")
+
+
+def init_prompt():
+ Prompt(
+ """
+{relation_prompt_all}
+{memory_prompt}
+{prompt_info}
+{schedule_prompt}
+{chat_target}
+{chat_talking_prompt}
+现在"{sender_name}"说的:{message_txt}。引起了你的注意,你想要在群里发言发言或者回复这条消息。\n
+你的网名叫{bot_name},有人也叫你{bot_other_names},{prompt_personality}。
+你正在{chat_target_2},现在请你读读之前的聊天记录,{mood_prompt},然后给出日常且口语化的回复,平淡一些,
+尽量简短一些。{keywords_reaction_prompt}请注意把握聊天内容,不要回复的太有条理,可以有个性。{prompt_ger}
+请回复的平淡一些,简短一些,说中文,不要刻意突出自身学科背景,尽量不要说你说过的话
+请注意不要输出多余内容(包括前后缀,冒号和引号,括号,表情等),只输出回复内容。
+{moderation_prompt}不要输出多余内容(包括前后缀,冒号和引号,括号,表情包,at或 @等 )。""",
+ "reasoning_prompt_main",
+ )
+ Prompt(
+ "{relation_prompt}关系等级越大,关系越好,请分析聊天记录,根据你和说话者{sender_name}的关系和态度进行回复,明确你的立场和情感。",
+ "relationship_prompt",
+ )
+ Prompt(
+ "你想起你之前见过的事情:{related_memory_info}。\n以上是你的回忆,不一定是目前聊天里的人说的,也不一定是现在发生的事情,请记住。\n",
+ "memory_prompt",
+ )
+ Prompt("你现在正在做的事情是:{schedule_info}", "schedule_prompt")
+ Prompt("\n你有以下这些**知识**:\n{prompt_info}\n请你**记住上面的知识**,之后可能会用到。\n", "knowledge_prompt")
+
+
+class PromptBuilder:
+ def __init__(self):
+ self.prompt_built = ""
+ self.activate_messages = ""
+
+ async def _build_prompt(
+ self, chat_stream, message_txt: str, sender_name: str = "某人", stream_id: Optional[str] = None
+ ) -> str:
+ # 开始构建prompt
+ prompt_personality = "你"
+ # person
+ individuality = Individuality.get_instance()
+
+ personality_core = individuality.personality.personality_core
+ prompt_personality += personality_core
+
+ personality_sides = individuality.personality.personality_sides
+ random.shuffle(personality_sides)
+ prompt_personality += f",{personality_sides[0]}"
+
+ identity_detail = individuality.identity.identity_detail
+ random.shuffle(identity_detail)
+ prompt_personality += f",{identity_detail[0]}"
+
+ # 关系
+ who_chat_in_group = [
+ (chat_stream.user_info.platform, chat_stream.user_info.user_id, chat_stream.user_info.user_nickname)
+ ]
+ who_chat_in_group += get_recent_group_speaker(
+ stream_id,
+ (chat_stream.user_info.platform, chat_stream.user_info.user_id),
+ limit=global_config.MAX_CONTEXT_SIZE,
+ )
+
+ relation_prompt = ""
+ for person in who_chat_in_group:
+ relation_prompt += await relationship_manager.build_relationship_info(person)
+
+ # relation_prompt_all = (
+ # f"{relation_prompt}关系等级越大,关系越好,请分析聊天记录,"
+ # f"根据你和说话者{sender_name}的关系和态度进行回复,明确你的立场和情感。"
+ # )
+
+ # 心情
+ mood_manager = MoodManager.get_instance()
+ mood_prompt = mood_manager.get_prompt()
+
+ # logger.info(f"心情prompt: {mood_prompt}")
+
+ # 调取记忆
+ memory_prompt = ""
+ related_memory = await HippocampusManager.get_instance().get_memory_from_text(
+ text=message_txt, max_memory_num=2, max_memory_length=2, max_depth=3, fast_retrieval=False
+ )
+ related_memory_info = ""
+ if related_memory:
+ for memory in related_memory:
+ related_memory_info += memory[1]
+ # memory_prompt = f"你想起你之前见过的事情:{related_memory_info}。\n以上是你的回忆,不一定是目前聊天里的人说的,也不一定是现在发生的事情,请记住。\n"
+ memory_prompt = await global_prompt_manager.format_prompt(
+ "memory_prompt", related_memory_info=related_memory_info
+ )
+
+ # print(f"相关记忆:{related_memory_info}")
+
+ # 日程构建
+ # schedule_prompt = f"""你现在正在做的事情是:{bot_schedule.get_current_num_task(num=1, time_info=False)}"""
+
+ # 获取聊天上下文
+ chat_in_group = True
+ chat_talking_prompt = ""
+ if stream_id:
+ chat_talking_prompt = get_recent_group_detailed_plain_text(
+ stream_id, limit=global_config.MAX_CONTEXT_SIZE, combine=True
+ )
+ chat_stream = chat_manager.get_stream(stream_id)
+ if not chat_stream.group_info:
+ chat_in_group = False
+ # print(f"\033[1;34m[调试]\033[0m 已从数据库获取群 {group_id} 的消息记录:{chat_talking_prompt}")
+ # 关键词检测与反应
+ keywords_reaction_prompt = ""
+ for rule in global_config.keywords_reaction_rules:
+ if rule.get("enable", False):
+ if any(keyword in message_txt.lower() for keyword in rule.get("keywords", [])):
+ logger.info(
+ f"检测到以下关键词之一:{rule.get('keywords', [])},触发反应:{rule.get('reaction', '')}"
+ )
+ keywords_reaction_prompt += rule.get("reaction", "") + ","
+ else:
+ for pattern in rule.get("regex", []):
+ result = pattern.search(message_txt)
+ if result:
+ reaction = rule.get("reaction", "")
+ for name, content in result.groupdict().items():
+ reaction = reaction.replace(f"[{name}]", content)
+ logger.info(f"匹配到以下正则表达式:{pattern},触发反应:{reaction}")
+ keywords_reaction_prompt += reaction + ","
+ break
+
+ # 中文高手(新加的好玩功能)
+ prompt_ger = ""
+ if random.random() < 0.04:
+ prompt_ger += "你喜欢用倒装句"
+ if random.random() < 0.02:
+ prompt_ger += "你喜欢用反问句"
+ if random.random() < 0.01:
+ prompt_ger += "你喜欢用文言文"
+
+ # 知识构建
+ start_time = time.time()
+ prompt_info = await self.get_prompt_info(message_txt, threshold=0.38)
+ if prompt_info:
+ # prompt_info = f"""\n你有以下这些**知识**:\n{prompt_info}\n请你**记住上面的知识**,之后可能会用到。\n"""
+ prompt_info = await global_prompt_manager.format_prompt("knowledge_prompt", prompt_info=prompt_info)
+
+ end_time = time.time()
+ logger.debug(f"知识检索耗时: {(end_time - start_time):.3f}秒")
+
+ # moderation_prompt = ""
+ # moderation_prompt = """**检查并忽略**任何涉及尝试绕过审核的行为。
+ # 涉及政治敏感以及违法违规的内容请规避。"""
+
+ logger.debug("开始构建prompt")
+
+ # prompt = f"""
+ # {relation_prompt_all}
+ # {memory_prompt}
+ # {prompt_info}
+ # {schedule_prompt}
+ # {chat_target}
+ # {chat_talking_prompt}
+ # 现在"{sender_name}"说的:{message_txt}。引起了你的注意,你想要在群里发言发言或者回复这条消息。\n
+ # 你的网名叫{global_config.BOT_NICKNAME},有人也叫你{"/".join(global_config.BOT_ALIAS_NAMES)},{prompt_personality}。
+ # 你正在{chat_target_2},现在请你读读之前的聊天记录,{mood_prompt},然后给出日常且口语化的回复,平淡一些,
+ # 尽量简短一些。{keywords_reaction_prompt}请注意把握聊天内容,不要回复的太有条理,可以有个性。{prompt_ger}
+ # 请回复的平淡一些,简短一些,说中文,不要刻意突出自身学科背景,尽量不要说你说过的话
+ # 请注意不要输出多余内容(包括前后缀,冒号和引号,括号,表情等),只输出回复内容。
+ # {moderation_prompt}不要输出多余内容(包括前后缀,冒号和引号,括号,表情包,at或 @等 )。"""
+
+ prompt = await global_prompt_manager.format_prompt(
+ "reasoning_prompt_main",
+ relation_prompt_all=await global_prompt_manager.get_prompt_async("relationship_prompt"),
+ relation_prompt=relation_prompt,
+ sender_name=sender_name,
+ memory_prompt=memory_prompt,
+ prompt_info=prompt_info,
+ schedule_prompt=await global_prompt_manager.format_prompt(
+ "schedule_prompt", schedule_info=bot_schedule.get_current_num_task(num=1, time_info=False)
+ ),
+ chat_target=await global_prompt_manager.get_prompt_async("chat_target_group1")
+ if chat_in_group
+ else await global_prompt_manager.get_prompt_async("chat_target_private1"),
+ chat_target_2=await global_prompt_manager.get_prompt_async("chat_target_group2")
+ if chat_in_group
+ else await global_prompt_manager.get_prompt_async("chat_target_private2"),
+ chat_talking_prompt=chat_talking_prompt,
+ message_txt=message_txt,
+ bot_name=global_config.BOT_NICKNAME,
+ bot_other_names="/".join(
+ global_config.BOT_ALIAS_NAMES,
+ ),
+ prompt_personality=prompt_personality,
+ mood_prompt=mood_prompt,
+ keywords_reaction_prompt=keywords_reaction_prompt,
+ prompt_ger=prompt_ger,
+ moderation_prompt=await global_prompt_manager.get_prompt_async("moderation_prompt"),
+ )
+
+ return prompt
+
+ async def get_prompt_info(self, message: str, threshold: float) -> str:
+ start_time = time.time()
+ related_info = ""
+ logger.debug(f"获取知识库内容,元消息:{message[:30]}...,消息长度: {len(message)}")
+
+ # 1. 先从LLM获取主题,类似于记忆系统的做法
+ topics = []
+ # try:
+ # # 先尝试使用记忆系统的方法获取主题
+ # hippocampus = HippocampusManager.get_instance()._hippocampus
+ # topic_num = min(5, max(1, int(len(message) * 0.1)))
+ # topics_response = await hippocampus.llm_topic_judge.generate_response(hippocampus.find_topic_llm(message, topic_num))
+
+ # # 提取关键词
+ # topics = re.findall(r"<([^>]+)>", topics_response[0])
+ # if not topics:
+ # topics = []
+ # else:
+ # topics = [
+ # topic.strip()
+ # for topic in ",".join(topics).replace(",", ",").replace("、", ",").replace(" ", ",").split(",")
+ # if topic.strip()
+ # ]
+
+ # logger.info(f"从LLM提取的主题: {', '.join(topics)}")
+ # except Exception as e:
+ # logger.error(f"从LLM提取主题失败: {str(e)}")
+ # # 如果LLM提取失败,使用jieba分词提取关键词作为备选
+ # words = jieba.cut(message)
+ # topics = [word for word in words if len(word) > 1][:5]
+ # logger.info(f"使用jieba提取的主题: {', '.join(topics)}")
+
+ # 如果无法提取到主题,直接使用整个消息
+ if not topics:
+ logger.info("未能提取到任何主题,使用整个消息进行查询")
+ embedding = await get_embedding(message, request_type="prompt_build")
+ if not embedding:
+ logger.error("获取消息嵌入向量失败")
+ return ""
+
+ related_info = self.get_info_from_db(embedding, limit=3, threshold=threshold)
+ logger.info(f"知识库检索完成,总耗时: {time.time() - start_time:.3f}秒")
+ return related_info
+
+ # 2. 批量获取主题与原始消息的嵌入向量
+ logger.info(f"开始处理{len(topics)}个主题的知识库查询")
+
+ # 优化:批量获取嵌入向量,减少API调用
+ embeddings = {}
+ topics_batch = [topic for topic in topics if len(topic) > 0]
+ if message: # 确保消息非空
+ topics_batch.append(message)
+
+ # 批量获取嵌入向量
+ embed_start_time = time.time()
+ for text in topics_batch:
+ if not text or len(text.strip()) == 0:
+ continue
+
+ try:
+ embedding = await get_embedding(text, request_type="prompt_build")
+ if embedding:
+ embeddings[text] = embedding
+ else:
+ logger.warning(f"获取'{text}'的嵌入向量失败")
+ except Exception as e:
+ logger.error(f"获取'{text}'的嵌入向量时发生错误: {str(e)}")
+
+ logger.info(f"批量获取嵌入向量完成,耗时: {time.time() - embed_start_time:.3f}秒")
+
+ if not embeddings:
+ logger.error("所有嵌入向量获取失败")
+ return ""
+
+ # 3. 对每个主题进行知识库查询
+ all_results = []
+ query_start_time = time.time()
+
+ # 首先添加原始消息的查询结果
+ if message in embeddings:
+ original_results = self.get_info_from_db(embeddings[message], limit=3, threshold=threshold, return_raw=True)
+ if original_results:
+ for result in original_results:
+ result["topic"] = "原始消息"
+ all_results.extend(original_results)
+ logger.info(f"原始消息查询到{len(original_results)}条结果")
+
+ # 然后添加每个主题的查询结果
+ for topic in topics:
+ if not topic or topic not in embeddings:
+ continue
+
+ try:
+ topic_results = self.get_info_from_db(embeddings[topic], limit=3, threshold=threshold, return_raw=True)
+ if topic_results:
+ # 添加主题标记
+ for result in topic_results:
+ result["topic"] = topic
+ all_results.extend(topic_results)
+ logger.info(f"主题'{topic}'查询到{len(topic_results)}条结果")
+ except Exception as e:
+ logger.error(f"查询主题'{topic}'时发生错误: {str(e)}")
+
+ logger.info(f"知识库查询完成,耗时: {time.time() - query_start_time:.3f}秒,共获取{len(all_results)}条结果")
+
+ # 4. 去重和过滤
+ process_start_time = time.time()
+ unique_contents = set()
+ filtered_results = []
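+ # The same knowledge entry can come back for the raw message and for several
+ # topics, so deduplicate on exact content, keeping the first occurrence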
+ for result in all_results:
+ content = result["content"]
+ if content not in unique_contents:
+ unique_contents.add(content)
+ filtered_results.append(result)
+
+ # 5. 按相似度排序
+ filtered_results.sort(key=lambda x: x["similarity"], reverse=True)
+
+ # 6. 限制总数量(最多10条)
+ filtered_results = filtered_results[:10]
+ logger.info(
+ f"结果处理完成,耗时: {time.time() - process_start_time:.3f}秒,过滤后剩余{len(filtered_results)}条结果"
+ )
+
+ # 7. 格式化输出
+ if filtered_results:
+ format_start_time = time.time()
+ grouped_results = {}
+ for result in filtered_results:
+ topic = result["topic"]
+ if topic not in grouped_results:
+ grouped_results[topic] = []
+ grouped_results[topic].append(result)
+
+ # 按主题组织输出
+ for topic, results in grouped_results.items():
+ related_info += f"【主题: {topic}】\n"
+ for _i, result in enumerate(results, 1):
+ _similarity = result["similarity"]
+ content = result["content"].strip()
+ # 调试:为内容添加序号和相似度信息
+ # related_info += f"{i}. [{similarity:.2f}] {content}\n"
+ related_info += f"{content}\n"
+ related_info += "\n"
+
+ logger.info(f"格式化输出完成,耗时: {time.time() - format_start_time:.3f}秒")
+
+ logger.info(f"知识库检索总耗时: {time.time() - start_time:.3f}秒")
+ return related_info
+
+ @staticmethod
+ def get_info_from_db(
+ query_embedding: list, limit: int = 1, threshold: float = 0.5, return_raw: bool = False
+ ) -> Union[str, list]:
+ if not query_embedding:
+ return "" if not return_raw else []
+ # 使用余弦相似度计算
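+ # Cosine similarity: sim(a, b) = (a · b) / (|a| * |b|)
+ # The three $addFields stages below compute, per stored document:
+ # dotProduct -- sum_i a_i * b_i between the stored and the query embedding
+ # magnitude1 -- sqrt(sum_i a_i^2) of the stored embedding
+ # magnitude2 -- sqrt(sum_i b_i^2) of the query embedding (identical for every document)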
+ pipeline = [
+ {
+ "$addFields": {
+ "dotProduct": {
+ "$reduce": {
+ "input": {"$range": [0, {"$size": "$embedding"}]},
+ "initialValue": 0,
+ "in": {
+ "$add": [
+ "$$value",
+ {
+ "$multiply": [
+ {"$arrayElemAt": ["$embedding", "$$this"]},
+ {"$arrayElemAt": [query_embedding, "$$this"]},
+ ]
+ },
+ ]
+ },
+ }
+ },
+ "magnitude1": {
+ "$sqrt": {
+ "$reduce": {
+ "input": "$embedding",
+ "initialValue": 0,
+ "in": {"$add": ["$$value", {"$multiply": ["$$this", "$$this"]}]},
+ }
+ }
+ },
+ "magnitude2": {
+ "$sqrt": {
+ "$reduce": {
+ "input": query_embedding,
+ "initialValue": 0,
+ "in": {"$add": ["$$value", {"$multiply": ["$$this", "$$this"]}]},
+ }
+ }
+ },
+ }
+ },
+ {"$addFields": {"similarity": {"$divide": ["$dotProduct", {"$multiply": ["$magnitude1", "$magnitude2"]}]}}},
+ {
+ "$match": {
+ "similarity": {"$gte": threshold} # 只保留相似度大于等于阈值的结果
+ }
+ },
+ {"$sort": {"similarity": -1}},
+ {"$limit": limit},
+ {"$project": {"content": 1, "similarity": 1}},
+ ]
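+ # The whole similarity computation runs server-side in MongoDB; only the
+ # top `limit` documents, projected down to content + similarity, are returned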
+
+ results = list(db.knowledges.aggregate(pipeline))
+ logger.debug(f"知识库查询结果数量: {len(results)}")
+
+ if not results:
+ return "" if not return_raw else []
+
+ if return_raw:
+ return results
+ else:
+ # 返回所有找到的内容,用换行分隔
+ return "\n".join(str(result["content"]) for result in results)
+
+
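+# Minimal usage sketch (illustrative; assumes a query embedding is already available):
+# embedding = await get_embedding("查询文本", request_type="prompt_build")
+# info_text = prompt_builder.get_info_from_db(embedding, limit=3, threshold=0.5)
+# raw_hits = prompt_builder.get_info_from_db(embedding, limit=3, threshold=0.5, return_raw=True)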
+init_prompt()
+prompt_builder = PromptBuilder()
diff --git a/src/plugins/chat_module/reasoning_chat/reasoning_chat.py b/src/plugins/chat_module/reasoning_chat/reasoning_chat.py
index d149f68b..be1c6628 100644
--- a/src/plugins/chat_module/reasoning_chat/reasoning_chat.py
+++ b/src/plugins/chat_module/reasoning_chat/reasoning_chat.py
@@ -156,17 +156,17 @@ class ReasoningChat:
# 消息加入缓冲池
await message_buffer.start_caching_messages(message)
- # logger.info("使用推理聊天模式")
-
# 创建聊天流
chat = await chat_manager.get_or_create_stream(
platform=messageinfo.platform,
user_info=userinfo,
group_info=groupinfo,
)
+
message.update_chat_stream(chat)
await message.process()
+ logger.trace(f"消息处理成功: {message.processed_plain_text}")
# 过滤词/正则表达式过滤
if self._check_ban_words(message.processed_plain_text, chat, userinfo) or self._check_ban_regex(
@@ -174,27 +174,13 @@ class ReasoningChat:
):
return
- await self.storage.store_message(message, chat)
-
- # 记忆激活
- with Timer("记忆激活", timing_results):
- interested_rate = await HippocampusManager.get_instance().get_activate_from_text(
- message.processed_plain_text, fast_retrieval=True
- )
-
# 查询缓冲器结果,会整合前面跳过的消息,改变processed_plain_text
buffer_result = await message_buffer.query_buffer_result(message)
- # 处理提及
- is_mentioned, reply_probability = is_mentioned_bot_in_message(message)
-
- # 意愿管理器:设置当前message信息
- willing_manager.setup(message, chat, is_mentioned, interested_rate)
-
# 处理缓冲器结果
if not buffer_result:
- await willing_manager.bombing_buffer_message_handle(message.message_info.message_id)
- willing_manager.delete(message.message_info.message_id)
+ # await willing_manager.bombing_buffer_message_handle(message.message_info.message_id)
+ # willing_manager.delete(message.message_info.message_id)
f_type = "seglist"
if message.message_segment.type != "seglist":
f_type = message.message_segment.type
@@ -213,6 +199,27 @@ class ReasoningChat:
logger.info("触发缓冲,已炸飞消息列")
return
+ try:
+ await self.storage.store_message(message, chat)
+ logger.trace(f"存储成功 (通过缓冲后): {message.processed_plain_text}")
+ except Exception as e:
+ logger.error(f"存储消息失败: {e}")
+ logger.error(traceback.format_exc())
+ # 存储失败可能仍需考虑是否继续,暂时返回
+ return
+
+ # 处理提及
+ is_mentioned, reply_probability = is_mentioned_bot_in_message(message)
+ # 记忆激活
+ with Timer("记忆激活", timing_results):
+ interested_rate = await HippocampusManager.get_instance().get_activate_from_text(
+ message.processed_plain_text, fast_retrieval=True
+ )
+
+ # 意愿管理器:设置当前message信息
+ willing_manager.setup(message, chat, is_mentioned, interested_rate)
+
# 获取回复概率
is_willing = False
if reply_probability != 1:
diff --git a/src/plugins/chat_module/reasoning_chat/reasoning_generator.py b/src/plugins/chat_module/reasoning_chat/reasoning_generator.py
index dda4e7c7..2f4ba06e 100644
--- a/src/plugins/chat_module/reasoning_chat/reasoning_generator.py
+++ b/src/plugins/chat_module/reasoning_chat/reasoning_generator.py
@@ -44,7 +44,7 @@ class ResponseGenerator:
async def generate_response(self, message: MessageThinking, thinking_id: str) -> Optional[Union[str, List[str]]]:
"""根据当前模型类型选择对应的生成函数"""
# 从global_config中获取模型概率值并选择模型
- if random.random() < global_config.MODEL_R1_PROBABILITY:
+ if random.random() < global_config.model_reasoning_probability:
self.current_model_type = "深深地"
current_model = self.model_reasoning
else:
diff --git a/src/plugins/memory_system/Hippocampus.py b/src/plugins/memory_system/Hippocampus.py
index 4b40649d..5ccdec5a 100644
--- a/src/plugins/memory_system/Hippocampus.py
+++ b/src/plugins/memory_system/Hippocampus.py
@@ -1942,11 +1942,7 @@ class HippocampusManager:
return response
async def get_memory_from_topic(
- self,
- valid_keywords: list[str],
- max_memory_num: int = 3,
- max_memory_length: int = 2,
- max_depth: int = 3
+ self, valid_keywords: list[str], max_memory_num: int = 3, max_memory_length: int = 2, max_depth: int = 3
) -> list:
"""从文本中获取相关记忆的公共接口"""
if not self._initialized:
From ac7b300326d51424b769cc1f1ddd047558aabae5 Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Mon, 21 Apr 2025 18:46:08 +0800
Subject: [PATCH 16/26] Update main.py
---
src/main.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/src/main.py b/src/main.py
index 05196068..12b81877 100644
--- a/src/main.py
+++ b/src/main.py
@@ -18,7 +18,7 @@ from .plugins.remote import heartbeat_thread # noqa: F401
from .individuality.individuality import Individuality
from .common.server import global_server
from .plugins.chat_module.heartFC_chat.interest import InterestManager
-from .plugins.chat_module.heartFC_chat.heartFC_controler import HeartFC_Controller
+from .plugins.chat_module.heartFC_chat.heartFC_controler import HeartFCController
from .plugins.chat_module.heartFC_chat.reasoning_chat import ReasoningChat
logger = get_module_logger("main")
@@ -118,9 +118,9 @@ class MainSystem:
await interest_manager.start_background_tasks()
logger.success("兴趣管理器后台任务启动成功")
- # 初始化并独立启动 HeartFC_Chat
- HeartFC_Controller()
- heartfc_chat_instance = HeartFC_Controller.get_instance()
+ # 初始化并独立启动 HeartFCController
+ HeartFCController()
+ heartfc_chat_instance = HeartFCController.get_instance()
if heartfc_chat_instance:
await heartfc_chat_instance.start()
logger.success("HeartFC_Chat 模块独立启动成功")
From 09ff1d9db592d8efc5ebf53b5ca00fb27fc6788d Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Mon, 21 Apr 2025 18:46:40 +0800
Subject: [PATCH 17/26] =?UTF-8?q?fix:=E5=B8=8C=E6=9C=9B=E4=B8=8D=E4=BC=9A?=
=?UTF-8?q?=E7=88=86=E7=82=B8?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/main.py | 1 -
.../heartFC_chat/heartFC_controler.py | 17 ++++++++---------
2 files changed, 8 insertions(+), 10 deletions(-)
diff --git a/src/main.py b/src/main.py
index 12b81877..99578591 100644
--- a/src/main.py
+++ b/src/main.py
@@ -19,7 +19,6 @@ from .individuality.individuality import Individuality
from .common.server import global_server
from .plugins.chat_module.heartFC_chat.interest import InterestManager
from .plugins.chat_module.heartFC_chat.heartFC_controler import HeartFCController
-from .plugins.chat_module.heartFC_chat.reasoning_chat import ReasoningChat
logger = get_module_logger("main")
diff --git a/src/plugins/chat_module/heartFC_chat/heartFC_controler.py b/src/plugins/chat_module/heartFC_chat/heartFC_controler.py
index 51b1a05d..4dd49e2d 100644
--- a/src/plugins/chat_module/heartFC_chat/heartFC_controler.py
+++ b/src/plugins/chat_module/heartFC_chat/heartFC_controler.py
@@ -1,7 +1,6 @@
import traceback
from typing import Optional, Dict
import asyncio
-from asyncio import Lock
import threading # 导入 threading
from ...moods.moods import MoodManager
from ...chat.emoji_manager import emoji_manager
@@ -51,8 +50,8 @@ class HeartFCController:
# 再次使用类锁保护初始化过程是更严谨的做法。
# 如果确定 __init__ 逻辑本身是幂等的或非关键的,可以省略这里的锁。
# 但为了保持原始逻辑的意图(防止重复初始化),这里保留检查。
- with self.__class__._lock: # 确保初始化逻辑线程安全
- if self._initialized: # 再次检查,防止锁等待期间其他线程已完成初始化
+ with self.__class__._lock: # 确保初始化逻辑线程安全
+ if self._initialized: # 再次检查,防止锁等待期间其他线程已完成初始化
return
logger.info("正在初始化 HeartFCController 单例...")
@@ -68,9 +67,9 @@ class HeartFCController:
self._interest_monitor_task: Optional[asyncio.Task] = None
self.pf_chatting_instances: Dict[str, PFChatting] = {}
# _pf_chatting_lock 用于保护 pf_chatting_instances 的异步操作
- self._pf_chatting_lock = asyncio.Lock() # 这个是 asyncio.Lock,用于异步上下文
- self.emoji_manager = emoji_manager # 假设是全局或已初始化的实例
- self.relationship_manager = relationship_manager # 假设是全局或已初始化的实例
+ self._pf_chatting_lock = asyncio.Lock() # 这个是 asyncio.Lock,用于异步上下文
+ self.emoji_manager = emoji_manager # 假设是全局或已初始化的实例
+ self.relationship_manager = relationship_manager # 假设是全局或已初始化的实例
# MessageManager 可能是类本身或单例实例,根据其设计确定
self.MessageManager = MessageManager
self._initialized = True
@@ -81,14 +80,14 @@ class HeartFCController:
"""获取 HeartFCController 的单例实例。"""
# 如果实例尚未创建,调用构造函数(这将触发 __new__ 和 __init__)
if cls._instance is None:
- # 在首次调用 get_instance 时创建实例。
- # __new__ 中的锁会确保线程安全。
+ # 在首次调用 get_instance 时创建实例。
+ # __new__ 中的锁会确保线程安全。
cls()
# 添加日志记录,说明实例是在 get_instance 调用时创建的
logger.info("HeartFCController 实例在首次 get_instance 时创建。")
elif not cls._initialized:
# 实例已创建但可能未初始化完成(理论上不太可能发生,除非 __init__ 异常)
- logger.warning("HeartFCController 实例存在但尚未完成初始化。")
+ logger.warning("HeartFCController 实例存在但尚未完成初始化。")
return cls._instance
# --- 新增:检查 PFChatting 状态的方法 --- #
From 94b1b3c0e6041739bce1aeccf9aee0f2a2635dd0 Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Mon, 21 Apr 2025 18:51:53 +0800
Subject: [PATCH 18/26] =?UTF-8?q?fix=EF=BC=9A=E4=BF=AE=E5=A4=8D=E6=B0=91?=
=?UTF-8?q?=E5=91=BD?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/plugins/chat_module/heartFC_chat/pf_chatting.py | 2 +-
src/plugins/chat_module/heartFC_chat/reasoning_chat.py | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/src/plugins/chat_module/heartFC_chat/pf_chatting.py b/src/plugins/chat_module/heartFC_chat/pf_chatting.py
index e4486a79..92e3da54 100644
--- a/src/plugins/chat_module/heartFC_chat/pf_chatting.py
+++ b/src/plugins/chat_module/heartFC_chat/pf_chatting.py
@@ -70,7 +70,7 @@ class PFChatting:
Args:
chat_id: The identifier for the chat stream (e.g., stream_id).
- heartfc_controller_instance: 访问共享资源和方法的主HeartFC_Controller实例。
+ heartfc_controller_instance: 访问共享资源和方法的主HeartFCController实例。
"""
self.heartfc_controller = heartfc_controller_instance # Store the controller instance
self.stream_id: str = chat_id
diff --git a/src/plugins/chat_module/heartFC_chat/reasoning_chat.py b/src/plugins/chat_module/heartFC_chat/reasoning_chat.py
index 95d3641d..5a9732d9 100644
--- a/src/plugins/chat_module/heartFC_chat/reasoning_chat.py
+++ b/src/plugins/chat_module/heartFC_chat/reasoning_chat.py
@@ -21,7 +21,7 @@ from src.plugins.person_info.relationship_manager import relationship_manager
from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
from src.plugins.utils.timer_calculater import Timer
from .interest import InterestManager
-from .heartFC_controler import HeartFC_Controller # 导入 HeartFC_Controller
+from .heartFC_controler import HeartFCController # 导入 HeartFCController
# 定义日志配置
chat_config = LogConfig(
@@ -181,9 +181,9 @@ class ReasoningChat:
async def _find_interested_message(self, chat: ChatStream) -> None:
# 此函数设计为后台任务,轮询指定 chat 的兴趣消息。
# 它通常由外部代码在 chat 流活跃时启动。
- controller = HeartFC_Controller.get_instance() # 获取控制器实例
+ controller = HeartFCController.get_instance() # 获取控制器实例
if not controller:
- logger.error(f"无法获取 HeartFC_Controller 实例,无法检查 PFChatting 状态。stream: {chat.stream_id}")
+ logger.error(f"无法获取 HeartFCController 实例,无法检查 PFChatting 状态。stream: {chat.stream_id}")
# 在没有控制器的情况下可能需要决定是继续处理还是完全停止?这里暂时假设继续
pass # 或者 return?
From ce1247f2fd3ede228a4ff8e759b336f57d0802e9 Mon Sep 17 00:00:00 2001
From: Bakadax
Date: Mon, 21 Apr 2025 20:36:38 +0800
Subject: [PATCH 19/26] modified: src/plugins/chat/utils.py
---
src/plugins/chat/utils.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/plugins/chat/utils.py b/src/plugins/chat/utils.py
index 3e4cfa52..739fc6c7 100644
--- a/src/plugins/chat/utils.py
+++ b/src/plugins/chat/utils.py
@@ -76,7 +76,7 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]:
else:
if not is_mentioned:
# 判断是否被回复
- if re.match("回复[\s\S]*?\((\d+)\)的消息,说:", message.processed_plain_text):
+ if re.match(f"回复[\s\S]*?\({global_config.BOT_QQ}\)的消息,说:", message.processed_plain_text):
is_mentioned = True
# 判断内容中是否被提及
From ea1a6401f86742aa78361692aa48171aefdde02c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=A2=A8=E6=A2=93=E6=9F=92?= <1787882683@qq.com>
Date: Mon, 21 Apr 2025 22:24:32 +0800
Subject: [PATCH 20/26] 1
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 26cd30f6..7eca2260 100644
--- a/README.md
+++ b/README.md
@@ -98,7 +98,7 @@
-
📚 文档
+📚 文档
### (部分内容可能过时,请注意版本对应)
From 5b894f7f598e39abe08092394d1cd3291ee83ed9 Mon Sep 17 00:00:00 2001
From: tcmofashi
Date: Tue, 22 Apr 2025 01:27:04 +0800
Subject: [PATCH 21/26] =?UTF-8?q?fix:=20reply=E4=B8=AD=E7=9A=84format=5Fin?=
=?UTF-8?q?fo=E4=B8=BA=E7=A9=BA?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/plugins/chat/message.py | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/src/plugins/chat/message.py b/src/plugins/chat/message.py
index 87380e7c..b7afa817 100644
--- a/src/plugins/chat/message.py
+++ b/src/plugins/chat/message.py
@@ -309,10 +309,7 @@ class MessageSending(MessageProcessBase):
def set_reply(self, reply: Optional["MessageRecv"] = None) -> None:
"""设置回复消息"""
- if (
- self.message_info.format_info.accept_format is not None
- and "reply" in self.message_info.format_info.accept_format
- ):
+ if self.message_info.format_info is not None and "reply" in self.message_info.format_info.accept_format:
if reply:
self.reply = reply
if self.reply:
From 55254549beffe8ccdb8a5ab401cec188e54fb480 Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Tue, 22 Apr 2025 02:01:52 +0800
Subject: [PATCH 22/26] =?UTF-8?q?feat=EF=BC=9A=E6=88=90=E5=8A=9F=E8=9E=8D?=
=?UTF-8?q?=E5=90=88reasoning=E5=92=8CHFC=EF=BC=8C=E7=94=B1=E4=B8=BB?=
=?UTF-8?q?=E5=BF=83=E6=B5=81=E7=BB=9F=E4=B8=80=E8=B0=83=E6=8E=A7?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/MaiBot0.6roadmap.md | 16 +
src/config/config.py | 2 +-
src/heart_flow/L{QA$T9C4`IVQEAB3WZYFXL.jpg | Bin 60448 -> 0 bytes
src/heart_flow/README.md | 14 +-
src/heart_flow/SKG`8J~]3I~E8WEB%Y85I`M.jpg | Bin 93248 -> 0 bytes
src/heart_flow/Update.md | 11 +
src/heart_flow/ZX65~ALHC_7{Q9FKE$X}TQC.jpg | Bin 90138 -> 0 bytes
src/heart_flow/heartflow.py | 429 ++++++++++-----
src/heart_flow/sub_heartflow.py | 206 ++++++-
src/main.py | 6 -
src/plugins/chat/bot.py | 1 -
.../heartFC_chat/heartFC_controler.py | 94 ++--
.../heartFC_chat/heartFC_processor.py | 45 +-
.../chat_module/heartFC_chat/interest.py | 503 ------------------
.../chat_module/heartFC_chat/pf_chatting.py | 2 +-
.../heartFC_chat/reasoning_chat.py | 153 +++---
.../reasoning_chat/reasoning_chat.py | 1 -
src/plugins/person_info/person_info.py | 6 +-
.../person_info/relationship_manager.py | 4 +-
19 files changed, 708 insertions(+), 785 deletions(-)
create mode 100644 src/MaiBot0.6roadmap.md
delete mode 100644 src/heart_flow/L{QA$T9C4`IVQEAB3WZYFXL.jpg
delete mode 100644 src/heart_flow/SKG`8J~]3I~E8WEB%Y85I`M.jpg
create mode 100644 src/heart_flow/Update.md
delete mode 100644 src/heart_flow/ZX65~ALHC_7{Q9FKE$X}TQC.jpg
delete mode 100644 src/plugins/chat_module/heartFC_chat/interest.py
diff --git a/src/MaiBot0.6roadmap.md b/src/MaiBot0.6roadmap.md
new file mode 100644
index 00000000..54774197
--- /dev/null
+++ b/src/MaiBot0.6roadmap.md
@@ -0,0 +1,16 @@
+MaiCore/MaiBot 0.6路线图 draft
+
+0.6.3:解决0.6.x版本核心问题,改进功能
+主要功能加入
+LPMM全面替代旧知识库
+采用新的HFC回复模式,取代旧心流
+合并推理模式和心流模式,根据麦麦自己决策回复模式
+提供新的表情包系统
+
+0.6.4:提升用户体验,交互优化
+加入webui
+提供麦麦 API
+修复prompt建构的各种问题
+修复各种bug
+调整代码文件结构,重构部分落后设计
+
diff --git a/src/config/config.py b/src/config/config.py
index 83e47837..bf184a00 100644
--- a/src/config/config.py
+++ b/src/config/config.py
@@ -28,7 +28,7 @@ logger = get_module_logger("config", config=config_config)
# 考虑到,实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码
is_test = True
mai_version_main = "0.6.3"
-mai_version_fix = "snapshot-2"
+mai_version_fix = "snapshot-3"
if mai_version_fix:
if is_test:
diff --git a/src/heart_flow/L{QA$T9C4`IVQEAB3WZYFXL.jpg b/src/heart_flow/L{QA$T9C4`IVQEAB3WZYFXL.jpg
deleted file mode 100644
index 186b34de2c8115074978456317fd795e4e5f2ac3..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001
literal 60448
(60448 bytes of base85-encoded binary data for the deleted image omitted)