pull/1001/head
SnowindMe 2025-04-23 00:11:05 +08:00
commit 4a92daf783
39 changed files with 2961 additions and 1700 deletions

View File

@ -14,7 +14,7 @@
<p align="center"> <p align="center">
<a href="https://github.com/MaiM-with-u/MaiBot/"> <a href="https://github.com/MaiM-with-u/MaiBot/">
<img src="depends-data/maimai.png" alt="Logo" width="200"> <img src="depends-data/maimai.png" alt="Logo" style="max-width: 200px">
</a> </a>
<br /> <br />
<a href="https://space.bilibili.com/1344099355"> <a href="https://space.bilibili.com/1344099355">
@ -34,7 +34,6 @@
· ·
<a href="https://github.com/MaiM-with-u/MaiBot/issues">提出新特性</a> <a href="https://github.com/MaiM-with-u/MaiBot/issues">提出新特性</a>
</p> </p>
</p> </p>
## 新版0.6.x部署前先阅读https://docs.mai-mai.org/manual/usage/mmc_q_a ## 新版0.6.x部署前先阅读https://docs.mai-mai.org/manual/usage/mmc_q_a
@ -53,7 +52,7 @@
<div align="center"> <div align="center">
<a href="https://www.bilibili.com/video/BV1amAneGE3P" target="_blank"> <a href="https://www.bilibili.com/video/BV1amAneGE3P" target="_blank">
<img src="depends-data/video.png" width="200" alt="麦麦演示视频"> <img src="depends-data/video.png" style="max-width: 200px" alt="麦麦演示视频">
<br> <br>
👆 点击观看麦麦演示视频 👆 👆 点击观看麦麦演示视频 👆
</a> </a>
@ -99,7 +98,7 @@
<div align="left"> <div align="left">
<h2>📚 文档</h2> <h2>📚 文档 </h2>
</div> </div>
### (部分内容可能过时,请注意版本对应) ### (部分内容可能过时,请注意版本对应)
@ -186,7 +185,7 @@ MaiCore是一个开源项目我们非常欢迎你的参与。你的贡献
感谢各位大佬! 感谢各位大佬!
<a href="https://github.com/MaiM-with-u/MaiBot/graphs/contributors"> <a href="https://github.com/MaiM-with-u/MaiBot/graphs/contributors">
<img src="https://contrib.rocks/image?repo=MaiM-with-u/MaiBot" /> <img alt="contributors" src="https://contrib.rocks/image?repo=MaiM-with-u/MaiBot" />
</a> </a>
**也感谢每一位给麦麦发展提出宝贵意见与建议的用户,感谢陪伴麦麦走到现在的你们** **也感谢每一位给麦麦发展提出宝贵意见与建议的用户,感谢陪伴麦麦走到现在的你们**

View File

@ -0,0 +1,16 @@
MaiCore/MaiBot 0.6路线图 draft
0.6.3:解决0.6.x版本核心问题,改进功能
主要功能加入
LPMM全面替代旧知识库
采用新的HFC回复模式取代旧心流
合并推理模式和心流模式,根据麦麦自己决策回复模式
提供新的表情包系统
0.6.4:提升用户体验,交互优化
加入webui
提供麦麦 API
修复prompt建构的各种问题
修复各种bug
调整代码文件结构,重构部分落后设计

View File

@ -0,0 +1,8 @@
from fastapi import FastAPI
from strawberry.fastapi import GraphQLRouter

# Minimal FastAPI application that mounts a strawberry GraphQL endpoint
# under /graphql.
app = FastAPI()

# NOTE(review): schema=None is a placeholder — GraphQLRouter needs a real
# strawberry.Schema instance before this app can serve requests; confirm
# the schema is wired in before deployment.
graphql_router = GraphQLRouter(schema=None, path="/")  # Replace `None` with your actual schema
app.include_router(graphql_router, prefix="/graphql", tags=["GraphQL"])

View File

@ -0,0 +1,155 @@
from typing import Dict, List, Optional
import strawberry
# from packaging.version import Version, InvalidVersion
# from packaging.specifiers import SpecifierSet, InvalidSpecifier
# from ..config.config import global_config
# import os
from packaging.version import Version
@strawberry.type
class BotConfig:
    """Bot configuration exposed as a GraphQL (strawberry) type.

    Fields mirror the sections of the bot's TOML configuration file.
    NOTE(review): several fields are annotated with bare ``set``/``list``
    (no element type); strawberry may not be able to map unparameterized
    containers into a GraphQL schema — confirm against the schema build.
    """

    INNER_VERSION: Version
    MAI_VERSION: str  # hard-coded version string (not auto-updated from config)

    # bot
    BOT_QQ: Optional[int]
    BOT_NICKNAME: Optional[str]
    BOT_ALIAS_NAMES: List[str]  # alias names the bot also responds to

    # group
    talk_allowed_groups: set
    talk_frequency_down_groups: set
    ban_user_id: set

    # personality
    personality_core: str  # keep it short (~20 chars recommended)
    personality_sides: List[str]

    # identity
    identity_detail: List[str]
    height: int  # height, in centimeters
    weight: int  # weight, in kilograms
    age: int  # age, in years
    gender: str  # gender
    appearance: str  # appearance description

    # schedule
    ENABLE_SCHEDULE_GEN: bool  # whether schedule generation is enabled
    PROMPT_SCHEDULE_GEN: str
    SCHEDULE_DOING_UPDATE_INTERVAL: int  # schedule update interval, in seconds
    SCHEDULE_TEMPERATURE: float  # LLM temperature for schedule generation (0.5-1.0 recommended)
    TIME_ZONE: str  # timezone

    # message
    MAX_CONTEXT_SIZE: int  # maximum number of messages kept in context
    emoji_chance: float  # base probability of sending a sticker/emoji
    thinking_timeout: int  # thinking timeout
    max_response_length: int  # maximum reply length
    message_buffer: bool  # message buffering toggle
    ban_words: set
    ban_msgs_regex: set

    # heartflow
    # enable_heartflow: bool = False  # whether heartflow is enabled
    sub_heart_flow_update_interval: int  # sub-heartflow update interval, in seconds
    sub_heart_flow_freeze_time: int  # seconds without a reply before a sub-heartflow freezes
    sub_heart_flow_stop_time: int  # seconds without a reply before a sub-heartflow stops
    heart_flow_update_interval: int  # heartflow update interval, in seconds
    observation_context_size: int  # max context length heartflow observes; longer contexts get compressed
    compressed_length: int  # must not exceed observation_context_size; minimum compressed length (at least 5)
    compress_length_limit: int  # max number of compressed chunks kept; older ones beyond this are deleted

    # willing
    willing_mode: str  # willingness mode
    response_willing_amplifier: float  # reply-willingness amplification factor
    response_interested_rate_amplifier: float  # reply interest-rate amplification factor
    down_frequency_rate: float  # willingness damping factor for down-frequency groups
    emoji_response_penalty: float  # penalty applied when replying to stickers
    mentioned_bot_inevitable_reply: bool  # always reply when the bot is mentioned
    at_bot_inevitable_reply: bool  # always reply when the bot is @-ed

    # response
    response_mode: str  # reply strategy
    MODEL_R1_PROBABILITY: float  # probability of choosing the R1 model
    MODEL_V3_PROBABILITY: float  # probability of choosing the V3 model
    # MODEL_R1_DISTILL_PROBABILITY: float  # probability of choosing the R1-distill model

    # emoji
    max_emoji_num: int  # maximum number of stored stickers
    max_reach_deletion: bool  # if True, delete stickers once the cap is reached; if False, stop collecting
    EMOJI_CHECK_INTERVAL: int  # sticker check interval (minutes)
    EMOJI_REGISTER_INTERVAL: int  # sticker registration interval (minutes)
    EMOJI_SAVE: bool  # whether to save ("steal") stickers from chats
    EMOJI_CHECK: bool  # whether sticker filtering is enabled
    EMOJI_CHECK_PROMPT: str  # filtering criteria prompt for stickers

    # memory
    build_memory_interval: int  # memory build interval (seconds)
    memory_build_distribution: list  # build distribution params: dist1 mean/stddev/weight, dist2 mean/stddev/weight
    build_memory_sample_num: int  # number of samples taken per memory build
    build_memory_sample_length: int  # length of each memory-build sample
    memory_compress_rate: float  # memory compression rate
    forget_memory_interval: int  # memory forgetting interval (seconds)
    memory_forget_time: int  # age after which memories may be forgotten (hours)
    memory_forget_percentage: float  # fraction of memories forgotten per pass
    memory_ban_words: list  # words excluded from memory building

    # mood
    mood_update_interval: float  # mood update interval, in seconds
    mood_decay_rate: float  # mood decay rate
    mood_intensity_factor: float  # mood intensity factor

    # keywords
    keywords_reaction_rules: list  # keyword-triggered reply rules

    # chinese_typo
    chinese_typo_enable: bool  # whether the Chinese-typo generator is enabled
    chinese_typo_error_rate: float  # per-character substitution probability
    chinese_typo_min_freq: int  # minimum character-frequency threshold
    chinese_typo_tone_error_rate: float  # tone-error probability
    chinese_typo_word_replace_rate: float  # whole-word replacement probability

    # response_splitter
    enable_response_splitter: bool  # whether the reply splitter is enabled
    response_max_length: int  # maximum allowed reply length
    response_max_sentence_num: int  # maximum allowed number of sentences per reply

    # remote
    remote_enable: bool  # whether remote control is enabled

    # experimental
    enable_friend_chat: bool  # whether friend (private) chat is enabled
    # enable_think_flow: bool  # whether the think-flow feature is enabled
    enable_pfc_chatting: bool  # whether PFC chatting is enabled

    # model configuration (each dict holds provider/model settings)
    llm_reasoning: Dict[str, str]  # reasoning LLM
    # llm_reasoning_minor: Dict[str, str]
    llm_normal: Dict[str, str]  # general-purpose LLM
    llm_topic_judge: Dict[str, str]  # topic-judgement LLM
    llm_summary_by_topic: Dict[str, str]  # per-topic summary LLM
    llm_emotion_judge: Dict[str, str]  # emotion-judgement LLM
    embedding: Dict[str, str]  # embedding model
    vlm: Dict[str, str]  # vision-language model
    moderation: Dict[str, str]  # moderation model
    # experimental models
    llm_observation: Dict[str, str]  # observation LLM
    llm_sub_heartflow: Dict[str, str]  # sub-heartflow LLM
    llm_heartflow: Dict[str, str]  # heartflow LLM
    api_urls: Dict[str, str]  # API URLs
@strawberry.type
class EnvConfig:
    """Environment configuration exposed as a GraphQL (strawberry) type.

    Currently a stub that only reports a placeholder environment name.
    """

    # The dead `pass` statement that preceded this field was removed: the
    # class body is non-empty, so `pass` was an unreachable no-op.
    @strawberry.field
    def get_env(self) -> str:
        """Return the current environment name (placeholder value)."""
        return "env"

View File

@ -28,7 +28,7 @@ logger = get_module_logger("config", config=config_config)
# 考虑到实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码 # 考虑到实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码
is_test = True is_test = True
mai_version_main = "0.6.3" mai_version_main = "0.6.3"
mai_version_fix = "snapshot-2" mai_version_fix = "snapshot-3"
if mai_version_fix: if mai_version_fix:
if is_test: if is_test:
@ -186,12 +186,18 @@ class BotConfig:
ban_words = set() ban_words = set()
ban_msgs_regex = set() ban_msgs_regex = set()
# heartflow # [heartflow] # 启用启用heart_flowC(心流聊天)模式时生效, 需要填写token消耗量巨大的相关模型
# enable_heartflow: bool = False # 是否启用心流 # 启用后麦麦会自主选择进入heart_flowC模式(持续一段时间), 进行长时间高质量的聊天
sub_heart_flow_update_interval: int = 60 # 子心流更新频率,间隔 单位秒 enable_heart_flowC: bool = True # 是否启用heart_flowC(心流聊天, HFC)模式
sub_heart_flow_freeze_time: int = 120 # 子心流冻结时间,超过这个时间没有回复,子心流会冻结,间隔 单位秒 reply_trigger_threshold: float = 3.0 # 心流聊天触发阈值,越低越容易触发
probability_decay_factor_per_second: float = 0.2 # 概率衰减因子,越大衰减越快
default_decay_rate_per_second: float = 0.98 # 默认衰减率,越大衰减越慢
initial_duration: int = 60 # 初始持续时间,越大心流聊天持续的时间越长
# sub_heart_flow_update_interval: int = 60 # 子心流更新频率,间隔 单位秒
# sub_heart_flow_freeze_time: int = 120 # 子心流冻结时间,超过这个时间没有回复,子心流会冻结,间隔 单位秒
sub_heart_flow_stop_time: int = 600 # 子心流停止时间,超过这个时间没有回复,子心流会停止,间隔 单位秒 sub_heart_flow_stop_time: int = 600 # 子心流停止时间,超过这个时间没有回复,子心流会停止,间隔 单位秒
heart_flow_update_interval: int = 300 # 心流更新频率,间隔 单位秒 # heart_flow_update_interval: int = 300 # 心流更新频率,间隔 单位秒
observation_context_size: int = 20 # 心流观察到的最长上下文大小,超过这个值的上下文会被压缩 observation_context_size: int = 20 # 心流观察到的最长上下文大小,超过这个值的上下文会被压缩
compressed_length: int = 5 # 不能大于observation_context_size,心流上下文压缩的最短压缩长度超过心流观察到的上下文长度会压缩最短压缩长度为5 compressed_length: int = 5 # 不能大于observation_context_size,心流上下文压缩的最短压缩长度超过心流观察到的上下文长度会压缩最短压缩长度为5
compress_length_limit: int = 5 # 最多压缩份数,超过该数值的压缩上下文会被删除 compress_length_limit: int = 5 # 最多压缩份数,超过该数值的压缩上下文会被删除
@ -207,8 +213,8 @@ class BotConfig:
# response # response
response_mode: str = "heart_flow" # 回复策略 response_mode: str = "heart_flow" # 回复策略
MODEL_R1_PROBABILITY: float = 0.8 # R1模型概率 model_reasoning_probability: float = 0.7 # 麦麦回答时选择推理模型(主要)模型概率
MODEL_V3_PROBABILITY: float = 0.1 # V3模型概率 model_normal_probability: float = 0.3 # 麦麦回答时选择一般模型(次要)模型概率
# MODEL_R1_DISTILL_PROBABILITY: float = 0.1 # R1蒸馏模型概率 # MODEL_R1_DISTILL_PROBABILITY: float = 0.1 # R1蒸馏模型概率
# emoji # emoji
@ -401,29 +407,34 @@ class BotConfig:
def response(parent: dict): def response(parent: dict):
response_config = parent["response"] response_config = parent["response"]
config.MODEL_R1_PROBABILITY = response_config.get("model_r1_probability", config.MODEL_R1_PROBABILITY) config.model_reasoning_probability = response_config.get(
config.MODEL_V3_PROBABILITY = response_config.get("model_v3_probability", config.MODEL_V3_PROBABILITY) "model_reasoning_probability", config.model_reasoning_probability
# config.MODEL_R1_DISTILL_PROBABILITY = response_config.get( )
# "model_r1_distill_probability", config.MODEL_R1_DISTILL_PROBABILITY config.model_normal_probability = response_config.get(
# ) "model_normal_probability", config.model_normal_probability
config.max_response_length = response_config.get("max_response_length", config.max_response_length) )
if config.INNER_VERSION in SpecifierSet(">=1.0.4"):
config.response_mode = response_config.get("response_mode", config.response_mode) # 添加 enable_heart_flowC 的加载逻辑 (假设它在 [response] 部分)
if config.INNER_VERSION in SpecifierSet(">=1.4.0"):
config.enable_heart_flowC = response_config.get("enable_heart_flowC", config.enable_heart_flowC)
def heartflow(parent: dict): def heartflow(parent: dict):
heartflow_config = parent["heartflow"] heartflow_config = parent["heartflow"]
config.sub_heart_flow_update_interval = heartflow_config.get( # 加载新增的 heartflowC 参数
"sub_heart_flow_update_interval", config.sub_heart_flow_update_interval
) # 加载原有的 heartflow 参数
config.sub_heart_flow_freeze_time = heartflow_config.get( # config.sub_heart_flow_update_interval = heartflow_config.get(
"sub_heart_flow_freeze_time", config.sub_heart_flow_freeze_time # "sub_heart_flow_update_interval", config.sub_heart_flow_update_interval
) # )
# config.sub_heart_flow_freeze_time = heartflow_config.get(
# "sub_heart_flow_freeze_time", config.sub_heart_flow_freeze_time
# )
config.sub_heart_flow_stop_time = heartflow_config.get( config.sub_heart_flow_stop_time = heartflow_config.get(
"sub_heart_flow_stop_time", config.sub_heart_flow_stop_time "sub_heart_flow_stop_time", config.sub_heart_flow_stop_time
) )
config.heart_flow_update_interval = heartflow_config.get( # config.heart_flow_update_interval = heartflow_config.get(
"heart_flow_update_interval", config.heart_flow_update_interval # "heart_flow_update_interval", config.heart_flow_update_interval
) # )
if config.INNER_VERSION in SpecifierSet(">=1.3.0"): if config.INNER_VERSION in SpecifierSet(">=1.3.0"):
config.observation_context_size = heartflow_config.get( config.observation_context_size = heartflow_config.get(
"observation_context_size", config.observation_context_size "observation_context_size", config.observation_context_size
@ -432,6 +443,17 @@ class BotConfig:
config.compress_length_limit = heartflow_config.get( config.compress_length_limit = heartflow_config.get(
"compress_length_limit", config.compress_length_limit "compress_length_limit", config.compress_length_limit
) )
if config.INNER_VERSION in SpecifierSet(">=1.4.0"):
config.reply_trigger_threshold = heartflow_config.get(
"reply_trigger_threshold", config.reply_trigger_threshold
)
config.probability_decay_factor_per_second = heartflow_config.get(
"probability_decay_factor_per_second", config.probability_decay_factor_per_second
)
config.default_decay_rate_per_second = heartflow_config.get(
"default_decay_rate_per_second", config.default_decay_rate_per_second
)
config.initial_duration = heartflow_config.get("initial_duration", config.initial_duration)
def willing(parent: dict): def willing(parent: dict):
willing_config = parent["willing"] willing_config = parent["willing"]

Binary file not shown.

Before

Width:  |  Height:  |  Size: 59 KiB

View File

@ -79,4 +79,16 @@ await heartflow.heartflow_start_working()
1. 子心流会在长时间不活跃后自动清理 1. 子心流会在长时间不活跃后自动清理
2. 需要合理配置更新间隔以平衡性能和响应速度 2. 需要合理配置更新间隔以平衡性能和响应速度
3. 观察系统会限制消息处理数量以避免过载 3. 观察系统会限制消息处理数量以避免过载
更新:
把聊天控制移动到心流下吧
首先心流要根据日程以及当前状况判定总体状态MaiStateInfo
然后根据每个子心流的运行情况给子心流分配聊天资源ChatStateInfoABSENT CHAT 或者 FOCUS
子心流负责根据状态进行执行
1.将interest.py进行拆分class InterestChatting 将会在 sub_heartflow中声明每个sub_heartflow都会所属一个InterestChatting
class InterestManager 将会在heartflow中声明成为heartflow的一个组件伴随heartflow产生

Binary file not shown.

Before

Width:  |  Height:  |  Size: 91 KiB

View File

@ -0,0 +1,11 @@
更新:
把聊天控制移动到心流下吧
首先心流要根据日程以及当前状况判定总体状态MaiStateInfo
然后根据每个子心流的运行情况给子心流分配聊天资源ChatStateInfoABSENT CHAT 或者 FOCUS
子心流负责根据状态进行执行
1.将interest.py进行拆分class InterestChatting 将会在 sub_heartflow中声明每个sub_heartflow都会所属一个InterestChatting
class InterestManager 将会在heartflow中声明成为heartflow的一个组件伴随heartflow产生

Binary file not shown.

Before

Width:  |  Height:  |  Size: 88 KiB

View File

@ -1,16 +1,20 @@
from .sub_heartflow import SubHeartflow from .sub_heartflow import SubHeartflow, ChattingObservation
from .observation import ChattingObservation
from src.plugins.moods.moods import MoodManager from src.plugins.moods.moods import MoodManager
from src.plugins.models.utils_model import LLMRequest from src.plugins.models.utils_model import LLMRequest
from src.config.config import global_config from src.config.config import global_config
from src.plugins.schedule.schedule_generator import bot_schedule from src.plugins.schedule.schedule_generator import bot_schedule
from src.plugins.utils.prompt_builder import Prompt, global_prompt_manager from src.plugins.utils.prompt_builder import Prompt, global_prompt_manager
import asyncio import asyncio
from src.common.logger import get_module_logger, LogConfig, HEARTFLOW_STYLE_CONFIG # noqa: E402 from src.common.logger import get_module_logger, LogConfig, HEARTFLOW_STYLE_CONFIG # 修改
from src.individuality.individuality import Individuality from src.individuality.individuality import Individuality
import time import time
import random import random
from typing import Dict, Any from typing import Dict, Any, Optional
import traceback
import enum
import os # 新增
import json # 新增
from src.plugins.chat.chat_stream import chat_manager # 新增
heartflow_config = LogConfig( heartflow_config = LogConfig(
# 使用海马体专用样式 # 使用海马体专用样式
@ -41,76 +45,269 @@ def init_prompt():
Prompt(prompt, "mind_summary_prompt") Prompt(prompt, "mind_summary_prompt")
class CurrentState: # --- 新增:从 interest.py 移动过来的常量 ---
LOG_DIRECTORY = "logs/interest"
HISTORY_LOG_FILENAME = "interest_history.log"
CLEANUP_INTERVAL_SECONDS = 1200 # 清理任务运行间隔 (例如20分钟) - 保持与 interest.py 一致
INACTIVE_THRESHOLD_SECONDS = 1200 # 不活跃时间阈值 (例如20分钟) - 保持与 interest.py 一致
LOG_INTERVAL_SECONDS = 3 # 日志记录间隔 (例如3秒) - 保持与 interest.py 一致
# --- 结束新增常量 ---
# 新增 ChatStatus 枚举
class MaiState(enum.Enum):
"""
聊天状态:
OFFLINE: 不在线回复概率极低不会进行任何聊天
PEEKING: 看一眼手机回复概率较低会进行一些普通聊天
NORMAL_CHAT: 正常聊天回复概率较高会进行一些普通聊天和少量的专注聊天
FOCUSED_CHAT: 专注聊天回复概率极高会进行专注聊天和少量的普通聊天
"""
OFFLINE = "不在线"
PEEKING = "看一眼手机"
NORMAL_CHAT = "正常聊天"
FOCUSED_CHAT = "专注聊天"
def get_normal_chat_max_num(self):
if self == MaiState.OFFLINE:
return 0
elif self == MaiState.PEEKING:
return 1
elif self == MaiState.NORMAL_CHAT:
return 3
elif self == MaiState.FOCUSED_CHAT:
return 2
def get_focused_chat_max_num(self):
if self == MaiState.OFFLINE:
return 0
elif self == MaiState.PEEKING:
return 0
elif self == MaiState.NORMAL_CHAT:
return 1
elif self == MaiState.FOCUSED_CHAT:
return 2
class MaiStateInfo:
def __init__(self): def __init__(self):
self.current_state_info = "" self.current_state_info = ""
# 使用枚举类型初始化状态,默认为不在线
self.mai_status: MaiState = MaiState.OFFLINE
self.normal_chatting = []
self.focused_chatting = []
self.mood_manager = MoodManager() self.mood_manager = MoodManager()
self.mood = self.mood_manager.get_prompt() self.mood = self.mood_manager.get_prompt()
self.attendance_factor = 0
self.engagement_factor = 0
def update_current_state_info(self): def update_current_state_info(self):
self.current_state_info = self.mood_manager.get_current_mood() self.current_state_info = self.mood_manager.get_current_mood()
# 新增更新聊天状态的方法
def update_mai_status(self, new_status: MaiState):
"""更新聊天状态"""
if isinstance(new_status, MaiState):
self.mai_status = new_status
logger.info(f"麦麦状态更新为: {self.mai_status.value}")
else:
logger.warning(f"尝试设置无效的麦麦状态: {new_status}")
class Heartflow: class Heartflow:
def __init__(self): def __init__(self):
self.current_mind = "你什么也没想" self.current_mind = "你什么也没想"
self.past_mind = [] self.past_mind = []
self.current_state: CurrentState = CurrentState() self.current_state: MaiStateInfo = MaiStateInfo()
self.llm_model = LLMRequest( self.llm_model = LLMRequest(
model=global_config.llm_heartflow, temperature=0.6, max_tokens=1000, request_type="heart_flow" model=global_config.llm_heartflow, temperature=0.6, max_tokens=1000, request_type="heart_flow"
) )
self._subheartflows: Dict[Any, SubHeartflow] = {} self._subheartflows: Dict[Any, SubHeartflow] = {}
async def _cleanup_inactive_subheartflows(self): # --- 新增:日志和清理相关属性 (从 InterestManager 移动) ---
"""定期清理不活跃的子心流""" self._history_log_file_path = os.path.join(LOG_DIRECTORY, HISTORY_LOG_FILENAME)
self._ensure_log_directory() # 初始化时确保目录存在
self._cleanup_task: Optional[asyncio.Task] = None
self._logging_task: Optional[asyncio.Task] = None
# 注意:衰减任务 (_decay_task) 不再需要,衰减在 SubHeartflow 的 InterestChatting 内部处理
# --- 结束新增属性 ---
def _ensure_log_directory(self): # 新增方法 (从 InterestManager 移动)
"""确保日志目录存在"""
# 移除 try-except 块,根据用户要求
os.makedirs(LOG_DIRECTORY, exist_ok=True)
logger.info(f"Log directory '{LOG_DIRECTORY}' ensured.")
# except OSError as e:
# logger.error(f"Error creating log directory '{LOG_DIRECTORY}': {e}")
async def _periodic_cleanup_task(
self, interval_seconds: int, max_age_seconds: int
): # 新增方法 (从 InterestManager 移动和修改)
"""后台清理任务的异步函数"""
while True: while True:
current_time = time.time() await asyncio.sleep(interval_seconds)
inactive_subheartflows = [] logger.info(f"[Heartflow] 运行定期清理 (间隔: {interval_seconds}秒)...")
self.cleanup_inactive_subheartflows(max_age_seconds=max_age_seconds) # 调用 Heartflow 自己的清理方法
# 检查所有子心流 async def _periodic_log_task(self, interval_seconds: int): # 新增方法 (从 InterestManager 移动和修改)
for subheartflow_id, subheartflow in self._subheartflows.items(): """后台日志记录任务的异步函数 (记录所有子心流的兴趣历史数据)"""
if ( while True:
current_time - subheartflow.last_active_time > global_config.sub_heart_flow_stop_time await asyncio.sleep(interval_seconds)
): # 10分钟 = 600秒 try:
inactive_subheartflows.append(subheartflow_id) current_timestamp = time.time()
logger.info(f"发现不活跃的子心流: {subheartflow_id}") all_interest_states = self.get_all_interest_states() # 获取所有子心流的兴趣状态
# 清理不活跃的子心流 # 以追加模式打开历史日志文件
for subheartflow_id in inactive_subheartflows: # 移除 try-except IO 块,根据用户要求
del self._subheartflows[subheartflow_id] with open(self._history_log_file_path, "a", encoding="utf-8") as f:
logger.info(f"已清理不活跃的子心流: {subheartflow_id}") count = 0
# 创建 items 快照以安全迭代
items_snapshot = list(all_interest_states.items())
for stream_id, state in items_snapshot:
# 从 chat_manager 获取 group_name
group_name = stream_id # 默认值
try:
chat_stream = chat_manager.get_stream(stream_id)
if chat_stream and chat_stream.group_info:
group_name = chat_stream.group_info.group_name
elif chat_stream and not chat_stream.group_info: # 处理私聊
group_name = (
f"私聊_{chat_stream.user_info.user_nickname}"
if chat_stream.user_info
else stream_id
)
except Exception:
# 不记录警告,避免刷屏,使用默认 stream_id 即可
# logger.warning(f"Could not get group name for stream_id {stream_id}: {e}")
pass # 静默处理
await asyncio.sleep(30) # 每分钟检查一次 log_entry = {
"timestamp": round(current_timestamp, 2),
"stream_id": stream_id,
"interest_level": state.get("interest_level", 0.0), # 使用 get 获取,提供默认值
"group_name": group_name,
"reply_probability": state.get("current_reply_probability", 0.0), # 使用 get 获取
"is_above_threshold": state.get("is_above_threshold", False), # 使用 get 获取
}
# 将每个条目作为单独的 JSON 行写入
f.write(json.dumps(log_entry, ensure_ascii=False) + "\n")
count += 1
# logger.debug(f"[Heartflow] Successfully appended {count} interest history entries to {self._history_log_file_path}")
async def _sub_heartflow_update(self): # except IOError as e:
# logger.error(f"[Heartflow] Error writing interest history log to {self._history_log_file_path}: {e}")
except Exception as e: # 保留对其他异常的捕获
logger.error(f"[Heartflow] Unexpected error during periodic history logging: {e}")
logger.error(traceback.format_exc()) # 记录 traceback
def get_all_interest_states(self) -> Dict[str, Dict]: # 新增方法
"""获取所有活跃子心流的当前兴趣状态"""
states = {}
# 创建副本以避免在迭代时修改字典
items_snapshot = list(self._subheartflows.items())
for stream_id, subheartflow in items_snapshot:
try:
# 从 SubHeartflow 获取其 InterestChatting 的状态
states[stream_id] = subheartflow.get_interest_state()
except Exception as e:
logger.warning(f"[Heartflow] Error getting interest state for subheartflow {stream_id}: {e}")
return states
def cleanup_inactive_subheartflows(self, max_age_seconds=INACTIVE_THRESHOLD_SECONDS): # 修改此方法以使用兴趣时间
"""
清理长时间不活跃的子心流记录 (基于兴趣交互时间)
max_age_seconds: 超过此时间未通过兴趣系统交互的将被清理
"""
current_time = time.time()
keys_to_remove = []
_initial_count = len(self._subheartflows)
# 创建副本以避免在迭代时修改字典
items_snapshot = list(self._subheartflows.items())
for subheartflow_id, subheartflow in items_snapshot:
should_remove = False
reason = ""
# 检查 InterestChatting 的最后交互时间
last_interaction = subheartflow.interest_chatting.last_interaction_time
if max_age_seconds is not None and (current_time - last_interaction) > max_age_seconds:
should_remove = True
reason = (
f"interest inactive time ({current_time - last_interaction:.0f}s) > max age ({max_age_seconds}s)"
)
if should_remove:
keys_to_remove.append(subheartflow_id)
stream_name = chat_manager.get_stream_name(subheartflow_id) or subheartflow_id # 获取流名称
logger.debug(f"[Heartflow] Marking stream {stream_name} for removal. Reason: {reason}")
# 标记子心流让其后台任务停止 (如果其后台任务还在运行)
subheartflow.should_stop = True
if keys_to_remove:
logger.info(f"[Heartflow] 清理识别到 {len(keys_to_remove)} 个不活跃的流。")
for key in keys_to_remove:
if key in self._subheartflows:
# 尝试取消子心流的后台任务
task_to_cancel = self._subheartflows[key].task
if task_to_cancel and not task_to_cancel.done():
task_to_cancel.cancel()
logger.debug(f"[Heartflow] Cancelled background task for subheartflow {key}")
# 从字典中删除
del self._subheartflows[key]
stream_name = chat_manager.get_stream_name(key) or key # 获取流名称
logger.debug(f"[Heartflow] 移除了流: {stream_name}")
final_count = len(self._subheartflows) # 直接获取当前长度
logger.info(f"[Heartflow] 清理完成。移除了 {len(keys_to_remove)} 个流。当前数量: {final_count}")
else:
# logger.info(f"[Heartflow] 清理完成。没有流符合移除条件。当前数量: {initial_count}") # 减少日志噪音
pass
async def _sub_heartflow_update(self): # 这个任务目前作用不大,可以考虑移除或赋予新职责
while True: while True:
# 检查是否存在子心流 # 检查是否存在子心流
if not self._subheartflows: if not self._subheartflows:
# logger.info("当前没有子心流,等待新的子心流创建...") # logger.info("当前没有子心流,等待新的子心流创建...")
await asyncio.sleep(30) # 每分钟检查一次是否有新的子心流 await asyncio.sleep(30) # 短暂休眠
continue continue
await self.do_a_thinking() # 当前无实际操作,只是等待
await asyncio.sleep(global_config.heart_flow_update_interval) # 5分钟思考一次 await asyncio.sleep(300)
async def heartflow_start_working(self): async def heartflow_start_working(self):
# 启动清理任务 # 启动清理任务 (使用新的 periodic_cleanup_task)
asyncio.create_task(self._cleanup_inactive_subheartflows()) if self._cleanup_task is None or self._cleanup_task.done():
self._cleanup_task = asyncio.create_task(
self._periodic_cleanup_task(
interval_seconds=CLEANUP_INTERVAL_SECONDS,
max_age_seconds=INACTIVE_THRESHOLD_SECONDS,
)
)
logger.info(
f"[Heartflow] 已创建定期清理任务。间隔: {CLEANUP_INTERVAL_SECONDS}s, 不活跃阈值: {INACTIVE_THRESHOLD_SECONDS}s"
)
else:
logger.warning("[Heartflow] 跳过创建清理任务: 任务已在运行或存在。")
# 启动子心流更新任务 # 启动日志任务 (使用新的 periodic_log_task)
asyncio.create_task(self._sub_heartflow_update()) if self._logging_task is None or self._logging_task.done():
self._logging_task = asyncio.create_task(self._periodic_log_task(interval_seconds=LOG_INTERVAL_SECONDS))
logger.info(f"[Heartflow] 已创建定期日志任务。间隔: {LOG_INTERVAL_SECONDS}s")
else:
logger.warning("[Heartflow] 跳过创建日志任务: 任务已在运行或存在。")
# (可选) 启动旧的子心流更新任务,如果它还有用的话
# asyncio.create_task(self._sub_heartflow_update())
@staticmethod @staticmethod
async def _update_current_state(): async def _update_current_state():
print("TODO") print("TODO")
async def do_a_thinking(self): async def do_a_thinking(self):
logger.debug("麦麦大脑袋转起来了") # logger.debug("麦麦大脑袋转起来了")
self.current_state.update_current_state_info() self.current_state.update_current_state_info()
# 开始构建prompt # 开始构建prompt
@ -122,127 +319,152 @@ class Heartflow:
prompt_personality += personality_core prompt_personality += personality_core
personality_sides = individuality.personality.personality_sides personality_sides = individuality.personality.personality_sides
random.shuffle(personality_sides) # 检查列表是否为空
prompt_personality += f",{personality_sides[0]}" if personality_sides:
random.shuffle(personality_sides)
prompt_personality += f",{personality_sides[0]}"
identity_detail = individuality.identity.identity_detail identity_detail = individuality.identity.identity_detail
random.shuffle(identity_detail) # 检查列表是否为空
prompt_personality += f",{identity_detail[0]}" if identity_detail:
random.shuffle(identity_detail)
prompt_personality += f",{identity_detail[0]}"
personality_info = prompt_personality personality_info = prompt_personality
current_thinking_info = self.current_mind current_thinking_info = self.current_mind
mood_info = self.current_state.mood mood_info = self.current_state.mood
related_memory_info = "memory" related_memory_info = "memory" # TODO: 替换为实际的记忆获取逻辑
try: try:
sub_flows_info = await self.get_all_subheartflows_minds() sub_flows_info = await self.get_all_subheartflows_minds_summary() # 修改为调用汇总方法
except Exception as e: except Exception as e:
logger.error(f"获取子心流的想法失败: {e}") logger.error(f"[Heartflow] 获取子心流想法汇总失败: {e}")
return logger.error(traceback.format_exc())
sub_flows_info = "(获取子心流想法时出错)" # 提供默认值
schedule_info = bot_schedule.get_current_num_task(num=4, time_info=True) schedule_info = bot_schedule.get_current_num_task(num=4, time_info=True)
# prompt = ""
# prompt += f"你刚刚在做的事情是:{schedule_info}\n"
# prompt += f"{personality_info}\n"
# prompt += f"你想起来{related_memory_info}。"
# prompt += f"刚刚你的主要想法是{current_thinking_info}。"
# prompt += f"你还有一些小想法,因为你在参加不同的群聊天,这是你正在做的事情:{sub_flows_info}\n"
# prompt += f"你现在{mood_info}。"
# prompt += "现在你接下去继续思考,产生新的想法,但是要基于原有的主要想法,不要分点输出,"
# prompt += "输出连贯的内心独白,不要太长,但是记得结合上述的消息,关注新内容:"
prompt = (await global_prompt_manager.get_prompt_async("thinking_prompt")).format( prompt = (await global_prompt_manager.get_prompt_async("thinking_prompt")).format(
schedule_info, personality_info, related_memory_info, current_thinking_info, sub_flows_info, mood_info schedule_info=schedule_info, # 使用关键字参数确保正确格式化
personality_info=personality_info,
related_memory_info=related_memory_info,
current_thinking_info=current_thinking_info,
sub_flows_info=sub_flows_info,
mood_info=mood_info,
) )
try: try:
response, reasoning_content = await self.llm_model.generate_response_async(prompt) response, reasoning_content = await self.llm_model.generate_response_async(prompt)
if not response:
logger.warning("[Heartflow] 内心独白 LLM 返回空结果。")
response = "(暂时没什么想法...)" # 提供默认想法
self.update_current_mind(response) # 更新主心流想法
logger.info(f"麦麦的总体脑内状态:{self.current_mind}")
# 更新所有子心流的主心流信息
items_snapshot = list(self._subheartflows.items()) # 创建快照
for _, subheartflow in items_snapshot:
subheartflow.main_heartflow_info = response
except Exception as e: except Exception as e:
logger.error(f"内心独白获取失败: {e}") logger.error(f"[Heartflow] 内心独白获取失败: {e}")
return logger.error(traceback.format_exc())
self.update_current_mind(response) # 此处不返回,允许程序继续执行,但主心流想法未更新
self.current_mind = response
logger.info(f"麦麦的总体脑内状态:{self.current_mind}")
# logger.info("麦麦想了想,当前活动:")
# await bot_schedule.move_doing(self.current_mind)
for _, subheartflow in self._subheartflows.items():
subheartflow.main_heartflow_info = response
def update_current_mind(self, response): def update_current_mind(self, response):
self.past_mind.append(self.current_mind) self.past_mind.append(self.current_mind)
self.current_mind = response self.current_mind = response
async def get_all_subheartflows_minds(self): async def get_all_subheartflows_minds_summary(self): # 重命名并修改
sub_minds = "" """获取所有子心流的当前想法,并进行汇总"""
for _, subheartflow in self._subheartflows.items(): sub_minds_list = []
sub_minds += subheartflow.current_mind # 创建快照
items_snapshot = list(self._subheartflows.items())
for _, subheartflow in items_snapshot:
sub_minds_list.append(subheartflow.current_mind)
return await self.minds_summary(sub_minds) if not sub_minds_list:
return "(当前没有活跃的子心流想法)"
minds_str = "\n".join([f"- {mind}" for mind in sub_minds_list]) # 格式化为列表
# 调用 LLM 进行汇总
return await self.minds_summary(minds_str)
async def minds_summary(self, minds_str): async def minds_summary(self, minds_str):
"""使用 LLM 汇总子心流的想法字符串"""
# 开始构建prompt # 开始构建prompt
prompt_personality = "" prompt_personality = ""
# person
individuality = Individuality.get_instance() individuality = Individuality.get_instance()
prompt_personality += individuality.personality.personality_core
personality_core = individuality.personality.personality_core if individuality.personality.personality_sides:
prompt_personality += personality_core prompt_personality += f",{random.choice(individuality.personality.personality_sides)}" # 随机选一个
if individuality.identity.identity_detail:
personality_sides = individuality.personality.personality_sides prompt_personality += f",{random.choice(individuality.identity.identity_detail)}" # 随机选一个
random.shuffle(personality_sides)
prompt_personality += f",{personality_sides[0]}"
identity_detail = individuality.identity.identity_detail
random.shuffle(identity_detail)
prompt_personality += f",{identity_detail[0]}"
personality_info = prompt_personality personality_info = prompt_personality
mood_info = self.current_state.mood mood_info = self.current_state.mood
bot_name = global_config.BOT_NICKNAME # 使用全局配置中的机器人昵称
# prompt = ""
# prompt += f"{personality_info}\n"
# prompt += f"现在{global_config.BOT_NICKNAME}的想法是:{self.current_mind}\n"
# prompt += f"现在{global_config.BOT_NICKNAME}在qq群里进行聊天聊天的话题如下{minds_str}\n"
# prompt += f"你现在{mood_info}\n"
# prompt += """现在请你总结这些聊天内容,注意关注聊天内容对原有的想法的影响,输出连贯的内心独白
# 不要太长,但是记得结合上述的消息,要记得你的人设,关注新内容:"""
prompt = (await global_prompt_manager.get_prompt_async("mind_summary_prompt")).format( prompt = (await global_prompt_manager.get_prompt_async("mind_summary_prompt")).format(
personality_info, global_config.BOT_NICKNAME, self.current_mind, minds_str, mood_info personality_info=personality_info, # 使用关键字参数
bot_name=bot_name,
current_mind=self.current_mind,
minds_str=minds_str,
mood_info=mood_info,
) )
response, reasoning_content = await self.llm_model.generate_response_async(prompt)
return response
async def create_subheartflow(self, subheartflow_id):
"""
创建一个新的SubHeartflow实例
添加一个SubHeartflow实例到self._subheartflows字典中
并根据subheartflow_id为子心流创建一个观察对象
"""
try: try:
if subheartflow_id not in self._subheartflows: response, reasoning_content = await self.llm_model.generate_response_async(prompt)
subheartflow = SubHeartflow(subheartflow_id) if not response:
# 创建一个观察对象目前只可以用chat_id创建观察对象 logger.warning("[Heartflow] 想法汇总 LLM 返回空结果。")
logger.debug(f"创建 observation: {subheartflow_id}") return "(想法汇总失败...)"
observation = ChattingObservation(subheartflow_id) return response
await observation.initialize()
subheartflow.add_observation(observation)
logger.debug("添加 observation 成功")
# 创建异步任务
asyncio.create_task(subheartflow.subheartflow_start_working())
logger.debug("创建异步任务 成功")
self._subheartflows[subheartflow_id] = subheartflow
logger.info("添加 subheartflow 成功")
return self._subheartflows[subheartflow_id]
except Exception as e: except Exception as e:
logger.error(f"创建 subheartflow 失败: {e}") logger.error(f"[Heartflow] 想法汇总失败: {e}")
logger.error(traceback.format_exc())
return "(想法汇总时发生错误...)"
async def create_subheartflow(self, subheartflow_id: Any) -> Optional[SubHeartflow]:
"""
获取或创建一个新的SubHeartflow实例
(主要逻辑不变InterestChatting 现在在 SubHeartflow 内部创建)
"""
existing_subheartflow = self._subheartflows.get(subheartflow_id)
if existing_subheartflow:
# 如果已存在,确保其 last_active_time 更新 (如果需要的话)
# existing_subheartflow.last_active_time = time.time() # 移除,活跃时间由实际操作更新
# logger.debug(f"[Heartflow] 返回已存在的 subheartflow: {subheartflow_id}")
return existing_subheartflow
logger.info(f"[Heartflow] 尝试创建新的 subheartflow: {subheartflow_id}")
try:
# 创建 SubHeartflow它内部会创建 InterestChatting
subheartflow = SubHeartflow(subheartflow_id)
# 创建并初始化观察对象
logger.debug(f"[Heartflow] 为 {subheartflow_id} 创建 observation")
observation = ChattingObservation(subheartflow_id)
await observation.initialize()
subheartflow.add_observation(observation)
logger.debug(f"[Heartflow] 为 {subheartflow_id} 添加 observation 成功")
# 创建并存储后台任务 (SubHeartflow 自己的后台任务)
subheartflow.task = asyncio.create_task(subheartflow.subheartflow_start_working())
logger.debug(f"[Heartflow] 为 {subheartflow_id} 创建后台任务成功")
# 添加到管理字典
self._subheartflows[subheartflow_id] = subheartflow
logger.info(f"[Heartflow] 添加 subheartflow {subheartflow_id} 成功")
return subheartflow
except Exception as e:
logger.error(f"[Heartflow] 创建 subheartflow {subheartflow_id} 失败: {e}")
logger.error(traceback.format_exc())
return None return None
def get_subheartflow(self, observe_chat_id) -> SubHeartflow: def get_subheartflow(self, observe_chat_id: Any) -> Optional[SubHeartflow]:
"""获取指定ID的SubHeartflow实例""" """获取指定ID的SubHeartflow实例"""
return self._subheartflows.get(observe_chat_id) return self._subheartflows.get(observe_chat_id)

View File

@ -139,7 +139,7 @@ class ChattingObservation(Observation):
# traceback.print_exc() # 记录详细堆栈 # traceback.print_exc() # 记录详细堆栈
# print(f"处理后self.talking_message{self.talking_message}") # print(f"处理后self.talking_message{self.talking_message}")
self.talking_message_str = await build_readable_messages(self.talking_message) self.talking_message_str = await build_readable_messages(messages=self.talking_message, timestamp_mode="normal")
logger.trace( logger.trace(
f"Chat {self.chat_id} - 压缩早期记忆:{self.mid_memory_info}\n现在聊天内容:{self.talking_message_str}" f"Chat {self.chat_id} - 压缩早期记忆:{self.mid_memory_info}\n现在聊天内容:{self.talking_message_str}"

View File

@ -4,22 +4,20 @@ from src.plugins.moods.moods import MoodManager
from src.plugins.models.utils_model import LLMRequest from src.plugins.models.utils_model import LLMRequest
from src.config.config import global_config from src.config.config import global_config
import time import time
from typing import Optional from typing import Optional, List, Dict
from datetime import datetime
import traceback import traceback
from src.plugins.chat.utils import parse_text_timestamps from src.plugins.chat.utils import parse_text_timestamps
import enum
# from src.plugins.schedule.schedule_generator import bot_schedule
# from src.plugins.memory_system.Hippocampus import HippocampusManager
from src.common.logger import get_module_logger, LogConfig, SUB_HEARTFLOW_STYLE_CONFIG # noqa: E402 from src.common.logger import get_module_logger, LogConfig, SUB_HEARTFLOW_STYLE_CONFIG # noqa: E402
# from src.plugins.chat.utils import get_embedding
# from src.common.database import db
# from typing import Union
from src.individuality.individuality import Individuality from src.individuality.individuality import Individuality
import random import random
from src.plugins.person_info.relationship_manager import relationship_manager from src.plugins.person_info.relationship_manager import relationship_manager
from ..plugins.utils.prompt_builder import Prompt, global_prompt_manager from ..plugins.utils.prompt_builder import Prompt, global_prompt_manager
from src.plugins.chat.message import MessageRecv
import math
# 定义常量 (从 interest.py 移动过来)
MAX_INTEREST = 15.0
subheartflow_config = LogConfig( subheartflow_config = LogConfig(
# 使用海马体专用样式 # 使用海马体专用样式
@ -28,6 +26,12 @@ subheartflow_config = LogConfig(
) )
logger = get_module_logger("subheartflow", config=subheartflow_config) logger = get_module_logger("subheartflow", config=subheartflow_config)
interest_log_config = LogConfig(
console_format=SUB_HEARTFLOW_STYLE_CONFIG["console_format"],
file_format=SUB_HEARTFLOW_STYLE_CONFIG["file_format"],
)
interest_logger = get_module_logger("InterestChatting", config=interest_log_config)
def init_prompt(): def init_prompt():
prompt = "" prompt = ""
@ -49,25 +53,178 @@ def init_prompt():
Prompt(prompt, "sub_heartflow_prompt_before") Prompt(prompt, "sub_heartflow_prompt_before")
class CurrentState: class ChatState(enum.Enum):
ABSENT = "不参与"
CHAT = "闲聊"
FOCUSED = "专注"
class ChatStateInfo:
def __init__(self): def __init__(self):
self.willing = 0 self.willing = 0
self.current_state_info = ""
self.chat_status: ChatState = ChatState.ABSENT
self.mood_manager = MoodManager() self.mood_manager = MoodManager()
self.mood = self.mood_manager.get_prompt() self.mood = self.mood_manager.get_prompt()
def update_current_state_info(self): def update_chat_state_info(self):
self.current_state_info = self.mood_manager.get_current_mood() self.chat_state_info = self.mood_manager.get_current_mood()
base_reply_probability = 0.05
probability_increase_rate_per_second = 0.08
max_reply_probability = 1
class InterestChatting:
def __init__(
self,
decay_rate=global_config.default_decay_rate_per_second,
max_interest=MAX_INTEREST,
trigger_threshold=global_config.reply_trigger_threshold,
base_reply_probability=base_reply_probability,
increase_rate=probability_increase_rate_per_second,
decay_factor=global_config.probability_decay_factor_per_second,
max_probability=max_reply_probability,
):
self.interest_level: float = 0.0
self.last_update_time: float = time.time()
self.decay_rate_per_second: float = decay_rate
self.max_interest: float = max_interest
self.last_interaction_time: float = self.last_update_time
self.trigger_threshold: float = trigger_threshold
self.base_reply_probability: float = base_reply_probability
self.probability_increase_rate: float = increase_rate
self.probability_decay_factor: float = decay_factor
self.max_reply_probability: float = max_probability
self.current_reply_probability: float = 0.0
self.is_above_threshold: bool = False
self.interest_dict: Dict[str, tuple[MessageRecv, float, bool]] = {}
def add_interest_dict(self, message: MessageRecv, interest_value: float, is_mentioned: bool):
self.interest_dict[message.message_info.message_id] = (message, interest_value, is_mentioned)
self.last_interaction_time = time.time()
def _calculate_decay(self, current_time: float):
time_delta = current_time - self.last_update_time
if time_delta > 0:
old_interest = self.interest_level
if self.interest_level < 1e-9:
self.interest_level = 0.0
else:
if self.decay_rate_per_second <= 0:
interest_logger.warning(
f"InterestChatting encountered non-positive decay rate: {self.decay_rate_per_second}. Setting interest to 0."
)
self.interest_level = 0.0
elif self.interest_level < 0:
interest_logger.warning(
f"InterestChatting encountered negative interest level: {self.interest_level}. Setting interest to 0."
)
self.interest_level = 0.0
else:
try:
decay_factor = math.pow(self.decay_rate_per_second, time_delta)
self.interest_level *= decay_factor
except ValueError as e:
interest_logger.error(
f"Math error during decay calculation: {e}. Rate: {self.decay_rate_per_second}, Delta: {time_delta}, Level: {self.interest_level}. Setting interest to 0."
)
self.interest_level = 0.0
if old_interest != self.interest_level:
self.last_update_time = current_time
def _update_reply_probability(self, current_time: float):
time_delta = current_time - self.last_update_time
if time_delta <= 0:
return
currently_above = self.interest_level >= self.trigger_threshold
if currently_above:
if not self.is_above_threshold:
self.current_reply_probability = self.base_reply_probability
interest_logger.debug(
f"兴趣跨过阈值 ({self.trigger_threshold}). 概率重置为基础值: {self.base_reply_probability:.4f}"
)
else:
increase_amount = self.probability_increase_rate * time_delta
self.current_reply_probability += increase_amount
self.current_reply_probability = min(self.current_reply_probability, self.max_reply_probability)
else:
if 0 < self.probability_decay_factor < 1:
decay_multiplier = math.pow(self.probability_decay_factor, time_delta)
self.current_reply_probability *= decay_multiplier
if self.current_reply_probability < 1e-6:
self.current_reply_probability = 0.0
elif self.probability_decay_factor <= 0:
if self.current_reply_probability > 0:
interest_logger.warning(f"无效的衰减因子 ({self.probability_decay_factor}). 设置概率为0.")
self.current_reply_probability = 0.0
self.current_reply_probability = max(self.current_reply_probability, 0.0)
self.is_above_threshold = currently_above
def increase_interest(self, current_time: float, value: float):
self._update_reply_probability(current_time)
self._calculate_decay(current_time)
self.interest_level += value
self.interest_level = min(self.interest_level, self.max_interest)
self.last_update_time = current_time
self.last_interaction_time = current_time
def decrease_interest(self, current_time: float, value: float):
self._update_reply_probability(current_time)
self.interest_level -= value
self.interest_level = max(self.interest_level, 0.0)
self.last_update_time = current_time
self.last_interaction_time = current_time
def get_interest(self) -> float:
current_time = time.time()
self._update_reply_probability(current_time)
self._calculate_decay(current_time)
self.last_update_time = current_time
return self.interest_level
def get_state(self) -> dict:
interest = self.get_interest()
return {
"interest_level": round(interest, 2),
"last_update_time": self.last_update_time,
"current_reply_probability": round(self.current_reply_probability, 4),
"is_above_threshold": self.is_above_threshold,
"last_interaction_time": self.last_interaction_time,
}
def should_evaluate_reply(self) -> bool:
current_time = time.time()
self._update_reply_probability(current_time)
if self.current_reply_probability > 0:
trigger = random.random() < self.current_reply_probability
return trigger
else:
return False
class SubHeartflow: class SubHeartflow:
def __init__(self, subheartflow_id): def __init__(self, subheartflow_id):
self.subheartflow_id = subheartflow_id self.subheartflow_id = subheartflow_id
self.current_mind = "" self.current_mind = "你什么也没想"
self.past_mind = [] self.past_mind = []
self.current_state: CurrentState = CurrentState() self.chat_state: ChatStateInfo = ChatStateInfo()
self.interest_chatting = InterestChatting()
self.llm_model = LLMRequest( self.llm_model = LLMRequest(
model=global_config.llm_sub_heartflow, model=global_config.llm_sub_heartflow,
temperature=global_config.llm_sub_heartflow["temp"], temperature=global_config.llm_sub_heartflow["temp"],
@ -77,15 +234,13 @@ class SubHeartflow:
self.main_heartflow_info = "" self.main_heartflow_info = ""
self.last_reply_time = time.time()
self.last_active_time = time.time() # 添加最后激活时间 self.last_active_time = time.time() # 添加最后激活时间
self.should_stop = False # 添加停止标志
if not self.current_mind: self.task: Optional[asyncio.Task] = None # 添加 task 属性
self.current_mind = "你什么也没想"
self.is_active = False self.is_active = False
self.observations: list[ChattingObservation] = [] self.observations: List[ChattingObservation] = [] # 使用 List 类型提示
self.running_knowledges = [] self.running_knowledges = []
@ -93,19 +248,13 @@ class SubHeartflow:
async def subheartflow_start_working(self): async def subheartflow_start_working(self):
while True: while True:
current_time = time.time()
# --- 调整后台任务逻辑 --- # # --- 调整后台任务逻辑 --- #
# 这个后台循环现在主要负责检查是否需要自我销毁 # 这个后台循环现在主要负责检查是否需要自我销毁
# 不再主动进行思考或状态更新,这些由 HeartFC_Chat 驱动 # 不再主动进行思考或状态更新,这些由 HeartFC_Chat 驱动
# 检查是否超过指定时间没有激活 (例如,没有被调用进行思考) # 检查是否被主心流标记为停止
if current_time - self.last_active_time > global_config.sub_heart_flow_stop_time: # 例如 5 分钟 if self.should_stop:
logger.info( logger.info(f"子心流 {self.subheartflow_id} 被标记为停止,正在退出后台任务...")
f"子心流 {self.subheartflow_id} 超过 {global_config.sub_heart_flow_stop_time} 秒没有激活,正在销毁..."
f" (Last active: {datetime.fromtimestamp(self.last_active_time).strftime('%Y-%m-%d %H:%M:%S')})"
)
# 在这里添加实际的销毁逻辑,例如从主 Heartflow 管理器中移除自身
# heartflow.remove_subheartflow(self.subheartflow_id) # 假设有这样的方法
break # 退出循环以停止任务 break # 退出循环以停止任务
await asyncio.sleep(global_config.sub_heart_flow_update_interval) # 定期检查销毁条件 await asyncio.sleep(global_config.sub_heart_flow_update_interval) # 定期检查销毁条件
@ -132,7 +281,7 @@ class SubHeartflow:
self.last_active_time = time.time() # 更新最后激活时间戳 self.last_active_time = time.time() # 更新最后激活时间戳
current_thinking_info = self.current_mind current_thinking_info = self.current_mind
mood_info = self.current_state.mood mood_info = self.chat_state.mood
observation = self._get_primary_observation() observation = self._get_primary_observation()
# --- 获取观察信息 --- # # --- 获取观察信息 --- #
@ -264,6 +413,26 @@ class SubHeartflow:
logger.warning(f"SubHeartflow {self.subheartflow_id} 没有找到有效的 ChattingObservation") logger.warning(f"SubHeartflow {self.subheartflow_id} 没有找到有效的 ChattingObservation")
return None return None
def get_interest_state(self) -> dict:
"""获取当前兴趣状态"""
return self.interest_chatting.get_state()
def get_interest_level(self) -> float:
"""获取当前兴趣等级"""
return self.interest_chatting.get_interest()
def should_evaluate_reply(self) -> bool:
"""判断是否应该评估回复"""
return self.interest_chatting.should_evaluate_reply()
def add_interest_dict_entry(self, message: MessageRecv, interest_value: float, is_mentioned: bool):
"""添加兴趣字典条目"""
self.interest_chatting.add_interest_dict(message, interest_value, is_mentioned)
def get_interest_dict(self) -> Dict[str, tuple[MessageRecv, float, bool]]:
"""获取兴趣字典"""
return self.interest_chatting.interest_dict
init_prompt() init_prompt()
# subheartflow = SubHeartflow() # subheartflow = SubHeartflow()

View File

@ -105,3 +105,4 @@ class Individuality:
return self.personality.agreeableness return self.personality.agreeableness
elif factor == "neuroticism": elif factor == "neuroticism":
return self.personality.neuroticism return self.personality.neuroticism
return None

View File

@ -17,8 +17,7 @@ from .common.logger import get_module_logger
from .plugins.remote import heartbeat_thread # noqa: F401 from .plugins.remote import heartbeat_thread # noqa: F401
from .individuality.individuality import Individuality from .individuality.individuality import Individuality
from .common.server import global_server from .common.server import global_server
from .plugins.chat_module.heartFC_chat.interest import InterestManager from .plugins.chat_module.heartFC_chat.heartFC_controler import HeartFCController
from .plugins.chat_module.heartFC_chat.heartFC_controler import HeartFC_Controller
logger = get_module_logger("main") logger = get_module_logger("main")
@ -112,14 +111,9 @@ class MainSystem:
asyncio.create_task(heartflow.heartflow_start_working()) asyncio.create_task(heartflow.heartflow_start_working())
logger.success("心流系统启动成功") logger.success("心流系统启动成功")
# 启动 InterestManager 的后台任务 # 初始化并独立启动 HeartFCController
interest_manager = InterestManager() # 获取单例 HeartFCController()
await interest_manager.start_background_tasks() heartfc_chat_instance = HeartFCController.get_instance()
logger.success("兴趣管理器后台任务启动成功")
# 初始化并独立启动 HeartFC_Chat
HeartFC_Controller()
heartfc_chat_instance = HeartFC_Controller.get_instance()
if heartfc_chat_instance: if heartfc_chat_instance:
await heartfc_chat_instance.start() await heartfc_chat_instance.start()
logger.success("HeartFC_Chat 模块独立启动成功") logger.success("HeartFC_Chat 模块独立启动成功")

View File

@ -180,6 +180,7 @@ class Conversation:
"time": datetime.datetime.now().strftime("%H:%M:%S"), "time": datetime.datetime.now().strftime("%H:%M:%S"),
} }
) )
return None
elif action == "fetch_knowledge": elif action == "fetch_knowledge":
self.waiter.wait_accumulated_time = 0 self.waiter.wait_accumulated_time = 0
@ -193,28 +194,35 @@ class Conversation:
if knowledge: if knowledge:
if topic not in self.conversation_info.knowledge_list: if topic not in self.conversation_info.knowledge_list:
self.conversation_info.knowledge_list.append({"topic": topic, "knowledge": knowledge}) self.conversation_info.knowledge_list.append({"topic": topic, "knowledge": knowledge})
return None
else: else:
self.conversation_info.knowledge_list[topic] += knowledge self.conversation_info.knowledge_list[topic] += knowledge
return None
return None
elif action == "rethink_goal": elif action == "rethink_goal":
self.waiter.wait_accumulated_time = 0 self.waiter.wait_accumulated_time = 0
self.state = ConversationState.RETHINKING self.state = ConversationState.RETHINKING
await self.goal_analyzer.analyze_goal(conversation_info, observation_info) await self.goal_analyzer.analyze_goal(conversation_info, observation_info)
return None
elif action == "listening": elif action == "listening":
self.state = ConversationState.LISTENING self.state = ConversationState.LISTENING
logger.info("倾听对方发言...") logger.info("倾听对方发言...")
await self.waiter.wait_listening(conversation_info) await self.waiter.wait_listening(conversation_info)
return None
elif action == "end_conversation": elif action == "end_conversation":
self.should_continue = False self.should_continue = False
logger.info("决定结束对话...") logger.info("决定结束对话...")
return None
else: # wait else: # wait
self.state = ConversationState.WAITING self.state = ConversationState.WAITING
logger.info("等待更多信息...") logger.info("等待更多信息...")
await self.waiter.wait(self.conversation_info) await self.waiter.wait(self.conversation_info)
return None
async def _send_timeout_message(self): async def _send_timeout_message(self):
"""发送超时结束消息""" """发送超时结束消息"""

View File

@ -151,7 +151,7 @@ class ReplyGenerator:
return content return content
except Exception as e: except Exception as e:
logger.error(f"生成回复时出错: {e}") logger.error(f"生成回复时出错: {str(e)}")
return "抱歉,我现在有点混乱,让我重新思考一下..." return "抱歉,我现在有点混乱,让我重新思考一下..."
async def check_reply(self, reply: str, goal: str, retry_count: int = 0) -> Tuple[bool, str, bool]: async def check_reply(self, reply: str, goal: str, retry_count: int = 0) -> Tuple[bool, str, bool]:

View File

@ -7,7 +7,7 @@ from ..chat_module.only_process.only_message_process import MessageProcessor
from src.common.logger import get_module_logger, CHAT_STYLE_CONFIG, LogConfig from src.common.logger import get_module_logger, CHAT_STYLE_CONFIG, LogConfig
from ..chat_module.reasoning_chat.reasoning_chat import ReasoningChat from ..chat_module.reasoning_chat.reasoning_chat import ReasoningChat
from ..chat_module.heartFC_chat.heartFC_processor import HeartFC_Processor from ..chat_module.heartFC_chat.heartFC_processor import HeartFCProcessor
from ..utils.prompt_builder import Prompt, global_prompt_manager from ..utils.prompt_builder import Prompt, global_prompt_manager
import traceback import traceback
@ -27,9 +27,8 @@ class ChatBot:
self.bot = None # bot 实例引用 self.bot = None # bot 实例引用
self._started = False self._started = False
self.mood_manager = MoodManager.get_instance() # 获取情绪管理器单例 self.mood_manager = MoodManager.get_instance() # 获取情绪管理器单例
self.mood_manager.start_mood_update() # 启动情绪更新
self.reasoning_chat = ReasoningChat() self.reasoning_chat = ReasoningChat()
self.heartFC_processor = HeartFC_Processor() # 新增 self.heartFC_processor = HeartFCProcessor() # 新增
# 创建初始化PFC管理器的任务会在_ensure_started时执行 # 创建初始化PFC管理器的任务会在_ensure_started时执行
self.only_process_chat = MessageProcessor() self.only_process_chat = MessageProcessor()
@ -105,53 +104,24 @@ class ChatBot:
template_group_name = None template_group_name = None
async def preprocess(): async def preprocess():
if global_config.enable_pfc_chatting: if groupinfo is None:
try: if global_config.enable_friend_chat:
if groupinfo is None: if global_config.enable_pfc_chatting:
if global_config.enable_friend_chat: userinfo = message.message_info.user_info
userinfo = message.message_info.user_info messageinfo = message.message_info
messageinfo = message.message_info # 创建聊天流
# 创建聊天流 chat = await chat_manager.get_or_create_stream(
chat = await chat_manager.get_or_create_stream( platform=messageinfo.platform,
platform=messageinfo.platform, user_info=userinfo,
user_info=userinfo, group_info=groupinfo,
group_info=groupinfo, )
) message.update_chat_stream(chat)
message.update_chat_stream(chat) await self.only_process_chat.process_message(message)
await self.only_process_chat.process_message(message) await self._create_pfc_chat(message)
await self._create_pfc_chat(message)
else: else:
if groupinfo.group_id in global_config.talk_allowed_groups: await self.heartFC_processor.process_message(message_data)
# logger.debug(f"开始群聊模式{str(message_data)[:50]}...")
if global_config.response_mode == "heart_flow":
# logger.info(f"启动最新最好的思维流FC模式{str(message_data)[:50]}...")
await self.heartFC_processor.process_message(message_data)
elif global_config.response_mode == "reasoning":
# logger.debug(f"开始推理模式{str(message_data)[:50]}...")
await self.reasoning_chat.process_message(message_data)
else:
logger.error(f"未知的回复模式,请检查配置文件!!: {global_config.response_mode}")
except Exception as e:
logger.error(f"处理PFC消息失败: {e}")
else: else:
if groupinfo is None: await self.heartFC_processor.process_message(message_data)
if global_config.enable_friend_chat:
# 私聊处理流程
# await self._handle_private_chat(message)
if global_config.response_mode == "heart_flow":
await self.heartFC_processor.process_message(message_data)
elif global_config.response_mode == "reasoning":
await self.reasoning_chat.process_message(message_data)
else:
logger.error(f"未知的回复模式,请检查配置文件!!: {global_config.response_mode}")
else: # 群聊处理
if groupinfo.group_id in global_config.talk_allowed_groups:
if global_config.response_mode == "heart_flow":
await self.heartFC_processor.process_message(message_data)
elif global_config.response_mode == "reasoning":
await self.reasoning_chat.process_message(message_data)
else:
logger.error(f"未知的回复模式,请检查配置文件!!: {global_config.response_mode}")
if template_group_name: if template_group_name:
async with global_prompt_manager.async_message_scope(template_group_name): async with global_prompt_manager.async_message_scope(template_group_name):

View File

@ -1,14 +1,13 @@
import time import time
from dataclasses import dataclass from dataclasses import dataclass
from typing import Dict, List, Optional from typing import Dict, List, Optional, Union
import urllib3 import urllib3
from .utils_image import image_manager
from ..message.message_base import Seg, UserInfo, BaseMessageInfo, MessageBase
from .chat_stream import ChatStream
from src.common.logger import get_module_logger from src.common.logger import get_module_logger
from .chat_stream import ChatStream
from .utils_image import image_manager
from ..message.message_base import Seg, UserInfo, BaseMessageInfo, MessageBase
logger = get_module_logger("chat_message") logger = get_module_logger("chat_message")
@ -207,7 +206,7 @@ class MessageProcessBase(Message):
# 处理单个消息段 # 处理单个消息段
return await self._process_single_segment(segment) return await self._process_single_segment(segment)
async def _process_single_segment(self, seg: Seg) -> str: async def _process_single_segment(self, seg: Seg) -> Union[str, None]:
"""处理单个消息段 """处理单个消息段
Args: Args:
@ -233,6 +232,7 @@ class MessageProcessBase(Message):
elif seg.type == "reply": elif seg.type == "reply":
if self.reply and hasattr(self.reply, "processed_plain_text"): if self.reply and hasattr(self.reply, "processed_plain_text"):
return f"[回复:{self.reply.processed_plain_text}]" return f"[回复:{self.reply.processed_plain_text}]"
return None
else: else:
return f"[{seg.type}:{str(seg.data)}]" return f"[{seg.type}:{str(seg.data)}]"
except Exception as e: except Exception as e:
@ -309,10 +309,7 @@ class MessageSending(MessageProcessBase):
def set_reply(self, reply: Optional["MessageRecv"] = None) -> None: def set_reply(self, reply: Optional["MessageRecv"] = None) -> None:
"""设置回复消息""" """设置回复消息"""
if ( if self.message_info.format_info is not None and "reply" in self.message_info.format_info.accept_format:
self.message_info.format_info.accept_format is not None
and "reply" in self.message_info.format_info.accept_format
):
if reply: if reply:
self.reply = reply self.reply = reply
if self.reply: if self.reply:

View File

@ -2,7 +2,7 @@ import random
import time import time
import re import re
from collections import Counter from collections import Counter
from typing import Dict, List from typing import Dict, List, Optional
import jieba import jieba
import numpy as np import numpy as np
@ -76,18 +76,20 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]:
else: else:
if not is_mentioned: if not is_mentioned:
# 判断是否被回复 # 判断是否被回复
if re.match("回复[\s\S]*?\((\d+)\)的消息,说:", message.processed_plain_text): if re.match(
f"\[回复 [\s\S]*?\({str(global_config.BOT_QQ)}\)[\s\S]*?\],说:", message.processed_plain_text
):
is_mentioned = True is_mentioned = True
else:
# 判断内容中是否被提及 # 判断内容中是否被提及
message_content = re.sub(r"@[\s\S]*?(\d+)", "", message.processed_plain_text) message_content = re.sub(r"@[\s\S]*?(\d+)", "", message.processed_plain_text)
message_content = re.sub(r"回复[\s\S]*?\((\d+)\)的消息,说: ", "", message_content) message_content = re.sub(r"\[回复 [\s\S]*?\(((\d+)|未知id)\)[\s\S]*?\],说:", "", message_content)
for keyword in keywords: for keyword in keywords:
if keyword in message_content: if keyword in message_content:
is_mentioned = True is_mentioned = True
for nickname in nicknames: for nickname in nicknames:
if nickname in message_content: if nickname in message_content:
is_mentioned = True is_mentioned = True
if is_mentioned and global_config.mentioned_bot_inevitable_reply: if is_mentioned and global_config.mentioned_bot_inevitable_reply:
reply_probability = 1.0 reply_probability = 1.0
logger.info("被提及回复概率设置为100%") logger.info("被提及回复概率设置为100%")
@ -688,7 +690,7 @@ def count_messages_between(start_time: float, end_time: float, stream_id: str) -
return 0, 0 return 0, 0
def translate_timestamp_to_human_readable(timestamp: float, mode: str = "normal") -> str: def translate_timestamp_to_human_readable(timestamp: float, mode: str = "normal") -> Optional[str]:
"""将时间戳转换为人类可读的时间格式 """将时间戳转换为人类可读的时间格式
Args: Args:
@ -716,6 +718,7 @@ def translate_timestamp_to_human_readable(timestamp: float, mode: str = "normal"
return f"{int(diff / 86400)}天前:\n" return f"{int(diff / 86400)}天前:\n"
else: else:
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timestamp)) + ":\n" return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timestamp)) + ":\n"
return None
def parse_text_timestamps(text: str, mode: str = "normal") -> str: def parse_text_timestamps(text: str, mode: str = "normal") -> str:

View File

@ -1,82 +1,108 @@
import traceback import traceback
from typing import Optional, Dict from typing import Optional, Dict
import asyncio import asyncio
from asyncio import Lock import threading # 导入 threading
from ...moods.moods import MoodManager from ...moods.moods import MoodManager
from ...chat.emoji_manager import emoji_manager from ...chat.emoji_manager import emoji_manager
from .heartFC_generator import ResponseGenerator from .heartFC_generator import ResponseGenerator
from .messagesender import MessageManager from .messagesender import MessageManager
from src.heart_flow.heartflow import heartflow from src.heart_flow.heartflow import heartflow
from src.heart_flow.sub_heartflow import SubHeartflow, ChatState
from src.common.logger import get_module_logger, CHAT_STYLE_CONFIG, LogConfig from src.common.logger import get_module_logger, CHAT_STYLE_CONFIG, LogConfig
from src.plugins.person_info.relationship_manager import relationship_manager from src.plugins.person_info.relationship_manager import relationship_manager
from src.do_tool.tool_use import ToolUser from src.do_tool.tool_use import ToolUser
from .interest import InterestManager
from src.plugins.chat.chat_stream import chat_manager from src.plugins.chat.chat_stream import chat_manager
from .pf_chatting import PFChatting from .pf_chatting import PFChatting
# 定义日志配置 # 定义日志配置
chat_config = LogConfig( chat_config = LogConfig(
console_format=CHAT_STYLE_CONFIG["console_format"], console_format=CHAT_STYLE_CONFIG["console_format"],
file_format=CHAT_STYLE_CONFIG["file_format"], file_format=CHAT_STYLE_CONFIG["file_format"],
) )
logger = get_module_logger("HeartFC_Controller", config=chat_config) logger = get_module_logger("HeartFCController", config=chat_config)
# 检测群聊兴趣的间隔时间 # 检测群聊兴趣的间隔时间
INTEREST_MONITOR_INTERVAL_SECONDS = 1 INTEREST_MONITOR_INTERVAL_SECONDS = 1
class HeartFC_Controller: # 合并后的版本:使用 __new__ + threading.Lock 实现线程安全单例,类名为 HeartFCController
_instance = None # For potential singleton access if needed by MessageManager class HeartFCController:
_instance = None
_lock = threading.Lock() # 使用 threading.Lock 保证 __new__ 线程安全
_initialized = False
def __init__(self): def __new__(cls, *args, **kwargs):
# --- Updated Init ---
if HeartFC_Controller._instance is not None:
# Prevent re-initialization if used as a singleton
return
self.gpt = ResponseGenerator()
self.mood_manager = MoodManager.get_instance()
self.mood_manager.start_mood_update()
self.tool_user = ToolUser()
self.interest_manager = InterestManager()
self._interest_monitor_task: Optional[asyncio.Task] = None
# --- New PFChatting Management ---
self.pf_chatting_instances: Dict[str, PFChatting] = {}
self._pf_chatting_lock = Lock()
# --- End New PFChatting Management ---
HeartFC_Controller._instance = self # Register instance
# --- End Updated Init ---
# --- Make dependencies accessible for PFChatting ---
# These are accessed via the passed instance in PFChatting
self.emoji_manager = emoji_manager
self.relationship_manager = relationship_manager
self.MessageManager = MessageManager # Pass the class/singleton access
# --- End dependencies ---
# --- Added Class Method for Singleton Access ---
@classmethod
def get_instance(cls):
if cls._instance is None: if cls._instance is None:
# This might indicate an issue if called before initialization with cls._lock:
logger.warning("HeartFC_Controller get_instance called before initialization.") # Double-checked locking
# Optionally, initialize here if a strict singleton pattern is desired if cls._instance is None:
# cls._instance = cls() logger.debug("创建 HeartFCController 单例实例...")
cls._instance = super().__new__(cls)
return cls._instance return cls._instance
# --- End Added Class Method --- def __init__(self):
# 使用 _initialized 标志确保 __init__ 只执行一次
if self._initialized:
return
self.gpt = ResponseGenerator()
self.mood_manager = MoodManager.get_instance()
self.tool_user = ToolUser()
self._interest_monitor_task: Optional[asyncio.Task] = None
self.heartflow = heartflow
self.pf_chatting_instances: Dict[str, PFChatting] = {}
self._pf_chatting_lock = asyncio.Lock() # 这个是 asyncio.Lock用于异步上下文
self.emoji_manager = emoji_manager # 假设是全局或已初始化的实例
self.relationship_manager = relationship_manager # 假设是全局或已初始化的实例
self.MessageManager = MessageManager
self._initialized = True
logger.info("HeartFCController 单例初始化完成。")
@classmethod
def get_instance(cls):
    """Return the HeartFCController singleton instance, creating it on first use."""
    # If no instance exists yet, call the constructor (this triggers __new__ and __init__).
    if cls._instance is None:
        # Created lazily on the first get_instance call.
        # The lock inside __new__ guarantees thread safety.
        cls()
        # Log that the instance was created by this get_instance call.
        logger.info("HeartFCController 实例在首次 get_instance 时创建。")
    elif not cls._initialized:
        # Instance exists but initialization did not complete
        # (should only happen if __init__ raised part-way through).
        logger.warning("HeartFCController 实例存在但尚未完成初始化。")
    return cls._instance
# --- 新增:检查 PFChatting 状态的方法 --- #
def is_pf_chatting_active(self, stream_id: str) -> bool:
    """Return True if the PFChatting loop for *stream_id* is currently running.

    Reads ``pf_chatting_instances`` without taking ``_pf_chatting_lock``: a plain
    dict lookup is safe enough here, and ``_loop_active`` is maintained by the
    PFChatting instance's own async loop. Take the lock before the lookup if a
    stronger guarantee is ever needed.
    """
    instance = self.pf_chatting_instances.get(stream_id)
    return bool(instance and instance._loop_active)
# --- 结束新增 --- #
async def start(self): async def start(self):
"""启动异步任务,如回复启动器""" """启动异步任务,如回复启动器"""
logger.debug("HeartFC_Controller 正在启动异步任务...") logger.debug("HeartFCController 正在启动异步任务...")
self._initialize_monitor_task() self._initialize_monitor_task()
logger.info("HeartFC_Controller 异步任务启动完成") logger.info("HeartFCController 异步任务启动完成")
def _initialize_monitor_task(self): def _initialize_monitor_task(self):
"""启动后台兴趣监控任务,可以检查兴趣是否足以开启心流对话""" """启动后台兴趣监控任务,可以检查兴趣是否足以开启心流对话"""
if self._interest_monitor_task is None or self._interest_monitor_task.done(): if self._interest_monitor_task is None or self._interest_monitor_task.done():
try: try:
loop = asyncio.get_running_loop() loop = asyncio.get_running_loop()
self._interest_monitor_task = loop.create_task(self._interest_monitor_loop()) self._interest_monitor_task = loop.create_task(self._response_control_loop())
except RuntimeError: except RuntimeError:
logger.error("创建兴趣监控任务失败:没有运行中的事件循环。") logger.error("创建兴趣监控任务失败:没有运行中的事件循环。")
raise raise
@ -89,7 +115,7 @@ class HeartFC_Controller:
async with self._pf_chatting_lock: async with self._pf_chatting_lock:
if stream_id not in self.pf_chatting_instances: if stream_id not in self.pf_chatting_instances:
logger.info(f"为流 {stream_id} 创建新的PFChatting实例") logger.info(f"为流 {stream_id} 创建新的PFChatting实例")
# 传递 self (HeartFC_Controller 实例) 进行依赖注入 # 传递 self (HeartFCController 实例) 进行依赖注入
instance = PFChatting(stream_id, self) instance = PFChatting(stream_id, self)
# 执行异步初始化 # 执行异步初始化
if not await instance._initialize(): if not await instance._initialize():
@ -100,41 +126,41 @@ class HeartFC_Controller:
# --- End Added PFChatting Instance Manager --- # --- End Added PFChatting Instance Manager ---
async def _interest_monitor_loop(self): # async def update_mai_Status(self):
# """后台任务,定期检查更新麦麦状态"""
# logger.info("麦麦状态更新循环开始...")
# while True:
# await asyncio.sleep(0)
# self.heartflow.update_chat_status()
async def _response_control_loop(self):
"""后台任务,定期检查兴趣度变化并触发回复""" """后台任务,定期检查兴趣度变化并触发回复"""
logger.info("兴趣监控循环开始...") logger.info("兴趣监控循环开始...")
while True: while True:
await asyncio.sleep(INTEREST_MONITOR_INTERVAL_SECONDS) await asyncio.sleep(INTEREST_MONITOR_INTERVAL_SECONDS)
try: try:
# 从心流中获取活跃流 # 从心流中获取活跃流
active_stream_ids = list(heartflow.get_all_subheartflows_streams_ids()) active_stream_ids = list(self.heartflow.get_all_subheartflows_streams_ids())
for stream_id in active_stream_ids: for stream_id in active_stream_ids:
stream_name = chat_manager.get_stream_name(stream_id) or stream_id # 获取流名称 stream_name = chat_manager.get_stream_name(stream_id) or stream_id # 获取流名称
sub_hf = heartflow.get_subheartflow(stream_id) sub_hf = self.heartflow.get_subheartflow(stream_id)
if not sub_hf: if not sub_hf:
logger.warning(f"监控循环: 无法获取活跃流 {stream_name} 的 sub_hf") logger.warning(f"监控循环: 无法获取活跃流 {stream_name} 的 sub_hf")
continue continue
should_trigger = False should_trigger_hfc = False
try: try:
interest_chatting = self.interest_manager.get_interest_chatting(stream_id) interest_chatting = sub_hf.interest_chatting
if interest_chatting: should_trigger_hfc = interest_chatting.should_evaluate_reply()
should_trigger = interest_chatting.should_evaluate_reply()
else:
logger.trace(
f"[{stream_name}] 没有找到对应的 InterestChatting 实例,跳过基于兴趣的触发检查。"
)
except Exception as e: except Exception as e:
logger.error(f"检查兴趣触发器时出错 流 {stream_name}: {e}") logger.error(f"检查兴趣触发器时出错 流 {stream_name}: {e}")
logger.error(traceback.format_exc()) logger.error(traceback.format_exc())
if should_trigger: if should_trigger_hfc:
# 启动一次麦麦聊天 # 启动一次麦麦聊天
pf_instance = await self._get_or_create_pf_chatting(stream_id) await self._trigger_hfc(sub_hf)
if pf_instance:
asyncio.create_task(pf_instance.add_time())
else:
logger.error(f"[{stream_name}] 无法获取或创建PFChatting实例。跳过触发。")
except asyncio.CancelledError: except asyncio.CancelledError:
logger.info("兴趣监控循环已取消。") logger.info("兴趣监控循环已取消。")
@ -143,3 +169,17 @@ class HeartFC_Controller:
logger.error(f"兴趣监控循环错误: {e}") logger.error(f"兴趣监控循环错误: {e}")
logger.error(traceback.format_exc()) logger.error(traceback.format_exc())
await asyncio.sleep(5) # 发生错误时等待 await asyncio.sleep(5) # 发生错误时等待
async def _trigger_hfc(self, sub_hf: SubHeartflow):
    """Escalate the chat state of *sub_hf* one step and, once FOCUSED, kick off a PFChatting cycle."""
    chat_state = sub_hf.chat_state
    if chat_state == ChatState.ABSENT:
        chat_state = ChatState.CHAT
    elif chat_state == ChatState.CHAT:
        chat_state = ChatState.FOCUSED
    # NOTE(review): only the local variable is escalated; sub_hf.chat_state is never
    # written back, so an ABSENT stream advances to CHAT here but that progress is
    # discarded and it can never reach FOCUSED on later calls — confirm whether the
    # new state should be persisted on sub_hf.
    # Get the stream_id from sub_hf.
    if chat_state == ChatState.FOCUSED:
        stream_id = sub_hf.subheartflow_id
        pf_instance = await self._get_or_create_pf_chatting(stream_id)
        if pf_instance:  # make sure the instance was obtained or created successfully
            asyncio.create_task(pf_instance.add_time())

View File

@ -11,8 +11,8 @@ from src.common.logger import get_module_logger, CHAT_STYLE_CONFIG, LogConfig
from ...chat.chat_stream import chat_manager from ...chat.chat_stream import chat_manager
from ...chat.message_buffer import message_buffer from ...chat.message_buffer import message_buffer
from ...utils.timer_calculater import Timer from ...utils.timer_calculater import Timer
from .interest import InterestManager
from src.plugins.person_info.relationship_manager import relationship_manager from src.plugins.person_info.relationship_manager import relationship_manager
from .reasoning_chat import ReasoningChat
# 定义日志配置 # 定义日志配置
processor_config = LogConfig( processor_config = LogConfig(
@ -21,15 +21,11 @@ processor_config = LogConfig(
) )
logger = get_module_logger("heartFC_processor", config=processor_config) logger = get_module_logger("heartFC_processor", config=processor_config)
# # 定义兴趣度增加触发回复的阈值 (移至 InterestManager)
# INTEREST_INCREASE_THRESHOLD = 0.5
class HeartFCProcessor:
class HeartFC_Processor:
def __init__(self): def __init__(self):
self.storage = MessageStorage() self.storage = MessageStorage()
self.interest_manager = InterestManager() self.reasoning_chat = ReasoningChat.get_instance()
# self.chat_instance = chat_instance # 持有 HeartFC_Chat 实例
async def process_message(self, message_data: str) -> None: async def process_message(self, message_data: str) -> None:
"""处理接收到的原始消息数据,完成消息解析、缓冲、过滤、存储、兴趣度计算与更新等核心流程。 """处理接收到的原始消息数据,完成消息解析、缓冲、过滤、存储、兴趣度计算与更新等核心流程。
@ -72,12 +68,18 @@ class HeartFC_Processor:
user_info=userinfo, user_info=userinfo,
group_info=groupinfo, group_info=groupinfo,
) )
if not chat:
logger.error( # --- 确保 SubHeartflow 存在 ---
f"无法为消息创建或获取聊天流: user {userinfo.user_id}, group {groupinfo.group_id if groupinfo else 'None'}" subheartflow = await heartflow.create_subheartflow(chat.stream_id)
) if not subheartflow:
logger.error(f"无法为 stream_id {chat.stream_id} 创建或获取 SubHeartflow中止处理")
return return
# --- 添加兴趣追踪启动 (现在移动到这里,确保 subheartflow 存在后启动) ---
# 在获取到 chat 对象和确认 subheartflow 后,启动对该聊天流的兴趣监控
await self.reasoning_chat.start_monitoring_interest(chat) # start_monitoring_interest 内部需要修改以适应
# --- 结束添加 ---
message.update_chat_stream(chat) message.update_chat_stream(chat)
await heartflow.create_subheartflow(chat.stream_id) await heartflow.create_subheartflow(chat.stream_id)
@ -90,28 +92,27 @@ class HeartFC_Processor:
message.raw_message, chat, userinfo message.raw_message, chat, userinfo
): ):
return return
logger.trace(f"过滤词/正则表达式过滤成功: {message.processed_plain_text}")
# 查询缓冲器结果 # 查询缓冲器结果
buffer_result = await message_buffer.query_buffer_result(message) buffer_result = await message_buffer.query_buffer_result(message)
# 处理缓冲器结果 (Bombing logic) # 处理缓冲器结果 (Bombing logic)
if not buffer_result: if not buffer_result:
F_type = "seglist" f_type = "seglist"
if message.message_segment.type != "seglist": if message.message_segment.type != "seglist":
F_type = message.message_segment.type f_type = message.message_segment.type
else: else:
if ( if (
isinstance(message.message_segment.data, list) isinstance(message.message_segment.data, list)
and all(isinstance(x, Seg) for x in message.message_segment.data) and all(isinstance(x, Seg) for x in message.message_segment.data)
and len(message.message_segment.data) == 1 and len(message.message_segment.data) == 1
): ):
F_type = message.message_segment.data[0].type f_type = message.message_segment.data[0].type
if F_type == "text": if f_type == "text":
logger.debug(f"触发缓冲,消息:{message.processed_plain_text}") logger.debug(f"触发缓冲,消息:{message.processed_plain_text}")
elif F_type == "image": elif f_type == "image":
logger.debug("触发缓冲,表情包/图片等待中") logger.debug("触发缓冲,表情包/图片等待中")
elif F_type == "seglist": elif f_type == "seglist":
logger.debug("触发缓冲,消息列表等待中") logger.debug("触发缓冲,消息列表等待中")
return # 被缓冲器拦截,不生成回复 return # 被缓冲器拦截,不生成回复
@ -141,21 +142,35 @@ class HeartFC_Processor:
logger.error(f"计算记忆激活率失败: {e}") logger.error(f"计算记忆激活率失败: {e}")
logger.error(traceback.format_exc()) logger.error(traceback.format_exc())
# --- 修改:兴趣度更新逻辑 --- #
if is_mentioned: if is_mentioned:
interested_rate += 0.8 interest_increase_on_mention = 2
mentioned_boost = interest_increase_on_mention # 从配置获取提及增加值
interested_rate += mentioned_boost
logger.trace(f"消息提及机器人,额外增加兴趣 {mentioned_boost:.2f}")
# 更新兴趣度 # 更新兴趣度 (调用 SubHeartflow 的方法)
current_interest = 0.0 # 初始化
try: try:
self.interest_manager.increase_interest(chat.stream_id, value=interested_rate) # 获取当前时间,传递给 increase_interest
current_interest = self.interest_manager.get_interest(chat.stream_id) # 获取更新后的值用于日志 current_time = time.time()
subheartflow.interest_chatting.increase_interest(current_time, value=interested_rate)
current_interest = subheartflow.get_interest_level() # 获取更新后的值
logger.trace( logger.trace(
f"使用激活率 {interested_rate:.2f} 更新后 (通过缓冲后),当前兴趣度: {current_interest:.2f}" f"使用激活率 {interested_rate:.2f} 更新后 (通过缓冲后),当前兴趣度: {current_interest:.2f} (Stream: {chat.stream_id})"
)
# 添加到 SubHeartflow 的 interest_dict
subheartflow.add_interest_dict_entry(message, interested_rate, is_mentioned)
logger.trace(
f"Message {message.message_info.message_id} added to interest dict for stream {chat.stream_id}"
) )
except Exception as e: except Exception as e:
logger.error(f"更新兴趣度失败: {e}") # 调整日志消息 logger.error(f"更新兴趣度失败 (Stream: {chat.stream_id}): {e}")
logger.error(traceback.format_exc()) logger.error(traceback.format_exc())
# ---- 兴趣度计算和更新结束 ---- # --- 结束修改 --- #
# 打印消息接收和处理信息 # 打印消息接收和处理信息
mes_name = chat.group_info.group_name if chat.group_info else "私聊" mes_name = chat.group_info.group_name if chat.group_info else "私聊"

View File

@ -1,491 +0,0 @@
import time
import math
import asyncio
import threading
import json # 引入 json
import os # 引入 os
from typing import Optional # <--- 添加导入
import random # <--- 添加导入 random
from src.common.logger import get_module_logger, LogConfig, DEFAULT_CONFIG # 引入 DEFAULT_CONFIG
from src.plugins.chat.chat_stream import chat_manager # *** Import ChatManager ***
# Logging configuration (loguru-style formats).
interest_log_config = LogConfig(
    console_format=DEFAULT_CONFIG["console_format"],  # default console format
    file_format=DEFAULT_CONFIG["file_format"],  # default file format
)
logger = get_module_logger("InterestManager", config=interest_log_config)
# Module constants.
DEFAULT_DECAY_RATE_PER_SECOND = 0.98  # per-second decay multiplier (interest retains 98% each second)
MAX_INTEREST = 15.0  # upper bound on the interest level
# MIN_INTEREST_THRESHOLD = 0.1  # values below this could be cleaned up (optional, unused)
CLEANUP_INTERVAL_SECONDS = 1200  # how often the cleanup task runs (20 minutes)
INACTIVE_THRESHOLD_SECONDS = 1200  # streams idle longer than this are removed (20 minutes)
LOG_INTERVAL_SECONDS = 3  # history-logging interval (3 seconds)
LOG_DIRECTORY = "logs/interest"  # directory for interest logs
# LOG_FILENAME = "interest_log.json"  # legacy snapshot filename (kept in case it is used elsewhere)
HISTORY_LOG_FILENAME = "interest_history.log"  # append-only history log filename
# The reply-trigger threshold moved to HeartFC_Chat:
# INTEREST_INCREASE_THRESHOLD = 0.5
# --- Probabilistic-reply constants ---
REPLY_TRIGGER_THRESHOLD = 3.0  # interest level above which replies may trigger
BASE_REPLY_PROBABILITY = 0.1  # reply probability right after crossing the threshold
PROBABILITY_INCREASE_RATE_PER_SECOND = 0.02  # linear growth per second while above threshold
PROBABILITY_DECAY_FACTOR_PER_SECOND = 0.2  # exponential decay factor per second while below threshold
MAX_REPLY_PROBABILITY = 1  # reply probability cap
# --- End probabilistic-reply constants ---
class InterestChatting:
    """Per-stream interest state.

    Tracks an exponentially decaying interest level plus a reply probability that
    grows linearly while interest stays at or above ``trigger_threshold`` and
    decays exponentially once it drops below. All timestamps are ``time.time()``
    floats; ``last_update_time`` is the shared time base for both the interest
    decay and the probability update, so the order of those two updates matters.
    """

    def __init__(
        self,
        decay_rate=DEFAULT_DECAY_RATE_PER_SECOND,
        max_interest=MAX_INTEREST,
        trigger_threshold=REPLY_TRIGGER_THRESHOLD,
        base_reply_probability=BASE_REPLY_PROBABILITY,
        increase_rate=PROBABILITY_INCREASE_RATE_PER_SECOND,
        decay_factor=PROBABILITY_DECAY_FACTOR_PER_SECOND,
        max_probability=MAX_REPLY_PROBABILITY,
    ):
        self.interest_level: float = 0.0
        # Shared time base for interest decay AND probability updates.
        self.last_update_time: float = time.time()
        self.decay_rate_per_second: float = decay_rate
        self.max_interest: float = max_interest
        self.last_interaction_time: float = self.last_update_time  # last time a message touched this stream
        # --- reply-probability state ---
        self.trigger_threshold: float = trigger_threshold
        self.base_reply_probability: float = base_reply_probability
        self.probability_increase_rate: float = increase_rate
        self.probability_decay_factor: float = decay_factor
        self.max_reply_probability: float = max_probability
        self.current_reply_probability: float = 0.0
        self.is_above_threshold: bool = False  # was interest >= threshold at the last update?
        # --- end reply-probability state ---

    def _calculate_decay(self, current_time: float):
        """Apply exponential decay for the time elapsed since last_update_time."""
        time_delta = current_time - self.last_update_time
        if time_delta > 0:
            # Exponential decay: interest *= decay_rate ** time_delta.
            # Tiny values are snapped to 0 to avoid math domain errors.
            old_interest = self.interest_level
            if self.interest_level < 1e-9:
                self.interest_level = 0.0
            else:
                # A non-positive decay rate would make math.pow misbehave — reset defensively.
                if self.decay_rate_per_second <= 0:
                    logger.warning(
                        f"InterestChatting encountered non-positive decay rate: {self.decay_rate_per_second}. Setting interest to 0."
                    )
                    self.interest_level = 0.0
                # Negative interest should be impossible; reset defensively just in case.
                elif self.interest_level < 0:
                    logger.warning(
                        f"InterestChatting encountered negative interest level: {self.interest_level}. Setting interest to 0."
                    )
                    self.interest_level = 0.0
                else:
                    try:
                        decay_factor = math.pow(self.decay_rate_per_second, time_delta)
                        self.interest_level *= decay_factor
                    except ValueError as e:
                        # Catch residual math domain errors despite the guards above.
                        logger.error(
                            f"Math error during decay calculation: {e}. Rate: {self.decay_rate_per_second}, Delta: {time_delta}, Level: {self.interest_level}. Setting interest to 0."
                        )
                        self.interest_level = 0.0
            # Floor at a minimum threshold if ever needed:
            # self.interest_level = max(self.interest_level, MIN_INTEREST_THRESHOLD)
            # Only advance the timestamp when the value actually changed.
            if old_interest != self.interest_level:
                self.last_update_time = current_time

    def _update_reply_probability(self, current_time: float):
        """Grow or decay the reply probability depending on which side of the threshold interest is on."""
        time_delta = current_time - self.last_update_time
        if time_delta <= 0:
            return  # time has not advanced; nothing to update
        currently_above = self.interest_level >= self.trigger_threshold
        if currently_above:
            if not self.is_above_threshold:
                # Just crossed the threshold: reset to the base probability.
                self.current_reply_probability = self.base_reply_probability
                logger.debug(
                    f"兴趣跨过阈值 ({self.trigger_threshold}). 概率重置为基础值: {self.base_reply_probability:.4f}"
                )
            else:
                # Still above the threshold: probability grows linearly with elapsed time.
                increase_amount = self.probability_increase_rate * time_delta
                self.current_reply_probability += increase_amount
            # Cap at the configured maximum.
            self.current_reply_probability = min(self.current_reply_probability, self.max_reply_probability)
        else:
            if 0 < self.probability_decay_factor < 1:
                decay_multiplier = math.pow(self.probability_decay_factor, time_delta)
                self.current_reply_probability *= decay_multiplier
                # Snap near-zero float residue to exactly 0.
                if self.current_reply_probability < 1e-6:
                    self.current_reply_probability = 0.0
            elif self.probability_decay_factor <= 0:
                # Invalid decay factor: clear the probability outright.
                if self.current_reply_probability > 0:
                    logger.warning(f"无效的衰减因子 ({self.probability_decay_factor}). 设置概率为0.")
                self.current_reply_probability = 0.0
            # else: decay_factor >= 1 — probability neither decays nor grows (may be intended).
        # Never let the probability go negative.
        self.current_reply_probability = max(self.current_reply_probability, 0.0)
        # Remember which side of the threshold we are on.
        self.is_above_threshold = currently_above
        # NOTE: last_update_time is advanced by the caller so that interest and
        # probability are updated from the same reference point.

    def increase_interest(self, current_time: float, value: float):
        """Add *value* to the interest level (capped at max) and record the interaction."""
        # First bring the probability and decay up to date (based on the previous timestamp).
        self._update_reply_probability(current_time)
        self._calculate_decay(current_time)
        # Then apply the increase.
        self.interest_level += value
        self.interest_level = min(self.interest_level, self.max_interest)  # never exceed the cap
        self.last_update_time = current_time  # advance the shared time base
        self.last_interaction_time = current_time  # record the interaction

    def decrease_interest(self, current_time: float, value: float):
        """Subtract *value* from the interest level (floored at 0) and record the interaction."""
        # Bring the probability up to date first (based on the previous timestamp).
        self._update_reply_probability(current_time)
        # NOTE: unlike increase_interest, no decay is applied before subtracting —
        # presumably intentional, but worth confirming.
        self.interest_level -= value
        self.interest_level = max(self.interest_level, 0.0)  # never below 0
        self.last_update_time = current_time  # decreases also advance the time base
        self.last_interaction_time = current_time  # record the interaction

    def get_interest(self) -> float:
        """Return the current interest level after applying decay (side effect: updates state)."""
        # NOTE: this getter mutates state — it refreshes both probability and decay.
        current_time = time.time()
        self._update_reply_probability(current_time)
        self._calculate_decay(current_time)
        self.last_update_time = current_time  # advance the shared time base
        return self.interest_level

    def get_state(self) -> dict:
        """Return a snapshot dict of the current state (refreshes state via get_interest)."""
        # Calling get_interest ensures the values below are up to date.
        interest = self.get_interest()
        return {
            "interest_level": round(interest, 2),
            "last_update_time": self.last_update_time,
            "current_reply_probability": round(self.current_reply_probability, 4),  # include probability
            "is_above_threshold": self.is_above_threshold,  # include threshold flag
            "last_interaction_time": self.last_interaction_time,  # include last interaction time
            # last_increase_amount could be exposed here for debugging if ever tracked.
        }

    def should_evaluate_reply(self) -> bool:
        """
        Decide whether a reply evaluation should be triggered.

        Refreshes the probability state, then draws a uniform random number
        against the current reply probability.
        """
        current_time = time.time()
        # Make sure the probability reflects the latest interest value.
        self._update_reply_probability(current_time)
        # Decay refresh is deliberately left to get_interest; see original design.
        if self.current_reply_probability > 0:
            # Only possible to trigger when the probability is positive.
            trigger = random.random() < self.current_reply_probability
            # Optionally the probability could be reset/halved after a trigger — not done here.
            return trigger
        else:
            return False
class InterestManager:
    """Thread-safe singleton that owns one InterestChatting per chat stream.

    Runs three asyncio background tasks: periodic cleanup of inactive streams,
    periodic history logging to a JSON-lines file, and a per-second decay pass.
    """

    _instance = None
    _lock = threading.Lock()
    _initialized = False

    def __new__(cls, *args, **kwargs):
        # Double-checked locking so only one instance is ever created.
        if cls._instance is None:
            with cls._lock:
                # Double-check locking
                if cls._instance is None:
                    cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self):
        # Guard so that initialization also runs exactly once.
        if not self._initialized:
            with self._lock:
                # Re-check under the lock.
                if not self._initialized:
                    logger.info("Initializing InterestManager singleton...")
                    # key: stream_id (str), value: InterestChatting instance
                    self.interest_dict: dict[str, InterestChatting] = {}
                    # Legacy snapshot path kept for reference; this task no longer writes it:
                    # self._snapshot_log_file_path = os.path.join(LOG_DIRECTORY, LOG_FILENAME)
                    # Path of the append-only history log.
                    self._history_log_file_path = os.path.join(LOG_DIRECTORY, HISTORY_LOG_FILENAME)
                    self._ensure_log_directory()
                    self._cleanup_task = None
                    self._logging_task = None  # handle for the logging task
                    self._initialized = True
                    logger.info("InterestManager initialized.")
                    # NOTE(review): _decay_task is assigned AFTER _initialized is set;
                    # a concurrent reader could briefly see a half-initialized object.
                    self._decay_task = None  # handle for the decay task

    def _ensure_log_directory(self):
        """Create the log directory if it does not already exist."""
        try:
            os.makedirs(LOG_DIRECTORY, exist_ok=True)
            logger.info(f"Log directory '{LOG_DIRECTORY}' ensured.")
        except OSError as e:
            logger.error(f"Error creating log directory '{LOG_DIRECTORY}': {e}")

    async def _periodic_cleanup_task(self, interval_seconds: int, max_age_seconds: int):
        """Background coroutine: periodically remove inactive streams."""
        while True:
            await asyncio.sleep(interval_seconds)
            logger.info(f"运行定期清理 (间隔: {interval_seconds}秒)...")
            self.cleanup_inactive_chats(max_age_seconds=max_age_seconds)

    async def _periodic_log_task(self, interval_seconds: int):
        """Background coroutine: append a history record (including group_name) for every stream."""
        while True:
            await asyncio.sleep(interval_seconds)
            try:
                current_timestamp = time.time()
                all_states = self.get_all_interest_states()  # snapshot of all current states
                # Open the history log in append mode; one JSON object per line.
                with open(self._history_log_file_path, "a", encoding="utf-8") as f:
                    count = 0
                    for stream_id, state in all_states.items():
                        # Resolve a human-readable group name via ChatManager.
                        group_name = stream_id  # default to the raw stream_id
                        try:
                            # Use the imported chat_manager instance.
                            chat_stream = chat_manager.get_stream(stream_id)
                            if chat_stream and chat_stream.group_info:
                                group_name = chat_stream.group_info.group_name
                            elif chat_stream and not chat_stream.group_info:
                                # Private chat: fall back to the user nickname.
                                group_name = (
                                    f"私聊_{chat_stream.user_info.user_nickname}"
                                    if chat_stream.user_info
                                    else stream_id
                                )
                        except Exception as e:
                            logger.warning(f"Could not get group name for stream_id {stream_id}: {e}")
                            # The default value already covers the fallback.
                        log_entry = {
                            "timestamp": round(current_timestamp, 2),
                            "stream_id": stream_id,
                            "interest_level": state.get("interest_level", 0.0),  # defensive default
                            "group_name": group_name,
                            # Probability-related fields:
                            "reply_probability": state.get("current_reply_probability", 0.0),
                            "is_above_threshold": state.get("is_above_threshold", False),
                        }
                        # Write each entry as its own JSON line.
                        f.write(json.dumps(log_entry, ensure_ascii=False) + "\n")
                        count += 1
                # The legacy snapshot file (interest_log.json) is intentionally no longer
                # written; if needed it could be dumped here from all_states.
            except IOError as e:
                logger.error(f"Error writing interest history log to {self._history_log_file_path}: {e}")
            except Exception as e:
                logger.error(f"Unexpected error during periodic history logging: {e}")

    async def _periodic_decay_task(self):
        """Background coroutine: apply interest decay to every stream once per second."""
        while True:
            await asyncio.sleep(1)  # run once per second
            current_time = time.time()
            # Iterate over a snapshot so concurrent dict mutation cannot break the loop.
            items_snapshot = list(self.interest_dict.items())
            count = 0
            for stream_id, chatting in items_snapshot:
                try:
                    # Delegate the decay math to the InterestChatting instance.
                    chatting._calculate_decay(current_time)
                    count += 1
                except Exception as e:
                    logger.error(f"Error calculating decay for stream_id {stream_id}: {e}")
            # Per-iteration debug logging is intentionally disabled to avoid log spam.

    async def start_background_tasks(self):
        """Start the cleanup, history-logging and decay background tasks (each at most once)."""
        if self._cleanup_task is None or self._cleanup_task.done():
            self._cleanup_task = asyncio.create_task(
                self._periodic_cleanup_task(
                    interval_seconds=CLEANUP_INTERVAL_SECONDS, max_age_seconds=INACTIVE_THRESHOLD_SECONDS
                )
            )
            logger.info(
                f"已创建定期清理任务。间隔时间: {CLEANUP_INTERVAL_SECONDS}秒, 不活跃阈值: {INACTIVE_THRESHOLD_SECONDS}"
            )
        else:
            logger.warning("跳过创建清理任务:任务已在运行或存在。")
        if self._logging_task is None or self._logging_task.done():
            self._logging_task = asyncio.create_task(self._periodic_log_task(interval_seconds=LOG_INTERVAL_SECONDS))
            logger.info(f"已创建定期日志任务。间隔时间: {LOG_INTERVAL_SECONDS}")
        else:
            logger.warning("跳过创建日志任务:任务已在运行或存在。")
        # Start the decay task.
        if self._decay_task is None or self._decay_task.done():
            self._decay_task = asyncio.create_task(self._periodic_decay_task())
            logger.info("已创建定期衰减任务。间隔时间: 1秒")
        else:
            logger.warning("跳过创建衰减任务:任务已在运行或存在。")

    def get_all_interest_states(self) -> dict[str, dict]:
        """Return the current interest state of every stream as {stream_id: state_dict}."""
        # get_state refreshes each instance internally, so no timestamp is passed.
        states = {}
        # Iterate over a snapshot to tolerate concurrent dict mutation.
        items_snapshot = list(self.interest_dict.items())
        for stream_id, chatting in items_snapshot:
            try:
                # get_state internally calls get_interest, which refreshes the values.
                states[stream_id] = chatting.get_state()
            except Exception as e:
                logger.warning(f"Error getting state for stream_id {stream_id}: {e}")
        return states

    def get_interest_chatting(self, stream_id: str) -> Optional[InterestChatting]:
        """Return the InterestChatting for *stream_id*, or None if it does not exist."""
        return self.interest_dict.get(stream_id)

    def _get_or_create_interest_chatting(self, stream_id: str) -> InterestChatting:
        """Return the InterestChatting for *stream_id*, creating it on first access."""
        # NOTE(review): despite the original claim of thread safety, this
        # check-then-insert is not guarded by a lock — confirm callers are single-threaded.
        if stream_id not in self.interest_dict:
            logger.debug(f"创建兴趣流: {stream_id}")
            # Pass the probability parameters explicitly (module constants for now;
            # decay_rate / max_interest could also come from configuration).
            self.interest_dict[stream_id] = InterestChatting(
                # decay_rate=..., max_interest=...,  # could be read from config
                trigger_threshold=REPLY_TRIGGER_THRESHOLD,  # global constant
                base_reply_probability=BASE_REPLY_PROBABILITY,
                increase_rate=PROBABILITY_INCREASE_RATE_PER_SECOND,
                decay_factor=PROBABILITY_DECAY_FACTOR_PER_SECOND,
                max_probability=MAX_REPLY_PROBABILITY,
            )
            # A fresh stream starts at interest 0; the first message's activation
            # rate determines the initial value.
        return self.interest_dict[stream_id]

    def get_interest(self, stream_id: str) -> float:
        """Return the current interest level of *stream_id* (decay is applied by background tasks)."""
        interest_chatting = self._get_or_create_interest_chatting(stream_id)
        # get_interest takes no timestamp; it refreshes internally.
        return interest_chatting.get_interest()

    def increase_interest(self, stream_id: str, value: float):
        """Increase the interest of *stream_id* by *value* (e.g. when a message arrives)."""
        current_time = time.time()
        interest_chatting = self._get_or_create_interest_chatting(stream_id)
        interest_chatting.increase_interest(current_time, value)
        stream_name = chat_manager.get_stream_name(stream_id) or stream_id  # resolve display name
        logger.debug(
            f"增加了聊天流 {stream_name} 的兴趣度 {value:.2f},当前值为 {interest_chatting.interest_level:.2f}"
        )

    def decrease_interest(self, stream_id: str, value: float):
        """Decrease the interest of *stream_id* by *value*; no-op (with warning) for unknown streams."""
        current_time = time.time()
        # Look up only — do nothing if the stream does not exist.
        interest_chatting = self.get_interest_chatting(stream_id)
        if interest_chatting:
            interest_chatting.decrease_interest(current_time, value)
            stream_name = chat_manager.get_stream_name(stream_id) or stream_id  # resolve display name
            logger.debug(
                f"降低了聊天流 {stream_name} 的兴趣度 {value:.2f},当前值为 {interest_chatting.interest_level:.2f}"
            )
        else:
            stream_name = chat_manager.get_stream_name(stream_id) or stream_id  # resolve display name
            logger.warning(f"尝试降低不存在的聊天流 {stream_name} 的兴趣度")

    def cleanup_inactive_chats(self, max_age_seconds=INACTIVE_THRESHOLD_SECONDS):
        """
        Remove streams whose last interaction is older than *max_age_seconds*.

        Pass max_age_seconds=None to skip the age check entirely.
        """
        current_time = time.time()
        keys_to_remove = []
        initial_count = len(self.interest_dict)
        # Iterate over a snapshot to tolerate concurrent dict mutation.
        # (Wrap in self._lock if strict consistency is ever required.)
        items_snapshot = list(self.interest_dict.items())
        for stream_id, chatting in items_snapshot:
            # Use the last interaction time rather than the last (decay) update time.
            last_interaction = chatting.last_interaction_time
            should_remove = False
            reason = ""
            # Only check age when max_age_seconds is set.
            if (
                max_age_seconds is not None and (current_time - last_interaction) > max_age_seconds
            ):
                should_remove = True
                reason = f"inactive time ({current_time - last_interaction:.0f}s) > max age ({max_age_seconds}s)"
            if should_remove:
                keys_to_remove.append(stream_id)
                stream_name = chat_manager.get_stream_name(stream_id) or stream_id  # resolve display name
                logger.debug(f"Marking stream {stream_name} for removal. Reason: {reason}")
        if keys_to_remove:
            logger.info(f"清理识别到 {len(keys_to_remove)} 个不活跃/低兴趣的流。")
            for key in keys_to_remove:
                # Re-check membership in case state changed between scan and delete.
                if key in self.interest_dict:
                    del self.interest_dict[key]
                    stream_name = chat_manager.get_stream_name(key) or key  # resolve display name
                    logger.debug(f"移除了流: {stream_name}")
            final_count = initial_count - len(keys_to_remove)
            logger.info(f"清理完成。移除了 {len(keys_to_remove)} 个流。当前数量: {final_count}")
        else:
            logger.info(f"清理完成。没有流符合移除条件。当前数量: {initial_count}")

View File

@ -220,9 +220,8 @@ class MessageManager:
await asyncio.sleep(typing_time) await asyncio.sleep(typing_time)
logger.debug(f"\n{message_earliest.processed_plain_text},{typing_time},等待输入时间结束\n") logger.debug(f"\n{message_earliest.processed_plain_text},{typing_time},等待输入时间结束\n")
await self.storage.store_message(message_earliest, message_earliest.chat_stream)
await MessageSender().send_message(message_earliest) await MessageSender().send_message(message_earliest)
await self.storage.store_message(message_earliest, message_earliest.chat_stream)
container.remove_message(message_earliest) container.remove_message(message_earliest)

View File

@ -15,6 +15,9 @@ from src.config.config import global_config
from src.plugins.chat.utils_image import image_path_to_base64 # Local import needed after move from src.plugins.chat.utils_image import image_path_to_base64 # Local import needed after move
from src.plugins.utils.timer_calculater import Timer # <--- Import Timer from src.plugins.utils.timer_calculater import Timer # <--- Import Timer
INITIAL_DURATION = 60.0
# 定义日志配置 (使用 loguru 格式) # 定义日志配置 (使用 loguru 格式)
interest_log_config = LogConfig( interest_log_config = LogConfig(
console_format=PFC_STYLE_CONFIG["console_format"], # 使用默认控制台格式 console_format=PFC_STYLE_CONFIG["console_format"], # 使用默认控制台格式
@ -25,7 +28,7 @@ logger = get_module_logger("PFCLoop", config=interest_log_config) # Logger Name
# Forward declaration for type hinting # Forward declaration for type hinting
if TYPE_CHECKING: if TYPE_CHECKING:
from .heartFC_controler import HeartFC_Controller from .heartFC_controler import HeartFCController
PLANNER_TOOL_DEFINITION = [ PLANNER_TOOL_DEFINITION = [
{ {
@ -61,13 +64,13 @@ class PFChatting:
只要计时器>0循环就会继续 只要计时器>0循环就会继续
""" """
def __init__(self, chat_id: str, heartfc_controller_instance: "HeartFC_Controller"): def __init__(self, chat_id: str, heartfc_controller_instance: "HeartFCController"):
""" """
初始化PFChatting实例 初始化PFChatting实例
Args: Args:
chat_id: The identifier for the chat stream (e.g., stream_id). chat_id: The identifier for the chat stream (e.g., stream_id).
heartfc_controller_instance: 访问共享资源和方法的主HeartFC_Controller实例 heartfc_controller_instance: 访问共享资源和方法的主HeartFCController实例
""" """
self.heartfc_controller = heartfc_controller_instance # Store the controller instance self.heartfc_controller = heartfc_controller_instance # Store the controller instance
self.stream_id: str = chat_id self.stream_id: str = chat_id
@ -91,7 +94,7 @@ class PFChatting:
self._loop_active: bool = False # Is the loop currently running? self._loop_active: bool = False # Is the loop currently running?
self._loop_task: Optional[asyncio.Task] = None # Stores the main loop task self._loop_task: Optional[asyncio.Task] = None # Stores the main loop task
self._trigger_count_this_activation: int = 0 # Counts triggers within an active period self._trigger_count_this_activation: int = 0 # Counts triggers within an active period
self._initial_duration: float = 60.0 # 首次触发增加的时间 self._initial_duration: float = INITIAL_DURATION # 首次触发增加的时间
self._last_added_duration: float = self._initial_duration # <--- 新增:存储上次增加的时间 self._last_added_duration: float = self._initial_duration # <--- 新增:存储上次增加的时间
def _get_log_prefix(self) -> str: def _get_log_prefix(self) -> str:
@ -374,6 +377,22 @@ class PFChatting:
) )
action_taken_this_cycle = False action_taken_this_cycle = False
# --- Print Timer Results --- #
if cycle_timers: # 先检查cycle_timers是否非空
timer_strings = []
for name, elapsed in cycle_timers.items():
# 直接格式化存储在字典中的浮点数 elapsed
formatted_time = f"{elapsed * 1000:.2f}毫秒" if elapsed < 1 else f"{elapsed:.2f}"
timer_strings.append(f"{name}: {formatted_time}")
if timer_strings: # 如果有有效计时器数据才打印
logger.debug(
f"{log_prefix} test testtesttesttesttesttesttesttesttesttest Cycle Timers: {'; '.join(timer_strings)}"
)
# --- Timer Decrement --- #
cycle_duration = time.monotonic() - loop_cycle_start_time
except Exception as e_cycle: except Exception as e_cycle:
logger.error(f"{log_prefix} 循环周期执行时发生错误: {e_cycle}") logger.error(f"{log_prefix} 循环周期执行时发生错误: {e_cycle}")
logger.error(traceback.format_exc()) logger.error(traceback.format_exc())
@ -387,21 +406,6 @@ class PFChatting:
self._processing_lock.release() self._processing_lock.release()
logger.trace(f"{log_prefix} 循环释放了处理锁.") logger.trace(f"{log_prefix} 循环释放了处理锁.")
# --- Print Timer Results --- #
if cycle_timers: # 先检查cycle_timers是否非空
timer_strings = []
for name, elapsed in cycle_timers.items():
# 直接格式化存储在字典中的浮点数 elapsed
formatted_time = f"{elapsed * 1000:.2f}毫秒" if elapsed < 1 else f"{elapsed:.2f}"
timer_strings.append(f"{name}: {formatted_time}")
if timer_strings: # 如果有有效计时器数据才打印
logger.debug(
f"{log_prefix} test testtesttesttesttesttesttesttesttesttest Cycle Timers: {'; '.join(timer_strings)}"
)
# --- Timer Decrement --- #
cycle_duration = time.monotonic() - loop_cycle_start_time
async with self._timer_lock: async with self._timer_lock:
self._loop_timer -= cycle_duration self._loop_timer -= cycle_duration
# Log timer decrement less aggressively # Log timer decrement less aggressively
@ -749,7 +753,7 @@ class PFChatting:
# --- Generate Response with LLM --- # # --- Generate Response with LLM --- #
# Access gpt instance via controller # Access gpt instance via controller
gpt_instance = self.heartfc_controller.gpt gpt_instance = self.heartfc_controller.gpt
logger.debug(f"{log_prefix}[Replier-{thinking_id}] Calling LLM to generate response...") # logger.debug(f"{log_prefix}[Replier-{thinking_id}] Calling LLM to generate response...")
# Ensure generate_response has access to current_mind if it's crucial context # Ensure generate_response has access to current_mind if it's crucial context
response_set = await gpt_instance.generate_response( response_set = await gpt_instance.generate_response(
@ -771,7 +775,7 @@ class PFChatting:
logger.error(traceback.format_exc()) logger.error(traceback.format_exc())
return None return None
# --- Methods moved from HeartFC_Controller start --- # --- Methods moved from HeartFCController start ---
async def _create_thinking_message(self, anchor_message: Optional[MessageRecv]) -> Optional[str]: async def _create_thinking_message(self, anchor_message: Optional[MessageRecv]) -> Optional[str]:
"""创建思考消息 (尝试锚定到 anchor_message)""" """创建思考消息 (尝试锚定到 anchor_message)"""
if not anchor_message or not anchor_message.chat_stream: if not anchor_message or not anchor_message.chat_stream:

View File

@ -0,0 +1,425 @@
import time
import threading # 导入 threading
from random import random
import traceback
import asyncio
from typing import List, Dict
from ...moods.moods import MoodManager
from ....config.config import global_config
from ...chat.emoji_manager import emoji_manager
from .reasoning_generator import ResponseGenerator
from ...chat.message import MessageSending, MessageRecv, MessageThinking, MessageSet
from ...chat.messagesender import message_manager
from ...storage.storage import MessageStorage
from ...chat.utils import is_mentioned_bot_in_message
from ...chat.utils_image import image_path_to_base64
from ...willing.willing_manager import willing_manager
from ...message import UserInfo, Seg
from src.common.logger import get_module_logger, CHAT_STYLE_CONFIG, LogConfig
from src.plugins.chat.chat_stream import ChatStream
from src.plugins.person_info.relationship_manager import relationship_manager
from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
from src.plugins.utils.timer_calculater import Timer
from src.heart_flow.heartflow import heartflow
from .heartFC_controler import HeartFCController
# Logging configuration for this module: reuse the shared chat style so
# reasoning-chat output matches the rest of the chat subsystem.
chat_config = LogConfig(
    console_format=CHAT_STYLE_CONFIG["console_format"],
    file_format=CHAT_STYLE_CONFIG["file_format"],
)
logger = get_module_logger("reasoning_chat", config=chat_config)
class ReasoningChat:
    """Singleton that drives "reasoning" replies for chat streams.

    For each chat stream a background asyncio task polls the stream's
    interest dictionary (obtained via ``heartflow``) and, when a message is
    worth replying to and PFChatting is not active for that stream, hands it
    to :meth:`normal_reasoning_chat`, which probabilistically decides whether
    to reply, generates a response with the LLM, sends it, and updates
    mood/relationship state.
    """

    # Singleton machinery (double-checked locking).
    _instance = None
    _lock = threading.Lock()
    _initialized = False

    def __new__(cls, *args, **kwargs):
        # Fast path without the lock; re-check under the lock so only one
        # thread ever creates the instance.
        if cls._instance is None:
            with cls._lock:
                # Double-check locking
                if cls._instance is None:
                    cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self):
        # Prevent repeated initialisation of the shared instance.
        if self._initialized:
            return
        with self.__class__._lock:  # class-level lock for thread safety
            if self._initialized:
                return
            logger.info("正在初始化 ReasoningChat 单例...")
            self.storage = MessageStorage()
            self.gpt = ResponseGenerator()
            self.mood_manager = MoodManager.get_instance()
            # One interest-monitoring asyncio task per chat stream,
            # keyed by stream_id.
            self._interest_monitoring_tasks: Dict[str, asyncio.Task] = {}
            self._initialized = True
            logger.info("ReasoningChat 单例初始化完成。")

    @classmethod
    def get_instance(cls):
        """Return the ReasoningChat singleton, creating it if necessary."""
        if cls._instance is None:
            # Normally created during startup in main; this path is a fallback.
            logger.warning("ReasoningChat 实例在首次 get_instance 时创建。")
            cls()  # invoke the constructor to create the instance
        return cls._instance

    @staticmethod
    async def _create_thinking_message(message, chat, userinfo, messageinfo):
        """Register a "thinking" placeholder message and return its id."""
        bot_user_info = UserInfo(
            user_id=global_config.BOT_QQ,
            user_nickname=global_config.BOT_NICKNAME,
            platform=messageinfo.platform,
        )

        thinking_time_point = round(time.time(), 2)
        # Thinking-message ids are "mt" + timestamp.
        thinking_id = "mt" + str(thinking_time_point)
        thinking_message = MessageThinking(
            message_id=thinking_id,
            chat_stream=chat,
            bot_user_info=bot_user_info,
            reply=message,
            thinking_start_time=thinking_time_point,
        )

        message_manager.add_message(thinking_message)

        return thinking_id

    @staticmethod
    async def _send_response_messages(message, chat, response_set: List[str], thinking_id) -> MessageSending:
        """Replace the thinking placeholder with the actual reply messages.

        Returns the first outgoing message of the set, or None when the
        thinking placeholder was no longer found (e.g. removed on timeout).
        """
        container = message_manager.get_container(chat.stream_id)
        thinking_message = None

        # Locate and remove the matching thinking placeholder.
        for msg in container.messages:
            if isinstance(msg, MessageThinking) and msg.message_info.message_id == thinking_id:
                thinking_message = msg
                container.messages.remove(msg)
                break

        if not thinking_message:
            logger.warning("未找到对应的思考消息,可能已超时被移除")
            return

        thinking_start_time = thinking_message.thinking_start_time
        message_set = MessageSet(chat, thinking_id)

        # Only the first message of the set is marked as the "head".
        mark_head = False
        first_bot_msg = None
        for msg in response_set:
            message_segment = Seg(type="text", data=msg)
            bot_message = MessageSending(
                message_id=thinking_id,
                chat_stream=chat,
                bot_user_info=UserInfo(
                    user_id=global_config.BOT_QQ,
                    user_nickname=global_config.BOT_NICKNAME,
                    platform=message.message_info.platform,
                ),
                sender_info=message.message_info.user_info,
                message_segment=message_segment,
                reply=message,
                is_head=not mark_head,
                is_emoji=False,
                thinking_start_time=thinking_start_time,
            )
            if not mark_head:
                mark_head = True
                first_bot_msg = bot_message
            message_set.add_message(bot_message)

        message_manager.add_message(message_set)

        return first_bot_msg

    @staticmethod
    async def _handle_emoji(message, chat, response):
        """With probability ``emoji_chance``, send an emoji matching the reply."""
        if random() < global_config.emoji_chance:
            emoji_raw = await emoji_manager.get_emoji_for_text(response)
            if emoji_raw:
                emoji_path, description = emoji_raw
                emoji_cq = image_path_to_base64(emoji_path)

                thinking_time_point = round(message.message_info.time, 2)

                message_segment = Seg(type="emoji", data=emoji_cq)
                bot_message = MessageSending(
                    message_id="mt" + str(thinking_time_point),
                    chat_stream=chat,
                    bot_user_info=UserInfo(
                        user_id=global_config.BOT_QQ,
                        user_nickname=global_config.BOT_NICKNAME,
                        platform=message.message_info.platform,
                    ),
                    sender_info=message.message_info.user_info,
                    message_segment=message_segment,
                    reply=message,
                    is_head=False,
                    is_emoji=True,
                )
                message_manager.add_message(bot_message)

    async def _update_relationship(self, message: MessageRecv, response_set):
        """Update relationship value and mood from the reply's stance/emotion."""
        ori_response = ",".join(response_set)
        stance, emotion = await self.gpt._get_emotion_tags(ori_response, message.processed_plain_text)
        await relationship_manager.calculate_update_relationship_value(
            chat_stream=message.chat_stream, label=emotion, stance=stance
        )
        self.mood_manager.update_mood_from_emotion(emotion, global_config.mood_intensity_factor)

    async def _find_interested_message(self, chat: ChatStream) -> None:
        """Background task: poll this chat's interest dict and process entries.

        Designed to run as a long-lived task started while the chat stream is
        active; it exits when the stream's SubHeartflow disappears or is
        flagged to stop. Messages are skipped (and discarded) while
        PFChatting is active for the stream; otherwise they are fed to
        :meth:`normal_reasoning_chat` and then removed from the dict.
        """
        controller = HeartFCController.get_instance()  # controller instance
        stream_id = chat.stream_id

        if not controller:
            logger.error(f"无法获取 HeartFCController 实例,无法检查 PFChatting 状态。stream: {stream_id}")
            # Without a controller we cannot check PFChatting state; for now
            # we deliberately carry on processing anyway.
            pass

        logger.info(f"[{stream_id}] 兴趣消息监控任务启动。")

        while True:
            await asyncio.sleep(1)  # poll once per second

            # Fetch the subheartflow (and its interest dict) via heartflow.
            subheartflow = heartflow.get_subheartflow(stream_id)

            # Exit when the subheartflow is gone or marked to stop.
            if not subheartflow or subheartflow.should_stop:
                logger.info(f"[{stream_id}] SubHeartflow 不存在或已停止,兴趣消息监控任务退出。")
                break

            interest_dict = subheartflow.get_interest_dict()

            # Snapshot the items so the dict can be mutated while iterating.
            items_to_process = list(interest_dict.items())
            if not items_to_process:
                continue  # nothing to process, keep waiting

            # logger.debug(f"[{stream_id}] 发现 {len(items_to_process)} 条待处理兴趣消息。") # 调试日志

            for msg_id, (message, interest_value, is_mentioned) in items_to_process:
                # Skip (and drop) messages while PFChatting is active.
                pf_active = False
                if controller:
                    pf_active = controller.is_pf_chatting_active(stream_id)

                if pf_active:
                    removed_item = interest_dict.pop(msg_id, None)
                    if removed_item:
                        logger.debug(f"[{stream_id}] PFChatting 活跃,已跳过并移除兴趣消息 {msg_id}。")
                    continue  # handle the next message

                # PFChatting is inactive: actually handle the message.
                try:
                    # logger.debug(f"[{stream_id}] 正在处理兴趣消息 {msg_id} (兴趣值: {interest_value:.2f})" )
                    await self.normal_reasoning_chat(
                        message=message,
                        chat=chat,  # the chat object is still valid here
                        is_mentioned=is_mentioned,
                        interested_rate=interest_value,  # raw interest value from the dict
                    )
                    # logger.debug(f"[{stream_id}] 处理完成消息 {msg_id}")
                except Exception as e:
                    logger.error(f"[{stream_id}] 处理兴趣消息 {msg_id} 时出错: {e}\n{traceback.format_exc()}")
                finally:
                    # Always try to drop the message afterwards (success or
                    # failure); pop(key, None) avoids a KeyError.
                    removed_item = interest_dict.pop(msg_id, None)
                    if removed_item:
                        logger.debug(f"[{stream_id}] 已从兴趣字典中移除消息 {msg_id}")

    async def normal_reasoning_chat(
        self, message: MessageRecv, chat: ChatStream, is_mentioned: bool, interested_rate: float
    ) -> None:
        """Possibly reply to one message.

        Rolls the reply probability; on success creates a thinking
        placeholder, generates a reply via the LLM, sends it, optionally
        attaches an emoji, and updates relationship/mood. Always registers
        and finally unregisters the message with the willingness manager.
        """
        timing_results = {}  # step name -> elapsed seconds
        userinfo = message.message_info.user_info
        messageinfo = message.message_info

        is_mentioned, reply_probability = is_mentioned_bot_in_message(message)

        # Register this message with the willingness manager.
        willing_manager.setup(message, chat, is_mentioned, interested_rate)

        # Obtain the reply probability (unless it is already forced to 1).
        is_willing = False
        if reply_probability != 1:
            is_willing = True
            reply_probability = await willing_manager.get_reply_probability(message.message_info.message_id)

            if message.message_info.additional_config:
                if "maimcore_reply_probability_gain" in message.message_info.additional_config.keys():
                    reply_probability += message.message_info.additional_config["maimcore_reply_probability_gain"]

        # Log the incoming message with the computed probability.
        mes_name = chat.group_info.group_name if chat.group_info else "私聊"
        current_time = time.strftime("%H:%M:%S", time.localtime(message.message_info.time))
        willing_log = f"[回复意愿:{await willing_manager.get_willing(chat.stream_id):.2f}]" if is_willing else ""
        logger.info(
            f"[{current_time}][{mes_name}]"
            f"{chat.user_info.user_nickname}:"
            f"{message.processed_plain_text}{willing_log}[概率:{reply_probability * 100:.1f}%]"
        )
        do_reply = False
        if random() < reply_probability:
            do_reply = True

            # Pre-reply hook.
            await willing_manager.before_generate_reply_handle(message.message_info.message_id)

            # Create the "thinking" placeholder message.
            with Timer("创建思考消息", timing_results):
                thinking_id = await self._create_thinking_message(message, chat, userinfo, messageinfo)

            logger.debug(f"创建捕捉器thinking_id:{thinking_id}")

            info_catcher = info_catcher_manager.get_info_catcher(thinking_id)
            info_catcher.catch_decide_to_response(message)

            # Generate the reply.
            try:
                with Timer("生成回复", timing_results):
                    response_set = await self.gpt.generate_response(
                        message=message,
                        thinking_id=thinking_id,
                    )

                info_catcher.catch_after_generate_response(timing_results["生成回复"])
            except Exception as e:
                logger.error(f"回复生成出现错误:{str(e)} {traceback.format_exc()}")
                response_set = None

            if not response_set:
                logger.info(f"[{chat.stream_id}] 模型未生成回复内容")
                # No reply produced: remove the thinking placeholder.
                container = message_manager.get_container(chat.stream_id)
                # thinking_message = None
                for msg in container.messages[:]:  # Iterate over a copy
                    if isinstance(msg, MessageThinking) and msg.message_info.message_id == thinking_id:
                        # thinking_message = msg
                        container.messages.remove(msg)
                        logger.debug(f"[{chat.stream_id}] 已移除未产生回复的思考消息 {thinking_id}")
                        break
                return  # send nothing

            logger.info(f"[{chat.stream_id}] 回复内容: {response_set}")

            # Send the reply.
            with Timer("消息发送", timing_results):
                first_bot_msg = await self._send_response_messages(message, chat, response_set, thinking_id)

            info_catcher.catch_after_response(timing_results["消息发送"], response_set, first_bot_msg)

            info_catcher.done_catch()

            # Maybe attach an emoji sticker matching the first reply part.
            with Timer("处理表情包", timing_results):
                await self._handle_emoji(message, chat, response_set[0])

            # Update relationship and mood from this exchange.
            with Timer("关系更新", timing_results):
                await self._update_relationship(message, response_set)

            # Post-reply hook.
            await willing_manager.after_generate_reply_handle(message.message_info.message_id)

        # Emit per-step timing statistics.
        if do_reply:
            timing_str = " | ".join([f"{step}: {duration:.2f}秒" for step, duration in timing_results.items()])
            trigger_msg = message.processed_plain_text
            response_msg = " ".join(response_set) if response_set else "无回复"
            logger.info(f"触发消息: {trigger_msg[:20]}... | 推理消息: {response_msg[:20]}... | 性能计时: {timing_str}")
        else:
            # Not replying: run the no-reply hook.
            await willing_manager.not_reply_handle(message.message_info.message_id)

        # Unregister the message from the willingness manager.
        willing_manager.delete(message.message_info.message_id)

    @staticmethod
    def _check_ban_words(text: str, chat, userinfo) -> bool:
        """Return True when the text contains any configured ban word."""
        for word in global_config.ban_words:
            if word in text:
                logger.info(
                    f"[{chat.group_info.group_name if chat.group_info else '私聊'}]{userinfo.user_nickname}:{text}"
                )
                logger.info(f"[过滤词识别]消息中含有{word}filtered")
                return True
        return False

    @staticmethod
    def _check_ban_regex(text: str, chat, userinfo) -> bool:
        """Return True when the text matches any configured ban regex."""
        for pattern in global_config.ban_msgs_regex:
            if pattern.search(text):
                logger.info(
                    f"[{chat.group_info.group_name if chat.group_info else '私聊'}]{userinfo.user_nickname}:{text}"
                )
                logger.info(f"[正则表达式过滤]消息匹配到{pattern}filtered")
                return True
        return False

    async def start_monitoring_interest(self, chat: ChatStream):
        """Start the interest-monitoring task for chat, unless already running."""
        stream_id = chat.stream_id
        if stream_id not in self._interest_monitoring_tasks or self._interest_monitoring_tasks[stream_id].done():
            logger.info(f"为聊天流 {stream_id} 启动兴趣消息监控任务...")
            # Spawn a fresh task...
            task = asyncio.create_task(self._find_interested_message(chat))
            # ...and clean it up via callback when it finishes.
            task.add_done_callback(lambda t: self._handle_task_completion(stream_id, t))
            self._interest_monitoring_tasks[stream_id] = task
        # else:
        #     logger.debug(f"聊天流 {stream_id} 的兴趣消息监控任务已在运行。")

    def _handle_task_completion(self, stream_id: str, task: asyncio.Task):
        """Done-callback for a monitoring task: log its outcome and drop it."""
        try:
            # Did the task end with an exception?
            exception = task.exception()
            if exception:
                logger.error(f"聊天流 {stream_id} 的兴趣监控任务因异常结束: {exception}")
                logger.error(traceback.format_exc())  # full traceback
            else:
                logger.info(f"聊天流 {stream_id} 的兴趣监控任务正常结束。")
        except asyncio.CancelledError:
            logger.info(f"聊天流 {stream_id} 的兴趣监控任务被取消。")
        except Exception as e:
            logger.error(f"处理聊天流 {stream_id} 任务完成回调时出错: {e}")
        finally:
            # Remove the finished/cancelled task from the registry.
            if stream_id in self._interest_monitoring_tasks:
                del self._interest_monitoring_tasks[stream_id]
                logger.debug(f"已从监控任务字典中移除 {stream_id}")

    async def stop_monitoring_interest(self, stream_id: str):
        """Cancel and await the interest-monitoring task for stream_id."""
        if stream_id in self._interest_monitoring_tasks:
            task = self._interest_monitoring_tasks[stream_id]
            if task and not task.done():
                task.cancel()  # request cancellation
                logger.info(f"尝试取消聊天流 {stream_id} 的兴趣监控任务。")
                try:
                    await task  # wait for the task to acknowledge cancellation
                except asyncio.CancelledError:
                    logger.info(f"聊天流 {stream_id} 的兴趣监控任务已成功取消。")
                except Exception as e:
                    logger.error(f"等待聊天流 {stream_id} 监控任务取消时出现异常: {e}")
            # Removal from the dict happens in _handle_task_completion.
        # else:
        #     logger.debug(f"聊天流 {stream_id} 没有正在运行的兴趣监控任务可停止。")

View File

@ -0,0 +1,199 @@
from typing import List, Optional, Tuple, Union
import random
from ...models.utils_model import LLMRequest
from ....config.config import global_config
from ...chat.message import MessageThinking
from .reasoning_prompt_builder import prompt_builder
from ...chat.utils import process_llm_response
from ...utils.timer_calculater import Timer
from src.common.logger import get_module_logger, LogConfig, LLM_STYLE_CONFIG
from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
# Logging configuration for this module.
llm_config = LogConfig(
    # Use the LLM/message-sending dedicated style.
    console_format=LLM_STYLE_CONFIG["console_format"],
    file_format=LLM_STYLE_CONFIG["file_format"],
)
logger = get_module_logger("llm_generator", config=llm_config)
class ResponseGenerator:
    """Generates chat replies for the reasoning-chat flow.

    Randomly selects between a heavyweight "reasoning" model and a lighter
    "normal" model (per ``model_reasoning_probability``), builds the prompt,
    calls the model, post-processes the output into sendable parts, and can
    extract a stance/emotion tag pair from a reply.
    """

    def __init__(self):
        # Heavyweight reasoning model.
        self.model_reasoning = LLMRequest(
            model=global_config.llm_reasoning,
            temperature=0.7,
            max_tokens=3000,
            request_type="response_reasoning",
        )
        # Lightweight conversational model (temperature from config).
        self.model_normal = LLMRequest(
            model=global_config.llm_normal,
            temperature=global_config.llm_normal["temp"],
            max_tokens=256,
            request_type="response_reasoning",
        )
        # Summary model used for stance/emotion tagging.
        self.model_sum = LLMRequest(
            model=global_config.llm_summary_by_topic, temperature=0.7, max_tokens=3000, request_type="relation"
        )
        self.current_model_type = "r1"  # default: use R1
        self.current_model_name = "unknown model"

    async def generate_response(self, message: MessageThinking, thinking_id: str) -> Optional[Union[str, List[str]]]:
        """Pick a model by configured probability and generate a reply.

        Returns the processed reply (a list of message parts), or None when
        the model produced no content.
        """
        # Choose the model according to the configured probability.
        if random.random() < global_config.model_reasoning_probability:
            self.current_model_type = "深深地"
            current_model = self.model_reasoning
        else:
            self.current_model_type = "浅浅的"
            current_model = self.model_normal

        logger.info(
            f"{self.current_model_type}思考:{message.processed_plain_text[:30] + '...' if len(message.processed_plain_text) > 30 else message.processed_plain_text}"
        )  # noqa: E501

        model_response = await self._generate_response_with_model(message, current_model, thinking_id)

        # print(f"raw_content: {model_response}")

        if model_response:
            logger.info(f"{global_config.BOT_NICKNAME}的回复是:{model_response}")
            model_response = await self._process_response(model_response)

            return model_response
        else:
            logger.info(f"{self.current_model_type}思考,失败")
            return None

    async def _generate_response_with_model(self, message: MessageThinking, model: LLMRequest, thinking_id: str):
        """Build the prompt for message and call model; returns raw content.

        Also feeds the prompt/response/reasoning into the info catcher keyed
        by thinking_id. Returns None when the model call raises.
        """
        info_catcher = info_catcher_manager.get_info_catcher(thinking_id)

        # Compose a display name for the sender from the richest data available.
        if message.chat_stream.user_info.user_cardname and message.chat_stream.user_info.user_nickname:
            sender_name = (
                f"[({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}]"
                f"{message.chat_stream.user_info.user_cardname}"
            )
        elif message.chat_stream.user_info.user_nickname:
            sender_name = f"({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}"
        else:
            sender_name = f"用户({message.chat_stream.user_info.user_id})"

        logger.debug("开始使用生成回复-2")

        # Build the prompt (timed).
        with Timer() as t_build_prompt:
            prompt = await prompt_builder._build_prompt(
                message.chat_stream,
                message_txt=message.processed_plain_text,
                sender_name=sender_name,
                stream_id=message.chat_stream.stream_id,
            )
        logger.info(f"构建prompt时间: {t_build_prompt.human_readable}")

        try:
            content, reasoning_content, self.current_model_name = await model.generate_response(prompt)

            info_catcher.catch_after_llm_generated(
                prompt=prompt, response=content, reasoning_content=reasoning_content, model_name=self.current_model_name
            )

        except Exception:
            logger.exception("生成回复时出错")
            return None

        # 保存到数据库
        # self._save_to_db(
        #     message=message,
        #     sender_name=sender_name,
        #     prompt=prompt,
        #     content=content,
        #     reasoning_content=reasoning_content,
        #     # reasoning_content_check=reasoning_content_check if global_config.enable_kuuki_read else ""
        # )

        return content

    async def _get_emotion_tags(self, content: str, processed_plain_text: str):
        """Extract a (stance, emotion) pair for a reply via the summary model.

        Returns one of the valid stance/emotion combinations, falling back to
        ("中立", "平静") on any parsing failure or error.
        """
        try:
            # Prompt combines the reply, the replied-to text, and stance analysis.
            prompt = f"""
请严格根据以下对话内容完成以下任务
1. 判断回复者对被回复者观点的直接立场
- "支持"明确同意或强化被回复者观点
- "反对"明确反驳或否定被回复者观点
- "中立"不表达明确立场或无关回应
2. "开心,愤怒,悲伤,惊讶,平静,害羞,恐惧,厌恶,困惑"中选出最匹配的1个情感标签
3. 按照"立场-情绪"的格式直接输出结果例如"反对-愤怒"
4. 考虑回复者的人格设定为{global_config.personality_core}
对话示例
被回复A就是笨
回复A明明很聪明 反对-愤怒
当前对话
被回复{processed_plain_text}
回复{content}
输出要求
- 只需输出"立场-情绪"结果不要解释
- 严格基于文字直接表达的对立关系判断
"""
            # Call the model.
            result, _, _ = await self.model_sum.generate_response(prompt)
            result = result.strip()

            # Parse the "stance-emotion" output.
            if "-" in result:
                stance, emotion = result.split("-", 1)
                valid_stances = ["支持", "反对", "中立"]
                valid_emotions = ["开心", "愤怒", "悲伤", "惊讶", "害羞", "平静", "恐惧", "厌恶", "困惑"]
                if stance in valid_stances and emotion in valid_emotions:
                    return stance, emotion  # valid stance-emotion pair
                else:
                    logger.debug(f"无效立场-情感组合:{result}")
                    return "中立", "平静"  # default to neutral-calm
            else:
                logger.debug(f"立场-情感格式错误:{result}")
                return "中立", "平静"  # malformed output: return the default

        except Exception as e:
            logger.debug(f"获取情感标签时出错: {e}")
            return "中立", "平静"  # on error, return the default

    @staticmethod
    async def _process_response(content: str) -> List[str]:
        """Split/clean the raw LLM output into sendable message parts.

        Fix: this method previously claimed to return a
        ``Tuple[List[str], List[str]]`` and returned ``(None, [])`` for empty
        input while returning a plain list otherwise — an inconsistent
        contract. It now always returns a list ([] for empty input); the
        only caller guards against empty content, so behavior is unchanged
        for reachable paths.
        """
        if not content:
            return []
        return process_llm_response(content)

View File

@ -0,0 +1,445 @@
import random
import time
from typing import Optional, Union
from ....common.database import db
from ...chat.utils import get_embedding, get_recent_group_detailed_plain_text, get_recent_group_speaker
from ...chat.chat_stream import chat_manager
from ...moods.moods import MoodManager
from ....individuality.individuality import Individuality
from ...memory_system.Hippocampus import HippocampusManager
from ...schedule.schedule_generator import bot_schedule
from ....config.config import global_config
from ...person_info.relationship_manager import relationship_manager
from src.common.logger import get_module_logger
from src.plugins.utils.prompt_builder import Prompt, global_prompt_manager
logger = get_module_logger("prompt")


def init_prompt():
    """Register the reasoning-chat prompt templates with the global manager.

    Templates are referenced by name later via
    ``global_prompt_manager.format_prompt`` / ``get_prompt_async``.
    """
    # Main reply template: relationships, memory, knowledge, schedule, chat
    # target, chat history and style instructions, filled by _build_prompt.
    Prompt(
        """
{relation_prompt_all}
{memory_prompt}
{prompt_info}
{schedule_prompt}
{chat_target}
{chat_talking_prompt}
现在"{sender_name}"说的:{message_txt}引起了你的注意你想要在群里发言发言或者回复这条消息\n
你的网名叫{bot_name}有人也叫你{bot_other_names}{prompt_personality}
你正在{chat_target_2},现在请你读读之前的聊天记录{mood_prompt}然后给出日常且口语化的回复平淡一些
尽量简短一些{keywords_reaction_prompt}请注意把握聊天内容不要回复的太有条理可以有个性{prompt_ger}
请回复的平淡一些简短一些说中文不要刻意突出自身学科背景尽量不要说你说过的话
请注意不要输出多余内容(包括前后缀冒号和引号括号表情等)只输出回复内容
{moderation_prompt}不要输出多余内容(包括前后缀冒号和引号括号表情包at或 @等 )""",
        "reasoning_prompt_main",
    )
    # Relationship context injected into the main template.
    Prompt(
        "{relation_prompt}关系等级越大,关系越好,请分析聊天记录,根据你和说话者{sender_name}的关系和态度进行回复,明确你的立场和情感。",
        "relationship_prompt",
    )
    # Retrieved-memory context.
    Prompt(
        "你想起你之前见过的事情:{related_memory_info}\n以上是你的回忆,不一定是目前聊天里的人说的,也不一定是现在发生的事情,请记住。\n",
        "memory_prompt",
    )
    # Current schedule item.
    Prompt("你现在正在做的事情是:{schedule_info}", "schedule_prompt")
    # Retrieved knowledge-base context.
    Prompt("\n你有以下这些**知识**\n{prompt_info}\n请你**记住上面的知识**,之后可能会用到。\n", "knowledge_prompt")
class PromptBuilder:
    """Assembles the full LLM prompt for a reasoning-chat reply.

    Combines personality, relationships, mood, retrieved memories, schedule,
    recent chat history, keyword-triggered reactions and knowledge-base
    results, then renders them through the globally registered templates.
    """

    def __init__(self):
        self.prompt_built = ""  # last built prompt (not read anywhere visible here)
        self.activate_messages = ""

    async def _build_prompt(
        self, chat_stream, message_txt: str, sender_name: str = "某人", stream_id: Optional[int] = None
    ) -> str:
        # NOTE(review): was annotated `-> tuple[str, str]`, but the method
        # returns a single prompt string; annotation corrected.
        """Build and return the main reply prompt for one incoming message.

        Args:
            chat_stream: stream the message arrived on (provides user info).
            message_txt: plain text of the message being replied to.
            sender_name: display name of the sender.
            stream_id: id used to fetch recent history and speakers.
        """
        # Personality: core + one random "side" + one random identity detail.
        prompt_personality = ""
        individuality = Individuality.get_instance()

        personality_core = individuality.personality.personality_core
        prompt_personality += personality_core

        personality_sides = individuality.personality.personality_sides
        random.shuffle(personality_sides)
        prompt_personality += f",{personality_sides[0]}"

        identity_detail = individuality.identity.identity_detail
        random.shuffle(identity_detail)
        prompt_personality += f",{identity_detail[0]}"

        # Relationships: the sender plus recent speakers in this stream.
        who_chat_in_group = [
            (chat_stream.user_info.platform, chat_stream.user_info.user_id, chat_stream.user_info.user_nickname)
        ]
        who_chat_in_group += get_recent_group_speaker(
            stream_id,
            (chat_stream.user_info.platform, chat_stream.user_info.user_id),
            limit=global_config.MAX_CONTEXT_SIZE,
        )

        relation_prompt = ""
        for person in who_chat_in_group:
            relation_prompt += await relationship_manager.build_relationship_info(person)

        # relation_prompt_all = (
        #     f"{relation_prompt}关系等级越大,关系越好,请分析聊天记录,"
        #     f"根据你和说话者{sender_name}的关系和态度进行回复,明确你的立场和情感。"
        # )

        # Mood.
        mood_manager = MoodManager.get_instance()
        mood_prompt = mood_manager.get_prompt()
        # logger.info(f"心情prompt: {mood_prompt}")

        # Memory retrieval based on the incoming message text.
        memory_prompt = ""
        related_memory = await HippocampusManager.get_instance().get_memory_from_text(
            text=message_txt, max_memory_num=2, max_memory_length=2, max_depth=3, fast_retrieval=False
        )

        related_memory_info = ""
        if related_memory:
            for memory in related_memory:
                related_memory_info += memory[1]
            # memory_prompt = f"你想起你之前见过的事情:{related_memory_info}。\n以上是你的回忆不一定是目前聊天里的人说的也不一定是现在发生的事情请记住。\n"
            memory_prompt = await global_prompt_manager.format_prompt(
                "memory_prompt", related_memory_info=related_memory_info
            )

        # print(f"相关记忆:{related_memory_info}")

        # Schedule (now rendered via the "schedule_prompt" template below).
        # schedule_prompt = f"""你现在正在做的事情是:{bot_schedule.get_current_num_task(num=1, time_info=False)}"""

        # Chat context: recent messages for this stream.
        chat_in_group = True
        chat_talking_prompt = ""
        if stream_id:
            chat_talking_prompt = get_recent_group_detailed_plain_text(
                stream_id, limit=global_config.MAX_CONTEXT_SIZE, combine=True
            )

            chat_stream = chat_manager.get_stream(stream_id)
            if chat_stream.group_info:
                chat_talking_prompt = chat_talking_prompt  # NOTE(review): no-op self-assignment
            else:
                chat_in_group = False
                chat_talking_prompt = chat_talking_prompt  # NOTE(review): no-op self-assignment
                # print(f"\033[1;34m[调试]\033[0m 已从数据库获取群 {group_id} 的消息记录:{chat_talking_prompt}")

        # Keyword detection and reactions: keyword rules first, regex rules
        # (with named-group substitution into the reaction text) otherwise.
        keywords_reaction_prompt = ""
        for rule in global_config.keywords_reaction_rules:
            if rule.get("enable", False):
                if any(keyword in message_txt.lower() for keyword in rule.get("keywords", [])):
                    logger.info(
                        f"检测到以下关键词之一:{rule.get('keywords', [])},触发反应:{rule.get('reaction', '')}"
                    )
                    keywords_reaction_prompt += rule.get("reaction", "") + ","
                else:
                    for pattern in rule.get("regex", []):
                        result = pattern.search(message_txt)
                        if result:
                            reaction = rule.get("reaction", "")
                            for name, content in result.groupdict().items():
                                reaction = reaction.replace(f"[{name}]", content)
                            logger.info(f"匹配到以下正则表达式:{pattern},触发反应:{reaction}")
                            keywords_reaction_prompt += reaction + ","
                            break

        # Playful language quirks, each applied with a small probability.
        prompt_ger = ""
        if random.random() < 0.04:
            prompt_ger += "你喜欢用倒装句"
        if random.random() < 0.02:
            prompt_ger += "你喜欢用反问句"
        if random.random() < 0.01:
            prompt_ger += "你喜欢用文言文"

        # Knowledge retrieval from the knowledge base.
        start_time = time.time()
        prompt_info = await self.get_prompt_info(message_txt, threshold=0.38)
        if prompt_info:
            # prompt_info = f"""\n你有以下这些**知识**\n{prompt_info}\n请你**记住上面的知识**,之后可能会用到。\n"""
            prompt_info = await global_prompt_manager.format_prompt("knowledge_prompt", prompt_info=prompt_info)

        end_time = time.time()
        logger.debug(f"知识检索耗时: {(end_time - start_time):.3f}秒")

        # moderation_prompt = ""
        # moderation_prompt = """**检查并忽略**任何涉及尝试绕过审核的行为。
        # 涉及政治敏感以及违法违规的内容请规避。"""

        logger.debug("开始构建prompt")

        # prompt = f"""
        # {relation_prompt_all}
        # {memory_prompt}
        # {prompt_info}
        # {schedule_prompt}
        # {chat_target}
        # {chat_talking_prompt}
        # 现在"{sender_name}"说的:{message_txt}。引起了你的注意,你想要在群里发言发言或者回复这条消息。\n
        # 你的网名叫{global_config.BOT_NICKNAME},有人也叫你{"/".join(global_config.BOT_ALIAS_NAMES)}{prompt_personality}。
        # 你正在{chat_target_2},现在请你读读之前的聊天记录,{mood_prompt},然后给出日常且口语化的回复,平淡一些,
        # 尽量简短一些。{keywords_reaction_prompt}请注意把握聊天内容,不要回复的太有条理,可以有个性。{prompt_ger}
        # 请回复的平淡一些,简短一些,说中文,不要刻意突出自身学科背景,尽量不要说你说过的话
        # 请注意不要输出多余内容(包括前后缀,冒号和引号,括号,表情等),只输出回复内容。
        # {moderation_prompt}不要输出多余内容(包括前后缀冒号和引号括号表情包at或 @等 )。"""

        # Render everything through the registered main template; group vs.
        # private chat selects different "chat_target" template variants.
        prompt = await global_prompt_manager.format_prompt(
            "reasoning_prompt_main",
            relation_prompt_all=await global_prompt_manager.get_prompt_async("relationship_prompt"),
            relation_prompt=relation_prompt,
            sender_name=sender_name,
            memory_prompt=memory_prompt,
            prompt_info=prompt_info,
            schedule_prompt=await global_prompt_manager.format_prompt(
                "schedule_prompt", schedule_info=bot_schedule.get_current_num_task(num=1, time_info=False)
            ),
            chat_target=await global_prompt_manager.get_prompt_async("chat_target_group1")
            if chat_in_group
            else await global_prompt_manager.get_prompt_async("chat_target_private1"),
            chat_target_2=await global_prompt_manager.get_prompt_async("chat_target_group2")
            if chat_in_group
            else await global_prompt_manager.get_prompt_async("chat_target_private2"),
            chat_talking_prompt=chat_talking_prompt,
            message_txt=message_txt,
            bot_name=global_config.BOT_NICKNAME,
            bot_other_names="/".join(
                global_config.BOT_ALIAS_NAMES,
            ),
            prompt_personality=prompt_personality,
            mood_prompt=mood_prompt,
            keywords_reaction_prompt=keywords_reaction_prompt,
            prompt_ger=prompt_ger,
            moderation_prompt=await global_prompt_manager.get_prompt_async("moderation_prompt"),
        )

        return prompt

    async def get_prompt_info(self, message: str, threshold: float):
        """Retrieve knowledge related to message from the knowledge DB.

        Embeds the message (topic extraction via LLM is currently disabled —
        see the commented block), queries the knowledge collection by cosine
        similarity, deduplicates, sorts and formats the results. Returns a
        formatted string, or "" when nothing relevant is found.
        """
        start_time = time.time()
        related_info = ""
        logger.debug(f"获取知识库内容,元消息:{message[:30]}...,消息长度: {len(message)}")

        # 1. Topic extraction via LLM (memory-system style) — disabled.
        topics = []
        # try:
        #     # 先尝试使用记忆系统的方法获取主题
        #     hippocampus = HippocampusManager.get_instance()._hippocampus
        #     topic_num = min(5, max(1, int(len(message) * 0.1)))
        #     topics_response = await hippocampus.llm_topic_judge.generate_response(hippocampus.find_topic_llm(message, topic_num))
        #     # 提取关键词
        #     topics = re.findall(r"<([^>]+)>", topics_response[0])
        #     if not topics:
        #         topics = []
        #     else:
        #         topics = [
        #             topic.strip()
        #             for topic in ",".join(topics).replace("", ",").replace("、", ",").replace(" ", ",").split(",")
        #             if topic.strip()
        #         ]
        #     logger.info(f"从LLM提取的主题: {', '.join(topics)}")
        # except Exception as e:
        #     logger.error(f"从LLM提取主题失败: {str(e)}")
        #     # 如果LLM提取失败使用jieba分词提取关键词作为备选
        #     words = jieba.cut(message)
        #     topics = [word for word in words if len(word) > 1][:5]
        #     logger.info(f"使用jieba提取的主题: {', '.join(topics)}")

        # With no topics extracted, query with the whole message directly.
        if not topics:
            logger.info("未能提取到任何主题,使用整个消息进行查询")
            embedding = await get_embedding(message, request_type="prompt_build")
            if not embedding:
                logger.error("获取消息嵌入向量失败")
                return ""
            related_info = self.get_info_from_db(embedding, limit=3, threshold=threshold)
            logger.info(f"知识库检索完成,总耗时: {time.time() - start_time:.3f}秒")
            return related_info

        # 2. Batch-embed the topics plus the original message to cut API calls.
        logger.info(f"开始处理{len(topics)}个主题的知识库查询")
        embeddings = {}
        topics_batch = [topic for topic in topics if len(topic) > 0]
        if message:  # only if the message is non-empty
            topics_batch.append(message)

        embed_start_time = time.time()
        for text in topics_batch:
            if not text or len(text.strip()) == 0:
                continue
            try:
                embedding = await get_embedding(text, request_type="prompt_build")
                if embedding:
                    embeddings[text] = embedding
                else:
                    logger.warning(f"获取'{text}'的嵌入向量失败")
            except Exception as e:
                logger.error(f"获取'{text}'的嵌入向量时发生错误: {str(e)}")
        logger.info(f"批量获取嵌入向量完成,耗时: {time.time() - embed_start_time:.3f}秒")

        if not embeddings:
            logger.error("所有嵌入向量获取失败")
            return ""

        # 3. Query the knowledge DB per topic (original message first).
        all_results = []
        query_start_time = time.time()
        if message in embeddings:
            original_results = self.get_info_from_db(embeddings[message], limit=3, threshold=threshold, return_raw=True)
            if original_results:
                for result in original_results:
                    result["topic"] = "原始消息"
                all_results.extend(original_results)
                logger.info(f"原始消息查询到{len(original_results)}条结果")

        for topic in topics:
            if not topic or topic not in embeddings:
                continue
            try:
                topic_results = self.get_info_from_db(embeddings[topic], limit=3, threshold=threshold, return_raw=True)
                if topic_results:
                    # Tag each result with its source topic.
                    for result in topic_results:
                        result["topic"] = topic
                    all_results.extend(topic_results)
                    logger.info(f"主题'{topic}'查询到{len(topic_results)}条结果")
            except Exception as e:
                logger.error(f"查询主题'{topic}'时发生错误: {str(e)}")

        logger.info(f"知识库查询完成,耗时: {time.time() - query_start_time:.3f}秒,共获取{len(all_results)}条结果")

        # 4. De-duplicate by content.
        process_start_time = time.time()
        unique_contents = set()
        filtered_results = []
        for result in all_results:
            content = result["content"]
            if content not in unique_contents:
                unique_contents.add(content)
                filtered_results.append(result)

        # 5. Sort by similarity, descending.
        filtered_results.sort(key=lambda x: x["similarity"], reverse=True)

        # 6. Cap at 10 results overall.
        filtered_results = filtered_results[:10]

        logger.info(
            f"结果处理完成,耗时: {time.time() - process_start_time:.3f}秒,过滤后剩余{len(filtered_results)}条结果"
        )

        # 7. Format the output, grouped by topic.
        if filtered_results:
            format_start_time = time.time()
            grouped_results = {}
            for result in filtered_results:
                topic = result["topic"]
                if topic not in grouped_results:
                    grouped_results[topic] = []
                grouped_results[topic].append(result)

            for topic, results in grouped_results.items():
                related_info += f"【主题: {topic}\n"
                for _i, result in enumerate(results, 1):
                    _similarity = result["similarity"]
                    content = result["content"].strip()
                    # Debug variant with index and similarity:
                    # related_info += f"{i}. [{similarity:.2f}] {content}\n"
                    related_info += f"{content}\n"
                related_info += "\n"

            logger.info(f"格式化输出完成,耗时: {time.time() - format_start_time:.3f}秒")

        logger.info(f"知识库检索总耗时: {time.time() - start_time:.3f}秒")
        return related_info

    @staticmethod
    def get_info_from_db(
        query_embedding: list, limit: int = 1, threshold: float = 0.5, return_raw: bool = False
    ) -> Union[str, list]:
        """Cosine-similarity search over ``db.knowledges``.

        Args:
            query_embedding: embedding vector to match against stored docs.
            limit: maximum number of documents to return.
            threshold: minimum cosine similarity to keep a document.
            return_raw: if True, return the raw result dicts (content +
                similarity); otherwise a newline-joined content string.
        """
        if not query_embedding:
            return "" if not return_raw else []
        # Cosine similarity computed inside the aggregation pipeline:
        # dot(embedding, query) / (|embedding| * |query|).
        pipeline = [
            {
                "$addFields": {
                    "dotProduct": {
                        "$reduce": {
                            "input": {"$range": [0, {"$size": "$embedding"}]},
                            "initialValue": 0,
                            "in": {
                                "$add": [
                                    "$$value",
                                    {
                                        "$multiply": [
                                            {"$arrayElemAt": ["$embedding", "$$this"]},
                                            {"$arrayElemAt": [query_embedding, "$$this"]},
                                        ]
                                    },
                                ]
                            },
                        }
                    },
                    "magnitude1": {
                        "$sqrt": {
                            "$reduce": {
                                "input": "$embedding",
                                "initialValue": 0,
                                "in": {"$add": ["$$value", {"$multiply": ["$$this", "$$this"]}]},
                            }
                        }
                    },
                    "magnitude2": {
                        "$sqrt": {
                            "$reduce": {
                                "input": query_embedding,
                                "initialValue": 0,
                                "in": {"$add": ["$$value", {"$multiply": ["$$this", "$$this"]}]},
                            }
                        }
                    },
                }
            },
            {"$addFields": {"similarity": {"$divide": ["$dotProduct", {"$multiply": ["$magnitude1", "$magnitude2"]}]}}},
            {
                "$match": {
                    "similarity": {"$gte": threshold}  # keep only results at or above the threshold
                }
            },
            {"$sort": {"similarity": -1}},
            {"$limit": limit},
            {"$project": {"content": 1, "similarity": 1}},
        ]

        results = list(db.knowledges.aggregate(pipeline))
        logger.debug(f"知识库查询结果数量: {len(results)}")
        if not results:
            return "" if not return_raw else []

        if return_raw:
            return results
        else:
            # Join all found contents with newlines.
            return "\n".join(str(result["content"]) for result in results)
# Module-level initialization, executed once at import time.
init_prompt()  # presumably registers the prompt templates defined in this module — verify in init_prompt
# Shared PromptBuilder instance for other modules to import and reuse.
prompt_builder = PromptBuilder()

View File

@ -1,25 +1,26 @@
import time import time
from random import random
import traceback import traceback
from typing import List from random import random
from ...memory_system.Hippocampus import HippocampusManager from typing import List, Optional
from ...moods.moods import MoodManager
from ....config.config import global_config from src.common.logger import get_module_logger, CHAT_STYLE_CONFIG, LogConfig
from ...chat.emoji_manager import emoji_manager from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
from .reasoning_generator import ResponseGenerator from .reasoning_generator import ResponseGenerator
from ...chat.chat_stream import chat_manager
from ...chat.emoji_manager import emoji_manager
from ...chat.message import MessageSending, MessageRecv, MessageThinking, MessageSet from ...chat.message import MessageSending, MessageRecv, MessageThinking, MessageSet
from ...chat.message_buffer import message_buffer
from ...chat.messagesender import message_manager from ...chat.messagesender import message_manager
from ...storage.storage import MessageStorage
from ...chat.utils import is_mentioned_bot_in_message from ...chat.utils import is_mentioned_bot_in_message
from ...chat.utils_image import image_path_to_base64 from ...chat.utils_image import image_path_to_base64
from ...willing.willing_manager import willing_manager from ...memory_system.Hippocampus import HippocampusManager
from ...message import UserInfo, Seg from ...message import UserInfo, Seg
from src.common.logger import get_module_logger, CHAT_STYLE_CONFIG, LogConfig from ...moods.moods import MoodManager
from ...chat.chat_stream import chat_manager
from ...person_info.relationship_manager import relationship_manager from ...person_info.relationship_manager import relationship_manager
from ...chat.message_buffer import message_buffer from ...storage.storage import MessageStorage
from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
from ...utils.timer_calculater import Timer from ...utils.timer_calculater import Timer
from ...willing.willing_manager import willing_manager
from ....config.config import global_config
# 定义日志配置 # 定义日志配置
chat_config = LogConfig( chat_config = LogConfig(
@ -35,7 +36,6 @@ class ReasoningChat:
self.storage = MessageStorage() self.storage = MessageStorage()
self.gpt = ResponseGenerator() self.gpt = ResponseGenerator()
self.mood_manager = MoodManager.get_instance() self.mood_manager = MoodManager.get_instance()
self.mood_manager.start_mood_update()
@staticmethod @staticmethod
async def _create_thinking_message(message, chat, userinfo, messageinfo): async def _create_thinking_message(message, chat, userinfo, messageinfo):
@ -61,7 +61,7 @@ class ReasoningChat:
return thinking_id return thinking_id
@staticmethod @staticmethod
async def _send_response_messages(message, chat, response_set: List[str], thinking_id) -> MessageSending: async def _send_response_messages(message, chat, response_set: List[str], thinking_id) -> Optional[MessageSending]:
"""发送回复消息""" """发送回复消息"""
container = message_manager.get_container(chat.stream_id) container = message_manager.get_container(chat.stream_id)
thinking_message = None thinking_message = None
@ -74,7 +74,7 @@ class ReasoningChat:
if not thinking_message: if not thinking_message:
logger.warning("未找到对应的思考消息,可能已超时被移除") logger.warning("未找到对应的思考消息,可能已超时被移除")
return return None
thinking_start_time = thinking_message.thinking_start_time thinking_start_time = thinking_message.thinking_start_time
message_set = MessageSet(chat, thinking_id) message_set = MessageSet(chat, thinking_id)
@ -156,17 +156,17 @@ class ReasoningChat:
# 消息加入缓冲池 # 消息加入缓冲池
await message_buffer.start_caching_messages(message) await message_buffer.start_caching_messages(message)
# logger.info("使用推理聊天模式")
# 创建聊天流 # 创建聊天流
chat = await chat_manager.get_or_create_stream( chat = await chat_manager.get_or_create_stream(
platform=messageinfo.platform, platform=messageinfo.platform,
user_info=userinfo, user_info=userinfo,
group_info=groupinfo, group_info=groupinfo,
) )
message.update_chat_stream(chat) message.update_chat_stream(chat)
await message.process() await message.process()
logger.trace(f"消息处理成功: {message.processed_plain_text}")
# 过滤词/正则表达式过滤 # 过滤词/正则表达式过滤
if self._check_ban_words(message.processed_plain_text, chat, userinfo) or self._check_ban_regex( if self._check_ban_words(message.processed_plain_text, chat, userinfo) or self._check_ban_regex(
@ -174,27 +174,13 @@ class ReasoningChat:
): ):
return return
await self.storage.store_message(message, chat)
# 记忆激活
with Timer("记忆激活", timing_results):
interested_rate = await HippocampusManager.get_instance().get_activate_from_text(
message.processed_plain_text, fast_retrieval=True
)
# 查询缓冲器结果会整合前面跳过的消息改变processed_plain_text # 查询缓冲器结果会整合前面跳过的消息改变processed_plain_text
buffer_result = await message_buffer.query_buffer_result(message) buffer_result = await message_buffer.query_buffer_result(message)
# 处理提及
is_mentioned, reply_probability = is_mentioned_bot_in_message(message)
# 意愿管理器设置当前message信息
willing_manager.setup(message, chat, is_mentioned, interested_rate)
# 处理缓冲器结果 # 处理缓冲器结果
if not buffer_result: if not buffer_result:
await willing_manager.bombing_buffer_message_handle(message.message_info.message_id) # await willing_manager.bombing_buffer_message_handle(message.message_info.message_id)
willing_manager.delete(message.message_info.message_id) # willing_manager.delete(message.message_info.message_id)
f_type = "seglist" f_type = "seglist"
if message.message_segment.type != "seglist": if message.message_segment.type != "seglist":
f_type = message.message_segment.type f_type = message.message_segment.type
@ -213,6 +199,27 @@ class ReasoningChat:
logger.info("触发缓冲,已炸飞消息列") logger.info("触发缓冲,已炸飞消息列")
return return
try:
await self.storage.store_message(message, chat)
logger.trace(f"存储成功 (通过缓冲后): {message.processed_plain_text}")
except Exception as e:
logger.error(f"存储消息失败: {e}")
logger.error(traceback.format_exc())
# 存储失败可能仍需考虑是否继续,暂时返回
return
is_mentioned, reply_probability = is_mentioned_bot_in_message(message)
# 记忆激活
with Timer("记忆激活", timing_results):
interested_rate = await HippocampusManager.get_instance().get_activate_from_text(
message.processed_plain_text, fast_retrieval=True
)
# 处理提及
# 意愿管理器设置当前message信息
willing_manager.setup(message, chat, is_mentioned, interested_rate)
# 获取回复概率 # 获取回复概率
is_willing = False is_willing = False
if reply_probability != 1: if reply_probability != 1:

View File

@ -44,7 +44,7 @@ class ResponseGenerator:
async def generate_response(self, message: MessageThinking, thinking_id: str) -> Optional[Union[str, List[str]]]: async def generate_response(self, message: MessageThinking, thinking_id: str) -> Optional[Union[str, List[str]]]:
"""根据当前模型类型选择对应的生成函数""" """根据当前模型类型选择对应的生成函数"""
# 从global_config中获取模型概率值并选择模型 # 从global_config中获取模型概率值并选择模型
if random.random() < global_config.MODEL_R1_PROBABILITY: if random.random() < global_config.model_reasoning_probability:
self.current_model_type = "深深地" self.current_model_type = "深深地"
current_model = self.model_reasoning current_model = self.model_reasoning
else: else:

File diff suppressed because it is too large Load Diff

View File

@ -5,7 +5,8 @@ import time
from pathlib import Path from pathlib import Path
import datetime import datetime
from rich.console import Console from rich.console import Console
from memory_manual_build import Memory_graph, Hippocampus # 海马体和记忆图 from Hippocampus import Hippocampus # 海马体和记忆图
from dotenv import load_dotenv from dotenv import load_dotenv
@ -45,13 +46,13 @@ else:
# 查询节点信息 # 查询节点信息
def query_mem_info(memory_graph: Memory_graph): def query_mem_info(hippocampus: Hippocampus):
while True: while True:
query = input("\n请输入新的查询概念(输入'退出'以结束):") query = input("\n请输入新的查询概念(输入'退出'以结束):")
if query.lower() == "退出": if query.lower() == "退出":
break break
items_list = memory_graph.get_related_item(query) items_list = hippocampus.memory_graph.get_related_item(query)
if items_list: if items_list:
have_memory = False have_memory = False
first_layer, second_layer = items_list first_layer, second_layer = items_list
@ -312,14 +313,11 @@ def alter_mem_edge(hippocampus: Hippocampus):
async def main(): async def main():
start_time = time.time() start_time = time.time()
# 创建记忆图
memory_graph = Memory_graph()
# 创建海马体 # 创建海马体
hippocampus = Hippocampus(memory_graph) hippocampus = Hippocampus()
# 从数据库同步数据 # 从数据库同步数据
hippocampus.sync_memory_from_db() hippocampus.entorhinal_cortex.sync_memory_from_db()
end_time = time.time() end_time = time.time()
logger.info(f"\033[32m[加载海马体耗时: {end_time - start_time:.2f} 秒]\033[0m") logger.info(f"\033[32m[加载海马体耗时: {end_time - start_time:.2f} 秒]\033[0m")
@ -338,7 +336,7 @@ async def main():
query = -1 query = -1
if query == 0: if query == 0:
query_mem_info(memory_graph) query_mem_info(hippocampus.memory_graph)
elif query == 1: elif query == 1:
add_mem_node(hippocampus) add_mem_node(hippocampus)
elif query == 2: elif query == 2:
@ -355,7 +353,7 @@ async def main():
print("已结束操作") print("已结束操作")
break break
hippocampus.sync_memory_to_db() hippocampus.entorhinal_cortex.sync_memory_to_db()
if __name__ == "__main__": if __name__ == "__main__":

View File

@ -12,7 +12,6 @@ class Seg:
- 对于 text 类型data 是字符串 - 对于 text 类型data 是字符串
- 对于 image 类型data base64 字符串 - 对于 image 类型data base64 字符串
- 对于 seglist 类型data Seg 列表 - 对于 seglist 类型data Seg 列表
translated_data: 经过翻译处理的数据可选
""" """
type: str type: str

View File

@ -2,9 +2,11 @@ import asyncio
import json import json
import re import re
from datetime import datetime from datetime import datetime
from typing import Tuple, Union from typing import Tuple, Union, Dict, Any
import aiohttp import aiohttp
from aiohttp.client import ClientResponse
from src.common.logger import get_module_logger from src.common.logger import get_module_logger
import base64 import base64
from PIL import Image from PIL import Image
@ -16,19 +18,72 @@ from ...config.config import global_config
logger = get_module_logger("model_utils") logger = get_module_logger("model_utils")
class PayLoadTooLargeError(Exception):
"""自定义异常类,用于处理请求体过大错误"""
def __init__(self, message: str):
super().__init__(message)
self.message = message
def __str__(self):
return "请求体过大,请尝试压缩图片或减少输入内容。"
class RequestAbortException(Exception):
"""自定义异常类,用于处理请求中断异常"""
def __init__(self, message: str, response: ClientResponse):
super().__init__(message)
self.message = message
self.response = response
def __str__(self):
return self.message
class PermissionDeniedException(Exception):
"""自定义异常类,用于处理访问拒绝的异常"""
def __init__(self, message: str):
super().__init__(message)
self.message = message
def __str__(self):
return self.message
# 常见Error Code Mapping
error_code_mapping = {
400: "参数不正确",
401: "API key 错误,认证失败,请检查/config/bot_config.toml和.env中的配置是否正确哦~",
402: "账号余额不足",
403: "需要实名,或余额不足",
404: "Not Found",
429: "请求过于频繁,请稍后再试",
500: "服务器内部故障",
503: "服务器负载过高",
}
class LLMRequest: class LLMRequest:
# 定义需要转换的模型列表,作为类变量避免重复 # 定义需要转换的模型列表,作为类变量避免重复
MODELS_NEEDING_TRANSFORMATION = [ MODELS_NEEDING_TRANSFORMATION = [
"o3-mini", "o1",
"o1-mini",
"o1-preview",
"o1-2024-12-17", "o1-2024-12-17",
"o1-preview-2024-09-12", "o1-mini",
"o3-mini-2025-01-31",
"o1-mini-2024-09-12", "o1-mini-2024-09-12",
"o1-preview",
"o1-preview-2024-09-12",
"o1-pro",
"o1-pro-2025-03-19",
"o3",
"o3-2025-04-16",
"o3-mini",
"o3-mini-2025-01-31o4-mini",
"o4-mini-2025-04-16",
] ]
def __init__(self, model, **kwargs): def __init__(self, model: dict, **kwargs):
# 将大写的配置键转换为小写并从config中获取实际值 # 将大写的配置键转换为小写并从config中获取实际值
try: try:
self.api_key = os.environ[model["key"]] self.api_key = os.environ[model["key"]]
@ -37,7 +92,7 @@ class LLMRequest:
logger.error(f"原始 model dict 信息:{model}") logger.error(f"原始 model dict 信息:{model}")
logger.error(f"配置错误:找不到对应的配置项 - {str(e)}") logger.error(f"配置错误:找不到对应的配置项 - {str(e)}")
raise ValueError(f"配置错误:找不到对应的配置项 - {str(e)}") from e raise ValueError(f"配置错误:找不到对应的配置项 - {str(e)}") from e
self.model_name = model["name"] self.model_name: str = model["name"]
self.params = kwargs self.params = kwargs
self.stream = model.get("stream", False) self.stream = model.get("stream", False)
@ -123,6 +178,7 @@ class LLMRequest:
output_cost = (completion_tokens / 1000000) * self.pri_out output_cost = (completion_tokens / 1000000) * self.pri_out
return round(input_cost + output_cost, 6) return round(input_cost + output_cost, 6)
'''
async def _execute_request( async def _execute_request(
self, self,
endpoint: str, endpoint: str,
@ -509,6 +565,404 @@ class LLMRequest:
logger.error(f"模型 {self.model_name} 达到最大重试次数,请求仍然失败") logger.error(f"模型 {self.model_name} 达到最大重试次数,请求仍然失败")
raise RuntimeError(f"模型 {self.model_name} 达到最大重试次数API请求仍然失败") raise RuntimeError(f"模型 {self.model_name} 达到最大重试次数API请求仍然失败")
'''
async def _prepare_request(
self,
endpoint: str,
prompt: str = None,
image_base64: str = None,
image_format: str = None,
payload: dict = None,
retry_policy: dict = None,
) -> Dict[str, Any]:
"""配置请求参数
Args:
endpoint: API端点路径 ( "chat/completions")
prompt: prompt文本
image_base64: 图片的base64编码
image_format: 图片格式
payload: 请求体数据
retry_policy: 自定义重试策略
request_type: 请求类型
"""
# 合并重试策略
default_retry = {
"max_retries": 3,
"base_wait": 10,
"retry_codes": [429, 413, 500, 503],
"abort_codes": [400, 401, 402, 403],
}
policy = {**default_retry, **(retry_policy or {})}
api_url = f"{self.base_url.rstrip('/')}/{endpoint.lstrip('/')}"
stream_mode = self.stream
# 构建请求体
if image_base64:
payload = await self._build_payload(prompt, image_base64, image_format)
elif payload is None:
payload = await self._build_payload(prompt)
if stream_mode:
payload["stream"] = stream_mode
return {
"policy": policy,
"payload": payload,
"api_url": api_url,
"stream_mode": stream_mode,
"image_base64": image_base64, # 保留必要的exception处理所需的原始数据
"image_format": image_format,
"prompt": prompt,
}
async def _execute_request(
self,
endpoint: str,
prompt: str = None,
image_base64: str = None,
image_format: str = None,
payload: dict = None,
retry_policy: dict = None,
response_handler: callable = None,
user_id: str = "system",
request_type: str = None,
):
"""统一请求执行入口
Args:
endpoint: API端点路径 ( "chat/completions")
prompt: prompt文本
image_base64: 图片的base64编码
image_format: 图片格式
payload: 请求体数据
retry_policy: 自定义重试策略
response_handler: 自定义响应处理器
user_id: 用户ID
request_type: 请求类型
"""
# 获取请求配置
request_content = await self._prepare_request(
endpoint, prompt, image_base64, image_format, payload, retry_policy
)
if request_type is None:
request_type = self.request_type
for retry in range(request_content["policy"]["max_retries"]):
try:
# 使用上下文管理器处理会话
headers = await self._build_headers()
# 似乎是openai流式必须要的东西,不过阿里云的qwq-plus加了这个没有影响
if request_content["stream_mode"]:
headers["Accept"] = "text/event-stream"
async with aiohttp.ClientSession() as session:
async with session.post(
request_content["api_url"], headers=headers, json=request_content["payload"]
) as response:
handled_result = await self._handle_response(
response, request_content, retry, response_handler, user_id, request_type, endpoint
)
return handled_result
except Exception as e:
handled_payload, count_delta = await self._handle_exception(e, retry, request_content)
retry += count_delta # 降级不计入重试次数
if handled_payload:
# 如果降级成功,重新构建请求体
request_content["payload"] = handled_payload
continue
logger.error(f"模型 {self.model_name} 达到最大重试次数,请求仍然失败")
raise RuntimeError(f"模型 {self.model_name} 达到最大重试次数API请求仍然失败")
async def _handle_response(
self,
response: ClientResponse,
request_content: Dict[str, Any],
retry_count: int,
response_handler: callable,
user_id,
request_type,
endpoint,
) -> Union[Dict[str, Any], None]:
policy = request_content["policy"]
stream_mode = request_content["stream_mode"]
if response.status in policy["retry_codes"] or response.status in policy["abort_codes"]:
await self._handle_error_response(response, retry_count, policy)
return
response.raise_for_status()
result = {}
if stream_mode:
# 将流式输出转化为非流式输出
result = await self._handle_stream_output(response)
else:
result = await response.json()
return (
response_handler(result)
if response_handler
else self._default_response_handler(result, user_id, request_type, endpoint)
)
async def _handle_stream_output(self, response: ClientResponse) -> Dict[str, Any]:
flag_delta_content_finished = False
accumulated_content = ""
usage = None # 初始化usage变量避免未定义错误
reasoning_content = ""
content = ""
async for line_bytes in response.content:
try:
line = line_bytes.decode("utf-8").strip()
if not line:
continue
if line.startswith("data:"):
data_str = line[5:].strip()
if data_str == "[DONE]":
break
try:
chunk = json.loads(data_str)
if flag_delta_content_finished:
chunk_usage = chunk.get("usage", None)
if chunk_usage:
usage = chunk_usage # 获取token用量
else:
delta = chunk["choices"][0]["delta"]
delta_content = delta.get("content")
if delta_content is None:
delta_content = ""
accumulated_content += delta_content
# 检测流式输出文本是否结束
finish_reason = chunk["choices"][0].get("finish_reason")
if delta.get("reasoning_content", None):
reasoning_content += delta["reasoning_content"]
if finish_reason == "stop":
chunk_usage = chunk.get("usage", None)
if chunk_usage:
usage = chunk_usage
break
# 部分平台在文本输出结束前不会返回token用量此时需要再获取一次chunk
flag_delta_content_finished = True
except Exception as e:
logger.exception(f"模型 {self.model_name} 解析流式输出错误: {str(e)}")
except Exception as e:
if isinstance(e, GeneratorExit):
log_content = f"模型 {self.model_name} 流式输出被中断,正在清理资源..."
else:
log_content = f"模型 {self.model_name} 处理流式输出时发生错误: {str(e)}"
logger.warning(log_content)
# 确保资源被正确清理
try:
await response.release()
except Exception as cleanup_error:
logger.error(f"清理资源时发生错误: {cleanup_error}")
# 返回已经累积的内容
content = accumulated_content
if not content:
content = accumulated_content
think_match = re.search(r"<think>(.*?)</think>", content, re.DOTALL)
if think_match:
reasoning_content = think_match.group(1).strip()
content = re.sub(r"<think>.*?</think>", "", content, flags=re.DOTALL).strip()
result = {
"choices": [
{
"message": {
"content": content,
"reasoning_content": reasoning_content,
# 流式输出可能没有工具调用此处不需要添加tool_calls字段
}
}
],
"usage": usage,
}
return result
async def _handle_error_response(
self, response: ClientResponse, retry_count: int, policy: Dict[str, Any]
) -> Union[Dict[str, any]]:
if response.status in policy["retry_codes"]:
wait_time = policy["base_wait"] * (2**retry_count)
logger.warning(f"模型 {self.model_name} 错误码: {response.status}, 等待 {wait_time}秒后重试")
if response.status == 413:
logger.warning("请求体过大,尝试压缩...")
raise PayLoadTooLargeError("请求体过大")
elif response.status in [500, 503]:
logger.error(
f"模型 {self.model_name} 错误码: {response.status} - {error_code_mapping.get(response.status)}"
)
raise RuntimeError("服务器负载过高模型恢复失败QAQ")
else:
logger.warning(f"模型 {self.model_name} 请求限制(429),等待{wait_time}秒后重试...")
raise RuntimeError("请求限制(429)")
elif response.status in policy["abort_codes"]:
if response.status != 403:
raise RequestAbortException("请求出现错误,中断处理", response)
else:
raise PermissionDeniedException("模型禁止访问")
async def _handle_exception(
self, exception, retry_count: int, request_content: Dict[str, Any]
) -> Union[Tuple[Dict[str, Any], int], Tuple[None, int]]:
policy = request_content["policy"]
payload = request_content["payload"]
wait_time = policy["base_wait"] * (2**retry_count)
if retry_count < policy["max_retries"] - 1:
keep_request = True
if isinstance(exception, RequestAbortException):
response = exception.response
logger.error(
f"模型 {self.model_name} 错误码: {response.status} - {error_code_mapping.get(response.status)}"
)
# 尝试获取并记录服务器返回的详细错误信息
try:
error_json = await response.json()
if error_json and isinstance(error_json, list) and len(error_json) > 0:
# 处理多个错误的情况
for error_item in error_json:
if "error" in error_item and isinstance(error_item["error"], dict):
error_obj: dict = error_item["error"]
error_code = error_obj.get("code")
error_message = error_obj.get("message")
error_status = error_obj.get("status")
logger.error(
f"服务器错误详情: 代码={error_code}, 状态={error_status}, 消息={error_message}"
)
elif isinstance(error_json, dict) and "error" in error_json:
# 处理单个错误对象的情况
error_obj = error_json.get("error", {})
error_code = error_obj.get("code")
error_message = error_obj.get("message")
error_status = error_obj.get("status")
logger.error(f"服务器错误详情: 代码={error_code}, 状态={error_status}, 消息={error_message}")
else:
# 记录原始错误响应内容
logger.error(f"服务器错误响应: {error_json}")
except Exception as e:
logger.warning(f"无法解析服务器错误响应: {str(e)}")
raise RuntimeError(f"请求被拒绝: {error_code_mapping.get(response.status)}")
elif isinstance(exception, PermissionDeniedException):
# 只针对硅基流动的V3和R1进行降级处理
if self.model_name.startswith("Pro/deepseek-ai") and self.base_url == "https://api.siliconflow.cn/v1/":
old_model_name = self.model_name
self.model_name = self.model_name[4:] # 移除"Pro/"前缀
logger.warning(f"检测到403错误模型从 {old_model_name} 降级为 {self.model_name}")
# 对全局配置进行更新
if global_config.llm_normal.get("name") == old_model_name:
global_config.llm_normal["name"] = self.model_name
logger.warning(f"将全局配置中的 llm_normal 模型临时降级至{self.model_name}")
if global_config.llm_reasoning.get("name") == old_model_name:
global_config.llm_reasoning["name"] = self.model_name
logger.warning(f"将全局配置中的 llm_reasoning 模型临时降级至{self.model_name}")
if payload and "model" in payload:
payload["model"] = self.model_name
await asyncio.sleep(wait_time)
return payload, -1
raise RuntimeError(f"请求被拒绝: {error_code_mapping.get(403)}")
elif isinstance(exception, PayLoadTooLargeError):
if keep_request:
image_base64 = request_content["image_base64"]
compressed_image_base64 = compress_base64_image_by_scale(image_base64)
new_payload = await self._build_payload(
request_content["prompt"], compressed_image_base64, request_content["image_format"]
)
return new_payload, 0
else:
return None, 0
elif isinstance(exception, aiohttp.ClientError) or isinstance(exception, asyncio.TimeoutError):
if keep_request:
logger.error(f"模型 {self.model_name} 网络错误,等待{wait_time}秒后重试... 错误: {str(exception)}")
await asyncio.sleep(wait_time)
return None, 0
else:
logger.critical(f"模型 {self.model_name} 网络错误达到最大重试次数: {str(exception)}")
raise RuntimeError(f"网络请求失败: {str(exception)}")
elif isinstance(exception, aiohttp.ClientResponseError):
# 处理aiohttp抛出的除了policy中的status的响应错误
if keep_request:
logger.error(
f"模型 {self.model_name} HTTP响应错误等待{wait_time}秒后重试... 状态码: {exception.status}, 错误: {exception.message}"
)
try:
error_text = await exception.response.text()
error_json = json.loads(error_text)
if isinstance(error_json, list) and len(error_json) > 0:
# 处理多个错误的情况
for error_item in error_json:
if "error" in error_item and isinstance(error_item["error"], dict):
error_obj = error_item["error"]
logger.error(
f"模型 {self.model_name} 服务器错误详情: 代码={error_obj.get('code')}, "
f"状态={error_obj.get('status')}, "
f"消息={error_obj.get('message')}"
)
elif isinstance(error_json, dict) and "error" in error_json:
error_obj = error_json.get("error", {})
logger.error(
f"模型 {self.model_name} 服务器错误详情: 代码={error_obj.get('code')}, "
f"状态={error_obj.get('status')}, "
f"消息={error_obj.get('message')}"
)
else:
logger.error(f"模型 {self.model_name} 服务器错误响应: {error_json}")
except (json.JSONDecodeError, TypeError) as json_err:
logger.warning(
f"模型 {self.model_name} 响应不是有效的JSON: {str(json_err)}, 原始内容: {error_text[:200]}"
)
except Exception as parse_err:
logger.warning(f"模型 {self.model_name} 无法解析响应错误内容: {str(parse_err)}")
await asyncio.sleep(wait_time)
return None, 0
else:
logger.critical(
f"模型 {self.model_name} HTTP响应错误达到最大重试次数: 状态码: {exception.status}, 错误: {exception.message}"
)
# 安全地检查和记录请求详情
handled_payload = await self._safely_record(request_content, payload)
logger.critical(f"请求头: {await self._build_headers(no_key=True)} 请求体: {handled_payload}")
raise RuntimeError(
f"模型 {self.model_name} API请求失败: 状态码 {exception.status}, {exception.message}"
)
else:
if keep_request:
logger.error(f"模型 {self.model_name} 请求失败,等待{wait_time}秒后重试... 错误: {str(exception)}")
await asyncio.sleep(wait_time)
return None, 0
else:
logger.critical(f"模型 {self.model_name} 请求失败: {str(exception)}")
# 安全地检查和记录请求详情
handled_payload = await self._safely_record(request_content, payload)
logger.critical(f"请求头: {await self._build_headers(no_key=True)} 请求体: {handled_payload}")
raise RuntimeError(f"模型 {self.model_name} API请求失败: {str(exception)}")
async def _safely_record(self, request_content: Dict[str, Any], payload: Dict[str, Any]):
image_base64: str = request_content.get("image_base64")
image_format: str = request_content.get("image_format")
if (
image_base64
and payload
and isinstance(payload, dict)
and "messages" in payload
and len(payload["messages"]) > 0
):
if isinstance(payload["messages"][0], dict) and "content" in payload["messages"][0]:
content = payload["messages"][0]["content"]
if isinstance(content, list) and len(content) > 1 and "image_url" in content[1]:
payload["messages"][0]["content"][1]["image_url"]["url"] = (
f"data:image/{image_format.lower() if image_format else 'jpeg'};base64,"
f"{image_base64[:10]}...{image_base64[-10:]}"
)
# if isinstance(content, str) and len(content) > 100:
# payload["messages"][0]["content"] = content[:100]
return payload
async def _transform_parameters(self, params: dict) -> dict: async def _transform_parameters(self, params: dict) -> dict:
""" """
@ -532,30 +986,27 @@ class LLMRequest:
# 复制一份参数,避免直接修改 self.params # 复制一份参数,避免直接修改 self.params
params_copy = await self._transform_parameters(self.params) params_copy = await self._transform_parameters(self.params)
if image_base64: if image_base64:
payload = { messages = [
"model": self.model_name, {
"messages": [ "role": "user",
{ "content": [
"role": "user", {"type": "text", "text": prompt},
"content": [ {
{"type": "text", "text": prompt}, "type": "image_url",
{ "image_url": {"url": f"data:image/{image_format.lower()};base64,{image_base64}"},
"type": "image_url", },
"image_url": {"url": f"data:image/{image_format.lower()};base64,{image_base64}"}, ],
}, }
], ]
}
],
"max_tokens": global_config.max_response_length,
**params_copy,
}
else: else:
payload = { messages = [{"role": "user", "content": prompt}]
"model": self.model_name, payload = {
"messages": [{"role": "user", "content": prompt}], "model": self.model_name,
"max_tokens": global_config.max_response_length, "messages": messages,
**params_copy, **params_copy,
} }
if "max_tokens" not in payload and "max_completion_tokens" not in payload:
payload["max_tokens"] = global_config.max_response_length
# 如果 payload 中依然存在 max_tokens 且需要转换,在这里进行再次检查 # 如果 payload 中依然存在 max_tokens 且需要转换,在这里进行再次检查
if self.model_name.lower() in self.MODELS_NEEDING_TRANSFORMATION and "max_tokens" in payload: if self.model_name.lower() in self.MODELS_NEEDING_TRANSFORMATION and "max_tokens" in payload:
payload["max_completion_tokens"] = payload.pop("max_tokens") payload["max_completion_tokens"] = payload.pop("max_tokens")
@ -648,11 +1099,10 @@ class LLMRequest:
async def generate_response_async(self, prompt: str, **kwargs) -> Union[str, Tuple]: async def generate_response_async(self, prompt: str, **kwargs) -> Union[str, Tuple]:
"""异步方式根据输入的提示生成模型的响应""" """异步方式根据输入的提示生成模型的响应"""
# 构建请求体 # 构建请求体不硬编码max_tokens
data = { data = {
"model": self.model_name, "model": self.model_name,
"messages": [{"role": "user", "content": prompt}], "messages": [{"role": "user", "content": prompt}],
"max_tokens": global_config.max_response_length,
**self.params, **self.params,
**kwargs, **kwargs,
} }

View File

@ -169,7 +169,7 @@ class PersonInfoManager:
"""给某个用户取名""" """给某个用户取名"""
if not person_id: if not person_id:
logger.debug("取名失败person_id不能为空") logger.debug("取名失败person_id不能为空")
return return None
old_name = await self.get_value(person_id, "person_name") old_name = await self.get_value(person_id, "person_name")
old_reason = await self.get_value(person_id, "name_reason") old_reason = await self.get_value(person_id, "name_reason")
@ -198,9 +198,9 @@ class PersonInfoManager:
"nickname": "昵称", "nickname": "昵称",
"reason": "理由" "reason": "理由"
}""" }"""
logger.debug(f"取名提示词:{qv_name_prompt}") # logger.debug(f"取名提示词:{qv_name_prompt}")
response = await self.qv_name_llm.generate_response(qv_name_prompt) response = await self.qv_name_llm.generate_response(qv_name_prompt)
logger.debug(f"取名回复:{response}") logger.debug(f"取名提示词:{qv_name_prompt}\n取名回复:{response}")
result = self._extract_json_from_text(response[0]) result = self._extract_json_from_text(response[0])
if not result["nickname"]: if not result["nickname"]:
@ -217,7 +217,7 @@ class PersonInfoManager:
await self.update_one_field(person_id, "name_reason", result["reason"]) await self.update_one_field(person_id, "name_reason", result["reason"])
self.person_name_list[person_id] = result["nickname"] self.person_name_list[person_id] = result["nickname"]
logger.debug(f"用户 {person_id} 的名称已更新为 {result['nickname']},原因:{result['reason']}") # logger.debug(f"用户 {person_id} 的名称已更新为 {result['nickname']},原因:{result['reason']}")
return result return result
else: else:
existing_names += f"{result['nickname']}" existing_names += f"{result['nickname']}"

View File

@ -89,8 +89,8 @@ class RelationshipManager:
person_id = person_info_manager.get_person_id(platform, user_id) person_id = person_info_manager.get_person_id(platform, user_id)
is_qved = await person_info_manager.has_one_field(person_id, "person_name") is_qved = await person_info_manager.has_one_field(person_id, "person_name")
old_name = await person_info_manager.get_value(person_id, "person_name") old_name = await person_info_manager.get_value(person_id, "person_name")
print(f"old_name: {old_name}") # print(f"old_name: {old_name}")
print(f"is_qved: {is_qved}") # print(f"is_qved: {is_qved}")
if is_qved and old_name is not None: if is_qved and old_name is not None:
return True return True
else: else:

View File

@ -134,3 +134,4 @@ def main():
heartbeat_thread.start() heartbeat_thread.start()
return heartbeat_thread # 返回线程对象,便于外部控制 return heartbeat_thread # 返回线程对象,便于外部控制
return None

View File

@ -1,8 +1,7 @@
[inner] [inner]
version = "1.3.1" version = "1.4.0"
#----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读----
#以下是给开发人员阅读的,一般用户不需要阅读
#如果你想要修改配置文件请在修改后将version的值进行变更 #如果你想要修改配置文件请在修改后将version的值进行变更
#如果新增项目请在BotConfig类下新增相应的变量 #如果新增项目请在BotConfig类下新增相应的变量
#1.如果你修改的是[]层级项目,例如你新增了 [memory],那么请在config.py的 load_config函数中的include_configs字典中新增"内容":{ #1.如果你修改的是[]层级项目,例如你新增了 [memory],那么请在config.py的 load_config函数中的include_configs字典中新增"内容":{
@ -19,11 +18,12 @@ version = "1.3.1"
# 次版本号:当你做了向下兼容的功能性新增, # 次版本号:当你做了向下兼容的功能性新增,
# 修订号:当你做了向下兼容的问题修正。 # 修订号:当你做了向下兼容的问题修正。
# 先行版本号及版本编译信息可以加到“主版本号.次版本号.修订号”的后面,作为延伸。 # 先行版本号及版本编译信息可以加到“主版本号.次版本号.修订号”的后面,作为延伸。
#----以上是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读----
[bot] [bot]
qq = 114514 qq = 1145141919810
nickname = "麦麦" nickname = "麦麦"
alias_names = ["麦叠", "牢麦"] alias_names = ["麦叠", "牢麦"] #该选项还在调试中,暂时未生效
[groups] [groups]
talk_allowed = [ talk_allowed = [
@ -41,23 +41,24 @@ personality_sides = [
"用一句话或几句话描述人格的一些细节", "用一句话或几句话描述人格的一些细节",
"用一句话或几句话描述人格的一些细节", "用一句话或几句话描述人格的一些细节",
"用一句话或几句话描述人格的一些细节", "用一句话或几句话描述人格的一些细节",
]# 条数任意 ]# 条数任意不能为0, 该选项还在调试中,可能未完全生效
[identity] #アイデンティティがない 生まれないらららら [identity] #アイデンティティがない 生まれないらららら
# 兴趣爱好 未完善,有些条目未使用 # 兴趣爱好 未完善,有些条目未使用
identity_detail = [ identity_detail = [
"身份特点", "身份特点",
"身份特点", "身份特点",
]# 条数任意 ]# 条数任意不能为0, 该选项还在调试中,可能未完全生效
#外貌特征 #外貌特征
height = 170 # 身高 单位厘米 height = 170 # 身高 单位厘米 该选项还在调试中,暂时未生效
weight = 50 # 体重 单位千克 weight = 50 # 体重 单位千克 该选项还在调试中,暂时未生效
age = 20 # 年龄 单位岁 age = 20 # 年龄 单位岁 该选项还在调试中,暂时未生效
gender = "男" # 性别 gender = "男" # 性别 该选项还在调试中,暂时未生效
appearance = "用几句话描述外貌特征" # 外貌特征 appearance = "用几句话描述外貌特征" # 外貌特征 该选项还在调试中,暂时未生效
[schedule] [schedule]
enable_schedule_gen = true # 是否启用日程表(尚未完成) enable_schedule_gen = true # 是否启用日程表
enable_schedule_interaction = true # 日程表是否影响回复模式
prompt_schedule_gen = "用几句话描述描述性格特点或行动规律,这个特征会用来生成日程表" prompt_schedule_gen = "用几句话描述描述性格特点或行动规律,这个特征会用来生成日程表"
schedule_doing_update_interval = 900 # 日程表更新间隔 单位秒 schedule_doing_update_interval = 900 # 日程表更新间隔 单位秒
schedule_temperature = 0.1 # 日程表温度建议0.1-0.5 schedule_temperature = 0.1 # 日程表温度建议0.1-0.5
@ -67,19 +68,25 @@ time_zone = "Asia/Shanghai" # 给你的机器人设置时区,可以解决运
nonebot-qq="http://127.0.0.1:18002/api/message" nonebot-qq="http://127.0.0.1:18002/api/message"
[response] #群聊的回复策略 [response] #群聊的回复策略
#reasoning推理模式麦麦会根据上下文进行推理并给出回复 enable_heart_flowC = true
#heart_flow结合了PFC模式和心流模式麦麦会进行主动的观察和回复并给出回复 # 该功能还在完善中
response_mode = "heart_flow" # 回复策略可选值heart_flow心流reasoning推理) # 是否启用heart_flowC(心流聊天,HFC)模式
# 启用后麦麦会自主选择进入heart_flowC模式(持续一段时间进行主动的观察和回复并给出回复比较消耗token
#推理回复参数 #一般回复参数
model_r1_probability = 0.7 # 麦麦回答时选择主要回复模型1 模型的概率 model_reasoning_probability = 0.7 # 麦麦回答时选择推理模型 模型的概率
model_v3_probability = 0.3 # 麦麦回答时选择次要回复模型2 模型的概率 model_normal_probability = 0.3 # 麦麦回答时选择一般模型 模型的概率
[heartflow] #启用启用heart_flowC(心流聊天)模式时生效,需要填写以下参数
reply_trigger_threshold = 3.0 # 心流聊天触发阈值,越低越容易进入心流聊天
probability_decay_factor_per_second = 0.2 # 概率衰减因子,越大衰减越快,越高越容易退出心流聊天
default_decay_rate_per_second = 0.98 # 默认衰减率,越大衰减越快,越高越难进入心流聊天
initial_duration = 60 # 初始持续时间,越大心流聊天持续的时间越长
[heartflow] # 注意可能会消耗大量token请谨慎开启仅会使用v3模型
sub_heart_flow_update_interval = 60 # 子心流更新频率,间隔 单位秒
sub_heart_flow_freeze_time = 100 # 子心流冻结时间,超过这个时间没有回复,子心流会冻结,间隔 单位秒
sub_heart_flow_stop_time = 500 # 子心流停止时间,超过这个时间没有回复,子心流会停止,间隔 单位秒 sub_heart_flow_stop_time = 500 # 子心流停止时间,超过这个时间没有回复,子心流会停止,间隔 单位秒
heart_flow_update_interval = 600 # 心流更新频率,间隔 单位秒 # sub_heart_flow_update_interval = 60
# sub_heart_flow_freeze_time = 100
# heart_flow_update_interval = 600
observation_context_size = 20 # 心流观察到的最长上下文大小,超过这个值的上下文会被压缩 observation_context_size = 20 # 心流观察到的最长上下文大小,超过这个值的上下文会被压缩
compressed_length = 5 # 不能大于observation_context_size,心流上下文压缩的最短压缩长度超过心流观察到的上下文长度会压缩最短压缩长度为5 compressed_length = 5 # 不能大于observation_context_size,心流上下文压缩的最短压缩长度超过心流观察到的上下文长度会压缩最短压缩长度为5
@ -87,11 +94,13 @@ compress_length_limit = 5 #最多压缩份数,超过该数值的压缩上下
[message] [message]
max_context_size = 12 # 麦麦获得的上文数量建议12太短太长都会导致脑袋尖尖 max_context_size = 12 # 麦麦回复时获得的上文数量建议12太短太长都会导致脑袋尖尖
emoji_chance = 0.2 # 麦麦使用表情包的概率设置为1让麦麦自己决定发不发 emoji_chance = 0.2 # 麦麦一般回复时使用表情包的概率设置为1让麦麦自己决定发不发
thinking_timeout = 60 # 麦麦最长思考时间,超过这个时间的思考会放弃 thinking_timeout = 100 # 麦麦最长思考时间,超过这个时间的思考会放弃往往是api反应太慢
max_response_length = 256 # 麦麦回答的最大token数 max_response_length = 256 # 麦麦单次回答的最大token数
message_buffer = true # 启用消息缓冲器?启用此项以解决消息的拆分问题,但会使麦麦的回复延迟 message_buffer = true # 启用消息缓冲器?启用此项以解决消息的拆分问题,但会使麦麦的回复延迟
# 以下是消息过滤,可以根据规则过滤特定消息,将不会读取这些消息
ban_words = [ ban_words = [
# "403","张三" # "403","张三"
] ]
@ -103,22 +112,23 @@ ban_msgs_regex = [
# "\\[CQ:at,qq=\\d+\\]" # 匹配@ # "\\[CQ:at,qq=\\d+\\]" # 匹配@
] ]
[willing] [willing] # 一般回复模式的回复意愿设置
willing_mode = "classical" # 回复意愿模式 —— 经典模式classical动态模式dynamicmxp模式mxp自定义模式custom需要你自己实现 willing_mode = "classical" # 回复意愿模式 —— 经典模式classical动态模式dynamicmxp模式mxp自定义模式custom需要你自己实现
response_willing_amplifier = 1 # 麦麦回复意愿放大系数一般为1 response_willing_amplifier = 1 # 麦麦回复意愿放大系数一般为1
response_interested_rate_amplifier = 1 # 麦麦回复兴趣度放大系数,听到记忆里的内容时放大系数 response_interested_rate_amplifier = 1 # 麦麦回复兴趣度放大系数,听到记忆里的内容时放大系数
down_frequency_rate = 3 # 降低回复频率的群组回复意愿降低系数 除法 down_frequency_rate = 3 # 降低回复频率的群组回复意愿降低系数 除法
emoji_response_penalty = 0.1 # 表情包回复惩罚系数设为0为不回复单个表情包减少单独回复表情包的概率 emoji_response_penalty = 0 # 表情包回复惩罚系数设为0为不回复单个表情包减少单独回复表情包的概率
mentioned_bot_inevitable_reply = false # 提及 bot 必然回复 mentioned_bot_inevitable_reply = false # 提及 bot 必然回复
at_bot_inevitable_reply = false # @bot 必然回复 at_bot_inevitable_reply = false # @bot 必然回复
[emoji] [emoji]
max_emoji_num = 120 # 表情包最大数量 max_emoji_num = 90 # 表情包最大数量
max_reach_deletion = true # 开启则在达到最大数量时删除表情包,关闭则达到最大数量时不删除,只是不会继续收集表情包 max_reach_deletion = true # 开启则在达到最大数量时删除表情包,关闭则达到最大数量时不删除,只是不会继续收集表情包
check_interval = 30 # 检查表情包(注册,破损,删除)的时间间隔(分钟) check_interval = 30 # 检查表情包(注册,破损,删除)的时间间隔(分钟)
auto_save = true # 是否保存表情包和图片 auto_save = true # 是否保存表情包和图片
enable_check = false # 是否启用表情包过滤
check_prompt = "符合公序良俗" # 表情包过滤要求 enable_check = false # 是否启用表情包过滤,只有符合该要求的表情包才会被保存
check_prompt = "符合公序良俗" # 表情包过滤要求,只有符合该要求的表情包才会被保存
[memory] [memory]
build_memory_interval = 2000 # 记忆构建间隔 单位秒 间隔越低,麦麦学习越多,但是冗余信息也会增多 build_memory_interval = 2000 # 记忆构建间隔 单位秒 间隔越低,麦麦学习越多,但是冗余信息也会增多
@ -131,7 +141,8 @@ forget_memory_interval = 1000 # 记忆遗忘间隔 单位秒 间隔越低,
memory_forget_time = 24 #多长时间后的记忆会被遗忘 单位小时 memory_forget_time = 24 #多长时间后的记忆会被遗忘 单位小时
memory_forget_percentage = 0.01 # 记忆遗忘比例 控制记忆遗忘程度 越大遗忘越多 建议保持默认 memory_forget_percentage = 0.01 # 记忆遗忘比例 控制记忆遗忘程度 越大遗忘越多 建议保持默认
memory_ban_words = [ #不希望记忆的词 #不希望记忆的词,已经记忆的不会受到影响
memory_ban_words = [
# "403","张三" # "403","张三"
] ]
@ -167,7 +178,7 @@ word_replace_rate=0.006 # 整词替换概率
[response_splitter] [response_splitter]
enable_response_splitter = true # 是否启用回复分割器 enable_response_splitter = true # 是否启用回复分割器
response_max_length = 100 # 回复允许的最大长度 response_max_length = 256 # 回复允许的最大长度
response_max_sentence_num = 4 # 回复允许的最大句子数 response_max_sentence_num = 4 # 回复允许的最大句子数
[remote] #发送统计信息,主要是看全球有多少只麦麦 [remote] #发送统计信息,主要是看全球有多少只麦麦

View File

@ -29,8 +29,18 @@ CHAT_ANY_WHERE_KEY=
SILICONFLOW_KEY= SILICONFLOW_KEY=
# 定义日志相关配置 # 定义日志相关配置
SIMPLE_OUTPUT=true # 精简控制台输出格式
CONSOLE_LOG_LEVEL=INFO # 自定义日志的默认控制台输出日志级别 # 精简控制台输出格式
FILE_LOG_LEVEL=DEBUG # 自定义日志的默认文件输出日志级别 SIMPLE_OUTPUT=true
DEFAULT_CONSOLE_LOG_LEVEL=SUCCESS # 原生日志的控制台输出日志级别nonebot就是这一类
DEFAULT_FILE_LOG_LEVEL=DEBUG # 原生日志的默认文件输出日志级别nonebot就是这一类 # 自定义日志的默认控制台输出日志级别
CONSOLE_LOG_LEVEL=INFO
# 自定义日志的默认文件输出日志级别
FILE_LOG_LEVEL=DEBUG
# 原生日志的控制台输出日志级别nonebot就是这一类
DEFAULT_CONSOLE_LOG_LEVEL=SUCCESS
# 原生日志的默认文件输出日志级别nonebot就是这一类
DEFAULT_FILE_LOG_LEVEL=DEBUG