Merge branch 'dev' into dev

pull/992/head
2829798842 2025-05-28 08:56:59 +08:00 committed by GitHub
commit 1ac0b2d7ea
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
57 changed files with 910 additions and 1410 deletions

View File

@ -2,7 +2,8 @@
## [0.7.0] -2025-6-1 ## [0.7.0] -2025-6-1
- 重构数据库弃用MongoDB采用轻量sqlite,无需额外安装 - 重构数据库弃用MongoDB采用轻量sqlite,无需额外安装
- 重构HFC可扩展的聊天模式 - 重构HFC可扩展的聊天模式支持独立的表达模式
- HFC丰富HFC的决策信息更好的把握聊天内容
- HFC初步支持插件v0.1(测试版) - HFC初步支持插件v0.1(测试版)
- 重构表情包模块 - 重构表情包模块
- 移除日程系统 - 移除日程系统
@ -26,30 +27,39 @@
- 插件:禁言动作 - 插件:禁言动作
- 表达器:装饰语言风格 - 表达器:装饰语言风格
- 可通过插件添加和自定义HFC部件目前只支持action定义 - 可通过插件添加和自定义HFC部件目前只支持action定义
- 为专注模式添加关系线索
- 在专注模式下麦麦可以决定自行发送语音消息需要搭配tts适配器
- 优化reply减少复读
**新增表达方式学习**
- 在专注模式下,麦麦可以有独特的表达方式
- 自主学习群聊中的表达方式,更贴近群友
- 可自定义的学习频率和开关
- 根据人设生成额外的表达方式
**聊天管理**
- 移除不在线状态
- 优化自动模式下normal与focus聊天的切换机制
- 大幅精简聊天状态切换规则,减少复杂度
- 移除聊天限额数量
**插件系统** **插件系统**
- 添加示例插件 - 添加示例插件
- 示例插件:禁言插件 - 示例插件:禁言插件
- 示例插件:豆包绘图插件 - 示例插件:豆包绘图插件
**新增表达方式学习** **人格**
- 自主学习群聊中的表达方式,更贴近群友 - 简化了人格身份的配置
- 可自定义的学习频率和开关
- 根据人设生成额外的表达方式
**聊天管理**
- 移除不在线状态
- 大幅精简聊天状态切换规则,减少复杂度
- 移除聊天限额数量
**数据库重构** **数据库重构**
- 移除了默认使用MongoDB采用轻量sqlite - 移除了默认使用MongoDB采用轻量sqlite
- 无需额外安装数据库 - 无需额外安装数据库
- 提供迁移脚本 - 提供迁移脚本
**优化** **优化**
- 移除日程系统,减少幻觉(将会在未来版本回归) - 移除日程系统,减少幻觉(将会在未来版本回归)
- 移除主心流思考和LLM进入聊天判定 - 移除主心流思考和LLM进入聊天判定
- 支持qwen3模型支持自定义是否思考和思考长度
## [0.6.3-fix-4] - 2025-5-18 ## [0.6.3-fix-4] - 2025-5-18

View File

@ -1,6 +1,7 @@
from src.chat.heart_flow.heartflow import heartflow from src.chat.heart_flow.heartflow import heartflow
from src.chat.heart_flow.sub_heartflow import ChatState from src.chat.heart_flow.sub_heartflow import ChatState
from src.common.logger_manager import get_logger from src.common.logger_manager import get_logger
import time
logger = get_logger("api") logger = get_logger("api")
@ -30,6 +31,29 @@ async def get_subheartflow_cycle_info(subheartflow_id: str, history_len: int) ->
return None return None
async def get_normal_chat_replies(subheartflow_id: str, limit: int = 10) -> list:
"""获取子心流的NormalChat回复记录
Args:
subheartflow_id: 子心流ID
limit: 最大返回数量默认10条
Returns:
list: 回复记录列表如果未找到则返回空列表
"""
replies = await heartflow.api_get_normal_chat_replies(subheartflow_id, limit)
logger.debug(f"子心流 {subheartflow_id} NormalChat回复记录: 获取到 {len(replies) if replies else 0}")
if replies:
# 格式化时间戳为可读时间
for reply in replies:
if "time" in reply:
reply["formatted_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(reply["time"]))
return replies
else:
logger.warning(f"子心流 {subheartflow_id} NormalChat回复记录未找到")
return []
async def get_all_states(): async def get_all_states():
"""获取所有状态""" """获取所有状态"""
all_states = await heartflow.api_get_all_states() all_states = await heartflow.api_get_all_states()

View File

@ -62,7 +62,6 @@ class APIBotConfig:
# focus_chat # focus_chat
reply_trigger_threshold: float # 回复触发阈值 reply_trigger_threshold: float # 回复触发阈值
default_decay_rate_per_second: float # 默认每秒衰减率 default_decay_rate_per_second: float # 默认每秒衰减率
consecutive_no_reply_threshold: int # 连续不回复阈值
# compressed # compressed
compressed_length: int # 压缩长度 compressed_length: int # 压缩长度

View File

@ -149,7 +149,7 @@ class MaiEmoji:
emotion_str = ",".join(self.emotion) if self.emotion else "" emotion_str = ",".join(self.emotion) if self.emotion else ""
Emoji.create( Emoji.create(
hash=self.hash, emoji_hash=self.hash,
full_path=self.full_path, full_path=self.full_path,
format=self.format, format=self.format,
description=self.description, description=self.description,
@ -367,12 +367,14 @@ class EmojiManager:
return cls._instance return cls._instance
def __init__(self) -> None: def __init__(self) -> None:
self._initialized = None if self._initialized:
return # 如果已经初始化过,直接返回
self._scan_task = None self._scan_task = None
self.vlm = LLMRequest(model=global_config.model.vlm, temperature=0.3, max_tokens=1000, request_type="emoji") self.vlm = LLMRequest(model=global_config.model.vlm, temperature=0.3, max_tokens=1000, request_type="emoji")
self.llm_emotion_judge = LLMRequest( self.llm_emotion_judge = LLMRequest(
model=global_config.model.normal, max_tokens=600, request_type="emoji" model=global_config.model.utils, max_tokens=600, request_type="emoji"
) # 更高的温度更少的token后续可以根据情绪来调整温度 ) # 更高的温度更少的token后续可以根据情绪来调整温度
self.emoji_num = 0 self.emoji_num = 0
@ -389,6 +391,7 @@ class EmojiManager:
raise RuntimeError("数据库连接失败") raise RuntimeError("数据库连接失败")
_ensure_emoji_dir() _ensure_emoji_dir()
Emoji.create_table(safe=True) # Ensures table exists Emoji.create_table(safe=True) # Ensures table exists
self._initialized = True
def _ensure_db(self) -> None: def _ensure_db(self) -> None:
"""确保数据库已初始化""" """确保数据库已初始化"""
@ -467,7 +470,7 @@ class EmojiManager:
selected_emoji, similarity, matched_emotion = random.choice(top_emojis) selected_emoji, similarity, matched_emotion = random.choice(top_emojis)
# 更新使用次数 # 更新使用次数
self.record_usage(selected_emoji.emoji_hash) self.record_usage(selected_emoji.hash)
_time_end = time.time() _time_end = time.time()
@ -796,7 +799,7 @@ class EmojiManager:
# 删除选定的表情包 # 删除选定的表情包
logger.info(f"[决策] 删除表情包: {emoji_to_delete.description}") logger.info(f"[决策] 删除表情包: {emoji_to_delete.description}")
delete_success = await self.delete_emoji(emoji_to_delete.emoji_hash) delete_success = await self.delete_emoji(emoji_to_delete.hash)
if delete_success: if delete_success:
# 修复:等待异步注册完成 # 修复:等待异步注册完成

View File

@ -13,7 +13,6 @@ from src.chat.emoji_system.emoji_manager import emoji_manager
from src.chat.focus_chat.heartFC_sender import HeartFCSender from src.chat.focus_chat.heartFC_sender import HeartFCSender
from src.chat.utils.utils import process_llm_response from src.chat.utils.utils import process_llm_response
from src.chat.utils.info_catcher import info_catcher_manager from src.chat.utils.info_catcher import info_catcher_manager
from src.manager.mood_manager import mood_manager
from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info
from src.chat.message_receive.chat_stream import ChatStream from src.chat.message_receive.chat_stream import ChatStream
from src.chat.focus_chat.hfc_utils import parse_thinking_id_to_timestamp from src.chat.focus_chat.hfc_utils import parse_thinking_id_to_timestamp
@ -78,10 +77,10 @@ class DefaultExpressor:
self.log_prefix = "expressor" self.log_prefix = "expressor"
# TODO: API-Adapter修改标记 # TODO: API-Adapter修改标记
self.express_model = LLMRequest( self.express_model = LLMRequest(
model=global_config.model.normal, model=global_config.model.focus_expressor,
temperature=global_config.model.normal["temp"], # temperature=global_config.model.focus_expressor["temp"],
max_tokens=256, max_tokens=256,
request_type="response_heartflow", request_type="focus_expressor",
) )
self.heart_fc_sender = HeartFCSender() self.heart_fc_sender = HeartFCSender()
@ -150,22 +149,22 @@ class DefaultExpressor:
action_data=action_data, action_data=action_data,
) )
with Timer("选择表情", cycle_timers): with Timer("选择表情", cycle_timers):
emoji_keyword = action_data.get("emojis", []) emoji_keyword = action_data.get("emojis", [])
emoji_base64 = await self._choose_emoji(emoji_keyword) emoji_base64 = await self._choose_emoji(emoji_keyword)
if emoji_base64: if emoji_base64:
reply.append(("emoji", emoji_base64)) reply.append(("emoji", emoji_base64))
if reply: if reply:
with Timer("发送消息", cycle_timers): with Timer("发送消息", cycle_timers):
sent_msg_list = await self.send_response_messages( sent_msg_list = await self.send_response_messages(
anchor_message=anchor_message, anchor_message=anchor_message,
thinking_id=thinking_id, thinking_id=thinking_id,
response_set=reply, response_set=reply,
) )
has_sent_something = True has_sent_something = True
else: else:
logger.warning(f"{self.log_prefix} 文本回复生成失败") logger.warning(f"{self.log_prefix} 文本回复生成失败")
if not has_sent_something: if not has_sent_something:
logger.warning(f"{self.log_prefix} 回复动作未包含任何有效内容") logger.warning(f"{self.log_prefix} 回复动作未包含任何有效内容")
@ -174,6 +173,7 @@ class DefaultExpressor:
except Exception as e: except Exception as e:
logger.error(f"回复失败: {e}") logger.error(f"回复失败: {e}")
traceback.print_exc()
return False, None return False, None
# --- 回复器 (Replier) 的定义 --- # # --- 回复器 (Replier) 的定义 --- #
@ -192,9 +192,9 @@ class DefaultExpressor:
""" """
try: try:
# 1. 获取情绪影响因子并调整模型温度 # 1. 获取情绪影响因子并调整模型温度
arousal_multiplier = mood_manager.get_arousal_multiplier() # arousal_multiplier = mood_manager.get_arousal_multiplier()
current_temp = float(global_config.model.normal["temp"]) * arousal_multiplier # current_temp = float(global_config.model.normal["temp"]) * arousal_multiplier
self.express_model.params["temperature"] = current_temp # 动态调整温度 # self.express_model.params["temperature"] = current_temp # 动态调整温度
# 2. 获取信息捕捉器 # 2. 获取信息捕捉器
info_catcher = info_catcher_manager.get_info_catcher(thinking_id) info_catcher = info_catcher_manager.get_info_catcher(thinking_id)
@ -439,7 +439,13 @@ class DefaultExpressor:
if type == "emoji": if type == "emoji":
typing = False typing = False
sent_msg = await self.heart_fc_sender.send_message(bot_message, has_thinking=True, typing=typing) if anchor_message.raw_message:
set_reply = True
else:
set_reply = False
sent_msg = await self.heart_fc_sender.send_message(
bot_message, has_thinking=True, typing=typing, set_reply=set_reply
)
reply_message_ids.append(part_message_id) # 记录我们生成的ID reply_message_ids.append(part_message_id) # 记录我们生成的ID

View File

@ -5,7 +5,7 @@ from src.common.logger_manager import get_logger
from src.llm_models.utils_model import LLMRequest from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config from src.config.config import global_config
from src.chat.utils.chat_message_builder import get_raw_msg_by_timestamp_random, build_anonymous_messages from src.chat.utils.chat_message_builder import get_raw_msg_by_timestamp_random, build_anonymous_messages
from src.chat.focus_chat.heartflow_prompt_builder import Prompt, global_prompt_manager from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
import os import os
import json import json
@ -61,10 +61,10 @@ class ExpressionLearner:
def __init__(self) -> None: def __init__(self) -> None:
# TODO: API-Adapter修改标记 # TODO: API-Adapter修改标记
self.express_learn_model: LLMRequest = LLMRequest( self.express_learn_model: LLMRequest = LLMRequest(
model=global_config.model.normal, model=global_config.model.focus_expressor,
temperature=0.1, temperature=0.1,
max_tokens=256, max_tokens=256,
request_type="response_heartflow", request_type="learn_expression",
) )
async def get_expression_by_chat_id(self, chat_id: str) -> Tuple[List[Dict[str, str]], List[Dict[str, str]]]: async def get_expression_by_chat_id(self, chat_id: str) -> Tuple[List[Dict[str, str]], List[Dict[str, str]]]:

View File

@ -3,7 +3,7 @@ import contextlib
import time import time
import traceback import traceback
from collections import deque from collections import deque
from typing import List, Optional, Dict, Any, Deque from typing import List, Optional, Dict, Any, Deque, Callable, Awaitable
from src.chat.message_receive.chat_stream import ChatStream from src.chat.message_receive.chat_stream import ChatStream
from src.chat.message_receive.chat_stream import chat_manager from src.chat.message_receive.chat_stream import chat_manager
from rich.traceback import install from rich.traceback import install
@ -19,6 +19,7 @@ from src.chat.focus_chat.info_processors.working_memory_processor import Working
from src.chat.focus_chat.info_processors.action_processor import ActionProcessor from src.chat.focus_chat.info_processors.action_processor import ActionProcessor
from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservation from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservation
from src.chat.heart_flow.observation.working_observation import WorkingMemoryObservation from src.chat.heart_flow.observation.working_observation import WorkingMemoryObservation
from src.chat.heart_flow.observation.structure_observation import StructureObservation
from src.chat.focus_chat.info_processors.tool_processor import ToolProcessor from src.chat.focus_chat.info_processors.tool_processor import ToolProcessor
from src.chat.focus_chat.expressors.default_expressor import DefaultExpressor from src.chat.focus_chat.expressors.default_expressor import DefaultExpressor
from src.chat.focus_chat.memory_activator import MemoryActivator from src.chat.focus_chat.memory_activator import MemoryActivator
@ -83,6 +84,7 @@ class HeartFChatting:
self, self,
chat_id: str, chat_id: str,
observations: list[Observation], observations: list[Observation],
on_stop_focus_chat: Optional[Callable[[], Awaitable[None]]] = None,
): ):
""" """
HeartFChatting 初始化函数 HeartFChatting 初始化函数
@ -90,6 +92,7 @@ class HeartFChatting:
参数: 参数:
chat_id: 聊天流唯一标识符(如stream_id) chat_id: 聊天流唯一标识符(如stream_id)
observations: 关联的观察列表 observations: 关联的观察列表
on_stop_focus_chat: 当收到stop_focus_chat命令时调用的回调函数
""" """
# 基础属性 # 基础属性
self.stream_id: str = chat_id # 聊天流ID self.stream_id: str = chat_id # 聊天流ID
@ -97,6 +100,7 @@ class HeartFChatting:
self.log_prefix: str = str(chat_id) # Initial default, will be updated self.log_prefix: str = str(chat_id) # Initial default, will be updated
self.hfcloop_observation = HFCloopObservation(observe_id=self.stream_id) self.hfcloop_observation = HFCloopObservation(observe_id=self.stream_id)
self.chatting_observation = observations[0] self.chatting_observation = observations[0]
self.structure_observation = StructureObservation(observe_id=self.stream_id)
self.memory_activator = MemoryActivator() self.memory_activator = MemoryActivator()
self.working_memory = WorkingMemory(chat_id=self.stream_id) self.working_memory = WorkingMemory(chat_id=self.stream_id)
@ -141,6 +145,9 @@ class HeartFChatting:
self._current_cycle: Optional[CycleDetail] = None self._current_cycle: Optional[CycleDetail] = None
self._shutting_down: bool = False # 关闭标志位 self._shutting_down: bool = False # 关闭标志位
# 存储回调函数
self.on_stop_focus_chat = on_stop_focus_chat
async def _initialize(self) -> bool: async def _initialize(self) -> bool:
""" """
执行懒初始化操作 执行懒初始化操作
@ -286,6 +293,19 @@ class HeartFChatting:
logger.debug(f"模板 {self.chat_stream.context.get_template_name()}") logger.debug(f"模板 {self.chat_stream.context.get_template_name()}")
loop_info = await self._observe_process_plan_action_loop(cycle_timers, thinking_id) loop_info = await self._observe_process_plan_action_loop(cycle_timers, thinking_id)
print(loop_info["loop_action_info"]["command"])
if loop_info["loop_action_info"]["command"] == "stop_focus_chat":
logger.info(f"{self.log_prefix} 麦麦决定停止专注聊天")
# 如果设置了回调函数,则调用它
if self.on_stop_focus_chat:
try:
await self.on_stop_focus_chat()
logger.info(f"{self.log_prefix} 成功调用回调函数处理停止专注聊天")
except Exception as e:
logger.error(f"{self.log_prefix} 调用停止专注聊天回调函数时出错: {e}")
logger.error(traceback.format_exc())
break
self._current_cycle.set_loop_info(loop_info) self._current_cycle.set_loop_info(loop_info)
self.hfcloop_observation.add_loop_info(self._current_cycle) self.hfcloop_observation.add_loop_info(self._current_cycle)
@ -408,17 +428,19 @@ class HeartFChatting:
return all_plan_info return all_plan_info
async def _observe_process_plan_action_loop(self, cycle_timers: dict, thinking_id: str) -> tuple[bool, str]: async def _observe_process_plan_action_loop(self, cycle_timers: dict, thinking_id: str) -> dict:
try: try:
with Timer("观察", cycle_timers): with Timer("观察", cycle_timers):
# await self.observations[0].observe() # await self.observations[0].observe()
await self.chatting_observation.observe() await self.chatting_observation.observe()
await self.working_observation.observe() await self.working_observation.observe()
await self.hfcloop_observation.observe() await self.hfcloop_observation.observe()
await self.structure_observation.observe()
observations: List[Observation] = [] observations: List[Observation] = []
observations.append(self.chatting_observation) observations.append(self.chatting_observation)
observations.append(self.working_observation) observations.append(self.working_observation)
observations.append(self.hfcloop_observation) observations.append(self.hfcloop_observation)
observations.append(self.structure_observation)
loop_observation_info = { loop_observation_info = {
"observations": observations, "observations": observations,
@ -462,13 +484,14 @@ class HeartFChatting:
logger.info(f"{self.log_prefix} 麦麦决定'{action_str}', 原因'{reasoning}'") logger.info(f"{self.log_prefix} 麦麦决定'{action_str}', 原因'{reasoning}'")
success, reply_text = await self._handle_action( success, reply_text, command = await self._handle_action(
action_type, reasoning, action_data, cycle_timers, thinking_id action_type, reasoning, action_data, cycle_timers, thinking_id
) )
loop_action_info = { loop_action_info = {
"action_taken": success, "action_taken": success,
"reply_text": reply_text, "reply_text": reply_text,
"command": command,
} }
loop_info = { loop_info = {
@ -483,7 +506,12 @@ class HeartFChatting:
except Exception as e: except Exception as e:
logger.error(f"{self.log_prefix} FOCUS聊天处理失败: {e}") logger.error(f"{self.log_prefix} FOCUS聊天处理失败: {e}")
logger.error(traceback.format_exc()) logger.error(traceback.format_exc())
return {} return {
"loop_observation_info": {},
"loop_processor_info": {},
"loop_plan_info": {},
"loop_action_info": {"action_taken": False, "reply_text": "", "command": ""},
}
async def _handle_action( async def _handle_action(
self, self,
@ -492,7 +520,7 @@ class HeartFChatting:
action_data: dict, action_data: dict,
cycle_timers: dict, cycle_timers: dict,
thinking_id: str, thinking_id: str,
) -> tuple[bool, str]: ) -> tuple[bool, str, str]:
""" """
处理规划动作使用动作工厂创建相应的动作处理器 处理规划动作使用动作工厂创建相应的动作处理器
@ -504,36 +532,48 @@ class HeartFChatting:
thinking_id: 思考ID thinking_id: 思考ID
返回: 返回:
tuple[bool, str]: (是否执行了动作, 思考消息ID) tuple[bool, str, str]: (是否执行了动作, 思考消息ID, 命令)
""" """
try: try:
# 使用工厂创建动作处理器实例 # 使用工厂创建动作处理器实例
action_handler = self.action_manager.create_action( try:
action_name=action, action_handler = self.action_manager.create_action(
action_data=action_data, action_name=action,
reasoning=reasoning, action_data=action_data,
cycle_timers=cycle_timers, reasoning=reasoning,
thinking_id=thinking_id, cycle_timers=cycle_timers,
observations=self.all_observations, thinking_id=thinking_id,
expressor=self.expressor, observations=self.all_observations,
chat_stream=self.chat_stream, expressor=self.expressor,
log_prefix=self.log_prefix, chat_stream=self.chat_stream,
shutting_down=self._shutting_down, log_prefix=self.log_prefix,
) shutting_down=self._shutting_down,
)
except Exception as e:
logger.error(f"{self.log_prefix} 创建动作处理器时出错: {e}")
traceback.print_exc()
return False, "", ""
if not action_handler: if not action_handler:
logger.warning(f"{self.log_prefix} 未能创建动作处理器: {action}, 原因: {reasoning}") logger.warning(f"{self.log_prefix} 未能创建动作处理器: {action}, 原因: {reasoning}")
return False, "" return False, "", ""
# 处理动作并获取结果 # 处理动作并获取结果
success, reply_text = await action_handler.handle_action() result = await action_handler.handle_action()
if len(result) == 3:
return success, reply_text success, reply_text, command = result
else:
success, reply_text = result
command = ""
logger.info(
f"{self.log_prefix} 麦麦决定'{action}', 原因'{reasoning}',返回结果'{success}', '{reply_text}', '{command}'"
)
return success, reply_text, command
except Exception as e: except Exception as e:
logger.error(f"{self.log_prefix} 处理{action}时出错: {e}") logger.error(f"{self.log_prefix} 处理{action}时出错: {e}")
traceback.print_exc() traceback.print_exc()
return False, "" return False, "", ""
async def shutdown(self): async def shutdown(self):
"""优雅关闭HeartFChatting实例取消活动循环任务""" """优雅关闭HeartFChatting实例取消活动循环任务"""

View File

@ -73,7 +73,7 @@ class HeartFCSender:
thinking_message = self.thinking_messages.get(chat_id, {}).get(message_id) thinking_message = self.thinking_messages.get(chat_id, {}).get(message_id)
return thinking_message.thinking_start_time if thinking_message else None return thinking_message.thinking_start_time if thinking_message else None
async def send_message(self, message: MessageSending, has_thinking=False, typing=False): async def send_message(self, message: MessageSending, has_thinking=False, typing=False, set_reply=False):
""" """
处理发送并存储一条消息 处理发送并存储一条消息
@ -97,7 +97,7 @@ class HeartFCSender:
message_id = message.message_info.message_id message_id = message.message_info.message_id
try: try:
if has_thinking: if set_reply:
_ = message.update_thinking_time() _ = message.update_thinking_time()
# --- 条件应用 set_reply 逻辑 --- # --- 条件应用 set_reply 逻辑 ---

View File

@ -205,8 +205,8 @@ class HeartFCMessageReceiver:
# 6. 兴趣度计算与更新 # 6. 兴趣度计算与更新
interested_rate, is_mentioned = await _calculate_interest(message) interested_rate, is_mentioned = await _calculate_interest(message)
await subheartflow.interest_chatting.increase_interest(value=interested_rate) # await subheartflow.interest_chatting.increase_interest(value=interested_rate)
subheartflow.interest_chatting.add_interest_dict(message, interested_rate, is_mentioned) subheartflow.add_interest_message(message, interested_rate, is_mentioned)
# 7. 日志记录 # 7. 日志记录
mes_name = chat.group_info.group_name if chat.group_info else "私聊" mes_name = chat.group_info.group_name if chat.group_info else "私聊"
@ -219,7 +219,8 @@ class HeartFCMessageReceiver:
) )
# 8. 关系处理 # 8. 关系处理
await _process_relationship(message) if global_config.relationship.give_name:
await _process_relationship(message)
except Exception as e: except Exception as e:
await _handle_error(e, "消息处理失败", message) await _handle_error(e, "消息处理失败", message)

View File

@ -76,7 +76,10 @@ class StructuredInfo:
""" """
info_str = "" info_str = ""
# print(f"self.data: {self.data}")
for key, value in self.data.items(): for key, value in self.data.items():
# print(f"key: {key}, value: {value}")
info_str += f"信息类型:{key},信息内容:{value}\n" info_str += f"信息类型:{key},信息内容:{value}\n"
return info_str return info_str

View File

@ -8,7 +8,6 @@ from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservati
from src.chat.heart_flow.observation.chatting_observation import ChattingObservation from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
from src.chat.message_receive.chat_stream import chat_manager from src.chat.message_receive.chat_stream import chat_manager
from typing import Dict from typing import Dict
from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config from src.config.config import global_config
import random import random
@ -21,15 +20,11 @@ class ActionProcessor(BaseProcessor):
用于处理Observation对象将其转换为ObsInfo对象 用于处理Observation对象将其转换为ObsInfo对象
""" """
log_prefix = "聊天信息处理" log_prefix = "动作处理"
def __init__(self): def __init__(self):
"""初始化观察处理器""" """初始化观察处理器"""
super().__init__() super().__init__()
# TODO: API-Adapter修改标记
self.model_summary = LLMRequest(
model=global_config.model.observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
)
async def process_info( async def process_info(
self, self,
@ -56,45 +51,57 @@ class ActionProcessor(BaseProcessor):
all_actions = None all_actions = None
hfc_obs = None hfc_obs = None
chat_obs = None chat_obs = None
# 收集所有观察对象
for obs in observations: for obs in observations:
if isinstance(obs, HFCloopObservation): if isinstance(obs, HFCloopObservation):
hfc_obs = obs hfc_obs = obs
if isinstance(obs, ChattingObservation): if isinstance(obs, ChattingObservation):
chat_obs = obs chat_obs = obs
# 合并所有动作变更
merged_action_changes = {"add": [], "remove": []}
reasons = []
# 处理HFCloopObservation
if hfc_obs: if hfc_obs:
obs = hfc_obs obs = hfc_obs
# 创建动作信息
all_actions = obs.all_actions all_actions = obs.all_actions
action_changes = await self.analyze_loop_actions(obs) action_changes = await self.analyze_loop_actions(obs)
if action_changes["add"] or action_changes["remove"]: if action_changes["add"] or action_changes["remove"]:
action_info.set_action_changes(action_changes) # 合并动作变更
# 设置变更原因 merged_action_changes["add"].extend(action_changes["add"])
reasons = [] merged_action_changes["remove"].extend(action_changes["remove"])
# 收集变更原因
if action_changes["add"]: if action_changes["add"]:
reasons.append(f"添加动作{action_changes['add']}因为检测到大量无回复") reasons.append(f"添加动作{action_changes['add']}因为检测到大量无回复")
if action_changes["remove"]: if action_changes["remove"]:
reasons.append(f"移除动作{action_changes['remove']}因为检测到连续回复") reasons.append(f"移除动作{action_changes['remove']}因为检测到连续回复")
action_info.set_reason(" | ".join(reasons))
# 处理ChattingObservation
if chat_obs and all_actions is not None: if chat_obs and all_actions is not None:
obs = chat_obs obs = chat_obs
action_changes = {"add": [], "remove": []}
# 检查动作的关联类型 # 检查动作的关联类型
chat_context = chat_manager.get_stream(obs.chat_id).context chat_context = chat_manager.get_stream(obs.chat_id).context
type_mismatched_actions = []
for action_name in all_actions.keys(): for action_name in all_actions.keys():
data = all_actions[action_name] data = all_actions[action_name]
if data.get("associated_types"): if data.get("associated_types"):
if not chat_context.check_types(data["associated_types"]): if not chat_context.check_types(data["associated_types"]):
action_changes["remove"].append(action_name) type_mismatched_actions.append(action_name)
logger.debug(f"{self.log_prefix} 动作 {action_name} 关联类型不匹配,移除该动作") logger.debug(f"{self.log_prefix} 动作 {action_name} 关联类型不匹配,移除该动作")
if len(action_changes["remove"]) > 0:
action_info.set_action_changes(action_changes) if type_mismatched_actions:
# 设置变更原因 # 合并到移除列表中
reasons = [] merged_action_changes["remove"].extend(type_mismatched_actions)
if action_info.get_reason(): reasons.append(f"移除动作{type_mismatched_actions}因为关联类型不匹配")
reasons.append(action_info.get_reason())
if action_changes["remove"]: # 如果有任何动作变更设置到action_info中
reasons.append(f"移除动作{action_changes['remove']}因为关联类型不匹配") if merged_action_changes["add"] or merged_action_changes["remove"]:
action_info.set_reason(" | ".join(reasons)) action_info.set_action_changes(merged_action_changes)
action_info.set_reason(" | ".join(reasons))
processed_infos.append(action_info) processed_infos.append(action_info)
@ -128,8 +135,15 @@ class ActionProcessor(BaseProcessor):
reply_sequence.append(action_type == "reply") reply_sequence.append(action_type == "reply")
# 检查no_reply比例 # 检查no_reply比例
if len(recent_cycles) >= 5 and (no_reply_count / len(recent_cycles)) >= 0.8: print(f"no_reply_count: {no_reply_count}, len(recent_cycles): {len(recent_cycles)}")
result["add"].append("exit_focus_chat") # print(1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111)
if len(recent_cycles) >= (4 * global_config.focus_chat.exit_focus_threshold) and (
no_reply_count / len(recent_cycles)
) >= (0.6 * global_config.focus_chat.exit_focus_threshold):
if global_config.chat.chat_mode == "auto":
result["add"].append("exit_focus_chat")
result["remove"].append("no_reply")
result["remove"].append("reply")
# 获取最近三次的reply状态 # 获取最近三次的reply状态
last_three = reply_sequence[-3:] if len(reply_sequence) >= 3 else reply_sequence last_three = reply_sequence[-3:] if len(reply_sequence) >= 3 else reply_sequence

View File

@ -28,7 +28,7 @@ class ChattingInfoProcessor(BaseProcessor):
super().__init__() super().__init__()
# TODO: API-Adapter修改标记 # TODO: API-Adapter修改标记
self.model_summary = LLMRequest( self.model_summary = LLMRequest(
model=global_config.model.observation, temperature=0.7, max_tokens=300, request_type="chat_observation" model=global_config.model.utils_small, temperature=0.7, max_tokens=300, request_type="chat_observation"
) )
async def process_info( async def process_info(

View File

@ -71,10 +71,10 @@ class MindProcessor(BaseProcessor):
self.subheartflow_id = subheartflow_id self.subheartflow_id = subheartflow_id
self.llm_model = LLMRequest( self.llm_model = LLMRequest(
model=global_config.model.sub_heartflow, model=global_config.model.focus_chat_mind,
temperature=global_config.model.sub_heartflow["temp"], temperature=global_config.model.focus_chat_mind["temp"],
max_tokens=800, max_tokens=800,
request_type="sub_heart_flow", request_type="focus_chat_mind",
) )
self.current_mind = "" self.current_mind = ""

View File

@ -61,10 +61,10 @@ class WorkingMemoryProcessor(BaseProcessor):
self.subheartflow_id = subheartflow_id self.subheartflow_id = subheartflow_id
self.llm_model = LLMRequest( self.llm_model = LLMRequest(
model=global_config.model.sub_heartflow, model=global_config.model.focus_chat_mind,
temperature=global_config.model.sub_heartflow["temp"], temperature=global_config.model.focus_chat_mind["temp"],
max_tokens=800, max_tokens=800,
request_type="working_memory", request_type="focus_working_memory",
) )
name = chat_manager.get_stream_name(self.subheartflow_id) name = chat_manager.get_stream_name(self.subheartflow_id)

View File

@ -4,24 +4,58 @@ from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservati
from src.llm_models.utils_model import LLMRequest from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config from src.config.config import global_config
from src.common.logger_manager import get_logger from src.common.logger_manager import get_logger
from src.chat.utils.prompt_builder import Prompt from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from datetime import datetime from datetime import datetime
from src.chat.memory_system.Hippocampus import HippocampusManager from src.chat.memory_system.Hippocampus import HippocampusManager
from typing import List, Dict from typing import List, Dict
import difflib import difflib
import json
from json_repair import repair_json
logger = get_logger("memory_activator") logger = get_logger("memory_activator")
def get_keywords_from_json(json_str):
"""
从JSON字符串中提取关键词列表
Args:
json_str: JSON格式的字符串
Returns:
List[str]: 关键词列表
"""
try:
# 使用repair_json修复JSON格式
fixed_json = repair_json(json_str)
# 如果repair_json返回的是字符串需要解析为Python对象
if isinstance(fixed_json, str):
result = json.loads(fixed_json)
else:
# 如果repair_json直接返回了字典对象直接使用
result = fixed_json
# 提取关键词
keywords = result.get("keywords", [])
return keywords
except Exception as e:
logger.error(f"解析关键词JSON失败: {e}")
return []
def init_prompt(): def init_prompt():
# --- Group Chat Prompt --- # --- Group Chat Prompt ---
memory_activator_prompt = """ memory_activator_prompt = """
你是一个记忆分析器你需要根据以下信息来进行会议 你是一个记忆分析器你需要根据以下信息来进行回忆
以下是一场聊天中的信息请根据这些信息总结出几个关键词作为记忆回忆的触发词 以下是一场聊天中的信息请根据这些信息总结出几个关键词作为记忆回忆的触发词
{obs_info_text} {obs_info_text}
历史关键词请避免重复提取这些关键词
{cached_keywords}
请输出一个json格式包含以下字段 请输出一个json格式包含以下字段
{{ {{
"keywords": ["关键词1", "关键词2", "关键词3",......] "keywords": ["关键词1", "关键词2", "关键词3",......]
@ -36,9 +70,10 @@ class MemoryActivator:
def __init__(self): def __init__(self):
# TODO: API-Adapter修改标记 # TODO: API-Adapter修改标记
self.summary_model = LLMRequest( self.summary_model = LLMRequest(
model=global_config.model.summary, temperature=0.7, max_tokens=50, request_type="chat_observation" model=global_config.model.memory_summary, temperature=0.7, max_tokens=50, request_type="chat_observation"
) )
self.running_memory = [] self.running_memory = []
self.cached_keywords = set() # 用于缓存历史关键词
async def activate_memory(self, observations) -> List[Dict]: async def activate_memory(self, observations) -> List[Dict]:
""" """
@ -61,31 +96,47 @@ class MemoryActivator:
elif isinstance(observation, HFCloopObservation): elif isinstance(observation, HFCloopObservation):
obs_info_text += observation.get_observe_info() obs_info_text += observation.get_observe_info()
logger.debug(f"回忆待检索内容obs_info_text: {obs_info_text}") # logger.debug(f"回忆待检索内容obs_info_text: {obs_info_text}")
# prompt = await global_prompt_manager.format_prompt( # 将缓存的关键词转换为字符串用于prompt
# "memory_activator_prompt", cached_keywords_str = ", ".join(self.cached_keywords) if self.cached_keywords else "暂无历史关键词"
# obs_info_text=obs_info_text,
# )
# logger.debug(f"prompt: {prompt}") prompt = await global_prompt_manager.format_prompt(
"memory_activator_prompt",
# response = await self.summary_model.generate_response(prompt) obs_info_text=obs_info_text,
cached_keywords=cached_keywords_str,
# logger.debug(f"response: {response}")
# # 只取response的第一个元素字符串
# response_str = response[0]
# keywords = list(get_keywords_from_json(response_str))
# #调用记忆系统获取相关记忆
# related_memory = await HippocampusManager.get_instance().get_memory_from_topic(
# valid_keywords=keywords, max_memory_num=3, max_memory_length=2, max_depth=3
# )
related_memory = await HippocampusManager.get_instance().get_memory_from_text(
text=obs_info_text, max_memory_num=5, max_memory_length=2, max_depth=3, fast_retrieval=True
) )
logger.debug(f"prompt: {prompt}")
response = await self.summary_model.generate_response(prompt)
logger.debug(f"response: {response}")
# 只取response的第一个元素字符串
response_str = response[0]
keywords = list(get_keywords_from_json(response_str))
# 更新关键词缓存
if keywords:
# 限制缓存大小最多保留10个关键词
if len(self.cached_keywords) > 10:
# 转换为列表,移除最早的关键词
cached_list = list(self.cached_keywords)
self.cached_keywords = set(cached_list[-8:])
# 添加新的关键词到缓存
self.cached_keywords.update(keywords)
logger.debug(f"更新关键词缓存: {self.cached_keywords}")
# 调用记忆系统获取相关记忆
related_memory = await HippocampusManager.get_instance().get_memory_from_topic(
valid_keywords=keywords, max_memory_num=3, max_memory_length=2, max_depth=3
)
# related_memory = await HippocampusManager.get_instance().get_memory_from_text(
# text=obs_info_text, max_memory_num=5, max_memory_length=2, max_depth=3, fast_retrieval=False
# )
# logger.debug(f"获取到的记忆: {related_memory}") # logger.debug(f"获取到的记忆: {related_memory}")
# 激活时所有已有记忆的duration+1达到3则移除 # 激活时所有已有记忆的duration+1达到3则移除

View File

@ -28,8 +28,6 @@ class ActionManager:
self._registered_actions: Dict[str, ActionInfo] = {} self._registered_actions: Dict[str, ActionInfo] = {}
# 当前正在使用的动作集合,默认加载默认动作 # 当前正在使用的动作集合,默认加载默认动作
self._using_actions: Dict[str, ActionInfo] = {} self._using_actions: Dict[str, ActionInfo] = {}
# 临时备份原始使用中的动作
self._original_actions_backup: Optional[Dict[str, ActionInfo]] = None
# 默认动作集,仅作为快照,用于恢复默认 # 默认动作集,仅作为快照,用于恢复默认
self._default_actions: Dict[str, ActionInfo] = {} self._default_actions: Dict[str, ActionInfo] = {}
@ -160,9 +158,9 @@ class ActionManager:
Optional[BaseAction]: 创建的动作处理器实例如果动作名称未注册则返回None Optional[BaseAction]: 创建的动作处理器实例如果动作名称未注册则返回None
""" """
# 检查动作是否在当前使用的动作集中 # 检查动作是否在当前使用的动作集中
if action_name not in self._using_actions: # if action_name not in self._using_actions:
logger.warning(f"当前不可用的动作类型: {action_name}") # logger.warning(f"当前不可用的动作类型: {action_name}")
return None # return None
handler_class = _ACTION_REGISTRY.get(action_name) handler_class = _ACTION_REGISTRY.get(action_name)
if not handler_class: if not handler_class:
@ -278,22 +276,20 @@ class ActionManager:
return True return True
def temporarily_remove_actions(self, actions_to_remove: List[str]) -> None: def temporarily_remove_actions(self, actions_to_remove: List[str]) -> None:
"""临时移除使用集中的指定动作,备份原始使用集""" """临时移除使用集中的指定动作"""
if self._original_actions_backup is None:
self._original_actions_backup = self._using_actions.copy()
for name in actions_to_remove: for name in actions_to_remove:
self._using_actions.pop(name, None) self._using_actions.pop(name, None)
def restore_actions(self) -> None: def restore_actions(self) -> None:
"""恢复之前备份的原始使用集""" """恢复到默认动作集"""
if self._original_actions_backup is not None: logger.debug(
self._using_actions = self._original_actions_backup.copy() f"恢复动作集: 从 {list(self._using_actions.keys())} 恢复到默认动作集 {list(self._default_actions.keys())}"
self._original_actions_backup = None )
self._using_actions = self._default_actions.copy()
def restore_default_actions(self) -> None: def restore_default_actions(self) -> None:
"""恢复默认动作集到使用集""" """恢复默认动作集到使用集"""
self._using_actions = self._default_actions.copy() self._using_actions = self._default_actions.copy()
self._original_actions_backup = None
def get_action(self, action_name: str) -> Optional[Type[BaseAction]]: def get_action(self, action_name: str) -> Optional[Type[BaseAction]]:
""" """

View File

@ -1,5 +1,6 @@
# 导入所有动作模块以确保装饰器被执行 # 导入所有动作模块以确保装饰器被执行
from . import reply_action # noqa from . import reply_action # noqa
from . import no_reply_action # noqa from . import no_reply_action # noqa
from . import exit_focus_chat_action # noqa
# 在此处添加更多动作模块导入 # 在此处添加更多动作模块导入

View File

@ -5,8 +5,6 @@ from src.chat.focus_chat.planners.actions.base_action import BaseAction, registe
from typing import Tuple, List from typing import Tuple, List
from src.chat.heart_flow.observation.observation import Observation from src.chat.heart_flow.observation.observation import Observation
from src.chat.message_receive.chat_stream import ChatStream from src.chat.message_receive.chat_stream import ChatStream
from src.chat.heart_flow.heartflow import heartflow
from src.chat.heart_flow.sub_heartflow import ChatState
logger = get_logger("action_taken") logger = get_logger("action_taken")
@ -27,7 +25,7 @@ class ExitFocusChatAction(BaseAction):
"当前内容不需要持续专注关注,你决定退出专注聊天", "当前内容不需要持续专注关注,你决定退出专注聊天",
"聊天内容已经完成,你决定退出专注聊天", "聊天内容已经完成,你决定退出专注聊天",
] ]
default = True default = False
def __init__( def __init__(
self, self,
@ -56,7 +54,6 @@ class ExitFocusChatAction(BaseAction):
self.observations = observations self.observations = observations
self.log_prefix = log_prefix self.log_prefix = log_prefix
self._shutting_down = shutting_down self._shutting_down = shutting_down
self.chat_id = chat_stream.stream_id
async def handle_action(self) -> Tuple[bool, str]: async def handle_action(self) -> Tuple[bool, str]:
""" """
@ -74,23 +71,8 @@ class ExitFocusChatAction(BaseAction):
try: try:
# 转换状态 # 转换状态
status_message = "" status_message = ""
self.sub_heartflow = await heartflow.get_or_create_subheartflow(self.chat_id) command = "stop_focus_chat"
if self.sub_heartflow: return True, status_message, command
try:
# 转换为normal_chat状态
await self.sub_heartflow.change_chat_state(ChatState.CHAT)
status_message = "已成功切换到普通聊天模式"
logger.info(f"{self.log_prefix} {status_message}")
except Exception as e:
error_msg = f"切换到普通聊天模式失败: {str(e)}"
logger.error(f"{self.log_prefix} {error_msg}")
return False, error_msg
else:
warning_msg = "未找到有效的sub heartflow实例无法切换状态"
logger.warning(f"{self.log_prefix} {warning_msg}")
return False, warning_msg
return True, status_message
except asyncio.CancelledError: except asyncio.CancelledError:
logger.info(f"{self.log_prefix} 处理 'exit_focus_chat' 时等待被中断 (CancelledError)") logger.info(f"{self.log_prefix} 处理 'exit_focus_chat' 时等待被中断 (CancelledError)")
@ -99,4 +81,4 @@ class ExitFocusChatAction(BaseAction):
error_msg = f"处理 'exit_focus_chat' 时发生错误: {str(e)}" error_msg = f"处理 'exit_focus_chat' 时发生错误: {str(e)}"
logger.error(f"{self.log_prefix} {error_msg}") logger.error(f"{self.log_prefix} {error_msg}")
logger.error(traceback.format_exc()) logger.error(traceback.format_exc())
return False, error_msg return False, "", ""

View File

@ -33,7 +33,10 @@ class MemoryManager:
self._id_map: Dict[str, MemoryItem] = {} self._id_map: Dict[str, MemoryItem] = {}
self.llm_summarizer = LLMRequest( self.llm_summarizer = LLMRequest(
model=global_config.model.summary, temperature=0.3, max_tokens=512, request_type="memory_summarization" model=global_config.model.focus_working_memory,
temperature=0.3,
max_tokens=512,
request_type="memory_summarization",
) )
@property @property

View File

@ -88,34 +88,36 @@ class BackgroundTaskManager:
f"聊天状态更新任务已启动 间隔:{STATE_UPDATE_INTERVAL_SECONDS}s", f"聊天状态更新任务已启动 间隔:{STATE_UPDATE_INTERVAL_SECONDS}s",
"_state_update_task", "_state_update_task",
), ),
(
self._run_cleanup_cycle,
"info",
f"清理任务已启动 间隔:{CLEANUP_INTERVAL_SECONDS}s",
"_cleanup_task",
),
# 新增私聊激活任务配置
(
# Use lambda to pass the interval to the runner function
lambda: self._run_private_chat_activation_cycle(PRIVATE_CHAT_ACTIVATION_CHECK_INTERVAL_SECONDS),
"debug",
f"私聊激活检查任务已启动 间隔:{PRIVATE_CHAT_ACTIVATION_CHECK_INTERVAL_SECONDS}s",
"_private_chat_activation_task",
),
] ]
# 根据 chat_mode 条件添加专注评估任务 # 根据 chat_mode 条件添加其他任务
if not (global_config.chat.chat_mode == "normal"): if not (global_config.chat.chat_mode == "normal"):
task_configs.append( task_configs.extend(
( [
self._run_into_focus_cycle, (
"debug", # 设为debug避免过多日志 self._run_cleanup_cycle,
f"专注评估任务已启动 间隔:{INTEREST_EVAL_INTERVAL_SECONDS}s", "info",
"_into_focus_task", f"清理任务已启动 间隔:{CLEANUP_INTERVAL_SECONDS}s",
) "_cleanup_task",
),
# 新增私聊激活任务配置
(
# Use lambda to pass the interval to the runner function
lambda: self._run_private_chat_activation_cycle(PRIVATE_CHAT_ACTIVATION_CHECK_INTERVAL_SECONDS),
"debug",
f"私聊激活检查任务已启动 间隔:{PRIVATE_CHAT_ACTIVATION_CHECK_INTERVAL_SECONDS}s",
"_private_chat_activation_task",
),
# (
# self._run_into_focus_cycle,
# "debug", # 设为debug避免过多日志
# f"专注评估任务已启动 间隔:{INTEREST_EVAL_INTERVAL_SECONDS}s",
# "_into_focus_task",
# )
]
) )
else: else:
logger.info("聊天模式为 normal跳过启动专注评估任务") logger.info("聊天模式为 normal跳过启动清理任务、私聊激活任务和专注评估任务")
# 统一启动所有任务 # 统一启动所有任务
for task_func, log_level, log_msg, task_attr_name in task_configs: for task_func, log_level, log_msg, task_attr_name in task_configs:
@ -215,10 +217,10 @@ class BackgroundTaskManager:
logger.info(f"[清理任务] 清理完成, 共停止 {stopped_count}/{len(flows_to_stop)} 个子心流") logger.info(f"[清理任务] 清理完成, 共停止 {stopped_count}/{len(flows_to_stop)} 个子心流")
# --- 新增兴趣评估工作函数 --- # --- 新增兴趣评估工作函数 ---
async def _perform_into_focus_work(self): # async def _perform_into_focus_work(self):
"""执行一轮子心流兴趣评估与提升检查。""" # """执行一轮子心流兴趣评估与提升检查。"""
# 直接调用 subheartflow_manager 的方法,并传递当前状态信息 # # 直接调用 subheartflow_manager 的方法,并传递当前状态信息
await self.subheartflow_manager.sbhf_normal_into_focus() # await self.subheartflow_manager.sbhf_normal_into_focus()
async def _run_state_update_cycle(self, interval: int): async def _run_state_update_cycle(self, interval: int):
await _run_periodic_loop(task_name="State Update", interval=interval, task_func=self._perform_state_update_work) await _run_periodic_loop(task_name="State Update", interval=interval, task_func=self._perform_state_update_work)
@ -229,12 +231,12 @@ class BackgroundTaskManager:
) )
# --- 新增兴趣评估任务运行器 --- # --- 新增兴趣评估任务运行器 ---
async def _run_into_focus_cycle(self): # async def _run_into_focus_cycle(self):
await _run_periodic_loop( # await _run_periodic_loop(
task_name="Into Focus", # task_name="Into Focus",
interval=INTEREST_EVAL_INTERVAL_SECONDS, # interval=INTEREST_EVAL_INTERVAL_SECONDS,
task_func=self._perform_into_focus_work, # task_func=self._perform_into_focus_work,
) # )
# 新增私聊激活任务运行器 # 新增私聊激活任务运行器
async def _run_private_chat_activation_cycle(self, interval: int): async def _run_private_chat_activation_cycle(self, interval: int):

View File

@ -1,6 +1,6 @@
from src.chat.heart_flow.sub_heartflow import SubHeartflow, ChatState from src.chat.heart_flow.sub_heartflow import SubHeartflow, ChatState
from src.common.logger_manager import get_logger from src.common.logger_manager import get_logger
from typing import Any, Optional from typing import Any, Optional, List
from src.chat.heart_flow.mai_state_manager import MaiStateInfo, MaiStateManager from src.chat.heart_flow.mai_state_manager import MaiStateInfo, MaiStateManager
from src.chat.heart_flow.subheartflow_manager import SubHeartflowManager from src.chat.heart_flow.subheartflow_manager import SubHeartflowManager
from src.chat.heart_flow.background_tasks import BackgroundTaskManager # Import BackgroundTaskManager from src.chat.heart_flow.background_tasks import BackgroundTaskManager # Import BackgroundTaskManager
@ -57,6 +57,23 @@ class Heartflow:
return heartfc_instance.get_cycle_history(last_n=history_len) return heartfc_instance.get_cycle_history(last_n=history_len)
async def api_get_normal_chat_replies(self, subheartflow_id: str, limit: int = 10) -> Optional[List[dict]]:
"""获取子心流的NormalChat回复记录
Args:
subheartflow_id: 子心流ID
limit: 最大返回数量默认10条
Returns:
Optional[List[dict]]: 回复记录列表如果子心流不存在则返回None
"""
subheartflow = await self.subheartflow_manager.get_or_create_subheartflow(subheartflow_id)
if not subheartflow:
logger.warning(f"尝试获取不存在的子心流 {subheartflow_id} 的NormalChat回复记录")
return None
return subheartflow.get_normal_chat_recent_replies(limit)
async def heartflow_start_working(self): async def heartflow_start_working(self):
"""启动后台任务""" """启动后台任务"""
await self.background_task_manager.start_tasks() await self.background_task_manager.start_tasks()

View File

@ -1,200 +0,0 @@
import asyncio
from src.config.config import global_config
from typing import Optional, Dict
import traceback
from src.common.logger_manager import get_logger
from src.chat.message_receive.message import MessageRecv
import math
# 定义常量 (从 interest.py 移动过来)
MAX_INTEREST = 15.0
logger = get_logger("interest_chatting")
PROBABILITY_INCREASE_RATE_PER_SECOND = 0.1
PROBABILITY_DECREASE_RATE_PER_SECOND = 0.1
MAX_REPLY_PROBABILITY = 1
class InterestChatting:
def __init__(
self,
decay_rate=global_config.focus_chat.default_decay_rate_per_second,
max_interest=MAX_INTEREST,
trigger_threshold=global_config.focus_chat.reply_trigger_threshold,
max_probability=MAX_REPLY_PROBABILITY,
):
# 基础属性初始化
self.interest_level: float = 0.0
self.decay_rate_per_second: float = decay_rate
self.max_interest: float = max_interest
self.trigger_threshold: float = trigger_threshold
self.max_reply_probability: float = max_probability
self.is_above_threshold: bool = False
# 任务相关属性初始化
self.update_task: Optional[asyncio.Task] = None
self._stop_event = asyncio.Event()
self._task_lock = asyncio.Lock()
self._is_running = False
self.interest_dict: Dict[str, tuple[MessageRecv, float, bool]] = {}
self.update_interval = 1.0
self.above_threshold = False
self.start_hfc_probability = 0.0
async def initialize(self):
async with self._task_lock:
if self._is_running:
logger.debug("后台兴趣更新任务已在运行中。")
return
# 清理已完成或已取消的任务
if self.update_task and (self.update_task.done() or self.update_task.cancelled()):
self.update_task = None
if not self.update_task:
self._stop_event.clear()
self._is_running = True
self.update_task = asyncio.create_task(self._run_update_loop(self.update_interval))
logger.debug("后台兴趣更新任务已创建并启动。")
def add_interest_dict(self, message: MessageRecv, interest_value: float, is_mentioned: bool):
"""添加消息到兴趣字典
参数:
message: 接收到的消息
interest_value: 兴趣值
is_mentioned: 是否被提及
功能:
1. 将消息添加到兴趣字典
2. 更新最后交互时间
3. 如果字典长度超过10删除最旧的消息
"""
# 添加新消息
self.interest_dict[message.message_info.message_id] = (message, interest_value, is_mentioned)
# 如果字典长度超过10删除最旧的消息
if len(self.interest_dict) > 10:
oldest_key = next(iter(self.interest_dict))
self.interest_dict.pop(oldest_key)
async def _calculate_decay(self):
"""计算兴趣值的衰减
参数:
current_time: 当前时间戳
处理逻辑:
1. 计算时间差
2. 处理各种异常情况(负值/零值)
3. 正常计算衰减
4. 更新最后更新时间
"""
# 处理极小兴趣值情况
if self.interest_level < 1e-9:
self.interest_level = 0.0
return
# 异常情况处理
if self.decay_rate_per_second <= 0:
logger.warning(f"衰减率({self.decay_rate_per_second})无效重置兴趣值为0")
self.interest_level = 0.0
return
# 正常衰减计算
try:
decay_factor = math.pow(self.decay_rate_per_second, self.update_interval)
self.interest_level *= decay_factor
except ValueError as e:
logger.error(
f"衰减计算错误: {e} 参数: 衰减率={self.decay_rate_per_second} 时间差={self.update_interval} 当前兴趣={self.interest_level}"
)
self.interest_level = 0.0
async def _update_reply_probability(self):
self.above_threshold = self.interest_level >= self.trigger_threshold
if self.above_threshold:
self.start_hfc_probability += PROBABILITY_INCREASE_RATE_PER_SECOND
else:
if self.start_hfc_probability > 0:
self.start_hfc_probability = max(0, self.start_hfc_probability - PROBABILITY_DECREASE_RATE_PER_SECOND)
async def increase_interest(self, value: float):
self.interest_level += value
self.interest_level = min(self.interest_level, self.max_interest)
async def decrease_interest(self, value: float):
self.interest_level -= value
self.interest_level = max(self.interest_level, 0.0)
async def get_interest(self) -> float:
return self.interest_level
async def get_state(self) -> dict:
interest = self.interest_level # 直接使用属性值
return {
"interest_level": round(interest, 2),
"start_hfc_probability": round(self.start_hfc_probability, 4),
"above_threshold": self.above_threshold,
}
# --- 新增后台更新任务相关方法 ---
async def _run_update_loop(self, update_interval: float = 1.0):
"""后台循环,定期更新兴趣和回复概率。"""
try:
while not self._stop_event.is_set():
try:
if self.interest_level != 0:
await self._calculate_decay()
await self._update_reply_probability()
# 等待下一个周期或停止事件
await asyncio.wait_for(self._stop_event.wait(), timeout=update_interval)
except asyncio.TimeoutError:
# 正常超时,继续循环
continue
except Exception as e:
logger.error(f"InterestChatting 更新循环出错: {e}")
logger.error(traceback.format_exc())
# 防止错误导致CPU飙升稍作等待
await asyncio.sleep(5)
except asyncio.CancelledError:
logger.info("InterestChatting 更新循环被取消。")
finally:
self._is_running = False
logger.info("InterestChatting 更新循环已停止。")
async def stop_updates(self):
"""停止后台更新任务,使用锁确保并发安全"""
async with self._task_lock:
if not self._is_running:
logger.debug("后台兴趣更新任务未运行。")
return
logger.info("正在停止 InterestChatting 后台更新任务...")
self._stop_event.set()
if self.update_task and not self.update_task.done():
try:
# 等待任务结束,设置超时
await asyncio.wait_for(self.update_task, timeout=5.0)
logger.info("InterestChatting 后台更新任务已成功停止。")
except asyncio.TimeoutError:
logger.warning("停止 InterestChatting 后台任务超时,尝试取消...")
self.update_task.cancel()
try:
await self.update_task # 等待取消完成
except asyncio.CancelledError:
logger.info("InterestChatting 后台更新任务已被取消。")
except Exception as e:
logger.error(f"停止 InterestChatting 后台任务时发生异常: {e}")
finally:
self.update_task = None
self._is_running = False

View File

@ -1,5 +1,4 @@
from datetime import datetime from datetime import datetime
from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config from src.config.config import global_config
import traceback import traceback
from src.chat.utils.chat_message_builder import ( from src.chat.utils.chat_message_builder import (
@ -66,10 +65,6 @@ class ChattingObservation(Observation):
self.oldest_messages = [] self.oldest_messages = []
self.oldest_messages_str = "" self.oldest_messages_str = ""
self.compressor_prompt = "" self.compressor_prompt = ""
# TODO: API-Adapter修改标记
self.model_summary = LLMRequest(
model=global_config.model.observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
)
async def initialize(self): async def initialize(self):
self.is_group_chat, self.chat_target_info = await get_chat_type_and_target_info(self.chat_id) self.is_group_chat, self.chat_target_info = await get_chat_type_and_target_info(self.chat_id)

View File

@ -84,10 +84,4 @@ class HFCloopObservation:
else: else:
cycle_info_block += "\n你还没看过消息\n" cycle_info_block += "\n你还没看过消息\n"
using_actions = self.action_manager.get_using_actions()
for action_name, action_info in using_actions.items():
action_description = action_info["description"]
cycle_info_block += f"\n你在聊天中可以使用{action_name},这个动作的描述是{action_description}\n"
cycle_info_block += "注意,除了上述动作选项之外,你在群聊里不能做其他任何事情,这是你能力的边界\n"
self.observe_info = cycle_info_block self.observe_info = cycle_info_block

View File

@ -26,7 +26,7 @@ class StructureObservation:
for structured_info in self.structured_info: for structured_info in self.structured_info:
if structured_info.get("ttl") > 0: if structured_info.get("ttl") > 0:
structured_info["ttl"] -= 1 structured_info["ttl"] -= 1
observed_structured_infos.append(structured_info) observed_structured_infos.append(structured_info)
logger.debug(f"观察到结构化信息仍旧在: {structured_info}") logger.debug(f"观察到结构化信息仍旧在: {structured_info}")
self.structured_info = observed_structured_infos self.structured_info = observed_structured_infos

View File

@ -12,7 +12,6 @@ from src.chat.normal_chat.normal_chat import NormalChat
from src.chat.heart_flow.mai_state_manager import MaiStateInfo from src.chat.heart_flow.mai_state_manager import MaiStateInfo
from src.chat.heart_flow.chat_state_info import ChatState, ChatStateInfo from src.chat.heart_flow.chat_state_info import ChatState, ChatStateInfo
from .utils_chat import get_chat_type_and_target_info from .utils_chat import get_chat_type_and_target_info
from .interest_chatting import InterestChatting
from src.config.config import global_config from src.config.config import global_config
@ -51,7 +50,7 @@ class SubHeartflow:
# --- End Initialization --- # --- End Initialization ---
# 兴趣检测器 # 兴趣检测器
self.interest_chatting: InterestChatting = InterestChatting() self.interest_dict: Dict[str, tuple[MessageRecv, float, bool]] = {}
# 活动状态管理 # 活动状态管理
self.should_stop = False # 停止标志 self.should_stop = False # 停止标志
@ -85,8 +84,8 @@ class SubHeartflow:
# --- End using utility function --- # --- End using utility function ---
# Initialize interest system (existing logic) # Initialize interest system (existing logic)
await self.interest_chatting.initialize() # await self.interest_chatting.initialize()
logger.debug(f"{self.log_prefix} InterestChatting 实例已初始化。") # logger.debug(f"{self.log_prefix} InterestChatting 实例已初始化。")
# 根据配置决定初始状态 # 根据配置决定初始状态
if global_config.chat.chat_mode == "focus": if global_config.chat.chat_mode == "focus":
@ -129,7 +128,12 @@ class SubHeartflow:
return False return False
# 在 rewind 为 True 或 NormalChat 实例尚未创建时,创建新实例 # 在 rewind 为 True 或 NormalChat 实例尚未创建时,创建新实例
if rewind or not self.normal_chat_instance: if rewind or not self.normal_chat_instance:
self.normal_chat_instance = NormalChat(chat_stream=chat_stream, interest_dict=self.get_interest_dict()) # 提供回调函数用于接收需要切换到focus模式的通知
self.normal_chat_instance = NormalChat(
chat_stream=chat_stream,
interest_dict=self.interest_dict,
on_switch_to_focus_callback=self._handle_switch_to_focus_request,
)
# 进行异步初始化 # 进行异步初始化
await self.normal_chat_instance.initialize() await self.normal_chat_instance.initialize()
@ -144,6 +148,38 @@ class SubHeartflow:
self.normal_chat_instance = None # 启动/初始化失败,清理实例 self.normal_chat_instance = None # 启动/初始化失败,清理实例
return False return False
async def _handle_switch_to_focus_request(self) -> None:
"""
处理来自NormalChat的切换到focus模式的请求
Args:
stream_id: 请求切换的stream_id
"""
logger.info(f"{self.log_prefix} 收到NormalChat请求切换到focus模式")
# 切换到focus模式
current_state = self.chat_state.chat_status
if current_state == ChatState.NORMAL:
await self.change_chat_state(ChatState.FOCUSED)
logger.info(f"{self.log_prefix} 已根据NormalChat请求从NORMAL切换到FOCUSED状态")
else:
logger.warning(f"{self.log_prefix} 当前状态为{current_state.value}无法切换到FOCUSED状态")
async def _handle_stop_focus_chat_request(self) -> None:
"""
处理来自HeartFChatting的停止focus模式的请求
当收到stop_focus_chat命令时被调用
"""
logger.info(f"{self.log_prefix} 收到HeartFChatting请求停止focus模式")
# 切换到normal模式
current_state = self.chat_state.chat_status
if current_state == ChatState.FOCUSED:
await self.change_chat_state(ChatState.NORMAL)
logger.info(f"{self.log_prefix} 已根据HeartFChatting请求从FOCUSED切换到NORMAL状态")
else:
logger.warning(f"{self.log_prefix} 当前状态为{current_state.value}无法切换到NORMAL状态")
async def _stop_heart_fc_chat(self): async def _stop_heart_fc_chat(self):
"""停止并清理 HeartFChatting 实例""" """停止并清理 HeartFChatting 实例"""
if self.heart_fc_instance: if self.heart_fc_instance:
@ -160,7 +196,7 @@ class SubHeartflow:
async def _start_heart_fc_chat(self) -> bool: async def _start_heart_fc_chat(self) -> bool:
"""启动 HeartFChatting 实例,确保 NormalChat 已停止""" """启动 HeartFChatting 实例,确保 NormalChat 已停止"""
await self._stop_normal_chat() # 确保普通聊天监控已停止 await self._stop_normal_chat() # 确保普通聊天监控已停止
self.clear_interest_dict() # 清理兴趣字典,准备专注聊天 self.interest_dict.clear()
log_prefix = self.log_prefix log_prefix = self.log_prefix
# 如果实例已存在,检查其循环任务状态 # 如果实例已存在,检查其循环任务状态
@ -189,6 +225,7 @@ class SubHeartflow:
self.heart_fc_instance = HeartFChatting( self.heart_fc_instance = HeartFChatting(
chat_id=self.subheartflow_id, chat_id=self.subheartflow_id,
observations=self.observations, observations=self.observations,
on_stop_focus_chat=self._handle_stop_focus_chat_request,
) )
# 初始化并启动 HeartFChatting # 初始化并启动 HeartFChatting
@ -237,7 +274,7 @@ class SubHeartflow:
elif new_state == ChatState.ABSENT: elif new_state == ChatState.ABSENT:
logger.info(f"{log_prefix} 进入 ABSENT 状态,停止所有聊天活动...") logger.info(f"{log_prefix} 进入 ABSENT 状态,停止所有聊天活动...")
self.clear_interest_dict() self.interest_dict.clear()
await self._stop_normal_chat() await self._stop_normal_chat()
await self._stop_heart_fc_chat() await self._stop_heart_fc_chat()
state_changed = True state_changed = True
@ -278,25 +315,35 @@ class SubHeartflow:
logger.warning(f"SubHeartflow {self.subheartflow_id} 没有找到有效的 ChattingObservation") logger.warning(f"SubHeartflow {self.subheartflow_id} 没有找到有效的 ChattingObservation")
return None return None
async def get_interest_state(self) -> dict:
return await self.interest_chatting.get_state()
def get_normal_chat_last_speak_time(self) -> float: def get_normal_chat_last_speak_time(self) -> float:
if self.normal_chat_instance: if self.normal_chat_instance:
return self.normal_chat_instance.last_speak_time return self.normal_chat_instance.last_speak_time
return 0 return 0
def get_interest_dict(self) -> Dict[str, tuple[MessageRecv, float, bool]]: def get_normal_chat_recent_replies(self, limit: int = 10) -> List[dict]:
return self.interest_chatting.interest_dict """获取NormalChat实例的最近回复记录
def clear_interest_dict(self): Args:
self.interest_chatting.interest_dict.clear() limit: 最大返回数量默认10条
Returns:
List[dict]: 最近的回复记录列表如果没有NormalChat实例则返回空列表
"""
if self.normal_chat_instance:
return self.normal_chat_instance.get_recent_replies(limit)
return []
def add_interest_message(self, message: MessageRecv, interest_value: float, is_mentioned: bool):
self.interest_dict[message.message_info.message_id] = (message, interest_value, is_mentioned)
# 如果字典长度超过10删除最旧的消息
if len(self.interest_dict) > 10:
oldest_key = next(iter(self.interest_dict))
self.interest_dict.pop(oldest_key)
async def get_full_state(self) -> dict: async def get_full_state(self) -> dict:
"""获取子心流的完整状态,包括兴趣、思维和聊天状态。""" """获取子心流的完整状态,包括兴趣、思维和聊天状态。"""
interest_state = await self.get_interest_state()
return { return {
"interest_state": interest_state, "interest_state": "interest_state",
"chat_state": self.chat_state.chat_status.value, "chat_state": self.chat_state.chat_status.value,
"chat_state_changed_time": self.chat_state_changed_time, "chat_state_changed_time": self.chat_state_changed_time,
} }
@ -314,11 +361,6 @@ class SubHeartflow:
await self._stop_normal_chat() await self._stop_normal_chat()
await self._stop_heart_fc_chat() await self._stop_heart_fc_chat()
# 停止兴趣更新任务
if self.interest_chatting:
logger.info(f"{self.log_prefix} 停止兴趣系统后台任务...")
await self.interest_chatting.stop_updates()
# 取消可能存在的旧后台任务 (self.task) # 取消可能存在的旧后台任务 (self.task)
if self.task and not self.task.done(): if self.task and not self.task.done():
logger.debug(f"{self.log_prefix} 取消子心流主任务 (Shutdown)...") logger.debug(f"{self.log_prefix} 取消子心流主任务 (Shutdown)...")

View File

@ -1,6 +1,5 @@
import asyncio import asyncio
import time import time
import random
from typing import Dict, Any, Optional, List from typing import Dict, Any, Optional, List
from src.common.logger_manager import get_logger from src.common.logger_manager import get_logger
from src.chat.message_receive.chat_stream import chat_manager from src.chat.message_receive.chat_stream import chat_manager
@ -186,41 +185,41 @@ class SubHeartflowManager:
f"{log_prefix} 完成,共处理 {processed_count} 个子心流,成功将 {changed_count} 个非 ABSENT 子心流的状态更改为 ABSENT。" f"{log_prefix} 完成,共处理 {processed_count} 个子心流,成功将 {changed_count} 个非 ABSENT 子心流的状态更改为 ABSENT。"
) )
async def sbhf_normal_into_focus(self): # async def sbhf_normal_into_focus(self):
"""评估子心流兴趣度满足条件则提升到FOCUSED状态基于start_hfc_probability""" # """评估子心流兴趣度满足条件则提升到FOCUSED状态基于start_hfc_probability"""
try: # try:
for sub_hf in list(self.subheartflows.values()): # for sub_hf in list(self.subheartflows.values()):
flow_id = sub_hf.subheartflow_id # flow_id = sub_hf.subheartflow_id
stream_name = chat_manager.get_stream_name(flow_id) or flow_id # stream_name = chat_manager.get_stream_name(flow_id) or flow_id
# 跳过已经是FOCUSED状态的子心流 # # 跳过已经是FOCUSED状态的子心流
if sub_hf.chat_state.chat_status == ChatState.FOCUSED: # if sub_hf.chat_state.chat_status == ChatState.FOCUSED:
continue # continue
if sub_hf.interest_chatting.start_hfc_probability == 0: # if sub_hf.interest_chatting.start_hfc_probability == 0:
continue # continue
else: # else:
logger.debug( # logger.debug(
f"{stream_name},现在状态: {sub_hf.chat_state.chat_status.value},进入专注概率: {sub_hf.interest_chatting.start_hfc_probability}" # f"{stream_name},现在状态: {sub_hf.chat_state.chat_status.value},进入专注概率: {sub_hf.interest_chatting.start_hfc_probability}"
) # )
if random.random() >= sub_hf.interest_chatting.start_hfc_probability: # if random.random() >= sub_hf.interest_chatting.start_hfc_probability:
continue # continue
# 获取最新状态并执行提升 # # 获取最新状态并执行提升
current_subflow = self.subheartflows.get(flow_id) # current_subflow = self.subheartflows.get(flow_id)
if not current_subflow: # if not current_subflow:
continue # continue
logger.info( # logger.info(
f"{stream_name} 触发 认真水群 (概率={current_subflow.interest_chatting.start_hfc_probability:.2f})" # f"{stream_name} 触发 认真水群 (概率={current_subflow.interest_chatting.start_hfc_probability:.2f})"
) # )
# 执行状态提升 # # 执行状态提升
await current_subflow.change_chat_state(ChatState.FOCUSED) # await current_subflow.change_chat_state(ChatState.FOCUSED)
except Exception as e: # except Exception as e:
logger.error(f"启动HFC 兴趣评估失败: {e}", exc_info=True) # logger.error(f"启动HFC 兴趣评估失败: {e}", exc_info=True)
async def sbhf_focus_into_normal(self, subflow_id: Any): async def sbhf_focus_into_normal(self, subflow_id: Any):
""" """
@ -249,7 +248,7 @@ class SubHeartflowManager:
) )
try: try:
# 从HFC到CHAT时清空兴趣字典 # 从HFC到CHAT时清空兴趣字典
subflow.clear_interest_dict() subflow.interest_dict.clear()
await subflow.change_chat_state(target_state) await subflow.change_chat_state(target_state)
final_state = subflow.chat_state.chat_status final_state = subflow.chat_state.chat_status
if final_state == target_state: if final_state == target_state:

View File

@ -193,7 +193,6 @@ class MemoryGraph:
class Hippocampus: class Hippocampus:
def __init__(self): def __init__(self):
self.memory_graph = MemoryGraph() self.memory_graph = MemoryGraph()
self.llm_topic_judge = None
self.model_summary = None self.model_summary = None
self.entorhinal_cortex = None self.entorhinal_cortex = None
self.parahippocampal_gyrus = None self.parahippocampal_gyrus = None
@ -205,8 +204,7 @@ class Hippocampus:
# 从数据库加载记忆图 # 从数据库加载记忆图
self.entorhinal_cortex.sync_memory_from_db() self.entorhinal_cortex.sync_memory_from_db()
# TODO: API-Adapter修改标记 # TODO: API-Adapter修改标记
self.llm_topic_judge = LLMRequest(global_config.model.topic_judge, request_type="memory") self.model_summary = LLMRequest(global_config.model.memory_summary, request_type="memory")
self.model_summary = LLMRequest(global_config.model.summary, request_type="memory")
def get_all_node_names(self) -> list: def get_all_node_names(self) -> list:
"""获取记忆图中所有节点的名字列表""" """获取记忆图中所有节点的名字列表"""
@ -344,7 +342,7 @@ class Hippocampus:
# 使用LLM提取关键词 # 使用LLM提取关键词
topic_num = min(5, max(1, int(len(text) * 0.1))) # 根据文本长度动态调整关键词数量 topic_num = min(5, max(1, int(len(text) * 0.1))) # 根据文本长度动态调整关键词数量
# logger.info(f"提取关键词数量: {topic_num}") # logger.info(f"提取关键词数量: {topic_num}")
topics_response = await self.llm_topic_judge.generate_response(self.find_topic_llm(text, topic_num)) topics_response = await self.model_summary.generate_response(self.find_topic_llm(text, topic_num))
# 提取关键词 # 提取关键词
keywords = re.findall(r"<([^>]+)>", topics_response[0]) keywords = re.findall(r"<([^>]+)>", topics_response[0])
@ -528,12 +526,12 @@ class Hippocampus:
if not keywords: if not keywords:
return [] return []
# logger.info(f"提取的关键词: {', '.join(keywords)}") logger.info(f"提取的关键词: {', '.join(keywords)}")
# 过滤掉不存在于记忆图中的关键词 # 过滤掉不存在于记忆图中的关键词
valid_keywords = [keyword for keyword in keywords if keyword in self.memory_graph.G] valid_keywords = [keyword for keyword in keywords if keyword in self.memory_graph.G]
if not valid_keywords: if not valid_keywords:
# logger.info("没有找到有效的关键词节点") logger.info("没有找到有效的关键词节点")
return [] return []
logger.debug(f"有效的关键词: {', '.join(valid_keywords)}") logger.debug(f"有效的关键词: {', '.join(valid_keywords)}")
@ -699,7 +697,7 @@ class Hippocampus:
# 使用LLM提取关键词 # 使用LLM提取关键词
topic_num = min(5, max(1, int(len(text) * 0.1))) # 根据文本长度动态调整关键词数量 topic_num = min(5, max(1, int(len(text) * 0.1))) # 根据文本长度动态调整关键词数量
# logger.info(f"提取关键词数量: {topic_num}") # logger.info(f"提取关键词数量: {topic_num}")
topics_response = await self.llm_topic_judge.generate_response(self.find_topic_llm(text, topic_num)) topics_response = await self.model_summary.generate_response(self.find_topic_llm(text, topic_num))
# 提取关键词 # 提取关键词
keywords = re.findall(r"<([^>]+)>", topics_response[0]) keywords = re.findall(r"<([^>]+)>", topics_response[0])
@ -1126,7 +1124,7 @@ class ParahippocampalGyrus:
# 2. 使用LLM提取关键主题 # 2. 使用LLM提取关键主题
topic_num = self.hippocampus.calculate_topic_num(input_text, compress_rate) topic_num = self.hippocampus.calculate_topic_num(input_text, compress_rate)
topics_response = await self.hippocampus.llm_topic_judge.generate_response( topics_response = await self.hippocampus.model_summary.generate_response(
self.hippocampus.find_topic_llm(input_text, topic_num) self.hippocampus.find_topic_llm(input_text, topic_num)
) )

View File

@ -1,63 +0,0 @@
# -*- coding: utf-8 -*-
import asyncio
import time
import sys
import os
# 添加项目根目录到系统路径
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))))
from src.chat.memory_system.Hippocampus import HippocampusManager
from rich.traceback import install
install(extra_lines=3)
async def test_memory_system():
    """Smoke-test the memory system: initialize it, then run one text-based retrieval.

    Prints each retrieved (topic, memory_items) pair; any failure is reported
    and re-raised so the caller sees the error.
    """
    try:
        # Initialize the memory system via its singleton manager.
        print("开始初始化记忆系统...")
        hippocampus_manager = HippocampusManager.get_instance()
        # NOTE(review): initialize() is not awaited here — confirm it is synchronous.
        hippocampus_manager.initialize()
        print("记忆系统初始化完成")
        # Memory-building test (disabled; enable to rebuild memories first).
        # print("开始测试记忆构建...")
        # await hippocampus_manager.build_memory()
        # print("记忆构建完成")
        # Memory-retrieval test with a fixed sample sentence.
        test_text = "千石可乐在群里聊天"
        # test_text = '''千石可乐分不清AI的陪伴和人类的陪伴,是这样吗?'''
        print(f"开始测试记忆检索,测试文本: {test_text}\n")
        memories = await hippocampus_manager.get_memory_from_text(
            text=test_text, max_memory_num=3, max_memory_length=2, max_depth=3, fast_retrieval=False
        )
        await asyncio.sleep(1)
        print("检索到的记忆:")
        for topic, memory_items in memories:
            print(f"主题: {topic}")
            print(f"- {memory_items}")
    except Exception as e:
        print(f"测试过程中出现错误: {e}")
        raise
async def main():
    """Entry point: time the memory-system smoke test and surface failures."""
    started = time.time()
    try:
        await test_memory_system()
    except Exception as e:
        print(f"程序执行出错: {e}")
        raise
    finished = time.time()
    print(f"测试完成,总耗时: {finished - started:.2f}秒")


if __name__ == "__main__":
    asyncio.run(main())

View File

@ -1,365 +0,0 @@
# -*- coding: utf-8 -*-
import os
import sys
import time
from pathlib import Path
import datetime
from rich.console import Console
from Hippocampus import Hippocampus # 海马体和记忆图
from dotenv import load_dotenv
from rich.traceback import install
install(extra_lines=3)
"""
我想 总有那么一个瞬间
你会想和某天才变态少女助手一样
往Bot的海马体里插上几个电极 不是吗
Let's do some dirty job.
"""
# 获取当前文件的目录
current_dir = Path(__file__).resolve().parent
# 获取项目根目录(上三层目录)
project_root = current_dir.parent.parent.parent
# env.dev文件路径
env_path = project_root / ".env.dev"
# from chat.config import global_config
root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../.."))
sys.path.append(root_path)
from src.common.logger import get_module_logger # noqa E402
from common.database.database import db # noqa E402
logger = get_module_logger("mem_alter")
console = Console()
# Load environment variables from .env.dev when present; otherwise fall back to defaults.
if env_path.exists():
    logger.info(f"{env_path} 加载环境变量")
    load_dotenv(env_path)
else:
    logger.warning(f"未找到环境变量文件: {env_path}")
    logger.info("将使用默认配置")
# Query node information: interactive lookup of memories related to a concept.
def query_mem_info(hippocampus: Hippocampus):
    """Interactively print memories related to a concept until the user enters '退出'.

    NOTE(review): the annotation says Hippocampus, but main() passes
    hippocampus.memory_graph — one of the two sides looks wrong; confirm.
    """
    while True:
        query = input("\n请输入新的查询概念(输入'退出'以结束):")
        if query.lower() == "退出":
            break
        # get_related_item yields a (first_layer, second_layer) pair of memory lists.
        items_list = hippocampus.memory_graph.get_related_item(query)
        if items_list:
            have_memory = False
            first_layer, second_layer = items_list
            if first_layer:
                have_memory = True
                print("\n直接相关的记忆:")
                for item in first_layer:
                    print(f"- {item}")
            if second_layer:
                have_memory = True
                print("\n间接相关的记忆:")
                for item in second_layer:
                    print(f"- {item}")
            if not have_memory:
                print("\n未找到相关记忆。")
        else:
            print("未找到相关记忆。")
# Add a concept node to the memory graph.
def add_mem_node(hippocampus: Hippocampus):
    """Interactively create a new concept node with user-supplied memory items.

    The node name is checked against the DB to avoid duplicates; the node is
    added to the in-memory graph only (persisted later by sync_memory_to_db).
    """
    while True:
        # Fix: the original loop had no exit path at all; accept '退出' like the
        # other interactive editors in this script.
        concept = input("请输入节点概念名(输入'退出'以结束):\n")
        if concept.lower() == "退出":
            break
        result = db.graph_data.nodes.count_documents({"concept": concept})
        if result != 0:
            # Fix: the original string lacked the f-prefix, so the literal text
            # "{concept}" was printed instead of the node name.
            console.print(f"[yellow]已存在名为“{concept}”的节点,行为已取消[/yellow]")
            continue
        memory_items = []
        while True:
            context = input("请输入节点描述信息(输入'终止'以结束)")
            if context.lower() == "终止":
                break
            memory_items.append(context)
        current_time = datetime.datetime.now().timestamp()
        hippocampus.memory_graph.G.add_node(
            concept, memory_items=memory_items, created_time=current_time, last_modified=current_time
        )
# Remove a concept node (and every edge attached to it).
def remove_mem_node(hippocampus: Hippocampus):
    """Delete a concept node after an explicit typed confirmation.

    Lists each edge that would disappear with the node; the user must re-type
    the node name to confirm the destructive operation.
    """
    concept = input("请输入节点概念名:\n")
    result = db.graph_data.nodes.count_documents({"concept": concept})
    if result == 0:
        console.print(f"[red]不存在名为“{concept}”的节点[/red]")
        # Fix: the original fell through here and still offered to delete a
        # node that does not exist.
        return
    edges = db.graph_data.edges.find({"$or": [{"source": concept}, {"target": concept}]})
    for edge in edges:
        console.print(f"[yellow]存在边“{edge['source']} -> {edge['target']}”, 请慎重考虑[/yellow]")
    console.print(f"[yellow]确定要移除名为“{concept}”的节点以及其相关边吗[/yellow]")
    destory = console.input(f"[red]请输入“{concept}”以删除节点 其他输入将被视为取消操作[/red]\n")
    if destory == concept:
        hippocampus.memory_graph.G.remove_node(concept)
    else:
        logger.info("[green]删除操作已取消[/green]")
# Add an edge between two existing nodes (or strengthen one that already exists).
def add_mem_edge(hippocampus: Hippocampus):
    """Interactively connect two concept nodes until the user enters '退出'.

    Both endpoints must already exist in the DB node collection; self-loops are
    rejected. connect_dot() creates the edge or bumps its strength.
    """
    while True:
        source = input("请输入 **第一个节点** 名称(输入'退出'以结束):\n")
        if source.lower() == "退出":
            break
        # Endpoints are validated against the database, not the in-memory graph.
        if db.graph_data.nodes.count_documents({"concept": source}) == 0:
            console.print(f"[yellow]“{source}”节点不存在,操作已取消。[/yellow]")
            continue
        target = input("请输入 **第二个节点** 名称:\n")
        if db.graph_data.nodes.count_documents({"concept": target}) == 0:
            console.print(f"[yellow]“{target}”节点不存在,操作已取消。[/yellow]")
            continue
        if source == target:
            console.print(f"[yellow]试图创建“{source} <-> {target}”自环,操作已取消。[/yellow]")
            continue
        hippocampus.memory_graph.connect_dot(source, target)
        edge = hippocampus.memory_graph.G.get_edge_data(source, target)
        # After connect_dot, strength == 1 means the edge was just created.
        if edge["strength"] == 1:
            console.print(f"[green]成功创建边“{source} <-> {target}默认权重1[/green]")
        else:
            console.print(
                f"[yellow]边“{source} <-> {target}”已存在,"
                f"更新权重: {edge['strength'] - 1} <-> {edge['strength']}[/yellow]"
            )
# Remove an edge between two nodes.
def remove_mem_edge(hippocampus: Hippocampus):
    """Interactively delete an edge until the user enters '退出'.

    Endpoints are validated against the DB and the edge against the in-memory
    graph; deletion requires typing '确认'.
    """
    while True:
        source = input("请输入 **第一个节点** 名称(输入'退出'以结束):\n")
        if source.lower() == "退出":
            break
        if db.graph_data.nodes.count_documents({"concept": source}) == 0:
            # Fix: these four messages lacked the f-prefix, so placeholders such
            # as "{source}" were printed literally instead of interpolated.
            console.print(f"[yellow]“{source}”节点不存在,操作已取消。[/yellow]")
            continue
        target = input("请输入 **第二个节点** 名称:\n")
        if db.graph_data.nodes.count_documents({"concept": target}) == 0:
            console.print(f"[yellow]“{target}”节点不存在,操作已取消。[/yellow]")
            continue
        if source == target:
            console.print(f"[yellow]试图创建“{source} <-> {target}”自环,操作已取消。[/yellow]")
            continue
        edge = hippocampus.memory_graph.G.get_edge_data(source, target)
        if edge is None:
            console.print(f"[yellow]边“{source} <-> {target}”不存在,操作已取消。[/yellow]")
            continue
        else:
            # NOTE(review): "orange" is not a standard rich color name — confirm
            # this markup renders without raising.
            accept = console.input("[orange]请输入“确认”以确认删除操作(其他输入视为取消)[/orange]\n")
            if accept.lower() == "确认":
                hippocampus.memory_graph.G.remove_edge(source, target)
                # Fix: closing tag was "[green]" instead of "[/green]".
                console.print(f"[green]边“{source} <-> {target}”已删除。[/green]")
# Alter node information through an arbitrary-code editing environment.
def alter_mem_node(hippocampus: Hippocampus):
    """Open an eval()-based REPL to edit a node's memory_items in place.

    The user mutates `env["memory_items"]`; pressing Ctrl+C (KeyboardInterrupt)
    commits the edit back to the node only if it is still a list. `batchEnv`
    persists across nodes within one invocation. SECURITY: this eval()s raw
    operator input by design — maintenance-tool use only.
    """
    batch_environment = dict()
    while True:
        concept = input("请输入节点概念名(输入'终止'以结束):\n")
        if concept.lower() == "终止":
            break
        # NOTE(review): if get_dot() can return None, this unpack raises before
        # the None check below — confirm its return contract.
        _, node = hippocampus.memory_graph.get_dot(concept)
        if node is None:
            console.print(f"[yellow]“{concept}”节点不存在,操作已取消。[/yellow]")
            continue
        console.print("[yellow]注意,请确保你知道自己在做什么[/yellow]")
        console.print("[yellow]你将获得一个执行任意代码的环境[/yellow]")
        console.print("[red]你已经被警告过了。[/red]\n")
        # Placeholder dict shown to the user to describe the editing environment.
        node_environment = {"concept": "<节点名>", "memory_items": "<记忆文本数组>"}
        console.print(
            "[green]环境变量中会有env与batchEnv两个dict, env在切换节点时会清空, batchEnv在操作终止时才会清空[/green]"
        )
        console.print(
            f"[green] env 会被初始化为[/green]\n{node_environment}\n[green]且会在用户代码执行完毕后被提交 [/green]"
        )
        console.print(
            "[yellow]为便于书写临时脚本请手动在输入代码通过Ctrl+C等方式触发KeyboardInterrupt来结束代码执行[/yellow]"
        )
        # Work on a copy so a botched edit cannot corrupt the live node data.
        node_environment = dict(node)
        node_environment["concept"] = concept
        while True:
            # Re-defined every iteration; evaluates user input inside the two envs.
            def user_exec(script, env, batch_env):
                return eval(script, env, batch_env)
            try:
                command = console.input()
            except KeyboardInterrupt:
                # Commit path: only accept the edit if memory_items is still a list.
                try:
                    if isinstance(node_environment["memory_items"], list):
                        node["memory_items"] = node_environment["memory_items"]
                    else:
                        raise Exception
                except Exception as e:
                    console.print(
                        f"[red]我不知道你做了什么但显然nodeEnviroment['memory_items']已经不是个数组了,"
                        f"操作已取消: {str(e)}[/red]"
                    )
                break
            try:
                user_exec(command, node_environment, batch_environment)
            except Exception as e:
                console.print(e)
                console.print(
                    "[red]自定义代码执行时发生异常,已捕获,请重试(可通过 console.print(locals()) 检查环境状态)[/red]"
                )
# Alter edge information through an arbitrary-code editing environment.
def alter_mem_edge(hippocampus: Hippocampus):
    """Open an eval()-based REPL to edit an edge's strength in place.

    The user mutates `env["strength"][0]` (a one-element list so assignment is
    observable through eval); pressing Ctrl+C (KeyboardInterrupt) commits the
    value back to the edge only if it is still an int. `batchEnv` persists
    across edges within one invocation. SECURITY: this eval()s raw operator
    input by design — maintenance-tool use only.
    """
    batch_enviroment = dict()
    while True:
        source = input("请输入 **第一个节点** 名称(输入'终止'以结束):\n")
        if source.lower() == "终止":
            break
        if hippocampus.memory_graph.get_dot(source) is None:
            console.print(f"[yellow]“{source}”节点不存在,操作已取消。[/yellow]")
            continue
        target = input("请输入 **第二个节点** 名称:\n")
        if hippocampus.memory_graph.get_dot(target) is None:
            console.print(f"[yellow]“{target}”节点不存在,操作已取消。[/yellow]")
            continue
        edge = hippocampus.memory_graph.G.get_edge_data(source, target)
        if edge is None:
            console.print(f"[yellow]边“{source} <-> {target}”不存在,操作已取消。[/yellow]")
            continue
        console.print("[yellow]注意,请确保你知道自己在做什么[/yellow]")
        console.print("[yellow]你将获得一个执行任意代码的环境[/yellow]")
        console.print("[red]你已经被警告过了。[/red]\n")
        # Placeholder dict shown to the user to describe the editing environment.
        edge_environment = {"source": "<节点名>", "target": "<节点名>", "strength": "<强度值,装在一个list里>"}
        console.print(
            "[green]环境变量中会有env与batchEnv两个dict, env在切换节点时会清空, batchEnv在操作终止时才会清空[/green]"
        )
        console.print(
            f"[green] env 会被初始化为[/green]\n{edge_environment}\n[green]且会在用户代码执行完毕后被提交 [/green]"
        )
        console.print(
            "[yellow]为便于书写临时脚本请手动在输入代码通过Ctrl+C等方式触发KeyboardInterrupt来结束代码执行[/yellow]"
        )
        # Work on a copy so a botched edit cannot corrupt the live edge data.
        edge_environment["strength"] = [edge["strength"]]
        edge_environment["source"] = source
        edge_environment["target"] = target
        while True:
            # Re-defined every iteration; evaluates user input inside the two envs.
            def user_exec(script, env, batch_env):
                return eval(script, env, batch_env)
            try:
                command = console.input()
            except KeyboardInterrupt:
                # Commit path: only accept the edit if strength is still an int.
                try:
                    if isinstance(edge_environment["strength"][0], int):
                        edge["strength"] = edge_environment["strength"][0]
                    else:
                        raise Exception
                except Exception as e:
                    console.print(
                        f"[red]我不知道你做了什么但显然edgeEnviroment['strength']已经不是个int了"
                        f"操作已取消: {str(e)}[/red]"
                    )
                break
            try:
                user_exec(command, edge_environment, batch_enviroment)
            except Exception as e:
                console.print(e)
                console.print(
                    "[red]自定义代码执行时发生异常,已捕获,请重试(可通过 console.print(locals()) 检查环境状态)[/red]"
                )
async def main():
    """Interactive maintenance REPL over the memory graph.

    Loads the graph from the database, loops over a numeric menu of edit
    operations, and writes the (possibly modified) graph back on exit.
    """
    start_time = time.time()
    # Build the hippocampus and hydrate its graph from the database.
    hippocampus = Hippocampus()
    # NOTE(review): Hippocampus.__init__ leaves entorhinal_cortex unset until an
    # initialize step runs — confirm this call cannot hit None here.
    hippocampus.entorhinal_cortex.sync_memory_from_db()
    end_time = time.time()
    logger.info(f"\033[32m[加载海马体耗时: {end_time - start_time:.2f} 秒]\033[0m")
    while True:
        try:
            query = int(
                input(
                    """请输入操作类型
0 -> 查询节点; 1 -> 增加节点; 2 -> 移除节点; 3 -> 增加边; 4 -> 移除边;
5 -> 修改节点; 6 -> 修改边; 其他任意输入 -> 退出
"""
                )
            )
        except ValueError:
            query = -1  # non-numeric input routes to the exit branch below
        if query == 0:
            # Fix: query_mem_info dereferences .memory_graph internally, so it
            # must receive the Hippocampus itself; passing
            # hippocampus.memory_graph would fail with an AttributeError.
            query_mem_info(hippocampus)
        elif query == 1:
            add_mem_node(hippocampus)
        elif query == 2:
            remove_mem_node(hippocampus)
        elif query == 3:
            add_mem_edge(hippocampus)
        elif query == 4:
            remove_mem_edge(hippocampus)
        elif query == 5:
            alter_mem_node(hippocampus)
        elif query == 6:
            alter_mem_edge(hippocampus)
        else:
            print("已结束操作")
            break
    # Persist whatever the session changed.
    hippocampus.entorhinal_cortex.sync_memory_to_db()


if __name__ == "__main__":
    import asyncio

    asyncio.run(main())

View File

@ -1,126 +0,0 @@
import asyncio
import os
import time
from typing import Tuple, Union
import aiohttp
import requests
from src.common.logger import get_module_logger
from rich.traceback import install
install(extra_lines=3)
logger = get_module_logger("offline_llm")
class LLMRequestOff:
    """Standalone client for the SiliconFlow chat-completions endpoint.

    Credentials are read from the SILICONFLOW_KEY / SILICONFLOW_BASE_URL
    environment variables. Extra keyword arguments given to the constructor
    are merged into every request payload (and may override the defaults).
    Both the sync and async paths retry up to 3 times with exponential
    backoff, treating HTTP 429 as a retryable rate limit.
    """

    MAX_RETRIES = 3  # total attempts per request
    BASE_WAIT_TIME = 15  # seconds; doubled on each retry (exponential backoff)

    def __init__(self, model_name="deepseek-ai/DeepSeek-V3", **kwargs):
        self.model_name = model_name
        self.params = kwargs
        self.api_key = os.getenv("SILICONFLOW_KEY")
        self.base_url = os.getenv("SILICONFLOW_BASE_URL")
        if not self.api_key or not self.base_url:
            raise ValueError("环境变量未正确加载SILICONFLOW_KEY 或 SILICONFLOW_BASE_URL 未设置")
        logger.info(f"API URL: {self.base_url}")  # 使用 logger 记录 base_url

    # --- shared request plumbing (previously duplicated in both paths) -------

    def _headers(self) -> dict:
        """Auth and content-type headers shared by the sync and async paths."""
        return {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}

    def _payload(self, prompt: str) -> dict:
        """Build the chat-completions body; self.params may override defaults."""
        return {
            "model": self.model_name,
            "messages": [{"role": "user", "content": prompt}],
            "temperature": 0.5,
            **self.params,
        }

    def _api_url(self) -> str:
        """Full chat/completions URL, tolerating a trailing slash in base_url."""
        return f"{self.base_url.rstrip('/')}/chat/completions"

    @staticmethod
    def _extract(result: dict) -> Tuple[str, str]:
        """Pull (content, reasoning_content) out of a successful response body."""
        if "choices" in result and len(result["choices"]) > 0:
            message = result["choices"][0]["message"]
            return message["content"], message.get("reasoning_content", "")
        return "没有返回结果", ""

    def generate_response(self, prompt: str) -> Tuple[str, str]:
        """根据输入的提示生成模型的响应

        Returns (content, reasoning_content). Errors are reported as a
        (message, "") tuple rather than raised — fix: the original annotation
        claimed Union[str, Tuple[str, str]], but every return is a 2-tuple.
        """
        api_url = self._api_url()
        logger.info(f"Request URL: {api_url}")  # 记录请求的 URL
        for retry in range(self.MAX_RETRIES):
            try:
                response = requests.post(api_url, headers=self._headers(), json=self._payload(prompt))
                if response.status_code == 429:
                    wait_time = self.BASE_WAIT_TIME * (2**retry)  # 指数退避
                    logger.warning(f"遇到请求限制(429),等待{wait_time}秒后重试...")
                    time.sleep(wait_time)
                    continue
                response.raise_for_status()  # 检查其他响应状态
                return self._extract(response.json())
            except Exception as e:
                if retry < self.MAX_RETRIES - 1:  # 如果还有重试机会
                    wait_time = self.BASE_WAIT_TIME * (2**retry)
                    logger.error(f"[回复]请求失败,等待{wait_time}秒后重试... 错误: {str(e)}")
                    time.sleep(wait_time)
                else:
                    logger.error(f"请求失败: {str(e)}")
                    return f"请求失败: {str(e)}", ""
        logger.error("达到最大重试次数,请求仍然失败")
        return "达到最大重试次数,请求仍然失败", ""

    async def generate_response_async(self, prompt: str) -> Tuple[str, str]:
        """异步方式根据输入的提示生成模型的响应 — same contract as generate_response."""
        api_url = self._api_url()
        logger.info(f"Request URL: {api_url}")  # 记录请求的 URL
        async with aiohttp.ClientSession() as session:
            for retry in range(self.MAX_RETRIES):
                try:
                    async with session.post(api_url, headers=self._headers(), json=self._payload(prompt)) as response:
                        if response.status == 429:
                            wait_time = self.BASE_WAIT_TIME * (2**retry)  # 指数退避
                            logger.warning(f"遇到请求限制(429),等待{wait_time}秒后重试...")
                            await asyncio.sleep(wait_time)
                            continue
                        response.raise_for_status()  # 检查其他响应状态
                        result = await response.json()
                        return self._extract(result)
                except Exception as e:
                    if retry < self.MAX_RETRIES - 1:  # 如果还有重试机会
                        wait_time = self.BASE_WAIT_TIME * (2**retry)
                        logger.error(f"[回复]请求失败,等待{wait_time}秒后重试... 错误: {str(e)}")
                        await asyncio.sleep(wait_time)
                    else:
                        logger.error(f"请求失败: {str(e)}")
                        return f"请求失败: {str(e)}", ""
            logger.error("达到最大重试次数,请求仍然失败")
            return "达到最大重试次数,请求仍然失败", ""

View File

@ -7,7 +7,7 @@ from src.chat.message_receive.chat_stream import chat_manager
from src.chat.message_receive.message import MessageRecv from src.chat.message_receive.message import MessageRecv
from src.experimental.only_message_process import MessageProcessor from src.experimental.only_message_process import MessageProcessor
from src.experimental.PFC.pfc_manager import PFCManager from src.experimental.PFC.pfc_manager import PFCManager
from src.chat.focus_chat.heartflow_message_revceiver import HeartFCMessageReceiver from src.chat.focus_chat.heartflow_message_processor import HeartFCMessageReceiver
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.config.config import global_config from src.config.config import global_config

View File

@ -1,4 +1,4 @@
from ..person_info.person_info import person_info_manager from src.person_info.person_info import person_info_manager
from src.common.logger_manager import get_logger from src.common.logger_manager import get_logger
import asyncio import asyncio
from dataclasses import dataclass, field from dataclasses import dataclass, field

View File

@ -27,7 +27,7 @@ logger = get_logger("normal_chat")
class NormalChat: class NormalChat:
def __init__(self, chat_stream: ChatStream, interest_dict: dict = None): def __init__(self, chat_stream: ChatStream, interest_dict: dict = None, on_switch_to_focus_callback=None):
"""初始化 NormalChat 实例。只进行同步操作。""" """初始化 NormalChat 实例。只进行同步操作。"""
# Basic info from chat_stream (sync) # Basic info from chat_stream (sync)
@ -50,6 +50,17 @@ class NormalChat:
self._chat_task: Optional[asyncio.Task] = None self._chat_task: Optional[asyncio.Task] = None
self._initialized = False # Track initialization status self._initialized = False # Track initialization status
# 记录最近的回复内容,每项包含: {time, user_message, response, is_mentioned, is_reference_reply}
self.recent_replies = []
self.max_replies_history = 20 # 最多保存最近20条回复记录
# 添加回调函数用于在满足条件时通知切换到focus_chat模式
self.on_switch_to_focus_callback = on_switch_to_focus_callback
# 最近回复检查相关
self._last_check_time = time.time()
self._check_interval = 10 # 每10秒检查一次是否需要切换到focus模式
async def initialize(self): async def initialize(self):
"""异步初始化,获取聊天类型和目标信息。""" """异步初始化,获取聊天类型和目标信息。"""
if self._initialized: if self._initialized:
@ -197,6 +208,12 @@ class NormalChat:
logger.info(f"[{self.stream_name}] 兴趣监控任务被取消或置空,退出") logger.info(f"[{self.stream_name}] 兴趣监控任务被取消或置空,退出")
break break
# 定期检查是否需要切换到focus模式
# current_time = time.time()
# if current_time - self._last_check_time > self._check_interval:
# await self._check_switch_to_focus()
# self._last_check_time = current_time
items_to_process = list(self.interest_dict.items()) items_to_process = list(self.interest_dict.items())
if not items_to_process: if not items_to_process:
continue continue
@ -312,6 +329,28 @@ class NormalChat:
# 检查 first_bot_msg 是否为 None (例如思考消息已被移除的情况) # 检查 first_bot_msg 是否为 None (例如思考消息已被移除的情况)
if first_bot_msg: if first_bot_msg:
info_catcher.catch_after_response(timing_results["消息发送"], response_set, first_bot_msg) info_catcher.catch_after_response(timing_results["消息发送"], response_set, first_bot_msg)
# 记录回复信息到最近回复列表中
reply_info = {
"time": time.time(),
"user_message": message.processed_plain_text,
"user_info": {
"user_id": message.message_info.user_info.user_id,
"user_nickname": message.message_info.user_info.user_nickname,
},
"response": response_set,
"is_mentioned": is_mentioned,
"is_reference_reply": message.reply is not None, # 判断是否为引用回复
"timing": {k: round(v, 2) for k, v in timing_results.items()},
}
self.recent_replies.append(reply_info)
# 保持最近回复历史在限定数量内
if len(self.recent_replies) > self.max_replies_history:
self.recent_replies = self.recent_replies[-self.max_replies_history :]
# 检查是否需要切换到focus模式
await self._check_switch_to_focus()
else: else:
logger.warning(f"[{self.stream_name}] 思考消息 {thinking_id} 在发送前丢失,无法记录 info_catcher") logger.warning(f"[{self.stream_name}] 思考消息 {thinking_id} 在发送前丢失,无法记录 info_catcher")
@ -520,3 +559,49 @@ class NormalChat:
except Exception as e: except Exception as e:
logger.error(f"[{self.stream_name}] 清理思考消息时出错: {e}") logger.error(f"[{self.stream_name}] 清理思考消息时出错: {e}")
traceback.print_exc() traceback.print_exc()
# 获取最近回复记录的方法
def get_recent_replies(self, limit: int = 10) -> List[dict]:
"""获取最近的回复记录
Args:
limit: 最大返回数量默认10条
Returns:
List[dict]: 最近的回复记录列表每项包含
time: 回复时间戳
user_message: 用户消息内容
user_info: 用户信息(user_id, user_nickname)
response: 回复内容
is_mentioned: 是否被提及(@)
is_reference_reply: 是否为引用回复
timing: 各阶段耗时
"""
# 返回最近的limit条记录按时间倒序排列
return sorted(self.recent_replies[-limit:], key=lambda x: x["time"], reverse=True)
async def _check_switch_to_focus(self) -> None:
"""检查是否满足切换到focus模式的条件"""
if not self.on_switch_to_focus_callback:
return # 如果没有设置回调函数,直接返回
current_time = time.time()
time_threshold = 120 / global_config.focus_chat.auto_focus_threshold
reply_threshold = 6 * global_config.focus_chat.auto_focus_threshold
one_minute_ago = current_time - time_threshold
# 统计1分钟内的回复数量
recent_reply_count = sum(1 for reply in self.recent_replies if reply["time"] > one_minute_ago)
# print(111111111111111333333333333333333333333331111111111111111111111111111111111)
# print(recent_reply_count)
# 如果1分钟内回复数量大于8触发切换到focus模式
if recent_reply_count > reply_threshold:
logger.info(
f"[{self.stream_name}] 检测到1分钟内回复数量({recent_reply_count})大于{reply_threshold}触发切换到focus模式"
)
try:
# 调用回调函数通知上层切换到focus模式
await self.on_switch_to_focus_callback()
except Exception as e:
logger.error(f"[{self.stream_name}] 触发切换到focus模式时出错: {e}\n{traceback.format_exc()}")

View File

@ -3,7 +3,7 @@ import random
from src.llm_models.utils_model import LLMRequest from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config from src.config.config import global_config
from src.chat.message_receive.message import MessageThinking from src.chat.message_receive.message import MessageThinking
from src.chat.focus_chat.heartflow_prompt_builder import prompt_builder from src.chat.normal_chat.normal_prompt import prompt_builder
from src.chat.utils.utils import process_llm_response from src.chat.utils.utils import process_llm_response
from src.chat.utils.timer_calculator import Timer from src.chat.utils.timer_calculator import Timer
from src.common.logger_manager import get_logger from src.common.logger_manager import get_logger
@ -17,20 +17,20 @@ class NormalChatGenerator:
def __init__(self): def __init__(self):
# TODO: API-Adapter修改标记 # TODO: API-Adapter修改标记
self.model_reasoning = LLMRequest( self.model_reasoning = LLMRequest(
model=global_config.model.reasoning, model=global_config.model.normal_chat_1,
temperature=0.7, # temperature=0.7,
max_tokens=3000, max_tokens=3000,
request_type="response_reasoning", request_type="normal_chat_1",
) )
self.model_normal = LLMRequest( self.model_normal = LLMRequest(
model=global_config.model.normal, model=global_config.model.normal_chat_2,
temperature=global_config.model.normal["temp"], # temperature=global_config.model.normal_chat_2["temp"],
max_tokens=256, max_tokens=256,
request_type="response_reasoning", request_type="normal_chat_2",
) )
self.model_sum = LLMRequest( self.model_sum = LLMRequest(
model=global_config.model.summary, temperature=0.7, max_tokens=3000, request_type="relation" model=global_config.model.memory_summary, temperature=0.7, max_tokens=3000, request_type="relation"
) )
self.current_model_type = "r1" # 默认使用 R1 self.current_model_type = "r1" # 默认使用 R1
self.current_model_name = "unknown model" self.current_model_name = "unknown model"
@ -38,7 +38,7 @@ class NormalChatGenerator:
async def generate_response(self, message: MessageThinking, thinking_id: str) -> Optional[Union[str, List[str]]]: async def generate_response(self, message: MessageThinking, thinking_id: str) -> Optional[Union[str, List[str]]]:
"""根据当前模型类型选择对应的生成函数""" """根据当前模型类型选择对应的生成函数"""
# 从global_config中获取模型概率值并选择模型 # 从global_config中获取模型概率值并选择模型
if random.random() < global_config.normal_chat.reasoning_model_probability: if random.random() < global_config.normal_chat.normal_chat_first_probability:
self.current_model_type = "深深地" self.current_model_type = "深深地"
current_model = self.model_reasoning current_model = self.model_reasoning
else: else:

View File

@ -17,14 +17,14 @@ logger = get_logger("prompt")
def init_prompt(): def init_prompt():
Prompt( # Prompt(
""" # """
你有以下信息可供参考 # 你有以下信息可供参考
{structured_info} # {structured_info}
以上的消息是你获取到的消息或许可以帮助你更好地回复 # 以上的消息是你获取到的消息,或许可以帮助你更好地回复
""", # """,
"info_from_tools", # "info_from_tools",
) # )
Prompt("你正在qq群里聊天下面是群里在聊的内容", "chat_target_group1") Prompt("你正在qq群里聊天下面是群里在聊的内容", "chat_target_group1")
Prompt("你正在和{sender_name}聊天,这是你们之前聊的内容:", "chat_target_private1") Prompt("你正在和{sender_name}聊天,这是你们之前聊的内容:", "chat_target_private1")
@ -94,9 +94,7 @@ class PromptBuilder:
in_mind_reply=None, in_mind_reply=None,
target_message=None, target_message=None,
) -> Optional[str]: ) -> Optional[str]:
if build_mode == "normal": return await self._build_prompt_normal(chat_stream, message_txt or "", sender_name)
return await self._build_prompt_normal(chat_stream, message_txt or "", sender_name)
return None
async def _build_prompt_normal(self, chat_stream, message_txt: str, sender_name: str = "某人") -> str: async def _build_prompt_normal(self, chat_stream, message_txt: str, sender_name: str = "某人") -> str:
prompt_personality = individuality.get_prompt(x_person=2, level=2) prompt_personality = individuality.get_prompt(x_person=2, level=2)
@ -107,7 +105,7 @@ class PromptBuilder:
who_chat_in_group = get_recent_group_speaker( who_chat_in_group = get_recent_group_speaker(
chat_stream.stream_id, chat_stream.stream_id,
(chat_stream.user_info.platform, chat_stream.user_info.user_id) if chat_stream.user_info else None, (chat_stream.user_info.platform, chat_stream.user_info.user_id) if chat_stream.user_info else None,
limit=global_config.focus_chat.observation_context_size, limit=global_config.normal_chat.max_context_size,
) )
elif chat_stream.user_info: elif chat_stream.user_info:
who_chat_in_group.append( who_chat_in_group.append(
@ -118,8 +116,7 @@ class PromptBuilder:
for person in who_chat_in_group: for person in who_chat_in_group:
if len(person) >= 3 and person[0] and person[1]: if len(person) >= 3 and person[0] and person[1]:
relation_prompt += await relationship_manager.build_relationship_info(person) relation_prompt += await relationship_manager.build_relationship_info(person)
else:
logger.warning(f"Invalid person tuple encountered for relationship prompt: {person}")
mood_prompt = mood_manager.get_mood_prompt() mood_prompt = mood_manager.get_mood_prompt()
reply_styles1 = [ reply_styles1 = [
("然后给出日常且口语化的回复,平淡一些", 0.4), ("然后给出日常且口语化的回复,平淡一些", 0.4),
@ -194,6 +191,8 @@ class PromptBuilder:
if random.random() < 0.04: if random.random() < 0.04:
prompt_ger += "你喜欢用流行梗" prompt_ger += "你喜欢用流行梗"
moderation_prompt_block = "请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。"
# 知识构建 # 知识构建
start_time = time.time() start_time = time.time()
prompt_info = await self.get_prompt_info(message_txt, threshold=0.38) prompt_info = await self.get_prompt_info(message_txt, threshold=0.38)
@ -231,7 +230,7 @@ class PromptBuilder:
keywords_reaction_prompt=keywords_reaction_prompt, keywords_reaction_prompt=keywords_reaction_prompt,
prompt_ger=prompt_ger, prompt_ger=prompt_ger,
# moderation_prompt=await global_prompt_manager.get_prompt_async("moderation_prompt"), # moderation_prompt=await global_prompt_manager.get_prompt_async("moderation_prompt"),
moderation_prompt="", moderation_prompt=moderation_prompt_block,
) )
else: else:
template_name = "reasoning_prompt_private_main" template_name = "reasoning_prompt_private_main"
@ -254,7 +253,7 @@ class PromptBuilder:
keywords_reaction_prompt=keywords_reaction_prompt, keywords_reaction_prompt=keywords_reaction_prompt,
prompt_ger=prompt_ger, prompt_ger=prompt_ger,
# moderation_prompt=await global_prompt_manager.get_prompt_async("moderation_prompt"), # moderation_prompt=await global_prompt_manager.get_prompt_async("moderation_prompt"),
moderation_prompt="", moderation_prompt=moderation_prompt_block,
) )
# --- End choosing template --- # --- End choosing template ---

View File

@ -63,7 +63,7 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]:
) )
# 判断是否被@ # 判断是否被@
if re.search(f"@[\s\S]*?id:{global_config.bot.qq_account}", message.processed_plain_text): if re.search(rf"@<(.+?):{global_config.bot.qq_account}>", message.processed_plain_text):
is_at = True is_at = True
is_mentioned = True is_mentioned = True
@ -74,13 +74,18 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]:
if not is_mentioned: if not is_mentioned:
# 判断是否被回复 # 判断是否被回复
if re.match( if re.match(
f"\[回复 [\s\S]*?\({str(global_config.bot.qq_account)}\)[\s\S]*?],说:", message.processed_plain_text rf"\[回复 (.+?)\({str(global_config.bot.qq_account)}\)(.+?)\],说:", message.processed_plain_text
) or re.match(
rf"\[回复<(.+?)(?=:{str(global_config.bot.qq_account)}>)\:{str(global_config.bot.qq_account)}>(.+?)\],说:",
message.processed_plain_text,
): ):
is_mentioned = True is_mentioned = True
else: else:
# 判断内容中是否被提及 # 判断内容中是否被提及
message_content = re.sub(r"@[\s\S]*?(\d+)", "", message.processed_plain_text) message_content = re.sub(r"@(.+?)(\d+)", "", message.processed_plain_text)
message_content = re.sub(r"\[回复 [\s\S]*?\(((\d+)|未知id)\)[\s\S]*?],说:", "", message_content) message_content = re.sub(r"@<(.+?)(?=:(\d+))\:(\d+)>", "", message_content)
message_content = re.sub(r"\[回复 (.+?)\(((\d+)|未知id)\)(.+?)\],说:", "", message_content)
message_content = re.sub(r"\[回复<(.+?)(?=:(\d+))\:(\d+)>(.+?)\],说:", "", message_content)
for keyword in keywords: for keyword in keywords:
if keyword in message_content: if keyword in message_content:
is_mentioned = True is_mentioned = True

View File

@ -83,7 +83,7 @@ class ImageManager:
current_timestamp = time.time() current_timestamp = time.time()
defaults = {"description": description, "timestamp": current_timestamp} defaults = {"description": description, "timestamp": current_timestamp}
desc_obj, created = ImageDescriptions.get_or_create( desc_obj, created = ImageDescriptions.get_or_create(
hash=image_hash, type=description_type, defaults=defaults image_description_hash=image_hash, type=description_type, defaults=defaults
) )
if not created: # 如果记录已存在,则更新 if not created: # 如果记录已存在,则更新
desc_obj.description = description desc_obj.description = description
@ -130,6 +130,7 @@ class ImageManager:
# 根据配置决定是否保存图片 # 根据配置决定是否保存图片
if global_config.emoji.save_emoji: if global_config.emoji.save_emoji:
# 生成文件名和路径 # 生成文件名和路径
logger.debug(f"保存表情包: {image_hash}")
current_timestamp = time.time() current_timestamp = time.time()
filename = f"{int(current_timestamp)}_{image_hash[:8]}.{image_format}" filename = f"{int(current_timestamp)}_{image_hash[:8]}.{image_format}"
emoji_dir = os.path.join(self.IMAGE_DIR, "emoji") emoji_dir = os.path.join(self.IMAGE_DIR, "emoji")
@ -150,13 +151,13 @@ class ImageManager:
img_obj.save() img_obj.save()
except Images.DoesNotExist: except Images.DoesNotExist:
Images.create( Images.create(
hash=image_hash, emoji_hash=image_hash,
path=file_path, path=file_path,
type="emoji", type="emoji",
description=description, description=description,
timestamp=current_timestamp, timestamp=current_timestamp,
) )
logger.trace(f"保存表情包元数据: {file_path}") # logger.debug(f"保存表情包元数据: {file_path}")
except Exception as e: except Exception as e:
logger.error(f"保存表情包文件或元数据失败: {str(e)}") logger.error(f"保存表情包文件或元数据失败: {str(e)}")
@ -223,7 +224,7 @@ class ImageManager:
img_obj.save() img_obj.save()
except Images.DoesNotExist: except Images.DoesNotExist:
Images.create( Images.create(
hash=image_hash, emoji_hash=image_hash,
path=file_path, path=file_path,
type="image", type="image",
description=description, description=description,

View File

@ -663,11 +663,11 @@ PROCESSOR_STYLE_CONFIG = {
PLANNER_STYLE_CONFIG = { PLANNER_STYLE_CONFIG = {
"advanced": { "advanced": {
"console_format": "<level>{time:HH:mm:ss}</level> | <fg #4DCDFF>规划器</fg #4DCDFF> | <fg #4DCDFF>{message}</fg #4DCDFF>", "console_format": "<level>{time:HH:mm:ss}</level> | <fg #069AFF>规划器</fg #069AFF> | <fg #069AFF>{message}</fg #069AFF>",
"file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 规划器 | {message}", "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 规划器 | {message}",
}, },
"simple": { "simple": {
"console_format": "<level>{time:HH:mm:ss}</level> | <fg #4DCDFF>规划器</fg #4DCDFF> | <fg #4DCDFF>{message}</fg #4DCDFF>", "console_format": "<level>{time:HH:mm:ss}</level> | <fg #069AFF>规划器</fg #069AFF> | <fg #069AFF>{message}</fg #069AFF>",
"file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 规划器 | {message}", "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 规划器 | {message}",
}, },
} }

View File

@ -32,6 +32,7 @@ from src.config.official_configs import (
FocusChatProcessorConfig, FocusChatProcessorConfig,
MessageReceiveConfig, MessageReceiveConfig,
MaimMessageConfig, MaimMessageConfig,
RelationshipConfig,
) )
install(extra_lines=3) install(extra_lines=3)
@ -45,7 +46,7 @@ TEMPLATE_DIR = "template"
# 考虑到实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码 # 考虑到实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码
# 对该字段的更新请严格参照语义化版本规范https://semver.org/lang/zh-CN/ # 对该字段的更新请严格参照语义化版本规范https://semver.org/lang/zh-CN/
MMC_VERSION = "0.7.0-snapshot.1" MMC_VERSION = "0.7.0-snapshot.2"
def update_config(): def update_config():
@ -143,6 +144,7 @@ class Config(ConfigBase):
bot: BotConfig bot: BotConfig
personality: PersonalityConfig personality: PersonalityConfig
identity: IdentityConfig identity: IdentityConfig
relationship: RelationshipConfig
chat: ChatConfig chat: ChatConfig
message_receive: MessageReceiveConfig message_receive: MessageReceiveConfig
normal_chat: NormalChatConfig normal_chat: NormalChatConfig

View File

@ -41,25 +41,18 @@ class PersonalityConfig(ConfigBase):
class IdentityConfig(ConfigBase): class IdentityConfig(ConfigBase):
"""个体特征配置类""" """个体特征配置类"""
height: int = 170
"""身高(单位:厘米)"""
weight: float = 50
"""体重(单位:千克)"""
age: int = 18
"""年龄(单位:岁)"""
gender: str = ""
"""性别(男/女)"""
appearance: str = "可爱"
"""外貌描述"""
identity_detail: list[str] = field(default_factory=lambda: []) identity_detail: list[str] = field(default_factory=lambda: [])
"""身份特征""" """身份特征"""
@dataclass
class RelationshipConfig(ConfigBase):
"""关系配置类"""
give_name: bool = False
"""是否给其他人取名"""
@dataclass @dataclass
class ChatConfig(ConfigBase): class ChatConfig(ConfigBase):
"""聊天配置类""" """聊天配置类"""
@ -83,7 +76,7 @@ class MessageReceiveConfig(ConfigBase):
class NormalChatConfig(ConfigBase): class NormalChatConfig(ConfigBase):
"""普通聊天配置类""" """普通聊天配置类"""
reasoning_model_probability: float = 0.3 normal_chat_first_probability: float = 0.3
""" """
发言时选择推理模型的概率0-1之间 发言时选择推理模型的概率0-1之间
选择普通模型的概率为 1 - reasoning_normal_model_probability 选择普通模型的概率为 1 - reasoning_normal_model_probability
@ -130,18 +123,15 @@ class NormalChatConfig(ConfigBase):
class FocusChatConfig(ConfigBase): class FocusChatConfig(ConfigBase):
"""专注聊天配置类""" """专注聊天配置类"""
reply_trigger_threshold: float = 3.0 auto_focus_threshold: float = 1.0
"""心流聊天触发阈值,越低越容易触发""" """自动切换到专注聊天的阈值,越低越容易进入专注聊天"""
default_decay_rate_per_second: float = 0.98 exit_focus_threshold: float = 1.0
"""默认衰减率,越大衰减越快""" """自动退出专注聊天的阈值,越低越容易退出专注聊天"""
observation_context_size: int = 12 observation_context_size: int = 12
"""可观察到的最长上下文大小,超过这个值的上下文会被压缩""" """可观察到的最长上下文大小,超过这个值的上下文会被压缩"""
consecutive_no_reply_threshold: int = 3
"""连续不回复的次数阈值"""
compressed_length: int = 5 compressed_length: int = 5
"""心流上下文压缩的最短压缩长度超过心流观察到的上下文长度会压缩最短压缩长度为5""" """心流上下文压缩的最短压缩长度超过心流观察到的上下文长度会压缩最短压缩长度为5"""
@ -193,9 +183,12 @@ class EmojiConfig(ConfigBase):
check_interval: int = 120 check_interval: int = 120
"""表情包检查间隔(分钟)""" """表情包检查间隔(分钟)"""
save_pic: bool = False save_pic: bool = True
"""是否保存图片""" """是否保存图片"""
save_emoji: bool = True
"""是否保存表情包"""
cache_emoji: bool = True cache_emoji: bool = True
"""是否缓存表情包""" """是否缓存表情包"""
@ -390,32 +383,41 @@ class ModelConfig(ConfigBase):
model_max_output_length: int = 800 # 最大回复长度 model_max_output_length: int = 800 # 最大回复长度
reasoning: dict[str, Any] = field(default_factory=lambda: {}) utils: dict[str, Any] = field(default_factory=lambda: {})
"""推理模型配置""" """组件模型配置"""
normal: dict[str, Any] = field(default_factory=lambda: {}) utils_small: dict[str, Any] = field(default_factory=lambda: {})
"""普通模型配置""" """组件小模型配置"""
topic_judge: dict[str, Any] = field(default_factory=lambda: {}) normal_chat_1: dict[str, Any] = field(default_factory=lambda: {})
"""主题判断模型配置""" """normal_chat首要回复模型模型配置"""
summary: dict[str, Any] = field(default_factory=lambda: {}) normal_chat_2: dict[str, Any] = field(default_factory=lambda: {})
"""摘要模型配置""" """normal_chat次要回复模型配置"""
memory_summary: dict[str, Any] = field(default_factory=lambda: {})
"""记忆的概括模型配置"""
vlm: dict[str, Any] = field(default_factory=lambda: {}) vlm: dict[str, Any] = field(default_factory=lambda: {})
"""视觉语言模型配置""" """视觉语言模型配置"""
heartflow: dict[str, Any] = field(default_factory=lambda: {}) focus_working_memory: dict[str, Any] = field(default_factory=lambda: {})
"""心流模型配置""" """专注工作记忆模型配置"""
observation: dict[str, Any] = field(default_factory=lambda: {}) focus_chat_mind: dict[str, Any] = field(default_factory=lambda: {})
"""观察模型配置""" """专注聊天规划模型配置"""
sub_heartflow: dict[str, Any] = field(default_factory=lambda: {}) focus_self_recognize: dict[str, Any] = field(default_factory=lambda: {})
"""子心流模型配置""" """专注自我识别模型配置"""
plan: dict[str, Any] = field(default_factory=lambda: {}) focus_tool_use: dict[str, Any] = field(default_factory=lambda: {})
"""计划模型配置""" """专注工具使用模型配置"""
focus_planner: dict[str, Any] = field(default_factory=lambda: {})
"""专注规划模型配置"""
focus_expressor: dict[str, Any] = field(default_factory=lambda: {})
"""专注表达器模型配置"""
embedding: dict[str, Any] = field(default_factory=lambda: {}) embedding: dict[str, Any] = field(default_factory=lambda: {})
"""嵌入模型配置""" """嵌入模型配置"""
@ -428,6 +430,3 @@ class ModelConfig(ConfigBase):
pfc_reply_checker: dict[str, Any] = field(default_factory=lambda: {}) pfc_reply_checker: dict[str, Any] = field(default_factory=lambda: {})
"""PFC回复检查模型配置""" """PFC回复检查模型配置"""
tool_use: dict[str, Any] = field(default_factory=lambda: {})
"""工具使用模型配置"""

View File

@ -44,7 +44,7 @@ class GoalAnalyzer:
def __init__(self, stream_id: str, private_name: str): def __init__(self, stream_id: str, private_name: str):
# TODO: API-Adapter修改标记 # TODO: API-Adapter修改标记
self.llm = LLMRequest( self.llm = LLMRequest(
model=global_config.model.normal, temperature=0.7, max_tokens=1000, request_type="conversation_goal" model=global_config.model.utils, temperature=0.7, max_tokens=1000, request_type="conversation_goal"
) )
self.personality_info = individuality.get_prompt(x_person=2, level=3) self.personality_info = individuality.get_prompt(x_person=2, level=3)

View File

@ -16,8 +16,8 @@ class KnowledgeFetcher:
def __init__(self, private_name: str): def __init__(self, private_name: str):
# TODO: API-Adapter修改标记 # TODO: API-Adapter修改标记
self.llm = LLMRequest( self.llm = LLMRequest(
model=global_config.model.normal, model=global_config.model.utils,
temperature=global_config.model.normal["temp"], temperature=global_config.model.utils["temp"],
max_tokens=1000, max_tokens=1000,
request_type="knowledge_fetch", request_type="knowledge_fetch",
) )

View File

@ -33,10 +33,10 @@ def init_prompt() -> None:
class PersonalityExpression: class PersonalityExpression:
def __init__(self): def __init__(self):
self.express_learn_model: LLMRequest = LLMRequest( self.express_learn_model: LLMRequest = LLMRequest(
model=global_config.model.normal, model=global_config.model.focus_expressor,
temperature=0.1, temperature=0.1,
max_tokens=256, max_tokens=256,
request_type="response_heartflow", request_type="learn_expression",
) )
self.meta_file_path = os.path.join("data", "expression", "personality", "expression_style_meta.json") self.meta_file_path = os.path.join("data", "expression", "personality", "expression_style_meta.json")
self.expressions_file_path = os.path.join("data", "expression", "personality", "expressions.json") self.expressions_file_path = os.path.join("data", "expression", "personality", "expressions.json")
@ -83,7 +83,7 @@ class PersonalityExpression:
logger.error(f"删除旧的表达文件 {self.expressions_file_path} 失败: {e}") logger.error(f"删除旧的表达文件 {self.expressions_file_path} 失败: {e}")
if count >= self.max_calculations: if count >= self.max_calculations:
logger.info(f"对于风格 '{current_style_text}' 已达到最大计算次数 ({self.max_calculations})。跳过提取。") logger.debug(f"对于风格 '{current_style_text}' 已达到最大计算次数 ({self.max_calculations})。跳过提取。")
# 即使跳过,也更新元数据以反映当前风格已被识别且计数已满 # 即使跳过,也更新元数据以反映当前风格已被识别且计数已满
self._write_meta_data({"last_style_text": current_style_text, "count": count}) self._write_meta_data({"last_style_text": current_style_text, "count": count})
return return

View File

@ -7,99 +7,24 @@ class Identity:
"""身份特征类""" """身份特征类"""
identity_detail: List[str] # 身份细节描述 identity_detail: List[str] # 身份细节描述
height: int # 身高(厘米)
weight: float # 体重(千克)
age: int # 年龄
gender: str # 性别
appearance: str # 外貌特征
_instance = None def __init__(self, identity_detail: List[str] = None):
def __new__(cls, *args, **kwargs):
if cls._instance is None:
cls._instance = super().__new__(cls)
return cls._instance
def __init__(
self,
identity_detail: List[str] = None,
height: int = 0,
weight: float = 0,
age: int = 0,
gender: str = "",
appearance: str = "",
):
"""初始化身份特征 """初始化身份特征
Args: Args:
identity_detail: 身份细节描述列表 identity_detail: 身份细节描述列表
height: 身高厘米
weight: 体重千克
age: 年龄
gender: 性别
appearance: 外貌特征
""" """
if identity_detail is None: if identity_detail is None:
identity_detail = [] identity_detail = []
self.identity_detail = identity_detail self.identity_detail = identity_detail
self.height = height
self.weight = weight
self.age = age
self.gender = gender
self.appearance = appearance
@classmethod
def get_instance(cls) -> "Identity":
"""获取Identity单例实例
Returns:
Identity: 单例实例
"""
if cls._instance is None:
cls._instance = cls()
return cls._instance
@classmethod
def initialize(
cls, identity_detail: List[str], height: int, weight: float, age: int, gender: str, appearance: str
) -> "Identity":
"""初始化身份特征
Args:
identity_detail: 身份细节描述列表
height: 身高厘米
weight: 体重千克
age: 年龄
gender: 性别
appearance: 外貌特征
Returns:
Identity: 初始化后的身份特征实例
"""
instance = cls.get_instance()
instance.identity_detail = identity_detail
instance.height = height
instance.weight = weight
instance.age = age
instance.gender = gender
instance.appearance = appearance
return instance
def to_dict(self) -> dict: def to_dict(self) -> dict:
"""将身份特征转换为字典格式""" """将身份特征转换为字典格式"""
return { return {
"identity_detail": self.identity_detail, "identity_detail": self.identity_detail,
"height": self.height,
"weight": self.weight,
"age": self.age,
"gender": self.gender,
"appearance": self.appearance,
} }
@classmethod @classmethod
def from_dict(cls, data: dict) -> "Identity": def from_dict(cls, data: dict) -> "Identity":
"""从字典创建身份特征实例""" """从字典创建身份特征实例"""
instance = cls.get_instance() return cls(identity_detail=data.get("identity_detail", []))
for key, value in data.items():
setattr(instance, key, value)
return instance

View File

@ -1,6 +1,4 @@
from typing import Optional from typing import Optional
from numpy import double
from .personality import Personality from .personality import Personality
from .identity import Identity from .identity import Identity
from .expression_style import PersonalityExpression from .expression_style import PersonalityExpression
@ -27,11 +25,6 @@ class Individuality:
personality_core: str, personality_core: str,
personality_sides: list, personality_sides: list,
identity_detail: list, identity_detail: list,
height: int,
weight: double,
age: int,
gender: str,
appearance: str,
) -> None: ) -> None:
"""初始化个体特征 """初始化个体特征
@ -40,11 +33,6 @@ class Individuality:
personality_core: 人格核心特点 personality_core: 人格核心特点
personality_sides: 人格侧面描述 personality_sides: 人格侧面描述
identity_detail: 身份细节描述 identity_detail: 身份细节描述
height: 身高厘米
weight: 体重千克
age: 年龄
gender: 性别
appearance: 外貌特征
""" """
# 初始化人格 # 初始化人格
self.personality = Personality.initialize( self.personality = Personality.initialize(
@ -52,9 +40,7 @@ class Individuality:
) )
# 初始化身份 # 初始化身份
self.identity = Identity.initialize( self.identity = Identity(identity_detail=identity_detail)
identity_detail=identity_detail, height=height, weight=weight, age=age, gender=gender, appearance=appearance
)
await self.express_style.extract_and_store_personality_expressions() await self.express_style.extract_and_store_personality_expressions()
@ -120,7 +106,7 @@ class Individuality:
获取身份特征的prompt 获取身份特征的prompt
Args: Args:
level (int): 详细程度 (1: 随机细节, 2: 所有细节+外貌年龄性别, 3: 同2) level (int): 详细程度 (1: 随机细节, 2: 所有细节, 3: 同2)
x_person (int, optional): 人称代词 (0: 无人称, 1: , 2: ). 默认为 2. x_person (int, optional): 人称代词 (0: 无人称, 1: , 2: ). 默认为 2.
Returns: Returns:
@ -145,23 +131,10 @@ class Individuality:
identity_detail = list(self.identity.identity_detail) identity_detail = list(self.identity.identity_detail)
random.shuffle(identity_detail) random.shuffle(identity_detail)
if level == 1: if level == 1:
identity_parts.append(f"身份是{identity_detail[0]}") identity_parts.append(f"{identity_detail[0]}")
elif level >= 2: elif level >= 2:
details_str = "".join(identity_detail) details_str = "".join(identity_detail)
identity_parts.append(f"身份是{details_str}") identity_parts.append(f"{details_str}")
# 根据level添加其他身份信息
if level >= 3:
if self.identity.appearance:
identity_parts.append(f"{self.identity.appearance}")
if self.identity.age > 0:
identity_parts.append(f"年龄大约{self.identity.age}")
if self.identity.gender:
identity_parts.append(f"性别是{self.identity.gender}")
if self.identity.height:
identity_parts.append(f"身高大约{self.identity.height}厘米")
if self.identity.weight:
identity_parts.append(f"体重大约{self.identity.weight}千克")
if identity_parts: if identity_parts:
details_str = "".join(identity_parts) details_str = "".join(identity_parts)

View File

@ -117,6 +117,9 @@ class LLMRequest:
self.model_name: str = model["name"] self.model_name: str = model["name"]
self.params = kwargs self.params = kwargs
self.enable_thinking = model.get("enable_thinking", False)
self.temp = model.get("temp", 0.7)
self.thinking_budget = model.get("thinking_budget", 4096)
self.stream = model.get("stream", False) self.stream = model.get("stream", False)
self.pri_in = model.get("pri_in", 0) self.pri_in = model.get("pri_in", 0)
self.pri_out = model.get("pri_out", 0) self.pri_out = model.get("pri_out", 0)
@ -435,7 +438,7 @@ class LLMRequest:
logger.error( logger.error(
f"模型 {self.model_name} 错误码: {response.status} - {error_code_mapping.get(response.status)}" f"模型 {self.model_name} 错误码: {response.status} - {error_code_mapping.get(response.status)}"
) )
raise RuntimeError("服务器负载过高,模型复失败QAQ") raise RuntimeError("服务器负载过高,模型复失败QAQ")
else: else:
logger.warning(f"模型 {self.model_name} 请求限制(429),等待{wait_time}秒后重试...") logger.warning(f"模型 {self.model_name} 请求限制(429),等待{wait_time}秒后重试...")
raise RuntimeError("请求限制(429)") raise RuntimeError("请求限制(429)")
@ -459,6 +462,8 @@ class LLMRequest:
logger.error( logger.error(
f"模型 {self.model_name} 错误码: {response.status} - {error_code_mapping.get(response.status)}" f"模型 {self.model_name} 错误码: {response.status} - {error_code_mapping.get(response.status)}"
) )
print(request_content)
print(response)
# 尝试获取并记录服务器返回的详细错误信息 # 尝试获取并记录服务器返回的详细错误信息
try: try:
error_json = await response.json() error_json = await response.json()
@ -495,11 +500,11 @@ class LLMRequest:
logger.warning(f"检测到403错误模型从 {old_model_name} 降级为 {self.model_name}") logger.warning(f"检测到403错误模型从 {old_model_name} 降级为 {self.model_name}")
# 对全局配置进行更新 # 对全局配置进行更新
if global_config.model.normal.get("name") == old_model_name: if global_config.model.normal_chat_2.get("name") == old_model_name:
global_config.model.normal["name"] = self.model_name global_config.model.normal_chat_2["name"] = self.model_name
logger.warning(f"将全局配置中的 llm_normal 模型临时降级至{self.model_name}") logger.warning(f"将全局配置中的 llm_normal 模型临时降级至{self.model_name}")
if global_config.model.reasoning.get("name") == old_model_name: if global_config.model.normal_chat_1.get("name") == old_model_name:
global_config.model.reasoning["name"] = self.model_name global_config.model.normal_chat_1["name"] = self.model_name
logger.warning(f"将全局配置中的 llm_reasoning 模型临时降级至{self.model_name}") logger.warning(f"将全局配置中的 llm_reasoning 模型临时降级至{self.model_name}")
if payload and "model" in payload: if payload and "model" in payload:
@ -599,8 +604,9 @@ class LLMRequest:
new_params = dict(params) new_params = dict(params)
if self.model_name.lower() in self.MODELS_NEEDING_TRANSFORMATION: if self.model_name.lower() in self.MODELS_NEEDING_TRANSFORMATION:
# 删除 'temperature' 参数(如果存在) # 删除 'temperature' 参数如果存在但避免删除我们在_build_payload中添加的自定义温度
new_params.pop("temperature", None) if "temperature" in new_params and new_params["temperature"] == 0.7:
new_params.pop("temperature")
# 如果存在 'max_tokens',则重命名为 'max_completion_tokens' # 如果存在 'max_tokens',则重命名为 'max_completion_tokens'
if "max_tokens" in new_params: if "max_tokens" in new_params:
new_params["max_completion_tokens"] = new_params.pop("max_tokens") new_params["max_completion_tokens"] = new_params.pop("max_tokens")
@ -630,6 +636,18 @@ class LLMRequest:
"messages": messages, "messages": messages,
**params_copy, **params_copy,
} }
# 添加temp参数如果不是默认值0.7
if self.temp != 0.7:
payload["temperature"] = self.temp
# 添加enable_thinking参数如果不是默认值False
if not self.enable_thinking:
payload["enable_thinking"] = False
if self.thinking_budget != 4096:
payload["thinking_budget"] = self.thinking_budget
if "max_tokens" not in payload and "max_completion_tokens" not in payload: if "max_tokens" not in payload and "max_completion_tokens" not in payload:
payload["max_tokens"] = global_config.model.model_max_output_length payload["max_tokens"] = global_config.model.model_max_output_length
# 如果 payload 中依然存在 max_tokens 且需要转换,在这里进行再次检查 # 如果 payload 中依然存在 max_tokens 且需要转换,在这里进行再次检查

View File

@ -96,11 +96,6 @@ class MainSystem:
personality_core=global_config.personality.personality_core, personality_core=global_config.personality.personality_core,
personality_sides=global_config.personality.personality_sides, personality_sides=global_config.personality.personality_sides,
identity_detail=global_config.identity.identity_detail, identity_detail=global_config.identity.identity_detail,
height=global_config.identity.height,
weight=global_config.identity.weight,
age=global_config.identity.age,
gender=global_config.identity.gender,
appearance=global_config.identity.appearance,
) )
logger.success("个体特征初始化成功") logger.success("个体特征初始化成功")

View File

@ -58,7 +58,7 @@ class PersonInfoManager:
self.person_name_list = {} self.person_name_list = {}
# TODO: API-Adapter修改标记 # TODO: API-Adapter修改标记
self.qv_name_llm = LLMRequest( self.qv_name_llm = LLMRequest(
model=global_config.model.normal, model=global_config.model.utils,
max_tokens=256, max_tokens=256,
request_type="qv_name", request_type="qv_name",
) )
@ -104,10 +104,14 @@ class PersonInfoManager:
def get_person_id_by_person_name(self, person_name: str): def get_person_id_by_person_name(self, person_name: str):
"""根据用户名获取用户ID""" """根据用户名获取用户ID"""
document = db.person_info.find_one({"person_name": person_name}) try:
if document: record = PersonInfo.get_or_none(PersonInfo.person_name == person_name)
return document["person_id"] if record:
else: return record.person_id
else:
return ""
except Exception as e:
logger.error(f"根据用户名 {person_name} 获取用户ID时出错 (Peewee): {e}")
return "" return ""
@staticmethod @staticmethod
@ -257,21 +261,19 @@ class PersonInfoManager:
current_name_set = set(self.person_name_list.values()) current_name_set = set(self.person_name_list.values())
while current_try < max_retries: while current_try < max_retries:
prompt_personality = individuality.get_prompt(x_person=2, level=1) # prompt_personality = individuality.get_prompt(x_person=2, level=1)
bot_name = individuality.personality.bot_nickname bot_name = individuality.personality.bot_nickname
qv_name_prompt = f"你是{bot_name}{prompt_personality}" qv_name_prompt = f"你是{bot_name}一个伪装成人类的AI你不能让别人发现这一点"
qv_name_prompt += f"现在你想给一个用户取一个昵称,用户的qq昵称是{user_nickname}" qv_name_prompt += f"现在你想给一个用户取一个昵称,用户的qq昵称是{user_nickname}"
qv_name_prompt += f"用户的qq群昵称名是{user_cardname}" qv_name_prompt += f"用户的qq群昵称名是{user_cardname}"
if user_avatar: if user_avatar:
qv_name_prompt += f"用户的qq头像是{user_avatar}" qv_name_prompt += f"用户的qq头像是{user_avatar}"
if old_name: if old_name:
qv_name_prompt += f"你之前叫他{old_name},是因为{old_reason}" qv_name_prompt += f"你之前叫他{old_name},是因为{old_reason}"
qv_name_prompt += f"\n其他取名的要求是:{request},不要太浮夸" qv_name_prompt += f"\n其他取名的要求是:{request},不要太浮夸,简短,"
qv_name_prompt += ( qv_name_prompt += "\n请根据以上用户信息想想你叫他什么比较好不要太浮夸请最好使用用户的qq昵称可以稍作修改优先使用原文。优先使用用户的qq昵称或者群昵称原文。"
"\n请根据以上用户信息想想你叫他什么比较好不要太浮夸请最好使用用户的qq昵称可以稍作修改"
)
if existing_names_str: if existing_names_str:
qv_name_prompt += f"\n请注意,以下名称已被你尝试过或已知存在,请避免:{existing_names_str}\n" qv_name_prompt += f"\n请注意,以下名称已被你尝试过或已知存在,请避免:{existing_names_str}\n"

View File

@ -297,6 +297,8 @@ class RelationshipManager:
relationship_value = await person_info_manager.get_value(person_id, "relationship_value") relationship_value = await person_info_manager.get_value(person_id, "relationship_value")
level_num = self.calculate_level_num(relationship_value) level_num = self.calculate_level_num(relationship_value)
relation_value_prompt = ""
if level_num == 0 or level_num == 5: if level_num == 0 or level_num == 5:
relationship_level = ["厌恶", "冷漠以对", "认识", "友好对待", "喜欢", "暧昧"] relationship_level = ["厌恶", "冷漠以对", "认识", "友好对待", "喜欢", "暧昧"]
relation_prompt2_list = [ relation_prompt2_list = [
@ -307,9 +309,11 @@ class RelationshipManager:
"积极回复", "积极回复",
"友善和包容的回复", "友善和包容的回复",
] ]
return f"{relationship_level[level_num]}{person_name},打算{relation_prompt2_list[level_num]}\n" relation_value_prompt = (
f"{relationship_level[level_num]}{person_name},打算{relation_prompt2_list[level_num]}"
)
elif level_num == 2: elif level_num == 2:
return "" relation_value_prompt = ""
else: else:
if random.random() < 0.6: if random.random() < 0.6:
relationship_level = ["厌恶", "冷漠以对", "认识", "友好对待", "喜欢", "暧昧"] relationship_level = ["厌恶", "冷漠以对", "认识", "友好对待", "喜欢", "暧昧"]
@ -321,9 +325,20 @@ class RelationshipManager:
"积极回复", "积极回复",
"友善和包容的回复", "友善和包容的回复",
] ]
return f"{relationship_level[level_num]}{person_name},打算{relation_prompt2_list[level_num]}\n" relation_value_prompt = (
f"{relationship_level[level_num]}{person_name},打算{relation_prompt2_list[level_num]}"
)
else: else:
return "" relation_value_prompt = ""
if relation_value_prompt:
nickname_str = await person_info_manager.get_value(person_id, "nickname")
platform = await person_info_manager.get_value(person_id, "platform")
relation_prompt = f"{relation_value_prompt}ta在{platform}上的昵称是{nickname_str}\n"
else:
relation_prompt = ""
return relation_prompt
@staticmethod @staticmethod
def calculate_level_num(relationship_value) -> int: def calculate_level_num(relationship_value) -> int:

View File

@ -3,5 +3,5 @@
# 导入所有动作模块以确保装饰器被执行 # 导入所有动作模块以确保装饰器被执行
from . import test_action # noqa from . import test_action # noqa
from . import online_action # noqa # from . import online_action # noqa
from . import mute_action # noqa from . import mute_action # noqa

View File

@ -0,0 +1,63 @@
from src.common.logger_manager import get_logger
from src.chat.focus_chat.planners.actions.plugin_action import PluginAction, register_action
from typing import Tuple
logger = get_logger("group_whole_ban_action")
@register_action
class GroupWholeBanAction(PluginAction):
"""群聊全体禁言动作处理类"""
action_name = "group_whole_ban_action"
action_description = "开启或关闭群聊全体禁言,当群聊过于混乱或需要安静时使用"
action_parameters = {
"enable": "是否开启全体禁言输入True开启False关闭必填",
}
action_require = [
"当群聊过于混乱需要安静时使用",
"当需要临时暂停群聊讨论时使用",
"当有人要求开启全体禁言时使用",
"当管理员需要发布重要公告时使用",
]
default = False
associated_types = ["command", "text"]
async def process(self) -> Tuple[bool, str]:
"""处理群聊全体禁言动作"""
logger.info(f"{self.log_prefix} 执行全体禁言动作: {self.reasoning}")
# 获取参数
enable = self.action_data.get("enable")
if enable is None:
error_msg = "全体禁言参数不完整需要enable参数"
logger.error(f"{self.log_prefix} {error_msg}")
return False, error_msg
# 确保enable是布尔类型
if isinstance(enable, str):
if enable.lower() in ["true", "1", "yes", "开启", ""]:
enable = True
elif enable.lower() in ["false", "0", "no", "关闭", ""]:
enable = False
else:
error_msg = f"无效的enable参数: {enable}应该是True或False"
logger.error(f"{self.log_prefix} {error_msg}")
return False, error_msg
# 发送表达情绪的消息
action_text = "开启" if enable else "关闭"
await self.send_message_by_expressor(f"我要{action_text}全体禁言")
try:
# 发送群聊全体禁言命令,按照新格式
await self.send_message(type="command", data={"name": "GROUP_WHOLE_BAN", "args": {"enable": enable}})
logger.info(f"{self.log_prefix} 成功{action_text}全体禁言")
return True, f"成功{action_text}全体禁言"
except Exception as e:
logger.error(f"{self.log_prefix} 执行全体禁言动作时出错: {e}")
await self.send_message_by_expressor(f"执行全体禁言动作时出错: {e}")
return False, f"执行全体禁言动作时出错: {e}"

View File

@ -7,15 +7,14 @@ logger = get_logger("mute_action")
@register_action @register_action
class MuteAction(PluginAction): class MuteAction(PluginAction):
"""测试动作处理类""" """群聊禁言动作处理类"""
action_name = "mute_action" action_name = "mute_action"
action_description = ( action_description = "如果某人违反了公序良俗,或者别人戳你太多,或者某人刷屏,一定要禁言某人,如果你很生气,可以禁言某人,可以自选禁言时长,视严重程度而定。"
"如果某人违反了公序良俗,或者别人戳你太多,,或者某人刷屏,一定要禁言某人,如果你很生气,可以禁言某人"
)
action_parameters = { action_parameters = {
"target": "禁言对象,输入你要禁言的对象的名字,必填,", "target": "禁言对象,输入你要禁言的对象的名字,必填",
"duration": "禁言时长,输入你要禁言的时长,单位为秒,必填", "duration": "禁言时长,输入你要禁言的时长,单位为秒,必填,必须为数字",
"reason": "禁言理由,可选",
} }
action_require = [ action_require = [
"当有人违反了公序良俗时使用", "当有人违反了公序良俗时使用",
@ -25,32 +24,48 @@ class MuteAction(PluginAction):
"当千石可乐或可乐酱要求你禁言时使用", "当千石可乐或可乐酱要求你禁言时使用",
"当你想回避某个话题时使用", "当你想回避某个话题时使用",
] ]
default = True # 不是默认动作,需要手动添加到使用集 default = True # 默认动作,是否手动添加到使用集
associated_types = ["command", "text"] # associated_types = ["command", "text"]
associated_types = ["text"]
async def process(self) -> Tuple[bool, str]: async def process(self) -> Tuple[bool, str]:
"""处理测试动作""" """处理群聊禁言动作"""
logger.info(f"{self.log_prefix} 执行online动作: {self.reasoning}") logger.info(f"{self.log_prefix} 执行禁言动作: {self.reasoning}")
# 发送测试消息 # 获取参数
target = self.action_data.get("target") target = self.action_data.get("target")
duration = self.action_data.get("duration") duration = self.action_data.get("duration")
reason = self.action_data.get("reason") reason = self.action_data.get("reason", "违反群规")
if not target or not duration:
error_msg = "禁言参数不完整需要target和duration"
logger.error(f"{self.log_prefix} {error_msg}")
return False, error_msg
# 获取用户ID
platform, user_id = await self.get_user_id_by_person_name(target) platform, user_id = await self.get_user_id_by_person_name(target)
await self.send_message_by_expressor(f"我要禁言{target}{platform},时长{duration}秒,理由{reason},表达情绪") if not user_id:
error_msg = f"未找到用户 {target} 的ID"
logger.error(f"{self.log_prefix} {error_msg}")
return False, error_msg
# 发送表达情绪的消息
await self.send_message_by_expressor(f"我要禁言{target},时长{duration}秒,理由:{reason}")
try: try:
# 确保duration是字符串类型
duration_str = str(duration)
# 发送群聊禁言命令,按照新格式
await self.send_message( await self.send_message(
type="command", type="command", data={"name": "GROUP_BAN", "args": {"qq_id": str(user_id), "duration": duration_str}}
data={"name": "GROUP_BAN", "args": {"qq_id": f"{user_id}", "duration": f"{duration}"}},
# target = target
) )
logger.info(f"{self.log_prefix} 成功禁言用户 {target}({user_id}),时长 {duration}")
return True, f"成功禁言 {target},时长 {duration}"
except Exception as e: except Exception as e:
logger.error(f"{self.log_prefix} 执行mute动作时出错: {e}") logger.error(f"{self.log_prefix} 执行禁言动作时出错: {e}")
await self.send_message_by_expressor(f"执行mute动作时出错: {e}") await self.send_message_by_expressor(f"执行禁言动作时出错: {e}")
return False, f"执行禁言动作时出错: {e}"
return False, "执行mute动作时出错"
return True, "测试动作执行成功"

View File

@ -1,44 +0,0 @@
from src.common.logger_manager import get_logger
from src.chat.focus_chat.planners.actions.plugin_action import PluginAction, register_action
from typing import Tuple
logger = get_logger("check_online_action")
@register_action
class CheckOnlineAction(PluginAction):
"""测试动作处理类"""
action_name = "check_online_action"
action_description = "这是一个检查在线状态的动作当有人要求你检查Maibot麦麦 机器人)在线状态时使用"
action_parameters = {"mode": "查看模式"}
action_require = [
"当有人要求你检查Maibot麦麦 机器人)在线状态时使用",
"mode参数为version时查看在线版本状态默认用这种",
"mode参数为type时查看在线系统类型分布",
]
default = False # 不是默认动作,需要手动添加到使用集
associated_types = ["text"]
async def process(self) -> Tuple[bool, str]:
"""处理测试动作"""
logger.info(f"{self.log_prefix} 执行online动作: {self.reasoning}")
# 发送测试消息
mode = self.action_data.get("mode", "type")
await self.send_message_by_expressor("我看看")
try:
if mode == "type":
await self.send_message("text", "#online detail")
elif mode == "version":
await self.send_message("text", "#online")
except Exception as e:
logger.error(f"{self.log_prefix} 执行online动作时出错: {e}")
await self.send_message_by_expressor("执行online动作时出错: {e}")
return False, "执行online动作时出错"
return True, "测试动作执行成功"

View File

@ -153,7 +153,7 @@ class PicAction(PluginAction):
if encode_success: if encode_success:
base64_image_string = encode_result base64_image_string = encode_result
send_success = await self.send_message(type="emoji", data=base64_image_string) send_success = await self.send_message(type="image", data=base64_image_string)
if send_success: if send_success:
await self.send_message_by_expressor("图片表情已发送!") await self.send_message_by_expressor("图片表情已发送!")
return True, "图片表情已发送" return True, "图片表情已发送"

View File

@ -1,5 +1,3 @@
from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config
import json import json
from src.common.logger_manager import get_logger from src.common.logger_manager import get_logger
from src.tools.tool_can_use import get_all_tool_definitions, get_tool_instance from src.tools.tool_can_use import get_all_tool_definitions, get_tool_instance
@ -8,11 +6,6 @@ logger = get_logger("tool_use")
class ToolUser: class ToolUser:
def __init__(self):
self.llm_model_tool = LLMRequest(
model=global_config.model.tool_use, temperature=0.2, max_tokens=1000, request_type="tool_use"
)
@staticmethod @staticmethod
def _define_tools(): def _define_tools():
"""获取所有已注册工具的定义 """获取所有已注册工具的定义

View File

@ -1,5 +1,5 @@
[inner] [inner]
version = "2.4.0" version = "2.5.0"
#----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读---- #----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读----
#如果你想要修改配置文件请在修改后将version的值进行变更 #如果你想要修改配置文件请在修改后将version的值进行变更
@ -18,28 +18,26 @@ nickname = "麦麦"
alias_names = ["麦叠", "牢麦"] #仅在 专注聊天 有效 alias_names = ["麦叠", "牢麦"] #仅在 专注聊天 有效
[personality] [personality]
personality_core = "用一句话或几句话描述人格的核心特点" # 建议20字以内谁再写3000字小作文敲谁脑袋 personality_core = "是一个积极向上的女大学生" # 建议20字以内谁再写3000字小作文敲谁脑袋
personality_sides = [ personality_sides = [
"用一句话或几句话描述人格的一些细节", "用一句话或几句话描述人格的一些细节",
"用一句话或几句话描述人格的一些细节", "用一句话或几句话描述人格的一些细节",
"用一句话或几句话描述人格的一些细节", "用一句话或几句话描述人格的一些细节",
"用一句话或几句话描述人格的一些细节",
"用一句话或几句话描述人格的一些细节",
]# 条数任意不能为0, 该选项还在调试中,可能未完全生效
# 身份特点 部分选项仅在 专注聊天 有效
[identity] #アイデンティティがない 生まれないらららら
identity_detail = [
"身份特点",
"身份特点",
]# 条数任意不能为0 ]# 条数任意不能为0
#外貌特征 # 身份特点
age = 18 # 年龄 单位岁 [identity] #アイデンティティがない 生まれないらららら
gender = "女" # 性别 identity_detail = [
height = "170" # 身高单位cm "年龄为19岁",
weight = "50" # 体重单位kg "是女孩子",
appearance = "用一句或几句话描述外貌特征" # 外貌特征 "身高为160cm",
"有橙色的短发",
]
# 可以描述外贸,性别,身高,职业,属性等等描述
# 条数任意不能为0
[relationship]
give_name = true # 麦麦是否给其他人取名,关闭后无法使用禁言功能
[chat] #麦麦的聊天通用设置 [chat] #麦麦的聊天通用设置
chat_mode = "normal" # 聊天模式 —— 普通模式normal专注模式focus在普通模式和专注模式之间自动切换 chat_mode = "normal" # 聊天模式 —— 普通模式normal专注模式focus在普通模式和专注模式之间自动切换
@ -65,7 +63,7 @@ ban_msgs_regex = [
[normal_chat] #普通聊天 [normal_chat] #普通聊天
#一般回复参数 #一般回复参数
reasoning_model_probability = 0.3 # 麦麦回答时选择推理模型的概率与之相对的普通模型的概率为1 - reasoning_model_probability normal_chat_first_probability = 0.3 # 麦麦回答时选择首要模型的概率与之相对的次要模型的概率为1 - normal_chat_first_probability
max_context_size = 15 #上下文长度 max_context_size = 15 #上下文长度
emoji_chance = 0.2 # 麦麦一般回复时使用表情包的概率设置为1让麦麦自己决定发不发 emoji_chance = 0.2 # 麦麦一般回复时使用表情包的概率设置为1让麦麦自己决定发不发
thinking_timeout = 120 # 麦麦最长思考时间超过这个时间的思考会放弃往往是api反应太慢 thinking_timeout = 120 # 麦麦最长思考时间超过这个时间的思考会放弃往往是api反应太慢
@ -81,11 +79,10 @@ at_bot_inevitable_reply = false # @bot 必然回复
talk_frequency_down_groups = [] #降低回复频率的群号码 talk_frequency_down_groups = [] #降低回复频率的群号码
[focus_chat] #专注聊天 [focus_chat] #专注聊天
reply_trigger_threshold = 3.0 # 专注聊天触发阈值,越低越容易进入专注聊天 auto_focus_threshold = 1 # 自动切换到专注聊天的阈值,越低越容易进入专注聊天
default_decay_rate_per_second = 0.98 # 默认衰减率,越大衰减越快,越高越难进入专注聊天 exit_focus_threshold = 1 # 自动退出专注聊天的阈值,越低越容易退出专注聊天
consecutive_no_reply_threshold = 3 # 连续不回复的阈值,越低越容易结束专注聊天
think_interval = 1 # 思考间隔 单位秒 think_interval = 3 # 思考间隔 单位秒,可以有效减少消耗
observation_context_size = 15 # 观察到的最长上下文大小,建议15太短太长都会导致脑袋尖尖 observation_context_size = 15 # 观察到的最长上下文大小,建议15太短太长都会导致脑袋尖尖
compressed_length = 5 # 不能大于chat.observation_context_size,心流上下文压缩的最短压缩长度超过心流观察到的上下文长度会压缩最短压缩长度为5 compressed_length = 5 # 不能大于chat.observation_context_size,心流上下文压缩的最短压缩长度超过心流观察到的上下文长度会压缩最短压缩长度为5
@ -93,23 +90,21 @@ compress_length_limit = 5 #最多压缩份数,超过该数值的压缩上下
[focus_chat_processor] # 专注聊天处理器打开可以实现更多功能但是会增加token消耗 [focus_chat_processor] # 专注聊天处理器打开可以实现更多功能但是会增加token消耗
self_identify_processor = true # 是否启用自我识别处理器 self_identify_processor = true # 是否启用自我识别处理器
tool_use_processor = true # 是否启用工具使用处理器 tool_use_processor = false # 是否启用工具使用处理器
working_memory_processor = true # 是否启用工作记忆处理器 working_memory_processor = false # 是否启用工作记忆处理器
[expression] [expression]
# 表达方式 # 表达方式
expression_style = "描述麦麦说话的表达风格,表达习惯" expression_style = "描述麦麦说话的表达风格,表达习惯"
enable_expression_learning = true # 是否启用表达学习 enable_expression_learning = true # 是否启用表达学习
learning_interval = 300 # 学习间隔 单位秒 learning_interval = 600 # 学习间隔 单位秒
[emoji] [emoji]
max_reg_num = 40 # 表情包最大注册数量 max_reg_num = 40 # 表情包最大注册数量
do_replace = true # 开启则在达到最大数量时删除(替换)表情包,关闭则达到最大数量时不会继续收集表情包 do_replace = true # 开启则在达到最大数量时删除(替换)表情包,关闭则达到最大数量时不会继续收集表情包
check_interval = 120 # 检查表情包(注册,破损,删除)的时间间隔(分钟) check_interval = 120 # 检查表情包(注册,破损,删除)的时间间隔(分钟)
save_pic = false # 是否保存图片 save_pic = true # 是否保存图片
cache_emoji = true # 是否缓存表情包 cache_emoji = true # 是否缓存表情包
steal_emoji = true # 是否偷取表情包,让麦麦可以发送她保存的这些表情包 steal_emoji = true # 是否偷取表情包,让麦麦可以发送她保存的这些表情包
content_filtration = false # 是否启用表情包过滤,只有符合该要求的表情包才会被保存 content_filtration = false # 是否启用表情包过滤,只有符合该要求的表情包才会被保存
@ -138,7 +133,7 @@ mood_update_interval = 1.0 # 情绪更新间隔 单位秒
mood_decay_rate = 0.95 # 情绪衰减率 mood_decay_rate = 0.95 # 情绪衰减率
mood_intensity_factor = 1.0 # 情绪强度因子 mood_intensity_factor = 1.0 # 情绪强度因子
[keyword_reaction] # 针对某个关键词作出反应 [keyword_reaction] # 针对某个关键词作出反应,仅在 普通聊天 有效
enable = true # 关键词反应功能的总开关 enable = true # 关键词反应功能的总开关
[[keyword_reaction.rules]] # 如果想要新增多个关键词直接复制本条修改keywords和reaction即可 [[keyword_reaction.rules]] # 如果想要新增多个关键词直接复制本条修改keywords和reaction即可
@ -186,10 +181,9 @@ enable = true
[experimental] #实验性功能 [experimental] #实验性功能
enable_friend_chat = false # 是否启用好友聊天 enable_friend_chat = false # 是否启用好友聊天
pfc_chatting = false # 是否启用PFC聊天该功能仅作用于私聊与回复模式独立 pfc_chatting = false # 是否启用PFC聊天该功能仅作用于私聊与回复模式独立在0.7.0暂时无效
#下面的模型若使用硅基流动则不需要更改使用ds官方则改成.env自定义的宏使用自定义模型则选择定位相似的模型自己填写 #下面的模型若使用硅基流动则不需要更改使用ds官方则改成.env自定义的宏使用自定义模型则选择定位相似的模型自己填写
#推理模型
# 额外字段 # 额外字段
# 下面的模型有以下额外字段可以添加: # 下面的模型有以下额外字段可以添加:
@ -200,14 +194,9 @@ pfc_chatting = false # 是否启用PFC聊天该功能仅作用于私聊
[model] [model]
model_max_output_length = 800 # 模型单次返回的最大token数 model_max_output_length = 800 # 模型单次返回的最大token数
#这个模型必须是推理模型 #------------必填:组件模型------------
[model.reasoning] # 一般聊天模式的推理回复模型
name = "Pro/deepseek-ai/DeepSeek-R1"
provider = "SILICONFLOW"
pri_in = 1.0 #模型的输入价格(非必填,可以记录消耗)
pri_out = 4.0 #模型的输出价格(非必填,可以记录消耗)
[model.normal] #V3 回复模型 专注和一般聊天模式共用的回复模型 [model.utils] # 在麦麦的一些组件中使用的模型,例如表情包模块,取名模块,消耗量不大
name = "Pro/deepseek-ai/DeepSeek-V3" name = "Pro/deepseek-ai/DeepSeek-V3"
provider = "SILICONFLOW" provider = "SILICONFLOW"
pri_in = 2 #模型的输入价格(非必填,可以记录消耗) pri_in = 2 #模型的输入价格(非必填,可以记录消耗)
@ -215,17 +204,20 @@ pri_out = 8 #模型的输出价格(非必填,可以记录消耗)
#默认temp 0.2 如果你使用的是老V3或者其他模型请自己修改temp参数 #默认temp 0.2 如果你使用的是老V3或者其他模型请自己修改temp参数
temp = 0.2 #模型的温度新V3建议0.1-0.3 temp = 0.2 #模型的温度新V3建议0.1-0.3
[model.topic_judge] #主题判断模型建议使用qwen2.5 7b [model.utils_small] # 在麦麦的一些组件中使用的小模型,消耗量较大
name = "Pro/Qwen/Qwen2.5-7B-Instruct" # 强烈建议使用免费的小模型
name = "Qwen/Qwen3-8B"
provider = "SILICONFLOW" provider = "SILICONFLOW"
pri_in = 0.35 enable_thinking = false # 是否启用思考
pri_out = 0.35 pri_in = 0
pri_out = 0
[model.summary] #概括模型建议使用qwen2.5 32b 及以上 [model.memory_summary] # 记忆的概括模型
name = "Qwen/Qwen2.5-32B-Instruct" name = "Qwen/Qwen3-30B-A3B"
provider = "SILICONFLOW" provider = "SILICONFLOW"
pri_in = 1.26 enable_thinking = false # 是否启用思考
pri_out = 1.26 pri_in = 0.7
pri_out = 2.8
[model.vlm] # 图像识别模型 [model.vlm] # 图像识别模型
name = "Pro/Qwen/Qwen2.5-VL-7B-Instruct" name = "Pro/Qwen/Qwen2.5-VL-7B-Instruct"
@ -233,40 +225,85 @@ provider = "SILICONFLOW"
pri_in = 0.35 pri_in = 0.35
pri_out = 0.35 pri_out = 0.35
[model.heartflow] # 用于控制麦麦是否参与聊天的模型
name = "Qwen/Qwen2.5-32B-Instruct"
provider = "SILICONFLOW"
pri_in = 1.26
pri_out = 1.26
[model.observation] #观察模型,压缩聊天内容,建议用免费的
# name = "Pro/Qwen/Qwen2.5-7B-Instruct"
name = "Qwen/Qwen2.5-7B-Instruct"
provider = "SILICONFLOW"
pri_in = 0
pri_out = 0
[model.sub_heartflow] #心流:认真聊天时,生成麦麦的内心想法,必须使用具有工具调用能力的模型
name = "Pro/deepseek-ai/DeepSeek-V3"
provider = "SILICONFLOW"
pri_in = 2
pri_out = 8
temp = 0.3 #模型的温度新V3建议0.1-0.3
[model.plan] #决策:认真聊天时,负责决定麦麦该做什么
name = "Pro/deepseek-ai/DeepSeek-V3"
provider = "SILICONFLOW"
pri_in = 2
pri_out = 8
#嵌入模型 #嵌入模型
[model.embedding]
[model.embedding] #嵌入
name = "BAAI/bge-m3" name = "BAAI/bge-m3"
provider = "SILICONFLOW" provider = "DEV"
pri_in = 0 pri_in = 0
pri_out = 0 pri_out = 0
#------------普通聊天必填模型------------
[model.normal_chat_1] # 一般聊天模式的首要回复模型,推荐使用 推理模型
name = "Pro/deepseek-ai/DeepSeek-R1"
provider = "SILICONFLOW"
pri_in = 4.0 #模型的输入价格(非必填,可以记录消耗)
pri_out = 16.0 #模型的输出价格(非必填,可以记录消耗)
[model.normal_chat_2] # 一般聊天模式的次要回复模型,推荐使用 非推理模型
name = "Pro/deepseek-ai/DeepSeek-V3"
provider = "SILICONFLOW"
pri_in = 2 #模型的输入价格(非必填,可以记录消耗)
pri_out = 8 #模型的输出价格(非必填,可以记录消耗)
#默认temp 0.2 如果你使用的是老V3或者其他模型请自己修改temp参数
temp = 0.2 #模型的温度新V3建议0.1-0.3
#------------专注聊天必填模型------------
[model.focus_working_memory] #工作记忆模型
name = "Qwen/Qwen3-30B-A3B"
provider = "SILICONFLOW"
enable_thinking = false # 是否启用思考
pri_in = 0.7
pri_out = 2.8
[model.focus_chat_mind] #聊天规划:认真聊天时,生成麦麦对聊天的规划想法
name = "Pro/deepseek-ai/DeepSeek-V3"
# name = "Qwen/Qwen3-30B-A3B"
provider = "SILICONFLOW"
# enable_thinking = false # 是否启用思考
pri_in = 2
pri_out = 8
temp = 0.3
[model.focus_tool_use] #工具调用模型,需要使用支持工具调用的模型
name = "Qwen/Qwen3-14B"
provider = "SILICONFLOW"
enable_thinking = false # 是否启用思考
pri_in = 0.5
pri_out = 2
[model.focus_planner] #决策:认真聊天时,负责决定麦麦该做什么
name = "Pro/deepseek-ai/DeepSeek-V3"
# name = "Qwen/Qwen3-30B-A3B"
provider = "SILICONFLOW"
# enable_thinking = false # 是否启用思考
pri_in = 2
pri_out = 8
temp = 0.3
#表达器模型,用于表达麦麦的想法,生成最终回复,对语言风格影响极大
#也用于表达方式学习
[model.focus_expressor]
name = "Pro/deepseek-ai/DeepSeek-V3"
# name = "Qwen/Qwen3-30B-A3B"
provider = "SILICONFLOW"
# enable_thinking = false # 是否启用思考
pri_in = 2
pri_out = 8
temp = 0.3
#自我识别模型,用于自我认知和身份识别
[model.focus_self_recognize]
# name = "Pro/deepseek-ai/DeepSeek-V3"
name = "Qwen/Qwen3-30B-A3B"
provider = "SILICONFLOW"
enable_thinking = false # 是否启用思考
pri_in = 0.7
pri_out = 2.8
temp = 0.7
#私聊PFC需要开启PFC功能默认三个模型均为硅基流动v3如果需要支持多人同时私聊或频繁调用建议把其中的一个或两个换成官方v3或其它模型以免撞到429 #私聊PFC需要开启PFC功能默认三个模型均为硅基流动v3如果需要支持多人同时私聊或频繁调用建议把其中的一个或两个换成官方v3或其它模型以免撞到429
@ -294,15 +331,6 @@ pri_in = 2
pri_out = 8 pri_out = 8
#以下模型暂时没有使用!!
#以下模型暂时没有使用!!
#以下模型暂时没有使用!!
#以下模型暂时没有使用!!
#以下模型暂时没有使用!!
[model.tool_use] #工具调用模型需要使用支持工具调用的模型建议使用qwen2.5 32b
name = "Qwen/Qwen2.5-32B-Instruct"
provider = "SILICONFLOW"
pri_in = 1.26
pri_out = 1.26