Merge branch 'dev' of https://github.com/MaiM-with-u/MaiBot into PFC-test

pull/937/head
Bakadax 2025-05-16 22:59:07 +08:00
commit 5e9177e775
81 changed files with 1243 additions and 1533 deletions

View File

@ -1,6 +1,6 @@
from fastapi import HTTPException from fastapi import HTTPException
from rich.traceback import install from rich.traceback import install
from src.config.config import BotConfig from src.config.config import Config
from src.common.logger_manager import get_logger from src.common.logger_manager import get_logger
import os import os
@ -14,8 +14,8 @@ async def reload_config():
from src.config import config as config_module from src.config import config as config_module
logger.debug("正在重载配置文件...") logger.debug("正在重载配置文件...")
bot_config_path = os.path.join(BotConfig.get_config_dir(), "bot_config.toml") bot_config_path = os.path.join(Config.get_config_dir(), "bot_config.toml")
config_module.global_config = BotConfig.load_config(config_path=bot_config_path) config_module.global_config = Config.load_config(config_path=bot_config_path)
logger.debug("配置文件重载成功") logger.debug("配置文件重载成功")
return {"status": "reloaded"} return {"status": "reloaded"}
except FileNotFoundError as e: except FileNotFoundError as e:

View File

@ -369,14 +369,15 @@ class EmojiManager:
def __init__(self): def __init__(self):
self._initialized = None self._initialized = None
self._scan_task = None self._scan_task = None
self.vlm = LLMRequest(model=global_config.vlm, temperature=0.3, max_tokens=1000, request_type="emoji")
self.vlm = LLMRequest(model=global_config.model.vlm, temperature=0.3, max_tokens=1000, request_type="emoji")
self.llm_emotion_judge = LLMRequest( self.llm_emotion_judge = LLMRequest(
model=global_config.llm_normal, max_tokens=600, request_type="emoji" model=global_config.model.normal, max_tokens=600, request_type="emoji"
) # 更高的温度更少的token后续可以根据情绪来调整温度 ) # 更高的温度更少的token后续可以根据情绪来调整温度
self.emoji_num = 0 self.emoji_num = 0
self.emoji_num_max = global_config.max_emoji_num self.emoji_num_max = global_config.emoji.max_reg_num
self.emoji_num_max_reach_deletion = global_config.max_reach_deletion self.emoji_num_max_reach_deletion = global_config.emoji.do_replace
self.emoji_objects: list[MaiEmoji] = [] # 存储MaiEmoji对象的列表使用类型注解明确列表元素类型 self.emoji_objects: list[MaiEmoji] = [] # 存储MaiEmoji对象的列表使用类型注解明确列表元素类型
logger.info("启动表情包管理器") logger.info("启动表情包管理器")
@ -613,18 +614,18 @@ class EmojiManager:
logger.warning(f"[警告] 表情包目录不存在: {EMOJI_DIR}") logger.warning(f"[警告] 表情包目录不存在: {EMOJI_DIR}")
os.makedirs(EMOJI_DIR, exist_ok=True) os.makedirs(EMOJI_DIR, exist_ok=True)
logger.info(f"[创建] 已创建表情包目录: {EMOJI_DIR}") logger.info(f"[创建] 已创建表情包目录: {EMOJI_DIR}")
await asyncio.sleep(global_config.EMOJI_CHECK_INTERVAL * 60) await asyncio.sleep(global_config.emoji.check_interval * 60)
continue continue
# 检查目录是否为空 # 检查目录是否为空
files = os.listdir(EMOJI_DIR) files = os.listdir(EMOJI_DIR)
if not files: if not files:
logger.warning(f"[警告] 表情包目录为空: {EMOJI_DIR}") logger.warning(f"[警告] 表情包目录为空: {EMOJI_DIR}")
await asyncio.sleep(global_config.EMOJI_CHECK_INTERVAL * 60) await asyncio.sleep(global_config.emoji.check_interval * 60)
continue continue
# 检查是否需要处理表情包(数量超过最大值或不足) # 检查是否需要处理表情包(数量超过最大值或不足)
if (self.emoji_num > self.emoji_num_max and global_config.max_reach_deletion) or ( if (self.emoji_num > self.emoji_num_max and global_config.emoji.do_replace) or (
self.emoji_num < self.emoji_num_max self.emoji_num < self.emoji_num_max
): ):
try: try:
@ -651,7 +652,7 @@ class EmojiManager:
except Exception as e: except Exception as e:
logger.error(f"[错误] 扫描表情包目录失败: {str(e)}") logger.error(f"[错误] 扫描表情包目录失败: {str(e)}")
await asyncio.sleep(global_config.EMOJI_CHECK_INTERVAL * 60) await asyncio.sleep(global_config.emoji.check_interval * 60)
async def get_all_emoji_from_db(self): async def get_all_emoji_from_db(self):
"""获取所有表情包并初始化为MaiEmoji类对象更新 self.emoji_objects""" """获取所有表情包并初始化为MaiEmoji类对象更新 self.emoji_objects"""
@ -788,7 +789,7 @@ class EmojiManager:
# 构建提示词 # 构建提示词
prompt = ( prompt = (
f"{global_config.BOT_NICKNAME}的表情包存储已满({self.emoji_num}/{self.emoji_num_max})" f"{global_config.bot.nickname}的表情包存储已满({self.emoji_num}/{self.emoji_num_max})"
f"需要决定是否删除一个旧表情包来为新表情包腾出空间。\n\n" f"需要决定是否删除一个旧表情包来为新表情包腾出空间。\n\n"
f"新表情包信息:\n" f"新表情包信息:\n"
f"描述: {new_emoji.description}\n\n" f"描述: {new_emoji.description}\n\n"
@ -871,10 +872,10 @@ class EmojiManager:
description, _ = await self.vlm.generate_response_for_image(prompt, image_base64, image_format) description, _ = await self.vlm.generate_response_for_image(prompt, image_base64, image_format)
# 审核表情包 # 审核表情包
if global_config.EMOJI_CHECK: if global_config.emoji.content_filtration:
prompt = f''' prompt = f'''
这是一个表情包请对这个表情包进行审核标准如下 这是一个表情包请对这个表情包进行审核标准如下
1. 必须符合"{global_config.EMOJI_CHECK_PROMPT}"的要求 1. 必须符合"{global_config.emoji.filtration_prompt}"的要求
2. 不能是色情暴力等违法违规内容必须符合公序良俗 2. 不能是色情暴力等违法违规内容必须符合公序良俗
3. 不能是任何形式的截图聊天记录或视频截图 3. 不能是任何形式的截图聊天记录或视频截图
4. 不要出现5个以上文字 4. 不要出现5个以上文字

View File

@ -26,9 +26,10 @@ logger = get_logger("expressor")
class DefaultExpressor: class DefaultExpressor:
def __init__(self, chat_id: str): def __init__(self, chat_id: str):
self.log_prefix = "expressor" self.log_prefix = "expressor"
# TODO: API-Adapter修改标记
self.express_model = LLMRequest( self.express_model = LLMRequest(
model=global_config.llm_normal, model=global_config.model.normal,
temperature=global_config.llm_normal["temp"], temperature=global_config.model.normal["temp"],
max_tokens=256, max_tokens=256,
request_type="response_heartflow", request_type="response_heartflow",
) )
@ -52,8 +53,8 @@ class DefaultExpressor:
messageinfo = anchor_message.message_info messageinfo = anchor_message.message_info
thinking_time_point = parse_thinking_id_to_timestamp(thinking_id) thinking_time_point = parse_thinking_id_to_timestamp(thinking_id)
bot_user_info = UserInfo( bot_user_info = UserInfo(
user_id=global_config.BOT_QQ, user_id=global_config.bot.qq_account,
user_nickname=global_config.BOT_NICKNAME, user_nickname=global_config.bot.nickname,
platform=messageinfo.platform, platform=messageinfo.platform,
) )
# logger.debug(f"创建思考消息:{anchor_message}") # logger.debug(f"创建思考消息:{anchor_message}")
@ -153,7 +154,7 @@ class DefaultExpressor:
try: try:
# 1. 获取情绪影响因子并调整模型温度 # 1. 获取情绪影响因子并调整模型温度
arousal_multiplier = mood_manager.get_arousal_multiplier() arousal_multiplier = mood_manager.get_arousal_multiplier()
current_temp = float(global_config.llm_normal["temp"]) * arousal_multiplier current_temp = float(global_config.model.normal["temp"]) * arousal_multiplier
self.express_model.params["temperature"] = current_temp # 动态调整温度 self.express_model.params["temperature"] = current_temp # 动态调整温度
# 2. 获取信息捕捉器 # 2. 获取信息捕捉器
@ -195,6 +196,7 @@ class DefaultExpressor:
try: try:
with Timer("LLM生成", {}): # 内部计时器,可选保留 with Timer("LLM生成", {}): # 内部计时器,可选保留
# TODO: API-Adapter修改标记
# logger.info(f"{self.log_prefix}[Replier-{thinking_id}]\nPrompt:\n{prompt}\n") # logger.info(f"{self.log_prefix}[Replier-{thinking_id}]\nPrompt:\n{prompt}\n")
content, reasoning_content, model_name = await self.express_model.generate_response(prompt) content, reasoning_content, model_name = await self.express_model.generate_response(prompt)
@ -342,8 +344,8 @@ class DefaultExpressor:
thinking_start_time = await self.heart_fc_sender.get_thinking_start_time(self.chat_id, thinking_id) thinking_start_time = await self.heart_fc_sender.get_thinking_start_time(self.chat_id, thinking_id)
bot_user_info = UserInfo( bot_user_info = UserInfo(
user_id=global_config.BOT_QQ, user_id=global_config.bot.qq_account,
user_nickname=global_config.BOT_NICKNAME, user_nickname=global_config.bot.nickname,
platform=self.chat_stream.platform, platform=self.chat_stream.platform,
) )

View File

@ -77,8 +77,9 @@ def init_prompt() -> None:
class ExpressionLearner: class ExpressionLearner:
def __init__(self) -> None: def __init__(self) -> None:
# TODO: API-Adapter修改标记
self.express_learn_model: LLMRequest = LLMRequest( self.express_learn_model: LLMRequest = LLMRequest(
model=global_config.llm_normal, model=global_config.model.normal,
temperature=0.1, temperature=0.1,
max_tokens=256, max_tokens=256,
request_type="response_heartflow", request_type="response_heartflow",
@ -289,7 +290,7 @@ class ExpressionLearner:
# 构建prompt # 构建prompt
prompt = await global_prompt_manager.format_prompt( prompt = await global_prompt_manager.format_prompt(
"personality_expression_prompt", "personality_expression_prompt",
personality=global_config.expression_style, personality=global_config.personality.expression_style,
) )
# logger.info(f"个性表达方式提取prompt: {prompt}") # logger.info(f"个性表达方式提取prompt: {prompt}")

View File

@ -91,7 +91,6 @@ class HeartFChatting:
self.action_manager = ActionManager() self.action_manager = ActionManager()
self.action_planner = ActionPlanner(log_prefix=self.log_prefix, action_manager=self.action_manager, stream_id=self.stream_id, chat_stream=self.chat_stream) self.action_planner = ActionPlanner(log_prefix=self.log_prefix, action_manager=self.action_manager, stream_id=self.stream_id, chat_stream=self.chat_stream)
# --- 处理器列表 --- # --- 处理器列表 ---
self.processors: List[BaseProcessor] = [] self.processors: List[BaseProcessor] = []
self._register_default_processors() self._register_default_processors()
@ -526,5 +525,3 @@ class HeartFChatting:
if last_n is not None: if last_n is not None:
history = history[-last_n:] history = history[-last_n:]
return [cycle.to_dict() for cycle in history] return [cycle.to_dict() for cycle in history]

View File

@ -112,7 +112,7 @@ def _check_ban_words(text: str, chat, userinfo) -> bool:
Returns: Returns:
bool: 是否包含过滤词 bool: 是否包含过滤词
""" """
for word in global_config.ban_words: for word in global_config.chat.ban_words:
if word in text: if word in text:
chat_name = chat.group_info.group_name if chat.group_info else "私聊" chat_name = chat.group_info.group_name if chat.group_info else "私聊"
logger.info(f"[{chat_name}]{userinfo.user_nickname}:{text}") logger.info(f"[{chat_name}]{userinfo.user_nickname}:{text}")
@ -132,7 +132,7 @@ def _check_ban_regex(text: str, chat, userinfo) -> bool:
Returns: Returns:
bool: 是否匹配过滤正则 bool: 是否匹配过滤正则
""" """
for pattern in global_config.ban_msgs_regex: for pattern in global_config.chat.ban_msgs_regex:
if pattern.search(text): if pattern.search(text):
chat_name = chat.group_info.group_name if chat.group_info else "私聊" chat_name = chat.group_info.group_name if chat.group_info else "私聊"
logger.info(f"[{chat_name}]{userinfo.user_nickname}:{text}") logger.info(f"[{chat_name}]{userinfo.user_nickname}:{text}")

View File

@ -143,7 +143,7 @@ async def _build_prompt_focus(
message_list_before_now = get_raw_msg_before_timestamp_with_chat( message_list_before_now = get_raw_msg_before_timestamp_with_chat(
chat_id=chat_stream.stream_id, chat_id=chat_stream.stream_id,
timestamp=time.time(), timestamp=time.time(),
limit=global_config.observation_context_size, limit=global_config.chat.observation_context_size,
) )
chat_talking_prompt = await build_readable_messages( chat_talking_prompt = await build_readable_messages(
message_list_before_now, message_list_before_now,
@ -216,7 +216,7 @@ async def _build_prompt_focus(
chat_target=chat_target_1, # Used in group template chat_target=chat_target_1, # Used in group template
# chat_talking_prompt=chat_talking_prompt, # chat_talking_prompt=chat_talking_prompt,
chat_info=chat_talking_prompt, chat_info=chat_talking_prompt,
bot_name=global_config.BOT_NICKNAME, bot_name=global_config.bot.nickname,
# prompt_personality=prompt_personality, # prompt_personality=prompt_personality,
prompt_personality="", prompt_personality="",
reason=reason, reason=reason,
@ -232,7 +232,7 @@ async def _build_prompt_focus(
info_from_tools=structured_info_prompt, info_from_tools=structured_info_prompt,
sender_name=effective_sender_name, # Used in private template sender_name=effective_sender_name, # Used in private template
chat_talking_prompt=chat_talking_prompt, chat_talking_prompt=chat_talking_prompt,
bot_name=global_config.BOT_NICKNAME, bot_name=global_config.bot.nickname,
prompt_personality=prompt_personality, prompt_personality=prompt_personality,
# chat_target and chat_target_2 are not used in private template # chat_target and chat_target_2 are not used in private template
current_mind_info=current_mind_info, current_mind_info=current_mind_info,
@ -287,7 +287,7 @@ class PromptBuilder:
who_chat_in_group = get_recent_group_speaker( who_chat_in_group = get_recent_group_speaker(
chat_stream.stream_id, chat_stream.stream_id,
(chat_stream.user_info.platform, chat_stream.user_info.user_id) if chat_stream.user_info else None, (chat_stream.user_info.platform, chat_stream.user_info.user_id) if chat_stream.user_info else None,
limit=global_config.observation_context_size, limit=global_config.chat.observation_context_size,
) )
elif chat_stream.user_info: elif chat_stream.user_info:
who_chat_in_group.append( who_chat_in_group.append(
@ -335,7 +335,7 @@ class PromptBuilder:
message_list_before_now = get_raw_msg_before_timestamp_with_chat( message_list_before_now = get_raw_msg_before_timestamp_with_chat(
chat_id=chat_stream.stream_id, chat_id=chat_stream.stream_id,
timestamp=time.time(), timestamp=time.time(),
limit=global_config.observation_context_size, limit=global_config.chat.observation_context_size,
) )
chat_talking_prompt = await build_readable_messages( chat_talking_prompt = await build_readable_messages(
message_list_before_now, message_list_before_now,
@ -347,18 +347,15 @@ class PromptBuilder:
# 关键词检测与反应 # 关键词检测与反应
keywords_reaction_prompt = "" keywords_reaction_prompt = ""
for rule in global_config.keywords_reaction_rules: for rule in global_config.keyword_reaction.rules:
if rule.get("enable", False): if rule.enable:
if any(keyword in message_txt.lower() for keyword in rule.get("keywords", [])): if any(keyword in message_txt for keyword in rule.keywords):
logger.info( logger.info(f"检测到以下关键词之一:{rule.keywords},触发反应:{rule.reaction}")
f"检测到以下关键词之一:{rule.get('keywords', [])},触发反应:{rule.get('reaction', '')}" keywords_reaction_prompt += f"{rule.reaction}"
)
keywords_reaction_prompt += rule.get("reaction", "") + ""
else: else:
for pattern in rule.get("regex", []): for pattern in rule.regex:
result = pattern.search(message_txt) if result := pattern.search(message_txt):
if result: reaction = rule.reaction
reaction = rule.get("reaction", "")
for name, content in result.groupdict().items(): for name, content in result.groupdict().items():
reaction = reaction.replace(f"[{name}]", content) reaction = reaction.replace(f"[{name}]", content)
logger.info(f"匹配到以下正则表达式:{pattern},触发反应:{reaction}") logger.info(f"匹配到以下正则表达式:{pattern},触发反应:{reaction}")
@ -410,8 +407,8 @@ class PromptBuilder:
nickname_info=nickname_injection_str, # <--- 注入绰号信息 nickname_info=nickname_injection_str, # <--- 注入绰号信息
chat_talking_prompt=chat_talking_prompt, chat_talking_prompt=chat_talking_prompt,
message_txt=message_txt, message_txt=message_txt,
bot_name=global_config.BOT_NICKNAME, bot_name=global_config.bot.nickname,
bot_other_names="/".join(global_config.BOT_ALIAS_NAMES), bot_other_names="/".join(global_config.bot.alias_names),
prompt_personality=prompt_personality, prompt_personality=prompt_personality,
mood_prompt=mood_prompt, mood_prompt=mood_prompt,
reply_style1=reply_style1_chosen, reply_style1=reply_style1_chosen,
@ -432,8 +429,8 @@ class PromptBuilder:
prompt_info=prompt_info, prompt_info=prompt_info,
chat_talking_prompt=chat_talking_prompt, chat_talking_prompt=chat_talking_prompt,
message_txt=message_txt, message_txt=message_txt,
bot_name=global_config.BOT_NICKNAME, bot_name=global_config.bot.nickname,
bot_other_names="/".join(global_config.BOT_ALIAS_NAMES), bot_other_names="/".join(global_config.bot.alias_names),
prompt_personality=prompt_personality, prompt_personality=prompt_personality,
mood_prompt=mood_prompt, mood_prompt=mood_prompt,
reply_style1=reply_style1_chosen, reply_style1=reply_style1_chosen,

View File

@ -26,8 +26,9 @@ class ChattingInfoProcessor(BaseProcessor):
def __init__(self): def __init__(self):
"""初始化观察处理器""" """初始化观察处理器"""
super().__init__() super().__init__()
# TODO: API-Adapter修改标记
self.llm_summary = LLMRequest( self.llm_summary = LLMRequest(
model=global_config.llm_observation, temperature=0.7, max_tokens=300, request_type="chat_observation" model=global_config.model.observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
) )
async def process_info( async def process_info(
@ -108,12 +109,12 @@ class ChattingInfoProcessor(BaseProcessor):
"created_at": datetime.now().timestamp(), "created_at": datetime.now().timestamp(),
} }
obs.mid_memorys.append(mid_memory) obs.mid_memories.append(mid_memory)
if len(obs.mid_memorys) > obs.max_mid_memory_len: if len(obs.mid_memories) > obs.max_mid_memory_len:
obs.mid_memorys.pop(0) # 移除最旧的 obs.mid_memories.pop(0) # 移除最旧的
mid_memory_str = "之前聊天的内容概述是:\n" mid_memory_str = "之前聊天的内容概述是:\n"
for mid_memory_item in obs.mid_memorys: # 重命名循环变量以示区分 for mid_memory_item in obs.mid_memories: # 重命名循环变量以示区分
time_diff = int((datetime.now().timestamp() - mid_memory_item["created_at"]) / 60) time_diff = int((datetime.now().timestamp() - mid_memory_item["created_at"]) / 60)
mid_memory_str += ( mid_memory_str += (
f"距离现在{time_diff}分钟前(聊天记录id:{mid_memory_item['id']}){mid_memory_item['theme']}\n" f"距离现在{time_diff}分钟前(聊天记录id:{mid_memory_item['id']}){mid_memory_item['theme']}\n"

View File

@ -81,8 +81,8 @@ class MindProcessor(BaseProcessor):
self.subheartflow_id = subheartflow_id self.subheartflow_id = subheartflow_id
self.llm_model = LLMRequest( self.llm_model = LLMRequest(
model=global_config.llm_sub_heartflow, model=global_config.model.sub_heartflow,
temperature=global_config.llm_sub_heartflow["temp"], temperature=global_config.model.sub_heartflow["temp"],
max_tokens=800, max_tokens=800,
request_type="sub_heart_flow", request_type="sub_heart_flow",
) )

View File

@ -52,7 +52,7 @@ class ToolProcessor(BaseProcessor):
self.subheartflow_id = subheartflow_id self.subheartflow_id = subheartflow_id
self.log_prefix = f"[{subheartflow_id}:ToolExecutor] " self.log_prefix = f"[{subheartflow_id}:ToolExecutor] "
self.llm_model = LLMRequest( self.llm_model = LLMRequest(
model=global_config.llm_tool_use, model=global_config.model.tool_use,
max_tokens=500, max_tokens=500,
request_type="tool_execution", request_type="tool_execution",
) )

View File

@ -34,8 +34,9 @@ def init_prompt():
class MemoryActivator: class MemoryActivator:
def __init__(self): def __init__(self):
# TODO: API-Adapter修改标记
self.summary_model = LLMRequest( self.summary_model = LLMRequest(
model=global_config.llm_summary, temperature=0.7, max_tokens=50, request_type="chat_observation" model=global_config.model.summary, temperature=0.7, max_tokens=50, request_type="chat_observation"
) )
self.running_memory = [] self.running_memory = []

View File

@ -1,7 +1,5 @@
from typing import Dict, List, Optional, Callable, Coroutine, Type, Any, Union from typing import Dict, List, Optional, Callable, Coroutine, Type, Any
import os from src.chat.focus_chat.planners.actions.base_action import BaseAction, _ACTION_REGISTRY
import importlib
from src.chat.focus_chat.planners.actions.base_action import BaseAction, _ACTION_REGISTRY, _DEFAULT_ACTIONS
from src.chat.heart_flow.observation.observation import Observation from src.chat.heart_flow.observation.observation import Observation
from src.chat.focus_chat.expressors.default_expressor import DefaultExpressor from src.chat.focus_chat.expressors.default_expressor import DefaultExpressor
from src.chat.message_receive.chat_stream import ChatStream from src.chat.message_receive.chat_stream import ChatStream
@ -9,8 +7,6 @@ from src.chat.focus_chat.heartFC_Cycleinfo import CycleDetail
from src.common.logger_manager import get_logger from src.common.logger_manager import get_logger
# 导入动作类,确保装饰器被执行 # 导入动作类,确保装饰器被执行
from src.chat.focus_chat.planners.actions.reply_action import ReplyAction
from src.chat.focus_chat.planners.actions.no_reply_action import NoReplyAction
logger = get_logger("action_factory") logger = get_logger("action_factory")
@ -31,20 +27,19 @@ class ActionManager:
self._using_actions: Dict[str, ActionInfo] = {} self._using_actions: Dict[str, ActionInfo] = {}
# 临时备份原始使用中的动作 # 临时备份原始使用中的动作
self._original_actions_backup: Optional[Dict[str, ActionInfo]] = None self._original_actions_backup: Optional[Dict[str, ActionInfo]] = None
# 默认动作集,仅作为快照,用于恢复默认 # 默认动作集,仅作为快照,用于恢复默认
self._default_actions: Dict[str, ActionInfo] = {} self._default_actions: Dict[str, ActionInfo] = {}
# 加载所有已注册动作 # 加载所有已注册动作
self._load_registered_actions() self._load_registered_actions()
# 初始化时将默认动作加载到使用中的动作 # 初始化时将默认动作加载到使用中的动作
self._using_actions = self._default_actions.copy() self._using_actions = self._default_actions.copy()
# logger.info(f"当前可用动作: {list(self._using_actions.keys())}") # logger.info(f"当前可用动作: {list(self._using_actions.keys())}")
# for action_name, action_info in self._using_actions.items(): # for action_name, action_info in self._using_actions.items():
# logger.info(f"动作名称: {action_name}, 动作信息: {action_info}") # logger.info(f"动作名称: {action_name}, 动作信息: {action_info}")
def _load_registered_actions(self) -> None: def _load_registered_actions(self) -> None:
""" """
@ -54,35 +49,35 @@ class ActionManager:
# 从_ACTION_REGISTRY获取所有已注册动作 # 从_ACTION_REGISTRY获取所有已注册动作
for action_name, action_class in _ACTION_REGISTRY.items(): for action_name, action_class in _ACTION_REGISTRY.items():
# 获取动作相关信息 # 获取动作相关信息
action_description:str = getattr(action_class, "action_description", "") action_description: str = getattr(action_class, "action_description", "")
action_parameters:dict[str:str] = getattr(action_class, "action_parameters", {}) action_parameters: dict[str:str] = getattr(action_class, "action_parameters", {})
action_require:list[str] = getattr(action_class, "action_require", []) action_require: list[str] = getattr(action_class, "action_require", [])
is_default:bool = getattr(action_class, "default", False) is_default: bool = getattr(action_class, "default", False)
if action_name and action_description: if action_name and action_description:
# 创建动作信息字典 # 创建动作信息字典
action_info = { action_info = {
"description": action_description, "description": action_description,
"parameters": action_parameters, "parameters": action_parameters,
"require": action_require "require": action_require,
} }
# 注册2 # 注册2
print("注册2") print("注册2")
print(action_info) print(action_info)
# 添加到所有已注册的动作 # 添加到所有已注册的动作
self._registered_actions[action_name] = action_info self._registered_actions[action_name] = action_info
# 添加到默认动作(如果是默认动作) # 添加到默认动作(如果是默认动作)
if is_default: if is_default:
self._default_actions[action_name] = action_info self._default_actions[action_name] = action_info
logger.info(f"所有注册动作: {list(self._registered_actions.keys())}") logger.info(f"所有注册动作: {list(self._registered_actions.keys())}")
logger.info(f"默认动作: {list(self._default_actions.keys())}") logger.info(f"默认动作: {list(self._default_actions.keys())}")
# for action_name, action_info in self._default_actions.items(): # for action_name, action_info in self._default_actions.items():
# logger.info(f"动作名称: {action_name}, 动作信息: {action_info}") # logger.info(f"动作名称: {action_name}, 动作信息: {action_info}")
except Exception as e: except Exception as e:
logger.error(f"加载已注册动作失败: {e}") logger.error(f"加载已注册动作失败: {e}")
@ -129,7 +124,7 @@ class ActionManager:
if action_name not in self._using_actions: if action_name not in self._using_actions:
logger.warning(f"当前不可用的动作类型: {action_name}") logger.warning(f"当前不可用的动作类型: {action_name}")
return None return None
handler_class = _ACTION_REGISTRY.get(action_name) handler_class = _ACTION_REGISTRY.get(action_name)
if not handler_class: if not handler_class:
logger.warning(f"未注册的动作类型: {action_name}") logger.warning(f"未注册的动作类型: {action_name}")
@ -153,7 +148,7 @@ class ActionManager:
expressor=expressor, expressor=expressor,
chat_stream=chat_stream, chat_stream=chat_stream,
) )
return instance return instance
except Exception as e: except Exception as e:
@ -167,7 +162,7 @@ class ActionManager:
def get_default_actions(self) -> Dict[str, ActionInfo]: def get_default_actions(self) -> Dict[str, ActionInfo]:
"""获取默认动作集""" """获取默认动作集"""
return self._default_actions.copy() return self._default_actions.copy()
def get_using_actions(self) -> Dict[str, ActionInfo]: def get_using_actions(self) -> Dict[str, ActionInfo]:
"""获取当前正在使用的动作集""" """获取当前正在使用的动作集"""
return self._using_actions.copy() return self._using_actions.copy()
@ -175,21 +170,21 @@ class ActionManager:
def add_action_to_using(self, action_name: str) -> bool: def add_action_to_using(self, action_name: str) -> bool:
""" """
添加已注册的动作到当前使用的动作集 添加已注册的动作到当前使用的动作集
Args: Args:
action_name: 动作名称 action_name: 动作名称
Returns: Returns:
bool: 添加是否成功 bool: 添加是否成功
""" """
if action_name not in self._registered_actions: if action_name not in self._registered_actions:
logger.warning(f"添加失败: 动作 {action_name} 未注册") logger.warning(f"添加失败: 动作 {action_name} 未注册")
return False return False
if action_name in self._using_actions: if action_name in self._using_actions:
logger.info(f"动作 {action_name} 已经在使用中") logger.info(f"动作 {action_name} 已经在使用中")
return True return True
self._using_actions[action_name] = self._registered_actions[action_name] self._using_actions[action_name] = self._registered_actions[action_name]
logger.info(f"添加动作 {action_name} 到使用集") logger.info(f"添加动作 {action_name} 到使用集")
return True return True
@ -197,17 +192,17 @@ class ActionManager:
def remove_action_from_using(self, action_name: str) -> bool: def remove_action_from_using(self, action_name: str) -> bool:
""" """
从当前使用的动作集中移除指定动作 从当前使用的动作集中移除指定动作
Args: Args:
action_name: 动作名称 action_name: 动作名称
Returns: Returns:
bool: 移除是否成功 bool: 移除是否成功
""" """
if action_name not in self._using_actions: if action_name not in self._using_actions:
logger.warning(f"移除失败: 动作 {action_name} 不在当前使用的动作集中") logger.warning(f"移除失败: 动作 {action_name} 不在当前使用的动作集中")
return False return False
del self._using_actions[action_name] del self._using_actions[action_name]
logger.info(f"已从使用集中移除动作 {action_name}") logger.info(f"已从使用集中移除动作 {action_name}")
return True return True
@ -215,30 +210,26 @@ class ActionManager:
def add_action(self, action_name: str, description: str, parameters: Dict = None, require: List = None) -> bool: def add_action(self, action_name: str, description: str, parameters: Dict = None, require: List = None) -> bool:
""" """
添加新的动作到注册集 添加新的动作到注册集
Args: Args:
action_name: 动作名称 action_name: 动作名称
description: 动作描述 description: 动作描述
parameters: 动作参数定义默认为空字典 parameters: 动作参数定义默认为空字典
require: 动作依赖项默认为空列表 require: 动作依赖项默认为空列表
Returns: Returns:
bool: 添加是否成功 bool: 添加是否成功
""" """
if action_name in self._registered_actions: if action_name in self._registered_actions:
return False return False
if parameters is None: if parameters is None:
parameters = {} parameters = {}
if require is None: if require is None:
require = [] require = []
action_info = { action_info = {"description": description, "parameters": parameters, "require": require}
"description": description,
"parameters": parameters,
"require": require
}
self._registered_actions[action_name] = action_info self._registered_actions[action_name] = action_info
return True return True
@ -264,7 +255,7 @@ class ActionManager:
if self._original_actions_backup is not None: if self._original_actions_backup is not None:
self._using_actions = self._original_actions_backup.copy() self._using_actions = self._original_actions_backup.copy()
self._original_actions_backup = None self._original_actions_backup = None
def restore_default_actions(self) -> None: def restore_default_actions(self) -> None:
"""恢复默认动作集到使用集""" """恢复默认动作集到使用集"""
self._using_actions = self._default_actions.copy() self._using_actions = self._default_actions.copy()
@ -273,10 +264,10 @@ class ActionManager:
def get_action(self, action_name: str) -> Optional[Type[BaseAction]]: def get_action(self, action_name: str) -> Optional[Type[BaseAction]]:
""" """
获取指定动作的处理器类 获取指定动作的处理器类
Args: Args:
action_name: 动作名称 action_name: 动作名称
Returns: Returns:
Optional[Type[BaseAction]]: 动作处理器类如果不存在则返回None Optional[Type[BaseAction]]: 动作处理器类如果不存在则返回None
""" """

View File

@ -12,7 +12,7 @@ _DEFAULT_ACTIONS: Dict[str, str] = {}
def register_action(cls): def register_action(cls):
""" """
动作注册装饰器 动作注册装饰器
用法: 用法:
@register_action @register_action
class MyAction(BaseAction): class MyAction(BaseAction):
@ -24,22 +24,22 @@ def register_action(cls):
if not hasattr(cls, "action_name") or not hasattr(cls, "action_description"): if not hasattr(cls, "action_name") or not hasattr(cls, "action_description"):
logger.error(f"动作类 {cls.__name__} 缺少必要的属性: action_name 或 action_description") logger.error(f"动作类 {cls.__name__} 缺少必要的属性: action_name 或 action_description")
return cls return cls
action_name = getattr(cls, "action_name") action_name = cls.action_name
action_description = getattr(cls, "action_description") action_description = cls.action_description
is_default = getattr(cls, "default", False) is_default = getattr(cls, "default", False)
if not action_name or not action_description: if not action_name or not action_description:
logger.error(f"动作类 {cls.__name__} 的 action_name 或 action_description 为空") logger.error(f"动作类 {cls.__name__} 的 action_name 或 action_description 为空")
return cls return cls
# 将动作类注册到全局注册表 # 将动作类注册到全局注册表
_ACTION_REGISTRY[action_name] = cls _ACTION_REGISTRY[action_name] = cls
# 如果是默认动作,添加到默认动作集 # 如果是默认动作,添加到默认动作集
if is_default: if is_default:
_DEFAULT_ACTIONS[action_name] = action_description _DEFAULT_ACTIONS[action_name] = action_description
logger.info(f"已注册动作: {action_name} -> {cls.__name__},默认: {is_default}") logger.info(f"已注册动作: {action_name} -> {cls.__name__},默认: {is_default}")
return cls return cls
@ -60,15 +60,14 @@ class BaseAction(ABC):
cycle_timers: 计时器字典 cycle_timers: 计时器字典
thinking_id: 思考ID thinking_id: 思考ID
""" """
#每个动作必须实现 # 每个动作必须实现
self.action_name:str = "base_action" self.action_name: str = "base_action"
self.action_description:str = "基础动作" self.action_description: str = "基础动作"
self.action_parameters:dict = {} self.action_parameters: dict = {}
self.action_require:list[str] = [] self.action_require: list[str] = []
self.default:bool = False self.default: bool = False
self.action_data = action_data self.action_data = action_data
self.reasoning = reasoning self.reasoning = reasoning
self.cycle_timers = cycle_timers self.cycle_timers = cycle_timers

View File

@ -29,7 +29,7 @@ class NoReplyAction(BaseAction):
action_require = [ action_require = [
"话题无关/无聊/不感兴趣/不懂", "话题无关/无聊/不感兴趣/不懂",
"最后一条消息是你自己发的且无人回应你", "最后一条消息是你自己发的且无人回应你",
"你发送了太多消息,且无人回复" "你发送了太多消息,且无人回复",
] ]
default = True default = True
@ -46,7 +46,7 @@ class NoReplyAction(BaseAction):
total_no_reply_count: int = 0, total_no_reply_count: int = 0,
total_waiting_time: float = 0.0, total_waiting_time: float = 0.0,
shutting_down: bool = False, shutting_down: bool = False,
**kwargs **kwargs,
): ):
"""初始化不回复动作处理器 """初始化不回复动作处理器

View File

@ -2,9 +2,8 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from src.common.logger_manager import get_logger from src.common.logger_manager import get_logger
from src.chat.utils.timer_calculator import Timer
from src.chat.focus_chat.planners.actions.base_action import BaseAction, register_action from src.chat.focus_chat.planners.actions.base_action import BaseAction, register_action
from typing import Tuple, List, Optional from typing import Tuple, List
from src.chat.heart_flow.observation.observation import Observation from src.chat.heart_flow.observation.observation import Observation
from src.chat.focus_chat.expressors.default_expressor import DefaultExpressor from src.chat.focus_chat.expressors.default_expressor import DefaultExpressor
from src.chat.message_receive.chat_stream import ChatStream from src.chat.message_receive.chat_stream import ChatStream
@ -22,14 +21,14 @@ class ReplyAction(BaseAction):
处理构建和发送消息回复的动作 处理构建和发送消息回复的动作
""" """
action_name:str = "reply" action_name: str = "reply"
action_description:str = "表达想法,可以只包含文本、表情或两者都有" action_description: str = "表达想法,可以只包含文本、表情或两者都有"
action_parameters:dict[str:str] = { action_parameters: dict[str:str] = {
"text": "你想要表达的内容(可选)", "text": "你想要表达的内容(可选)",
"emojis": "描述当前使用表情包的场景(可选)", "emojis": "描述当前使用表情包的场景(可选)",
"target": "你想要回复的原始文本内容(非必须,仅文本,不包含发送者)(可选)", "target": "你想要回复的原始文本内容(非必须,仅文本,不包含发送者)(可选)",
} }
action_require:list[str] = [ action_require: list[str] = [
"有实质性内容需要表达", "有实质性内容需要表达",
"有人提到你,但你还没有回应他", "有人提到你,但你还没有回应他",
"在合适的时候添加表情(不要总是添加)", "在合适的时候添加表情(不要总是添加)",
@ -38,7 +37,7 @@ class ReplyAction(BaseAction):
"一次只回复一个人,一次只回复一个话题,突出重点", "一次只回复一个人,一次只回复一个话题,突出重点",
"如果是自己发的消息想继续,需自然衔接", "如果是自己发的消息想继续,需自然衔接",
"避免重复或评价自己的发言,不要和自己聊天", "避免重复或评价自己的发言,不要和自己聊天",
"注意:回复尽量简短一些。可以参考贴吧,知乎和微博的回复风格,回复不要浮夸,不要用夸张修辞,平淡一些。" "注意:回复尽量简短一些。可以参考贴吧,知乎和微博的回复风格,回复不要浮夸,不要用夸张修辞,平淡一些。",
] ]
default = True default = True
@ -54,7 +53,7 @@ class ReplyAction(BaseAction):
chat_stream: ChatStream, chat_stream: ChatStream,
current_cycle: CycleDetail, current_cycle: CycleDetail,
log_prefix: str, log_prefix: str,
**kwargs **kwargs,
): ):
"""初始化回复动作处理器 """初始化回复动作处理器
@ -89,9 +88,9 @@ class ReplyAction(BaseAction):
reasoning=self.reasoning, reasoning=self.reasoning,
reply_data=self.action_data, reply_data=self.action_data,
cycle_timers=self.cycle_timers, cycle_timers=self.cycle_timers,
thinking_id=self.thinking_id thinking_id=self.thinking_id,
) )
async def _handle_reply( async def _handle_reply(
self, reasoning: str, reply_data: dict, cycle_timers: dict, thinking_id: str self, reasoning: str, reply_data: dict, cycle_timers: dict, thinking_id: str
) -> tuple[bool, str]: ) -> tuple[bool, str]:

View File

@ -6,7 +6,6 @@ from rich.traceback import install
from src.chat.message_receive.chat_stream import ChatStream from src.chat.message_receive.chat_stream import ChatStream
from src.chat.models.utils_model import LLMRequest from src.chat.models.utils_model import LLMRequest
from src.config.config import global_config from src.config.config import global_config
from src.chat.focus_chat.heartflow_prompt_builder import prompt_builder
from src.chat.focus_chat.info.info_base import InfoBase from src.chat.focus_chat.info.info_base import InfoBase
from src.chat.focus_chat.info.obs_info import ObsInfo from src.chat.focus_chat.info.obs_info import ObsInfo
from src.chat.focus_chat.info.cycle_info import CycleInfo from src.chat.focus_chat.info.cycle_info import CycleInfo
@ -24,6 +23,7 @@ logger = get_logger("planner")
install(extra_lines=3) install(extra_lines=3)
def init_prompt(): def init_prompt():
Prompt( Prompt(
"""你的名字是{bot_name},{prompt_personality}{chat_context_description}。需要基于以下信息决定如何参与对话: """你的名字是{bot_name},{prompt_personality}{chat_context_description}。需要基于以下信息决定如何参与对话:
@ -50,8 +50,9 @@ def init_prompt():
}} }}
请输出你的决策 JSON""", 请输出你的决策 JSON""",
"planner_prompt",) "planner_prompt",
)
Prompt( Prompt(
""" """
action_name: {action_name} action_name: {action_name}
@ -63,14 +64,14 @@ action_name: {action_name}
""", """,
"action_prompt", "action_prompt",
) )
class ActionPlanner: class ActionPlanner:
def __init__(self, log_prefix: str, action_manager: ActionManager, stream_id: str, chat_stream: ChatStream): def __init__(self, log_prefix: str, action_manager: ActionManager, stream_id: str, chat_stream: ChatStream):
self.log_prefix = log_prefix self.log_prefix = log_prefix
# LLM规划器配置 # LLM规划器配置
self.planner_llm = LLMRequest( self.planner_llm = LLMRequest(
model=global_config.llm_plan, model=global_config.model.plan,
max_tokens=1000, max_tokens=1000,
request_type="action_planning", # 用于动作规划 request_type="action_planning", # 用于动作规划
) )
@ -110,10 +111,10 @@ class ActionPlanner:
cycle_info = info.get_observe_info() cycle_info = info.get_observe_info()
elif isinstance(info, StructuredInfo): elif isinstance(info, StructuredInfo):
logger.debug(f"{self.log_prefix} 结构化信息: {info}") logger.debug(f"{self.log_prefix} 结构化信息: {info}")
structured_info = info.get_data() _structured_info = info.get_data()
current_available_actions = self.action_manager.get_using_actions() current_available_actions = self.action_manager.get_using_actions()
# --- 构建提示词 (调用修改后的 PromptBuilder 方法) --- # --- 构建提示词 (调用修改后的 PromptBuilder 方法) ---
prompt = await self.build_planner_prompt( prompt = await self.build_planner_prompt(
is_group_chat=is_group_chat, # <-- Pass HFC state is_group_chat=is_group_chat, # <-- Pass HFC state
@ -204,7 +205,6 @@ class ActionPlanner:
# 返回结果字典 # 返回结果字典
return plan_result return plan_result
async def build_planner_prompt( async def build_planner_prompt(
self, self,
is_group_chat: bool, # Now passed as argument is_group_chat: bool, # Now passed as argument
@ -225,7 +225,6 @@ class ActionPlanner:
) )
chat_context_description = f"你正在和 {chat_target_name} 私聊" chat_context_description = f"你正在和 {chat_target_name} 私聊"
chat_content_block = "" chat_content_block = ""
if observed_messages_str: if observed_messages_str:
chat_content_block = f"聊天记录:\n{observed_messages_str}" chat_content_block = f"聊天记录:\n{observed_messages_str}"
@ -241,7 +240,6 @@ class ActionPlanner:
individuality = Individuality.get_instance() individuality = Individuality.get_instance()
personality_block = individuality.get_prompt(x_person=2, level=2) personality_block = individuality.get_prompt(x_person=2, level=2)
action_options_block = "" action_options_block = ""
for using_actions_name, using_actions_info in current_available_actions.items(): for using_actions_name, using_actions_info in current_available_actions.items():
# print(using_actions_name) # print(using_actions_name)
@ -251,39 +249,38 @@ class ActionPlanner:
# print(using_actions_info["description"]) # print(using_actions_info["description"])
using_action_prompt = await global_prompt_manager.get_prompt_async("action_prompt") using_action_prompt = await global_prompt_manager.get_prompt_async("action_prompt")
param_text = "" param_text = ""
for param_name, param_description in using_actions_info["parameters"].items(): for param_name, param_description in using_actions_info["parameters"].items():
param_text += f"{param_name}: {param_description}\n" param_text += f"{param_name}: {param_description}\n"
require_text = "" require_text = ""
for require_item in using_actions_info["require"]: for require_item in using_actions_info["require"]:
require_text += f"- {require_item}\n" require_text += f"- {require_item}\n"
using_action_prompt = using_action_prompt.format( using_action_prompt = using_action_prompt.format(
action_name=using_actions_name, action_name=using_actions_name,
action_description=using_actions_info["description"], action_description=using_actions_info["description"],
action_parameters=param_text, action_parameters=param_text,
action_require=require_text, action_require=require_text,
) )
action_options_block += using_action_prompt action_options_block += using_action_prompt
# 需要获取用于上下文的历史消息 # 需要获取用于上下文的历史消息
message_list_before_now = get_raw_msg_before_timestamp_with_chat( message_list_before_now = get_raw_msg_before_timestamp_with_chat(
chat_id=self.stream_id, chat_id=self.stream_id,
timestamp=time.time(), # 使用当前时间作为参考点 timestamp=time.time(), # 使用当前时间作为参考点
limit=global_config.observation_context_size, # 使用与 prompt 构建一致的 limit limit=global_config.chat.observation_context_size, # 使用与 prompt 构建一致的 limit
) )
# 调用工具函数获取格式化后的绰号字符串 # 调用工具函数获取格式化后的绰号字符串
nickname_injection_str = await nickname_manager.get_nickname_prompt_injection( nickname_injection_str = await nickname_manager.get_nickname_prompt_injection(
self.chat_stream, message_list_before_now self.chat_stream, message_list_before_now
) )
planner_prompt_template = await global_prompt_manager.get_prompt_async("planner_prompt") planner_prompt_template = await global_prompt_manager.get_prompt_async("planner_prompt")
prompt = planner_prompt_template.format( prompt = planner_prompt_template.format(
bot_name=global_config.BOT_NICKNAME, bot_name=global_config.bot.nickname,
prompt_personality=personality_block, prompt_personality=personality_block,
chat_context_description=chat_context_description, chat_context_description=chat_context_description,
chat_content_block=chat_content_block, chat_content_block=chat_content_block,

View File

@ -35,8 +35,9 @@ class Heartflow:
self.subheartflow_manager: SubHeartflowManager = SubHeartflowManager(self.current_state) self.subheartflow_manager: SubHeartflowManager = SubHeartflowManager(self.current_state)
# LLM模型配置 # LLM模型配置
# TODO: API-Adapter修改标记
self.llm_model = LLMRequest( self.llm_model = LLMRequest(
model=global_config.llm_heartflow, temperature=0.6, max_tokens=1000, request_type="heart_flow" model=global_config.model.heartflow, temperature=0.6, max_tokens=1000, request_type="heart_flow"
) )
# 外部依赖模块 # 外部依赖模块

View File

@ -20,9 +20,9 @@ MAX_REPLY_PROBABILITY = 1
class InterestChatting: class InterestChatting:
def __init__( def __init__(
self, self,
decay_rate=global_config.default_decay_rate_per_second, decay_rate=global_config.focus_chat.default_decay_rate_per_second,
max_interest=MAX_INTEREST, max_interest=MAX_INTEREST,
trigger_threshold=global_config.reply_trigger_threshold, trigger_threshold=global_config.focus_chat.reply_trigger_threshold,
max_probability=MAX_REPLY_PROBABILITY, max_probability=MAX_REPLY_PROBABILITY,
): ):
# 基础属性初始化 # 基础属性初始化

View File

@ -18,19 +18,14 @@ enable_unlimited_hfc_chat = True # 调试用:无限专注聊天
prevent_offline_state = True prevent_offline_state = True
# 目前默认不启用OFFLINE状态 # 目前默认不启用OFFLINE状态
# 不同状态下普通聊天的最大消息数 MAX_NORMAL_CHAT_NUM_PEEKING = int(global_config.chat.base_normal_chat_num / 2)
base_normal_chat_num = global_config.base_normal_chat_num MAX_NORMAL_CHAT_NUM_NORMAL = global_config.chat.base_normal_chat_num
base_focused_chat_num = global_config.base_focused_chat_num MAX_NORMAL_CHAT_NUM_FOCUSED = global_config.chat.base_normal_chat_num + 1
MAX_NORMAL_CHAT_NUM_PEEKING = int(base_normal_chat_num / 2)
MAX_NORMAL_CHAT_NUM_NORMAL = base_normal_chat_num
MAX_NORMAL_CHAT_NUM_FOCUSED = base_normal_chat_num + 1
# 不同状态下专注聊天的最大消息数 # 不同状态下专注聊天的最大消息数
MAX_FOCUSED_CHAT_NUM_PEEKING = int(base_focused_chat_num / 2) MAX_FOCUSED_CHAT_NUM_PEEKING = int(global_config.chat.base_focused_chat_num / 2)
MAX_FOCUSED_CHAT_NUM_NORMAL = base_focused_chat_num MAX_FOCUSED_CHAT_NUM_NORMAL = global_config.chat.base_focused_chat_num
MAX_FOCUSED_CHAT_NUM_FOCUSED = base_focused_chat_num + 2 MAX_FOCUSED_CHAT_NUM_FOCUSED = global_config.chat.base_focused_chat_num + 2
# -- 状态定义 -- # -- 状态定义 --

View File

@ -53,19 +53,20 @@ class ChattingObservation(Observation):
self.talking_message = [] self.talking_message = []
self.talking_message_str = "" self.talking_message_str = ""
self.talking_message_str_truncate = "" self.talking_message_str_truncate = ""
self.name = global_config.BOT_NICKNAME self.name = global_config.bot.nickname
self.nick_name = global_config.BOT_ALIAS_NAMES self.nick_name = global_config.bot.alias_names
self.max_now_obs_len = global_config.observation_context_size self.max_now_obs_len = global_config.chat.observation_context_size
self.overlap_len = global_config.compressed_length self.overlap_len = global_config.focus_chat.compressed_length
self.mid_memorys = [] self.mid_memories = []
self.max_mid_memory_len = global_config.compress_length_limit self.max_mid_memory_len = global_config.focus_chat.compress_length_limit
self.mid_memory_info = "" self.mid_memory_info = ""
self.person_list = [] self.person_list = []
self.oldest_messages = [] self.oldest_messages = []
self.oldest_messages_str = "" self.oldest_messages_str = ""
self.compressor_prompt = "" self.compressor_prompt = ""
# TODO: API-Adapter修改标记
self.llm_summary = LLMRequest( self.llm_summary = LLMRequest(
model=global_config.llm_observation, temperature=0.7, max_tokens=300, request_type="chat_observation" model=global_config.model.observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
) )
async def initialize(self): async def initialize(self):
@ -83,7 +84,7 @@ class ChattingObservation(Observation):
for id in ids: for id in ids:
print(f"id{id}") print(f"id{id}")
try: try:
for mid_memory in self.mid_memorys: for mid_memory in self.mid_memories:
if mid_memory["id"] == id: if mid_memory["id"] == id:
mid_memory_by_id = mid_memory mid_memory_by_id = mid_memory
msg_str = "" msg_str = ""
@ -101,7 +102,7 @@ class ChattingObservation(Observation):
else: else:
mid_memory_str = "之前的聊天内容:\n" mid_memory_str = "之前的聊天内容:\n"
for mid_memory in self.mid_memorys: for mid_memory in self.mid_memories:
mid_memory_str += f"{mid_memory['theme']}\n" mid_memory_str += f"{mid_memory['theme']}\n"
return mid_memory_str + "现在群里正在聊:\n" + self.talking_message_str return mid_memory_str + "现在群里正在聊:\n" + self.talking_message_str

View File

@ -76,8 +76,9 @@ class SubHeartflowManager:
# 为 LLM 状态评估创建一个 LLMRequest 实例 # 为 LLM 状态评估创建一个 LLMRequest 实例
# 使用与 Heartflow 相同的模型和参数 # 使用与 Heartflow 相同的模型和参数
# TODO: API-Adapter修改标记
self.llm_state_evaluator = LLMRequest( self.llm_state_evaluator = LLMRequest(
model=global_config.llm_heartflow, # 与 Heartflow 一致 model=global_config.model.heartflow, # 与 Heartflow 一致
temperature=0.6, # 与 Heartflow 一致 temperature=0.6, # 与 Heartflow 一致
max_tokens=1000, # 与 Heartflow 一致 (虽然可能不需要这么多) max_tokens=1000, # 与 Heartflow 一致 (虽然可能不需要这么多)
request_type="subheartflow_state_eval", # 保留特定的请求类型 request_type="subheartflow_state_eval", # 保留特定的请求类型
@ -278,7 +279,7 @@ class SubHeartflowManager:
focused_limit = current_state.get_focused_chat_max_num() focused_limit = current_state.get_focused_chat_max_num()
# --- 新增:检查是否允许进入 FOCUS 模式 --- # # --- 新增:检查是否允许进入 FOCUS 模式 --- #
if not global_config.allow_focus_mode: if not global_config.chat.allow_focus_mode:
if int(time.time()) % 60 == 0: # 每60秒输出一次日志避免刷屏 if int(time.time()) % 60 == 0: # 每60秒输出一次日志避免刷屏
logger.trace("未开启 FOCUSED 状态 (allow_focus_mode=False)") logger.trace("未开启 FOCUSED 状态 (allow_focus_mode=False)")
return # 如果不允许,直接返回 return # 如果不允许,直接返回
@ -766,7 +767,7 @@ class SubHeartflowManager:
focused_limit = current_mai_state.get_focused_chat_max_num() focused_limit = current_mai_state.get_focused_chat_max_num()
# --- 检查是否允许 FOCUS 模式 --- # # --- 检查是否允许 FOCUS 模式 --- #
if not global_config.allow_focus_mode: if not global_config.chat.allow_focus_mode:
# Log less frequently to avoid spam # Log less frequently to avoid spam
# if int(time.time()) % 60 == 0: # if int(time.time()) % 60 == 0:
# logger.debug(f"{log_prefix_task} 配置不允许进入 FOCUSED 状态") # logger.debug(f"{log_prefix_task} 配置不允许进入 FOCUSED 状态")

View File

@ -19,9 +19,10 @@ from ..utils.chat_message_builder import (
build_readable_messages, build_readable_messages,
) # 导入 build_readable_messages ) # 导入 build_readable_messages
from ..utils.utils import translate_timestamp_to_human_readable from ..utils.utils import translate_timestamp_to_human_readable
from .memory_config import MemoryConfig
from rich.traceback import install from rich.traceback import install
from ...config.config import global_config
install(extra_lines=3) install(extra_lines=3)
@ -195,18 +196,16 @@ class Hippocampus:
self.llm_summary = None self.llm_summary = None
self.entorhinal_cortex = None self.entorhinal_cortex = None
self.parahippocampal_gyrus = None self.parahippocampal_gyrus = None
self.config = None
def initialize(self, global_config): def initialize(self):
# 使用导入的 MemoryConfig dataclass 和其 from_global_config 方法
self.config = MemoryConfig.from_global_config(global_config)
# 初始化子组件 # 初始化子组件
self.entorhinal_cortex = EntorhinalCortex(self) self.entorhinal_cortex = EntorhinalCortex(self)
self.parahippocampal_gyrus = ParahippocampalGyrus(self) self.parahippocampal_gyrus = ParahippocampalGyrus(self)
# 从数据库加载记忆图 # 从数据库加载记忆图
self.entorhinal_cortex.sync_memory_from_db() self.entorhinal_cortex.sync_memory_from_db()
self.llm_topic_judge = LLMRequest(self.config.llm_topic_judge, request_type="memory") # TODO: API-Adapter修改标记
self.llm_summary = LLMRequest(self.config.llm_summary, request_type="memory") self.llm_topic_judge = LLMRequest(global_config.model.topic_judge, request_type="memory")
self.llm_summary = LLMRequest(global_config.model.summary, request_type="memory")
def get_all_node_names(self) -> list: def get_all_node_names(self) -> list:
"""获取记忆图中所有节点的名字列表""" """获取记忆图中所有节点的名字列表"""
@ -792,7 +791,6 @@ class EntorhinalCortex:
def __init__(self, hippocampus: Hippocampus): def __init__(self, hippocampus: Hippocampus):
self.hippocampus = hippocampus self.hippocampus = hippocampus
self.memory_graph = hippocampus.memory_graph self.memory_graph = hippocampus.memory_graph
self.config = hippocampus.config
def get_memory_sample(self): def get_memory_sample(self):
"""从数据库获取记忆样本""" """从数据库获取记忆样本"""
@ -801,13 +799,13 @@ class EntorhinalCortex:
# 创建双峰分布的记忆调度器 # 创建双峰分布的记忆调度器
sample_scheduler = MemoryBuildScheduler( sample_scheduler = MemoryBuildScheduler(
n_hours1=self.config.memory_build_distribution[0], n_hours1=global_config.memory.memory_build_distribution[0],
std_hours1=self.config.memory_build_distribution[1], std_hours1=global_config.memory.memory_build_distribution[1],
weight1=self.config.memory_build_distribution[2], weight1=global_config.memory.memory_build_distribution[2],
n_hours2=self.config.memory_build_distribution[3], n_hours2=global_config.memory.memory_build_distribution[3],
std_hours2=self.config.memory_build_distribution[4], std_hours2=global_config.memory.memory_build_distribution[4],
weight2=self.config.memory_build_distribution[5], weight2=global_config.memory.memory_build_distribution[5],
total_samples=self.config.build_memory_sample_num, total_samples=global_config.memory.memory_build_sample_num,
) )
timestamps = sample_scheduler.get_timestamp_array() timestamps = sample_scheduler.get_timestamp_array()
@ -818,7 +816,7 @@ class EntorhinalCortex:
for timestamp in timestamps: for timestamp in timestamps:
# 调用修改后的 random_get_msg_snippet # 调用修改后的 random_get_msg_snippet
messages = self.random_get_msg_snippet( messages = self.random_get_msg_snippet(
timestamp, self.config.build_memory_sample_length, max_memorized_time_per_msg timestamp, global_config.memory.memory_build_sample_length, max_memorized_time_per_msg
) )
if messages: if messages:
time_diff = (datetime.datetime.now().timestamp() - timestamp) / 3600 time_diff = (datetime.datetime.now().timestamp() - timestamp) / 3600
@ -1099,7 +1097,6 @@ class ParahippocampalGyrus:
def __init__(self, hippocampus: Hippocampus): def __init__(self, hippocampus: Hippocampus):
self.hippocampus = hippocampus self.hippocampus = hippocampus
self.memory_graph = hippocampus.memory_graph self.memory_graph = hippocampus.memory_graph
self.config = hippocampus.config
async def memory_compress(self, messages: list, compress_rate=0.1): async def memory_compress(self, messages: list, compress_rate=0.1):
"""压缩和总结消息内容,生成记忆主题和摘要。 """压缩和总结消息内容,生成记忆主题和摘要。
@ -1159,7 +1156,7 @@ class ParahippocampalGyrus:
# 3. 过滤掉包含禁用关键词的topic # 3. 过滤掉包含禁用关键词的topic
filtered_topics = [ filtered_topics = [
topic for topic in topics if not any(keyword in topic for keyword in self.config.memory_ban_words) topic for topic in topics if not any(keyword in topic for keyword in global_config.memory.memory_ban_words)
] ]
logger.debug(f"过滤后话题: {filtered_topics}") logger.debug(f"过滤后话题: {filtered_topics}")
@ -1222,7 +1219,7 @@ class ParahippocampalGyrus:
bar = "" * filled_length + "-" * (bar_length - filled_length) bar = "" * filled_length + "-" * (bar_length - filled_length)
logger.debug(f"进度: [{bar}] {progress:.1f}% ({i}/{len(memory_samples)})") logger.debug(f"进度: [{bar}] {progress:.1f}% ({i}/{len(memory_samples)})")
compress_rate = self.config.memory_compress_rate compress_rate = global_config.memory.memory_compress_rate
try: try:
compressed_memory, similar_topics_dict = await self.memory_compress(messages, compress_rate) compressed_memory, similar_topics_dict = await self.memory_compress(messages, compress_rate)
except Exception as e: except Exception as e:
@ -1322,7 +1319,7 @@ class ParahippocampalGyrus:
edge_data = self.memory_graph.G[source][target] edge_data = self.memory_graph.G[source][target]
last_modified = edge_data.get("last_modified") last_modified = edge_data.get("last_modified")
if current_time - last_modified > 3600 * self.config.memory_forget_time: if current_time - last_modified > 3600 * global_config.memory.memory_forget_time:
current_strength = edge_data.get("strength", 1) current_strength = edge_data.get("strength", 1)
new_strength = current_strength - 1 new_strength = current_strength - 1
@ -1430,8 +1427,8 @@ class ParahippocampalGyrus:
async def operation_consolidate_memory(self): async def operation_consolidate_memory(self):
"""整合记忆:合并节点内相似的记忆项""" """整合记忆:合并节点内相似的记忆项"""
start_time = time.time() start_time = time.time()
percentage = self.config.consolidate_memory_percentage percentage = global_config.memory.consolidate_memory_percentage
similarity_threshold = self.config.consolidation_similarity_threshold similarity_threshold = global_config.memory.consolidation_similarity_threshold
logger.info(f"[整合] 开始检查记忆节点... 检查比例: {percentage:.2%}, 合并阈值: {similarity_threshold}") logger.info(f"[整合] 开始检查记忆节点... 检查比例: {percentage:.2%}, 合并阈值: {similarity_threshold}")
# 获取所有至少有2条记忆项的节点 # 获取所有至少有2条记忆项的节点
@ -1544,7 +1541,6 @@ class ParahippocampalGyrus:
class HippocampusManager: class HippocampusManager:
_instance = None _instance = None
_hippocampus = None _hippocampus = None
_global_config = None
_initialized = False _initialized = False
@classmethod @classmethod
@ -1559,19 +1555,15 @@ class HippocampusManager:
raise RuntimeError("HippocampusManager 尚未初始化,请先调用 initialize 方法") raise RuntimeError("HippocampusManager 尚未初始化,请先调用 initialize 方法")
return cls._hippocampus return cls._hippocampus
def initialize(self, global_config): def initialize(self):
"""初始化海马体实例""" """初始化海马体实例"""
if self._initialized: if self._initialized:
return self._hippocampus return self._hippocampus
self._global_config = global_config
self._hippocampus = Hippocampus() self._hippocampus = Hippocampus()
self._hippocampus.initialize(global_config) self._hippocampus.initialize()
self._initialized = True self._initialized = True
# 输出记忆系统参数信息
config = self._hippocampus.config
# 输出记忆图统计信息 # 输出记忆图统计信息
memory_graph = self._hippocampus.memory_graph.G memory_graph = self._hippocampus.memory_graph.G
node_count = len(memory_graph.nodes()) node_count = len(memory_graph.nodes())
@ -1579,9 +1571,9 @@ class HippocampusManager:
logger.success(f"""-------------------------------- logger.success(f"""--------------------------------
记忆系统参数配置: 记忆系统参数配置:
构建间隔: {global_config.build_memory_interval}|样本数: {config.build_memory_sample_num},长度: {config.build_memory_sample_length}|压缩率: {config.memory_compress_rate} 构建间隔: {global_config.memory.memory_build_interval}|样本数: {global_config.memory.memory_build_sample_num},长度: {global_config.memory.memory_build_sample_length}|压缩率: {global_config.memory.memory_compress_rate}
记忆构建分布: {config.memory_build_distribution} 记忆构建分布: {global_config.memory.memory_build_distribution}
遗忘间隔: {global_config.forget_memory_interval}|遗忘比例: {global_config.memory_forget_percentage}|遗忘: {config.memory_forget_time}小时之后 遗忘间隔: {global_config.memory.forget_memory_interval}|遗忘比例: {global_config.memory.memory_forget_percentage}|遗忘: {global_config.memory.memory_forget_time}小时之后
记忆图统计信息: 节点数量: {node_count}, 连接数量: {edge_count} 记忆图统计信息: 节点数量: {node_count}, 连接数量: {edge_count}
--------------------------------""") # noqa: E501 --------------------------------""") # noqa: E501

View File

@ -7,7 +7,6 @@ import os
# 添加项目根目录到系统路径 # 添加项目根目录到系统路径
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))) sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))))
from src.chat.memory_system.Hippocampus import HippocampusManager from src.chat.memory_system.Hippocampus import HippocampusManager
from src.config.config import global_config
from rich.traceback import install from rich.traceback import install
install(extra_lines=3) install(extra_lines=3)
@ -19,7 +18,7 @@ async def test_memory_system():
# 初始化记忆系统 # 初始化记忆系统
print("开始初始化记忆系统...") print("开始初始化记忆系统...")
hippocampus_manager = HippocampusManager.get_instance() hippocampus_manager = HippocampusManager.get_instance()
hippocampus_manager.initialize(global_config=global_config) hippocampus_manager.initialize()
print("记忆系统初始化完成") print("记忆系统初始化完成")
# 测试记忆构建 # 测试记忆构建

View File

@ -1,48 +0,0 @@
from dataclasses import dataclass
from typing import List
@dataclass
class MemoryConfig:
"""记忆系统配置类"""
# 记忆构建相关配置
memory_build_distribution: List[float] # 记忆构建的时间分布参数
build_memory_sample_num: int # 每次构建记忆的样本数量
build_memory_sample_length: int # 每个样本的消息长度
memory_compress_rate: float # 记忆压缩率
# 记忆遗忘相关配置
memory_forget_time: int # 记忆遗忘时间(小时)
# 记忆过滤相关配置
memory_ban_words: List[str] # 记忆过滤词列表
# 新增:记忆整合相关配置
consolidation_similarity_threshold: float # 相似度阈值
consolidate_memory_percentage: float # 检查节点比例
consolidate_memory_interval: int # 记忆整合间隔
llm_topic_judge: str # 话题判断模型
llm_summary: str # 话题总结模型
@classmethod
def from_global_config(cls, global_config):
"""从全局配置创建记忆系统配置"""
# 使用 getattr 提供默认值,防止全局配置缺少这些项
return cls(
memory_build_distribution=getattr(
global_config, "memory_build_distribution", (24, 12, 0.5, 168, 72, 0.5)
), # 添加默认值
build_memory_sample_num=getattr(global_config, "build_memory_sample_num", 5),
build_memory_sample_length=getattr(global_config, "build_memory_sample_length", 30),
memory_compress_rate=getattr(global_config, "memory_compress_rate", 0.1),
memory_forget_time=getattr(global_config, "memory_forget_time", 24 * 7),
memory_ban_words=getattr(global_config, "memory_ban_words", []),
# 新增加载整合配置,并提供默认值
consolidation_similarity_threshold=getattr(global_config, "consolidation_similarity_threshold", 0.7),
consolidate_memory_percentage=getattr(global_config, "consolidate_memory_percentage", 0.01),
consolidate_memory_interval=getattr(global_config, "consolidate_memory_interval", 1000),
llm_topic_judge=getattr(global_config, "llm_topic_judge", "default_judge_model"), # 添加默认模型名
llm_summary=getattr(global_config, "llm_summary", "default_summary_model"), # 添加默认模型名
)

View File

@ -64,21 +64,21 @@ class ChatBot:
userinfo = message.message_info.user_info userinfo = message.message_info.user_info
# 用户黑名单拦截 # 用户黑名单拦截
if userinfo.user_id in global_config.ban_user_id: if userinfo.user_id in global_config.chat_target.ban_user_id:
logger.debug(f"用户{userinfo.user_id}被禁止回复") logger.debug(f"用户{userinfo.user_id}被禁止回复")
return return
if groupinfo is None and global_config.enable_friend_whitelist: if groupinfo is None and global_config.experimental.enable_friend_whitelist:
logger.trace("检测到私聊消息,检查") logger.trace("检测到私聊消息,检查")
# 好友黑名单拦截 # 好友黑名单拦截
if userinfo.user_id not in global_config.talk_allowed_private: if userinfo.user_id not in global_config.experimental.talk_allowed_private:
logger.debug(f"用户{userinfo.user_id}没有私聊权限") logger.debug(f"用户{userinfo.user_id}没有私聊权限")
return return
elif not global_config.enable_friend_whitelist: elif not global_config.experimental.enable_friend_whitelist:
logger.debug("私聊白名单模式未启用,跳过私聊权限检查。") logger.debug("私聊白名单模式未启用,跳过私聊权限检查。")
# 群聊黑名单拦截 # 群聊黑名单拦截
if groupinfo is not None and groupinfo.group_id not in global_config.talk_allowed_groups: if groupinfo is not None and groupinfo.group_id not in global_config.chat_target.talk_allowed_groups:
logger.trace(f"{groupinfo.group_id}被禁止回复") logger.trace(f"{groupinfo.group_id}被禁止回复")
return return
@ -94,7 +94,7 @@ class ChatBot:
else: else:
template_group_name = None template_group_name = None
if not global_config.enable_Legacy_HFC: if not global_config.experimental.enable_Legacy_HFC:
hfc_processor = self.heartflow_processor hfc_processor = self.heartflow_processor
else: else:
hfc_processor = self.legacy_hfc_processor hfc_processor = self.legacy_hfc_processor
@ -105,10 +105,10 @@ class ChatBot:
if groupinfo is None: if groupinfo is None:
logger.trace("检测到私聊消息") logger.trace("检测到私聊消息")
# 是否在配置信息中开启私聊模式 # 是否在配置信息中开启私聊模式
if global_config.enable_friend_chat: if global_config.experimental.enable_friend_chat:
logger.trace("私聊模式已启用") logger.trace("私聊模式已启用")
# 是否进入PFC # 是否进入PFC
if global_config.enable_pfc_chatting: if global_config.pfc.enable:
logger.trace("进入PFC私聊处理流程") logger.trace("进入PFC私聊处理流程")
await self.pfc_processor.process_message(message_data) await self.pfc_processor.process_message(message_data)
# 禁止PFC进入普通的心流消息处理逻辑 # 禁止PFC进入普通的心流消息处理逻辑

View File

@ -38,7 +38,7 @@ class MessageBuffer:
async def start_caching_messages(self, message: MessageRecv): async def start_caching_messages(self, message: MessageRecv):
"""添加消息,启动缓冲""" """添加消息,启动缓冲"""
if not global_config.message_buffer: if not global_config.chat.message_buffer:
person_id = person_info_manager.get_person_id( person_id = person_info_manager.get_person_id(
message.message_info.user_info.platform, message.message_info.user_info.user_id message.message_info.user_info.platform, message.message_info.user_info.user_id
) )
@ -107,7 +107,7 @@ class MessageBuffer:
async def query_buffer_result(self, message: MessageRecv) -> bool: async def query_buffer_result(self, message: MessageRecv) -> bool:
"""查询缓冲结果,并清理""" """查询缓冲结果,并清理"""
if not global_config.message_buffer: if not global_config.chat.message_buffer:
return True return True
person_id_ = self.get_person_id_( person_id_ = self.get_person_id_(
message.message_info.platform, message.message_info.user_info.user_id, message.message_info.group_info message.message_info.platform, message.message_info.user_info.user_id, message.message_info.group_info

View File

@ -279,7 +279,7 @@ class MessageManager:
) )
# 检查是否超时 # 检查是否超时
if thinking_time > global_config.thinking_timeout: if thinking_time > global_config.normal_chat.thinking_timeout:
logger.warning( logger.warning(
f"[{chat_id}] 消息思考超时 ({thinking_time:.1f}秒),移除消息 {message_earliest.message_info.message_id}" f"[{chat_id}] 消息思考超时 ({thinking_time:.1f}秒),移除消息 {message_earliest.message_info.message_id}"
) )

View File

@ -111,8 +111,8 @@ class LLMRequest:
def __init__(self, model: dict, **kwargs): def __init__(self, model: dict, **kwargs):
# 将大写的配置键转换为小写并从config中获取实际值 # 将大写的配置键转换为小写并从config中获取实际值
try: try:
self.api_key = os.environ[model["key"]] self.api_key = os.environ[f"{model['provider']}_KEY"]
self.base_url = os.environ[model["base_url"]] self.base_url = os.environ[f"{model['provider']}_BASE_URL"]
except AttributeError as e: except AttributeError as e:
logger.error(f"原始 model dict 信息:{model}") logger.error(f"原始 model dict 信息:{model}")
logger.error(f"配置错误:找不到对应的配置项 - {str(e)}") logger.error(f"配置错误:找不到对应的配置项 - {str(e)}")
@ -500,11 +500,11 @@ class LLMRequest:
logger.warning(f"检测到403错误模型从 {old_model_name} 降级为 {self.model_name}") logger.warning(f"检测到403错误模型从 {old_model_name} 降级为 {self.model_name}")
# 对全局配置进行更新 # 对全局配置进行更新
if global_config.llm_normal.get("name") == old_model_name: if global_config.model.normal.get("name") == old_model_name:
global_config.llm_normal["name"] = self.model_name global_config.model.normal["name"] = self.model_name
logger.warning(f"将全局配置中的 llm_normal 模型临时降级至{self.model_name}") logger.warning(f"将全局配置中的 llm_normal 模型临时降级至{self.model_name}")
if global_config.llm_reasoning.get("name") == old_model_name: if global_config.model.reasoning.get("name") == old_model_name:
global_config.llm_reasoning["name"] = self.model_name global_config.model.reasoning["name"] = self.model_name
logger.warning(f"将全局配置中的 llm_reasoning 模型临时降级至{self.model_name}") logger.warning(f"将全局配置中的 llm_reasoning 模型临时降级至{self.model_name}")
if payload and "model" in payload: if payload and "model" in payload:
@ -636,7 +636,7 @@ class LLMRequest:
**params_copy, **params_copy,
} }
if "max_tokens" not in payload and "max_completion_tokens" not in payload: if "max_tokens" not in payload and "max_completion_tokens" not in payload:
payload["max_tokens"] = global_config.model_max_output_length payload["max_tokens"] = global_config.model.model_max_output_length
# 如果 payload 中依然存在 max_tokens 且需要转换,在这里进行再次检查 # 如果 payload 中依然存在 max_tokens 且需要转换,在这里进行再次检查
if self.model_name.lower() in self.MODELS_NEEDING_TRANSFORMATION and "max_tokens" in payload: if self.model_name.lower() in self.MODELS_NEEDING_TRANSFORMATION and "max_tokens" in payload:
payload["max_completion_tokens"] = payload.pop("max_tokens") payload["max_completion_tokens"] = payload.pop("max_tokens")

View File

@ -74,8 +74,8 @@ class NormalChat:
messageinfo = message.message_info messageinfo = message.message_info
bot_user_info = UserInfo( bot_user_info = UserInfo(
user_id=global_config.BOT_QQ, user_id=global_config.bot.qq_account,
user_nickname=global_config.BOT_NICKNAME, user_nickname=global_config.bot.nickname,
platform=messageinfo.platform, platform=messageinfo.platform,
) )
@ -122,8 +122,8 @@ class NormalChat:
message_id=thinking_id, message_id=thinking_id,
chat_stream=self.chat_stream, # 使用 self.chat_stream chat_stream=self.chat_stream, # 使用 self.chat_stream
bot_user_info=UserInfo( bot_user_info=UserInfo(
user_id=global_config.BOT_QQ, user_id=global_config.bot.qq_account,
user_nickname=global_config.BOT_NICKNAME, user_nickname=global_config.bot.nickname,
platform=message.message_info.platform, platform=message.message_info.platform,
), ),
sender_info=message.message_info.user_info, sender_info=message.message_info.user_info,
@ -148,7 +148,7 @@ class NormalChat:
# 改为实例方法 # 改为实例方法
async def _handle_emoji(self, message: MessageRecv, response: str): async def _handle_emoji(self, message: MessageRecv, response: str):
"""处理表情包""" """处理表情包"""
if random() < global_config.emoji_chance: if random() < global_config.normal_chat.emoji_chance:
emoji_raw = await emoji_manager.get_emoji_for_text(response) emoji_raw = await emoji_manager.get_emoji_for_text(response)
if emoji_raw: if emoji_raw:
emoji_path, description = emoji_raw emoji_path, description = emoji_raw
@ -161,8 +161,8 @@ class NormalChat:
message_id="mt" + str(thinking_time_point), message_id="mt" + str(thinking_time_point),
chat_stream=self.chat_stream, # 使用 self.chat_stream chat_stream=self.chat_stream, # 使用 self.chat_stream
bot_user_info=UserInfo( bot_user_info=UserInfo(
user_id=global_config.BOT_QQ, user_id=global_config.bot.qq_account,
user_nickname=global_config.BOT_NICKNAME, user_nickname=global_config.bot.nickname,
platform=message.message_info.platform, platform=message.message_info.platform,
), ),
sender_info=message.message_info.user_info, sender_info=message.message_info.user_info,
@ -187,7 +187,7 @@ class NormalChat:
label=emotion, label=emotion,
stance=stance, # 使用 self.chat_stream stance=stance, # 使用 self.chat_stream
) )
self.mood_manager.update_mood_from_emotion(emotion, global_config.mood_intensity_factor) self.mood_manager.update_mood_from_emotion(emotion, global_config.mood.mood_intensity_factor)
async def _reply_interested_message(self) -> None: async def _reply_interested_message(self) -> None:
""" """
@ -432,7 +432,7 @@ class NormalChat:
def _check_ban_words(text: str, chat: ChatStream, userinfo: UserInfo) -> bool: def _check_ban_words(text: str, chat: ChatStream, userinfo: UserInfo) -> bool:
"""检查消息中是否包含过滤词""" """检查消息中是否包含过滤词"""
stream_name = chat_manager.get_stream_name(chat.stream_id) or chat.stream_id stream_name = chat_manager.get_stream_name(chat.stream_id) or chat.stream_id
for word in global_config.ban_words: for word in global_config.chat.ban_words:
if word in text: if word in text:
logger.info( logger.info(
f"[{stream_name}][{chat.group_info.group_name if chat.group_info else '私聊'}]" f"[{stream_name}][{chat.group_info.group_name if chat.group_info else '私聊'}]"
@ -447,7 +447,7 @@ class NormalChat:
def _check_ban_regex(text: str, chat: ChatStream, userinfo: UserInfo) -> bool: def _check_ban_regex(text: str, chat: ChatStream, userinfo: UserInfo) -> bool:
"""检查消息是否匹配过滤正则表达式""" """检查消息是否匹配过滤正则表达式"""
stream_name = chat_manager.get_stream_name(chat.stream_id) or chat.stream_id stream_name = chat_manager.get_stream_name(chat.stream_id) or chat.stream_id
for pattern in global_config.ban_msgs_regex: for pattern in global_config.chat.ban_msgs_regex:
if pattern.search(text): if pattern.search(text):
logger.info( logger.info(
f"[{stream_name}][{chat.group_info.group_name if chat.group_info else '私聊'}]" f"[{stream_name}][{chat.group_info.group_name if chat.group_info else '私聊'}]"

View File

@ -15,21 +15,22 @@ logger = get_logger("llm")
class NormalChatGenerator: class NormalChatGenerator:
def __init__(self): def __init__(self):
# TODO: API-Adapter修改标记
self.model_reasoning = LLMRequest( self.model_reasoning = LLMRequest(
model=global_config.llm_reasoning, model=global_config.model.reasoning,
temperature=0.7, temperature=0.7,
max_tokens=3000, max_tokens=3000,
request_type="response_reasoning", request_type="response_reasoning",
) )
self.model_normal = LLMRequest( self.model_normal = LLMRequest(
model=global_config.llm_normal, model=global_config.model.normal,
temperature=global_config.llm_normal["temp"], temperature=global_config.model.normal["temp"],
max_tokens=256, max_tokens=256,
request_type="response_reasoning", request_type="response_reasoning",
) )
self.model_sum = LLMRequest( self.model_sum = LLMRequest(
model=global_config.llm_summary, temperature=0.7, max_tokens=3000, request_type="relation" model=global_config.model.summary, temperature=0.7, max_tokens=3000, request_type="relation"
) )
self.current_model_type = "r1" # 默认使用 R1 self.current_model_type = "r1" # 默认使用 R1
self.current_model_name = "unknown model" self.current_model_name = "unknown model"
@ -37,7 +38,7 @@ class NormalChatGenerator:
async def generate_response(self, message: MessageThinking, thinking_id: str) -> Optional[Union[str, List[str]]]: async def generate_response(self, message: MessageThinking, thinking_id: str) -> Optional[Union[str, List[str]]]:
"""根据当前模型类型选择对应的生成函数""" """根据当前模型类型选择对应的生成函数"""
# 从global_config中获取模型概率值并选择模型 # 从global_config中获取模型概率值并选择模型
if random.random() < global_config.model_reasoning_probability: if random.random() < global_config.normal_chat.reasoning_model_probability:
self.current_model_type = "深深地" self.current_model_type = "深深地"
current_model = self.model_reasoning current_model = self.model_reasoning
else: else:
@ -51,7 +52,7 @@ class NormalChatGenerator:
model_response = await self._generate_response_with_model(message, current_model, thinking_id) model_response = await self._generate_response_with_model(message, current_model, thinking_id)
if model_response: if model_response:
logger.info(f"{global_config.BOT_NICKNAME}的回复是:{model_response}") logger.info(f"{global_config.bot.nickname}的回复是:{model_response}")
model_response = await self._process_response(model_response) model_response = await self._process_response(model_response)
return model_response return model_response
@ -113,7 +114,7 @@ class NormalChatGenerator:
- "中立"不表达明确立场或无关回应 - "中立"不表达明确立场或无关回应
2. "开心,愤怒,悲伤,惊讶,平静,害羞,恐惧,厌恶,困惑"中选出最匹配的1个情感标签 2. "开心,愤怒,悲伤,惊讶,平静,害羞,恐惧,厌恶,困惑"中选出最匹配的1个情感标签
3. 按照"立场-情绪"的格式直接输出结果例如"反对-愤怒" 3. 按照"立场-情绪"的格式直接输出结果例如"反对-愤怒"
4. 考虑回复者的人格设定为{global_config.personality_core} 4. 考虑回复者的人格设定为{global_config.personality.personality_core}
对话示例 对话示例
被回复A就是笨 被回复A就是笨

View File

@ -1,18 +1,20 @@
import asyncio import asyncio
from src.config.config import global_config
from .willing_manager import BaseWillingManager from .willing_manager import BaseWillingManager
class ClassicalWillingManager(BaseWillingManager): class ClassicalWillingManager(BaseWillingManager):
def __init__(self): def __init__(self):
super().__init__() super().__init__()
self._decay_task: asyncio.Task = None self._decay_task: asyncio.Task | None = None
async def _decay_reply_willing(self): async def _decay_reply_willing(self):
"""定期衰减回复意愿""" """定期衰减回复意愿"""
while True: while True:
await asyncio.sleep(1) await asyncio.sleep(1)
for chat_id in self.chat_reply_willing: for chat_id in self.chat_reply_willing:
self.chat_reply_willing[chat_id] = max(0, self.chat_reply_willing[chat_id] * 0.9) self.chat_reply_willing[chat_id] = max(0.0, self.chat_reply_willing[chat_id] * 0.9)
async def async_task_starter(self): async def async_task_starter(self):
if self._decay_task is None: if self._decay_task is None:
@ -23,35 +25,33 @@ class ClassicalWillingManager(BaseWillingManager):
chat_id = willing_info.chat_id chat_id = willing_info.chat_id
current_willing = self.chat_reply_willing.get(chat_id, 0) current_willing = self.chat_reply_willing.get(chat_id, 0)
interested_rate = willing_info.interested_rate * self.global_config.response_interested_rate_amplifier interested_rate = willing_info.interested_rate * global_config.normal_chat.response_interested_rate_amplifier
if interested_rate > 0.4: if interested_rate > 0.4:
current_willing += interested_rate - 0.3 current_willing += interested_rate - 0.3
if willing_info.is_mentioned_bot and current_willing < 1.0: if willing_info.is_mentioned_bot:
current_willing += 1 current_willing += 1 if current_willing < 1.0 else 0.05
elif willing_info.is_mentioned_bot:
current_willing += 0.05
is_emoji_not_reply = False is_emoji_not_reply = False
if willing_info.is_emoji: if willing_info.is_emoji:
if self.global_config.emoji_response_penalty != 0: if global_config.normal_chat.emoji_response_penalty != 0:
current_willing *= self.global_config.emoji_response_penalty current_willing *= global_config.normal_chat.emoji_response_penalty
else: else:
is_emoji_not_reply = True is_emoji_not_reply = True
self.chat_reply_willing[chat_id] = min(current_willing, 3.0) self.chat_reply_willing[chat_id] = min(current_willing, 3.0)
reply_probability = min( reply_probability = min(
max((current_willing - 0.5), 0.01) * self.global_config.response_willing_amplifier * 2, 1 max((current_willing - 0.5), 0.01) * global_config.normal_chat.response_willing_amplifier * 2, 1
) )
# 检查群组权限(如果是群聊) # 检查群组权限(如果是群聊)
if ( if (
willing_info.group_info willing_info.group_info
and willing_info.group_info.group_id in self.global_config.talk_frequency_down_groups and willing_info.group_info.group_id in global_config.chat_target.talk_frequency_down_groups
): ):
reply_probability = reply_probability / self.global_config.down_frequency_rate reply_probability = reply_probability / global_config.normal_chat.down_frequency_rate
if is_emoji_not_reply: if is_emoji_not_reply:
reply_probability = 0 reply_probability = 0
@ -61,7 +61,7 @@ class ClassicalWillingManager(BaseWillingManager):
async def before_generate_reply_handle(self, message_id): async def before_generate_reply_handle(self, message_id):
chat_id = self.ongoing_messages[message_id].chat_id chat_id = self.ongoing_messages[message_id].chat_id
current_willing = self.chat_reply_willing.get(chat_id, 0) current_willing = self.chat_reply_willing.get(chat_id, 0)
self.chat_reply_willing[chat_id] = max(0, current_willing - 1.8) self.chat_reply_willing[chat_id] = max(0.0, current_willing - 1.8)
async def after_generate_reply_handle(self, message_id): async def after_generate_reply_handle(self, message_id):
if message_id not in self.ongoing_messages: if message_id not in self.ongoing_messages:
@ -70,7 +70,7 @@ class ClassicalWillingManager(BaseWillingManager):
chat_id = self.ongoing_messages[message_id].chat_id chat_id = self.ongoing_messages[message_id].chat_id
current_willing = self.chat_reply_willing.get(chat_id, 0) current_willing = self.chat_reply_willing.get(chat_id, 0)
if current_willing < 1: if current_willing < 1:
self.chat_reply_willing[chat_id] = min(1, current_willing + 0.4) self.chat_reply_willing[chat_id] = min(1.0, current_willing + 0.4)
async def bombing_buffer_message_handle(self, message_id): async def bombing_buffer_message_handle(self, message_id):
return await super().bombing_buffer_message_handle(message_id) return await super().bombing_buffer_message_handle(message_id)

View File

@ -19,6 +19,7 @@ Mxp 模式:梦溪畔独家赞助
下下策是询问一个菜鸟@梦溪畔 下下策是询问一个菜鸟@梦溪畔
""" """
from src.config.config import global_config
from .willing_manager import BaseWillingManager from .willing_manager import BaseWillingManager
from typing import Dict from typing import Dict
import asyncio import asyncio
@ -50,8 +51,6 @@ class MxpWillingManager(BaseWillingManager):
self.mention_willing_gain = 0.6 # 提及意愿增益 self.mention_willing_gain = 0.6 # 提及意愿增益
self.interest_willing_gain = 0.3 # 兴趣意愿增益 self.interest_willing_gain = 0.3 # 兴趣意愿增益
self.emoji_response_penalty = self.global_config.emoji_response_penalty # 表情包回复惩罚
self.down_frequency_rate = self.global_config.down_frequency_rate # 降低回复频率的群组惩罚系数
self.single_chat_gain = 0.12 # 单聊增益 self.single_chat_gain = 0.12 # 单聊增益
self.fatigue_messages_triggered_num = self.expected_replies_per_min # 疲劳消息触发数量(int) self.fatigue_messages_triggered_num = self.expected_replies_per_min # 疲劳消息触发数量(int)
@ -179,10 +178,10 @@ class MxpWillingManager(BaseWillingManager):
probability = self._willing_to_probability(current_willing) probability = self._willing_to_probability(current_willing)
if w_info.is_emoji: if w_info.is_emoji:
probability *= self.emoji_response_penalty probability *= global_config.normal_chat.emoji_response_penalty
if w_info.group_info and w_info.group_info.group_id in self.global_config.talk_frequency_down_groups: if w_info.group_info and w_info.group_info.group_id in global_config.chat_target.talk_frequency_down_groups:
probability /= self.down_frequency_rate probability /= global_config.normal_chat.down_frequency_rate
self.temporary_willing = current_willing self.temporary_willing = current_willing

View File

@ -1,6 +1,6 @@
from src.common.logger import LogConfig, WILLING_STYLE_CONFIG, LoguruLogger, get_module_logger from src.common.logger import LogConfig, WILLING_STYLE_CONFIG, LoguruLogger, get_module_logger
from dataclasses import dataclass from dataclasses import dataclass
from src.config.config import global_config, BotConfig from src.config.config import global_config
from src.chat.message_receive.chat_stream import ChatStream, GroupInfo from src.chat.message_receive.chat_stream import ChatStream, GroupInfo
from src.chat.message_receive.message import MessageRecv from src.chat.message_receive.message import MessageRecv
from src.chat.person_info.person_info import person_info_manager, PersonInfoManager from src.chat.person_info.person_info import person_info_manager, PersonInfoManager
@ -93,7 +93,6 @@ class BaseWillingManager(ABC):
self.chat_reply_willing: Dict[str, float] = {} # 存储每个聊天流的回复意愿(chat_id) self.chat_reply_willing: Dict[str, float] = {} # 存储每个聊天流的回复意愿(chat_id)
self.ongoing_messages: Dict[str, WillingInfo] = {} # 当前正在进行的消息(message_id) self.ongoing_messages: Dict[str, WillingInfo] = {} # 当前正在进行的消息(message_id)
self.lock = asyncio.Lock() self.lock = asyncio.Lock()
self.global_config: BotConfig = global_config
self.logger: LoguruLogger = logger self.logger: LoguruLogger = logger
def setup(self, message: MessageRecv, chat: ChatStream, is_mentioned_bot: bool, interested_rate: float): def setup(self, message: MessageRecv, chat: ChatStream, is_mentioned_bot: bool, interested_rate: float):
@ -173,7 +172,7 @@ def init_willing_manager() -> BaseWillingManager:
Returns: Returns:
对应mode的WillingManager实例 对应mode的WillingManager实例
""" """
mode = global_config.willing_mode.lower() mode = global_config.normal_chat.willing_mode.lower()
return BaseWillingManager.create(mode) return BaseWillingManager.create(mode)

View File

@ -60,8 +60,9 @@ person_info_default = {
class PersonInfoManager: class PersonInfoManager:
def __init__(self): def __init__(self):
self.person_name_list = {} self.person_name_list = {}
# TODO: API-Adapter修改标记
self.qv_name_llm = LLMRequest( self.qv_name_llm = LLMRequest(
model=global_config.llm_normal, model=global_config.model.normal,
max_tokens=256, max_tokens=256,
request_type="qv_name", request_type="qv_name",
) )

View File

@ -190,8 +190,8 @@ async def _build_readable_messages_internal(
person_id = person_info_manager.get_person_id(platform, user_id) person_id = person_info_manager.get_person_id(platform, user_id)
# 根据 replace_bot_name 参数决定是否替换机器人名称 # 根据 replace_bot_name 参数决定是否替换机器人名称
if replace_bot_name and user_id == global_config.BOT_QQ: if replace_bot_name and user_id == global_config.bot.qq_account:
person_name = f"{global_config.BOT_NICKNAME}(你)" person_name = f"{global_config.bot.nickname}(你)"
else: else:
person_name = await person_info_manager.get_value(person_id, "person_name") person_name = await person_info_manager.get_value(person_id, "person_name")
@ -250,7 +250,7 @@ async def _build_readable_messages_internal(
message_details_raw.sort(key=lambda x: x[0]) # 按时间戳(第一个元素)升序排序,越早的消息排在前面 message_details_raw.sort(key=lambda x: x[0]) # 按时间戳(第一个元素)升序排序,越早的消息排在前面
# 应用截断逻辑 (如果 truncate 为 True) # 应用截断逻辑 (如果 truncate 为 True)
if not global_config.long_message_auto_truncate: if not global_config.memory.long_message_auto_truncate:
truncate = False truncate = False
message_details: List[Tuple[float, str, str]] = [] message_details: List[Tuple[float, str, str]] = []
n_messages = len(message_details_raw) n_messages = len(message_details_raw)
@ -429,7 +429,7 @@ async def build_anonymous_messages(messages: List[Dict[str, Any]]) -> str:
output_lines = [] output_lines = []
def get_anon_name(platform, user_id): def get_anon_name(platform, user_id):
if user_id == global_config.BOT_QQ: if user_id == global_config.bot.qq_account:
return "SELF" return "SELF"
person_id = person_info_manager.get_person_id(platform, user_id) person_id = person_info_manager.get_person_id(platform, user_id)
if person_id not in person_map: if person_id not in person_map:
@ -503,7 +503,7 @@ async def get_person_id_list(messages: List[Dict[str, Any]]) -> List[str]:
user_id = user_info.get("user_id") user_id = user_info.get("user_id")
# 检查必要信息是否存在 且 不是机器人自己 # 检查必要信息是否存在 且 不是机器人自己
if not all([platform, user_id]) or user_id == global_config.BOT_QQ: if not all([platform, user_id]) or user_id == global_config.bot.qq_account:
continue continue
person_id = person_info_manager.get_person_id(platform, user_id) person_id = person_info_manager.get_person_id(platform, user_id)

View File

@ -9,7 +9,6 @@ from typing import List
class InfoCatcher: class InfoCatcher:
def __init__(self): def __init__(self):
self.chat_history = [] # 聊天历史,长度为三倍使用的上下文喵~ self.chat_history = [] # 聊天历史,长度为三倍使用的上下文喵~
self.context_length = global_config.observation_context_size
self.chat_history_in_thinking = [] # 思考期间的聊天内容喵~ self.chat_history_in_thinking = [] # 思考期间的聊天内容喵~
self.chat_history_after_response = [] # 回复后的聊天内容,长度为一倍上下文喵~ self.chat_history_after_response = [] # 回复后的聊天内容,长度为一倍上下文喵~
@ -143,7 +142,7 @@ class InfoCatcher:
messages_before = ( messages_before = (
db.messages.find({"chat_id": chat_id, "message_id": {"$lt": message_id}}) db.messages.find({"chat_id": chat_id, "message_id": {"$lt": message_id}})
.sort("time", -1) .sort("time", -1)
.limit(self.context_length * 3) .limit(global_config.chat.observation_context_size * 3)
) # 获取更多历史信息 ) # 获取更多历史信息
return list(messages_before) return list(messages_before)

View File

@ -173,8 +173,8 @@ def db_message_to_str(message_dict: dict) -> str:
def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]: def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]:
"""检查消息是否提到了机器人""" """检查消息是否提到了机器人"""
keywords = [global_config.BOT_NICKNAME] keywords = [global_config.bot.nickname]
nicknames = global_config.BOT_ALIAS_NAMES nicknames = global_config.bot.alias_names
reply_probability = 0.0 reply_probability = 0.0
is_at = False is_at = False
is_mentioned = False is_mentioned = False
@ -194,18 +194,18 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]:
) )
# 判断是否被@ # 判断是否被@
if re.search(f"@[\s\S]*?id:{global_config.BOT_QQ}", message.processed_plain_text): if re.search(f"@[\s\S]*?id:{global_config.bot.qq_account}", message.processed_plain_text):
is_at = True is_at = True
is_mentioned = True is_mentioned = True
if is_at and global_config.at_bot_inevitable_reply: if is_at and global_config.normal_chat.at_bot_inevitable_reply:
reply_probability = 1.0 reply_probability = 1.0
logger.info("被@回复概率设置为100%") logger.info("被@回复概率设置为100%")
else: else:
if not is_mentioned: if not is_mentioned:
# 判断是否被回复 # 判断是否被回复
if re.match( if re.match(
f"\[回复 [\s\S]*?\({str(global_config.BOT_QQ)}\)[\s\S]*?\],说:", f"\[回复 [\s\S]*?\({str(global_config.bot.qq_account)}\)[\s\S]*?\],说:",
message.processed_plain_text, message.processed_plain_text,
): ):
is_mentioned = True is_mentioned = True
@ -219,7 +219,7 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]:
for nickname in nicknames: for nickname in nicknames:
if nickname in message_content: if nickname in message_content:
is_mentioned = True is_mentioned = True
if is_mentioned and global_config.mentioned_bot_inevitable_reply: if is_mentioned and global_config.normal_chat.mentioned_bot_inevitable_reply:
reply_probability = 1.0 reply_probability = 1.0
logger.info("被提及回复概率设置为100%") logger.info("被提及回复概率设置为100%")
return is_mentioned, reply_probability return is_mentioned, reply_probability
@ -227,7 +227,8 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]:
async def get_embedding(text, request_type="embedding"): async def get_embedding(text, request_type="embedding"):
"""获取文本的embedding向量""" """获取文本的embedding向量"""
llm = LLMRequest(model=global_config.embedding, request_type=request_type) # TODO: API-Adapter修改标记
llm = LLMRequest(model=global_config.model.embedding, request_type=request_type)
# return llm.get_embedding_sync(text) # return llm.get_embedding_sync(text)
try: try:
embedding = await llm.get_embedding(text) embedding = await llm.get_embedding(text)
@ -294,7 +295,7 @@ def get_recent_group_speaker(chat_stream_id: int, sender, limit: int = 12) -> li
user_info = UserInfo.from_dict(msg_db_data["user_info"]) user_info = UserInfo.from_dict(msg_db_data["user_info"])
if ( if (
(user_info.platform, user_info.user_id) != sender (user_info.platform, user_info.user_id) != sender
and user_info.user_id != global_config.BOT_QQ and user_info.user_id != global_config.bot.qq_account
and (user_info.platform, user_info.user_id, user_info.user_nickname) not in who_chat_in_group and (user_info.platform, user_info.user_id, user_info.user_nickname) not in who_chat_in_group
and len(who_chat_in_group) < 5 and len(who_chat_in_group) < 5
): # 排除重复排除消息发送者排除bot限制加载的关系数目 ): # 排除重复排除消息发送者排除bot限制加载的关系数目
@ -476,7 +477,6 @@ def split_into_sentences_w_remove_punctuation(original_text: str) -> list[str]:
processed_sentences_after_merge.append(s) processed_sentences_after_merge.append(s)
if perform_book_title_recovery_here and local_book_title_mapping: if perform_book_title_recovery_here and local_book_title_mapping:
# 假设 processed_sentences_after_merge 是最终的句子列表
processed_sentences_after_merge = recover_book_titles(processed_sentences_after_merge, local_book_title_mapping) processed_sentences_after_merge = recover_book_titles(processed_sentences_after_merge, local_book_title_mapping)
return processed_sentences_after_merge return processed_sentences_after_merge
@ -510,7 +510,7 @@ def random_remove_punctuation(text: str) -> str:
def process_llm_response(text: str) -> list[str]: def process_llm_response(text: str) -> list[str]:
# 先保护颜文字 # 先保护颜文字
if global_config.enable_kaomoji_protection: if global_config.response_splitter.enable_kaomoji_protection:
protected_text, kaomoji_mapping = protect_kaomoji(text) protected_text, kaomoji_mapping = protect_kaomoji(text)
logger.trace(f"保护颜文字后的文本: {protected_text}") logger.trace(f"保护颜文字后的文本: {protected_text}")
else: else:
@ -528,8 +528,8 @@ def process_llm_response(text: str) -> list[str]:
logger.debug(f"{text}去除括号处理后的文本: {cleaned_text}") logger.debug(f"{text}去除括号处理后的文本: {cleaned_text}")
# 对清理后的文本进行进一步处理 # 对清理后的文本进行进一步处理
max_length = global_config.response_max_length * 2 max_length = global_config.response_splitter.max_length * 2
max_sentence_num = global_config.response_max_sentence_num max_sentence_num = global_config.response_splitter.max_sentence_num
# 如果基本上是中文,则进行长度过滤 # 如果基本上是中文,则进行长度过滤
if get_western_ratio(cleaned_text) < 0.1: if get_western_ratio(cleaned_text) < 0.1:
if len(cleaned_text) > max_length: if len(cleaned_text) > max_length:
@ -537,20 +537,20 @@ def process_llm_response(text: str) -> list[str]:
return ["懒得说"] return ["懒得说"]
typo_generator = ChineseTypoGenerator( typo_generator = ChineseTypoGenerator(
error_rate=global_config.chinese_typo_error_rate, error_rate=global_config.chinese_typo.error_rate,
min_freq=global_config.chinese_typo_min_freq, min_freq=global_config.chinese_typo.min_freq,
tone_error_rate=global_config.chinese_typo_tone_error_rate, tone_error_rate=global_config.chinese_typo.tone_error_rate,
word_replace_rate=global_config.chinese_typo_word_replace_rate, word_replace_rate=global_config.chinese_typo.word_replace_rate,
) )
if global_config.enable_response_splitter: if global_config.response_splitter.enable:
split_sentences = split_into_sentences_w_remove_punctuation(cleaned_text) split_sentences = split_into_sentences_w_remove_punctuation(cleaned_text)
else: else:
split_sentences = [cleaned_text] split_sentences = [cleaned_text]
sentences = [] sentences = []
for sentence in split_sentences: for sentence in split_sentences:
if global_config.chinese_typo_enable: if global_config.chinese_typo.enable:
typoed_text, typo_corrections = typo_generator.create_typo_sentence(sentence) typoed_text, typo_corrections = typo_generator.create_typo_sentence(sentence)
sentences.append(typoed_text) sentences.append(typoed_text)
if typo_corrections: if typo_corrections:
@ -560,14 +560,14 @@ def process_llm_response(text: str) -> list[str]:
if len(sentences) > max_sentence_num: if len(sentences) > max_sentence_num:
logger.warning(f"分割后消息数量过多 ({len(sentences)} 条),返回默认回复") logger.warning(f"分割后消息数量过多 ({len(sentences)} 条),返回默认回复")
return [f"{global_config.BOT_NICKNAME}不知道哦"] return [f"{global_config.bot.nickname}不知道哦"]
# if extracted_contents: # if extracted_contents:
# for content in extracted_contents: # for content in extracted_contents:
# sentences.append(content) # sentences.append(content)
# 在所有句子处理完毕后,对包含占位符的列表进行恢复 # 在所有句子处理完毕后,对包含占位符的列表进行恢复
if global_config.enable_kaomoji_protection: if global_config.response_splitter.enable_kaomoji_protection:
sentences = recover_kaomoji(sentences, kaomoji_mapping) sentences = recover_kaomoji(sentences, kaomoji_mapping)
return sentences return sentences

View File

@ -36,7 +36,7 @@ class ImageManager:
self._ensure_description_collection() self._ensure_description_collection()
self._ensure_image_dir() self._ensure_image_dir()
self._initialized = True self._initialized = True
self._llm = LLMRequest(model=global_config.vlm, temperature=0.4, max_tokens=300, request_type="image") self._llm = LLMRequest(model=global_config.model.vlm, temperature=0.4, max_tokens=300, request_type="image")
def _ensure_image_dir(self): def _ensure_image_dir(self):
"""确保图像存储目录存在""" """确保图像存储目录存在"""
@ -134,7 +134,7 @@ class ImageManager:
return f"[表情包,含义看起来是:{cached_description}]" return f"[表情包,含义看起来是:{cached_description}]"
# 根据配置决定是否保存图片 # 根据配置决定是否保存图片
if global_config.save_emoji: if global_config.emoji.cache_emoji:
# 生成文件名和路径 # 生成文件名和路径
timestamp = int(time.time()) timestamp = int(time.time())
filename = f"{timestamp}_{image_hash[:8]}.{image_format}" filename = f"{timestamp}_{image_hash[:8]}.{image_format}"
@ -200,7 +200,7 @@ class ImageManager:
return "[图片]" return "[图片]"
# 根据配置决定是否保存图片 # 根据配置决定是否保存图片
if global_config.save_pic: if global_config.emoji.save_pic:
# 生成文件名和路径 # 生成文件名和路径
timestamp = int(time.time()) timestamp = int(time.time())
filename = f"{timestamp}_{image_hash[:8]}.{image_format}" filename = f"{timestamp}_{image_hash[:8]}.{image_format}"

View File

@ -35,7 +35,7 @@ class TelemetryHeartBeatTask(AsyncTask):
info_dict = { info_dict = {
"os_type": "Unknown", "os_type": "Unknown",
"py_version": platform.python_version(), "py_version": platform.python_version(),
"mmc_version": global_config.MAI_VERSION, "mmc_version": global_config.MMC_VERSION,
} }
match platform.system(): match platform.system():
@ -133,10 +133,9 @@ class TelemetryHeartBeatTask(AsyncTask):
async def run(self): async def run(self):
# 发送心跳 # 发送心跳
if global_config.remote_enable: if global_config.telemetry.enable:
if self.client_uuid is None: if self.client_uuid is None and not await self._req_uuid():
if not await self._req_uuid(): logger.error("获取UUID失败跳过此次心跳")
logger.error("获取UUID失败跳过此次心跳") return
return
await self._send_heartbeat() await self._send_heartbeat()

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,116 @@
from dataclasses import dataclass, fields, MISSING
from typing import TypeVar, Type, Any, get_origin, get_args
T = TypeVar("T", bound="ConfigBase")
TOML_DICT_TYPE = {
int,
float,
str,
bool,
list,
dict,
}
@dataclass
class ConfigBase:
"""配置类的基类"""
@classmethod
def from_dict(cls: Type[T], data: dict[str, Any]) -> T:
"""从字典加载配置字段"""
if not isinstance(data, dict):
raise TypeError(f"Expected a dictionary, got {type(data).__name__}")
init_args: dict[str, Any] = {}
for f in fields(cls):
field_name = f.name
if field_name.startswith("_"):
# 跳过以 _ 开头的字段
continue
if field_name not in data:
if f.default is not MISSING or f.default_factory is not MISSING:
# 跳过未提供且有默认值/默认构造方法的字段
continue
else:
raise ValueError(f"Missing required field: '{field_name}'")
value = data[field_name]
field_type = f.type
try:
init_args[field_name] = cls._convert_field(value, field_type)
except TypeError as e:
raise TypeError(f"Field '{field_name}' has a type error: {e}") from e
except Exception as e:
raise RuntimeError(f"Failed to convert field '{field_name}' to target type: {e}") from e
return cls(**init_args)
@classmethod
def _convert_field(cls, value: Any, field_type: Type[Any]) -> Any:
"""
转换字段值为指定类型
1. 对于嵌套的 dataclass递归调用相应的 from_dict 方法
2. 对于泛型集合类型list, set, tuple递归转换每个元素
3. 对于基础类型int, str, float, bool直接转换
4. 对于其他类型尝试直接转换如果失败则抛出异常
"""
# 如果是嵌套的 dataclass递归调用 from_dict 方法
if isinstance(field_type, type) and issubclass(field_type, ConfigBase):
if not isinstance(value, dict):
raise TypeError(f"Expected a dictionary for {field_type.__name__}, got {type(value).__name__}")
return field_type.from_dict(value)
# 处理泛型集合类型list, set, tuple
field_origin_type = get_origin(field_type)
field_type_args = get_args(field_type)
if field_origin_type in {list, set, tuple}:
# 检查提供的value是否为list
if not isinstance(value, list):
raise TypeError(f"Expected an list for {field_type.__name__}, got {type(value).__name__}")
if field_origin_type is list:
return [cls._convert_field(item, field_type_args[0]) for item in value]
elif field_origin_type is set:
return {cls._convert_field(item, field_type_args[0]) for item in value}
elif field_origin_type is tuple:
# 检查提供的value长度是否与类型参数一致
if len(value) != len(field_type_args):
raise TypeError(
f"Expected {len(field_type_args)} items for {field_type.__name__}, got {len(value)}"
)
return tuple(cls._convert_field(item, arg) for item, arg in zip(value, field_type_args))
if field_origin_type is dict:
# 检查提供的value是否为dict
if not isinstance(value, dict):
raise TypeError(f"Expected a dictionary for {field_type.__name__}, got {type(value).__name__}")
# 检查字典的键值类型
if len(field_type_args) != 2:
raise TypeError(f"Expected a dictionary with two type arguments for {field_type.__name__}")
key_type, value_type = field_type_args
return {cls._convert_field(k, key_type): cls._convert_field(v, value_type) for k, v in value.items()}
# 处理基础类型,例如 int, str 等
if field_type is Any or isinstance(value, field_type):
return value
# 其他类型,尝试直接转换
try:
return field_type(value)
except (ValueError, TypeError) as e:
raise TypeError(f"Cannot convert {type(value).__name__} to {field_type.__name__}") from e
def __str__(self):
"""返回配置类的字符串表示"""
return f"{self.__class__.__name__}({', '.join(f'{f.name}={getattr(self, f.name)}' for f in fields(self))})"

View File

@ -0,0 +1,540 @@
from dataclasses import dataclass, field
from typing import Any
from src.config.config_base import ConfigBase
"""
须知
1. 本文件中记录了所有的配置项
2. 所有新增的class都需要继承自ConfigBase
3. 所有新增的class都应在config.py中的Config类中添加字段
4. 对于新增的字段若为可选项则应在其后添加field()并设置default_factory或default
"""
@dataclass
class BotConfig(ConfigBase):
    """Identity of the bot's QQ account."""

    qq_account: str
    # QQ account number the bot logs in with.

    nickname: str
    # Primary nickname the bot is addressed by.

    # Additional names the bot also responds to.
    # `default_factory=list` replaces the non-idiomatic `lambda: []`.
    alias_names: list[str] = field(default_factory=list)
@dataclass
class ChatTargetConfig(ConfigBase):
    """Chat targets: which groups and users the bot interacts with."""

    # Group IDs the bot is allowed to chat in.
    # `default_factory=set` replaces the non-idiomatic `lambda: set()`.
    talk_allowed_groups: set[str] = field(default_factory=set)

    # Group IDs where the bot's reply frequency is reduced.
    talk_frequency_down_groups: set[str] = field(default_factory=set)

    # User IDs the bot must never reply to.
    ban_user_id: set[str] = field(default_factory=set)
@dataclass
class PersonalityConfig(ConfigBase):
    """Personality configuration: core persona, expression style and persona facets."""
    # Core persona description injected into prompts
    personality_core: str
    """核心人格"""
    # How the bot phrases its replies
    expression_style: str
    """表达风格"""
    # Enable the new expression-learning injection; when False the legacy method is used
    enable_expression_learner: bool = True
    """是否启用新发言习惯注入,关闭则启用旧方法"""
    # Additional persona facets
    personality_sides: list[str] = field(default_factory=lambda: [])
    """人格侧写"""
    # Persona prompt verbosity: 0 default, 1 core/random detail, 2 core+random facet/all details, 3 everything
    personality_detail_level: int = 0
    """人设消息注入 prompt 详细等级 (0: 采用默认配置, 1: 核心/随机细节, 2: 核心+随机侧面/全部细节, 3: 全部)"""
@dataclass
class IdentityConfig(ConfigBase):
    """Physical and identity traits of the bot persona."""
    # Height in centimeters
    height: int = 170
    """身高(单位:厘米)"""
    # Weight in kilograms
    weight: float = 50.0
    """体重(单位:千克)"""
    # Age in years
    age: int = 18
    """年龄(单位:岁)"""
    # Gender (male/female); empty string means unspecified
    gender: str = ""
    """性别(男/女)"""
    # Appearance description (default: "cute")
    appearance: str = "可爱"
    """外貌描述"""
    # Extra identity details
    identity_detail: list[str] = field(default_factory=lambda: [])
    """身份特征"""
@dataclass
class PlatformsConfig(ConfigBase):
    """Platform adapter configuration."""
    # Connection URL of the QQ adapter
    qq: str
    """QQ适配器连接URL配置"""
@dataclass
class ChatConfig(ConfigBase):
    """General chat behaviour configuration."""
    # Whether the focused-chat state may be entered at all
    allow_focus_mode: bool = True
    """是否允许专注聊天状态"""
    # Maximum number of groups in normal chat simultaneously
    base_normal_chat_num: int = 3
    """最多允许多少个群进行普通聊天"""
    # Maximum number of groups in focused chat simultaneously
    base_focused_chat_num: int = 2
    """最多允许多少个群进行专注聊天"""
    # Longest observable context; messages beyond this are compressed
    observation_context_size: int = 12
    """可观察到的最长上下文大小,超过这个值的上下文会被压缩"""
    # Enable the incoming message buffer
    message_buffer: bool = True
    """消息缓冲器"""
    # Words that cause a message to be filtered out
    ban_words: set[str] = field(default_factory=lambda: set())
    """过滤词列表"""
    # Regular expressions that cause a message to be filtered out
    ban_msgs_regex: set[str] = field(default_factory=lambda: set())
    """过滤正则表达式列表"""
    # Enable heart-flow deduplication (disable if truncation issues become severe)
    allow_remove_duplicates: bool = True
    """是否开启心流去重(如果发现心流截断问题严重可尝试关闭)"""
@dataclass
class NormalChatConfig(ConfigBase):
    """Normal (non-focused) chat configuration."""
    # Probability (0-1) of using the reasoning model; normal model gets 1 - p
    reasoning_model_probability: float = 0.3
    """
    发言时选择推理模型的概率0-1之间
    选择普通模型的概率为 1 - reasoning_model_probability
    """
    # Base probability of attaching an emoji to a reply
    emoji_chance: float = 0.2
    """发送表情包的基础概率"""
    # Maximum thinking time (seconds)
    thinking_timeout: int = 120
    """最长思考时间"""
    # Willingness computation mode
    willing_mode: str = "classical"
    """意愿模式"""
    # Multiplier applied to reply willingness
    response_willing_amplifier: float = 1.0
    """回复意愿放大系数"""
    # Multiplier applied to reply interest rate
    response_interested_rate_amplifier: float = 1.0
    """回复兴趣度放大系数"""
    # Willingness reduction factor for reduced-frequency groups
    down_frequency_rate: float = 3.0
    """降低回复频率的群组回复意愿降低系数"""
    # Penalty applied when replying to emoji messages
    emoji_response_penalty: float = 0.0
    """表情包回复惩罚系数"""
    # Always reply when the bot is mentioned by name
    mentioned_bot_inevitable_reply: bool = False
    """提及 bot 必然回复"""
    # Always reply when the bot is @-mentioned
    at_bot_inevitable_reply: bool = False
    """@bot 必然回复"""
@dataclass
class FocusChatConfig(ConfigBase):
    """Focused ("heart-flow") chat configuration."""
    # Interest threshold triggering focused chat; lower triggers more easily
    reply_trigger_threshold: float = 3.0
    """心流聊天触发阈值,越低越容易触发"""
    # Per-second interest decay rate; larger decays faster
    default_decay_rate_per_second: float = 0.98
    """默认衰减率,越大衰减越快"""
    # Consecutive no-reply count threshold
    consecutive_no_reply_threshold: int = 3
    """连续不回复的次数阈值"""
    # Minimum compressed context length (floor of 5)
    compressed_length: int = 5
    """心流上下文压缩的最短压缩长度超过心流观察到的上下文长度会压缩最短压缩长度为5"""
    # Maximum number of compressed context chunks kept; older ones are dropped
    compress_length_limit: int = 5
    """最多压缩份数,超过该数值的压缩上下文会被删除"""
@dataclass
class EmojiConfig(ConfigBase):
    """Emoji/sticker management configuration."""
    # Maximum number of registered emojis
    max_reg_num: int = 200
    """表情包最大注册数量"""
    # Replace old emojis once the registry is full
    do_replace: bool = True
    """达到最大注册数量时替换旧表情包"""
    # Emoji scan interval (minutes)
    check_interval: int = 120
    """表情包检查间隔(分钟)"""
    # Whether to save pictures
    save_pic: bool = False
    """是否保存图片"""
    # Whether to cache emojis
    cache_emoji: bool = True
    """是否缓存表情包"""
    # Collect emojis seen in chat so the bot can send them later
    steal_emoji: bool = True
    """是否偷取表情包,让麦麦可以发送她保存的这些表情包"""
    # Enable content filtering for collected emojis
    content_filtration: bool = False
    """是否开启表情包过滤"""
    # Filtering requirement prompt
    filtration_prompt: str = "符合公序良俗"
    """表情包过滤要求"""
@dataclass
class MemoryConfig(ConfigBase):
    """Memory building / forgetting / consolidation configuration."""
    # Memory build interval (seconds)
    memory_build_interval: int = 600
    """记忆构建间隔(秒)"""
    # Bimodal sampling parameters: (mean1, std1, weight1, mean2, std2, weight2)
    memory_build_distribution: tuple[
        float,
        float,
        float,
        float,
        float,
        float,
    ] = field(default_factory=lambda: (6.0, 3.0, 0.6, 32.0, 12.0, 0.4))
    """记忆构建分布参数分布1均值标准差权重分布2均值标准差权重"""
    # Number of samples taken per build
    memory_build_sample_num: int = 8
    """记忆构建采样数量"""
    # Length of each sample
    memory_build_sample_length: int = 40
    """记忆构建采样长度"""
    # Compression ratio applied when storing memories
    memory_compress_rate: float = 0.1
    """记忆压缩率"""
    # Forgetting pass interval (seconds)
    forget_memory_interval: int = 1000
    """记忆遗忘间隔(秒)"""
    # Age in hours after which memories may be forgotten
    memory_forget_time: int = 24
    """记忆遗忘时间(小时)"""
    # Fraction of memories forgotten per pass
    memory_forget_percentage: float = 0.01
    """记忆遗忘比例"""
    # Consolidation pass interval (seconds)
    consolidate_memory_interval: int = 1000
    """记忆整合间隔(秒)"""
    # Similarity threshold for merging memories
    consolidation_similarity_threshold: float = 0.7
    """整合相似度阈值"""
    # Fraction of nodes checked per consolidation pass
    consolidate_memory_percentage: float = 0.01
    """整合检查节点比例"""
    # Words that must never be memorized
    memory_ban_words: list[str] = field(default_factory=lambda: ["表情包", "图片", "回复", "聊天记录"])
    """不允许记忆的词列表"""
    # Auto-truncate overlong messages in HFC mode
    long_message_auto_truncate: bool = True
    """HFC 模式过长消息自动截断"""
@dataclass
class MoodConfig(ConfigBase):
    """Mood system configuration."""
    # Mood update interval (seconds)
    mood_update_interval: int = 1
    """情绪更新间隔(秒)"""
    # Mood decay rate per update
    mood_decay_rate: float = 0.95
    """情绪衰减率"""
    # Intensity factor applied to mood changes
    mood_intensity_factor: float = 0.7
    """情绪强度因子"""
@dataclass
class KeywordRuleConfig(ConfigBase):
    """A single keyword-reaction rule."""
    # Whether this rule is active
    enable: bool = True
    """是否启用关键词规则"""
    # Plain keywords that trigger the rule
    keywords: list[str] = field(default_factory=lambda: [])
    """关键词列表"""
    # Regular expressions that trigger the rule
    regex: list[str] = field(default_factory=lambda: [])
    """正则表达式列表"""
    # Reaction text injected when the rule fires
    reaction: str = ""
    """关键词触发的反应"""
@dataclass
class KeywordReactionConfig(ConfigBase):
    """Keyword reaction system configuration."""
    # Whether keyword reactions are enabled at all
    enable: bool = True
    """是否启用关键词反应"""
    # The list of keyword reaction rules
    rules: list[KeywordRuleConfig] = field(default_factory=lambda: [])
    """关键词反应规则列表"""
@dataclass
class ChineseTypoConfig(ConfigBase):
    """Chinese typo generator configuration (intentional, human-like typos)."""
    # Whether the typo generator is enabled
    enable: bool = True
    """是否启用中文错别字生成器"""
    # Per-character replacement probability
    error_rate: float = 0.01
    """单字替换概率"""
    # Minimum character-frequency threshold
    min_freq: int = 9
    """最小字频阈值"""
    # Tone error probability
    tone_error_rate: float = 0.1
    """声调错误概率"""
    # Whole-word replacement probability
    word_replace_rate: float = 0.006
    """整词替换概率"""
@dataclass
class ResponseSplitterConfig(ConfigBase):
    """Reply splitter configuration (splits long replies into messages)."""
    # Whether the splitter is enabled
    enable: bool = True
    """是否启用回复分割器"""
    # Maximum allowed reply length
    max_length: int = 256
    """回复允许的最大长度"""
    # Maximum allowed number of sentences per reply
    max_sentence_num: int = 3
    """回复允许的最大句子数"""
    # Protect kaomoji from being split
    enable_kaomoji_protection: bool = False
    """是否启用颜文字保护"""
@dataclass
class TelemetryConfig(ConfigBase):
    """Telemetry configuration."""
    # Whether telemetry is enabled
    enable: bool = True
    """是否启用遥测"""
@dataclass
class ExperimentalConfig(ConfigBase):
    """Experimental feature flags."""
    # Enable friend (private) chat
    enable_friend_chat: bool = False
    """是否启用好友聊天"""
    # Private chats allowed to talk
    talk_allowed_private: set[str] = field(default_factory=lambda: set())
    """允许聊天的私聊列表"""
    # NOTE(review): not snake_case, but the name must match the config key — do not rename
    enable_Legacy_HFC: bool = False
    """是否启用 Legacy_HFC 处理器"""
    # Enable the friend whitelist
    enable_friend_whitelist: bool = True
    """是否启用好友白名单"""
    # Enable the rename tool
    rename_person: bool = True
    """是否启用改名工具"""
@dataclass
class ScheduleConfig(ConfigBase):
    """Daily schedule generation configuration."""
    # Whether schedule generation is enabled
    enable: bool = False
    """是否启用日程生成"""
    # Prompt used for schedule generation
    prompt_schedule_gen: str = "无日程"
    """日程生成提示"""
    # Schedule refresh interval (seconds)
    schedule_doing_update_interval: int = 300
    """日程表更新间隔 单位秒"""
    # Generation temperature (0.5-1.0 recommended)
    schedule_temperature: float = 0.5
    """日程表温度建议0.5-1.0"""
    # IANA timezone name used for the schedule
    time_zone: str = "Asia/Shanghai"
    """时区"""
@dataclass
class GroupNicknameConfig(ConfigBase):
    """Group nickname mapping/analysis system configuration."""
    # Master switch for nickname mapping
    enable_nickname_mapping: bool = False
    """绰号映射功能总开关"""
    # Maximum number of nicknames injected into the prompt
    max_nicknames_in_prompt: int = 10
    """Prompt 中最多注入的绰号数量"""
    # Smoothing factor for weighted random nickname selection
    nickname_probability_smoothing: int = 1
    """绰号加权随机选择的平滑因子"""
    # Maximum nickname processing queue size
    nickname_queue_max_size: int = 100
    """绰号处理队列最大容量"""
    # Sleep interval of the nickname processing loop (seconds)
    nickname_process_sleep_interval: float = 5.0
    """绰号处理进程休眠间隔(秒)"""
    # Maximum context visible to nickname analysis
    nickname_analysis_history_limit: int = 30
    """绰号处理可见最大上下文"""
    # Probability that nickname analysis runs on a given message
    nickname_analysis_probability: float = 0.1
    """绰号随机概率命中"""
@dataclass
class PFCConfig(ConfigBase):
    """PFC (private-chat flow controller) configuration."""
    # Whether PFC is enabled
    enable: bool = False
    """是否启用PFC"""
    # Number of buffered chat messages
    pfc_message_buffer_size: int = 2
    """PFC 聊天消息缓冲数量"""
    # Maximum visible context in a conversation
    pfc_recent_history_display_count: int = 18
    """PFC 对话最大可见上下文"""
    # Whether the PFC reply checker is enabled
    enable_pfc_reply_checker: bool = True
    """是否启用 PFC 的回复检查器"""
    # Maximum reply generation attempts
    pfc_max_reply_attempts: int = 3
    """发言最多尝试次数"""
    # Maximum chat history visible to the checker
    pfc_max_chat_history_for_checker: int = 30
    """checker聊天记录最大可见上文长度"""
    # Emotion update intensity
    pfc_emotion_update_intensity: float = 0.6
    """情绪更新强度"""
    # Maximum context visible to emotion updates
    pfc_emotion_history_count: int = 5
    """情绪更新最大可见上下文长度"""
    # Relationship incremental update strength
    pfc_relationship_incremental_interval: int = 10
    """关系值增值强度"""
    # Maximum context for in-conversation relationship judgement
    pfc_relationship_incremental_msg_count: int = 10
    """会话中,关系值判断最大可见上下文"""
    # Default in-conversation relationship change
    pfc_relationship_incremental_default_change: float = 1.0
    """会话中,关系值默认更新值"""
    # Maximum in-conversation relationship change
    pfc_relationship_incremental_max_change: float = 5.0
    """会话中,关系值最大可变值"""
    # Maximum context for end-of-conversation relationship judgement
    pfc_relationship_final_msg_count: int = 30
    """会话结束时,关系值判断最大可见上下文"""
    # Default end-of-conversation relationship change
    pfc_relationship_final_default_change: float = 5.0
    """会话结束时,关系值默认更新值"""
    # Maximum end-of-conversation relationship change
    pfc_relationship_final_max_change: float = 50.0
    """会话结束时,关系值最大可变值"""
    # Seconds of most-recent history excluded when browsing chat history
    pfc_historical_fallback_exclude_seconds: int = 45
    """pfc 翻看聊天记录排除最近时长"""
    # Whether proactive (idle) messaging is enabled
    enable_idle_chat: bool = True
    """是否启用 pfc 主动发言"""
    # Idle-chat check interval (minutes)
    idle_check_interval: int = 10
    """主动发言检查间隔(分钟)"""
    # Minimum proactive-message cooldown (seconds)
    min_cooldown: int = 7200
    """主动发言最短冷却时间(秒)"""
    # Maximum proactive-message cooldown (seconds)
    max_cooldown: int = 18000
    """主动发言最长冷却时间(秒)"""
@dataclass
class ModelConfig(ConfigBase):
    """Per-purpose LLM model configuration.

    Each field is a raw model-settings mapping (provider, model name,
    temperature, ...) loaded from the config file.  The values are
    heterogeneous (strings AND numbers such as "temp"), so every mapping is
    annotated ``dict[str, Any]``: ConfigBase converts dict values to the
    annotated value type, and a ``dict[str, str]`` annotation would silently
    stringify numeric entries, breaking arithmetic on them downstream
    (e.g. ``model.normal["temp"] * factor``).
    """
    # Hard cap on generated reply length
    model_max_output_length: int = 800
    """最大回复长度"""
    reasoning: dict[str, Any] = field(default_factory=lambda: {})
    """推理模型配置"""
    normal: dict[str, Any] = field(default_factory=lambda: {})
    """普通模型配置"""
    topic_judge: dict[str, Any] = field(default_factory=lambda: {})
    """主题判断模型配置"""
    summary: dict[str, Any] = field(default_factory=lambda: {})
    """摘要模型配置"""
    vlm: dict[str, Any] = field(default_factory=lambda: {})
    """视觉语言模型配置"""
    heartflow: dict[str, Any] = field(default_factory=lambda: {})
    """心流模型配置"""
    observation: dict[str, Any] = field(default_factory=lambda: {})
    """观察模型配置"""
    sub_heartflow: dict[str, Any] = field(default_factory=lambda: {})
    """子心流模型配置"""
    plan: dict[str, Any] = field(default_factory=lambda: {})
    """计划模型配置"""
    embedding: dict[str, Any] = field(default_factory=lambda: {})
    """嵌入模型配置"""
    pfc_action_planner: dict[str, Any] = field(default_factory=lambda: {})
    """PFC动作规划模型配置"""
    pfc_chat: dict[str, Any] = field(default_factory=lambda: {})
    """PFC聊天模型配置"""
    # pfc_reply_checker: dict[str, Any] = field(default_factory=lambda: {})
    # """PFC回复检查模型配置"""
    tool_use: dict[str, Any] = field(default_factory=lambda: {})
    """工具使用模型配置"""
    # was dict[str, str]: value type widened so numeric settings survive conversion
    nickname_mapping: dict[str, Any] = field(default_factory=lambda: {})
    """绰号映射LLM配置"""
    # was dict[str, str]: value type widened so numeric settings survive conversion
    scheduler_all: dict[str, Any] = field(default_factory=lambda: {})
    """全局日程LLM配置"""
    # was dict[str, str]: value type widened so numeric settings survive conversion
    scheduler_doing: dict[str, Any] = field(default_factory=lambda: {})
    """当前活动日程LLM配置"""
    # NOTE(review): name kept as-is (matches the config key) despite not being snake_case.
    # was dict[str, str]: value type widened so numeric settings survive conversion
    PFC_relationship_eval: dict[str, Any] = field(default_factory=lambda: {})
    """PFC关系评估LLM配置"""

View File

@ -222,8 +222,8 @@ class HeartFChatting:
# --- 移除 gpt_instance, 直接初始化 LLM 模型 --- # --- 移除 gpt_instance, 直接初始化 LLM 模型 ---
# self.gpt_instance = HeartFCGenerator() # <-- 移除 # self.gpt_instance = HeartFCGenerator() # <-- 移除
self.model_normal = LLMRequest( # <-- 新增 LLM 初始化 self.model_normal = LLMRequest( # <-- 新增 LLM 初始化
model=global_config.llm_normal, model=global_config.model.normal,
temperature=global_config.llm_normal["temp"], temperature=global_config.model.normal["temp"],
max_tokens=256, max_tokens=256,
request_type="response_heartflow", request_type="response_heartflow",
) )
@ -231,7 +231,7 @@ class HeartFChatting:
# LLM规划器配置 # LLM规划器配置
self.planner_llm = LLMRequest( self.planner_llm = LLMRequest(
model=global_config.llm_plan, model=global_config.model.plan,
max_tokens=1000, max_tokens=1000,
request_type="action_planning", # 用于动作规划 request_type="action_planning", # 用于动作规划
) )
@ -876,7 +876,7 @@ class HeartFChatting:
message_list_before_now = get_raw_msg_before_timestamp_with_chat( message_list_before_now = get_raw_msg_before_timestamp_with_chat(
chat_id=self.stream_id, chat_id=self.stream_id,
timestamp=time.time(), # 使用当前时间作为参考点 timestamp=time.time(), # 使用当前时间作为参考点
limit=global_config.observation_context_size, # 使用与 prompt 构建一致的 limit limit=global_config.chat.observation_context_size, # 使用与 prompt 构建一致的 limit
) )
# 调用工具函数获取格式化后的绰号字符串 # 调用工具函数获取格式化后的绰号字符串
nickname_injection_str = await nickname_manager.get_nickname_prompt_injection( nickname_injection_str = await nickname_manager.get_nickname_prompt_injection(
@ -1196,8 +1196,8 @@ class HeartFChatting:
first_bot_msg: Optional[MessageSending] = None first_bot_msg: Optional[MessageSending] = None
reply_message_ids = [] # 记录实际发送的消息ID reply_message_ids = [] # 记录实际发送的消息ID
bot_user_info = UserInfo( bot_user_info = UserInfo(
user_id=global_config.BOT_QQ, user_id=global_config.bot.qq_account,
user_nickname=global_config.BOT_NICKNAME, user_nickname=global_config.bot.nickname,
platform=anchor_message.message_info.platform, platform=anchor_message.message_info.platform,
) )
@ -1263,8 +1263,8 @@ class HeartFChatting:
thinking_time_point = round(time.time(), 2) # 用于唯一ID thinking_time_point = round(time.time(), 2) # 用于唯一ID
message_segment = Seg(type="emoji", data=emoji_cq) message_segment = Seg(type="emoji", data=emoji_cq)
bot_user_info = UserInfo( bot_user_info = UserInfo(
user_id=global_config.BOT_QQ, user_id=global_config.bot.qq_account,
user_nickname=global_config.BOT_NICKNAME, user_nickname=global_config.bot.nickname,
platform=anchor_message.message_info.platform, platform=anchor_message.message_info.platform,
) )
bot_message = MessageSending( bot_message = MessageSending(
@ -1318,7 +1318,7 @@ class HeartFChatting:
try: try:
# 1. 获取情绪影响因子并调整模型温度 # 1. 获取情绪影响因子并调整模型温度
arousal_multiplier = mood_manager.get_arousal_multiplier() arousal_multiplier = mood_manager.get_arousal_multiplier()
current_temp = global_config.llm_normal["temp"] * arousal_multiplier current_temp = global_config.model.normal["temp"] * arousal_multiplier
self.model_normal.temperature = current_temp # 动态调整温度 self.model_normal.temperature = current_temp # 动态调整温度
# 2. 获取信息捕捉器 # 2. 获取信息捕捉器
@ -1401,8 +1401,8 @@ class HeartFChatting:
chat = anchor_message.chat_stream chat = anchor_message.chat_stream
messageinfo = anchor_message.message_info messageinfo = anchor_message.message_info
bot_user_info = UserInfo( bot_user_info = UserInfo(
user_id=global_config.BOT_QQ, user_id=global_config.bot.qq_account,
user_nickname=global_config.BOT_NICKNAME, user_nickname=global_config.bot.nickname,
platform=messageinfo.platform, platform=messageinfo.platform,
) )

View File

@ -38,7 +38,7 @@ class Heartflow:
# LLM模型配置 # LLM模型配置
self.llm_model = LLMRequest( self.llm_model = LLMRequest(
model=global_config.llm_heartflow, temperature=0.6, max_tokens=1000, request_type="heart_flow" model=global_config.model.heartflow, temperature=0.6, max_tokens=1000, request_type="heart_flow"
) )
# 外部依赖模块 # 外部依赖模块

View File

@ -20,9 +20,9 @@ MAX_REPLY_PROBABILITY = 1
class InterestChatting: class InterestChatting:
def __init__( def __init__(
self, self,
decay_rate=global_config.default_decay_rate_per_second, decay_rate=global_config.focus_chat.default_decay_rate_per_second,
max_interest=MAX_INTEREST, max_interest=MAX_INTEREST,
trigger_threshold=global_config.reply_trigger_threshold, trigger_threshold=global_config.focus_chat.reply_trigger_threshold,
max_probability=MAX_REPLY_PROBABILITY, max_probability=MAX_REPLY_PROBABILITY,
): ):
# 基础属性初始化 # 基础属性初始化

View File

@ -19,8 +19,8 @@ prevent_offline_state = True
# 目前默认不启用OFFLINE状态 # 目前默认不启用OFFLINE状态
# 不同状态下普通聊天的最大消息数 # 不同状态下普通聊天的最大消息数
base_normal_chat_num = global_config.base_normal_chat_num base_normal_chat_num = global_config.chat.base_normal_chat_num
base_focused_chat_num = global_config.base_focused_chat_num base_focused_chat_num = global_config.chat.base_focused_chat_num
MAX_NORMAL_CHAT_NUM_PEEKING = int(base_normal_chat_num / 2) MAX_NORMAL_CHAT_NUM_PEEKING = int(base_normal_chat_num / 2)

View File

@ -101,7 +101,7 @@ class Mind:
else self.individuality.personality.personality_core else self.individuality.personality.personality_core
) )
mood_info = mai_state_info.get_mood_prompt() mood_info = mai_state_info.get_mood_prompt()
bot_name = global_config.BOT_NICKNAME bot_name = global_config.bot.nickname
try: try:
prompt = (await global_prompt_manager.get_prompt_async("mind_summary_prompt")).format( prompt = (await global_prompt_manager.get_prompt_async("mind_summary_prompt")).format(

View File

@ -66,16 +66,16 @@ class ChattingObservation(Observation):
self.talking_message = [] self.talking_message = []
self.talking_message_str = "" self.talking_message_str = ""
self.talking_message_str_truncate = "" self.talking_message_str_truncate = ""
self.name = global_config.BOT_NICKNAME self.name = global_config.bot.nickname
self.nick_name = global_config.BOT_ALIAS_NAMES self.nick_name = global_config.bot.alias_names
self.max_now_obs_len = global_config.observation_context_size self.max_now_obs_len = global_config.chat.observation_context_size
self.overlap_len = global_config.compressed_length self.overlap_len = global_config.focus_chat.compressed_length
self.mid_memorys = [] self.mid_memorys = []
self.max_mid_memory_len = global_config.compress_length_limit self.max_mid_memory_len = global_config.focus_chat.compress_length_limit
self.mid_memory_info = "" self.mid_memory_info = ""
self.person_list = [] self.person_list = []
self.llm_summary = LLMRequest( self.llm_summary = LLMRequest(
model=global_config.llm_observation, temperature=0.7, max_tokens=300, request_type="chat_observation" model=global_config.model.observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
) )
async def initialize(self): async def initialize(self):

View File

@ -190,8 +190,8 @@ class SubMind:
self.subheartflow_id = subheartflow_id self.subheartflow_id = subheartflow_id
self.llm_model = LLMRequest( self.llm_model = LLMRequest(
model=global_config.llm_sub_heartflow, model=global_config.model.sub_heartflow,
temperature=global_config.llm_sub_heartflow["temp"], temperature=global_config.model.sub_heartflow["temp"],
max_tokens=1000, max_tokens=1000,
request_type="sub_heart_flow", request_type="sub_heart_flow",
) )
@ -594,7 +594,7 @@ class SubMind:
message_list_for_nicknames = get_raw_msg_before_timestamp_with_chat( message_list_for_nicknames = get_raw_msg_before_timestamp_with_chat(
chat_id=self.subheartflow_id, chat_id=self.subheartflow_id,
timestamp=time.time(), timestamp=time.time(),
limit=global_config.observation_context_size, limit=global_config.chat.observation_context_size,
) )
nickname_injection_str = await nickname_manager.get_nickname_prompt_injection( nickname_injection_str = await nickname_manager.get_nickname_prompt_injection(
chat_stream, message_list_for_nicknames chat_stream, message_list_for_nicknames
@ -683,7 +683,7 @@ class SubMind:
logger.warning(f"{self.log_prefix} LLM返回空结果思考失败。") logger.warning(f"{self.log_prefix} LLM返回空结果思考失败。")
# ---------- 7. 应用概率性去重和修饰 ---------- # ---------- 7. 应用概率性去重和修饰 ----------
if global_config.allow_remove_duplicates: if global_config.chat.allow_remove_duplicates:
new_content = content # 保存 LLM 直接输出的结果 new_content = content # 保存 LLM 直接输出的结果
try: try:
similarity = calculate_similarity(previous_mind, new_content) similarity = calculate_similarity(previous_mind, new_content)

View File

@ -77,7 +77,7 @@ class SubHeartflowManager:
# 为 LLM 状态评估创建一个 LLMRequest 实例 # 为 LLM 状态评估创建一个 LLMRequest 实例
# 使用与 Heartflow 相同的模型和参数 # 使用与 Heartflow 相同的模型和参数
self.llm_state_evaluator = LLMRequest( self.llm_state_evaluator = LLMRequest(
model=global_config.llm_heartflow, # 与 Heartflow 一致 model=global_config.model.heartflow, # 与 Heartflow 一致
temperature=0.6, # 与 Heartflow 一致 temperature=0.6, # 与 Heartflow 一致
max_tokens=1000, # 与 Heartflow 一致 (虽然可能不需要这么多) max_tokens=1000, # 与 Heartflow 一致 (虽然可能不需要这么多)
request_type="subheartflow_state_eval", # 保留特定的请求类型 request_type="subheartflow_state_eval", # 保留特定的请求类型
@ -278,7 +278,7 @@ class SubHeartflowManager:
focused_limit = current_state.get_focused_chat_max_num() focused_limit = current_state.get_focused_chat_max_num()
# --- 新增:检查是否允许进入 FOCUS 模式 --- # # --- 新增:检查是否允许进入 FOCUS 模式 --- #
if not global_config.allow_focus_mode: if not global_config.chat.allow_focus_mode:
if int(time.time()) % 60 == 0: # 每60秒输出一次日志避免刷屏 if int(time.time()) % 60 == 0: # 每60秒输出一次日志避免刷屏
logger.trace("未开启 FOCUSED 状态 (allow_focus_mode=False)") logger.trace("未开启 FOCUSED 状态 (allow_focus_mode=False)")
return # 如果不允许,直接返回 return # 如果不允许,直接返回
@ -766,7 +766,7 @@ class SubHeartflowManager:
focused_limit = current_mai_state.get_focused_chat_max_num() focused_limit = current_mai_state.get_focused_chat_max_num()
# --- 检查是否允许 FOCUS 模式 --- # # --- 检查是否允许 FOCUS 模式 --- #
if not global_config.allow_focus_mode: if not global_config.chat.allow_focus_mode:
# Log less frequently to avoid spam # Log less frequently to avoid spam
# if int(time.time()) % 60 == 0: # if int(time.time()) % 60 == 0:
# logger.debug(f"{log_prefix_task} 配置不允许进入 FOCUSED 状态") # logger.debug(f"{log_prefix_task} 配置不允许进入 FOCUSED 状态")

View File

@ -5,10 +5,10 @@ from src.config.config import global_config
from src.chat.message_receive.message import MessageRecv from src.chat.message_receive.message import MessageRecv
from src.chat.message_receive.storage import MessageStorage from src.chat.message_receive.storage import MessageStorage
from src.chat.utils.utils import is_mentioned_bot_in_message from src.chat.utils.utils import is_mentioned_bot_in_message
from maim_message import Seg from maim_message import Seg, UserInfo
from .heart_flow.heartflow import heartflow from .heart_flow.heartflow import heartflow
from src.common.logger_manager import get_logger from src.common.logger_manager import get_logger
from src.chat.message_receive.chat_stream import chat_manager from src.chat.message_receive.chat_stream import ChatStream, chat_manager
from src.chat.message_receive.message_buffer import message_buffer from src.chat.message_receive.message_buffer import message_buffer
from src.chat.utils.timer_calculator import Timer from src.chat.utils.timer_calculator import Timer
from src.chat.person_info.relationship_manager import relationship_manager from src.chat.person_info.relationship_manager import relationship_manager
@ -100,7 +100,7 @@ def _get_message_type(message: MessageRecv) -> str:
return "seglist" return "seglist"
def _check_ban_words(text: str, chat, userinfo) -> bool: def _check_ban_words(text: str, chat: ChatStream, userinfo: UserInfo) -> bool:
"""检查消息是否包含过滤词 """检查消息是否包含过滤词
Args: Args:
@ -111,7 +111,7 @@ def _check_ban_words(text: str, chat, userinfo) -> bool:
Returns: Returns:
bool: 是否包含过滤词 bool: 是否包含过滤词
""" """
for word in global_config.ban_words: for word in global_config.chat.ban_words:
if word in text: if word in text:
chat_name = chat.group_info.group_name if chat.group_info else "私聊" chat_name = chat.group_info.group_name if chat.group_info else "私聊"
logger.info(f"[{chat_name}]{userinfo.user_nickname}:{text}") logger.info(f"[{chat_name}]{userinfo.user_nickname}:{text}")
@ -120,7 +120,7 @@ def _check_ban_words(text: str, chat, userinfo) -> bool:
return False return False
def _check_ban_regex(text: str, chat, userinfo) -> bool: def _check_ban_regex(text: str, chat: ChatStream, userinfo: UserInfo) -> bool:
"""检查消息是否匹配过滤正则表达式 """检查消息是否匹配过滤正则表达式
Args: Args:
@ -131,7 +131,7 @@ def _check_ban_regex(text: str, chat, userinfo) -> bool:
Returns: Returns:
bool: 是否匹配过滤正则 bool: 是否匹配过滤正则
""" """
for pattern in global_config.ban_msgs_regex: for pattern in global_config.chat.ban_msgs_regex:
if pattern.search(text): if pattern.search(text):
chat_name = chat.group_info.group_name if chat.group_info else "私聊" chat_name = chat.group_info.group_name if chat.group_info else "私聊"
logger.info(f"[{chat_name}]{userinfo.user_nickname}:{text}") logger.info(f"[{chat_name}]{userinfo.user_nickname}:{text}")

View File

@ -244,7 +244,7 @@ async def _build_prompt_focus(reason, current_mind_info, structured_info, chat_s
message_list_before_now = get_raw_msg_before_timestamp_with_chat( message_list_before_now = get_raw_msg_before_timestamp_with_chat(
chat_id=chat_stream.stream_id, chat_id=chat_stream.stream_id,
timestamp=time.time(), timestamp=time.time(),
limit=global_config.observation_context_size, limit=global_config.chat.observation_context_size,
) )
chat_talking_prompt = await build_readable_messages( chat_talking_prompt = await build_readable_messages(
message_list_before_now, message_list_before_now,
@ -264,7 +264,7 @@ async def _build_prompt_focus(reason, current_mind_info, structured_info, chat_s
prompt_ger += "**不用输出对方的网名或绰号**" prompt_ger += "**不用输出对方的网名或绰号**"
if random.random() < 0.00: if random.random() < 0.00:
prompt_ger += "你喜欢用反问句" prompt_ger += "你喜欢用反问句"
if is_group_chat and global_config.enable_expression_learner: if is_group_chat and global_config.personality.enable_expression_learner:
# 从/data/expression/对应chat_id/expressions.json中读取表达方式 # 从/data/expression/对应chat_id/expressions.json中读取表达方式
( (
learnt_style_expressions, learnt_style_expressions,
@ -350,7 +350,7 @@ async def _build_prompt_focus(reason, current_mind_info, structured_info, chat_s
nickname_info=nickname_injection_str, nickname_info=nickname_injection_str,
chat_target=chat_target_1, # Used in group template chat_target=chat_target_1, # Used in group template
chat_talking_prompt=chat_talking_prompt, chat_talking_prompt=chat_talking_prompt,
bot_name=global_config.BOT_NICKNAME, bot_name=global_config.bot.nickname,
prompt_personality=prompt_personality, prompt_personality=prompt_personality,
chat_target_2=chat_target_2, # Used in group template chat_target_2=chat_target_2, # Used in group template
current_mind_info=current_mind_info, current_mind_info=current_mind_info,
@ -370,7 +370,7 @@ async def _build_prompt_focus(reason, current_mind_info, structured_info, chat_s
info_from_tools=structured_info_prompt, info_from_tools=structured_info_prompt,
sender_name=effective_sender_name, # Used in private template sender_name=effective_sender_name, # Used in private template
chat_talking_prompt=chat_talking_prompt, chat_talking_prompt=chat_talking_prompt,
bot_name=global_config.BOT_NICKNAME, bot_name=global_config.bot.nickname,
prompt_personality=prompt_personality, prompt_personality=prompt_personality,
# chat_target and chat_target_2 are not used in private template # chat_target and chat_target_2 are not used in private template
current_mind_info=current_mind_info, current_mind_info=current_mind_info,
@ -426,7 +426,7 @@ class PromptBuilder:
who_chat_in_group = get_recent_group_speaker( who_chat_in_group = get_recent_group_speaker(
chat_stream.stream_id, chat_stream.stream_id,
(chat_stream.user_info.platform, chat_stream.user_info.user_id) if chat_stream.user_info else None, (chat_stream.user_info.platform, chat_stream.user_info.user_id) if chat_stream.user_info else None,
limit=global_config.observation_context_size, limit=global_config.chat.observation_context_size,
) )
elif chat_stream.user_info: elif chat_stream.user_info:
who_chat_in_group.append( who_chat_in_group.append(
@ -473,7 +473,7 @@ class PromptBuilder:
message_list_before_now = get_raw_msg_before_timestamp_with_chat( message_list_before_now = get_raw_msg_before_timestamp_with_chat(
chat_id=chat_stream.stream_id, chat_id=chat_stream.stream_id,
timestamp=time.time(), timestamp=time.time(),
limit=global_config.observation_context_size, limit=global_config.chat.observation_context_size,
) )
chat_talking_prompt = await build_readable_messages( chat_talking_prompt = await build_readable_messages(
message_list_before_now, message_list_before_now,
@ -485,7 +485,8 @@ class PromptBuilder:
# 关键词检测与反应 # 关键词检测与反应
keywords_reaction_prompt = "" keywords_reaction_prompt = ""
for rule in global_config.keywords_reaction_rules: keywords_reaction_prompt = ""
for rule in global_config.keyword_reaction.rules:
if rule.get("enable", False): if rule.get("enable", False):
if any(keyword in message_txt.lower() for keyword in rule.get("keywords", [])): if any(keyword in message_txt.lower() for keyword in rule.get("keywords", [])):
logger.info( logger.info(
@ -517,7 +518,7 @@ class PromptBuilder:
end_time = time.time() end_time = time.time()
logger.debug(f"知识检索耗时: {(end_time - start_time):.3f}") logger.debug(f"知识检索耗时: {(end_time - start_time):.3f}")
if global_config.ENABLE_SCHEDULE_GEN: if global_config.schedule.enable:
schedule_prompt = await global_prompt_manager.format_prompt( schedule_prompt = await global_prompt_manager.format_prompt(
"schedule_prompt", schedule_info=bot_schedule.get_current_num_task(num=1, time_info=False) "schedule_prompt", schedule_info=bot_schedule.get_current_num_task(num=1, time_info=False)
) )
@ -550,8 +551,8 @@ class PromptBuilder:
chat_target_2=chat_target_2, chat_target_2=chat_target_2,
chat_talking_prompt=chat_talking_prompt, chat_talking_prompt=chat_talking_prompt,
message_txt=message_txt, message_txt=message_txt,
bot_name=global_config.BOT_NICKNAME, bot_name=global_config.bot.nickname,
bot_other_names="/".join(global_config.BOT_ALIAS_NAMES), bot_other_names="/".join(global_config.bot.alias_names),
prompt_personality=prompt_personality, prompt_personality=prompt_personality,
mood_prompt=mood_prompt, mood_prompt=mood_prompt,
reply_style1=reply_style1_chosen, reply_style1=reply_style1_chosen,
@ -573,8 +574,8 @@ class PromptBuilder:
schedule_prompt=schedule_prompt, schedule_prompt=schedule_prompt,
chat_talking_prompt=chat_talking_prompt, chat_talking_prompt=chat_talking_prompt,
message_txt=message_txt, message_txt=message_txt,
bot_name=global_config.BOT_NICKNAME, bot_name=global_config.bot.nickname,
bot_other_names="/".join(global_config.BOT_ALIAS_NAMES), bot_other_names="/".join(global_config.bot.alias_names),
prompt_personality=prompt_personality, prompt_personality=prompt_personality,
mood_prompt=mood_prompt, mood_prompt=mood_prompt,
reply_style1=reply_style1_chosen, reply_style1=reply_style1_chosen,
@ -933,7 +934,7 @@ class PromptBuilder:
planner_prompt_template = await global_prompt_manager.get_prompt_async("planner_prompt") planner_prompt_template = await global_prompt_manager.get_prompt_async("planner_prompt")
prompt = planner_prompt_template.format( prompt = planner_prompt_template.format(
bot_name=global_config.BOT_NICKNAME, bot_name=global_config.bot.nickname,
nickname_info=nickname_info, nickname_info=nickname_info,
prompt_personality=prompt_personality, prompt_personality=prompt_personality,
chat_context_description=chat_context_description, chat_context_description=chat_context_description,

View File

@ -75,8 +75,8 @@ class NormalChat:
messageinfo = message.message_info messageinfo = message.message_info
bot_user_info = UserInfo( bot_user_info = UserInfo(
user_id=global_config.BOT_QQ, user_id=global_config.bot.qq_account,
user_nickname=global_config.BOT_NICKNAME, user_nickname=global_config.bot.nickname,
platform=messageinfo.platform, platform=messageinfo.platform,
) )
@ -123,8 +123,8 @@ class NormalChat:
message_id=thinking_id, message_id=thinking_id,
chat_stream=self.chat_stream, # 使用 self.chat_stream chat_stream=self.chat_stream, # 使用 self.chat_stream
bot_user_info=UserInfo( bot_user_info=UserInfo(
user_id=global_config.BOT_QQ, user_id=global_config.bot.qq_account,
user_nickname=global_config.BOT_NICKNAME, user_nickname=global_config.bot.nickname,
platform=message.message_info.platform, platform=message.message_info.platform,
), ),
sender_info=message.message_info.user_info, sender_info=message.message_info.user_info,
@ -149,7 +149,7 @@ class NormalChat:
# 改为实例方法 # 改为实例方法
async def _handle_emoji(self, message: MessageRecv, response: str): async def _handle_emoji(self, message: MessageRecv, response: str):
"""处理表情包""" """处理表情包"""
if random() < global_config.emoji_chance: if random() < global_config.normal_chat.emoji_chance:
emoji_raw = await emoji_manager.get_emoji_for_text(response) emoji_raw = await emoji_manager.get_emoji_for_text(response)
if emoji_raw: if emoji_raw:
emoji_path, description = emoji_raw emoji_path, description = emoji_raw
@ -162,8 +162,8 @@ class NormalChat:
message_id="mt" + str(thinking_time_point), message_id="mt" + str(thinking_time_point),
chat_stream=self.chat_stream, # 使用 self.chat_stream chat_stream=self.chat_stream, # 使用 self.chat_stream
bot_user_info=UserInfo( bot_user_info=UserInfo(
user_id=global_config.BOT_QQ, user_id=global_config.bot.qq_account,
user_nickname=global_config.BOT_NICKNAME, user_nickname=global_config.bot.nickname,
platform=message.message_info.platform, platform=message.message_info.platform,
), ),
sender_info=message.message_info.user_info, sender_info=message.message_info.user_info,
@ -188,7 +188,7 @@ class NormalChat:
label=emotion, label=emotion,
stance=stance, # 使用 self.chat_stream stance=stance, # 使用 self.chat_stream
) )
self.mood_manager.update_mood_from_emotion(emotion, global_config.mood_intensity_factor) self.mood_manager.update_mood_from_emotion(emotion, global_config.mood.mood_intensity_factor)
async def _reply_interested_message(self) -> None: async def _reply_interested_message(self) -> None:
""" """
@ -432,7 +432,7 @@ class NormalChat:
def _check_ban_words(text: str, chat: ChatStream, userinfo: UserInfo) -> bool: def _check_ban_words(text: str, chat: ChatStream, userinfo: UserInfo) -> bool:
"""检查消息中是否包含过滤词""" """检查消息中是否包含过滤词"""
stream_name = chat_manager.get_stream_name(chat.stream_id) or chat.stream_id stream_name = chat_manager.get_stream_name(chat.stream_id) or chat.stream_id
for word in global_config.ban_words: for word in global_config.chat.ban_words:
if word in text: if word in text:
logger.info( logger.info(
f"[{stream_name}][{chat.group_info.group_name if chat.group_info else '私聊'}]" f"[{stream_name}][{chat.group_info.group_name if chat.group_info else '私聊'}]"
@ -447,7 +447,7 @@ class NormalChat:
def _check_ban_regex(text: str, chat: ChatStream, userinfo: UserInfo) -> bool: def _check_ban_regex(text: str, chat: ChatStream, userinfo: UserInfo) -> bool:
"""检查消息是否匹配过滤正则表达式""" """检查消息是否匹配过滤正则表达式"""
stream_name = chat_manager.get_stream_name(chat.stream_id) or chat.stream_id stream_name = chat_manager.get_stream_name(chat.stream_id) or chat.stream_id
for pattern in global_config.ban_msgs_regex: for pattern in global_config.chat.ban_msgs_regex:
if pattern.search(text): if pattern.search(text):
logger.info( logger.info(
f"[{stream_name}][{chat.group_info.group_name if chat.group_info else '私聊'}]" f"[{stream_name}][{chat.group_info.group_name if chat.group_info else '私聊'}]"

View File

@ -16,20 +16,20 @@ logger = get_logger("llm")
class NormalChatGenerator: class NormalChatGenerator:
def __init__(self): def __init__(self):
self.model_reasoning = LLMRequest( self.model_reasoning = LLMRequest(
model=global_config.llm_reasoning, model=global_config.model.reasoning,
temperature=0.7, temperature=0.7,
max_tokens=3000, max_tokens=3000,
request_type="response_reasoning", request_type="response_reasoning",
) )
self.model_normal = LLMRequest( self.model_normal = LLMRequest(
model=global_config.llm_normal, model=global_config.model.normal,
temperature=global_config.llm_normal["temp"], temperature=global_config.model.normal["temp"],
max_tokens=256, max_tokens=256,
request_type="response_reasoning", request_type="response_reasoning",
) )
self.model_sum = LLMRequest( self.model_sum = LLMRequest(
model=global_config.llm_summary, temperature=0.7, max_tokens=3000, request_type="relation" model=global_config.model.summary, temperature=0.7, max_tokens=3000, request_type="relation"
) )
self.current_model_type = "r1" # 默认使用 R1 self.current_model_type = "r1" # 默认使用 R1
self.current_model_name = "unknown model" self.current_model_name = "unknown model"
@ -37,7 +37,7 @@ class NormalChatGenerator:
async def generate_response(self, message: MessageThinking, thinking_id: str) -> Optional[Union[str, List[str]]]: async def generate_response(self, message: MessageThinking, thinking_id: str) -> Optional[Union[str, List[str]]]:
"""根据当前模型类型选择对应的生成函数""" """根据当前模型类型选择对应的生成函数"""
# 从global_config中获取模型概率值并选择模型 # 从global_config中获取模型概率值并选择模型
if random.random() < global_config.model_reasoning_probability: if random.random() < global_config.normal_chat.reasoning_model_probability:
self.current_model_type = "深深地" self.current_model_type = "深深地"
current_model = self.model_reasoning current_model = self.model_reasoning
else: else:
@ -51,7 +51,7 @@ class NormalChatGenerator:
model_response = await self._generate_response_with_model(message, current_model, thinking_id) model_response = await self._generate_response_with_model(message, current_model, thinking_id)
if model_response: if model_response:
logger.info(f"{global_config.BOT_NICKNAME}的回复是:{model_response}") logger.info(f"{global_config.bot.nickname}的回复是:{model_response}")
model_response = await self._process_response(model_response) model_response = await self._process_response(model_response)
return model_response return model_response
@ -113,7 +113,7 @@ class NormalChatGenerator:
- "中立"不表达明确立场或无关回应 - "中立"不表达明确立场或无关回应
2. "开心,愤怒,悲伤,惊讶,平静,害羞,恐惧,厌恶,困惑"中选出最匹配的1个情感标签 2. "开心,愤怒,悲伤,惊讶,平静,害羞,恐惧,厌恶,困惑"中选出最匹配的1个情感标签
3. 按照"立场-情绪"的格式直接输出结果例如"反对-愤怒" 3. 按照"立场-情绪"的格式直接输出结果例如"反对-愤怒"
4. 考虑回复者的人格设定为{global_config.personality_core} 4. 考虑回复者的人格设定为{global_config.personality.personality_core}
对话示例 对话示例
被回复A就是笨 被回复A就是笨

View File

@ -13,7 +13,7 @@ from src.common.logger import get_module_logger, SCHEDULE_STYLE_CONFIG, LogConfi
from src.chat.models.utils_model import LLMRequest # noqa: E402 from src.chat.models.utils_model import LLMRequest # noqa: E402
from src.config.config import global_config # noqa: E402 from src.config.config import global_config # noqa: E402
TIME_ZONE = tz.gettz(global_config.TIME_ZONE) # 设置时区 TIME_ZONE = tz.gettz(global_config.schedule.time_zone) # 设置时区
schedule_config = LogConfig( schedule_config = LogConfig(
@ -31,14 +31,14 @@ class ScheduleGenerator:
# 使用离线LLM模型 # 使用离线LLM模型
self.enable_output = None self.enable_output = None
self.llm_scheduler_all = LLMRequest( self.llm_scheduler_all = LLMRequest(
model=global_config.llm_scheduler_all, model=global_config.model.scheduler_all,
temperature=global_config.llm_scheduler_all["temp"], temperature=global_config.model.scheduler_all["temp"],
max_tokens=7000, max_tokens=7000,
request_type="schedule", request_type="schedule",
) )
self.llm_scheduler_doing = LLMRequest( self.llm_scheduler_doing = LLMRequest(
model=global_config.llm_scheduler_doing, model=global_config.model.scheduler_doing,
temperature=global_config.llm_scheduler_doing["temp"], temperature=global_config.model.scheduler_doing["temp"],
max_tokens=2048, max_tokens=2048,
request_type="schedule", request_type="schedule",
) )
@ -73,7 +73,7 @@ class ScheduleGenerator:
async def mai_schedule_start(self): async def mai_schedule_start(self):
"""启动日程系统每5分钟执行一次move_doing并在日期变化时重新检查日程""" """启动日程系统每5分钟执行一次move_doing并在日期变化时重新检查日程"""
try: try:
if global_config.ENABLE_SCHEDULE_GEN: if global_config.schedule.enable:
logger.info(f"日程系统启动/刷新时间: {self.start_time.strftime('%Y-%m-%d %H:%M:%S')}") logger.info(f"日程系统启动/刷新时间: {self.start_time.strftime('%Y-%m-%d %H:%M:%S')}")
# 初始化日程 # 初始化日程
await self.check_and_create_today_schedule() await self.check_and_create_today_schedule()

View File

@ -140,7 +140,7 @@ class IdleChat:
self._lock: asyncio.Lock = asyncio.Lock() self._lock: asyncio.Lock = asyncio.Lock()
# LLM请求对象用于生成主动对话内容 # LLM请求对象用于生成主动对话内容
self.llm = LLMRequest(model=global_config.llm_normal, temperature=0.5, max_tokens=500, request_type="idle_chat") self.llm = LLMRequest(model=global_config.model.normal, temperature=0.5, max_tokens=500, request_type="idle_chat")
# 工作状态 # 工作状态
self.active_instances_count: int = 0 self.active_instances_count: int = 0
@ -149,9 +149,9 @@ class IdleChat:
self._task: Optional[asyncio.Task] = None self._task: Optional[asyncio.Task] = None
# 配置参数 - 从global_config加载 # 配置参数 - 从global_config加载
self.min_cooldown = global_config.min_cooldown # 最短冷却时间默认2小时 self.min_cooldown = global_config.pfc.min_cooldown # 最短冷却时间默认2小时
self.max_cooldown = global_config.max_cooldown # 最长冷却时间默认5小时 self.max_cooldown = global_config.pfc.max_cooldown # 最长冷却时间默认5小时
self.check_interval = global_config.idle_check_interval * 60 # 检查间隔默认10分钟转换为秒 self.check_interval = global_config.pfc.idle_check_interval * 60 # 检查间隔默认10分钟转换为秒
self.active_hours_start = 7 # 活动开始时间 self.active_hours_start = 7 # 活动开始时间
self.active_hours_end = 23 # 活动结束时间 self.active_hours_end = 23 # 活动结束时间
@ -162,7 +162,7 @@ class IdleChat:
def start(self) -> None: def start(self) -> None:
"""启动主动聊天检测""" """启动主动聊天检测"""
# 检查是否启用了主动聊天功能 # 检查是否启用了主动聊天功能
if not global_config.enable_idle_chat: if not global_config.pfc.enable_idle_chat:
logger.info(f"[私聊][{self.private_name}]主动聊天功能已禁用配置enable_idle_chat=False") logger.info(f"[私聊][{self.private_name}]主动聊天功能已禁用配置enable_idle_chat=False")
return return
@ -352,7 +352,7 @@ class IdleChat:
try: try:
while self._running: while self._running:
# 检查是否启用了主动聊天功能 # 检查是否启用了主动聊天功能
if not global_config.enable_idle_chat: if not global_config.pfc.enable_idle_chat:
# 如果禁用了功能,等待一段时间后再次检查配置 # 如果禁用了功能,等待一段时间后再次检查配置
await asyncio.sleep(60) # 每分钟检查一次配置变更 await asyncio.sleep(60) # 每分钟检查一次配置变更
continue continue
@ -482,7 +482,7 @@ class IdleChat:
relationship_description = full_relationship_text.split("")[1].replace("", "") relationship_description = full_relationship_text.split("")[1].replace("", "")
# 暂不使用 # 暂不使用
# if global_config.ENABLE_SCHEDULE_GEN: # if global_config.schedule.enable:
# schedule_prompt = await global_prompt_manager.format_prompt( # schedule_prompt = await global_prompt_manager.format_prompt(
# "schedule_prompt", schedule_info=bot_schedule.get_current_num_task(num=1, time_info=False) # "schedule_prompt", schedule_info=bot_schedule.get_current_num_task(num=1, time_info=False)
# ) # )
@ -491,7 +491,7 @@ class IdleChat:
# 构建提示词,暂存废弃部分这是你的日程{schedule_prompt} # 构建提示词,暂存废弃部分这是你的日程{schedule_prompt}
current_time = datetime.now().strftime("%H:%M") current_time = datetime.now().strftime("%H:%M")
prompt = f"""你是{global_config.BOT_NICKNAME} prompt = f"""你是{global_config.bot.nickname}
你正在与用户{self.private_name}进行QQ私聊你们的关系是{relationship_description} 你正在与用户{self.private_name}进行QQ私聊你们的关系是{relationship_description}
现在时间{current_time} 现在时间{current_time}

View File

@ -127,7 +127,7 @@ class ActionHandler(ABC):
"time": send_time, "time": send_time,
"user_info": { "user_info": {
"user_id": self.conversation.bot_qq_str, "user_id": self.conversation.bot_qq_str,
"user_nickname": global_config.BOT_NICKNAME, "user_nickname": global_config.bot.nickname,
"platform": self.conversation.chat_stream.platform "platform": self.conversation.chat_stream.platform
if self.conversation.chat_stream if self.conversation.chat_stream
else "unknown_platform", else "unknown_platform",
@ -138,7 +138,7 @@ class ActionHandler(ABC):
observation_info.chat_history.append(bot_message_dict) observation_info.chat_history.append(bot_message_dict)
observation_info.chat_history_count = len(observation_info.chat_history) observation_info.chat_history_count = len(observation_info.chat_history)
self.logger.debug( self.logger.debug(
f"[私聊][{self.conversation.private_name}] {global_config.BOT_NICKNAME}发送的消息 ('{message_content[:30]}...')已添加到 chat_history。当前历史数: {observation_info.chat_history_count}" f"[私聊][{self.conversation.private_name}] {global_config.bot.nickname}发送的消息 ('{message_content[:30]}...')已添加到 chat_history。当前历史数: {observation_info.chat_history_count}"
) )
# 限制历史记录长度 # 限制历史记录长度
@ -422,7 +422,7 @@ class BaseTextReplyHandler(ActionHandler):
current_time_value_for_check = observation_info.current_time_str or "获取时间失败" current_time_value_for_check = observation_info.current_time_str or "获取时间失败"
# 调用 ReplyChecker # 调用 ReplyChecker
if global_config.enable_pfc_reply_checker: if global_config.pfc.enable_pfc_reply_checker:
self.logger.debug(f"{log_prefix} 调用 ReplyChecker 检查 (配置已启用)...") self.logger.debug(f"{log_prefix} 调用 ReplyChecker 检查 (配置已启用)...")
is_suitable_check, reason_check, need_replan_check = await self.conversation.reply_checker.check( is_suitable_check, reason_check, need_replan_check = await self.conversation.reply_checker.check(
reply=current_content_for_check, reply=current_content_for_check,

View File

@ -159,9 +159,9 @@ class ActionPlanner:
self.private_name = private_name self.private_name = private_name
# 初始化 LLM 请求对象 # 初始化 LLM 请求对象
try: try:
llm_config = global_config.llm_PFC_action_planner llm_config = global_config.model.pfc_action_planner
if not isinstance(llm_config, dict): if not isinstance(llm_config, dict):
raise TypeError(f"LLM config 'llm_PFC_action_planner' is not a dictionary: {llm_config}") raise TypeError(f"LLM config 'pfc_action_planner' is not a dictionary: {llm_config}")
self.llm = LLMRequest( self.llm = LLMRequest(
model=llm_config, model=llm_config,
@ -178,7 +178,7 @@ class ActionPlanner:
# 获取个性化信息和机器人名称 # 获取个性化信息和机器人名称
# self.personality_info = Individuality.get_instance().get_prompt(x_person=2, level=3) # self.personality_info = Individuality.get_instance().get_prompt(x_person=2, level=3)
self.name = global_config.BOT_NICKNAME self.name = global_config.bot.nickname
# 获取 ChatObserver 实例 (单例模式) # 获取 ChatObserver 实例 (单例模式)
self.chat_observer = ChatObserver.get_instance(stream_id, private_name) self.chat_observer = ChatObserver.get_instance(stream_id, private_name)

View File

@ -125,7 +125,7 @@ class ChatObserver:
# 获取消息的发送者 # 获取消息的发送者
user_info = message.get("user_info", {}) user_info = message.get("user_info", {})
if user_info and str(user_info.get("user_id")) != str(global_config.BOT_QQ): if user_info and str(user_info.get("user_id")) != str(global_config.bot.qq_account):
# 用户发送了消息通知IdleChat # 用户发送了消息通知IdleChat
asyncio.create_task(IdleChat.register_user_response(self.private_name)) asyncio.create_task(IdleChat.register_user_response(self.private_name))
logger.debug(f"[私聊][{self.private_name}] 检测到用户消息已通知IdleChat更新用户响应状态") logger.debug(f"[私聊][{self.private_name}] 检测到用户消息已通知IdleChat更新用户响应状态")
@ -352,7 +352,7 @@ class ChatObserver:
for msg in messages: for msg in messages:
try: try:
user_info = UserInfo.from_dict(msg.get("user_info", {})) user_info = UserInfo.from_dict(msg.get("user_info", {}))
if user_info.user_id == global_config.BOT_QQ: if user_info.user_id == global_config.bot.qq_account:
self.update_bot_speak_time(msg["time"]) self.update_bot_speak_time(msg["time"])
else: else:
self.update_user_speak_time(msg["time"]) self.update_user_speak_time(msg["time"])

View File

@ -74,7 +74,7 @@ class Conversation:
self._initialized: bool = False self._initialized: bool = False
self.bot_qq_str: Optional[str] = str(global_config.BOT_QQ) if global_config.BOT_QQ else None self.bot_qq_str: Optional[str] = str(global_config.bot.qq_account) if global_config.bot.qq_account else None
if not self.bot_qq_str: if not self.bot_qq_str:
logger.error(f"[私聊][{self.private_name}] 严重错误:未能从配置中获取 BOT_QQ ID") logger.error(f"[私聊][{self.private_name}] 严重错误:未能从配置中获取 BOT_QQ ID")

View File

@ -143,7 +143,7 @@ async def initialize_core_components(conversation_instance: "Conversation"):
) )
conversation_instance.relationship_updater = PfcRelationshipUpdater( conversation_instance.relationship_updater = PfcRelationshipUpdater(
private_name=conversation_instance.private_name, bot_name=global_config.BOT_NICKNAME private_name=conversation_instance.private_name, bot_name=global_config.bot.nickname
) )
conversation_instance.relationship_translator = PfcRepationshipTranslator( conversation_instance.relationship_translator = PfcRepationshipTranslator(
private_name=conversation_instance.private_name private_name=conversation_instance.private_name
@ -151,7 +151,7 @@ async def initialize_core_components(conversation_instance: "Conversation"):
logger.debug(f"[私聊][{conversation_instance.private_name}] (Initializer) PfcRelationship 初始化完成。") logger.debug(f"[私聊][{conversation_instance.private_name}] (Initializer) PfcRelationship 初始化完成。")
conversation_instance.emotion_updater = PfcEmotionUpdater( conversation_instance.emotion_updater = PfcEmotionUpdater(
private_name=conversation_instance.private_name, bot_name=global_config.BOT_NICKNAME private_name=conversation_instance.private_name, bot_name=global_config.bot.nickname
) )
logger.debug(f"[私聊][{conversation_instance.private_name}] (Initializer) PfcEmotion 初始化完成。") logger.debug(f"[私聊][{conversation_instance.private_name}] (Initializer) PfcEmotion 初始化完成。")

View File

@ -176,7 +176,7 @@ async def run_conversation_loop(conversation_instance: "Conversation"):
if action in ["wait", "listening"] and new_msg_count_action_planning > 0: if action in ["wait", "listening"] and new_msg_count_action_planning > 0:
should_interrupt_action_planning = True should_interrupt_action_planning = True
interrupt_reason_action_planning = f"规划 {action} 期间收到 {new_msg_count_action_planning} 条新消息" interrupt_reason_action_planning = f"规划 {action} 期间收到 {new_msg_count_action_planning} 条新消息"
elif other_new_msg_count_action_planning > global_config.pfc_message_buffer_size: elif other_new_msg_count_action_planning > global_config.pfc.pfc_message_buffer_size:
should_interrupt_action_planning = True should_interrupt_action_planning = True
interrupt_reason_action_planning = ( interrupt_reason_action_planning = (
f"规划 {action} 期间收到 {other_new_msg_count_action_planning} 条来自他人的新消息" f"规划 {action} 期间收到 {other_new_msg_count_action_planning} 条来自他人的新消息"
@ -271,7 +271,7 @@ async def run_conversation_loop(conversation_instance: "Conversation"):
f"[私聊][{conversation_instance.private_name}] (Loop) Found {len(other_new_messages_this_check)} 'other_new_messages_this_check'." f"[私聊][{conversation_instance.private_name}] (Loop) Found {len(other_new_messages_this_check)} 'other_new_messages_this_check'."
) )
if len(other_new_messages_this_check) > global_config.pfc_message_buffer_size: if len(other_new_messages_this_check) > global_config.pfc.pfc_message_buffer_size:
logger.info( logger.info(
f"[私聊][{conversation_instance.private_name}] (Loop) LLM动作 '{action}' 执行期间收到 {len(other_new_messages_this_check)} 条来自他人的新消息将取消LLM任务。" f"[私聊][{conversation_instance.private_name}] (Loop) LLM动作 '{action}' 执行期间收到 {len(other_new_messages_this_check)} 条来自他人的新消息将取消LLM任务。"
) )

View File

@ -39,8 +39,8 @@ class DirectMessageSender:
try: try:
# 获取麦麦的信息 # 获取麦麦的信息
bot_user_info = UserInfo( bot_user_info = UserInfo(
user_id=global_config.BOT_QQ, user_id=global_config.bot.qq_account,
user_nickname=global_config.BOT_NICKNAME, user_nickname=global_config.bot.nickname,
platform=chat_stream.platform, platform=chat_stream.platform,
) )

View File

@ -155,7 +155,7 @@ class ObservationInfo:
try: try:
from ...config.config import global_config from ...config.config import global_config
self.bot_id = str(global_config.BOT_QQ) if global_config.BOT_QQ else None self.bot_id = str(global_config.bot.qq_account) if global_config.bot.qq_account else None
if not self.bot_id: if not self.bot_id:
logger.error(f"[私聊][{self.private_name}] 未能从配置中获取 BOT_QQ ID") logger.error(f"[私聊][{self.private_name}] 未能从配置中获取 BOT_QQ ID")
except ImportError: except ImportError:

View File

@ -42,13 +42,14 @@ class GoalAnalyzer:
"""对话目标分析器""" """对话目标分析器"""
def __init__(self, stream_id: str, private_name: str): def __init__(self, stream_id: str, private_name: str):
# TODO: API-Adapter修改标记
self.llm = LLMRequest( self.llm = LLMRequest(
model=global_config.llm_normal, temperature=0.7, max_tokens=1000, request_type="conversation_goal" model=global_config.model.normal, temperature=0.7, max_tokens=1000, request_type="conversation_goal"
) )
self.personality_info = Individuality.get_instance().get_prompt(x_person=2, level=3) self.personality_info = Individuality.get_instance().get_prompt(x_person=2, level=3)
self.name = global_config.BOT_NICKNAME self.name = global_config.bot.nickname
self.nick_name = global_config.BOT_ALIAS_NAMES self.nick_name = global_config.bot.alias_names
self.private_name = private_name self.private_name = private_name
self.chat_observer = ChatObserver.get_instance(stream_id, private_name) self.chat_observer = ChatObserver.get_instance(stream_id, private_name)

View File

@ -21,7 +21,7 @@ class PfcEmotionUpdater:
self.bot_name = bot_name self.bot_name = bot_name
self.mood_mng = mood_manager self.mood_mng = mood_manager
# LLM 实例 (根据 global_config.llm_summary 配置) # LLM 实例 (根据 global_config.model.summary 配置)
llm_config_summary = getattr(global_config, "llm_summary", None) llm_config_summary = getattr(global_config, "llm_summary", None)
if llm_config_summary and isinstance(llm_config_summary, dict): if llm_config_summary and isinstance(llm_config_summary, dict):
logger.debug(f"[私聊][{self.private_name}] 使用 llm_summary 配置初始化情绪判断LLM。") logger.debug(f"[私聊][{self.private_name}] 使用 llm_summary 配置初始化情绪判断LLM。")

View File

@ -101,7 +101,7 @@ class PFCProcessor:
chat_id = str(message.chat_stream.stream_id) chat_id = str(message.chat_stream.stream_id)
private_name = str(message.message_info.user_info.user_nickname) # 假设 UserInfo 有 user_nickname private_name = str(message.message_info.user_info.user_nickname) # 假设 UserInfo 有 user_nickname
if global_config.enable_pfc_chatting: if global_config.pfc.enable:
await self.pfc_manager.get_or_create_conversation(chat_id, private_name) await self.pfc_manager.get_or_create_conversation(chat_id, private_name)
except Exception as e: except Exception as e:
@ -110,7 +110,7 @@ class PFCProcessor:
@staticmethod @staticmethod
def _check_ban_words(text: str, userinfo: UserInfo) -> bool: # 明确 userinfo 类型 def _check_ban_words(text: str, userinfo: UserInfo) -> bool: # 明确 userinfo 类型
"""检查消息中是否包含过滤词""" """检查消息中是否包含过滤词"""
for word in global_config.ban_words: for word in global_config.chat.ban_words:
if word in text: if word in text:
logger.info(f"[私聊]{userinfo.user_nickname}:{text}") # 假设 UserInfo 有 user_nickname logger.info(f"[私聊]{userinfo.user_nickname}:{text}") # 假设 UserInfo 有 user_nickname
logger.info(f"[过滤词识别]消息中含有{word}filtered") logger.info(f"[过滤词识别]消息中含有{word}filtered")
@ -120,7 +120,7 @@ class PFCProcessor:
@staticmethod @staticmethod
def _check_ban_regex(text: str, userinfo: UserInfo) -> bool: # 明确 userinfo 类型 def _check_ban_regex(text: str, userinfo: UserInfo) -> bool: # 明确 userinfo 类型
"""检查消息是否匹配过滤正则表达式""" """检查消息是否匹配过滤正则表达式"""
for pattern in global_config.ban_msgs_regex: for pattern in global_config.chat.ban_msgs_regex:
if pattern.search(text): # 假设 ban_msgs_regex 中的元素是已编译的正则对象 if pattern.search(text): # 假设 ban_msgs_regex 中的元素是已编译的正则对象
logger.info(f"[私聊]{userinfo.user_nickname}:{text}") # _nickname logger.info(f"[私聊]{userinfo.user_nickname}:{text}") # _nickname
logger.info(f"[正则表达式过滤]消息匹配到{pattern.pattern}filtered") # .pattern 获取原始表达式字符串 logger.info(f"[正则表达式过滤]消息匹配到{pattern.pattern}filtered") # .pattern 获取原始表达式字符串

View File

@ -712,7 +712,7 @@ async def build_chat_history_text(observation_info: ObservationInfo, private_nam
if hasattr(observation_info, "chat_history_str") and observation_info.chat_history_str: if hasattr(observation_info, "chat_history_str") and observation_info.chat_history_str:
chat_history_text = observation_info.chat_history_str chat_history_text = observation_info.chat_history_str
elif hasattr(observation_info, "chat_history") and observation_info.chat_history: elif hasattr(observation_info, "chat_history") and observation_info.chat_history:
history_slice = observation_info.chat_history[-global_config.pfc_recent_history_display_count :] history_slice = observation_info.chat_history[-global_config.pfc.pfc_recent_history_display_count :]
chat_history_text = await build_readable_messages( chat_history_text = await build_readable_messages(
history_slice, replace_bot_name=True, merge_messages=False, timestamp_mode="relative", read_mark=0.0 history_slice, replace_bot_name=True, merge_messages=False, timestamp_mode="relative", read_mark=0.0
) )
@ -722,7 +722,7 @@ async def build_chat_history_text(observation_info: ObservationInfo, private_nam
unread_count = getattr(observation_info, "new_messages_count", 0) unread_count = getattr(observation_info, "new_messages_count", 0)
unread_messages = getattr(observation_info, "unprocessed_messages", []) unread_messages = getattr(observation_info, "unprocessed_messages", [])
if unread_count > 0 and unread_messages: if unread_count > 0 and unread_messages:
bot_qq_str = str(global_config.BOT_QQ) if global_config.BOT_QQ else None # 安全获取 bot_qq_str = str(global_config.bot.qq_account) if global_config.bot.qq_account else None # 安全获取
if bot_qq_str: if bot_qq_str:
other_unread_messages = [ other_unread_messages = [
msg for msg in unread_messages if msg.get("user_info", {}).get("user_id") != bot_qq_str msg for msg in unread_messages if msg.get("user_info", {}).get("user_id") != bot_qq_str

View File

@ -11,10 +11,10 @@ class ReplyChecker:
"""回复检查器 - 新版:仅检查机器人自身发言的精确重复""" """回复检查器 - 新版:仅检查机器人自身发言的精确重复"""
def __init__(self, stream_id: str, private_name: str): def __init__(self, stream_id: str, private_name: str):
self.name = global_config.BOT_NICKNAME self.name = global_config.bot.nickname
self.private_name = private_name self.private_name = private_name
self.chat_observer = ChatObserver.get_instance(stream_id, private_name) self.chat_observer = ChatObserver.get_instance(stream_id, private_name)
self.bot_qq_str = str(global_config.BOT_QQ) self.bot_qq_str = str(global_config.bot.qq_account)
def _normalize_text(self, text: str) -> str: def _normalize_text(self, text: str) -> str:
""" """
@ -51,7 +51,7 @@ class ReplyChecker:
""" """
if not self.bot_qq_str: if not self.bot_qq_str:
logger.error( logger.error(
f"[私聊][{self.private_name}] ReplyChecker: BOT_QQ 未配置,无法检查{global_config.BOT_NICKNAME}自身消息。" f"[私聊][{self.private_name}] ReplyChecker: BOT_QQ 未配置,无法检查{global_config.bot.nickname}自身消息。"
) )
return True, "BOT_QQ未配置跳过重复检查。", False # 无法检查则默认通过 return True, "BOT_QQ未配置跳过重复检查。", False # 无法检查则默认通过
@ -79,7 +79,7 @@ class ReplyChecker:
normalized_historical_text = self._normalize_text(historical_message_text) normalized_historical_text = self._normalize_text(historical_message_text)
logger.debug( logger.debug(
f"[私聊][{self.private_name}] ReplyChecker: 历史记录 (反向索引 {i}) ({global_config.BOT_NICKNAME}): " f"[私聊][{self.private_name}] ReplyChecker: 历史记录 (反向索引 {i}) ({global_config.bot.nickname}): "
f"原始='{historical_message_text[:50]}...', 规范化后='{normalized_historical_text[:50]}...'" f"原始='{historical_message_text[:50]}...', 规范化后='{normalized_historical_text[:50]}...'"
) )
if ( if (
@ -87,7 +87,7 @@ class ReplyChecker:
): # 确保规范化后不为空串才比较 ): # 确保规范化后不为空串才比较
logger.warning(f"[私聊][{self.private_name}] ReplyChecker: !!! 成功拦截一次复读 !!!") logger.warning(f"[私聊][{self.private_name}] ReplyChecker: !!! 成功拦截一次复读 !!!")
logger.warning( logger.warning(
f"[私聊][{self.private_name}] ReplyChecker 检测到{global_config.BOT_NICKNAME}自身重复消息 (规范化后内容相同): '{normalized_reply[:50]}...'" f"[私聊][{self.private_name}] ReplyChecker 检测到{global_config.bot.nickname}自身重复消息 (规范化后内容相同): '{normalized_reply[:50]}...'"
) )
match_found = True match_found = True
# 返回: 不合适, 原因, 不需要重规划 (让上层逻辑决定是否重试生成) # 返回: 不合适, 原因, 不需要重规划 (让上层逻辑决定是否重试生成)

View File

@ -160,13 +160,13 @@ class ReplyGenerator:
def __init__(self, stream_id: str, private_name: str): def __init__(self, stream_id: str, private_name: str):
self.llm = LLMRequest( self.llm = LLMRequest(
model=global_config.llm_PFC_chat, model=global_config.model.pfc_chat,
temperature=global_config.llm_PFC_chat["temp"], temperature=global_config.model.pfc_chat["temp"],
max_tokens=300, # 对于JSON输出这个可能需要适当调整但一般回复短JSON结构也简单 max_tokens=300, # 对于JSON输出这个可能需要适当调整但一般回复短JSON结构也简单
request_type="reply_generation", request_type="reply_generation",
) )
self.personality_info = Individuality.get_instance().get_prompt(x_person=2, level=3) self.personality_info = Individuality.get_instance().get_prompt(x_person=2, level=3)
self.name = global_config.BOT_NICKNAME self.name = global_config.bot.nickname
self.private_name = private_name self.private_name = private_name
self.chat_observer = ChatObserver.get_instance(stream_id, private_name) self.chat_observer = ChatObserver.get_instance(stream_id, private_name)
self.reply_checker = ReplyChecker(stream_id, private_name) self.reply_checker = ReplyChecker(stream_id, private_name)
@ -239,7 +239,7 @@ class ReplyGenerator:
# 我们先做一个合理的假设: “最近聊天记录” 字符串 chat_history_text 是基于 # 我们先做一个合理的假设: “最近聊天记录” 字符串 chat_history_text 是基于
# observation_info.chat_history 的一个有限的尾部片段生成的。 # observation_info.chat_history 的一个有限的尾部片段生成的。
# 假设这个片段的长度由 global_config.pfc_recent_history_display_count 控制默认为20条。 # 假设这个片段的长度由 global_config.pfc.pfc_recent_history_display_count 控制默认为20条。
recent_history_display_count = getattr(global_config, "pfc_recent_history_display_count", 20) recent_history_display_count = getattr(global_config, "pfc_recent_history_display_count", 20)
if observation_info and observation_info.chat_history and len(observation_info.chat_history) > 0: if observation_info and observation_info.chat_history and len(observation_info.chat_history) > 0:

View File

@ -17,7 +17,7 @@ class Waiter:
def __init__(self, stream_id: str, private_name: str): def __init__(self, stream_id: str, private_name: str):
self.chat_observer = ChatObserver.get_instance(stream_id, private_name) self.chat_observer = ChatObserver.get_instance(stream_id, private_name)
self.name = global_config.BOT_NICKNAME self.name = global_config.bot.nickname
self.private_name = private_name self.private_name = private_name
# self.wait_accumulated_time = 0 # 不再需要累加计时 # self.wait_accumulated_time = 0 # 不再需要累加计时

View File

@ -206,11 +206,11 @@ class Individuality:
if not self.personality or not self.identity: if not self.personality or not self.identity:
return "个体特征尚未完全初始化。" return "个体特征尚未完全初始化。"
if global_config.personality_detail_level == 1: if global_config.personality.personality_detail_level == 1:
level = 1 level = 1
elif global_config.personality_detail_level == 2: elif global_config.personality.personality_detail_level == 2:
level = 2 level = 2
elif global_config.personality_detail_level == 3: elif global_config.personality.personality_detail_level == 3:
level = 3 level = 3
else: # level = 0 else: # level = 0
pass pass

View File

@ -42,7 +42,7 @@ class MainSystem:
async def initialize(self): async def initialize(self):
"""初始化系统组件""" """初始化系统组件"""
logger.debug(f"正在唤醒{global_config.BOT_NICKNAME}......") logger.debug(f"正在唤醒{global_config.bot.nickname}......")
# 其他初始化任务 # 其他初始化任务
await asyncio.gather(self._init_components()) await asyncio.gather(self._init_components())
@ -83,10 +83,10 @@ class MainSystem:
# 初始化日程 # 初始化日程
bot_schedule.initialize( bot_schedule.initialize(
name=global_config.BOT_NICKNAME, name=global_config.bot.nickname,
personality=global_config.personality_core, personality=global_config.personality.personality_core,
behavior=global_config.PROMPT_SCHEDULE_GEN, behavior=global_config.schedule.prompt_schedule_gen,
interval=global_config.SCHEDULE_DOING_UPDATE_INTERVAL, interval=global_config.schedule.schedule_doing_update_interval,
) )
asyncio.create_task(bot_schedule.mai_schedule_start()) asyncio.create_task(bot_schedule.mai_schedule_start())
@ -95,7 +95,7 @@ class MainSystem:
asyncio.create_task(chat_manager._auto_save_task()) asyncio.create_task(chat_manager._auto_save_task())
# 使用HippocampusManager初始化海马体 # 使用HippocampusManager初始化海马体
self.hippocampus_manager.initialize(global_config=global_config) self.hippocampus_manager.initialize()
# await asyncio.sleep(0.5) #防止logger输出飞了 # await asyncio.sleep(0.5) #防止logger输出飞了
# 将bot.py中的chat_bot.message_process消息处理函数注册到api.py的消息处理基类中 # 将bot.py中的chat_bot.message_process消息处理函数注册到api.py的消息处理基类中
@ -103,15 +103,15 @@ class MainSystem:
# 初始化个体特征 # 初始化个体特征
self.individuality.initialize( self.individuality.initialize(
bot_nickname=global_config.BOT_NICKNAME, bot_nickname=global_config.bot.nickname,
personality_core=global_config.personality_core, personality_core=global_config.personality.personality_core,
personality_sides=global_config.personality_sides, personality_sides=global_config.personality.personality_sides,
identity_detail=global_config.identity_detail, identity_detail=global_config.identity.identity_detail,
height=global_config.height, height=global_config.identity.height,
weight=global_config.weight, weight=global_config.identity.weight,
age=global_config.age, age=global_config.identity.age,
gender=global_config.gender, gender=global_config.identity.gender,
appearance=global_config.appearance, appearance=global_config.identity.appearance,
) )
logger.success("个体特征初始化成功") logger.success("个体特征初始化成功")
@ -124,7 +124,7 @@ class MainSystem:
logger.success("全局消息管理器启动成功") logger.success("全局消息管理器启动成功")
# 启动心流系统主循环 # 启动心流系统主循环
if not global_config.enable_Legacy_HFC: if not global_config.experimental.enable_Legacy_HFC:
asyncio.create_task(heartflow.heartflow_start_working()) asyncio.create_task(heartflow.heartflow_start_working())
logger.success("心流系统启动成功") logger.success("心流系统启动成功")
else: else:
@ -156,7 +156,7 @@ class MainSystem:
async def build_memory_task(): async def build_memory_task():
"""记忆构建任务""" """记忆构建任务"""
while True: while True:
await asyncio.sleep(global_config.build_memory_interval) await asyncio.sleep(global_config.memory.memory_build_interval)
logger.info("正在进行记忆构建") logger.info("正在进行记忆构建")
await HippocampusManager.get_instance().build_memory() await HippocampusManager.get_instance().build_memory()
@ -164,16 +164,18 @@ class MainSystem:
async def forget_memory_task(): async def forget_memory_task():
"""记忆遗忘任务""" """记忆遗忘任务"""
while True: while True:
await asyncio.sleep(global_config.forget_memory_interval) await asyncio.sleep(global_config.memory.forget_memory_interval)
print("\033[1;32m[记忆遗忘]\033[0m 开始遗忘记忆...") print("\033[1;32m[记忆遗忘]\033[0m 开始遗忘记忆...")
await HippocampusManager.get_instance().forget_memory(percentage=global_config.memory_forget_percentage) await HippocampusManager.get_instance().forget_memory(
percentage=global_config.memory.memory_forget_percentage
)
print("\033[1;32m[记忆遗忘]\033[0m 记忆遗忘完成") print("\033[1;32m[记忆遗忘]\033[0m 记忆遗忘完成")
@staticmethod @staticmethod
async def consolidate_memory_task(): async def consolidate_memory_task():
"""记忆整合任务""" """记忆整合任务"""
while True: while True:
await asyncio.sleep(global_config.consolidate_memory_interval) await asyncio.sleep(global_config.memory.consolidate_memory_interval)
print("\033[1;32m[记忆整合]\033[0m 开始整合记忆...") print("\033[1;32m[记忆整合]\033[0m 开始整合记忆...")
await HippocampusManager.get_instance().consolidate_memory() await HippocampusManager.get_instance().consolidate_memory()
print("\033[1;32m[记忆整合]\033[0m 记忆整合完成") print("\033[1;32m[记忆整合]\033[0m 记忆整合完成")

View File

@ -34,14 +34,14 @@ class MoodUpdateTask(AsyncTask):
def __init__(self): def __init__(self):
super().__init__( super().__init__(
task_name="Mood Update Task", task_name="Mood Update Task",
wait_before_start=global_config.mood_update_interval, wait_before_start=global_config.mood.mood_update_interval,
run_interval=global_config.mood_update_interval, run_interval=global_config.mood.mood_update_interval,
) )
# 从配置文件获取衰减率 # 从配置文件获取衰减率
self.decay_rate_valence: float = 1 - global_config.mood_decay_rate self.decay_rate_valence: float = 1 - global_config.mood.mood_decay_rate
"""愉悦度衰减率""" """愉悦度衰减率"""
self.decay_rate_arousal: float = 1 - global_config.mood_decay_rate self.decay_rate_arousal: float = 1 - global_config.mood.mood_decay_rate
"""唤醒度衰减率""" """唤醒度衰减率"""
self.last_update = time.time() self.last_update = time.time()

View File

@ -154,7 +154,7 @@ class NicknameManager:
if not self.is_enabled: if not self.is_enabled:
logger.info("绰号处理功能已禁用,处理器未启动。") logger.info("绰号处理功能已禁用,处理器未启动。")
return return
if global_config.max_nicknames_in_prompt == 0: # 考虑有神秘的用户输入为0的可能性 if global_config.group_nickname.max_nicknames_in_prompt == 0: # 考虑有神秘的用户输入为0的可能性
logger.error("[错误] 绰号注入数量不合适,绰号处理功能已禁用!") logger.error("[错误] 绰号注入数量不合适,绰号处理功能已禁用!")
return return
@ -210,7 +210,7 @@ class NicknameManager:
if not self.is_enabled: if not self.is_enabled:
return return
if random.random() < global_config.nickname_analysis_probability: if random.random() < global_config.group_nickname.nickname_analysis_probability:
logger.debug("跳过绰号分析:随机概率未命中。") logger.debug("跳过绰号分析:随机概率未命中。")
return return
@ -266,8 +266,8 @@ class NicknameManager:
None, None,
) )
user_name_map[user_id] = ( user_name_map[user_id] = (
latest_nickname or f"{global_config.BOT_NICKNAME}" latest_nickname or f"{global_config.bot.nickname}"
if user_id == global_config.BOT_QQ if user_id == global_config.bot.qq_account
else "未知" else "未知"
) )

View File

@ -44,7 +44,7 @@ class ChangeMoodTool(BaseTool):
_ori_response = ",".join(response_set) _ori_response = ",".join(response_set)
# _stance, emotion = await gpt._get_emotion_tags(ori_response, message_processed_plain_text) # _stance, emotion = await gpt._get_emotion_tags(ori_response, message_processed_plain_text)
emotion = "平静" emotion = "平静"
mood_manager.update_mood_from_emotion(emotion, global_config.mood_intensity_factor) mood_manager.update_mood_from_emotion(emotion, global_config.mood.mood_intensity_factor)
return {"name": "change_mood", "content": f"你的心情刚刚变化了,现在的心情是: {emotion}"} return {"name": "change_mood", "content": f"你的心情刚刚变化了,现在的心情是: {emotion}"}
except Exception as e: except Exception as e:
logger.error(f"心情改变工具执行失败: {str(e)}") logger.error(f"心情改变工具执行失败: {str(e)}")

View File

@ -65,7 +65,7 @@ def register_tool(tool_class: Type[BaseTool]):
if not tool_name: if not tool_name:
raise ValueError(f"工具类 {tool_class.__name__} 没有定义 name 属性") raise ValueError(f"工具类 {tool_class.__name__} 没有定义 name 属性")
if not global_config.rename_person and tool_name == "rename_person": if not global_config.experimental.rename_person and tool_name == "rename_person":
logger.info("改名功能已关闭,改名工具未注册") logger.info("改名功能已关闭,改名工具未注册")
return return

View File

@ -15,7 +15,7 @@ logger = get_logger("tool_use")
class ToolUser: class ToolUser:
def __init__(self): def __init__(self):
self.llm_model_tool = LLMRequest( self.llm_model_tool = LLMRequest(
model=global_config.llm_tool_use, temperature=0.2, max_tokens=1000, request_type="tool_use" model=global_config.model.tool_use, temperature=0.2, max_tokens=1000, request_type="tool_use"
) )
@staticmethod @staticmethod
@ -37,7 +37,7 @@ class ToolUser:
# print(f"intol111111111111111111111111111111111222222222222mid_memory_info{mid_memory_info}") # print(f"intol111111111111111111111111111111111222222222222mid_memory_info{mid_memory_info}")
# 这些信息应该从调用者传入而不是从self获取 # 这些信息应该从调用者传入而不是从self获取
bot_name = global_config.BOT_NICKNAME bot_name = global_config.bot.nickname
prompt = "" prompt = ""
prompt += mid_memory_info prompt += mid_memory_info
prompt += "你正在思考如何回复群里的消息。\n" prompt += "你正在思考如何回复群里的消息。\n"

View File

@ -1,104 +0,0 @@
[inner.version]
describe = "版本号"
important = true
can_edit = false
[bot.qq]
describe = "机器人的QQ号"
important = true
can_edit = true
[bot.nickname]
describe = "机器人的昵称"
important = true
can_edit = true
[bot.alias_names]
describe = "机器人的别名列表,该选项还在调试中,暂时未生效"
important = false
can_edit = true
[groups.talk_allowed]
describe = "可以回复消息的群号码列表"
important = true
can_edit = true
[groups.talk_frequency_down]
describe = "降低回复频率的群号码列表"
important = false
can_edit = true
[groups.ban_user_id]
describe = "禁止回复和读取消息的QQ号列表"
important = false
can_edit = true
[personality.personality_core]
describe = "用一句话或几句话描述人格的核心特点建议20字以内"
important = true
can_edit = true
[personality.personality_sides]
describe = "用一句话或几句话描述人格的一些细节条数任意不能为0该选项还在调试中"
important = false
can_edit = true
[identity.identity_detail]
describe = "身份特点列表条数任意不能为0该选项还在调试中"
important = false
can_edit = true
[identity.age]
describe = "年龄,单位岁"
important = false
can_edit = true
[identity.gender]
describe = "性别"
important = false
can_edit = true
[identity.appearance]
describe = "外貌特征描述,该选项还在调试中,暂时未生效"
important = false
can_edit = true
[platforms.nonebot-qq]
describe = "nonebot-qq适配器提供的链接"
important = true
can_edit = true
[chat.allow_focus_mode]
describe = "是否允许专注聊天状态"
important = false
can_edit = true
[chat.base_normal_chat_num]
describe = "最多允许多少个群进行普通聊天"
important = false
can_edit = true
[chat.base_focused_chat_num]
describe = "最多允许多少个群进行专注聊天"
important = false
can_edit = true
[chat.observation_context_size]
describe = "观察到的最长上下文大小建议15太短太长都会导致脑袋尖尖"
important = false
can_edit = true
[chat.message_buffer]
describe = "启用消息缓冲器,启用此项以解决消息的拆分问题,但会使麦麦的回复延迟"
important = false
can_edit = true
[chat.ban_words]
describe = "需要过滤的消息列表"
important = false
can_edit = true
[chat.ban_msgs_regex]
describe = "需要过滤的消息原始消息匹配的正则表达式匹配到的消息将被过滤支持CQ码"
important = false
can_edit = true

View File

@ -1,18 +1,10 @@
[inner] [inner]
version = "1.7.1" version = "2.0.1"
#----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读---- #----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读----
#如果你想要修改配置文件请在修改后将version的值进行变更 #如果你想要修改配置文件请在修改后将version的值进行变更
#如果新增项目请在BotConfig类下新增相应的变量 #如果新增项目请阅读src/config/official_configs.py中的说明
#1.如果你修改的是[]层级项目,例如你新增了 [memory],那么请在config.py的 load_config函数中的include_configs字典中新增"内容":{ #
#"func":memory,
#"support":">=0.0.0", #新的版本号
#"necessary":False #是否必须
#}
#2.如果你修改的是[]下的项目,例如你新增了[memory]下的 memory_ban_words ,那么请在config.py的 load_config函数中的 memory函数下新增版本判断:
# if config.INNER_VERSION in SpecifierSet(">=0.0.2"):
# config.memory_ban_words = set(memory_config.get("memory_ban_words", []))
# 版本格式:主版本号.次版本号.修订号,版本号递增规则如下: # 版本格式:主版本号.次版本号.修订号,版本号递增规则如下:
# 主版本号:当你做了不兼容的 API 修改, # 主版本号:当你做了不兼容的 API 修改,
# 次版本号:当你做了向下兼容的功能性新增, # 次版本号:当你做了向下兼容的功能性新增,
@ -21,11 +13,11 @@ version = "1.7.1"
#----以上是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读---- #----以上是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读----
[bot] [bot]
qq = 1145141919810 qq_account = 1145141919810
nickname = "麦麦" nickname = "麦麦"
alias_names = ["麦叠", "牢麦"] #该选项还在调试中,暂时未生效 alias_names = ["麦叠", "牢麦"] #该选项还在调试中,暂时未生效
[groups] [chat_target]
talk_allowed = [ talk_allowed = [
123, 123,
123, 123,
@ -54,10 +46,13 @@ identity_detail = [
"身份特点", "身份特点",
"身份特点", "身份特点",
]# 条数任意不能为0, 该选项还在调试中 ]# 条数任意不能为0, 该选项还在调试中
#外貌特征 #外貌特征
age = 20 # 年龄 单位岁 age = 18 # 年龄 单位岁
gender = "男" # 性别 gender = "女" # 性别
appearance = "用几句话描述外貌特征" # 外貌特征 该选项还在调试中,暂时未生效 height = "170" # 身高单位cm
weight = "50" # 体重单位kg
appearance = "用一句或几句话描述外貌特征" # 外貌特征 该选项还在调试中,暂时未生效
[schedule] [schedule]
enable_schedule_gen = true # 是否启用日程表 enable_schedule_gen = true # 是否启用日程表
@ -95,11 +90,10 @@ ban_msgs_regex = [
[normal_chat] #普通聊天 [normal_chat] #普通聊天
#一般回复参数 #一般回复参数
model_reasoning_probability = 0.7 # 麦麦回答时选择推理模型 模型的概率 reasoning_model_probability = 0.3 # 麦麦回答时选择推理模型的概率与之相对的普通模型的概率为1 - reasoning_model_probability
model_normal_probability = 0.3 # 麦麦回答时选择一般模型 模型的概率
emoji_chance = 0.2 # 麦麦一般回复时使用表情包的概率设置为1让麦麦自己决定发不发 emoji_chance = 0.2 # 麦麦一般回复时使用表情包的概率设置为1让麦麦自己决定发不发
thinking_timeout = 100 # 麦麦最长思考时间超过这个时间的思考会放弃往往是api反应太慢 thinking_timeout = 120 # 麦麦最长思考时间超过这个时间的思考会放弃往往是api反应太慢
willing_mode = "classical" # 回复意愿模式 —— 经典模式classicalmxp模式mxp自定义模式custom需要你自己实现 willing_mode = "classical" # 回复意愿模式 —— 经典模式classicalmxp模式mxp自定义模式custom需要你自己实现
response_willing_amplifier = 1 # 麦麦回复意愿放大系数一般为1 response_willing_amplifier = 1 # 麦麦回复意愿放大系数一般为1
@ -110,8 +104,8 @@ mentioned_bot_inevitable_reply = false # 提及 bot 必然回复
at_bot_inevitable_reply = false # @bot 必然回复 at_bot_inevitable_reply = false # @bot 必然回复
[focus_chat] #专注聊天 [focus_chat] #专注聊天
reply_trigger_threshold = 3.6 # 专注聊天触发阈值,越低越容易进入专注聊天 reply_trigger_threshold = 3.0 # 专注聊天触发阈值,越低越容易进入专注聊天
default_decay_rate_per_second = 0.95 # 默认衰减率,越大衰减越快,越高越难进入专注聊天 default_decay_rate_per_second = 0.98 # 默认衰减率,越大衰减越快,越高越难进入专注聊天
consecutive_no_reply_threshold = 3 # 连续不回复的阈值,越低越容易结束专注聊天 consecutive_no_reply_threshold = 3 # 连续不回复的阈值,越低越容易结束专注聊天
# 以下选项暂时无效 # 以下选项暂时无效
@ -120,14 +114,14 @@ compress_length_limit = 5 #最多压缩份数,超过该数值的压缩上下
[emoji] [emoji]
max_emoji_num = 40 # 表情包最大数量 max_reg_num = 40 # 表情包最大注册数量
max_reach_deletion = true # 开启则在达到最大数量时删除表情包,关闭则达到最大数量时不删除,只是不会继续收集表情包 do_replace = true # 开启则在达到最大数量时删除(替换)表情包,关闭则达到最大数量时不会继续收集表情包
check_interval = 10 # 检查表情包(注册,破损,删除)的时间间隔(分钟) check_interval = 120 # 检查表情包(注册,破损,删除)的时间间隔(分钟)
save_pic = false # 是否保存图片 save_pic = false # 是否保存图片
save_emoji = false # 是否保存表情包 cache_emoji = true # 是否缓存表情包
steal_emoji = true # 是否偷取表情包,让麦麦可以发送她保存的这些表情包 steal_emoji = true # 是否偷取表情包,让麦麦可以发送她保存的这些表情包
enable_check = false # 是否启用表情包过滤,只有符合该要求的表情包才会被保存 content_filtration = false # 是否启用表情包过滤,只有符合该要求的表情包才会被保存
check_prompt = "符合公序良俗" # 表情包过滤要求,只有符合该要求的表情包才会被保存 filtration_prompt = "符合公序良俗" # 表情包过滤要求,只有符合该要求的表情包才会被保存
[group_nickname] [group_nickname]
enable_nickname_mapping = false # 绰号映射功能总开关(默认关闭,建议关闭) enable_nickname_mapping = false # 绰号映射功能总开关(默认关闭,建议关闭)
@ -139,10 +133,10 @@ nickname_analysis_history_limit = 30 # 绰号处理可见最大上下文
nickname_analysis_probability = 0.1 # 绰号随机概率命中,该值越大,绰号分析越频繁 nickname_analysis_probability = 0.1 # 绰号随机概率命中,该值越大,绰号分析越频繁
[memory] [memory]
build_memory_interval = 2000 # 记忆构建间隔 单位秒 间隔越低,麦麦学习越多,但是冗余信息也会增多 memory_build_interval = 2000 # 记忆构建间隔 单位秒 间隔越低,麦麦学习越多,但是冗余信息也会增多
build_memory_distribution = [6.0,3.0,0.6,32.0,12.0,0.4] # 记忆构建分布参数分布1均值标准差权重分布2均值标准差权重 memory_build_distribution = [6.0, 3.0, 0.6, 32.0, 12.0, 0.4] # 记忆构建分布参数分布1均值标准差权重分布2均值标准差权重
build_memory_sample_num = 8 # 采样数量,数值越高记忆采样次数越多 memory_build_sample_num = 8 # 采样数量,数值越高记忆采样次数越多
build_memory_sample_length = 40 # 采样长度,数值越高一段记忆内容越丰富 memory_build_sample_length = 40 # 采样长度,数值越高一段记忆内容越丰富
memory_compress_rate = 0.1 # 记忆压缩率 控制记忆精简程度 建议保持默认,调高可以获得更多信息,但是冗余信息也会增多 memory_compress_rate = 0.1 # 记忆压缩率 控制记忆精简程度 建议保持默认,调高可以获得更多信息,但是冗余信息也会增多
forget_memory_interval = 1000 # 记忆遗忘间隔 单位秒 间隔越低,麦麦遗忘越频繁,记忆更精简,但更难学习 forget_memory_interval = 1000 # 记忆遗忘间隔 单位秒 间隔越低,麦麦遗忘越频繁,记忆更精简,但更难学习
@ -154,9 +148,7 @@ consolidation_similarity_threshold = 0.7 # 相似度阈值
consolidation_check_percentage = 0.01 # 检查节点比例 consolidation_check_percentage = 0.01 # 检查节点比例
#不希望记忆的词,已经记忆的不会受到影响 #不希望记忆的词,已经记忆的不会受到影响
memory_ban_words = [ memory_ban_words = [ "表情包", "图片", "回复", "聊天记录" ]
# "403","张三"
]
long_message_auto_truncate = true # HFC 模式过长消息自动截断,防止他人 prompt 恶意注入减少token消耗但可能损失图片/长文信息,按需选择状态(默认开启) long_message_auto_truncate = true # HFC 模式过长消息自动截断,防止他人 prompt 恶意注入减少token消耗但可能损失图片/长文信息,按需选择状态(默认开启)
@ -165,40 +157,38 @@ mood_update_interval = 1.0 # 情绪更新间隔 单位秒
mood_decay_rate = 0.95 # 情绪衰减率 mood_decay_rate = 0.95 # 情绪衰减率
mood_intensity_factor = 1.0 # 情绪强度因子 mood_intensity_factor = 1.0 # 情绪强度因子
[keywords_reaction] # 针对某个关键词作出反应 [keyword_reaction] # 针对某个关键词作出反应
enable = true # 关键词反应功能的总开关 enable = true # 关键词反应功能的总开关
[[keywords_reaction.rules]] # 如果想要新增多个关键词直接复制本条修改keywords和reaction即可 [[keyword_reaction.rules]] # 如果想要新增多个关键词直接复制本条修改keywords和reaction即可
enable = true # 是否启用此条为了人类在未来AI战争能更好地识别AIbushi默认开启 enable = true # 是否启用此条为了人类在未来AI战争能更好地识别AIbushi默认开启
keywords = ["人机", "bot", "机器", "入机", "robot", "机器人","ai","AI"] # 会触发反应的关键词 keywords = ["人机", "bot", "机器", "入机", "robot", "机器人","ai","AI"] # 会触发反应的关键词
reaction = "有人说你是人机或者机器人,否定这一事实,攻击或反问他。注意,只有认为你是机器人才回复,否则不要否认" # 触发之后添加的提示词 reaction = "有人说你是人机或者机器人,否定这一事实,攻击或反问他。注意,只有认为你是机器人才回复,否则不要否认" # 触发之后添加的提示词
[[keywords_reaction.rules]] # 就像这样复制 [[keyword_reaction.rules]] # 就像这样复制
enable = false # 仅作示例,不会触发 enable = false # 仅作示例,不会触发
keywords = ["测试关键词回复","test",""] keywords = ["测试关键词回复","test",""]
reaction = "回答“测试成功”" # 修复错误的引号 reaction = "回答“测试成功”" # 修复错误的引号
[[keywords_reaction.rules]] # 使用正则表达式匹配句式 [[keyword_reaction.rules]] # 使用正则表达式匹配句式
enable = false # 仅作示例,不会触发 enable = false # 仅作示例,不会触发
regex = ["^(?P<n>\\S{1,20})是这样的$"] # 将匹配到的词汇命名为n反应中对应的[n]会被替换为匹配到的内容,若不了解正则表达式请勿编写 regex = ["^(?P<n>\\S{1,20})是这样的$"] # 将匹配到的词汇命名为n反应中对应的[n]会被替换为匹配到的内容,若不了解正则表达式请勿编写
reaction = "请按照以下模板造句:[n]是这样的xx只要xx就可以可是[n]要考虑的事情就很多了比如什么时候xx什么时候xx什么时候xx。请自由发挥替换xx部分只需保持句式结构同时表达一种将[n]过度重视的反讽意味)" reaction = "请按照以下模板造句:[n]是这样的xx只要xx就可以可是[n]要考虑的事情就很多了比如什么时候xx什么时候xx什么时候xx。请自由发挥替换xx部分只需保持句式结构同时表达一种将[n]过度重视的反讽意味)"
[chinese_typo] [chinese_typo]
enable = true # 是否启用中文错别字生成器 enable = true # 是否启用中文错别字生成器
error_rate=0.001 # 单字替换概率 error_rate=0.01 # 单字替换概率
min_freq=9 # 最小字频阈值 min_freq=9 # 最小字频阈值
tone_error_rate=0.1 # 声调错误概率 tone_error_rate=0.1 # 声调错误概率
word_replace_rate=0.006 # 整词替换概率 word_replace_rate=0.006 # 整词替换概率
[response_splitter] [response_splitter]
enable_response_splitter = true # 是否启用回复分割器 enable = true # 是否启用回复分割器
response_max_length = 256 # 回复允许的最大长度 max_length = 256 # 回复允许的最大长度
response_max_sentence_num = 4 # 回复允许的最大句子数 max_sentence_num = 4 # 回复允许的最大句子数
enable_kaomoji_protection = false # 是否启用颜文字保护 enable_kaomoji_protection = false # 是否启用颜文字保护
model_max_output_length = 256 # 模型单次返回的最大token数 [telemetry] #发送统计信息,主要是看全球有多少只麦麦
[remote] #发送统计信息,主要是看全球有多少只麦麦
enable = true enable = true
[experimental] #实验性功能 [experimental] #实验性功能
@ -249,14 +239,17 @@ max_cooldown = 18000 # 最长冷却时间5小时 (18000秒)
# stream = <true|false> : 用于指定模型是否是使用流式输出 # stream = <true|false> : 用于指定模型是否是使用流式输出
# 如果不指定,则该项是 False # 如果不指定,则该项是 False
[model]
model_max_output_length = 800 # 模型单次返回的最大token数
#这个模型必须是推理模型 #这个模型必须是推理模型
[model.llm_reasoning] # 一般聊天模式的推理回复模型 [model.reasoning] # 一般聊天模式的推理回复模型
name = "Pro/deepseek-ai/DeepSeek-R1" name = "Pro/deepseek-ai/DeepSeek-R1"
provider = "SILICONFLOW" provider = "SILICONFLOW"
pri_in = 1.0 #模型的输入价格(非必填,可以记录消耗) pri_in = 1.0 #模型的输入价格(非必填,可以记录消耗)
pri_out = 4.0 #模型的输出价格(非必填,可以记录消耗) pri_out = 4.0 #模型的输出价格(非必填,可以记录消耗)
[model.llm_normal] #V3 回复模型 专注和一般聊天模式共用的回复模型 [model.normal] #V3 回复模型 专注和一般聊天模式共用的回复模型
name = "Pro/deepseek-ai/DeepSeek-V3" name = "Pro/deepseek-ai/DeepSeek-V3"
provider = "SILICONFLOW" provider = "SILICONFLOW"
pri_in = 2 #模型的输入价格(非必填,可以记录消耗) pri_in = 2 #模型的输入价格(非必填,可以记录消耗)
@ -264,13 +257,13 @@ pri_out = 8 #模型的输出价格(非必填,可以记录消耗)
#默认temp 0.2 如果你使用的是老V3或者其他模型请自己修改temp参数 #默认temp 0.2 如果你使用的是老V3或者其他模型请自己修改temp参数
temp = 0.2 #模型的温度新V3建议0.1-0.3 temp = 0.2 #模型的温度新V3建议0.1-0.3
[model.llm_topic_judge] #主题判断模型建议使用qwen2.5 7b [model.topic_judge] #主题判断模型建议使用qwen2.5 7b
name = "Pro/Qwen/Qwen2.5-7B-Instruct" name = "Pro/Qwen/Qwen2.5-7B-Instruct"
provider = "SILICONFLOW" provider = "SILICONFLOW"
pri_in = 0.35 pri_in = 0.35
pri_out = 0.35 pri_out = 0.35
[model.llm_summary] #概括模型建议使用qwen2.5 32b 及以上 [model.summary] #概括模型建议使用qwen2.5 32b 及以上
name = "Qwen/Qwen2.5-32B-Instruct" name = "Qwen/Qwen2.5-32B-Instruct"
provider = "SILICONFLOW" provider = "SILICONFLOW"
pri_in = 1.26 pri_in = 1.26
@ -282,27 +275,27 @@ provider = "SILICONFLOW"
pri_in = 0.35 pri_in = 0.35
pri_out = 0.35 pri_out = 0.35
[model.llm_heartflow] # 用于控制麦麦是否参与聊天的模型 [model.heartflow] # 用于控制麦麦是否参与聊天的模型
name = "Qwen/Qwen2.5-32B-Instruct" name = "Qwen/Qwen2.5-32B-Instruct"
provider = "SILICONFLOW" provider = "SILICONFLOW"
pri_in = 1.26 pri_in = 1.26
pri_out = 1.26 pri_out = 1.26
[model.llm_observation] #观察模型,压缩聊天内容,建议用免费的 [model.observation] #观察模型,压缩聊天内容,建议用免费的
# name = "Pro/Qwen/Qwen2.5-7B-Instruct" # name = "Pro/Qwen/Qwen2.5-7B-Instruct"
name = "Qwen/Qwen2.5-7B-Instruct" name = "Qwen/Qwen2.5-7B-Instruct"
provider = "SILICONFLOW" provider = "SILICONFLOW"
pri_in = 0 pri_in = 0
pri_out = 0 pri_out = 0
[model.llm_sub_heartflow] #心流:认真水群时,生成麦麦的内心想法,必须使用具有工具调用能力的模型 [model.sub_heartflow] #心流:认真水群时,生成麦麦的内心想法,必须使用具有工具调用能力的模型
name = "Pro/deepseek-ai/DeepSeek-V3" name = "Pro/deepseek-ai/DeepSeek-V3"
provider = "SILICONFLOW" provider = "SILICONFLOW"
pri_in = 2 pri_in = 2
pri_out = 8 pri_out = 8
temp = 0.3 #模型的温度新V3建议0.1-0.3 temp = 0.3 #模型的温度新V3建议0.1-0.3
[model.llm_plan] #决策:认真水群时,负责决定麦麦该做什么 [model.plan] #决策:认真水群时,负责决定麦麦该做什么
name = "Pro/deepseek-ai/DeepSeek-V3" name = "Pro/deepseek-ai/DeepSeek-V3"
provider = "SILICONFLOW" provider = "SILICONFLOW"
pri_in = 2 pri_in = 2
@ -320,7 +313,7 @@ pri_out = 0
#私聊PFC需要开启PFC功能默认三个模型均为硅基流动v3如果需要支持多人同时私聊或频繁调用建议把其中的一个或两个换成官方v3或其它模型以免撞到429 #私聊PFC需要开启PFC功能默认三个模型均为硅基流动v3如果需要支持多人同时私聊或频繁调用建议把其中的一个或两个换成官方v3或其它模型以免撞到429
#PFC决策模型 #PFC决策模型
[model.llm_PFC_action_planner] [model.pfc_action_planner]
name = "Pro/deepseek-ai/DeepSeek-V3" name = "Pro/deepseek-ai/DeepSeek-V3"
provider = "SILICONFLOW" provider = "SILICONFLOW"
temp = 0.3 temp = 0.3
@ -328,7 +321,7 @@ pri_in = 2
pri_out = 8 pri_out = 8
#PFC聊天模型 #PFC聊天模型
[model.llm_PFC_chat] [model.pfc_chat]
name = "Pro/deepseek-ai/DeepSeek-V3" name = "Pro/deepseek-ai/DeepSeek-V3"
provider = "SILICONFLOW" provider = "SILICONFLOW"
temp = 0.3 temp = 0.3
@ -375,7 +368,7 @@ pri_out = 8
#以下模型暂时没有使用!! #以下模型暂时没有使用!!
#以下模型暂时没有使用!! #以下模型暂时没有使用!!
[model.llm_tool_use] #工具调用模型需要使用支持工具调用的模型建议使用qwen2.5 32b [model.tool_use] #工具调用模型需要使用支持工具调用的模型建议使用qwen2.5 32b
name = "Qwen/Qwen2.5-32B-Instruct" name = "Qwen/Qwen2.5-32B-Instruct"
provider = "SILICONFLOW" provider = "SILICONFLOW"
pri_in = 1.26 pri_in = 1.26

View File

@ -0,0 +1,7 @@
from src.config.config import global_config
class TestConfig:
def test_load(self):
config = global_config
print(config)