From 71a85667e36cf5857390191bfa439e268b6b15f1 Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Wed, 31 Dec 2025 00:43:08 +0800
Subject: [PATCH] feat: optional, let the LLM decide whether to quote when replying
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
 src/chat/heart_flow/heartFC_chat.py | 38 +++++++++++++++++++++++------
 src/chat/planner_actions/planner.py | 21 +++++++++++++---
 src/config/official_configs.py      |  3 +++
 src/dream/dream_generator.py        | 17 +++----------
 template/bot_config_template.toml   |  4 ++-
 5 files changed, 57 insertions(+), 26 deletions(-)

diff --git a/src/chat/heart_flow/heartFC_chat.py b/src/chat/heart_flow/heartFC_chat.py
index 6b53fc04..9de93ef2 100644
--- a/src/chat/heart_flow/heartFC_chat.py
+++ b/src/chat/heart_flow/heartFC_chat.py
@@ -244,12 +244,14 @@ class HeartFChatting:
         thinking_id,
         actions,
         selected_expressions: Optional[List[int]] = None,
+        quote_message: Optional[bool] = None,
     ) -> Tuple[Dict[str, Any], str, Dict[str, float]]:
         with Timer("reply sending", cycle_timers):
             reply_text = await self._send_response(
                 reply_set=response_set,
                 message_data=action_message,
                 selected_expressions=selected_expressions,
+                quote_message=quote_message,
             )
 
         # Get platform; fall back to chat_stream if it is missing, and to a default value if still None
@@ -526,15 +528,22 @@ class HeartFChatting:
         reply_set: "ReplySetModel",
         message_data: "DatabaseMessages",
         selected_expressions: Optional[List[int]] = None,
+        quote_message: Optional[bool] = None,
     ) -> str:
-        new_message_count = message_api.count_new_messages(
-            chat_id=self.chat_stream.stream_id, start_time=self.last_read_time, end_time=time.time()
-        )
-
-        need_reply = new_message_count >= random.randint(2, 3)
-
-        if need_reply:
-            logger.info(f"{self.log_prefix} {new_message_count} new messages arrived between thinking and replying; using a quoted reply")
+        # The llm_quote config decides whether to honor the planner-supplied quote_message
+        if global_config.chat.llm_quote and quote_message is not None:
+            # Config enabled: the LLM's quote_message decides whether the reply quotes
+            need_reply = quote_message
+            if need_reply:
+                logger.info(f"{self.log_prefix} LLM decided to use a quoted reply")
+        else:
+            # Config disabled: fall back to the original heuristic
+            new_message_count = message_api.count_new_messages(
+                chat_id=self.chat_stream.stream_id, start_time=self.last_read_time, end_time=time.time()
+            )
+            need_reply = new_message_count >= random.randint(2, 3)
+            if need_reply:
+                logger.info(f"{self.log_prefix} {new_message_count} new messages arrived between thinking and replying; using a quoted reply")
 
         reply_text = ""
         first_replied = False
@@ -640,6 +649,7 @@ class HeartFChatting:
 
         # Extract the unknown-word list from the Planner's action_data (only used on reply)
         unknown_words = None
+        quote_message = None
        if isinstance(action_planner_info.action_data, dict):
             uw = action_planner_info.action_data.get("unknown_words")
             if isinstance(uw, list):
@@ -651,6 +661,17 @@ class HeartFChatting:
                     cleaned_uw.append(s)
             if cleaned_uw:
                 unknown_words = cleaned_uw
+
+            # Extract the quote_message parameter from the Planner's action_data
+            qm = action_planner_info.action_data.get("quote_message")
+            if qm is not None:
+                # Accept several formats: true/false, "true"/"false", 1/0
+                if isinstance(qm, bool):
+                    quote_message = qm
+                elif isinstance(qm, str):
+                    quote_message = qm.lower() in ("true", "1", "yes")
+                elif isinstance(qm, (int, float)):
+                    quote_message = bool(qm)
 
         success, llm_response = await generator_api.generate_reply(
             chat_stream=self.chat_stream,
@@ -682,6 +703,7 @@ class HeartFChatting:
             thinking_id=thinking_id,
             actions=chosen_action_plan_infos,
             selected_expressions=selected_expressions,
+            quote_message=quote_message,
         )
         self.last_active_time = time.time()
         return {
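Before moving to the planner changes, here is a minimal standalone sketch of the branch the rewritten `_send_response` now takes. `should_quote` and its parameters are hypothetical stand-ins for the real `global_config.chat.llm_quote` flag, the planner-supplied `quote_message`, and the `message_api.count_new_messages` result used in the hunk above.

```python
import random
from typing import Optional


def should_quote(
    llm_quote_enabled: bool,
    quote_message: Optional[bool],
    new_message_count: int,
) -> bool:
    """Mirror of the decision added to _send_response above.

    If the llm_quote config is on and the planner supplied a verdict,
    trust the LLM; otherwise keep the pre-patch heuristic that quotes
    once 2-3 or more messages arrived while the bot was "thinking".
    """
    if llm_quote_enabled and quote_message is not None:
        return quote_message
    return new_message_count >= random.randint(2, 3)


# Flag off: the old heuristic applies regardless of the planner's output.
assert should_quote(False, True, 0) is False
# Flag on and the planner decided: the LLM's choice is honored unconditionally.
assert should_quote(True, False, 10) is False
assert should_quote(True, True, 0) is True
```

Note that when the flag is on but the planner omitted the key (so `quote_message` stays None), the heuristic branch still runs, so malformed planner output degrades gracefully to the old behavior.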
diff --git a/src/chat/planner_actions/planner.py b/src/chat/planner_actions/planner.py
index c6724fdf..92ff1d79 100644
--- a/src/chat/planner_actions/planner.py
+++ b/src/chat/planner_actions/planner.py
@@ -522,19 +522,32 @@ class ActionPlanner:
         # Pick the reply action's example JSON according to the think_mode config
         # unknown_words and question are carried directly in the JSON as action parameters
         if global_config.chat.think_mode == "classic":
-            reply_action_example = (
+            reply_action_example = ""
+            if global_config.chat.llm_quote:
+                reply_action_example += "5. To reply to a specific message explicitly, use quote_message; if there are few messages and no explicit reply is needed, set quote_message to false\n"
+            reply_action_example += (
                 '{{"action":"reply", "target_message_id":"message id (m+number)", '
                 '"unknown_words":["word1","word2"], '
-                '"question":"question to look up"}'
+                '"question":"question to look up"'
             )
+            if global_config.chat.llm_quote:
+                reply_action_example += ', "quote_message":"set to true if this message should be quoted"'
+            reply_action_example += "}"
         else:
             reply_action_example = (
                 "5. think_level indicates thinking depth: 0 means the reply needs no thinking or recall, 1 means it does\n"
-                + '{{"action":"reply", "think_level":numeric level (0 or 1), '
+            )
+            if global_config.chat.llm_quote:
+                reply_action_example += "6. To reply to a specific message explicitly, use quote_message; if there are few messages and no explicit reply is needed, set quote_message to false\n"
+            reply_action_example += (
+                '{{"action":"reply", "think_level":numeric level (0 or 1), '
                 '"target_message_id":"message id (m+number)", '
                 '"unknown_words":["word1","word2"], '
-                '"question":"question to look up"}'
+                '"question":"question to look up"'
             )
+            if global_config.chat.llm_quote:
+                reply_action_example += ', "quote_message":"set to true if this message should be quoted"'
+            reply_action_example += "}"
 
         planner_prompt_template = await global_prompt_manager.get_prompt_async("planner_prompt")
         prompt = planner_prompt_template.format(
diff --git a/src/config/official_configs.py b/src/config/official_configs.py
index 514d2639..9816fd1e 100644
--- a/src/config/official_configs.py
+++ b/src/config/official_configs.py
@@ -125,6 +125,9 @@ class ChatConfig(ConfigBase):
     plan_reply_log_max_per_chat: int = 1024
     """Maximum number of Plan/Reply logs kept per chat stream; the oldest logs are deleted once this limit is exceeded"""
 
+    llm_quote: bool = False
+    """Whether to enable the quote_message parameter in the reply action; when enabled, the LLM controls whether a message is quoted"""
+
     def _parse_stream_config_to_chat_id(self, stream_config_str: str) -> Optional[str]:
         """Generate a chat_id from "platform:id:type", consistent with ChatStream.get_stream_id."""
         try:
diff --git a/src/dream/dream_generator.py b/src/dream/dream_generator.py
index dd830ed0..edbc3160 100644
--- a/src/dream/dream_generator.py
+++ b/src/dream/dream_generator.py
@@ -44,18 +44,6 @@ def get_random_dream_styles(count: int = 2) -> List[str]:
     """Randomly pick the requested number of styles from the dream style list"""
     return random.sample(DREAM_STYLES, min(count, len(DREAM_STYLES)))
 
-
-def get_dream_summary_model() -> LLMRequest:
-    """Get the cached utils model instance used to generate dream summaries"""
-    global _dream_summary_model
-    if _dream_summary_model is None:
-        _dream_summary_model = LLMRequest(
-            model_set=model_config.model_task_config.replyer,
-            request_type="dream.summary",
-        )
-    return _dream_summary_model
-
-
 def init_dream_summary_prompt() -> None:
     """Initialize the prompt used for dream summaries"""
     Prompt(
@@ -186,7 +174,10 @@ async def generate_dream_summary(
     )
 
     # Call the utils model to generate the dream
-    summary_model = get_dream_summary_model()
+    summary_model = LLMRequest(
+        model_set=model_config.model_task_config.replyer,
+        request_type="dream.summary",
+    )
     dream_content, (reasoning, model_name, _) = await summary_model.generate_response_async(
         dream_prompt,
         temperature=0.8,
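To see what the string assembly in the planner.py hunk actually produces, here is a hypothetical, self-contained reduction of the classic-mode branch; `llm_quote` is a plain argument instead of a `global_config` read, and the prompt text uses the English rendering from the diff above.

```python
def build_reply_example(llm_quote: bool) -> str:
    """Reduced form of the classic-mode branch in planner.py above."""
    example = ""
    if llm_quote:
        example += (
            "5. To reply to a specific message explicitly, use quote_message; "
            "if there are few messages and no explicit reply is needed, "
            "set quote_message to false\n"
        )
    # The doubled opening braces are kept verbatim from the source, which
    # apparently escapes them for a later str.format pass over the prompt.
    example += (
        '{{"action":"reply", "target_message_id":"message id (m+number)", '
        '"unknown_words":["word1","word2"], '
        '"question":"question to look up"'
    )
    if llm_quote:
        example += ', "quote_message":"set to true if this message should be quoted"'
    example += "}"
    return example


# With the flag off, the final string is identical to the pre-patch example;
# with it on, one instruction line and one JSON key are added.
print(build_reply_example(False))
print(build_reply_example(True))
```

Building the example incrementally keeps the JSON byte-for-byte unchanged when llm_quote is off, which makes the feature strictly opt-in.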
diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml
index d84ae494..3b21a432 100644
--- a/template/bot_config_template.toml
+++ b/template/bot_config_template.toml
@@ -1,5 +1,5 @@
 [inner]
-version = "7.3.2"
+version = "7.3.3"
 
 #---- The section below is for developers; if you just deployed MaiMai, you don't need to read it ----
 # If you modify the config file, please increment the version value
@@ -118,6 +118,8 @@
 think_mode = "dynamic" # Thinking mode, options: classic (default shallow thinking) or dynamic (the LLM picks a think_level per reply)
 plan_reply_log_max_per_chat = 1024 # Maximum number of Plan/Reply logs kept per chat; the oldest are deleted once this limit is exceeded
 
+llm_quote = false # Whether the LLM decides quoting for replies
+
 enable_talk_value_rules = true # Whether to enable dynamic talk-frequency rules
 # Dynamic talk-frequency rules: adjust talk_value by time period / chat_id (specific chats match first, then global)
 
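As a quick sanity check of the template change, the new key parses with the standard library's tomllib (Python 3.11+). The enclosing [chat] table name below is an assumption; the hunk does not show which table these keys live in.

```python
import tomllib

# Hypothetical excerpt of bot_config_template.toml; the real file has many
# more keys, and the [chat] table name is assumed rather than shown above.
sample = """
[chat]
think_mode = "dynamic"
plan_reply_log_max_per_chat = 1024
llm_quote = false
enable_talk_value_rules = true
"""

config = tomllib.loads(sample)

# Lines up with the ChatConfig default added in official_configs.py
# (llm_quote: bool = False): omitting the key keeps the old behavior.
assert config["chat"]["llm_quote"] is False
assert config["chat"]["think_mode"] == "dynamic"
```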