From 1a9bee542e5623add11ac48b6b396d3bdff99ebb Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Fri, 22 Aug 2025 17:22:33 +0800 Subject: [PATCH 1/5] =?UTF-8?q?ref=E5=88=86=E7=A6=BBplannerprompt=E4=B8=8E?= =?UTF-8?q?=E4=BA=BA=E6=A0=BC=E8=A7=A3=E8=80=A6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../heart_flow/heartflow_message_processor.py | 11 +++++++---- src/chat/planner_actions/planner.py | 18 ++++++++++-------- src/chat/replyer/default_generator.py | 2 +- 3 files changed, 18 insertions(+), 13 deletions(-) diff --git a/src/chat/heart_flow/heartflow_message_processor.py b/src/chat/heart_flow/heartflow_message_processor.py index 8d0f4426..dc953102 100644 --- a/src/chat/heart_flow/heartflow_message_processor.py +++ b/src/chat/heart_flow/heartflow_message_processor.py @@ -78,8 +78,12 @@ async def _calculate_interest(message: MessageRecv) -> Tuple[float, bool, list[s interested_rate += base_interest if is_mentioned: - interest_increase_on_mention = 1 + interest_increase_on_mention = 2 interested_rate += interest_increase_on_mention + + + message.interest_value = interested_rate + message.is_mentioned = is_mentioned return interested_rate, is_mentioned, keywords @@ -110,9 +114,8 @@ class HeartFCMessageReceiver: chat = message.chat_stream # 2. 兴趣度计算与更新 - interested_rate, is_mentioned, keywords = await _calculate_interest(message) - message.interest_value = interested_rate - message.is_mentioned = is_mentioned + interested_rate, keywords = await _calculate_interest(message) + await self.storage.store_message(message, chat) diff --git a/src/chat/planner_actions/planner.py b/src/chat/planner_actions/planner.py index 2cb2a469..6759c73a 100644 --- a/src/chat/planner_actions/planner.py +++ b/src/chat/planner_actions/planner.py @@ -33,7 +33,7 @@ def init_prompt(): Prompt( """ {time_block} -{identity_block} +{name_block} 你现在需要根据聊天内容,选择的合适的action来参与聊天。 请你根据以下行事风格来决定action: {plan_style} @@ -298,7 +298,7 @@ class ActionPlanner: actions_before_now = get_actions_by_timestamp_with_chat( chat_id=self.chat_id, - timestamp_start=time.time() - 3600, + timestamp_start=time.time() - 600, timestamp_end=time.time(), limit=5, ) @@ -306,8 +306,12 @@ class ActionPlanner: actions_before_now_block = build_readable_actions( actions=actions_before_now, ) + + if actions_before_now: + actions_before_now_block = f"你刚刚选择并执行过的action是:\n{actions_before_now_block}" + else: + actions_before_now_block = "" - actions_before_now_block = f"你刚刚选择并执行过的action是:\n{actions_before_now_block}" if refresh_time: self.last_obs_time_mark = time.time() @@ -322,8 +326,7 @@ class ActionPlanner: 动作:no_action 动作描述:不进行动作,等待合适的时机 - 当你刚刚发送了消息,没有人回复时,选择no_action -- 如果有别的动作(非回复)满足条件,可以不用no_action -- 当你一次发送了太多消息,为了避免打扰聊天节奏,选择no_action +- 当你一次发送了太多消息,为了避免过于烦人,可以不回复 { "action": "no_action", "reason":"不动作的原因" @@ -378,8 +381,7 @@ class ActionPlanner: bot_nickname = f",也有人叫你{','.join(global_config.bot.alias_names)}" else: bot_nickname = "" - bot_core_personality = global_config.personality.personality_core - identity_block = f"你的名字是{bot_name}{bot_nickname},你{bot_core_personality}:" + name_block = f"你的名字是{bot_name}{bot_nickname},请注意哪些是你自己的发言。" planner_prompt_template = await global_prompt_manager.get_prompt_async("planner_prompt") prompt = planner_prompt_template.format( @@ -391,7 +393,7 @@ class ActionPlanner: mentioned_bonus=mentioned_bonus, action_options_text=action_options_block, moderation_prompt=moderation_prompt_block, - identity_block=identity_block, + name_block=name_block, 
plan_style=global_config.personality.plan_style, ) return prompt, message_id_list diff --git a/src/chat/replyer/default_generator.py b/src/chat/replyer/default_generator.py index 59340914..c8a78aee 100644 --- a/src/chat/replyer/default_generator.py +++ b/src/chat/replyer/default_generator.py @@ -1015,7 +1015,7 @@ class DefaultReplyer: async def llm_generate_content(self, prompt: str): with Timer("LLM生成", {}): # 内部计时器,可选保留 # 直接使用已初始化的模型实例 - logger.info(f"使用模型集生成回复: {self.express_model.model_for_task}") + logger.info(f"使用模型集生成回复: {', '.join(map(str, self.express_model.model_for_task.model_list))}") if global_config.debug.show_prompt: logger.info(f"\n{prompt}\n") From 1ddfa47e6803b618153be843f851515dd6339c7d Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Fri, 22 Aug 2025 17:29:50 +0800 Subject: [PATCH 2/5] =?UTF-8?q?feat=EF=BC=9A=E5=8F=AF=E4=BB=A5=E9=80=89?= =?UTF-8?q?=E6=8B=A9=E6=BF=80=E6=B4=BB=E5=80=BC=E8=AE=A1=E7=AE=97=E6=A8=A1?= =?UTF-8?q?=E5=BC=8F=EF=BC=8C=E9=BB=98=E8=AE=A4=E5=BF=AB=E9=80=9F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/heart_flow/heartflow_message_processor.py | 4 ++-- src/config/official_configs.py | 3 +++ template/bot_config_template.toml | 2 ++ 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/chat/heart_flow/heartflow_message_processor.py b/src/chat/heart_flow/heartflow_message_processor.py index dc953102..ab6f6613 100644 --- a/src/chat/heart_flow/heartflow_message_processor.py +++ b/src/chat/heart_flow/heartflow_message_processor.py @@ -38,7 +38,7 @@ async def _calculate_interest(message: MessageRecv) -> Tuple[float, bool, list[s interested_rate, keywords,keywords_lite = await hippocampus_manager.get_activate_from_text( message.processed_plain_text, max_depth= 4, - fast_retrieval=False, + fast_retrieval=global_config.chat.interest_rate_mode == "fast", ) message.key_words = keywords message.key_words_lite = keywords_lite @@ -85,7 +85,7 @@ async def _calculate_interest(message: MessageRecv) -> Tuple[float, bool, list[s message.interest_value = interested_rate message.is_mentioned = is_mentioned - return interested_rate, is_mentioned, keywords + return interested_rate, keywords class HeartFCMessageReceiver: diff --git a/src/config/official_configs.py b/src/config/official_configs.py index 0087cd62..4acc58fe 100644 --- a/src/config/official_configs.py +++ b/src/config/official_configs.py @@ -70,6 +70,9 @@ class ChatConfig(ConfigBase): max_context_size: int = 18 """上下文长度""" + + interest_rate_mode: Literal["fast", "accurate"] = "fast" + """兴趣值计算模式,fast为快速计算,accurate为精确计算""" mentioned_bot_inevitable_reply: bool = False """提及 bot 必然回复""" diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml index 60cb4dd7..fb711500 100644 --- a/template/bot_config_template.toml +++ b/template/bot_config_template.toml @@ -65,6 +65,8 @@ focus_value = 0.5 max_context_size = 20 # 上下文长度 +interest_rate_mode = "fast" #激活值计算模式,可选fast或者accurate + mentioned_bot_inevitable_reply = true # 提及 bot 大概率回复 at_bot_inevitable_reply = true # @bot 或 提及bot 大概率回复 From b525e1e098791fbf57d8fddba23b6d2e54793850 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Fri, 22 Aug 2025 22:35:27 +0800 Subject: [PATCH 3/5] =?UTF-8?q?feat=EF=BC=9A=E9=9D=9E=E6=A0=B8=E5=BF=83?= =?UTF-8?q?=E5=8A=A8=E4=BD=9C=E5=B7=B2=E4=BB=8Eplanner=E5=88=86=E7=A6=BB?= =?UTF-8?q?=E5=88=B0=E5=89=AFplanner=EF=BC=8C=E7=A7=BB=E9=99=A4action?= 
=?UTF-8?q?=E7=9A=84=E6=BF=80=E6=B4=BB=E6=9C=BA=E5=88=B6=EF=BC=8C=E6=B7=BB?= =?UTF-8?q?=E5=8A=A0=E6=A8=A1=E5=9E=8B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/heart_flow/heartFC_chat.py | 43 +- src/chat/planner_actions/action_modifier.py | 26 +- src/chat/planner_actions/planner.py | 641 ++++++++++++++++---- src/chat/utils/chat_message_builder.py | 8 +- src/chat/utils/utils.py | 6 +- src/config/api_ada_configs.py | 3 + src/config/official_configs.py | 3 + src/plugin_system/base/component_types.py | 6 +- template/bot_config_template.toml | 4 +- template/model_config_template.toml | 9 +- 10 files changed, 609 insertions(+), 140 deletions(-) diff --git a/src/chat/heart_flow/heartFC_chat.py b/src/chat/heart_flow/heartFC_chat.py index f4200028..910e95fa 100644 --- a/src/chat/heart_flow/heartFC_chat.py +++ b/src/chat/heart_flow/heartFC_chat.py @@ -29,6 +29,7 @@ from src.plugin_system.core import events_manager from src.plugin_system.apis import generator_api, send_api, message_api, database_api from src.mais4u.mai_think import mai_thinking_manager from src.mais4u.s4u_config import s4u_config +from src.chat.utils.chat_message_builder import build_readable_messages_with_id, build_readable_actions, get_actions_by_timestamp_with_chat, get_raw_msg_before_timestamp_with_chat if TYPE_CHECKING: from src.common.data_models.database_data_model import DatabaseMessages @@ -402,8 +403,8 @@ class HeartFChatting: ) ] else: - # 第一步:动作修改 - with Timer("动作修改", cycle_timers): + # 第一步:动作检查 + with Timer("动作检查", cycle_timers): try: await self.action_modifier.modify_actions() available_actions = self.action_manager.get_using_actions() @@ -412,10 +413,45 @@ class HeartFChatting: # 执行planner planner_info = self.action_planner.get_necessary_info() + + + + + + message_list_before_now = get_raw_msg_before_timestamp_with_chat( + chat_id=self.stream_id, + timestamp=time.time(), + limit=int(global_config.chat.max_context_size * 0.6), + ) + chat_content_block, message_id_list = build_readable_messages_with_id( + messages=message_list_before_now, + timestamp_mode="normal_no_YMD", + read_mark=self.action_planner.last_obs_time_mark, + truncate=True, + show_actions=True, + ) + + actions_before_now = get_actions_by_timestamp_with_chat( + chat_id=self.stream_id, + timestamp_start=time.time() - 600, + timestamp_end=time.time(), + limit=5, + ) + + actions_before_now_block = build_readable_actions( + actions=actions_before_now, + ) + + + + prompt_info = await self.action_planner.build_planner_prompt( is_group_chat=planner_info[0], chat_target_info=planner_info[1], current_available_actions=planner_info[2], + chat_content_block=chat_content_block, + actions_before_now_block=actions_before_now_block, + message_id_list=message_id_list, ) if not await events_manager.handle_mai_events( EventType.ON_PLAN, None, prompt_info[0], None, self.chat_stream.stream_id @@ -427,6 +463,9 @@ class HeartFChatting: loop_start_time=self.last_read_time, available_actions=available_actions, ) + + for action in action_to_use_info: + print(action.action_type) # 3. 
并行执行所有动作 action_tasks = [ diff --git a/src/chat/planner_actions/action_modifier.py b/src/chat/planner_actions/action_modifier.py index 03c72ffc..9bf65f01 100644 --- a/src/chat/planner_actions/action_modifier.py +++ b/src/chat/planner_actions/action_modifier.py @@ -60,7 +60,7 @@ class ActionModifier: removals_s1: List[Tuple[str, str]] = [] removals_s2: List[Tuple[str, str]] = [] - removals_s3: List[Tuple[str, str]] = [] + # removals_s3: List[Tuple[str, str]] = [] self.action_manager.restore_actions() all_actions = self.action_manager.get_using_actions() @@ -103,26 +103,28 @@ class ActionModifier: self.action_manager.remove_action_from_using(action_name) logger.debug(f"{self.log_prefix}阶段二移除动作: {action_name},原因: {reason}") + + # === 第三阶段:激活类型判定 === - if chat_content is not None: - logger.debug(f"{self.log_prefix}开始激活类型判定阶段") + # if chat_content is not None: + # logger.debug(f"{self.log_prefix}开始激活类型判定阶段") # 获取当前使用的动作集(经过第一阶段处理) - current_using_actions = self.action_manager.get_using_actions() + # current_using_actions = self.action_manager.get_using_actions() # 获取因激活类型判定而需要移除的动作 - removals_s3 = await self._get_deactivated_actions_by_type( - current_using_actions, - chat_content, - ) + # removals_s3 = await self._get_deactivated_actions_by_type( + # current_using_actions, + # chat_content, + # ) # 应用第三阶段的移除 - for action_name, reason in removals_s3: - self.action_manager.remove_action_from_using(action_name) - logger.debug(f"{self.log_prefix}阶段三移除动作: {action_name},原因: {reason}") + # for action_name, reason in removals_s3: + # self.action_manager.remove_action_from_using(action_name) + # logger.debug(f"{self.log_prefix}阶段三移除动作: {action_name},原因: {reason}") # === 统一日志记录 === - all_removals = removals_s1 + removals_s2 + removals_s3 + all_removals = removals_s1 + removals_s2 removals_summary: str = "" if all_removals: removals_summary = " | ".join([f"{name}({reason})" for name, reason in all_removals]) diff --git a/src/chat/planner_actions/planner.py b/src/chat/planner_actions/planner.py index 6759c73a..6272cd94 100644 --- a/src/chat/planner_actions/planner.py +++ b/src/chat/planner_actions/planner.py @@ -21,8 +21,9 @@ from src.chat.utils.chat_message_builder import ( from src.chat.utils.utils import get_chat_type_and_target_info from src.chat.planner_actions.action_manager import ActionManager from src.chat.message_receive.chat_stream import get_chat_manager -from src.plugin_system.base.component_types import ActionInfo, ChatMode, ComponentType +from src.plugin_system.base.component_types import ActionInfo, ChatMode, ComponentType, ActionActivationType from src.plugin_system.core.component_registry import component_registry +import random logger = get_logger("planner") @@ -82,6 +83,36 @@ def init_prompt(): """, "action_prompt", ) + + + Prompt( + """ +{time_block} +{name_block} +请你根据聊天内容,选择一个或多个action来参与聊天。如果没有合适的action,请选择no_action。 + +{chat_context_description},以下是具体的聊天内容 +{chat_content_block} + +{moderation_prompt} + +现在请你根据聊天内容和用户的最新消息选择合适的action和触发action的消息: +{actions_before_now_block} + +no_action:不选择任何动作 +{{ + "action": "no_action", + "reason":"不动作的原因" +}} + +{action_options_text} + +请选择,并说明触发action的消息id和选择该action的原因。消息id格式:m+数字 + +请根据动作示例,以严格的 JSON 格式输出,且仅包含 JSON 内容: +""", + "sub_planner_prompt", + ) class ActionPlanner: @@ -93,6 +124,9 @@ class ActionPlanner: self.planner_llm = LLMRequest( model_set=model_config.model_task_config.planner, request_type="planner" ) # 用于动作规划 + self.planner_small_llm = LLMRequest( + model_set=model_config.model_task_config.planner_small, 
request_type="planner_small" + ) # 用于动作规划 self.last_obs_time_mark = 0.0 # 添加重试计数器 @@ -100,7 +134,7 @@ class ActionPlanner: self.max_plan_retries = 3 def find_message_by_id( - self, message_id: str, message_id_list: List[DatabaseMessages] + self, message_id: str, message_id_list: List[Tuple[str, DatabaseMessages]] ) -> Optional[DatabaseMessages]: # sourcery skip: use-next """ @@ -114,8 +148,8 @@ class ActionPlanner: 找到的原始消息字典,如果未找到则返回None """ for item in message_id_list: - if item.message_id == message_id: - return item + if item[0] == message_id: + return item[1] return None def get_latest_message(self, message_id_list: List[DatabaseMessages]) -> Optional[DatabaseMessages]: @@ -129,6 +163,247 @@ class ActionPlanner: 最新的消息字典,如果列表为空则返回None """ return message_id_list[-1] if message_id_list else None + + def _parse_single_action(self, action_json: dict, message_id_list: List[Tuple[str, DatabaseMessages]], current_available_actions: List[Tuple[str, ActionInfo]]) -> List[ActionPlannerInfo]: + """解析单个action JSON并返回ActionPlannerInfo列表""" + action_planner_infos = [] + + try: + action = action_json.get("action", "no_action") + reasoning = action_json.get("reason", "未提供原因") + action_data = {} + + # 将所有其他属性添加到action_data + for key, value in action_json.items(): + if key not in ["action", "reasoning"]: + action_data[key] = value + + # 非no_action动作需要target_message_id + target_message = None + if action != "no_action": + if target_message_id := action_json.get("target_message_id"): + # 根据target_message_id查找原始消息 + target_message = self.find_message_by_id(target_message_id, message_id_list) + if target_message is None: + logger.warning(f"{self.log_prefix}无法找到target_message_id '{target_message_id}' 对应的消息") + # 选择最新消息作为target_message + target_message = self.get_latest_message(message_id_list) + else: + logger.warning(f"{self.log_prefix}动作'{action}'缺少target_message_id") + + # 验证action是否可用 + available_action_names = [action_name for action_name, _ in current_available_actions] + if action != "no_action" and action != "reply" and action not in available_action_names: + logger.warning( + f"{self.log_prefix}LLM 返回了当前不可用或无效的动作: '{action}' (可用: {available_action_names}),将强制使用 'no_action'" + ) + reasoning = f"LLM 返回了当前不可用的动作 '{action}' (可用: {available_action_names})。原始理由: {reasoning}" + action = "no_action" + + # 创建ActionPlannerInfo对象 + # 将列表转换为字典格式 + available_actions_dict = dict(current_available_actions) + action_planner_infos.append(ActionPlannerInfo( + action_type=action, + reasoning=reasoning, + action_data=action_data, + action_message=target_message, + available_actions=available_actions_dict, + )) + + except Exception as e: + logger.error(f"{self.log_prefix}解析单个action时出错: {e}") + # 将列表转换为字典格式 + available_actions_dict = dict(current_available_actions) + action_planner_infos.append(ActionPlannerInfo( + action_type="no_action", + reasoning=f"解析单个action时出错: {e}", + action_data={}, + action_message=None, + available_actions=available_actions_dict, + )) + + return action_planner_infos + + async def sub_plan( + self, + action_list: List[Tuple[str, ActionInfo]], + actions_before_now_block: str, + chat_content_block: str, + message_id_list: List[Tuple[str, DatabaseMessages]], + is_group_chat: bool = False, + chat_target_info: Optional[dict] = None, + # current_available_actions: Dict[str, ActionInfo] = {}, + ) -> List[ActionPlannerInfo]: + # 构建副planner并执行(单个副planner) + try: + if actions_before_now_block: + actions_before_now_block = f"你刚刚选择并执行过的action是:\n{actions_before_now_block}" + else: + actions_before_now_block 
= "" + + chat_context_description = "你现在正在一个群聊中" + chat_target_name = None + if not is_group_chat and chat_target_info: + chat_target_name = ( + chat_target_info.get("person_name") or chat_target_info.get("user_nickname") or "对方" + ) + chat_context_description = f"你正在和 {chat_target_name} 私聊" + + action_options_block = "" + + for using_actions_name, using_actions_info in action_list: + if using_actions_info.action_parameters: + param_text = "\n" + for param_name, param_description in using_actions_info.action_parameters.items(): + param_text += f' "{param_name}":"{param_description}"\n' + param_text = param_text.rstrip("\n") + else: + param_text = "" + + require_text = "" + for require_item in using_actions_info.action_require: + require_text += f"- {require_item}\n" + require_text = require_text.rstrip("\n") + + using_action_prompt = await global_prompt_manager.get_prompt_async("action_prompt") + using_action_prompt = using_action_prompt.format( + action_name=using_actions_name, + action_description=using_actions_info.description, + action_parameters=param_text, + action_require=require_text, + ) + + action_options_block += using_action_prompt + + moderation_prompt_block = "请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。" + time_block = f"当前时间:{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}" + bot_name = global_config.bot.nickname + if global_config.bot.alias_names: + bot_nickname = f",也有人叫你{','.join(global_config.bot.alias_names)}" + else: + bot_nickname = "" + name_block = f"你的名字是{bot_name}{bot_nickname},请注意哪些是你自己的发言。" + + planner_prompt_template = await global_prompt_manager.get_prompt_async("sub_planner_prompt") + prompt = planner_prompt_template.format( + time_block=time_block, + chat_context_description=chat_context_description, + chat_content_block=chat_content_block, + actions_before_now_block=actions_before_now_block, + action_options_text=action_options_block, + moderation_prompt=moderation_prompt_block, + name_block=name_block, + ) + # return prompt, message_id_list + except Exception as e: + logger.error(f"构建 Planner 提示词时出错: {e}") + logger.error(traceback.format_exc()) + return "构建 Planner Prompt 时出错", [] + + # --- 调用 LLM (普通文本生成) --- + llm_content = None + action_planner_infos = [] # 存储多个ActionPlannerInfo对象 + + try: + llm_content, (reasoning_content, _, _) = await self.planner_llm.generate_response_async(prompt=prompt) + + if global_config.debug.show_prompt: + logger.info(f"{self.log_prefix}副规划器原始提示词: {prompt}") + logger.info(f"{self.log_prefix}副规划器原始响应: {llm_content}") + if reasoning_content: + logger.info(f"{self.log_prefix}副规划器推理: {reasoning_content}") + else: + logger.debug(f"{self.log_prefix}副规划器原始提示词: {prompt}") + logger.debug(f"{self.log_prefix}副规划器原始响应: {llm_content}") + if reasoning_content: + logger.debug(f"{self.log_prefix}副规划器推理: {reasoning_content}") + + except Exception as req_e: + logger.error(f"{self.log_prefix}副规划器LLM 请求执行失败: {req_e}") + # 返回一个默认的no_action + action_planner_infos.append(ActionPlannerInfo( + action_type="no_action", + reasoning=f"副规划器LLM 请求失败,模型出现问题: {req_e}", + action_data={}, + action_message=None, + available_actions=action_list, + )) + return action_planner_infos + + if llm_content: + try: + parsed_json = json.loads(repair_json(llm_content)) + + # 处理不同的JSON格式 + if isinstance(parsed_json, list): + # 如果是列表,处理每个action + if parsed_json: + logger.info(f"{self.log_prefix}LLM返回了{len(parsed_json)}个action") + for action_item in parsed_json: + if isinstance(action_item, dict): + action_planner_infos.extend(self._parse_single_action( + action_item, 
message_id_list, action_list + )) + else: + logger.warning(f"{self.log_prefix}列表中的action项不是字典类型: {type(action_item)}") + else: + logger.warning(f"{self.log_prefix}LLM返回了空列表") + action_planner_infos.append(ActionPlannerInfo( + action_type="no_action", + reasoning="LLM返回了空列表,选择no_action", + action_data={}, + action_message=None, + available_actions=action_list, + )) + elif isinstance(parsed_json, dict): + # 如果是单个字典,处理单个action + action_planner_infos.extend(self._parse_single_action( + parsed_json, message_id_list, action_list + )) + else: + logger.error(f"{self.log_prefix}解析后的JSON不是字典或列表类型: {type(parsed_json)}") + action_planner_infos.append(ActionPlannerInfo( + action_type="no_action", + reasoning=f"解析后的JSON类型错误: {type(parsed_json)}", + action_data={}, + action_message=None, + available_actions=action_list, + )) + + except Exception as json_e: + logger.warning(f"{self.log_prefix}解析LLM响应JSON失败 {json_e}. LLM原始输出: '{llm_content}'") + traceback.print_exc() + action_planner_infos.append(ActionPlannerInfo( + action_type="no_action", + reasoning=f"解析LLM响应JSON失败: {json_e}. 将使用默认动作 'no_action'.", + action_data={}, + action_message=None, + available_actions=action_list, + )) + else: + # 如果没有LLM内容,返回默认的no_action + action_planner_infos.append(ActionPlannerInfo( + action_type="no_action", + reasoning="副规划器没有获得LLM响应", + action_data={}, + action_message=None, + available_actions=action_list, + )) + + # 如果没有解析到任何action,返回默认的no_action + if not action_planner_infos: + action_planner_infos.append(ActionPlannerInfo( + action_type="no_action", + reasoning="副规划器没有解析到任何有效action", + action_data={}, + action_message=None, + available_actions=action_list, + )) + + logger.info(f"{self.log_prefix}副规划器返回了{len(action_planner_infos)}个action") + return action_planner_infos + async def plan( self, @@ -147,17 +422,144 @@ class ActionPlanner: target_message: Optional[DatabaseMessages] = None # 初始化target_message变量 prompt: str = "" message_id_list: list = [] + + message_list_before_now = get_raw_msg_before_timestamp_with_chat( + chat_id=self.chat_id, + timestamp=time.time(), + limit=int(global_config.chat.max_context_size * 0.6), + ) + chat_content_block, message_id_list = build_readable_messages_with_id( + messages=message_list_before_now, + timestamp_mode="normal_no_YMD", + read_mark=self.last_obs_time_mark, + truncate=True, + show_actions=True, + ) + + actions_before_now = get_actions_by_timestamp_with_chat( + chat_id=self.chat_id, + timestamp_start=time.time() - 600, + timestamp_end=time.time(), + limit=5, + ) + + actions_before_now_block = build_readable_actions( + actions=actions_before_now, + ) + + + message_list_before_now_short = message_list_before_now[:5] + + chat_content_block_short, message_id_list_short = build_readable_messages_with_id( + messages=message_list_before_now_short, + timestamp_mode="normal_no_YMD", + truncate=False, + show_actions=False, + ) + + self.last_obs_time_mark = time.time() try: + logger.info(f"{self.log_prefix}开始构建副Planner") + sub_planner_actions = {} + + for action_name, action_info in available_actions.items(): + if action_info.activation_type == ActionActivationType.LLM_JUDGE or action_info.activation_type == ActionActivationType.ALWAYS: + sub_planner_actions[action_name] = action_info + elif action_info.activation_type == ActionActivationType.RANDOM: + if random.random() < action_info.random_activation_probability: + sub_planner_actions[action_name] = action_info + elif action_info.activation_type == ActionActivationType.KEYWORD: + if action_info.activation_keywords: + for keyword in 
action_info.activation_keywords: + if keyword in chat_content_block_short: + sub_planner_actions[action_name] = action_info + elif action_info.activation_type == ActionActivationType.NEVER: + pass + else: + logger.warning(f"{self.log_prefix}未知的激活类型: {action_info.activation_type},跳过处理") + + sub_planner_actions_num = len(sub_planner_actions) + sub_planner_size = global_config.chat.planner_size + if global_config.chat.planner_size > int(global_config.chat.planner_size): + if random.random() < global_config.chat.planner_size - int(global_config.chat.planner_size): + sub_planner_size = int(global_config.chat.planner_size) + 1 + sub_planner_num = int(sub_planner_actions_num / sub_planner_size) + if sub_planner_actions_num % sub_planner_size != 0: + sub_planner_num += 1 + + logger.info(f"{self.log_prefix}副规划器数量: {sub_planner_num}, 副规划器大小: {sub_planner_size}") + + # 将sub_planner_actions随机分配到sub_planner_num个List中 + sub_planner_lists = [] + if sub_planner_actions_num > 0: + # 将actions转换为列表并随机打乱 + action_items = list(sub_planner_actions.items()) + random.shuffle(action_items) + + # 初始化所有子列表 + for i in range(sub_planner_num): + sub_planner_lists.append([]) + + # 分配actions到各个子列表 + for i, (action_name, action_info) in enumerate(action_items): + # 确保每个列表至少有一个action + if i < sub_planner_num: + sub_planner_lists[i].append((action_name, action_info)) + else: + # 随机选择一个列表添加action,但不超过最大大小限制 + available_lists = [j for j, lst in enumerate(sub_planner_lists) + if len(lst) < sub_planner_size] + if available_lists: + target_list = random.choice(available_lists) + sub_planner_lists[target_list].append((action_name, action_info)) + + logger.info(f"{self.log_prefix}成功将{len(sub_planner_actions)}个actions分配到{sub_planner_num}个子列表中") + for i, lst in enumerate(sub_planner_lists): + logger.debug(f"{self.log_prefix}子列表{i+1}: {len(lst)}个actions") + else: + logger.info(f"{self.log_prefix}没有可用的actions需要分配") + + + # 先获取必要信息 is_group_chat, chat_target_info, current_available_actions = self.get_necessary_info() + + # 并行执行所有副规划器 + import asyncio + + async def execute_sub_plan(action_list): + return await self.sub_plan( + action_list=action_list, + actions_before_now_block=actions_before_now_block, + chat_content_block=chat_content_block_short, + message_id_list=message_id_list_short, + is_group_chat=is_group_chat, + chat_target_info=chat_target_info, + # current_available_actions=current_available_actions, + ) + + # 创建所有任务 + sub_plan_tasks = [execute_sub_plan(action_list) for action_list in sub_planner_lists] + + # 并行执行所有任务 + sub_plan_results = await asyncio.gather(*sub_plan_tasks) + + # 收集所有结果 + all_sub_planner_results = [] + for sub_result in sub_plan_results: + all_sub_planner_results.extend(sub_result) + + logger.info(f"{self.log_prefix}所有副规划器共返回了{len(all_sub_planner_results)}个action") # --- 构建提示词 (调用修改后的 PromptBuilder 方法) --- prompt, message_id_list = await self.build_planner_prompt( is_group_chat=is_group_chat, # <-- Pass HFC state chat_target_info=chat_target_info, # <-- 传递获取到的聊天目标信息 - current_available_actions=current_available_actions, # <-- Pass determined actions + current_available_actions="", # <-- Pass determined actions mode=mode, - refresh_time=True, + chat_content_block=chat_content_block, + actions_before_now_block=actions_before_now_block, + message_id_list=message_id_list, ) # --- 调用 LLM (普通文本生成) --- @@ -185,60 +587,54 @@ class ActionPlanner: try: parsed_json = json.loads(repair_json(llm_content)) + # 处理不同的JSON格式,复用_parse_single_action函数 if isinstance(parsed_json, list): if parsed_json: + # 使用最后一个action(保持原有逻辑) 
parsed_json = parsed_json[-1] logger.warning(f"{self.log_prefix}LLM返回了多个JSON对象,使用最后一个: {parsed_json}") else: parsed_json = {} - if not isinstance(parsed_json, dict): - logger.error(f"{self.log_prefix}解析后的JSON不是字典类型: {type(parsed_json)}") - parsed_json = {} - - action = parsed_json.get("action", "no_action") - reasoning = parsed_json.get("reason", "未提供原因") - - # 将所有其他属性添加到action_data - for key, value in parsed_json.items(): - if key not in ["action", "reasoning"]: - action_data[key] = value - - # 非no_action动作需要target_message_id - if action != "no_action": - if target_message_id := parsed_json.get("target_message_id"): - # 根据target_message_id查找原始消息 - target_message = self.find_message_by_id(target_message_id, message_id_list) - # 如果获取的target_message为None,输出warning并重新plan - if target_message is None: - self.plan_retry_count += 1 - logger.warning( - f"{self.log_prefix}无法找到target_message_id '{target_message_id}' 对应的消息,重试次数: {self.plan_retry_count}/{self.max_plan_retries}" - ) - # 仍有重试次数 - if self.plan_retry_count < self.max_plan_retries: - # 递归重新plan - return await self.plan(mode, loop_start_time, available_actions) - logger.error( - f"{self.log_prefix}连续{self.max_plan_retries}次plan获取target_message失败,选择最新消息作为target_message" - ) - target_message = self.get_latest_message(message_id_list) - self.plan_retry_count = 0 # 重置计数器 - else: - logger.warning(f"{self.log_prefix}动作'{action}'缺少target_message_id") - - if action != "no_action" and action != "reply" and action not in current_available_actions: - logger.warning( - f"{self.log_prefix}LLM 返回了当前不可用或无效的动作: '{action}' (可用: {list(current_available_actions.keys())}),将强制使用 'no_action'" + if isinstance(parsed_json, dict): + # 使用_parse_single_action函数解析单个action + # 将字典转换为列表格式 + current_available_actions_list = list(current_available_actions.items()) + action_planner_infos = self._parse_single_action( + parsed_json, message_id_list, current_available_actions_list ) - reasoning = f"LLM 返回了当前不可用的动作 '{action}' (可用: {list(current_available_actions.keys())})。原始理由: {reasoning}" + + if action_planner_infos: + # 获取第一个(也是唯一一个)action的信息 + action_info = action_planner_infos[0] + action = action_info.action_type + reasoning = action_info.reasoning + action_data.update(action_info.action_data) + target_message = action_info.action_message + + # 处理target_message为None的情况(保持原有的重试逻辑) + if target_message is None and action != "no_action": + # 尝试获取最新消息作为target_message + target_message = self.get_latest_message(message_id_list) + if target_message is None: + logger.warning(f"{self.log_prefix}无法获取任何消息作为target_message") + else: + # 如果没有解析到action,使用默认值 + action = "no_action" + reasoning = "解析action失败" + target_message = None + else: + logger.error(f"{self.log_prefix}解析后的JSON不是字典类型: {type(parsed_json)}") action = "no_action" + reasoning = f"解析后的JSON类型错误: {type(parsed_json)}" + target_message = None except Exception as json_e: logger.warning(f"{self.log_prefix}解析LLM响应JSON失败 {json_e}. LLM原始输出: '{llm_content}'") traceback.print_exc() - reasoning = f"解析LLM响应JSON失败: {json_e}. 将使用默认动作 'no_action'." action = "no_action" + reasoning = f"解析LLM响应JSON失败: {json_e}. 将使用默认动作 'no_action'." 
+ target_message = None except Exception as outer_e: logger.error(f"{self.log_prefix}Planner 处理过程中发生意外错误,规划失败,将执行 no_action: {outer_e}") @@ -246,30 +642,70 @@ class ActionPlanner: action = "no_action" reasoning = f"Planner 内部处理错误: {outer_e}" - is_parallel = False + is_parallel = True if mode == ChatMode.NORMAL and action in current_available_actions: - is_parallel = current_available_actions[action].parallel_action + if is_parallel: + is_parallel = current_available_actions[action].parallel_action action_data["loop_start_time"] = loop_start_time - actions = [ - ActionPlannerInfo( - action_type=action, - reasoning=reasoning, - action_data=action_data, - action_message=target_message, - available_actions=available_actions, - ) - ] - - if action != "reply" and is_parallel: - actions.append( - ActionPlannerInfo( - action_type="reply", + # 过滤掉no_action,除非所有结果都是no_action + def filter_no_actions(action_list): + """过滤no_action,如果所有都是no_action则返回一个""" + non_no_actions = [a for a in action_list if a.action_type != "no_action"] + if non_no_actions: + return non_no_actions + else: + # 如果所有都是no_action,返回第一个 + return [action_list[0]] if action_list else [] + + # 根据is_parallel决定返回值 + if is_parallel: + # 如果为真,将主规划器的结果和副规划器的结果都返回 + main_actions = [] + + # 添加主规划器的action(如果不是no_action) + if action != "no_action": + main_actions.append(ActionPlannerInfo( + action_type=action, + reasoning=reasoning, + action_data=action_data, action_message=target_message, available_actions=available_actions, - ) - ) + )) + + # 先合并主副规划器的结果 + all_actions = main_actions + all_sub_planner_results + + # 然后统一过滤no_action + actions = filter_no_actions(all_actions) + + # 如果所有结果都是no_action,返回一个no_action + if not actions: + actions = [ActionPlannerInfo( + action_type="no_action", + reasoning="所有规划器都选择不执行动作", + action_data={}, + action_message=None, + available_actions=available_actions, + )] + + logger.info(f"{self.log_prefix}并行模式:返回主规划器{len(main_actions)}个action + 副规划器{len(all_sub_planner_results)}个action,过滤后总计{len(actions)}个action") + else: + # 如果为假,只返回副规划器的结果 + actions = filter_no_actions(all_sub_planner_results) + + # 如果所有结果都是no_action,返回一个no_action + if not actions: + actions = [ActionPlannerInfo( + action_type="no_action", + reasoning="副规划器都选择不执行动作", + action_data={}, + action_message=None, + available_actions=available_actions, + )] + + logger.info(f"{self.log_prefix}非并行模式:返回副规划器的{len(actions)}个action(已过滤no_action)") return actions, target_message @@ -278,43 +714,19 @@ class ActionPlanner: is_group_chat: bool, # Now passed as argument chat_target_info: Optional[dict], # Now passed as argument current_available_actions: Dict[str, ActionInfo], - refresh_time: bool = False, mode: ChatMode = ChatMode.FOCUS, + actions_before_now_block :str = "", + chat_content_block :str = "", + message_id_list :List[Tuple[str, DatabaseMessages]] = None, ) -> tuple[str, List[DatabaseMessages]]: # sourcery skip: use-join """构建 Planner LLM 的提示词 (获取模板并填充数据)""" try: - message_list_before_now = get_raw_msg_before_timestamp_with_chat( - chat_id=self.chat_id, - timestamp=time.time(), - limit=int(global_config.chat.max_context_size * 0.6), - ) - chat_content_block, message_id_list = build_readable_messages_with_id( - messages=message_list_before_now, - timestamp_mode="normal_no_YMD", - read_mark=self.last_obs_time_mark, - truncate=True, - show_actions=True, - ) - - actions_before_now = get_actions_by_timestamp_with_chat( - chat_id=self.chat_id, - timestamp_start=time.time() - 600, - timestamp_end=time.time(), - limit=5, - ) - - actions_before_now_block = 
build_readable_actions( - actions=actions_before_now, - ) - - if actions_before_now: + + if actions_before_now_block: actions_before_now_block = f"你刚刚选择并执行过的action是:\n{actions_before_now_block}" else: actions_before_now_block = "" - if refresh_time: - self.last_obs_time_mark = time.time() - mentioned_bonus = "" if global_config.chat.mentioned_bot_inevitable_reply: mentioned_bonus = "\n- 有人提到你" @@ -348,29 +760,32 @@ class ActionPlanner: action_options_block = "" - for using_actions_name, using_actions_info in current_available_actions.items(): - if using_actions_info.action_parameters: - param_text = "\n" - for param_name, param_description in using_actions_info.action_parameters.items(): - param_text += f' "{param_name}":"{param_description}"\n' - param_text = param_text.rstrip("\n") - else: - param_text = "" + if current_available_actions: + for using_actions_name, using_actions_info in current_available_actions.items(): + if using_actions_info.action_parameters: + param_text = "\n" + for param_name, param_description in using_actions_info.action_parameters.items(): + param_text += f' "{param_name}":"{param_description}"\n' + param_text = param_text.rstrip("\n") + else: + param_text = "" - require_text = "" - for require_item in using_actions_info.action_require: - require_text += f"- {require_item}\n" - require_text = require_text.rstrip("\n") + require_text = "" + for require_item in using_actions_info.action_require: + require_text += f"- {require_item}\n" + require_text = require_text.rstrip("\n") - using_action_prompt = await global_prompt_manager.get_prompt_async("action_prompt") - using_action_prompt = using_action_prompt.format( - action_name=using_actions_name, - action_description=using_actions_info.description, - action_parameters=param_text, - action_require=require_text, - ) + using_action_prompt = await global_prompt_manager.get_prompt_async("action_prompt") + using_action_prompt = using_action_prompt.format( + action_name=using_actions_name, + action_description=using_actions_info.description, + action_parameters=param_text, + action_require=require_text, + ) - action_options_block += using_action_prompt + action_options_block += using_action_prompt + else: + action_options_block = "" moderation_prompt_block = "请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。" diff --git a/src/chat/utils/chat_message_builder.py b/src/chat/utils/chat_message_builder.py index 51edd045..0aca6bae 100644 --- a/src/chat/utils/chat_message_builder.py +++ b/src/chat/utils/chat_message_builder.py @@ -361,10 +361,10 @@ def _build_readable_messages_internal( # 创建时间戳到消息ID的映射,用于在消息前添加[id]标识符 timestamp_to_id_mapping: Dict[float, str] = {} if message_id_list: - for msg in message_id_list: + for msg_id, msg in message_id_list: timestamp = msg.time if timestamp is not None: - timestamp_to_id_mapping[timestamp] = msg.message_id + timestamp_to_id_mapping[timestamp] = msg_id def process_pic_ids(content: Optional[str]) -> str: """处理内容中的图片ID,将其替换为[图片x]格式""" @@ -477,7 +477,7 @@ def _build_readable_messages_internal( readable_time = translate_timestamp_to_human_readable(timestamp, mode=timestamp_mode) # 查找消息id(如果有)并构建id_prefix - message_id = timestamp_to_id_mapping.get(timestamp) + message_id = timestamp_to_id_mapping.get(timestamp, "") id_prefix = f"[{message_id}]" if message_id else "" if is_action: @@ -606,7 +606,7 @@ def build_readable_messages_with_id( truncate: bool = False, show_actions: bool = False, show_pic: bool = True, -) -> Tuple[str, List[DatabaseMessages]]: +) -> Tuple[str, List[Tuple[str, 
DatabaseMessages]]]: """ 将消息列表转换为可读的文本格式,并返回原始(时间戳, 昵称, 内容)列表。 允许通过参数控制格式化行为。 diff --git a/src/chat/utils/utils.py b/src/chat/utils/utils.py index 472a9cdd..7634593c 100644 --- a/src/chat/utils/utils.py +++ b/src/chat/utils/utils.py @@ -685,7 +685,7 @@ def assign_message_ids(messages: List[DatabaseMessages]) -> List[DatabaseMessage Returns: List[DatabaseMessages]: 分配了唯一ID的消息列表(写入message_id属性) """ - result: List[DatabaseMessages] = list(messages) # 复制原始消息列表 + result: List[Tuple[str, DatabaseMessages]] = [] # 复制原始消息列表 used_ids = set() len_i = len(messages) if len_i > 100: @@ -695,7 +695,7 @@ def assign_message_ids(messages: List[DatabaseMessages]) -> List[DatabaseMessage a = 1 b = 9 - for i, _ in enumerate(result): + for i, message in enumerate(messages): # 生成唯一的简短ID while True: # 使用索引+随机数生成简短ID @@ -705,7 +705,7 @@ def assign_message_ids(messages: List[DatabaseMessages]) -> List[DatabaseMessage if message_id not in used_ids: used_ids.add(message_id) break - result[i].message_id = message_id + result.append((message_id, message)) return result diff --git a/src/config/api_ada_configs.py b/src/config/api_ada_configs.py index bd881bfd..60dfd419 100644 --- a/src/config/api_ada_configs.py +++ b/src/config/api_ada_configs.py @@ -117,6 +117,9 @@ class ModelTaskConfig(ConfigBase): planner: TaskConfig """规划模型配置""" + planner_small: TaskConfig + """副规划模型配置""" + embedding: TaskConfig """嵌入模型配置""" diff --git a/src/config/official_configs.py b/src/config/official_configs.py index 4acc58fe..9de10b4a 100644 --- a/src/config/official_configs.py +++ b/src/config/official_configs.py @@ -76,6 +76,9 @@ class ChatConfig(ConfigBase): mentioned_bot_inevitable_reply: bool = False """提及 bot 必然回复""" + + planner_size: int = 1 + """副规划器大小,越小,麦麦的动作执行能力越精细,但是消耗更多token,调大可以缓解429类错误""" at_bot_inevitable_reply: bool = False """@bot 必然回复""" diff --git a/src/plugin_system/base/component_types.py b/src/plugin_system/base/component_types.py index 5570c2ad..643f42b7 100644 --- a/src/plugin_system/base/component_types.py +++ b/src/plugin_system/base/component_types.py @@ -115,9 +115,9 @@ class ActionInfo(ComponentInfo): action_require: List[str] = field(default_factory=list) # 动作需求说明 associated_types: List[str] = field(default_factory=list) # 关联的消息类型 # 激活类型相关 - focus_activation_type: ActionActivationType = ActionActivationType.ALWAYS - normal_activation_type: ActionActivationType = ActionActivationType.ALWAYS - activation_type: ActionActivationType = ActionActivationType.ALWAYS + focus_activation_type: ActionActivationType = ActionActivationType.ALWAYS #已弃用 + normal_activation_type: ActionActivationType = ActionActivationType.ALWAYS #已弃用 + activation_type: ActionActivationType = ActionActivationType.ALWAYS random_activation_probability: float = 0.0 llm_judge_prompt: str = "" activation_keywords: List[str] = field(default_factory=list) # 激活关键词列表 diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml index fb711500..3b037de0 100644 --- a/template/bot_config_template.toml +++ b/template/bot_config_template.toml @@ -1,5 +1,5 @@ [inner] -version = "6.6.1" +version = "6.7.0" #----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读---- #如果你想要修改配置文件,请递增version的值 @@ -67,6 +67,8 @@ max_context_size = 20 # 上下文长度 interest_rate_mode = "fast" #激活值计算模式,可选fast或者accurate +planner_size = 1 # 副规划器大小,越小,麦麦的动作执行能力越精细,但是消耗更多token,调大可以缓解429类错误 + mentioned_bot_inevitable_reply = true # 提及 bot 大概率回复 at_bot_inevitable_reply = true # @bot 或 提及bot 大概率回复 diff --git a/template/model_config_template.toml b/template/model_config_template.toml index 
4c32e876..e3b041e6 100644 --- a/template/model_config_template.toml +++ b/template/model_config_template.toml @@ -1,5 +1,5 @@ [inner] -version = "1.3.1" +version = "1.4.0" # 配置文件版本号迭代规则同bot_config.toml @@ -117,11 +117,16 @@ model_list = ["siliconflow-deepseek-v3"] temperature = 0.2 # 模型温度,新V3建议0.1-0.3 max_tokens = 800 -[model_task_config.planner] #决策:负责决定麦麦该做什么的模型 +[model_task_config.planner] #决策:负责决定麦麦该什么时候回复的模型 model_list = ["siliconflow-deepseek-v3"] temperature = 0.3 max_tokens = 800 +[model_task_config.planner_small] #副决策:负责决定麦麦该做什么的模型 +model_list = ["qwen3-14b"] +temperature = 0.3 +max_tokens = 800 + [model_task_config.emotion] #负责麦麦的情绪变化 model_list = ["siliconflow-deepseek-v3"] temperature = 0.3 From 5d2f4aa9e8ac4f5f3f437c33df38fab0b661663e Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Fri, 22 Aug 2025 23:13:30 +0800 Subject: [PATCH 4/5] =?UTF-8?q?fix=EF=BC=9A=E4=B8=BA=E5=89=AFaction?= =?UTF-8?q?=E8=BF=87=E6=BB=A4=E5=B7=B2=E7=BB=8F=E6=89=A7=E8=A1=8C=E7=9A=84?= =?UTF-8?q?=E5=8A=A8=E4=BD=9C?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/planner_actions/planner.py | 21 +++++++++++++++++---- template/bot_config_template.toml | 2 +- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/src/chat/planner_actions/planner.py b/src/chat/planner_actions/planner.py index 6272cd94..765d0b8b 100644 --- a/src/chat/planner_actions/planner.py +++ b/src/chat/planner_actions/planner.py @@ -228,7 +228,7 @@ class ActionPlanner: async def sub_plan( self, action_list: List[Tuple[str, ActionInfo]], - actions_before_now_block: str, + actions_before_now: List[ActionPlannerInfo], chat_content_block: str, message_id_list: List[Tuple[str, DatabaseMessages]], is_group_chat: bool = False, @@ -237,8 +237,21 @@ class ActionPlanner: ) -> List[ActionPlannerInfo]: # 构建副planner并执行(单个副planner) try: + # 获取最近的actions + # 只保留action_type在action_list中的ActionPlannerInfo + action_names_in_list = [name for name, _ in action_list] + actions_before_now = [ + action for action in actions_before_now + if action.action_type in action_names_in_list + ] + + actions_before_now_block = build_readable_actions( + actions=actions_before_now, + ) + + if actions_before_now_block: - actions_before_now_block = f"你刚刚选择并执行过的action是:\n{actions_before_now_block}" + actions_before_now_block = f"你刚刚选择并执行过的action是,请注意如果相同的内容已经被执行,请不要重复执行:\n{actions_before_now_block}" else: actions_before_now_block = "" @@ -306,7 +319,7 @@ class ActionPlanner: action_planner_infos = [] # 存储多个ActionPlannerInfo对象 try: - llm_content, (reasoning_content, _, _) = await self.planner_llm.generate_response_async(prompt=prompt) + llm_content, (reasoning_content, _, _) = await self.planner_small_llm.generate_response_async(prompt=prompt) if global_config.debug.show_prompt: logger.info(f"{self.log_prefix}副规划器原始提示词: {prompt}") @@ -530,7 +543,7 @@ class ActionPlanner: async def execute_sub_plan(action_list): return await self.sub_plan( action_list=action_list, - actions_before_now_block=actions_before_now_block, + actions_before_now=actions_before_now, chat_content_block=chat_content_block_short, message_id_list=message_id_list_short, is_group_chat=is_group_chat, diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml index 3b037de0..37d248a1 100644 --- a/template/bot_config_template.toml +++ b/template/bot_config_template.toml @@ -67,7 +67,7 @@ max_context_size = 20 # 上下文长度 interest_rate_mode = "fast" #激活值计算模式,可选fast或者accurate -planner_size = 1 # 
副规划器大小,越小,麦麦的动作执行能力越精细,但是消耗更多token,调大可以缓解429类错误 +planner_size = 2 # 副规划器大小,越小,麦麦的动作执行能力越精细,但是消耗更多token,调大可以缓解429类错误 mentioned_bot_inevitable_reply = true # 提及 bot 大概率回复 at_bot_inevitable_reply = true # @bot 或 提及bot 大概率回复 From fb5dcbe860f2d780a80a4c51409abe0509fd62bb Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Fri, 22 Aug 2025 23:20:27 +0800 Subject: [PATCH 5/5] =?UTF-8?q?fix=EF=BC=9A=E6=8F=90=E5=8F=96=E5=87=BA?= =?UTF-8?q?=E9=94=99?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/planner_actions/planner.py | 6 +++--- template/bot_config_template.toml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/chat/planner_actions/planner.py b/src/chat/planner_actions/planner.py index 765d0b8b..1d0a3859 100644 --- a/src/chat/planner_actions/planner.py +++ b/src/chat/planner_actions/planner.py @@ -1,7 +1,7 @@ import json import time import traceback -from typing import Dict, Optional, Tuple, List +from typing import Dict, Optional, Tuple, List, Any from rich.traceback import install from datetime import datetime from json_repair import repair_json @@ -228,7 +228,7 @@ class ActionPlanner: async def sub_plan( self, action_list: List[Tuple[str, ActionInfo]], - actions_before_now: List[ActionPlannerInfo], + actions_before_now: List[Dict[str, Any]], chat_content_block: str, message_id_list: List[Tuple[str, DatabaseMessages]], is_group_chat: bool = False, @@ -242,7 +242,7 @@ class ActionPlanner: action_names_in_list = [name for name, _ in action_list] actions_before_now = [ action for action in actions_before_now - if action.action_type in action_names_in_list + if action["action_name"] in action_names_in_list ] actions_before_now_block = build_readable_actions( diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml index 37d248a1..b3f64751 100644 --- a/template/bot_config_template.toml +++ b/template/bot_config_template.toml @@ -1,5 +1,5 @@ [inner] -version = "6.7.0" +version = "6.7.1" #----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读---- #如果你想要修改配置文件,请递增version的值
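
A minimal, self-contained sketch of the sub-planner scheduling introduced in PATCH 3/5, for readers who want the intended control flow in one place. It assumes only names visible in the diffs above (planner_size, the activation filtering, asyncio.gather); the round-robin distribution below is a simplified stand-in for the shuffled assignment inside ActionPlanner.plan, so treat it as an approximation rather than the project's actual code.

import asyncio
import random
from typing import Dict, List, Tuple


def partition_actions(actions: Dict[str, object], planner_size: float) -> List[List[Tuple[str, object]]]:
    """Split activated actions into groups of roughly `planner_size` each.

    A fractional planner_size is rounded up probabilistically, mirroring the
    `random.random() < planner_size - int(planner_size)` check in the patch.
    """
    size = int(planner_size)
    if planner_size > size and random.random() < planner_size - size:
        size += 1
    size = max(size, 1)

    items = list(actions.items())
    random.shuffle(items)  # the patch also shuffles before distributing
    group_count = max(1, -(-len(items) // size))  # ceiling division

    buckets: List[List[Tuple[str, object]]] = [[] for _ in range(group_count)]
    for i, item in enumerate(items):
        buckets[i % group_count].append(item)  # simplified round-robin, not the patch's exact assignment
    return [bucket for bucket in buckets if bucket]


async def run_sub_planners(buckets, sub_plan):
    """Run one sub_plan() call per bucket concurrently, as plan() does via asyncio.gather."""
    results = await asyncio.gather(*(sub_plan(bucket) for bucket in buckets))
    # Flatten the per-bucket ActionPlannerInfo lists into one list of candidate actions.
    return [info for group in results for info in group]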