diff --git a/src/chat/replyer/group_generator.py b/src/chat/replyer/group_generator.py
index e2eb9085..e98cf8a7 100644
--- a/src/chat/replyer/group_generator.py
+++ b/src/chat/replyer/group_generator.py
@@ -227,13 +227,14 @@ class DefaultReplyer:
             traceback.print_exc()
             return False, llm_response
 
-    async def build_expression_habits(self, chat_history: str, target: str) -> Tuple[str, List[int]]:
+    async def build_expression_habits(self, chat_history: str, target: str, reply_reason: str = "") -> Tuple[str, List[int]]:
         # sourcery skip: for-append-to-extend
         """构建表达习惯块
 
         Args:
            chat_history: 聊天历史记录
            target: 目标消息内容
+           reply_reason: planner给出的回复理由
 
         Returns:
            str: 表达习惯信息字符串
@@ -246,7 +247,7 @@ class DefaultReplyer:
             # 使用从处理器传来的选中表达方式
             # 使用模型预测选择表达方式
             selected_expressions, selected_ids = await expression_selector.select_suitable_expressions(
-                self.chat_stream.stream_id, chat_history, max_num=8, target_message=target
+                self.chat_stream.stream_id, chat_history, max_num=8, target_message=target, reply_reason=reply_reason
             )
 
             if selected_expressions:
@@ -787,7 +788,7 @@ class DefaultReplyer:
         # 并行执行七个构建任务
         task_results = await asyncio.gather(
             self._time_and_run_task(
-                self.build_expression_habits(chat_talking_prompt_short, target), "expression_habits"
+                self.build_expression_habits(chat_talking_prompt_short, target, reply_reason), "expression_habits"
             ),
             self._time_and_run_task(
                 self.build_tool_info(chat_talking_prompt_short, sender, target, enable_tool=enable_tool), "tool_info"
diff --git a/src/chat/replyer/private_generator.py b/src/chat/replyer/private_generator.py
index 5f194048..d8438308 100644
--- a/src/chat/replyer/private_generator.py
+++ b/src/chat/replyer/private_generator.py
@@ -241,13 +241,14 @@ class PrivateReplyer:
 
         return f"{sender_relation}"
 
-    async def build_expression_habits(self, chat_history: str, target: str) -> Tuple[str, List[int]]:
+    async def build_expression_habits(self, chat_history: str, target: str, reply_reason: str = "") -> Tuple[str, List[int]]:
         # sourcery skip: for-append-to-extend
         """构建表达习惯块
 
         Args:
            chat_history: 聊天历史记录
            target: 目标消息内容
+           reply_reason: planner给出的回复理由
 
         Returns:
            str: 表达习惯信息字符串
@@ -260,7 +261,7 @@ class PrivateReplyer:
             # 使用从处理器传来的选中表达方式
             # 使用模型预测选择表达方式
             selected_expressions, selected_ids = await expression_selector.select_suitable_expressions(
-                self.chat_stream.stream_id, chat_history, max_num=8, target_message=target
+                self.chat_stream.stream_id, chat_history, max_num=8, target_message=target, reply_reason=reply_reason
             )
 
             if selected_expressions:
@@ -706,7 +707,7 @@ class PrivateReplyer:
         # 并行执行八个构建任务
         task_results = await asyncio.gather(
             self._time_and_run_task(
-                self.build_expression_habits(chat_talking_prompt_short, target), "expression_habits"
+                self.build_expression_habits(chat_talking_prompt_short, target, reply_reason), "expression_habits"
             ),
             self._time_and_run_task(self.build_relation_info(chat_talking_prompt_short, sender), "relation_info"),
             self._time_and_run_task(
diff --git a/src/chat/utils/chat_history_summarizer.py b/src/chat/utils/chat_history_summarizer.py
index 6b71706d..36bb5ff0 100644
--- a/src/chat/utils/chat_history_summarizer.py
+++ b/src/chat/utils/chat_history_summarizer.py
@@ -89,9 +89,6 @@ class ChatHistorySummarizer:
         current_time = time.time()
 
         try:
-            logger.info(
-                f"{self.log_prefix} 开始处理聊天概括,时间窗口: {self.last_check_time:.2f} -> {current_time:.2f}"
-            )
             # 获取从上次检查时间到当前时间的新消息
             new_messages = message_api.get_messages_by_time_in_chat(
                 chat_id=self.chat_id,
@@ -109,6 +106,10 @@ class ChatHistorySummarizer:
                 await self._check_and_package(current_time)
                 self.last_check_time = current_time
                 return
+
+            logger.info(
+                f"{self.log_prefix} 开始处理聊天概括,时间窗口: {self.last_check_time:.2f} -> {current_time:.2f}"
+            )
 
             # 有新消息,更新最后检查时间
             self.last_check_time = current_time
diff --git a/src/express/expression_selector.py b/src/express/expression_selector.py
index e5daed31..32bee81e 100644
--- a/src/express/expression_selector.py
+++ b/src/express/expression_selector.py
@@ -27,10 +27,11 @@ def init_prompt():
 
 请你分析聊天内容的语境、情绪、话题类型,从上述情境中选择最适合当前聊天情境的,最多{max_num}个情境。
 考虑因素包括:
-1. 聊天的情绪氛围(轻松、严肃、幽默等)
-2. 话题类型(日常、技术、游戏、情感等)
-3. 情境与当前语境的匹配度
+1.聊天的情绪氛围(轻松、严肃、幽默等)
+2.话题类型(日常、技术、游戏、情感等)
+3.情境与当前语境的匹配度
 {target_message_extra_block}
+{reply_reason_block}
 
 请以JSON格式输出,只需要输出选中的情境编号:
 例如:
@@ -163,6 +164,7 @@ class ExpressionSelector:
         chat_info: str,
         max_num: int = 10,
         target_message: Optional[str] = None,
+        reply_reason: Optional[str] = None,
     ) -> Tuple[List[Dict[str, Any]], List[int]]:
         """
         选择适合的表达方式(使用classic模式:随机选择+LLM选择)
@@ -172,6 +174,7 @@ class ExpressionSelector:
             chat_info: 聊天内容信息
             max_num: 最大选择数量
             target_message: 目标消息内容
+            reply_reason: planner给出的回复理由
 
         Returns:
             Tuple[List[Dict[str, Any]], List[int]]: 选中的表达方式列表和ID列表
@@ -183,7 +186,7 @@ class ExpressionSelector:
 
         # 使用classic模式(随机选择+LLM选择)
         logger.debug(f"使用classic模式为聊天流 {chat_id} 选择表达方式")
-        return await self._select_expressions_classic(chat_id, chat_info, max_num, target_message)
+        return await self._select_expressions_classic(chat_id, chat_info, max_num, target_message, reply_reason)
 
     async def _select_expressions_classic(
         self,
@@ -191,6 +194,7 @@ class ExpressionSelector:
         chat_info: str,
         max_num: int = 10,
         target_message: Optional[str] = None,
+        reply_reason: Optional[str] = None,
     ) -> Tuple[List[Dict[str, Any]], List[int]]:
         """
         classic模式:随机选择+LLM选择
@@ -200,6 +204,7 @@ class ExpressionSelector:
             chat_info: 聊天内容信息
             max_num: 最大选择数量
             target_message: 目标消息内容
+            reply_reason: planner给出的回复理由
 
         Returns:
             Tuple[List[Dict[str, Any]], List[int]]: 选中的表达方式列表和ID列表
@@ -229,12 +234,18 @@ class ExpressionSelector:
         all_situations_str = "\n".join(all_situations)
 
         if target_message:
-            target_message_str = f",现在你想要回复消息:{target_message}"
+            target_message_str = f",现在你想要对上面的这条消息进行回复:“{target_message}”"
             target_message_extra_block = "4.考虑你要回复的目标消息"
         else:
             target_message_str = ""
             target_message_extra_block = ""
 
+        # 构建reply_reason块
+        if reply_reason:
+            reply_reason_block = f"5.考虑你的回复理由:{reply_reason}"
+        else:
+            reply_reason_block = ""
+
         # 3. 构建prompt(只包含情境,不包含完整的表达方式)
         prompt = (await global_prompt_manager.get_prompt_async("expression_evaluation_prompt")).format(
             bot_name=global_config.bot.nickname,
@@ -243,11 +254,15 @@ class ExpressionSelector:
             max_num=max_num,
             target_message=target_message_str,
             target_message_extra_block=target_message_extra_block,
+            reply_reason_block=reply_reason_block,
         )
 
         # 4. 调用LLM
         content, (reasoning_content, model_name, _) = await self.llm_model.generate_response_async(prompt=prompt)
+
+        logger.debug(prompt)
+
         if not content:
             logger.warning("LLM返回空结果")
             return [], []
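
Note on the reply_reason wiring: the sketch below is not part of the patch. It is a minimal, self-contained Python illustration of how the optional reply_reason is folded into the expression-selection prompt, mirroring the branch added in _select_expressions_classic; the helper name build_reply_reason_block is hypothetical and exists only for this example.

from typing import Optional

def build_reply_reason_block(reply_reason: Optional[str]) -> str:
    # Mirrors the added branch: emit a numbered hint line only when the planner
    # actually supplied a reason, otherwise return an empty string so the
    # template's {reply_reason_block} slot collapses to nothing.
    if reply_reason:
        return f"5.考虑你的回复理由:{reply_reason}"
    return ""

# With a reason, the block appears as item 5 in the prompt; without one it vanishes.
print(build_reply_reason_block("对方在提问,需要给出具体建议"))
print(repr(build_reply_reason_block(None)))  # -> ''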