From 87529b63e391455d510b972a0bda316fa794c178 Mon Sep 17 00:00:00 2001 From: UnCLAS-Prommer Date: Tue, 9 Sep 2025 22:34:10 +0800 Subject: [PATCH] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E6=9B=B4=E5=A4=9A=E7=A7=8D?= =?UTF-8?q?=E7=B1=BB=E7=9A=84=E5=8F=91=E9=80=81=E7=B1=BB=E5=9E=8B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/heart_flow/heartFC_chat.py | 62 +++--- src/chat/message_receive/bot.py | 8 +- .../message_receive/uni_message_sender.py | 6 +- src/chat/replyer/default_generator.py | 4 +- src/common/data_models/__init__.py | 3 +- src/common/data_models/llm_data_model.py | 5 +- src/common/data_models/message_data_model.py | 56 ++++- .../data_models/message_data_model_ref.md | 36 ++++ src/plugin_system/apis/generator_api.py | 20 +- src/plugin_system/apis/send_api.py | 198 +++++++++++++----- src/plugin_system/base/base_action.py | 4 +- src/plugin_system/base/base_command.py | 2 +- 12 files changed, 290 insertions(+), 114 deletions(-) create mode 100644 src/common/data_models/message_data_model_ref.md diff --git a/src/chat/heart_flow/heartFC_chat.py b/src/chat/heart_flow/heartFC_chat.py index f3d9becb..8605a369 100644 --- a/src/chat/heart_flow/heartFC_chat.py +++ b/src/chat/heart_flow/heartFC_chat.py @@ -9,6 +9,7 @@ from rich.traceback import install from src.config.config import global_config from src.common.logger import get_logger from src.common.data_models.info_data_model import ActionPlannerInfo +from src.common.data_models.message_data_model import ReplyContentType from src.chat.message_receive.chat_stream import ChatStream, get_chat_manager from src.chat.utils.prompt_builder import global_prompt_manager from src.chat.utils.timer_calculator import Timer @@ -32,6 +33,7 @@ from src.chat.utils.chat_message_builder import ( if TYPE_CHECKING: from src.common.data_models.database_data_model import DatabaseMessages + from src.common.data_models.message_data_model import ReplySetModel ERROR_LOOP_INFO = { @@ -155,21 
+157,22 @@ class HeartFChatting: timer_strings.append(f"{name}: {formatted_time}") # 获取动作类型,兼容新旧格式 - action_type = "未知动作" - if hasattr(self, "_current_cycle_detail") and self._current_cycle_detail: - loop_plan_info = self._current_cycle_detail.loop_plan_info - if isinstance(loop_plan_info, dict): - action_result = loop_plan_info.get("action_result", {}) - if isinstance(action_result, dict): - # 旧格式:action_result是字典 - action_type = action_result.get("action_type", "未知动作") - elif isinstance(action_result, list) and action_result: - # 新格式:action_result是actions列表 - # TODO: 把这里写明白 - action_type = action_result[0].action_type or "未知动作" - elif isinstance(loop_plan_info, list) and loop_plan_info: - # 直接是actions列表的情况 - action_type = loop_plan_info[0].get("action_type", "未知动作") + # 移除无用代码 + # action_type = "未知动作" + # if hasattr(self, "_current_cycle_detail") and self._current_cycle_detail: + # loop_plan_info = self._current_cycle_detail.loop_plan_info + # if isinstance(loop_plan_info, dict): + # action_result = loop_plan_info.get("action_result", {}) + # if isinstance(action_result, dict): + # # 旧格式:action_result是字典 + # action_type = action_result.get("action_type", "未知动作") + # elif isinstance(action_result, list) and action_result: + # # 新格式:action_result是actions列表 + # # TODO: 把这里写明白 + # action_type = action_result[0].action_type or "未知动作" + # elif isinstance(loop_plan_info, list) and loop_plan_info: + # # 直接是actions列表的情况 + # action_type = loop_plan_info[0].get("action_type", "未知动作") logger.info( f"{self.log_prefix} 第{self._current_cycle_detail.cycle_id}次思考," @@ -177,7 +180,7 @@ class HeartFChatting: + (f"\n详情: {'; '.join(timer_strings)}" if timer_strings else "") ) - async def caculate_interest_value(self, recent_messages_list: List["DatabaseMessages"]) -> float: + async def calculate_interest_value(self, recent_messages_list: List["DatabaseMessages"]) -> float: total_interest = 0.0 for msg in recent_messages_list: interest_value = msg.interest_value @@ -199,7 +202,7 @@ 
class HeartFChatting: if recent_messages_list: self.last_read_time = time.time() await self._observe( - interest_value=await self.caculate_interest_value(recent_messages_list), + interest_value=await self.calculate_interest_value(recent_messages_list), recent_messages_list=recent_messages_list, ) else: @@ -210,7 +213,7 @@ class HeartFChatting: async def _send_and_store_reply( self, - response_set, + response_set: "ReplySetModel", action_message: "DatabaseMessages", cycle_timers: Dict[str, float], thinking_id, @@ -288,11 +291,11 @@ class HeartFChatting: normal_mode_probability += global_config.chat.at_bot_inevitable_reply # 根据概率决定使用直接回复 - interest_triggerd = False - focus_triggerd = False + interest_triggered = False + focus_triggered = False if random.random() < normal_mode_probability: - interest_triggerd = True + interest_triggered = True logger.info(f"{self.log_prefix} 有新消息,在{normal_mode_probability * 100:.0f}%概率下选择回复") @@ -305,14 +308,14 @@ class HeartFChatting: available_actions: Dict[str, ActionInfo] = {} # 如果兴趣度不足以激活 - if not interest_triggerd: + if not interest_triggered: # 看看专注值够不够 if random.random() < self.frequency_control.get_final_focus_value(): # 专注值足够,仍然进入正式思考 - focus_triggerd = True # 都没触发,路边 + focus_triggered = True # 都没触发,路边 # 任意一种触发都行 - if interest_triggerd or focus_triggerd: + if interest_triggered or focus_triggered: # 进入正式思考模式 cycle_timers, thinking_id = self.start_cycle() logger.info(f"{self.log_prefix} 开始第{self._cycle_counter}次思考") @@ -357,7 +360,7 @@ class HeartFChatting: prompt_info = (modified_message.llm_prompt, prompt_info[1]) with Timer("规划器", cycle_timers): # 根据不同触发,进入不同plan - if focus_triggerd: + if focus_triggered: mode = ChatMode.FOCUS else: mode = ChatMode.NORMAL @@ -519,7 +522,7 @@ class HeartFChatting: async def _send_response( self, - reply_set, + reply_set: "ReplySetModel", message_data: "DatabaseMessages", selected_expressions: Optional[List[int]] = None, ) -> str: @@ -534,8 +537,11 @@ class HeartFChatting: reply_text = "" 
first_replied = False - for reply_seg in reply_set: - data = reply_seg[1] + for reply_content in reply_set.reply_data: + + if reply_content.content_type != ReplyContentType.TEXT: + continue + data: str = reply_content.content # type: ignore if not first_replied: await send_api.text_to_stream( text=data, diff --git a/src/chat/message_receive/bot.py b/src/chat/message_receive/bot.py index d1c27af4..6fb74cd0 100644 --- a/src/chat/message_receive/bot.py +++ b/src/chat/message_receive/bot.py @@ -171,10 +171,10 @@ class ChatBot: await message.process() _ = Person.register_person( - platform=message.message_info.platform, - user_id=message.message_info.user_info.user_id, - nickname=user_info.user_nickname, - ) # type: ignore + platform=message.message_info.platform, # type: ignore + user_id=message.message_info.user_info.user_id, # type: ignore + nickname=user_info.user_nickname, # type: ignore + ) await self.s4u_message_processor.process_message(message) diff --git a/src/chat/message_receive/uni_message_sender.py b/src/chat/message_receive/uni_message_sender.py index dc858dd6..7a71c58c 100644 --- a/src/chat/message_receive/uni_message_sender.py +++ b/src/chat/message_receive/uni_message_sender.py @@ -15,7 +15,7 @@ install(extra_lines=3) logger = get_logger("sender") -async def send_message(message: MessageSending, show_log=True) -> bool: +async def _send_message(message: MessageSending, show_log=True) -> bool: """合并后的消息发送函数,包含WS发送和日志记录""" message_preview = truncate_message(message.processed_plain_text, max_length=200) @@ -32,7 +32,7 @@ async def send_message(message: MessageSending, show_log=True) -> bool: raise e # 重新抛出其他异常 -class HeartFCSender: +class UniversalMessageSender: """管理消息的注册、即时处理、发送和存储,并跟踪思考状态。""" def __init__(self): @@ -76,7 +76,7 @@ class HeartFCSender: ) await asyncio.sleep(typing_time) - sent_msg = await send_message(message, show_log=show_log) + sent_msg = await _send_message(message, show_log=show_log) if not sent_msg: return False diff --git 
a/src/chat/replyer/default_generator.py b/src/chat/replyer/default_generator.py index 9163b1b4..4311a2ba 100644 --- a/src/chat/replyer/default_generator.py +++ b/src/chat/replyer/default_generator.py @@ -15,7 +15,7 @@ from src.config.config import global_config, model_config from src.llm_models.utils_model import LLMRequest from src.chat.message_receive.message import UserInfo, Seg, MessageRecv, MessageSending from src.chat.message_receive.chat_stream import ChatStream -from src.chat.message_receive.uni_message_sender import HeartFCSender +from src.chat.message_receive.uni_message_sender import UniversalMessageSender from src.chat.utils.timer_calculator import Timer # <--- Import Timer from src.chat.utils.utils import get_chat_type_and_target_info from src.chat.utils.prompt_builder import Prompt, global_prompt_manager @@ -142,7 +142,7 @@ class DefaultReplyer: self.express_model = LLMRequest(model_set=model_config.model_task_config.replyer, request_type=request_type) self.chat_stream = chat_stream self.is_group_chat, self.chat_target_info = get_chat_type_and_target_info(self.chat_stream.stream_id) - self.heart_fc_sender = HeartFCSender() + self.heart_fc_sender = UniversalMessageSender() self.memory_activator = MemoryActivator() from src.plugin_system.core.tool_use import ToolExecutor # 延迟导入ToolExecutor,不然会循环依赖 diff --git a/src/common/data_models/__init__.py b/src/common/data_models/__init__.py index 222ff59c..d1303dc2 100644 --- a/src/common/data_models/__init__.py +++ b/src/common/data_models/__init__.py @@ -6,7 +6,8 @@ class BaseDataModel: def deepcopy(self): return copy.deepcopy(self) -def temporarily_transform_class_to_dict(obj: Any) -> Any: + +def transform_class_to_dict(obj: Any) -> Any: # sourcery skip: assign-if-exp, reintroduce-else """ 将对象或容器中的 BaseDataModel 子类(类对象)或 BaseDataModel 实例 diff --git a/src/common/data_models/llm_data_model.py b/src/common/data_models/llm_data_model.py index 1d5b75e0..5aed53ba 100644 --- a/src/common/data_models/llm_data_model.py 
+++ b/src/common/data_models/llm_data_model.py @@ -1,8 +1,9 @@ from dataclasses import dataclass -from typing import Optional, List, Tuple, TYPE_CHECKING, Any +from typing import Optional, List, TYPE_CHECKING from . import BaseDataModel if TYPE_CHECKING: + from src.common.data_models.message_data_model import ReplySetModel from src.llm_models.payload_content.tool_option import ToolCall @dataclass @@ -13,4 +14,4 @@ class LLMGenerationDataModel(BaseDataModel): tool_calls: Optional[List["ToolCall"]] = None prompt: Optional[str] = None selected_expressions: Optional[List[int]] = None - reply_set: Optional[List[Tuple[str, Any]]] = None \ No newline at end of file + reply_set: Optional["ReplySetModel"] = None \ No newline at end of file diff --git a/src/common/data_models/message_data_model.py b/src/common/data_models/message_data_model.py index 5ba17278..70b970df 100644 --- a/src/common/data_models/message_data_model.py +++ b/src/common/data_models/message_data_model.py @@ -1,4 +1,4 @@ -from typing import Optional, TYPE_CHECKING, List, Tuple, Union +from typing import Optional, TYPE_CHECKING, List, Tuple, Union, Dict from dataclasses import dataclass, field from enum import Enum @@ -40,9 +40,35 @@ class MessageAndActionModel(BaseDataModel): class ReplyContentType(Enum): TEXT = "text" IMAGE = "image" + EMOJI = "emoji" + COMMAND = "command" VOICE = "voice" + FORWARD = "forward" HYBRID = "hybrid" # 混合类型,包含多种内容 + def __repr__(self) -> str: + return self.value + + +@dataclass +class ReplyContent(BaseDataModel): + content_type: ReplyContentType | str + content: Union[str, Dict, List["ReplyContent"]] # 支持嵌套的 ReplyContent + + def __post_init__(self): + if isinstance(self.content_type, ReplyContentType): + if self.content_type not in [ReplyContentType.HYBRID, ReplyContentType.FORWARD] and isinstance( + self.content, List + ): + raise ValueError( + f"非混合类型/转发类型的内容不能是列表,content_type: {self.content_type}, content: {self.content}" + ) + elif self.content_type in 
[ReplyContentType.HYBRID, ReplyContentType.FORWARD]: + if not isinstance(self.content, List): + raise ValueError( + f"混合类型/转发类型的内容必须是列表,content_type: {self.content_type}, content: {self.content}" + ) + @dataclass class ReplySetModel(BaseDataModel): @@ -50,28 +76,42 @@ class ReplySetModel(BaseDataModel): 回复集数据模型,用于多种回复类型的返回 """ - reply_set_data: List[Tuple[ReplyContentType | str, Union[str, "ReplySetModel"]]] = field(default_factory=list) + reply_data: List[ReplyContent] = field(default_factory=list) + + def __len__(self): + return len(self.reply_data) def add_text_content(self, text: str): """添加文本内容""" - self.reply_set_data.append((ReplyContentType.TEXT, text)) + self.reply_data.append(ReplyContent(content_type=ReplyContentType.TEXT, content=text)) def add_image_content(self, image_base64: str): """添加图片内容,base64编码的图片数据""" - self.reply_set_data.append((ReplyContentType.IMAGE, image_base64)) + self.reply_data.append(ReplyContent(content_type=ReplyContentType.IMAGE, content=image_base64)) def add_voice_content(self, voice_base64: str): """添加语音内容,base64编码的音频数据""" - self.reply_set_data.append((ReplyContentType.VOICE, voice_base64)) + self.reply_data.append(ReplyContent(content_type=ReplyContentType.VOICE, content=voice_base64)) - def add_hybrid_content(self, hybrid_content: "ReplySetModel"): + def add_hybrid_content(self, hybrid_content: List[Tuple[ReplyContentType, str]]): """ 添加混合型内容,可以包含多种类型的内容 实际解析时只关注最外层,没有递归嵌套处理 """ - self.reply_set_data.append((ReplyContentType.HYBRID, hybrid_content)) + for content_type, content in hybrid_content: + assert isinstance(content, str), "混合内容的每个项必须是字符串" + self.reply_data.append(ReplyContent(content_type=content_type, content=content)) def add_custom_content(self, content_type: str, content: str): """添加自定义类型的内容""" - self.reply_set_data.append((content_type, content)) + self.reply_data.append(ReplyContent(content_type=content_type, content=content)) + + def add_forward_content(self, forward_content: List[Tuple[ReplyContentType, 
Union[str, ReplyContent]]]): + """添加转发内容,可以是字符串或ReplyContent,嵌套的转发内容需要自己构造放入""" + for content_type, content in forward_content: + if isinstance(content, ReplyContent): + self.reply_data.append(content) + else: + assert isinstance(content, str), "转发内容的每个data必须是字符串或ReplyContent" + self.reply_data.append(ReplyContent(content_type=content_type, content=content)) diff --git a/src/common/data_models/message_data_model_ref.md b/src/common/data_models/message_data_model_ref.md new file mode 100644 index 00000000..66476ea1 --- /dev/null +++ b/src/common/data_models/message_data_model_ref.md @@ -0,0 +1,36 @@ +# 对于`message_data_model.py`中`class ReplyContent`的规划解读 + +分类讨论如下: +- `ReplyContent.TEXT`: 单独的文本,`_level = 0`,`content`为`str`类型。 +- `ReplyContent.IMAGE`: 单独的图片,`_level = 0`,`content`为`str`类型(图片base64)。 +- `ReplyContent.EMOJI`: 单独的表情包,`_level = 0`,`content`为`str`类型(图片base64)。 +- `ReplyContent.VOICE`: 单独的语音,`_level = 0`,`content`为`str`类型(语音base64)。 +- `ReplyContent.HYBRID`: 混合内容,`_level = 0` + - 其应该是一个列表,列表内应该只接受`str`类型的内容(图片和文本混合体) +- `ReplyContent.FORWARD`: 转发消息,`_level = n` + - 其应该是一个列表,列表接受`str`类型(图片/文本),`ReplyContent`类型(嵌套转发,嵌套有最高层数限制) +- `ReplyContent.COMMAND`: 指令消息,`_level = 0` + - 其应该是一个列表,列表内应该只接受`Dict`类型的内容 + +未来规划: +- `ReplyContent.AT`: 单独的艾特,`_level = 0`,`content`为`str`类型(用户ID)。 + +内容构造方式: +- 对于`TEXT`, `IMAGE`, `EMOJI`, `VOICE`,直接传入对应类型的内容,且`content`应该为`str`。 +- 对于`COMMAND`,传入一个字典,字典内的内容类型应符合上述规定。 +- 对于`HYBRID`, `FORWARD`,传入一个列表,列表内的内容类型应符合上述规定。 + +因此,我们的类型注解应该是: +```python +from typing import Union, List, Dict + +ReplyContentType = Union[ + str, # TEXT, IMAGE, EMOJI, VOICE + List[Union[str, 'ReplyContent']], # HYBRID, FORWARD + Dict # COMMAND +] +``` + +现在`_level`被移除了,在解析的时候显式地检查内容的类型和结构即可。 + +`send_api`的custom_reply_set_to_stream仅在特定的类型下提供reply)message \ No newline at end of file diff --git a/src/plugin_system/apis/generator_api.py b/src/plugin_system/apis/generator_api.py index 257c60fa..29675d7d 100644 --- a/src/plugin_system/apis/generator_api.py +++ 
b/src/plugin_system/apis/generator_api.py @@ -12,6 +12,7 @@ import traceback from typing import Tuple, Any, Dict, List, Optional, TYPE_CHECKING from rich.traceback import install from src.common.logger import get_logger +from src.common.data_models.message_data_model import ReplySetModel from src.chat.replyer.default_generator import DefaultReplyer from src.chat.message_receive.chat_stream import ChatStream from src.chat.utils.utils import process_llm_response @@ -138,12 +139,11 @@ async def generate_reply( if not success: logger.warning("[GeneratorAPI] 回复生成失败") return False, None + reply_set: Optional[ReplySetModel] = None if content := llm_response.content: reply_set = process_human_text(content, enable_splitter, enable_chinese_typo) - else: - reply_set = [] llm_response.reply_set = reply_set - logger.debug(f"[GeneratorAPI] 回复生成成功,生成了 {len(reply_set)} 个回复项") + logger.debug(f"[GeneratorAPI] 回复生成成功,生成了 {len(reply_set) if reply_set else 0} 个回复项") return success, llm_response @@ -159,6 +159,7 @@ async def generate_reply( logger.error(traceback.format_exc()) return False, None + async def rewrite_reply( chat_stream: Optional[ChatStream] = None, reply_data: Optional[Dict[str, Any]] = None, @@ -208,12 +209,12 @@ async def rewrite_reply( reason=reason, reply_to=reply_to, ) - reply_set = [] + reply_set: Optional[ReplySetModel] = None if success and llm_response and (content := llm_response.content): reply_set = process_human_text(content, enable_splitter, enable_chinese_typo) llm_response.reply_set = reply_set if success: - logger.info(f"[GeneratorAPI] 重写回复成功,生成了 {len(reply_set)} 个回复项") + logger.info(f"[GeneratorAPI] 重写回复成功,生成了 {len(reply_set) if reply_set else 0} 个回复项") else: logger.warning("[GeneratorAPI] 重写回复失败") @@ -227,7 +228,7 @@ async def rewrite_reply( return False, None -def process_human_text(content: str, enable_splitter: bool, enable_chinese_typo: bool) -> List[Tuple[str, Any]]: +def process_human_text(content: str, enable_splitter: bool, enable_chinese_typo: 
bool) -> Optional[ReplySetModel]: """将文本处理为更拟人化的文本 Args: @@ -238,18 +239,17 @@ def process_human_text(content: str, enable_splitter: bool, enable_chinese_typo: if not isinstance(content, str): raise ValueError("content 必须是字符串类型") try: + reply_set = ReplySetModel() processed_response = process_llm_response(content, enable_splitter, enable_chinese_typo) - reply_set = [] for text in processed_response: - reply_seg = ("text", text) - reply_set.append(reply_seg) + reply_set.add_text_content(text) return reply_set except Exception as e: logger.error(f"[GeneratorAPI] 处理人形文本时出错: {e}") - return [] + return None async def generate_response_custom( diff --git a/src/plugin_system/apis/send_api.py b/src/plugin_system/apis/send_api.py index 21f764cd..3487712b 100644 --- a/src/plugin_system/apis/send_api.py +++ b/src/plugin_system/apis/send_api.py @@ -21,17 +21,19 @@ import traceback import time -from typing import Optional, Union, Dict, Any, List, TYPE_CHECKING +from typing import Optional, Union, Dict, List, TYPE_CHECKING from src.common.logger import get_logger +from src.common.data_models.message_data_model import ReplyContentType from src.config.config import global_config from src.chat.message_receive.chat_stream import get_chat_manager -from src.chat.message_receive.uni_message_sender import HeartFCSender +from src.chat.message_receive.uni_message_sender import UniversalMessageSender from src.chat.message_receive.message import MessageSending, MessageRecv from maim_message import Seg, UserInfo if TYPE_CHECKING: from src.common.data_models.database_data_model import DatabaseMessages + from src.common.data_models.message_data_model import ReplySetModel logger = get_logger("send_api") @@ -42,8 +44,7 @@ logger = get_logger("send_api") async def _send_to_target( - message_type: str, - content: Union[str, dict], + message_segment: Seg, stream_id: str, display_message: str = "", typing: bool = False, @@ -56,8 +57,7 @@ async def _send_to_target( """向指定目标发送消息的内部实现 Args: - 
message_type: 消息类型,如"text"、"image"、"emoji"等 - content: 消息内容 + message_segment: stream_id: 目标流ID display_message: 显示消息 typing: 是否模拟打字等待。 @@ -74,7 +74,7 @@ async def _send_to_target( return False if show_log: - logger.debug(f"[SendAPI] 发送{message_type}消息到 {stream_id}") + logger.debug(f"[SendAPI] 发送{message_segment.type}消息到 {stream_id}") # 查找目标聊天流 target_stream = get_chat_manager().get_stream(stream_id) @@ -83,7 +83,7 @@ async def _send_to_target( return False # 创建发送器 - heart_fc_sender = HeartFCSender() + message_sender = UniversalMessageSender() # 生成消息ID current_time = time.time() @@ -96,13 +96,11 @@ async def _send_to_target( platform=target_stream.platform, ) - # 创建消息段 - message_segment = Seg(type=message_type, data=content) # type: ignore - reply_to_platform_id = "" anchor_message: Union["MessageRecv", None] = None if reply_message: - anchor_message = message_dict_to_message_recv(reply_message.flatten()) + anchor_message = db_message_to_message_recv(reply_message) + logger.info(f"[SendAPI] 找到匹配的回复消息,发送者: {anchor_message.message_info.user_info.user_id}") # type: ignore if anchor_message: anchor_message.update_chat_stream(target_stream) assert anchor_message.message_info.user_info, "用户信息缺失" @@ -120,14 +118,14 @@ async def _send_to_target( display_message=display_message, reply=anchor_message, is_head=True, - is_emoji=(message_type == "emoji"), + is_emoji=(message_segment.type == "emoji"), thinking_start_time=current_time, reply_to=reply_to_platform_id, selected_expressions=selected_expressions, ) # 发送消息 - sent_msg = await heart_fc_sender.send_message( + sent_msg = await message_sender.send_message( bot_message, typing=typing, set_reply=set_reply, @@ -148,7 +146,7 @@ async def _send_to_target( return False -def message_dict_to_message_recv(message_dict: Dict[str, Any]) -> Optional[MessageRecv]: +def db_message_to_message_recv(message_obj: "DatabaseMessages") -> MessageRecv: """将数据库dict重建为MessageRecv对象 Args: message_dict: 消息字典 @@ -158,44 +156,41 @@ def 
message_dict_to_message_recv(message_dict: Dict[str, Any]) -> Optional[Messa """ # 构建MessageRecv对象 user_info = { - "platform": message_dict.get("user_platform", ""), - "user_id": message_dict.get("user_id", ""), - "user_nickname": message_dict.get("user_nickname", ""), - "user_cardname": message_dict.get("user_cardname", ""), + "platform": message_obj.user_info.platform or "", + "user_id": message_obj.user_info.user_id or "", + "user_nickname": message_obj.user_info.user_nickname or "", + "user_cardname": message_obj.user_info.user_cardname or "", } group_info = {} - if message_dict.get("chat_info_group_id"): + if message_obj.chat_info.group_info: group_info = { - "platform": message_dict.get("chat_info_group_platform", ""), - "group_id": message_dict.get("chat_info_group_id", ""), - "group_name": message_dict.get("chat_info_group_name", ""), + "platform": message_obj.chat_info.group_info.group_platform or "", + "group_id": message_obj.chat_info.group_info.group_id or "", + "group_name": message_obj.chat_info.group_info.group_name or "", } format_info = {"content_format": "", "accept_format": ""} template_info = {"template_items": {}} message_info = { - "platform": message_dict.get("chat_info_platform", ""), - "message_id": message_dict.get("message_id"), - "time": message_dict.get("time"), + "platform": message_obj.chat_info.platform or "", + "message_id": message_obj.message_id, + "time": message_obj.time, "group_info": group_info, "user_info": user_info, - "additional_config": message_dict.get("additional_config"), + "additional_config": message_obj.additional_config, "format_info": format_info, "template_info": template_info, } message_dict_recv = { "message_info": message_info, - "raw_message": message_dict.get("processed_plain_text"), - "processed_plain_text": message_dict.get("processed_plain_text"), + "raw_message": message_obj.processed_plain_text, + "processed_plain_text": message_obj.processed_plain_text, } - message_recv = MessageRecv(message_dict_recv) 
- - logger.info(f"[SendAPI] 找到匹配的回复消息,发送者: {message_dict.get('user_nickname', '')}") - return message_recv + return MessageRecv(message_dict_recv) # ============================================================================= @@ -225,11 +220,10 @@ async def text_to_stream( bool: 是否发送成功 """ return await _send_to_target( - "text", - text, - stream_id, - "", - typing, + message_segment=Seg(type="text", data=text), + stream_id=stream_id, + display_message="", + typing=typing, set_reply=set_reply, reply_message=reply_message, storage_message=storage_message, @@ -255,10 +249,9 @@ async def emoji_to_stream( bool: 是否发送成功 """ return await _send_to_target( - "emoji", - emoji_base64, - stream_id, - "", + message_segment=Seg(type="emoji", data=emoji_base64), + stream_id=stream_id, + display_message="", typing=False, storage_message=storage_message, set_reply=set_reply, @@ -284,10 +277,9 @@ async def image_to_stream( bool: 是否发送成功 """ return await _send_to_target( - "image", - image_base64, - stream_id, - "", + message_segment=Seg(type="image", data=image_base64), + stream_id=stream_id, + display_message="", typing=False, storage_message=storage_message, set_reply=set_reply, @@ -314,10 +306,9 @@ async def command_to_stream( bool: 是否发送成功 """ return await _send_to_target( - "command", - command, - stream_id, - display_message, + message_segment=Seg(type="command", data=command), # type: ignore + stream_id=stream_id, + display_message=display_message, typing=False, storage_message=storage_message, set_reply=set_reply, @@ -327,7 +318,7 @@ async def command_to_stream( async def custom_to_stream( message_type: str, - content: str | dict, + content: str | Dict, stream_id: str, display_message: str = "", typing: bool = False, @@ -351,8 +342,7 @@ async def custom_to_stream( bool: 是否发送成功 """ return await _send_to_target( - message_type=message_type, - content=content, + message_segment=Seg(type=message_type, data=content), # type: ignore stream_id=stream_id, 
display_message=display_message,
         typing=typing,
@@ -361,3 +351,105 @@ async def custom_to_stream(
         storage_message=storage_message,
         show_log=show_log,
     )
+
+
+async def custom_reply_set_to_stream(
+    reply_set: "ReplySetModel",
+    stream_id: str,
+    display_message: str = "",  # 基本没用
+    typing: bool = False,
+    reply_message: Optional["DatabaseMessages"] = None,
+    set_reply: bool = False,
+    storage_message: bool = True,
+    show_log: bool = True,
+) -> bool:
+    """向指定流发送混合型消息集"""
+    flag: bool = True
+    for reply_content in reply_set.reply_data:
+        status: bool = False
+        content_type = reply_content.content_type
+        message_data = reply_content.content
+        if content_type == ReplyContentType.TEXT:
+            status = await _send_to_target(
+                message_segment=Seg(type="text", data=message_data),  # type: ignore
+                stream_id=stream_id,
+                display_message=display_message,
+                typing=typing,
+                reply_message=reply_message,
+                set_reply=set_reply,
+                storage_message=storage_message,
+                show_log=show_log,
+            )
+        elif content_type in [
+            ReplyContentType.IMAGE,
+            ReplyContentType.EMOJI,
+            ReplyContentType.COMMAND,
+            ReplyContentType.VOICE,
+        ]:
+            message_segment: Seg
+            if content_type == ReplyContentType.IMAGE:
+                message_segment = Seg(type="image", data=message_data)  # type: ignore
+            elif content_type == ReplyContentType.EMOJI:
+                message_segment = Seg(type="emoji", data=message_data)  # type: ignore
+            elif content_type == ReplyContentType.COMMAND:
+                message_segment = Seg(type="command", data=message_data)  # type: ignore
+            elif content_type == ReplyContentType.VOICE:
+                message_segment = Seg(type="voice", data=message_data)  # type: ignore
+            status = await _send_to_target(
+                message_segment=message_segment,
+                stream_id=stream_id,
+                display_message=display_message,
+                typing=False,
+                reply_message=reply_message,
+                set_reply=set_reply,
+                storage_message=storage_message,
+                show_log=show_log,
+            )
+        elif content_type == ReplyContentType.HYBRID:
+            assert isinstance(message_data, list), "混合类型内容必须是列表"
+            
sub_seg_list: List[Seg] = [] + for sub_content in message_data: + sub_content_type = sub_content.content_type + sub_content_data = sub_content.content + + if sub_content_type == ReplyContentType.TEXT: + sub_seg_list.append(Seg(type="text", data=sub_content_data)) # type: ignore + elif sub_content_type == ReplyContentType.IMAGE: + sub_seg_list.append(Seg(type="image", data=sub_content_data)) # type: ignore + elif sub_content_type == ReplyContentType.EMOJI: + sub_seg_list.append(Seg(type="emoji", data=sub_content_data)) # type: ignore + else: + logger.warning(f"[SendAPI] 混合类型中不支持的子内容类型: {repr(sub_content_type)}") + continue + status = await _send_to_target( + message_segment=Seg(type="seglist", data=sub_seg_list), # type: ignore + stream_id=stream_id, + display_message=display_message, + typing=typing, + reply_message=reply_message, + set_reply=set_reply, + storage_message=storage_message, + show_log=show_log, + ) + elif content_type == ReplyContentType.FORWARD: + assert isinstance(message_data, list), "转发类型内容必须是列表" + # TODO: 完成转发消息的发送机制 + else: + message_type_in_str = ( + content_type.value if isinstance(content_type, ReplyContentType) else str(content_type) + ) + return await _send_to_target( + message_segment=Seg(type=message_type_in_str, data=message_data), # type: ignore + stream_id=stream_id, + display_message=display_message, + typing=typing, + reply_message=reply_message, + set_reply=set_reply, + storage_message=storage_message, + show_log=show_log, + ) + if not status: + flag = False + logger.error(f"[SendAPI] 发送{repr(content_type)}消息失败,消息内容:{str(message_data)[:100]}") + + return flag diff --git a/src/plugin_system/base/base_action.py b/src/plugin_system/base/base_action.py index 0e58885b..3f06f929 100644 --- a/src/plugin_system/base/base_action.py +++ b/src/plugin_system/base/base_action.py @@ -2,7 +2,7 @@ import time import asyncio from abc import ABC, abstractmethod -from typing import Tuple, Optional, TYPE_CHECKING +from typing import Tuple, Optional, 
TYPE_CHECKING, Dict from src.common.logger import get_logger from src.chat.message_receive.chat_stream import ChatStream @@ -285,7 +285,7 @@ class BaseAction(ABC): async def send_custom( self, message_type: str, - content: str, + content: str | Dict, typing: bool = False, set_reply: bool = False, reply_message: Optional["DatabaseMessages"] = None, diff --git a/src/plugin_system/base/base_command.py b/src/plugin_system/base/base_command.py index 633eba34..1b4e2486 100644 --- a/src/plugin_system/base/base_command.py +++ b/src/plugin_system/base/base_command.py @@ -120,7 +120,7 @@ class BaseCommand(ABC): async def send_type( self, message_type: str, - content: str, + content: str | Dict, display_message: str = "", typing: bool = False, set_reply: bool = False,