Correct storage of merged (forwarded) messages

pull/1239/head
UnCLAS-Prommer 2025-09-15 15:21:37 +08:00
parent d560d02761
commit 48ed58c4a8
8 changed files with 21 additions and 8 deletions


@@ -165,8 +165,6 @@ class ForwardMessages(BaseEventHandler):
         self.messages: List[str] = []

     async def execute(self, message: MaiMessages | None) -> Tuple[bool, bool, None, None, None]:
-        if self.get_config("print_message.enabled", False):
-            return True, True, None, None, None
         if not message:
             return True, True, None, None, None
         stream_id = message.stream_id or ""
@@ -194,7 +192,7 @@ class HelloWorldPlugin(BasePlugin):
     # Basic plugin info
     plugin_name: str = "hello_world_plugin"  # internal identifier
-    enable_plugin: bool = True
+    enable_plugin: bool = False
     dependencies: List[str] = []  # plugin dependency list
     python_dependencies: List[str] = []  # Python package dependency list
     config_file_name: str = "config.toml"  # config file name
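
Note: the example plugin is now disabled by default (`enable_plugin: bool = False`). A loader that honors this class-level flag could look roughly like the sketch below; `register_enabled_plugins` and its registry dict are hypothetical helpers for illustration, not the project's actual loader API:

```python
from typing import Dict, List

def register_enabled_plugins(plugin_classes: List[type]) -> Dict[str, object]:
    """Instantiate only plugins whose class-level enable_plugin flag is True.

    Hypothetical helper; the project's real loader may differ.
    """
    registry: Dict[str, object] = {}
    for cls in plugin_classes:
        # hello_world_plugin now ships with enable_plugin = False,
        # so it is skipped unless the user flips the flag back on.
        if getattr(cls, "enable_plugin", False):
            registry[cls.plugin_name] = cls()
    return registry
```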


@@ -8,6 +8,7 @@ from typing import Optional, Any, List
 from maim_message import Seg, UserInfo, BaseMessageInfo, MessageBase
 from src.common.logger import get_logger
+from src.config.config import global_config
 from src.chat.utils.utils_image import get_image_manager
 from src.chat.utils.utils_voice import get_voice_text
 from .chat_stream import ChatStream
@@ -79,6 +80,14 @@ class Message(MessageBase):
                 if processed:
                     segments_text.append(processed)
             return " ".join(segments_text)
+        elif segment.type == "forward":
+            segments_text = []
+            for node_dict in segment.data:
+                message = MessageBase.from_dict(node_dict)  # type: ignore
+                processed_text = await self._process_message_segments(message.message_segment)
+                if processed_text:
+                    segments_text.append(f"{global_config.bot.nickname}: {processed_text}")
+            return "[合并消息]: " + "\n".join(segments_text)
         else:
             # process a single message segment
             return await self._process_single_segment(segment)  # type: ignore
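
The new `forward` branch flattens a merged/forwarded message: each embedded node is rebuilt with `MessageBase.from_dict` and processed recursively, and the result is prefixed with `[合并消息]` ("merged messages"). A self-contained sketch of the same recursion, using a simplified stand-in for `maim_message.Seg` rather than the real types:

```python
import asyncio
from dataclasses import dataclass
from typing import Any, List

@dataclass
class Seg:
    """Simplified stand-in for maim_message.Seg."""
    type: str
    data: Any

async def flatten(segment: Seg, nickname: str = "bot") -> str:
    # A forward segment carries a list of embedded nodes; recurse into
    # each one and prefix the sender nickname, so nested forwards
    # flatten depth-first into readable text.
    if segment.type == "forward":
        lines: List[str] = []
        for node in segment.data:
            text = await flatten(node, nickname)
            if text:
                lines.append(f"{nickname}: {text}")
        return "[合并消息]: " + "\n".join(lines)
    if segment.type == "text":
        return str(segment.data)
    return ""

inner = Seg("forward", [Seg("text", "hello")])
outer = Seg("forward", [inner, Seg("text", "world")])
print(asyncio.run(flatten(outer)))
```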


@@ -33,7 +33,6 @@ class MessageStorage:
     async def store_message(message: Union[MessageSending, MessageRecv], chat_stream: ChatStream) -> None:
         """Store the message to the database"""
         try:
-            # don't overstep your authority, save the world
             pattern = r"<MainRule>.*?</MainRule>|<schedule>.*?</schedule>|<UserMessage>.*?</UserMessage>"
             # print(message)
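
The regex strips prompt-scaffolding tags (`<MainRule>`, `<schedule>`, `<UserMessage>`) from a message before it is stored. The diff does not show the call site; a minimal sketch assuming the pattern is applied with `re.sub` and `re.DOTALL` so the lazy `.*?` can cross newlines inside a tag pair:

```python
import re

# Pattern from the diff: removes prompt-scaffolding tag pairs before storage.
pattern = r"<MainRule>.*?</MainRule>|<schedule>.*?</schedule>|<UserMessage>.*?</UserMessage>"

raw = "<MainRule>system rules here</MainRule>hi there<UserMessage>quoted</UserMessage>"
clean = re.sub(pattern, "", raw, flags=re.DOTALL)
print(clean)  # -> "hi there"
```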


@@ -310,6 +310,7 @@ async def command_to_stream(
         display_message=display_message,
         typing=False,
         storage_message=storage_message,
+        set_reply=False,
     )


@@ -397,6 +397,8 @@ class BaseAction(ABC):
             reply_set=reply_set,
             stream_id=self.chat_id,
             storage_message=storage_message,
+            set_reply=False,
+            reply_message=None,
         )

     async def send_voice(self, audio_base64: str) -> bool:


@@ -320,6 +320,8 @@ class BaseCommand(ABC):
             reply_set=reply_set,
             stream_id=chat_stream.stream_id,
             storage_message=storage_message,
+            set_reply=False,
+            reply_message=None,
         )

     async def send_custom(


@@ -377,4 +377,6 @@ class BaseEventHandler(ABC):
             reply_set=reply_set,
             stream_id=stream_id,
             storage_message=storage_message,
+            set_reply=False,
+            reply_message=None,
         )
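
`command_to_stream`, `BaseAction`, `BaseCommand`, and `BaseEventHandler` all gain explicit `set_reply=False` (and `reply_message=None`) at their send call sites, so messages sent through these helpers are stored as plain messages rather than as replies. A sketch of the kind of signature these keywords imply; the parameter list is inferred from the diff, not confirmed against the real send API:

```python
from typing import Any, Optional

async def text_to_stream(
    text: str,
    stream_id: str,
    storage_message: bool = True,
    set_reply: bool = False,        # explicit default at these call sites
    reply_message: Optional[Any] = None,
) -> bool:
    """Illustrative signature only, inferred from the keywords in the diff."""
    # With set_reply=False the outgoing text is stored as a plain message
    # instead of being anchored as a reply to reply_message.
    return True
```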


@@ -2,7 +2,7 @@ import asyncio
 import contextlib
 from typing import List, Dict, Optional, Type, Tuple, TYPE_CHECKING
-from src.chat.message_receive.message import MessageRecv
+from src.chat.message_receive.message import MessageRecv, MessageSending
 from src.chat.message_receive.chat_stream import get_chat_manager
 from src.common.logger import get_logger
 from src.plugin_system.base.component_types import EventType, EventHandlerInfo, MaiMessages, CustomEventHandlerResult
@@ -66,7 +66,7 @@ class EventsManager:
     async def handle_mai_events(
         self,
         event_type: EventType,
-        message: Optional[MessageRecv] = None,
+        message: Optional[MessageRecv | MessageSending] = None,
         llm_prompt: Optional[str] = None,
         llm_response: Optional["LLMGenerationDataModel"] = None,
         stream_id: Optional[str] = None,
@@ -206,7 +206,7 @@ class EventsManager:
     def _transform_event_message(
         self,
-        message: MessageRecv,
+        message: MessageRecv | MessageSending,
         llm_prompt: Optional[str] = None,
         llm_response: Optional["LLMGenerationDataModel"] = None,
     ) -> MaiMessages:
@@ -295,7 +295,7 @@ class EventsManager:
     def _prepare_message(
         self,
         event_type: EventType,
-        message: Optional[MessageRecv] = None,
+        message: Optional[MessageRecv | MessageSending] = None,
         llm_prompt: Optional[str] = None,
         llm_response: Optional["LLMGenerationDataModel"] = None,
         stream_id: Optional[str] = None,
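
Widening these signatures to `MessageRecv | MessageSending` lets outgoing (bot-sent) messages, including merged forwards, flow through the same event pipeline as received ones. A minimal sketch of the union-typed parameter with local stand-in classes (the real ones live in `src.chat.message_receive.message`):

```python
from typing import Optional

class MessageRecv: ...      # stand-in for the real incoming-message class
class MessageSending: ...   # stand-in for the real outgoing-message class

def describe(message: Optional[MessageRecv | MessageSending] = None) -> str:
    # Optional[X | Y] is equivalent to X | Y | None (PEP 604 unions).
    if message is None:
        return "no message"
    # isinstance narrowing works on union members.
    if isinstance(message, MessageSending):
        return "outgoing message (e.g. a bot reply being stored)"
    return "incoming message"

print(describe(MessageSending()))  # -> outgoing message (e.g. a bot reply being stored)
```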