mirror of https://github.com/Mai-with-u/MaiBot.git
fix: improve memory retrieval; add fine-grained prompt debug options
parent 6d70cf7528
commit 3e5058eb0f
@@ -379,7 +379,7 @@ class EmojiManager:
         self._scan_task = None
 
-        self.vlm = LLMRequest(model_set=model_config.model_task_config.vlm, request_type="emoji")
+        self.vlm = LLMRequest(model_set=model_config.model_task_config.vlm, request_type="emoji.see")
         self.llm_emotion_judge = LLMRequest(
             model_set=model_config.model_task_config.utils, request_type="emoji"
         )  # 更高的温度,更少的token(后续可以根据情绪来调整温度)
 
@@ -940,16 +940,16 @@ class EmojiManager:
                 image_base64 = get_image_manager().transform_gif(image_base64)  # type: ignore
                 if not image_base64:
                     raise RuntimeError("GIF表情包转换失败")
-                prompt = "这是一个动态图表情包,每一张图代表了动态图的某一帧,黑色背景代表透明,描述一下表情包表达的情感和内容,描述细节,从互联网梗,meme的角度去分析"
+                prompt = "这是一个动态图表情包,每一张图代表了动态图的某一帧,黑色背景代表透明,简短描述一下表情包表达的情感和内容,描述细节,从互联网梗,meme的角度去分析"
                 description, _ = await self.vlm.generate_response_for_image(
-                    prompt, image_base64, "jpg", temperature=0.3, max_tokens=1000
+                    prompt, image_base64, "jpg", temperature=0.5
                 )
             else:
                 prompt = (
-                    "这是一个表情包,请详细描述一下表情包所表达的情感和内容,描述细节,从互联网梗,meme的角度去分析"
+                    "这是一个表情包,请详细描述一下表情包所表达的情感和内容,简短描述细节,从互联网梗,meme的角度去分析"
                 )
                 description, _ = await self.vlm.generate_response_for_image(
-                    prompt, image_base64, image_format, temperature=0.3, max_tokens=1000
+                    prompt, image_base64, image_format, temperature=0.5
                 )
 
             # 审核表情包
@@ -970,13 +970,14 @@ class EmojiManager:
 
             # 第二步:LLM情感分析 - 基于详细描述生成情感标签列表
             emotion_prompt = f"""
-请你识别这个表情包的含义和适用场景,给我简短的描述,每个描述不要超过15个字
-这是一个基于这个表情包的描述:'{description}'
-你可以关注其幽默和讽刺意味,动用贴吧,微博,小红书的知识,必须从互联网梗,meme的角度去分析
-请直接输出描述,不要出现任何其他内容,如果有多个描述,可以用逗号分隔
+这是一个聊天场景中的表情包描述:'{description}'
+
+请你识别这个表情包的含义和适用场景,给我简短的描述,每个描述不要超过15个字
+你可以关注其幽默和讽刺意味,动用贴吧,微博,小红书的知识,必须从互联网梗,meme的角度去分析
+请直接输出描述,不要出现任何其他内容,如果有多个描述,可以用逗号分隔
 """
             emotions_text, _ = await self.llm_emotion_judge.generate_response_async(
-                emotion_prompt, temperature=0.7, max_tokens=600
+                emotion_prompt, temperature=0.7, max_tokens=256
             )
 
             # 处理情感列表
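Both EmojiManager hunks tune the same two-step pipeline: a VLM first writes a free-form description of the sticker, then a cheaper text model condenses it into short emotion tags, now capped at 256 tokens. A minimal sketch of that flow, where `vlm` and `judge` are hypothetical async clients standing in for the project's LLMRequest instances and the method signatures are assumed from the diff above:

# Sketch of the describe-then-tag flow; `vlm` and `judge` are stand-ins.
async def describe_and_tag(vlm, judge, image_base64: str, image_format: str) -> list[str]:
    describe_prompt = "Briefly describe the emotion and content of this sticker."
    description, _ = await vlm.generate_response_for_image(
        describe_prompt, image_base64, image_format, temperature=0.5
    )

    tag_prompt = (
        f"Here is a description of a chat sticker: '{description}'\n"
        "List its meaning and typical usage, each item under 15 characters, comma-separated."
    )
    emotions_text, _ = await judge.generate_response_async(
        tag_prompt, temperature=0.7, max_tokens=256  # small cap keeps the tag list short
    )
    return [tag.strip() for tag in emotions_text.split(",") if tag.strip()]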

@@ -136,7 +136,8 @@ class DefaultReplyer:
             # logger.debug(f"replyer生成内容: {content}")
 
             logger.info(f"replyer生成内容: {content}")
-            logger.info(f"replyer生成推理: {reasoning_content}")
+            if global_config.debug.show_replyer_reasoning:
+                logger.info(f"replyer生成推理:\n{reasoning_content}")
             logger.info(f"replyer生成模型: {model_name}")
 
             llm_response.content = content
@@ -1000,7 +1001,7 @@ class DefaultReplyer:
         # 直接使用已初始化的模型实例
         # logger.info(f"\n{prompt}\n")
 
-        if global_config.debug.show_prompt:
+        if global_config.debug.show_replyer_prompt:
             logger.info(f"\n{prompt}\n")
         else:
             logger.debug(f"\nreplyer_Prompt:{prompt}\n")
@@ -922,7 +922,7 @@ class PrivateReplyer:
         # 直接使用已初始化的模型实例
         logger.info(f"\n{prompt}\n")
 
-        if global_config.debug.show_prompt:
+        if global_config.debug.show_replyer_prompt:
             logger.info(f"\n{prompt}\n")
         else:
             logger.debug(f"\n{prompt}\n")
@@ -934,6 +934,8 @@ class PrivateReplyer:
                     content = content.strip()
 
                 logger.info(f"使用 {model_name} 生成回复内容: {content}")
+                if global_config.debug.show_replyer_reasoning:
+                    logger.info(f"使用 {model_name} 生成回复推理:\n{reasoning_content}")
                 return content, reasoning_content, model_name, tool_calls
 
     async def get_prompt_info(self, message: str, sender: str, target: str):
@@ -641,6 +641,12 @@ class DebugConfig(ConfigBase):
 
     show_prompt: bool = False
     """是否显示prompt"""
 
+    show_replyer_prompt: bool = True
+    """是否显示回复器prompt"""
+
+    show_replyer_reasoning: bool = True
+    """是否显示回复器推理"""
+
 
 @dataclass
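The new flags follow the file's existing pattern: a boolean field plus a docstring, consumed at the logging call site. A self-contained sketch of that gating, using a plain dataclass instead of the project's ConfigBase:

import logging
from dataclasses import dataclass

logger = logging.getLogger("replyer")

@dataclass
class DebugConfig:
    show_prompt: bool = False
    show_replyer_prompt: bool = True
    show_replyer_reasoning: bool = True

def log_reply(debug: DebugConfig, prompt: str, content: str, reasoning: str) -> None:
    # Content is always logged; the bulky prompt and reasoning dumps are
    # only emitted when their flag is enabled, mirroring the replyer hunks.
    logger.info("reply content: %s", content)
    if debug.show_replyer_prompt:
        logger.info("prompt:\n%s", prompt)
    if debug.show_replyer_reasoning:
        logger.info("reasoning:\n%s", reasoning)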

@@ -270,13 +270,28 @@ class LLMRequest:
                     audio_base64=audio_base64,
                     extra_params=model_info.extra_params,
                 )
-            except (EmptyResponseException, NetworkConnectionError) as e:
+            except EmptyResponseException as e:
+                # 空回复:通常为临时问题,单独记录并重试
                 retry_remain -= 1
                 if retry_remain <= 0:
-                    logger.error(f"模型 '{model_info.name}' 在用尽对临时错误的重试次数后仍然失败。")
+                    logger.error(f"模型 '{model_info.name}' 在多次出现空回复后仍然失败。")
                     raise ModelAttemptFailed(f"模型 '{model_info.name}' 重试耗尽", original_exception=e) from e
 
-                logger.warning(f"模型 '{model_info.name}' 遇到可重试错误: {str(e)}。剩余重试次数: {retry_remain}")
+                logger.warning(
+                    f"模型 '{model_info.name}' 返回空回复(可重试)。剩余重试次数: {retry_remain}"
+                )
                 await asyncio.sleep(api_provider.retry_interval)
 
+            except NetworkConnectionError as e:
+                # 网络错误:单独记录并重试
+                retry_remain -= 1
+                if retry_remain <= 0:
+                    logger.error(f"模型 '{model_info.name}' 在网络错误重试用尽后仍然失败。")
+                    raise ModelAttemptFailed(f"模型 '{model_info.name}' 重试耗尽", original_exception=e) from e
+
+                logger.warning(
+                    f"模型 '{model_info.name}' 遇到网络错误(可重试): {str(e)}。剩余重试次数: {retry_remain}"
+                )
+                await asyncio.sleep(api_provider.retry_interval)
+
             except RespNotOkException as e:
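The split keeps behavior identical: one shared retry budget and the same sleep between attempts, but each failure class now gets its own log message and exhaustion error. A self-contained sketch of the pattern, with stand-in exception names and a hypothetical call coroutine rather than the project's actual classes:

import asyncio
import logging

logger = logging.getLogger("llm_request")

class EmptyResponseError(Exception): pass
class NetworkError(Exception): pass
class AttemptFailed(Exception): pass

async def call_with_retry(call, retries: int = 3, interval: float = 1.0):
    """Retry `call` on empty responses and network errors, sharing one budget."""
    retry_remain = retries
    while True:
        try:
            return await call()
        except EmptyResponseError as e:
            # Empty response: usually transient; logged distinctly, then retried.
            retry_remain -= 1
            if retry_remain <= 0:
                raise AttemptFailed("retries exhausted after empty responses") from e
            logger.warning("empty response (retryable), %d retries left", retry_remain)
        except NetworkError as e:
            # Network failure: its own message, but the same retry budget.
            retry_remain -= 1
            if retry_remain <= 0:
                raise AttemptFailed("retries exhausted after network errors") from e
            logger.warning("network error (retryable): %s, %d retries left", e, retry_remain)
        await asyncio.sleep(interval)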

@@ -1,13 +1,8 @@
-from typing import Tuple
 import asyncio
 from datetime import datetime
 
 from src.common.logger import get_logger
 from src.config.config import global_config
 from src.chat.utils.prompt_builder import Prompt
 from src.llm_models.payload_content.tool_option import ToolParamType
-from src.plugin_system import BaseAction, ActionActivationType
 from src.chat.utils.utils import cut_key_words
 from src.memory_system.Memory_chest import global_memory_chest
 from src.plugin_system.base.base_tool import BaseTool
 from src.plugin_system.apis.message_api import get_messages_by_time_in_chat, build_readable_messages
@@ -125,12 +120,10 @@ class GetMemoryTool(BaseTool):
             chat_answer = results.get("chat")
 
             # 构建返回内容
-            content_parts = [f"问题:{question}"]
+            content_parts = []
 
             if memory_answer:
                 content_parts.append(f"对问题'{question}',你回忆的信息是:{memory_answer}")
-            else:
-                content_parts.append(f"对问题'{question}',没有什么印象")
 
             if chat_answer:
                 content_parts.append(f"对问题'{question}',基于聊天记录的回答:{chat_answer}")
@@ -139,8 +132,13 @@ class GetMemoryTool(BaseTool):
                 content_parts.append(f"在 {time_point} 的时间点,你没有参与聊天")
             elif time_range:
                 content_parts.append(f"在 {time_range} 的时间范围内,你没有参与聊天")
 
-        return {"content": "\n".join(content_parts)}
+        if content_parts:
+            retrieval_content = f"问题:{question}" + "\n".join(content_parts)
+            return {"content": retrieval_content}
+        else:
+            return {"content": ""}
+
 
     async def _get_answer_from_chat_history(self, question: str, time_point: str = None, time_range: str = None) -> str:
         """从聊天记录中获取问题的答案"""
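With content_parts now starting empty and the "没有什么印象" filler removed, a search that finds nothing returns an empty content string instead of a bare 问题: header. A tiny hypothetical helper mirroring the new return shape:

# Hypothetical helper mirroring the hunk: the question header is only
# prepended when at least one answer part exists.
def build_tool_result(question: str, content_parts: list[str]) -> dict:
    if content_parts:
        return {"content": f"问题:{question}" + "\n".join(content_parts)}
    return {"content": ""}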
@@ -245,53 +243,3 @@
         except Exception as e:
             logger.error(f"从聊天记录获取答案失败: {e}")
             return ""
-
-class GetMemoryAction(BaseAction):
-    """关系动作 - 获取记忆"""
-
-    activation_type = ActionActivationType.LLM_JUDGE
-    parallel_action = True
-
-    # 动作基本信息
-    action_name = "get_memory"
-    action_description = (
-        "在记忆中搜寻某个问题的答案"
-    )
-
-    # 动作参数定义
-    action_parameters = {
-        "question": "需要搜寻或回答的问题",
-    }
-
-    # 动作使用场景
-    action_require = [
-        "在记忆中搜寻某个问题的答案",
-        "有你不了解的概念",
-        "有人提问关于过去的事情",
-        "你需要根据记忆回答某个问题",
-    ]
-
-    # 关联类型
-    associated_types = ["text"]
-
-    async def execute(self) -> Tuple[bool, str]:
-        """执行关系动作"""
-
-        question = self.action_data.get("question", "")
-        answer = await global_memory_chest.get_answer_by_question(self.chat_id, question)
-        if not answer:
-            await self.store_action_info(
-                action_build_into_prompt=True,
-                action_prompt_display=f"你回忆了有关问题:{question}的记忆,但是没有找到相关记忆",
-                action_done=True,
-            )
-
-            return False, f"问题:{question},没有找到相关记忆"
-
-        await self.store_action_info(
-            action_build_into_prompt=True,
-            action_prompt_display=f"你回忆了有关问题:{question}的记忆,答案是:{answer}",
-            action_done=True,
-        )
-
-        return True, f"成功获取记忆: {answer}"

@@ -7,7 +7,7 @@ from src.plugin_system.base.config_types import ConfigField
 # 导入依赖的系统组件
 from src.common.logger import get_logger
 
-from src.plugins.built_in.memory.build_memory import GetMemoryAction, GetMemoryTool
+from src.plugins.built_in.memory.build_memory import GetMemoryTool
 
 logger = get_logger("memory_build")
 
@@ -48,7 +48,6 @@ class MemoryBuildPlugin(BasePlugin):
 
         # --- 根据配置注册组件 ---
         components = []
-        # components.append((GetMemoryAction.get_action_info(), GetMemoryAction))
         components.append((GetMemoryTool.get_tool_info(), GetMemoryTool))
 
         return components
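Registration stays list-based: each component contributes an (info, class) pair, so retiring GetMemoryAction is just deleting its append line along with the import. A hypothetical stand-alone sketch of the shape; the real BasePlugin and get_tool_info machinery live in src.plugin_system:

# Hypothetical sketch of list-based component registration.
class FakeMemoryTool:
    @classmethod
    def get_tool_info(cls) -> dict:
        return {"name": "get_memory", "description": "search memory for an answer"}

def get_plugin_components() -> list:
    components = []
    # Each entry pairs component metadata with its class; dropping a
    # component is a one-line change to this list.
    components.append((FakeMemoryTool.get_tool_info(), FakeMemoryTool))
    return components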
@@ -1,5 +1,5 @@
 [inner]
-version = "6.19.1"
+version = "6.19.2"
 
 #----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读----
 #如果你想要修改配置文件,请递增version的值
@@ -221,6 +221,8 @@ library_log_levels = { aiohttp = "WARNING"} # 设置特定库的日志级别
 
 [debug]
 show_prompt = false # 是否显示prompt
+show_replyer_prompt = false # 是否显示回复器prompt
+show_replyer_reasoning = false # 是否显示回复器推理
 
 [maim_message]
 auth_token = [] # 认证令牌,用于API验证,为空则不启用验证
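Note that the DebugConfig fields default to True in code while the shipped template sets both keys to false, so a freshly loaded config keeps the extra logging off. A minimal sketch of reading the [debug] table with the standard library; the file name is hypothetical and MaiBot's real loader maps this table onto DebugConfig itself:

import tomllib  # standard library, Python 3.11+

with open("bot_config.toml", "rb") as f:  # hypothetical path
    cfg = tomllib.load(f)

debug = cfg.get("debug", {})
print(debug.get("show_replyer_prompt", False))
print(debug.get("show_replyer_reasoning", False))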