根据开发组建议重命名,移除多余的Prompt后缀

pull/1496/head
UnCLAS-Prommer 2026-02-02 21:10:39 +08:00
parent b793a3d62b
commit 74b852dd8b
No known key found for this signature in database
38 changed files with 27 additions and 27 deletions

View File

@@ -68,7 +68,7 @@ class ExpressionLearner:
# 学习用(开启行编号,便于溯源)
random_msg_str: str = await build_anonymous_messages(random_msg, show_ids=True)
prompt_template = prompt_manager.get_prompt("learn_style_prompt")
prompt_template = prompt_manager.get_prompt("learn_style")
prompt_template.add_context("bot_name", global_config.bot.nickname)
prompt_template.add_context("chat_str", random_msg_str)

View File

@@ -378,7 +378,7 @@ class ExpressionSelector:
reply_reason_block = ""
# 3. 构建prompt只包含情境不包含完整的表达方式
prompt_template = prompt_manager.get_prompt("expression_evaluation_prompt")
prompt_template = prompt_manager.get_prompt("expression_evaluation")
prompt_template.add_context("bot_name", global_config.bot.nickname)
prompt_template.add_context("chat_observe_info", chat_context)
prompt_template.add_context("all_situations", all_situations_str)

View File

@@ -200,7 +200,7 @@ class JargonExplainer:
explanations_text = "\n".join(jargon_explanations)
# 使用LLM概括黑话解释
prompt_of_summarize = prompt_manager.get_prompt("jargon_explainer_summarize_prompt")
prompt_of_summarize = prompt_manager.get_prompt("jargon_explainer_summarize")
prompt_of_summarize.add_context("chat_context", lambda _: chat_context)
prompt_of_summarize.add_context("jargon_explanations", lambda _: explanations_text)
summarize_prompt = await prompt_manager.render_prompt(prompt_of_summarize)

View File

@@ -193,7 +193,7 @@ class JargonMiner:
"- 请参考上一次推断的含义,结合新的上下文信息,给出更准确或更新的推断结果"
)
prompt1_template = prompt_manager.get_prompt("jargon_inference_with_context_prompt")
prompt1_template = prompt_manager.get_prompt("jargon_inference_with_context")
prompt1_template.add_context("bot_name", global_config.bot.nickname)
prompt1_template.add_context("content", str(content))
prompt1_template.add_context("raw_content_list", raw_content_text)
@@ -233,7 +233,7 @@ class JargonMiner:
return
# 步骤2: 仅基于content推断
prompt2_template = prompt_manager.get_prompt("jargon_inference_content_only_prompt")
prompt2_template = prompt_manager.get_prompt("jargon_inference_content_only")
prompt2_template.add_context("content", str(content))
prompt2 = await prompt_manager.render_prompt(prompt2_template)
@@ -275,7 +275,7 @@ class JargonMiner:
logger.debug(f"jargon {content} 推断1结果: {response1}")
# 步骤3: 比较两个推断结果
prompt3_template = prompt_manager.get_prompt("jargon_compare_inference_prompt")
prompt3_template = prompt_manager.get_prompt("jargon_compare_inference")
prompt3_template.add_context("inference1", json.dumps(inference1, ensure_ascii=False))
prompt3_template.add_context("inference2", json.dumps(inference2, ensure_ascii=False))
prompt3 = await prompt_manager.render_prompt(prompt3_template)

View File

@@ -72,7 +72,7 @@ class ReflectTracker:
# LLM Judge
try:
prompt_template = prompt_manager.get_prompt("reflect_judge_prompt")
prompt_template = prompt_manager.get_prompt("reflect_judge")
prompt_template.add_context("situation", str(self.expression.situation))
prompt_template.add_context("style", str(self.expression.style))
prompt_template.add_context("context_block", context_block)

View File

@@ -313,7 +313,7 @@ class BrainChatting:
current_available_actions=available_actions,
chat_content_block=chat_content_block,
message_id_list=message_id_list,
prompt_key="brain_planner_prompt_react",
prompt_key="brain_planner",
)
continue_flag, modified_message = await events_manager.handle_mai_events(
EventType.ON_PLAN, None, prompt_info[0], None, self.chat_stream.stream_id

View File

@@ -200,7 +200,7 @@ class BrainPlanner:
prompt_build_start = time.perf_counter()
# 构建包含所有动作的提示词:使用统一的 ReAct Prompt
prompt_key = "brain_planner_prompt_react"
prompt_key = "brain_planner"
# 这里不记录日志,避免重复打印,由调用方按需控制 log_prompt
prompt, message_id_list = await self.build_planner_prompt(
chat_target_info=chat_target_info,
@@ -254,7 +254,7 @@ class BrainPlanner:
message_id_list: List[Tuple[str, "DatabaseMessages"]],
chat_content_block: str = "",
interest: str = "",
prompt_key: str = "brain_planner_prompt_react",
prompt_key: str = "brain_planner",
) -> tuple[str, List[Tuple[str, "DatabaseMessages"]]]:
"""构建 Planner LLM 的提示词 (获取模板并填充数据)"""
try:
@@ -381,7 +381,7 @@ class BrainPlanner:
require_text = require_text.rstrip("\n")
# 获取动作提示模板并填充
using_action_prompt_template = prompt_manager.get_prompt("brain_action_prompt")
using_action_prompt_template = prompt_manager.get_prompt("brain_action")
using_action_prompt_template.add_context("action_name", action_name)
using_action_prompt_template.add_context("action_description", action_info.description)
using_action_prompt_template.add_context("action_parameters", param_text)

View File

@@ -600,7 +600,7 @@ class ActionPlanner:
reply_action_example += ', "quote":"如果需要引用该message设置为true"'
reply_action_example += "}"
planner_prompt_template = prompt_manager.get_prompt("planner_prompt")
planner_prompt_template = prompt_manager.get_prompt("planner")
planner_prompt_template.add_context("time_block", time_block)
planner_prompt_template.add_context("chat_context_description", chat_context_description)
planner_prompt_template.add_context("chat_content_block", chat_content_block)
@@ -695,7 +695,7 @@ class ActionPlanner:
parallel_text = ""
# 获取动作提示模板并填充
using_action_prompt = prompt_manager.get_prompt("action_prompt")
using_action_prompt = prompt_manager.get_prompt("action")
using_action_prompt.add_context("action_name", action_name)
using_action_prompt.add_context("action_description", action_info.description)
using_action_prompt.add_context("action_parameters", param_text)

View File

@@ -960,9 +960,9 @@ class DefaultReplyer:
# think_level=0: 轻量回复(简短平淡)
# think_level=1: 中等回复(日常口语化)
if think_level == 0:
prompt_name = "replyer_prompt_0"
prompt_name = "replyer_light"
else: # think_level == 1 或默认
prompt_name = "replyer_prompt"
prompt_name = "replyer"
# 根据配置构建最终的 reply_style支持 multiple_reply_style 按概率随机替换
reply_style = global_config.personality.reply_style
@@ -1082,7 +1082,7 @@ class DefaultReplyer:
except Exception:
reply_style = global_config.personality.reply_style
prompt_template = prompt_manager.get_prompt("default_expressor_prompt")
prompt_template = prompt_manager.get_prompt("default_expressor")
prompt_template.add_context("expression_habits_block", expression_habits_block)
# prompt_template.add_context("relation_info_block", relation_info)
prompt_template.add_context("chat_target", chat_target_1)
@@ -1169,7 +1169,7 @@ class DefaultReplyer:
if global_config.lpmm_knowledge.lpmm_mode == "agent":
return ""
template_prompt = prompt_manager.get_prompt("lpmm_get_knowledge_prompt")
template_prompt = prompt_manager.get_prompt("lpmm_get_knowledge")
template_prompt.add_context("bot_name", global_config.bot.nickname)
template_prompt.add_context("time_now", lambda _: time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
template_prompt.add_context("chat_history", message)

View File

@@ -809,11 +809,11 @@ class PrivateReplyer:
# 使用统一的 is_bot_self 函数判断是否是机器人自己(支持多平台,包括 WebUI
if is_bot_self(platform, user_id):
prompt_template = prompt_manager.get_prompt("private_replyer_self_prompt")
prompt_template = prompt_manager.get_prompt("private_replyer_self")
prompt_template.add_context("target", target)
prompt_template.add_context("reason", reply_reason)
else:
prompt_template = prompt_manager.get_prompt("private_replyer_prompt")
prompt_template = prompt_manager.get_prompt("private_replyer")
prompt_template.add_context("reply_target_block", reply_target_block)
prompt_template.add_context("planner_reasoning", planner_reasoning)
prompt_template.add_context("expression_habits_block", expression_habits_block)
@@ -923,7 +923,7 @@ class PrivateReplyer:
# 兜底:即使 multiple_reply_style 配置异常也不影响正常回复
reply_style = global_config.personality.reply_style
prompt_template = prompt_manager.get_prompt("default_expressor_prompt")
prompt_template = prompt_manager.get_prompt("default_expressor")
prompt_template.add_context("expression_habits_block", expression_habits_block)
# prompt_template.add_context("relation_info_block", relation_info)
prompt_template.add_context("chat_target", chat_target_1)
@@ -1010,7 +1010,7 @@ class PrivateReplyer:
if global_config.lpmm_knowledge.lpmm_mode == "agent":
return ""
prompt_template = prompt_manager.get_prompt("lpmm_get_knowledge_prompt")
prompt_template = prompt_manager.get_prompt("lpmm_get_knowledge")
prompt_template.add_context("bot_name", global_config.bot.nickname)
prompt_template.add_context("time_now", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
prompt_template.add_context("chat_history", message)

View File

@@ -224,7 +224,7 @@ async def run_dream_agent_once(
tool_registry = get_dream_tool_registry()
tool_defs = tool_registry.get_tool_definitions()
head_prompt_template = prompt_manager.get_prompt("dream_react_head_prompt")
head_prompt_template = prompt_manager.get_prompt("dream_react_head")
head_prompt_template.add_context("bot_name", global_config.bot.nickname)
head_prompt_template.add_context("time_now", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
head_prompt_template.add_context("chat_id", chat_id)

View File

@@ -143,7 +143,7 @@ async def generate_dream_summary(
dream_styles_text = "\n".join([f"{i + 1}. {style}" for i, style in enumerate(selected_styles)])
# 使用 Prompt 管理器格式化梦境生成 prompt
dream_prompt_template = prompt_manager.get_prompt("dream_summary_prompt")
dream_prompt_template = prompt_manager.get_prompt("dream_summary")
dream_prompt_template.add_context("chat_id", chat_id)
dream_prompt_template.add_context("total_iterations", str(total_iterations))
dream_prompt_template.add_context("time_cost", str(time_cost))

View File

@@ -658,7 +658,7 @@ class ChatHistorySummarizer:
history_topics_block = "\n".join(f"- {t}" for t in existing_topics) if existing_topics else "(当前无历史话题)"
messages_block = "\n".join(numbered_lines)
prompt_template = prompt_manager.get_prompt("hippo_topic_analysis_prompt")
prompt_template = prompt_manager.get_prompt("hippo_topic_analysis")
prompt_template.add_context("history_topics_block", history_topics_block)
prompt_template.add_context("messages_block", messages_block)
prompt = await prompt_manager.render_prompt(prompt_template)
@@ -814,7 +814,7 @@ class ChatHistorySummarizer:
Returns:
tuple[bool, List[str], str]: (是否成功, 关键词列表, 概括)
"""
prompt_template = prompt_manager.get_prompt("hippo_topic_summary_prompt")
prompt_template = prompt_manager.get_prompt("hippo_topic_summary")
prompt_template.add_context("topic", topic)
prompt_template.add_context("original_text", original_text)
prompt = await prompt_manager.render_prompt(prompt_template)

View File

@@ -662,7 +662,7 @@ async def _react_agent_solve_question(
return None
# 执行最终评估
evaluation_prompt_template = prompt_manager.get_prompt("memory_retrieval_react_final_prompt")
evaluation_prompt_template = prompt_manager.get_prompt("memory_retrieval_react_final")
evaluation_prompt_template.add_context("bot_name", bot_name)
evaluation_prompt_template.add_context("time_now", time_now)
evaluation_prompt_template.add_context("chat_history", chat_history)

View File

@@ -80,7 +80,7 @@ class ToolExecutor:
return [], [], ""
# 构建工具调用提示词
prompt_template = prompt_manager.get_prompt("tool_executor_prompt")
prompt_template = prompt_manager.get_prompt("tool_executor")
prompt_template.add_context("target_message", target_message)
prompt_template.add_context("chat_history", chat_history)
prompt_template.add_context("sender", sender)