remove: remove the Planner question config option

pull/1489/head
SengokuCola 2026-01-12 00:37:50 +08:00
parent f591245540
commit 87d4c7c38a
5 changed files with 36 additions and 98 deletions
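For orientation, here is a minimal illustration (not part of the diff) of the planner "reply" action payload after this commit: `unknown_words` is still carried on the action, while the `question` field and its planner_question switch are gone. All field values below are placeholders.

```python
# Hypothetical planner "reply" action after this change; every value is a placeholder.
reply_action = {
    "action": "reply",
    "target_message_id": "m123",          # 消息id(m+数字)
    "unknown_words": ["词语1", "词语2"],    # suspected slang/abbreviations to look up
    "quote": False,                        # only present when chat.llm_quote is enabled
    # "question": ...                      # removed by this commit
}
```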

View File

@@ -53,7 +53,6 @@ reply
 4.不要选择回复你自己发送的消息
 5.不要单独对表情包进行回复
 6.将上下文中所有含义不明的疑似黑话的缩写词均写入unknown_words中
-7.如果你对上下文存在疑问有需要查询的问题写入question中
 {reply_action_example}
 no_reply
@@ -225,24 +224,6 @@ class ActionPlanner:
 reasoning = "未提供原因"
 action_data = {key: value for key, value in action_json.items() if key not in ["action"]}
-# 验证和清理 question
-if "question" in action_data:
-    q = action_data.get("question")
-    if isinstance(q, str):
-        cleaned_q = q.strip()
-        if cleaned_q:
-            action_data["question"] = cleaned_q
-        else:
-            # 如果清理后为空字符串,移除该字段
-            action_data.pop("question", None)
-    elif q is None:
-        # 如果为 None移除该字段
-        action_data.pop("question", None)
-    else:
-        # 如果不是字符串类型,记录警告并移除
-        logger.warning(f"{self.log_prefix}question 格式不正确,应为字符串类型,已忽略")
-        action_data.pop("question", None)
 # 非no_reply动作需要target_message_id
 target_message = None
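A small runnable sketch of the effect of this hunk: `action_data` is now just the action JSON minus the `"action"` key, with no special validation or cleanup for a `question` field. The sample `action_json` below is made up.

```python
# Made-up planner output; mirrors the dict comprehension kept in the hunk above.
action_json = {
    "action": "reply",
    "target_message_id": "m42",
    "unknown_words": ["词语1"],
}
action_data = {key: value for key, value in action_json.items() if key not in ["action"]}

assert "action" not in action_data            # the action name itself is stripped
assert action_data["unknown_words"] == ["词语1"]
assert "question" not in action_data          # no question handling remains
```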
@@ -520,15 +501,14 @@ class ActionPlanner:
 name_block = f"你的名字是{bot_name}{bot_nickname},请注意哪些是你自己的发言。"
 # 根据 think_mode 配置决定 reply action 的示例 JSON
-# 在 JSON 中直接作为 action 参数携带 unknown_words 和 question
+# 在 JSON 中直接作为 action 参数携带 unknown_words
 if global_config.chat.think_mode == "classic":
     reply_action_example = ""
     if global_config.chat.llm_quote:
         reply_action_example += "5.如果要明确回复消息使用quote如果消息不多不需要明确回复设置quote为false\n"
     reply_action_example += (
         '{{"action":"reply", "target_message_id":"消息id(m+数字)", '
-        '"unknown_words":["词语1","词语2"], '
-        '"question":"需要查询的问题"'
+        '"unknown_words":["词语1","词语2"]'
     )
     if global_config.chat.llm_quote:
         reply_action_example += ', "quote":"如果需要引用该message设置为true"'
@@ -542,8 +522,7 @@ class ActionPlanner:
 reply_action_example += (
     '{{"action":"reply", "think_level":数值等级(0或1), '
     '"target_message_id":"消息id(m+数字)", '
-    '"unknown_words":["词语1","词语2"], '
-    '"question":"需要查询的问题"'
+    '"unknown_words":["词语1","词语2"]'
 )
 if global_config.chat.llm_quote:
     reply_action_example += ', "quote":"如果需要引用该message设置为true"'
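A runnable sketch of what the classic-mode `reply_action_example` string contains after this change, assuming `chat.llm_quote` is enabled (faked here with a local `llm_quote` flag). The doubled braces are literal braces that survive a later `str.format()` call; the rest of the example string is built by code outside this hunk.

```python
llm_quote = True  # stand-in for global_config.chat.llm_quote

reply_action_example = ""
if llm_quote:
    reply_action_example += "5.如果要明确回复消息使用quote如果消息不多不需要明确回复设置quote为false\n"
reply_action_example += (
    '{{"action":"reply", "target_message_id":"消息id(m+数字)", '
    '"unknown_words":["词语1","词语2"]'
)
if llm_quote:
    reply_action_example += ', "quote":"如果需要引用该message设置为true"'

assert '"question"' not in reply_action_example  # the question hint is no longer shown to the model
```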

View File

@@ -845,18 +845,6 @@ class DefaultReplyer:
 chat_id, message_list_before_short, chat_talking_prompt_short, unknown_words
 )
-# 从 chosen_actions 中提取 question(仅在 reply 动作中)
-question = None
-if chosen_actions:
-    for action_info in chosen_actions:
-        if action_info.action_type == "reply" and isinstance(action_info.action_data, dict):
-            q = action_info.action_data.get("question")
-            if isinstance(q, str):
-                cleaned_q = q.strip()
-                if cleaned_q:
-                    question = cleaned_q
-                    break
 # 并行执行构建任务(包括黑话解释,可配置关闭)
 task_results = await asyncio.gather(
     self._time_and_run_task(
@@ -871,7 +859,7 @@
 self._time_and_run_task(self.build_personality_prompt(), "personality_prompt"),
 self._time_and_run_task(
     build_memory_retrieval_prompt(
-        chat_talking_prompt_short, sender, target, self.chat_stream, think_level=think_level, unknown_words=unknown_words, question=question
+        chat_talking_prompt_short, sender, target, self.chat_stream, think_level=think_level, unknown_words=unknown_words
     ),
     "memory_retrieval",
 ),
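The surrounding `asyncio.gather` block builds all prompt sections concurrently and times each one. Below is a minimal, self-contained sketch of that pattern; the helper name `time_and_run_task` mirrors `self._time_and_run_task` from the diff but is a standalone stand-in, and the builder coroutines are stubs.

```python
import asyncio
import time
from typing import Any, Awaitable, Tuple

async def time_and_run_task(coro: Awaitable[Any], name: str) -> Tuple[str, Any, float]:
    """Run one prompt-building coroutine and record how long it took."""
    start = time.monotonic()
    result = await coro
    return name, result, time.monotonic() - start

async def build_personality_prompt() -> str:            # stand-in builder
    await asyncio.sleep(0.01)
    return "personality_prompt text"

async def build_memory_retrieval_prompt_stub() -> str:  # stand-in for build_memory_retrieval_prompt
    await asyncio.sleep(0.02)
    return "memory_retrieval text"

async def main() -> None:
    task_results = await asyncio.gather(
        time_and_run_task(build_personality_prompt(), "personality_prompt"),
        time_and_run_task(build_memory_retrieval_prompt_stub(), "memory_retrieval"),
    )
    for name, _result, seconds in task_results:
        print(f"{name} built in {seconds:.3f}s")

asyncio.run(main())
```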

View File

@@ -710,18 +710,6 @@ class PrivateReplyer:
 else:
     jargon_coroutine = self._build_disabled_jargon_explanation()
-# 从 chosen_actions 中提取 question(仅在 reply 动作中)
-question = None
-if chosen_actions:
-    for action_info in chosen_actions:
-        if action_info.action_type == "reply" and isinstance(action_info.action_data, dict):
-            q = action_info.action_data.get("question")
-            if isinstance(q, str):
-                cleaned_q = q.strip()
-                if cleaned_q:
-                    question = cleaned_q
-                    break
 # 并行执行九个构建任务(包括黑话解释,可配置关闭)
 task_results = await asyncio.gather(
     self._time_and_run_task(
@@ -736,7 +724,7 @@
 self._time_and_run_task(self.build_personality_prompt(), "personality_prompt"),
 self._time_and_run_task(
     build_memory_retrieval_prompt(
-        chat_talking_prompt_short, sender, target, self.chat_stream, think_level=1, unknown_words=unknown_words, question=question
+        chat_talking_prompt_short, sender, target, self.chat_stream, think_level=1, unknown_words=unknown_words
     ),
     "memory_retrieval",
 ),

View File

@@ -57,7 +57,7 @@ TEMPLATE_DIR = os.path.join(PROJECT_ROOT, "template")
 # 考虑到实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码
 # 对该字段的更新请严格参照语义化版本规范https://semver.org/lang/zh-CN/
-MMC_VERSION = "0.12.2"
+MMC_VERSION = "0.13.0-snapshot.1"
 def get_key_comment(toml_table, key):
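The comments above require the hardcoded `MMC_VERSION` to follow Semantic Versioning. Under that spec, the new pre-release `0.13.0-snapshot.1` sorts after `0.12.2` but before the eventual `0.13.0` release. A simplified, self-contained check (prerelease identifiers are compared as plain strings here, which is cruder than the full spec):

```python
def semver_key(version: str):
    """Very simplified semver ordering key: (major, minor, patch, is_release, prerelease)."""
    core, _, prerelease = version.partition("-")
    major, minor, patch = (int(part) for part in core.split("."))
    # A version with a prerelease tag has lower precedence than the plain release.
    return (major, minor, patch, prerelease == "", prerelease)

assert semver_key("0.12.2") < semver_key("0.13.0-snapshot.1") < semver_key("0.13.0")
```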

View File

@@ -1097,7 +1097,6 @@ async def build_memory_retrieval_prompt(
     chat_stream,
     think_level: int = 1,
     unknown_words: Optional[List[str]] = None,
-    question: Optional[str] = None,
 ) -> str:
     """构建记忆检索提示
     使用两段式查询第一步生成问题第二步使用ReAct Agent查询答案
@@ -1109,7 +1108,6 @@
     chat_stream: 聊天流对象
     think_level: 思考深度等级
     unknown_words: Planner 提供的未知词语列表优先使用此列表而不是从聊天记录匹配
-    question: Planner 提供的问题 planner_question 配置开启时直接使用此问题进行检索
 Returns:
     str: 记忆检索结果字符串
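The docstring above describes a two-stage retrieval: an LLM first turns the chat history into a query question, then a ReAct agent answers it. A minimal, self-contained sketch of that shape, with `ask_llm` and `react_agent_answer` as stubs standing in for the real model and agent calls:

```python
import asyncio
from typing import List, Optional

async def ask_llm(prompt: str) -> str:
    # Stub for the question-generation model call.
    return "示例问题:上下文里提到的版本号是什么?"

async def react_agent_answer(question: str) -> str:
    # Stub for the ReAct agent that searches memory and answers.
    return f"[answer to: {question}]"

async def memory_retrieval(chat_history: str, unknown_words: Optional[List[str]] = None) -> str:
    question = (await ask_llm(f"根据聊天记录生成一个查询问题(注意这些词:{unknown_words}):\n{chat_history}")).strip()
    if not question:
        return ""  # no question, nothing to retrieve
    return await react_agent_answer(question)

print(asyncio.run(memory_retrieval("……聊天记录……", unknown_words=["MMC"])))
```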
@@ -1144,52 +1142,37 @@
 if not recent_query_history:
     recent_query_history = "最近没有查询记录。"
-# 第一步:生成问题或使用 Planner 提供的问题
+# 第一步:使用 LLM 生成问题
+question_prompt = await global_prompt_manager.format_prompt(
+    "memory_retrieval_question_prompt",
+    bot_name=bot_name,
+    time_now=time_now,
+    chat_history=message,
+    recent_query_history=recent_query_history,
+    sender=sender,
+    target_message=target,
+)
+success, response, reasoning_content, model_name = await llm_api.generate_with_model(
+    question_prompt,
+    model_config=model_config.model_task_config.tool_use,
+    request_type="memory.question",
+)
+if global_config.debug.show_memory_prompt:
+    logger.info(f"{log_prefix}记忆检索问题生成提示词: {question_prompt}")
+    # logger.info(f"记忆检索问题生成响应: {response}")
+if not success:
+    logger.error(f"{log_prefix}LLM生成问题失败: {response}")
+    return ""
+# 解析概念列表和问题列表,只取第一个问题
 single_question: Optional[str] = None
-# 如果 planner_question 配置开启,只使用 Planner 提供的问题,不使用旧模式
-if global_config.memory.planner_question:
-    if question and isinstance(question, str) and question.strip():
-        # 清理和验证 question
-        single_question = question.strip()
-        logger.info(f"{log_prefix}使用 Planner 提供的 question: {single_question}")
-    else:
-        # planner_question 开启但没有提供 question跳过记忆检索
-        logger.debug(f"{log_prefix}planner_question 已开启但未提供 question跳过记忆检索")
-        end_time = time.time()
-        logger.info(f"{log_prefix}无当次查询,不返回任何结果,耗时: {(end_time - start_time):.3f}")
-        return ""
-else:
-    # planner_question 关闭使用旧模式LLM 生成问题
-    question_prompt = await global_prompt_manager.format_prompt(
-        "memory_retrieval_question_prompt",
-        bot_name=bot_name,
-        time_now=time_now,
-        chat_history=message,
-        recent_query_history=recent_query_history,
-        sender=sender,
-        target_message=target,
-    )
-    success, response, reasoning_content, model_name = await llm_api.generate_with_model(
-        question_prompt,
-        model_config=model_config.model_task_config.tool_use,
-        request_type="memory.question",
-    )
-    if global_config.debug.show_memory_prompt:
-        logger.info(f"{log_prefix}记忆检索问题生成提示词: {question_prompt}")
-        # logger.info(f"记忆检索问题生成响应: {response}")
-    if not success:
-        logger.error(f"{log_prefix}LLM生成问题失败: {response}")
-        return ""
-    # 解析概念列表和问题列表,只取第一个问题
-    _, questions = parse_questions_json(response)
-    if questions and len(questions) > 0:
-        single_question = questions[0].strip()
-        logger.info(f"{log_prefix}解析到问题: {single_question}")
+_, questions = parse_questions_json(response)
+if questions and len(questions) > 0:
+    single_question = questions[0].strip()
+    logger.info(f"{log_prefix}解析到问题: {single_question}")
 # 初始阶段:使用 Planner 提供的 unknown_words 进行检索(如果提供)
 initial_info = ""
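After this commit the first stage always runs through the LLM path shown above: format the question prompt, call the model, then keep only the first parsed question. A small sketch of that last parsing step; the `{"concepts": [...], "questions": [...]}` shape assumed for `parse_questions_json` is a guess for illustration, not taken from the source.

```python
import json
from typing import List, Optional, Tuple

def parse_questions_json(response: str) -> Tuple[List[str], List[str]]:
    """Toy stand-in for the real parser; assumes a {"concepts": [...], "questions": [...]} object."""
    try:
        data = json.loads(response)
    except json.JSONDecodeError:
        return [], []
    return data.get("concepts", []), data.get("questions", [])

single_question: Optional[str] = None
_, questions = parse_questions_json('{"concepts": [], "questions": ["  这个项目的当前版本是多少?  "]}')
if questions and len(questions) > 0:
    single_question = questions[0].strip()

assert single_question == "这个项目的当前版本是多少?"
```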