feat:可选开启在记忆中使用jargon识别
(feat: make jargon detection during memory retrieval optional)
pull/1385/head
SengokuCola 2025-11-26 16:47:38 +08:00
parent 4a530a7bca
commit 185d3b7243
3 changed files with 38 additions and 25 deletions

File 1/3

@@ -244,6 +244,9 @@ class MemoryConfig(ConfigBase):
     max_agent_iterations: int = 5
     """Agent最多迭代轮数最低为1"""

+    enable_jargon_detection: bool = True
+    """记忆检索过程中是否启用黑话识别"""
+
     def __post_init__(self):
         """验证配置值"""
         if self.max_agent_iterations < 1:
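
For orientation, a minimal standalone sketch of how the new switch sits next to the existing field. This is not repository code: ConfigBase is not shown in the diff, so a plain dataclass stands in for it, and the body of the __post_init__ check is an assumption.

from dataclasses import dataclass


@dataclass
class MemoryConfig:
    """Simplified stand-in for the repository's MemoryConfig(ConfigBase)."""

    max_agent_iterations: int = 5          # maximum ReAct agent iterations, minimum 1
    enable_jargon_detection: bool = True   # new in this commit: enable jargon detection during memory retrieval

    def __post_init__(self):
        # The diff only shows the comparison; raising on an invalid value is assumed here.
        if self.max_agent_iterations < 1:
            raise ValueError("max_agent_iterations must be at least 1")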

File 2/3

@@ -342,8 +342,9 @@ async def _react_agent_solve_question(
     """
     start_time = time.time()
     collected_info = initial_info if initial_info else ""
+    enable_jargon_detection = global_config.memory.enable_jargon_detection
     seen_jargon_concepts: Set[str] = set()
-    if initial_jargon_concepts:
+    if enable_jargon_detection and initial_jargon_concepts:
         for concept in initial_jargon_concepts:
             concept = (concept or "").strip()
             if concept:
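
The hunk cuts off inside the seeding loop, so here is a small self-contained sketch of its apparent intent: initial concepts are normalized and marked as already seen, but only when the switch is on. The helper name is mine, and recording each non-empty concept into the set is an assumption.

from typing import Iterable, Optional, Set


def seed_seen_concepts(
    initial_jargon_concepts: Optional[Iterable[str]],
    enable_jargon_detection: bool,
) -> Set[str]:
    seen: Set[str] = set()
    if enable_jargon_detection and initial_jargon_concepts:
        for concept in initial_jargon_concepts:
            concept = (concept or "").strip()
            if concept:
                seen.add(concept)  # assumed: the real loop records the concept as seen
    return seen


print(seed_seen_concepts(["摸鱼", " ", "摸鱼"], True))   # {'摸鱼'}
print(seed_seen_concepts(["摸鱼"], False))               # set()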
@@ -697,20 +698,21 @@ async def _react_agent_solve_question(
                 tool_builder.add_text_content(observation_text)
                 tool_builder.add_tool_call(tool_call_item.call_id)
                 conversation_messages.append(tool_builder.build())
-                jargon_concepts = _match_jargon_from_text(stripped_observation, chat_id)
-                if jargon_concepts:
-                    jargon_info = ""
-                    new_concepts = []
-                    for concept in jargon_concepts:
-                        normalized_concept = concept.strip()
-                        if normalized_concept and normalized_concept not in seen_jargon_concepts:
-                            new_concepts.append(normalized_concept)
-                            seen_jargon_concepts.add(normalized_concept)
-                    if new_concepts:
-                        jargon_info = await _retrieve_concepts_with_jargon(new_concepts, chat_id)
-                        if jargon_info:
-                            collected_info += f"\n{jargon_info}\n"
-                            logger.info(f"工具输出触发黑话解析: {new_concepts}")
+                if enable_jargon_detection:
+                    jargon_concepts = _match_jargon_from_text(stripped_observation, chat_id)
+                    if jargon_concepts:
+                        jargon_info = ""
+                        new_concepts = []
+                        for concept in jargon_concepts:
+                            normalized_concept = concept.strip()
+                            if normalized_concept and normalized_concept not in seen_jargon_concepts:
+                                new_concepts.append(normalized_concept)
+                                seen_jargon_concepts.add(normalized_concept)
+                        if new_concepts:
+                            jargon_info = await _retrieve_concepts_with_jargon(new_concepts, chat_id)
+                            if jargon_info:
+                                collected_info += f"\n{jargon_info}\n"
+                                logger.info(f"工具输出触发黑话解析: {new_concepts}")

                 # logger.info(f"ReAct Agent 第 {iteration + 1} 次迭代 工具 {i+1} 执行结果: {observation_text}")
             thinking_steps.append(step)
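
The deduplication in this hunk is what keeps the agent from resolving the same slang term again on later iterations. A compact sketch of just that step; the helper name and the usage below are mine, not the repository's API.

from typing import List, Set


def extract_new_concepts(jargon_concepts: List[str], seen: Set[str]) -> List[str]:
    # Keep only concepts not resolved in an earlier iteration, and remember them.
    new_concepts: List[str] = []
    for concept in jargon_concepts:
        normalized = concept.strip()
        if normalized and normalized not in seen:
            new_concepts.append(normalized)
            seen.add(normalized)
    return new_concepts


seen: Set[str] = set()
print(extract_new_concepts(["摸鱼", "内卷", "摸鱼"], seen))  # ['摸鱼', '内卷']
print(extract_new_concepts(["摸鱼", "破防"], seen))          # ['破防']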
@@ -859,13 +861,15 @@ async def _process_single_question(
     # 直接使用ReAct Agent查询不再从thinking_back获取缓存
     logger.info(f"使用ReAct Agent查询问题: {question[:50]}...")

+    jargon_concepts_for_agent = initial_jargon_concepts if global_config.memory.enable_jargon_detection else None
+
     found_answer, answer, thinking_steps, is_timeout = await _react_agent_solve_question(
         question=question,
         chat_id=chat_id,
         max_iterations=global_config.memory.max_agent_iterations,
         timeout=120.0,
         initial_info=question_initial_info,
-        initial_jargon_concepts=initial_jargon_concepts,
+        initial_jargon_concepts=jargon_concepts_for_agent,
     )

     # 存储查询历史到数据库(超时时不存储)
@@ -950,17 +954,22 @@ async def build_memory_retrieval_prompt(
     if questions:
         logger.info(f"解析到 {len(questions)} 个问题: {questions}")

-        # 使用匹配逻辑自动识别聊天中的黑话概念
-        concepts = _match_jargon_from_text(message, chat_id)
-        if concepts:
-            logger.info(f"黑话匹配命中 {len(concepts)} 个概念: {concepts}")
+        enable_jargon_detection = global_config.memory.enable_jargon_detection
+        concepts: List[str] = []
+
+        if enable_jargon_detection:
+            # 使用匹配逻辑自动识别聊天中的黑话概念
+            concepts = _match_jargon_from_text(message, chat_id)
+            if concepts:
+                logger.info(f"黑话匹配命中 {len(concepts)} 个概念: {concepts}")
+            else:
+                logger.debug("黑话匹配未命中任何概念")
         else:
-            logger.debug("黑话匹配未命中任何概念")
+            logger.debug("已禁用记忆检索中的黑话识别")

         # 对匹配到的概念进行jargon检索作为初始信息
         initial_info = ""
-        if concepts:
-            # logger.info(f"开始对 {len(concepts)} 个概念进行jargon检索")
+        if enable_jargon_detection and concepts:
             concept_info = await _retrieve_concepts_with_jargon(concepts, chat_id)
             if concept_info:
                 initial_info += concept_info
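
Taken together, this entry-point change means concept matching and the jargon lookup only run when the switch is on. The sketch below is not the repository function; _match_jargon_from_text and _retrieve_concepts_with_jargon are replaced by toy stand-ins so the gated flow can be exercised on its own.

import asyncio
from typing import Awaitable, Callable, List, Tuple


async def build_initial_jargon_info(
    message: str,
    chat_id: str,
    enable_jargon_detection: bool,
    match_jargon: Callable[[str, str], List[str]],
    retrieve_concepts: Callable[[List[str], str], Awaitable[str]],
) -> Tuple[List[str], str]:
    concepts: List[str] = []
    initial_info = ""
    if enable_jargon_detection:
        concepts = match_jargon(message, chat_id)
        if concepts:
            initial_info = await retrieve_concepts(concepts, chat_id)
    return concepts, initial_info


def fake_match(message: str, chat_id: str) -> List[str]:
    return [w for w in ("摸鱼", "破防") if w in message]


async def fake_retrieve(concepts: List[str], chat_id: str) -> str:
    return "; ".join(f"{c}: <explanation>" for c in concepts)


print(asyncio.run(build_initial_jargon_info("今天又在摸鱼", "chat-1", True, fake_match, fake_retrieve)))
# (['摸鱼'], '摸鱼: <explanation>')
print(asyncio.run(build_initial_jargon_info("今天又在摸鱼", "chat-1", False, fake_match, fake_retrieve)))
# ([], '')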
@@ -985,7 +994,7 @@ async def build_memory_retrieval_prompt(
                 chat_id=chat_id,
                 context=message,
                 initial_info=initial_info,
-                initial_jargon_concepts=concepts,
+                initial_jargon_concepts=concepts if enable_jargon_detection else None,
             )
             for question in questions
         ]

File 3/3

@@ -1,5 +1,5 @@
 [inner]
-version = "6.23.4"
+version = "6.23.5"

 #----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读----
 #如果你想要修改配置文件请递增version的值

@@ -109,6 +109,7 @@ include_planner_reasoning = false # 是否将planner推理加入replyer默认

 [memory]
 max_agent_iterations = 3 # 记忆思考深度最低为1不深入思考
+enable_jargon_detection = true # 记忆检索过程中是否启用黑话识别

 [jargon]
 all_global = true # 是否开启全局黑话模式,注意,此功能关闭后,已经记录的全局黑话不会改变,需要手动删除
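
For completeness, a small sketch of how the new template key would be read on the consuming side. The file name below is a placeholder; the template's real path is not shown on this page.

import tomllib  # Python 3.11+; earlier versions can use the third-party tomli package

# Placeholder path, not the repository's actual template location.
with open("bot_config.toml", "rb") as f:
    config = tomllib.load(f)

memory_cfg = config.get("memory", {})
# A missing key falls back to the default used by the Python config class (True).
enable_jargon_detection = memory_cfg.get("enable_jargon_detection", True)
print("jargon detection enabled:", enable_jargon_detection)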