pull/1377/head
墨梓柒 2025-11-20 15:46:07 +08:00
commit 367541e165
No known key found for this signature in database
GPG Key ID: 4A65B9DBA35F7635
9 changed files with 108 additions and 53 deletions

View File

@@ -1,7 +1,10 @@
# Changelog
## [0.11.5] - 2025-11-21
### Feature changes and fixes
- Improved coordination between the planner and the replyer
- More granular debug logging
## [0.11.4] - 2025-11-20
## [0.11.4] - 2025-11-19
### 🌟 Highlights
- **First official Web management UI**: before this release, MaiBot had no WebUI and all configuration had to be edited by hand in TOML files
- **Authentication system**: secure token login (system-generated 64-character random token or custom token supported), first-run setup wizard
@@ -41,7 +44,7 @@
Say goodbye to hand-editing configuration files and enjoy a modern graphical interface!
## [0.11.3] - 2025-11-19
## [0.11.3] - 2025-11-18
### Feature changes and fixes
- Improved memory extraction strategy
- Improved jargon extraction

View File

@@ -230,7 +230,7 @@ class HeartFChatting:
if (message.is_mentioned or message.is_at) and global_config.chat.mentioned_bot_reply:
mentioned_message = message
logger.info(f"{self.log_prefix} 当前talk_value: {global_config.chat.get_talk_value(self.stream_id)}")
# logger.info(f"{self.log_prefix} 当前talk_value: {global_config.chat.get_talk_value(self.stream_id)}")
# *控制频率用
if mentioned_message:
@@ -410,7 +410,7 @@ class HeartFChatting:
# asyncio.create_task(self.chat_history_summarizer.process())
cycle_timers, thinking_id = self.start_cycle()
logger.info(f"{self.log_prefix} 开始第{self._cycle_counter}次思考")
logger.info(f"{self.log_prefix} 开始第{self._cycle_counter}次思考(频率: {global_config.chat.get_talk_value(self.stream_id)})")
# 第一步:动作检查
available_actions: Dict[str, ActionInfo] = {}

View File

@@ -92,9 +92,10 @@ class QAManager:
# 过滤阈值
result = dyn_select_top_k(result, 0.5, 1.0)
for res in result:
raw_paragraph = self.embed_manager.paragraphs_embedding_store.store[res[0]].str
logger.info(f"找到相关文段,相关系数:{res[1]:.8f}\n{raw_paragraph}\n\n")
if global_config.debug.show_lpmm_paragraph:
for res in result:
raw_paragraph = self.embed_manager.paragraphs_embedding_store.store[res[0]].str
logger.info(f"找到相关文段,相关系数:{res[1]:.8f}\n{raw_paragraph}\n\n")
return result, ppr_node_weights
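
This hunk shows the pattern the commit applies across several modules: a previously unconditional `logger.info` call is moved behind a flag on the new debug config. Below is a minimal, self-contained sketch of that pattern; `DebugConfig` here is a simplified stand-in rather than the project's actual class, and the `(paragraph_id, score)` shape of `result` is inferred from the indexing above.

```python
import logging
from dataclasses import dataclass

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("lpmm")


@dataclass
class DebugConfig:
    # Hypothetical stand-in for the project's debug config; only the flag used here.
    show_lpmm_paragraph: bool = False


debug = DebugConfig(show_lpmm_paragraph=True)


def log_relevant_paragraphs(result, paragraph_store):
    """Log each matched paragraph only when the debug switch is enabled.

    `result` is assumed to be a list of (paragraph_id, score) pairs and
    `paragraph_store` a mapping from paragraph_id to its text.
    """
    if debug.show_lpmm_paragraph:
        for para_id, score in result:
            logger.info("found related paragraph, score %.8f\n%s", score, paragraph_store[para_id])


log_relevant_paragraphs([("p1", 0.93214567)], {"p1": "example paragraph"})
```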

View File

@@ -7,7 +7,6 @@ from typing import Dict, Optional, Tuple, List, TYPE_CHECKING, Union
from rich.traceback import install
from datetime import datetime
from json_repair import repair_json
from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config, model_config
from src.common.logger import get_logger
@@ -164,6 +163,45 @@ class ActionPlanner:
return item[1]
return None
def _replace_message_ids_with_text(
self, text: Optional[str], message_id_list: List[Tuple[str, "DatabaseMessages"]]
) -> Optional[str]:
"""将文本中的 m+数字 消息ID替换为原消息内容并添加双引号"""
if not text:
return text
id_to_message = {msg_id: msg for msg_id, msg in message_id_list}
# 匹配m后带2-4位数字前后不是字母数字下划线
pattern = r"(?<![A-Za-z0-9_])m\d{2,4}(?![A-Za-z0-9_])"
matches = re.findall(pattern, text)
if matches:
available_ids = set(id_to_message.keys())
found_ids = set(matches)
missing_ids = found_ids - available_ids
if missing_ids:
logger.info(f"{self.log_prefix}planner理由中引用的消息ID不在当前上下文中: {missing_ids}, 可用ID: {list(available_ids)[:10]}...")
logger.info(f"{self.log_prefix}planner理由替换: 找到{len(matches)}个消息ID引用其中{len(found_ids & available_ids)}个在上下文中")
def _replace(match: re.Match[str]) -> str:
msg_id = match.group(0)
message = id_to_message.get(msg_id)
if not message:
logger.warning(f"{self.log_prefix}planner理由引用 {msg_id} 未找到对应消息,保持原样")
return msg_id
msg_text = (message.processed_plain_text or message.display_message or "").strip()
if not msg_text:
logger.warning(f"{self.log_prefix}planner理由引用 {msg_id} 的消息内容为空,保持原样")
return msg_id
preview = msg_text if len(msg_text) <= 100 else f"{msg_text[:97]}..."
logger.info(f"{self.log_prefix}planner理由引用 {msg_id} -> 消息({preview}")
return f"消息({msg_text}"
return re.sub(pattern, _replace, text)
def _parse_single_action(
self,
action_json: dict,
@@ -176,7 +214,10 @@ class ActionPlanner:
try:
action = action_json.get("action", "no_reply")
reasoning = action_json.get("reason", "未提供原因")
original_reasoning = action_json.get("reason", "未提供原因")
reasoning = self._replace_message_ids_with_text(original_reasoning, message_id_list)
if reasoning is None:
reasoning = original_reasoning
action_data = {key: value for key, value in action_json.items() if key not in ["action", "reason"]}
# 非no_reply动作需要target_message_id
target_message = None
@@ -573,9 +614,6 @@ class ActionPlanner:
# 调用LLM
llm_content, (reasoning_content, _, _) = await self.planner_llm.generate_response_async(prompt=prompt)
# logger.info(f"{self.log_prefix}规划器原始提示词: {prompt}")
# logger.info(f"{self.log_prefix}规划器原始响应: {llm_content}")
if global_config.debug.show_planner_prompt:
logger.info(f"{self.log_prefix}规划器原始提示词: {prompt}")
logger.info(f"{self.log_prefix}规划器原始响应: {llm_content}")
@@ -604,6 +642,7 @@
if llm_content:
try:
json_objects, extracted_reasoning = self._extract_json_from_markdown(llm_content)
extracted_reasoning = self._replace_message_ids_with_text(extracted_reasoning, message_id_list) or ""
if json_objects:
logger.debug(f"{self.log_prefix}从响应中提取到{len(json_objects)}个JSON对象")
filtered_actions_list = list(filtered_actions.items())
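
The new `_replace_message_ids_with_text` helper rewrites `m<digits>` references in the planner's reasoning into a `消息(<original text>)` form. Here is a stand-alone sketch of its core regex logic, using the same pattern as the diff; `FakeMessage` is a hypothetical simplification of `DatabaseMessages`.

```python
import re
from dataclasses import dataclass
from typing import List, Optional, Tuple


@dataclass
class FakeMessage:
    # Hypothetical simplification of DatabaseMessages for this sketch.
    processed_plain_text: str = ""
    display_message: str = ""


# Same pattern as the planner: "m" plus 2-4 digits, not embedded in a longer identifier.
PATTERN = r"(?<![A-Za-z0-9_])m\d{2,4}(?![A-Za-z0-9_])"


def replace_message_ids(text: Optional[str], message_id_list: List[Tuple[str, FakeMessage]]) -> Optional[str]:
    """Replace m<digits> references with the referenced message text, wrapped as 消息(...)."""
    if not text:
        return text
    id_to_message = dict(message_id_list)

    def _replace(match: re.Match) -> str:
        msg = id_to_message.get(match.group(0))
        if msg is None:
            return match.group(0)  # unknown id: keep as-is
        msg_text = (msg.processed_plain_text or msg.display_message or "").strip()
        return f"消息({msg_text})" if msg_text else match.group(0)

    return re.sub(PATTERN, _replace, text)


messages = [("m101", FakeMessage(processed_plain_text="What time is the meeting?"))]
print(replace_message_ids("Responding to m101", messages))
# -> Responding to 消息(What time is the meeting?)
```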

View File

@@ -107,7 +107,7 @@ class ChatHistorySummarizer:
self.last_check_time = current_time
return
logger.info(
logger.debug(
f"{self.log_prefix} 开始处理聊天概括,时间窗口: {self.last_check_time:.2f} -> {current_time:.2f}"
)
@@ -119,7 +119,7 @@
before_count = len(self.current_batch.messages)
self.current_batch.messages.extend(new_messages)
self.current_batch.end_time = current_time
logger.info(f"{self.log_prefix} 批次更新: {before_count} -> {len(self.current_batch.messages)} 条消息")
logger.info(f"{self.log_prefix} 更新聊天话题: {before_count} -> {len(self.current_batch.messages)} 条消息")
else:
# 创建新批次
self.current_batch = MessageBatch(
@@ -127,7 +127,7 @@
start_time=new_messages[0].time if new_messages else current_time,
end_time=current_time,
)
logger.info(f"{self.log_prefix} 新建批次: {len(new_messages)} 条消息")
logger.info(f"{self.log_prefix} 新建聊天话题: {len(new_messages)} 条消息")
# 检查是否需要打包
await self._check_and_package(current_time)
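
Only the log wording changes in these hunks, but they sit on top of an extend-or-create batching step: new messages either extend the current batch (updating its `end_time`) or open a fresh `MessageBatch`, after which `_check_and_package` decides whether to close it. A rough sketch of that step, with a hypothetical simplified `MessageBatch`:

```python
from dataclasses import dataclass, field
from typing import List, Optional


@dataclass
class MessageBatch:
    # Hypothetical stand-in for the summarizer's batch container.
    messages: List[str] = field(default_factory=list)
    start_time: float = 0.0
    end_time: float = 0.0


def update_batch(current: Optional[MessageBatch], new_messages: List[str], now: float) -> MessageBatch:
    """Extend the running batch if one exists; otherwise open a new one."""
    if current is not None:
        current.messages.extend(new_messages)
        current.end_time = now
        return current
    return MessageBatch(messages=list(new_messages), start_time=now, end_time=now)


batch = update_batch(None, ["hello"], now=100.0)
batch = update_batch(batch, ["how are you"], now=105.0)
print(len(batch.messages), batch.start_time, batch.end_time)  # 2 100.0 105.0
```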

View File

@@ -581,9 +581,15 @@ class DebugConfig(ConfigBase):
show_jargon_prompt: bool = False
"""是否显示jargon相关提示词"""
show_memory_prompt: bool = False
"""是否显示记忆检索相关prompt"""
show_planner_prompt: bool = False
"""是否显示planner相关提示词"""
show_lpmm_paragraph: bool = False
"""是否显示lpmm找到的相关文段日志"""
@dataclass
class ExperimentalConfig(ConfigBase):
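
A note on the `DebugConfig` fields added above: they correspond to the switches appended to the `bot_config` template at the end of this commit. Below is a minimal sketch of how such flags could be loaded from TOML into a dataclass; `tomllib` is the Python 3.11+ standard-library parser, and the `[debug]` section name is an assumption, not taken from the diff.

```python
import tomllib
from dataclasses import dataclass, fields


@dataclass
class DebugConfig:
    # Subset of the flags added in this commit; defaults match the template.
    show_jargon_prompt: bool = False
    show_memory_prompt: bool = False
    show_planner_prompt: bool = False
    show_lpmm_paragraph: bool = False


TOML_TEXT = """
[debug]
show_memory_prompt = false
show_planner_prompt = true
show_lpmm_paragraph = false
"""

raw = tomllib.loads(TOML_TEXT)["debug"]
known = {f.name for f in fields(DebugConfig)}
debug = DebugConfig(**{k: v for k, v in raw.items() if k in known})
print(debug.show_planner_prompt)  # True
```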

View File

@@ -384,10 +384,10 @@ class JargonMiner:
logger.error(f"jargon {content} 推断2解析失败: {e}")
return
logger.info(f"jargon {content} 推断2提示词: {prompt2}")
logger.info(f"jargon {content} 推断2结果: {response2}")
logger.info(f"jargon {content} 推断1提示词: {prompt1}")
logger.info(f"jargon {content} 推断1结果: {response1}")
# logger.info(f"jargon {content} 推断2提示词: {prompt2}")
# logger.info(f"jargon {content} 推断2结果: {response2}")
# logger.info(f"jargon {content} 推断1提示词: {prompt1}")
# logger.info(f"jargon {content} 推断1结果: {response1}")
if global_config.debug.show_jargon_prompt:
logger.info(f"jargon {content} 推断2提示词: {prompt2}")

View File

@@ -16,8 +16,8 @@ from src.llm_models.payload_content.message import MessageBuilder, RoleType, Mes
logger = get_logger("memory_retrieval")
THINKING_BACK_NOT_FOUND_RETENTION_SECONDS = 3600 # 未找到答案记录保留时长
THINKING_BACK_CLEANUP_INTERVAL_SECONDS = 300 # 清理频率
THINKING_BACK_NOT_FOUND_RETENTION_SECONDS = 36000 # 未找到答案记录保留时长
THINKING_BACK_CLEANUP_INTERVAL_SECONDS = 3000 # 清理频率
_last_not_found_cleanup_ts: float = 0.0
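For reference, the new values raise the not-found-record retention from 3600 s (1 hour) to 36000 s (10 hours) and the cleanup interval from 300 s (5 minutes) to 3000 s (50 minutes).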
@@ -340,7 +340,8 @@ async def _react_agent_solve_question(
max_iterations=max_iterations,
)
logger.info(f"ReAct Agent 第 {iteration + 1} 次Prompt: {prompt}")
if global_config.debug.show_memory_prompt:
logger.info(f"ReAct Agent 第 {iteration + 1} 次Prompt: {prompt}")
success, response, reasoning_content, model_name, tool_calls = await llm_api.generate_with_model_with_tools(
prompt,
model_config=model_config.model_task_config.tool_use,
@@ -380,42 +381,43 @@ async def _react_agent_solve_question(
messages.extend(_conversation_messages)
# 优化日志展示 - 合并所有消息到一条日志
log_lines = []
for idx, msg in enumerate(messages, 1):
role_name = msg.role.value if hasattr(msg.role, "value") else str(msg.role)
if global_config.debug.show_memory_prompt:
# 优化日志展示 - 合并所有消息到一条日志
log_lines = []
for idx, msg in enumerate(messages, 1):
role_name = msg.role.value if hasattr(msg.role, "value") else str(msg.role)
# 处理内容 - 显示完整内容,不截断
if isinstance(msg.content, str):
full_content = msg.content
content_type = "文本"
elif isinstance(msg.content, list):
text_parts = [item for item in msg.content if isinstance(item, str)]
image_count = len([item for item in msg.content if isinstance(item, tuple)])
full_content = "".join(text_parts) if text_parts else ""
content_type = f"混合({len(text_parts)}段文本, {image_count}张图片)"
else:
full_content = str(msg.content)
content_type = "未知"
# 处理内容 - 显示完整内容,不截断
if isinstance(msg.content, str):
full_content = msg.content
content_type = "文本"
elif isinstance(msg.content, list):
text_parts = [item for item in msg.content if isinstance(item, str)]
image_count = len([item for item in msg.content if isinstance(item, tuple)])
full_content = "".join(text_parts) if text_parts else ""
content_type = f"混合({len(text_parts)}段文本, {image_count}张图片)"
else:
full_content = str(msg.content)
content_type = "未知"
# 构建单条消息的日志信息
msg_info = f"\n[消息 {idx}] 角色: {role_name} 内容类型: {content_type}\n========================================"
# 构建单条消息的日志信息
msg_info = f"\n[消息 {idx}] 角色: {role_name} 内容类型: {content_type}\n========================================"
if full_content:
msg_info += f"\n{full_content}"
if full_content:
msg_info += f"\n{full_content}"
if msg.tool_calls:
msg_info += f"\n 工具调用: {len(msg.tool_calls)}"
for tool_call in msg.tool_calls:
msg_info += f"\n - {tool_call}"
if msg.tool_calls:
msg_info += f"\n 工具调用: {len(msg.tool_calls)}"
for tool_call in msg.tool_calls:
msg_info += f"\n - {tool_call}"
if msg.tool_call_id:
msg_info += f"\n 工具调用ID: {msg.tool_call_id}"
if msg.tool_call_id:
msg_info += f"\n 工具调用ID: {msg.tool_call_id}"
log_lines.append(msg_info)
log_lines.append(msg_info)
# 合并所有消息为一条日志输出
logger.info(f"消息列表 (共{len(messages)}条):{''.join(log_lines)}")
# 合并所有消息为一条日志输出
logger.info(f"消息列表 (共{len(messages)}条):{''.join(log_lines)}")
return messages
@@ -1068,7 +1070,8 @@ async def build_memory_retrieval_prompt(
request_type="memory.question",
)
logger.info(f"记忆检索问题生成提示词: {question_prompt}")
if global_config.debug.show_memory_prompt:
logger.info(f"记忆检索问题生成提示词: {question_prompt}")
logger.info(f"记忆检索问题生成响应: {response}")
if not success:

View File

@@ -1,5 +1,5 @@
[inner]
version = "6.21.6"
version = "6.21.8"
#----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读----
#如果你想要修改配置文件请递增version的值
@@ -211,6 +211,9 @@ show_prompt = false # 是否显示prompt
show_replyer_prompt = false # 是否显示回复器prompt
show_replyer_reasoning = false # 是否显示回复器推理
show_jargon_prompt = false # 是否显示jargon相关提示词
show_memory_prompt = false # 是否显示记忆检索相关提示词
show_planner_prompt = false # 是否显示planner的prompt和原始返回结果
show_lpmm_paragraph = false # 是否显示lpmm找到的相关文段日志
[maim_message]
auth_token = [] # 认证令牌用于API验证为空则不启用验证