ref: move make_question from a Plugin action to pre-reply work; add a chat_id field to tools

pull/1318/head
SengokuCola 2025-10-25 15:56:17 +08:00
parent 5ee3d7ea43
commit bd5fc8969b
8 changed files with 420 additions and 172 deletions

View File

@@ -22,6 +22,7 @@ from src.express.expression_learner import expression_learner_manager
from src.chat.frequency_control.frequency_control import frequency_control_manager
from src.memory_system.question_maker import QuestionMaker
from src.memory_system.questions import global_conflict_tracker
from src.memory_system.curious import check_and_make_question
from src.person_info.person_info import Person
from src.plugin_system.base.component_types import EventType, ActionInfo
from src.plugin_system.core import events_manager
@@ -184,12 +185,12 @@ class HeartFChatting:
)
question_probability = 0
if time.time() - self.last_active_time > 3600:
question_probability = 0.001
elif time.time() - self.last_active_time > 1200:
if time.time() - self.last_active_time > 7200:
question_probability = 0.0003
else:
elif time.time() - self.last_active_time > 3600:
question_probability = 0.0001
else:
question_probability = 0.00003
question_probability = question_probability * global_config.chat.get_auto_chat_value(self.stream_id)
@@ -333,6 +334,9 @@ class HeartFChatting:
asyncio.create_task(global_memory_chest.build_running_content(chat_id=self.stream_id))
asyncio.create_task(frequency_control_manager.get_or_create_frequency_control(self.stream_id).trigger_frequency_adjust())
# 添加curious检测任务 - 检测聊天记录中的矛盾、冲突或需要提问的内容
asyncio.create_task(check_and_make_question(self.stream_id, recent_messages_list))
cycle_timers, thinking_id = self.start_cycle()
logger.info(f"{self.log_prefix} 开始第{self._cycle_counter}次思考")

View File

@@ -0,0 +1,185 @@
import time
import asyncio
from typing import List, Optional, Tuple
from src.common.logger import get_logger
from src.chat.utils.chat_message_builder import (
get_raw_msg_before_timestamp_with_chat,
build_readable_messages_with_id,
)
from src.llm_models.utils_model import LLMRequest
from src.config.config import model_config, global_config
from src.memory_system.questions import global_conflict_tracker
from src.memory_system.memory_utils import parse_md_json
logger = get_logger("curious")
class CuriousDetector:
"""
好奇心检测器 - 检测聊天记录中的矛盾冲突或需要提问的内容
"""
def __init__(self, chat_id: str):
self.chat_id = chat_id
self.llm_request = LLMRequest(
model_set=model_config.model_task_config.utils,
request_type="curious_detector",
)
async def detect_questions(self, recent_messages: List) -> Optional[str]:
"""
检测最近消息中是否有需要提问的内容
Args:
recent_messages: 最近的消息列表
Returns:
Optional[str]: 如果检测到需要提问的内容返回问题文本否则返回None
"""
try:
if not recent_messages or len(recent_messages) < 2:
return None
# 构建聊天内容
chat_content_block, _ = build_readable_messages_with_id(
messages=recent_messages,
timestamp_mode="normal_no_YMD",
read_mark=0.0,
truncate=True,
show_actions=True,
)
# 检查是否已经有问题在跟踪中
existing_questions = global_conflict_tracker.get_questions_by_chat_id(self.chat_id)
if len(existing_questions) > 0:
logger.debug(f"当前已有{len(existing_questions)}个问题在跟踪中,跳过检测")
return None
# 构建检测提示词
prompt = f"""你是一个严谨的聊天内容分析器。请分析以下聊天记录,检测是否存在需要提问的内容。
检测条件
1. 聊天中存在逻辑矛盾或冲突的信息
2. 有人反对或否定之前提出的信息
3. 存在观点不一致的情况
4. 有模糊不清或需要澄清的概念
5. 有人提出了质疑或反驳
**重要限制**
- 忽略涉及违法暴力色情政治等敏感话题的内容
- 不要对敏感话题提问
- 只有在确实存在矛盾或冲突时才提问
- 如果聊天内容正常没有矛盾请输出NO
**聊天记录**
{chat_content_block}
请分析上述聊天记录如果发现需要提问的内容请用JSON格式输出
```json
{{
"question": "具体的问题描述,要完整描述涉及的概念和问题",
"reason": "为什么需要提问这个问题的理由"
}}
```
如果没有需要提问的内容请只输出NO"""
if global_config.debug.show_prompt:
logger.info(f"好奇心检测提示词: {prompt}")
else:
logger.debug("已发送好奇心检测提示词")
result_text, _ = await self.llm_request.generate_response_async(prompt, temperature=0.3)
if not result_text:
return None
result_text = result_text.strip()
# 检查是否输出NO
if result_text.upper() == "NO":
logger.debug("未检测到需要提问的内容")
return None
# 尝试解析JSON
try:
questions, reasoning = parse_md_json(result_text)
if questions and len(questions) > 0:
question_data = questions[0]
question = question_data.get("question", "")
reason = question_data.get("reason", "")
if question and question.strip():
logger.info(f"检测到需要提问的内容: {question}")
logger.info(f"提问理由: {reason}")
return question
except Exception as e:
logger.warning(f"解析问题JSON失败: {e}")
logger.debug(f"原始响应: {result_text}")
return None
except Exception as e:
logger.error(f"好奇心检测失败: {e}")
return None
async def make_question_from_detection(self, question: str, context: str = "") -> bool:
"""
将检测到的问题记录到冲突追踪器中
Args:
question: 检测到的问题
context: 问题上下文
Returns:
bool: 是否成功记录
"""
try:
if not question or not question.strip():
return False
# 记录问题到冲突追踪器(start_following=False,不立即开始跟踪)
await global_conflict_tracker.track_conflict(
question=question.strip(),
context=context,
start_following=False,
chat_id=self.chat_id
)
logger.info(f"已记录问题到冲突追踪器: {question}")
return True
except Exception as e:
logger.error(f"记录问题失败: {e}")
return False
async def check_and_make_question(chat_id: str, recent_messages: List) -> bool:
"""
检查聊天记录并生成问题如果检测到需要提问的内容
Args:
chat_id: 聊天ID
recent_messages: 最近的消息列表
Returns:
bool: 是否检测到并记录了问题
"""
try:
detector = CuriousDetector(chat_id)
# 检测是否需要提问
question = await detector.detect_questions(recent_messages)
if question:
# 记录问题
success = await detector.make_question_from_detection(question)
if success:
logger.info(f"成功检测并记录问题: {question}")
return True
return False
except Exception as e:
logger.error(f"检查并生成问题失败: {e}")
return False
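For reference, a sketch of calling the new module's entry point directly; in the commit it is scheduled with `asyncio.create_task` from `HeartFChatting` (first file above). The stream id and the empty message list are placeholders for illustration, and an empty list simply makes the detector return early without an LLM call:

```python
import asyncio
from src.memory_system.curious import check_and_make_question

async def main() -> None:
    # In production, recent_messages comes from the chat message store.
    recorded = await check_and_make_question("stream-123", [])
    print("question recorded:", recorded)

asyncio.run(main())
```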

View File

@@ -1,14 +1,25 @@
from typing import Optional, Type
from typing import Optional, Type, TYPE_CHECKING
from src.plugin_system.base.base_tool import BaseTool
from src.plugin_system.base.component_types import ComponentType
from src.common.logger import get_logger
if TYPE_CHECKING:
from src.chat.message_receive.chat_stream import ChatStream
logger = get_logger("tool_api")
def get_tool_instance(tool_name: str) -> Optional[BaseTool]:
"""获取公开工具实例"""
def get_tool_instance(tool_name: str, chat_stream: Optional["ChatStream"] = None) -> Optional[BaseTool]:
"""获取公开工具实例
Args:
tool_name: 工具名称
chat_stream: 聊天流对象用于传递聊天上下文信息
Returns:
Optional[BaseTool]: 工具实例如果未找到则返回None
"""
from src.plugin_system.core import component_registry
# 获取插件配置
@@ -19,7 +30,7 @@ def get_tool_instance(tool_name: str) -> Optional[BaseTool]:
plugin_config = None
tool_class: Type[BaseTool] = component_registry.get_component_class(tool_name, ComponentType.TOOL) # type: ignore
return tool_class(plugin_config) if tool_class else None
return tool_class(plugin_config, chat_stream) if tool_class else None
def get_llm_available_tool_definitions():
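With this change a caller can thread the current `ChatStream` through to the tool instance. A small usage sketch, assuming the module lives at `src.plugin_system.apis.tool_api` (inferred from the logger name) and that a `chat_stream` object is already in hand, as `ToolExecutor` does below:

```python
from src.plugin_system.apis.tool_api import get_tool_instance

def run_tool_with_context(chat_stream) -> None:
    # Passing chat_stream lets the tool read chat_id/platform in __init__;
    # omitting it preserves the old behaviour (chat_id will be None).
    tool = get_tool_instance("get_memory", chat_stream)
    if tool is not None:
        print(tool.chat_id, tool.platform)
```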

View File

@@ -1,10 +1,13 @@
from abc import ABC, abstractmethod
from typing import Any, List, Optional, Tuple
from typing import Any, List, Optional, Tuple, TYPE_CHECKING
from rich.traceback import install
from src.common.logger import get_logger
from src.plugin_system.base.component_types import ComponentType, ToolInfo, ToolParamType
if TYPE_CHECKING:
from src.chat.message_receive.chat_stream import ChatStream
install(extra_lines=3)
logger = get_logger("base_tool")
@@ -29,8 +32,23 @@ class BaseTool(ABC):
available_for_llm: bool = False
"""是否可供LLM使用"""
def __init__(self, plugin_config: Optional[dict] = None):
def __init__(self, plugin_config: Optional[dict] = None, chat_stream: Optional["ChatStream"] = None):
"""初始化工具基类
Args:
plugin_config: 插件配置字典
chat_stream: 聊天流对象用于获取聊天上下文信息
"""
self.plugin_config = plugin_config or {} # 直接存储插件配置字典
# =============================================================================
# 便捷属性 - 直接在初始化时获取常用聊天信息与BaseAction保持一致
# =============================================================================
# 获取聊天流对象
self.chat_stream = chat_stream
self.chat_id = self.chat_stream.stream_id if self.chat_stream else None
self.platform = getattr(self.chat_stream, "platform", None) if self.chat_stream else None
@classmethod
def get_tool_definition(cls) -> dict[str, Any]:
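Because `chat_stream` is optional and defaults to None, existing tools keep working unchanged, while new tools can read `self.chat_id` directly. A minimal subclass sketch; the tool name and behaviour are made up for illustration:

```python
from typing import Any
from src.plugin_system.base.base_tool import BaseTool

class EchoChatIdTool(BaseTool):
    """Hypothetical tool that just reports which chat it was invoked from."""
    name = "echo_chat_id"
    description = "返回当前聊天的chat_id"
    parameters = []
    available_for_llm = False

    async def execute(self, function_args: dict[str, Any]) -> dict[str, Any]:
        # self.chat_id is None when the tool was constructed without a ChatStream
        return {"content": f"chat_id: {self.chat_id}, platform: {self.platform}"}
```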

View File

@@ -223,7 +223,7 @@ class ToolExecutor:
function_args["llm_called"] = True # 标记为LLM调用
# 获取对应工具实例
tool_instance = tool_instance or get_tool_instance(function_name)
tool_instance = tool_instance or get_tool_instance(function_name, self.chat_stream)
if not tool_instance:
logger.warning(f"未知工具名称: {function_name}")
return None

View File

@@ -1,34 +0,0 @@
{
"manifest_version": 1,
"name": "MaiCurious插件 (MaiCurious Actions)",
"version": "1.0.0",
"description": "可以好奇",
"author": {
"name": "SengokuCola",
"url": "https://github.com/MaiM-with-u"
},
"license": "GPL-v3.0-or-later",
"host_application": {
"min_version": "0.11.0"
},
"homepage_url": "https://github.com/MaiM-with-u/maibot",
"repository_url": "https://github.com/MaiM-with-u/maibot",
"keywords": ["curious", "action", "built-in"],
"categories": ["Deep Think"],
"default_locale": "zh-CN",
"locales_path": "_locales",
"plugin_info": {
"is_built_in": true,
"plugin_type": "action_provider",
"components": [
{
"type": "action",
"name": "maicurious",
"description": "发送好奇"
}
]
}
}

View File

@@ -1,118 +0,0 @@
from typing import List, Tuple, Type, Any
# 导入新插件系统
from src.plugin_system import BasePlugin, register_plugin, ComponentInfo
from src.plugin_system.base.config_types import ConfigField
from src.person_info.person_info import Person
from src.plugin_system.base.base_tool import BaseTool, ToolParamType
# 导入依赖的系统组件
from src.common.logger import get_logger
from src.plugins.built_in.relation.relation import BuildRelationAction
from src.plugin_system.apis import llm_api
from src.plugin_system.base.base_action import BaseAction
from src.plugin_system.base.component_types import ActionActivationType
from src.plugin_system.apis import config_api
from src.plugin_system.apis import frequency_api
from src.plugin_system.apis import generator_api
from src.memory_system.questions import global_conflict_tracker
logger = get_logger("question_actions")
class CuriousAction(BaseAction):
"""频率调节动作 - 调整聊天发言频率"""
activation_type = ActionActivationType.ALWAYS
parallel_action = True
# 动作基本信息
action_name = "make_question"
action_description = "提出一个问题,当有人反驳你的观点,或其他人之间有观点冲突时使用"
# 动作参数定义
action_parameters = {
"question": "对存在疑问的信息提出一个问题,描述全面,完整的描述涉及的概念和问题",
}
action_require = [
f"当聊天记录中的概念存在逻辑上的矛盾时使用",
f"当有人反对或否定你提出的信息时使用",
f"或当你对现有的概念或事物存在疑问时使用",
f"有人认为你的观点是错误的请选择question动作",
f"有人与你观点不一致请选择question动作",
f"请忽略涉及违法,暴力,色情,政治等敏感话题的内容,不要对敏感话题提问",
]
# 关联类型
associated_types = ["text"]
async def execute(self) -> Tuple[bool, str]:
"""执行频率调节动作"""
try:
if len(global_conflict_tracker.question_tracker_list) > 1:
return False, "当前已有问题请先解答完再提问不要再使用make_question动作"
question = self.action_data.get("question", "")
# 存储问题到冲突追踪器
if question:
await global_conflict_tracker.record_conflict(conflict_content=question, start_following=True,chat_id=self.chat_id)
logger.info(f"已存储问题到冲突追踪器: {question}")
await self.store_action_info(
action_build_into_prompt=True,
action_prompt_display=f"你产生了一个问题:{question}",
action_done=True,
)
return True, f"问题{question}已记录,不要重复提问该问题"
except Exception as e:
error_msg = f"问题生成失败: {str(e)}"
logger.error(f"{self.log_prefix} {error_msg}", exc_info=True)
await self.send_text("问题生成失败")
return False, error_msg
@register_plugin
class CuriousPlugin(BasePlugin):
"""关系动作插件
系统内置插件提供基础的聊天交互功能
- Reply: 回复动作
- NoReply: 不回复动作
- Emoji: 表情动作
注意插件基本信息优先从_manifest.json文件中读取
"""
# 插件基本信息
plugin_name: str = "maicurious" # 内部标识符
enable_plugin: bool = True
dependencies: list[str] = [] # 插件依赖列表
python_dependencies: list[str] = [] # Python包依赖列表
config_file_name: str = "config.toml"
# 配置节描述
config_section_descriptions = {
"plugin": "插件启用配置",
"components": "核心组件启用配置",
}
# 配置Schema定义
config_schema: dict = {
"plugin": {
"enabled": ConfigField(type=bool, default=True, description="是否启用插件"),
"config_version": ConfigField(type=str, default="3.0.0", description="配置文件版本"),
}
}
def get_plugin_components(self) -> List[Tuple[ComponentInfo, Type]]:
"""返回插件包含的组件列表"""
# --- 根据配置注册组件 ---
components = []
components.append((CuriousAction.get_action_info(), CuriousAction))
return components

View File

@@ -1,4 +1,6 @@
from typing import Tuple
import asyncio
from datetime import datetime
from src.common.logger import get_logger
from src.config.config import global_config
@@ -8,22 +10,73 @@ from src.plugin_system import BaseAction, ActionActivationType
from src.chat.utils.utils import cut_key_words
from src.memory_system.Memory_chest import global_memory_chest
from src.plugin_system.base.base_tool import BaseTool
from src.plugin_system.apis.message_api import get_messages_by_time_in_chat, build_readable_messages
from src.llm_models.utils_model import LLMRequest
from src.config.config import model_config
from typing import Any
logger = get_logger("memory")
def parse_datetime_to_timestamp(value: str) -> float:
"""
接受多种常见格式并转换为时间戳
支持示例
- 2025-09-29
- 2025-09-29 00:00:00
- 2025/09/29 00:00
- 2025-09-29T00:00:00
"""
value = value.strip()
fmts = [
"%Y-%m-%d %H:%M:%S",
"%Y-%m-%d %H:%M",
"%Y/%m/%d %H:%M:%S",
"%Y/%m/%d %H:%M",
"%Y-%m-%d",
"%Y/%m/%d",
"%Y-%m-%dT%H:%M:%S",
"%Y-%m-%dT%H:%M",
]
last_err = None
for fmt in fmts:
try:
dt = datetime.strptime(value, fmt)
return dt.timestamp()
except Exception as e:
last_err = e
raise ValueError(f"无法解析时间: {value} ({last_err})")
def parse_time_range(time_range: str) -> tuple[float, float]:
"""
解析时间范围字符串返回开始和结束时间戳
格式: "YYYY-MM-DD HH:MM:SS - YYYY-MM-DD HH:MM:SS"
"""
if " - " not in time_range:
raise ValueError("时间范围格式错误,应使用 ' - ' 分隔开始和结束时间")
start_str, end_str = time_range.split(" - ", 1)
start_timestamp = parse_datetime_to_timestamp(start_str.strip())
end_timestamp = parse_datetime_to_timestamp(end_str.strip())
if start_timestamp > end_timestamp:
raise ValueError("开始时间不能晚于结束时间")
return start_timestamp, end_timestamp
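A quick usage sketch of the two new parsing helpers; the sample strings follow the formats listed in their docstrings, and both helpers raise ValueError on input they cannot parse (the tool code below wraps the calls in try/except):

```python
ts = parse_datetime_to_timestamp("2025-09-29 00:00")      # -> float timestamp
start, end = parse_time_range("2025-09-29 - 2025-10-01")  # -> (float, float)
assert start <= end
```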
class GetMemoryTool(BaseTool):
"""获取用户信息"""
name = "get_memory"
description = "在记忆中搜索,获取某个问题的答案"
description = "在记忆中搜索,获取某个问题的答案,可以指定搜索的时间范围或时间点"
parameters = [
("question", ToolParamType.STRING, "需要获取答案的问题", True, None)
("question", ToolParamType.STRING, "需要获取答案的问题", True, None),
("time_point", ToolParamType.STRING, "需要获取记忆的时间点格式为YYYY-MM-DD HH:MM:SS", False, None),
("time_range", ToolParamType.STRING, "需要获取记忆的时间范围格式为YYYY-MM-DD HH:MM:SS - YYYY-MM-DD HH:MM:SS", False, None)
]
available_for_llm = True
async def execute(self, function_args: dict[str, Any]) -> dict[str, Any]:
"""执行比较两个数的大小
"""执行记忆搜索
Args:
function_args: 工具参数
@@ -32,12 +85,141 @@ class GetMemoryTool(BaseTool):
dict: 工具执行结果
"""
question: str = function_args.get("question") # type: ignore
time_point: str = function_args.get("time_point") # type: ignore
time_range: str = function_args.get("time_range") # type: ignore
answer = await global_memory_chest.get_answer_by_question(question=question)
if not answer:
return {"content": f"问题:{question},没有找到相关记忆"}
# 检查是否指定了时间参数
has_time_params = bool(time_point or time_range)
return {"content": f"问题:{question},答案:{answer}"}
if has_time_params and not self.chat_id:
return {"content": f"问题:{question}无法获取聊天记录缺少chat_id"}
# 创建并行任务
tasks = []
# 原任务:从记忆仓库获取答案
memory_task = asyncio.create_task(
global_memory_chest.get_answer_by_question(question=question)
)
tasks.append(("memory", memory_task))
# 新任务:从聊天记录获取答案(如果指定了时间参数)
chat_task = None
if has_time_params:
chat_task = asyncio.create_task(
self._get_answer_from_chat_history(question, time_point, time_range)
)
tasks.append(("chat", chat_task))
# 等待所有任务完成
results = {}
for task_name, task in tasks:
try:
results[task_name] = await task
except Exception as e:
logger.error(f"任务 {task_name} 执行失败: {e}")
results[task_name] = None
# 处理结果
memory_answer = results.get("memory")
chat_answer = results.get("chat")
# 构建返回内容
content_parts = [f"问题:{question}"]
if memory_answer:
content_parts.append(f"记忆仓库答案:{memory_answer}")
else:
content_parts.append("记忆仓库:没有找到相关记忆")
if chat_answer:
content_parts.append(f"聊天记录答案:{chat_answer}")
elif has_time_params:
content_parts.append("聊天记录:没有找到相关记录")
return {"content": "\n".join(content_parts)}
async def _get_answer_from_chat_history(self, question: str, time_point: str = None, time_range: str = None) -> str:
"""从聊天记录中获取问题的答案"""
try:
# 确定时间范围
if time_point:
# 时间点搜索前后25条记录
target_timestamp = parse_datetime_to_timestamp(time_point)
# 获取前后各25条记录总共50条
messages_before = get_messages_by_time_in_chat(
chat_id=self.chat_id,
start_time=0,
end_time=target_timestamp,
limit=25,
limit_mode="latest"
)
messages_after = get_messages_by_time_in_chat(
chat_id=self.chat_id,
start_time=target_timestamp,
end_time=float('inf'),
limit=25,
limit_mode="earliest"
)
messages = messages_before + messages_after
elif time_range:
# 时间范围搜索范围内最多50条记录
start_timestamp, end_timestamp = parse_time_range(time_range)
messages = get_messages_by_time_in_chat(
chat_id=self.chat_id,
start_time=start_timestamp,
end_time=end_timestamp,
limit=50,
limit_mode="latest"
)
else:
return "未指定时间参数"
if not messages:
return "没有找到相关聊天记录"
# 将消息转换为可读格式
chat_content = build_readable_messages(messages, timestamp_mode="relative")
if not chat_content.strip():
return "聊天记录为空"
# 使用LLM分析聊天内容并回答问题
try:
llm_request = LLMRequest(
model_set=model_config.model_task_config.utils_small,
request_type="chat_history_analysis"
)
analysis_prompt = f"""请根据以下聊天记录内容,回答用户的问题。
聊天记录
{chat_content}
用户问题{question}
请仔细分析聊天记录提取与问题相关的信息并给出准确的答案如果聊天记录中没有相关信息请说明"聊天记录中没有找到相关信息"
答案"""
response, (reasoning, model_name, tool_calls) = await llm_request.generate_response_async(
prompt=analysis_prompt,
temperature=0.3,
max_tokens=500
)
return f"基于聊天记录分析:{response}"
except Exception as llm_error:
logger.error(f"LLM分析聊天记录失败: {llm_error}")
# 如果LLM分析失败返回聊天内容的摘要
if len(chat_content) > 300:
chat_content = chat_content[:300] + "..."
return f"聊天记录摘要:{chat_content}"
except Exception as e:
logger.error(f"从聊天记录获取答案失败: {e}")
return f"聊天记录分析失败: {str(e)}"
class GetMemoryAction(BaseAction):
"""关系动作 - 获取记忆"""
@@ -45,7 +227,7 @@ class GetMemoryAction(BaseAction):
activation_type = ActionActivationType.LLM_JUDGE
parallel_action = True
# 动作基本信息
# 动作基本信息
action_name = "get_memory"
action_description = (
"在记忆中搜寻某个问题的答案"
@@ -60,7 +242,7 @@ class GetMemoryAction(BaseAction):
action_require = [
"在记忆中搜寻某个问题的答案",
"有你不了解的概念",
"有人提问关于过去的事情"
"有人提问关于过去的事情",
"你需要根据记忆回答某个问题",
]