# mirror of https://github.com/Mai-with-u/MaiBot.git
from .observation import ChattingObservation
from src.plugins.models.utils_model import LLMRequest
from src.config.config import global_config
import time
import traceback
from src.common.logger_manager import get_logger
from src.individuality.individuality import Individuality
import random
from ..plugins.utils.prompt_builder import Prompt, global_prompt_manager
from src.do_tool.tool_use import ToolUser
from src.plugins.utils.json_utils import safe_json_dumps, process_llm_tool_calls
from src.heart_flow.chat_state_info import ChatStateInfo
from src.plugins.chat.chat_stream import chat_manager
from src.plugins.heartFC_chat.heartFC_Cycleinfo import CycleInfo
import difflib
from src.plugins.person_info.relationship_manager import relationship_manager
from src.plugins.memory_system.Hippocampus import HippocampusManager
import jieba
from src.heart_flow.sub_mind import SubMind

logger = get_logger("tool_use")


def init_prompt():
    # ... existing code ...

    # Prompt template for the tool executor
    tool_executor_prompt = """
你是一个专门执行工具的助手。你的名字是{bot_name}。现在是{time_now}。

你要在群聊中扮演以下角色:
{prompt_personality}

你当前的额外信息:
{extra_info}

你的心情是:{mood_info}

{relation_prompt}

群里正在进行的聊天内容:
{chat_observe_info}

请仔细分析聊天内容,考虑以下几点:
1. 内容中是否包含需要查询信息的问题
2. 是否需要执行特定操作
3. 是否有明确的工具使用指令
4. 考虑用户与你的关系以及当前的对话氛围

如果需要使用工具,请直接调用相应的工具函数。如果不需要使用工具,请简单输出"无需使用工具"。
尽量只在确实必要时才使用工具。
"""
    Prompt(tool_executor_prompt, "tool_executor_prompt")


class ToolExecutor:
    def __init__(self, subheartflow_id: str):
        self.subheartflow_id = subheartflow_id
        self.log_prefix = f"[{subheartflow_id}:ToolExecutor] "
        self.llm_model = LLMRequest(
            model=global_config.llm_summary,  # dedicated model for the tool executor
            # temperature=global_config.llm_summary["temp"],
            # max_tokens=800,
            request_type="tool_execution",
        )
        self.structured_info = []

    async def execute_tools(
        self,
        sub_mind: SubMind,
        chat_target_name="对方",
        is_group_chat=False,
        return_details=False,
        cycle_info=None,
    ):
        """
        Execute the tool calls requested by the LLM and return structured information.

        Args:
            sub_mind: The SubMind instance whose observations are used as context.
            chat_target_name: Display name of the chat target; defaults to "对方".
            is_group_chat: Whether this is a group chat; defaults to False.
            return_details: Whether to return detailed information; defaults to False.
            cycle_info: Optional CycleInfo object used to record detailed execution info.

        Returns:
            If return_details is False:
                List[Dict]: structured information produced by the tool calls.
            If return_details is True:
                Tuple[List[Dict], List[str], str]: (tool results, names of tools used,
                the tool-execution prompt).
        """
        # Initialize the tool set
        tool_instance = ToolUser()
        tools = tool_instance._define_tools()

        observation: ChattingObservation = sub_mind.observations[0] if sub_mind.observations else None
        if observation is None:
            logger.warning(f"{self.log_prefix}没有可用的观察信息,跳过工具执行")
            return ([], [], "") if return_details else []

        # Get the observed chat content
        chat_observe_info = observation.get_observe_info()
        person_list = observation.person_list

        # Extra structured info carried by the sub mind
        extra_structured_info = sub_mind.structured_info_str

        # Build relationship info
        relation_prompt = "【关系信息】\n"
        for person in person_list:
            relation_prompt += await relationship_manager.build_relationship_info(person, is_id=True)

        # Get personality info
        individuality = Individuality.get_instance()
        prompt_personality = individuality.get_prompt(x_person=2, level=2)

        # Get mood info
        mood_info = observation.chat_state.mood if hasattr(observation, "chat_state") else ""

        # Get the current time
        time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())

        # Build the prompt dedicated to tool calling
        prompt = await global_prompt_manager.format_prompt(
            "tool_executor_prompt",
            extra_info=extra_structured_info,
            chat_observe_info=chat_observe_info,
            chat_target_name=chat_target_name,
            is_group_chat=is_group_chat,
            relation_prompt=relation_prompt,
            prompt_personality=prompt_personality,
            mood_info=mood_info,
            bot_name=individuality.name,
            time_now=time_now,
        )

        # If cycle_info is provided, record the tool-execution prompt
        if cycle_info:
            cycle_info.set_tooluse_info(prompt=prompt)

        # Call the LLM, focusing on tool use
        logger.info(f"开始执行工具调用: {prompt}")
        response, _, tool_calls = await self.llm_model.generate_response_tool_async(prompt=prompt, tools=tools)

        logger.debug(f"获取到工具原始输出:\n{tool_calls}")

        # Process tool calls and collect results, similar to the logic in SubMind
        new_structured_items = []
        used_tools = []  # Names of the tools that were actually used

        if tool_calls:
            success, valid_tool_calls, error_msg = process_llm_tool_calls(tool_calls)
            if success and valid_tool_calls:
                for tool_call in valid_tool_calls:
                    try:
                        # Record the name of the tool being used
                        tool_name = tool_call.get("name", "unknown_tool")
                        used_tools.append(tool_name)

                        result = await tool_instance._execute_tool_call(tool_call)

                        if result:
                            name = result.get("type", "unknown_type")
                            content = result.get("content", "")
                            logger.info(f"工具{name},获得信息:{content}")

                            new_item = {
                                "type": name,
                                "id": result.get("id", f"tool_exec_{time.time()}"),
                                "content": content,
                                "ttl": 3,
                            }
                            new_structured_items.append(new_item)
                    except Exception as e:
                        logger.error(f"{self.log_prefix}工具执行失败: {e}")
                        logger.error(traceback.format_exc())

        # If cycle_info is provided, record the tool-execution results
        if cycle_info:
            cycle_info.set_tooluse_info(tools_used=used_tools, tool_results=new_structured_items)

        # Decide the return value based on return_details
        if return_details:
            return new_structured_items, used_tools, prompt
        else:
            return new_structured_items


init_prompt()
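

# Usage sketch: one way this executor might be driven by surrounding code.
# It assumes `sub_mind` is an already-constructed SubMind whose observations have
# been populated by the heart-flow layer; the id "example_chat" is a hypothetical
# placeholder, not a value taken from the project.
async def _example_run_tool_executor(sub_mind: SubMind):
    executor = ToolExecutor(subheartflow_id="example_chat")
    # With return_details=True the call also yields the tool names and the prompt sent to the LLM.
    results, used_tools, prompt = await executor.execute_tools(
        sub_mind,
        is_group_chat=True,
        return_details=True,
    )
    for item in results:
        logger.info(f"{item['type']} -> {item['content']}")
    return results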