死亡性的试图加入心流 爆炸概率90%

pull/876/head
114514 2025-04-28 16:17:51 +08:00
parent 166c85a908
commit f81c31fabf
7 changed files with 571 additions and 206 deletions

View File

@ -1,7 +1,10 @@
# PFC/action_planner.py
import traceback
import time
from typing import Tuple, Optional # 增加了 Optional
from typing import Tuple, Optional, Dict, Any # 增加了 Optional, Dict, Any
from src.common.logger_manager import get_logger
from ..models.utils_model import LLMRequest
# from ..models.utils_model import LLMRequest # Assuming LLMRequest is needed, ensure import path is correct
from src.common.utils_llm import LLMRequest # Using updated common location assumption
from ...config.config import global_config
from .chat_observer import ChatObserver
from .pfc_utils import get_items_from_json
@ -21,6 +24,8 @@ PROMPT_INITIAL_REPLY = """{persona_text}。现在你在参与一场QQ私聊
当前对话目标
{goals_str}
你现在的想法
{pfc_heartflow}
最近行动历史概要
{action_history_summary}
@ -43,23 +48,25 @@ block_and_ignore: 更加极端的结束对话方式,直接结束对话并在
请以JSON格式输出你的决策
{{
"action": "选择的行动类型 (必须是上面列表中的一个)",
"reason": "选择该行动的详细原因 (必须有解释你是如何根据“上一次行动结果”、“对话记录”和自身设定人设做出合理判断的)"
"reason": "选择该行动的详细原因 (必须有解释你是如何根据“上一次行动结果”、“对话记录”、你的想法和自身设定人设做出合理判断的)"
}}
注意请严格按照JSON格式输出不要包含任何其他内容"""
注意请严格按照JSON格式输出不要包含任何其他内容""" # Updated reason instruction
# Prompt(2): 上一次成功回复后,决定继续发言时的决策 Prompt
PROMPT_FOLLOW_UP = """{persona_text}。现在你在参与一场QQ私聊刚刚你已经回复了对方请根据以下【所有信息】审慎且灵活的决策下一步行动可以继续发送新消息可以等待可以倾听可以调取知识甚至可以屏蔽对方
PROMPT_FOLLOW_UP = """{persona_text}。现在你在参与一场QQ私聊刚刚你已经回复了对方请根据以下【所有信息】审慎且灵活的决策下一步行动可以继续发送新消息可以等待可以倾听可以调取知识甚至可以屏蔽对方
当前对话目标
{goals_str}
你现在的想法
{pfc_heartflow}
最近行动历史概要
{action_history_summary}
上一次行动的详细情况和结果
{last_action_context}
时间和超时提示
{time_since_last_bot_message_info}{timeout_context}
{time_since_last_bot_message_info}{timeout_context}
最近的对话记录(包括你已成功发送的消息 新收到的消息)
{chat_history_text}
@ -76,10 +83,10 @@ block_and_ignore: 更加极端的结束对话方式,直接结束对话并在
请以JSON格式输出你的决策
{{
"action": "选择的行动类型 (必须是上面列表中的一个)",
"reason": "选择该行动的详细原因 (必须有解释你是如何根据“上一次行动结果”、“对话记录”和自身设定人设做出合理判断的。请说明你为什么选择继续发言而不是等待,以及打算发送什么类型的新消息连续发言,必须记录已经发言了几次)"
"reason": "选择该行动的详细原因 (必须有解释你是如何根据“上一次行动结果”、“对话记录”、你的想法和自身设定人设做出合理判断的。请说明你为什么选择继续发言而不是等待,以及打算发送什么类型的新消息连续发言,必须记录已经发言了几次)"
}}
注意请严格按照JSON格式输出不要包含任何其他内容"""
注意请严格按照JSON格式输出不要包含任何其他内容""" # Updated reason instruction
# ActionPlanner 类定义,顶格
@ -87,70 +94,80 @@ class ActionPlanner:
"""行动规划器"""
def __init__(self, stream_id: str):
self.llm = LLMRequest(
model=global_config.llm_PFC_action_planner,
temperature=global_config.llm_PFC_action_planner["temp"],
max_tokens=1500,
request_type="action_planning",
)
self.personality_info = Individuality.get_instance().get_prompt(type="personality", x_person=2, level=3)
self.identity_detail_info = Individuality.get_instance().get_prompt(type="identity", x_person=2, level=2)
# Ensure correct LLM config path/structure
try:
self.llm = LLMRequest(
model=global_config.llm_PFC_action_planner,
temperature=global_config.llm_PFC_action_planner.get("temp", 0.7), # Use .get for safety
max_tokens=global_config.llm_PFC_action_planner.get("max_tokens", 1500), # Use .get for safety
request_type="action_planning",
)
except AttributeError:
logger.error("Config error: llm_PFC_action_planner not found or missing keys ('temp'/'max_tokens'). Using fallback.")
# Fallback or raise error
self.llm = LLMRequest(model=global_config.llm_normal, temperature=0.7, max_tokens=1000, request_type="action_planning_fallback")
# Load personality/identity prompts
self.individuality = Individuality.get_instance() # Store instance
self.personality_info = self.individuality.get_prompt(type="personality", x_person=2, level=3)
self.identity_detail_info = self.individuality.get_prompt(type="identity", x_person=2, level=2)
self.name = global_config.BOT_NICKNAME
self.chat_observer = ChatObserver.get_instance(stream_id)
# self.action_planner_info = ActionPlannerInfo() # 移除未使用的变量
# 修改 plan 方法签名,增加 last_successful_reply_action 参数
# 修改 plan 方法签名,增加 pfc_heartflow 参数
async def plan(
self,
observation_info: ObservationInfo,
conversation_info: ConversationInfo,
last_successful_reply_action: Optional[str],
pfc_heartflow: Optional[str], # <--- 新增参数
) -> Tuple[str, str]:
"""规划下一步行动
Args:
observation_info: 决策信息
conversation_info: 对话信息
last_successful_reply_action: 上一次成功的回复动作类型 ('direct_reply' 'send_new_message' None)
last_successful_reply_action: 上一次成功的回复动作类型
pfc_heartflow: 当前的心流文本 # <--- 新增参数说明
Returns:
Tuple[str, str]: (行动类型, 行动原因)
"""
# --- 获取 Bot 上次发言时间信息 ---
# (这部分逻辑不变)
time_since_last_bot_message_info = ""
try:
bot_id = str(global_config.BOT_QQ)
if hasattr(observation_info, "chat_history") and observation_info.chat_history:
for i in range(len(observation_info.chat_history) - 1, -1, -1):
msg = observation_info.chat_history[i]
if not isinstance(msg, dict):
continue
sender_info = msg.get("user_info", {})
sender_id = str(sender_info.get("user_id")) if isinstance(sender_info, dict) else None
msg_time = msg.get("time")
if sender_id == bot_id and msg_time:
time_diff = time.time() - msg_time
if time_diff < 60.0:
time_since_last_bot_message_info = (
f"提示:你上一条成功发送的消息是在 {time_diff:.1f} 秒前。\n"
)
break
else:
logger.debug("Observation info chat history is empty or not available for bot time check.")
except AttributeError:
logger.warning("ObservationInfo object might not have chat_history attribute yet for bot time check.")
except Exception as e:
logger.warning(f"获取 Bot 上次发言时间时出错: {e}")
bot_last_speak_time = observation_info.last_bot_speak_time # Use ObservationInfo directly
if bot_last_speak_time:
time_diff = time.time() - bot_last_speak_time
if time_diff < 3600: # Show within an hour
time_since_last_bot_message_info = f"提示:你上一条成功发送的消息是在 {time_diff:.1f} 秒前。\n"
# --- 获取超时提示信息 ---
# (这部分逻辑不变)
timeout_context = ""
<<<<<<< HEAD
# Check for timeout goal added by Waiter
if hasattr(conversation_info, "goal_list") and conversation_info.goal_list:
last_goal_item = conversation_info.goal_list[-1]
goal_text = ""
reason_text = ""
if isinstance(last_goal_item, dict):
goal_text = last_goal_item.get("goal", "")
reason_text = last_goal_item.get("reason", "") # Get reason from dict
elif isinstance(last_goal_item, tuple) and len(last_goal_item) > 0:
goal_text = last_goal_item[0]
if len(last_goal_item) > 1:
reason_text = last_goal_item[1] # Get reason from tuple
# Check if goal indicates a wait timeout
if "分钟,思考接下来要做什么" in goal_text or "对方话说一半消失了" in goal_text:
timeout_context = f"重要提示:检测到等待超时。({reason_text}) 请基于此情况规划下一步。\n"
=======
try:
if hasattr(conversation_info, "goal_list") and conversation_info.goal_list:
last_goal_tuple = conversation_info.goal_list[-1]
if isinstance(last_goal_tuple, tuple) and len(last_goal_tuple) > 0:
last_goal_text = last_goal_tuple[0]
last_goal_dict = conversation_info.goal_list[-1]
if isinstance(last_goal_dict, dict) and "goal" in last_goal_dict:
last_goal_text = last_goal_dict["goal"]
if isinstance(last_goal_text, str) and "分钟,思考接下来要做什么" in last_goal_text:
try:
timeout_minutes_text = last_goal_text.split("")[0].replace("你等待了", "")
@ -163,28 +180,52 @@ class ActionPlanner:
logger.warning("ConversationInfo object might not have goal_list attribute yet for timeout check.")
except Exception as e:
logger.warning(f"检查超时目标时出错: {e}")
>>>>>>> 3cfa1e6b17340f82f2937a2243b8e99030196294
# --- 构建通用 Prompt 参数 ---
logger.debug(f"开始规划行动:当前目标: {getattr(conversation_info, 'goal_list', '不可用')}")
# 构建对话目标 (goals_str)
<<<<<<< HEAD
goals_str = "- 目前没有明确对话目标,请考虑设定一个。\n" # Default
if hasattr(conversation_info, "goal_list") and conversation_info.goal_list:
temp_goals_str = ""
for goal_reason in conversation_info.goal_list:
goal = "目标内容缺失"
reasoning = "没有明确原因"
if isinstance(goal_reason, tuple) and len(goal_reason) > 0:
goal = goal_reason[0]
if len(goal_reason) > 1: reasoning = goal_reason[1]
elif isinstance(goal_reason, dict):
goal = goal_reason.get("goal", "目标内容缺失")
reasoning = goal_reason.get("reason", "没有明确原因") # Use 'reason' key based on Waiter
else:
goal = str(goal_reason)
goal = str(goal) if goal is not None else "目标内容缺失"
reasoning = str(reasoning) if reasoning is not None else "没有明确原因"
temp_goals_str += f"- 目标:{goal}\n 原因:{reasoning}\n"
if temp_goals_str: # Only overwrite default if goals were found
goals_str = temp_goals_str
=======
goals_str = ""
try:
if hasattr(conversation_info, "goal_list") and conversation_info.goal_list:
for goal_reason in conversation_info.goal_list:
if isinstance(goal_reason, tuple) and len(goal_reason) > 0:
goal = goal_reason[0]
reasoning = goal_reason[1] if len(goal_reason) > 1 else "没有明确原因"
elif isinstance(goal_reason, dict):
if isinstance(goal_reason, dict):
goal = goal_reason.get("goal", "目标内容缺失")
reasoning = goal_reason.get("reasoning", "没有明确原因")
else:
goal = str(goal_reason)
reasoning = "没有明确原因"
goal = str(goal) if goal is not None else "目标内容缺失"
reasoning = str(reasoning) if reasoning is not None else "没有明确原因"
goals_str += f"- 目标:{goal}\n 原因:{reasoning}\n"
if not goals_str:
if not goals_str:
goals_str = "- 目前没有明确对话目标,请考虑设定一个。\n"
else:
goals_str = "- 目前没有明确对话目标,请考虑设定一个。\n"
except AttributeError:
logger.warning("ConversationInfo object might not have goal_list attribute yet.")
@ -192,43 +233,37 @@ class ActionPlanner:
except Exception as e:
logger.error(f"构建对话目标字符串时出错: {e}")
goals_str = "- 构建对话目标时出错。\n"
>>>>>>> 3cfa1e6b17340f82f2937a2243b8e99030196294
# 获取聊天历史记录 (chat_history_text)
chat_history_text = ""
try:
if hasattr(observation_info, "chat_history") and observation_info.chat_history:
chat_history_text = observation_info.chat_history_str
if not chat_history_text:
chat_history_text = "还没有聊天记录。\n"
else:
chat_history_text = "还没有聊天记录。\n"
chat_history_text = "还没有聊天记录。\n" # Default
if hasattr(observation_info, 'chat_history_str') and observation_info.chat_history_str:
chat_history_text = observation_info.chat_history_str + "\n" # Ensure newline
if hasattr(observation_info, "new_messages_count") and observation_info.new_messages_count > 0:
if hasattr(observation_info, "unprocessed_messages") and observation_info.unprocessed_messages:
new_messages_list = observation_info.unprocessed_messages
# Append unprocessed messages if any
if hasattr(observation_info, 'new_messages_count') and observation_info.new_messages_count > 0:
if hasattr(observation_info, 'unprocessed_messages') and observation_info.unprocessed_messages:
new_messages_list = observation_info.unprocessed_messages
try:
# Ensure build_readable_messages exists and handles the list format correctly
new_messages_str = await build_readable_messages(
new_messages_list,
replace_bot_name=True,
merge_messages=False,
timestamp_mode="relative",
read_mark=0.0,
read_mark=0.0, # Assuming this param exists
)
chat_history_text += (
f"\n--- 以下是 {observation_info.new_messages_count} 条新消息 ---\n{new_messages_str}"
)
else:
logger.warning(
"ObservationInfo has new_messages_count > 0 but unprocessed_messages is empty or missing."
)
except AttributeError:
logger.warning("ObservationInfo object might be missing expected attributes for chat history.")
chat_history_text = "获取聊天记录时出错。\n"
except Exception as e:
logger.error(f"处理聊天记录时发生未知错误: {e}")
chat_history_text = "处理聊天记录时出错。\n"
chat_history_text += f"\n--- 以下是 {observation_info.new_messages_count} 条新收到的消息 ---\n{new_messages_str}\n"
except Exception as build_err:
logger.error(f"Error building readable messages: {build_err}")
chat_history_text += "\n--- (无法格式化新消息) ---\n"
else:
logger.warning("new_messages_count > 0 but unprocessed_messages is empty/missing.")
chat_history_text += f"\n--- (有 {observation_info.new_messages_count} 条新消息,但无法显示内容) ---\n"
# 构建 Persona 文本 (persona_text)
# (这部分逻辑不变)
# Using stored Individuality instance
identity_details_only = self.identity_detail_info
identity_addon = ""
if isinstance(identity_details_only, str):
@ -245,71 +280,57 @@ class ActionPlanner:
persona_text = f"你的名字是{self.name}{self.personality_info}{identity_addon}"
# 构建行动历史和上一次行动结果 (action_history_summary, last_action_context)
# (这部分逻辑不变)
action_history_summary = "你最近执行的行动历史:\n"
last_action_context = "关于你【上一次尝试】的行动:\n"
action_history_list = []
try:
if hasattr(conversation_info, "done_action") and conversation_info.done_action:
action_history_list = conversation_info.done_action[-5:]
else:
logger.debug("Conversation info done_action is empty or not available.")
except AttributeError:
logger.warning("ConversationInfo object might not have done_action attribute yet.")
except Exception as e:
logger.error(f"访问行动历史时出错: {e}")
if hasattr(conversation_info, "done_action") and conversation_info.done_action:
action_history_list = conversation_info.done_action[-5:] # Get last 5
if not action_history_list:
action_history_summary += "- 还没有执行过行动。\n"
last_action_context += "- 这是你规划的第一个行动。\n"
else:
for i, action_data in enumerate(action_history_list):
action_type = "未知"
plan_reason = "未知"
status = "未知"
# Default values
action_type = "未知行动"
plan_reason = "未知原因"
status = "未知状态"
final_reason = ""
action_time = ""
action_time = "未知时间"
# Check if action_data is a dictionary (new format)
if isinstance(action_data, dict):
action_type = action_data.get("action", "未知")
plan_reason = action_data.get("plan_reason", "未知规划原因")
status = action_data.get("status", "未知")
final_reason = action_data.get("final_reason", "")
action_time = action_data.get("time", "")
elif isinstance(action_data, tuple):
# 假设旧格式兼容
if len(action_data) > 0:
action_type = action_data[0]
if len(action_data) > 1:
plan_reason = action_data[1] # 可能是规划原因或最终原因
if len(action_data) > 2:
status = action_data[2]
if status == "recall" and len(action_data) > 3:
final_reason = action_data[3]
elif status == "done" and action_type in ["direct_reply", "send_new_message"]:
plan_reason = "成功发送" # 简化显示
action_type = action_data.get("action", action_type)
plan_reason = action_data.get("plan_reason", plan_reason)
status = action_data.get("status", status)
final_reason = action_data.get("final_reason", "") # Get final_reason if exists
action_time = action_data.get("time", action_time)
else:
# Handle potential old format or unexpected data gracefully
logger.warning(f"Unexpected action history format: {action_data}")
action_type = str(action_data) # Basic representation
# Build summary line
reason_text = f", 失败/取消原因: {final_reason}" if final_reason else ""
summary_line = f"- 时间:{action_time}, 尝试行动:'{action_type}', 状态:{status}{reason_text}"
action_history_summary += summary_line + "\n"
# Build context for the very last action
if i == len(action_history_list) - 1:
last_action_context += f"- 上次【规划】的行动是: '{action_type}'\n"
last_action_context += f"- 当时规划的【原因】是: {plan_reason}\n"
if status == "done":
last_action_context += "- 该行动已【成功执行】。\n"
# 记录这次成功的行动类型,供下次决策
# self.last_successful_action_type = action_type # 不在这里记录,由 conversation 控制
elif status == "recall":
last_action_context += "- 但该行动最终【未能执行/被取消】。\n"
if final_reason:
last_action_context += f"- 【重要】失败/取消的具体原因是: “{final_reason}\n"
else:
last_action_context += "- 【重要】失败/取消原因未明确记录。\n"
# self.last_successful_action_type = None # 行动失败,清除记录
else:
last_action_context += f"- 该行动当前状态: {status}\n"
# self.last_successful_action_type = None # 非完成状态,清除记录
# Handle other potential statuses like 'start' or unexpected ones
last_action_context += f"- 该行动当前状态: {status} (未完成或状态未知)\n"
# --- 选择 Prompt ---
if last_successful_reply_action in ["direct_reply", "send_new_message"]:
@ -320,36 +341,46 @@ class ActionPlanner:
logger.debug("使用 PROMPT_INITIAL_REPLY (首次/非连续回复决策)")
# --- 格式化最终的 Prompt ---
prompt = prompt_template.format(
persona_text=persona_text,
goals_str=goals_str if goals_str.strip() else "- 目前没有明确对话目标,请考虑设定一个。",
action_history_summary=action_history_summary,
last_action_context=last_action_context,
time_since_last_bot_message_info=time_since_last_bot_message_info,
timeout_context=timeout_context,
chat_history_text=chat_history_text if chat_history_text.strip() else "还没有聊天记录。",
)
# Provide default for heartflow if None or empty
heartflow_for_prompt = pfc_heartflow if pfc_heartflow else "你现在还没有明确的想法,请先思考。"
try:
prompt = prompt_template.format(
persona_text=persona_text,
goals_str=goals_str.strip(), # Remove leading/trailing whitespace
pfc_heartflow=heartflow_for_prompt, # <--- 传入心流
action_history_summary=action_history_summary.strip(),
last_action_context=last_action_context.strip(),
time_since_last_bot_message_info=time_since_last_bot_message_info,
timeout_context=timeout_context,
chat_history_text=chat_history_text.strip(),
)
except KeyError as e:
logger.error(f"格式化行动规划 Prompt 时出错,缺少键: {e}")
# Handle error: maybe return a default action or raise
return "wait", f"内部错误:无法格式化行动规划提示(缺少 {e}"
logger.debug(f"发送到LLM的最终提示词:\n------\n{prompt}\n------")
try:
content, _ = await self.llm.generate_response_async(prompt)
logger.debug(f"LLM原始返回内容: {content}")
# Use default_values in get_items_from_json for robustness
success, result = get_items_from_json(
content,
"action",
"reason",
default_values={"action": "wait", "reason": "LLM返回格式错误或未提供原因默认等待"},
required_types={"action": str, "reason": str} # Add type checking
)
action = result.get("action", "wait")
action = result.get("action", "wait") # Ensure default if key somehow missing after get_items
reason = result.get("reason", "LLM未提供原因默认等待")
# 验证action类型
# 更新 valid_actions 列表以包含 send_new_message
valid_actions = [
"direct_reply",
"send_new_message", # 添加新动作
"send_new_message",
"fetch_knowledge",
"wait",
"listening",
@ -362,10 +393,14 @@ class ActionPlanner:
reason = f"(原始行动'{action}'无效已强制改为wait) {reason}"
action = "wait"
# Sanitize reason (optional, e.g., remove extra quotes)
reason = reason.strip().strip('"')
logger.info(f"规划的行动: {action}")
logger.info(f"行动原因: {reason}")
return action, reason
except Exception as e:
logger.error(f"规划行动时调用 LLM 或处理结果出错: {str(e)}")
return "wait", f"行动规划处理中发生错误,暂时等待: {str(e)}"
logger.error(traceback.format_exc()) # Log full traceback
return "wait", f"行动规划处理中发生错误,暂时等待: {str(e)}"

View File

@ -188,10 +188,7 @@ class Conversation:
goal_ended = False
if hasattr(self.conversation_info, "goal_list") and self.conversation_info.goal_list:
for goal_item in self.conversation_info.goal_list:
current_goal = None
if isinstance(goal_item, tuple) and len(goal_item) > 0:
current_goal = goal_item[0]
elif isinstance(goal_item, dict):
if isinstance(goal_item, dict):
current_goal = goal_item.get("goal")
if current_goal == "结束对话":

View File

@ -1,10 +1,11 @@
from typing import Optional
from typing import Optional, List, Dict, Any
class ConversationInfo:
    """对话状态容器:记录行动历史、目标、知识、记忆与当前心流。"""

    def __init__(self):
        self.done_action: List[Dict[str, Any]] = []  # 已执行的行动记录(字典形式)
        self.goal_list: List[Dict[str, Any]] = []  # 对话目标(假定为字典列表)
        self.knowledge_list: List[Dict[str, Any]] = []  # 已调取的知识
        self.memory_list = []  # 记忆列表(元素类型未知或混合,保持无注解)
        self.last_successful_reply_action: Optional[str] = None  # 上次成功回复的动作类型
        self.current_heartflow: Optional[str] = None  # 当前心流文本

View File

@ -60,16 +60,10 @@ class GoalAnalyzer:
goals_str = ""
if conversation_info.goal_list:
for goal_reason in conversation_info.goal_list:
# 处理字典或元组格式
if isinstance(goal_reason, tuple):
# 假设元组的第一个元素是目标,第二个元素是原因
goal = goal_reason[0]
reasoning = goal_reason[1] if len(goal_reason) > 1 else "没有明确原因"
elif isinstance(goal_reason, dict):
goal = goal_reason.get("goal")
if isinstance(goal_reason, dict):
goal = goal_reason.get("goal", "目标内容缺失")
reasoning = goal_reason.get("reasoning", "没有明确原因")
else:
# 如果是其他类型,尝试转为字符串
goal = str(goal_reason)
reasoning = "没有明确原因"

View File

@ -0,0 +1,266 @@
# PFC/pfc_heartflow.py
import traceback
import time
from typing import Optional, Dict, Any
from src.individuality.individuality import Individuality
from src.common.logger import get_module_logger
from src.common.utils_llm import LLMRequest # Assuming utils_model is renamed or refactored
from ...config.config import global_config # Make sure path is correct
from .observation_info import ObservationInfo
from .conversation_info import ConversationInfo
from maim_message import UserInfo # Assuming maim_message provides UserInfo
from src.plugins.utils.chat_message_builder import build_readable_messages # Make sure path is correct
logger = get_module_logger("pfc_heartflow")
# --- Heartflow Generation Prompts ---
# Prompt (1): Initial Heartflow Generation (When conversation starts or bot receives first message)
PROMPT_INITIAL = """{persona_text}
现在你正在上网{sender_name} ({sender_id}) 刚刚给你发来了一条QQ私聊消息
最近的聊天记录(包括你已成功发送的消息 新收到的消息)
{chat_history_text}
--- 消息结束 ---
请根据以上信息结合你的性格设想一下你收到这条消息时的第一反应和真实想法
要求
- 输出一段连贯自然真实的内心独白
- 像一个真人在思考语气可以随意一些符合你的性格
- 不要分点输出不要包含任何动作描写表情符号引号或括号
- **只输出内心独白内容不要任何额外文字或格式**
"""
# Prompt (2): Heartflow Update After Successfully Sending a Message
PROMPT_AFTER_SEND = """{persona_text}
你正在和 {sender_name} ({sender_id}) QQ私聊
你刚才的想法是{previous_heartflow}
刚刚你成功发送了一条消息给对方"{last_bot_message}"
你们最近的聊天记录(包括你刚发送的消息)
{chat_history_text}
--- 消息结束 ---
现在请结合你刚才的想法你发送的消息以及聊天记录继续思考
- 你的想法可以是对刚才发送内容的补充延伸或者思考对方可能的反应或者计划下一步说什么
- 保持想法的连贯性但也要注意话题的推进不要停留在完全相同的想法上除非你觉得有必要强调
- 输出一段连贯自然真实的内心独白
- 像一个真人在思考语气可以随意一些符合你的性格
- 不要分点输出不要包含任何动作描写表情符号引号或括号
- **只输出内心独白内容不要任何额外文字或格式**
"""
# Prompt (3): Heartflow Update After Reply Check Failed (e.g., message rejected)
PROMPT_AFTER_FAIL = """{persona_text}
你正在和 {sender_name} ({sender_id}) QQ私聊
你之前的想法是{previous_heartflow}
你本来想发送一条消息"{failed_message}"
但是这个想法/回复因为{fail_reason}被你自己否定了/觉得不合适
你们最近的聊天记录
{chat_history_text}
--- 消息结束 ---
现在请结合你之前的想法被否定的消息以及失败原因重新思考
- 你可能会反思为什么刚才的想法不合适或者思考替代的说法或者决定暂时不回复
- 保持想法的连贯性但要根据失败原因调整思路
- 输出一段连贯自然真实的内心独白
- 像一个真人在思考语气可以随意一些符合你的性格
- 不要分点输出不要包含任何动作描写表情符号引号或括号
- **只输出内心独白内容不要任何额外文字或格式**
"""
# Prompt (4): Heartflow Update After Waiting Timeout
PROMPT_AFTER_TIMEOUT = """{persona_text}
你正在和 {sender_name} ({sender_id}) QQ私聊
你之前的想法是{previous_heartflow}
你上次发言是 {time_since_last_bot_speak:.1f} 秒前
你已经等待了对方 {wait_duration:.1f} 分钟没有回应 ({timeout_reason})
你们最近的聊天记录
{chat_history_text}
--- 消息结束 ---
对方长时间没有回复你现在的想法是什么
- 你可能会思考对方为什么没回是在忙吗还是对话结束了
- 你可能会考虑是否要发点什么打破沉默或者就此结束对话
- 结合你之前的想法和等待的情况进行思考
- 输出一段连贯自然真实的内心独白
- 像一个真人在思考语气可以随意一些符合你的性格
- 不要分点输出不要包含任何动作描写表情符号引号或括号
- **只输出内心独白内容不要任何额外文字或格式**
"""
# Prompt (5): Heartflow Update When Rethinking Goal
PROMPT_WHEN_RETHINKING = """{persona_text}
你正在和 {sender_name} ({sender_id}) QQ私聊
你之前的想法是{previous_heartflow}
你觉得现在需要重新思考一下对话的目标或方向了({rethink_reason})
你们最近的聊天记录
{chat_history_text}
--- 消息结束 ---
请结合你之前的想法和需要重新思考目标的原因梳理一下你现在的思路
- 你可能会回顾一下之前的对话思考当前进展如何
- 你可能会考虑开启新的话题或者如何引导对话到你期望的方向
- 输出一段连贯自然真实的内心独白
- 像一个真人在思考语气可以随意一些符合你的性格
- 不要分点输出不要包含任何动作描写表情符号引号或括号
- **只输出内心独白内容不要任何额外文字或格式**
"""
class HeartflowGenerator:
    """心流生成器。

    根据当前情境(首次收到消息 / 发送成功 / 回复被否定 / 等待超时 / 重新思考目标)
    选择对应的 Prompt,填入人设、聊天记录和上一次心流,调用 LLM 生成一段
    第一人称的内心独白文本。
    """

    def __init__(self, stream_id: str):
        self.stream_id = stream_id
        # 这里假设你在 config.py 中定义了名为 'llm_PFC_heartflow' 的新 LLM 配置;
        # 若缺失则回退到 llm_normal,保证构造不失败。
        try:
            self.llm = LLMRequest(
                model=global_config.llm_PFC_heartflow,  # dedicated heartflow model config
                temperature=global_config.llm_PFC_heartflow.get("temp", 0.8),
                max_tokens=global_config.llm_PFC_heartflow.get("max_tokens", 200),  # keep the monologue short
                request_type="heartflow_generation",
            )
        except AttributeError:
            logger.error("*" * 20)
            logger.error("错误:无法找到 'llm_PFC_heartflow' 配置!")
            logger.error("请确保在 config.py 中定义了 llm_PFC_heartflow 的 LLM 配置。")
            logger.error("将使用 llm_normal 作为备用,但这可能不是最佳效果。")
            logger.error("*" * 20)
            # 使用备用配置,但这可能不是最优选择
            self.llm = LLMRequest(
                model=global_config.llm_normal,
                temperature=0.8,
                max_tokens=200,
                request_type="heartflow_generation_fallback",
            )
        self.personality_info = Individuality.get_instance().get_prompt(type="personality", x_person=2, level=3)
        self.identity_detail_info = Individuality.get_instance().get_prompt(type="identity", x_person=2, level=2)
        self.name = global_config.BOT_NICKNAME
        self.bot_id = str(global_config.BOT_QQ)

    async def generate_heartflow(
        self,
        situation: str,  # e.g., "initial", "after_send", "after_fail", "after_timeout", "rethinking"
        observation_info: ObservationInfo,
        conversation_info: ConversationInfo,
        context_data: Optional[Dict[str, Any]] = None,  # situation-specific extras (failed_message, reason, ...)
    ) -> str:
        """根据不同情境生成心流。

        Args:
            situation: 当前情境标识符
            observation_info: 观察信息
            conversation_info: 对话信息
            context_data: 传递特定情境所需的数据 (e.g., {'last_bot_message': '...', 'previous_heartflow': '...'})

        Returns:
            str: 生成的心流文本;失败时返回带括号的占位说明,而不是抛出异常。
        """
        if context_data is None:
            context_data = {}
        logger.info(f"开始生成心流,情境: {situation}")

        # --- 构建通用 Prompt 参数 ---
        # Persona text (character info).
        # NOTE(review): the pronoun/terminator string literals below are empty
        # in this copy of the source (characters apparently lost in transit) —
        # confirm the intended Chinese characters against the original file.
        identity_details_only = self.identity_detail_info
        identity_addon = ""
        if isinstance(identity_details_only, str):
            pronouns = ["", "", ""]
            for p in pronouns:
                # Guard: startswith("") is always True, which would spuriously
                # break out of the loop without trimming anything meaningful.
                if p and identity_details_only.startswith(p):
                    identity_details_only = identity_details_only[len(p):]
                    break
            terminator = ""
            # Guard: endswith("") is always True and would unconditionally chop
            # the last character; only trim when a real terminator is set.
            if terminator and identity_details_only.endswith(terminator):
                identity_details_only = identity_details_only[:-1]
            cleaned_details = identity_details_only.strip(", ")
            if cleaned_details:
                identity_addon = f"并且{cleaned_details}"
        persona_text = f"你的名字是{self.name}{self.personality_info}{identity_addon}"

        # Chat history + identify the other party (not the bot itself).
        chat_history_text = "还没有聊天记录。"
        sender_name = "对方"
        sender_id = "未知"
        if hasattr(observation_info, "chat_history_str") and observation_info.chat_history_str:
            chat_history_text = observation_info.chat_history_str
            last_msg = observation_info.chat_history[-1] if observation_info.chat_history else None
            if isinstance(last_msg, dict):
                user_info_dict = last_msg.get("user_info")
                if isinstance(user_info_dict, dict):
                    user_info = UserInfo.from_dict(user_info_dict)
                    if str(user_info.user_id) != self.bot_id:
                        sender_name = user_info.user_nickname or "对方"
                        sender_id = str(user_info.user_id)

        # Previous heartflow: explicit context override wins, then the one
        # stored on conversation_info, else a neutral default.
        previous_heartflow = context_data.get(
            "previous_heartflow",
            conversation_info.current_heartflow if hasattr(conversation_info, "current_heartflow") else "",
        )
        if not previous_heartflow:
            previous_heartflow = "你之前还没来得及形成具体的想法。"

        # --- 选择并格式化 Prompt ---
        format_params: Dict[str, Any] = {
            "persona_text": persona_text,
            "sender_name": sender_name,
            "sender_id": sender_id,
            "chat_history_text": chat_history_text,
            "previous_heartflow": previous_heartflow,
            "bot_name": self.name,
        }
        if situation == "initial":
            prompt_template = PROMPT_INITIAL
        elif situation == "after_send":
            prompt_template = PROMPT_AFTER_SEND
            format_params["last_bot_message"] = context_data.get("last_bot_message", "(未能获取到刚发送的消息)")
        elif situation == "after_fail":
            prompt_template = PROMPT_AFTER_FAIL
            format_params["failed_message"] = context_data.get("failed_message", "(未能获取到失败的消息)")
            format_params["fail_reason"] = context_data.get("fail_reason", "(未知原因)")
        elif situation == "after_timeout":
            prompt_template = PROMPT_AFTER_TIMEOUT
            now = time.time()
            time_since_last_bot_speak = (
                (now - observation_info.last_bot_speak_time) if observation_info.last_bot_speak_time else float("inf")
            )
            # context_data supplies the wait duration in seconds; prompt wants minutes.
            wait_duration_minutes = context_data.get("wait_duration", 0) / 60.0
            format_params["time_since_last_bot_speak"] = time_since_last_bot_speak
            format_params["wait_duration"] = wait_duration_minutes
            format_params["timeout_reason"] = context_data.get("timeout_reason", "长时间未回应")
        elif situation == "rethinking":
            prompt_template = PROMPT_WHEN_RETHINKING
            format_params["rethink_reason"] = context_data.get("rethink_reason", "需要调整对话方向")
        else:
            logger.warning(f"未知的心流生成情境: {situation},将使用 'initial' 作为默认。")
            prompt_template = PROMPT_INITIAL

        # --- 调用 LLM 生成 ---
        try:
            final_prompt = prompt_template.format(**format_params)
            logger.debug(f"发送到LLM的心流生成提示词 (情境: {situation}):\n------\n{final_prompt}\n------")
            heartflow_content, _ = await self.llm.generate_response_async(final_prompt)
            # Clean up: trim whitespace/quotes, then remove a literal
            # "内心独白:" *prefix* if the LLM added one. The previous
            # str.strip("内心独白:") call stripped that character *set* from
            # both ends, which could eat legitimate leading/trailing content.
            heartflow_content = heartflow_content.strip().strip('"').strip("'").strip()
            prefix = "内心独白:"
            if heartflow_content.startswith(prefix):
                heartflow_content = heartflow_content[len(prefix):].strip()
            logger.info(f"生成的心流 (情境: {situation}): 『{heartflow_content}』")
            return heartflow_content
        except KeyError as e:
            logger.error(f"格式化心流 Prompt 时缺少键: {e}")
            logger.error(f"可用参数: {format_params.keys()}")
            return f"(生成内心想法时出错:缺少参数 {e})"
        except Exception as e:
            logger.error(f"生成心流时出错 (情境: {situation}): {e}")
            logger.error(traceback.format_exc())
            return "(生成内心想法时遇到错误)"

View File

@ -1,6 +1,9 @@
from typing import Tuple, List, Dict, Any
# PFC/reply_generator.py
import traceback
from typing import Tuple, List, Dict, Any, Optional # Added Optional
from src.common.logger import get_module_logger
from ..models.utils_model import LLMRequest
# from ..models.utils_model import LLMRequest # Ensure correct path
from src.common.utils_llm import LLMRequest # Using updated common location assumption
from ...config.config import global_config
from .chat_observer import ChatObserver
from .reply_checker import ReplyChecker
@ -16,13 +19,17 @@ logger = get_module_logger("reply_generator")
# Prompt for direct_reply (首次回复)
PROMPT_DIRECT_REPLY = """{persona_text}。现在你在参与一场QQ私聊请根据以下信息生成一条回复
当前对话目标{goals_str}
最近的聊天记录
当前对话目标
{goals_str}
你现在的想法
{pfc_heartflow}
最近的聊天记录
{chat_history_text}
--- 消息结束 ---
请根据上述信息结合聊天记录回复对方该回复应该
1. 符合对话目标""的角度发言不要自己与自己对话
请根据上述信息结合聊天记录和你自己的想法回复对方该回复应该
1. 符合对话目标和你的内心想法""的角度发言不要自己与自己对话
2. 符合你的性格特征和身份细节
3. 通俗易懂自然流畅像正常聊天一样简短通常20字以内除非特殊情况
4. 适当利用相关知识但不要生硬引用
@ -33,18 +40,22 @@ PROMPT_DIRECT_REPLY = """{persona_text}。现在你在参与一场QQ私聊
请你注意不要输出多余内容(包括前后缀冒号和引号括号表情等)只输出回复内容
不要输出多余内容(包括前后缀冒号和引号括号表情包at或 @等 )
请直接输出回复内容不需要任何额外格式"""
请直接输出回复内容不需要任何额外格式""" # Added heartflow placeholder and updated instructions
# Prompt for send_new_message (追问/补充)
PROMPT_SEND_NEW_MESSAGE = """{persona_text}。现在你在参与一场QQ私聊**刚刚你已经发送了一条或多条消息**,现在请根据以下信息再发一条新消息:
PROMPT_SEND_NEW_MESSAGE = """{persona_text}。现在你在参与一场QQ私聊**刚刚你已经发送了一条或多条消息**,现在请根据以下信息再发一条新消息:
当前对话目标{goals_str}
最近的聊天记录
当前对话目标
{goals_str}
你现在的想法
{pfc_heartflow}
最近的聊天记录
{chat_history_text}
--- 消息结束 ---
请根据上述信息结合聊天记录继续发一条新消息例如对之前消息的补充深入话题或追问等等该消息应该
1. 符合对话目标""的角度发言不要自己与自己对话
请根据上述信息结合聊天记录和你自己的想法继续发一条新消息例如对之前消息的补充深入话题或追问等等该消息应该
1. 符合对话目标和你的内心想法""的角度发言不要自己与自己对话
2. 符合你的性格特征和身份细节
3. 通俗易懂自然流畅像正常聊天一样简短通常20字以内除非特殊情况
4. 适当利用相关知识但不要生硬引用
@ -55,28 +66,41 @@ PROMPT_SEND_NEW_MESSAGE = """{persona_text}。现在你在参与一场QQ私聊
请你注意不要输出多余内容(包括前后缀冒号和引号括号表情等)只输出消息内容
不要输出多余内容(包括前后缀冒号和引号括号表情包at或 @等 )
请直接输出回复内容不需要任何额外格式"""
请直接输出回复内容不需要任何额外格式""" # Added heartflow placeholder and updated instructions
class ReplyGenerator:
"""回复生成器"""
def __init__(self, stream_id: str):
self.llm = LLMRequest(
model=global_config.llm_PFC_chat,
temperature=global_config.llm_PFC_chat["temp"],
max_tokens=300,
request_type="reply_generation",
)
self.personality_info = Individuality.get_instance().get_prompt(type="personality", x_person=2, level=3)
self.identity_detail_info = Individuality.get_instance().get_prompt(type="identity", x_person=2, level=2)
# Ensure correct LLM config path/structure
try:
self.llm = LLMRequest(
model=global_config.llm_PFC_chat,
temperature=global_config.llm_PFC_chat.get("temp", 0.7), # Use .get for safety
max_tokens=global_config.llm_PFC_chat.get("max_tokens", 300), # Use .get for safety
request_type="reply_generation",
)
except AttributeError:
logger.error("Config error: llm_PFC_chat not found or missing keys ('temp'/'max_tokens'). Using fallback.")
# Fallback or raise error
self.llm = LLMRequest(model=global_config.llm_normal, temperature=0.7, max_tokens=300, request_type="reply_generation_fallback")
self.individuality = Individuality.get_instance() # Store instance
self.personality_info = self.individuality.get_prompt(type="personality", x_person=2, level=3)
self.identity_detail_info = self.individuality.get_prompt(type="identity", x_person=2, level=2)
self.name = global_config.BOT_NICKNAME
self.chat_observer = ChatObserver.get_instance(stream_id)
self.reply_checker = ReplyChecker(stream_id)
self.reply_checker = ReplyChecker(stream_id) # Assuming ReplyChecker exists
# 修改 generate 方法签名,增加 action_type 参数
# 修改 generate 方法签名,增加 action_type 和 pfc_heartflow 参数
async def generate(
self, observation_info: ObservationInfo, conversation_info: ConversationInfo, action_type: str
self,
observation_info: ObservationInfo,
conversation_info: ConversationInfo,
action_type: str,
pfc_heartflow: Optional[str], # <--- 新增参数
) -> str:
"""生成回复
@ -84,49 +108,78 @@ class ReplyGenerator:
observation_info: 观察信息
conversation_info: 对话信息
action_type: 当前执行的动作类型 ('direct_reply' 'send_new_message')
pfc_heartflow: 当前的心流文本 # <--- 新增参数说明
Returns:
str: 生成的回复
"""
# 构建提示词
logger.debug(f"开始生成回复 (动作类型: {action_type}):当前目标: {conversation_info.goal_list}")
logger.debug(f"开始生成回复 (动作类型: {action_type}):当前目标: {getattr(conversation_info, 'goal_list', '不可用')}")
# --- 构建通用 Prompt 参数 ---
# (这部分逻辑基本不变)
# 构建对话目标 (goals_str)
goals_str = ""
if conversation_info.goal_list:
# 构建对话目标 (goals_str) - Robust handling
goals_str = "- 目前没有明确对话目标。\n" # Default
if hasattr(conversation_info, "goal_list") and conversation_info.goal_list:
temp_goals_str = ""
for goal_reason in conversation_info.goal_list:
if isinstance(goal_reason, tuple):
goal = goal_reason[0] if len(goal_reason) > 0 else "目标内容缺失"
reasoning = goal_reason[1] if len(goal_reason) > 1 else "没有明确原因"
<<<<<<< HEAD
<<<<<<< HEAD
goal = "目标内容缺失"
reasoning = "没有明确原因"
if isinstance(goal_reason, tuple) and len(goal_reason) > 0:
goal = goal_reason[0]
if len(goal_reason) > 1: reasoning = goal_reason[1]
elif isinstance(goal_reason, dict):
=======
if isinstance(goal_reason, dict):
>>>>>>> 3cfa1e6b17340f82f2937a2243b8e99030196294
=======
if isinstance(goal_reason, dict):
>>>>>>> 3cfa1e6b17340f82f2937a2243b8e99030196294
goal = goal_reason.get("goal", "目标内容缺失")
reasoning = goal_reason.get("reasoning", "没有明确原因")
reasoning = goal_reason.get("reason", "没有明确原因") # Use 'reason' key
else:
goal = str(goal_reason)
<<<<<<< HEAD
=======
reasoning = "没有明确原因"
<<<<<<< HEAD
>>>>>>> 3cfa1e6b17340f82f2937a2243b8e99030196294
=======
>>>>>>> 3cfa1e6b17340f82f2937a2243b8e99030196294
goal = str(goal) if goal is not None else "目标内容缺失"
reasoning = str(reasoning) if reasoning is not None else "没有明确原因"
goals_str += f"- 目标:{goal}\n 原因:{reasoning}\n"
else:
goals_str = "- 目前没有明确对话目标\n" # 简化无目标情况
temp_goals_str += f"- 目标:{goal}\n 原因:{reasoning}\n"
if temp_goals_str:
goals_str = temp_goals_str
# 获取聊天历史记录 (chat_history_text) - Robust handling
chat_history_text = "还没有聊天记录。\n" # Default
if hasattr(observation_info, 'chat_history_str') and observation_info.chat_history_str:
chat_history_text = observation_info.chat_history_str + "\n" # Ensure newline
# Append unprocessed messages if any
if hasattr(observation_info, 'new_messages_count') and observation_info.new_messages_count > 0:
if hasattr(observation_info, 'unprocessed_messages') and observation_info.unprocessed_messages:
new_messages_list = observation_info.unprocessed_messages
try:
new_messages_str = await build_readable_messages(
new_messages_list,
replace_bot_name=True,
merge_messages=False,
timestamp_mode="relative",
read_mark=0.0,
)
chat_history_text += f"\n--- 以下是 {observation_info.new_messages_count} 条新收到的消息 ---\n{new_messages_str}\n"
except Exception as build_err:
logger.error(f"Error building readable messages for reply gen: {build_err}")
chat_history_text += "\n--- (无法格式化新消息) ---\n"
else:
logger.warning("Reply gen: new_messages_count > 0 but unprocessed_messages is empty/missing.")
chat_history_text += f"\n--- (有 {observation_info.new_messages_count} 条新消息,但无法显示内容) ---\n"
# 获取聊天历史记录 (chat_history_text)
chat_history_text = observation_info.chat_history_str
if observation_info.new_messages_count > 0 and observation_info.unprocessed_messages:
new_messages_list = observation_info.unprocessed_messages
new_messages_str = await build_readable_messages(
new_messages_list,
replace_bot_name=True,
merge_messages=False,
timestamp_mode="relative",
read_mark=0.0,
)
chat_history_text += f"\n--- 以下是 {observation_info.new_messages_count} 条新消息 ---\n{new_messages_str}"
elif not chat_history_text:
chat_history_text = "还没有聊天记录。"
# 构建 Persona 文本 (persona_text)
identity_details_only = self.identity_detail_info
@ -153,20 +206,34 @@ class ReplyGenerator:
logger.info("使用 PROMPT_DIRECT_REPLY (首次/非连续回复生成)")
# --- 格式化最终的 Prompt ---
prompt = prompt_template.format(
persona_text=persona_text, goals_str=goals_str, chat_history_text=chat_history_text
)
# Provide default for heartflow if None or empty
heartflow_for_prompt = pfc_heartflow if pfc_heartflow else "无(没有内心想法信息)"
try:
prompt = prompt_template.format(
persona_text=persona_text,
goals_str=goals_str.strip(),
pfc_heartflow=heartflow_for_prompt, # <--- 传入心流
chat_history_text=chat_history_text.strip(),
)
except KeyError as e:
logger.error(f"格式化回复生成 Prompt 时出错,缺少键: {e}")
return f"抱歉,我在组织语言时遇到了点内部问题(缺少参数 {e}),请稍后再试。"
# --- 调用 LLM 生成 ---
logger.debug(f"发送到LLM的生成提示词:\n------\n{prompt}\n------")
try:
content, _ = await self.llm.generate_response_async(prompt)
logger.debug(f"生成的回复: {content}")
# 移除旧的检查新消息逻辑,这应该由 conversation 控制流处理
# Basic cleaning
content = content.strip().strip('"')
logger.debug(f"生成的原始回复: {content}")
# No check for new messages here, handled by Conversation loop
return content
except Exception as e:
logger.error(f"生成回复时出错: {e}")
logger.error(traceback.format_exc()) # Log traceback
return "抱歉,我现在有点混乱,让我重新思考一下..."
# check_reply 方法保持不变
@ -174,6 +241,11 @@ class ReplyGenerator:
self, reply: str, goal: str, chat_history: List[Dict[str, Any]], chat_history_str: str, retry_count: int = 0
) -> Tuple[bool, str, bool]:
"""检查回复是否合适
(此方法逻辑保持不变)
(此方法逻辑保持不变, 不接收心流)
"""
return await self.reply_checker.check(reply, goal, chat_history, chat_history_str, retry_count)
# Ensure ReplyChecker exists
if not hasattr(self, 'reply_checker'):
logger.error("ReplyChecker not initialized in ReplyGenerator!")
return False, "内部错误:无法检查回复", True # Assume replan is needed
return await self.reply_checker.check(reply, goal, chat_history, chat_history_str, retry_count)

View File

@ -39,7 +39,7 @@ class Waiter:
logger.info(f"等待超过 {DESIRED_TIMEOUT_SECONDS} 秒...添加思考目标。")
wait_goal = {
"goal": f"你等待了{elapsed_time / 60:.1f}分钟,注意可能在对方看来聊天已经结束,思考接下来要做什么",
"reason": "对方很久没有回复你的消息了",
"reasoning": "对方很久没有回复你的消息了",
}
conversation_info.goal_list.append(wait_goal)
logger.info(f"添加目标: {wait_goal}")
@ -66,7 +66,7 @@ class Waiter:
wait_goal = {
# 保持 goal 文本一致
"goal": f"你等待了{elapsed_time / 60:.1f}分钟,对方似乎话说一半突然消失了,可能忙去了?也可能忘记了回复?要问问吗?还是结束对话?或继续等待?思考接下来要做什么",
"reason": "对方话说一半消失了,很久没有回复",
"reasoning": "对方话说一半消失了,很久没有回复",
}
conversation_info.goal_list.append(wait_goal)
logger.info(f"添加目标: {wait_goal}")