Update self_processor.py

pull/998/head
2829798842 2025-05-28 21:10:55 +08:00 committed by GitHub
parent b0c7daa1e4
commit ebdf2de581
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
1 changed file with 123 additions and 53 deletions

View File

@ -7,45 +7,63 @@ import traceback
from src.common.logger_manager import get_logger
from src.individuality.individuality import individuality
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.chat.utils.json_utils import safe_json_dumps
from src.chat.message_receive.chat_stream import chat_manager
from src.person_info.relationship_manager import relationship_manager
from .base_processor import BaseProcessor
from src.chat.focus_chat.info.mind_info import MindInfo
from typing import List, Optional
from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservation
from typing import Dict
from src.chat.focus_chat.info.info_base import InfoBase
from src.chat.focus_chat.info.self_info import SelfInfo
logger = get_logger("processor")
def init_prompt():
indentify_prompt = """
{name_block}
You are an AI, but you disguise yourself as a human. Your personality is: {prompt_personality}.
{indentify_block}
{relation_prompt}
group_prompt = """
你的名字是{bot_name}
{memory_str}
It is now {time_now}, you are online, chatting with netizens in the QQ group. The following is the ongoing chat content:
{extra_info}
{relation_prompt}
{cycle_info_block}
现在是{time_now}你正在上网和qq群里的网友们聊天以下是正在进行的聊天内容
{chat_observe_info}
Now please think about your self-identity based on the existing information:
1. What kind of person are you, and what is your relationship with people in the group?
2. What is your image?
3. Think about whether anyone mentioned you, or if any images are related to you.
4. Does your self-identity help with your response? If you need self-related information to help you participate in the chat, please output it; otherwise, please output a brief self-identity of more than ten words.
5. Generally, you don't need to output self-identity, just output a brief self-identity of more than ten words, unless there is an obvious scenario that requires self-identity.
以下是你之前对聊天的观察和规划你的名字是{bot_name}
{last_mind}
The output content should be plain, speak in Chinese, don't be exaggerated, be plain.
Please do not output redundant content (including prefixes and suffixes, colons and quotation marks, parentheses (), emoticons, @ or @ etc.). Only output self-identity content, remember to clearly state that this is your self-identity.
"""
Prompt(indentify_prompt, "indentify_prompt")
现在请你继续输出观察和规划输出要求
1. 先关注未读新消息的内容和近期回复历史
2. 根据新信息修改和删除之前的观察和规划
3. 根据聊天内容继续输出观察和规划
4. 注意群聊的时间线索话题由谁发起进展状况如何思考聊天的时间线
6. 语言简洁自然不要分点不要浮夸不要修辞仅输出思考内容就好"""
Prompt(group_prompt, "sub_heartflow_prompt_before")
private_prompt = """
你的名字是{bot_name}
{memory_str}
{extra_info}
{relation_prompt}
{cycle_info_block}
现在是{time_now}你正在上网和qq群里的网友们聊天以下是正在进行的聊天内容
{chat_observe_info}
以下是你之前对聊天的观察和规划你的名字是{bot_name}
{last_mind}
现在请你继续输出观察和规划输出要求
1. 先关注未读新消息的内容和近期回复历史
2. 根据新信息修改和删除之前的观察和规划
3. 根据聊天内容继续输出观察和规划
4. 注意群聊的时间线索话题由谁发起进展状况如何思考聊天的时间线
6. 语言简洁自然不要分点不要浮夸不要修辞仅输出思考内容就好"""
Prompt(private_prompt, "sub_heartflow_prompt_private_before")
class SelfProcessor(BaseProcessor):
log_prefix = "自我认同"
class MindProcessor(BaseProcessor):
log_prefix = "聊天思考"
def __init__(self, subheartflow_id: str):
super().__init__()
@ -53,14 +71,48 @@ class SelfProcessor(BaseProcessor):
self.subheartflow_id = subheartflow_id
self.llm_model = LLMRequest(
model=global_config.model.focus_self_recognize,
temperature=global_config.model.focus_self_recognize["temp"],
model=global_config.model.focus_chat_mind,
temperature=global_config.model.focus_chat_mind["temp"],
max_tokens=800,
request_type="self_identify",
request_type="focus_chat_mind",
)
self.current_mind = ""
self.past_mind = []
self.structured_info = []
self.structured_info_str = ""
name = chat_manager.get_stream_name(self.subheartflow_id)
self.log_prefix = f"[{name}] "
self._update_structured_info_str()
def _update_structured_info_str(self):
    """Rebuild ``structured_info_str`` from the current ``structured_info`` list.

    Each item is rendered according to its ``type`` field; the result is a
    newline-joined display string headed by a section marker, or the empty
    string when there is nothing to show.
    """
    # An empty info list maps straight to an empty display string.
    if not self.structured_info:
        self.structured_info_str = ""
        return

    # Per-type display templates; types not listed here fall through to
    # the generic "<type>的信息" form below.
    templates = {
        "info": "刚刚: {content}",
        "memory": "{content}",
        "comparison_result": "数字大小比较结果: {content}",
        "time_info": "{content}",
        "lpmm_knowledge": "你知道:{content}",
    }

    rendered = ["【信息】"]
    for entry in self.structured_info:
        kind = entry.get("type", "未知类型")
        body = entry.get("content", "")
        template = templates.get(kind)
        if template is None:
            rendered.append(f"{kind}的信息: {body}")
        else:
            rendered.append(template.format(content=body))

    self.structured_info_str = "\n".join(rendered)
    logger.debug(f"{self.log_prefix} 更新 structured_info_str: \n{self.structured_info_str}")
async def process_info(
self, observations: Optional[List[Observation]] = None, running_memorys: Optional[List[Dict]] = None, *infos
@ -73,18 +125,14 @@ class SelfProcessor(BaseProcessor):
Returns:
List[InfoBase]: 处理后的结构化信息列表
"""
self_info_str = await self.self_indentify(observations, running_memorys)
current_mind = await self.do_thinking_before_reply(observations, running_memorys)
if self_info_str:
self_info = SelfInfo()
self_info.set_self_info(self_info_str)
else:
self_info = None
return None
mind_info = MindInfo()
mind_info.set_current_mind(current_mind)
return [self_info]
return [mind_info]
async def self_indentify(
async def do_thinking_before_reply(
self, observations: Optional[List[Observation]] = None, running_memorys: Optional[List[Dict]] = None
):
"""
@ -100,12 +148,31 @@ class SelfProcessor(BaseProcessor):
tuple: (current_mind, past_mind, prompt) 当前想法过去的想法列表和使用的prompt
"""
# ---------- 0. 更新和清理 structured_info ----------
if self.structured_info:
updated_info = []
for item in self.structured_info:
item["ttl"] -= 1
if item["ttl"] > 0:
updated_info.append(item)
else:
logger.debug(f"{self.log_prefix} 移除过期的 structured_info 项: {item['id']}")
self.structured_info = updated_info
self._update_structured_info_str()
logger.debug(
f"{self.log_prefix} 当前完整的 structured_info: {safe_json_dumps(self.structured_info, ensure_ascii=False)}"
)
memory_str = ""
if running_memorys:
memory_str = "以下是当前在聊天中,你回忆起的记忆:\n"
for running_memory in running_memorys:
memory_str += f"{running_memory['topic']}: {running_memory['content']}\n"
# ---------- 1. 准备基础数据 ----------
# 获取现有想法和情绪状态
previous_mind = self.current_mind if self.current_mind else ""
if observations is None:
observations = []
for observation in observations:
@ -123,49 +190,52 @@ class SelfProcessor(BaseProcessor):
chat_observe_info = observation.get_observe_info()
person_list = observation.person_list
if isinstance(observation, HFCloopObservation):
# hfcloop_observe_info = observation.get_observe_info()
pass
hfcloop_observe_info = observation.get_observe_info()
nickname_str = ""
for nicknames in global_config.bot.alias_names:
nickname_str += f"{nicknames},"
name_block = f"你的名字是{global_config.bot.nickname},你的昵称有{nickname_str},有人也会用这些昵称称呼你。"
personality_block = individuality.get_personality_prompt(x_person=2, level=2)
identity_block = individuality.get_identity_prompt(x_person=2, level=2)
# ---------- 3. 准备个性化数据 ----------
# 获取个性化信息
relation_prompt = ""
for person in person_list:
relation_prompt += await relationship_manager.build_relationship_info(person, is_id=True)
prompt = (await global_prompt_manager.get_prompt_async("indentify_prompt")).format(
name_block=name_block,
prompt_personality=personality_block,
indentify_block=identity_block,
template_name = "sub_heartflow_prompt_before" if is_group_chat else "sub_heartflow_prompt_private_before"
logger.debug(f"{self.log_prefix} 使用{'群聊' if is_group_chat else '私聊'}思考模板")
prompt = (await global_prompt_manager.get_prompt_async(template_name)).format(
bot_name=individuality.name,
memory_str=memory_str,
extra_info=self.structured_info_str,
relation_prompt=relation_prompt,
time_now=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
chat_observe_info=chat_observe_info,
last_mind=previous_mind,
cycle_info_block=hfcloop_observe_info,
chat_target_name=chat_target_name,
)
content = ""
content = "(不知道该想些什么...)"
try:
content, _ = await self.llm_model.generate_response_async(prompt=prompt)
if not content:
logger.warning(f"{self.log_prefix} LLM返回空结果自我识别失败。")
logger.warning(f"{self.log_prefix} LLM返回空结果思考失败。")
except Exception as e:
# 处理总体异常
logger.error(f"{self.log_prefix} 执行LLM请求或处理响应时出错: {e}")
logger.error(traceback.format_exc())
content = "自我识别过程中出现错误"
content = "思考过程中出现错误"
if content == "None":
content = ""
# 记录初步思考结果
logger.debug(f"{self.log_prefix} 自我识别prompt: \n{prompt}\n")
logger.info(f"{self.log_prefix} 自我识别结果: {content}")
logger.debug(f"{self.log_prefix} 思考prompt: \n{prompt}\n")
logger.info(f"{self.log_prefix} 思考结果: {content}")
self.update_current_mind(content)
return content
def update_current_mind(self, response):
    """Make *response* the current thought, archiving the previous one.

    The outgoing thought is appended to ``past_mind`` only when it is
    non-empty, so the history never accumulates blank entries.
    """
    previous = self.current_mind
    # Only a non-empty previous thought is worth keeping in the history.
    if previous:
        self.past_mind.append(previous)
    self.current_mind = response
init_prompt()