feat: mood system can be toggled on or off, disabled by default

pull/1273/head
SengokuCola 2025-09-30 17:57:54 +08:00
parent 9781b5f09c
commit 993f85bfeb
14 changed files with 1715 additions and 2235 deletions

2
.gitignore
View File

@@ -20,6 +20,8 @@ MaiBot-Napcat-Adapter
nonebot-maibot-adapter/
MaiMBot-LPMM
*.zip
run_bot.bat
run_na.bat
run.bat
log_debug/
run_amds.bat

21
bot.py
View File

@@ -5,16 +5,29 @@ import sys
import time
import platform
import traceback
import shutil
from dotenv import load_dotenv
from pathlib import Path
from rich.traceback import install
if os.path.exists(".env"):
load_dotenv(".env", override=True)
env_path = Path(__file__).parent / ".env"
template_env_path = Path(__file__).parent / "template" / "template.env"
if env_path.exists():
load_dotenv(str(env_path), override=True)
print("成功加载环境变量配置")
else:
print("未找到.env文件请确保程序所需的环境变量被正确设置")
raise FileNotFoundError(".env 文件不存在,请创建并配置所需的环境变量")
try:
if template_env_path.exists():
shutil.copyfile(template_env_path, env_path)
print("未找到.env已从 template/template.env 自动创建")
load_dotenv(str(env_path), override=True)
else:
print("未找到.env文件也未找到模板 template/template.env")
raise FileNotFoundError(".env 文件不存在,请创建并配置所需的环境变量")
except Exception as e:
print(f"自动创建 .env 失败: {e}")
raise
# 最早期初始化日志系统,确保所有后续模块都使用正确的日志格式
from src.common.logger import initialize_logging, get_logger, shutdown_logging
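The net effect of this bot.py hunk: a missing .env is now bootstrapped from template/template.env instead of aborting immediately. A minimal sketch of that flow, for orientation only (the real code logs in Chinese, keeps the hard failure when the template is also missing, and re-raises if the copy fails):

# Illustrative sketch of the .env bootstrap above, not the verbatim bot.py code.
import shutil
from pathlib import Path

from dotenv import load_dotenv

def ensure_env(project_root: Path) -> None:
    env_path = project_root / ".env"
    template_env_path = project_root / "template" / "template.env"
    if not env_path.exists():
        if not template_env_path.exists():
            # neither .env nor the shipped template exists -> still a hard error
            raise FileNotFoundError(".env missing and template/template.env not found")
        shutil.copyfile(template_env_path, env_path)  # create .env from the template
    load_dotenv(str(env_path), override=True)  # .env values override the process environment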

View File

@@ -20,6 +20,7 @@ from src.chat.message_receive.uni_message_sender import UniversalMessageSender
from src.chat.utils.timer_calculator import Timer # <--- Import Timer
from src.chat.utils.utils import get_chat_type_and_target_info
from src.chat.utils.prompt_builder import global_prompt_manager
from src.mood.mood_manager import mood_manager
from src.chat.utils.chat_message_builder import (
build_readable_messages,
get_raw_msg_before_timestamp_with_chat,
@@ -283,38 +284,13 @@ class DefaultReplyer:
expression_habits_block += f"{style_habits_str}\n"
return f"{expression_habits_title}\n{expression_habits_block}", selected_ids
# async def build_memory_block(self, chat_history: List[DatabaseMessages], target: str) -> str:
# """构建记忆块
# Args:
# chat_history: 聊天历史记录
# target: 目标消息内容
# Returns:
# str: 记忆信息字符串
# """
# if not global_config.memory.enable_memory:
# return ""
# instant_memory = None
# running_memories = await self.memory_activator.activate_memory_with_chat_history(
# target_message=target, chat_history=chat_history
# )
# if not running_memories:
# return ""
# memory_str = "以下是当前在聊天中,你回忆起的记忆:\n"
# for running_memory in running_memories:
# keywords, content = running_memory
# memory_str += f"- {keywords}{content}\n"
# if instant_memory:
# memory_str += f"- {instant_memory}\n"
# return memory_str
async def build_mood_state_prompt(self) -> str:
"""构建情绪状态提示"""
if not global_config.mood.enable_mood:
return ""
mood_state = await mood_manager.get_mood_by_chat_id(self.chat_stream.stream_id).get_mood()
return f"你现在的心情是:{mood_state}"
async def build_memory_block(self) -> str:
"""构建记忆块
@@ -526,50 +502,6 @@ class DefaultReplyer:
return core_dialogue_prompt, all_dialogue_prompt
def build_mai_think_context(
self,
chat_id: str,
memory_block: str,
relation_info: str,
time_block: str,
chat_target_1: str,
chat_target_2: str,
identity_block: str,
sender: str,
target: str,
chat_info: str,
) -> Any:
"""构建 mai_think 上下文信息
Args:
chat_id: 聊天ID
memory_block: 记忆块内容
relation_info: 关系信息
time_block: 时间块内容
chat_target_1: 聊天目标1
chat_target_2: 聊天目标2
identity_block: 身份块内容
sender: 发送者名称
target: 目标消息内容
chat_info: 聊天信息
Returns:
Any: mai_think 实例
"""
mai_think = mai_thinking_manager.get_mai_think(chat_id)
mai_think.memory_block = memory_block
mai_think.relation_info_block = relation_info
mai_think.time_block = time_block
mai_think.chat_target = chat_target_1
mai_think.chat_target_2 = chat_target_2
mai_think.chat_info = chat_info
mai_think.identity = identity_block
mai_think.sender = sender
mai_think.target = target
return mai_think
async def build_actions_prompt(
self, available_actions: Dict[str, ActionInfo], chosen_actions_info: Optional[List[ActionPlannerInfo]] = None
) -> str:
@@ -717,6 +649,7 @@ class DefaultReplyer:
self._time_and_run_task(self.get_prompt_info(chat_talking_prompt_short, sender, target), "prompt_info"),
self._time_and_run_task(self.build_actions_prompt(available_actions, chosen_actions), "actions_info"),
self._time_and_run_task(self.build_personality_prompt(), "personality_prompt"),
self._time_and_run_task(self.build_mood_state_prompt(), "mood_state_prompt"),
)
# 任务名称中英文映射
@@ -729,6 +662,7 @@ class DefaultReplyer:
"prompt_info": "获取知识",
"actions_info": "动作信息",
"personality_prompt": "人格信息",
"mood_state_prompt": "情绪状态",
}
# 处理结果
@@ -759,6 +693,7 @@ class DefaultReplyer:
actions_info: str = results_dict["actions_info"]
personality_prompt: str = results_dict["personality_prompt"]
keywords_reaction_prompt = await self.build_keywords_reaction_prompt(target)
mood_state_prompt: str = results_dict["mood_state_prompt"]
if extra_info:
extra_info_block = f"以下是你在回复时需要参考的信息,现在请你阅读以下内容,进行决策\n{extra_info}\n以上是你在回复时需要参考的信息,现在请你阅读以下内容,进行决策"
@@ -789,6 +724,7 @@ class DefaultReplyer:
tool_info_block=tool_info,
memory_block=memory_block,
knowledge_prompt=prompt_info,
mood_state=mood_state_prompt,
# memory_block=memory_block,
# relation_info_block=relation_info,
extra_info_block=extra_info_block,
@@ -809,6 +745,7 @@ class DefaultReplyer:
tool_info_block=tool_info,
memory_block=memory_block,
knowledge_prompt=prompt_info,
mood_state=mood_state_prompt,
# memory_block=memory_block,
# relation_info_block=relation_info,
extra_info_block=extra_info_block,
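Taken together, the hunks above follow the replyer's usual recipe for a new prompt component: schedule the builder inside the timed gather, register a display name, read the result back from results_dict, and pass it to the template as mood_state. A toy, self-contained rendering of that shape (the real _time_and_run_task also records timings; all names below are simplified stand-ins):

import asyncio

async def _time_and_run_task(coro, name: str):
    # toy stand-in: run the coroutine and tag its result with the task name
    return name, await coro

async def build_mood_state_prompt(enable_mood: bool) -> str:
    if not enable_mood:
        return ""  # feature gate: the new template config ships enable_mood = false
    return "你现在的心情是:平静"

async def demo() -> None:
    results = await asyncio.gather(
        _time_and_run_task(build_mood_state_prompt(enable_mood=False), "mood_state_prompt"),
    )
    results_dict = dict(results)
    print(repr(results_dict["mood_state_prompt"]))  # '' when the mood system is off

asyncio.run(demo())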

View File

@@ -26,6 +26,7 @@ from src.chat.utils.chat_message_builder import (
replace_user_references,
)
from src.chat.express.expression_selector import expression_selector
from src.mood.mood_manager import mood_manager
# from src.memory_system.memory_activator import MemoryActivator
@@ -281,37 +282,12 @@ class PrivateReplyer:
return f"{expression_habits_title}\n{expression_habits_block}", selected_ids
# async def build_memory_block(self, chat_history: List[DatabaseMessages], target: str) -> str:
# """构建记忆块
# Args:
# chat_history: 聊天历史记录
# target: 目标消息内容
# Returns:
# str: 记忆信息字符串
# """
# if not global_config.memory.enable_memory:
# return ""
# instant_memory = None
# running_memories = await self.memory_activator.activate_memory_with_chat_history(
# target_message=target, chat_history=chat_history
# )
# if not running_memories:
# return ""
# memory_str = "以下是当前在聊天中,你回忆起的记忆:\n"
# for running_memory in running_memories:
# keywords, content = running_memory
# memory_str += f"- {keywords}{content}\n"
# if instant_memory:
# memory_str += f"- {instant_memory}\n"
# return memory_str
async def build_mood_state_prompt(self) -> str:
"""构建情绪状态提示"""
if not global_config.mood.enable_mood:
return ""
mood_state = await mood_manager.get_mood_by_chat_id(self.chat_stream.stream_id).get_mood()
return f"你现在的心情是:{mood_state}"
async def build_memory_block(self) -> str:
@@ -600,6 +576,7 @@ class PrivateReplyer:
self._time_and_run_task(self.get_prompt_info(chat_talking_prompt_short, sender, target), "prompt_info"),
self._time_and_run_task(self.build_actions_prompt(available_actions, chosen_actions), "actions_info"),
self._time_and_run_task(self.build_personality_prompt(), "personality_prompt"),
self._time_and_run_task(self.build_mood_state_prompt(), "mood_state_prompt"),
)
# 任务名称中英文映射
@@ -611,6 +588,7 @@ class PrivateReplyer:
"prompt_info": "获取知识",
"actions_info": "动作信息",
"personality_prompt": "人格信息",
"mood_state_prompt": "情绪状态",
}
# 处理结果
@@ -639,6 +617,7 @@ class PrivateReplyer:
prompt_info: str = results_dict["prompt_info"] # 直接使用格式化后的结果
actions_info: str = results_dict["actions_info"]
personality_prompt: str = results_dict["personality_prompt"]
mood_state_prompt: str = results_dict["mood_state_prompt"]
keywords_reaction_prompt = await self.build_keywords_reaction_prompt(target)
if extra_info:
@@ -660,12 +639,12 @@ class PrivateReplyer:
expression_habits_block=expression_habits_block,
tool_info_block=tool_info,
knowledge_prompt=prompt_info,
mood_state=mood_state_prompt,
memory_block=memory_block,
relation_info_block=relation_info,
extra_info_block=extra_info_block,
identity=personality_prompt,
action_descriptions=actions_info,
dialogue_prompt=dialogue_prompt,
time_block=time_block,
target=target,
@@ -681,6 +660,7 @@ class PrivateReplyer:
expression_habits_block=expression_habits_block,
tool_info_block=tool_info,
knowledge_prompt=prompt_info,
mood_state=mood_state_prompt,
memory_block=memory_block,
relation_info_block=relation_info,
extra_info_block=extra_info_block,

View File

@@ -22,7 +22,7 @@ def init_replyer_prompt():
{reply_target_block}
{identity}
你正在群里聊天,现在请你读读之前的聊天记录然后给出日常且口语化的回复平淡一些
你正在群里聊天,现在请你读读之前的聊天记录然后给出日常且口语化的回复平淡一些{mood_state}
尽量简短一些{keywords_reaction_prompt}请注意把握聊天内容不要回复的太有条理可以有个性
{reply_style}
请注意不要输出多余内容(包括前后缀冒号和引号括号表情等)只输出回复内容
@@ -41,7 +41,7 @@ def init_replyer_prompt():
{background_dialogue_prompt}
你现在想补充说明你刚刚自己的发言内容{target}原因是{reason}
请你根据聊天内容组织一条新回复注意{target} 是刚刚你自己的发言你要在这基础上进一步发言请按照你自己的角度来继续进行回复注意保持上下文的连贯性
请你根据聊天内容组织一条新回复注意{target} 是刚刚你自己的发言你要在这基础上进一步发言请按照你自己的角度来继续进行回复注意保持上下文的连贯性{mood_state}
{identity}
尽量简短一些{keywords_reaction_prompt}请注意把握聊天内容不要回复的太有条理可以有个性
{reply_style}
@@ -63,7 +63,7 @@ def init_replyer_prompt():
{reply_target_block}
{identity}
你正在和{sender_name}聊天,现在请你读读之前的聊天记录然后给出日常且口语化的回复平淡一些
你正在和{sender_name}聊天,现在请你读读之前的聊天记录然后给出日常且口语化的回复平淡一些{mood_state}
尽量简短一些{keywords_reaction_prompt}请注意把握聊天内容不要回复的太有条理可以有个性
{reply_style}
请注意不要输出多余内容(包括前后缀冒号和引号括号表情等)只输出回复内容
@@ -81,7 +81,7 @@ def init_replyer_prompt():
{dialogue_prompt}
你现在想补充说明你刚刚自己的发言内容{target}原因是{reason}
请你根据聊天内容组织一条新回复注意{target} 是刚刚你自己的发言你要在这基础上进一步发言请按照你自己的角度来继续进行回复注意保持上下文的连贯性
请你根据聊天内容组织一条新回复注意{target} 是刚刚你自己的发言你要在这基础上进一步发言请按照你自己的角度来继续进行回复注意保持上下文的连贯性{mood_state}
{identity}
尽量简短一些{keywords_reaction_prompt}请注意把握聊天内容不要回复的太有条理可以有个性
{reply_style}
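Because the replyers pass an empty string whenever mood.enable_mood is false, appending {mood_state} directly to these template lines is safe: the slot simply disappears. A tiny sketch of that behaviour (the real code substitutes through global_prompt_manager.format_prompt; plain str.format is used here only for illustration):

# The {mood_state} slot vanishes when the mood system is disabled.
template = "你正在群里聊天,现在请你读读之前的聊天记录然后给出日常且口语化的回复平淡一些{mood_state}"
print(template.format(mood_state=""))  # mood off: the line renders unchanged
print(template.format(mood_state="你现在的心情是:开心"))  # mood on: appended to the line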

View File

@@ -151,7 +151,7 @@ class Prompt(str):
@staticmethod
def _process_escaped_braces(template) -> str:
"""处理模板中的转义花括号,将 \{\} 替换为临时标记""" # type: ignore
"""处理模板中的转义花括号,替换为临时标记""" # type: ignore
# 如果传入的是列表,将其转换为字符串
if isinstance(template, list):
template = "\n".join(str(item) for item in template)

View File

@@ -30,6 +30,7 @@ from src.config.official_configs import (
RelationshipConfig,
ToolConfig,
VoiceConfig,
MoodConfig,
MemoryConfig,
DebugConfig,
)
@@ -54,7 +55,7 @@ TEMPLATE_DIR = os.path.join(PROJECT_ROOT, "template")
# 考虑到实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码
# 对该字段的更新请严格参照语义化版本规范https://semver.org/lang/zh-CN/
MMC_VERSION = "0.11.0-snapshot.1"
MMC_VERSION = "0.11.0-snapshot.2"
def get_key_comment(toml_table, key):
@@ -356,6 +357,7 @@ class Config(ConfigBase):
tool: ToolConfig
memory: MemoryConfig
debug: DebugConfig
mood: MoodConfig
voice: VoiceConfig

View File

@@ -38,9 +38,6 @@ class PersonalityConfig(ConfigBase):
personality: str
"""人格"""
emotion_style: str
"""情感特征"""
reply_style: str = ""
"""表达风格"""
@@ -278,6 +275,19 @@ class ToolConfig(ConfigBase):
"""是否在聊天中启用工具"""
@dataclass
class MoodConfig(ConfigBase):
"""情绪配置类"""
enable_mood: bool = True
"""是否启用情绪系统"""
mood_update_threshold: float = 1
"""情绪更新阈值,越高,更新越慢"""
emotion_style: str = "情绪较为稳定,但遭遇特定事件的时候起伏较大"
"""情感特征,影响情绪的变化情况"""
@dataclass
class VoiceConfig(ConfigBase):
"""语音识别配置类"""

View File

@@ -14,7 +14,6 @@ from src.common.server import get_global_server, Server
from src.mood.mood_manager import mood_manager
from src.chat.knowledge import lpmm_start_up
from src.memory_system.Hippocampus import hippocampus_manager
from src.memory_system.hippocampus_to_memory_chest_task import HippocampusToMemoryChestTask
from src.memory_system.memory_management_task import MemoryManagementTask
from rich.traceback import install
from src.migrate_helper.migrate import check_and_run_migrations
@@ -86,8 +85,9 @@ class MainSystem:
logger.info("表情包管理器初始化成功")
# 启动情绪管理器
await mood_manager.start()
logger.info("情绪管理器初始化成功")
if global_config.mood.enable_mood:
await mood_manager.start()
logger.info("情绪管理器初始化成功")
# 初始化聊天管理器
await get_chat_manager()._initialize()
@@ -99,10 +99,6 @@ class MainSystem:
hippocampus_manager.initialize()
logger.info("记忆系统初始化成功")
# 添加海马体到记忆仓库的转换任务
await async_task_manager.add_task(HippocampusToMemoryChestTask())
logger.info("海马体到记忆仓库转换任务已启动")
# 添加记忆管理任务
await async_task_manager.add_task(MemoryManagementTask())
logger.info("记忆管理任务已启动")

View File

@@ -207,343 +207,6 @@ class Hippocampus:
def get_memory_from_keyword(self, keyword: str, max_depth: int = 2) -> list:
"""从关键词获取相关记忆。
Args:
keyword (str): 关键词
max_depth (int, optional): 记忆检索深度默认为21表示只获取直接相关的记忆2表示获取间接相关的记忆
Returns:
list: 记忆列表每个元素是一个元组 (topic, memory_content, similarity)
- topic: str, 记忆主题
- memory_content: str, 该主题下的完整记忆内容
- similarity: float, 与关键词的相似度
"""
if not keyword:
return []
# 获取所有节点
all_nodes = list(self.memory_graph.G.nodes())
memories = []
# 计算关键词的词集合
keyword_words = set(jieba.cut(keyword))
# 遍历所有节点,计算相似度
for node in all_nodes:
node_words = set(jieba.cut(node))
all_words = keyword_words | node_words
v1 = [1 if word in keyword_words else 0 for word in all_words]
v2 = [1 if word in node_words else 0 for word in all_words]
similarity = cosine_similarity(v1, v2)
# 如果相似度超过阈值,获取该节点的记忆
if similarity >= 0.3: # 可以调整这个阈值
node_data = self.memory_graph.G.nodes[node]
# 直接使用完整的记忆内容
if memory_items := node_data.get("memory_items", ""):
memories.append((node, memory_items, similarity))
# 按相似度降序排序
memories.sort(key=lambda x: x[2], reverse=True)
return memories
async def get_keywords_from_text(self, text: str) -> Tuple[List[str], List]:
"""从文本中提取关键词。
Args:
text (str): 输入文本
fast_retrieval (bool, optional): 是否使用快速检索默认为False
如果为True使用jieba分词提取关键词速度更快但可能不够准确
如果为False使用LLM提取关键词速度较慢但更准确
"""
if not text:
return [], []
# 使用LLM提取关键词 - 根据详细文本长度分布优化topic_num计算
text_length = len(text)
topic_num: int | list[int] = 0
keywords_lite = cut_key_words(text)
if keywords_lite:
logger.debug(f"提取关键词极简版: {keywords_lite}")
if text_length <= 12:
topic_num = [1, 3] # 6-10字符: 1个关键词 (27.18%的文本)
elif text_length <= 20:
topic_num = [2, 4] # 11-20字符: 2个关键词 (22.76%的文本)
elif text_length <= 30:
topic_num = [3, 5] # 21-30字符: 3个关键词 (10.33%的文本)
elif text_length <= 50:
topic_num = [4, 5] # 31-50字符: 4个关键词 (9.79%的文本)
else:
topic_num = 5 # 51+字符: 5个关键词 (其余长文本)
topics_response, _ = await self.model_small.generate_response_async(self.find_topic_llm(text, topic_num))
# 提取关键词
keywords = re.findall(r"<([^>]+)>", topics_response)
if not keywords:
keywords = []
else:
keywords = [
keyword.strip()
for keyword in ",".join(keywords).replace("", ",").replace("", ",").replace(" ", ",").split(",")
if keyword.strip()
]
if keywords:
logger.debug(f"提取关键词: {keywords}")
return keywords, keywords_lite
async def get_memory_from_topic(
self,
keywords: list[str],
max_memory_num: int = 3,
max_memory_length: int = 2,
max_depth: int = 3,
) -> list:
"""从文本中提取关键词并获取相关记忆。
Args:
keywords (list): 输入文本
max_memory_num (int, optional): 返回的记忆条目数量上限默认为3表示最多返回3条与输入文本相关度最高的记忆
max_memory_length (int, optional): 每个主题最多返回的记忆条目数量默认为2表示每个主题最多返回2条相似度最高的记忆
max_depth (int, optional): 记忆检索深度默认为3值越大检索范围越广可以获取更多间接相关的记忆但速度会变慢
Returns:
list: 记忆列表每个元素是一个元组 (topic, memory_content)
- topic: str, 记忆主题
- memory_content: str, 该主题下的完整记忆内容
"""
if not keywords:
return []
logger.info(f"提取的关键词: {', '.join(keywords)}")
# 过滤掉不存在于记忆图中的关键词
valid_keywords = [keyword for keyword in keywords if keyword in self.memory_graph.G]
if not valid_keywords:
logger.debug("没有找到有效的关键词节点")
return []
logger.debug(f"有效的关键词: {', '.join(valid_keywords)}")
# 从每个关键词获取记忆
activate_map = {} # 存储每个词的累计激活值
# 对每个关键词进行扩散式检索
for keyword in valid_keywords:
logger.debug(f"开始以关键词 '{keyword}' 为中心进行扩散检索 (最大深度: {max_depth}):")
# 初始化激活值
activation_values = {keyword: 1.0}
# 记录已访问的节点
visited_nodes = {keyword}
# 待处理的节点队列,每个元素是(节点, 激活值, 当前深度)
nodes_to_process = [(keyword, 1.0, 0)]
while nodes_to_process:
current_node, current_activation, current_depth = nodes_to_process.pop(0)
# 如果激活值小于0或超过最大深度停止扩散
if current_activation <= 0 or current_depth >= max_depth:
continue
# 获取当前节点的所有邻居
neighbors = list(self.memory_graph.G.neighbors(current_node))
for neighbor in neighbors:
if neighbor in visited_nodes:
continue
# 获取连接强度
edge_data = self.memory_graph.G[current_node][neighbor]
strength = edge_data.get("strength", 1)
# 计算新的激活值
new_activation = current_activation - (1 / strength)
if new_activation > 0:
activation_values[neighbor] = new_activation
visited_nodes.add(neighbor)
nodes_to_process.append((neighbor, new_activation, current_depth + 1))
# logger.debug(
# f"节点 '{neighbor}' 被激活,激活值: {new_activation:.2f} (通过 '{current_node}' 连接,强度: {strength}, 深度: {current_depth + 1})"
# ) # noqa: E501
# 更新激活映射
for node, activation_value in activation_values.items():
if activation_value > 0:
if node in activate_map:
activate_map[node] += activation_value
else:
activate_map[node] = activation_value
# 基于激活值平方的独立概率选择
remember_map = {}
# logger.info("基于激活值平方的归一化选择:")
# 计算所有激活值的平方和
total_squared_activation = sum(activation**2 for activation in activate_map.values())
if total_squared_activation > 0:
# 计算归一化的激活值
normalized_activations = {
node: (activation**2) / total_squared_activation for node, activation in activate_map.items()
}
# 按归一化激活值排序并选择前max_memory_num个
sorted_nodes = sorted(normalized_activations.items(), key=lambda x: x[1], reverse=True)[:max_memory_num]
# 将选中的节点添加到remember_map
for node, normalized_activation in sorted_nodes:
remember_map[node] = activate_map[node] # 使用原始激活值
logger.debug(
f"节点 '{node}' (归一化激活值: {normalized_activation:.2f}, 激活值: {activate_map[node]:.2f})"
)
else:
logger.info("没有有效的激活值")
# 从选中的节点中提取记忆
all_memories = []
# logger.info("开始从选中的节点中提取记忆:")
for node, activation in remember_map.items():
logger.debug(f"处理节点 '{node}' (激活值: {activation:.2f}):")
node_data = self.memory_graph.G.nodes[node]
if memory_items := node_data.get("memory_items", ""):
logger.debug("节点包含完整记忆")
# 计算记忆与关键词的相似度
memory_words = set(jieba.cut(memory_items))
text_words = set(keywords)
if all_words := memory_words | text_words:
# 计算相似度(虽然这里没有使用,但保持逻辑一致性)
v1 = [1 if word in memory_words else 0 for word in all_words]
v2 = [1 if word in text_words else 0 for word in all_words]
_ = cosine_similarity(v1, v2) # 计算但不使用用_表示
# 添加完整记忆到结果中
all_memories.append((node, memory_items, activation))
else:
logger.info("节点没有记忆")
# 去重(基于记忆内容)
logger.debug("开始记忆去重:")
seen_memories = set()
unique_memories = []
for topic, memory_items, activation_value in all_memories:
# memory_items现在是完整的字符串格式
memory = memory_items or ""
if memory not in seen_memories:
seen_memories.add(memory)
unique_memories.append((topic, memory_items, activation_value))
logger.debug(f"保留记忆: {memory} (来自节点: {topic}, 激活值: {activation_value:.2f})")
else:
logger.debug(f"跳过重复记忆: {memory} (来自节点: {topic})")
# 转换为(关键词, 记忆)格式
result = []
for topic, memory_items, _ in unique_memories:
# memory_items现在是完整的字符串格式
memory = memory_items or ""
result.append((topic, memory))
logger.debug(f"选中记忆: {memory} (来自节点: {topic})")
return result
async def get_activate_from_text(
self, text: str, max_depth: int = 3, fast_retrieval: bool = False
) -> tuple[float, list[str], list[str]]:
"""从文本中提取关键词并获取相关记忆。
Args:
text (str): 输入文本
max_depth (int, optional): 记忆检索深度默认为2
fast_retrieval (bool, optional): 是否使用快速检索默认为False
如果为True使用jieba分词和TF-IDF提取关键词速度更快但可能不够准确
如果为False使用LLM提取关键词速度较慢但更准确
Returns:
float: 激活节点数与总节点数的比值
list[str]: 有效的关键词
"""
keywords, keywords_lite = await self.get_keywords_from_text(text)
# 过滤掉不存在于记忆图中的关键词
valid_keywords = [keyword for keyword in keywords if keyword in self.memory_graph.G]
if not valid_keywords:
# logger.info("没有找到有效的关键词节点")
return 0, keywords, keywords_lite
logger.debug(f"有效的关键词: {', '.join(valid_keywords)}")
# 从每个关键词获取记忆
activate_map = {} # 存储每个词的累计激活值
# 对每个关键词进行扩散式检索
for keyword in valid_keywords:
logger.debug(f"开始以关键词 '{keyword}' 为中心进行扩散检索 (最大深度: {max_depth}):")
# 初始化激活值
activation_values = {keyword: 1.5}
# 记录已访问的节点
visited_nodes = {keyword}
# 待处理的节点队列,每个元素是(节点, 激活值, 当前深度)
nodes_to_process = [(keyword, 1.0, 0)]
while nodes_to_process:
current_node, current_activation, current_depth = nodes_to_process.pop(0)
# 如果激活值小于0或超过最大深度停止扩散
if current_activation <= 0 or current_depth >= max_depth:
continue
# 获取当前节点的所有邻居
neighbors = list(self.memory_graph.G.neighbors(current_node))
for neighbor in neighbors:
if neighbor in visited_nodes:
continue
# 获取连接强度
edge_data = self.memory_graph.G[current_node][neighbor]
strength = edge_data.get("strength", 1)
# 计算新的激活值
new_activation = current_activation - (1 / strength)
if new_activation > 0:
activation_values[neighbor] = new_activation
visited_nodes.add(neighbor)
nodes_to_process.append((neighbor, new_activation, current_depth + 1))
# logger.debug(
# f"节点 '{neighbor}' 被激活,激活值: {new_activation:.2f} (通过 '{current_node}' 连接,强度: {strength}, 深度: {current_depth + 1})") # noqa: E501
# 更新激活映射
for node, activation_value in activation_values.items():
if activation_value > 0:
if node in activate_map:
activate_map[node] += activation_value
else:
activate_map[node] = activation_value
# 输出激活映射
# logger.info("激活映射统计:")
# for node, total_activation in sorted(activate_map.items(), key=lambda x: x[1], reverse=True):
# logger.info(f"节点 '{node}': 累计激活值 = {total_activation:.2f}")
# 计算激活节点数与总节点数的比值
total_activation = sum(activate_map.values())
# logger.debug(f"总激活值: {total_activation:.2f}")
total_nodes = len(self.memory_graph.G.nodes())
# activated_nodes = len(activate_map)
activation_ratio = total_activation / total_nodes if total_nodes > 0 else 0
activation_ratio = activation_ratio * 50
logger.debug(f"总激活值: {total_activation:.2f}, 总节点数: {total_nodes}, 激活: {activation_ratio}")
return activation_ratio, keywords, keywords_lite
# 负责海马体与其他部分的交互
class EntorhinalCortex:
def __init__(self, hippocampus: Hippocampus):
@@ -905,11 +568,6 @@ class ParahippocampalGyrus:
self.memory_graph = hippocampus.memory_graph
class HippocampusManager:
def __init__(self):
self._hippocampus: Hippocampus = None # type: ignore
@@ -942,41 +600,6 @@ class HippocampusManager:
raise RuntimeError("HippocampusManager 尚未初始化,请先调用 initialize 方法")
return self._hippocampus
async def get_memory_from_topic(
self, valid_keywords: list[str], max_memory_num: int = 3, max_memory_length: int = 2, max_depth: int = 3
) -> list:
"""从文本中获取相关记忆的公共接口"""
if not self._initialized:
raise RuntimeError("HippocampusManager 尚未初始化,请先调用 initialize 方法")
try:
response = await self._hippocampus.get_memory_from_topic(
valid_keywords, max_memory_num, max_memory_length, max_depth
)
except Exception as e:
logger.error(f"文本激活记忆失败: {e}")
response = []
return response
async def get_activate_from_text(
self, text: str, max_depth: int = 3, fast_retrieval: bool = False
) -> tuple[float, list[str], list[str]]:
"""从文本中获取激活值的公共接口"""
if not self._initialized:
raise RuntimeError("HippocampusManager 尚未初始化,请先调用 initialize 方法")
try:
return await self._hippocampus.get_activate_from_text(text, max_depth, fast_retrieval)
except Exception as e:
logger.error(f"文本产生激活值失败: {e}")
logger.error(traceback.format_exc())
return 0.0, [], []
def get_memory_from_keyword(self, keyword: str, max_depth: int = 2) -> list:
"""从关键词获取相关记忆的公共接口"""
if not self._initialized:
raise RuntimeError("HippocampusManager 尚未初始化,请先调用 initialize 方法")
return self._hippocampus.get_memory_from_keyword(keyword, max_depth)
def get_all_node_names(self) -> list:
"""获取所有节点名称的公共接口"""
if not self._initialized:

View File

@@ -21,20 +21,6 @@ def init_prompt():
{chat_talking_prompt}
以上是群里正在进行的聊天记录
{identity_block}
你刚刚的情绪状态是{mood_state}
现在发送了消息引起了你的注意你对其进行了阅读和思考请你输出一句话描述你新的情绪状态
你的情绪特点是:{emotion_style}
请只输出新的情绪状态不要输出其他内容
""",
"change_mood_prompt",
)
Prompt(
"""
{chat_talking_prompt}
以上是群里正在进行的聊天记录
{identity_block}
你先前的情绪状态是{mood_state}
你的情绪特点是:{emotion_style}
@@ -81,78 +67,6 @@ class ChatMood:
self.last_change_time: float = 0
async def update_mood_by_message(self, message: MessageRecv):
self.regression_count = 0
during_last_time = message.message_info.time - self.last_change_time # type: ignore
base_probability = 0.05
time_multiplier = 4 * (1 - math.exp(-0.01 * during_last_time))
# 基于消息长度计算基础兴趣度
message_length = len(message.processed_plain_text or "")
interest_multiplier = min(2.0, 1.0 + message_length / 100)
logger.debug(
f"base_probability: {base_probability}, time_multiplier: {time_multiplier}, interest_multiplier: {interest_multiplier}"
)
update_probability = global_config.mood.mood_update_threshold * min(
1.0, base_probability * time_multiplier * interest_multiplier
)
if random.random() > update_probability:
return
logger.debug(f"{self.log_prefix} 更新情绪状态,更新概率: {update_probability:.2f}")
message_time: float = message.message_info.time # type: ignore
message_list_before_now = get_raw_msg_by_timestamp_with_chat_inclusive(
chat_id=self.chat_id,
timestamp_start=self.last_change_time,
timestamp_end=message_time,
limit=int(global_config.chat.max_context_size / 3),
limit_mode="last",
)
chat_talking_prompt = build_readable_messages(
message_list_before_now,
replace_bot_name=True,
timestamp_mode="normal_no_YMD",
read_mark=0.0,
truncate=True,
show_actions=True,
)
bot_name = global_config.bot.nickname
if global_config.bot.alias_names:
bot_nickname = f",也有人叫你{','.join(global_config.bot.alias_names)}"
else:
bot_nickname = ""
identity_block = f"你的名字是{bot_name}{bot_nickname}"
prompt = await global_prompt_manager.format_prompt(
"change_mood_prompt",
chat_talking_prompt=chat_talking_prompt,
identity_block=identity_block,
mood_state=self.mood_state,
emotion_style=global_config.personality.emotion_style,
)
response, (reasoning_content, _, _) = await self.mood_model.generate_response_async(
prompt=prompt, temperature=0.7
)
if global_config.debug.show_prompt:
logger.info(f"{self.log_prefix} prompt: {prompt}")
logger.info(f"{self.log_prefix} response: {response}")
logger.info(f"{self.log_prefix} reasoning_content: {reasoning_content}")
logger.info(f"{self.log_prefix} 情绪状态更新为: {response}")
self.mood_state = response
self.last_change_time = message_time
async def get_mood(self) -> str:
self.regression_count = 0
@@ -189,7 +103,7 @@ class ChatMood:
chat_talking_prompt=chat_talking_prompt,
identity_block=identity_block,
mood_state=self.mood_state,
emotion_style=global_config.personality.emotion_style,
emotion_style=global_config.mood.emotion_style,
)
response, (reasoning_content, _, _) = await self.mood_model.generate_response_async(
@@ -240,7 +154,7 @@ class ChatMood:
chat_talking_prompt=chat_talking_prompt,
identity_block=identity_block,
mood_state=self.mood_state,
emotion_style=global_config.personality.emotion_style,
emotion_style=global_config.mood.emotion_style,
)
response, (reasoning_content, _, _) = await self.mood_model.generate_response_async(
@@ -290,7 +204,6 @@ class MoodManager:
if self.task_started:
return
logger.info("启动情绪回归任务...")
task = MoodRegressionTask(self)
await async_task_manager.add_task(task)
self.task_started = True

View File

@@ -1,5 +1,5 @@
[inner]
version = "6.16.0"
version = "6.17.0"
#----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读----
#如果你想要修改配置文件请递增version的值
@@ -23,8 +23,7 @@ personality = "是一个女大学生,现在在读大二,会刷贴吧。"
#アイデンティティがない 生まれないらららら
# 描述麦麦说话的表达风格,表达习惯,如要修改,可以酌情新增内容
reply_style = "请回复的平淡一些,简短一些,说中文,不要刻意突出自身学科背景。可以参考贴吧,知乎和微博的回复风格。"
# 情感特征,影响情绪的变化情况
emotion_style = "情绪较为稳定,但遭遇特定事件的时候起伏较大"
# 麦麦的兴趣,会影响麦麦对什么话题进行回复
interest = "对技术相关话题,游戏和动漫相关话题感兴趣,也对日常话题感兴趣,不喜欢太过沉重严肃的话题"
@@ -90,6 +89,12 @@ enable_relationship = true # 是否启用关系系统
[tool]
enable_tool = true # 是否启用回复工具
[mood]
enable_mood = false # 是否启用情绪系统
mood_update_threshold = 1 # 情绪更新阈值,越高,更新越慢
# 情感特征,影响情绪的变化情况
emotion_style = "情绪较为稳定,但遭遇特定事件的时候起伏较大"
[emoji]
emoji_chance = 0.4 # 麦麦激活表情包动作的概率
max_reg_num = 100 # 表情包最大注册数量
@@ -172,7 +177,7 @@ file_log_level = "DEBUG" # 文件日志级别,可选: DEBUG, INFO, WARNING, ER
# 第三方库日志控制
suppress_libraries = ["faiss","httpx", "urllib3", "asyncio", "websockets", "httpcore", "requests", "peewee", "openai","uvicorn","jieba"] # 完全屏蔽的库
library_log_levels = { "aiohttp" = "WARNING"} # 设置特定库的日志级别
library_log_levels = { aiohttp = "WARNING"} # 设置特定库的日志级别
[debug]
show_prompt = false # 是否显示prompt

View File

@@ -9,7 +9,7 @@ base_url = "https://api.deepseek.com/v1" # API服务商的BaseURL
api_key = "your-api-key-here" # API密钥请替换为实际的API密钥
client_type = "openai" # 请求客户端(可选,默认值为"openai"使用gimini等Google系模型时请配置为"gemini"
max_retry = 2 # 最大重试次数单个模型API调用失败最多重试的次数
timeout = 30 # API请求超时时间单位
timeout = 120 # API请求超时时间单位
retry_interval = 10 # 重试间隔时间(单位:秒)
[[api_providers]] # 阿里 百炼 API服务商配置
@@ -18,7 +18,7 @@ base_url = "https://dashscope.aliyuncs.com/compatible-mode/v1"
api_key = "your-bailian-key"
client_type = "openai"
max_retry = 2
timeout = 15
timeout = 120
retry_interval = 5
[[api_providers]] # 特殊Google的Gimini使用特殊API与OpenAI格式不兼容需要配置client为"gemini"
@@ -27,7 +27,7 @@ base_url = "https://generativelanguage.googleapis.com/v1beta"
api_key = "your-google-api-key-1"
client_type = "gemini"
max_retry = 2
timeout = 30
timeout = 120
retry_interval = 10
[[api_providers]] # SiliconFlow的API服务商配置
@@ -36,7 +36,7 @@ base_url = "https://api.siliconflow.cn/v1"
api_key = "your-siliconflow-api-key"
client_type = "openai"
max_retry = 2
timeout = 60
timeout = 120
retry_interval = 10

3265
uv.lock

File diff suppressed because it is too large.