From 57c9dacb9954add27975bb4acdaf37d17d6f541d Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Tue, 27 May 2025 10:50:47 +0800
Subject: [PATCH 01/17] =?UTF-8?q?fix=EF=BC=9A=E7=AE=80=E5=8C=96=E4=BA=86?=
=?UTF-8?q?=E8=BA=AB=E4=BB=BD=E9=85=8D=E7=BD=AE?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
changelogs/changelog.md | 6 +
src/chat/memory_system/debug_memory.py | 63 ---
.../memory_system/manually_alter_memory.py | 365 ------------------
src/chat/memory_system/offline_llm.py | 126 ------
src/chat/message_receive/message_buffer.py | 2 +-
src/config/official_configs.py | 15 -
src/individuality/identity.py | 79 +---
src/individuality/individuality.py | 35 +-
template/bot_config_template.toml | 27 +-
9 files changed, 24 insertions(+), 694 deletions(-)
delete mode 100644 src/chat/memory_system/debug_memory.py
delete mode 100644 src/chat/memory_system/manually_alter_memory.py
delete mode 100644 src/chat/memory_system/offline_llm.py
diff --git a/changelogs/changelog.md b/changelogs/changelog.md
index 00bdf2af..1702392c 100644
--- a/changelogs/changelog.md
+++ b/changelogs/changelog.md
@@ -32,6 +32,12 @@
- 示例插件:禁言插件
- 示例插件:豆包绘图插件
+**人格**
+- 简化了人格身份的配置
+
+**语音**
+- 麦麦可以决定自行发送语音消息(需要搭配tts适配器)
+
**新增表达方式学习**
- 自主学习群聊中的表达方式,更贴近群友
- 可自定义的学习频率和开关
diff --git a/src/chat/memory_system/debug_memory.py b/src/chat/memory_system/debug_memory.py
deleted file mode 100644
index b09e703a..00000000
--- a/src/chat/memory_system/debug_memory.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# -*- coding: utf-8 -*-
-import asyncio
-import time
-import sys
-import os
-
-# 添加项目根目录到系统路径
-sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))))
-from src.chat.memory_system.Hippocampus import HippocampusManager
-from rich.traceback import install
-
-install(extra_lines=3)
-
-
-async def test_memory_system():
- """测试记忆系统的主要功能"""
- try:
- # 初始化记忆系统
- print("开始初始化记忆系统...")
- hippocampus_manager = HippocampusManager.get_instance()
- hippocampus_manager.initialize()
- print("记忆系统初始化完成")
-
- # 测试记忆构建
- # print("开始测试记忆构建...")
- # await hippocampus_manager.build_memory()
- # print("记忆构建完成")
-
- # 测试记忆检索
- test_text = "千石可乐在群里聊天"
-
- # test_text = '''千石可乐:分不清AI的陪伴和人类的陪伴,是这样吗?'''
- print(f"开始测试记忆检索,测试文本: {test_text}\n")
- memories = await hippocampus_manager.get_memory_from_text(
- text=test_text, max_memory_num=3, max_memory_length=2, max_depth=3, fast_retrieval=False
- )
-
- await asyncio.sleep(1)
-
- print("检索到的记忆:")
- for topic, memory_items in memories:
- print(f"主题: {topic}")
- print(f"- {memory_items}")
-
- except Exception as e:
- print(f"测试过程中出现错误: {e}")
- raise
-
-
-async def main():
- """主函数"""
- try:
- start_time = time.time()
- await test_memory_system()
- end_time = time.time()
- print(f"测试完成,总耗时: {end_time - start_time:.2f} 秒")
- except Exception as e:
- print(f"程序执行出错: {e}")
- raise
-
-
-if __name__ == "__main__":
- asyncio.run(main())
diff --git a/src/chat/memory_system/manually_alter_memory.py b/src/chat/memory_system/manually_alter_memory.py
deleted file mode 100644
index 9bbf59f5..00000000
--- a/src/chat/memory_system/manually_alter_memory.py
+++ /dev/null
@@ -1,365 +0,0 @@
-# -*- coding: utf-8 -*-
-import os
-import sys
-import time
-from pathlib import Path
-import datetime
-from rich.console import Console
-from Hippocampus import Hippocampus # 海马体和记忆图
-
-
-from dotenv import load_dotenv
-from rich.traceback import install
-
-install(extra_lines=3)
-
-
-"""
-我想 总有那么一个瞬间
-你会想和某天才变态少女助手一样
-往Bot的海马体里插上几个电极 不是吗
-
-Let's do some dirty job.
-"""
-
-# 获取当前文件的目录
-current_dir = Path(__file__).resolve().parent
-# 获取项目根目录(上三层目录)
-project_root = current_dir.parent.parent.parent
-# env.dev文件路径
-env_path = project_root / ".env.dev"
-
-# from chat.config import global_config
-root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../.."))
-sys.path.append(root_path)
-
-from src.common.logger import get_module_logger # noqa E402
-from common.database.database import db # noqa E402
-
-logger = get_module_logger("mem_alter")
-console = Console()
-
-# 加载环境变量
-if env_path.exists():
- logger.info(f"从 {env_path} 加载环境变量")
- load_dotenv(env_path)
-else:
- logger.warning(f"未找到环境变量文件: {env_path}")
- logger.info("将使用默认配置")
-
-
-# 查询节点信息
-def query_mem_info(hippocampus: Hippocampus):
- while True:
- query = input("\n请输入新的查询概念(输入'退出'以结束):")
- if query.lower() == "退出":
- break
-
- items_list = hippocampus.memory_graph.get_related_item(query)
- if items_list:
- have_memory = False
- first_layer, second_layer = items_list
- if first_layer:
- have_memory = True
- print("\n直接相关的记忆:")
- for item in first_layer:
- print(f"- {item}")
- if second_layer:
- have_memory = True
- print("\n间接相关的记忆:")
- for item in second_layer:
- print(f"- {item}")
- if not have_memory:
- print("\n未找到相关记忆。")
- else:
- print("未找到相关记忆。")
-
-
-# 增加概念节点
-def add_mem_node(hippocampus: Hippocampus):
- while True:
- concept = input("请输入节点概念名:\n")
- result = db.graph_data.nodes.count_documents({"concept": concept})
-
- if result != 0:
- console.print("[yellow]已存在名为“{concept}”的节点,行为已取消[/yellow]")
- continue
-
- memory_items = list()
- while True:
- context = input("请输入节点描述信息(输入'终止'以结束)")
- if context.lower() == "终止":
- break
- memory_items.append(context)
-
- current_time = datetime.datetime.now().timestamp()
- hippocampus.memory_graph.G.add_node(
- concept, memory_items=memory_items, created_time=current_time, last_modified=current_time
- )
-
-
-# 删除概念节点(及连接到它的边)
-def remove_mem_node(hippocampus: Hippocampus):
- concept = input("请输入节点概念名:\n")
- result = db.graph_data.nodes.count_documents({"concept": concept})
-
- if result == 0:
- console.print(f"[red]不存在名为“{concept}”的节点[/red]")
-
- edges = db.graph_data.edges.find({"$or": [{"source": concept}, {"target": concept}]})
-
- for edge in edges:
- console.print(f"[yellow]存在边“{edge['source']} -> {edge['target']}”, 请慎重考虑[/yellow]")
-
- console.print(f"[yellow]确定要移除名为“{concept}”的节点以及其相关边吗[/yellow]")
- destory = console.input(f"[red]请输入“{concept}”以删除节点 其他输入将被视为取消操作[/red]\n")
- if destory == concept:
- hippocampus.memory_graph.G.remove_node(concept)
- else:
- logger.info("[green]删除操作已取消[/green]")
-
-
-# 增加节点间边
-def add_mem_edge(hippocampus: Hippocampus):
- while True:
- source = input("请输入 **第一个节点** 名称(输入'退出'以结束):\n")
- if source.lower() == "退出":
- break
- if db.graph_data.nodes.count_documents({"concept": source}) == 0:
- console.print(f"[yellow]“{source}”节点不存在,操作已取消。[/yellow]")
- continue
-
- target = input("请输入 **第二个节点** 名称:\n")
- if db.graph_data.nodes.count_documents({"concept": target}) == 0:
- console.print(f"[yellow]“{target}”节点不存在,操作已取消。[/yellow]")
- continue
-
- if source == target:
- console.print(f"[yellow]试图创建“{source} <-> {target}”自环,操作已取消。[/yellow]")
- continue
-
- hippocampus.memory_graph.connect_dot(source, target)
- edge = hippocampus.memory_graph.G.get_edge_data(source, target)
- if edge["strength"] == 1:
- console.print(f"[green]成功创建边“{source} <-> {target}”,默认权重1[/green]")
- else:
- console.print(
- f"[yellow]边“{source} <-> {target}”已存在,"
- f"更新权重: {edge['strength'] - 1} <-> {edge['strength']}[/yellow]"
- )
-
-
-# 删除节点间边
-def remove_mem_edge(hippocampus: Hippocampus):
- while True:
- source = input("请输入 **第一个节点** 名称(输入'退出'以结束):\n")
- if source.lower() == "退出":
- break
- if db.graph_data.nodes.count_documents({"concept": source}) == 0:
- console.print("[yellow]“{source}”节点不存在,操作已取消。[/yellow]")
- continue
-
- target = input("请输入 **第二个节点** 名称:\n")
- if db.graph_data.nodes.count_documents({"concept": target}) == 0:
- console.print("[yellow]“{target}”节点不存在,操作已取消。[/yellow]")
- continue
-
- if source == target:
- console.print("[yellow]试图创建“{source} <-> {target}”自环,操作已取消。[/yellow]")
- continue
-
- edge = hippocampus.memory_graph.G.get_edge_data(source, target)
- if edge is None:
- console.print("[yellow]边“{source} <-> {target}”不存在,操作已取消。[/yellow]")
- continue
- else:
- accept = console.input("[orange]请输入“确认”以确认删除操作(其他输入视为取消)[/orange]\n")
- if accept.lower() == "确认":
- hippocampus.memory_graph.G.remove_edge(source, target)
- console.print(f"[green]边“{source} <-> {target}”已删除。[green]")
-
-
-# 修改节点信息
-def alter_mem_node(hippocampus: Hippocampus):
- batch_environment = dict()
- while True:
- concept = input("请输入节点概念名(输入'终止'以结束):\n")
- if concept.lower() == "终止":
- break
- _, node = hippocampus.memory_graph.get_dot(concept)
- if node is None:
- console.print(f"[yellow]“{concept}”节点不存在,操作已取消。[/yellow]")
- continue
-
- console.print("[yellow]注意,请确保你知道自己在做什么[/yellow]")
- console.print("[yellow]你将获得一个执行任意代码的环境[/yellow]")
- console.print("[red]你已经被警告过了。[/red]\n")
-
- node_environment = {"concept": "<节点名>", "memory_items": "<记忆文本数组>"}
- console.print(
- "[green]环境变量中会有env与batchEnv两个dict, env在切换节点时会清空, batchEnv在操作终止时才会清空[/green]"
- )
- console.print(
- f"[green] env 会被初始化为[/green]\n{node_environment}\n[green]且会在用户代码执行完毕后被提交 [/green]"
- )
- console.print(
- "[yellow]为便于书写临时脚本,请手动在输入代码通过Ctrl+C等方式触发KeyboardInterrupt来结束代码执行[/yellow]"
- )
-
- # 拷贝数据以防操作炸了
- node_environment = dict(node)
- node_environment["concept"] = concept
-
- while True:
-
- def user_exec(script, env, batch_env):
- return eval(script, env, batch_env)
-
- try:
- command = console.input()
- except KeyboardInterrupt:
- # 稍微防一下小天才
- try:
- if isinstance(node_environment["memory_items"], list):
- node["memory_items"] = node_environment["memory_items"]
- else:
- raise Exception
-
- except Exception as e:
- console.print(
- f"[red]我不知道你做了什么,但显然nodeEnviroment['memory_items']已经不是个数组了,"
- f"操作已取消: {str(e)}[/red]"
- )
- break
-
- try:
- user_exec(command, node_environment, batch_environment)
- except Exception as e:
- console.print(e)
- console.print(
- "[red]自定义代码执行时发生异常,已捕获,请重试(可通过 console.print(locals()) 检查环境状态)[/red]"
- )
-
-
-# 修改边信息
-def alter_mem_edge(hippocampus: Hippocampus):
- batch_enviroment = dict()
- while True:
- source = input("请输入 **第一个节点** 名称(输入'终止'以结束):\n")
- if source.lower() == "终止":
- break
- if hippocampus.memory_graph.get_dot(source) is None:
- console.print(f"[yellow]“{source}”节点不存在,操作已取消。[/yellow]")
- continue
-
- target = input("请输入 **第二个节点** 名称:\n")
- if hippocampus.memory_graph.get_dot(target) is None:
- console.print(f"[yellow]“{target}”节点不存在,操作已取消。[/yellow]")
- continue
-
- edge = hippocampus.memory_graph.G.get_edge_data(source, target)
- if edge is None:
- console.print(f"[yellow]边“{source} <-> {target}”不存在,操作已取消。[/yellow]")
- continue
-
- console.print("[yellow]注意,请确保你知道自己在做什么[/yellow]")
- console.print("[yellow]你将获得一个执行任意代码的环境[/yellow]")
- console.print("[red]你已经被警告过了。[/red]\n")
-
- edge_environment = {"source": "<节点名>", "target": "<节点名>", "strength": "<强度值,装在一个list里>"}
- console.print(
- "[green]环境变量中会有env与batchEnv两个dict, env在切换节点时会清空, batchEnv在操作终止时才会清空[/green]"
- )
- console.print(
- f"[green] env 会被初始化为[/green]\n{edge_environment}\n[green]且会在用户代码执行完毕后被提交 [/green]"
- )
- console.print(
- "[yellow]为便于书写临时脚本,请手动在输入代码通过Ctrl+C等方式触发KeyboardInterrupt来结束代码执行[/yellow]"
- )
-
- # 拷贝数据以防操作炸了
- edge_environment["strength"] = [edge["strength"]]
- edge_environment["source"] = source
- edge_environment["target"] = target
-
- while True:
-
- def user_exec(script, env, batch_env):
- return eval(script, env, batch_env)
-
- try:
- command = console.input()
- except KeyboardInterrupt:
- # 稍微防一下小天才
- try:
- if isinstance(edge_environment["strength"][0], int):
- edge["strength"] = edge_environment["strength"][0]
- else:
- raise Exception
-
- except Exception as e:
- console.print(
- f"[red]我不知道你做了什么,但显然edgeEnviroment['strength']已经不是个int了,"
- f"操作已取消: {str(e)}[/red]"
- )
- break
-
- try:
- user_exec(command, edge_environment, batch_enviroment)
- except Exception as e:
- console.print(e)
- console.print(
- "[red]自定义代码执行时发生异常,已捕获,请重试(可通过 console.print(locals()) 检查环境状态)[/red]"
- )
-
-
-async def main():
- start_time = time.time()
-
- # 创建海马体
- hippocampus = Hippocampus()
-
- # 从数据库同步数据
- hippocampus.entorhinal_cortex.sync_memory_from_db()
-
- end_time = time.time()
- logger.info(f"\033[32m[加载海马体耗时: {end_time - start_time:.2f} 秒]\033[0m")
-
- while True:
- try:
- query = int(
- input(
- """请输入操作类型
-0 -> 查询节点; 1 -> 增加节点; 2 -> 移除节点; 3 -> 增加边; 4 -> 移除边;
-5 -> 修改节点; 6 -> 修改边; 其他任意输入 -> 退出
-"""
- )
- )
- except ValueError:
- query = -1
-
- if query == 0:
- query_mem_info(hippocampus.memory_graph)
- elif query == 1:
- add_mem_node(hippocampus)
- elif query == 2:
- remove_mem_node(hippocampus)
- elif query == 3:
- add_mem_edge(hippocampus)
- elif query == 4:
- remove_mem_edge(hippocampus)
- elif query == 5:
- alter_mem_node(hippocampus)
- elif query == 6:
- alter_mem_edge(hippocampus)
- else:
- print("已结束操作")
- break
-
- hippocampus.entorhinal_cortex.sync_memory_to_db()
-
-
-if __name__ == "__main__":
- import asyncio
-
- asyncio.run(main())
diff --git a/src/chat/memory_system/offline_llm.py b/src/chat/memory_system/offline_llm.py
deleted file mode 100644
index d4862ad3..00000000
--- a/src/chat/memory_system/offline_llm.py
+++ /dev/null
@@ -1,126 +0,0 @@
-import asyncio
-import os
-import time
-from typing import Tuple, Union
-
-import aiohttp
-import requests
-from src.common.logger import get_module_logger
-from rich.traceback import install
-
-install(extra_lines=3)
-
-logger = get_module_logger("offline_llm")
-
-
-class LLMRequestOff:
- def __init__(self, model_name="deepseek-ai/DeepSeek-V3", **kwargs):
- self.model_name = model_name
- self.params = kwargs
- self.api_key = os.getenv("SILICONFLOW_KEY")
- self.base_url = os.getenv("SILICONFLOW_BASE_URL")
-
- if not self.api_key or not self.base_url:
- raise ValueError("环境变量未正确加载:SILICONFLOW_KEY 或 SILICONFLOW_BASE_URL 未设置")
-
- logger.info(f"API URL: {self.base_url}") # 使用 logger 记录 base_url
-
- def generate_response(self, prompt: str) -> Union[str, Tuple[str, str]]:
- """根据输入的提示生成模型的响应"""
- headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}
-
- # 构建请求体
- data = {
- "model": self.model_name,
- "messages": [{"role": "user", "content": prompt}],
- "temperature": 0.5,
- **self.params,
- }
-
- # 发送请求到完整的 chat/completions 端点
- api_url = f"{self.base_url.rstrip('/')}/chat/completions"
- logger.info(f"Request URL: {api_url}") # 记录请求的 URL
-
- max_retries = 3
- base_wait_time = 15 # 基础等待时间(秒)
-
- for retry in range(max_retries):
- try:
- response = requests.post(api_url, headers=headers, json=data)
-
- if response.status_code == 429:
- wait_time = base_wait_time * (2**retry) # 指数退避
- logger.warning(f"遇到请求限制(429),等待{wait_time}秒后重试...")
- time.sleep(wait_time)
- continue
-
- response.raise_for_status() # 检查其他响应状态
-
- result = response.json()
- if "choices" in result and len(result["choices"]) > 0:
- content = result["choices"][0]["message"]["content"]
- reasoning_content = result["choices"][0]["message"].get("reasoning_content", "")
- return content, reasoning_content
- return "没有返回结果", ""
-
- except Exception as e:
- if retry < max_retries - 1: # 如果还有重试机会
- wait_time = base_wait_time * (2**retry)
- logger.error(f"[回复]请求失败,等待{wait_time}秒后重试... 错误: {str(e)}")
- time.sleep(wait_time)
- else:
- logger.error(f"请求失败: {str(e)}")
- return f"请求失败: {str(e)}", ""
-
- logger.error("达到最大重试次数,请求仍然失败")
- return "达到最大重试次数,请求仍然失败", ""
-
- async def generate_response_async(self, prompt: str) -> Union[str, Tuple[str, str]]:
- """异步方式根据输入的提示生成模型的响应"""
- headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}
-
- # 构建请求体
- data = {
- "model": self.model_name,
- "messages": [{"role": "user", "content": prompt}],
- "temperature": 0.5,
- **self.params,
- }
-
- # 发送请求到完整的 chat/completions 端点
- api_url = f"{self.base_url.rstrip('/')}/chat/completions"
- logger.info(f"Request URL: {api_url}") # 记录请求的 URL
-
- max_retries = 3
- base_wait_time = 15
-
- async with aiohttp.ClientSession() as session:
- for retry in range(max_retries):
- try:
- async with session.post(api_url, headers=headers, json=data) as response:
- if response.status == 429:
- wait_time = base_wait_time * (2**retry) # 指数退避
- logger.warning(f"遇到请求限制(429),等待{wait_time}秒后重试...")
- await asyncio.sleep(wait_time)
- continue
-
- response.raise_for_status() # 检查其他响应状态
-
- result = await response.json()
- if "choices" in result and len(result["choices"]) > 0:
- content = result["choices"][0]["message"]["content"]
- reasoning_content = result["choices"][0]["message"].get("reasoning_content", "")
- return content, reasoning_content
- return "没有返回结果", ""
-
- except Exception as e:
- if retry < max_retries - 1: # 如果还有重试机会
- wait_time = base_wait_time * (2**retry)
- logger.error(f"[回复]请求失败,等待{wait_time}秒后重试... 错误: {str(e)}")
- await asyncio.sleep(wait_time)
- else:
- logger.error(f"请求失败: {str(e)}")
- return f"请求失败: {str(e)}", ""
-
- logger.error("达到最大重试次数,请求仍然失败")
- return "达到最大重试次数,请求仍然失败", ""
diff --git a/src/chat/message_receive/message_buffer.py b/src/chat/message_receive/message_buffer.py
index 2df256ce..f513b22a 100644
--- a/src/chat/message_receive/message_buffer.py
+++ b/src/chat/message_receive/message_buffer.py
@@ -1,4 +1,4 @@
-from ..person_info.person_info import person_info_manager
+from src.person_info.person_info import person_info_manager
from src.common.logger_manager import get_logger
import asyncio
from dataclasses import dataclass, field
diff --git a/src/config/official_configs.py b/src/config/official_configs.py
index a5b7805a..519e4430 100644
--- a/src/config/official_configs.py
+++ b/src/config/official_configs.py
@@ -41,21 +41,6 @@ class PersonalityConfig(ConfigBase):
class IdentityConfig(ConfigBase):
"""个体特征配置类"""
- height: int = 170
- """身高(单位:厘米)"""
-
- weight: float = 50
- """体重(单位:千克)"""
-
- age: int = 18
- """年龄(单位:岁)"""
-
- gender: str = "女"
- """性别(男/女)"""
-
- appearance: str = "可爱"
- """外貌描述"""
-
identity_detail: list[str] = field(default_factory=lambda: [])
"""身份特征"""
diff --git a/src/individuality/identity.py b/src/individuality/identity.py
index f79da547..bb312598 100644
--- a/src/individuality/identity.py
+++ b/src/individuality/identity.py
@@ -7,99 +7,24 @@ class Identity:
"""身份特征类"""
identity_detail: List[str] # 身份细节描述
- height: int # 身高(厘米)
- weight: float # 体重(千克)
- age: int # 年龄
- gender: str # 性别
- appearance: str # 外貌特征
- _instance = None
-
- def __new__(cls, *args, **kwargs):
- if cls._instance is None:
- cls._instance = super().__new__(cls)
- return cls._instance
-
- def __init__(
- self,
- identity_detail: List[str] = None,
- height: int = 0,
- weight: float = 0,
- age: int = 0,
- gender: str = "",
- appearance: str = "",
- ):
+ def __init__(self, identity_detail: List[str] = None):
"""初始化身份特征
Args:
identity_detail: 身份细节描述列表
- height: 身高(厘米)
- weight: 体重(千克)
- age: 年龄
- gender: 性别
- appearance: 外貌特征
"""
if identity_detail is None:
identity_detail = []
self.identity_detail = identity_detail
- self.height = height
- self.weight = weight
- self.age = age
- self.gender = gender
- self.appearance = appearance
-
- @classmethod
- def get_instance(cls) -> "Identity":
- """获取Identity单例实例
-
- Returns:
- Identity: 单例实例
- """
- if cls._instance is None:
- cls._instance = cls()
- return cls._instance
-
- @classmethod
- def initialize(
- cls, identity_detail: List[str], height: int, weight: float, age: int, gender: str, appearance: str
- ) -> "Identity":
- """初始化身份特征
-
- Args:
- identity_detail: 身份细节描述列表
- height: 身高(厘米)
- weight: 体重(千克)
- age: 年龄
- gender: 性别
- appearance: 外貌特征
-
- Returns:
- Identity: 初始化后的身份特征实例
- """
- instance = cls.get_instance()
- instance.identity_detail = identity_detail
- instance.height = height
- instance.weight = weight
- instance.age = age
- instance.gender = gender
- instance.appearance = appearance
- return instance
def to_dict(self) -> dict:
"""将身份特征转换为字典格式"""
return {
"identity_detail": self.identity_detail,
- "height": self.height,
- "weight": self.weight,
- "age": self.age,
- "gender": self.gender,
- "appearance": self.appearance,
}
@classmethod
def from_dict(cls, data: dict) -> "Identity":
"""从字典创建身份特征实例"""
- instance = cls.get_instance()
- for key, value in data.items():
- setattr(instance, key, value)
- return instance
+ return cls(identity_detail=data.get("identity_detail", []))
diff --git a/src/individuality/individuality.py b/src/individuality/individuality.py
index ba462c5e..d6682fd0 100644
--- a/src/individuality/individuality.py
+++ b/src/individuality/individuality.py
@@ -1,6 +1,4 @@
from typing import Optional
-
-from numpy import double
from .personality import Personality
from .identity import Identity
from .expression_style import PersonalityExpression
@@ -27,11 +25,6 @@ class Individuality:
personality_core: str,
personality_sides: list,
identity_detail: list,
- height: int,
- weight: double,
- age: int,
- gender: str,
- appearance: str,
) -> None:
"""初始化个体特征
@@ -40,11 +33,6 @@ class Individuality:
personality_core: 人格核心特点
personality_sides: 人格侧面描述
identity_detail: 身份细节描述
- height: 身高(厘米)
- weight: 体重(千克)
- age: 年龄
- gender: 性别
- appearance: 外貌特征
"""
# 初始化人格
self.personality = Personality.initialize(
@@ -52,9 +40,7 @@ class Individuality:
)
# 初始化身份
- self.identity = Identity.initialize(
- identity_detail=identity_detail, height=height, weight=weight, age=age, gender=gender, appearance=appearance
- )
+ self.identity = Identity(identity_detail=identity_detail)
await self.express_style.extract_and_store_personality_expressions()
@@ -120,7 +106,7 @@ class Individuality:
获取身份特征的prompt
Args:
- level (int): 详细程度 (1: 随机细节, 2: 所有细节+外貌年龄性别, 3: 同2)
+ level (int): 详细程度 (1: 随机细节, 2: 所有细节, 3: 同2)
x_person (int, optional): 人称代词 (0: 无人称, 1: 我, 2: 你). 默认为 2.
Returns:
@@ -145,23 +131,10 @@ class Individuality:
identity_detail = list(self.identity.identity_detail)
random.shuffle(identity_detail)
if level == 1:
- identity_parts.append(f"身份是{identity_detail[0]}")
+ identity_parts.append(f"{identity_detail[0]}")
elif level >= 2:
details_str = "、".join(identity_detail)
- identity_parts.append(f"身份是{details_str}")
-
- # 根据level添加其他身份信息
- if level >= 3:
- if self.identity.appearance:
- identity_parts.append(f"{self.identity.appearance}")
- if self.identity.age > 0:
- identity_parts.append(f"年龄大约{self.identity.age}岁")
- if self.identity.gender:
- identity_parts.append(f"性别是{self.identity.gender}")
- if self.identity.height:
- identity_parts.append(f"身高大约{self.identity.height}厘米")
- if self.identity.weight:
- identity_parts.append(f"体重大约{self.identity.weight}千克")
+ identity_parts.append(f"{details_str}")
if identity_parts:
details_str = ",".join(identity_parts)
diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml
index ef6bbfa5..a9f53a74 100644
--- a/template/bot_config_template.toml
+++ b/template/bot_config_template.toml
@@ -1,5 +1,5 @@
[inner]
-version = "2.4.0"
+version = "2.5.0"
#----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读----
#如果你想要修改配置文件,请在修改后将version的值进行变更
@@ -25,21 +25,18 @@ personality_sides = [
"用一句话或几句话描述人格的一些细节",
"用一句话或几句话描述人格的一些细节",
"用一句话或几句话描述人格的一些细节",
-]# 条数任意,不能为0, 该选项还在调试中,可能未完全生效
-
-# 身份特点 部分选项仅在 专注聊天 有效
-[identity] #アイデンティティがない 生まれないらららら
-identity_detail = [
- "身份特点",
- "身份特点",
]# 条数任意,不能为0
-#外貌特征
-age = 18 # 年龄 单位岁
-gender = "女" # 性别
-height = "170" # 身高(单位cm)
-weight = "50" # 体重(单位kg)
-appearance = "用一句或几句话描述外貌特征" # 外貌特征
+# 身份特点
+[identity] #アイデンティティがない 生まれないらららら
+identity_detail = [
+ "年龄为19岁",
+ "是女孩子",
+ "身高为160cm",
+ "有橙色的短发",
+]
+# 可以描述外贸,性别,身高,职业,属性等等描述
+# 条数任意,不能为0
[chat] #麦麦的聊天通用设置
chat_mode = "normal" # 聊天模式 —— 普通模式:normal,专注模式:focus,在普通模式和专注模式之间自动切换
@@ -96,8 +93,6 @@ self_identify_processor = true # 是否启用自我识别处理器
tool_use_processor = true # 是否启用工具使用处理器
working_memory_processor = true # 是否启用工作记忆处理器
-
-
[expression]
# 表达方式
expression_style = "描述麦麦说话的表达风格,表达习惯"
From 890a7b650543ec319ff8f2e7445832479e036572 Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Tue, 27 May 2025 12:41:23 +0800
Subject: [PATCH 02/17] =?UTF-8?q?feat=EF=BC=9A=E4=BF=AE=E5=A4=8D=E4=BA=86a?=
=?UTF-8?q?ction=E5=8F=98=E6=9B=B4=EF=BC=8C=E4=BF=AE=E6=94=B9=E4=BA=86?=
=?UTF-8?q?=E9=BB=98=E8=AE=A4=E9=85=8D=E7=BD=AE=EF=BC=8C=E6=8F=90=E5=8D=87?=
=?UTF-8?q?=E7=89=88=E6=9C=AC=E5=8F=B7?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
changelogs/changelog.md | 40 ++++++++--------
src/chat/emoji_system/emoji_manager.py | 11 +++--
.../expressors/exprssion_learner.py | 2 +-
.../info_processors/action_processor.py | 47 ++++++++++++-------
.../info_processors/self_processor.py | 30 +++++++++++-
src/chat/focus_chat/planners/planner.py | 33 +++++++++++--
.../observation/hfcloop_observation.py | 6 ---
src/chat/normal_chat/normal_chat_generator.py | 2 +-
.../normal_prompt.py} | 34 +++++++-------
src/chat/utils/utils_image.py | 6 +--
src/common/logger.py | 4 +-
src/config/config.py | 2 +-
src/config/official_configs.py | 3 ++
src/individuality/expression_style.py | 2 +-
src/llm_models/utils_model.py | 3 +-
src/main.py | 5 --
src/person_info/relationship_manager.py | 19 ++++++--
.../test_plugin_pic/actions/pic_action.py | 2 +-
template/bot_config_template.toml | 2 +-
19 files changed, 163 insertions(+), 90 deletions(-)
rename src/chat/{focus_chat/heartflow_prompt_builder.py => normal_chat/normal_prompt.py} (94%)
diff --git a/changelogs/changelog.md b/changelogs/changelog.md
index 1702392c..17825e38 100644
--- a/changelogs/changelog.md
+++ b/changelogs/changelog.md
@@ -2,7 +2,8 @@
## [0.7.0] -2025-6-1
- 重构数据库,弃用MongoDB,采用轻量sqlite,无需额外安装
-- 重构HFC,可扩展的聊天模式
+- 重构HFC,可扩展的聊天模式,支持独立的表达模式
+- HFC,丰富HFC的决策信息,更好的把握聊天内容
- HFC初步支持插件v0.1(测试版)
- 重构表情包模块
- 移除日程系统
@@ -26,6 +27,20 @@
- 插件:禁言动作
- 表达器:装饰语言风格
- 可通过插件添加和自定义HFC部件(目前只支持action定义)
+- 为专注模式添加关系线索
+- 在专注模式下,麦麦可以决定自行发送语音消息(需要搭配tts适配器)
+- 优化reply,减少复读
+
+**新增表达方式学习**
+- 在专注模式下,麦麦可以有独特的表达方式
+- 自主学习群聊中的表达方式,更贴近群友
+- 可自定义的学习频率和开关
+- 根据人设生成额外的表达方式
+
+**聊天管理**
+- 移除不在线状态
+- 大幅精简聊天状态切换规则,减少复杂度
+- 移除聊天限额数量
**插件系统**
- 添加示例插件
@@ -35,27 +50,14 @@
**人格**
- 简化了人格身份的配置
-**语音**
-- 麦麦可以决定自行发送语音消息(需要搭配tts适配器)
-
-**新增表达方式学习**
-- 自主学习群聊中的表达方式,更贴近群友
-- 可自定义的学习频率和开关
-- 根据人设生成额外的表达方式
-
-**聊天管理**
- - 移除不在线状态
- - 大幅精简聊天状态切换规则,减少复杂度
- - 移除聊天限额数量
-
**数据库重构**
- - 移除了默认使用MongoDB,采用轻量sqlite
- - 无需额外安装数据库
- - 提供迁移脚本
+- 移除了默认使用MongoDB,采用轻量sqlite
+- 无需额外安装数据库
+- 提供迁移脚本
**优化**
- - 移除日程系统,减少幻觉(将会在未来版本回归)
- - 移除主心流思考和LLM进入聊天判定
+- 移除日程系统,减少幻觉(将会在未来版本回归)
+- 移除主心流思考和LLM进入聊天判定
## [0.6.3-fix-4] - 2025-5-18
diff --git a/src/chat/emoji_system/emoji_manager.py b/src/chat/emoji_system/emoji_manager.py
index 51275c9b..6d8d4fbc 100644
--- a/src/chat/emoji_system/emoji_manager.py
+++ b/src/chat/emoji_system/emoji_manager.py
@@ -149,7 +149,7 @@ class MaiEmoji:
emotion_str = ",".join(self.emotion) if self.emotion else ""
Emoji.create(
- hash=self.hash,
+ emoji_hash=self.hash,
full_path=self.full_path,
format=self.format,
description=self.description,
@@ -367,7 +367,9 @@ class EmojiManager:
return cls._instance
def __init__(self) -> None:
- self._initialized = None
+ if self._initialized:
+ return # 如果已经初始化过,直接返回
+
self._scan_task = None
self.vlm = LLMRequest(model=global_config.model.vlm, temperature=0.3, max_tokens=1000, request_type="emoji")
@@ -389,6 +391,7 @@ class EmojiManager:
raise RuntimeError("数据库连接失败")
_ensure_emoji_dir()
Emoji.create_table(safe=True) # Ensures table exists
+ self._initialized = True
def _ensure_db(self) -> None:
"""确保数据库已初始化"""
@@ -467,7 +470,7 @@ class EmojiManager:
selected_emoji, similarity, matched_emotion = random.choice(top_emojis)
# 更新使用次数
- self.record_usage(selected_emoji.emoji_hash)
+ self.record_usage(selected_emoji.hash)
_time_end = time.time()
@@ -796,7 +799,7 @@ class EmojiManager:
# 删除选定的表情包
logger.info(f"[决策] 删除表情包: {emoji_to_delete.description}")
- delete_success = await self.delete_emoji(emoji_to_delete.emoji_hash)
+ delete_success = await self.delete_emoji(emoji_to_delete.hash)
if delete_success:
# 修复:等待异步注册完成
diff --git a/src/chat/focus_chat/expressors/exprssion_learner.py b/src/chat/focus_chat/expressors/exprssion_learner.py
index 31cb5d13..2ad0e68c 100644
--- a/src/chat/focus_chat/expressors/exprssion_learner.py
+++ b/src/chat/focus_chat/expressors/exprssion_learner.py
@@ -5,7 +5,7 @@ from src.common.logger_manager import get_logger
from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config
from src.chat.utils.chat_message_builder import get_raw_msg_by_timestamp_random, build_anonymous_messages
-from src.chat.focus_chat.heartflow_prompt_builder import Prompt, global_prompt_manager
+from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
import os
import json
diff --git a/src/chat/focus_chat/info_processors/action_processor.py b/src/chat/focus_chat/info_processors/action_processor.py
index 3ef38914..6c78dd78 100644
--- a/src/chat/focus_chat/info_processors/action_processor.py
+++ b/src/chat/focus_chat/info_processors/action_processor.py
@@ -56,45 +56,57 @@ class ActionProcessor(BaseProcessor):
all_actions = None
hfc_obs = None
chat_obs = None
+
+ # 收集所有观察对象
for obs in observations:
if isinstance(obs, HFCloopObservation):
hfc_obs = obs
if isinstance(obs, ChattingObservation):
chat_obs = obs
+
+ # 合并所有动作变更
+ merged_action_changes = {"add": [], "remove": []}
+ reasons = []
+
+ # 处理HFCloopObservation
if hfc_obs:
obs = hfc_obs
- # 创建动作信息
all_actions = obs.all_actions
action_changes = await self.analyze_loop_actions(obs)
if action_changes["add"] or action_changes["remove"]:
- action_info.set_action_changes(action_changes)
- # 设置变更原因
- reasons = []
+ # 合并动作变更
+ merged_action_changes["add"].extend(action_changes["add"])
+ merged_action_changes["remove"].extend(action_changes["remove"])
+
+ # 收集变更原因
if action_changes["add"]:
reasons.append(f"添加动作{action_changes['add']}因为检测到大量无回复")
if action_changes["remove"]:
reasons.append(f"移除动作{action_changes['remove']}因为检测到连续回复")
- action_info.set_reason(" | ".join(reasons))
+
+ # 处理ChattingObservation
if chat_obs and all_actions is not None:
obs = chat_obs
- action_changes = {"add": [], "remove": []}
# 检查动作的关联类型
chat_context = chat_manager.get_stream(obs.chat_id).context
+ type_mismatched_actions = []
+
for action_name in all_actions.keys():
data = all_actions[action_name]
if data.get("associated_types"):
if not chat_context.check_types(data["associated_types"]):
- action_changes["remove"].append(action_name)
+ type_mismatched_actions.append(action_name)
logger.debug(f"{self.log_prefix} 动作 {action_name} 关联类型不匹配,移除该动作")
- if len(action_changes["remove"]) > 0:
- action_info.set_action_changes(action_changes)
- # 设置变更原因
- reasons = []
- if action_info.get_reason():
- reasons.append(action_info.get_reason())
- if action_changes["remove"]:
- reasons.append(f"移除动作{action_changes['remove']}因为关联类型不匹配")
- action_info.set_reason(" | ".join(reasons))
+
+ if type_mismatched_actions:
+ # 合并到移除列表中
+ merged_action_changes["remove"].extend(type_mismatched_actions)
+ reasons.append(f"移除动作{type_mismatched_actions}因为关联类型不匹配")
+
+ # 如果有任何动作变更,设置到action_info中
+ if merged_action_changes["add"] or merged_action_changes["remove"]:
+ action_info.set_action_changes(merged_action_changes)
+ action_info.set_reason(" | ".join(reasons))
processed_infos.append(action_info)
@@ -129,7 +141,8 @@ class ActionProcessor(BaseProcessor):
# 检查no_reply比例
if len(recent_cycles) >= 5 and (no_reply_count / len(recent_cycles)) >= 0.8:
- result["add"].append("exit_focus_chat")
+ if global_config.chat.chat_mode == "auto":
+ result["add"].append("exit_focus_chat")
# 获取最近三次的reply状态
last_three = reply_sequence[-3:] if len(reply_sequence) >= 3 else reply_sequence
diff --git a/src/chat/focus_chat/info_processors/self_processor.py b/src/chat/focus_chat/info_processors/self_processor.py
index 692c5207..9d0643d6 100644
--- a/src/chat/focus_chat/info_processors/self_processor.py
+++ b/src/chat/focus_chat/info_processors/self_processor.py
@@ -15,6 +15,7 @@ from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservati
from typing import Dict
from src.chat.focus_chat.info.info_base import InfoBase
from src.chat.focus_chat.info.self_info import SelfInfo
+from src.chat.utils.utils import get_recent_group_speaker
logger = get_logger("processor")
@@ -22,7 +23,7 @@ logger = get_logger("processor")
def init_prompt():
indentify_prompt = """
{name_block}
-你是一个AI,但是你伪装成了一个人类,你的人格是,{prompt_personality}。
+你是一个AI,但是你伪装成了一个人类,你的人格是,{prompt_personality}
{indentify_block}
{relation_prompt}
@@ -100,12 +101,27 @@ class SelfProcessor(BaseProcessor):
如果return_prompt为True:
tuple: (current_mind, past_mind, prompt) 当前想法、过去的想法列表和使用的prompt
"""
+
+
+ for observation in observations:
+ if isinstance(observation, ChattingObservation):
+ is_group_chat = observation.is_group_chat
+ chat_target_info = observation.chat_target_info
+ chat_target_name = "对方" # 私聊默认名称
+ person_list = observation.person_list
memory_str = ""
if running_memorys:
memory_str = "以下是当前在聊天中,你回忆起的记忆:\n"
for running_memory in running_memorys:
memory_str += f"{running_memory['topic']}: {running_memory['content']}\n"
+
+
+ relation_prompt = ""
+ for person in person_list:
+ if len(person) >= 3 and person[0] and person[1]:
+ relation_prompt += await relationship_manager.build_relationship_info(person,is_id=True)
+
if observations is None:
observations = []
@@ -135,9 +151,17 @@ class SelfProcessor(BaseProcessor):
personality_block = individuality.get_personality_prompt(x_person=2, level=2)
identity_block = individuality.get_identity_prompt(x_person=2, level=2)
- relation_prompt = ""
+ if is_group_chat:
+ relation_prompt_init = "在这个群聊中,你:\n"
+ else:
+ relation_prompt_init = ""
for person in person_list:
relation_prompt += await relationship_manager.build_relationship_info(person, is_id=True)
+ if relation_prompt:
+ relation_prompt = relation_prompt_init + relation_prompt
+ else:
+ relation_prompt = relation_prompt_init + "没有特别在意的人\n"
+
prompt = (await global_prompt_manager.get_prompt_async("indentify_prompt")).format(
name_block=name_block,
@@ -148,6 +172,8 @@ class SelfProcessor(BaseProcessor):
time_now=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
chat_observe_info=chat_observe_info,
)
+
+ # print(prompt)
content = ""
try:
diff --git a/src/chat/focus_chat/planners/planner.py b/src/chat/focus_chat/planners/planner.py
index 5581d06f..267522c6 100644
--- a/src/chat/focus_chat/planners/planner.py
+++ b/src/chat/focus_chat/planners/planner.py
@@ -36,6 +36,8 @@ def init_prompt():
{mind_info_block}
{cycle_info_block}
+{action_available_block}
+
请综合分析聊天内容和你看到的新消息,参考聊天规划,选择合适的action:
{action_options_text}
@@ -43,6 +45,8 @@ def init_prompt():
你必须从上面列出的可用action中选择一个,并说明原因。
你的决策必须以严格的 JSON 格式输出,且仅包含 JSON 内容,不要有任何其他文字或解释。
+{moderation_prompt}
+
请你以下面格式输出你选择的action:
{{
"action": "action_name",
@@ -104,6 +108,7 @@ class ActionPlanner:
add_actions = info.get_add_actions()
remove_actions = info.get_remove_actions()
reason = info.get_reason()
+ print(f"{self.log_prefix} 动作变更: {add_actions} {remove_actions} {reason}")
# 处理动作的增加
for action_name in add_actions:
@@ -120,6 +125,14 @@ class ActionPlanner:
if action in remove_actions:
action = "no_reply"
reasoning = f"之前选择的动作{action}已被移除,原因: {reason}"
+
+ using_actions = self.action_manager.get_using_actions()
+ action_available_block = ""
+ for action_name, action_info in using_actions.items():
+ action_description = action_info["description"]
+ action_available_block += f"\n你在聊天中可以使用{action_name},这个动作的描述是{action_description}\n"
+ action_available_block += "注意,除了上述动作选项之外,你在群聊里不能做其他任何事情,这是你能力的边界\n"
+
# 继续处理其他信息
for info in all_plan_info:
@@ -142,11 +155,11 @@ class ActionPlanner:
# 获取当前可用的动作
current_available_actions = self.action_manager.get_using_actions()
- # 如果没有可用动作,直接返回no_reply
- if not current_available_actions:
- logger.warning(f"{self.log_prefix}没有可用的动作,将使用no_reply")
+ # 如果没有可用动作或只有no_reply动作,直接返回no_reply
+ if not current_available_actions or (len(current_available_actions) == 1 and "no_reply" in current_available_actions):
action = "no_reply"
- reasoning = "没有可用的动作"
+ reasoning = "没有可用的动作" if not current_available_actions else "只有no_reply动作可用,跳过规划"
+ logger.info(f"{self.log_prefix}{reasoning}")
return {
"action_result": {"action_type": action, "action_data": action_data, "reasoning": reasoning},
"current_mind": current_mind,
@@ -164,6 +177,7 @@ class ActionPlanner:
current_available_actions=current_available_actions, # <-- Pass determined actions
cycle_info=cycle_info, # <-- Pass cycle info
extra_info=extra_info,
+ action_available_block=action_available_block,
)
# --- 调用 LLM (普通文本生成) ---
@@ -249,6 +263,7 @@ class ActionPlanner:
chat_target_info: Optional[dict], # Now passed as argument
observed_messages_str: str,
current_mind: Optional[str],
+ action_available_block: str,
current_available_actions: Dict[str, ActionInfo],
cycle_info: Optional[str],
extra_info: list[str],
@@ -306,7 +321,13 @@ class ActionPlanner:
action_options_block += using_action_prompt
extra_info_block = "\n".join(extra_info)
- extra_info_block = f"以下是一些额外的信息,现在请你阅读以下内容,进行决策\n{extra_info_block}\n以上是一些额外的信息,现在请你阅读以下内容,进行决策"
+ if extra_info:
+ extra_info_block = f"以下是一些额外的信息,现在请你阅读以下内容,进行决策\n{extra_info_block}\n以上是一些额外的信息,现在请你阅读以下内容,进行决策"
+ else:
+ extra_info_block = ""
+
+
+ moderation_prompt_block = "请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。"
planner_prompt_template = await global_prompt_manager.get_prompt_async("planner_prompt")
prompt = planner_prompt_template.format(
@@ -318,7 +339,9 @@ class ActionPlanner:
mind_info_block=mind_info_block,
cycle_info_block=cycle_info,
action_options_text=action_options_block,
+ action_available_block=action_available_block,
extra_info_block=extra_info_block,
+ moderation_prompt=moderation_prompt_block,
)
return prompt
diff --git a/src/chat/heart_flow/observation/hfcloop_observation.py b/src/chat/heart_flow/observation/hfcloop_observation.py
index bd8f3f34..171aaeb7 100644
--- a/src/chat/heart_flow/observation/hfcloop_observation.py
+++ b/src/chat/heart_flow/observation/hfcloop_observation.py
@@ -84,10 +84,4 @@ class HFCloopObservation:
else:
cycle_info_block += "\n你还没看过消息\n"
- using_actions = self.action_manager.get_using_actions()
- for action_name, action_info in using_actions.items():
- action_description = action_info["description"]
- cycle_info_block += f"\n你在聊天中可以使用{action_name},这个动作的描述是{action_description}\n"
- cycle_info_block += "注意,除了上述动作选项之外,你在群聊里不能做其他任何事情,这是你能力的边界\n"
-
self.observe_info = cycle_info_block
diff --git a/src/chat/normal_chat/normal_chat_generator.py b/src/chat/normal_chat/normal_chat_generator.py
index efa1ec54..04be8b3a 100644
--- a/src/chat/normal_chat/normal_chat_generator.py
+++ b/src/chat/normal_chat/normal_chat_generator.py
@@ -3,7 +3,7 @@ import random
from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config
from src.chat.message_receive.message import MessageThinking
-from src.chat.focus_chat.heartflow_prompt_builder import prompt_builder
+from src.chat.normal_chat.normal_prompt import prompt_builder
from src.chat.utils.utils import process_llm_response
from src.chat.utils.timer_calculator import Timer
from src.common.logger_manager import get_logger
diff --git a/src/chat/focus_chat/heartflow_prompt_builder.py b/src/chat/normal_chat/normal_prompt.py
similarity index 94%
rename from src/chat/focus_chat/heartflow_prompt_builder.py
rename to src/chat/normal_chat/normal_prompt.py
index e0be2d80..8308ab20 100644
--- a/src/chat/focus_chat/heartflow_prompt_builder.py
+++ b/src/chat/normal_chat/normal_prompt.py
@@ -17,14 +17,14 @@ logger = get_logger("prompt")
def init_prompt():
- Prompt(
- """
-你有以下信息可供参考:
-{structured_info}
-以上的消息是你获取到的消息,或许可以帮助你更好地回复。
-""",
- "info_from_tools",
- )
+# Prompt(
+# """
+# 你有以下信息可供参考:
+# {structured_info}
+# 以上的消息是你获取到的消息,或许可以帮助你更好地回复。
+# """,
+# "info_from_tools",
+# )
Prompt("你正在qq群里聊天,下面是群里在聊的内容:", "chat_target_group1")
Prompt("你正在和{sender_name}聊天,这是你们之前聊的内容:", "chat_target_private1")
@@ -94,9 +94,9 @@ class PromptBuilder:
in_mind_reply=None,
target_message=None,
) -> Optional[str]:
- if build_mode == "normal":
- return await self._build_prompt_normal(chat_stream, message_txt or "", sender_name)
- return None
+
+ return await self._build_prompt_normal(chat_stream, message_txt or "", sender_name)
+
async def _build_prompt_normal(self, chat_stream, message_txt: str, sender_name: str = "某人") -> str:
prompt_personality = individuality.get_prompt(x_person=2, level=2)
@@ -107,7 +107,7 @@ class PromptBuilder:
who_chat_in_group = get_recent_group_speaker(
chat_stream.stream_id,
(chat_stream.user_info.platform, chat_stream.user_info.user_id) if chat_stream.user_info else None,
- limit=global_config.focus_chat.observation_context_size,
+ limit=global_config.normal_chat.max_context_size,
)
elif chat_stream.user_info:
who_chat_in_group.append(
@@ -118,8 +118,8 @@ class PromptBuilder:
for person in who_chat_in_group:
if len(person) >= 3 and person[0] and person[1]:
relation_prompt += await relationship_manager.build_relationship_info(person)
- else:
- logger.warning(f"Invalid person tuple encountered for relationship prompt: {person}")
+
+
mood_prompt = mood_manager.get_mood_prompt()
reply_styles1 = [
("然后给出日常且口语化的回复,平淡一些", 0.4),
@@ -193,6 +193,8 @@ class PromptBuilder:
prompt_ger += "你喜欢用文言文"
if random.random() < 0.04:
prompt_ger += "你喜欢用流行梗"
+
+ moderation_prompt_block = "请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。"
# 知识构建
start_time = time.time()
@@ -231,7 +233,7 @@ class PromptBuilder:
keywords_reaction_prompt=keywords_reaction_prompt,
prompt_ger=prompt_ger,
# moderation_prompt=await global_prompt_manager.get_prompt_async("moderation_prompt"),
- moderation_prompt="",
+ moderation_prompt=moderation_prompt_block,
)
else:
template_name = "reasoning_prompt_private_main"
@@ -254,7 +256,7 @@ class PromptBuilder:
keywords_reaction_prompt=keywords_reaction_prompt,
prompt_ger=prompt_ger,
# moderation_prompt=await global_prompt_manager.get_prompt_async("moderation_prompt"),
- moderation_prompt="",
+ moderation_prompt=moderation_prompt_block,
)
# --- End choosing template ---
diff --git a/src/chat/utils/utils_image.py b/src/chat/utils/utils_image.py
index ca9f00aa..ea27ff1d 100644
--- a/src/chat/utils/utils_image.py
+++ b/src/chat/utils/utils_image.py
@@ -83,7 +83,7 @@ class ImageManager:
current_timestamp = time.time()
defaults = {"description": description, "timestamp": current_timestamp}
desc_obj, created = ImageDescriptions.get_or_create(
- hash=image_hash, type=description_type, defaults=defaults
+ image_description_hash=image_hash, type=description_type, defaults=defaults
)
if not created: # 如果记录已存在,则更新
desc_obj.description = description
@@ -150,7 +150,7 @@ class ImageManager:
img_obj.save()
except Images.DoesNotExist:
Images.create(
- hash=image_hash,
+ emoji_hash=image_hash,
path=file_path,
type="emoji",
description=description,
@@ -223,7 +223,7 @@ class ImageManager:
img_obj.save()
except Images.DoesNotExist:
Images.create(
- hash=image_hash,
+ emoji_hash=image_hash,
path=file_path,
type="image",
description=description,
diff --git a/src/common/logger.py b/src/common/logger.py
index 6c11b09d..3ed0fd7f 100644
--- a/src/common/logger.py
+++ b/src/common/logger.py
@@ -663,11 +663,11 @@ PROCESSOR_STYLE_CONFIG = {
PLANNER_STYLE_CONFIG = {
"advanced": {
- "console_format": "{time:HH:mm:ss} | 规划器 | {message}",
+ "console_format": "{time:HH:mm:ss} | 规划器 | {message}",
"file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 规划器 | {message}",
},
"simple": {
- "console_format": "{time:HH:mm:ss} | 规划器 | {message}",
+ "console_format": "{time:HH:mm:ss} | 规划器 | {message}",
"file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 规划器 | {message}",
},
}
diff --git a/src/config/config.py b/src/config/config.py
index 29f93a9e..a438b1fc 100644
--- a/src/config/config.py
+++ b/src/config/config.py
@@ -45,7 +45,7 @@ TEMPLATE_DIR = "template"
# 考虑到,实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码
# 对该字段的更新,请严格参照语义化版本规范:https://semver.org/lang/zh-CN/
-MMC_VERSION = "0.7.0-snapshot.1"
+MMC_VERSION = "0.7.0-snapshot.2"
def update_config():
diff --git a/src/config/official_configs.py b/src/config/official_configs.py
index 519e4430..778c34ae 100644
--- a/src/config/official_configs.py
+++ b/src/config/official_configs.py
@@ -181,6 +181,9 @@ class EmojiConfig(ConfigBase):
save_pic: bool = False
"""是否保存图片"""
+ save_emoji: bool = False
+ """是否保存表情包"""
+
cache_emoji: bool = True
"""是否缓存表情包"""
diff --git a/src/individuality/expression_style.py b/src/individuality/expression_style.py
index 30906c45..a64ba9cf 100644
--- a/src/individuality/expression_style.py
+++ b/src/individuality/expression_style.py
@@ -83,7 +83,7 @@ class PersonalityExpression:
logger.error(f"删除旧的表达文件 {self.expressions_file_path} 失败: {e}")
if count >= self.max_calculations:
- logger.info(f"对于风格 '{current_style_text}' 已达到最大计算次数 ({self.max_calculations})。跳过提取。")
+ logger.debug(f"对于风格 '{current_style_text}' 已达到最大计算次数 ({self.max_calculations})。跳过提取。")
# 即使跳过,也更新元数据以反映当前风格已被识别且计数已满
self._write_meta_data({"last_style_text": current_style_text, "count": count})
return
diff --git a/src/llm_models/utils_model.py b/src/llm_models/utils_model.py
index cda51b94..2c45e523 100644
--- a/src/llm_models/utils_model.py
+++ b/src/llm_models/utils_model.py
@@ -435,7 +435,7 @@ class LLMRequest:
logger.error(
f"模型 {self.model_name} 错误码: {response.status} - {error_code_mapping.get(response.status)}"
)
- raise RuntimeError("服务器负载过高,模型恢复失败QAQ")
+ raise RuntimeError("服务器负载过高,模型回复失败QAQ")
else:
logger.warning(f"模型 {self.model_name} 请求限制(429),等待{wait_time}秒后重试...")
raise RuntimeError("请求限制(429)")
@@ -459,6 +459,7 @@ class LLMRequest:
logger.error(
f"模型 {self.model_name} 错误码: {response.status} - {error_code_mapping.get(response.status)}"
)
+ print(response)
# 尝试获取并记录服务器返回的详细错误信息
try:
error_json = await response.json()
diff --git a/src/main.py b/src/main.py
index 3b0cbf01..d55d74f0 100644
--- a/src/main.py
+++ b/src/main.py
@@ -96,11 +96,6 @@ class MainSystem:
personality_core=global_config.personality.personality_core,
personality_sides=global_config.personality.personality_sides,
identity_detail=global_config.identity.identity_detail,
- height=global_config.identity.height,
- weight=global_config.identity.weight,
- age=global_config.identity.age,
- gender=global_config.identity.gender,
- appearance=global_config.identity.appearance,
)
logger.success("个体特征初始化成功")
diff --git a/src/person_info/relationship_manager.py b/src/person_info/relationship_manager.py
index 5388ac62..4ef9151b 100644
--- a/src/person_info/relationship_manager.py
+++ b/src/person_info/relationship_manager.py
@@ -297,6 +297,8 @@ class RelationshipManager:
relationship_value = await person_info_manager.get_value(person_id, "relationship_value")
level_num = self.calculate_level_num(relationship_value)
+ relation_value_prompt = ""
+
if level_num == 0 or level_num == 5:
relationship_level = ["厌恶", "冷漠以对", "认识", "友好对待", "喜欢", "暧昧"]
relation_prompt2_list = [
@@ -307,9 +309,9 @@ class RelationshipManager:
"积极回复",
"友善和包容的回复",
]
- return f"你{relationship_level[level_num]}{person_name},打算{relation_prompt2_list[level_num]}。\n"
+ relation_value_prompt = f"你{relationship_level[level_num]}{person_name},打算{relation_prompt2_list[level_num]}。"
elif level_num == 2:
- return ""
+ relation_value_prompt = ""
else:
if random.random() < 0.6:
relationship_level = ["厌恶", "冷漠以对", "认识", "友好对待", "喜欢", "暧昧"]
@@ -321,9 +323,18 @@ class RelationshipManager:
"积极回复",
"友善和包容的回复",
]
- return f"你{relationship_level[level_num]}{person_name},打算{relation_prompt2_list[level_num]}。\n"
+ relation_value_prompt = f"你{relationship_level[level_num]}{person_name},打算{relation_prompt2_list[level_num]}。"
else:
- return ""
+ relation_value_prompt = ""
+
+ if relation_value_prompt:
+ nickname_str = await person_info_manager.get_value(person_id, "nickname")
+ platform = await person_info_manager.get_value(person_id, "platform")
+ relation_prompt = f"{relation_value_prompt},ta在{platform}上的昵称是{nickname_str}。\n"
+ else:
+ relation_prompt = ""
+
+ return relation_prompt
@staticmethod
def calculate_level_num(relationship_value) -> int:
diff --git a/src/plugins/test_plugin_pic/actions/pic_action.py b/src/plugins/test_plugin_pic/actions/pic_action.py
index 0a965e87..6521dafc 100644
--- a/src/plugins/test_plugin_pic/actions/pic_action.py
+++ b/src/plugins/test_plugin_pic/actions/pic_action.py
@@ -153,7 +153,7 @@ class PicAction(PluginAction):
if encode_success:
base64_image_string = encode_result
- send_success = await self.send_message(type="emoji", data=base64_image_string)
+ send_success = await self.send_message(type="image", data=base64_image_string)
if send_success:
await self.send_message_by_expressor("图片表情已发送!")
return True, "图片表情已发送"
diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml
index a9f53a74..bc546b6a 100644
--- a/template/bot_config_template.toml
+++ b/template/bot_config_template.toml
@@ -104,7 +104,7 @@ learning_interval = 300 # 学习间隔 单位秒
max_reg_num = 40 # 表情包最大注册数量
do_replace = true # 开启则在达到最大数量时删除(替换)表情包,关闭则达到最大数量时不会继续收集表情包
check_interval = 120 # 检查表情包(注册,破损,删除)的时间间隔(分钟)
-save_pic = false # 是否保存图片
+save_pic = true # 是否保存图片
cache_emoji = true # 是否缓存表情包
steal_emoji = true # 是否偷取表情包,让麦麦可以发送她保存的这些表情包
content_filtration = false # 是否启用表情包过滤,只有符合该要求的表情包才会被保存
From 5b8e4c06907b2a90774b1c475e55f8cdde17beed Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Tue, 27 May 2025 12:44:46 +0800
Subject: [PATCH 03/17] druff
---
src/chat/emoji_system/emoji_manager.py | 2 +-
.../info_processors/action_processor.py | 16 ++++++-------
.../info_processors/self_processor.py | 11 +++------
src/chat/focus_chat/planners/planner.py | 12 +++++-----
src/chat/normal_chat/normal_prompt.py | 23 ++++++++-----------
src/person_info/relationship_manager.py | 14 +++++++----
6 files changed, 37 insertions(+), 41 deletions(-)
diff --git a/src/chat/emoji_system/emoji_manager.py b/src/chat/emoji_system/emoji_manager.py
index 6d8d4fbc..376e6b8f 100644
--- a/src/chat/emoji_system/emoji_manager.py
+++ b/src/chat/emoji_system/emoji_manager.py
@@ -369,7 +369,7 @@ class EmojiManager:
def __init__(self) -> None:
if self._initialized:
return # 如果已经初始化过,直接返回
-
+
self._scan_task = None
self.vlm = LLMRequest(model=global_config.model.vlm, temperature=0.3, max_tokens=1000, request_type="emoji")
diff --git a/src/chat/focus_chat/info_processors/action_processor.py b/src/chat/focus_chat/info_processors/action_processor.py
index 6c78dd78..c2fa6dbb 100644
--- a/src/chat/focus_chat/info_processors/action_processor.py
+++ b/src/chat/focus_chat/info_processors/action_processor.py
@@ -56,18 +56,18 @@ class ActionProcessor(BaseProcessor):
all_actions = None
hfc_obs = None
chat_obs = None
-
+
# 收集所有观察对象
for obs in observations:
if isinstance(obs, HFCloopObservation):
hfc_obs = obs
if isinstance(obs, ChattingObservation):
chat_obs = obs
-
+
# 合并所有动作变更
merged_action_changes = {"add": [], "remove": []}
reasons = []
-
+
# 处理HFCloopObservation
if hfc_obs:
obs = hfc_obs
@@ -77,32 +77,32 @@ class ActionProcessor(BaseProcessor):
# 合并动作变更
merged_action_changes["add"].extend(action_changes["add"])
merged_action_changes["remove"].extend(action_changes["remove"])
-
+
# 收集变更原因
if action_changes["add"]:
reasons.append(f"添加动作{action_changes['add']}因为检测到大量无回复")
if action_changes["remove"]:
reasons.append(f"移除动作{action_changes['remove']}因为检测到连续回复")
-
+
# 处理ChattingObservation
if chat_obs and all_actions is not None:
obs = chat_obs
# 检查动作的关联类型
chat_context = chat_manager.get_stream(obs.chat_id).context
type_mismatched_actions = []
-
+
for action_name in all_actions.keys():
data = all_actions[action_name]
if data.get("associated_types"):
if not chat_context.check_types(data["associated_types"]):
type_mismatched_actions.append(action_name)
logger.debug(f"{self.log_prefix} 动作 {action_name} 关联类型不匹配,移除该动作")
-
+
if type_mismatched_actions:
# 合并到移除列表中
merged_action_changes["remove"].extend(type_mismatched_actions)
reasons.append(f"移除动作{type_mismatched_actions}因为关联类型不匹配")
-
+
# 如果有任何动作变更,设置到action_info中
if merged_action_changes["add"] or merged_action_changes["remove"]:
action_info.set_action_changes(merged_action_changes)
diff --git a/src/chat/focus_chat/info_processors/self_processor.py b/src/chat/focus_chat/info_processors/self_processor.py
index 9d0643d6..c4c4ead5 100644
--- a/src/chat/focus_chat/info_processors/self_processor.py
+++ b/src/chat/focus_chat/info_processors/self_processor.py
@@ -15,7 +15,6 @@ from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservati
from typing import Dict
from src.chat.focus_chat.info.info_base import InfoBase
from src.chat.focus_chat.info.self_info import SelfInfo
-from src.chat.utils.utils import get_recent_group_speaker
logger = get_logger("processor")
@@ -101,8 +100,7 @@ class SelfProcessor(BaseProcessor):
如果return_prompt为True:
tuple: (current_mind, past_mind, prompt) 当前想法、过去的想法列表和使用的prompt
"""
-
-
+
for observation in observations:
if isinstance(observation, ChattingObservation):
is_group_chat = observation.is_group_chat
@@ -115,13 +113,11 @@ class SelfProcessor(BaseProcessor):
memory_str = "以下是当前在聊天中,你回忆起的记忆:\n"
for running_memory in running_memorys:
memory_str += f"{running_memory['topic']}: {running_memory['content']}\n"
-
relation_prompt = ""
for person in person_list:
if len(person) >= 3 and person[0] and person[1]:
- relation_prompt += await relationship_manager.build_relationship_info(person,is_id=True)
-
+ relation_prompt += await relationship_manager.build_relationship_info(person, is_id=True)
if observations is None:
observations = []
@@ -161,7 +157,6 @@ class SelfProcessor(BaseProcessor):
relation_prompt = relation_prompt_init + relation_prompt
else:
relation_prompt = relation_prompt_init + "没有特别在意的人\n"
-
prompt = (await global_prompt_manager.get_prompt_async("indentify_prompt")).format(
name_block=name_block,
@@ -172,7 +167,7 @@ class SelfProcessor(BaseProcessor):
time_now=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
chat_observe_info=chat_observe_info,
)
-
+
# print(prompt)
content = ""
diff --git a/src/chat/focus_chat/planners/planner.py b/src/chat/focus_chat/planners/planner.py
index 267522c6..57c83cff 100644
--- a/src/chat/focus_chat/planners/planner.py
+++ b/src/chat/focus_chat/planners/planner.py
@@ -125,14 +125,13 @@ class ActionPlanner:
if action in remove_actions:
action = "no_reply"
reasoning = f"之前选择的动作{action}已被移除,原因: {reason}"
-
+
using_actions = self.action_manager.get_using_actions()
action_available_block = ""
for action_name, action_info in using_actions.items():
action_description = action_info["description"]
action_available_block += f"\n你在聊天中可以使用{action_name},这个动作的描述是{action_description}\n"
- action_available_block += "注意,除了上述动作选项之外,你在群聊里不能做其他任何事情,这是你能力的边界\n"
-
+ action_available_block += "注意,除了上述动作选项之外,你在群聊里不能做其他任何事情,这是你能力的边界\n"
# 继续处理其他信息
for info in all_plan_info:
@@ -156,7 +155,9 @@ class ActionPlanner:
current_available_actions = self.action_manager.get_using_actions()
# 如果没有可用动作或只有no_reply动作,直接返回no_reply
- if not current_available_actions or (len(current_available_actions) == 1 and "no_reply" in current_available_actions):
+ if not current_available_actions or (
+ len(current_available_actions) == 1 and "no_reply" in current_available_actions
+ ):
action = "no_reply"
reasoning = "没有可用的动作" if not current_available_actions else "只有no_reply动作可用,跳过规划"
logger.info(f"{self.log_prefix}{reasoning}")
@@ -325,8 +326,7 @@ class ActionPlanner:
extra_info_block = f"以下是一些额外的信息,现在请你阅读以下内容,进行决策\n{extra_info_block}\n以上是一些额外的信息,现在请你阅读以下内容,进行决策"
else:
extra_info_block = ""
-
-
+
moderation_prompt_block = "请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。"
planner_prompt_template = await global_prompt_manager.get_prompt_async("planner_prompt")
diff --git a/src/chat/normal_chat/normal_prompt.py b/src/chat/normal_chat/normal_prompt.py
index 8308ab20..88a1fadc 100644
--- a/src/chat/normal_chat/normal_prompt.py
+++ b/src/chat/normal_chat/normal_prompt.py
@@ -17,14 +17,14 @@ logger = get_logger("prompt")
def init_prompt():
-# Prompt(
-# """
-# 你有以下信息可供参考:
-# {structured_info}
-# 以上的消息是你获取到的消息,或许可以帮助你更好地回复。
-# """,
-# "info_from_tools",
-# )
+ # Prompt(
+ # """
+ # 你有以下信息可供参考:
+ # {structured_info}
+ # 以上的消息是你获取到的消息,或许可以帮助你更好地回复。
+ # """,
+ # "info_from_tools",
+ # )
Prompt("你正在qq群里聊天,下面是群里在聊的内容:", "chat_target_group1")
Prompt("你正在和{sender_name}聊天,这是你们之前聊的内容:", "chat_target_private1")
@@ -94,10 +94,8 @@ class PromptBuilder:
in_mind_reply=None,
target_message=None,
) -> Optional[str]:
-
return await self._build_prompt_normal(chat_stream, message_txt or "", sender_name)
-
async def _build_prompt_normal(self, chat_stream, message_txt: str, sender_name: str = "某人") -> str:
prompt_personality = individuality.get_prompt(x_person=2, level=2)
is_group_chat = bool(chat_stream.group_info)
@@ -118,8 +116,7 @@ class PromptBuilder:
for person in who_chat_in_group:
if len(person) >= 3 and person[0] and person[1]:
relation_prompt += await relationship_manager.build_relationship_info(person)
-
-
+
mood_prompt = mood_manager.get_mood_prompt()
reply_styles1 = [
("然后给出日常且口语化的回复,平淡一些", 0.4),
@@ -193,7 +190,7 @@ class PromptBuilder:
prompt_ger += "你喜欢用文言文"
if random.random() < 0.04:
prompt_ger += "你喜欢用流行梗"
-
+
moderation_prompt_block = "请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。"
# 知识构建
diff --git a/src/person_info/relationship_manager.py b/src/person_info/relationship_manager.py
index 4ef9151b..37f75955 100644
--- a/src/person_info/relationship_manager.py
+++ b/src/person_info/relationship_manager.py
@@ -298,7 +298,7 @@ class RelationshipManager:
level_num = self.calculate_level_num(relationship_value)
relation_value_prompt = ""
-
+
if level_num == 0 or level_num == 5:
relationship_level = ["厌恶", "冷漠以对", "认识", "友好对待", "喜欢", "暧昧"]
relation_prompt2_list = [
@@ -309,7 +309,9 @@ class RelationshipManager:
"积极回复",
"友善和包容的回复",
]
- relation_value_prompt = f"你{relationship_level[level_num]}{person_name},打算{relation_prompt2_list[level_num]}。"
+ relation_value_prompt = (
+ f"你{relationship_level[level_num]}{person_name},打算{relation_prompt2_list[level_num]}。"
+ )
elif level_num == 2:
relation_value_prompt = ""
else:
@@ -323,17 +325,19 @@ class RelationshipManager:
"积极回复",
"友善和包容的回复",
]
- relation_value_prompt = f"你{relationship_level[level_num]}{person_name},打算{relation_prompt2_list[level_num]}。"
+ relation_value_prompt = (
+ f"你{relationship_level[level_num]}{person_name},打算{relation_prompt2_list[level_num]}。"
+ )
else:
relation_value_prompt = ""
-
+
if relation_value_prompt:
nickname_str = await person_info_manager.get_value(person_id, "nickname")
platform = await person_info_manager.get_value(person_id, "platform")
relation_prompt = f"{relation_value_prompt},ta在{platform}上的昵称是{nickname_str}。\n"
else:
relation_prompt = ""
-
+
return relation_prompt
@staticmethod
From 0391111c82d3ba7cb8ade890af793128cb7c476f Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Tue, 27 May 2025 14:28:41 +0800
Subject: [PATCH 04/17] =?UTF-8?q?feat=EF=BC=9A=E6=8B=86=E5=88=86=E9=87=8D?=
=?UTF-8?q?=E5=91=BD=E5=90=8D=E6=A8=A1=E5=9E=8B=E9=85=8D=E7=BD=AE=EF=BC=8C?=
=?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=8A=A8=E4=BD=9C=E6=81=A2=E5=A4=8D=E9=97=AE?=
=?UTF-8?q?=E9=A2=98?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../expressors/default_expressor.py | 6 +-
.../info_processors/action_processor.py | 3 -
.../info_processors/mind_processor.py | 6 +-
.../info_processors/self_processor.py | 6 +-
.../info_processors/tool_processor.py | 4 +-
.../working_memory_processor.py | 6 +-
src/chat/focus_chat/memory_activator.py | 2 +-
.../focus_chat/planners/action_manager.py | 15 ++---
src/chat/focus_chat/planners/planner.py | 12 ++--
.../working_memory/memory_manager.py | 2 +-
src/chat/heart_flow/background_tasks.py | 36 +++++------
.../observation/chatting_observation.py | 4 --
src/chat/memory_system/Hippocampus.py | 10 ++-
src/chat/normal_chat/normal_chat_generator.py | 4 +-
src/chat/utils/utils_image.py | 3 +-
src/config/official_configs.py | 41 +++++++-----
src/llm_models/utils_model.py | 5 +-
src/tools/tool_use.py | 9 ---
template/bot_config_template.toml | 63 +++++++++++--------
19 files changed, 119 insertions(+), 118 deletions(-)
diff --git a/src/chat/focus_chat/expressors/default_expressor.py b/src/chat/focus_chat/expressors/default_expressor.py
index 2d0d1f35..a9166f67 100644
--- a/src/chat/focus_chat/expressors/default_expressor.py
+++ b/src/chat/focus_chat/expressors/default_expressor.py
@@ -78,10 +78,10 @@ class DefaultExpressor:
self.log_prefix = "expressor"
# TODO: API-Adapter修改标记
self.express_model = LLMRequest(
- model=global_config.model.normal,
- temperature=global_config.model.normal["temp"],
+ model=global_config.model.focus_expressor,
+ temperature=global_config.model.focus_expressor["temp"],
max_tokens=256,
- request_type="response_heartflow",
+ request_type="focus_expressor",
)
self.heart_fc_sender = HeartFCSender()
diff --git a/src/chat/focus_chat/info_processors/action_processor.py b/src/chat/focus_chat/info_processors/action_processor.py
index c2fa6dbb..1f05ac84 100644
--- a/src/chat/focus_chat/info_processors/action_processor.py
+++ b/src/chat/focus_chat/info_processors/action_processor.py
@@ -27,9 +27,6 @@ class ActionProcessor(BaseProcessor):
"""初始化观察处理器"""
super().__init__()
# TODO: API-Adapter修改标记
- self.model_summary = LLMRequest(
- model=global_config.model.observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
- )
async def process_info(
self,
diff --git a/src/chat/focus_chat/info_processors/mind_processor.py b/src/chat/focus_chat/info_processors/mind_processor.py
index 8e911ace..2a1642ad 100644
--- a/src/chat/focus_chat/info_processors/mind_processor.py
+++ b/src/chat/focus_chat/info_processors/mind_processor.py
@@ -71,10 +71,10 @@ class MindProcessor(BaseProcessor):
self.subheartflow_id = subheartflow_id
self.llm_model = LLMRequest(
- model=global_config.model.sub_heartflow,
- temperature=global_config.model.sub_heartflow["temp"],
+ model=global_config.model.focus_chat_mind,
+ temperature=global_config.model.focus_chat_mind["temp"],
max_tokens=800,
- request_type="sub_heart_flow",
+ request_type="focus_chat_mind",
)
self.current_mind = ""
diff --git a/src/chat/focus_chat/info_processors/self_processor.py b/src/chat/focus_chat/info_processors/self_processor.py
index c4c4ead5..6589e2f7 100644
--- a/src/chat/focus_chat/info_processors/self_processor.py
+++ b/src/chat/focus_chat/info_processors/self_processor.py
@@ -54,10 +54,10 @@ class SelfProcessor(BaseProcessor):
self.subheartflow_id = subheartflow_id
self.llm_model = LLMRequest(
- model=global_config.model.sub_heartflow,
- temperature=global_config.model.sub_heartflow["temp"],
+ model=global_config.model.focus_self_recognize,
+ temperature=global_config.model.focus_self_recognize["temp"],
max_tokens=800,
- request_type="self_identify",
+ request_type="focus_self_identify",
)
name = chat_manager.get_stream_name(self.subheartflow_id)
diff --git a/src/chat/focus_chat/info_processors/tool_processor.py b/src/chat/focus_chat/info_processors/tool_processor.py
index 39ac8dc6..b1568cf3 100644
--- a/src/chat/focus_chat/info_processors/tool_processor.py
+++ b/src/chat/focus_chat/info_processors/tool_processor.py
@@ -49,9 +49,9 @@ class ToolProcessor(BaseProcessor):
self.subheartflow_id = subheartflow_id
self.log_prefix = f"[{subheartflow_id}:ToolExecutor] "
self.llm_model = LLMRequest(
- model=global_config.model.tool_use,
+ model=global_config.model.focus_tool_use,
max_tokens=500,
- request_type="tool_execution",
+ request_type="focus_tool",
)
self.structured_info = []
diff --git a/src/chat/focus_chat/info_processors/working_memory_processor.py b/src/chat/focus_chat/info_processors/working_memory_processor.py
index cceb1623..d7e3d83e 100644
--- a/src/chat/focus_chat/info_processors/working_memory_processor.py
+++ b/src/chat/focus_chat/info_processors/working_memory_processor.py
@@ -61,10 +61,10 @@ class WorkingMemoryProcessor(BaseProcessor):
self.subheartflow_id = subheartflow_id
self.llm_model = LLMRequest(
- model=global_config.model.sub_heartflow,
- temperature=global_config.model.sub_heartflow["temp"],
+ model=global_config.model.focus_chat_mind,
+ temperature=global_config.model.focus_chat_mind["temp"],
max_tokens=800,
- request_type="working_memory",
+ request_type="focus_working_memory",
)
name = chat_manager.get_stream_name(self.subheartflow_id)
diff --git a/src/chat/focus_chat/memory_activator.py b/src/chat/focus_chat/memory_activator.py
index 0d5d6322..0f75d09e 100644
--- a/src/chat/focus_chat/memory_activator.py
+++ b/src/chat/focus_chat/memory_activator.py
@@ -36,7 +36,7 @@ class MemoryActivator:
def __init__(self):
# TODO: API-Adapter修改标记
self.summary_model = LLMRequest(
- model=global_config.model.summary, temperature=0.7, max_tokens=50, request_type="chat_observation"
+ model=global_config.model.memory_summary, temperature=0.7, max_tokens=50, request_type="chat_observation"
)
self.running_memory = []
diff --git a/src/chat/focus_chat/planners/action_manager.py b/src/chat/focus_chat/planners/action_manager.py
index 6cf8de6d..62db09a9 100644
--- a/src/chat/focus_chat/planners/action_manager.py
+++ b/src/chat/focus_chat/planners/action_manager.py
@@ -28,8 +28,7 @@ class ActionManager:
self._registered_actions: Dict[str, ActionInfo] = {}
# 当前正在使用的动作集合,默认加载默认动作
self._using_actions: Dict[str, ActionInfo] = {}
- # 临时备份原始使用中的动作
- self._original_actions_backup: Optional[Dict[str, ActionInfo]] = None
+
# 默认动作集,仅作为快照,用于恢复默认
self._default_actions: Dict[str, ActionInfo] = {}
@@ -278,22 +277,18 @@ class ActionManager:
return True
def temporarily_remove_actions(self, actions_to_remove: List[str]) -> None:
- """临时移除使用集中的指定动作,备份原始使用集"""
- if self._original_actions_backup is None:
- self._original_actions_backup = self._using_actions.copy()
+ """临时移除使用集中的指定动作"""
for name in actions_to_remove:
self._using_actions.pop(name, None)
def restore_actions(self) -> None:
- """恢复之前备份的原始使用集"""
- if self._original_actions_backup is not None:
- self._using_actions = self._original_actions_backup.copy()
- self._original_actions_backup = None
+ """恢复到默认动作集"""
+ logger.debug(f"恢复动作集: 从 {list(self._using_actions.keys())} 恢复到默认动作集 {list(self._default_actions.keys())}")
+ self._using_actions = self._default_actions.copy()
def restore_default_actions(self) -> None:
"""恢复默认动作集到使用集"""
self._using_actions = self._default_actions.copy()
- self._original_actions_backup = None
def get_action(self, action_name: str) -> Optional[Type[BaseAction]]:
"""
diff --git a/src/chat/focus_chat/planners/planner.py b/src/chat/focus_chat/planners/planner.py
index 57c83cff..576e8979 100644
--- a/src/chat/focus_chat/planners/planner.py
+++ b/src/chat/focus_chat/planners/planner.py
@@ -78,9 +78,9 @@ class ActionPlanner:
self.log_prefix = log_prefix
# LLM规划器配置
self.planner_llm = LLMRequest(
- model=global_config.model.plan,
+ model=global_config.model.focus_planner,
max_tokens=1000,
- request_type="action_planning", # 用于动作规划
+ request_type="focus_planner", # 用于动作规划
)
self.action_manager = action_manager
@@ -161,6 +161,10 @@ class ActionPlanner:
action = "no_reply"
reasoning = "没有可用的动作" if not current_available_actions else "只有no_reply动作可用,跳过规划"
logger.info(f"{self.log_prefix}{reasoning}")
+ self.action_manager.restore_actions()
+ logger.debug(
+ f"{self.log_prefix}恢复到默认动作集, 当前可用: {list(self.action_manager.get_using_actions().keys())}"
+ )
return {
"action_result": {"action_type": action, "action_data": action_data, "reasoning": reasoning},
"current_mind": current_mind,
@@ -241,10 +245,10 @@ class ActionPlanner:
f"{self.log_prefix}规划器Prompt:\n{prompt}\n\n决策动作:{action},\n动作信息: '{action_data}'\n理由: {reasoning}"
)
- # 恢复原始动作集
+ # 恢复到默认动作集
self.action_manager.restore_actions()
logger.debug(
- f"{self.log_prefix}恢复了原始动作集, 当前可用: {list(self.action_manager.get_using_actions().keys())}"
+ f"{self.log_prefix}恢复到默认动作集, 当前可用: {list(self.action_manager.get_using_actions().keys())}"
)
action_result = {"action_type": action, "action_data": action_data, "reasoning": reasoning}
diff --git a/src/chat/focus_chat/working_memory/memory_manager.py b/src/chat/focus_chat/working_memory/memory_manager.py
index 2ee8a36d..0157e4f8 100644
--- a/src/chat/focus_chat/working_memory/memory_manager.py
+++ b/src/chat/focus_chat/working_memory/memory_manager.py
@@ -33,7 +33,7 @@ class MemoryManager:
self._id_map: Dict[str, MemoryItem] = {}
self.llm_summarizer = LLMRequest(
- model=global_config.model.summary, temperature=0.3, max_tokens=512, request_type="memory_summarization"
+ model=global_config.model.focus_working_memory, temperature=0.3, max_tokens=512, request_type="memory_summarization"
)
@property
diff --git a/src/chat/heart_flow/background_tasks.py b/src/chat/heart_flow/background_tasks.py
index 4d2438b6..4e4d502b 100644
--- a/src/chat/heart_flow/background_tasks.py
+++ b/src/chat/heart_flow/background_tasks.py
@@ -88,34 +88,34 @@ class BackgroundTaskManager:
f"聊天状态更新任务已启动 间隔:{STATE_UPDATE_INTERVAL_SECONDS}s",
"_state_update_task",
),
- (
- self._run_cleanup_cycle,
- "info",
- f"清理任务已启动 间隔:{CLEANUP_INTERVAL_SECONDS}s",
- "_cleanup_task",
- ),
- # 新增私聊激活任务配置
- (
- # Use lambda to pass the interval to the runner function
- lambda: self._run_private_chat_activation_cycle(PRIVATE_CHAT_ACTIVATION_CHECK_INTERVAL_SECONDS),
- "debug",
- f"私聊激活检查任务已启动 间隔:{PRIVATE_CHAT_ACTIVATION_CHECK_INTERVAL_SECONDS}s",
- "_private_chat_activation_task",
- ),
]
- # 根据 chat_mode 条件添加专注评估任务
+ # 根据 chat_mode 条件添加其他任务
if not (global_config.chat.chat_mode == "normal"):
- task_configs.append(
+ task_configs.extend([
+ (
+ self._run_cleanup_cycle,
+ "info",
+ f"清理任务已启动 间隔:{CLEANUP_INTERVAL_SECONDS}s",
+ "_cleanup_task",
+ ),
+ # 新增私聊激活任务配置
+ (
+ # Use lambda to pass the interval to the runner function
+ lambda: self._run_private_chat_activation_cycle(PRIVATE_CHAT_ACTIVATION_CHECK_INTERVAL_SECONDS),
+ "debug",
+ f"私聊激活检查任务已启动 间隔:{PRIVATE_CHAT_ACTIVATION_CHECK_INTERVAL_SECONDS}s",
+ "_private_chat_activation_task",
+ ),
(
self._run_into_focus_cycle,
"debug", # 设为debug,避免过多日志
f"专注评估任务已启动 间隔:{INTEREST_EVAL_INTERVAL_SECONDS}s",
"_into_focus_task",
)
- )
+ ])
else:
- logger.info("聊天模式为 normal,跳过启动专注评估任务")
+ logger.info("聊天模式为 normal,跳过启动清理任务、私聊激活任务和专注评估任务")
# 统一启动所有任务
for task_func, log_level, log_msg, task_attr_name in task_configs:
diff --git a/src/chat/heart_flow/observation/chatting_observation.py b/src/chat/heart_flow/observation/chatting_observation.py
index b43074fa..4264a76c 100644
--- a/src/chat/heart_flow/observation/chatting_observation.py
+++ b/src/chat/heart_flow/observation/chatting_observation.py
@@ -66,10 +66,6 @@ class ChattingObservation(Observation):
self.oldest_messages = []
self.oldest_messages_str = ""
self.compressor_prompt = ""
- # TODO: API-Adapter修改标记
- self.model_summary = LLMRequest(
- model=global_config.model.observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
- )
async def initialize(self):
self.is_group_chat, self.chat_target_info = await get_chat_type_and_target_info(self.chat_id)
diff --git a/src/chat/memory_system/Hippocampus.py b/src/chat/memory_system/Hippocampus.py
index d7a13bfe..9424706e 100644
--- a/src/chat/memory_system/Hippocampus.py
+++ b/src/chat/memory_system/Hippocampus.py
@@ -193,7 +193,6 @@ class MemoryGraph:
class Hippocampus:
def __init__(self):
self.memory_graph = MemoryGraph()
- self.llm_topic_judge = None
self.model_summary = None
self.entorhinal_cortex = None
self.parahippocampal_gyrus = None
@@ -205,8 +204,7 @@ class Hippocampus:
# 从数据库加载记忆图
self.entorhinal_cortex.sync_memory_from_db()
# TODO: API-Adapter修改标记
- self.llm_topic_judge = LLMRequest(global_config.model.topic_judge, request_type="memory")
- self.model_summary = LLMRequest(global_config.model.summary, request_type="memory")
+ self.model_summary = LLMRequest(global_config.model.memory_summary, request_type="memory")
def get_all_node_names(self) -> list:
"""获取记忆图中所有节点的名字列表"""
@@ -344,7 +342,7 @@ class Hippocampus:
# 使用LLM提取关键词
topic_num = min(5, max(1, int(len(text) * 0.1))) # 根据文本长度动态调整关键词数量
# logger.info(f"提取关键词数量: {topic_num}")
- topics_response = await self.llm_topic_judge.generate_response(self.find_topic_llm(text, topic_num))
+ topics_response = await self.model_summary.generate_response(self.find_topic_llm(text, topic_num))
# 提取关键词
keywords = re.findall(r"<([^>]+)>", topics_response[0])
@@ -699,7 +697,7 @@ class Hippocampus:
# 使用LLM提取关键词
topic_num = min(5, max(1, int(len(text) * 0.1))) # 根据文本长度动态调整关键词数量
# logger.info(f"提取关键词数量: {topic_num}")
- topics_response = await self.llm_topic_judge.generate_response(self.find_topic_llm(text, topic_num))
+ topics_response = await self.model_summary.generate_response(self.find_topic_llm(text, topic_num))
# 提取关键词
keywords = re.findall(r"<([^>]+)>", topics_response[0])
@@ -1126,7 +1124,7 @@ class ParahippocampalGyrus:
# 2. 使用LLM提取关键主题
topic_num = self.hippocampus.calculate_topic_num(input_text, compress_rate)
- topics_response = await self.hippocampus.llm_topic_judge.generate_response(
+ topics_response = await self.hippocampus.model_summary.generate_response(
self.hippocampus.find_topic_llm(input_text, topic_num)
)
diff --git a/src/chat/normal_chat/normal_chat_generator.py b/src/chat/normal_chat/normal_chat_generator.py
index 04be8b3a..7fa6b032 100644
--- a/src/chat/normal_chat/normal_chat_generator.py
+++ b/src/chat/normal_chat/normal_chat_generator.py
@@ -17,7 +17,7 @@ class NormalChatGenerator:
def __init__(self):
# TODO: API-Adapter修改标记
self.model_reasoning = LLMRequest(
- model=global_config.model.reasoning,
+ model=global_config.model.normal_chat_1,
temperature=0.7,
max_tokens=3000,
request_type="response_reasoning",
@@ -30,7 +30,7 @@ class NormalChatGenerator:
)
self.model_sum = LLMRequest(
- model=global_config.model.summary, temperature=0.7, max_tokens=3000, request_type="relation"
+ model=global_config.model.memory_summary, temperature=0.7, max_tokens=3000, request_type="relation"
)
self.current_model_type = "r1" # 默认使用 R1
self.current_model_name = "unknown model"
diff --git a/src/chat/utils/utils_image.py b/src/chat/utils/utils_image.py
index ea27ff1d..abd99aa2 100644
--- a/src/chat/utils/utils_image.py
+++ b/src/chat/utils/utils_image.py
@@ -130,6 +130,7 @@ class ImageManager:
# 根据配置决定是否保存图片
if global_config.emoji.save_emoji:
# 生成文件名和路径
+ logger.debug(f"保存表情包: {image_hash}")
current_timestamp = time.time()
filename = f"{int(current_timestamp)}_{image_hash[:8]}.{image_format}"
emoji_dir = os.path.join(self.IMAGE_DIR, "emoji")
@@ -156,7 +157,7 @@ class ImageManager:
description=description,
timestamp=current_timestamp,
)
- logger.trace(f"保存表情包元数据: {file_path}")
+ # logger.debug(f"保存表情包元数据: {file_path}")
except Exception as e:
logger.error(f"保存表情包文件或元数据失败: {str(e)}")
diff --git a/src/config/official_configs.py b/src/config/official_configs.py
index 778c34ae..a35026e5 100644
--- a/src/config/official_configs.py
+++ b/src/config/official_configs.py
@@ -178,10 +178,10 @@ class EmojiConfig(ConfigBase):
check_interval: int = 120
"""表情包检查间隔(分钟)"""
- save_pic: bool = False
+ save_pic: bool = True
"""是否保存图片"""
- save_emoji: bool = False
+ save_emoji: bool = True
"""是否保存表情包"""
cache_emoji: bool = True
@@ -384,27 +384,33 @@ class ModelConfig(ConfigBase):
normal: dict[str, Any] = field(default_factory=lambda: {})
"""普通模型配置"""
- topic_judge: dict[str, Any] = field(default_factory=lambda: {})
- """主题判断模型配置"""
-
- summary: dict[str, Any] = field(default_factory=lambda: {})
- """摘要模型配置"""
+ memory_summary: dict[str, Any] = field(default_factory=lambda: {})
+ """记忆的概括模型配置"""
vlm: dict[str, Any] = field(default_factory=lambda: {})
"""视觉语言模型配置"""
- heartflow: dict[str, Any] = field(default_factory=lambda: {})
- """心流模型配置"""
-
observation: dict[str, Any] = field(default_factory=lambda: {})
"""观察模型配置"""
- sub_heartflow: dict[str, Any] = field(default_factory=lambda: {})
- """子心流模型配置"""
-
- plan: dict[str, Any] = field(default_factory=lambda: {})
- """计划模型配置"""
+ focus_working_memory: dict[str, Any] = field(default_factory=lambda: {})
+ """专注工作记忆模型配置"""
+
+ focus_chat_mind: dict[str, Any] = field(default_factory=lambda: {})
+ """专注聊天规划模型配置"""
+
+ focus_self_recognize: dict[str, Any] = field(default_factory=lambda: {})
+ """专注自我识别模型配置"""
+
+ focus_tool_use: dict[str, Any] = field(default_factory=lambda: {})
+ """专注工具使用模型配置"""
+ focus_planner: dict[str, Any] = field(default_factory=lambda: {})
+ """专注规划模型配置"""
+
+ focus_expressor: dict[str, Any] = field(default_factory=lambda: {})
+ """专注表达器模型配置"""
+
embedding: dict[str, Any] = field(default_factory=lambda: {})
"""嵌入模型配置"""
@@ -417,5 +423,6 @@ class ModelConfig(ConfigBase):
pfc_reply_checker: dict[str, Any] = field(default_factory=lambda: {})
"""PFC回复检查模型配置"""
- tool_use: dict[str, Any] = field(default_factory=lambda: {})
- """工具使用模型配置"""
+
+
+
diff --git a/src/llm_models/utils_model.py b/src/llm_models/utils_model.py
index 2c45e523..2a45c5c9 100644
--- a/src/llm_models/utils_model.py
+++ b/src/llm_models/utils_model.py
@@ -459,6 +459,7 @@ class LLMRequest:
logger.error(
f"模型 {self.model_name} 错误码: {response.status} - {error_code_mapping.get(response.status)}"
)
+ print(request_content)
print(response)
# 尝试获取并记录服务器返回的详细错误信息
try:
@@ -499,8 +500,8 @@ class LLMRequest:
if global_config.model.normal.get("name") == old_model_name:
global_config.model.normal["name"] = self.model_name
logger.warning(f"将全局配置中的 llm_normal 模型临时降级至{self.model_name}")
- if global_config.model.reasoning.get("name") == old_model_name:
- global_config.model.reasoning["name"] = self.model_name
+ if global_config.model.normal_chat_1.get("name") == old_model_name:
+ global_config.model.normal_chat_1["name"] = self.model_name
logger.warning(f"将全局配置中的 llm_reasoning 模型临时降级至{self.model_name}")
if payload and "model" in payload:
diff --git a/src/tools/tool_use.py b/src/tools/tool_use.py
index 8ddc747d..caca2cb6 100644
--- a/src/tools/tool_use.py
+++ b/src/tools/tool_use.py
@@ -1,18 +1,9 @@
-from src.llm_models.utils_model import LLMRequest
-from src.config.config import global_config
import json
from src.common.logger_manager import get_logger
from src.tools.tool_can_use import get_all_tool_definitions, get_tool_instance
logger = get_logger("tool_use")
-
-
class ToolUser:
- def __init__(self):
- self.llm_model_tool = LLMRequest(
- model=global_config.model.tool_use, temperature=0.2, max_tokens=1000, request_type="tool_use"
- )
-
@staticmethod
def _define_tools():
"""获取所有已注册工具的定义
diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml
index bc546b6a..0156cf36 100644
--- a/template/bot_config_template.toml
+++ b/template/bot_config_template.toml
@@ -196,7 +196,7 @@ pfc_chatting = false # 是否启用PFC聊天,该功能仅作用于私聊,与
model_max_output_length = 800 # 模型单次返回的最大token数
#这个模型必须是推理模型
-[model.reasoning] # 一般聊天模式的推理回复模型
+[model.normal_chat_1] # 一般聊天模式的首要回复模型,推荐使用 推理模型
name = "Pro/deepseek-ai/DeepSeek-R1"
provider = "SILICONFLOW"
pri_in = 1.0 #模型的输入价格(非必填,可以记录消耗)
@@ -210,13 +210,7 @@ pri_out = 8 #模型的输出价格(非必填,可以记录消耗)
#默认temp 0.2 如果你使用的是老V3或者其他模型,请自己修改temp参数
temp = 0.2 #模型的温度,新V3建议0.1-0.3
-[model.topic_judge] #主题判断模型:建议使用qwen2.5 7b
-name = "Pro/Qwen/Qwen2.5-7B-Instruct"
-provider = "SILICONFLOW"
-pri_in = 0.35
-pri_out = 0.35
-
-[model.summary] #概括模型,建议使用qwen2.5 32b 及以上
+[model.memory_summary] # 记忆的概括模型,建议使用qwen2.5 32b 及以上
name = "Qwen/Qwen2.5-32B-Instruct"
provider = "SILICONFLOW"
pri_in = 1.26
@@ -228,12 +222,6 @@ provider = "SILICONFLOW"
pri_in = 0.35
pri_out = 0.35
-[model.heartflow] # 用于控制麦麦是否参与聊天的模型
-name = "Qwen/Qwen2.5-32B-Instruct"
-provider = "SILICONFLOW"
-pri_in = 1.26
-pri_out = 1.26
-
[model.observation] #观察模型,压缩聊天内容,建议用免费的
# name = "Pro/Qwen/Qwen2.5-7B-Instruct"
name = "Qwen/Qwen2.5-7B-Instruct"
@@ -241,19 +229,48 @@ provider = "SILICONFLOW"
pri_in = 0
pri_out = 0
-[model.sub_heartflow] #心流:认真聊天时,生成麦麦的内心想法,必须使用具有工具调用能力的模型
+[model.focus_working_memory] #工作记忆模型,建议使用qwen2.5 32b
+# name = "Pro/Qwen/Qwen2.5-7B-Instruct"
+name = "Qwen/Qwen2.5-32B-Instruct"
+provider = "SILICONFLOW"
+pri_in = 1.26
+pri_out = 1.26
+
+[model.focus_chat_mind] #聊天规划:认真聊天时,生成麦麦对聊天的规划想法
name = "Pro/deepseek-ai/DeepSeek-V3"
provider = "SILICONFLOW"
pri_in = 2
pri_out = 8
temp = 0.3 #模型的温度,新V3建议0.1-0.3
-[model.plan] #决策:认真聊天时,负责决定麦麦该做什么
+[model.focus_tool_use] #工具调用模型,需要使用支持工具调用的模型,建议使用qwen2.5 32b
+name = "Qwen/Qwen2.5-32B-Instruct"
+provider = "SILICONFLOW"
+pri_in = 1.26
+pri_out = 1.26
+
+[model.focus_planner] #决策:认真聊天时,负责决定麦麦该做什么
name = "Pro/deepseek-ai/DeepSeek-V3"
provider = "SILICONFLOW"
pri_in = 2
pri_out = 8
+#表达器模型,用于生成表达方式
+[model.focus_expressor]
+name = "Pro/deepseek-ai/DeepSeek-V3"
+provider = "SILICONFLOW"
+pri_in = 2
+pri_out = 8
+temp = 0.3
+
+#自我识别模型,用于自我认知和身份识别
+[model.focus_self_recognize]
+name = "Pro/deepseek-ai/DeepSeek-V3"
+provider = "SILICONFLOW"
+pri_in = 2
+pri_out = 8
+temp = 0.3
+
#嵌入模型
[model.embedding] #嵌入
@@ -263,6 +280,9 @@ pri_in = 0
pri_out = 0
+
+
+
#私聊PFC:需要开启PFC功能,默认三个模型均为硅基流动v3,如果需要支持多人同时私聊或频繁调用,建议把其中的一个或两个换成官方v3或其它模型,以免撞到429
#PFC决策模型
@@ -289,15 +309,6 @@ pri_in = 2
pri_out = 8
-#以下模型暂时没有使用!!
-#以下模型暂时没有使用!!
-#以下模型暂时没有使用!!
-#以下模型暂时没有使用!!
-#以下模型暂时没有使用!!
-[model.tool_use] #工具调用模型,需要使用支持工具调用的模型,建议使用qwen2.5 32b
-name = "Qwen/Qwen2.5-32B-Instruct"
-provider = "SILICONFLOW"
-pri_in = 1.26
-pri_out = 1.26
+
From 548a583cc75c0b81a4da766a2f8da906bd73bf5c Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Tue, 27 May 2025 17:07:51 +0800
Subject: [PATCH 05/17] =?UTF-8?q?fix=EF=BC=9A=E4=BF=AE=E5=A4=8D=E7=A6=81?=
=?UTF-8?q?=E8=A8=80=E6=8F=92=E4=BB=B6?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../expressors/default_expressor.py | 6 +-
src/chat/focus_chat/heartFC_sender.py | 4 +-
.../info_processors/self_processor.py | 13 ++--
src/person_info/person_info.py | 12 ++-
src/plugins/test_plugin/actions/__init__.py | 2 +-
.../actions/group_whole_ban_action.py | 73 +++++++++++++++++++
.../test_plugin/actions/mute_action.py | 64 +++++++++++-----
.../test_plugin/actions/online_action.py | 44 -----------
8 files changed, 140 insertions(+), 78 deletions(-)
create mode 100644 src/plugins/test_plugin/actions/group_whole_ban_action.py
delete mode 100644 src/plugins/test_plugin/actions/online_action.py
diff --git a/src/chat/focus_chat/expressors/default_expressor.py b/src/chat/focus_chat/expressors/default_expressor.py
index a9166f67..be3e827f 100644
--- a/src/chat/focus_chat/expressors/default_expressor.py
+++ b/src/chat/focus_chat/expressors/default_expressor.py
@@ -439,7 +439,11 @@ class DefaultExpressor:
if type == "emoji":
typing = False
- sent_msg = await self.heart_fc_sender.send_message(bot_message, has_thinking=True, typing=typing)
+ if anchor_message.raw_message:
+ set_reply = True
+ else:
+ set_reply = False
+ sent_msg = await self.heart_fc_sender.send_message(bot_message, has_thinking=True, typing=typing, set_reply=set_reply)
reply_message_ids.append(part_message_id) # 记录我们生成的ID
diff --git a/src/chat/focus_chat/heartFC_sender.py b/src/chat/focus_chat/heartFC_sender.py
index 81d463b0..4f2c873e 100644
--- a/src/chat/focus_chat/heartFC_sender.py
+++ b/src/chat/focus_chat/heartFC_sender.py
@@ -73,7 +73,7 @@ class HeartFCSender:
thinking_message = self.thinking_messages.get(chat_id, {}).get(message_id)
return thinking_message.thinking_start_time if thinking_message else None
- async def send_message(self, message: MessageSending, has_thinking=False, typing=False):
+ async def send_message(self, message: MessageSending, has_thinking=False, typing=False, set_reply=False):
"""
处理、发送并存储一条消息。
@@ -97,7 +97,7 @@ class HeartFCSender:
message_id = message.message_info.message_id
try:
- if has_thinking:
+ if set_reply:
_ = message.update_thinking_time()
# --- 条件应用 set_reply 逻辑 ---
diff --git a/src/chat/focus_chat/info_processors/self_processor.py b/src/chat/focus_chat/info_processors/self_processor.py
index 6589e2f7..5e67c71f 100644
--- a/src/chat/focus_chat/info_processors/self_processor.py
+++ b/src/chat/focus_chat/info_processors/self_processor.py
@@ -31,12 +31,13 @@ def init_prompt():
现在是{time_now},你正在上网,和qq群里的网友们聊天,以下是正在进行的聊天内容:
{chat_observe_info}
-现在请你根据现有的信息,思考自我认同
-1. 你是一个什么样的人,你和群里的人关系如何
-2. 你的形象是什么
-3. 思考有没有人提到你,或者图片与你有关
-4. 你的自我认同是否有助于你的回答,如果你需要自我相关的信息来帮你参与聊天,请输出,否则请输出十几个字的简短自我认同
-5. 一般情况下不用输出自我认同,只需要输出十几个字的简短自我认同就好,除非有明显需要自我认同的场景
+现在请你根据现有的信息,思考自我认同:请严格遵守以下规则
+1. 请严格参考最上方的人设,适当参考记忆和当前聊天内容
+2. 你是一个什么样的人,你和群里的人关系如何
+3. 你的形象是什么
+4. 思考有没有人提到你,或者图片与你有关
+5. 你的自我认同是否有助于你的回答,如果你需要自我相关的信息来帮你参与聊天,请输出,否则请输出十几个字的简短自我认同
+6. 一般情况下不用输出自我认同,只需要输出十几个字的简短自我认同就好,除非有明显需要自我认同的场景
输出内容平淡一些,说中文,不要浮夸,平淡一些。
请注意不要输出多余内容(包括前后缀,冒号和引号,括号(),表情包,at或 @等 )。只输出自我认同内容,记得明确说明这是你的自我认同。
diff --git a/src/person_info/person_info.py b/src/person_info/person_info.py
index 78878129..f4e14df7 100644
--- a/src/person_info/person_info.py
+++ b/src/person_info/person_info.py
@@ -104,10 +104,14 @@ class PersonInfoManager:
def get_person_id_by_person_name(self, person_name: str):
"""根据用户名获取用户ID"""
- document = db.person_info.find_one({"person_name": person_name})
- if document:
- return document["person_id"]
- else:
+ try:
+ record = PersonInfo.get_or_none(PersonInfo.person_name == person_name)
+ if record:
+ return record.person_id
+ else:
+ return ""
+ except Exception as e:
+ logger.error(f"根据用户名 {person_name} 获取用户ID时出错 (Peewee): {e}")
return ""
@staticmethod
diff --git a/src/plugins/test_plugin/actions/__init__.py b/src/plugins/test_plugin/actions/__init__.py
index dc99db14..7d96ea8a 100644
--- a/src/plugins/test_plugin/actions/__init__.py
+++ b/src/plugins/test_plugin/actions/__init__.py
@@ -3,5 +3,5 @@
# 导入所有动作模块以确保装饰器被执行
from . import test_action # noqa
-from . import online_action # noqa
+# from . import online_action # noqa
from . import mute_action # noqa
diff --git a/src/plugins/test_plugin/actions/group_whole_ban_action.py b/src/plugins/test_plugin/actions/group_whole_ban_action.py
new file mode 100644
index 00000000..bb9f3531
--- /dev/null
+++ b/src/plugins/test_plugin/actions/group_whole_ban_action.py
@@ -0,0 +1,73 @@
+from src.common.logger_manager import get_logger
+from src.chat.focus_chat.planners.actions.plugin_action import PluginAction, register_action
+from typing import Tuple
+
+logger = get_logger("group_whole_ban_action")
+
+
+@register_action
+class GroupWholeBanAction(PluginAction):
+ """群聊全体禁言动作处理类"""
+
+ action_name = "group_whole_ban_action"
+ action_description = (
+ "开启或关闭群聊全体禁言,当群聊过于混乱或需要安静时使用"
+ )
+ action_parameters = {
+ "enable": "是否开启全体禁言,输入True开启,False关闭,必填",
+ }
+ action_require = [
+ "当群聊过于混乱需要安静时使用",
+ "当需要临时暂停群聊讨论时使用",
+ "当有人要求开启全体禁言时使用",
+ "当管理员需要发布重要公告时使用",
+ ]
+ default = False
+ associated_types = ["command", "text"]
+
+ async def process(self) -> Tuple[bool, str]:
+ """处理群聊全体禁言动作"""
+ logger.info(f"{self.log_prefix} 执行全体禁言动作: {self.reasoning}")
+
+ # 获取参数
+ enable = self.action_data.get("enable")
+
+ if enable is None:
+ error_msg = "全体禁言参数不完整,需要enable参数"
+ logger.error(f"{self.log_prefix} {error_msg}")
+ return False, error_msg
+
+ # 确保enable是布尔类型
+ if isinstance(enable, str):
+ if enable.lower() in ['true', '1', 'yes', '开启', '是']:
+ enable = True
+ elif enable.lower() in ['false', '0', 'no', '关闭', '否']:
+ enable = False
+ else:
+ error_msg = f"无效的enable参数: {enable},应该是True或False"
+ logger.error(f"{self.log_prefix} {error_msg}")
+ return False, error_msg
+
+ # 发送表达情绪的消息
+ action_text = "开启" if enable else "关闭"
+ await self.send_message_by_expressor(f"我要{action_text}全体禁言")
+
+ try:
+ # 发送群聊全体禁言命令,按照新格式
+ await self.send_message(
+ type="command",
+ data={
+ "name": "GROUP_WHOLE_BAN",
+ "args": {
+ "enable": enable
+ }
+ }
+ )
+
+ logger.info(f"{self.log_prefix} 成功{action_text}全体禁言")
+ return True, f"成功{action_text}全体禁言"
+
+ except Exception as e:
+ logger.error(f"{self.log_prefix} 执行全体禁言动作时出错: {e}")
+ await self.send_message_by_expressor(f"执行全体禁言动作时出错: {e}")
+ return False, f"执行全体禁言动作时出错: {e}"
\ No newline at end of file
diff --git a/src/plugins/test_plugin/actions/mute_action.py b/src/plugins/test_plugin/actions/mute_action.py
index d0f947c4..9712b762 100644
--- a/src/plugins/test_plugin/actions/mute_action.py
+++ b/src/plugins/test_plugin/actions/mute_action.py
@@ -7,15 +7,16 @@ logger = get_logger("mute_action")
@register_action
class MuteAction(PluginAction):
- """测试动作处理类"""
+ """群聊禁言动作处理类"""
action_name = "mute_action"
action_description = (
- "如果某人违反了公序良俗,或者别人戳你太多,,或者某人刷屏,一定要禁言某人,如果你很生气,可以禁言某人"
+ "如果某人违反了公序良俗,或者别人戳你太多,或者某人刷屏,一定要禁言某人,如果你很生气,可以禁言某人,可以自选禁言时长,视严重程度而定"
)
action_parameters = {
- "target": "禁言对象,输入你要禁言的对象的名字,必填,",
- "duration": "禁言时长,输入你要禁言的时长,单位为秒,必填",
+ "target": "禁言对象,输入你要禁言的对象的名字,必填",
+ "duration": "禁言时长,输入你要禁言的时长,单位为秒,必填,必须为数字",
+ "reason": "禁言理由,可选",
}
action_require = [
"当有人违反了公序良俗时使用",
@@ -25,32 +26,55 @@ class MuteAction(PluginAction):
"当千石可乐或可乐酱要求你禁言时使用",
"当你想回避某个话题时使用",
]
- default = True # 不是默认动作,需要手动添加到使用集
- associated_types = ["command", "text"]
+ default = True # 默认动作,无需手动添加到使用集
+ # associated_types = ["command", "text"]
+ associated_types = ["text"]
async def process(self) -> Tuple[bool, str]:
- """处理测试动作"""
- logger.info(f"{self.log_prefix} 执行online动作: {self.reasoning}")
+ """处理群聊禁言动作"""
+ logger.info(f"{self.log_prefix} 执行禁言动作: {self.reasoning}")
- # 发送测试消息
+ # 获取参数
target = self.action_data.get("target")
duration = self.action_data.get("duration")
- reason = self.action_data.get("reason")
- platform, user_id = await self.get_user_id_by_person_name(target)
+ reason = self.action_data.get("reason", "违反群规")
+
+ if not target or not duration:
+ error_msg = "禁言参数不完整,需要target和duration"
+ logger.error(f"{self.log_prefix} {error_msg}")
+ return False, error_msg
- await self.send_message_by_expressor(f"我要禁言{target},{platform},时长{duration}秒,理由{reason},表达情绪")
+ # 获取用户ID
+ platform, user_id = await self.get_user_id_by_person_name(target)
+
+ if not user_id:
+ error_msg = f"未找到用户 {target} 的ID"
+ logger.error(f"{self.log_prefix} {error_msg}")
+ return False, error_msg
+
+ # 发送表达情绪的消息
+ await self.send_message_by_expressor(f"我要禁言{target},时长{duration}秒,理由:{reason}")
try:
+ # 确保duration是字符串类型
+ duration_str = str(duration)
+
+ # 发送群聊禁言命令,按照新格式
await self.send_message(
type="command",
- data={"name": "GROUP_BAN", "args": {"qq_id": f"{user_id}", "duration": f"{duration}"}},
- # target = target
+ data={
+ "name": "GROUP_BAN",
+ "args": {
+ "qq_id": str(user_id),
+ "duration": duration_str
+ }
+ }
)
+
+ logger.info(f"{self.log_prefix} 成功禁言用户 {target}({user_id}),时长 {duration} 秒")
+ return True, f"成功禁言 {target},时长 {duration} 秒"
except Exception as e:
- logger.error(f"{self.log_prefix} 执行mute动作时出错: {e}")
- await self.send_message_by_expressor(f"执行mute动作时出错: {e}")
-
- return False, "执行mute动作时出错"
-
- return True, "测试动作执行成功"
+ logger.error(f"{self.log_prefix} 执行禁言动作时出错: {e}")
+ await self.send_message_by_expressor(f"执行禁言动作时出错: {e}")
+ return False, f"执行禁言动作时出错: {e}"
diff --git a/src/plugins/test_plugin/actions/online_action.py b/src/plugins/test_plugin/actions/online_action.py
deleted file mode 100644
index c6a2fe6c..00000000
--- a/src/plugins/test_plugin/actions/online_action.py
+++ /dev/null
@@ -1,44 +0,0 @@
-from src.common.logger_manager import get_logger
-from src.chat.focus_chat.planners.actions.plugin_action import PluginAction, register_action
-from typing import Tuple
-
-logger = get_logger("check_online_action")
-
-
-@register_action
-class CheckOnlineAction(PluginAction):
- """测试动作处理类"""
-
- action_name = "check_online_action"
- action_description = "这是一个检查在线状态的动作,当有人要求你检查Maibot(麦麦 机器人)在线状态时使用"
- action_parameters = {"mode": "查看模式"}
- action_require = [
- "当有人要求你检查Maibot(麦麦 机器人)在线状态时使用",
- "mode参数为version时查看在线版本状态,默认用这种",
- "mode参数为type时查看在线系统类型分布",
- ]
- default = False # 不是默认动作,需要手动添加到使用集
- associated_types = ["text"]
-
- async def process(self) -> Tuple[bool, str]:
- """处理测试动作"""
- logger.info(f"{self.log_prefix} 执行online动作: {self.reasoning}")
-
- # 发送测试消息
- mode = self.action_data.get("mode", "type")
-
- await self.send_message_by_expressor("我看看")
-
- try:
- if mode == "type":
- await self.send_message("text", "#online detail")
- elif mode == "version":
- await self.send_message("text", "#online")
-
- except Exception as e:
- logger.error(f"{self.log_prefix} 执行online动作时出错: {e}")
- await self.send_message_by_expressor("执行online动作时出错: {e}")
-
- return False, "执行online动作时出错"
-
- return True, "测试动作执行成功"
From 52f7cc3762a4496a76b12176175af489c5d6a659 Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Tue, 27 May 2025 18:21:05 +0800
Subject: [PATCH 06/17] =?UTF-8?q?fix=EF=BC=9A=E4=BC=98=E5=8C=96=E8=AE=B0?=
=?UTF-8?q?=E5=BF=86=E6=8F=90=E5=8F=96=EF=BC=8C=E4=BF=AE=E5=A4=8D=E7=A0=B4?=
=?UTF-8?q?=E6=8D=9F=E7=9A=84tool=E4=BF=A1=E6=81=AF?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../expressors/exprssion_learner.py | 4 +-
src/chat/focus_chat/heartFC_chat.py | 6 +-
src/chat/focus_chat/info/structured_info.py | 4 +
.../info_processors/tool_processor.py | 13 ++-
src/chat/focus_chat/memory_activator.py | 99 ++++++++++++++-----
src/chat/focus_chat/planners/planner.py | 23 ++---
src/chat/memory_system/Hippocampus.py | 4 +-
src/individuality/expression_style.py | 4 +-
template/bot_config_template.toml | 3 +-
9 files changed, 110 insertions(+), 50 deletions(-)
diff --git a/src/chat/focus_chat/expressors/exprssion_learner.py b/src/chat/focus_chat/expressors/exprssion_learner.py
index 2ad0e68c..fbe44eb3 100644
--- a/src/chat/focus_chat/expressors/exprssion_learner.py
+++ b/src/chat/focus_chat/expressors/exprssion_learner.py
@@ -61,10 +61,10 @@ class ExpressionLearner:
def __init__(self) -> None:
# TODO: API-Adapter修改标记
self.express_learn_model: LLMRequest = LLMRequest(
- model=global_config.model.normal,
+ model=global_config.model.focus_expressor,
temperature=0.1,
max_tokens=256,
- request_type="response_heartflow",
+ request_type="learn_expression",
)
async def get_expression_by_chat_id(self, chat_id: str) -> Tuple[List[Dict[str, str]], List[Dict[str, str]]]:
diff --git a/src/chat/focus_chat/heartFC_chat.py b/src/chat/focus_chat/heartFC_chat.py
index 7463791b..d989fb5e 100644
--- a/src/chat/focus_chat/heartFC_chat.py
+++ b/src/chat/focus_chat/heartFC_chat.py
@@ -19,6 +19,7 @@ from src.chat.focus_chat.info_processors.working_memory_processor import Working
from src.chat.focus_chat.info_processors.action_processor import ActionProcessor
from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservation
from src.chat.heart_flow.observation.working_observation import WorkingMemoryObservation
+from src.chat.heart_flow.observation.structure_observation import StructureObservation
from src.chat.focus_chat.info_processors.tool_processor import ToolProcessor
from src.chat.focus_chat.expressors.default_expressor import DefaultExpressor
from src.chat.focus_chat.memory_activator import MemoryActivator
@@ -97,6 +98,7 @@ class HeartFChatting:
self.log_prefix: str = str(chat_id) # Initial default, will be updated
self.hfcloop_observation = HFCloopObservation(observe_id=self.stream_id)
self.chatting_observation = observations[0]
+ self.structure_observation = StructureObservation(observe_id=self.stream_id)
self.memory_activator = MemoryActivator()
self.working_memory = WorkingMemory(chat_id=self.stream_id)
@@ -415,11 +417,13 @@ class HeartFChatting:
await self.chatting_observation.observe()
await self.working_observation.observe()
await self.hfcloop_observation.observe()
+ await self.structure_observation.observe()
observations: List[Observation] = []
observations.append(self.chatting_observation)
observations.append(self.working_observation)
observations.append(self.hfcloop_observation)
-
+ observations.append(self.structure_observation)
+
loop_observation_info = {
"observations": observations,
}
diff --git a/src/chat/focus_chat/info/structured_info.py b/src/chat/focus_chat/info/structured_info.py
index 3a55c81f..616e942d 100644
--- a/src/chat/focus_chat/info/structured_info.py
+++ b/src/chat/focus_chat/info/structured_info.py
@@ -76,7 +76,11 @@ class StructuredInfo:
"""
info_str = ""
+ # print(f"self.data: {self.data}")
+
for key, value in self.data.items():
+
+ # print(f"key: {key}, value: {value}")
info_str += f"信息类型:{key},信息内容:{value}\n"
return info_str
diff --git a/src/chat/focus_chat/info_processors/tool_processor.py b/src/chat/focus_chat/info_processors/tool_processor.py
index b1568cf3..6980c908 100644
--- a/src/chat/focus_chat/info_processors/tool_processor.py
+++ b/src/chat/focus_chat/info_processors/tool_processor.py
@@ -75,10 +75,12 @@ class ToolProcessor(BaseProcessor):
result, used_tools, prompt = await self.execute_tools(observation, running_memorys)
# 更新WorkingObservation中的结构化信息
+ logger.debug(f"工具调用结果: {result}")
+
for observation in observations:
if isinstance(observation, StructureObservation):
for structured_info in result:
- logger.debug(f"{self.log_prefix} 更新WorkingObservation中的结构化信息: {structured_info}")
+ # logger.debug(f"{self.log_prefix} 更新WorkingObservation中的结构化信息: {structured_info}")
observation.add_structured_info(structured_info)
working_infos = observation.get_observe_info()
@@ -87,7 +89,12 @@ class ToolProcessor(BaseProcessor):
structured_info = StructuredInfo()
if working_infos:
for working_info in working_infos:
- structured_info.set_info(working_info.get("type"), working_info.get("content"))
+ # print(f"working_info: {working_info}")
+ # print(f"working_info.get('type'): {working_info.get('type')}")
+ # print(f"working_info.get('content'): {working_info.get('content')}")
+ structured_info.set_info(key=working_info.get('type'), value=working_info.get('content'))
+ # info = structured_info.get_processed_info()
+ # print(f"info: {info}")
return [structured_info]
@@ -155,7 +162,7 @@ class ToolProcessor(BaseProcessor):
)
# 调用LLM,专注于工具使用
- # logger.debug(f"开始执行工具调用{prompt}")
+ logger.debug(f"开始执行工具调用{prompt}")
response, _, tool_calls = await self.llm_model.generate_response_tool_async(prompt=prompt, tools=tools)
logger.debug(f"获取到工具原始输出:\n{tool_calls}")
diff --git a/src/chat/focus_chat/memory_activator.py b/src/chat/focus_chat/memory_activator.py
index 0f75d09e..e097bdbb 100644
--- a/src/chat/focus_chat/memory_activator.py
+++ b/src/chat/focus_chat/memory_activator.py
@@ -4,24 +4,58 @@ from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservati
from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config
from src.common.logger_manager import get_logger
-from src.chat.utils.prompt_builder import Prompt
+from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from datetime import datetime
from src.chat.memory_system.Hippocampus import HippocampusManager
from typing import List, Dict
import difflib
+import json
+from json_repair import repair_json
logger = get_logger("memory_activator")
+def get_keywords_from_json(json_str):
+ """
+ 从JSON字符串中提取关键词列表
+
+ Args:
+ json_str: JSON格式的字符串
+
+ Returns:
+ List[str]: 关键词列表
+ """
+ try:
+ # 使用repair_json修复JSON格式
+ fixed_json = repair_json(json_str)
+
+ # 如果repair_json返回的是字符串,需要解析为Python对象
+ if isinstance(fixed_json, str):
+ result = json.loads(fixed_json)
+ else:
+ # 如果repair_json直接返回了字典对象,直接使用
+ result = fixed_json
+
+ # 提取关键词
+ keywords = result.get("keywords", [])
+ return keywords
+ except Exception as e:
+ logger.error(f"解析关键词JSON失败: {e}")
+ return []
+
+
def init_prompt():
# --- Group Chat Prompt ---
memory_activator_prompt = """
- 你是一个记忆分析器,你需要根据以下信息来进行会议
+ 你是一个记忆分析器,你需要根据以下信息来进行回忆
以下是一场聊天中的信息,请根据这些信息,总结出几个关键词作为记忆回忆的触发词
{obs_info_text}
+ 历史关键词(请避免重复提取这些关键词):
+ {cached_keywords}
+
请输出一个json格式,包含以下字段:
{{
"keywords": ["关键词1", "关键词2", "关键词3",......]
@@ -39,6 +73,7 @@ class MemoryActivator:
model=global_config.model.memory_summary, temperature=0.7, max_tokens=50, request_type="chat_observation"
)
self.running_memory = []
+ self.cached_keywords = set() # 用于缓存历史关键词
async def activate_memory(self, observations) -> List[Dict]:
"""
@@ -61,31 +96,47 @@ class MemoryActivator:
elif isinstance(observation, HFCloopObservation):
obs_info_text += observation.get_observe_info()
- logger.debug(f"回忆待检索内容:obs_info_text: {obs_info_text}")
+ # logger.debug(f"回忆待检索内容:obs_info_text: {obs_info_text}")
- # prompt = await global_prompt_manager.format_prompt(
- # "memory_activator_prompt",
- # obs_info_text=obs_info_text,
- # )
-
- # logger.debug(f"prompt: {prompt}")
-
- # response = await self.summary_model.generate_response(prompt)
-
- # logger.debug(f"response: {response}")
-
- # # 只取response的第一个元素(字符串)
- # response_str = response[0]
- # keywords = list(get_keywords_from_json(response_str))
-
- # #调用记忆系统获取相关记忆
- # related_memory = await HippocampusManager.get_instance().get_memory_from_topic(
- # valid_keywords=keywords, max_memory_num=3, max_memory_length=2, max_depth=3
- # )
- related_memory = await HippocampusManager.get_instance().get_memory_from_text(
- text=obs_info_text, max_memory_num=5, max_memory_length=2, max_depth=3, fast_retrieval=True
+ # 将缓存的关键词转换为字符串,用于prompt
+ cached_keywords_str = ", ".join(self.cached_keywords) if self.cached_keywords else "暂无历史关键词"
+
+ prompt = await global_prompt_manager.format_prompt(
+ "memory_activator_prompt",
+ obs_info_text=obs_info_text,
+ cached_keywords=cached_keywords_str,
)
+ logger.debug(f"prompt: {prompt}")
+
+ response = await self.summary_model.generate_response(prompt)
+
+ logger.debug(f"response: {response}")
+
+ # 只取response的第一个元素(字符串)
+ response_str = response[0]
+ keywords = list(get_keywords_from_json(response_str))
+
+ # 更新关键词缓存
+ if keywords:
+ # 限制缓存大小,最多保留10个关键词
+ if len(self.cached_keywords) > 10:
+ # 转换为列表,移除最早的关键词
+ cached_list = list(self.cached_keywords)
+ self.cached_keywords = set(cached_list[-8:])
+
+ # 添加新的关键词到缓存
+ self.cached_keywords.update(keywords)
+ logger.debug(f"更新关键词缓存: {self.cached_keywords}")
+
+ #调用记忆系统获取相关记忆
+ related_memory = await HippocampusManager.get_instance().get_memory_from_topic(
+ valid_keywords=keywords, max_memory_num=3, max_memory_length=2, max_depth=3
+ )
+ # related_memory = await HippocampusManager.get_instance().get_memory_from_text(
+ # text=obs_info_text, max_memory_num=5, max_memory_length=2, max_depth=3, fast_retrieval=False
+ # )
+
# logger.debug(f"获取到的记忆: {related_memory}")
# 激活时,所有已有记忆的duration+1,达到3则移除
diff --git a/src/chat/focus_chat/planners/planner.py b/src/chat/focus_chat/planners/planner.py
index 576e8979..cf025ee6 100644
--- a/src/chat/focus_chat/planners/planner.py
+++ b/src/chat/focus_chat/planners/planner.py
@@ -36,9 +36,8 @@ def init_prompt():
{mind_info_block}
{cycle_info_block}
-{action_available_block}
-
请综合分析聊天内容和你看到的新消息,参考聊天规划,选择合适的action:
+注意,除了下面动作选项之外,你在群聊里不能做其他任何事情,这是你能力的边界,现在请你选择合适的action:
{action_options_text}
@@ -126,13 +125,6 @@ class ActionPlanner:
action = "no_reply"
reasoning = f"之前选择的动作{action}已被移除,原因: {reason}"
- using_actions = self.action_manager.get_using_actions()
- action_available_block = ""
- for action_name, action_info in using_actions.items():
- action_description = action_info["description"]
- action_available_block += f"\n你在聊天中可以使用{action_name},这个动作的描述是{action_description}\n"
- action_available_block += "注意,除了上述动作选项之外,你在群聊里不能做其他任何事情,这是你能力的边界\n"
-
# 继续处理其他信息
for info in all_plan_info:
if isinstance(info, ObsInfo):
@@ -147,7 +139,8 @@ class ActionPlanner:
elif isinstance(info, SelfInfo):
self_info = info.get_processed_info()
elif isinstance(info, StructuredInfo):
- _structured_info = info.get_data()
+ structured_info = info.get_processed_info()
+ # print(f"structured_info: {structured_info}")
elif not isinstance(info, ActionInfo): # 跳过已处理的ActionInfo
extra_info.append(info.get_processed_info())
@@ -178,11 +171,10 @@ class ActionPlanner:
chat_target_info=None,
observed_messages_str=observed_messages_str, # <-- Pass local variable
current_mind=current_mind, # <-- Pass argument
- # structured_info=structured_info, # <-- Pass SubMind info
+ structured_info=structured_info, # <-- Pass SubMind info
current_available_actions=current_available_actions, # <-- Pass determined actions
cycle_info=cycle_info, # <-- Pass cycle info
extra_info=extra_info,
- action_available_block=action_available_block,
)
# --- 调用 LLM (普通文本生成) ---
@@ -268,7 +260,7 @@ class ActionPlanner:
chat_target_info: Optional[dict], # Now passed as argument
observed_messages_str: str,
current_mind: Optional[str],
- action_available_block: str,
+ structured_info: Optional[str],
current_available_actions: Dict[str, ActionInfo],
cycle_info: Optional[str],
extra_info: list[str],
@@ -326,7 +318,8 @@ class ActionPlanner:
action_options_block += using_action_prompt
extra_info_block = "\n".join(extra_info)
- if extra_info:
+ extra_info_block += f"\n{structured_info}"
+ if extra_info or structured_info:
extra_info_block = f"以下是一些额外的信息,现在请你阅读以下内容,进行决策\n{extra_info_block}\n以上是一些额外的信息,现在请你阅读以下内容,进行决策"
else:
extra_info_block = ""
@@ -343,7 +336,7 @@ class ActionPlanner:
mind_info_block=mind_info_block,
cycle_info_block=cycle_info,
action_options_text=action_options_block,
- action_available_block=action_available_block,
+ # action_available_block=action_available_block,
extra_info_block=extra_info_block,
moderation_prompt=moderation_prompt_block,
)
diff --git a/src/chat/memory_system/Hippocampus.py b/src/chat/memory_system/Hippocampus.py
index 9424706e..4ed26e5e 100644
--- a/src/chat/memory_system/Hippocampus.py
+++ b/src/chat/memory_system/Hippocampus.py
@@ -526,12 +526,12 @@ class Hippocampus:
if not keywords:
return []
- # logger.info(f"提取的关键词: {', '.join(keywords)}")
+ logger.info(f"提取的关键词: {', '.join(keywords)}")
# 过滤掉不存在于记忆图中的关键词
valid_keywords = [keyword for keyword in keywords if keyword in self.memory_graph.G]
if not valid_keywords:
- # logger.info("没有找到有效的关键词节点")
+ logger.info("没有找到有效的关键词节点")
return []
logger.debug(f"有效的关键词: {', '.join(valid_keywords)}")
diff --git a/src/individuality/expression_style.py b/src/individuality/expression_style.py
index a64ba9cf..841d44e5 100644
--- a/src/individuality/expression_style.py
+++ b/src/individuality/expression_style.py
@@ -33,10 +33,10 @@ def init_prompt() -> None:
class PersonalityExpression:
def __init__(self):
self.express_learn_model: LLMRequest = LLMRequest(
- model=global_config.model.normal,
+ model=global_config.model.focus_expressor,
temperature=0.1,
max_tokens=256,
- request_type="response_heartflow",
+ request_type="learn_expression",
)
self.meta_file_path = os.path.join("data", "expression", "personality", "expression_style_meta.json")
self.expressions_file_path = os.path.join("data", "expression", "personality", "expressions.json")
diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml
index 0156cf36..094399ea 100644
--- a/template/bot_config_template.toml
+++ b/template/bot_config_template.toml
@@ -255,7 +255,8 @@ provider = "SILICONFLOW"
pri_in = 2
pri_out = 8
-#表达器模型,用于生成表达方式
+#表达器模型,用于表达麦麦的想法,生成最终回复,对语言风格影响极大
+#也用于表达方式学习
[model.focus_expressor]
name = "Pro/deepseek-ai/DeepSeek-V3"
provider = "SILICONFLOW"
From cad9b40bb3eeac68baf865b4326f8bca7405b13c Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Tue, 27 May 2025 18:35:33 +0800
Subject: [PATCH 07/17] =?UTF-8?q?better=EF=BC=9A=E8=BF=9B=E4=B8=80?=
=?UTF-8?q?=E6=AD=A5=E6=8B=86=E5=88=86=E6=A8=A1=E5=9E=8B=E9=85=8D=E7=BD=AE?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/chat/emoji_system/emoji_manager.py | 2 +-
.../expressors/default_expressor.py | 6 +-
.../info_processors/chattinginfo_processor.py | 2 +-
src/chat/normal_chat/normal_chat_generator.py | 8 +--
src/config/official_configs.py | 20 ++++---
src/experimental/PFC/pfc.py | 2 +-
src/experimental/PFC/pfc_KnowledgeFetcher.py | 4 +-
src/llm_models/utils_model.py | 4 +-
src/person_info/person_info.py | 2 +-
template/bot_config_template.toml | 60 +++++++++++--------
10 files changed, 61 insertions(+), 49 deletions(-)
diff --git a/src/chat/emoji_system/emoji_manager.py b/src/chat/emoji_system/emoji_manager.py
index 376e6b8f..964721e4 100644
--- a/src/chat/emoji_system/emoji_manager.py
+++ b/src/chat/emoji_system/emoji_manager.py
@@ -374,7 +374,7 @@ class EmojiManager:
self.vlm = LLMRequest(model=global_config.model.vlm, temperature=0.3, max_tokens=1000, request_type="emoji")
self.llm_emotion_judge = LLMRequest(
- model=global_config.model.normal, max_tokens=600, request_type="emoji"
+ model=global_config.model.utils, max_tokens=600, request_type="emoji"
) # 更高的温度,更少的token(后续可以根据情绪来调整温度)
self.emoji_num = 0
diff --git a/src/chat/focus_chat/expressors/default_expressor.py b/src/chat/focus_chat/expressors/default_expressor.py
index be3e827f..4e46acaa 100644
--- a/src/chat/focus_chat/expressors/default_expressor.py
+++ b/src/chat/focus_chat/expressors/default_expressor.py
@@ -192,9 +192,9 @@ class DefaultExpressor:
"""
try:
# 1. 获取情绪影响因子并调整模型温度
- arousal_multiplier = mood_manager.get_arousal_multiplier()
- current_temp = float(global_config.model.normal["temp"]) * arousal_multiplier
- self.express_model.params["temperature"] = current_temp # 动态调整温度
+ # arousal_multiplier = mood_manager.get_arousal_multiplier()
+ # current_temp = float(global_config.model.normal["temp"]) * arousal_multiplier
+ # self.express_model.params["temperature"] = current_temp # 动态调整温度
# 2. 获取信息捕捉器
info_catcher = info_catcher_manager.get_info_catcher(thinking_id)
diff --git a/src/chat/focus_chat/info_processors/chattinginfo_processor.py b/src/chat/focus_chat/info_processors/chattinginfo_processor.py
index 1fcab5e4..d0b6df5f 100644
--- a/src/chat/focus_chat/info_processors/chattinginfo_processor.py
+++ b/src/chat/focus_chat/info_processors/chattinginfo_processor.py
@@ -28,7 +28,7 @@ class ChattingInfoProcessor(BaseProcessor):
super().__init__()
# TODO: API-Adapter修改标记
self.model_summary = LLMRequest(
- model=global_config.model.observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
+ model=global_config.model.utils_small, temperature=0.7, max_tokens=300, request_type="chat_observation"
)
async def process_info(
diff --git a/src/chat/normal_chat/normal_chat_generator.py b/src/chat/normal_chat/normal_chat_generator.py
index 7fa6b032..0a34ffc8 100644
--- a/src/chat/normal_chat/normal_chat_generator.py
+++ b/src/chat/normal_chat/normal_chat_generator.py
@@ -20,13 +20,13 @@ class NormalChatGenerator:
model=global_config.model.normal_chat_1,
temperature=0.7,
max_tokens=3000,
- request_type="response_reasoning",
+ request_type="normal_chat_1",
)
self.model_normal = LLMRequest(
- model=global_config.model.normal,
- temperature=global_config.model.normal["temp"],
+ model=global_config.model.normal_chat_2,
+ temperature=global_config.model.normal_chat_2["temp"],
max_tokens=256,
- request_type="response_reasoning",
+ request_type="normal_chat_2",
)
self.model_sum = LLMRequest(
diff --git a/src/config/official_configs.py b/src/config/official_configs.py
index a35026e5..731c847f 100644
--- a/src/config/official_configs.py
+++ b/src/config/official_configs.py
@@ -377,12 +377,19 @@ class ModelConfig(ConfigBase):
"""模型配置类"""
model_max_output_length: int = 800 # 最大回复长度
+
+
+ utils: dict[str, Any] = field(default_factory=lambda: {})
+ """组件模型配置"""
+
+ utils_small: dict[str, Any] = field(default_factory=lambda: {})
+ """组件小模型配置"""
- reasoning: dict[str, Any] = field(default_factory=lambda: {})
- """推理模型配置"""
-
- normal: dict[str, Any] = field(default_factory=lambda: {})
- """普通模型配置"""
+ normal_chat_1: dict[str, Any] = field(default_factory=lambda: {})
+ """normal_chat首要回复模型模型配置"""
+
+ normal_chat_2: dict[str, Any] = field(default_factory=lambda: {})
+ """normal_chat次要回复模型配置"""
memory_summary: dict[str, Any] = field(default_factory=lambda: {})
"""记忆的概括模型配置"""
@@ -390,9 +397,6 @@ class ModelConfig(ConfigBase):
vlm: dict[str, Any] = field(default_factory=lambda: {})
"""视觉语言模型配置"""
- observation: dict[str, Any] = field(default_factory=lambda: {})
- """观察模型配置"""
-
focus_working_memory: dict[str, Any] = field(default_factory=lambda: {})
"""专注工作记忆模型配置"""
diff --git a/src/experimental/PFC/pfc.py b/src/experimental/PFC/pfc.py
index d487a1aa..78397780 100644
--- a/src/experimental/PFC/pfc.py
+++ b/src/experimental/PFC/pfc.py
@@ -44,7 +44,7 @@ class GoalAnalyzer:
def __init__(self, stream_id: str, private_name: str):
# TODO: API-Adapter修改标记
self.llm = LLMRequest(
- model=global_config.model.normal, temperature=0.7, max_tokens=1000, request_type="conversation_goal"
+ model=global_config.model.utils, temperature=0.7, max_tokens=1000, request_type="conversation_goal"
)
self.personality_info = individuality.get_prompt(x_person=2, level=3)
diff --git a/src/experimental/PFC/pfc_KnowledgeFetcher.py b/src/experimental/PFC/pfc_KnowledgeFetcher.py
index 769d54da..b94cd5b1 100644
--- a/src/experimental/PFC/pfc_KnowledgeFetcher.py
+++ b/src/experimental/PFC/pfc_KnowledgeFetcher.py
@@ -16,8 +16,8 @@ class KnowledgeFetcher:
def __init__(self, private_name: str):
# TODO: API-Adapter修改标记
self.llm = LLMRequest(
- model=global_config.model.normal,
- temperature=global_config.model.normal["temp"],
+ model=global_config.model.utils,
+ temperature=global_config.model.utils["temp"],
max_tokens=1000,
request_type="knowledge_fetch",
)
diff --git a/src/llm_models/utils_model.py b/src/llm_models/utils_model.py
index 2a45c5c9..f2476192 100644
--- a/src/llm_models/utils_model.py
+++ b/src/llm_models/utils_model.py
@@ -497,8 +497,8 @@ class LLMRequest:
logger.warning(f"检测到403错误,模型从 {old_model_name} 降级为 {self.model_name}")
# 对全局配置进行更新
- if global_config.model.normal.get("name") == old_model_name:
- global_config.model.normal["name"] = self.model_name
+ if global_config.model.normal_chat_2.get("name") == old_model_name:
+ global_config.model.normal_chat_2["name"] = self.model_name
logger.warning(f"将全局配置中的 llm_normal 模型临时降级至{self.model_name}")
if global_config.model.normal_chat_1.get("name") == old_model_name:
global_config.model.normal_chat_1["name"] = self.model_name
diff --git a/src/person_info/person_info.py b/src/person_info/person_info.py
index f4e14df7..9a85d171 100644
--- a/src/person_info/person_info.py
+++ b/src/person_info/person_info.py
@@ -58,7 +58,7 @@ class PersonInfoManager:
self.person_name_list = {}
# TODO: API-Adapter修改标记
self.qv_name_llm = LLMRequest(
- model=global_config.model.normal,
+ model=global_config.model.utils,
max_tokens=256,
request_type="qv_name",
)
diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml
index 094399ea..5721f8f9 100644
--- a/template/bot_config_template.toml
+++ b/template/bot_config_template.toml
@@ -133,7 +133,7 @@ mood_update_interval = 1.0 # 情绪更新间隔 单位秒
mood_decay_rate = 0.95 # 情绪衰减率
mood_intensity_factor = 1.0 # 情绪强度因子
-[keyword_reaction] # 针对某个关键词作出反应
+[keyword_reaction] # 针对某个关键词作出反应,仅在 普通聊天 有效
enable = true # 关键词反应功能的总开关
[[keyword_reaction.rules]] # 如果想要新增多个关键词,直接复制本条,修改keywords和reaction即可
@@ -181,10 +181,9 @@ enable = true
[experimental] #实验性功能
enable_friend_chat = false # 是否启用好友聊天
-pfc_chatting = false # 是否启用PFC聊天,该功能仅作用于私聊,与回复模式独立
+pfc_chatting = false # 是否启用PFC聊天,该功能仅作用于私聊,与回复模式独立,在0.7.0暂时无效
#下面的模型若使用硅基流动则不需要更改,使用ds官方则改成.env自定义的宏,使用自定义模型则选择定位相似的模型自己填写
-#推理模型
# 额外字段
# 下面的模型有以下额外字段可以添加:
@@ -195,14 +194,9 @@ pfc_chatting = false # 是否启用PFC聊天,该功能仅作用于私聊,与
[model]
model_max_output_length = 800 # 模型单次返回的最大token数
-#这个模型必须是推理模型
-[model.normal_chat_1] # 一般聊天模式的首要回复模型,推荐使用 推理模型
-name = "Pro/deepseek-ai/DeepSeek-R1"
-provider = "SILICONFLOW"
-pri_in = 1.0 #模型的输入价格(非必填,可以记录消耗)
-pri_out = 4.0 #模型的输出价格(非必填,可以记录消耗)
+#------------必填:组件模型------------
-[model.normal] #V3 回复模型 专注和一般聊天模式共用的回复模型
+[model.utils] # 在麦麦的一些组件中使用的模型,例如表情包模块,取名模块,消耗量不大
name = "Pro/deepseek-ai/DeepSeek-V3"
provider = "SILICONFLOW"
pri_in = 2 #模型的输入价格(非必填,可以记录消耗)
@@ -210,6 +204,13 @@ pri_out = 8 #模型的输出价格(非必填,可以记录消耗)
#默认temp 0.2 如果你使用的是老V3或者其他模型,请自己修改temp参数
temp = 0.2 #模型的温度,新V3建议0.1-0.3
+[model.utils_small] # 在麦麦的一些组件中使用的小模型,消耗量较大
+# 强烈建议使用免费的小模型
+name = "Qwen/Qwen2.5-7B-Instruct"
+provider = "SILICONFLOW"
+pri_in = 0
+pri_out = 0
+
[model.memory_summary] # 记忆的概括模型,建议使用qwen2.5 32b 及以上
name = "Qwen/Qwen2.5-32B-Instruct"
provider = "SILICONFLOW"
@@ -222,15 +223,32 @@ provider = "SILICONFLOW"
pri_in = 0.35
pri_out = 0.35
-[model.observation] #观察模型,压缩聊天内容,建议用免费的
-# name = "Pro/Qwen/Qwen2.5-7B-Instruct"
-name = "Qwen/Qwen2.5-7B-Instruct"
+#嵌入模型
+[model.embedding]
+name = "BAAI/bge-m3"
provider = "SILICONFLOW"
pri_in = 0
pri_out = 0
-[model.focus_working_memory] #工作记忆模型,建议使用qwen2.5 32b
-# name = "Pro/Qwen/Qwen2.5-7B-Instruct"
+#------------普通聊天必填模型------------
+
+[model.normal_chat_1] # 一般聊天模式的首要回复模型,推荐使用 推理模型
+name = "Pro/deepseek-ai/DeepSeek-R1"
+provider = "SILICONFLOW"
+pri_in = 1.0 #模型的输入价格(非必填,可以记录消耗)
+pri_out = 4.0 #模型的输出价格(非必填,可以记录消耗)
+
+[model.normal_chat_2] # 一般聊天模式的次要回复模型,推荐使用 非推理模型
+name = "Pro/deepseek-ai/DeepSeek-V3"
+provider = "SILICONFLOW"
+pri_in = 2 #模型的输入价格(非必填,可以记录消耗)
+pri_out = 8 #模型的输出价格(非必填,可以记录消耗)
+#默认temp 0.2 如果你使用的是老V3或者其他模型,请自己修改temp参数
+temp = 0.2 #模型的温度,新V3建议0.1-0.3
+
+#------------专注聊天必填模型------------
+
+[model.focus_working_memory] #工作记忆模型
name = "Qwen/Qwen2.5-32B-Instruct"
provider = "SILICONFLOW"
pri_in = 1.26
@@ -243,7 +261,7 @@ pri_in = 2
pri_out = 8
temp = 0.3 #模型的温度,新V3建议0.1-0.3
-[model.focus_tool_use] #工具调用模型,需要使用支持工具调用的模型,建议使用qwen2.5 32b
+[model.focus_tool_use] #工具调用模型,需要使用支持工具调用的模型
name = "Qwen/Qwen2.5-32B-Instruct"
provider = "SILICONFLOW"
pri_in = 1.26
@@ -272,16 +290,6 @@ pri_in = 2
pri_out = 8
temp = 0.3
-#嵌入模型
-
-[model.embedding] #嵌入
-name = "BAAI/bge-m3"
-provider = "SILICONFLOW"
-pri_in = 0
-pri_out = 0
-
-
-
#私聊PFC:需要开启PFC功能,默认三个模型均为硅基流动v3,如果需要支持多人同时私聊或频繁调用,建议把其中的一个或两个换成官方v3或其它模型,以免撞到429
From 7e59382603b4e077d61593a905ed01d9592e9754 Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Tue, 27 May 2025 20:50:06 +0800
Subject: [PATCH 08/17] =?UTF-8?q?feat=EF=BC=9A=E6=94=AF=E6=8C=81qwen3?=
=?UTF-8?q?=E6=A8=A1=E5=9E=8B=E7=9A=84enable=5Fthinking=E5=8F=82=E6=95=B0?=
=?UTF-8?q?=E5=92=8Cthinking=5Fbudget=E5=8F=82=E6=95=B0?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../info_processors/self_processor.py | 2 +-
src/chat/focus_chat/planners/planner.py | 3 +-
.../observation/structure_observation.py | 4 +-
src/llm_models/utils_model.py | 20 +++++++-
.../test_plugin/actions/mute_action.py | 2 +-
template/bot_config_template.toml | 51 ++++++++++++-------
6 files changed, 56 insertions(+), 26 deletions(-)
diff --git a/src/chat/focus_chat/info_processors/self_processor.py b/src/chat/focus_chat/info_processors/self_processor.py
index 5e67c71f..35bb6ec2 100644
--- a/src/chat/focus_chat/info_processors/self_processor.py
+++ b/src/chat/focus_chat/info_processors/self_processor.py
@@ -32,7 +32,7 @@ def init_prompt():
{chat_observe_info}
现在请你根据现有的信息,思考自我认同:请严格遵守以下规则
-1. 请严格参考最上方的人设,适当参考记忆和当前聊天内容
+1. 请严格参考最上方的人设,适当参考记忆和当前聊天内容,不要被记忆和当前聊天内容中相反的内容误导
2. 你是一个什么样的人,你和群里的人关系如何
3. 你的形象是什么
4. 思考有没有人提到你,或者图片与你有关
diff --git a/src/chat/focus_chat/planners/planner.py b/src/chat/focus_chat/planners/planner.py
index cf025ee6..05443006 100644
--- a/src/chat/focus_chat/planners/planner.py
+++ b/src/chat/focus_chat/planners/planner.py
@@ -180,8 +180,9 @@ class ActionPlanner:
# --- 调用 LLM (普通文本生成) ---
llm_content = None
try:
- llm_content, _, _ = await self.planner_llm.generate_response(prompt=prompt)
+ llm_content, reasoning_content, _ = await self.planner_llm.generate_response(prompt=prompt)
logger.debug(f"{self.log_prefix}[Planner] LLM 原始 JSON 响应 (预期): {llm_content}")
+ logger.debug(f"{self.log_prefix}[Planner] LLM 原始理由 响应 (预期): {reasoning_content}")
except Exception as req_e:
logger.error(f"{self.log_prefix}[Planner] LLM 请求执行失败: {req_e}")
reasoning = f"LLM 请求失败,你的模型出现问题: {req_e}"
diff --git a/src/chat/heart_flow/observation/structure_observation.py b/src/chat/heart_flow/observation/structure_observation.py
index 2732ef0b..73b5bf75 100644
--- a/src/chat/heart_flow/observation/structure_observation.py
+++ b/src/chat/heart_flow/observation/structure_observation.py
@@ -26,7 +26,7 @@ class StructureObservation:
for structured_info in self.structured_info:
if structured_info.get("ttl") > 0:
structured_info["ttl"] -= 1
- observed_structured_infos.append(structured_info)
- logger.debug(f"观察到结构化信息仍旧在: {structured_info}")
+ observed_structured_infos.append(structured_info)
+ logger.debug(f"观察到结构化信息仍旧在: {structured_info}")
self.structured_info = observed_structured_infos
diff --git a/src/llm_models/utils_model.py b/src/llm_models/utils_model.py
index f2476192..319c020f 100644
--- a/src/llm_models/utils_model.py
+++ b/src/llm_models/utils_model.py
@@ -117,6 +117,9 @@ class LLMRequest:
self.model_name: str = model["name"]
self.params = kwargs
+ self.enable_thinking = model.get("enable_thinking", False)
+ self.temp = model.get("temp", 0.7)
+ self.thinking_budget = model.get("thinking_budget", 4096)
self.stream = model.get("stream", False)
self.pri_in = model.get("pri_in", 0)
self.pri_out = model.get("pri_out", 0)
@@ -601,8 +604,9 @@ class LLMRequest:
new_params = dict(params)
if self.model_name.lower() in self.MODELS_NEEDING_TRANSFORMATION:
- # 删除 'temperature' 参数(如果存在)
- new_params.pop("temperature", None)
+        # Drop 'temperature' only when it equals the 0.7 sentinel default, preserving custom temps set in _build_payload; NOTE(review): a user-configured temperature of exactly 0.7 is also dropped — confirm this is intended
+ if "temperature" in new_params and new_params["temperature"] == 0.7:
+ new_params.pop("temperature")
# 如果存在 'max_tokens',则重命名为 'max_completion_tokens'
if "max_tokens" in new_params:
new_params["max_completion_tokens"] = new_params.pop("max_tokens")
@@ -632,6 +636,18 @@ class LLMRequest:
"messages": messages,
**params_copy,
}
+
+ # 添加temp参数(如果不是默认值0.7)
+ if self.temp != 0.7:
+ payload["temperature"] = self.temp
+
+        # Explicitly send enable_thinking=False when thinking is disabled, to override providers (e.g. Qwen3) that default it to enabled
+ if not self.enable_thinking:
+ payload["enable_thinking"] = False
+
+ if self.thinking_budget != 4096:
+ payload["thinking_budget"] = self.thinking_budget
+
if "max_tokens" not in payload and "max_completion_tokens" not in payload:
payload["max_tokens"] = global_config.model.model_max_output_length
# 如果 payload 中依然存在 max_tokens 且需要转换,在这里进行再次检查
diff --git a/src/plugins/test_plugin/actions/mute_action.py b/src/plugins/test_plugin/actions/mute_action.py
index 9712b762..279ee010 100644
--- a/src/plugins/test_plugin/actions/mute_action.py
+++ b/src/plugins/test_plugin/actions/mute_action.py
@@ -11,7 +11,7 @@ class MuteAction(PluginAction):
action_name = "mute_action"
action_description = (
- "如果某人违反了公序良俗,或者别人戳你太多,或者某人刷屏,一定要禁言某人,如果你很生气,可以禁言某人,可以自选禁言时长,视严重程度而定"
+ "如果某人违反了公序良俗,或者别人戳你太多,或者某人刷屏,一定要禁言某人,如果你很生气,可以禁言某人,可以自选禁言时长,视严重程度而定。"
)
action_parameters = {
"target": "禁言对象,输入你要禁言的对象的名字,必填",
diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml
index 5721f8f9..00859c48 100644
--- a/template/bot_config_template.toml
+++ b/template/bot_config_template.toml
@@ -206,16 +206,18 @@ temp = 0.2 #模型的温度,新V3建议0.1-0.3
[model.utils_small] # 在麦麦的一些组件中使用的小模型,消耗量较大
# 强烈建议使用免费的小模型
-name = "Qwen/Qwen2.5-7B-Instruct"
+name = "Qwen/Qwen3-8B"
provider = "SILICONFLOW"
+enable_thinking = false # 是否启用思考
pri_in = 0
pri_out = 0
-[model.memory_summary] # 记忆的概括模型,建议使用qwen2.5 32b 及以上
-name = "Qwen/Qwen2.5-32B-Instruct"
+[model.memory_summary] # 记忆的概括模型
+name = "Qwen/Qwen3-30B-A3B"
provider = "SILICONFLOW"
-pri_in = 1.26
-pri_out = 1.26
+enable_thinking = false # 是否启用思考
+pri_in = 0.7
+pri_out = 2.8
[model.vlm] # 图像识别模型
name = "Pro/Qwen/Qwen2.5-VL-7B-Instruct"
@@ -226,7 +228,7 @@ pri_out = 0.35
#嵌入模型
[model.embedding]
name = "BAAI/bge-m3"
-provider = "SILICONFLOW"
+provider = "DEV"
pri_in = 0
pri_out = 0
@@ -235,8 +237,8 @@ pri_out = 0
[model.normal_chat_1] # 一般聊天模式的首要回复模型,推荐使用 推理模型
name = "Pro/deepseek-ai/DeepSeek-R1"
provider = "SILICONFLOW"
-pri_in = 1.0 #模型的输入价格(非必填,可以记录消耗)
-pri_out = 4.0 #模型的输出价格(非必填,可以记录消耗)
+pri_in = 4.0 #模型的输入价格(非必填,可以记录消耗)
+pri_out = 16.0 #模型的输出价格(非必填,可以记录消耗)
[model.normal_chat_2] # 一般聊天模式的次要回复模型,推荐使用 非推理模型
name = "Pro/deepseek-ai/DeepSeek-V3"
@@ -249,46 +251,57 @@ temp = 0.2 #模型的温度,新V3建议0.1-0.3
#------------专注聊天必填模型------------
[model.focus_working_memory] #工作记忆模型
-name = "Qwen/Qwen2.5-32B-Instruct"
+name = "Qwen/Qwen3-30B-A3B"
provider = "SILICONFLOW"
-pri_in = 1.26
-pri_out = 1.26
+enable_thinking = false # 是否启用思考
+pri_in = 0.7
+pri_out = 2.8
[model.focus_chat_mind] #聊天规划:认真聊天时,生成麦麦对聊天的规划想法
name = "Pro/deepseek-ai/DeepSeek-V3"
+# name = "Qwen/Qwen3-30B-A3B"
provider = "SILICONFLOW"
+# enable_thinking = false # 是否启用思考
pri_in = 2
pri_out = 8
-temp = 0.3 #模型的温度,新V3建议0.1-0.3
+temp = 0.3
[model.focus_tool_use] #工具调用模型,需要使用支持工具调用的模型
-name = "Qwen/Qwen2.5-32B-Instruct"
+name = "Qwen/Qwen3-14B"
provider = "SILICONFLOW"
-pri_in = 1.26
-pri_out = 1.26
+enable_thinking = false # 是否启用思考
+pri_in = 0.5
+pri_out = 2
[model.focus_planner] #决策:认真聊天时,负责决定麦麦该做什么
name = "Pro/deepseek-ai/DeepSeek-V3"
+# name = "Qwen/Qwen3-30B-A3B"
provider = "SILICONFLOW"
+# enable_thinking = false # 是否启用思考
pri_in = 2
pri_out = 8
+temp = 0.3
#表达器模型,用于表达麦麦的想法,生成最终回复,对语言风格影响极大
#也用于表达方式学习
[model.focus_expressor]
name = "Pro/deepseek-ai/DeepSeek-V3"
+# name = "Qwen/Qwen3-30B-A3B"
provider = "SILICONFLOW"
+# enable_thinking = false # 是否启用思考
pri_in = 2
pri_out = 8
temp = 0.3
#自我识别模型,用于自我认知和身份识别
[model.focus_self_recognize]
-name = "Pro/deepseek-ai/DeepSeek-V3"
+# name = "Pro/deepseek-ai/DeepSeek-V3"
+name = "Qwen/Qwen3-30B-A3B"
provider = "SILICONFLOW"
-pri_in = 2
-pri_out = 8
-temp = 0.3
+enable_thinking = false # 是否启用思考
+pri_in = 0.7
+pri_out = 2.8
+temp = 0.7
From 369de9d13712208de80e99eabe50481ae6e8f127 Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Tue, 27 May 2025 21:45:03 +0800
Subject: [PATCH 09/17] =?UTF-8?q?feat=EF=BC=9A=E4=BC=98=E5=8C=96=E4=BA=86a?=
=?UTF-8?q?uto=E5=88=87=E6=8D=A2=E8=81=8A=E5=A4=A9=E6=A8=A1=E5=BC=8F?=
=?UTF-8?q?=E6=9C=BA=E5=88=B6=EF=BC=8C=E4=BF=AE=E6=94=B9=E5=8F=96=E5=90=8D?=
=?UTF-8?q?prompt=EF=BC=8C=E4=B8=8D=E5=86=8D=E5=A4=84=E7=90=86temp?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/api/apiforgui.py | 24 ++++++
.../expressors/default_expressor.py | 2 +-
...iver.py => heartflow_message_processor.py} | 3 +-
src/chat/heart_flow/background_tasks.py | 32 +++----
src/chat/heart_flow/heartflow.py | 19 ++++-
src/chat/heart_flow/sub_heartflow.py | 37 +++++++-
src/chat/heart_flow/subheartflow_manager.py | 56 ++++++------
src/chat/message_receive/bot.py | 2 +-
src/chat/normal_chat/normal_chat.py | 85 ++++++++++++++++++-
src/chat/normal_chat/normal_chat_generator.py | 4 +-
src/config/config.py | 2 +
src/config/official_configs.py | 14 +--
src/person_info/person_info.py | 8 +-
template/bot_config_template.toml | 19 +++--
14 files changed, 237 insertions(+), 70 deletions(-)
rename src/chat/focus_chat/{heartflow_message_revceiver.py => heartflow_message_processor.py} (98%)
diff --git a/src/api/apiforgui.py b/src/api/apiforgui.py
index d6f22329..41313dc7 100644
--- a/src/api/apiforgui.py
+++ b/src/api/apiforgui.py
@@ -1,6 +1,7 @@
from src.chat.heart_flow.heartflow import heartflow
from src.chat.heart_flow.sub_heartflow import ChatState
from src.common.logger_manager import get_logger
+import time
logger = get_logger("api")
@@ -30,6 +31,29 @@ async def get_subheartflow_cycle_info(subheartflow_id: str, history_len: int) ->
return None
+async def get_normal_chat_replies(subheartflow_id: str, limit: int = 10) -> list:
+ """获取子心流的NormalChat回复记录
+
+ Args:
+ subheartflow_id: 子心流ID
+ limit: 最大返回数量,默认10条
+
+ Returns:
+ list: 回复记录列表,如果未找到则返回空列表
+ """
+ replies = await heartflow.api_get_normal_chat_replies(subheartflow_id, limit)
+ logger.debug(f"子心流 {subheartflow_id} NormalChat回复记录: 获取到 {len(replies) if replies else 0} 条")
+ if replies:
+ # 格式化时间戳为可读时间
+ for reply in replies:
+ if "time" in reply:
+ reply["formatted_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(reply["time"]))
+ return replies
+ else:
+ logger.warning(f"子心流 {subheartflow_id} NormalChat回复记录未找到")
+ return []
+
+
async def get_all_states():
"""获取所有状态"""
all_states = await heartflow.api_get_all_states()
diff --git a/src/chat/focus_chat/expressors/default_expressor.py b/src/chat/focus_chat/expressors/default_expressor.py
index 4e46acaa..e0da8019 100644
--- a/src/chat/focus_chat/expressors/default_expressor.py
+++ b/src/chat/focus_chat/expressors/default_expressor.py
@@ -79,7 +79,7 @@ class DefaultExpressor:
# TODO: API-Adapter修改标记
self.express_model = LLMRequest(
model=global_config.model.focus_expressor,
- temperature=global_config.model.focus_expressor["temp"],
+ # temperature=global_config.model.focus_expressor["temp"],
max_tokens=256,
request_type="focus_expressor",
)
diff --git a/src/chat/focus_chat/heartflow_message_revceiver.py b/src/chat/focus_chat/heartflow_message_processor.py
similarity index 98%
rename from src/chat/focus_chat/heartflow_message_revceiver.py
rename to src/chat/focus_chat/heartflow_message_processor.py
index 57f133f7..cbef9d5e 100644
--- a/src/chat/focus_chat/heartflow_message_revceiver.py
+++ b/src/chat/focus_chat/heartflow_message_processor.py
@@ -219,7 +219,8 @@ class HeartFCMessageReceiver:
)
# 8. 关系处理
- await _process_relationship(message)
+ if global_config.relationship.give_name:
+ await _process_relationship(message)
except Exception as e:
await _handle_error(e, "消息处理失败", message)
diff --git a/src/chat/heart_flow/background_tasks.py b/src/chat/heart_flow/background_tasks.py
index 4e4d502b..b509f84b 100644
--- a/src/chat/heart_flow/background_tasks.py
+++ b/src/chat/heart_flow/background_tasks.py
@@ -107,12 +107,12 @@ class BackgroundTaskManager:
f"私聊激活检查任务已启动 间隔:{PRIVATE_CHAT_ACTIVATION_CHECK_INTERVAL_SECONDS}s",
"_private_chat_activation_task",
),
- (
- self._run_into_focus_cycle,
- "debug", # 设为debug,避免过多日志
- f"专注评估任务已启动 间隔:{INTEREST_EVAL_INTERVAL_SECONDS}s",
- "_into_focus_task",
- )
+ # (
+ # self._run_into_focus_cycle,
+ # "debug", # 设为debug,避免过多日志
+ # f"专注评估任务已启动 间隔:{INTEREST_EVAL_INTERVAL_SECONDS}s",
+ # "_into_focus_task",
+ # )
])
else:
logger.info("聊天模式为 normal,跳过启动清理任务、私聊激活任务和专注评估任务")
@@ -215,10 +215,10 @@ class BackgroundTaskManager:
logger.info(f"[清理任务] 清理完成, 共停止 {stopped_count}/{len(flows_to_stop)} 个子心流")
# --- 新增兴趣评估工作函数 ---
- async def _perform_into_focus_work(self):
- """执行一轮子心流兴趣评估与提升检查。"""
- # 直接调用 subheartflow_manager 的方法,并传递当前状态信息
- await self.subheartflow_manager.sbhf_normal_into_focus()
+ # async def _perform_into_focus_work(self):
+ # """执行一轮子心流兴趣评估与提升检查。"""
+ # # 直接调用 subheartflow_manager 的方法,并传递当前状态信息
+ # await self.subheartflow_manager.sbhf_normal_into_focus()
async def _run_state_update_cycle(self, interval: int):
await _run_periodic_loop(task_name="State Update", interval=interval, task_func=self._perform_state_update_work)
@@ -229,12 +229,12 @@ class BackgroundTaskManager:
)
# --- 新增兴趣评估任务运行器 ---
- async def _run_into_focus_cycle(self):
- await _run_periodic_loop(
- task_name="Into Focus",
- interval=INTEREST_EVAL_INTERVAL_SECONDS,
- task_func=self._perform_into_focus_work,
- )
+ # async def _run_into_focus_cycle(self):
+ # await _run_periodic_loop(
+ # task_name="Into Focus",
+ # interval=INTEREST_EVAL_INTERVAL_SECONDS,
+ # task_func=self._perform_into_focus_work,
+ # )
# 新增私聊激活任务运行器
async def _run_private_chat_activation_cycle(self, interval: int):
diff --git a/src/chat/heart_flow/heartflow.py b/src/chat/heart_flow/heartflow.py
index 6e7a55b4..6e938872 100644
--- a/src/chat/heart_flow/heartflow.py
+++ b/src/chat/heart_flow/heartflow.py
@@ -1,6 +1,6 @@
from src.chat.heart_flow.sub_heartflow import SubHeartflow, ChatState
from src.common.logger_manager import get_logger
-from typing import Any, Optional
+from typing import Any, Optional, List
from src.chat.heart_flow.mai_state_manager import MaiStateInfo, MaiStateManager
from src.chat.heart_flow.subheartflow_manager import SubHeartflowManager
from src.chat.heart_flow.background_tasks import BackgroundTaskManager # Import BackgroundTaskManager
@@ -57,6 +57,23 @@ class Heartflow:
return heartfc_instance.get_cycle_history(last_n=history_len)
+ async def api_get_normal_chat_replies(self, subheartflow_id: str, limit: int = 10) -> Optional[List[dict]]:
+ """获取子心流的NormalChat回复记录
+
+ Args:
+ subheartflow_id: 子心流ID
+ limit: 最大返回数量,默认10条
+
+ Returns:
+ Optional[List[dict]]: 回复记录列表,如果子心流不存在则返回None
+ """
+ subheartflow = await self.subheartflow_manager.get_or_create_subheartflow(subheartflow_id)
+ if not subheartflow:
+ logger.warning(f"尝试获取不存在的子心流 {subheartflow_id} 的NormalChat回复记录")
+ return None
+
+ return subheartflow.get_normal_chat_recent_replies(limit)
+
async def heartflow_start_working(self):
"""启动后台任务"""
await self.background_task_manager.start_tasks()
diff --git a/src/chat/heart_flow/sub_heartflow.py b/src/chat/heart_flow/sub_heartflow.py
index 60973ba9..664bb54b 100644
--- a/src/chat/heart_flow/sub_heartflow.py
+++ b/src/chat/heart_flow/sub_heartflow.py
@@ -129,7 +129,12 @@ class SubHeartflow:
return False
# 在 rewind 为 True 或 NormalChat 实例尚未创建时,创建新实例
if rewind or not self.normal_chat_instance:
- self.normal_chat_instance = NormalChat(chat_stream=chat_stream, interest_dict=self.get_interest_dict())
+ # 提供回调函数,用于接收需要切换到focus模式的通知
+ self.normal_chat_instance = NormalChat(
+ chat_stream=chat_stream,
+ interest_dict=self.get_interest_dict(),
+ on_switch_to_focus_callback=self._handle_switch_to_focus_request
+ )
# 进行异步初始化
await self.normal_chat_instance.initialize()
@@ -144,6 +149,23 @@ class SubHeartflow:
self.normal_chat_instance = None # 启动/初始化失败,清理实例
return False
+ async def _handle_switch_to_focus_request(self) -> None:
+ """
+ 处理来自NormalChat的切换到focus模式的请求
+
+        注意:
+            此方法不接受参数,由NormalChat在满足切换条件时通过回调触发
+ """
+ logger.info(f"{self.log_prefix} 收到NormalChat请求切换到focus模式")
+
+ # 切换到focus模式
+ current_state = self.chat_state.chat_status
+ if current_state == ChatState.NORMAL:
+ await self.change_chat_state(ChatState.FOCUSED)
+ logger.info(f"{self.log_prefix} 已根据NormalChat请求从NORMAL切换到FOCUSED状态")
+ else:
+ logger.warning(f"{self.log_prefix} 当前状态为{current_state.value},无法切换到FOCUSED状态")
+
async def _stop_heart_fc_chat(self):
"""停止并清理 HeartFChatting 实例"""
if self.heart_fc_instance:
@@ -289,6 +311,19 @@ class SubHeartflow:
def get_interest_dict(self) -> Dict[str, tuple[MessageRecv, float, bool]]:
return self.interest_chatting.interest_dict
+ def get_normal_chat_recent_replies(self, limit: int = 10) -> List[dict]:
+ """获取NormalChat实例的最近回复记录
+
+ Args:
+ limit: 最大返回数量,默认10条
+
+ Returns:
+ List[dict]: 最近的回复记录列表,如果没有NormalChat实例则返回空列表
+ """
+ if self.normal_chat_instance:
+ return self.normal_chat_instance.get_recent_replies(limit)
+ return []
+
def clear_interest_dict(self):
self.interest_chatting.interest_dict.clear()
diff --git a/src/chat/heart_flow/subheartflow_manager.py b/src/chat/heart_flow/subheartflow_manager.py
index fb82550c..5217202a 100644
--- a/src/chat/heart_flow/subheartflow_manager.py
+++ b/src/chat/heart_flow/subheartflow_manager.py
@@ -186,41 +186,41 @@ class SubHeartflowManager:
f"{log_prefix} 完成,共处理 {processed_count} 个子心流,成功将 {changed_count} 个非 ABSENT 子心流的状态更改为 ABSENT。"
)
- async def sbhf_normal_into_focus(self):
- """评估子心流兴趣度,满足条件则提升到FOCUSED状态(基于start_hfc_probability)"""
- try:
- for sub_hf in list(self.subheartflows.values()):
- flow_id = sub_hf.subheartflow_id
- stream_name = chat_manager.get_stream_name(flow_id) or flow_id
+ # async def sbhf_normal_into_focus(self):
+ # """评估子心流兴趣度,满足条件则提升到FOCUSED状态(基于start_hfc_probability)"""
+ # try:
+ # for sub_hf in list(self.subheartflows.values()):
+ # flow_id = sub_hf.subheartflow_id
+ # stream_name = chat_manager.get_stream_name(flow_id) or flow_id
- # 跳过已经是FOCUSED状态的子心流
- if sub_hf.chat_state.chat_status == ChatState.FOCUSED:
- continue
+ # # 跳过已经是FOCUSED状态的子心流
+ # if sub_hf.chat_state.chat_status == ChatState.FOCUSED:
+ # continue
- if sub_hf.interest_chatting.start_hfc_probability == 0:
- continue
- else:
- logger.debug(
- f"{stream_name},现在状态: {sub_hf.chat_state.chat_status.value},进入专注概率: {sub_hf.interest_chatting.start_hfc_probability}"
- )
+ # if sub_hf.interest_chatting.start_hfc_probability == 0:
+ # continue
+ # else:
+ # logger.debug(
+ # f"{stream_name},现在状态: {sub_hf.chat_state.chat_status.value},进入专注概率: {sub_hf.interest_chatting.start_hfc_probability}"
+ # )
- if random.random() >= sub_hf.interest_chatting.start_hfc_probability:
- continue
+ # if random.random() >= sub_hf.interest_chatting.start_hfc_probability:
+ # continue
- # 获取最新状态并执行提升
- current_subflow = self.subheartflows.get(flow_id)
- if not current_subflow:
- continue
+ # # 获取最新状态并执行提升
+ # current_subflow = self.subheartflows.get(flow_id)
+ # if not current_subflow:
+ # continue
- logger.info(
- f"{stream_name} 触发 认真水群 (概率={current_subflow.interest_chatting.start_hfc_probability:.2f})"
- )
+ # logger.info(
+ # f"{stream_name} 触发 认真水群 (概率={current_subflow.interest_chatting.start_hfc_probability:.2f})"
+ # )
- # 执行状态提升
- await current_subflow.change_chat_state(ChatState.FOCUSED)
+ # # 执行状态提升
+ # await current_subflow.change_chat_state(ChatState.FOCUSED)
- except Exception as e:
- logger.error(f"启动HFC 兴趣评估失败: {e}", exc_info=True)
+ # except Exception as e:
+ # logger.error(f"启动HFC 兴趣评估失败: {e}", exc_info=True)
async def sbhf_focus_into_normal(self, subflow_id: Any):
"""
diff --git a/src/chat/message_receive/bot.py b/src/chat/message_receive/bot.py
index e000cc3f..7889a75e 100644
--- a/src/chat/message_receive/bot.py
+++ b/src/chat/message_receive/bot.py
@@ -7,7 +7,7 @@ from src.chat.message_receive.chat_stream import chat_manager
from src.chat.message_receive.message import MessageRecv
from src.experimental.only_message_process import MessageProcessor
from src.experimental.PFC.pfc_manager import PFCManager
-from src.chat.focus_chat.heartflow_message_revceiver import HeartFCMessageReceiver
+from src.chat.focus_chat.heartflow_message_processor import HeartFCMessageReceiver
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.config.config import global_config
diff --git a/src/chat/normal_chat/normal_chat.py b/src/chat/normal_chat/normal_chat.py
index 3264ab5b..34b79639 100644
--- a/src/chat/normal_chat/normal_chat.py
+++ b/src/chat/normal_chat/normal_chat.py
@@ -27,7 +27,7 @@ logger = get_logger("normal_chat")
class NormalChat:
- def __init__(self, chat_stream: ChatStream, interest_dict: dict = None):
+ def __init__(self, chat_stream: ChatStream, interest_dict: dict = None, on_switch_to_focus_callback=None):
"""初始化 NormalChat 实例。只进行同步操作。"""
# Basic info from chat_stream (sync)
@@ -49,6 +49,17 @@ class NormalChat:
self.last_speak_time = 0
self._chat_task: Optional[asyncio.Task] = None
self._initialized = False # Track initialization status
+
+ # 记录最近的回复内容,每项包含: {time, user_message, response, is_mentioned, is_reference_reply}
+ self.recent_replies = []
+ self.max_replies_history = 20 # 最多保存最近20条回复记录
+
+ # 添加回调函数,用于在满足条件时通知切换到focus_chat模式
+ self.on_switch_to_focus_callback = on_switch_to_focus_callback
+
+ # 最近回复检查相关
+ self._last_check_time = time.time()
+ self._check_interval = 10 # 每10秒检查一次是否需要切换到focus模式
async def initialize(self):
"""异步初始化,获取聊天类型和目标信息。"""
@@ -196,6 +207,12 @@ class NormalChat:
if self._chat_task is None or self._chat_task.cancelled():
logger.info(f"[{self.stream_name}] 兴趣监控任务被取消或置空,退出")
break
+
+ # 定期检查是否需要切换到focus模式
+ current_time = time.time()
+ if current_time - self._last_check_time > self._check_interval:
+ await self._check_switch_to_focus()
+ self._last_check_time = current_time
items_to_process = list(self.interest_dict.items())
if not items_to_process:
@@ -312,6 +329,28 @@ class NormalChat:
# 检查 first_bot_msg 是否为 None (例如思考消息已被移除的情况)
if first_bot_msg:
info_catcher.catch_after_response(timing_results["消息发送"], response_set, first_bot_msg)
+
+ # 记录回复信息到最近回复列表中
+ reply_info = {
+ "time": time.time(),
+ "user_message": message.processed_plain_text,
+ "user_info": {
+ "user_id": message.message_info.user_info.user_id,
+ "user_nickname": message.message_info.user_info.user_nickname
+ },
+ "response": response_set,
+ "is_mentioned": is_mentioned,
+ "is_reference_reply": message.reply is not None, # 判断是否为引用回复
+ "timing": {k: round(v, 2) for k, v in timing_results.items()}
+ }
+ self.recent_replies.append(reply_info)
+ # 保持最近回复历史在限定数量内
+ if len(self.recent_replies) > self.max_replies_history:
+ self.recent_replies = self.recent_replies[-self.max_replies_history:]
+
+ # 检查是否需要切换到focus模式
+ await self._check_switch_to_focus()
+
else:
logger.warning(f"[{self.stream_name}] 思考消息 {thinking_id} 在发送前丢失,无法记录 info_catcher")
@@ -520,3 +559,47 @@ class NormalChat:
except Exception as e:
logger.error(f"[{self.stream_name}] 清理思考消息时出错: {e}")
traceback.print_exc()
+
+ # 获取最近回复记录的方法
+ def get_recent_replies(self, limit: int = 10) -> List[dict]:
+ """获取最近的回复记录
+
+ Args:
+ limit: 最大返回数量,默认10条
+
+ Returns:
+ List[dict]: 最近的回复记录列表,每项包含:
+ time: 回复时间戳
+ user_message: 用户消息内容
+ user_info: 用户信息(user_id, user_nickname)
+ response: 回复内容
+ is_mentioned: 是否被提及(@)
+ is_reference_reply: 是否为引用回复
+ timing: 各阶段耗时
+ """
+ # 返回最近的limit条记录,按时间倒序排列
+ return sorted(self.recent_replies[-limit:], key=lambda x: x["time"], reverse=True)
+
+ async def _check_switch_to_focus(self) -> None:
+ """检查是否满足切换到focus模式的条件"""
+ if not self.on_switch_to_focus_callback:
+ return # 如果没有设置回调函数,直接返回
+ current_time = time.time()
+
+ time_threshold = 120 / global_config.focus_chat.auto_focus_threshold
+ reply_threshold = 6 * global_config.focus_chat.auto_focus_threshold
+
+ one_minute_ago = current_time - time_threshold
+
+        # 统计时间窗口(time_threshold秒,默认120/auto_focus_threshold)内的回复数量
+ recent_reply_count = sum(1 for reply in self.recent_replies if reply["time"] > one_minute_ago)
+ # print(111111111111111333333333333333333333333331111111111111111111111111111111111)
+ # print(recent_reply_count)
+        # 如果窗口内回复数量大于reply_threshold(6*auto_focus_threshold),触发切换到focus模式
+ if recent_reply_count > reply_threshold:
+ logger.info(f"[{self.stream_name}] 检测到1分钟内回复数量({recent_reply_count})大于{reply_threshold},触发切换到focus模式")
+ try:
+ # 调用回调函数通知上层切换到focus模式
+ await self.on_switch_to_focus_callback()
+ except Exception as e:
+ logger.error(f"[{self.stream_name}] 触发切换到focus模式时出错: {e}\n{traceback.format_exc()}")
diff --git a/src/chat/normal_chat/normal_chat_generator.py b/src/chat/normal_chat/normal_chat_generator.py
index 0a34ffc8..6debc8ed 100644
--- a/src/chat/normal_chat/normal_chat_generator.py
+++ b/src/chat/normal_chat/normal_chat_generator.py
@@ -18,13 +18,13 @@ class NormalChatGenerator:
# TODO: API-Adapter修改标记
self.model_reasoning = LLMRequest(
model=global_config.model.normal_chat_1,
- temperature=0.7,
+ # temperature=0.7,
max_tokens=3000,
request_type="normal_chat_1",
)
self.model_normal = LLMRequest(
model=global_config.model.normal_chat_2,
- temperature=global_config.model.normal_chat_2["temp"],
+ # temperature=global_config.model.normal_chat_2["temp"],
max_tokens=256,
request_type="normal_chat_2",
)
diff --git a/src/config/config.py b/src/config/config.py
index a438b1fc..fc4ea0fc 100644
--- a/src/config/config.py
+++ b/src/config/config.py
@@ -32,6 +32,7 @@ from src.config.official_configs import (
FocusChatProcessorConfig,
MessageReceiveConfig,
MaimMessageConfig,
+ RelationshipConfig,
)
install(extra_lines=3)
@@ -143,6 +144,7 @@ class Config(ConfigBase):
bot: BotConfig
personality: PersonalityConfig
identity: IdentityConfig
+ relationship: RelationshipConfig
chat: ChatConfig
message_receive: MessageReceiveConfig
normal_chat: NormalChatConfig
diff --git a/src/config/official_configs.py b/src/config/official_configs.py
index 731c847f..80f0ae7a 100644
--- a/src/config/official_configs.py
+++ b/src/config/official_configs.py
@@ -44,6 +44,13 @@ class IdentityConfig(ConfigBase):
identity_detail: list[str] = field(default_factory=lambda: [])
"""身份特征"""
+@dataclass
+class RelationshipConfig(ConfigBase):
+ """关系配置类"""
+
+ give_name: bool = False
+ """是否给其他人取名"""
+
@dataclass
class ChatConfig(ConfigBase):
@@ -115,11 +122,8 @@ class NormalChatConfig(ConfigBase):
class FocusChatConfig(ConfigBase):
"""专注聊天配置类"""
- reply_trigger_threshold: float = 3.0
- """心流聊天触发阈值,越低越容易触发"""
-
- default_decay_rate_per_second: float = 0.98
- """默认衰减率,越大衰减越快"""
+ auto_focus_threshold: float = 1.0
+ """自动切换到专注聊天的阈值,越低越容易进入专注聊天"""
observation_context_size: int = 12
"""可观察到的最长上下文大小,超过这个值的上下文会被压缩"""
diff --git a/src/person_info/person_info.py b/src/person_info/person_info.py
index 9a85d171..021297cc 100644
--- a/src/person_info/person_info.py
+++ b/src/person_info/person_info.py
@@ -264,17 +264,17 @@ class PersonInfoManager:
prompt_personality = individuality.get_prompt(x_person=2, level=1)
bot_name = individuality.personality.bot_nickname
- qv_name_prompt = f"你是{bot_name},{prompt_personality}"
- qv_name_prompt += f"现在你想给一个用户取一个昵称,用户是的qq昵称是{user_nickname},"
+ qv_name_prompt = f"你是{bot_name},一个伪装成人类的AI,你不能让别人发现这一点,"
+ qv_name_prompt += f"现在你想给一个用户取一个昵称,用户的qq昵称是{user_nickname},"
qv_name_prompt += f"用户的qq群昵称名是{user_cardname},"
if user_avatar:
qv_name_prompt += f"用户的qq头像是{user_avatar},"
if old_name:
qv_name_prompt += f"你之前叫他{old_name},是因为{old_reason},"
- qv_name_prompt += f"\n其他取名的要求是:{request},不要太浮夸"
+ qv_name_prompt += f"\n其他取名的要求是:{request},不要太浮夸,简短,"
qv_name_prompt += (
- "\n请根据以上用户信息,想想你叫他什么比较好,不要太浮夸,请最好使用用户的qq昵称,可以稍作修改"
+ "\n请根据以上用户信息,想想你叫他什么比较好,不要太浮夸,请最好使用用户的qq昵称,可以稍作修改,优先使用原文。优先使用用户的qq昵称或者群昵称原文。"
)
if existing_names_str:
diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml
index 00859c48..9a1fd893 100644
--- a/template/bot_config_template.toml
+++ b/template/bot_config_template.toml
@@ -18,13 +18,11 @@ nickname = "麦麦"
alias_names = ["麦叠", "牢麦"] #仅在 专注聊天 有效
[personality]
-personality_core = "用一句话或几句话描述人格的核心特点" # 建议20字以内,谁再写3000字小作文敲谁脑袋
+personality_core = "是一个积极向上的女大学生" # 建议20字以内,谁再写3000字小作文敲谁脑袋
personality_sides = [
"用一句话或几句话描述人格的一些细节",
"用一句话或几句话描述人格的一些细节",
"用一句话或几句话描述人格的一些细节",
- "用一句话或几句话描述人格的一些细节",
- "用一句话或几句话描述人格的一些细节",
]# 条数任意,不能为0
# 身份特点
@@ -38,6 +36,9 @@ identity_detail = [
# 可以描述外贸,性别,身高,职业,属性等等描述
# 条数任意,不能为0
+[relationship]
+give_name = true # 麦麦是否给其他人取名,关闭后无法使用禁言功能
+
[chat] #麦麦的聊天通用设置
chat_mode = "normal" # 聊天模式 —— 普通模式:normal,专注模式:focus,在普通模式和专注模式之间自动切换
# chat_mode = "focus"
@@ -78,11 +79,11 @@ at_bot_inevitable_reply = false # @bot 必然回复
talk_frequency_down_groups = [] #降低回复频率的群号码
[focus_chat] #专注聊天
-reply_trigger_threshold = 3.0 # 专注聊天触发阈值,越低越容易进入专注聊天
-default_decay_rate_per_second = 0.98 # 默认衰减率,越大衰减越快,越高越难进入专注聊天
+auto_focus_threshold = 1 # 自动切换到专注聊天的阈值,越低越容易进入专注聊天
+
consecutive_no_reply_threshold = 3 # 连续不回复的阈值,越低越容易结束专注聊天
-think_interval = 1 # 思考间隔 单位秒
+think_interval = 3 # 思考间隔 单位秒,可以有效减少消耗
observation_context_size = 15 # 观察到的最长上下文大小,建议15,太短太长都会导致脑袋尖尖
compressed_length = 5 # 不能大于chat.observation_context_size,心流上下文压缩的最短压缩长度,超过心流观察到的上下文长度,会压缩,最短压缩长度为5
@@ -90,14 +91,14 @@ compress_length_limit = 5 #最多压缩份数,超过该数值的压缩上下
[focus_chat_processor] # 专注聊天处理器,打开可以实现更多功能,但是会增加token消耗
self_identify_processor = true # 是否启用自我识别处理器
-tool_use_processor = true # 是否启用工具使用处理器
-working_memory_processor = true # 是否启用工作记忆处理器
+tool_use_processor = false # 是否启用工具使用处理器
+working_memory_processor = false # 是否启用工作记忆处理器
[expression]
# 表达方式
expression_style = "描述麦麦说话的表达风格,表达习惯"
enable_expression_learning = true # 是否启用表达学习
-learning_interval = 300 # 学习间隔 单位秒
+learning_interval = 600 # 学习间隔 单位秒
[emoji]
From 43be52a2b38cc7b88a3074fdfdc49661e35979ba Mon Sep 17 00:00:00 2001
From: zrzluck99
Date: Tue, 27 May 2025 22:10:37 +0800
Subject: [PATCH 10/17] =?UTF-8?q?fix:=20=E4=BF=AE=E5=A4=8D=E4=BA=86=20chat?=
=?UTF-8?q?=20=E6=8F=92=E4=BB=B6=E4=B8=AD=E4=B8=8D=E8=83=BD=E6=AD=A3?=
=?UTF-8?q?=E7=A1=AE=E8=AF=86=E5=88=ABat=E7=9A=84=E9=94=99=E8=AF=AF,=20?=
=?UTF-8?q?=E5=B9=B6=E6=B7=BB=E5=8A=A0=E4=BA=86=E5=AF=B9linuxqq=E7=9A=84?=
=?UTF-8?q?=E6=94=AF=E6=8C=81?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/chat/utils/utils.py | 16 ++++++++++++----
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/src/chat/utils/utils.py b/src/chat/utils/utils.py
index 19703ec4..fd59e4c2 100644
--- a/src/chat/utils/utils.py
+++ b/src/chat/utils/utils.py
@@ -63,7 +63,11 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]:
)
# 判断是否被@
- if re.search(f"@[\s\S]*?(id:{global_config.bot.qq_account})", message.processed_plain_text):
+ if re.search(
+ rf"@(.+?)(id:{global_config.bot.qq_account})", message.processed_plain_text
+ ) or re.search(
+ rf"@<(.+?)(?=:{global_config.bot.qq_account}>)\:{global_config.bot.qq_account}>", message.processed_plain_text
+ ):
is_at = True
is_mentioned = True
@@ -74,13 +78,17 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]:
if not is_mentioned:
# 判断是否被回复
if re.match(
- f"\[回复 [\s\S]*?\({str(global_config.bot.qq_account)}\):[\s\S]*?],说:", message.processed_plain_text
+ rf"\[回复 (.+?)\({str(global_config.bot.qq_account)}\):(.+?)\],说:", message.processed_plain_text
+ ) or re.match(
+ rf"\[回复<(.+?)(?=:{str(global_config.bot.qq_account)}>)\:{str(global_config.bot.qq_account)}>:(.+?)\],说:", message.processed_plain_text
):
is_mentioned = True
else:
# 判断内容中是否被提及
- message_content = re.sub(r"@[\s\S]*?((\d+))", "", message.processed_plain_text)
- message_content = re.sub(r"\[回复 [\s\S]*?\(((\d+)|未知id)\):[\s\S]*?],说:", "", message_content)
+ message_content = re.sub(r"@(.+?)((\d+))", "", message.processed_plain_text)
+ message_content = re.sub(r"@<(.+?)(?=:(\d+))\:(\d+)>", "", message_content)
+ message_content = re.sub(r"\[回复 (.+?)\(((\d+)|未知id)\):(.+?)\],说:", "", message_content)
+ message_content = re.sub(r"\[回复<(.+?)(?=:(\d+))\:(\d+)>:(.+?)\],说:", "", message_content)
for keyword in keywords:
if keyword in message_content:
is_mentioned = True
From e25755118e9ba46f8974df0313fe32a6b1eab106 Mon Sep 17 00:00:00 2001
From: zrzluck99 <50124826+zrzluck99@users.noreply.github.com>
Date: Tue, 27 May 2025 22:22:15 +0800
Subject: [PATCH 11/17] Update src/chat/utils/utils.py
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
---
src/chat/utils/utils.py | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/src/chat/utils/utils.py b/src/chat/utils/utils.py
index fd59e4c2..0501e436 100644
--- a/src/chat/utils/utils.py
+++ b/src/chat/utils/utils.py
@@ -64,9 +64,7 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]:
# 判断是否被@
if re.search(
- rf"@(.+?)(id:{global_config.bot.qq_account})", message.processed_plain_text
- ) or re.search(
- rf"@<(.+?)(?=:{global_config.bot.qq_account}>)\:{global_config.bot.qq_account}>", message.processed_plain_text
+ rf"@<(.+?):{global_config.bot.qq_account}>"
):
is_at = True
is_mentioned = True
From aa67b595408952583351e26f5d1d7eec96652c66 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
Date: Tue, 27 May 2025 14:53:17 +0000
Subject: [PATCH 12/17] =?UTF-8?q?=F0=9F=A4=96=20=E8=87=AA=E5=8A=A8?=
=?UTF-8?q?=E6=A0=BC=E5=BC=8F=E5=8C=96=E4=BB=A3=E7=A0=81=20[skip=20ci]?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/chat/utils/utils.py | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/src/chat/utils/utils.py b/src/chat/utils/utils.py
index 0501e436..2c183934 100644
--- a/src/chat/utils/utils.py
+++ b/src/chat/utils/utils.py
@@ -63,9 +63,7 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]:
)
# 判断是否被@
- if re.search(
- rf"@<(.+?):{global_config.bot.qq_account}>"
- ):
+ if re.search(rf"@<(.+?):{global_config.bot.qq_account}>"):
is_at = True
is_mentioned = True
@@ -78,7 +76,8 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]:
if re.match(
rf"\[回复 (.+?)\({str(global_config.bot.qq_account)}\):(.+?)\],说:", message.processed_plain_text
) or re.match(
- rf"\[回复<(.+?)(?=:{str(global_config.bot.qq_account)}>)\:{str(global_config.bot.qq_account)}>:(.+?)\],说:", message.processed_plain_text
+ rf"\[回复<(.+?)(?=:{str(global_config.bot.qq_account)}>)\:{str(global_config.bot.qq_account)}>:(.+?)\],说:",
+ message.processed_plain_text,
):
is_mentioned = True
else:
From 43e465860f64cb94930d9c359dbeafd88b4e143d Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Tue, 27 May 2025 23:08:44 +0800
Subject: [PATCH 13/17] =?UTF-8?q?fix=EF=BC=9A=E4=BC=98=E5=8C=96=E7=A6=BB?=
=?UTF-8?q?=E5=BC=80focus=E6=A8=A1=E5=BC=8F=E7=9A=84=E6=9C=BA=E5=88=B6?=
=?UTF-8?q?=EF=BC=8C=E5=AE=8C=E5=85=A8=E7=A7=BB=E9=99=A4Interest=E6=9C=BA?=
=?UTF-8?q?=E5=88=B6=EF=BC=8C?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/api/apiforgui.py | 4 +-
.../expressors/default_expressor.py | 36 +++++----
src/chat/focus_chat/heartFC_chat.py | 80 +++++++++++++------
.../focus_chat/heartflow_message_processor.py | 4 +-
src/chat/focus_chat/info/structured_info.py | 3 +-
.../info_processors/action_processor.py | 5 +-
.../info_processors/tool_processor.py | 4 +-
src/chat/focus_chat/memory_activator.py | 16 ++--
.../focus_chat/planners/action_manager.py | 11 +--
.../focus_chat/planners/actions/__init__.py | 1 +
.../actions/exit_focus_chat_action.py | 26 +-----
src/chat/focus_chat/planners/planner.py | 4 +-
.../working_memory/memory_manager.py | 5 +-
src/chat/heart_flow/background_tasks.py | 46 ++++++-----
src/chat/heart_flow/heartflow.py | 6 +-
src/chat/heart_flow/interest_chatting.py | 4 +-
.../observation/chatting_observation.py | 1 -
src/chat/heart_flow/sub_heartflow.py | 64 ++++++++-------
src/chat/heart_flow/subheartflow_manager.py | 57 +++++++------
src/chat/normal_chat/normal_chat.py | 44 +++++-----
src/config/official_configs.py | 27 +++----
src/llm_models/utils_model.py | 8 +-
src/person_info/person_info.py | 6 +-
.../actions/group_whole_ban_action.py | 26 ++----
.../test_plugin/actions/mute_action.py | 21 ++---
src/tools/tool_use.py | 2 +
template/bot_config_template.toml | 2 +-
27 files changed, 263 insertions(+), 250 deletions(-)
diff --git a/src/api/apiforgui.py b/src/api/apiforgui.py
index 41313dc7..853e8b49 100644
--- a/src/api/apiforgui.py
+++ b/src/api/apiforgui.py
@@ -33,11 +33,11 @@ async def get_subheartflow_cycle_info(subheartflow_id: str, history_len: int) ->
async def get_normal_chat_replies(subheartflow_id: str, limit: int = 10) -> list:
"""获取子心流的NormalChat回复记录
-
+
Args:
subheartflow_id: 子心流ID
limit: 最大返回数量,默认10条
-
+
Returns:
list: 回复记录列表,如果未找到则返回空列表
"""
diff --git a/src/chat/focus_chat/expressors/default_expressor.py b/src/chat/focus_chat/expressors/default_expressor.py
index e0da8019..befe045e 100644
--- a/src/chat/focus_chat/expressors/default_expressor.py
+++ b/src/chat/focus_chat/expressors/default_expressor.py
@@ -13,7 +13,6 @@ from src.chat.emoji_system.emoji_manager import emoji_manager
from src.chat.focus_chat.heartFC_sender import HeartFCSender
from src.chat.utils.utils import process_llm_response
from src.chat.utils.info_catcher import info_catcher_manager
-from src.manager.mood_manager import mood_manager
from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info
from src.chat.message_receive.chat_stream import ChatStream
from src.chat.focus_chat.hfc_utils import parse_thinking_id_to_timestamp
@@ -150,22 +149,22 @@ class DefaultExpressor:
action_data=action_data,
)
- with Timer("选择表情", cycle_timers):
- emoji_keyword = action_data.get("emojis", [])
- emoji_base64 = await self._choose_emoji(emoji_keyword)
- if emoji_base64:
- reply.append(("emoji", emoji_base64))
+ with Timer("选择表情", cycle_timers):
+ emoji_keyword = action_data.get("emojis", [])
+ emoji_base64 = await self._choose_emoji(emoji_keyword)
+ if emoji_base64:
+ reply.append(("emoji", emoji_base64))
- if reply:
- with Timer("发送消息", cycle_timers):
- sent_msg_list = await self.send_response_messages(
- anchor_message=anchor_message,
- thinking_id=thinking_id,
- response_set=reply,
- )
- has_sent_something = True
- else:
- logger.warning(f"{self.log_prefix} 文本回复生成失败")
+ if reply:
+ with Timer("发送消息", cycle_timers):
+ sent_msg_list = await self.send_response_messages(
+ anchor_message=anchor_message,
+ thinking_id=thinking_id,
+ response_set=reply,
+ )
+ has_sent_something = True
+ else:
+ logger.warning(f"{self.log_prefix} 文本回复生成失败")
if not has_sent_something:
logger.warning(f"{self.log_prefix} 回复动作未包含任何有效内容")
@@ -174,6 +173,7 @@ class DefaultExpressor:
except Exception as e:
logger.error(f"回复失败: {e}")
+ traceback.print_exc()
return False, None
# --- 回复器 (Replier) 的定义 --- #
@@ -443,7 +443,9 @@ class DefaultExpressor:
set_reply = True
else:
set_reply = False
- sent_msg = await self.heart_fc_sender.send_message(bot_message, has_thinking=True, typing=typing, set_reply=set_reply)
+ sent_msg = await self.heart_fc_sender.send_message(
+ bot_message, has_thinking=True, typing=typing, set_reply=set_reply
+ )
reply_message_ids.append(part_message_id) # 记录我们生成的ID
diff --git a/src/chat/focus_chat/heartFC_chat.py b/src/chat/focus_chat/heartFC_chat.py
index d989fb5e..6e8beebe 100644
--- a/src/chat/focus_chat/heartFC_chat.py
+++ b/src/chat/focus_chat/heartFC_chat.py
@@ -3,7 +3,7 @@ import contextlib
import time
import traceback
from collections import deque
-from typing import List, Optional, Dict, Any, Deque
+from typing import List, Optional, Dict, Any, Deque, Callable, Awaitable
from src.chat.message_receive.chat_stream import ChatStream
from src.chat.message_receive.chat_stream import chat_manager
from rich.traceback import install
@@ -84,6 +84,7 @@ class HeartFChatting:
self,
chat_id: str,
observations: list[Observation],
+ on_stop_focus_chat: Optional[Callable[[], Awaitable[None]]] = None,
):
"""
HeartFChatting 初始化函数
@@ -91,6 +92,7 @@ class HeartFChatting:
参数:
chat_id: 聊天流唯一标识符(如stream_id)
observations: 关联的观察列表
+ on_stop_focus_chat: 当收到stop_focus_chat命令时调用的回调函数
"""
# 基础属性
self.stream_id: str = chat_id # 聊天流ID
@@ -143,6 +145,9 @@ class HeartFChatting:
self._current_cycle: Optional[CycleDetail] = None
self._shutting_down: bool = False # 关闭标志位
+ # 存储回调函数
+ self.on_stop_focus_chat = on_stop_focus_chat
+
async def _initialize(self) -> bool:
"""
执行懒初始化操作
@@ -287,6 +292,19 @@ class HeartFChatting:
async with global_prompt_manager.async_message_scope(self.chat_stream.context.get_template_name()):
logger.debug(f"模板 {self.chat_stream.context.get_template_name()}")
loop_info = await self._observe_process_plan_action_loop(cycle_timers, thinking_id)
+
+ print(loop_info["loop_action_info"]["command"])
+ if loop_info["loop_action_info"]["command"] == "stop_focus_chat":
+ logger.info(f"{self.log_prefix} 麦麦决定停止专注聊天")
+ # 如果设置了回调函数,则调用它
+ if self.on_stop_focus_chat:
+ try:
+ await self.on_stop_focus_chat()
+ logger.info(f"{self.log_prefix} 成功调用回调函数处理停止专注聊天")
+ except Exception as e:
+ logger.error(f"{self.log_prefix} 调用停止专注聊天回调函数时出错: {e}")
+ logger.error(traceback.format_exc())
+ break
self._current_cycle.set_loop_info(loop_info)
@@ -410,7 +428,7 @@ class HeartFChatting:
return all_plan_info
- async def _observe_process_plan_action_loop(self, cycle_timers: dict, thinking_id: str) -> tuple[bool, str]:
+ async def _observe_process_plan_action_loop(self, cycle_timers: dict, thinking_id: str) -> dict:
try:
with Timer("观察", cycle_timers):
# await self.observations[0].observe()
@@ -466,13 +484,14 @@ class HeartFChatting:
logger.info(f"{self.log_prefix} 麦麦决定'{action_str}', 原因'{reasoning}'")
- success, reply_text = await self._handle_action(
+ success, reply_text, command = await self._handle_action(
action_type, reasoning, action_data, cycle_timers, thinking_id
)
loop_action_info = {
"action_taken": success,
"reply_text": reply_text,
+ "command": command,
}
loop_info = {
@@ -487,7 +506,12 @@ class HeartFChatting:
except Exception as e:
logger.error(f"{self.log_prefix} FOCUS聊天处理失败: {e}")
logger.error(traceback.format_exc())
- return {}
+ return {
+ "loop_observation_info": {},
+ "loop_processor_info": {},
+ "loop_plan_info": {},
+ "loop_action_info": {"action_taken": False, "reply_text": "", "command": ""},
+ }
async def _handle_action(
self,
@@ -496,7 +520,7 @@ class HeartFChatting:
action_data: dict,
cycle_timers: dict,
thinking_id: str,
- ) -> tuple[bool, str]:
+ ) -> tuple[bool, str, str]:
"""
处理规划动作,使用动作工厂创建相应的动作处理器
@@ -508,36 +532,46 @@ class HeartFChatting:
thinking_id: 思考ID
返回:
- tuple[bool, str]: (是否执行了动作, 思考消息ID)
+ tuple[bool, str, str]: (是否执行了动作, 思考消息ID, 命令)
"""
try:
# 使用工厂创建动作处理器实例
- action_handler = self.action_manager.create_action(
- action_name=action,
- action_data=action_data,
- reasoning=reasoning,
- cycle_timers=cycle_timers,
- thinking_id=thinking_id,
- observations=self.all_observations,
- expressor=self.expressor,
- chat_stream=self.chat_stream,
- log_prefix=self.log_prefix,
- shutting_down=self._shutting_down,
- )
+ try:
+ action_handler = self.action_manager.create_action(
+ action_name=action,
+ action_data=action_data,
+ reasoning=reasoning,
+ cycle_timers=cycle_timers,
+ thinking_id=thinking_id,
+ observations=self.all_observations,
+ expressor=self.expressor,
+ chat_stream=self.chat_stream,
+ log_prefix=self.log_prefix,
+ shutting_down=self._shutting_down,
+ )
+ except Exception as e:
+ logger.error(f"{self.log_prefix} 创建动作处理器时出错: {e}")
+ traceback.print_exc()
+ return False, "", ""
if not action_handler:
logger.warning(f"{self.log_prefix} 未能创建动作处理器: {action}, 原因: {reasoning}")
- return False, ""
+ return False, "", ""
# 处理动作并获取结果
- success, reply_text = await action_handler.handle_action()
-
- return success, reply_text
+ result = await action_handler.handle_action()
+ if len(result) == 3:
+ success, reply_text, command = result
+ else:
+ success, reply_text = result
+ command = ""
+ logger.info(f"{self.log_prefix} 麦麦决定'{action}', 原因'{reasoning}',返回结果'{success}', '{reply_text}', '{command}'")
+ return success, reply_text, command
except Exception as e:
logger.error(f"{self.log_prefix} 处理{action}时出错: {e}")
traceback.print_exc()
- return False, ""
+ return False, "", ""
async def shutdown(self):
"""优雅关闭HeartFChatting实例,取消活动循环任务"""
diff --git a/src/chat/focus_chat/heartflow_message_processor.py b/src/chat/focus_chat/heartflow_message_processor.py
index cbef9d5e..c1efeb52 100644
--- a/src/chat/focus_chat/heartflow_message_processor.py
+++ b/src/chat/focus_chat/heartflow_message_processor.py
@@ -205,8 +205,8 @@ class HeartFCMessageReceiver:
# 6. 兴趣度计算与更新
interested_rate, is_mentioned = await _calculate_interest(message)
- await subheartflow.interest_chatting.increase_interest(value=interested_rate)
- subheartflow.interest_chatting.add_interest_dict(message, interested_rate, is_mentioned)
+ # await subheartflow.interest_chatting.increase_interest(value=interested_rate)
+ subheartflow.add_interest_message(message, interested_rate, is_mentioned)
# 7. 日志记录
mes_name = chat.group_info.group_name if chat.group_info else "私聊"
diff --git a/src/chat/focus_chat/info/structured_info.py b/src/chat/focus_chat/info/structured_info.py
index 616e942d..a925a6d1 100644
--- a/src/chat/focus_chat/info/structured_info.py
+++ b/src/chat/focus_chat/info/structured_info.py
@@ -77,9 +77,8 @@ class StructuredInfo:
info_str = ""
# print(f"self.data: {self.data}")
-
+
for key, value in self.data.items():
-
# print(f"key: {key}, value: {value}")
info_str += f"信息类型:{key},信息内容:{value}\n"
diff --git a/src/chat/focus_chat/info_processors/action_processor.py b/src/chat/focus_chat/info_processors/action_processor.py
index 1f05ac84..fe2d8675 100644
--- a/src/chat/focus_chat/info_processors/action_processor.py
+++ b/src/chat/focus_chat/info_processors/action_processor.py
@@ -8,7 +8,6 @@ from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservati
from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
from src.chat.message_receive.chat_stream import chat_manager
from typing import Dict
-from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config
import random
@@ -137,9 +136,11 @@ class ActionProcessor(BaseProcessor):
reply_sequence.append(action_type == "reply")
# 检查no_reply比例
- if len(recent_cycles) >= 5 and (no_reply_count / len(recent_cycles)) >= 0.8:
+ if len(recent_cycles) >= (5 * global_config.focus_chat.exit_focus_threshold) and (no_reply_count / len(recent_cycles)) >= (0.75 * global_config.focus_chat.exit_focus_threshold):
if global_config.chat.chat_mode == "auto":
result["add"].append("exit_focus_chat")
+ result["remove"].append("no_reply")
+ result["remove"].append("reply")
# 获取最近三次的reply状态
last_three = reply_sequence[-3:] if len(reply_sequence) >= 3 else reply_sequence
diff --git a/src/chat/focus_chat/info_processors/tool_processor.py b/src/chat/focus_chat/info_processors/tool_processor.py
index 6980c908..2d52a04a 100644
--- a/src/chat/focus_chat/info_processors/tool_processor.py
+++ b/src/chat/focus_chat/info_processors/tool_processor.py
@@ -76,7 +76,7 @@ class ToolProcessor(BaseProcessor):
# 更新WorkingObservation中的结构化信息
logger.debug(f"工具调用结果: {result}")
-
+
for observation in observations:
if isinstance(observation, StructureObservation):
for structured_info in result:
@@ -92,7 +92,7 @@ class ToolProcessor(BaseProcessor):
# print(f"working_info: {working_info}")
# print(f"working_info.get('type'): {working_info.get('type')}")
# print(f"working_info.get('content'): {working_info.get('content')}")
- structured_info.set_info(key=working_info.get('type'), value=working_info.get('content'))
+ structured_info.set_info(key=working_info.get("type"), value=working_info.get("content"))
# info = structured_info.get_processed_info()
# print(f"info: {info}")
diff --git a/src/chat/focus_chat/memory_activator.py b/src/chat/focus_chat/memory_activator.py
index e097bdbb..2aa8fa54 100644
--- a/src/chat/focus_chat/memory_activator.py
+++ b/src/chat/focus_chat/memory_activator.py
@@ -19,24 +19,24 @@ logger = get_logger("memory_activator")
def get_keywords_from_json(json_str):
"""
从JSON字符串中提取关键词列表
-
+
Args:
json_str: JSON格式的字符串
-
+
Returns:
List[str]: 关键词列表
"""
try:
# 使用repair_json修复JSON格式
fixed_json = repair_json(json_str)
-
+
# 如果repair_json返回的是字符串,需要解析为Python对象
if isinstance(fixed_json, str):
result = json.loads(fixed_json)
else:
# 如果repair_json直接返回了字典对象,直接使用
result = fixed_json
-
+
# 提取关键词
keywords = result.get("keywords", [])
return keywords
@@ -100,7 +100,7 @@ class MemoryActivator:
# 将缓存的关键词转换为字符串,用于prompt
cached_keywords_str = ", ".join(self.cached_keywords) if self.cached_keywords else "暂无历史关键词"
-
+
prompt = await global_prompt_manager.format_prompt(
"memory_activator_prompt",
obs_info_text=obs_info_text,
@@ -116,7 +116,7 @@ class MemoryActivator:
# 只取response的第一个元素(字符串)
response_str = response[0]
keywords = list(get_keywords_from_json(response_str))
-
+
# 更新关键词缓存
if keywords:
# 限制缓存大小,最多保留10个关键词
@@ -124,12 +124,12 @@ class MemoryActivator:
# 转换为列表,移除最早的关键词
cached_list = list(self.cached_keywords)
self.cached_keywords = set(cached_list[-8:])
-
+
# 添加新的关键词到缓存
self.cached_keywords.update(keywords)
logger.debug(f"更新关键词缓存: {self.cached_keywords}")
- #调用记忆系统获取相关记忆
+ # 调用记忆系统获取相关记忆
related_memory = await HippocampusManager.get_instance().get_memory_from_topic(
valid_keywords=keywords, max_memory_num=3, max_memory_length=2, max_depth=3
)
diff --git a/src/chat/focus_chat/planners/action_manager.py b/src/chat/focus_chat/planners/action_manager.py
index 62db09a9..d2ed378c 100644
--- a/src/chat/focus_chat/planners/action_manager.py
+++ b/src/chat/focus_chat/planners/action_manager.py
@@ -29,7 +29,6 @@ class ActionManager:
# 当前正在使用的动作集合,默认加载默认动作
self._using_actions: Dict[str, ActionInfo] = {}
-
# 默认动作集,仅作为快照,用于恢复默认
self._default_actions: Dict[str, ActionInfo] = {}
@@ -159,9 +158,9 @@ class ActionManager:
Optional[BaseAction]: 创建的动作处理器实例,如果动作名称未注册则返回None
"""
# 检查动作是否在当前使用的动作集中
- if action_name not in self._using_actions:
- logger.warning(f"当前不可用的动作类型: {action_name}")
- return None
+ # if action_name not in self._using_actions:
+ # logger.warning(f"当前不可用的动作类型: {action_name}")
+ # return None
handler_class = _ACTION_REGISTRY.get(action_name)
if not handler_class:
@@ -283,7 +282,9 @@ class ActionManager:
def restore_actions(self) -> None:
"""恢复到默认动作集"""
- logger.debug(f"恢复动作集: 从 {list(self._using_actions.keys())} 恢复到默认动作集 {list(self._default_actions.keys())}")
+ logger.debug(
+ f"恢复动作集: 从 {list(self._using_actions.keys())} 恢复到默认动作集 {list(self._default_actions.keys())}"
+ )
self._using_actions = self._default_actions.copy()
def restore_default_actions(self) -> None:
diff --git a/src/chat/focus_chat/planners/actions/__init__.py b/src/chat/focus_chat/planners/actions/__init__.py
index 3f2baf66..6fc139d7 100644
--- a/src/chat/focus_chat/planners/actions/__init__.py
+++ b/src/chat/focus_chat/planners/actions/__init__.py
@@ -1,5 +1,6 @@
# 导入所有动作模块以确保装饰器被执行
from . import reply_action # noqa
from . import no_reply_action # noqa
+from . import exit_focus_chat_action # noqa
# 在此处添加更多动作模块导入
diff --git a/src/chat/focus_chat/planners/actions/exit_focus_chat_action.py b/src/chat/focus_chat/planners/actions/exit_focus_chat_action.py
index c7ba6483..8ab43f96 100644
--- a/src/chat/focus_chat/planners/actions/exit_focus_chat_action.py
+++ b/src/chat/focus_chat/planners/actions/exit_focus_chat_action.py
@@ -5,8 +5,6 @@ from src.chat.focus_chat.planners.actions.base_action import BaseAction, registe
from typing import Tuple, List
from src.chat.heart_flow.observation.observation import Observation
from src.chat.message_receive.chat_stream import ChatStream
-from src.chat.heart_flow.heartflow import heartflow
-from src.chat.heart_flow.sub_heartflow import ChatState
logger = get_logger("action_taken")
@@ -27,7 +25,7 @@ class ExitFocusChatAction(BaseAction):
"当前内容不需要持续专注关注,你决定退出专注聊天",
"聊天内容已经完成,你决定退出专注聊天",
]
- default = True
+ default = False
def __init__(
self,
@@ -56,7 +54,6 @@ class ExitFocusChatAction(BaseAction):
self.observations = observations
self.log_prefix = log_prefix
self._shutting_down = shutting_down
- self.chat_id = chat_stream.stream_id
async def handle_action(self) -> Tuple[bool, str]:
"""
@@ -74,23 +71,8 @@ class ExitFocusChatAction(BaseAction):
try:
# 转换状态
status_message = ""
- self.sub_heartflow = await heartflow.get_or_create_subheartflow(self.chat_id)
- if self.sub_heartflow:
- try:
- # 转换为normal_chat状态
- await self.sub_heartflow.change_chat_state(ChatState.CHAT)
- status_message = "已成功切换到普通聊天模式"
- logger.info(f"{self.log_prefix} {status_message}")
- except Exception as e:
- error_msg = f"切换到普通聊天模式失败: {str(e)}"
- logger.error(f"{self.log_prefix} {error_msg}")
- return False, error_msg
- else:
- warning_msg = "未找到有效的sub heartflow实例,无法切换状态"
- logger.warning(f"{self.log_prefix} {warning_msg}")
- return False, warning_msg
-
- return True, status_message
+ command = "stop_focus_chat"
+ return True, status_message, command
except asyncio.CancelledError:
logger.info(f"{self.log_prefix} 处理 'exit_focus_chat' 时等待被中断 (CancelledError)")
@@ -99,4 +81,4 @@ class ExitFocusChatAction(BaseAction):
error_msg = f"处理 'exit_focus_chat' 时发生错误: {str(e)}"
logger.error(f"{self.log_prefix} {error_msg}")
logger.error(traceback.format_exc())
- return False, error_msg
+ return False, "", ""
diff --git a/src/chat/focus_chat/planners/planner.py b/src/chat/focus_chat/planners/planner.py
index 05443006..4a62a0cf 100644
--- a/src/chat/focus_chat/planners/planner.py
+++ b/src/chat/focus_chat/planners/planner.py
@@ -156,7 +156,7 @@ class ActionPlanner:
logger.info(f"{self.log_prefix}{reasoning}")
self.action_manager.restore_actions()
logger.debug(
- f"{self.log_prefix}恢复到默认动作集, 当前可用: {list(self.action_manager.get_using_actions().keys())}"
+ f"{self.log_prefix}沉默后恢复到默认动作集, 当前可用: {list(self.action_manager.get_using_actions().keys())}"
)
return {
"action_result": {"action_type": action, "action_data": action_data, "reasoning": reasoning},
@@ -241,7 +241,7 @@ class ActionPlanner:
# 恢复到默认动作集
self.action_manager.restore_actions()
logger.debug(
- f"{self.log_prefix}恢复到默认动作集, 当前可用: {list(self.action_manager.get_using_actions().keys())}"
+ f"{self.log_prefix}规划后恢复到默认动作集, 当前可用: {list(self.action_manager.get_using_actions().keys())}"
)
action_result = {"action_type": action, "action_data": action_data, "reasoning": reasoning}
diff --git a/src/chat/focus_chat/working_memory/memory_manager.py b/src/chat/focus_chat/working_memory/memory_manager.py
index 0157e4f8..af9d8700 100644
--- a/src/chat/focus_chat/working_memory/memory_manager.py
+++ b/src/chat/focus_chat/working_memory/memory_manager.py
@@ -33,7 +33,10 @@ class MemoryManager:
self._id_map: Dict[str, MemoryItem] = {}
self.llm_summarizer = LLMRequest(
- model=global_config.model.focus_working_memory, temperature=0.3, max_tokens=512, request_type="memory_summarization"
+ model=global_config.model.focus_working_memory,
+ temperature=0.3,
+ max_tokens=512,
+ request_type="memory_summarization",
)
@property
diff --git a/src/chat/heart_flow/background_tasks.py b/src/chat/heart_flow/background_tasks.py
index b509f84b..9479804e 100644
--- a/src/chat/heart_flow/background_tasks.py
+++ b/src/chat/heart_flow/background_tasks.py
@@ -92,28 +92,30 @@ class BackgroundTaskManager:
# 根据 chat_mode 条件添加其他任务
if not (global_config.chat.chat_mode == "normal"):
- task_configs.extend([
- (
- self._run_cleanup_cycle,
- "info",
- f"清理任务已启动 间隔:{CLEANUP_INTERVAL_SECONDS}s",
- "_cleanup_task",
- ),
- # 新增私聊激活任务配置
- (
- # Use lambda to pass the interval to the runner function
- lambda: self._run_private_chat_activation_cycle(PRIVATE_CHAT_ACTIVATION_CHECK_INTERVAL_SECONDS),
- "debug",
- f"私聊激活检查任务已启动 间隔:{PRIVATE_CHAT_ACTIVATION_CHECK_INTERVAL_SECONDS}s",
- "_private_chat_activation_task",
- ),
- # (
- # self._run_into_focus_cycle,
- # "debug", # 设为debug,避免过多日志
- # f"专注评估任务已启动 间隔:{INTEREST_EVAL_INTERVAL_SECONDS}s",
- # "_into_focus_task",
- # )
- ])
+ task_configs.extend(
+ [
+ (
+ self._run_cleanup_cycle,
+ "info",
+ f"清理任务已启动 间隔:{CLEANUP_INTERVAL_SECONDS}s",
+ "_cleanup_task",
+ ),
+ # 新增私聊激活任务配置
+ (
+ # Use lambda to pass the interval to the runner function
+ lambda: self._run_private_chat_activation_cycle(PRIVATE_CHAT_ACTIVATION_CHECK_INTERVAL_SECONDS),
+ "debug",
+ f"私聊激活检查任务已启动 间隔:{PRIVATE_CHAT_ACTIVATION_CHECK_INTERVAL_SECONDS}s",
+ "_private_chat_activation_task",
+ ),
+ # (
+ # self._run_into_focus_cycle,
+ # "debug", # 设为debug,避免过多日志
+ # f"专注评估任务已启动 间隔:{INTEREST_EVAL_INTERVAL_SECONDS}s",
+ # "_into_focus_task",
+ # )
+ ]
+ )
else:
logger.info("聊天模式为 normal,跳过启动清理任务、私聊激活任务和专注评估任务")
diff --git a/src/chat/heart_flow/heartflow.py b/src/chat/heart_flow/heartflow.py
index 6e938872..e1f8d957 100644
--- a/src/chat/heart_flow/heartflow.py
+++ b/src/chat/heart_flow/heartflow.py
@@ -59,11 +59,11 @@ class Heartflow:
async def api_get_normal_chat_replies(self, subheartflow_id: str, limit: int = 10) -> Optional[List[dict]]:
"""获取子心流的NormalChat回复记录
-
+
Args:
subheartflow_id: 子心流ID
limit: 最大返回数量,默认10条
-
+
Returns:
Optional[List[dict]]: 回复记录列表,如果子心流不存在则返回None
"""
@@ -71,7 +71,7 @@ class Heartflow:
if not subheartflow:
logger.warning(f"尝试获取不存在的子心流 {subheartflow_id} 的NormalChat回复记录")
return None
-
+
return subheartflow.get_normal_chat_recent_replies(limit)
async def heartflow_start_working(self):
diff --git a/src/chat/heart_flow/interest_chatting.py b/src/chat/heart_flow/interest_chatting.py
index bce372b5..4cb477c0 100644
--- a/src/chat/heart_flow/interest_chatting.py
+++ b/src/chat/heart_flow/interest_chatting.py
@@ -20,9 +20,9 @@ MAX_REPLY_PROBABILITY = 1
class InterestChatting:
def __init__(
self,
- decay_rate=global_config.focus_chat.default_decay_rate_per_second,
+ decay_rate=0.95,
max_interest=MAX_INTEREST,
- trigger_threshold=global_config.focus_chat.reply_trigger_threshold,
+ trigger_threshold=4,
max_probability=MAX_REPLY_PROBABILITY,
):
# 基础属性初始化
diff --git a/src/chat/heart_flow/observation/chatting_observation.py b/src/chat/heart_flow/observation/chatting_observation.py
index 4264a76c..187b8027 100644
--- a/src/chat/heart_flow/observation/chatting_observation.py
+++ b/src/chat/heart_flow/observation/chatting_observation.py
@@ -1,5 +1,4 @@
from datetime import datetime
-from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config
import traceback
from src.chat.utils.chat_message_builder import (
diff --git a/src/chat/heart_flow/sub_heartflow.py b/src/chat/heart_flow/sub_heartflow.py
index 664bb54b..a7e980f4 100644
--- a/src/chat/heart_flow/sub_heartflow.py
+++ b/src/chat/heart_flow/sub_heartflow.py
@@ -12,7 +12,6 @@ from src.chat.normal_chat.normal_chat import NormalChat
from src.chat.heart_flow.mai_state_manager import MaiStateInfo
from src.chat.heart_flow.chat_state_info import ChatState, ChatStateInfo
from .utils_chat import get_chat_type_and_target_info
-from .interest_chatting import InterestChatting
from src.config.config import global_config
@@ -51,7 +50,7 @@ class SubHeartflow:
# --- End Initialization ---
# 兴趣检测器
- self.interest_chatting: InterestChatting = InterestChatting()
+ self.interest_dict: Dict[str, tuple[MessageRecv, float, bool]] = {}
# 活动状态管理
self.should_stop = False # 停止标志
@@ -85,8 +84,8 @@ class SubHeartflow:
# --- End using utility function ---
# Initialize interest system (existing logic)
- await self.interest_chatting.initialize()
- logger.debug(f"{self.log_prefix} InterestChatting 实例已初始化。")
+ # await self.interest_chatting.initialize()
+ # logger.debug(f"{self.log_prefix} InterestChatting 实例已初始化。")
# 根据配置决定初始状态
if global_config.chat.chat_mode == "focus":
@@ -131,9 +130,9 @@ class SubHeartflow:
if rewind or not self.normal_chat_instance:
# 提供回调函数,用于接收需要切换到focus模式的通知
self.normal_chat_instance = NormalChat(
- chat_stream=chat_stream,
- interest_dict=self.get_interest_dict(),
- on_switch_to_focus_callback=self._handle_switch_to_focus_request
+ chat_stream=chat_stream,
+ interest_dict=self.interest_dict,
+ on_switch_to_focus_callback=self._handle_switch_to_focus_request,
)
# 进行异步初始化
@@ -152,12 +151,12 @@ class SubHeartflow:
async def _handle_switch_to_focus_request(self) -> None:
"""
处理来自NormalChat的切换到focus模式的请求
-
+
Args:
stream_id: 请求切换的stream_id
"""
logger.info(f"{self.log_prefix} 收到NormalChat请求切换到focus模式")
-
+
# 切换到focus模式
current_state = self.chat_state.chat_status
if current_state == ChatState.NORMAL:
@@ -166,6 +165,21 @@ class SubHeartflow:
else:
logger.warning(f"{self.log_prefix} 当前状态为{current_state.value},无法切换到FOCUSED状态")
+ async def _handle_stop_focus_chat_request(self) -> None:
+ """
+ 处理来自HeartFChatting的停止focus模式的请求
+ 当收到stop_focus_chat命令时被调用
+ """
+ logger.info(f"{self.log_prefix} 收到HeartFChatting请求停止focus模式")
+
+ # 切换到normal模式
+ current_state = self.chat_state.chat_status
+ if current_state == ChatState.FOCUSED:
+ await self.change_chat_state(ChatState.NORMAL)
+ logger.info(f"{self.log_prefix} 已根据HeartFChatting请求从FOCUSED切换到NORMAL状态")
+ else:
+ logger.warning(f"{self.log_prefix} 当前状态为{current_state.value},无法切换到NORMAL状态")
+
async def _stop_heart_fc_chat(self):
"""停止并清理 HeartFChatting 实例"""
if self.heart_fc_instance:
@@ -182,7 +196,7 @@ class SubHeartflow:
async def _start_heart_fc_chat(self) -> bool:
"""启动 HeartFChatting 实例,确保 NormalChat 已停止"""
await self._stop_normal_chat() # 确保普通聊天监控已停止
- self.clear_interest_dict() # 清理兴趣字典,准备专注聊天
+ self.interest_dict.clear()
log_prefix = self.log_prefix
# 如果实例已存在,检查其循环任务状态
@@ -211,6 +225,7 @@ class SubHeartflow:
self.heart_fc_instance = HeartFChatting(
chat_id=self.subheartflow_id,
observations=self.observations,
+ on_stop_focus_chat=self._handle_stop_focus_chat_request,
)
# 初始化并启动 HeartFChatting
@@ -259,7 +274,7 @@ class SubHeartflow:
elif new_state == ChatState.ABSENT:
logger.info(f"{log_prefix} 进入 ABSENT 状态,停止所有聊天活动...")
- self.clear_interest_dict()
+ self.interest_dict.clear()
await self._stop_normal_chat()
await self._stop_heart_fc_chat()
state_changed = True
@@ -300,38 +315,35 @@ class SubHeartflow:
logger.warning(f"SubHeartflow {self.subheartflow_id} 没有找到有效的 ChattingObservation")
return None
- async def get_interest_state(self) -> dict:
- return await self.interest_chatting.get_state()
-
def get_normal_chat_last_speak_time(self) -> float:
if self.normal_chat_instance:
return self.normal_chat_instance.last_speak_time
return 0
- def get_interest_dict(self) -> Dict[str, tuple[MessageRecv, float, bool]]:
- return self.interest_chatting.interest_dict
-
def get_normal_chat_recent_replies(self, limit: int = 10) -> List[dict]:
"""获取NormalChat实例的最近回复记录
-
+
Args:
limit: 最大返回数量,默认10条
-
+
Returns:
List[dict]: 最近的回复记录列表,如果没有NormalChat实例则返回空列表
"""
if self.normal_chat_instance:
return self.normal_chat_instance.get_recent_replies(limit)
return []
-
- def clear_interest_dict(self):
- self.interest_chatting.interest_dict.clear()
+
+ def add_interest_message(self, message: MessageRecv, interest_value: float, is_mentioned: bool):
+ self.interest_dict[message.message_info.message_id] = (message, interest_value, is_mentioned)
+ # 如果字典长度超过10,删除最旧的消息
+ if len(self.interest_dict) > 10:
+ oldest_key = next(iter(self.interest_dict))
+ self.interest_dict.pop(oldest_key)
async def get_full_state(self) -> dict:
"""获取子心流的完整状态,包括兴趣、思维和聊天状态。"""
- interest_state = await self.get_interest_state()
return {
- "interest_state": interest_state,
+ "interest_state": "interest_state",
"chat_state": self.chat_state.chat_status.value,
"chat_state_changed_time": self.chat_state_changed_time,
}
@@ -349,10 +361,6 @@ class SubHeartflow:
await self._stop_normal_chat()
await self._stop_heart_fc_chat()
- # 停止兴趣更新任务
- if self.interest_chatting:
- logger.info(f"{self.log_prefix} 停止兴趣系统后台任务...")
- await self.interest_chatting.stop_updates()
# 取消可能存在的旧后台任务 (self.task)
if self.task and not self.task.done():
diff --git a/src/chat/heart_flow/subheartflow_manager.py b/src/chat/heart_flow/subheartflow_manager.py
index 5217202a..6e8e7b86 100644
--- a/src/chat/heart_flow/subheartflow_manager.py
+++ b/src/chat/heart_flow/subheartflow_manager.py
@@ -1,6 +1,5 @@
import asyncio
import time
-import random
from typing import Dict, Any, Optional, List
from src.common.logger_manager import get_logger
from src.chat.message_receive.chat_stream import chat_manager
@@ -187,40 +186,40 @@ class SubHeartflowManager:
)
# async def sbhf_normal_into_focus(self):
- # """评估子心流兴趣度,满足条件则提升到FOCUSED状态(基于start_hfc_probability)"""
- # try:
- # for sub_hf in list(self.subheartflows.values()):
- # flow_id = sub_hf.subheartflow_id
- # stream_name = chat_manager.get_stream_name(flow_id) or flow_id
+ # """评估子心流兴趣度,满足条件则提升到FOCUSED状态(基于start_hfc_probability)"""
+ # try:
+ # for sub_hf in list(self.subheartflows.values()):
+ # flow_id = sub_hf.subheartflow_id
+ # stream_name = chat_manager.get_stream_name(flow_id) or flow_id
- # # 跳过已经是FOCUSED状态的子心流
- # if sub_hf.chat_state.chat_status == ChatState.FOCUSED:
- # continue
+ # # 跳过已经是FOCUSED状态的子心流
+ # if sub_hf.chat_state.chat_status == ChatState.FOCUSED:
+ # continue
- # if sub_hf.interest_chatting.start_hfc_probability == 0:
- # continue
- # else:
- # logger.debug(
- # f"{stream_name},现在状态: {sub_hf.chat_state.chat_status.value},进入专注概率: {sub_hf.interest_chatting.start_hfc_probability}"
- # )
+ # if sub_hf.interest_chatting.start_hfc_probability == 0:
+ # continue
+ # else:
+ # logger.debug(
+ # f"{stream_name},现在状态: {sub_hf.chat_state.chat_status.value},进入专注概率: {sub_hf.interest_chatting.start_hfc_probability}"
+ # )
- # if random.random() >= sub_hf.interest_chatting.start_hfc_probability:
- # continue
+ # if random.random() >= sub_hf.interest_chatting.start_hfc_probability:
+ # continue
- # # 获取最新状态并执行提升
- # current_subflow = self.subheartflows.get(flow_id)
- # if not current_subflow:
- # continue
+ # # 获取最新状态并执行提升
+ # current_subflow = self.subheartflows.get(flow_id)
+ # if not current_subflow:
+ # continue
- # logger.info(
- # f"{stream_name} 触发 认真水群 (概率={current_subflow.interest_chatting.start_hfc_probability:.2f})"
- # )
+ # logger.info(
+ # f"{stream_name} 触发 认真水群 (概率={current_subflow.interest_chatting.start_hfc_probability:.2f})"
+ # )
- # # 执行状态提升
- # await current_subflow.change_chat_state(ChatState.FOCUSED)
+ # # 执行状态提升
+ # await current_subflow.change_chat_state(ChatState.FOCUSED)
- # except Exception as e:
- # logger.error(f"启动HFC 兴趣评估失败: {e}", exc_info=True)
+ # except Exception as e:
+ # logger.error(f"启动HFC 兴趣评估失败: {e}", exc_info=True)
async def sbhf_focus_into_normal(self, subflow_id: Any):
"""
@@ -249,7 +248,7 @@ class SubHeartflowManager:
)
try:
# 从HFC到CHAT时,清空兴趣字典
- subflow.clear_interest_dict()
+ subflow.interest_dict.clear()
await subflow.change_chat_state(target_state)
final_state = subflow.chat_state.chat_status
if final_state == target_state:
diff --git a/src/chat/normal_chat/normal_chat.py b/src/chat/normal_chat/normal_chat.py
index 34b79639..0cf0908f 100644
--- a/src/chat/normal_chat/normal_chat.py
+++ b/src/chat/normal_chat/normal_chat.py
@@ -49,14 +49,14 @@ class NormalChat:
self.last_speak_time = 0
self._chat_task: Optional[asyncio.Task] = None
self._initialized = False # Track initialization status
-
+
# 记录最近的回复内容,每项包含: {time, user_message, response, is_mentioned, is_reference_reply}
self.recent_replies = []
self.max_replies_history = 20 # 最多保存最近20条回复记录
-
+
# 添加回调函数,用于在满足条件时通知切换到focus_chat模式
self.on_switch_to_focus_callback = on_switch_to_focus_callback
-
+
# 最近回复检查相关
self._last_check_time = time.time()
self._check_interval = 10 # 每10秒检查一次是否需要切换到focus模式
@@ -207,12 +207,12 @@ class NormalChat:
if self._chat_task is None or self._chat_task.cancelled():
logger.info(f"[{self.stream_name}] 兴趣监控任务被取消或置空,退出")
break
-
+
# 定期检查是否需要切换到focus模式
- current_time = time.time()
- if current_time - self._last_check_time > self._check_interval:
- await self._check_switch_to_focus()
- self._last_check_time = current_time
+ # current_time = time.time()
+ # if current_time - self._last_check_time > self._check_interval:
+ # await self._check_switch_to_focus()
+ # self._last_check_time = current_time
items_to_process = list(self.interest_dict.items())
if not items_to_process:
@@ -329,28 +329,28 @@ class NormalChat:
# 检查 first_bot_msg 是否为 None (例如思考消息已被移除的情况)
if first_bot_msg:
info_catcher.catch_after_response(timing_results["消息发送"], response_set, first_bot_msg)
-
+
# 记录回复信息到最近回复列表中
reply_info = {
"time": time.time(),
"user_message": message.processed_plain_text,
"user_info": {
"user_id": message.message_info.user_info.user_id,
- "user_nickname": message.message_info.user_info.user_nickname
+ "user_nickname": message.message_info.user_info.user_nickname,
},
"response": response_set,
"is_mentioned": is_mentioned,
"is_reference_reply": message.reply is not None, # 判断是否为引用回复
- "timing": {k: round(v, 2) for k, v in timing_results.items()}
+ "timing": {k: round(v, 2) for k, v in timing_results.items()},
}
self.recent_replies.append(reply_info)
# 保持最近回复历史在限定数量内
if len(self.recent_replies) > self.max_replies_history:
- self.recent_replies = self.recent_replies[-self.max_replies_history:]
-
+ self.recent_replies = self.recent_replies[-self.max_replies_history :]
+
# 检查是否需要切换到focus模式
await self._check_switch_to_focus()
-
+
else:
logger.warning(f"[{self.stream_name}] 思考消息 {thinking_id} 在发送前丢失,无法记录 info_catcher")
@@ -563,10 +563,10 @@ class NormalChat:
# 获取最近回复记录的方法
def get_recent_replies(self, limit: int = 10) -> List[dict]:
"""获取最近的回复记录
-
+
Args:
limit: 最大返回数量,默认10条
-
+
Returns:
List[dict]: 最近的回复记录列表,每项包含:
time: 回复时间戳
@@ -583,21 +583,23 @@ class NormalChat:
async def _check_switch_to_focus(self) -> None:
"""检查是否满足切换到focus模式的条件"""
if not self.on_switch_to_focus_callback:
- return # 如果没有设置回调函数,直接返回
+ return # 如果没有设置回调函数,直接返回
current_time = time.time()
-
+
time_threshold = 120 / global_config.focus_chat.auto_focus_threshold
reply_threshold = 6 * global_config.focus_chat.auto_focus_threshold
-
+
one_minute_ago = current_time - time_threshold
-
+
# 统计1分钟内的回复数量
recent_reply_count = sum(1 for reply in self.recent_replies if reply["time"] > one_minute_ago)
# print(111111111111111333333333333333333333333331111111111111111111111111111111111)
# print(recent_reply_count)
# 如果1分钟内回复数量大于8,触发切换到focus模式
if recent_reply_count > reply_threshold:
- logger.info(f"[{self.stream_name}] 检测到1分钟内回复数量({recent_reply_count})大于{reply_threshold},触发切换到focus模式")
+ logger.info(
+ f"[{self.stream_name}] 检测到1分钟内回复数量({recent_reply_count})大于{reply_threshold},触发切换到focus模式"
+ )
try:
# 调用回调函数通知上层切换到focus模式
await self.on_switch_to_focus_callback()
diff --git a/src/config/official_configs.py b/src/config/official_configs.py
index 80f0ae7a..4814052a 100644
--- a/src/config/official_configs.py
+++ b/src/config/official_configs.py
@@ -44,12 +44,13 @@ class IdentityConfig(ConfigBase):
identity_detail: list[str] = field(default_factory=lambda: [])
"""身份特征"""
+
@dataclass
class RelationshipConfig(ConfigBase):
"""关系配置类"""
give_name: bool = False
- """是否给其他人取名"""
+ """是否给其他人取名"""
@dataclass
@@ -125,6 +126,9 @@ class FocusChatConfig(ConfigBase):
auto_focus_threshold: float = 1.0
"""自动切换到专注聊天的阈值,越低越容易进入专注聊天"""
+ exit_focus_threshold: float = 1.0
+ """自动退出专注聊天的阈值,越低越容易退出专注聊天"""
+
observation_context_size: int = 12
"""可观察到的最长上下文大小,超过这个值的上下文会被压缩"""
@@ -381,17 +385,16 @@ class ModelConfig(ConfigBase):
"""模型配置类"""
model_max_output_length: int = 800 # 最大回复长度
-
-
+
utils: dict[str, Any] = field(default_factory=lambda: {})
"""组件模型配置"""
-
+
utils_small: dict[str, Any] = field(default_factory=lambda: {})
"""组件小模型配置"""
normal_chat_1: dict[str, Any] = field(default_factory=lambda: {})
"""normal_chat首要回复模型模型配置"""
-
+
normal_chat_2: dict[str, Any] = field(default_factory=lambda: {})
"""normal_chat次要回复模型配置"""
@@ -403,22 +406,22 @@ class ModelConfig(ConfigBase):
focus_working_memory: dict[str, Any] = field(default_factory=lambda: {})
"""专注工作记忆模型配置"""
-
+
focus_chat_mind: dict[str, Any] = field(default_factory=lambda: {})
"""专注聊天规划模型配置"""
-
+
focus_self_recognize: dict[str, Any] = field(default_factory=lambda: {})
"""专注自我识别模型配置"""
-
+
focus_tool_use: dict[str, Any] = field(default_factory=lambda: {})
"""专注工具使用模型配置"""
focus_planner: dict[str, Any] = field(default_factory=lambda: {})
"""专注规划模型配置"""
-
+
focus_expressor: dict[str, Any] = field(default_factory=lambda: {})
"""专注表达器模型配置"""
-
+
embedding: dict[str, Any] = field(default_factory=lambda: {})
"""嵌入模型配置"""
@@ -430,7 +433,3 @@ class ModelConfig(ConfigBase):
pfc_reply_checker: dict[str, Any] = field(default_factory=lambda: {})
"""PFC回复检查模型配置"""
-
-
-
-
diff --git a/src/llm_models/utils_model.py b/src/llm_models/utils_model.py
index 319c020f..712d51d8 100644
--- a/src/llm_models/utils_model.py
+++ b/src/llm_models/utils_model.py
@@ -636,18 +636,18 @@ class LLMRequest:
"messages": messages,
**params_copy,
}
-
+
# 添加temp参数(如果不是默认值0.7)
if self.temp != 0.7:
payload["temperature"] = self.temp
-
+
# 添加enable_thinking参数(如果不是默认值False)
if not self.enable_thinking:
payload["enable_thinking"] = False
-
+
if self.thinking_budget != 4096:
payload["thinking_budget"] = self.thinking_budget
-
+
if "max_tokens" not in payload and "max_completion_tokens" not in payload:
payload["max_tokens"] = global_config.model.model_max_output_length
# 如果 payload 中依然存在 max_tokens 且需要转换,在这里进行再次检查
diff --git a/src/person_info/person_info.py b/src/person_info/person_info.py
index 021297cc..80edc4db 100644
--- a/src/person_info/person_info.py
+++ b/src/person_info/person_info.py
@@ -261,7 +261,7 @@ class PersonInfoManager:
current_name_set = set(self.person_name_list.values())
while current_try < max_retries:
- prompt_personality = individuality.get_prompt(x_person=2, level=1)
+ # prompt_personality = individuality.get_prompt(x_person=2, level=1)
bot_name = individuality.personality.bot_nickname
qv_name_prompt = f"你是{bot_name},一个伪装成人类的AI,你不能让别人发现这一点,"
@@ -273,9 +273,7 @@ class PersonInfoManager:
qv_name_prompt += f"你之前叫他{old_name},是因为{old_reason},"
qv_name_prompt += f"\n其他取名的要求是:{request},不要太浮夸,简短,"
- qv_name_prompt += (
- "\n请根据以上用户信息,想想你叫他什么比较好,不要太浮夸,请最好使用用户的qq昵称,可以稍作修改,优先使用原文。优先使用用户的qq昵称或者群昵称原文。"
- )
+ qv_name_prompt += "\n请根据以上用户信息,想想你叫他什么比较好,不要太浮夸,请最好使用用户的qq昵称,可以稍作修改,优先使用原文。优先使用用户的qq昵称或者群昵称原文。"
if existing_names_str:
qv_name_prompt += f"\n请注意,以下名称已被你尝试过或已知存在,请避免:{existing_names_str}。\n"
diff --git a/src/plugins/test_plugin/actions/group_whole_ban_action.py b/src/plugins/test_plugin/actions/group_whole_ban_action.py
index bb9f3531..7e655312 100644
--- a/src/plugins/test_plugin/actions/group_whole_ban_action.py
+++ b/src/plugins/test_plugin/actions/group_whole_ban_action.py
@@ -10,15 +10,13 @@ class GroupWholeBanAction(PluginAction):
"""群聊全体禁言动作处理类"""
action_name = "group_whole_ban_action"
- action_description = (
- "开启或关闭群聊全体禁言,当群聊过于混乱或需要安静时使用"
- )
+ action_description = "开启或关闭群聊全体禁言,当群聊过于混乱或需要安静时使用"
action_parameters = {
"enable": "是否开启全体禁言,输入True开启,False关闭,必填",
}
action_require = [
"当群聊过于混乱需要安静时使用",
- "当需要临时暂停群聊讨论时使用",
+ "当需要临时暂停群聊讨论时使用",
"当有人要求开启全体禁言时使用",
"当管理员需要发布重要公告时使用",
]
@@ -31,7 +29,7 @@ class GroupWholeBanAction(PluginAction):
# 获取参数
enable = self.action_data.get("enable")
-
+
if enable is None:
error_msg = "全体禁言参数不完整,需要enable参数"
logger.error(f"{self.log_prefix} {error_msg}")
@@ -39,9 +37,9 @@ class GroupWholeBanAction(PluginAction):
# 确保enable是布尔类型
if isinstance(enable, str):
- if enable.lower() in ['true', '1', 'yes', '开启', '是']:
+ if enable.lower() in ["true", "1", "yes", "开启", "是"]:
enable = True
- elif enable.lower() in ['false', '0', 'no', '关闭', '否']:
+ elif enable.lower() in ["false", "0", "no", "关闭", "否"]:
enable = False
else:
error_msg = f"无效的enable参数: {enable},应该是True或False"
@@ -54,20 +52,12 @@ class GroupWholeBanAction(PluginAction):
try:
# 发送群聊全体禁言命令,按照新格式
- await self.send_message(
- type="command",
- data={
- "name": "GROUP_WHOLE_BAN",
- "args": {
- "enable": enable
- }
- }
- )
-
+ await self.send_message(type="command", data={"name": "GROUP_WHOLE_BAN", "args": {"enable": enable}})
+
logger.info(f"{self.log_prefix} 成功{action_text}全体禁言")
return True, f"成功{action_text}全体禁言"
except Exception as e:
logger.error(f"{self.log_prefix} 执行全体禁言动作时出错: {e}")
await self.send_message_by_expressor(f"执行全体禁言动作时出错: {e}")
- return False, f"执行全体禁言动作时出错: {e}"
\ No newline at end of file
+ return False, f"执行全体禁言动作时出错: {e}"
diff --git a/src/plugins/test_plugin/actions/mute_action.py b/src/plugins/test_plugin/actions/mute_action.py
index 279ee010..c693c59a 100644
--- a/src/plugins/test_plugin/actions/mute_action.py
+++ b/src/plugins/test_plugin/actions/mute_action.py
@@ -10,9 +10,7 @@ class MuteAction(PluginAction):
"""群聊禁言动作处理类"""
action_name = "mute_action"
- action_description = (
- "如果某人违反了公序良俗,或者别人戳你太多,或者某人刷屏,一定要禁言某人,如果你很生气,可以禁言某人,可以自选禁言时长,视严重程度而定。"
- )
+ action_description = "如果某人违反了公序良俗,或者别人戳你太多,或者某人刷屏,一定要禁言某人,如果你很生气,可以禁言某人,可以自选禁言时长,视严重程度而定。"
action_parameters = {
"target": "禁言对象,输入你要禁言的对象的名字,必填",
"duration": "禁言时长,输入你要禁言的时长,单位为秒,必填,必须为数字",
@@ -38,7 +36,7 @@ class MuteAction(PluginAction):
target = self.action_data.get("target")
duration = self.action_data.get("duration")
reason = self.action_data.get("reason", "违反群规")
-
+
if not target or not duration:
error_msg = "禁言参数不完整,需要target和duration"
logger.error(f"{self.log_prefix} {error_msg}")
@@ -46,7 +44,7 @@ class MuteAction(PluginAction):
# 获取用户ID
platform, user_id = await self.get_user_id_by_person_name(target)
-
+
if not user_id:
error_msg = f"未找到用户 {target} 的ID"
logger.error(f"{self.log_prefix} {error_msg}")
@@ -58,19 +56,12 @@ class MuteAction(PluginAction):
try:
# 确保duration是字符串类型
duration_str = str(duration)
-
+
# 发送群聊禁言命令,按照新格式
await self.send_message(
- type="command",
- data={
- "name": "GROUP_BAN",
- "args": {
- "qq_id": str(user_id),
- "duration": duration_str
- }
- }
+ type="command", data={"name": "GROUP_BAN", "args": {"qq_id": str(user_id), "duration": duration_str}}
)
-
+
logger.info(f"{self.log_prefix} 成功禁言用户 {target}({user_id}),时长 {duration} 秒")
return True, f"成功禁言 {target},时长 {duration} 秒"
diff --git a/src/tools/tool_use.py b/src/tools/tool_use.py
index caca2cb6..b6fabb21 100644
--- a/src/tools/tool_use.py
+++ b/src/tools/tool_use.py
@@ -3,6 +3,8 @@ from src.common.logger_manager import get_logger
from src.tools.tool_can_use import get_all_tool_definitions, get_tool_instance
logger = get_logger("tool_use")
+
+
class ToolUser:
@staticmethod
def _define_tools():
diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml
index 9a1fd893..87eba0ba 100644
--- a/template/bot_config_template.toml
+++ b/template/bot_config_template.toml
@@ -80,7 +80,7 @@ talk_frequency_down_groups = [] #降低回复频率的群号码
[focus_chat] #专注聊天
auto_focus_threshold = 1 # 自动切换到专注聊天的阈值,越低越容易进入专注聊天
-
+exit_focus_threshold = 1 # 自动退出专注聊天的阈值,越低越容易退出专注聊天
consecutive_no_reply_threshold = 3 # 连续不回复的阈值,越低越容易结束专注聊天
think_interval = 3 # 思考间隔 单位秒,可以有效减少消耗
From 7b05bb3b6630b03a575414a351dd352ee209afc9 Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Tue, 27 May 2025 23:09:50 +0800
Subject: [PATCH 14/17] ruff
---
src/chat/focus_chat/heartFC_chat.py | 10 ++++++----
.../focus_chat/info_processors/action_processor.py | 4 +++-
src/chat/focus_chat/planners/action_manager.py | 4 ++--
src/chat/heart_flow/interest_chatting.py | 1 -
src/chat/heart_flow/sub_heartflow.py | 3 +--
5 files changed, 12 insertions(+), 10 deletions(-)
diff --git a/src/chat/focus_chat/heartFC_chat.py b/src/chat/focus_chat/heartFC_chat.py
index 6e8beebe..936059d1 100644
--- a/src/chat/focus_chat/heartFC_chat.py
+++ b/src/chat/focus_chat/heartFC_chat.py
@@ -292,7 +292,7 @@ class HeartFChatting:
async with global_prompt_manager.async_message_scope(self.chat_stream.context.get_template_name()):
logger.debug(f"模板 {self.chat_stream.context.get_template_name()}")
loop_info = await self._observe_process_plan_action_loop(cycle_timers, thinking_id)
-
+
print(loop_info["loop_action_info"]["command"])
if loop_info["loop_action_info"]["command"] == "stop_focus_chat":
logger.info(f"{self.log_prefix} 麦麦决定停止专注聊天")
@@ -441,7 +441,7 @@ class HeartFChatting:
observations.append(self.working_observation)
observations.append(self.hfcloop_observation)
observations.append(self.structure_observation)
-
+
loop_observation_info = {
"observations": observations,
}
@@ -562,10 +562,12 @@ class HeartFChatting:
result = await action_handler.handle_action()
if len(result) == 3:
success, reply_text, command = result
- else:
+ else:
success, reply_text = result
command = ""
- logger.info(f"{self.log_prefix} 麦麦决定'{action}', 原因'{reasoning}',返回结果'{success}', '{reply_text}', '{command}'")
+ logger.info(
+ f"{self.log_prefix} 麦麦决定'{action}', 原因'{reasoning}',返回结果'{success}', '{reply_text}', '{command}'"
+ )
return success, reply_text, command
except Exception as e:
diff --git a/src/chat/focus_chat/info_processors/action_processor.py b/src/chat/focus_chat/info_processors/action_processor.py
index fe2d8675..6979a1d8 100644
--- a/src/chat/focus_chat/info_processors/action_processor.py
+++ b/src/chat/focus_chat/info_processors/action_processor.py
@@ -136,7 +136,9 @@ class ActionProcessor(BaseProcessor):
reply_sequence.append(action_type == "reply")
# 检查no_reply比例
- if len(recent_cycles) >= (5 * global_config.focus_chat.exit_focus_threshold) and (no_reply_count / len(recent_cycles)) >= (0.75 * global_config.focus_chat.exit_focus_threshold):
+ if len(recent_cycles) >= (5 * global_config.focus_chat.exit_focus_threshold) and (
+ no_reply_count / len(recent_cycles)
+ ) >= (0.75 * global_config.focus_chat.exit_focus_threshold):
if global_config.chat.chat_mode == "auto":
result["add"].append("exit_focus_chat")
result["remove"].append("no_reply")
diff --git a/src/chat/focus_chat/planners/action_manager.py b/src/chat/focus_chat/planners/action_manager.py
index d2ed378c..7be944ae 100644
--- a/src/chat/focus_chat/planners/action_manager.py
+++ b/src/chat/focus_chat/planners/action_manager.py
@@ -159,8 +159,8 @@ class ActionManager:
"""
# 检查动作是否在当前使用的动作集中
# if action_name not in self._using_actions:
- # logger.warning(f"当前不可用的动作类型: {action_name}")
- # return None
+ # logger.warning(f"当前不可用的动作类型: {action_name}")
+ # return None
handler_class = _ACTION_REGISTRY.get(action_name)
if not handler_class:
diff --git a/src/chat/heart_flow/interest_chatting.py b/src/chat/heart_flow/interest_chatting.py
index 4cb477c0..8e493179 100644
--- a/src/chat/heart_flow/interest_chatting.py
+++ b/src/chat/heart_flow/interest_chatting.py
@@ -1,5 +1,4 @@
import asyncio
-from src.config.config import global_config
from typing import Optional, Dict
import traceback
from src.common.logger_manager import get_logger
diff --git a/src/chat/heart_flow/sub_heartflow.py b/src/chat/heart_flow/sub_heartflow.py
index a7e980f4..eb5affb3 100644
--- a/src/chat/heart_flow/sub_heartflow.py
+++ b/src/chat/heart_flow/sub_heartflow.py
@@ -332,7 +332,7 @@ class SubHeartflow:
if self.normal_chat_instance:
return self.normal_chat_instance.get_recent_replies(limit)
return []
-
+
def add_interest_message(self, message: MessageRecv, interest_value: float, is_mentioned: bool):
self.interest_dict[message.message_info.message_id] = (message, interest_value, is_mentioned)
# 如果字典长度超过10,删除最旧的消息
@@ -361,7 +361,6 @@ class SubHeartflow:
await self._stop_normal_chat()
await self._stop_heart_fc_chat()
-
# 取消可能存在的旧后台任务 (self.task)
if self.task and not self.task.done():
logger.debug(f"{self.log_prefix} 取消子心流主任务 (Shutdown)...")
From e0a8905c7782e3b2f6e6e32a7c352eff3af56cb3 Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Tue, 27 May 2025 23:15:35 +0800
Subject: [PATCH 15/17] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E9=85=8D=E7=BD=AE?=
=?UTF-8?q?=E5=90=8D=E7=A7=B0?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/api/config_api.py | 1 -
src/chat/heart_flow/interest_chatting.py | 199 ------------------
src/chat/normal_chat/normal_chat_generator.py | 2 +-
src/config/official_configs.py | 5 +-
template/bot_config_template.toml | 3 +-
5 files changed, 3 insertions(+), 207 deletions(-)
delete mode 100644 src/chat/heart_flow/interest_chatting.py
diff --git a/src/api/config_api.py b/src/api/config_api.py
index 3e3ff286..d28b1e80 100644
--- a/src/api/config_api.py
+++ b/src/api/config_api.py
@@ -62,7 +62,6 @@ class APIBotConfig:
# focus_chat
reply_trigger_threshold: float # 回复触发阈值
default_decay_rate_per_second: float # 默认每秒衰减率
- consecutive_no_reply_threshold: int # 连续不回复阈值
# compressed
compressed_length: int # 压缩长度
diff --git a/src/chat/heart_flow/interest_chatting.py b/src/chat/heart_flow/interest_chatting.py
deleted file mode 100644
index 8e493179..00000000
--- a/src/chat/heart_flow/interest_chatting.py
+++ /dev/null
@@ -1,199 +0,0 @@
-import asyncio
-from typing import Optional, Dict
-import traceback
-from src.common.logger_manager import get_logger
-from src.chat.message_receive.message import MessageRecv
-import math
-
-
-# 定义常量 (从 interest.py 移动过来)
-MAX_INTEREST = 15.0
-
-logger = get_logger("interest_chatting")
-
-PROBABILITY_INCREASE_RATE_PER_SECOND = 0.1
-PROBABILITY_DECREASE_RATE_PER_SECOND = 0.1
-MAX_REPLY_PROBABILITY = 1
-
-
-class InterestChatting:
- def __init__(
- self,
- decay_rate=0.95,
- max_interest=MAX_INTEREST,
- trigger_threshold=4,
- max_probability=MAX_REPLY_PROBABILITY,
- ):
- # 基础属性初始化
- self.interest_level: float = 0.0
- self.decay_rate_per_second: float = decay_rate
- self.max_interest: float = max_interest
-
- self.trigger_threshold: float = trigger_threshold
- self.max_reply_probability: float = max_probability
- self.is_above_threshold: bool = False
-
- # 任务相关属性初始化
- self.update_task: Optional[asyncio.Task] = None
- self._stop_event = asyncio.Event()
- self._task_lock = asyncio.Lock()
- self._is_running = False
-
- self.interest_dict: Dict[str, tuple[MessageRecv, float, bool]] = {}
- self.update_interval = 1.0
-
- self.above_threshold = False
- self.start_hfc_probability = 0.0
-
- async def initialize(self):
- async with self._task_lock:
- if self._is_running:
- logger.debug("后台兴趣更新任务已在运行中。")
- return
-
- # 清理已完成或已取消的任务
- if self.update_task and (self.update_task.done() or self.update_task.cancelled()):
- self.update_task = None
-
- if not self.update_task:
- self._stop_event.clear()
- self._is_running = True
- self.update_task = asyncio.create_task(self._run_update_loop(self.update_interval))
- logger.debug("后台兴趣更新任务已创建并启动。")
-
- def add_interest_dict(self, message: MessageRecv, interest_value: float, is_mentioned: bool):
- """添加消息到兴趣字典
-
- 参数:
- message: 接收到的消息
- interest_value: 兴趣值
- is_mentioned: 是否被提及
-
- 功能:
- 1. 将消息添加到兴趣字典
- 2. 更新最后交互时间
- 3. 如果字典长度超过10,删除最旧的消息
- """
- # 添加新消息
- self.interest_dict[message.message_info.message_id] = (message, interest_value, is_mentioned)
-
- # 如果字典长度超过10,删除最旧的消息
- if len(self.interest_dict) > 10:
- oldest_key = next(iter(self.interest_dict))
- self.interest_dict.pop(oldest_key)
-
- async def _calculate_decay(self):
- """计算兴趣值的衰减
-
- 参数:
- current_time: 当前时间戳
-
- 处理逻辑:
- 1. 计算时间差
- 2. 处理各种异常情况(负值/零值)
- 3. 正常计算衰减
- 4. 更新最后更新时间
- """
-
- # 处理极小兴趣值情况
- if self.interest_level < 1e-9:
- self.interest_level = 0.0
- return
-
- # 异常情况处理
- if self.decay_rate_per_second <= 0:
- logger.warning(f"衰减率({self.decay_rate_per_second})无效,重置兴趣值为0")
- self.interest_level = 0.0
- return
-
- # 正常衰减计算
- try:
- decay_factor = math.pow(self.decay_rate_per_second, self.update_interval)
- self.interest_level *= decay_factor
- except ValueError as e:
- logger.error(
- f"衰减计算错误: {e} 参数: 衰减率={self.decay_rate_per_second} 时间差={self.update_interval} 当前兴趣={self.interest_level}"
- )
- self.interest_level = 0.0
-
- async def _update_reply_probability(self):
- self.above_threshold = self.interest_level >= self.trigger_threshold
- if self.above_threshold:
- self.start_hfc_probability += PROBABILITY_INCREASE_RATE_PER_SECOND
- else:
- if self.start_hfc_probability > 0:
- self.start_hfc_probability = max(0, self.start_hfc_probability - PROBABILITY_DECREASE_RATE_PER_SECOND)
-
- async def increase_interest(self, value: float):
- self.interest_level += value
- self.interest_level = min(self.interest_level, self.max_interest)
-
- async def decrease_interest(self, value: float):
- self.interest_level -= value
- self.interest_level = max(self.interest_level, 0.0)
-
- async def get_interest(self) -> float:
- return self.interest_level
-
- async def get_state(self) -> dict:
- interest = self.interest_level # 直接使用属性值
- return {
- "interest_level": round(interest, 2),
- "start_hfc_probability": round(self.start_hfc_probability, 4),
- "above_threshold": self.above_threshold,
- }
-
- # --- 新增后台更新任务相关方法 ---
- async def _run_update_loop(self, update_interval: float = 1.0):
- """后台循环,定期更新兴趣和回复概率。"""
- try:
- while not self._stop_event.is_set():
- try:
- if self.interest_level != 0:
- await self._calculate_decay()
-
- await self._update_reply_probability()
-
- # 等待下一个周期或停止事件
- await asyncio.wait_for(self._stop_event.wait(), timeout=update_interval)
- except asyncio.TimeoutError:
- # 正常超时,继续循环
- continue
- except Exception as e:
- logger.error(f"InterestChatting 更新循环出错: {e}")
- logger.error(traceback.format_exc())
- # 防止错误导致CPU飙升,稍作等待
- await asyncio.sleep(5)
- except asyncio.CancelledError:
- logger.info("InterestChatting 更新循环被取消。")
- finally:
- self._is_running = False
- logger.info("InterestChatting 更新循环已停止。")
-
- async def stop_updates(self):
- """停止后台更新任务,使用锁确保并发安全"""
- async with self._task_lock:
- if not self._is_running:
- logger.debug("后台兴趣更新任务未运行。")
- return
-
- logger.info("正在停止 InterestChatting 后台更新任务...")
- self._stop_event.set()
-
- if self.update_task and not self.update_task.done():
- try:
- # 等待任务结束,设置超时
- await asyncio.wait_for(self.update_task, timeout=5.0)
- logger.info("InterestChatting 后台更新任务已成功停止。")
- except asyncio.TimeoutError:
- logger.warning("停止 InterestChatting 后台任务超时,尝试取消...")
- self.update_task.cancel()
- try:
- await self.update_task # 等待取消完成
- except asyncio.CancelledError:
- logger.info("InterestChatting 后台更新任务已被取消。")
- except Exception as e:
- logger.error(f"停止 InterestChatting 后台任务时发生异常: {e}")
- finally:
- self.update_task = None
- self._is_running = False
diff --git a/src/chat/normal_chat/normal_chat_generator.py b/src/chat/normal_chat/normal_chat_generator.py
index 6debc8ed..b55d41e7 100644
--- a/src/chat/normal_chat/normal_chat_generator.py
+++ b/src/chat/normal_chat/normal_chat_generator.py
@@ -38,7 +38,7 @@ class NormalChatGenerator:
async def generate_response(self, message: MessageThinking, thinking_id: str) -> Optional[Union[str, List[str]]]:
"""根据当前模型类型选择对应的生成函数"""
# 从global_config中获取模型概率值并选择模型
- if random.random() < global_config.normal_chat.reasoning_model_probability:
+ if random.random() < global_config.normal_chat.normal_chat_first_probability:
self.current_model_type = "深深地"
current_model = self.model_reasoning
else:
diff --git a/src/config/official_configs.py b/src/config/official_configs.py
index 4814052a..afd6676b 100644
--- a/src/config/official_configs.py
+++ b/src/config/official_configs.py
@@ -76,7 +76,7 @@ class MessageReceiveConfig(ConfigBase):
class NormalChatConfig(ConfigBase):
"""普通聊天配置类"""
- reasoning_model_probability: float = 0.3
+ normal_chat_first_probability: float = 0.3
"""
发言时选择推理模型的概率(0-1之间)
选择普通模型的概率为 1 - reasoning_normal_model_probability
@@ -132,9 +132,6 @@ class FocusChatConfig(ConfigBase):
observation_context_size: int = 12
"""可观察到的最长上下文大小,超过这个值的上下文会被压缩"""
- consecutive_no_reply_threshold: int = 3
- """连续不回复的次数阈值"""
-
compressed_length: int = 5
"""心流上下文压缩的最短压缩长度,超过心流观察到的上下文长度,会压缩,最短压缩长度为5"""
diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml
index 87eba0ba..1b7a3feb 100644
--- a/template/bot_config_template.toml
+++ b/template/bot_config_template.toml
@@ -63,7 +63,7 @@ ban_msgs_regex = [
[normal_chat] #普通聊天
#一般回复参数
-reasoning_model_probability = 0.3 # 麦麦回答时选择推理模型的概率(与之相对的,普通模型的概率为1 - reasoning_model_probability)
+normal_chat_first_probability = 0.3 # 麦麦回答时选择首要模型的概率(与之相对的,次要模型的概率为1 - normal_chat_first_probability)
max_context_size = 15 #上下文长度
emoji_chance = 0.2 # 麦麦一般回复时使用表情包的概率,设置为1让麦麦自己决定发不发
thinking_timeout = 120 # 麦麦最长思考时间,超过这个时间的思考会放弃(往往是api反应太慢)
@@ -81,7 +81,6 @@ talk_frequency_down_groups = [] #降低回复频率的群号码
[focus_chat] #专注聊天
auto_focus_threshold = 1 # 自动切换到专注聊天的阈值,越低越容易进入专注聊天
exit_focus_threshold = 1 # 自动退出专注聊天的阈值,越低越容易退出专注聊天
-consecutive_no_reply_threshold = 3 # 连续不回复的阈值,越低越容易结束专注聊天
think_interval = 3 # 思考间隔 单位秒,可以有效减少消耗
From ba34644dc3f53ac83c29b5341dd27404fd142665 Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Tue, 27 May 2025 23:17:53 +0800
Subject: [PATCH 16/17] Update changelog.md
---
changelogs/changelog.md | 2 ++
1 file changed, 2 insertions(+)
diff --git a/changelogs/changelog.md b/changelogs/changelog.md
index 17825e38..55d87d40 100644
--- a/changelogs/changelog.md
+++ b/changelogs/changelog.md
@@ -39,6 +39,7 @@
**聊天管理**
- 移除不在线状态
+- 优化自动模式下normal与focus聊天的切换机制
- 大幅精简聊天状态切换规则,减少复杂度
- 移除聊天限额数量
@@ -58,6 +59,7 @@
**优化**
- 移除日程系统,减少幻觉(将会在未来版本回归)
- 移除主心流思考和LLM进入聊天判定
+- 支持qwen3模型,支持自定义是否思考和思考长度
## [0.6.3-fix-4] - 2025-5-18
From 2c973244e3160f5647ff5866a09d28be887d8535 Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Tue, 27 May 2025 23:34:09 +0800
Subject: [PATCH 17/17] =?UTF-8?q?fix=E4=BF=AE=E5=A4=8Dat?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/chat/focus_chat/info_processors/action_processor.py | 9 +++++----
src/chat/utils/utils.py | 2 +-
2 files changed, 6 insertions(+), 5 deletions(-)
diff --git a/src/chat/focus_chat/info_processors/action_processor.py b/src/chat/focus_chat/info_processors/action_processor.py
index 6979a1d8..6970cd13 100644
--- a/src/chat/focus_chat/info_processors/action_processor.py
+++ b/src/chat/focus_chat/info_processors/action_processor.py
@@ -20,12 +20,11 @@ class ActionProcessor(BaseProcessor):
用于处理Observation对象,将其转换为ObsInfo对象。
"""
- log_prefix = "聊天信息处理"
+ log_prefix = "动作处理"
def __init__(self):
"""初始化观察处理器"""
super().__init__()
- # TODO: API-Adapter修改标记
async def process_info(
self,
@@ -136,9 +135,11 @@ class ActionProcessor(BaseProcessor):
reply_sequence.append(action_type == "reply")
# 检查no_reply比例
- if len(recent_cycles) >= (5 * global_config.focus_chat.exit_focus_threshold) and (
+ print(f"no_reply_count: {no_reply_count}, len(recent_cycles): {len(recent_cycles)}")
+ # print(1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111)
+ if len(recent_cycles) >= (4 * global_config.focus_chat.exit_focus_threshold) and (
no_reply_count / len(recent_cycles)
- ) >= (0.75 * global_config.focus_chat.exit_focus_threshold):
+ ) >= (0.6 * global_config.focus_chat.exit_focus_threshold):
if global_config.chat.chat_mode == "auto":
result["add"].append("exit_focus_chat")
result["remove"].append("no_reply")
diff --git a/src/chat/utils/utils.py b/src/chat/utils/utils.py
index 2c183934..c7a45675 100644
--- a/src/chat/utils/utils.py
+++ b/src/chat/utils/utils.py
@@ -63,7 +63,7 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]:
)
# 判断是否被@
- if re.search(rf"@<(.+?):{global_config.bot.qq_account}>"):
+ if re.search(rf"@<(.+?):{global_config.bot.qq_account}>", message.processed_plain_text):
is_at = True
is_mentioned = True