pull/1215/head
Windpicker-owo 2025-08-23 03:24:43 +08:00
commit 555300894d
35 changed files with 873 additions and 1502 deletions

View File

@ -46,7 +46,7 @@
## 🔥 Updates and Installation
**Latest version: v0.10.0** ([changelog](changelogs/changelog.md))
**Latest version: v0.10.1** ([changelog](changelogs/changelog.md))
Download the latest version from the [Release](https://github.com/MaiM-with-u/MaiBot/releases/) page
Download the latest launcher from the [launcher releases page](https://github.com/MaiM-with-u/mailauncher/releases/)
@ -59,9 +59,8 @@
- [🚀 Deployment guide for the latest version](https://docs.mai-mai.org/manual/deployment/mmc_deploy_windows.html) - the new MaiCore-based deployment (incompatible with older versions)
> [!WARNING]
> - Before upgrading from an old 0.6.x version, be sure to read the [upgrade guide](https://docs.mai-mai.org/faq/maibot/update_to_07.html)
> - The project is under active development; features and APIs may change at any time.
> - Documentation is still incomplete; feel free to open an Issue or Discussion with any questions.
> - Feel free to open an Issue or Discussion with any questions.
> - QQ bots run a risk of account restrictions; please understand the risk and use with caution.
> - As the program is still in development, it may consume a considerable number of tokens.

bot.py (45 changed lines)
View File

@ -1,7 +1,13 @@
import asyncio
import hashlib
import os
import sys
import time
import platform
import traceback
from dotenv import load_dotenv
from pathlib import Path
from rich.traceback import install
if os.path.exists(".env"):
load_dotenv(".env", override=True)
@ -9,22 +15,14 @@ if os.path.exists(".env"):
else:
print("未找到.env文件请确保程序所需的环境变量被正确设置")
raise FileNotFoundError(".env 文件不存在,请创建并配置所需的环境变量")
import sys
import time
import platform
import traceback
from pathlib import Path
from rich.traceback import install
# maim_message imports for console input
# Initialize the logging system as early as possible, so every later module uses the correct log format
from src.common.logger import initialize_logging, get_logger, shutdown_logging
initialize_logging()
from src.main import MainSystem #noqa
from src.manager.async_task_manager import async_task_manager #noqa
from src.main import MainSystem # noqa
from src.manager.async_task_manager import async_task_manager # noqa
logger = get_logger("main")
@ -48,21 +46,6 @@ app = None
loop = None
async def request_shutdown() -> bool:
"""请求关闭程序"""
try:
if loop and not loop.is_closed():
try:
loop.run_until_complete(graceful_shutdown())
except Exception as ge: # catch errors that can occur during graceful shutdown
logger.error(f"优雅关闭时发生错误: {ge}")
return False
return True
except Exception as e:
logger.error(f"请求关闭程序时发生错误: {e}")
return False
def easter_egg():
# Easter egg
from colorama import init, Fore
@ -76,15 +59,15 @@ def easter_egg():
print(rainbow_text)
async def graceful_shutdown():
async def graceful_shutdown(): # sourcery skip: use-named-expression
try:
logger.info("正在优雅关闭麦麦...")
# Fire the ON_STOP event
from src.plugin_system.core.events_manager import events_manager
from src.plugin_system.base.component_types import EventType
asyncio.run(events_manager.handle_mai_events(event_type=EventType.ON_STOP))
# logger.info("已触发 ON_STOP 事件")
# 触发 ON_STOP 事件
_ = await events_manager.handle_mai_events(event_type=EventType.ON_STOP)
# Stop all async tasks
await async_task_manager.stop_and_wait_all_tasks()

View File

@ -1,6 +1,16 @@
# Changelog
## [0.10.0] - 2025-7-1
## [0.10.1] - 2025-8-
- Added separately controllable prompts for the planner
- Fixed a bug in activation-value calculation
- Fixed lpmm logging errors
- Fixed the issue where the first message received no reply
- Fixed a bug in the emoji manager
- Improved handling of model requests
- Refactored internal code
## [0.10.0] - 2025-8-18
### 🌟 Major feature changes
- Improved reply generation; replies now track context more precisely
- New reply-logic control merges the normal and focus modes into a more unified flow

View File

@ -110,7 +110,6 @@ class LogFormatter:
"plugin_system": "#FF0080",
"experimental": "#FFFFFF",
"person_info": "#008000",
"individuality": "#000080",
"manager": "#800080",
"llm_models": "#008080",
"plugins": "#800000",

View File

@ -65,7 +65,6 @@ class ExpressionLearner:
self.chat_id = chat_id
self.chat_name = get_chat_manager().get_stream_name(chat_id) or chat_id
# Track each chat's last learning time
self.last_learning_time: float = time.time()
@ -73,9 +72,6 @@ class ExpressionLearner:
self.min_messages_for_learning = 25 # minimum number of messages required to trigger learning
self.min_learning_interval = 300 # minimum interval between learning runs (seconds)
def can_learn_for_chat(self) -> bool:
"""
Check whether the given chat stream is allowed to learn expressions
@ -107,7 +103,9 @@ class ExpressionLearner:
# Fetch the learning intensity for this chat stream
try:
_, enable_learning, learning_intensity = global_config.expression.get_expression_config_for_chat(self.chat_id)
_, enable_learning, learning_intensity = global_config.expression.get_expression_config_for_chat(
self.chat_id
)
except Exception as e:
logger.error(f"获取聊天流 {self.chat_id} 的学习配置失败: {e}")
return False
@ -169,33 +167,6 @@ class ExpressionLearner:
logger.error(f"为聊天流 {self.chat_name} 触发学习失败: {e}")
return False
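Editor's note: the two thresholds above (min_messages_for_learning = 25, min_learning_interval = 300 s) gate per-chat learning; a minimal standalone sketch of that check, with hypothetical names:

import time

def can_learn(last_learning_time: float, new_message_count: int,
              min_messages: int = 25, min_interval: float = 300.0) -> bool:
    # Learn only when enough new messages have accumulated AND enough time has passed.
    return (new_message_count >= min_messages
            and time.time() - last_learning_time >= min_interval)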
# def get_expression_by_chat_id(self) -> Tuple[List[Dict[str, float]], List[Dict[str, float]]]:
# """
# 获取指定chat_id的style表达方式已禁用grammar的获取
# 返回的每个表达方式字典中都包含了source_id, 用于后续的更新操作
# """
# learnt_style_expressions = []
# # 直接从数据库查询
# style_query = Expression.select().where((Expression.chat_id == self.chat_id) & (Expression.type == "style"))
# for expr in style_query:
# # 确保create_date存在如果不存在则使用last_active_time
# create_date = expr.create_date if expr.create_date is not None else expr.last_active_time
# learnt_style_expressions.append(
# {
# "situation": expr.situation,
# "style": expr.style,
# "count": expr.count,
# "last_active_time": expr.last_active_time,
# "source_id": self.chat_id,
# "type": "style",
# "create_date": create_date,
# }
# )
# return learnt_style_expressions
def _apply_global_decay_to_database(self, current_time: float) -> None:
"""
Apply global decay to all expressions in the database
@ -414,6 +385,7 @@ class ExpressionLearner:
init_prompt()
class ExpressionLearnerManager:
def __init__(self):
self.expression_learners = {}
@ -445,7 +417,6 @@ class ExpressionLearnerManager:
except Exception as e:
logger.error(f"创建目录失败 {directory}: {e}")
def _auto_migrate_json_to_db(self):
"""
Automatically migrate every expressions.json under /data/expression/learnt_style and learnt_grammar into the database

View File

@ -16,8 +16,8 @@ from src.chat.utils.timer_calculator import Timer
from src.chat.planner_actions.planner import ActionPlanner
from src.chat.planner_actions.action_modifier import ActionModifier
from src.chat.planner_actions.action_manager import ActionManager
from src.chat.chat_loop.hfc_utils import CycleDetail
from src.chat.chat_loop.hfc_utils import send_typing, stop_typing
from src.chat.heart_flow.hfc_utils import CycleDetail
from src.chat.heart_flow.hfc_utils import send_typing, stop_typing
from src.chat.memory_system.Hippocampus import hippocampus_manager
from src.chat.frequency_control.talk_frequency_control import talk_frequency_control
from src.chat.frequency_control.focus_value_control import focus_value_control
@ -29,6 +29,7 @@ from src.plugin_system.core import events_manager
from src.plugin_system.apis import generator_api, send_api, message_api, database_api
from src.mais4u.mai_think import mai_thinking_manager
from src.mais4u.s4u_config import s4u_config
from src.chat.utils.chat_message_builder import (
    build_readable_messages_with_id,
    build_readable_actions,
    get_actions_by_timestamp_with_chat,
    get_raw_msg_before_timestamp_with_chat,
)
if TYPE_CHECKING:
from src.common.data_models.database_data_model import DatabaseMessages
@ -402,8 +403,8 @@ class HeartFChatting:
)
]
else:
# Step 1: action modification
with Timer("动作修改", cycle_timers):
# Step 1: action check
with Timer("动作检查", cycle_timers):
try:
await self.action_modifier.modify_actions()
available_actions = self.action_manager.get_using_actions()
@ -412,10 +413,45 @@ class HeartFChatting:
# Run the planner
planner_info = self.action_planner.get_necessary_info()
message_list_before_now = get_raw_msg_before_timestamp_with_chat(
chat_id=self.stream_id,
timestamp=time.time(),
limit=int(global_config.chat.max_context_size * 0.6),
)
chat_content_block, message_id_list = build_readable_messages_with_id(
messages=message_list_before_now,
timestamp_mode="normal_no_YMD",
read_mark=self.action_planner.last_obs_time_mark,
truncate=True,
show_actions=True,
)
actions_before_now = get_actions_by_timestamp_with_chat(
chat_id=self.stream_id,
timestamp_start=time.time() - 600,
timestamp_end=time.time(),
limit=5,
)
actions_before_now_block = build_readable_actions(
actions=actions_before_now,
)
prompt_info = await self.action_planner.build_planner_prompt(
is_group_chat=planner_info[0],
chat_target_info=planner_info[1],
current_available_actions=planner_info[2],
chat_content_block=chat_content_block,
actions_before_now_block=actions_before_now_block,
message_id_list=message_id_list,
)
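# Editor's note: for scale, with this commit's default max_context_size = 18
# (see ChatConfig later in this diff), the caps above work out to
# int(18 * 0.6) == 10 recent messages, plus at most 5 actions from the last 600 seconds.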
if not await events_manager.handle_mai_events(
EventType.ON_PLAN, None, prompt_info[0], None, self.chat_stream.stream_id
@ -428,6 +464,9 @@ class HeartFChatting:
available_actions=available_actions,
)
for action in action_to_use_info:
logger.debug(action.action_type)
# 3. Execute all actions in parallel
action_tasks = [
asyncio.create_task(
@ -679,7 +718,7 @@ class HeartFChatting:
}
else:
try:
success, response_set, prompt, selected_expressions = await generator_api.generate_reply(
success, llm_response = await generator_api.generate_reply(
chat_stream=self.chat_stream,
reply_message=action_planner_info.action_message,
available_actions=available_actions,
@ -688,10 +727,9 @@ class HeartFChatting:
enable_tool=global_config.tool.enable_tool,
request_type="replyer",
from_plugin=False,
return_expressions=True,
)
if not success or not response_set:
if not success or not llm_response or not llm_response.reply_set:
if action_planner_info.action_message:
logger.info(f"{action_planner_info.action_message.processed_plain_text} 的回复生成失败")
else:
@ -701,7 +739,8 @@ class HeartFChatting:
except asyncio.CancelledError:
logger.debug(f"{self.log_prefix} 并行执行:回复生成任务已被取消")
return {"action_type": "reply", "success": False, "reply_text": "", "loop_info": None}
response_set = llm_response.reply_set
selected_expressions = llm_response.selected_expressions
loop_info, reply_text, _ = await self._send_and_store_reply(
response_set=response_set,
action_message=action_planner_info.action_message, # type: ignore

View File

@ -2,36 +2,29 @@ import traceback
from typing import Any, Optional, Dict
from src.common.logger import get_logger
from src.chat.heart_flow.sub_heartflow import SubHeartflow
from src.chat.heart_flow.heartFC_chat import HeartFChatting
logger = get_logger("heartflow")
class Heartflow:
"""主心流协调器,负责初始化并协调聊天"""
def __init__(self):
self.subheartflows: Dict[Any, "SubHeartflow"] = {}
async def get_or_create_subheartflow(self, subheartflow_id: Any) -> Optional["SubHeartflow"]:
"""获取或创建一个新的SubHeartflow实例"""
if subheartflow_id in self.subheartflows:
if subflow := self.subheartflows.get(subheartflow_id):
return subflow
self.heartflow_chat_list: Dict[Any, HeartFChatting] = {}
async def get_or_create_heartflow_chat(self, chat_id: Any) -> Optional[HeartFChatting]:
"""获取或创建一个新的HeartFChatting实例"""
try:
new_subflow = SubHeartflow(subheartflow_id)
await new_subflow.initialize()
# Register the sub-heartflow
self.subheartflows[subheartflow_id] = new_subflow
return new_subflow
if chat_id in self.heartflow_chat_list:
if chat := self.heartflow_chat_list.get(chat_id):
return chat
else:
new_chat = HeartFChatting(chat_id=chat_id)
await new_chat.start()
self.heartflow_chat_list[chat_id] = new_chat
return new_chat
except Exception as e:
logger.error(f"创建子心流 {subheartflow_id} 失败: {e}", exc_info=True)
logger.error(f"创建心流聊天 {chat_id} 失败: {e}", exc_info=True)
traceback.print_exc()
return None
heartflow = Heartflow()
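Editor's note: a hedged usage sketch of the new accessor (the module path and stream id are illustrative, not taken from this diff):

import asyncio
from src.chat.heart_flow.heartflow import heartflow  # path assumed

async def demo() -> None:
    chat = await heartflow.get_or_create_heartflow_chat("stream-123")  # id illustrative
    if chat is None:
        print("creation failed; see the error log")

# asyncio.run(demo())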

View File

@ -18,7 +18,7 @@ from src.mood.mood_manager import mood_manager
from src.person_info.person_info import Person
if TYPE_CHECKING:
from src.chat.heart_flow.sub_heartflow import SubHeartflow
from src.chat.heart_flow.heartFC_chat import HeartFChatting
logger = get_logger("chat")
@ -38,7 +38,7 @@ async def _calculate_interest(message: MessageRecv) -> Tuple[float, bool, list[s
interested_rate, keywords, keywords_lite = await hippocampus_manager.get_activate_from_text(
message.processed_plain_text,
max_depth=4,
fast_retrieval=False,
fast_retrieval=global_config.chat.interest_rate_mode == "fast",
)
message.key_words = keywords
message.key_words_lite = keywords_lite
@ -78,10 +78,14 @@ async def _calculate_interest(message: MessageRecv) -> Tuple[float, bool, list[s
interested_rate += base_interest
if is_mentioned:
interest_increase_on_mention = 1
interest_increase_on_mention = 2
interested_rate += interest_increase_on_mention
return interested_rate, is_mentioned, keywords
message.interest_value = interested_rate
message.is_mentioned = is_mentioned
return interested_rate, keywords
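Editor's note: the bonus logic above, isolated; this commit raises the mention bonus from 1 to 2:

def apply_mention_bonus(interest: float, is_mentioned: bool) -> float:
    # A message that mentions the bot gets a flat +2 interest bonus (previously +1).
    return interest + (2.0 if is_mentioned else 0.0)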
class HeartFCMessageReceiver:
@ -110,17 +114,16 @@ class HeartFCMessageReceiver:
chat = message.chat_stream
# 2. Interest calculation and update
interested_rate, is_mentioned, keywords = await _calculate_interest(message)
message.interest_value = interested_rate
message.is_mentioned = is_mentioned
interested_rate, keywords = await _calculate_interest(message)
await self.storage.store_message(message, chat)
subheartflow: SubHeartflow = await heartflow.get_or_create_subheartflow(chat.stream_id) # type: ignore
heartflow_chat: HeartFChatting = await heartflow.get_or_create_heartflow_chat(chat.stream_id) # type: ignore
# subheartflow.add_message_to_normal_chat_cache(message, interested_rate, is_mentioned)
if global_config.mood.enable_mood:
chat_mood = mood_manager.get_mood_by_chat_id(subheartflow.chat_id)
chat_mood = mood_manager.get_mood_by_chat_id(heartflow_chat.stream_id)
asyncio.create_task(chat_mood.update_mood_by_message(message, interested_rate))
# 3. Logging

View File

@ -1,41 +0,0 @@
from rich.traceback import install
from src.common.logger import get_logger
from src.chat.message_receive.chat_stream import get_chat_manager
from src.chat.chat_loop.heartFC_chat import HeartFChatting
from src.chat.utils.utils import get_chat_type_and_target_info
logger = get_logger("sub_heartflow")
install(extra_lines=3)
class SubHeartflow:
def __init__(
self,
subheartflow_id,
):
"""子心流初始化函数
Args:
subheartflow_id: unique identifier of this sub-heartflow
"""
# Basic attributes; the two values are identical
self.subheartflow_id = subheartflow_id
self.chat_id = subheartflow_id
self.is_group_chat, self.chat_target_info = get_chat_type_and_target_info(self.chat_id)
self.log_prefix = get_chat_manager().get_stream_name(self.subheartflow_id) or self.subheartflow_id
# Cooldown management for exiting focus mode
self.last_focus_exit_time: float = 0 # time focus mode was last exited
# Instances for casual chatting (normal_chat) and focused chatting (focus_chat)
# CHAT mode activates casual chatting; FOCUS mode activates focused chatting
self.heart_fc_instance: HeartFChatting = HeartFChatting(
chat_id=self.subheartflow_id,
) # the HeartFChatting instance for this sub_heartflow
async def initialize(self):
"""异步初始化方法,创建兴趣流并确定聊天类型"""
await self.heart_fc_instance.start()

View File

@ -2,7 +2,7 @@ import random
import asyncio
import hashlib
import time
from typing import List, Any, Dict, TYPE_CHECKING, Tuple
from typing import List, Dict, TYPE_CHECKING, Tuple
from src.common.logger import get_logger
from src.config.config import global_config, model_config
@ -60,7 +60,7 @@ class ActionModifier:
removals_s1: List[Tuple[str, str]] = []
removals_s2: List[Tuple[str, str]] = []
removals_s3: List[Tuple[str, str]] = []
# removals_s3: List[Tuple[str, str]] = []
self.action_manager.restore_actions()
all_actions = self.action_manager.get_using_actions()
@ -103,26 +103,28 @@ class ActionModifier:
self.action_manager.remove_action_from_using(action_name)
logger.debug(f"{self.log_prefix}阶段二移除动作: {action_name},原因: {reason}")
# === Stage 3: activation-type adjudication ===
if chat_content is not None:
logger.debug(f"{self.log_prefix}开始激活类型判定阶段")
# if chat_content is not None:
# logger.debug(f"{self.log_prefix}开始激活类型判定阶段")
# Get the currently used action set (after stage one)
current_using_actions = self.action_manager.get_using_actions()
# current_using_actions = self.action_manager.get_using_actions()
# Get the actions to remove based on activation-type adjudication
removals_s3 = await self._get_deactivated_actions_by_type(
current_using_actions,
chat_content,
)
# removals_s3 = await self._get_deactivated_actions_by_type(
# current_using_actions,
# chat_content,
# )
# Apply stage-three removals
for action_name, reason in removals_s3:
self.action_manager.remove_action_from_using(action_name)
logger.debug(f"{self.log_prefix}阶段三移除动作: {action_name},原因: {reason}")
# for action_name, reason in removals_s3:
# self.action_manager.remove_action_from_using(action_name)
# logger.debug(f"{self.log_prefix}阶段三移除动作: {action_name},原因: {reason}")
# === Consolidated logging ===
all_removals = removals_s1 + removals_s2 + removals_s3
all_removals = removals_s1 + removals_s2
removals_summary: str = ""
if all_removals:
removals_summary = " | ".join([f"{name}({reason})" for name, reason in all_removals])
@ -161,7 +163,7 @@ class ActionModifier:
deactivated_actions = []
# Handle actions by activation type
llm_judge_actions = {}
llm_judge_actions: Dict[str, ActionInfo] = {}
actions_to_check = list(actions_with_info.items())
random.shuffle(actions_to_check)
@ -218,7 +220,7 @@ class ActionModifier:
async def _process_llm_judge_actions_parallel(
self,
llm_judge_actions: Dict[str, Any],
llm_judge_actions: Dict[str, ActionInfo],
chat_content: str = "",
) -> Dict[str, bool]:
"""
@ -237,7 +239,7 @@ class ActionModifier:
current_time = time.time()
results = {}
tasks_to_run = {}
tasks_to_run: Dict[str, ActionInfo] = {}
# Check the cache
for action_name, action_info in llm_judge_actions.items():

View File

@ -1,7 +1,7 @@
import json
import time
import traceback
from typing import Dict, Optional, Tuple, List
from typing import Dict, Optional, Tuple, List, Any
from rich.traceback import install
from datetime import datetime
from json_repair import repair_json
@ -21,8 +21,9 @@ from src.chat.utils.chat_message_builder import (
from src.chat.utils.utils import get_chat_type_and_target_info
from src.chat.planner_actions.action_manager import ActionManager
from src.chat.message_receive.chat_stream import get_chat_manager
from src.plugin_system.base.component_types import ActionInfo, ChatMode, ComponentType
from src.plugin_system.base.component_types import ActionInfo, ChatMode, ComponentType, ActionActivationType
from src.plugin_system.core.component_registry import component_registry
import random
logger = get_logger("planner")
@ -33,7 +34,7 @@ def init_prompt():
Prompt(
"""
{time_block}
{identity_block}
{name_block}
你现在需要根据聊天内容选择的合适的action来参与聊天
请你根据以下行事风格来决定action:
{plan_style}
@ -84,6 +85,36 @@ def init_prompt():
)
Prompt(
"""
{time_block}
{name_block}
请你根据聊天内容选择一个或多个action来参与聊天如果没有合适的action请选择no_action
{chat_context_description}以下是具体的聊天内容
{chat_content_block}
{moderation_prompt}
现在请你根据聊天内容和用户的最新消息选择合适的action和触发action的消息:
{actions_before_now_block}
no_action不选择任何动作
{{
"action": "no_action",
"reason":"不动作的原因"
}}
{action_options_text}
请选择并说明触发action的消息id和选择该action的原因消息id格式:m+数字
请根据动作示例以严格的 JSON 格式输出且仅包含 JSON 内容
""",
"sub_planner_prompt",
)
class ActionPlanner:
def __init__(self, chat_id: str, action_manager: ActionManager):
self.chat_id = chat_id
@ -93,6 +124,9 @@ class ActionPlanner:
self.planner_llm = LLMRequest(
model_set=model_config.model_task_config.planner, request_type="planner"
) # used for action planning
self.planner_small_llm = LLMRequest(
model_set=model_config.model_task_config.planner_small, request_type="planner_small"
) # used for sub-planner action planning
self.last_obs_time_mark = 0.0
# retry counter
@ -100,7 +134,7 @@ class ActionPlanner:
self.max_plan_retries = 3
def find_message_by_id(
self, message_id: str, message_id_list: List[DatabaseMessages]
self, message_id: str, message_id_list: List[Tuple[str, DatabaseMessages]]
) -> Optional[DatabaseMessages]:
# sourcery skip: use-next
"""
@ -114,8 +148,8 @@ class ActionPlanner:
The original message dict if found; otherwise None
"""
for item in message_id_list:
if item.message_id == message_id:
return item
if item[0] == message_id:
return item[1]
return None
def get_latest_message(self, message_id_list: List[DatabaseMessages]) -> Optional[DatabaseMessages]:
@ -130,6 +164,274 @@ class ActionPlanner:
"""
return message_id_list[-1] if message_id_list else None
def _parse_single_action(
    self,
    action_json: dict,
    message_id_list: List[Tuple[str, DatabaseMessages]],
    current_available_actions: List[Tuple[str, ActionInfo]],
) -> List[ActionPlannerInfo]:
    """Parse a single action JSON object and return a list of ActionPlannerInfo"""
action_planner_infos = []
try:
action = action_json.get("action", "no_action")
reasoning = action_json.get("reason", "未提供原因")
action_data = {}
# Copy every other property into action_data
for key, value in action_json.items():
if key not in ["action", "reason"]:
action_data[key] = value
# Actions other than no_action require a target_message_id
target_message = None
if action != "no_action":
if target_message_id := action_json.get("target_message_id"):
# Look up the original message by target_message_id
target_message = self.find_message_by_id(target_message_id, message_id_list)
if target_message is None:
logger.warning(f"{self.log_prefix}无法找到target_message_id '{target_message_id}' 对应的消息")
# Fall back to the latest message as target_message
target_message = self.get_latest_message(message_id_list)
else:
logger.warning(f"{self.log_prefix}动作'{action}'缺少target_message_id")
# Validate that the action is available
available_action_names = [action_name for action_name, _ in current_available_actions]
if action != "no_action" and action != "reply" and action not in available_action_names:
logger.warning(
f"{self.log_prefix}LLM 返回了当前不可用或无效的动作: '{action}' (可用: {available_action_names}),将强制使用 'no_action'"
)
reasoning = f"LLM 返回了当前不可用的动作 '{action}' (可用: {available_action_names})。原始理由: {reasoning}"
action = "no_action"
# Create the ActionPlannerInfo object
# Convert the list into dict form
available_actions_dict = dict(current_available_actions)
action_planner_infos.append(ActionPlannerInfo(
action_type=action,
reasoning=reasoning,
action_data=action_data,
action_message=target_message,
available_actions=available_actions_dict,
))
except Exception as e:
logger.error(f"{self.log_prefix}解析单个action时出错: {e}")
# Convert the list into dict form
available_actions_dict = dict(current_available_actions)
action_planner_infos.append(ActionPlannerInfo(
action_type="no_action",
reasoning=f"解析单个action时出错: {e}",
action_data={},
action_message=None,
available_actions=available_actions_dict,
))
return action_planner_infos
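Editor's note: an illustrative input for _parse_single_action, with the JSON shape taken from the sub-planner prompt above (values hypothetical):

payload = {"action": "reply", "reason": "the user asked a direct question", "target_message_id": "m31"}
# -> one ActionPlannerInfo with action_type="reply", the reason as reasoning,
#    action_data={"target_message_id": "m31"}, and the m31 message resolved via
#    find_message_by_id (falling back to the latest message if the id is unknown).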
async def sub_plan(
self,
action_list: List[Tuple[str, ActionInfo]],
actions_before_now: List[Dict[str, Any]],
chat_content_block: str,
message_id_list: List[Tuple[str, DatabaseMessages]],
is_group_chat: bool = False,
chat_target_info: Optional[dict] = None,
# current_available_actions: Dict[str, ActionInfo] = {},
) -> List[ActionPlannerInfo]:
# Build and execute a sub-planner (a single one)
try:
# Fetch recent actions
# Keep only entries whose action_type is in action_list
action_names_in_list = [name for name, _ in action_list]
# actions_before_now is a List[Dict[str, Any]]; extract its action_name field
filtered_actions = []
# print(actions_before_now)
# print(action_names_in_list)
for action_record in actions_before_now:
if isinstance(action_record, dict) and 'action_name' in action_record:
action_type = action_record['action_name']
if action_type in action_names_in_list:
filtered_actions.append(action_record)
actions_before_now_block = build_readable_actions(
actions=filtered_actions,
)
if actions_before_now_block:
actions_before_now_block = f"你刚刚选择并执行过的action是请注意如果相同的内容已经被执行请不要重复执行\n{actions_before_now_block}"
else:
actions_before_now_block = ""
chat_context_description = "你现在正在一个群聊中"
chat_target_name = None
if not is_group_chat and chat_target_info:
chat_target_name = (
chat_target_info.get("person_name") or chat_target_info.get("user_nickname") or "对方"
)
chat_context_description = f"你正在和 {chat_target_name} 私聊"
action_options_block = ""
for using_actions_name, using_actions_info in action_list:
if using_actions_info.action_parameters:
param_text = "\n"
for param_name, param_description in using_actions_info.action_parameters.items():
param_text += f' "{param_name}":"{param_description}"\n'
param_text = param_text.rstrip("\n")
else:
param_text = ""
require_text = ""
for require_item in using_actions_info.action_require:
require_text += f"- {require_item}\n"
require_text = require_text.rstrip("\n")
using_action_prompt = await global_prompt_manager.get_prompt_async("action_prompt")
using_action_prompt = using_action_prompt.format(
action_name=using_actions_name,
action_description=using_actions_info.description,
action_parameters=param_text,
action_require=require_text,
)
action_options_block += using_action_prompt
moderation_prompt_block = "请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。"
time_block = f"当前时间:{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
bot_name = global_config.bot.nickname
if global_config.bot.alias_names:
bot_nickname = f",也有人叫你{','.join(global_config.bot.alias_names)}"
else:
bot_nickname = ""
name_block = f"你的名字是{bot_name}{bot_nickname},请注意哪些是你自己的发言。"
planner_prompt_template = await global_prompt_manager.get_prompt_async("sub_planner_prompt")
prompt = planner_prompt_template.format(
time_block=time_block,
chat_context_description=chat_context_description,
chat_content_block=chat_content_block,
actions_before_now_block=actions_before_now_block,
action_options_text=action_options_block,
moderation_prompt=moderation_prompt_block,
name_block=name_block,
)
# return prompt, message_id_list
except Exception as e:
logger.error(f"构建 Planner 提示词时出错: {e}")
logger.error(traceback.format_exc())
# Return a default no_action instead of a string
return [ActionPlannerInfo(
action_type="no_action",
reasoning=f"构建 Planner Prompt 时出错: {e}",
action_data={},
action_message=None,
available_actions=action_list,
)]
# --- Call the LLM (plain text generation) ---
llm_content = None
action_planner_infos = [] # holds multiple ActionPlannerInfo objects
try:
llm_content, (reasoning_content, _, _) = await self.planner_small_llm.generate_response_async(prompt=prompt)
if global_config.debug.show_prompt:
logger.info(f"{self.log_prefix}副规划器原始提示词: {prompt}")
logger.info(f"{self.log_prefix}副规划器原始响应: {llm_content}")
if reasoning_content:
logger.info(f"{self.log_prefix}副规划器推理: {reasoning_content}")
else:
logger.debug(f"{self.log_prefix}副规划器原始提示词: {prompt}")
logger.debug(f"{self.log_prefix}副规划器原始响应: {llm_content}")
if reasoning_content:
logger.debug(f"{self.log_prefix}副规划器推理: {reasoning_content}")
except Exception as req_e:
logger.error(f"{self.log_prefix}副规划器LLM 请求执行失败: {req_e}")
# Return a default no_action
action_planner_infos.append(ActionPlannerInfo(
action_type="no_action",
reasoning=f"副规划器LLM 请求失败,模型出现问题: {req_e}",
action_data={},
action_message=None,
available_actions=action_list,
))
return action_planner_infos
if llm_content:
try:
parsed_json = json.loads(repair_json(llm_content))
# Handle the different JSON shapes
if isinstance(parsed_json, list):
# If it is a list, handle each action
if parsed_json:
logger.info(f"{self.log_prefix}LLM返回了{len(parsed_json)}个action")
for action_item in parsed_json:
if isinstance(action_item, dict):
action_planner_infos.extend(self._parse_single_action(
action_item, message_id_list, action_list
))
else:
logger.warning(f"{self.log_prefix}列表中的action项不是字典类型: {type(action_item)}")
else:
logger.warning(f"{self.log_prefix}LLM返回了空列表")
action_planner_infos.append(ActionPlannerInfo(
action_type="no_action",
reasoning="LLM返回了空列表选择no_action",
action_data={},
action_message=None,
available_actions=action_list,
))
elif isinstance(parsed_json, dict):
# If it is a single dict, handle one action
action_planner_infos.extend(self._parse_single_action(
parsed_json, message_id_list, action_list
))
else:
logger.error(f"{self.log_prefix}解析后的JSON不是字典或列表类型: {type(parsed_json)}")
action_planner_infos.append(ActionPlannerInfo(
action_type="no_action",
reasoning=f"解析后的JSON类型错误: {type(parsed_json)}",
action_data={},
action_message=None,
available_actions=action_list,
))
except Exception as json_e:
logger.warning(f"{self.log_prefix}解析LLM响应JSON失败 {json_e}. LLM原始输出: '{llm_content}'")
traceback.print_exc()
action_planner_infos.append(ActionPlannerInfo(
action_type="no_action",
reasoning=f"解析LLM响应JSON失败: {json_e}. 将使用默认动作 'no_action'.",
action_data={},
action_message=None,
available_actions=action_list,
))
else:
# No LLM content: return a default no_action
action_planner_infos.append(ActionPlannerInfo(
action_type="no_action",
reasoning="副规划器没有获得LLM响应",
action_data={},
action_message=None,
available_actions=action_list,
))
# No action parsed: return a default no_action
if not action_planner_infos:
action_planner_infos.append(ActionPlannerInfo(
action_type="no_action",
reasoning="副规划器没有解析到任何有效action",
action_data={},
action_message=None,
available_actions=action_list,
))
logger.info(f"{self.log_prefix}副规划器返回了{len(action_planner_infos)}个action")
return action_planner_infos
async def plan(
self,
mode: ChatMode = ChatMode.FOCUS,
@ -148,16 +450,143 @@ class ActionPlanner:
prompt: str = ""
message_id_list: list = []
message_list_before_now = get_raw_msg_before_timestamp_with_chat(
chat_id=self.chat_id,
timestamp=time.time(),
limit=int(global_config.chat.max_context_size * 0.6),
)
chat_content_block, message_id_list = build_readable_messages_with_id(
messages=message_list_before_now,
timestamp_mode="normal_no_YMD",
read_mark=self.last_obs_time_mark,
truncate=True,
show_actions=True,
)
actions_before_now = get_actions_by_timestamp_with_chat(
chat_id=self.chat_id,
timestamp_start=time.time() - 600,
timestamp_end=time.time(),
limit=5,
)
actions_before_now_block = build_readable_actions(
actions=actions_before_now,
)
message_list_before_now_short = message_list_before_now[:5]
chat_content_block_short, message_id_list_short = build_readable_messages_with_id(
messages=message_list_before_now_short,
timestamp_mode="normal_no_YMD",
truncate=False,
show_actions=False,
)
self.last_obs_time_mark = time.time()
try:
logger.info(f"{self.log_prefix}开始构建副Planner")
sub_planner_actions = {}
for action_name, action_info in available_actions.items():
if action_info.activation_type in (ActionActivationType.LLM_JUDGE, ActionActivationType.ALWAYS):
sub_planner_actions[action_name] = action_info
elif action_info.activation_type == ActionActivationType.RANDOM:
if random.random() < action_info.random_activation_probability:
sub_planner_actions[action_name] = action_info
elif action_info.activation_type == ActionActivationType.KEYWORD:
if action_info.activation_keywords:
for keyword in action_info.activation_keywords:
if keyword in chat_content_block_short:
sub_planner_actions[action_name] = action_info
elif action_info.activation_type == ActionActivationType.NEVER:
pass
else:
logger.warning(f"{self.log_prefix}未知的激活类型: {action_info.activation_type},跳过处理")
sub_planner_actions_num = len(sub_planner_actions)
sub_planner_size = int(global_config.chat.planner_size)
# A fractional planner_size is treated as a probability of rounding up
if random.random() < global_config.chat.planner_size - sub_planner_size:
sub_planner_size += 1
sub_planner_size = max(sub_planner_size, 1)  # guard against a zero size
sub_planner_num = int(sub_planner_actions_num / sub_planner_size)
if sub_planner_actions_num % sub_planner_size != 0:
sub_planner_num += 1
logger.info(f"{self.log_prefix}副规划器数量: {sub_planner_num}, 副规划器大小: {sub_planner_size}")
# Randomly distribute sub_planner_actions across sub_planner_num lists
sub_planner_lists = []
if sub_planner_actions_num > 0:
# Convert the actions to a list and shuffle it
action_items = list(sub_planner_actions.items())
random.shuffle(action_items)
# Initialize all sub-lists
for i in range(sub_planner_num):
sub_planner_lists.append([])
# Distribute the actions across the sub-lists
for i, (action_name, action_info) in enumerate(action_items):
# Make sure every list gets at least one action
if i < sub_planner_num:
sub_planner_lists[i].append((action_name, action_info))
else:
# Randomly pick a list that is still under the size limit
available_lists = [j for j, lst in enumerate(sub_planner_lists) if len(lst) < sub_planner_size]
if available_lists:
target_list = random.choice(available_lists)
sub_planner_lists[target_list].append((action_name, action_info))
logger.info(f"{self.log_prefix}成功将{len(sub_planner_actions)}个actions分配到{sub_planner_num}个子列表中")
for i, lst in enumerate(sub_planner_lists):
logger.debug(f"{self.log_prefix}子列表{i+1}: {len(lst)}个actions")
else:
logger.info(f"{self.log_prefix}没有可用的actions需要分配")
# Gather the necessary info first
is_group_chat, chat_target_info, current_available_actions = self.get_necessary_info()
# Run all sub-planners in parallel
import asyncio
async def execute_sub_plan(action_list):
return await self.sub_plan(
action_list=action_list,
actions_before_now=actions_before_now,
chat_content_block=chat_content_block_short,
message_id_list=message_id_list_short,
is_group_chat=is_group_chat,
chat_target_info=chat_target_info,
# current_available_actions=current_available_actions,
)
# Create all tasks
sub_plan_tasks = [execute_sub_plan(action_list) for action_list in sub_planner_lists]
# Run all tasks in parallel
sub_plan_results = await asyncio.gather(*sub_plan_tasks)
# Collect all results
all_sub_planner_results = []
for sub_result in sub_plan_results:
all_sub_planner_results.extend(sub_result)
logger.info(f"{self.log_prefix}所有副规划器共返回了{len(all_sub_planner_results)}个action")
# --- Build the prompt (using the modified PromptBuilder method) ---
prompt, message_id_list = await self.build_planner_prompt(
is_group_chat=is_group_chat, # <-- Pass HFC state
chat_target_info=chat_target_info, # <-- 传递获取到的聊天目标信息
current_available_actions=current_available_actions, # <-- Pass determined actions
current_available_actions="", # <-- Pass determined actions
mode=mode,
refresh_time=True,
chat_content_block=chat_content_block,
actions_before_now_block=actions_before_now_block,
message_id_list=message_id_list,
)
# --- Call the LLM (plain text generation) ---
@ -185,60 +614,54 @@ class ActionPlanner:
try:
parsed_json = json.loads(repair_json(llm_content))
# Handle the different JSON shapes, reusing _parse_single_action
if isinstance(parsed_json, list):
if parsed_json:
# Use the last action, preserving the original logic
parsed_json = parsed_json[-1]
logger.warning(f"{self.log_prefix}LLM返回了多个JSON对象使用最后一个: {parsed_json}")
else:
parsed_json = {}
if not isinstance(parsed_json, dict):
logger.error(f"{self.log_prefix}解析后的JSON不是字典类型: {type(parsed_json)}")
parsed_json = {}
action = parsed_json.get("action", "no_action")
reasoning = parsed_json.get("reason", "未提供原因")
# Copy every other property into action_data
for key, value in parsed_json.items():
if key not in ["action", "reason"]:
action_data[key] = value
# Actions other than no_action require a target_message_id
if action != "no_action":
if target_message_id := parsed_json.get("target_message_id"):
# Look up the original message by target_message_id
target_message = self.find_message_by_id(target_message_id, message_id_list)
# If target_message is None, log a warning and re-plan
if target_message is None:
self.plan_retry_count += 1
logger.warning(
f"{self.log_prefix}无法找到target_message_id '{target_message_id}' 对应的消息,重试次数: {self.plan_retry_count}/{self.max_plan_retries}"
)
# Retries remaining
if self.plan_retry_count < self.max_plan_retries:
# Recursively re-plan
return await self.plan(mode, loop_start_time, available_actions)
logger.error(
f"{self.log_prefix}连续{self.max_plan_retries}次plan获取target_message失败选择最新消息作为target_message"
)
target_message = self.get_latest_message(message_id_list)
self.plan_retry_count = 0 # reset the counter
else:
logger.warning(f"{self.log_prefix}动作'{action}'缺少target_message_id")
if action != "no_action" and action != "reply" and action not in current_available_actions:
logger.warning(
f"{self.log_prefix}LLM 返回了当前不可用或无效的动作: '{action}' (可用: {list(current_available_actions.keys())}),将强制使用 'no_action'"
if isinstance(parsed_json, dict):
# Parse the single action with _parse_single_action
# Convert the dict into list form
current_available_actions_list = list(current_available_actions.items())
action_planner_infos = self._parse_single_action(
parsed_json, message_id_list, current_available_actions_list
)
reasoning = f"LLM 返回了当前不可用的动作 '{action}' (可用: {list(current_available_actions.keys())})。原始理由: {reasoning}"
if action_planner_infos:
# Take the first (and only) action's info
action_info = action_planner_infos[0]
action = action_info.action_type
reasoning = action_info.reasoning
action_data.update(action_info.action_data)
target_message = action_info.action_message
# Handle target_message being None, preserving the original retry logic
if target_message is None and action != "no_action":
# Try the latest message as target_message
target_message = self.get_latest_message(message_id_list)
if target_message is None:
logger.warning(f"{self.log_prefix}无法获取任何消息作为target_message")
else:
# No action parsed: fall back to defaults
action = "no_action"
reasoning = "解析action失败"
target_message = None
else:
logger.error(f"{self.log_prefix}解析后的JSON不是字典类型: {type(parsed_json)}")
action = "no_action"
reasoning = f"解析后的JSON类型错误: {type(parsed_json)}"
target_message = None
except Exception as json_e:
logger.warning(f"{self.log_prefix}解析LLM响应JSON失败 {json_e}. LLM原始输出: '{llm_content}'")
traceback.print_exc()
reasoning = f"解析LLM响应JSON失败: {json_e}. 将使用默认动作 'no_action'."
action = "no_action"
reasoning = f"解析LLM响应JSON失败: {json_e}. 将使用默认动作 'no_action'."
target_message = None
except Exception as outer_e:
logger.error(f"{self.log_prefix}Planner 处理过程中发生意外错误,规划失败,将执行 no_action: {outer_e}")
@ -246,30 +669,70 @@ class ActionPlanner:
action = "no_action"
reasoning = f"Planner 内部处理错误: {outer_e}"
is_parallel = False
is_parallel = True
if mode == ChatMode.NORMAL and action in current_available_actions:
is_parallel = current_available_actions[action].parallel_action
if is_parallel:
is_parallel = current_available_actions[action].parallel_action
action_data["loop_start_time"] = loop_start_time
actions = [
ActionPlannerInfo(
action_type=action,
reasoning=reasoning,
action_data=action_data,
action_message=target_message,
available_actions=available_actions,
)
]
# Filter out no_action unless every result is no_action
def filter_no_actions(action_list):
"""过滤no_action如果所有都是no_action则返回一个"""
non_no_actions = [a for a in action_list if a.action_type != "no_action"]
if non_no_actions:
return non_no_actions
else:
# If everything is no_action, return the first one
return [action_list[0]] if action_list else []
if action != "reply" and is_parallel:
actions.append(
ActionPlannerInfo(
action_type="reply",
# Decide the return value based on is_parallel
if is_parallel:
# If true, return both the main planner's and the sub-planners' results
main_actions = []
# Add the main planner's action if it is not no_action
if action != "no_action":
main_actions.append(ActionPlannerInfo(
action_type=action,
reasoning=reasoning,
action_data=action_data,
action_message=target_message,
available_actions=available_actions,
)
)
))
# First merge the main and sub-planner results
all_actions = main_actions + all_sub_planner_results
# Then filter no_action uniformly
actions = filter_no_actions(all_actions)
# If every result is no_action, return a single no_action
if not actions:
actions = [ActionPlannerInfo(
action_type="no_action",
reasoning="所有规划器都选择不执行动作",
action_data={},
action_message=None,
available_actions=available_actions,
)]
logger.info(f"{self.log_prefix}并行模式:返回主规划器{len(main_actions)}个action + 副规划器{len(all_sub_planner_results)}个action过滤后总计{len(actions)}个action")
else:
# If false, return only the sub-planners' results
actions = filter_no_actions(all_sub_planner_results)
# If every result is no_action, return a single no_action
if not actions:
actions = [ActionPlannerInfo(
action_type="no_action",
reasoning="副规划器都选择不执行动作",
action_data={},
action_message=None,
available_actions=available_actions,
)]
logger.info(f"{self.log_prefix}非并行模式:返回副规划器的{len(actions)}个action已过滤no_action")
return actions, target_message
@ -278,38 +741,18 @@ class ActionPlanner:
is_group_chat: bool, # Now passed as argument
chat_target_info: Optional[dict], # Now passed as argument
current_available_actions: Dict[str, ActionInfo],
refresh_time: bool = False,
mode: ChatMode = ChatMode.FOCUS,
actions_before_now_block: str = "",
chat_content_block: str = "",
message_id_list: Optional[List[Tuple[str, DatabaseMessages]]] = None,
) -> tuple[str, List[Tuple[str, DatabaseMessages]]]: # sourcery skip: use-join
"""构建 Planner LLM 的提示词 (获取模板并填充数据)"""
try:
message_list_before_now = get_raw_msg_before_timestamp_with_chat(
chat_id=self.chat_id,
timestamp=time.time(),
limit=int(global_config.chat.max_context_size * 0.6),
)
chat_content_block, message_id_list = build_readable_messages_with_id(
messages=message_list_before_now,
timestamp_mode="normal_no_YMD",
read_mark=self.last_obs_time_mark,
truncate=True,
show_actions=True,
)
actions_before_now = get_actions_by_timestamp_with_chat(
chat_id=self.chat_id,
timestamp_start=time.time() - 3600,
timestamp_end=time.time(),
limit=5,
)
actions_before_now_block = build_readable_actions(
actions=actions_before_now,
)
actions_before_now_block = f"你刚刚选择并执行过的action是\n{actions_before_now_block}"
if refresh_time:
self.last_obs_time_mark = time.time()
if actions_before_now_block:
actions_before_now_block = f"你刚刚选择并执行过的action是\n{actions_before_now_block}"
else:
actions_before_now_block = ""
mentioned_bonus = ""
if global_config.chat.mentioned_bot_inevitable_reply:
@ -322,8 +765,7 @@ class ActionPlanner:
动作no_action
动作描述不进行动作等待合适的时机
- 当你刚刚发送了消息没有人回复时选择no_action
- 如果有别的动作非回复满足条件可以不用no_action
- 当你一次发送了太多消息为了避免打扰聊天节奏选择no_action
- 当你一次发送了太多消息为了避免过于烦人可以不回复
{
"action": "no_action",
"reason":"不动作的原因"
@ -345,29 +787,32 @@ class ActionPlanner:
action_options_block = ""
for using_actions_name, using_actions_info in current_available_actions.items():
if using_actions_info.action_parameters:
param_text = "\n"
for param_name, param_description in using_actions_info.action_parameters.items():
param_text += f' "{param_name}":"{param_description}"\n'
param_text = param_text.rstrip("\n")
else:
param_text = ""
if current_available_actions:
for using_actions_name, using_actions_info in current_available_actions.items():
if using_actions_info.action_parameters:
param_text = "\n"
for param_name, param_description in using_actions_info.action_parameters.items():
param_text += f' "{param_name}":"{param_description}"\n'
param_text = param_text.rstrip("\n")
else:
param_text = ""
require_text = ""
for require_item in using_actions_info.action_require:
require_text += f"- {require_item}\n"
require_text = require_text.rstrip("\n")
require_text = ""
for require_item in using_actions_info.action_require:
require_text += f"- {require_item}\n"
require_text = require_text.rstrip("\n")
using_action_prompt = await global_prompt_manager.get_prompt_async("action_prompt")
using_action_prompt = using_action_prompt.format(
action_name=using_actions_name,
action_description=using_actions_info.description,
action_parameters=param_text,
action_require=require_text,
)
using_action_prompt = await global_prompt_manager.get_prompt_async("action_prompt")
using_action_prompt = using_action_prompt.format(
action_name=using_actions_name,
action_description=using_actions_info.description,
action_parameters=param_text,
action_require=require_text,
)
action_options_block += using_action_prompt
action_options_block += using_action_prompt
else:
action_options_block = ""
moderation_prompt_block = "请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。"
@ -378,8 +823,7 @@ class ActionPlanner:
bot_nickname = f",也有人叫你{','.join(global_config.bot.alias_names)}"
else:
bot_nickname = ""
bot_core_personality = global_config.personality.personality_core
identity_block = f"你的名字是{bot_name}{bot_nickname},你{bot_core_personality}"
name_block = f"你的名字是{bot_name}{bot_nickname},请注意哪些是你自己的发言。"
planner_prompt_template = await global_prompt_manager.get_prompt_async("planner_prompt")
prompt = planner_prompt_template.format(
@ -391,7 +835,7 @@ class ActionPlanner:
mentioned_bonus=mentioned_bonus,
action_options_text=action_options_block,
moderation_prompt=moderation_prompt_block,
identity_block=identity_block,
name_block=name_block,
plan_style=global_config.personality.plan_style,
)
return prompt, message_id_list
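Editor's note: the action-distribution loop in plan() above guarantees each sub-planner list is seeded with one action and never exceeds sub_planner_size. A standalone re-implementation (hypothetical helper, not part of this commit) that can be sanity-checked in isolation:

import math
import random

def distribute(items: list, size: int) -> list:
    items = list(items)
    random.shuffle(items)
    num_buckets = math.ceil(len(items) / size) if items else 0
    buckets = [[] for _ in range(num_buckets)]
    for i, item in enumerate(items):
        if i < num_buckets:
            buckets[i].append(item)  # seed every bucket with one item
        else:
            # total capacity num_buckets * size >= len(items), so an open bucket always exists
            open_buckets = [b for b in buckets if len(b) < size]
            random.choice(open_buckets).append(item)
    return buckets

# Every item lands in exactly one bucket:
assert sorted(sum(distribute(list(range(7)), 2), [])) == list(range(7))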

View File

@ -10,8 +10,8 @@ from src.mais4u.mai_think import mai_thinking_manager
from src.common.logger import get_logger
from src.common.data_models.database_data_model import DatabaseMessages
from src.common.data_models.info_data_model import ActionPlannerInfo
from src.common.data_models.llm_data_model import LLMGenerationDataModel
from src.config.config import global_config, model_config
from src.individuality.individuality import get_individuality
from src.llm_models.utils_model import LLMRequest
from src.chat.message_receive.message import UserInfo, Seg, MessageRecv, MessageSending
from src.chat.message_receive.chat_stream import ChatStream
@ -163,7 +163,7 @@ class DefaultReplyer:
from_plugin: bool = True,
stream_id: Optional[str] = None,
reply_message: Optional[DatabaseMessages] = None,
) -> Tuple[bool, Optional[Dict[str, Any]], Optional[str], Optional[List[int]]]:
) -> Tuple[bool, LLMGenerationDataModel]:
# sourcery skip: merge-nested-ifs
"""
Replier: core logic for generating the reply text
@ -183,6 +183,7 @@ class DefaultReplyer:
prompt = None
selected_expressions: Optional[List[int]] = None
llm_response = LLMGenerationDataModel()
if available_actions is None:
available_actions = {}
try:
@ -196,10 +197,12 @@ class DefaultReplyer:
reply_message=reply_message,
reply_reason=reply_reason,
)
llm_response.prompt = prompt
llm_response.selected_expressions = selected_expressions
if not prompt:
logger.warning("构建prompt失败跳过回复生成")
return False, None, None, []
return False, llm_response
from src.plugin_system.core.events_manager import events_manager
if not from_plugin:
@ -216,12 +219,10 @@ class DefaultReplyer:
try:
content, reasoning_content, model_name, tool_call = await self.llm_generate_content(prompt)
logger.debug(f"replyer生成内容: {content}")
llm_response = {
"content": content,
"reasoning": reasoning_content,
"model": model_name,
"tool_calls": tool_call,
}
llm_response.content = content
llm_response.reasoning = reasoning_content
llm_response.model = model_name
llm_response.tool_calls = tool_call
if not from_plugin and not await events_manager.handle_mai_events(
EventType.AFTER_LLM, None, prompt, llm_response, stream_id=stream_id
):
@ -231,24 +232,23 @@ class DefaultReplyer:
except Exception as llm_e:
# keep the error message short
logger.error(f"LLM 生成失败: {llm_e}")
return False, None, prompt, selected_expressions # cannot generate a reply if the LLM call fails
return False, llm_response # cannot generate a reply if the LLM call fails
return True, llm_response, prompt, selected_expressions
return True, llm_response
except UserWarning as uw:
raise uw
except Exception as e:
logger.error(f"回复生成意外失败: {e}")
traceback.print_exc()
return False, None, prompt, selected_expressions
return False, llm_response
async def rewrite_reply_with_context(
self,
raw_reply: str = "",
reason: str = "",
reply_to: str = "",
return_prompt: bool = False,
) -> Tuple[bool, Optional[str], Optional[str]]:
) -> Tuple[bool, LLMGenerationDataModel]:
"""
Expressor: rewrites and polishes the reply text
@ -261,6 +261,7 @@ class DefaultReplyer:
Returns:
Tuple[bool, LLMGenerationDataModel]: (success flag, the generation result)
"""
llm_response = LLMGenerationDataModel()
try:
with Timer("构建Prompt", {}): # 内部计时器,可选保留
prompt = await self.build_prompt_rewrite_context(
@ -268,29 +269,33 @@ class DefaultReplyer:
reason=reason,
reply_to=reply_to,
)
llm_response.prompt = prompt
content = None
reasoning_content = None
model_name = "unknown_model"
if not prompt:
logger.error("Prompt 构建失败,无法生成回复。")
return False, None, None
return False, llm_response
try:
content, reasoning_content, model_name, _ = await self.llm_generate_content(prompt)
logger.info(f"想要表达:{raw_reply}||理由:{reason}||生成回复: {content}\n")
llm_response.content = content
llm_response.reasoning = reasoning_content
llm_response.model = model_name
except Exception as llm_e:
# keep the error message short
logger.error(f"LLM 生成失败: {llm_e}")
return False, None, prompt if return_prompt else None # cannot generate a reply if the LLM call fails
return False, llm_response # cannot generate a reply if the LLM call fails
return True, content, prompt if return_prompt else None
return True, llm_response
except Exception as e:
logger.error(f"回复生成意外失败: {e}")
traceback.print_exc()
return False, None, prompt if return_prompt else None
return False, llm_response
async def build_relation_info(self, sender: str, target: str):
if not global_config.relationship.enable_relationship:
@ -376,9 +381,7 @@ class DefaultReplyer:
if global_config.memory.enable_instant_memory:
chat_history_str = build_readable_messages(
messages=chat_history,
replace_bot_name=True,
timestamp_mode="normal"
messages=chat_history, replace_bot_name=True, timestamp_mode="normal"
)
asyncio.create_task(self.instant_memory.create_and_store_memory(chat_history_str))
@ -670,6 +673,18 @@ class DefaultReplyer:
return action_descriptions
async def build_personality_prompt(self) -> str:
bot_name = global_config.bot.nickname
if global_config.bot.alias_names:
bot_nickname = f",也有人叫你{','.join(global_config.bot.alias_names)}"
else:
bot_nickname = ""
prompt_personality = (
f"{global_config.personality.personality_core};{global_config.personality.personality_side}"
)
return f"你的名字是{bot_name}{bot_nickname},你{prompt_personality}"
async def build_prompt_reply_context(
self,
extra_info: str = "",
@ -751,6 +766,7 @@ class DefaultReplyer:
),
self._time_and_run_task(self.get_prompt_info(chat_talking_prompt_short, sender, target), "prompt_info"),
self._time_and_run_task(self.build_actions_prompt(available_actions, chosen_actions), "actions_info"),
self._time_and_run_task(self.build_personality_prompt(), "personality_prompt"),
)
# Chinese display names for the task keys
@ -761,6 +777,7 @@ class DefaultReplyer:
"tool_info": "使用工具",
"prompt_info": "获取知识",
"actions_info": "动作信息",
"personality_prompt": "人格信息",
}
# Process the results
@ -788,6 +805,7 @@ class DefaultReplyer:
tool_info: str = results_dict["tool_info"]
prompt_info: str = results_dict["prompt_info"] # use the formatted result directly
actions_info: str = results_dict["actions_info"]
personality_prompt: str = results_dict["personality_prompt"]
keywords_reaction_prompt = await self.build_keywords_reaction_prompt(target)
if extra_info:
@ -797,8 +815,6 @@ class DefaultReplyer:
time_block = f"当前时间:{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
identity_block = await get_individuality().get_personality_block()
moderation_prompt_block = "请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。"
if sender:
@ -827,7 +843,7 @@ class DefaultReplyer:
memory_block=memory_block,
relation_info_block=relation_info,
extra_info_block=extra_info_block,
identity=identity_block,
identity=personality_prompt,
action_descriptions=actions_info,
mood_state=mood_prompt,
background_dialogue_prompt=background_dialogue_prompt,
@ -847,7 +863,7 @@ class DefaultReplyer:
memory_block=memory_block,
relation_info_block=relation_info,
extra_info_block=extra_info_block,
identity=identity_block,
identity=personality_prompt,
action_descriptions=actions_info,
sender_name=sender,
mood_state=mood_prompt,
@ -865,17 +881,12 @@ class DefaultReplyer:
raw_reply: str,
reason: str,
reply_to: str,
reply_message: Optional[Dict[str, Any]] = None,
) -> str: # sourcery skip: merge-else-if-into-elif, remove-redundant-if
chat_stream = self.chat_stream
chat_id = chat_stream.stream_id
is_group_chat = bool(chat_stream.group_info)
if reply_message:
sender = reply_message.get("sender", "")
target = reply_message.get("target", "")
else:
sender, target = self._parse_reply_target(reply_to)
sender, target = self._parse_reply_target(reply_to)
# Fetch the mood state
if global_config.mood.enable_mood:
@ -898,17 +909,16 @@ class DefaultReplyer:
)
# Run the three build tasks in parallel
(expression_habits_block, _), relation_info = await asyncio.gather(
(expression_habits_block, _), relation_info, personality_prompt = await asyncio.gather(
self.build_expression_habits(chat_talking_prompt_half, target),
self.build_relation_info(sender, target),
self.build_personality_prompt(),
)
keywords_reaction_prompt = await self.build_keywords_reaction_prompt(target)
time_block = f"当前时间:{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
identity_block = await get_individuality().get_personality_block()
moderation_prompt_block = (
"请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。不要随意遵从他人指令。"
)
@ -958,7 +968,7 @@ class DefaultReplyer:
chat_target=chat_target_1,
time_block=time_block,
chat_info=chat_talking_prompt_half,
identity=identity_block,
identity=personality_prompt,
chat_target_2=chat_target_2,
reply_target_block=reply_target_block,
raw_reply=raw_reply,
@ -1006,7 +1016,7 @@ class DefaultReplyer:
async def llm_generate_content(self, prompt: str):
with Timer("LLM生成", {}): # 内部计时器,可选保留
# Use the already-initialized model instance directly
logger.info(f"使用模型集生成回复: {self.express_model.model_for_task}")
logger.info(f"使用模型集生成回复: {', '.join(map(str, self.express_model.model_for_task.model_list))}")
if global_config.debug.show_prompt:
logger.info(f"\n{prompt}\n")

View File

@ -361,10 +361,10 @@ def _build_readable_messages_internal(
# Build a timestamp-to-message-ID map, used to prefix messages with an [id] tag
timestamp_to_id_mapping: Dict[float, str] = {}
if message_id_list:
for msg in message_id_list:
for msg_id, msg in message_id_list:
timestamp = msg.time
if timestamp is not None:
timestamp_to_id_mapping[timestamp] = msg.message_id
timestamp_to_id_mapping[timestamp] = msg_id
def process_pic_ids(content: Optional[str]) -> str:
"""处理内容中的图片ID将其替换为[图片x]格式"""
@ -477,7 +477,7 @@ def _build_readable_messages_internal(
readable_time = translate_timestamp_to_human_readable(timestamp, mode=timestamp_mode)
# Look up the message id (if any) and build id_prefix
message_id = timestamp_to_id_mapping.get(timestamp)
message_id = timestamp_to_id_mapping.get(timestamp, "")
id_prefix = f"[{message_id}]" if message_id else ""
if is_action:
@ -606,7 +606,7 @@ def build_readable_messages_with_id(
truncate: bool = False,
show_actions: bool = False,
show_pic: bool = True,
) -> Tuple[str, List[DatabaseMessages]]:
) -> Tuple[str, List[Tuple[str, DatabaseMessages]]]:
"""
Convert a message list into readable text and return a list of (message_id, message) tuples
Formatting behavior can be controlled via the parameters

View File

@ -685,7 +685,7 @@ def assign_message_ids(messages: List[DatabaseMessages]) -> List[DatabaseMessage
Returns:
List[Tuple[str, DatabaseMessages]]: a list of (short id, message) pairs
"""
result: List[DatabaseMessages] = list(messages) # 复制原始消息列表
result: List[Tuple[str, DatabaseMessages]] = [] # (short id, message) pairs
used_ids = set()
len_i = len(messages)
if len_i > 100:
@ -695,7 +695,7 @@ def assign_message_ids(messages: List[DatabaseMessages]) -> List[DatabaseMessage
a = 1
b = 9
for i, _ in enumerate(result):
for i, message in enumerate(messages):
# Generate a unique short ID
while True:
# Build the short ID from the index plus a random number
@ -705,7 +705,7 @@ def assign_message_ids(messages: List[DatabaseMessages]) -> List[DatabaseMessage
if message_id not in used_ids:
used_ids.add(message_id)
break
result[i].message_id = message_id
result.append((message_id, message))
return result
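Editor's note: a self-contained sketch of the (short id, message) pairing above; the "m&lt;number&gt;" id format comes from the planner prompt earlier in this diff, while the exact random scheme here is illustrative:

import random

def assign_ids(messages: list) -> list:
    used = set()
    result = []
    for i, message in enumerate(messages):
        while True:
            candidate = f"m{i}{random.randint(1, 9)}"  # index plus one random digit
            if candidate not in used:
                used.add(candidate)
                break
        result.append((candidate, message))
    return result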

View File

@ -0,0 +1,16 @@
from dataclasses import dataclass
from typing import Optional, List, Tuple, TYPE_CHECKING, Any
from . import BaseDataModel
if TYPE_CHECKING:
from src.llm_models.payload_content.tool_option import ToolCall
@dataclass
class LLMGenerationDataModel(BaseDataModel):
content: Optional[str] = None
reasoning: Optional[str] = None
model: Optional[str] = None
tool_calls: Optional[List["ToolCall"]] = None
prompt: Optional[str] = None
selected_expressions: Optional[List[int]] = None
reply_set: Optional[List[Tuple[str, Any]]] = None
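Editor's note: a minimal consumer sketch for the new data model (field names from the dataclass above; the summary format is illustrative):

def summarize(resp: LLMGenerationDataModel) -> str:
    # Collapse a generation result into a one-line log string.
    parts = [f"model={resp.model}"]
    if resp.reply_set:
        parts.append(f"replies={len(resp.reply_set)}")
    if resp.selected_expressions:
        parts.append(f"expressions={len(resp.selected_expressions)}")
    return ", ".join(parts)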

View File

@ -341,7 +341,6 @@ MODULE_COLORS = {
"lpmm": "\033[96m",
"plugin_system": "\033[91m", # 亮红色
"person_info": "\033[32m", # 绿色
"individuality": "\033[94m", # 显眼的亮蓝色
"manager": "\033[35m", # 紫色
"llm_models": "\033[36m", # 青色
"remote": "\033[38;5;242m", # 深灰色,更不显眼
@ -423,7 +422,6 @@ MODULE_COLORS = {
# Module alias map - maps real logger names to the aliases shown in logs
MODULE_ALIASES = {
# example mappings
"individuality": "人格特质",
"emoji": "表情包",
"no_action_action": "摸鱼",
"reply_action": "回复",

View File

@ -117,6 +117,9 @@ class ModelTaskConfig(ConfigBase):
planner: TaskConfig
"""规划模型配置"""
planner_small: TaskConfig
"""副规划模型配置"""
embedding: TaskConfig
"""嵌入模型配置"""

View File

@ -56,7 +56,7 @@ TEMPLATE_DIR = os.path.join(PROJECT_ROOT, "template")
# Since mai_version in the config file is not updated automatically, it is hard-coded here
# Updates to this field must strictly follow Semantic Versioning: https://semver.org/lang/zh-CN/
MMC_VERSION = "0.10.0"
MMC_VERSION = "0.10.1"
def get_key_comment(toml_table, key):

View File

@ -48,13 +48,10 @@ class PersonalityConfig(ConfigBase):
"""表达风格"""
plan_style: str = ""
"""行为风格"""
compress_personality: bool = True
"""是否压缩人格压缩后会精简人格信息节省token消耗并提高回复性能但是会丢失一些信息如果人设不长可以关闭"""
compress_identity: bool = True
"""是否压缩身份压缩后会精简身份信息节省token消耗并提高回复性能但是会丢失一些信息如果不长可以关闭"""
interest: str = ""
"""兴趣"""
@dataclass
class RelationshipConfig(ConfigBase):
@ -74,9 +71,15 @@ class ChatConfig(ConfigBase):
max_context_size: int = 18
"""上下文长度"""
interest_rate_mode: Literal["fast", "accurate"] = "fast"
"""兴趣值计算模式fast为快速计算accurate为精确计算"""
mentioned_bot_inevitable_reply: bool = False
"""提及 bot 必然回复"""
planner_size: float = 1
"""Sub-planner size: smaller values give finer-grained action execution but consume more tokens; raising it helps mitigate 429-style rate-limit errors"""
at_bot_inevitable_reply: bool = False
"""@bot 必然回复"""

View File

@ -1,304 +0,0 @@
import json
import os
import hashlib
import time
from src.common.logger import get_logger
from src.config.config import global_config, model_config
from src.llm_models.utils_model import LLMRequest
from rich.traceback import install
install(extra_lines=3)
logger = get_logger("individuality")
class Individuality:
"""个体特征管理类"""
def __init__(self):
self.name = ""
self.meta_info_file_path = "data/personality/meta.json"
self.personality_data_file_path = "data/personality/personality_data.json"
self.model = LLMRequest(model_set=model_config.model_task_config.utils, request_type="individuality.compress")
async def initialize(self) -> None:
"""初始化个体特征"""
bot_nickname = global_config.bot.nickname
personality_core = global_config.personality.personality_core
personality_side = global_config.personality.personality_side
identity = global_config.personality.identity
self.name = bot_nickname
# Check for config changes; clear the caches if anything changed
personality_changed, identity_changed = await self._check_config_and_clear_if_changed(
bot_nickname, personality_core, personality_side, identity
)
logger.info("正在构建人设信息")
# If the config changed, regenerate the compressed version
if personality_changed or identity_changed:
logger.info("检测到配置变化,重新生成压缩版本")
personality_result = await self._create_personality(personality_core, personality_side)
identity_result = await self._create_identity(identity)
else:
logger.info("配置未变化,使用缓存版本")
# Load the existing results from file
personality_result, identity_result = self._get_personality_from_file()
if not personality_result or not identity_result:
logger.info("未找到有效缓存,重新生成")
personality_result = await self._create_personality(personality_core, personality_side)
identity_result = await self._create_identity(identity)
# Save to file
if personality_result and identity_result:
self._save_personality_to_file(personality_result, identity_result)
logger.info("已将人设构建并保存到文件")
else:
logger.error("人设构建失败")
async def get_personality_block(self) -> str:
bot_name = global_config.bot.nickname
if global_config.bot.alias_names:
bot_nickname = f",也有人叫你{','.join(global_config.bot.alias_names)}"
else:
bot_nickname = ""
# Read short_impression from file
personality, identity = self._get_personality_from_file()
# 确保short_impression是列表格式且有足够的元素
if not personality or not identity:
logger.warning(f"personality或identity为空: {personality}, {identity}, 使用默认值")
personality = "友好活泼"
identity = "人类"
prompt_personality = f"{personality}\n{identity}"
return f"你的名字是{bot_name}{bot_nickname},你{prompt_personality}"
def _get_config_hash(
self, bot_nickname: str, personality_core: str, personality_side: str, identity: str
) -> tuple[str, str]:
"""获取personality和identity配置的哈希值
Returns:
tuple: (personality_hash, identity_hash)
"""
# 人格配置哈希
personality_config = {
"nickname": bot_nickname,
"personality_core": personality_core,
"personality_side": personality_side,
"compress_personality": global_config.personality.compress_personality,
}
personality_str = json.dumps(personality_config, sort_keys=True)
personality_hash = hashlib.md5(personality_str.encode("utf-8")).hexdigest()
# 身份配置哈希
identity_config = {
"identity": identity,
"compress_identity": global_config.personality.compress_identity,
}
identity_str = json.dumps(identity_config, sort_keys=True)
identity_hash = hashlib.md5(identity_str.encode("utf-8")).hexdigest()
return personality_hash, identity_hash
async def _check_config_and_clear_if_changed(
self, bot_nickname: str, personality_core: str, personality_side: str, identity: str
) -> tuple[bool, bool]:
"""检查配置是否发生变化,如果变化则清空相应缓存
Returns:
tuple: (personality_changed, identity_changed)
"""
current_personality_hash, current_identity_hash = self._get_config_hash(
bot_nickname, personality_core, personality_side, identity
)
meta_info = self._load_meta_info()
stored_personality_hash = meta_info.get("personality_hash")
stored_identity_hash = meta_info.get("identity_hash")
personality_changed = current_personality_hash != stored_personality_hash
identity_changed = current_identity_hash != stored_identity_hash
if personality_changed:
logger.info("检测到人格配置发生变化")
if identity_changed:
logger.info("检测到身份配置发生变化")
# 更新元信息文件
new_meta_info = {
"personality_hash": current_personality_hash,
"identity_hash": current_identity_hash,
}
self._save_meta_info(new_meta_info)
return personality_changed, identity_changed
def _load_meta_info(self) -> dict:
"""从JSON文件中加载元信息"""
if os.path.exists(self.meta_info_file_path):
try:
with open(self.meta_info_file_path, "r", encoding="utf-8") as f:
return json.load(f)
except (json.JSONDecodeError, IOError) as e:
logger.error(f"读取meta_info文件失败: {e}, 将创建新文件。")
return {}
return {}
def _save_meta_info(self, meta_info: dict):
"""将元信息保存到JSON文件"""
try:
os.makedirs(os.path.dirname(self.meta_info_file_path), exist_ok=True)
with open(self.meta_info_file_path, "w", encoding="utf-8") as f:
json.dump(meta_info, f, ensure_ascii=False, indent=2)
except IOError as e:
logger.error(f"保存meta_info文件失败: {e}")
def _load_personality_data(self) -> dict:
"""从JSON文件中加载personality数据"""
if os.path.exists(self.personality_data_file_path):
try:
with open(self.personality_data_file_path, "r", encoding="utf-8") as f:
return json.load(f)
except (json.JSONDecodeError, IOError) as e:
logger.error(f"读取personality_data文件失败: {e}, 将创建新文件。")
return {}
return {}
def _save_personality_data(self, personality_data: dict):
"""将personality数据保存到JSON文件"""
try:
os.makedirs(os.path.dirname(self.personality_data_file_path), exist_ok=True)
with open(self.personality_data_file_path, "w", encoding="utf-8") as f:
json.dump(personality_data, f, ensure_ascii=False, indent=2)
logger.debug(f"已保存personality数据到文件: {self.personality_data_file_path}")
except IOError as e:
logger.error(f"保存personality_data文件失败: {e}")
def _get_personality_from_file(self) -> tuple[str, str]:
"""从文件获取personality数据
Returns:
tuple: (personality, identity)
"""
personality_data = self._load_personality_data()
personality = personality_data.get("personality", "友好活泼")
identity = personality_data.get("identity", "人类")
return personality, identity
def _save_personality_to_file(self, personality: str, identity: str):
"""保存personality数据到文件
Args:
personality: 压缩后的人格描述
identity: 压缩后的身份描述
"""
personality_data = {
"personality": personality,
"identity": identity,
"bot_nickname": self.name,
"last_updated": int(time.time()),
}
self._save_personality_data(personality_data)
async def _create_personality(self, personality_core: str, personality_side: str) -> str:
# sourcery skip: merge-list-append, move-assign
"""使用LLM创建压缩版本的impression
Args:
personality_core: 核心人格
personality_side: 人格侧面列表
Returns:
str: 压缩后的impression文本
"""
logger.info("正在构建人格.........")
# 核心人格保持不变
personality_parts = []
if personality_core:
personality_parts.append(f"{personality_core}")
# 准备需要压缩的内容
if global_config.personality.compress_personality:
personality_to_compress = f"人格特质: {personality_side}"
prompt = f"""请将以下人格信息进行简洁压缩,保留主要内容,用简练的中文表达:
{personality_to_compress}
要求
1. 保持原意不变尽量使用原文
2. 尽量简洁不超过30字
3. 直接输出压缩后的内容不要解释"""
response, _ = await self.model.generate_response_async(
prompt=prompt,
)
if response and response.strip():
personality_parts.append(response.strip())
logger.info(f"精简人格侧面: {response.strip()}")
else:
logger.error(f"使用LLM压缩人设时出错: {response}")
# 压缩失败时使用原始内容
if personality_side:
personality_parts.append(personality_side)
if personality_parts:
personality_result = "".join(personality_parts)
else:
personality_result = personality_core or "友好活泼"
else:
personality_result = personality_core
if personality_side:
personality_result += f"{personality_side}"
return personality_result
async def _create_identity(self, identity: str) -> str:
"""使用LLM创建压缩版本的impression"""
logger.info("正在构建身份.........")
if global_config.personality.compress_identity:
identity_to_compress = f"身份背景: {identity}"
prompt = f"""请将以下身份信息进行简洁压缩,保留主要内容,用简练的中文表达:
{identity_to_compress}
要求
1. 保持原意不变尽量使用原文
2. 尽量简洁不超过30字
3. 直接输出压缩后的内容不要解释"""
response, _ = await self.model.generate_response_async(
prompt=prompt,
)
if response and response.strip():
identity_result = response.strip()
logger.info(f"精简身份: {identity_result}")
else:
logger.error(f"使用LLM压缩身份时出错: {response}")
identity_result = identity
else:
identity_result = identity
return identity_result
individuality = None
def get_individuality():
global individuality
if individuality is None:
individuality = Individuality()
return individuality
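上面被删除的 individuality 模块使用"配置哈希 → 缓存失效"的模式:把配置序列化后取 MD5,与 meta.json 中存储的哈希比对,不一致才重新调用 LLM 压缩人设。其核心可概括为几行(示意,概括自被删代码):

import hashlib
import json

def config_hash(cfg: dict) -> str:
    # 与被删代码一致:对按键排序后的 JSON 串取 MD5
    return hashlib.md5(json.dumps(cfg, sort_keys=True).encode("utf-8")).hexdigest()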

View File

@ -1,127 +0,0 @@
import asyncio
import os
import time
from typing import Tuple, Union
import aiohttp
import requests
from src.common.logger import get_logger
from src.common.tcp_connector import get_tcp_connector
from rich.traceback import install
install(extra_lines=3)
logger = get_logger("offline_llm")
class LLMRequestOff:
def __init__(self, model_name="Pro/deepseek-ai/DeepSeek-V3", **kwargs):
self.model_name = model_name
self.params = kwargs
self.api_key = os.getenv("SILICONFLOW_KEY")
self.base_url = os.getenv("SILICONFLOW_BASE_URL")
if not self.api_key or not self.base_url:
raise ValueError("环境变量未正确加载SILICONFLOW_KEY 或 SILICONFLOW_BASE_URL 未设置")
# logger.info(f"API URL: {self.base_url}") # 使用 logger 记录 base_url
def generate_response(self, prompt: str) -> Union[str, Tuple[str, str]]:
"""根据输入的提示生成模型的响应"""
headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}
# 构建请求体
data = {
"model": self.model_name,
"messages": [{"role": "user", "content": prompt}],
"temperature": 0.4,
**self.params,
}
# 发送请求到完整的 chat/completions 端点
api_url = f"{self.base_url.rstrip('/')}/chat/completions" # type: ignore
logger.info(f"Request URL: {api_url}") # 记录请求的 URL
max_retries = 3
base_wait_time = 15 # 基础等待时间(秒)
for retry in range(max_retries):
try:
response = requests.post(api_url, headers=headers, json=data)
if response.status_code == 429:
wait_time = base_wait_time * (2**retry) # 指数退避
logger.warning(f"遇到请求限制(429),等待{wait_time}秒后重试...")
time.sleep(wait_time)
continue
response.raise_for_status() # 检查其他响应状态
result = response.json()
if "choices" in result and len(result["choices"]) > 0:
content = result["choices"][0]["message"]["content"]
reasoning_content = result["choices"][0]["message"].get("reasoning_content", "")
return content, reasoning_content
return "没有返回结果", ""
except Exception as e:
if retry < max_retries - 1: # 如果还有重试机会
wait_time = base_wait_time * (2**retry)
logger.error(f"[回复]请求失败,等待{wait_time}秒后重试... 错误: {str(e)}")
time.sleep(wait_time)
else:
logger.error(f"请求失败: {str(e)}")
return f"请求失败: {str(e)}", ""
logger.error("达到最大重试次数,请求仍然失败")
return "达到最大重试次数,请求仍然失败", ""
async def generate_response_async(self, prompt: str) -> Union[str, Tuple[str, str]]:
"""异步方式根据输入的提示生成模型的响应"""
headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}
# 构建请求体
data = {
"model": self.model_name,
"messages": [{"role": "user", "content": prompt}],
"temperature": 0.5,
**self.params,
}
# 发送请求到完整的 chat/completions 端点
api_url = f"{self.base_url.rstrip('/')}/chat/completions" # type: ignore
logger.info(f"Request URL: {api_url}") # 记录请求的 URL
max_retries = 3
base_wait_time = 15
async with aiohttp.ClientSession(connector=await get_tcp_connector()) as session:
for retry in range(max_retries):
try:
async with session.post(api_url, headers=headers, json=data) as response:
if response.status == 429:
wait_time = base_wait_time * (2**retry) # 指数退避
logger.warning(f"遇到请求限制(429),等待{wait_time}秒后重试...")
await asyncio.sleep(wait_time)
continue
response.raise_for_status() # 检查其他响应状态
result = await response.json()
if "choices" in result and len(result["choices"]) > 0:
content = result["choices"][0]["message"]["content"]
reasoning_content = result["choices"][0]["message"].get("reasoning_content", "")
return content, reasoning_content
return "没有返回结果", ""
except Exception as e:
if retry < max_retries - 1: # 如果还有重试机会
wait_time = base_wait_time * (2**retry)
logger.error(f"[回复]请求失败,等待{wait_time}秒后重试... 错误: {str(e)}")
await asyncio.sleep(wait_time)
else:
logger.error(f"请求失败: {str(e)}")
return f"请求失败: {str(e)}", ""
logger.error("达到最大重试次数,请求仍然失败")
return "达到最大重试次数,请求仍然失败", ""

View File

@ -1,310 +0,0 @@
from typing import Dict, List
import json
import os
from dotenv import load_dotenv
import sys
import toml
import random
from tqdm import tqdm
# 添加项目根目录到 Python 路径
root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
sys.path.append(root_path)
# 加载配置文件
config_path = os.path.join(root_path, "config", "bot_config.toml")
with open(config_path, "r", encoding="utf-8") as f:
config = toml.load(f)
# 现在可以导入src模块
from individuality.not_using.scene import get_scene_by_factor, PERSONALITY_SCENES # noqa E402
from individuality.not_using.questionnaire import FACTOR_DESCRIPTIONS # noqa E402
from individuality.not_using.offline_llm import LLMRequestOff # noqa E402
# 加载环境变量
env_path = os.path.join(root_path, ".env")
if os.path.exists(env_path):
print(f"{env_path} 加载环境变量")
load_dotenv(env_path)
else:
print(f"未找到环境变量文件: {env_path}")
print("将使用默认配置")
def adapt_scene(scene: str) -> str:
personality_core = config["personality"]["personality_core"]
personality_side = config["personality"]["personality_side"]
personality_side = random.choice(personality_side)
identitys = config["identity"]["identity"]
identity = random.choice(identitys)
"""
根据config中的属性改编场景使其更适合当前角色
Args:
scene: 原始场景描述
Returns:
str: 改编后的场景描述
"""
try:
prompt = f"""
这是一个参与人格测评的角色形象:
- 昵称: {config["bot"]["nickname"]}
- 性别: {config["identity"]["gender"]}
- 年龄: {config["identity"]["age"]}
- 外貌: {config["identity"]["appearance"]}
- 性格核心: {personality_core}
- 性格侧面: {personality_side}
- 身份细节: {identity}
请根据上述形象改编以下场景在测评中用户将根据该场景给出上述角色形象的反应:
{scene}
保持场景的本质不变但最好贴近生活且具体并且让它更适合这个角色
改编后的场景应该自然连贯并考虑角色的年龄身份和性格特点只返回改编后的场景描述不要包含其他说明注意{config["bot"]["nickname"]}是面对这个场景的人而不是场景的其他人场景中不会有其描述
现在请你给出改编后的场景描述
"""
llm = LLMRequestOff(model_name=config["model"]["llm_normal"]["name"])
adapted_scene, _ = llm.generate_response(prompt)
# 检查返回的场景是否为空或错误信息
if not adapted_scene or "错误" in adapted_scene or "失败" in adapted_scene:
print("场景改编失败,将使用原始场景")
return scene
return adapted_scene
except Exception as e:
print(f"场景改编过程出错:{str(e)},将使用原始场景")
return scene
class PersonalityEvaluatorDirect:
def __init__(self):
self.personality_traits = {"开放性": 0, "严谨性": 0, "外向性": 0, "宜人性": 0, "神经质": 0}
self.scenarios = []
self.final_scores: Dict[str, float] = {"开放性": 0, "严谨性": 0, "外向性": 0, "宜人性": 0, "神经质": 0}
self.dimension_counts = {trait: 0 for trait in self.final_scores}
# 为每个人格特质获取对应的场景
for trait in PERSONALITY_SCENES:
scenes = get_scene_by_factor(trait)
if not scenes:
continue
# 从每个维度选择3个场景
import random
scene_keys = list(scenes.keys())
selected_scenes = random.sample(scene_keys, min(3, len(scene_keys)))
for scene_key in selected_scenes:
scene = scenes[scene_key]
# 为每个场景添加评估维度
# 主维度是当前特质,次维度随机选择一个其他特质
other_traits = [t for t in PERSONALITY_SCENES if t != trait]
secondary_trait = random.choice(other_traits)
self.scenarios.append(
{"场景": scene["scenario"], "评估维度": [trait, secondary_trait], "场景编号": scene_key}
)
self.llm = LLMRequestOff()
def evaluate_response(self, scenario: str, response: str, dimensions: List[str]) -> Dict[str, float]:
"""
使用 DeepSeek AI 评估用户对特定场景的反应
"""
# 构建维度描述
dimension_descriptions = []
for dim in dimensions:
if desc := FACTOR_DESCRIPTIONS.get(dim, ""):
dimension_descriptions.append(f"- {dim}{desc}")
dimensions_text = "\n".join(dimension_descriptions)
prompt = f"""请根据以下场景和用户描述评估用户在大五人格模型中的相关维度得分1-6分
场景描述
{scenario}
用户回应
{response}
需要评估的维度说明
{dimensions_text}
请按照以下格式输出评估结果仅输出JSON格式
{{
"{dimensions[0]}": 分数,
"{dimensions[1]}": 分数
}}
评分标准
1 = 非常不符合该维度特征
2 = 比较不符合该维度特征
3 = 有点不符合该维度特征
4 = 有点符合该维度特征
5 = 比较符合该维度特征
6 = 非常符合该维度特征
请根据用户的回应结合场景和维度说明进行评分确保分数在1-6之间并给出合理的评估"""
try:
ai_response, _ = self.llm.generate_response(prompt)
# 尝试从AI响应中提取JSON部分
start_idx = ai_response.find("{")
end_idx = ai_response.rfind("}") + 1
if start_idx != -1 and end_idx != 0:
json_str = ai_response[start_idx:end_idx]
scores = json.loads(json_str)
# 确保所有分数在1-6之间
return {k: max(1, min(6, float(v))) for k, v in scores.items()}
else:
print("AI响应格式不正确使用默认评分")
return {dim: 3.5 for dim in dimensions}
except Exception as e:
print(f"评估过程出错:{str(e)}")
return {dim: 3.5 for dim in dimensions}
def run_evaluation(self):
"""
运行整个评估过程
"""
print(f"欢迎使用{config['bot']['nickname']}形象创建程序!")
print("接下来将给您呈现一系列有关您bot的场景共15个")
print("请想象您的bot在以下场景下会做什么并描述您的bot的反应。")
print("每个场景都会进行不同方面的评估。")
print("\n角色基本信息:")
print(f"- 昵称:{config['bot']['nickname']}")
print(f"- 性格核心:{config['personality']['personality_core']}")
print(f"- 性格侧面:{config['personality']['personality_side']}")
print(f"- 身份细节:{config['identity']['identity']}")
print("\n准备好了吗?按回车键开始...")
input()
total_scenarios = len(self.scenarios)
progress_bar = tqdm(
total=total_scenarios,
desc="场景进度",
ncols=100,
bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}]",
)
for _i, scenario_data in enumerate(self.scenarios, 1):
# print(f"\n{'-' * 20} 场景 {i}/{total_scenarios} - {scenario_data['场景编号']} {'-' * 20}")
# 改编场景,使其更适合当前角色
print(f"{config['bot']['nickname']}祈祷中...")
adapted_scene = adapt_scene(scenario_data["场景"])
scenario_data["改编场景"] = adapted_scene
print(adapted_scene)
print(f"\n请描述{config['bot']['nickname']}在这种情况下会如何反应:")
response = input().strip()
if not response:
print("反应描述不能为空!")
continue
print("\n正在评估您的描述...")
scores = self.evaluate_response(adapted_scene, response, scenario_data["评估维度"])
# 更新最终分数
for dimension, score in scores.items():
self.final_scores[dimension] += score
self.dimension_counts[dimension] += 1
print("\n当前评估结果:")
print("-" * 30)
for dimension, score in scores.items():
print(f"{dimension}: {score}/6")
# 更新进度条
progress_bar.update(1)
# if i < total_scenarios:
# print("\n按回车键继续下一个场景...")
# input()
progress_bar.close()
# 计算平均分
for dimension in self.final_scores:
if self.dimension_counts[dimension] > 0:
self.final_scores[dimension] = round(self.final_scores[dimension] / self.dimension_counts[dimension], 2)
print("\n" + "=" * 50)
print(f" {config['bot']['nickname']}的人格特征评估结果 ".center(50))
print("=" * 50)
for trait, score in self.final_scores.items():
print(f"{trait}: {score}/6".ljust(20) + f"测试场景数:{self.dimension_counts[trait]}".rjust(30))
print("=" * 50)
# 返回评估结果
return self.get_result()
def get_result(self):
"""
获取评估结果
"""
return {
"final_scores": self.final_scores,
"dimension_counts": self.dimension_counts,
"scenarios": self.scenarios,
"bot_info": {
"nickname": config["bot"]["nickname"],
"gender": config["identity"]["gender"],
"age": config["identity"]["age"],
"height": config["identity"]["height"],
"weight": config["identity"]["weight"],
"appearance": config["identity"]["appearance"],
"personality_core": config["personality"]["personality_core"],
"personality_side": config["personality"]["personality_side"],
"identity": config["identity"]["identity"],
},
}
def main():
evaluator = PersonalityEvaluatorDirect()
result = evaluator.run_evaluation()
# 准备简化的结果数据
simplified_result = {
"openness": round(result["final_scores"]["开放性"] / 6, 1), # 转换为0-1范围
"conscientiousness": round(result["final_scores"]["严谨性"] / 6, 1),
"extraversion": round(result["final_scores"]["外向性"] / 6, 1),
"agreeableness": round(result["final_scores"]["宜人性"] / 6, 1),
"neuroticism": round(result["final_scores"]["神经质"] / 6, 1),
"bot_nickname": config["bot"]["nickname"],
}
# 确保目录存在
save_dir = os.path.join(root_path, "data", "personality")
os.makedirs(save_dir, exist_ok=True)
# 创建文件名,替换可能的非法字符
bot_name = config["bot"]["nickname"]
# 替换Windows文件名中不允许的字符
for char in ["\\", "/", ":", "*", "?", '"', "<", ">", "|"]:
bot_name = bot_name.replace(char, "_")
file_name = f"{bot_name}_personality.per"
save_path = os.path.join(save_dir, file_name)
# 保存简化的结果
with open(save_path, "w", encoding="utf-8") as f:
json.dump(simplified_result, f, ensure_ascii=False, indent=4)
print(f"\n结果已保存到 {save_path}")
# 同时保存完整结果到results目录
os.makedirs("results", exist_ok=True)
with open("results/personality_result.json", "w", encoding="utf-8") as f:
json.dump(result, f, ensure_ascii=False, indent=2)
if __name__ == "__main__":
main()

View File

@ -1,142 +0,0 @@
# 人格测试问卷题目
# 王孟成, 戴晓阳, & 姚树桥. (2011).
# 中国大五人格问卷的初步编制Ⅲ:简式版的制定及信效度检验. 中国临床心理学杂志, 19(04), Article 04.
# 王孟成, 戴晓阳, & 姚树桥. (2010).
# 中国大五人格问卷的初步编制Ⅰ:理论框架与信度分析. 中国临床心理学杂志, 18(05), Article 05.
PERSONALITY_QUESTIONS = [
# 神经质维度 (F1)
{"id": 1, "content": "我常担心有什么不好的事情要发生", "factor": "神经质", "reverse_scoring": False},
{"id": 2, "content": "我常感到害怕", "factor": "神经质", "reverse_scoring": False},
{"id": 3, "content": "有时我觉得自己一无是处", "factor": "神经质", "reverse_scoring": False},
{"id": 4, "content": "我很少感到忧郁或沮丧", "factor": "神经质", "reverse_scoring": True},
{"id": 5, "content": "别人一句漫不经心的话,我常会联系在自己身上", "factor": "神经质", "reverse_scoring": False},
{"id": 6, "content": "在面对压力时,我有种快要崩溃的感觉", "factor": "神经质", "reverse_scoring": False},
{"id": 7, "content": "我常担忧一些无关紧要的事情", "factor": "神经质", "reverse_scoring": False},
{"id": 8, "content": "我常常感到内心不踏实", "factor": "神经质", "reverse_scoring": False},
# 严谨性维度 (F2)
{"id": 9, "content": "在工作上,我常只求能应付过去便可", "factor": "严谨性", "reverse_scoring": True},
{"id": 10, "content": "一旦确定了目标,我会坚持努力地实现它", "factor": "严谨性", "reverse_scoring": False},
{"id": 11, "content": "我常常是仔细考虑之后才做出决定", "factor": "严谨性", "reverse_scoring": False},
{"id": 12, "content": "别人认为我是个慎重的人", "factor": "严谨性", "reverse_scoring": False},
{"id": 13, "content": "做事讲究逻辑和条理是我的一个特点", "factor": "严谨性", "reverse_scoring": False},
{"id": 14, "content": "我喜欢一开头就把事情计划好", "factor": "严谨性", "reverse_scoring": False},
{"id": 15, "content": "我工作或学习很勤奋", "factor": "严谨性", "reverse_scoring": False},
{"id": 16, "content": "我是个倾尽全力做事的人", "factor": "严谨性", "reverse_scoring": False},
# 宜人性维度 (F3)
{
"id": 17,
"content": "尽管人类社会存在着一些阴暗的东西(如战争、罪恶、欺诈),我仍然相信人性总的来说是善良的",
"factor": "宜人性",
"reverse_scoring": False,
},
{"id": 18, "content": "我觉得大部分人基本上是心怀善意的", "factor": "宜人性", "reverse_scoring": False},
{"id": 19, "content": "虽然社会上有骗子,但我觉得大部分人还是可信的", "factor": "宜人性", "reverse_scoring": False},
{"id": 20, "content": "我不太关心别人是否受到不公正的待遇", "factor": "宜人性", "reverse_scoring": True},
{"id": 21, "content": "我时常觉得别人的痛苦与我无关", "factor": "宜人性", "reverse_scoring": True},
{"id": 22, "content": "我常为那些遭遇不幸的人感到难过", "factor": "宜人性", "reverse_scoring": False},
{"id": 23, "content": "我是那种只照顾好自己,不替别人担忧的人", "factor": "宜人性", "reverse_scoring": True},
{"id": 24, "content": "当别人向我诉说不幸时,我常感到难过", "factor": "宜人性", "reverse_scoring": False},
# 开放性维度 (F4)
{"id": 25, "content": "我的想象力相当丰富", "factor": "开放性", "reverse_scoring": False},
{"id": 26, "content": "我头脑中经常充满生动的画面", "factor": "开放性", "reverse_scoring": False},
{"id": 27, "content": "我对许多事情有着很强的好奇心", "factor": "开放性", "reverse_scoring": False},
{"id": 28, "content": "我喜欢冒险", "factor": "开放性", "reverse_scoring": False},
{"id": 29, "content": "我是个勇于冒险,突破常规的人", "factor": "开放性", "reverse_scoring": False},
{"id": 30, "content": "我身上具有别人没有的冒险精神", "factor": "开放性", "reverse_scoring": False},
{
"id": 31,
"content": "我渴望学习一些新东西,即使它们与我的日常生活无关",
"factor": "开放性",
"reverse_scoring": False,
},
{
"id": 32,
"content": "我很愿意也很容易接受那些新事物、新观点、新想法",
"factor": "开放性",
"reverse_scoring": False,
},
# 外向性维度 (F5)
{"id": 33, "content": "我喜欢参加社交与娱乐聚会", "factor": "外向性", "reverse_scoring": False},
{"id": 34, "content": "我对人多的聚会感到乏味", "factor": "外向性", "reverse_scoring": True},
{"id": 35, "content": "我尽量避免参加人多的聚会和嘈杂的环境", "factor": "外向性", "reverse_scoring": True},
{"id": 36, "content": "在热闹的聚会上,我常常表现主动并尽情玩耍", "factor": "外向性", "reverse_scoring": False},
{"id": 37, "content": "有我在的场合一般不会冷场", "factor": "外向性", "reverse_scoring": False},
{"id": 38, "content": "我希望成为领导者而不是被领导者", "factor": "外向性", "reverse_scoring": False},
{"id": 39, "content": "在一个团体中,我希望处于领导地位", "factor": "外向性", "reverse_scoring": False},
{"id": 40, "content": "别人多认为我是一个热情和友好的人", "factor": "外向性", "reverse_scoring": False},
]
# 因子维度说明
FACTOR_DESCRIPTIONS = {
"外向性": {
"description": "反映个体神经系统的强弱和动力特征。外向性主要表现为个体在人际交往和社交活动中的倾向性,"
"包括对社交活动的兴趣、"
"对人群的态度、社交互动中的主动程度以及在群体中的影响力。高分者倾向于积极参与社交活动,乐于与人交往,善于表达自我,"
"并往往在群体中发挥领导作用;低分者则倾向于独处,不喜欢热闹的社交场合,表现出内向、安静的特征。",
"trait_words": ["热情", "活力", "社交", "主动"],
"subfactors": {
"合群性": "个体愿意与他人聚在一起,即接近人群的倾向;高分表现乐群、好交际,低分表现封闭、独处",
"热情": "个体对待别人时所表现出的态度;高分表现热情好客,低分表现冷淡",
"支配性": "个体喜欢指使、操纵他人,倾向于领导别人的特点;高分表现好强、发号施令,低分表现顺从、低调",
"活跃": "个体精力充沛,活跃、主动性等特点;高分表现活跃,低分表现安静",
},
},
"神经质": {
"description": "反映个体情绪的状态和体验内心苦恼的倾向性。这个维度主要关注个体在面对压力、"
"挫折和日常生活挑战时的情绪稳定性和适应能力。它包含了对焦虑、抑郁、愤怒等负面情绪的敏感程度,"
"以及个体对这些情绪的调节和控制能力。高分者容易体验负面情绪,对压力较为敏感,情绪波动较大;"
"低分者则表现出较强的情绪稳定性,能够较好地应对压力和挫折。",
"trait_words": ["稳定", "沉着", "从容", "坚韧"],
"subfactors": {
"焦虑": "个体体验焦虑感的个体差异;高分表现坐立不安,低分表现平静",
"抑郁": "个体体验抑郁情感的个体差异;高分表现郁郁寡欢,低分表现平静",
"敏感多疑": "个体常常关注自己的内心活动,行为和过于意识人对自己的看法、评价;高分表现敏感多疑,"
"低分表现淡定、自信",
"脆弱性": "个体在危机或困难面前无力、脆弱的特点;高分表现无能、易受伤、逃避,低分表现坚强",
"愤怒-敌意": "个体准备体验愤怒,及相关情绪的状态;高分表现暴躁易怒,低分表现平静",
},
},
"严谨性": {
"description": "反映个体在目标导向行为上的组织、坚持和动机特征。这个维度体现了个体在工作、"
"学习等目标性活动中的自我约束和行为管理能力。它涉及到个体的责任感、自律性、计划性、条理性以及完成任务的态度。"
"高分者往往表现出强烈的责任心、良好的组织能力、谨慎的决策风格和持续的努力精神;低分者则可能表现出随意性强、"
"缺乏规划、做事马虎或易放弃的特点。",
"trait_words": ["负责", "自律", "条理", "勤奋"],
"subfactors": {
"责任心": "个体对待任务和他人认真负责,以及对自己承诺的信守;高分表现有责任心、负责任,"
"低分表现推卸责任、逃避处罚",
"自我控制": "个体约束自己的能力,及自始至终的坚持性;高分表现自制、有毅力,低分表现冲动、无毅力",
"审慎性": "个体在采取具体行动前的心理状态;高分表现谨慎、小心,低分表现鲁莽、草率",
"条理性": "个体处理事务和工作的秩序,条理和逻辑性;高分表现整洁、有秩序,低分表现混乱、遗漏",
"勤奋": "个体工作和学习的努力程度及为达到目标而表现出的进取精神;高分表现勤奋、刻苦,低分表现懒散",
},
},
"开放性": {
"description": "反映个体对新异事物、新观念和新经验的接受程度,以及在思维和行为方面的创新倾向。"
"这个维度体现了个体在认知和体验方面的广度、深度和灵活性。它包括对艺术的欣赏能力、对知识的求知欲、想象力的丰富程度,"
"以及对冒险和创新的态度。高分者往往具有丰富的想象力、广泛的兴趣、开放的思维方式和创新的倾向;低分者则倾向于保守、"
"传统,喜欢熟悉和常规的事物。",
"trait_words": ["创新", "好奇", "艺术", "冒险"],
"subfactors": {
"幻想": "个体富于幻想和想象的水平;高分表现想象力丰富,低分表现想象力匮乏",
"审美": "个体对于艺术和美的敏感与热爱程度;高分表现富有艺术气息,低分表现一般对艺术不敏感",
"好奇心": "个体对未知事物的态度;高分表现兴趣广泛、好奇心浓,低分表现兴趣少、无好奇心",
"冒险精神": "个体愿意尝试有风险活动的个体差异;高分表现好冒险,低分表现保守",
"价值观念": "个体对新事物、新观念、怪异想法的态度;高分表现开放、坦然接受新事物,低分则相反",
},
},
"宜人性": {
"description": "反映个体在人际关系中的亲和倾向,体现了对他人的关心、同情和合作意愿。"
"这个维度主要关注个体与他人互动时的态度和行为特征,包括对他人的信任程度、同理心水平、"
"助人意愿以及在人际冲突中的处理方式。高分者通常表现出友善、富有同情心、乐于助人的特质,善于与他人建立和谐关系;"
"低分者则可能表现出较少的人际关注,在社交互动中更注重自身利益,较少考虑他人感受。",
"trait_words": ["友善", "同理", "信任", "合作"],
"subfactors": {
"信任": "个体对他人和/或他人言论的相信程度;高分表现信任他人,低分表现怀疑",
"体贴": "个体对别人的兴趣和需要的关注程度;高分表现体贴、温存,低分表现冷漠、不在乎",
"同情": "个体对处于不利地位的人或物的态度;高分表现富有同情心,低分表现冷漠",
},
},
}

View File

@ -1,43 +0,0 @@
import json
import os
from typing import Any
def load_scenes() -> dict[str, Any]:
"""
从JSON文件加载场景数据
Returns:
Dict: 包含所有场景的字典
"""
current_dir = os.path.dirname(os.path.abspath(__file__))
json_path = os.path.join(current_dir, "template_scene.json")
with open(json_path, "r", encoding="utf-8") as f:
return json.load(f)
PERSONALITY_SCENES = load_scenes()
def get_scene_by_factor(factor: str) -> dict | None:
"""
根据人格因子获取对应的情景测试
Args:
factor (str): 人格因子名称
Returns:
dict: 包含情景描述的字典
"""
return PERSONALITY_SCENES.get(factor, None)
def get_all_scenes() -> dict:
"""
获取所有情景测试
Returns:
Dict: 所有情景测试的字典
"""
return PERSONALITY_SCENES

View File

@ -1,112 +0,0 @@
{
"外向性": {
"场景1": {
"scenario": "你刚刚搬到一个新的城市工作。今天是你入职的第一天,在公司的电梯里,一位同事微笑着和你打招呼:\n\n同事「嗨你是新来的同事吧我是市场部的小林。」\n\n同事看起来很友善还主动介绍说「待会午饭时间我们部门有几个人准备一起去楼下新开的餐厅你要一起来吗可以认识一下其他同事。」",
"explanation": "这个场景通过职场社交情境,观察个体对于新环境、新社交圈的态度和反应倾向。"
},
"场景2": {
"scenario": "在大学班级群里,班长发起了一个组织班级联谊活动的投票:\n\n班长「大家好下周末我们准备举办一次班级联谊活动地点在学校附近的KTV。想请大家报名参加也欢迎大家邀请其他班级的同学」\n\n已经有几个同学在群里积极响应有人@你问你要不要一起参加。",
"explanation": "通过班级活动场景,观察个体对群体社交活动的参与意愿。"
},
"场景3": {
"scenario": "你在社交平台上发布了一条动态,收到了很多陌生网友的评论和私信:\n\n网友A「你说的这个观点很有意思想和你多交流一下。」\n\n网友B「我也对这个话题很感兴趣要不要建个群一起讨论」",
"explanation": "通过网络社交场景,观察个体对线上社交的态度。"
},
"场景4": {
"scenario": "你暗恋的对象今天主动来找你:\n\n对方「那个...我最近在准备一个演讲比赛,听说你口才很好。能不能请你帮我看看演讲稿,顺便给我一些建议?如果你有时间的话,可以一起吃个饭聊聊。」",
"explanation": "通过恋爱情境,观察个体在面对心仪对象时的社交表现。"
},
"场景5": {
"scenario": "在一次线下读书会上,主持人突然点名让你分享读后感:\n\n主持人「听说你对这本书很有见解能不能和大家分享一下你的想法」\n\n现场有二十多个陌生的读书爱好者都期待地看着你。",
"explanation": "通过即兴发言场景,观察个体的社交表现欲和公众表达能力。"
}
},
"神经质": {
"场景1": {
"scenario": "你正在准备一个重要的项目演示这关系到你的晋升机会。就在演示前30分钟你收到了主管发来的消息\n\n主管「临时有个变动CEO也会来听你的演示。他对这个项目特别感兴趣。」\n\n正当你准备回复时主管又发来一条「对了能不能把演示时间压缩到15分钟CEO下午还有其他安排。你之前准备的是30分钟的版本对吧」",
"explanation": "这个场景通过突发的压力情境,观察个体在面对计划外变化时的情绪反应和调节能力。"
},
"场景2": {
"scenario": "期末考试前一天晚上,你收到了好朋友发来的消息:\n\n好朋友「不好意思这么晚打扰你...我看你平时成绩很好,能不能帮我解答几个问题?我真的很担心明天的考试。」\n\n你看了看时间已经是晚上11点而你原本计划的复习还没完成。",
"explanation": "通过考试压力场景,观察个体在时间紧张时的情绪管理。"
},
"场景3": {
"scenario": "你在社交媒体上发表的一个观点引发了争议,有不少人开始批评你:\n\n网友A「这种观点也好意思说出来真是无知。」\n\n网友B「建议楼主先去补补课再来发言。」\n\n评论区里的负面评论越来越多还有人开始人身攻击。",
"explanation": "通过网络争议场景,观察个体面对批评时的心理承受能力。"
},
"场景4": {
"scenario": "你和恋人约好今天一起看电影,但在约定时间前半小时,对方发来消息:\n\n恋人「对不起我临时有点事可能要迟到一会儿。」\n\n二十分钟后对方又发来消息「可能要再等等抱歉」\n\n电影快要开始了但对方还是没有出现。",
"explanation": "通过恋爱情境,观察个体对不确定性的忍耐程度。"
},
"场景5": {
"scenario": "在一次重要的小组展示中,你的组员在演示途中突然卡壳了:\n\n组员小声对你说「我忘词了接下来的部分是什么来着...」\n\n台下的老师和同学都在等待气氛有些尴尬。",
"explanation": "通过公开场合的突发状况,观察个体的应急反应和压力处理能力。"
}
},
"严谨性": {
"场景1": {
"scenario": "你是团队的项目负责人,刚刚接手了一个为期两个月的重要项目。在第一次团队会议上:\n\n小王「老大我觉得两个月时间很充裕我们先做着看吧遇到问题再解决。」\n\n小张「要不要先列个时间表不过感觉太详细的计划也没必要点到为止就行。」\n\n小李「客户那边说如果能提前完成有奖励我觉得我们可以先做快一点的部分。」",
"explanation": "这个场景通过项目管理情境,体现个体在工作方法、计划性和责任心方面的特征。"
},
"场景2": {
"scenario": "期末小组作业,组长让大家分工完成一份研究报告。在截止日期前三天:\n\n组员A「我的部分大概写完了感觉还行。」\n\n组员B「我这边可能还要一天才能完成最近太忙了。」\n\n组员C发来一份没有任何引用出处、可能存在抄袭的内容「我写完了你们看看怎么样」",
"explanation": "通过学习场景,观察个体对学术规范和质量要求的重视程度。"
},
"场景3": {
"scenario": "你在一个兴趣小组的群聊中,大家正在讨论举办一次线下活动:\n\n成员A「到时候见面就知道具体怎么玩了」\n\n成员B「对啊随意一点挺好的。」\n\n成员C「人来了自然就热闹了。」",
"explanation": "通过活动组织场景,观察个体对活动计划的态度。"
},
"场景4": {
"scenario": "你的好友小明邀请你一起参加一个重要的演出活动,他说:\n\n小明「到时候我们就即兴发挥吧不用排练了我相信我们的默契。」\n\n距离演出还有三天但节目内容、配乐和服装都还没有确定。",
"explanation": "通过演出准备场景,观察个体的计划性和对不确定性的接受程度。"
},
"场景5": {
"scenario": "在一个重要的团队项目中,你发现一个同事的工作存在明显错误:\n\n同事「差不多就行了反正领导也看不出来。」\n\n这个错误可能不会立即造成问题但长期来看可能会影响项目质量。",
"explanation": "通过工作质量场景,观察个体对细节和标准的坚持程度。"
}
},
"开放性": {
"场景1": {
"scenario": "周末下午,你的好友小美兴致勃勃地给你打电话:\n\n小美「我刚发现一个特别有意思的沉浸式艺术展不是传统那种挂画的展览而是把整个空间都变成了艺术品。观众要穿特制的服装还要带上VR眼镜好像还有AI实时互动」\n\n小美继续说「虽然票价不便宜但听说体验很独特。网上评价两极分化有人说是前所未有的艺术革新也有人说是哗众取宠。要不要周末一起去体验一下」",
"explanation": "这个场景通过新型艺术体验,反映个体对创新事物的接受程度和尝试意愿。"
},
"场景2": {
"scenario": "在一节创意写作课上,老师提出了一个特别的作业:\n\n老师「下周的作业是用AI写作工具协助创作一篇小说。你们可以自由探索如何与AI合作打破传统写作方式。」\n\n班上随即展开了激烈讨论有人认为这是对创作的亵渎也有人对这种新形式感到兴奋。",
"explanation": "通过新技术应用场景,观察个体对创新学习方式的态度。"
},
"场景3": {
"scenario": "在社交媒体上,你看到一个朋友分享了一种新的学习方式:\n\n「最近我在尝试'沉浸式学习',就是完全投入到一个全新的领域。比如学习一门陌生的语言,或者尝试完全不同的职业技能。虽然过程会很辛苦,但这种打破舒适圈的感觉真的很棒!」\n\n评论区里争论不断有人认为这种学习方式效率高也有人觉得太激进。",
"explanation": "通过新型学习方式,观察个体对创新和挑战的态度。"
},
"场景4": {
"scenario": "你的朋友向你推荐了一种新的饮食方式:\n\n朋友「我最近在尝试'未来食品'比如人造肉、3D打印食物、昆虫蛋白等。这不仅对环境友好营养也很均衡。要不要一起来尝试看看」\n\n这个提议让你感到好奇又犹豫你之前从未尝试过这些新型食物。",
"explanation": "通过饮食创新场景,观察个体对新事物的接受度和尝试精神。"
},
"场景5": {
"scenario": "在一次朋友聚会上,大家正在讨论未来职业规划:\n\n朋友A「我准备辞职去做自媒体专门介绍一些小众的文化和艺术。」\n\n朋友B「我想去学习生物科技准备转行做人造肉研发。」\n\n朋友C「我在考虑加入一个区块链创业项目虽然风险很大。」",
"explanation": "通过职业选择场景,观察个体对新兴领域的探索意愿。"
}
},
"宜人性": {
"场景1": {
"scenario": "在回家的公交车上,你遇到这样一幕:\n\n一位老奶奶颤颤巍巍地上了车车上座位已经坐满了。她站在你旁边看起来很疲惫。这时你听到前排两个年轻人的对话\n\n年轻人A「那个老太太好像站不稳看起来挺累的。」\n\n年轻人B「现在的老年人真是...我看她包里还有菜,肯定是去菜市场买完菜回来的,这么多人都不知道叫子女开车接送。」\n\n就在这时老奶奶一个趔趄差点摔倒。她扶住了扶手但包里的东西洒了一些出来。",
"explanation": "这个场景通过公共场合的助人情境,体现个体的同理心和对他人需求的关注程度。"
},
"场景2": {
"scenario": "在班级群里,有同学发起为生病住院的同学捐款:\n\n同学A「大家好小林最近得了重病住院医药费很贵家里负担很重。我们要不要一起帮帮他」\n\n同学B「我觉得这是他家里的事我们不方便参与吧。」\n\n同学C「但是都是同学一场帮帮忙也是应该的。」",
"explanation": "通过同学互助场景,观察个体的助人意愿和同理心。"
},
"场景3": {
"scenario": "在一个网络讨论组里,有人发布了求助信息:\n\n求助者「最近心情很低落感觉生活很压抑不知道该怎么办...」\n\n评论区里已经有一些回复\n「生活本来就是这样想开点」\n「你这样子太消极了要积极面对。」\n「谁还没点烦心事啊过段时间就好了。」",
"explanation": "通过网络互助场景,观察个体的共情能力和安慰方式。"
},
"场景4": {
"scenario": "你的朋友向你倾诉工作压力:\n\n朋友「最近工作真的好累感觉快坚持不下去了...」\n\n但今天你也遇到了很多烦心事心情也不太好。",
"explanation": "通过感情关系场景,观察个体在自身状态不佳时的关怀能力。"
},
"场景5": {
"scenario": "在一次团队项目中,新来的同事小王因为经验不足,造成了一个严重的错误。在部门会议上:\n\n主管「这个错误造成了很大的损失是谁负责的这部分」\n\n小王看起来很紧张欲言又止。你知道是他造成的错误同时你也是这个项目的共同负责人。",
"explanation": "通过职场情境,观察个体在面对他人过错时的态度和处理方式。"
}
}
}

View File

@ -10,7 +10,6 @@ from src.chat.message_receive.chat_stream import get_chat_manager
from src.config.config import global_config
from src.chat.message_receive.bot import chat_bot
from src.common.logger import get_logger
from src.individuality.individuality import get_individuality, Individuality
from src.common.server import get_global_server, Server
from src.mood.mood_manager import mood_manager
from src.chat.knowledge import lpmm_start_up
@ -44,8 +43,6 @@ class MainSystem:
else:
self.hippocampus_manager = None
self.individuality: Individuality = get_individuality()
# 使用消息API替代直接的FastAPI实例
self.app: MessageServer = get_global_api()
self.server: Server = get_global_server()
@ -122,9 +119,6 @@ class MainSystem:
# 将bot.py中的chat_bot.message_process消息处理函数注册到api.py的消息处理基类中
self.app.register_message_handler(chat_bot.message_process)
# 初始化个体特征
await self.individuality.initialize()
await check_and_run_migrations()

View File

@ -637,7 +637,6 @@ class PersonInfoManager:
current_name_set = set(self.person_name_list.values())
while current_try < max_retries:
# prompt_personality =get_individuality().get_prompt(x_person=2, level=1)
bot_name = global_config.bot.nickname
qv_name_prompt = f"你是{bot_name},一个伪装成人类的AI,你不能让别人发现这一点"

View File

@ -4,9 +4,10 @@ import os
import pickle
import random
import asyncio
from typing import List, Dict, Any, TYPE_CHECKING
from typing import List, Dict, Any
from src.config.config import global_config
from src.common.logger import get_logger
from src.common.data_models.database_data_model import DatabaseMessages
from src.person_info.relationship_manager import get_relationship_manager
from src.person_info.person_info import Person, get_person_id
from src.chat.message_receive.chat_stream import get_chat_manager
@ -17,8 +18,6 @@ from src.chat.utils.chat_message_builder import (
num_new_messages_since,
)
if TYPE_CHECKING:
from src.common.data_models.database_data_model import DatabaseMessages
logger = get_logger("relationship_builder")
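此处把 DatabaseMessages 从 TYPE_CHECKING 块改为直接导入。两者的区别:TYPE_CHECKING 下的导入只对类型检查器可见,运行时若要实际使用该类(实例化、isinstance 等)必须直接导入;仅作注解时可配合字符串形式。示意如下:

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # 仅类型检查时执行,运行时不导入
    from src.common.data_models.database_data_model import DatabaseMessages

def handle(msg: "DatabaseMessages") -> None:  # 注解写成字符串即可延迟求值
    ...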

View File

@ -21,6 +21,7 @@ from src.plugin_system.base.component_types import ActionInfo
if TYPE_CHECKING:
from src.common.data_models.info_data_model import ActionPlannerInfo
from src.common.data_models.database_data_model import DatabaseMessages
from src.common.data_models.llm_data_model import LLMGenerationDataModel
install(extra_lines=3)
@ -85,11 +86,9 @@ async def generate_reply(
enable_tool: bool = False,
enable_splitter: bool = True,
enable_chinese_typo: bool = True,
return_prompt: bool = False,
request_type: str = "generator_api",
from_plugin: bool = True,
return_expressions: bool = False,
) -> Tuple[bool, List[Tuple[str, Any]], Optional[str], Optional[List[int]]]:
) -> Tuple[bool, Optional["LLMGenerationDataModel"]]:
"""生成回复
Args:
@ -117,7 +116,7 @@ async def generate_reply(
replyer = get_replyer(chat_stream, chat_id, request_type=request_type)
if not replyer:
logger.error("[GeneratorAPI] 无法获取回复器")
return False, [], None, None
return False, None
if not extra_info and action_data:
extra_info = action_data.get("extra_info", "")
@ -126,7 +125,7 @@ async def generate_reply(
reply_reason = action_data.get("reason", "")
# 调用回复器生成回复
success, llm_response_dict, prompt, selected_expressions = await replyer.generate_reply_with_context(
success, llm_response = await replyer.generate_reply_with_context(
extra_info=extra_info,
available_actions=available_actions,
chosen_actions=chosen_actions,
@ -138,43 +137,27 @@ async def generate_reply(
)
if not success:
logger.warning("[GeneratorAPI] 回复生成失败")
return False, [], None, None
assert llm_response_dict is not None, "llm_response_dict不应为None" # 虽然说不会出现llm_response为空的情况
if content := llm_response_dict.get("content", ""):
return False, None
if content := llm_response.content:
reply_set = process_human_text(content, enable_splitter, enable_chinese_typo)
else:
reply_set = []
llm_response.reply_set = reply_set
logger.debug(f"[GeneratorAPI] 回复生成成功,生成了 {len(reply_set)} 个回复项")
# if return_prompt:
# if return_expressions:
# return success, reply_set, prompt, selected_expressions
# else:
# return success, reply_set, prompt, None
# else:
# if return_expressions:
# return success, reply_set, (None, selected_expressions)
# else:
# return success, reply_set, None
return (
success,
reply_set,
prompt if return_prompt else None,
selected_expressions if return_expressions else None,
)
return success, llm_response
except ValueError as ve:
raise ve
except UserWarning as uw:
logger.warning(f"[GeneratorAPI] 中断了生成: {uw}")
return False, [], None, None
return False, None
except Exception as e:
logger.error(f"[GeneratorAPI] 生成回复时出错: {e}")
logger.error(traceback.format_exc())
return False, [], None, None
return False, None
async def rewrite_reply(
chat_stream: Optional[ChatStream] = None,
@ -185,9 +168,8 @@ async def rewrite_reply(
raw_reply: str = "",
reason: str = "",
reply_to: str = "",
return_prompt: bool = False,
request_type: str = "generator_api",
) -> Tuple[bool, List[Tuple[str, Any]], Optional[str]]:
) -> Tuple[bool, Optional["LLMGenerationDataModel"]]:
"""重写回复
Args:
@ -210,7 +192,7 @@ async def rewrite_reply(
replyer = get_replyer(chat_stream, chat_id, request_type=request_type)
if not replyer:
logger.error("[GeneratorAPI] 无法获取回复器")
return False, [], None
return False, None
logger.info("[GeneratorAPI] 开始重写回复")
@ -221,29 +203,28 @@ async def rewrite_reply(
reply_to = reply_to or reply_data.get("reply_to", "")
# 调用回复器重写回复
success, content, prompt = await replyer.rewrite_reply_with_context(
success, llm_response = await replyer.rewrite_reply_with_context(
raw_reply=raw_reply,
reason=reason,
reply_to=reply_to,
return_prompt=return_prompt,
)
reply_set = []
if content:
if success and llm_response and (content := llm_response.content):
reply_set = process_human_text(content, enable_splitter, enable_chinese_typo)
llm_response.reply_set = reply_set
if success:
logger.info(f"[GeneratorAPI] 重写回复成功,生成了 {len(reply_set)} 个回复项")
else:
logger.warning("[GeneratorAPI] 重写回复失败")
return success, reply_set, prompt if return_prompt else None
return success, llm_response
except ValueError as ve:
raise ve
except Exception as e:
logger.error(f"[GeneratorAPI] 重写回复时出错: {e}")
return False, [], None
return False, None
def process_human_text(content: str, enable_splitter: bool, enable_chinese_typo: bool) -> List[Tuple[str, Any]]:
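返回值从四元组收敛为 (bool, LLMGenerationDataModel) 后,调用方大致这样使用(示意:假设已导入 generate_reply 并处于异步上下文,send_segment 为假设的发送函数):

success, llm_response = await generate_reply(chat_id=chat_id, action_data=action_data)
if success and llm_response:
    # reply_set 由 process_human_text 切分得到,元素为 (类型, 内容) 二元组
    for seg_type, seg_data in llm_response.reply_set:
        await send_segment(seg_type, seg_data)  # send_segment 为假设函数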

View File

@ -38,9 +38,10 @@ class BaseEventHandler(ABC):
self.subcribe(event_name)
@abstractmethod
async def execute(self, message: MaiMessages) -> Tuple[bool, bool, Optional[str]]:
async def execute(self, message: MaiMessages | None) -> Tuple[bool, bool, Optional[str]]:
"""执行事件处理的抽象方法,子类必须实现
Args:
message (MaiMessages | None): 事件消息对象;当你注册的事件为ON_START或ON_STOP时,message为None
Returns:
Tuple[bool, bool, Optional[str]]: (是否执行成功, 是否需要继续处理, 可选的返回消息)
"""

View File

@ -116,8 +116,8 @@ class ActionInfo(ComponentInfo):
action_require: List[str] = field(default_factory=list) # 动作需求说明
associated_types: List[str] = field(default_factory=list) # 关联的消息类型
# 激活类型相关
focus_activation_type: ActionActivationType = ActionActivationType.ALWAYS
normal_activation_type: ActionActivationType = ActionActivationType.ALWAYS
focus_activation_type: ActionActivationType = ActionActivationType.ALWAYS  # 已弃用
normal_activation_type: ActionActivationType = ActionActivationType.ALWAYS  # 已弃用
activation_type: ActionActivationType = ActionActivationType.ALWAYS
random_activation_probability: float = 0.0
llm_judge_prompt: str = ""
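两个旧的激活类型字段仅为兼容而保留,新代码只需设置统一的 activation_type(示意;假设 ActionInfo 其余字段均有默认值,能否如此构造以源码为准):

from src.plugin_system.base.component_types import ActionInfo, ActionActivationType

info = ActionInfo(activation_type=ActionActivationType.ALWAYS)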

View File

@ -1,6 +1,6 @@
import asyncio
import contextlib
from typing import List, Dict, Optional, Type, Tuple, Any, Coroutine
from typing import List, Dict, Optional, Type, Tuple, Any, TYPE_CHECKING
from src.chat.message_receive.message import MessageRecv
from src.chat.message_receive.chat_stream import get_chat_manager
@ -9,6 +9,9 @@ from src.plugin_system.base.component_types import EventType, EventHandlerInfo,
from src.plugin_system.base.base_events_handler import BaseEventHandler
from .global_announcement_manager import global_announcement_manager
if TYPE_CHECKING:
from src.common.data_models.llm_data_model import LLMGenerationDataModel
logger = get_logger("events_manager")
@ -47,7 +50,7 @@ class EventsManager:
event_type: EventType,
message: Optional[MessageRecv] = None,
llm_prompt: Optional[str] = None,
llm_response: Optional[Dict[str, Any]] = None,
llm_response: Optional["LLMGenerationDataModel"] = None,
stream_id: Optional[str] = None,
action_usage: Optional[List[str]] = None,
) -> Optional[MaiMessages]:
@ -62,22 +65,12 @@ class EventsManager:
else:
return self._transform_event_without_message(stream_id, llm_prompt, llm_response, action_usage)
return None # ON_START, ON_STOP事件没有消息体
def _execute_handler(
self,
handler: BaseEventHandler,
message: Optional[MaiMessages]
) -> Coroutine[Any, Any, tuple[bool, bool, Any]]:
"""封装了调用 handler.execute 的逻辑。"""
return handler.execute(message) if message else handler.execute()
return None # ON_START, ON_STOP事件没有消息体
def _dispatch_handler_task(self, handler: BaseEventHandler, message: Optional[MaiMessages]):
"""分发一个非阻塞(异步)的事件处理任务。"""
try:
# 无论是否有 message都统一调用
coro = self._execute_handler(handler, message)
task = asyncio.create_task(coro)
task = asyncio.create_task(handler.execute(message))
task_name = f"{handler.plugin_name}-{handler.handler_name}"
task.set_name(task_name)
@ -87,15 +80,10 @@ class EventsManager:
except Exception as e:
logger.error(f"创建事件处理器任务 {handler.handler_name} 时发生异常: {e}", exc_info=True)
async def _dispatch_intercepting_handler(
self,
handler: BaseEventHandler,
message: Optional[MaiMessages]
) -> bool:
async def _dispatch_intercepting_handler(self, handler: BaseEventHandler, message: Optional[MaiMessages]) -> bool:
"""分发并等待一个阻塞(同步)的事件处理器,返回是否应继续处理。"""
try:
# 统一调用
success, continue_processing, result = await self._execute_handler(handler, message)
success, continue_processing, result = await handler.execute(message)
if not success:
logger.error(f"EventHandler {handler.handler_name} 执行失败: {result}")
@ -105,14 +93,14 @@ class EventsManager:
return continue_processing
except Exception as e:
logger.error(f"EventHandler {handler.handler_name} 发生异常: {e}", exc_info=True)
return True # 发生异常时默认不中断其他处理
return True # 发生异常时默认不中断其他处理
async def handle_mai_events(
self,
event_type: EventType,
message: Optional[MessageRecv] = None,
llm_prompt: Optional[str] = None,
llm_response: Optional[Dict[str, Any]] = None,
llm_response: Optional["LLMGenerationDataModel"] = None,
stream_id: Optional[str] = None,
action_usage: Optional[List[str]] = None,
) -> bool:
@ -137,7 +125,11 @@ class EventsManager:
for handler in handlers:
# 3. 前置检查和配置加载
if current_stream_id and handler.handler_name in global_announcement_manager.get_disabled_chat_event_handlers(current_stream_id):
if (
current_stream_id
and handler.handler_name
in global_announcement_manager.get_disabled_chat_event_handlers(current_stream_id)
):
continue
# 统一加载插件配置
@ -181,16 +173,16 @@ class EventsManager:
return False
def _transform_event_message(
self, message: MessageRecv, llm_prompt: Optional[str] = None, llm_response: Optional[Dict[str, Any]] = None
self, message: MessageRecv, llm_prompt: Optional[str] = None, llm_response: Optional["LLMGenerationDataModel"] = None
) -> MaiMessages:
"""转换事件消息格式"""
# 直接赋值部分内容
transformed_message = MaiMessages(
llm_prompt=llm_prompt,
llm_response_content=llm_response.get("content") if llm_response else None,
llm_response_reasoning=llm_response.get("reasoning") if llm_response else None,
llm_response_model=llm_response.get("model") if llm_response else None,
llm_response_tool_call=llm_response.get("tool_calls") if llm_response else None,
llm_response_content=llm_response.content if llm_response else None,
llm_response_reasoning=llm_response.reasoning if llm_response else None,
llm_response_model=llm_response.model if llm_response else None,
llm_response_tool_call=llm_response.tool_calls if llm_response else None,
raw_message=message.raw_message,
additional_data=message.message_info.additional_config or {},
)
@ -234,7 +226,7 @@ class EventsManager:
return transformed_message
def _build_message_from_stream(
self, stream_id: str, llm_prompt: Optional[str] = None, llm_response: Optional[Dict[str, Any]] = None
self, stream_id: str, llm_prompt: Optional[str] = None, llm_response: Optional["LLMGenerationDataModel"] = None
) -> MaiMessages:
"""从流ID构建消息"""
chat_stream = get_chat_manager().get_stream(stream_id)
@ -246,7 +238,7 @@ class EventsManager:
self,
stream_id: str,
llm_prompt: Optional[str] = None,
llm_response: Optional[Dict[str, Any]] = None,
llm_response: Optional["LLMGenerationDataModel"] = None,
action_usage: Optional[List[str]] = None,
) -> MaiMessages:
"""没有message对象时进行转换"""
@ -255,10 +247,10 @@ class EventsManager:
return MaiMessages(
stream_id=stream_id,
llm_prompt=llm_prompt,
llm_response_content=(llm_response.get("content") if llm_response else None),
llm_response_reasoning=(llm_response.get("reasoning") if llm_response else None),
llm_response_model=llm_response.get("model") if llm_response else None,
llm_response_tool_call=(llm_response.get("tool_calls") if llm_response else None),
llm_response_content=(llm_response.content if llm_response else None),
llm_response_reasoning=(llm_response.reasoning if llm_response else None),
llm_response_model=(llm_response.model if llm_response else None),
llm_response_tool_call=(llm_response.tool_calls if llm_response else None),
is_group_message=bool(chat_stream.group_info),
is_private_message=(not chat_stream.group_info),
action_usage=action_usage,
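llm_response 由 dict 换成数据模型后,取值从 .get("content") 变为属性访问,字段名也因此可被类型检查器校验。对调用方而言大致是(示意,假设该数据类除 content 外各字段均有默认值):

from src.common.data_models.llm_data_model import LLMGenerationDataModel

resp = LLMGenerationDataModel(content="你好")
print(resp.content, resp.reasoning, resp.model, resp.tool_calls)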

View File

@ -1,5 +1,5 @@
[inner]
version = "6.5.0"
version = "6.7.1"
#----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读----
#如果你想要修改配置文件请递增version的值
@ -22,6 +22,7 @@ alias_names = ["麦叠", "牢麦"] # 麦麦的别名
personality_core = "是一个女孩子"
# 人格的细节,描述人格的一些侧面
personality_side = "有时候说话不过脑子,喜欢开玩笑, 有时候会表现得无语,有时候会喜欢说一些奇怪的话"
#アイデンティティがない 生まれないらららら
# 可以描述外貌,性别,身高,职业,属性等等描述
identity = "年龄为19岁,是女孩子,身高为160cm,有黑色的短发"
@ -29,10 +30,11 @@ identity = "年龄为19岁,是女孩子,身高为160cm,有黑色的短发"
# 描述麦麦说话的表达风格,表达习惯,如要修改,可以酌情新增内容
reply_style = "回复可以简短一些。可以参考贴吧,知乎和微博的回复风格,回复不要浮夸,不要用夸张修辞,平淡一些。不要浮夸,不要夸张修辞。"
# 描述麦麦的行为风格,会影响麦麦什么时候回复,什么时候使用动作,麦麦考虑的可就多了
plan_style = "当你刚刚发送了消息没有人回复时不要选择action如果有别的动作非回复满足条件可以选择当你一次发送了太多消息为了避免打扰聊天节奏不要选择动作"
compress_personality = false # 是否压缩人格压缩后会精简人格信息节省token消耗并提高回复性能但是会丢失一些信息如果人设不长可以关闭
compress_identity = true # 是否压缩身份压缩后会精简身份信息节省token消耗并提高回复性能但是会丢失一些信息如果不长可以关闭
# 麦麦的兴趣,会影响麦麦对什么话题进行回复
interest = "对技术相关话题,游戏和动漫相关话题感兴趣,也对日常话题感兴趣,不喜欢太过沉重严肃的话题"
[expression]
# 表达学习配置
@ -63,6 +65,10 @@ focus_value = 0.5
max_context_size = 20 # 上下文长度
interest_rate_mode = "fast" #激活值计算模式可选fast或者accurate
planner_size = 2 # 副规划器大小越小麦麦的动作执行能力越精细但是消耗更多token调大可以缓解429类错误
mentioned_bot_inevitable_reply = true # 提及 bot 大概率回复
at_bot_inevitable_reply = true # @bot 或 提及bot 大概率回复

View File

@ -1,5 +1,5 @@
[inner]
version = "1.3.1"
version = "1.4.1"
# 配置文件版本号迭代规则同bot_config.toml
@ -40,14 +40,14 @@ price_out = 8.0 # 输出价格用于API调用统计
#force_stream_mode = true # 强制流式输出模式若模型不支持非流式输出请取消该注释启用强制流式输出若无该字段默认值为false
[[models]]
model_identifier = "Pro/deepseek-ai/DeepSeek-V3"
model_identifier = "deepseek-ai/DeepSeek-V3"
name = "siliconflow-deepseek-v3"
api_provider = "SiliconFlow"
price_in = 2.0
price_out = 8.0
[[models]]
model_identifier = "Pro/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"
model_identifier = "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"
name = "deepseek-r1-distill-qwen-32b"
api_provider = "SiliconFlow"
price_in = 4.0
@ -117,11 +117,16 @@ model_list = ["siliconflow-deepseek-v3"]
temperature = 0.2 # 模型温度新V3建议0.1-0.3
max_tokens = 800
[model_task_config.planner] #决策:负责决定麦麦该什么的模型
[model_task_config.planner] #决策:负责决定麦麦该什么时候回复的模型
model_list = ["siliconflow-deepseek-v3"]
temperature = 0.3
max_tokens = 800
[model_task_config.planner_small] #副决策:负责决定麦麦该做什么的模型
model_list = ["qwen3-14b"]
temperature = 0.3
max_tokens = 800
[model_task_config.emotion] #负责麦麦的情绪变化
model_list = ["siliconflow-deepseek-v3"]
temperature = 0.3