mirror of https://github.com/Mai-with-u/MaiBot.git
Merge branch 'MaiM-with-u:dev' into dev
commit c898a957b5
@@ -3,12 +3,13 @@
## [0.10.2] - 2025-08-31

### 🌟 Major changes

- Streamlined the personality-related configuration for clearer, more effective customization
- Substantially reworked the chat logic: easier to configure, with dynamic control
- Re-enabled the memory system, now better and more capable
- Improved event system
- Mentions can now be configured to always trigger a reply (100%)

### Detail changes

- Improved event system
- Memory system optimizations
- Added a retry mechanism for empty replies
- Fixed a possible message-repetition issue in the tts plugin

@@ -272,6 +272,9 @@ class FrequencyControl:
            # 限制调整范围
            target_talk_adjust = max(self.min_adjust, min(self.max_adjust, target_talk_adjust))

            # 记录调整前的值
            old_adjust = self.talk_frequency_adjust

            # 平滑调整
            self.talk_frequency_adjust = (
                self.talk_frequency_adjust * (1 - self.smoothing_factor) +
@@ -288,12 +291,21 @@ class FrequencyControl:
                adjust_direction = "不调整(该时段无活跃度)"
            else:
                adjust_direction = "保持"

            # 计算实际变化方向
            actual_change = ""
            if self.talk_frequency_adjust > old_adjust:
                actual_change = f"(实际提高: {old_adjust:.2f}→{self.talk_frequency_adjust:.2f})"
            elif self.talk_frequency_adjust < old_adjust:
                actual_change = f"(实际降低: {old_adjust:.2f}→{self.talk_frequency_adjust:.2f})"
            else:
                actual_change = f"(无变化: {self.talk_frequency_adjust:.2f})"

            logger.info(
                f"{self.log_prefix} 发言频率调整: "
                f"当前: {message_count}消息/{user_count}用户, 人均: {message_count/user_count if user_count > 0 else 0:.2f}消息/用户, "
                f"基准: {current_hour_10min_messages:.2f}消息/{current_hour_10min_users:.2f}用户,人均:{current_hour_10min_messages/current_hour_10min_users if current_hour_10min_users > 0 else 0:.2f}消息/用户, "
                f"调整: {adjust_direction} → {target_talk_adjust:.2f} → {self.talk_frequency_adjust:.2f}"
                f"当前: {message_count}消息/{user_count}用户, 人均: {message_count/user_count if user_count > 0 else 0:.2f}消息|"
                f"基准: {current_hour_10min_messages:.2f}消息/{current_hour_10min_users:.2f}用户,人均:{current_hour_10min_messages/current_hour_10min_users if current_hour_10min_users > 0 else 0:.2f}消息|"
                f"目标调整: {adjust_direction}到{target_talk_adjust:.2f}, 实际结果: {self.talk_frequency_adjust:.2f} {actual_change}"
            )

        except Exception as e:
@@ -368,6 +380,9 @@ class FrequencyControl:
            # 限制调整范围
            target_focus_adjust = max(self.min_adjust, min(self.max_adjust, target_focus_adjust))

            # 记录调整前的值
            old_focus_adjust = self.focus_value_adjust

            # 平滑调整
            self.focus_value_adjust = (
                self.focus_value_adjust * (1 - self.smoothing_factor) +
@@ -388,13 +403,22 @@ class FrequencyControl:
                adjust_direction = "不调整(该时段无活跃度)"
            else:
                adjust_direction = "保持"

            # 计算实际变化方向
            actual_change = ""
            if self.focus_value_adjust > old_focus_adjust:
                actual_change = f"(实际提高: {old_focus_adjust:.2f}→{self.focus_value_adjust:.2f})"
            elif self.focus_value_adjust < old_focus_adjust:
                actual_change = f"(实际降低: {old_focus_adjust:.2f}→{self.focus_value_adjust:.2f})"
            else:
                actual_change = f"(无变化: {self.focus_value_adjust:.2f})"

            logger.info(
                f"{self.log_prefix} 专注度调整(10分钟): "
                f"当前: {message_count}消息/{user_count}用户,人均:{message_count/user_count if user_count > 0 else 0:.2f}消息/用户, "
                f"基准: {current_hour_10min_messages:.2f}消息/{current_hour_10min_users:.2f}用户,人均:{current_hour_10min_messages/current_hour_10min_users if current_hour_10min_users > 0 else 0:.2f}消息/用户, "
                f"当前: {message_count}消息/{user_count}用户,人均:{message_count/user_count if user_count > 0 else 0:.2f}消息|"
                f"基准: {current_hour_10min_messages:.2f}消息/{current_hour_10min_users:.2f}用户,人均:{current_hour_10min_messages/current_hour_10min_users if current_hour_10min_users > 0 else 0:.2f}消息|"
                f"比率: 用户{user_count/current_hour_10min_users if current_hour_10min_users > 0 else 0:.2f}x, 消息{message_count/current_hour_10min_messages if current_hour_10min_messages > 0 else 0:.2f}x, "
                f"调整: {adjust_direction} → {target_focus_adjust:.2f} → {self.focus_value_adjust:.2f}"
                f"目标调整: {adjust_direction}到{target_focus_adjust:.2f}, 实际结果: {self.focus_value_adjust:.2f} {actual_change}"
            )

        except Exception as e:
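
For reference, both frequency hunks above follow the same pattern: clamp a computed target into `[min_adjust, max_adjust]`, then move the current value toward it with an exponential moving average. The sketch below is a minimal standalone illustration of that pattern; the second term of the smoothing sum is truncated in the diff, so its exact form, the parameter defaults, and the helper name are assumptions.

```python
# Minimal sketch of the clamp-then-smooth pattern used above.
# Assumption: the truncated line continues as `+ target * self.smoothing_factor`.
def smooth_adjust(current: float, target: float,
                  smoothing_factor: float = 0.3,
                  min_adjust: float = 0.3, max_adjust: float = 2.0) -> float:
    """Move `current` toward a clamped `target` by an exponential moving average."""
    target = max(min_adjust, min(max_adjust, target))  # 限制调整范围
    return current * (1 - smoothing_factor) + target * smoothing_factor  # 平滑调整

value = 1.0
for _ in range(5):
    value = smooth_adjust(value, target=2.5)  # 2.5 exceeds max_adjust, so it is clamped to 2.0
    print(f"{value:.2f}")  # converges toward 2.0
```
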
@@ -40,40 +40,39 @@ def init_prompt():
"""
|
||||
{time_block}
|
||||
{name_block}
|
||||
你现在需要根据聊天内容,选择合适的action来参与聊天。
|
||||
请你根据以下行事风格来决定action:
|
||||
{plan_style}
|
||||
|
||||
{chat_context_description},以下是具体的聊天内容
|
||||
**聊天内容**
|
||||
{chat_content_block}
|
||||
|
||||
{moderation_prompt}
|
||||
|
||||
现在请你根据聊天内容和用户的最新消息选择合适的action和触发action的消息:
|
||||
**动作记录**
|
||||
{actions_before_now_block}
|
||||
|
||||
动作:no_action
|
||||
动作描述:不进行动作,等待合适的时机
|
||||
- 当你刚刚发送了消息,没有人回复时,选择no_action
|
||||
- 当你一次发送了太多消息,为了避免过于烦人,可以不回复
|
||||
**回复标准**
|
||||
请你根据聊天内容和用户的最新消息选择合适回复或者沉默:
|
||||
1.你可以选择呼叫了你的名字,但是你没有做出回应的消息进行回复
|
||||
2.你可以自然的顺着正在进行的聊天内容进行回复或自然的提出一个问题
|
||||
3.你的兴趣是:{interest}
|
||||
4.如果你刚刚进行了回复,不要对同一个话题重复回应
|
||||
5.请控制你的发言频率,不要太过频繁的发言,当你刚刚发送了消息,没有人回复时,选择no_action
|
||||
6.如果有人对你感到厌烦,请减少回复
|
||||
7.如果有人对你进行攻击,或者情绪激动,请你以合适的方法应对
|
||||
8.最好不要选择图片和表情包作为回复对象
|
||||
{moderation_prompt}
|
||||
|
||||
**动作**
|
||||
保持沉默:no_action
|
||||
{{
|
||||
"action": "no_action",
|
||||
"reason":"不动作的原因"
|
||||
"reason":"不回复的原因"
|
||||
}}
|
||||
|
||||
动作:reply
|
||||
动作描述:参与聊天回复,发送文本进行表达
|
||||
- 你想要闲聊或者随便附和
|
||||
- 有人提到了你,但是你还没有回应
|
||||
- 如果你刚刚进行了回复,不要对同一个话题重复回应
|
||||
进行回复:reply
|
||||
{{
|
||||
"action": "reply",
|
||||
"target_message_id":"想要回复的消息id",
|
||||
"reason":"回复的原因"
|
||||
}}
|
||||
|
||||
你必须从上面列出的可用action中选择一个,并说明触发action的消息id(不是消息原文)和选择该action的原因。消息id格式:m+数字
|
||||
|
||||
请根据动作示例,以严格的 JSON 格式输出,且仅包含 JSON 内容:
|
||||
""",
|
||||
"planner_prompt",
|
||||
|
|
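
The planner template above asks the model for a single strict-JSON object with an `action`, an optional `target_message_id` of the form `m` + digits, and a `reason`. A hypothetical round trip of that contract might look like the sketch below; the raw model output shown is invented for illustration.

```python
import json
import re

# Hypothetical model output following the strict-JSON contract described above.
raw = '{"action": "reply", "target_message_id": "m12", "reason": "有人提到了我,但我还没有回应"}'

plan = json.loads(raw)
assert plan["action"] in {"no_action", "reply"}
if plan["action"] == "reply":
    # 消息id格式:m+数字
    assert re.fullmatch(r"m\d+", plan["target_message_id"])
print(plan["action"], plan.get("target_message_id"), plan["reason"])
```
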
@@ -84,26 +83,29 @@ def init_prompt():
|
|||
{time_block}
|
||||
{name_block}
|
||||
|
||||
{chat_context_description},以下是具体的聊天内容
|
||||
{chat_context_description}
|
||||
**聊天内容**
|
||||
{chat_content_block}
|
||||
|
||||
{moderation_prompt}
|
||||
|
||||
现在,最新的聊天消息引起了你的兴趣,你想要对其中的消息进行回复,回复标准如下:
|
||||
- 你想要闲聊或者随便附和
|
||||
- 有人提到了你,但是你还没有回应
|
||||
- 如果你刚刚进行了回复,不要对同一个话题重复回应
|
||||
|
||||
你之前的动作记录:
|
||||
**动作记录**
|
||||
{actions_before_now_block}
|
||||
|
||||
**回复标准**
|
||||
请你选择合适的消息进行回复:
|
||||
1.你可以选择呼叫了你的名字,但是你没有做出回应的消息进行回复
|
||||
2.你可以自然的顺着正在进行的聊天内容进行回复,或者自然的提出一个问题
|
||||
3.你的兴趣是{interest}
|
||||
4.如果有人对你感到厌烦,请你不要太积极的提问或是表达,可以进行顺从
|
||||
5.如果有人对你进行攻击,或者情绪激动,请你以合适的方法应对
|
||||
6.最好不要选择图片和表情包作为回复对象
|
||||
7.{moderation_prompt}
|
||||
|
||||
请你从新消息中选出一条需要回复的消息并输出其id,输出格式如下:
|
||||
{{
|
||||
"action": "reply",
|
||||
"target_message_id":"想要回复的消息id,消息id格式:m+数字",
|
||||
"reason":"回复的原因"
|
||||
}}
|
||||
|
||||
请根据示例,以严格的 JSON 格式输出,且仅包含 JSON 内容:
|
||||
""",
|
||||
"planner_reply_prompt",
|
||||
|
|
@@ -127,12 +129,18 @@ def init_prompt():
|
|||
"""
|
||||
{name_block}
|
||||
|
||||
{chat_context_description},{time_block},现在请你根据以下聊天内容,选择一个或多个action来参与聊天。如果没有合适的action,请选择no_action。,
|
||||
{chat_context_description},{time_block},现在请你根据以下聊天内容,选择一个或多个合适的action。如果没有合适的action,请选择no_action。,
|
||||
{chat_content_block}
|
||||
|
||||
{moderation_prompt}
|
||||
现在请你根据聊天内容和用户的最新消息选择合适的action和触发action的消息:
|
||||
**要求**
|
||||
1.action必须符合使用条件,如果符合条件,就选择
|
||||
2.如果聊天内容不适合使用action,即使符合条件,也不要使用
|
||||
3.{moderation_prompt}
|
||||
4.请注意如果相同的内容已经被执行,请不要重复执行
|
||||
这是你最近执行过的动作:
|
||||
{actions_before_now_block}
|
||||
|
||||
**可用的action**
|
||||
|
||||
no_action:不选择任何动作
|
||||
{{
|
||||
|
|
@@ -142,9 +150,6 @@ no_action:不选择任何动作
|
|||
|
||||
{action_options_text}
|
||||
|
||||
这是你最近执行过的动作,请注意如果相同的内容已经被执行,请不要重复执行:
|
||||
{actions_before_now_block}
|
||||
|
||||
请选择,并说明触发action的消息id和选择该action的原因。消息id格式:m+数字
|
||||
请根据动作示例,以严格的 JSON 格式输出,且仅包含 JSON 内容:
|
||||
""",
|
||||
|
|
@@ -593,6 +598,7 @@ class ActionPlanner:
|
|||
chat_content_block=chat_content_block,
|
||||
# actions_before_now_block=actions_before_now_block,
|
||||
message_id_list=message_id_list,
|
||||
interest=global_config.personality.interest,
|
||||
)
|
||||
|
||||
# --- 调用 LLM (普通文本生成) ---
|
||||
|
|
@@ -753,6 +759,7 @@ class ActionPlanner:
|
|||
mode: ChatMode = ChatMode.FOCUS,
|
||||
# actions_before_now_block :str = "",
|
||||
chat_content_block: str = "",
|
||||
interest: str = "",
|
||||
) -> tuple[str, List[Tuple[str, "DatabaseMessages"]]]: # sourcery skip: use-join
|
||||
"""构建 Planner LLM 的提示词 (获取模板并填充数据)"""
|
||||
try:
|
||||
|
|
@@ -830,7 +837,7 @@ class ActionPlanner:
|
|||
# action_options_text=action_options_block,
|
||||
moderation_prompt=moderation_prompt_block,
|
||||
name_block=name_block,
|
||||
plan_style=global_config.personality.plan_style,
|
||||
interest=interest,
|
||||
)
|
||||
else:
|
||||
planner_prompt_template = await global_prompt_manager.get_prompt_async("planner_reply_prompt")
|
||||
|
|
@@ -841,6 +848,7 @@ class ActionPlanner:
|
|||
moderation_prompt=moderation_prompt_block,
|
||||
name_block=name_block,
|
||||
actions_before_now_block=actions_before_now_block,
|
||||
interest=interest,
|
||||
)
|
||||
return prompt, message_id_list
|
||||
except Exception as e:
|
||||
|
|
|
|||
|
|
@@ -51,11 +51,14 @@ def init_prompt():
|
|||
{chat_info}
|
||||
{identity}
|
||||
|
||||
你正在{chat_target_2},{reply_target_block}
|
||||
对这句话,你想表达,原句:{raw_reply},原因是:{reason}。你现在要思考怎么组织回复
|
||||
你现在的心情是:{mood_state}
|
||||
你正在{chat_target_2},{reply_target_block}
|
||||
你想要对上述的发言进行回复,回复的具体内容(原句)是:{raw_reply}
|
||||
原因是:{reason}
|
||||
现在请你将这条具体内容改写成一条适合在群聊中发送的回复消息。
|
||||
你需要使用合适的语法和句法,参考聊天内容,组织一条日常且口语化的回复。请你修改你想表达的原句,符合你的表达风格和语言习惯
|
||||
{reply_style},你可以完全重组回复,保留最基本的表达含义就好,但重组后保持语意通顺。
|
||||
{reply_style}
|
||||
你可以完全重组回复,保留最基本的表达含义就好,但重组后保持语意通顺。
|
||||
{keywords_reaction_prompt}
|
||||
{moderation_prompt}
|
||||
不要输出多余内容(包括前后缀,冒号和引号,括号,表情包,emoji,at或 @等 ),只输出一条回复就好。
|
||||
|
|
@@ -66,44 +69,39 @@ def init_prompt():
|
|||
|
||||
# s4u 风格的 prompt 模板
|
||||
Prompt(
|
||||
"""
|
||||
{expression_habits_block}{tool_info_block}
|
||||
{knowledge_prompt}{memory_block}{relation_info_block}
|
||||
{extra_info_block}
|
||||
{identity}
|
||||
{action_descriptions}
|
||||
{time_block}
|
||||
你现在的主要任务是和 {sender_name} 聊天。同时,也有其他用户会参与聊天,你可以参考他们的回复内容,但是你现在想回复{sender_name}的发言。
|
||||
"""{identity}
|
||||
你正在群聊中聊天,你想要回复 {sender_name} 的发言。同时,也有其他用户会参与聊天,你可以参考他们的回复内容,但是你现在想回复{sender_name}的发言。
|
||||
|
||||
{time_block}
|
||||
{background_dialogue_prompt}
|
||||
{core_dialogue_prompt}
|
||||
|
||||
{expression_habits_block}{tool_info_block}
|
||||
{knowledge_prompt}{memory_block}{relation_info_block}
|
||||
{extra_info_block}
|
||||
|
||||
{reply_target_block}
|
||||
|
||||
|
||||
你现在的心情是:{mood_state}
|
||||
你的心情:{mood_state}
|
||||
{reply_style}
|
||||
注意不要复读你说过的话
|
||||
{keywords_reaction_prompt}
|
||||
请注意不要输出多余内容(包括前后缀,冒号和引号,at或 @等 )。只输出回复内容。
|
||||
{moderation_prompt}
|
||||
不要输出多余内容(包括前后缀,冒号和引号,括号(),表情包,emoji,at或 @等 )。只输出一条回复就好
|
||||
现在,你说:
|
||||
""",
|
||||
现在,你说:""",
|
||||
"replyer_prompt",
|
||||
)
|
||||
|
||||
Prompt(
|
||||
"""
|
||||
{expression_habits_block}{tool_info_block}
|
||||
{knowledge_prompt}{memory_block}{relation_info_block}
|
||||
{extra_info_block}
|
||||
{identity}
|
||||
{action_descriptions}
|
||||
"""{identity}
|
||||
{time_block}
|
||||
你现在正在一个QQ群里聊天,以下是正在进行的聊天内容:
|
||||
{background_dialogue_prompt}
|
||||
|
||||
{expression_habits_block}{tool_info_block}
|
||||
{knowledge_prompt}{memory_block}{relation_info_block}
|
||||
{extra_info_block}
|
||||
|
||||
你现在想补充说明你刚刚自己的发言内容:{target},原因是{reason}
|
||||
请你根据聊天内容,组织一条新回复。注意,{target} 是刚刚你自己的发言,你要在这基础上进一步发言,请按照你自己的角度来继续进行回复。
|
||||
注意保持上下文的连贯性。
|
||||
|
|
@@ -638,9 +636,12 @@ class DefaultReplyer:
|
|||
"""构建动作提示"""
|
||||
|
||||
action_descriptions = ""
|
||||
skip_names = ["emoji", "build_memory", "build_relation", "reply"]
|
||||
if available_actions:
|
||||
action_descriptions = "除了进行回复之外,你可以做以下这些动作,不过这些动作由另一个模型决定:\n"
|
||||
for action_name, action_info in available_actions.items():
|
||||
if action_name in skip_names:
|
||||
continue
|
||||
action_description = action_info.description
|
||||
action_descriptions += f"- {action_name}: {action_description}\n"
|
||||
action_descriptions += "\n"
|
||||
|
|
@@ -649,7 +650,7 @@ class DefaultReplyer:
|
|||
if chosen_actions_info:
|
||||
for action_plan_info in chosen_actions_info:
|
||||
action_name = action_plan_info.action_type
|
||||
if action_name == "reply":
|
||||
if action_name in skip_names:
|
||||
continue
|
||||
action_description: str = "无描述"
|
||||
reasoning: str = "无原因"
|
||||
|
|
@@ -673,18 +674,18 @@ class DefaultReplyer:
|
|||
bot_nickname = ""
|
||||
|
||||
prompt_personality = (
|
||||
f"{global_config.personality.personality_core};{global_config.personality.personality_side}"
|
||||
f"{global_config.personality.personality};"
|
||||
)
|
||||
return f"你的名字是{bot_name}{bot_nickname},你{prompt_personality}"
|
||||
|
||||
async def build_prompt_reply_context(
|
||||
self,
|
||||
reply_message: DatabaseMessages,
|
||||
extra_info: str = "",
|
||||
reply_reason: str = "",
|
||||
available_actions: Optional[Dict[str, ActionInfo]] = None,
|
||||
chosen_actions: Optional[List[ActionPlannerInfo]] = None,
|
||||
enable_tool: bool = True,
|
||||
reply_message: Optional[DatabaseMessages] = None,
|
||||
) -> Tuple[str, List[int]]:
|
||||
"""
|
||||
构建回复器上下文
|
||||
|
|
@@ -725,6 +726,7 @@ class DefaultReplyer:
|
|||
mood_prompt = chat_mood.mood_state
|
||||
|
||||
target = replace_user_references(target, chat_stream.platform, replace_bot_name=True)
|
||||
target = re.sub(r"\\[picid:[^\\]]+\\]", "[图片]", target)
|
||||
|
||||
message_list_before_now_long = get_raw_msg_before_timestamp_with_chat(
|
||||
chat_id=chat_id,
|
||||
|
|
@@ -780,14 +782,14 @@ class DefaultReplyer:
|
|||
for name, result, duration in task_results:
|
||||
results_dict[name] = result
|
||||
chinese_name = task_name_mapping.get(name, name)
|
||||
if duration < 0.01:
|
||||
if duration < 0.1:
|
||||
almost_zero_str += f"{chinese_name},"
|
||||
continue
|
||||
|
||||
timing_logs.append(f"{chinese_name}: {duration:.1f}s")
|
||||
if duration > 8:
|
||||
logger.warning(f"回复生成前信息获取耗时过长: {chinese_name} 耗时: {duration:.1f}s,请使用更快的模型")
|
||||
logger.info(f"回复准备: {'; '.join(timing_logs)}; {almost_zero_str} <0.01s")
|
||||
logger.info(f"回复准备: {'; '.join(timing_logs)}; {almost_zero_str} <0.1s")
|
||||
|
||||
expression_habits_block, selected_expressions = results_dict["expression_habits"]
|
||||
expression_habits_block: str
|
||||
|
|
@@ -809,6 +811,11 @@ class DefaultReplyer:
|
|||
|
||||
moderation_prompt_block = "请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。"
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
if sender:
|
||||
if is_group_chat:
|
||||
reply_target_block = (
|
||||
|
|
@@ -879,6 +886,8 @@ class DefaultReplyer:
|
|||
is_group_chat = bool(chat_stream.group_info)
|
||||
|
||||
sender, target = self._parse_reply_target(reply_to)
|
||||
target = replace_user_references(target, chat_stream.platform, replace_bot_name=True)
|
||||
target = re.sub(r"\\[picid:[^\\]]+\\]", "[图片]", target)
|
||||
|
||||
# 添加情绪状态获取
|
||||
if global_config.mood.enable_mood:
|
||||
|
|
@@ -1008,6 +1017,8 @@ class DefaultReplyer:
|
|||
# 直接使用已初始化的模型实例
|
||||
logger.info(f"使用模型集生成回复: {', '.join(map(str, self.express_model.model_for_task.model_list))}")
|
||||
|
||||
logger.info(f"\n{prompt}\n")
|
||||
|
||||
if global_config.debug.show_prompt:
|
||||
logger.info(f"\n{prompt}\n")
|
||||
else:
|
||||
|
|
|
|||
|
|
@@ -728,7 +728,7 @@ class StatisticOutputTask(AsyncTask):
|
|||
f"<td>{stat_data[STD_TIME_COST_BY_MODEL][model_name]:.1f} 秒</td>"
|
||||
f"</tr>"
|
||||
for model_name, count in sorted(stat_data[REQ_CNT_BY_MODEL].items())
|
||||
]
|
||||
] if stat_data[REQ_CNT_BY_MODEL] else ["<tr><td colspan='8' style='text-align: center; color: #999;'>暂无数据</td></tr>"]
|
||||
)
|
||||
# 按请求类型分类统计
|
||||
type_rows = "\n".join(
|
||||
|
|
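
The statistics hunks in this file all make the same change: each joined table body gains a fallback row when its source dict is empty, so the report no longer renders an empty `<tbody>`. A minimal standalone sketch of that pattern (the sample data is invented):

```python
# Sketch of the empty-data fallback added to each statistics table above.
req_cnt_by_model = {}  # imagine no requests were recorded in this period

model_rows = "\n".join(
    [f"<tr><td>{name}</td><td>{count}</td></tr>" for name, count in sorted(req_cnt_by_model.items())]
    if req_cnt_by_model
    else ["<tr><td colspan='8' style='text-align: center; color: #999;'>暂无数据</td></tr>"]
)
print(model_rows)  # prints the single placeholder row
```
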
@@ -744,7 +744,7 @@ class StatisticOutputTask(AsyncTask):
|
|||
f"<td>{stat_data[STD_TIME_COST_BY_TYPE][req_type]:.1f} 秒</td>"
|
||||
f"</tr>"
|
||||
for req_type, count in sorted(stat_data[REQ_CNT_BY_TYPE].items())
|
||||
]
|
||||
] if stat_data[REQ_CNT_BY_TYPE] else ["<tr><td colspan='8' style='text-align: center; color: #999;'>暂无数据</td></tr>"]
|
||||
)
|
||||
# 按模块分类统计
|
||||
module_rows = "\n".join(
|
||||
|
|
@@ -760,7 +760,7 @@ class StatisticOutputTask(AsyncTask):
|
|||
f"<td>{stat_data[STD_TIME_COST_BY_MODULE][module_name]:.1f} 秒</td>"
|
||||
f"</tr>"
|
||||
for module_name, count in sorted(stat_data[REQ_CNT_BY_MODULE].items())
|
||||
]
|
||||
] if stat_data[REQ_CNT_BY_MODULE] else ["<tr><td colspan='8' style='text-align: center; color: #999;'>暂无数据</td></tr>"]
|
||||
)
|
||||
|
||||
# 聊天消息统计
|
||||
|
|
@@ -768,7 +768,7 @@ class StatisticOutputTask(AsyncTask):
|
|||
[
|
||||
f"<tr><td>{self.name_mapping[chat_id][0]}</td><td>{count}</td></tr>"
|
||||
for chat_id, count in sorted(stat_data[MSG_CNT_BY_CHAT].items())
|
||||
]
|
||||
] if stat_data[MSG_CNT_BY_CHAT] else ["<tr><td colspan='2' style='text-align: center; color: #999;'>暂无数据</td></tr>"]
|
||||
)
|
||||
# 生成HTML
|
||||
return f"""
|
||||
|
|
@@ -820,145 +820,192 @@ class StatisticOutputTask(AsyncTask):
|
|||
</tbody>
|
||||
</table>
|
||||
|
||||
|
||||
<h2>数据分布图表</h2>
|
||||
<div style="display: flex; flex-wrap: wrap; gap: 20px; margin-top: 20px;">
|
||||
<div style="flex: 1; min-width: 300px;">
|
||||
<h3>模型花费分布</h3>
|
||||
<canvas id="modelPieChart_{div_id}" width="300" height="300"></canvas>
|
||||
</div>
|
||||
<div style="flex: 1; min-width: 300px;">
|
||||
<h3>模块花费分布</h3>
|
||||
<canvas id="modulePieChart_{div_id}" width="300" height="300"></canvas>
|
||||
</div>
|
||||
<div style="flex: 1; min-width: 300px;">
|
||||
<h3>请求类型花费分布</h3>
|
||||
<canvas id="typePieChart_{div_id}" width="300" height="300"></canvas>
|
||||
</div>
|
||||
<div style="flex: 1; min-width: 300px;">
|
||||
<h3>聊天消息分布</h3>
|
||||
<canvas id="chatPieChart_{div_id}" width="300" height="300"></canvas>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script>
|
||||
// 为当前统计卡片创建饼图
|
||||
createPieCharts_{div_id}();
|
||||
document.addEventListener('DOMContentLoaded', function() {{
|
||||
createPieCharts_{div_id}();
|
||||
}});
|
||||
|
||||
function createPieCharts_{div_id}() {{
|
||||
const colors = ['#3498db', '#e74c3c', '#2ecc71', '#f39c12', '#9b59b6', '#1abc9c', '#34495e', '#e67e22', '#95a5a6', '#f1c40f'];
|
||||
|
||||
// 模型调用次数饼图
|
||||
const modelData = {{
|
||||
labels: {[f'"{model_name}"' for model_name in sorted(stat_data[REQ_CNT_BY_MODEL].keys())]},
|
||||
datasets: [{{
|
||||
data: {[stat_data[REQ_CNT_BY_MODEL][model_name] for model_name in sorted(stat_data[REQ_CNT_BY_MODEL].keys())]},
|
||||
backgroundColor: colors[:len(stat_data[REQ_CNT_BY_MODEL])],
|
||||
borderColor: colors[:len(stat_data[REQ_CNT_BY_MODEL])],
|
||||
borderWidth: 2
|
||||
}}]
|
||||
}};
|
||||
|
||||
new Chart(document.getElementById('modelPieChart_{div_id}'), {{
|
||||
type: 'pie',
|
||||
data: modelData,
|
||||
options: {{
|
||||
responsive: true,
|
||||
plugins: {{
|
||||
legend: {{
|
||||
position: 'bottom'
|
||||
}},
|
||||
tooltip: {{
|
||||
callbacks: {{
|
||||
label: function(context) {{
|
||||
const total = context.dataset.data.reduce((a, b) => a + b, 0);
|
||||
const percentage = ((context.parsed / total) * 100).toFixed(1);
|
||||
return context.label + ': ' + context.parsed + ' (' + percentage + '%)';
|
||||
// 模型花费分布饼图
|
||||
const modelLabels = {list(sorted(stat_data[COST_BY_MODEL].keys())) if stat_data[COST_BY_MODEL] else []};
|
||||
if (modelLabels.length > 0) {{
|
||||
const modelData = {{
|
||||
labels: modelLabels,
|
||||
datasets: [{{
|
||||
data: {[stat_data[COST_BY_MODEL][model_name] for model_name in sorted(stat_data[COST_BY_MODEL].keys())] if stat_data[COST_BY_MODEL] else []},
|
||||
backgroundColor: colors.slice(0, {len(stat_data[COST_BY_MODEL]) if stat_data[COST_BY_MODEL] else 0}),
|
||||
borderColor: colors.slice(0, {len(stat_data[COST_BY_MODEL]) if stat_data[COST_BY_MODEL] else 0}),
|
||||
borderWidth: 2
|
||||
}}]
|
||||
}};
|
||||
|
||||
new Chart(document.getElementById('modelPieChart_{div_id}'), {{
|
||||
type: 'pie',
|
||||
data: modelData,
|
||||
options: {{
|
||||
responsive: true,
|
||||
plugins: {{
|
||||
legend: {{
|
||||
position: 'bottom'
|
||||
}},
|
||||
tooltip: {{
|
||||
callbacks: {{
|
||||
label: function(context) {{
|
||||
const total = context.dataset.data.reduce((a, b) => a + b, 0);
|
||||
const percentage = ((context.parsed / total) * 100).toFixed(1);
|
||||
return context.label + ': ¥' + context.parsed.toFixed(2) + ' (' + percentage + '%)';
|
||||
}}
|
||||
}}
|
||||
}}
|
||||
}}
|
||||
}}
|
||||
}}
|
||||
}});
|
||||
}});
|
||||
}} else {{
|
||||
document.getElementById('modelPieChart_{div_id}').style.display = 'none';
|
||||
document.querySelector('#modelPieChart_{div_id}').parentElement.querySelector('h3').textContent = '模型花费分布 (无数据)';
|
||||
}}
|
||||
|
||||
// 模块调用次数饼图
|
||||
const moduleData = {{
|
||||
labels: {[f'"{module_name}"' for module_name in sorted(stat_data[REQ_CNT_BY_MODULE].keys())]},
|
||||
datasets: [{{
|
||||
data: {[stat_data[REQ_CNT_BY_MODULE][module_name] for module_name in sorted(stat_data[REQ_CNT_BY_MODULE].keys())]},
|
||||
backgroundColor: colors[:len(stat_data[REQ_CNT_BY_MODULE])],
|
||||
borderColor: colors[:len(stat_data[REQ_CNT_BY_MODULE])],
|
||||
borderWidth: 2
|
||||
}}]
|
||||
}};
|
||||
|
||||
new Chart(document.getElementById('modulePieChart_{div_id}'), {{
|
||||
type: 'pie',
|
||||
data: moduleData,
|
||||
options: {{
|
||||
responsive: true,
|
||||
plugins: {{
|
||||
legend: {{
|
||||
position: 'bottom'
|
||||
}},
|
||||
tooltip: {{
|
||||
callbacks: {{
|
||||
label: function(context) {{
|
||||
const total = context.dataset.data.reduce((a, b) => a + b, 0);
|
||||
const percentage = ((context.parsed / total) * 100).toFixed(1);
|
||||
return context.label + ': ' + context.parsed + ' (' + percentage + '%)';
|
||||
// 模块花费分布饼图
|
||||
const moduleLabels = {list(sorted(stat_data[COST_BY_MODULE].keys())) if stat_data[COST_BY_MODULE] else []};
|
||||
if (moduleLabels.length > 0) {{
|
||||
const moduleData = {{
|
||||
labels: moduleLabels,
|
||||
datasets: [{{
|
||||
data: {[stat_data[COST_BY_MODULE][module_name] for module_name in sorted(stat_data[COST_BY_MODULE].keys())] if stat_data[COST_BY_MODULE] else []},
|
||||
backgroundColor: colors.slice(0, {len(stat_data[COST_BY_MODULE]) if stat_data[COST_BY_MODULE] else 0}),
|
||||
borderColor: colors.slice(0, {len(stat_data[COST_BY_MODULE]) if stat_data[COST_BY_MODULE] else 0}),
|
||||
borderWidth: 2
|
||||
}}]
|
||||
}};
|
||||
|
||||
new Chart(document.getElementById('modulePieChart_{div_id}'), {{
|
||||
type: 'pie',
|
||||
data: moduleData,
|
||||
options: {{
|
||||
responsive: true,
|
||||
plugins: {{
|
||||
legend: {{
|
||||
position: 'bottom'
|
||||
}},
|
||||
tooltip: {{
|
||||
callbacks: {{
|
||||
label: function(context) {{
|
||||
const total = context.dataset.data.reduce((a, b) => a + b, 0);
|
||||
const percentage = ((context.parsed / total) * 100).toFixed(1);
|
||||
return context.label + ': ¥' + context.parsed.toFixed(2) + ' (' + percentage + '%)';
|
||||
}}
|
||||
}}
|
||||
}}
|
||||
}}
|
||||
}}
|
||||
}}
|
||||
}});
|
||||
}});
|
||||
}} else {{
|
||||
document.getElementById('modulePieChart_{div_id}').style.display = 'none';
|
||||
document.querySelector('#modulePieChart_{div_id}').parentElement.querySelector('h3').textContent = '模块花费分布 (无数据)';
|
||||
}}
|
||||
|
||||
// 请求类型分布饼图
|
||||
const typeData = {{
|
||||
labels: {[f'"{req_type}"' for req_type in sorted(stat_data[REQ_CNT_BY_TYPE].keys())]},
|
||||
datasets: [{{
|
||||
data: {[stat_data[REQ_CNT_BY_TYPE][req_type] for req_type in sorted(stat_data[REQ_CNT_BY_TYPE].keys())]},
|
||||
backgroundColor: colors[:len(stat_data[REQ_CNT_BY_TYPE])],
|
||||
borderColor: colors[:len(stat_data[REQ_CNT_BY_TYPE])],
|
||||
borderWidth: 2
|
||||
}}]
|
||||
}};
|
||||
|
||||
new Chart(document.getElementById('typePieChart_{div_id}'), {{
|
||||
type: 'pie',
|
||||
data: typeData,
|
||||
options: {{
|
||||
responsive: true,
|
||||
plugins: {{
|
||||
legend: {{
|
||||
position: 'bottom'
|
||||
}},
|
||||
tooltip: {{
|
||||
callbacks: {{
|
||||
label: function(context) {{
|
||||
const total = context.dataset.data.reduce((a, b) => a + b, 0);
|
||||
const percentage = ((context.parsed / total) * 100).toFixed(1);
|
||||
return context.label + ': ' + context.parsed + ' (' + percentage + '%)';
|
||||
// 请求类型花费分布饼图
|
||||
const typeLabels = {list(sorted(stat_data[COST_BY_TYPE].keys())) if stat_data[COST_BY_TYPE] else []};
|
||||
if (typeLabels.length > 0) {{
|
||||
const typeData = {{
|
||||
labels: typeLabels,
|
||||
datasets: [{{
|
||||
data: {[stat_data[COST_BY_TYPE][req_type] for req_type in sorted(stat_data[COST_BY_TYPE].keys())] if stat_data[COST_BY_TYPE] else []},
|
||||
backgroundColor: colors.slice(0, {len(stat_data[COST_BY_TYPE]) if stat_data[COST_BY_TYPE] else 0}),
|
||||
borderColor: colors.slice(0, {len(stat_data[COST_BY_TYPE]) if stat_data[COST_BY_TYPE] else 0}),
|
||||
borderWidth: 2
|
||||
}}]
|
||||
}};
|
||||
|
||||
new Chart(document.getElementById('typePieChart_{div_id}'), {{
|
||||
type: 'pie',
|
||||
data: typeData,
|
||||
options: {{
|
||||
responsive: true,
|
||||
plugins: {{
|
||||
legend: {{
|
||||
position: 'bottom'
|
||||
}},
|
||||
tooltip: {{
|
||||
callbacks: {{
|
||||
label: function(context) {{
|
||||
const total = context.dataset.data.reduce((a, b) => a + b, 0);
|
||||
const percentage = ((context.parsed / total) * 100).toFixed(1);
|
||||
return context.label + ': ¥' + context.parsed.toFixed(2) + ' (' + percentage + '%)';
|
||||
}}
|
||||
}}
|
||||
}}
|
||||
}}
|
||||
}}
|
||||
}}
|
||||
}});
|
||||
}});
|
||||
}} else {{
|
||||
document.getElementById('typePieChart_{div_id}').style.display = 'none';
|
||||
document.querySelector('#typePieChart_{div_id}').parentElement.querySelector('h3').textContent = '请求类型花费分布 (无数据)';
|
||||
}}
|
||||
|
||||
// 聊天消息分布饼图
|
||||
const chatData = {{
|
||||
labels: {[f'"{self.name_mapping[chat_id][0]}"' for chat_id in sorted(stat_data[MSG_CNT_BY_CHAT].keys())]},
|
||||
datasets: [{{
|
||||
data: {[stat_data[MSG_CNT_BY_CHAT][chat_id] for chat_id in sorted(stat_data[MSG_CNT_BY_CHAT].keys())]},
|
||||
backgroundColor: colors[:len(stat_data[MSG_CNT_BY_CHAT])],
|
||||
borderColor: colors[:len(stat_data[MSG_CNT_BY_CHAT])],
|
||||
borderWidth: 2
|
||||
}}]
|
||||
}};
|
||||
|
||||
new Chart(document.getElementById('chatPieChart_{div_id}'), {{
|
||||
type: 'pie',
|
||||
data: chatData,
|
||||
options: {{
|
||||
responsive: true,
|
||||
plugins: {{
|
||||
legend: {{
|
||||
position: 'bottom'
|
||||
}},
|
||||
tooltip: {{
|
||||
callbacks: {{
|
||||
label: function(context) {{
|
||||
const total = context.dataset.data.reduce((a, b) => a + b, 0);
|
||||
const percentage = ((context.parsed / total) * 100).toFixed(1);
|
||||
return context.label + ': ' + context.parsed + ' (' + percentage + '%)';
|
||||
const chatLabels = {[self.name_mapping[chat_id][0] for chat_id in sorted(stat_data[MSG_CNT_BY_CHAT].keys())] if stat_data[MSG_CNT_BY_CHAT] else []};
|
||||
if (chatLabels.length > 0) {{
|
||||
const chatData = {{
|
||||
labels: chatLabels,
|
||||
datasets: [{{
|
||||
data: {[stat_data[MSG_CNT_BY_CHAT][chat_id] for chat_id in sorted(stat_data[MSG_CNT_BY_CHAT].keys())] if stat_data[MSG_CNT_BY_CHAT] else []},
|
||||
backgroundColor: colors.slice(0, {len(stat_data[MSG_CNT_BY_CHAT]) if stat_data[MSG_CNT_BY_CHAT] else 0}),
|
||||
borderColor: colors.slice(0, {len(stat_data[MSG_CNT_BY_CHAT]) if stat_data[MSG_CNT_BY_CHAT] else 0}),
|
||||
borderWidth: 2
|
||||
}}]
|
||||
}};
|
||||
|
||||
new Chart(document.getElementById('chatPieChart_{div_id}'), {{
|
||||
type: 'pie',
|
||||
data: chatData,
|
||||
options: {{
|
||||
responsive: true,
|
||||
plugins: {{
|
||||
legend: {{
|
||||
position: 'bottom'
|
||||
}},
|
||||
tooltip: {{
|
||||
callbacks: {{
|
||||
label: function(context) {{
|
||||
const total = context.dataset.data.reduce((a, b) => a + b, 0);
|
||||
const percentage = ((context.parsed / total) * 100).toFixed(1);
|
||||
return context.label + ': ' + context.parsed + ' (' + percentage + '%)';
|
||||
}}
|
||||
}}
|
||||
}}
|
||||
}}
|
||||
}}
|
||||
}}
|
||||
}});
|
||||
}});
|
||||
}} else {{
|
||||
document.getElementById('chatPieChart_{div_id}').style.display = 'none';
|
||||
document.querySelector('#chatPieChart_{div_id}').parentElement.querySelector('h3').textContent = '聊天消息分布 (无数据)';
|
||||
}}
|
||||
}}
|
||||
</script>
|
||||
|
||||
</div>
|
||||
"""
|
||||
|
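
The large hunk above rewrites the generated Chart.js section: each pie switches its data source from request counts to cost (`COST_BY_*`), and every chart is wrapped in a `labels.length > 0` guard that hides the canvas and retitles the heading when there is no data. A hypothetical sketch of how the Python side can emit one such guarded block is shown below; the variable names and sample costs are invented.

```python
# Hypothetical emitter for one guarded pie-chart block (invented sample data).
cost_by_model = {"model-a": 0.42, "model-b": 1.37}
div_id = "demo"

labels = sorted(cost_by_model.keys())
snippet = f"""
const modelLabels_{div_id} = {labels};
if (modelLabels_{div_id}.length > 0) {{
    const data = {[round(cost_by_model[m], 2) for m in labels]};
    // ... new Chart(document.getElementById('modelPieChart_{div_id}'), {{...}}) ...
}} else {{
    document.getElementById('modelPieChart_{div_id}').style.display = 'none';
}}
"""
print(snippet)
```
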
@@ -56,7 +56,7 @@ TEMPLATE_DIR = os.path.join(PROJECT_ROOT, "template")

# 考虑到,实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码
# 对该字段的更新,请严格参照语义化版本规范:https://semver.org/lang/zh-CN/
MMC_VERSION = "0.10.2-snapshot.2"
MMC_VERSION = "0.10.2-snapshot.3"


def get_key_comment(toml_table, key):

@@ -35,21 +35,15 @@ class BotConfig(ConfigBase):
class PersonalityConfig(ConfigBase):
    """人格配置类"""

    personality_core: str
    """核心人格"""
    personality: str
    """人格"""

    personality_side: str
    """人格侧写"""

    identity: str = ""
    """身份特征"""
    emotion_style: str
    """情感特征"""

    reply_style: str = ""
    """表达风格"""

    plan_style: str = ""
    """行为风格"""

    interest: str = ""
    """兴趣"""

@@ -178,7 +178,7 @@ class ChatAction:
        else:
            bot_nickname = ""

        prompt_personality = global_config.personality.personality_core
        prompt_personality = global_config.personality.personality
        indentify_block = f"你的名字是{bot_name}{bot_nickname},你{prompt_personality}:"

        try:
@@ -241,7 +241,7 @@ class ChatAction:
        else:
            bot_nickname = ""

        prompt_personality = global_config.personality.personality_core
        prompt_personality = global_config.personality.personality
        indentify_block = f"你的名字是{bot_name}{bot_nickname},你{prompt_personality}:"
        try:
            # 冷却池处理:过滤掉冷却中的动作

@@ -182,7 +182,7 @@ class ChatMood:
        else:
            bot_nickname = ""

        prompt_personality = global_config.personality.personality_core
        prompt_personality = global_config.personality.personality
        indentify_block = f"你的名字是{bot_name}{bot_nickname},你{prompt_personality}:"

        async def _update_text_mood():
@@ -261,7 +261,7 @@ class ChatMood:
        else:
            bot_nickname = ""

        prompt_personality = global_config.personality.personality_core
        prompt_personality = global_config.personality.personality
        indentify_block = f"你的名字是{bot_name}{bot_nickname},你{prompt_personality}:"

        async def _regress_text_mood():

@@ -25,7 +25,8 @@ def init_prompt():
你刚刚的情绪状态是:{mood_state}

现在,发送了消息,引起了你的注意,你对其进行了阅读和思考,请你输出一句话描述你新的情绪状态
请只输出情绪状态,不要输出其他内容:
你的情绪特点是:{emotion_style}
请只输出新的情绪状态,不要输出其他内容:
""",
        "change_mood_prompt",
    )
@@ -38,7 +39,8 @@ def init_prompt():
你之前的情绪状态是:{mood_state}

距离你上次关注群里消息已经过去了一段时间,你冷静了下来,请你输出一句话描述你现在的情绪状态
请只输出情绪状态,不要输出其他内容:
你的情绪特点是:{emotion_style}
请只输出新的情绪状态,不要输出其他内容:
""",
        "regress_mood_prompt",
    )
@@ -115,14 +117,14 @@ class ChatMood:
        else:
            bot_nickname = ""

        prompt_personality = global_config.personality.personality_core
        identity_block = f"你的名字是{bot_name}{bot_nickname},你{prompt_personality}:"
        identity_block = f"你的名字是{bot_name}{bot_nickname}"

        prompt = await global_prompt_manager.format_prompt(
            "change_mood_prompt",
            chat_talking_prompt=chat_talking_prompt,
            identity_block=identity_block,
            mood_state=self.mood_state,
            emotion_style=global_config.personality.emotion_style,
        )

        response, (reasoning_content, _, _) = await self.mood_model.generate_response_async(
@@ -164,14 +166,14 @@ class ChatMood:
        else:
            bot_nickname = ""

        prompt_personality = global_config.personality.personality_core
        identity_block = f"你的名字是{bot_name}{bot_nickname},你{prompt_personality}:"
        identity_block = f"你的名字是{bot_name}{bot_nickname}"

        prompt = await global_prompt_manager.format_prompt(
            "regress_mood_prompt",
            chat_talking_prompt=chat_talking_prompt,
            identity_block=identity_block,
            mood_state=self.mood_state,
            emotion_style=global_config.personality.emotion_style,
        )

        response, (reasoning_content, _, _) = await self.mood_model.generate_response_async(

@@ -11,6 +11,7 @@
import time
from typing import List, Dict, Any, Tuple, Optional
from src.common.data_models.database_data_model import DatabaseMessages
from src.common.database.database_model import Images
from src.config.config import global_config
from src.chat.utils.chat_message_builder import (
    get_raw_msg_by_timestamp,
@@ -488,3 +489,15 @@ def filter_mai_messages(messages: List[DatabaseMessages]) -> List[DatabaseMessag
        过滤后的消息列表
    """
    return [msg for msg in messages if msg.user_info.user_id != str(global_config.bot.qq_account)]


def translate_pid_to_description(pid: str) -> str:
    image = Images.get_or_none(Images.image_id == pid)
    description = ""
    if image and image.description:
        description = image.description
    else:
        description = "[图片]"
    return description

@@ -1,5 +1,5 @@
[inner]
version = "6.8.0"
version = "6.9.0"

#----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读----
#如果你想要修改配置文件,请递增version的值
@@ -18,21 +18,13 @@ nickname = "麦麦" # 麦麦的昵称
alias_names = ["麦叠", "牢麦"] # 麦麦的别名

[personality]
# 建议50字以内,描述人格的核心特质
personality_core = "是一个女孩子"
# 人格的细节,描述人格的一些侧面
personality_side = "有时候说话不过脑子,喜欢开玩笑, 有时候会表现得无语,有时候会喜欢说一些奇怪的话"

# 建议120字以内,描述人格特质 和 身份特征
personality = "是一个女大学生,现在在读大二,会刷贴吧。有时候说话不过脑子,有时候会喜欢说一些奇怪的话。年龄为19岁,有黑色的短发。"
#アイデンティティがない 生まれないらららら
# 可以描述外貌,性别,身高,职业,属性等等描述
identity = "年龄为19岁,是女孩子,身高为160cm,有黑色的短发"

# 描述麦麦说话的表达风格,表达习惯,如要修改,可以酌情新增内容
reply_style = "回复可以简短一些。可以参考贴吧,知乎和微博的回复风格,回复不要浮夸,不要用夸张修辞,平淡一些。不要浮夸,不要夸张修辞。"

# 描述麦麦的行为风格,会影响麦麦什么时候回复,什么时候使用动作,麦麦考虑的可就多了
plan_style = "当你刚刚发送了消息,没有人回复时,不要选择action,如果有别的动作(非回复)满足条件,可以选择,当你一次发送了太多消息,为了避免打扰聊天节奏,不要选择动作"

# 情感特征,影响情绪的变化情况
emotion_style = "情绪较为稳定,但遭遇特定事件的时候起伏较大"
# 麦麦的兴趣,会影响麦麦对什么话题进行回复
interest = "对技术相关话题,游戏和动漫相关话题感兴趣,也对日常话题感兴趣,不喜欢太过沉重严肃的话题"

@@ -68,7 +60,7 @@ at_bot_inevitable_reply = 1 # at时,回复概率增幅,1为100%回复,0为

max_context_size = 20 # 上下文长度

planner_size = 2.5 # 副规划器大小,越小,麦麦的动作执行能力越精细,但是消耗更多token,调大可以缓解429类错误
planner_size = 3.5 # 副规划器大小,越小,麦麦的动作执行能力越精细,但是消耗更多token,调大可以缓解429类错误

focus_value_adjust = [
    ["", "8:00,1", "12:00,0.8", "18:00,1", "01:00,0.3"],
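
The `focus_value_adjust` entry above (and the `talk_frequency_adjust` list referenced in the next hunk) pairs what looks like a chat-stream selector with a series of "HH:MM,value" points. The sketch below shows one way such an entry could be parsed; reading the first element as a selector (empty string = default) is an assumption about the visible format, not the project's actual loader.

```python
# Hypothetical parser for one schedule entry of the form shown above.
def parse_schedule(entry: list[str]) -> tuple[str, dict[str, float]]:
    selector, *points = entry            # assumed: first element selects a chat stream ("" = default)
    schedule = {}
    for point in points:
        clock, value = point.split(",")  # "HH:MM,value"
        schedule[clock] = float(value)
    return selector, schedule

print(parse_schedule(["", "8:00,1", "12:00,0.8", "18:00,1", "01:00,0.3"]))
# ('', {'8:00': 1.0, '12:00': 0.8, '18:00': 1.0, '01:00': 0.3})
```
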
@@ -104,7 +96,7 @@ talk_frequency_adjust = [
enable_relationship = true # 是否启用关系系统

[tool]
enable_tool = false # 是否在普通聊天中启用工具
enable_tool = false # 是否启用回复工具

[mood]
enable_mood = true # 是否启用情绪系统
@@ -122,7 +114,6 @@ filtration_prompt = "符合公序良俗" # 表情包过滤要求,只有符合

[memory]
enable_memory = true # 是否启用记忆系统

forget_memory_interval = 1500 # 记忆遗忘间隔 单位秒 间隔越低,麦麦遗忘越频繁,记忆更精简,但更难学习
memory_forget_time = 48 #多长时间后的记忆会被遗忘 单位小时
memory_forget_percentage = 0.008 # 记忆遗忘比例 控制记忆遗忘程度 越大遗忘越多 建议保持默认