diff --git a/src/config/config.py b/src/config/config.py index 312f3e95..1afad2b1 100644 --- a/src/config/config.py +++ b/src/config/config.py @@ -153,6 +153,7 @@ class BotConfig: "用一句话或几句话描述人格的一些侧面", ] ) + personality_detail_level: int = 0 # 人设消息注入 prompt 详细等级 (0: 采用默认配置, 1: 核心/随机细节, 2: 核心+随机侧面/全部细节, 3: 全部) # identity identity_detail: List[str] = field( default_factory=lambda: [ @@ -178,6 +179,7 @@ class BotConfig: base_normal_chat_num: int = 3 # 最多允许多少个群进行普通聊天 base_focused_chat_num: int = 2 # 最多允许多少个群进行专注聊天 + allow_remove_duplicates: bool = True # 是否开启心流去重(如果发现心流截断问题严重可尝试关闭) observation_context_size: int = 12 # 心流观察到的最长上下文大小,超过这个值的上下文会被压缩 @@ -296,7 +298,9 @@ class BotConfig: llm_heartflow: Dict[str, str] = field(default_factory=lambda: {}) llm_tool_use: Dict[str, str] = field(default_factory=lambda: {}) llm_plan: Dict[str, str] = field(default_factory=lambda: {}) - llm_nickname_mapping: Dict[str, Any] = field(default_factory=dict) + llm_nickname_mapping: Dict[str, str] = field(default_factory=lambda: {}) + llm_scheduler_all: Dict[str, str] = field(default_factory=lambda: {}) + llm_scheduler_doing: Dict[str, str] = field(default_factory=lambda: {}) api_urls: Dict[str, str] = field(default_factory=lambda: {}) @@ -365,9 +369,10 @@ class BotConfig: def personality(parent: dict): personality_config = parent["personality"] - if config.INNER_VERSION in SpecifierSet(">=1.2.4"): + if config.INNER_VERSION in SpecifierSet(">=1.6.1.2"): config.personality_core = personality_config.get("personality_core", config.personality_core) config.personality_sides = personality_config.get("personality_sides", config.personality_sides) + config.personality_detail_level = personality_config.get("personality_detail_level", config.personality_detail_level) def identity(parent: dict): identity_config = parent["identity"] @@ -411,7 +416,7 @@ class BotConfig: config.steal_emoji = emoji_config.get("steal_emoji", config.steal_emoji) def group_nickname(parent: dict): - if config.INNER_VERSION in 
SpecifierSet(">=1.6.2"): + if config.INNER_VERSION in SpecifierSet(">=1.6.1.1"): gn_config = parent.get("group_nickname", {}) config.ENABLE_NICKNAME_MAPPING = gn_config.get( "enable_nickname_mapping", config.ENABLE_NICKNAME_MAPPING @@ -515,6 +520,8 @@ class BotConfig: "llm_PFC_chat", "llm_PFC_reply_checker", "llm_nickname_mapping", + "llm_scheduler_all", + "llm_scheduler_doing", ] for item in config_list: @@ -720,7 +727,7 @@ class BotConfig: "chat": {"func": chat, "support": ">=1.6.0", "necessary": False}, "normal_chat": {"func": normal_chat, "support": ">=1.6.0", "necessary": False}, "focus_chat": {"func": focus_chat, "support": ">=1.6.0", "necessary": False}, - "group_nickname": {"func": group_nickname, "support": ">=0.6.3", "necessary": False}, + "group_nickname": {"func": group_nickname, "support": ">=1.6.1.1", "necessary": False}, } # 原地修改,将 字符串版本表达式 转换成 版本对象 diff --git a/src/heart_flow/sub_mind.py b/src/heart_flow/sub_mind.py index 65013af3..60888e25 100644 --- a/src/heart_flow/sub_mind.py +++ b/src/heart_flow/sub_mind.py @@ -415,77 +415,78 @@ class SubMind: logger.warning(f"{self.log_prefix} LLM返回空结果,思考失败。") # ---------- 6. 
应用概率性去重和修饰 ---------- - new_content = content # 保存 LLM 直接输出的结果 - try: - similarity = calculate_similarity(previous_mind, new_content) - replacement_prob = calculate_replacement_probability(similarity) - logger.debug(f"{self.log_prefix} 新旧想法相似度: {similarity:.2f}, 替换概率: {replacement_prob:.2f}") + if global_config.allow_remove_duplicates: + new_content = content # 保存 LLM 直接输出的结果 + try: + similarity = calculate_similarity(previous_mind, new_content) + replacement_prob = calculate_replacement_probability(similarity) + logger.debug(f"{self.log_prefix} 新旧想法相似度: {similarity:.2f}, 替换概率: {replacement_prob:.2f}") - # 定义词语列表 (移到判断之前) - yu_qi_ci_liebiao = ["嗯", "哦", "啊", "唉", "哈", "唔"] - zhuan_zhe_liebiao = ["但是", "不过", "然而", "可是", "只是"] - cheng_jie_liebiao = ["然后", "接着", "此外", "而且", "另外"] - zhuan_jie_ci_liebiao = zhuan_zhe_liebiao + cheng_jie_liebiao + # 定义词语列表 (移到判断之前) + yu_qi_ci_liebiao = ["嗯", "哦", "啊", "唉", "哈", "唔"] + zhuan_zhe_liebiao = ["但是", "不过", "然而", "可是", "只是"] + cheng_jie_liebiao = ["然后", "接着", "此外", "而且", "另外"] + zhuan_jie_ci_liebiao = zhuan_zhe_liebiao + cheng_jie_liebiao - if random.random() < replacement_prob: - # 相似度非常高时,尝试去重或特殊处理 - if similarity == 1.0: - logger.debug(f"{self.log_prefix} 想法完全重复 (相似度 1.0),执行特殊处理...") - # 随机截取大约一半内容 - if len(new_content) > 1: # 避免内容过短无法截取 - split_point = max( - 1, len(new_content) // 2 + random.randint(-len(new_content) // 4, len(new_content) // 4) - ) - truncated_content = new_content[:split_point] - else: - truncated_content = new_content # 如果只有一个字符或者为空,就不截取了 - - # 添加语气词和转折/承接词 - yu_qi_ci = random.choice(yu_qi_ci_liebiao) - zhuan_jie_ci = random.choice(zhuan_jie_ci_liebiao) - content = f"{yu_qi_ci}{zhuan_jie_ci},{truncated_content}" - logger.debug(f"{self.log_prefix} 想法重复,特殊处理后: {content}") - - else: - # 相似度较高但非100%,执行标准去重逻辑 - logger.debug(f"{self.log_prefix} 执行概率性去重 (概率: {replacement_prob:.2f})...") - matcher = difflib.SequenceMatcher(None, previous_mind, new_content) - deduplicated_parts = [] - last_match_end_in_b = 0 - 
for _i, j, n in matcher.get_matching_blocks(): - if last_match_end_in_b < j: - deduplicated_parts.append(new_content[last_match_end_in_b:j]) - last_match_end_in_b = j + n - - deduplicated_content = "".join(deduplicated_parts).strip() - - if deduplicated_content: - # 根据概率决定是否添加词语 - prefix_str = "" - if random.random() < 0.3: # 30% 概率添加语气词 - prefix_str += random.choice(yu_qi_ci_liebiao) - if random.random() < 0.7: # 70% 概率添加转折/承接词 - prefix_str += random.choice(zhuan_jie_ci_liebiao) - - # 组合最终结果 - if prefix_str: - content = f"{prefix_str},{deduplicated_content}" # 更新 content - logger.debug(f"{self.log_prefix} 去重并添加引导词后: {content}") + if random.random() < replacement_prob: + # 相似度非常高时,尝试去重或特殊处理 + if similarity == 1.0: + logger.debug(f"{self.log_prefix} 想法完全重复 (相似度 1.0),执行特殊处理...") + # 随机截取大约一半内容 + if len(new_content) > 1: # 避免内容过短无法截取 + split_point = max( + 1, len(new_content) // 2 + random.randint(-len(new_content) // 4, len(new_content) // 4) + ) + truncated_content = new_content[:split_point] else: - content = deduplicated_content # 更新 content - logger.debug(f"{self.log_prefix} 去重后 (未添加引导词): {content}") - else: - logger.warning(f"{self.log_prefix} 去重后内容为空,保留原始LLM输出: {new_content}") - content = new_content # 保留原始 content - else: - logger.debug(f"{self.log_prefix} 未执行概率性去重 (概率: {replacement_prob:.2f})") - # content 保持 new_content 不变 + truncated_content = new_content # 如果只有一个字符或者为空,就不截取了 - except Exception as e: - logger.error(f"{self.log_prefix} 应用概率性去重或特殊处理时出错: {e}") - logger.error(traceback.format_exc()) - # 出错时保留原始 content - content = new_content + # 添加语气词和转折/承接词 + yu_qi_ci = random.choice(yu_qi_ci_liebiao) + zhuan_jie_ci = random.choice(zhuan_jie_ci_liebiao) + content = f"{yu_qi_ci}{zhuan_jie_ci},{truncated_content}" + logger.debug(f"{self.log_prefix} 想法重复,特殊处理后: {content}") + + else: + # 相似度较高但非100%,执行标准去重逻辑 + logger.debug(f"{self.log_prefix} 执行概率性去重 (概率: {replacement_prob:.2f})...") + matcher = difflib.SequenceMatcher(None, previous_mind, new_content) + 
deduplicated_parts = [] + last_match_end_in_b = 0 + for _i, j, n in matcher.get_matching_blocks(): + if last_match_end_in_b < j: + deduplicated_parts.append(new_content[last_match_end_in_b:j]) + last_match_end_in_b = j + n + + deduplicated_content = "".join(deduplicated_parts).strip() + + if deduplicated_content: + # 根据概率决定是否添加词语 + prefix_str = "" + if random.random() < 0.3: # 30% 概率添加语气词 + prefix_str += random.choice(yu_qi_ci_liebiao) + if random.random() < 0.7: # 70% 概率添加转折/承接词 + prefix_str += random.choice(zhuan_jie_ci_liebiao) + + # 组合最终结果 + if prefix_str: + content = f"{prefix_str},{deduplicated_content}" # 更新 content + logger.debug(f"{self.log_prefix} 去重并添加引导词后: {content}") + else: + content = deduplicated_content # 更新 content + logger.debug(f"{self.log_prefix} 去重后 (未添加引导词): {content}") + else: + logger.warning(f"{self.log_prefix} 去重后内容为空,保留原始LLM输出: {new_content}") + content = new_content # 保留原始 content + else: + logger.debug(f"{self.log_prefix} 未执行概率性去重 (概率: {replacement_prob:.2f})") + # content 保持 new_content 不变 + + except Exception as e: + logger.error(f"{self.log_prefix} 应用概率性去重或特殊处理时出错: {e}") + logger.error(traceback.format_exc()) + # 出错时保留原始 content + content = new_content # ---------- 7. 
更新思考状态并返回结果 ---------- logger.info(f"{self.log_prefix} 最终心流思考结果: {content}") diff --git a/src/individuality/individuality.py b/src/individuality/individuality.py index 38131ea1..c57eeaf9 100644 --- a/src/individuality/individuality.py +++ b/src/individuality/individuality.py @@ -3,6 +3,7 @@ from .personality import Personality from .identity import Identity import random from rich.traceback import install +from src.config.config import global_config install(extra_lines=3) @@ -205,6 +206,15 @@ class Individuality: if not self.personality or not self.identity: return "个体特征尚未完全初始化。" + if global_config.personality_detail_level == 1: + level = 1 + elif global_config.personality_detail_level == 2: + level = 2 + elif global_config.personality_detail_level == 3: + level = 3 + else: # level = 0 + pass + # 调用新的独立方法 prompt_personality = self.get_personality_prompt(level, x_person) prompt_identity = self.get_identity_prompt(level, x_person) diff --git a/src/plugins/schedule/schedule_generator.py b/src/plugins/schedule/schedule_generator.py index 6bd2e587..6b0bc530 100644 --- a/src/plugins/schedule/schedule_generator.py +++ b/src/plugins/schedule/schedule_generator.py @@ -31,13 +31,13 @@ class ScheduleGenerator: # 使用离线LLM模型 self.enable_output = None self.llm_scheduler_all = LLMRequest( - model=global_config.llm_reasoning, + model=global_config.llm_scheduler_all, temperature=global_config.SCHEDULE_TEMPERATURE + 0.3, max_tokens=7000, request_type="schedule", ) self.llm_scheduler_doing = LLMRequest( - model=global_config.llm_normal, + model=global_config.llm_scheduler_doing, temperature=global_config.SCHEDULE_TEMPERATURE, max_tokens=2048, request_type="schedule", diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml index 6723f2de..f88f8521 100644 --- a/template/bot_config_template.toml +++ b/template/bot_config_template.toml @@ -1,5 +1,5 @@ [inner] -version = "1.6.2" +version = "1.6.1.2" #----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读----
#如果你想要修改配置文件,请在修改后将version的值进行变更 @@ -42,6 +42,7 @@ personality_sides = [ "用一句话或几句话描述人格的一些细节", "用一句话或几句话描述人格的一些细节", ]# 条数任意,不能为0, 该选项还在调试中,可能未完全生效 +personality_detail_level = 0 # 人设消息注入 prompt 详细等级 (0: 采用默认配置, 1: 核心/随机细节, 2: 核心+随机侧面/全部细节, 3: 全部) [identity] #アイデンティティがない 生まれないらららら # 兴趣爱好 未完善,有些条目未使用 @@ -71,6 +72,7 @@ allow_focus_mode = true # 是否允许专注聊天状态 # 启用后麦麦会自主选择进入heart_flowC模式(持续一段时间),进行主动的观察和回复,并给出回复,比较消耗token base_normal_chat_num = 3 # 最多允许多少个群进行普通聊天 base_focused_chat_num = 2 # 最多允许多少个群进行专注聊天 +allow_remove_duplicates = true # 是否开启心流去重(如果发现心流截断问题严重可尝试关闭) observation_context_size = 15 # 观察到的最长上下文大小,建议15,太短太长都会导致脑袋尖尖 message_buffer = true # 启用消息缓冲器?启用此项以解决消息的拆分问题,但会使麦麦的回复延迟 @@ -306,6 +308,22 @@ temp = 0.3 pri_in = 2 pri_out = 8 +#日程模型 +[model.llm_scheduler_all] +name = "deepseek-ai/DeepSeek-V3" +provider = "SILICONFLOW" +temp = 0.3 +pri_in = 2 +pri_out = 8 + +#在干嘛模型 +[model.llm_scheduler_doing] +name = "deepseek-ai/DeepSeek-V3" +provider = "SILICONFLOW" +temp = 0.3 +pri_in = 2 +pri_out = 8 + #以下模型暂时没有使用!! #以下模型暂时没有使用!! #以下模型暂时没有使用!!