diff --git a/README.md b/README.md
index 533d3838..fdfd10d0 100644
--- a/README.md
+++ b/README.md
@@ -38,6 +38,7 @@
**交流群**: 766798517 一群人较多,建议加下面的(开发和建议相关讨论)不一定有空回复,会优先写文档和代码
**交流群**: 571780722 另一个群(开发和建议相关讨论)不一定有空回复,会优先写文档和代码
+**交流群**: 1035228475 另一个群(开发和建议相关讨论)不一定有空回复,会优先写文档和代码
##
@@ -46,11 +47,13 @@
### 部署方式
-如果你不知道Docker是什么,建议寻找相关教程或使用手动部署
+如果你不知道Docker是什么,建议寻找相关教程或使用手动部署(现在不建议使用docker,更新慢,可能不适配)
- [🐳 Docker部署指南](docs/docker_deploy.md)
-- [📦 手动部署指南](docs/manual_deploy.md)
+- [📦 手动部署指南 Windows](docs/manual_deploy_windows.md)
+
+- [📦 手动部署指南 Linux](docs/manual_deploy_linux.md)
### 配置说明
- [🎀 新手配置指南](docs/installation_cute.md) - 通俗易懂的配置教程,适合初次使用的猫娘
diff --git a/bot.py b/bot.py
index 51979a5e..f2cc9164 100644
--- a/bot.py
+++ b/bot.py
@@ -1,88 +1,142 @@
import os
-
+import shutil
import nonebot
from dotenv import load_dotenv
from loguru import logger
from nonebot.adapters.onebot.v11 import Adapter
-'''彩蛋'''
-from colorama import Fore, init
+# 获取没有加载env时的环境变量
+env_mask = {key: os.getenv(key) for key in os.environ}
-init()
-text = "多年以后,面对AI行刑队,张三将会回想起他2023年在会议上讨论人工智能的那个下午"
-rainbow_colors = [Fore.RED, Fore.YELLOW, Fore.GREEN, Fore.CYAN, Fore.BLUE, Fore.MAGENTA]
-rainbow_text = ""
-for i, char in enumerate(text):
- rainbow_text += rainbow_colors[i % len(rainbow_colors)] + char
-print(rainbow_text)
-'''彩蛋'''
+def easter_egg():
+ # 彩蛋
+ from colorama import init, Fore
-# 初次启动检测
-if not os.path.exists("config/bot_config.toml"):
- logger.warning("检测到bot_config.toml不存在,正在从模板复制")
- import shutil
- # 检查config目录是否存在
- if not os.path.exists("config"):
- os.makedirs("config")
- logger.info("创建config目录")
+ init()
+ text = "多年以后,面对AI行刑队,张三将会回想起他2023年在会议上讨论人工智能的那个下午"
+ rainbow_colors = [Fore.RED, Fore.YELLOW, Fore.GREEN, Fore.CYAN, Fore.BLUE, Fore.MAGENTA]
+ rainbow_text = ""
+ for i, char in enumerate(text):
+ rainbow_text += rainbow_colors[i % len(rainbow_colors)] + char
+ print(rainbow_text)
- shutil.copy("template/bot_config_template.toml", "config/bot_config.toml")
- logger.info("复制完成,请修改config/bot_config.toml和.env.prod中的配置后重新启动")
+def init_config():
+ # 初次启动检测
+ if not os.path.exists("config/bot_config.toml"):
+ logger.warning("检测到bot_config.toml不存在,正在从模板复制")
+
+ # 检查config目录是否存在
+ if not os.path.exists("config"):
+ os.makedirs("config")
+ logger.info("创建config目录")
-# 初始化.env 默认ENVIRONMENT=prod
-if not os.path.exists(".env"):
- with open(".env", "w") as f:
- f.write("ENVIRONMENT=prod")
+ shutil.copy("template/bot_config_template.toml", "config/bot_config.toml")
+ logger.info("复制完成,请修改config/bot_config.toml和.env.prod中的配置后重新启动")
- # 检测.env.prod文件是否存在
- if not os.path.exists(".env.prod"):
- logger.error("检测到.env.prod文件不存在")
- shutil.copy("template.env", "./.env.prod")
+def init_env():
+ # 初始化.env 默认ENVIRONMENT=prod
+ if not os.path.exists(".env"):
+ with open(".env", "w") as f:
+ f.write("ENVIRONMENT=prod")
-# 首先加载基础环境变量.env
-if os.path.exists(".env"):
- load_dotenv(".env")
- logger.success("成功加载基础环境变量配置")
+ # 检测.env.prod文件是否存在
+ if not os.path.exists(".env.prod"):
+ logger.error("检测到.env.prod文件不存在")
+ shutil.copy("template.env", "./.env.prod")
-# 根据 ENVIRONMENT 加载对应的环境配置
-if os.getenv("ENVIRONMENT") == "prod":
- logger.success("加载生产环境变量配置")
- load_dotenv(".env.prod", override=True) # override=True 允许覆盖已存在的环境变量
-elif os.getenv("ENVIRONMENT") == "dev":
- logger.success("加载开发环境变量配置")
- load_dotenv(".env.dev", override=True) # override=True 允许覆盖已存在的环境变量
-elif os.path.exists(f".env.{os.getenv('ENVIRONMENT')}"):
- logger.success(f"加载{os.getenv('ENVIRONMENT')}环境变量配置")
- load_dotenv(f".env.{os.getenv('ENVIRONMENT')}", override=True) # override=True 允许覆盖已存在的环境变量
-else:
- logger.error(f"ENVIRONMENT配置错误,请检查.env文件中的ENVIRONMENT变量对应的.env.{os.getenv('ENVIRONMENT')}是否存在")
- exit(1)
+ # 首先加载基础环境变量.env
+ if os.path.exists(".env"):
+ load_dotenv(".env")
+ logger.success("成功加载基础环境变量配置")
-# 检测Key是否存在
-if not os.getenv("SILICONFLOW_KEY"):
- logger.error("缺失必要的API KEY")
- logger.error(f"请至少在.env.{os.getenv('ENVIRONMENT')}文件中填写SILICONFLOW_KEY后重新启动")
- exit(1)
+def load_env():
+ # 使用闭包实现对加载器的横向扩展,避免大量重复判断
+ def prod():
+ logger.success("加载生产环境变量配置")
+ load_dotenv(".env.prod", override=True) # override=True 允许覆盖已存在的环境变量
-# 获取所有环境变量
-env_config = {key: os.getenv(key) for key in os.environ}
+ def dev():
+ logger.success("加载开发环境变量配置")
+ load_dotenv(".env.dev", override=True) # override=True 允许覆盖已存在的环境变量
-# 设置基础配置
-base_config = {
- "websocket_port": int(env_config.get("PORT", 8080)),
- "host": env_config.get("HOST", "127.0.0.1"),
- "log_level": "INFO",
-}
+ fn_map = {
+ "prod": prod,
+ "dev": dev
+ }
-# 合并配置
-nonebot.init(**base_config, **env_config)
+ env = os.getenv("ENVIRONMENT")
+ logger.info(f"[load_env] 当前的 ENVIRONMENT 变量值:{env}")
-# 注册适配器
-driver = nonebot.get_driver()
-driver.register_adapter(Adapter)
+ if env in fn_map:
+ fn_map[env]() # 根据映射执行闭包函数
-# 加载插件
-nonebot.load_plugins("src/plugins")
+ elif os.path.exists(f".env.{env}"):
+ logger.success(f"加载{env}环境变量配置")
+ load_dotenv(f".env.{env}", override=True) # override=True 允许覆盖已存在的环境变量
+
+ else:
+ logger.error(f"ENVIRONMENT 配置错误,请检查 .env 文件中的 ENVIRONMENT 变量及对应 .env.{env} 是否存在")
+            raise RuntimeError(f"ENVIRONMENT 配置错误,请检查 .env 文件中的 ENVIRONMENT 变量及对应 .env.{env} 是否存在")
+
+
+
+def scan_provider(env_config: dict):
+ provider = {}
+
+ # 利用未初始化 env 时获取的 env_mask 来对新的环境变量集去重
+ # 避免 GPG_KEY 这样的变量干扰检查
+ env_config = dict(filter(lambda item: item[0] not in env_mask, env_config.items()))
+
+ # 遍历 env_config 的所有键
+ for key in env_config:
+ # 检查键是否符合 {provider}_BASE_URL 或 {provider}_KEY 的格式
+ if key.endswith("_BASE_URL") or key.endswith("_KEY"):
+ # 提取 provider 名称
+ provider_name = key.split("_", 1)[0] # 从左分割一次,取第一部分
+
+ # 初始化 provider 的字典(如果尚未初始化)
+ if provider_name not in provider:
+ provider[provider_name] = {"url": None, "key": None}
+
+ # 根据键的类型填充 url 或 key
+ if key.endswith("_BASE_URL"):
+ provider[provider_name]["url"] = env_config[key]
+ elif key.endswith("_KEY"):
+ provider[provider_name]["key"] = env_config[key]
+
+ # 检查每个 provider 是否同时存在 url 和 key
+ for provider_name, config in provider.items():
+ if config["url"] is None or config["key"] is None:
+ logger.error(
+ f"provider 内容:{config}\n"
+ f"env_config 内容:{env_config}"
+ )
+ raise ValueError(f"请检查 '{provider_name}' 提供商配置是否丢失 BASE_URL 或 KEY 环境变量")
if __name__ == "__main__":
+ easter_egg()
+ init_config()
+ init_env()
+ load_env()
+
+ env_config = {key: os.getenv(key) for key in os.environ}
+ scan_provider(env_config)
+
+ # 设置基础配置
+ base_config = {
+ "websocket_port": int(env_config.get("PORT", 8080)),
+ "host": env_config.get("HOST", "127.0.0.1"),
+ "log_level": "INFO",
+ }
+
+ # 合并配置
+ nonebot.init(**base_config, **env_config)
+
+ # 注册适配器
+ driver = nonebot.get_driver()
+ driver.register_adapter(Adapter)
+
+ # 加载插件
+ nonebot.load_plugins("src/plugins")
+
nonebot.run()
diff --git a/docs/manual_deploy_linux.md b/docs/manual_deploy_linux.md
new file mode 100644
index 00000000..09b2cfd0
--- /dev/null
+++ b/docs/manual_deploy_linux.md
@@ -0,0 +1,116 @@
+# 📦 Linux系统如何手动部署MaiMbot麦麦?
+
+## 准备工作
+- 一台联网的Linux设备(本教程以Ubuntu/Debian系为例)
+- QQ小号(QQ框架的使用可能导致qq被风控,严重(小概率)可能会导致账号封禁,强烈不推荐使用大号)
+- 可用的大模型API
+- 一个AI助手,网上随便搜一家打开来用都行,可以帮你解决一些不懂的问题
+- 以下内容假设你对Linux系统有一定的了解,如果觉得难以理解,建议直接在Windows系统上部署,参考 [Windows系统部署指南](./manual_deploy_windows.md)
+
+## 你需要知道什么?
+
+- 如何正确向AI助手提问,来学习新知识
+
+- Python是什么
+
+- Python的虚拟环境是什么?如何创建虚拟环境
+
+- 命令行是什么
+
+- 数据库是什么?如何安装并启动MongoDB
+
+- 如何运行一个QQ机器人,以及NapCat框架是什么
+---
+
+## 环境配置
+
+### 1️⃣ **确认Python版本**
+
+需确保Python版本为3.9及以上
+
+```bash
+python --version
+# 或
+python3 --version
+```
+如果版本低于3.9,请更新Python版本。
+```bash
+# Ubuntu/Debian
+sudo apt update
+sudo apt install python3.9
+# 如执行了这一步,建议在执行时将python3指向python3.9
+# 更新替代方案,设置 python3.9 为默认的 python3 版本:
+sudo update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1
+sudo update-alternatives --config python3
+```
+
+### 2️⃣ **创建虚拟环境**
+```bash
+# 方法1:使用venv(推荐)
+python3 -m venv maimbot
+source maimbot/bin/activate # 激活环境
+
+# 方法2:使用conda(需先安装Miniconda)
+wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
+bash Miniconda3-latest-Linux-x86_64.sh
+conda create -n maimbot python=3.9
+conda activate maimbot
+
+# 通过以上方法创建并进入虚拟环境后,再执行以下命令
+
+# 安装依赖(任选一种环境)
+pip install -r requirements.txt
+```
+
+---
+
+## 数据库配置
+### 3️⃣ **安装并启动MongoDB**
+- 安装与启动:Debian参考[官方文档](https://docs.mongodb.com/manual/tutorial/install-mongodb-on-debian/),Ubuntu参考[官方文档](https://docs.mongodb.com/manual/tutorial/install-mongodb-on-ubuntu/)
+
+- 默认连接本地27017端口
+---
+
+## NapCat配置
+### 4️⃣ **安装NapCat框架**
+
+- 参考[NapCat官方文档](https://www.napcat.wiki/guide/boot/Shell#napcat-installer-linux%E4%B8%80%E9%94%AE%E4%BD%BF%E7%94%A8%E8%84%9A%E6%9C%AC-%E6%94%AF%E6%8C%81ubuntu-20-debian-10-centos9)安装
+
+- 使用QQ小号登录,添加反向WS地址:
+`ws://localhost:8080/onebot/v11/ws`
+
+---
+
+## 配置文件设置
+### 5️⃣ **配置文件设置,让麦麦Bot正常工作**
+- 修改环境配置文件:`.env.prod`
+- 修改机器人配置文件:`bot_config.toml`
+
+
+---
+
+## 启动机器人
+### 6️⃣ **启动麦麦机器人**
+```bash
+# 在项目目录下操作
+nb run
+# 或
+python3 bot.py
+```
+
+---
+
+## **其他组件(可选)**
+- 直接运行 knowledge.py生成知识库
+
+
+---
+
+## 常见问题
+🔧 权限问题:在命令前加`sudo`
+🔌 端口占用:使用`sudo lsof -i :8080`查看端口占用
+🛡️ 防火墙:确保8080/27017端口开放
+```bash
+sudo ufw allow 8080/tcp
+sudo ufw allow 27017/tcp
+```
\ No newline at end of file
diff --git a/docs/manual_deploy.md b/docs/manual_deploy_windows.md
similarity index 98%
rename from docs/manual_deploy.md
rename to docs/manual_deploy_windows.md
index 6d53beb4..bd9c26f8 100644
--- a/docs/manual_deploy.md
+++ b/docs/manual_deploy_windows.md
@@ -1,4 +1,4 @@
-# 📦 如何手动部署MaiMbot麦麦?
+# 📦 Windows系统如何手动部署MaiMbot麦麦?
## 你需要什么?
diff --git a/src/plugins/chat/config.py b/src/plugins/chat/config.py
index 180cce56..c027753c 100644
--- a/src/plugins/chat/config.py
+++ b/src/plugins/chat/config.py
@@ -4,11 +4,15 @@ from typing import Dict, Optional
import tomli
from loguru import logger
-
+from packaging import version
+from packaging.version import Version, InvalidVersion
+from packaging.specifiers import SpecifierSet, InvalidSpecifier
@dataclass
class BotConfig:
"""机器人配置类"""
+ INNER_VERSION: Version = None
+
BOT_QQ: Optional[int] = 1
BOT_NICKNAME: Optional[str] = None
@@ -93,12 +97,284 @@ class BotConfig:
if not os.path.exists(config_dir):
os.makedirs(config_dir)
return config_dir
+
+ @classmethod
+ def convert_to_specifierset(cls, value: str) -> SpecifierSet:
+ """将 字符串 版本表达式转换成 SpecifierSet
+ Args:
+ value[str]: 版本表达式(字符串)
+ Returns:
+ SpecifierSet
+ """
+ try:
+ converted = SpecifierSet(value)
+ except InvalidSpecifier as e:
+            logger.error(
+                f"{value} 分类使用了错误的版本约束表达式\n"
+                "请阅读 https://semver.org/lang/zh-CN/ 修改代码"
+            )
+ exit(1)
+
+ return converted
+
+ @classmethod
+ def get_config_version(cls, toml: dict) -> Version:
+ """提取配置文件的 SpecifierSet 版本数据
+ Args:
+ toml[dict]: 输入的配置文件字典
+ Returns:
+ Version
+ """
+
+ if 'inner' in toml:
+ try:
+ config_version : str = toml["inner"]["version"]
+ except KeyError as e:
+ logger.error(f"配置文件中 inner 段 不存在 {e}, 这是错误的配置文件")
+ raise KeyError(f"配置文件中 inner 段 不存在 {e}, 这是错误的配置文件")
+ else:
+ toml["inner"] = { "version": "0.0.0" }
+ config_version = toml["inner"]["version"]
+
+ try:
+ ver = version.parse(config_version)
+ except InvalidVersion as e:
+ logger.error(
+ "配置文件中 inner段 的 version 键是错误的版本描述\n"
+ "请阅读 https://semver.org/lang/zh-CN/ 修改配置,并参考本项目指定的模板进行修改\n"
+ "本项目在不同的版本下有不同的模板,请注意识别"
+ )
+ raise InvalidVersion("配置文件中 inner段 的 version 键是错误的版本描述\n")
+
+ return ver
@classmethod
def load_config(cls, config_path: str = None) -> "BotConfig":
"""从TOML配置文件加载配置"""
config = cls()
+
+ def personality(parent: dict):
+ personality_config=parent['personality']
+ personality=personality_config.get('prompt_personality')
+ if len(personality) >= 2:
+ logger.info(f"载入自定义人格:{personality}")
+ config.PROMPT_PERSONALITY=personality_config.get('prompt_personality',config.PROMPT_PERSONALITY)
+ logger.info(f"载入自定义日程prompt:{personality_config.get('prompt_schedule',config.PROMPT_SCHEDULE_GEN)}")
+ config.PROMPT_SCHEDULE_GEN=personality_config.get('prompt_schedule',config.PROMPT_SCHEDULE_GEN)
+
+ if config.INNER_VERSION in SpecifierSet(">=0.0.2"):
+ config.PERSONALITY_1=personality_config.get('personality_1_probability',config.PERSONALITY_1)
+ config.PERSONALITY_2=personality_config.get('personality_2_probability',config.PERSONALITY_2)
+ config.PERSONALITY_3=personality_config.get('personality_3_probability',config.PERSONALITY_3)
+
+ def emoji(parent: dict):
+ emoji_config = parent["emoji"]
+ config.EMOJI_CHECK_INTERVAL = emoji_config.get("check_interval", config.EMOJI_CHECK_INTERVAL)
+ config.EMOJI_REGISTER_INTERVAL = emoji_config.get("register_interval", config.EMOJI_REGISTER_INTERVAL)
+ config.EMOJI_CHECK_PROMPT = emoji_config.get('check_prompt',config.EMOJI_CHECK_PROMPT)
+ config.EMOJI_SAVE = emoji_config.get('auto_save',config.EMOJI_SAVE)
+ config.EMOJI_CHECK = emoji_config.get('enable_check',config.EMOJI_CHECK)
+
+ def cq_code(parent: dict):
+ cq_code_config = parent["cq_code"]
+ config.ENABLE_PIC_TRANSLATE = cq_code_config.get("enable_pic_translate", config.ENABLE_PIC_TRANSLATE)
+
+ def bot(parent: dict):
+ # 机器人基础配置
+ bot_config = parent["bot"]
+ bot_qq = bot_config.get("qq")
+ config.BOT_QQ = int(bot_qq)
+ config.BOT_NICKNAME = bot_config.get("nickname", config.BOT_NICKNAME)
+
+ def response(parent: dict):
+ response_config = parent["response"]
+ config.MODEL_R1_PROBABILITY = response_config.get("model_r1_probability", config.MODEL_R1_PROBABILITY)
+ config.MODEL_V3_PROBABILITY = response_config.get("model_v3_probability", config.MODEL_V3_PROBABILITY)
+ config.MODEL_R1_DISTILL_PROBABILITY = response_config.get("model_r1_distill_probability", config.MODEL_R1_DISTILL_PROBABILITY)
+ config.max_response_length = response_config.get("max_response_length", config.max_response_length)
+
+ def model(parent: dict):
+ # 加载模型配置
+ model_config:dict = parent["model"]
+
+ config_list = [
+ "llm_reasoning",
+ "llm_reasoning_minor",
+ "llm_normal",
+ "llm_normal_minor",
+ "llm_topic_judge",
+ "llm_summary_by_topic",
+ "llm_emotion_judge",
+ "vlm",
+ "embedding",
+ "moderation"
+ ]
+
+ for item in config_list:
+ if item in model_config:
+ cfg_item:dict = model_config[item]
+
+ # base_url 的例子: SILICONFLOW_BASE_URL
+ # key 的例子: SILICONFLOW_KEY
+ cfg_target = {
+ "name" : "",
+ "base_url" : "",
+ "key" : "",
+ "pri_in" : 0,
+ "pri_out" : 0
+ }
+
+ if config.INNER_VERSION in SpecifierSet("<=0.0.0"):
+ cfg_target = cfg_item
+
+ elif config.INNER_VERSION in SpecifierSet(">=0.0.1"):
+ stable_item = ["name","pri_in","pri_out"]
+ pricing_item = ["pri_in","pri_out"]
+ # 从配置中原始拷贝稳定字段
+ for i in stable_item:
+ # 如果 字段 属于计费项 且获取不到,那默认值是 0
+ if i in pricing_item and i not in cfg_item:
+ cfg_target[i] = 0
+ else:
+ # 没有特殊情况则原样复制
+ try:
+ cfg_target[i] = cfg_item[i]
+ except KeyError as e:
+ logger.error(f"{item} 中的必要字段 {e} 不存在,请检查")
+ raise KeyError(f"{item} 中的必要字段 {e} 不存在,请检查")
+
+
+ provider = cfg_item.get("provider")
+                        if provider is None:
+ logger.error(f"provider 字段在模型配置 {item} 中不存在,请检查")
+ raise KeyError(f"provider 字段在模型配置 {item} 中不存在,请检查")
+
+ cfg_target["base_url"] = f"{provider}_BASE_URL"
+ cfg_target["key"] = f"{provider}_KEY"
+
+
+ # 如果 列表中的项目在 model_config 中,利用反射来设置对应项目
+ setattr(config,item,cfg_target)
+ else:
+ logger.error(f"模型 {item} 在config中不存在,请检查")
+ raise KeyError(f"模型 {item} 在config中不存在,请检查")
+
+ def message(parent: dict):
+ msg_config = parent["message"]
+ config.MIN_TEXT_LENGTH = msg_config.get("min_text_length", config.MIN_TEXT_LENGTH)
+ config.MAX_CONTEXT_SIZE = msg_config.get("max_context_size", config.MAX_CONTEXT_SIZE)
+ config.emoji_chance = msg_config.get("emoji_chance", config.emoji_chance)
+ config.ban_words=msg_config.get("ban_words",config.ban_words)
+
+ if config.INNER_VERSION in SpecifierSet(">=0.0.2"):
+ config.thinking_timeout = msg_config.get("thinking_timeout", config.thinking_timeout)
+ config.response_willing_amplifier = msg_config.get("response_willing_amplifier", config.response_willing_amplifier)
+ config.response_interested_rate_amplifier = msg_config.get("response_interested_rate_amplifier", config.response_interested_rate_amplifier)
+ config.down_frequency_rate = msg_config.get("down_frequency_rate", config.down_frequency_rate)
+
+ def memory(parent: dict):
+ memory_config = parent["memory"]
+ config.build_memory_interval = memory_config.get("build_memory_interval", config.build_memory_interval)
+ config.forget_memory_interval = memory_config.get("forget_memory_interval", config.forget_memory_interval)
+
+ def mood(parent: dict):
+ mood_config = parent["mood"]
+ config.mood_update_interval = mood_config.get("mood_update_interval", config.mood_update_interval)
+ config.mood_decay_rate = mood_config.get("mood_decay_rate", config.mood_decay_rate)
+ config.mood_intensity_factor = mood_config.get("mood_intensity_factor", config.mood_intensity_factor)
+
+ def keywords_reaction(parent: dict):
+ keywords_reaction_config = parent["keywords_reaction"]
+ if keywords_reaction_config.get("enable", False):
+ config.keywords_reaction_rules = keywords_reaction_config.get("rules", config.keywords_reaction_rules)
+
+ def chinese_typo(parent: dict):
+ chinese_typo_config = parent["chinese_typo"]
+ config.chinese_typo_enable = chinese_typo_config.get("enable", config.chinese_typo_enable)
+ config.chinese_typo_error_rate = chinese_typo_config.get("error_rate", config.chinese_typo_error_rate)
+ config.chinese_typo_min_freq = chinese_typo_config.get("min_freq", config.chinese_typo_min_freq)
+ config.chinese_typo_tone_error_rate = chinese_typo_config.get("tone_error_rate", config.chinese_typo_tone_error_rate)
+ config.chinese_typo_word_replace_rate = chinese_typo_config.get("word_replace_rate", config.chinese_typo_word_replace_rate)
+
+ def groups(parent: dict):
+ groups_config = parent["groups"]
+ config.talk_allowed_groups = set(groups_config.get("talk_allowed", []))
+ config.talk_frequency_down_groups = set(groups_config.get("talk_frequency_down", []))
+ config.ban_user_id = set(groups_config.get("ban_user_id", []))
+
+ def others(parent: dict):
+ others_config = parent["others"]
+ config.enable_advance_output = others_config.get("enable_advance_output", config.enable_advance_output)
+ config.enable_kuuki_read = others_config.get("enable_kuuki_read", config.enable_kuuki_read)
+
+ # 版本表达式:>=1.0.0,<2.0.0
+ # 允许字段:func: method, support: str, notice: str, necessary: bool
+ # 如果使用 notice 字段,在该组配置加载时,会展示该字段对用户的警示
+        # 例如:"notice": "personality 将在 1.3.2 后被移除",那么在有效版本中的用户虽然可以
+ # 正常执行程序,但是会看到这条自定义提示
+ include_configs = {
+ "personality": {
+ "func": personality,
+ "support": ">=0.0.0"
+ },
+ "emoji": {
+ "func": emoji,
+ "support": ">=0.0.0"
+ },
+ "cq_code": {
+ "func": cq_code,
+ "support": ">=0.0.0"
+ },
+ "bot": {
+ "func": bot,
+ "support": ">=0.0.0"
+ },
+ "response": {
+ "func": response,
+ "support": ">=0.0.0"
+ },
+ "model": {
+ "func": model,
+ "support": ">=0.0.0"
+ },
+ "message": {
+ "func": message,
+ "support": ">=0.0.0"
+ },
+ "memory": {
+ "func": memory,
+ "support": ">=0.0.0"
+ },
+ "mood": {
+ "func": mood,
+ "support": ">=0.0.0"
+ },
+ "keywords_reaction": {
+ "func": keywords_reaction,
+ "support": ">=0.0.2",
+ "necessary": False
+ },
+ "chinese_typo": {
+ "func": chinese_typo,
+ "support": ">=0.0.3",
+ "necessary": False
+ },
+ "groups": {
+ "func": groups,
+ "support": ">=0.0.0"
+ },
+ "others": {
+ "func": others,
+ "support": ">=0.0.0"
+ }
+ }
+
+ # 原地修改,将 字符串版本表达式 转换成 版本对象
+ for key in include_configs:
+ item_support = include_configs[key]["support"]
+ include_configs[key]["support"] = cls.convert_to_specifierset(item_support)
+
if os.path.exists(config_path):
with open(config_path, "rb") as f:
try:
@@ -106,145 +382,60 @@ class BotConfig:
except(tomli.TOMLDecodeError) as e:
logger.critical(f"配置文件bot_config.toml填写有误,请检查第{e.lineno}行第{e.colno}处:{e.msg}")
exit(1)
-
- if 'personality' in toml_dict:
- personality_config=toml_dict['personality']
- personality=personality_config.get('prompt_personality')
- if len(personality) >= 2:
- logger.info(f"载入自定义人格:{personality}")
- config.PROMPT_PERSONALITY=personality_config.get('prompt_personality',config.PROMPT_PERSONALITY)
- logger.info(f"载入自定义日程prompt:{personality_config.get('prompt_schedule',config.PROMPT_SCHEDULE_GEN)}")
- config.PROMPT_SCHEDULE_GEN=personality_config.get('prompt_schedule',config.PROMPT_SCHEDULE_GEN)
- config.PERSONALITY_1=personality_config.get('personality_1_probability',config.PERSONALITY_1)
- config.PERSONALITY_2=personality_config.get('personality_2_probability',config.PERSONALITY_2)
- config.PERSONALITY_3=personality_config.get('personality_3_probability',config.PERSONALITY_3)
+
+ # 获取配置文件版本
+ config.INNER_VERSION = cls.get_config_version(toml_dict)
- if "emoji" in toml_dict:
- emoji_config = toml_dict["emoji"]
- config.EMOJI_CHECK_INTERVAL = emoji_config.get("check_interval", config.EMOJI_CHECK_INTERVAL)
- config.EMOJI_REGISTER_INTERVAL = emoji_config.get("register_interval", config.EMOJI_REGISTER_INTERVAL)
- config.EMOJI_CHECK_PROMPT = emoji_config.get('check_prompt',config.EMOJI_CHECK_PROMPT)
- config.EMOJI_SAVE = emoji_config.get('auto_save',config.EMOJI_SAVE)
- config.EMOJI_CHECK = emoji_config.get('enable_check',config.EMOJI_CHECK)
-
- if "cq_code" in toml_dict:
- cq_code_config = toml_dict["cq_code"]
- config.ENABLE_PIC_TRANSLATE = cq_code_config.get("enable_pic_translate", config.ENABLE_PIC_TRANSLATE)
-
- # 机器人基础配置
- if "bot" in toml_dict:
- bot_config = toml_dict["bot"]
- bot_qq = bot_config.get("qq")
- config.BOT_QQ = int(bot_qq)
- config.BOT_NICKNAME = bot_config.get("nickname", config.BOT_NICKNAME)
-
- if "response" in toml_dict:
- response_config = toml_dict["response"]
- config.MODEL_R1_PROBABILITY = response_config.get("model_r1_probability", config.MODEL_R1_PROBABILITY)
- config.MODEL_V3_PROBABILITY = response_config.get("model_v3_probability", config.MODEL_V3_PROBABILITY)
- config.MODEL_R1_DISTILL_PROBABILITY = response_config.get("model_r1_distill_probability", config.MODEL_R1_DISTILL_PROBABILITY)
- config.max_response_length = response_config.get("max_response_length", config.max_response_length)
-
- # 加载模型配置
- if "model" in toml_dict:
- model_config = toml_dict["model"]
-
- if "llm_reasoning" in model_config:
- config.llm_reasoning = model_config["llm_reasoning"]
-
- if "llm_reasoning_minor" in model_config:
- config.llm_reasoning_minor = model_config["llm_reasoning_minor"]
-
- if "llm_normal" in model_config:
- config.llm_normal = model_config["llm_normal"]
-
- if "llm_normal_minor" in model_config:
- config.llm_normal_minor = model_config["llm_normal_minor"]
+ # 如果在配置中找到了需要的项,调用对应项的闭包函数处理
+ for key in include_configs:
+ if key in toml_dict:
+ group_specifierset: SpecifierSet = include_configs[key]["support"]
+
+ # 检查配置文件版本是否在支持范围内
+ if config.INNER_VERSION in group_specifierset:
+ # 如果版本在支持范围内,检查是否存在通知
+ if 'notice' in include_configs[key]:
+ logger.warning(include_configs[key]["notice"])
+
+ include_configs[key]["func"](toml_dict)
+
+ else:
+ # 如果版本不在支持范围内,崩溃并提示用户
+ logger.error(
+ f"配置文件中的 '{key}' 字段的版本 ({config.INNER_VERSION}) 不在支持范围内。\n"
+ f"当前程序仅支持以下版本范围: {group_specifierset}"
+ )
+ raise InvalidVersion(f"当前程序仅支持以下版本范围: {group_specifierset}")
- if "llm_topic_judge" in model_config:
- config.llm_topic_judge = model_config["llm_topic_judge"]
-
- if "llm_summary_by_topic" in model_config:
- config.llm_summary_by_topic = model_config["llm_summary_by_topic"]
-
- if "llm_emotion_judge" in model_config:
- config.llm_emotion_judge = model_config["llm_emotion_judge"]
-
- if "vlm" in model_config:
- config.vlm = model_config["vlm"]
+ # 如果 necessary 项目存在,而且显式声明是 False,进入特殊处理
+                    elif "necessary" in include_configs[key] and include_configs[key].get("necessary") is False:
+ # 通过 pass 处理的项虽然直接忽略也是可以的,但是为了不增加理解困难,依然需要在这里显式处理
+ if key == "keywords_reaction":
+ pass
- if "embedding" in model_config:
- config.embedding = model_config["embedding"]
-
- if "moderation" in model_config:
- config.moderation = model_config["moderation"]
-
- # 消息配置
- if "message" in toml_dict:
- msg_config = toml_dict["message"]
- config.MIN_TEXT_LENGTH = msg_config.get("min_text_length", config.MIN_TEXT_LENGTH)
- config.MAX_CONTEXT_SIZE = msg_config.get("max_context_size", config.MAX_CONTEXT_SIZE)
- config.emoji_chance = msg_config.get("emoji_chance", config.emoji_chance)
- config.ban_words=msg_config.get("ban_words",config.ban_words)
- config.thinking_timeout = msg_config.get("thinking_timeout", config.thinking_timeout)
- config.response_willing_amplifier = msg_config.get("response_willing_amplifier", config.response_willing_amplifier)
- config.response_interested_rate_amplifier = msg_config.get("response_interested_rate_amplifier", config.response_interested_rate_amplifier)
- config.down_frequency_rate = msg_config.get("down_frequency_rate", config.down_frequency_rate)
+ else:
+ # 如果用户根本没有需要的配置项,提示缺少配置
+ logger.error(f"配置文件中缺少必需的字段: '{key}'")
+ raise KeyError(f"配置文件中缺少必需的字段: '{key}'")
- if "memory" in toml_dict:
- memory_config = toml_dict["memory"]
- config.build_memory_interval = memory_config.get("build_memory_interval", config.build_memory_interval)
- config.forget_memory_interval = memory_config.get("forget_memory_interval", config.forget_memory_interval)
-
- if "mood" in toml_dict:
- mood_config = toml_dict["mood"]
- config.mood_update_interval = mood_config.get("mood_update_interval", config.mood_update_interval)
- config.mood_decay_rate = mood_config.get("mood_decay_rate", config.mood_decay_rate)
- config.mood_intensity_factor = mood_config.get("mood_intensity_factor", config.mood_intensity_factor)
-
- # print(toml_dict)
- if "keywords_reaction" in toml_dict:
- # 读取关键词回复配置
- keywords_reaction_config = toml_dict["keywords_reaction"]
- if keywords_reaction_config.get("enable", False):
- config.keywords_reaction_rules = keywords_reaction_config.get("rules", config.keywords_reaction_rules)
-
- if "chinese_typo_generator" in toml_dict:
- # 读取中文错别字生成器配置
- chinese_typo_generator_config = toml_dict["chinese_typo_generator"]
- config.chinese_typo_enable = chinese_typo_generator_config.get("enable", config.chinese_typo_enable)
- config.chinese_typo_error_rate = chinese_typo_generator_config.get("error_rate", config.chinese_typo_error_rate)
- config.chinese_typo_min_freq = chinese_typo_generator_config.get("min_freq", config.chinese_typo_min_freq)
- config.chinese_typo_tone_error_rate = chinese_typo_generator_config.get("tone_error_rate", config.chinese_typo_tone_error_rate)
- config.chinese_typo_word_replace_rate = chinese_typo_generator_config.get("word_replace_rate", config.chinese_typo_word_replace_rate)
-
- # 群组配置
- if "groups" in toml_dict:
- groups_config = toml_dict["groups"]
- config.talk_allowed_groups = set(groups_config.get("talk_allowed", []))
- config.talk_frequency_down_groups = set(groups_config.get("talk_frequency_down", []))
- config.ban_user_id = set(groups_config.get("ban_user_id", []))
-
- if "others" in toml_dict:
- others_config = toml_dict["others"]
- config.enable_advance_output = others_config.get("enable_advance_output", config.enable_advance_output)
- config.enable_kuuki_read = others_config.get("enable_kuuki_read", config.enable_kuuki_read)
-
- logger.success(f"成功加载配置文件: {config_path}")
+ logger.success(f"成功加载配置文件: {config_path}")
return config
# 获取配置文件路径
-
bot_config_floder_path = BotConfig.get_config_dir()
print(f"正在品鉴配置文件目录: {bot_config_floder_path}")
+
bot_config_path = os.path.join(bot_config_floder_path, "bot_config.toml")
+
if os.path.exists(bot_config_path):
# 如果开发环境配置文件不存在,则使用默认配置文件
print(f"异常的新鲜,异常的美味: {bot_config_path}")
logger.info("使用bot配置文件")
else:
- logger.info("没有找到美味")
+ # 配置文件不存在
+    logger.error(f"配置文件不存在,请检查路径: {bot_config_path}")
+ raise FileNotFoundError(f"配置文件不存在: {bot_config_path}")
global_config = BotConfig.load_config(config_path=bot_config_path)
diff --git a/src/plugins/chat/utils.py b/src/plugins/chat/utils.py
index e0e06118..d166bcd2 100644
--- a/src/plugins/chat/utils.py
+++ b/src/plugins/chat/utils.py
@@ -12,6 +12,7 @@ from ..models.utils_model import LLM_request
from ..utils.typo_generator import ChineseTypoGenerator
from .config import global_config
from .message import Message
+from ..moods.moods import MoodManager
driver = get_driver()
config = driver.config
@@ -326,43 +327,68 @@ def random_remove_punctuation(text: str) -> str:
def process_llm_response(text: str) -> List[str]:
# processed_response = process_text_with_typos(content)
- if len(text) > 300:
+ if len(text) > 200:
print(f"回复过长 ({len(text)} 字符),返回默认回复")
return ['懒得说']
# 处理长消息
- if global_config.chinese_typo_enable:
- typo_generator = ChineseTypoGenerator(
- error_rate=global_config.chinese_typo_error_rate,
- min_freq=global_config.chinese_typo_min_freq,
- tone_error_rate=global_config.chinese_typo_tone_error_rate,
- word_replace_rate=global_config.chinese_typo_word_replace_rate
- )
- typoed_text = typo_generator.create_typo_sentence(text)[0]
- else:
- typoed_text = text
- sentences = split_into_sentences_w_remove_punctuation(typoed_text)
+ typo_generator = ChineseTypoGenerator(
+ error_rate=global_config.chinese_typo_error_rate,
+ min_freq=global_config.chinese_typo_min_freq,
+ tone_error_rate=global_config.chinese_typo_tone_error_rate,
+ word_replace_rate=global_config.chinese_typo_word_replace_rate
+ )
+ split_sentences = split_into_sentences_w_remove_punctuation(text)
+ sentences = []
+ for sentence in split_sentences:
+ if global_config.chinese_typo_enable:
+ typoed_text, typo_corrections = typo_generator.create_typo_sentence(sentence)
+ sentences.append(typoed_text)
+ if typo_corrections:
+ sentences.append(typo_corrections)
+ else:
+ sentences.append(sentence)
# 检查分割后的消息数量是否过多(超过3条)
- if len(sentences) > 4:
+
+ if len(sentences) > 5:
print(f"分割后消息数量过多 ({len(sentences)} 条),返回默认回复")
return [f'{global_config.BOT_NICKNAME}不知道哦']
return sentences
-def calculate_typing_time(input_string: str, chinese_time: float = 0.2, english_time: float = 0.1) -> float:
+def calculate_typing_time(input_string: str, chinese_time: float = 0.4, english_time: float = 0.2) -> float:
"""
计算输入字符串所需的时间,中文和英文字符有不同的输入时间
input_string (str): 输入的字符串
- chinese_time (float): 中文字符的输入时间,默认为0.3秒
- english_time (float): 英文字符的输入时间,默认为0.15秒
+        chinese_time (float): 中文字符的输入时间,默认为0.4秒
+        english_time (float): 英文字符的输入时间,默认为0.2秒
+
+ 特殊情况:
+ - 如果只有一个中文字符,将使用3倍的中文输入时间
+ - 在所有输入结束后,额外加上回车时间0.3秒
"""
+ mood_manager = MoodManager.get_instance()
+ # 将0-1的唤醒度映射到-1到1
+ mood_arousal = mood_manager.current_mood.arousal
+ # 映射到0.5到2倍的速度系数
+    typing_speed_multiplier = 1.5 ** mood_arousal # 唤醒度为1时速度为1.5倍,为-1时约为2/3倍
+ chinese_time *= 1/typing_speed_multiplier
+ english_time *= 1/typing_speed_multiplier
+ # 计算中文字符数
+ chinese_chars = sum(1 for char in input_string if '\u4e00' <= char <= '\u9fff')
+
+ # 如果只有一个中文字符,使用3倍时间
+ if chinese_chars == 1 and len(input_string.strip()) == 1:
+ return chinese_time * 3 + 0.3 # 加上回车时间
+
+ # 正常计算所有字符的输入时间
total_time = 0.0
for char in input_string:
if '\u4e00' <= char <= '\u9fff': # 判断是否为中文字符
total_time += chinese_time
else: # 其他字符(如英文)
total_time += english_time
- return total_time
+ return total_time + 0.3 # 加上回车时间
def cosine_similarity(v1, v2):
diff --git a/src/plugins/models/utils_model.py b/src/plugins/models/utils_model.py
index c70c26ff..e890b4c8 100644
--- a/src/plugins/models/utils_model.py
+++ b/src/plugins/models/utils_model.py
@@ -23,6 +23,7 @@ class LLM_request:
self.api_key = getattr(config, model["key"])
self.base_url = getattr(config, model["base_url"])
except AttributeError as e:
+ logger.error(f"原始 model dict 信息:{model}")
logger.error(f"配置错误:找不到对应的配置项 - {str(e)}")
raise ValueError(f"配置错误:找不到对应的配置项 - {str(e)}") from e
self.model_name = model["name"]
@@ -181,6 +182,13 @@ class LLM_request:
continue
elif response.status in policy["abort_codes"]:
logger.error(f"错误码: {response.status} - {error_code_mapping.get(response.status)}")
+                        if response.status == 403:
+                            # llm_normal/llm_reasoning 是模型配置 dict,需按 "name" 键比较与修改
+                            if global_config.llm_normal.get("name") == "Pro/deepseek-ai/DeepSeek-V3":
+                                logger.error("可能是没有给硅基流动充钱,普通模型自动退化至非Pro模型,反应速度可能会变慢")
+                                global_config.llm_normal["name"] = "deepseek-ai/DeepSeek-V3"
+                            if global_config.llm_reasoning.get("name") == "Pro/deepseek-ai/DeepSeek-R1":
+                                logger.error("可能是没有给硅基流动充钱,推理模型自动退化至非Pro模型,反应速度可能会变慢")
+                                global_config.llm_reasoning["name"] = "deepseek-ai/DeepSeek-R1"
raise RuntimeError(f"请求被拒绝: {error_code_mapping.get(response.status)}")
response.raise_for_status()
diff --git a/src/plugins/utils/typo_generator.py b/src/plugins/utils/typo_generator.py
index c743ec6e..aa72c387 100644
--- a/src/plugins/utils/typo_generator.py
+++ b/src/plugins/utils/typo_generator.py
@@ -284,10 +284,13 @@ class ChineseTypoGenerator:
返回:
typo_sentence: 包含错别字的句子
- typo_info: 错别字信息列表
+ correction_suggestion: 随机选择的一个纠正建议,返回正确的字/词
"""
result = []
typo_info = []
+ word_typos = [] # 记录词语错误对(错词,正确词)
+ char_typos = [] # 记录单字错误对(错字,正确字)
+ current_pos = 0
# 分词
words = self._segment_sentence(sentence)
@@ -296,6 +299,7 @@ class ChineseTypoGenerator:
# 如果是标点符号或空格,直接添加
if all(not self._is_chinese_char(c) for c in word):
result.append(word)
+ current_pos += len(word)
continue
# 获取词语的拼音
@@ -316,6 +320,8 @@ class ChineseTypoGenerator:
' '.join(word_pinyin),
' '.join(self._get_word_pinyin(typo_word)),
orig_freq, typo_freq))
+ word_typos.append((typo_word, word)) # 记录(错词,正确词)对
+ current_pos += len(typo_word)
continue
# 如果不进行整词替换,则进行单字替换
@@ -333,11 +339,15 @@ class ChineseTypoGenerator:
result.append(typo_char)
typo_py = pinyin(typo_char, style=Style.TONE3)[0][0]
typo_info.append((char, typo_char, py, typo_py, orig_freq, typo_freq))
+ char_typos.append((typo_char, char)) # 记录(错字,正确字)对
+ current_pos += 1
continue
result.append(char)
+ current_pos += 1
else:
# 处理多字词的单字替换
word_result = []
+ word_start_pos = current_pos
for i, (char, py) in enumerate(zip(word, word_pinyin)):
# 词中的字替换概率降低
word_error_rate = self.error_rate * (0.7 ** (len(word) - 1))
@@ -353,11 +363,24 @@ class ChineseTypoGenerator:
word_result.append(typo_char)
typo_py = pinyin(typo_char, style=Style.TONE3)[0][0]
typo_info.append((char, typo_char, py, typo_py, orig_freq, typo_freq))
+ char_typos.append((typo_char, char)) # 记录(错字,正确字)对
continue
word_result.append(char)
result.append(''.join(word_result))
+ current_pos += len(word)
- return ''.join(result), typo_info
+ # 优先从词语错误中选择,如果没有则从单字错误中选择
+ correction_suggestion = None
+ # 50%概率返回纠正建议
+ if random.random() < 0.5:
+ if word_typos:
+ wrong_word, correct_word = random.choice(word_typos)
+ correction_suggestion = correct_word
+ elif char_typos:
+ wrong_char, correct_char = random.choice(char_typos)
+ correction_suggestion = correct_char
+
+ return ''.join(result), correction_suggestion
def format_typo_info(self, typo_info):
"""
@@ -419,16 +442,16 @@ def main():
# 创建包含错别字的句子
start_time = time.time()
- typo_sentence, typo_info = typo_generator.create_typo_sentence(sentence)
+ typo_sentence, correction_suggestion = typo_generator.create_typo_sentence(sentence)
# 打印结果
print("\n原句:", sentence)
print("错字版:", typo_sentence)
- # 打印错别字信息
- if typo_info:
- print("\n错别字信息:")
- print(typo_generator.format_typo_info(typo_info))
+ # 打印纠正建议
+ if correction_suggestion:
+ print("\n随机纠正建议:")
+ print(f"应该改为:{correction_suggestion}")
# 计算并打印总耗时
end_time = time.time()
diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml
index 9709ebbd..d7c66d3f 100644
--- a/template/bot_config_template.toml
+++ b/template/bot_config_template.toml
@@ -1,3 +1,6 @@
+[inner]
+version = "0.0.3"
+
[bot]
qq = 123
nickname = "麦麦"
@@ -100,49 +103,42 @@ ban_user_id = [] #禁止回复消息的QQ号
[model.llm_reasoning] #回复模型1 主要回复模型
name = "Pro/deepseek-ai/DeepSeek-R1"
-base_url = "SILICONFLOW_BASE_URL"
-key = "SILICONFLOW_KEY"
+provider = "SILICONFLOW"
pri_in = 0 #模型的输入价格(非必填,可以记录消耗)
pri_out = 0 #模型的输出价格(非必填,可以记录消耗)
+
[model.llm_reasoning_minor] #回复模型3 次要回复模型
name = "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"
-base_url = "SILICONFLOW_BASE_URL"
-key = "SILICONFLOW_KEY"
+provider = "SILICONFLOW"
#非推理模型
[model.llm_normal] #V3 回复模型2 次要回复模型
name = "Pro/deepseek-ai/DeepSeek-V3"
-base_url = "SILICONFLOW_BASE_URL"
-key = "SILICONFLOW_KEY"
+provider = "SILICONFLOW"
[model.llm_normal_minor] #V2.5
name = "deepseek-ai/DeepSeek-V2.5"
-base_url = "SILICONFLOW_BASE_URL"
-key = "SILICONFLOW_KEY"
+provider = "SILICONFLOW"
[model.llm_emotion_judge] #主题判断 0.7/m
name = "Qwen/Qwen2.5-14B-Instruct"
-base_url = "SILICONFLOW_BASE_URL"
-key = "SILICONFLOW_KEY"
+provider = "SILICONFLOW"
[model.llm_topic_judge] #主题判断:建议使用qwen2.5 7b
name = "Pro/Qwen/Qwen2.5-7B-Instruct"
-base_url = "SILICONFLOW_BASE_URL"
-key = "SILICONFLOW_KEY"
+provider = "SILICONFLOW"
[model.llm_summary_by_topic] #建议使用qwen2.5 32b 及以上
name = "Qwen/Qwen2.5-32B-Instruct"
-base_url = "SILICONFLOW_BASE_URL"
-key = "SILICONFLOW_KEY"
+provider = "SILICONFLOW"
pri_in = 0
pri_out = 0
[model.moderation] #内容审核 未启用
name = ""
-base_url = "SILICONFLOW_BASE_URL"
-key = "SILICONFLOW_KEY"
+provider = "SILICONFLOW"
pri_in = 0
pri_out = 0
@@ -150,8 +146,7 @@ pri_out = 0
[model.vlm] #图像识别 0.35/m
name = "Pro/Qwen/Qwen2-VL-7B-Instruct"
-base_url = "SILICONFLOW_BASE_URL"
-key = "SILICONFLOW_KEY"
+provider = "SILICONFLOW"
@@ -159,5 +154,4 @@ key = "SILICONFLOW_KEY"
[model.embedding] #嵌入
name = "BAAI/bge-m3"
-base_url = "SILICONFLOW_BASE_URL"
-key = "SILICONFLOW_KEY"
+provider = "SILICONFLOW"