From f48f33a999904c5fe2ce6ec0aec07ee5af4b6821 Mon Sep 17 00:00:00 2001
From: HYY1116
Date: Sun, 2 Mar 2025 19:48:25 +0800
Subject: [PATCH] fix: prefer free (non-paid) models by default and move model
 names into the global config
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 config/bot_config_toml                        | 45 -------------------
 src/plugins/chat/config.py                    |  8 ++++
 src/plugins/chat/llm_generator.py             | 14 +++---
 src/plugins/chat/topic_identifier.py          |  3 +-
 src/plugins/memory_system/llm_module.py       |  4 +-
 .../memory_system/llm_module_memory_make.py   |  4 +-
 src/plugins/schedule/schedule_generator.py    |  6 +--
 src/plugins/schedule/schedule_llm_module.py   |  9 ++--
 8 files changed, 27 insertions(+), 66 deletions(-)
 delete mode 100644 config/bot_config_toml

diff --git a/config/bot_config_toml b/config/bot_config_toml
deleted file mode 100644
index 83a3c497..00000000
--- a/config/bot_config_toml
+++ /dev/null
@@ -1,45 +0,0 @@
-[bot]
-qq = 123456 # your bot's QQ account
-nickname = "麦麦" # the name you want the bot to be called
-
-[message]
-min_text_length = 2 # 麦麦 only replies to chat messages whose text is at least this long
-max_context_size = 15 # number of context messages 麦麦 keeps; older ones are dropped automatically
-emoji_chance = 0.2 # probability that 麦麦 sends a sticker
-
-[emoji]
-check_interval = 120
-register_interval = 10
-
-[cq_code]
-enable_pic_translate = false
-
-
-[response]
-api_using = "siliconflow" # which LLM API to use: "siliconflow" or "deepseek". siliconflow is recommended, since the image-recognition API currently only supports its deepseek-vl2 model
-model_r1_probability = 0.8 # probability that 麦麦 answers with the R1 model
-model_v3_probability = 0.1 # probability that 麦麦 answers with the V3 model
-model_r1_distill_probability = 0.1 # probability that 麦麦 answers with the distilled R1 model
-
-[memory]
-build_memory_interval = 300 # memory build interval
-
-
-
-[others]
-enable_advance_output = true # verbose log output; true to enable, false to disable
-
-
-[groups]
-
-talk_allowed = [
-    123456,12345678
-] # groups the bot may reply in
-
-talk_frequency_down = [
-    123456,12345678
-] # groups with lowered reply frequency
-
-ban_user_id = [
-    123456,12345678
-] # QQ accounts the bot must not reply to

diff --git a/src/plugins/chat/config.py b/src/plugins/chat/config.py
index dc7c1420..68ec6784 100644
--- a/src/plugins/chat/config.py
+++ b/src/plugins/chat/config.py
@@ -33,6 +33,11 @@ class BotConfig:
     EMOJI_REGISTER_INTERVAL: int = 10 # sticker registration interval (minutes)
 
     API_USING: str = "siliconflow" # which API to use
+    DEEPSEEK_MODEL_R1: str = "deepseek-reasoner" # deepseek R1 model
+    DEEPSEEK_MODEL_V3: str = "deepseek-chat" # deepseek V3 model
+    SILICONFLOW_MODEL_R1: str = "deepseek-ai/DeepSeek-R1" # siliconflow R1 model
+    SILICONFLOW_MODEL_R1_DISTILL: str = "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B" # siliconflow distilled R1 model
+    SILICONFLOW_MODEL_V3: str = "deepseek-ai/DeepSeek-V3" # siliconflow V3 model
     MODEL_R1_PROBABILITY: float = 0.8 # probability of the R1 model
     MODEL_V3_PROBABILITY: float = 0.1 # probability of the V3 model
     MODEL_R1_DISTILL_PROBABILITY: float = 0.1 # probability of the distilled R1 model
@@ -82,6 +87,9 @@ class BotConfig:
         config.MODEL_V3_PROBABILITY = response_config.get("model_v3_probability", config.MODEL_V3_PROBABILITY)
         config.MODEL_R1_DISTILL_PROBABILITY = response_config.get("model_r1_distill_probability", config.MODEL_R1_DISTILL_PROBABILITY)
         config.API_USING = response_config.get("api_using", config.API_USING)
+        if response_config.get("api_paid", False): # opt-in switch to the paid "Pro/" tier
+            config.SILICONFLOW_MODEL_R1 = "Pro/deepseek-ai/DeepSeek-R1"
+            config.SILICONFLOW_MODEL_V3 = "Pro/deepseek-ai/DeepSeek-V3"
 
         # message config
         if "message" in toml_dict:
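The config.py hunk above is the core of the change: every concrete model identifier now lives on `BotConfig`, and the free siliconflow names are only swapped for the paid `Pro/` variants on explicit opt-in. A minimal sketch of the intended resolution logic, assuming a standalone `BotConfig` stub and a hypothetical `api_paid` key in the `[response]` table (neither is verbatim repo code):

```python
from dataclasses import dataclass


@dataclass
class BotConfig:
    # free-tier defaults, mirroring the fields this patch adds
    API_USING: str = "siliconflow"
    SILICONFLOW_MODEL_R1: str = "deepseek-ai/DeepSeek-R1"
    SILICONFLOW_MODEL_V3: str = "deepseek-ai/DeepSeek-V3"


def apply_response_config(config: BotConfig, response: dict) -> BotConfig:
    """Free models by default; paid "Pro/" models only on explicit opt-in."""
    config.API_USING = response.get("api_using", config.API_USING)
    if response.get("api_paid", False):  # hypothetical opt-in key
        config.SILICONFLOW_MODEL_R1 = "Pro/deepseek-ai/DeepSeek-R1"
        config.SILICONFLOW_MODEL_V3 = "Pro/deepseek-ai/DeepSeek-V3"
    return config


free = apply_response_config(BotConfig(), {"api_using": "siliconflow"})
assert free.SILICONFLOW_MODEL_V3 == "deepseek-ai/DeepSeek-V3"       # free tier kept

paid = apply_response_config(BotConfig(), {"api_paid": True})
assert paid.SILICONFLOW_MODEL_V3 == "Pro/deepseek-ai/DeepSeek-V3"   # paid tier chosen
```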
diff --git a/src/plugins/chat/llm_generator.py b/src/plugins/chat/llm_generator.py
index bfff1d47..682be8a8 100644
--- a/src/plugins/chat/llm_generator.py
+++ b/src/plugins/chat/llm_generator.py
@@ -206,13 +206,13 @@ class LLMResponseGenerator:
         if global_config.API_USING == "deepseek":
             return await self._generate_base_response(
                 message,
-                "deepseek-reasoner",
+                global_config.DEEPSEEK_MODEL_R1,
                 {"temperature": 0.7, "max_tokens": 1024}
             )
         else:
             return await self._generate_base_response(
                 message,
-                "Pro/deepseek-ai/DeepSeek-R1",
+                global_config.SILICONFLOW_MODEL_R1,
                 {"temperature": 0.7, "max_tokens": 1024}
             )
 
@@ -221,13 +221,13 @@ class LLMResponseGenerator:
         if global_config.API_USING == "deepseek":
             return await self._generate_base_response(
                 message,
-                "deepseek-chat",
+                global_config.DEEPSEEK_MODEL_V3,
                 {"temperature": 0.8, "max_tokens": 1024}
             )
         else:
             return await self._generate_base_response(
                 message,
-                "Pro/deepseek-ai/DeepSeek-V3",
+                global_config.SILICONFLOW_MODEL_V3,
                 {"temperature": 0.8, "max_tokens": 1024}
             )
 
@@ -235,7 +235,7 @@ class LLMResponseGenerator:
         """Generate a reply with the DeepSeek-R1-Distill-Qwen-32B model"""
         return await self._generate_base_response(
             message,
-            "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
+            global_config.SILICONFLOW_MODEL_R1_DISTILL,
             {"temperature": 0.7, "max_tokens": 1024}
         )
 
@@ -270,9 +270,9 @@ class LLMResponseGenerator:
         loop = asyncio.get_event_loop()
 
         if global_config.API_USING == "deepseek":
-            model = "deepseek-chat"
+            model = global_config.DEEPSEEK_MODEL_V3
         else:
-            model = "Pro/deepseek-ai/DeepSeek-V3"
+            model = global_config.SILICONFLOW_MODEL_V3
         create_completion = partial(
             self.client.chat.completions.create,
             model=model,

diff --git a/src/plugins/chat/topic_identifier.py b/src/plugins/chat/topic_identifier.py
index 81956ddc..34ac4e71 100644
--- a/src/plugins/chat/topic_identifier.py
+++ b/src/plugins/chat/topic_identifier.py
@@ -3,6 +3,7 @@ from openai import OpenAI
 from .message import Message
 import jieba
 from nonebot import get_driver
+from .config import global_config
 
 driver = get_driver()
 config = driver.config
@@ -24,7 +25,7 @@ class TopicIdentifier:
         消息内容:{text}"""
 
         response = self.client.chat.completions.create(
-            model="Pro/deepseek-ai/DeepSeek-V3",
+            model=global_config.SILICONFLOW_MODEL_V3,
             messages=[{"role": "user", "content": prompt}],
             temperature=0.8,
             max_tokens=10

diff --git a/src/plugins/memory_system/llm_module.py b/src/plugins/memory_system/llm_module.py
index 34e4e4af..c2b6cf04 100644
--- a/src/plugins/memory_system/llm_module.py
+++ b/src/plugins/memory_system/llm_module.py
@@ -5,13 +5,13 @@ import time
 from nonebot import get_driver
 import aiohttp
 import asyncio
-
+from src.plugins.chat.config import global_config
 driver = get_driver()
 config = driver.config
 
 class LLMModel:
     # def __init__(self, model_name="deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", **kwargs):
-    def __init__(self, model_name="Pro/deepseek-ai/DeepSeek-V3", **kwargs):
+    def __init__(self, model_name=global_config.SILICONFLOW_MODEL_V3, **kwargs):
         self.model_name = model_name
         self.params = kwargs
         self.api_key = config.siliconflow_key
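After this patch, llm_generator.py repeats the same two-way `API_USING` branch at four call sites, each differing only in which config field it reads. A possible consolidation, sketched here rather than taken from the repo, is a single lookup keyed by API and model tier; the table values mirror the free-tier names the patch puts in `BotConfig`:

```python
from typing import Dict, Tuple

# (api, tier) -> model name; values mirror the free-tier defaults in BotConfig
MODEL_TABLE: Dict[Tuple[str, str], str] = {
    ("deepseek", "r1"): "deepseek-reasoner",
    ("deepseek", "v3"): "deepseek-chat",
    ("siliconflow", "r1"): "deepseek-ai/DeepSeek-R1",
    ("siliconflow", "v3"): "deepseek-ai/DeepSeek-V3",
    ("siliconflow", "r1_distill"): "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
}


def pick_model(api_using: str, tier: str) -> str:
    """Resolve a model name; combinations not in the table fall back to siliconflow."""
    return MODEL_TABLE.get((api_using, tier), MODEL_TABLE[("siliconflow", tier)])


print(pick_model("deepseek", "v3"))          # deepseek-chat
print(pick_model("siliconflow", "r1"))       # deepseek-ai/DeepSeek-R1
print(pick_model("deepseek", "r1_distill"))  # falls back: distill exists only on siliconflow
```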
def __init__(self, model_name="Pro/deepseek-ai/DeepSeek-V3", **kwargs): + def __init__(self, model_name=global_config.SILICONFLOW_MODEL_V3, **kwargs): self.model_name = model_name self.params = kwargs self.api_key = config.siliconflow_key diff --git a/src/plugins/schedule/schedule_generator.py b/src/plugins/schedule/schedule_generator.py index a33c4b27..63e3235a 100644 --- a/src/plugins/schedule/schedule_generator.py +++ b/src/plugins/schedule/schedule_generator.py @@ -3,7 +3,7 @@ import os from typing import List, Dict from .schedule_llm_module import LLMModel from ...common.database import Database # 使用正确的导入语法 -from ..chat.config import global_config +from src.plugins.chat.config import global_config from nonebot import get_driver driver = get_driver() @@ -22,9 +22,9 @@ Database.initialize( class ScheduleGenerator: def __init__(self): if global_config.API_USING == "siliconflow": - self.llm_scheduler = LLMModel(model_name="Pro/deepseek-ai/DeepSeek-V3") + self.llm_scheduler = LLMModel(model_name=global_config.SILICONFLOW_MODEL_V3) elif global_config.API_USING == "deepseek": - self.llm_scheduler = LLMModel(model_name="deepseek-chat",api_using="deepseek") + self.llm_scheduler = LLMModel(model_name=global_config.DEEPSEEK_MODEL_V3) self.db = Database.get_instance() today = datetime.datetime.now() diff --git a/src/plugins/schedule/schedule_llm_module.py b/src/plugins/schedule/schedule_llm_module.py index cf88a865..34dad6fe 100644 --- a/src/plugins/schedule/schedule_llm_module.py +++ b/src/plugins/schedule/schedule_llm_module.py @@ -3,20 +3,17 @@ import requests import aiohttp from typing import Tuple, Union from nonebot import get_driver - +from src.plugins.chat.config import global_config driver = get_driver() config = driver.config class LLMModel: # def __init__(self, model_name="deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", **kwargs): - def __init__(self, model_name="Pro/deepseek-ai/DeepSeek-R1",api_using=None, **kwargs): + def __init__(self, model_name=global_config.SILICONFLOW_MODEL_R1,api_using=None, **kwargs): if api_using == "deepseek": self.api_key = config.deep_seek_key self.base_url = config.deep_seek_base_url - if model_name != "Pro/deepseek-ai/DeepSeek-R1": - self.model_name = model_name - else: - self.model_name = "deepseek-reasoner" + self.model_name = global_config.DEEPSEEK_MODEL_R1 else: self.api_key = config.siliconflow_key self.base_url = config.siliconflow_base_url