mirror of https://github.com/Mai-with-u/MaiBot.git
commit bf831e9004
bot.py
@@ -5,16 +5,22 @@ import time
import platform
import traceback
import shutil
import sys
import subprocess
from dotenv import load_dotenv
from pathlib import Path
from rich.traceback import install
from src.common.logger import initialize_logging, get_logger, shutdown_logging

# Set the working directory to the directory containing this script
script_dir = os.path.dirname(os.path.abspath(__file__))
os.chdir(script_dir)

env_path = Path(__file__).parent / ".env"
template_env_path = Path(__file__).parent / "template" / "template.env"

if env_path.exists():
    load_dotenv(str(env_path), override=True)
    print("成功加载环境变量配置")
else:
    try:
        if template_env_path.exists():
@@ -28,23 +34,86 @@ else:
        print(f"自动创建 .env 失败: {e}")
        raise

# Initialize the logging system as early as possible so every later module uses the correct log format
from src.common.logger import initialize_logging, get_logger, shutdown_logging  # noqa

initialize_logging()
install(extra_lines=3)
logger = get_logger("main")

# Exit code used to signal a restart request
RESTART_EXIT_CODE = 42

def run_runner_process():
    """
    Runner process logic: runs as a supervisor that starts and monitors the Worker process.
    Handles restart requests (exit code 42) and Ctrl+C.
    """
    script_file = sys.argv[0]
    python_executable = sys.executable

    # Mark the child process as a Worker via an environment variable
    env = os.environ.copy()
    env["MAIBOT_WORKER_PROCESS"] = "1"

    while True:
        logger.info(f"正在启动 {script_file}...")

        # Start the child (Worker) process
        # Use sys.executable so the same Python interpreter is reused
        cmd = [python_executable, script_file] + sys.argv[1:]

        process = subprocess.Popen(cmd, env=env)

        try:
            # Wait for the child process to exit
            return_code = process.wait()

            if return_code == RESTART_EXIT_CODE:
                logger.info("检测到重启请求 (退出码 42),正在重启...")
                time.sleep(1)  # brief pause before relaunching
                continue
            else:
                logger.info(f"程序已退出 (退出码 {return_code})")
                sys.exit(return_code)

        except KeyboardInterrupt:
            # Send a termination signal to the child process
            if process.poll() is None:
                # On Windows, Ctrl+C has usually already been delivered to the child
                # (when it shares the console), but call terminate() as a safeguard
                try:
                    process.terminate()
                    process.wait(timeout=5)
                except subprocess.TimeoutExpired:
                    logger.warning("子进程未响应,强制关闭...")
                    process.kill()
            sys.exit(0)

# Check whether this is the Worker process.
# If MAIBOT_WORKER_PROCESS is not set, the script was launched directly
# and should therefore act as the Runner.
if os.environ.get("MAIBOT_WORKER_PROCESS") != "1":
    if __name__ == "__main__":
        run_runner_process()
    # When imported as a module, skip the Runner logic, but do not fall through
    # to the Worker logic below either
    sys.exit(0)

# Worker process logic from here on

# Initialize the logging system as early as possible so every later module uses the correct log format
# from src.common.logger import initialize_logging, get_logger, shutdown_logging  # noqa
# initialize_logging()

from src.main import MainSystem  # noqa
from src.manager.async_task_manager import async_task_manager  # noqa


logger = get_logger("main")
# logger = get_logger("main")


install(extra_lines=3)
# install(extra_lines=3)

# Set the working directory to the directory containing this script
script_dir = os.path.dirname(os.path.abspath(__file__))
os.chdir(script_dir)
# script_dir = os.path.dirname(os.path.abspath(__file__))
# os.chdir(script_dir)
logger.info(f"已设置工作目录为: {script_dir}")
@@ -254,6 +323,15 @@ if __name__ == "__main__":
            logger.error(f"优雅关闭时发生错误: {ge}")
        # New: detect an externally requested shutdown

    except SystemExit as e:
        # Catch SystemExit (e.g. sys.exit()) and preserve the exit code
        if isinstance(e.code, int):
            exit_code = e.code
        else:
            exit_code = 1 if e.code else 0
        if exit_code == RESTART_EXIT_CODE:
            logger.info("收到重启信号,准备退出并请求重启...")

    except Exception as e:
        logger.error(f"主程序发生异常: {str(e)} {str(traceback.format_exc())}")
        exit_code = 1  # mark that an error occurred
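Side note on the mechanism above: the restart handshake between the two processes reduces to a single agreed-upon exit status. A minimal sketch of the worker-side trigger under this scheme (the helper name is hypothetical, not part of the diff):

import sys

RESTART_EXIT_CODE = 42  # must match the Runner's constant in bot.py

def request_restart() -> None:
    # Exiting with status 42 makes the Runner's wait() loop relaunch the Worker;
    # any other status makes the Runner exit with that same status.
    sys.exit(RESTART_EXIT_CODE)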
@@ -839,8 +839,6 @@ class DefaultReplyer:
                continue

            timing_logs.append(f"{chinese_name}: {duration:.1f}s")
-            if duration > 12:
-                logger.warning(f"回复生成前信息获取耗时过长: {chinese_name} 耗时: {duration:.1f}s,请使用更快的模型")
        logger.info(f"回复准备: {'; '.join(timing_logs)}; {almost_zero_str} <0.1s")

        expression_habits_block, selected_expressions = results_dict["expression_habits"]
@@ -760,8 +760,6 @@ class PrivateReplyer:
                continue

            timing_logs.append(f"{chinese_name}: {duration:.1f}s")
-            if duration > 12:
-                logger.warning(f"回复生成前信息获取耗时过长: {chinese_name} 耗时: {duration:.1f}s,请使用更快的模型")
        logger.info(f"回复准备: {'; '.join(timing_logs)}; {almost_zero_str} <0.1s")

        expression_habits_block, selected_expressions = results_dict["expression_habits"]
@@ -88,6 +88,9 @@ class TaskConfig(ConfigBase):
    temperature: float = 0.3
    """Model temperature"""

+    slow_threshold: float = 15.0
+    """Slow-request threshold in seconds; a request that takes longer logs a warning"""


@dataclass
class ModelTaskConfig(ConfigBase):
@@ -47,6 +47,21 @@ class LLMRequest:
    }
    """Model usage record used for load balancing, stored as (total_tokens, penalty, usage_penalty); the penalty values allow steering away from a model that is responding poorly or is currently in use"""

+    def _check_slow_request(self, time_cost: float, model_name: str) -> None:
+        """Check whether a request was too slow and log a warning if so
+
+        Args:
+            time_cost: request duration in seconds
+            model_name: name of the model that served the request
+        """
+        threshold = self.model_for_task.slow_threshold
+        if time_cost > threshold:
+            request_type_display = self.request_type or "未知任务"
+            logger.warning(
+                f"LLM请求耗时过长: {request_type_display} 使用模型 {model_name} 耗时 {time_cost:.1f}s(阈值: {threshold}s),请考虑使用更快的模型\n"
+                f" 如果你认为该警告出现得过于频繁,请调整model_config.toml中对应任务的slow_threshold至符合你实际情况的合理值"
+            )

    async def generate_response_for_image(
        self,
        prompt: str,
@@ -86,6 +101,8 @@ class LLMRequest:
        if not reasoning_content and content:
            content, extracted_reasoning = self._extract_reasoning(content)
            reasoning_content = extracted_reasoning
+        time_cost = time.time() - start_time
+        self._check_slow_request(time_cost, model_info.name)
        if usage := response.usage:
            llm_usage_recorder.record_usage_to_database(
                model_info=model_info,

@@ -93,7 +110,7 @@ class LLMRequest:
                user_id="system",
                request_type=self.request_type,
                endpoint="/chat/completions",
-                time_cost=time.time() - start_time,
+                time_cost=time_cost,
            )
        return content, (reasoning_content, model_info.name, tool_calls)
@@ -198,7 +215,8 @@ class LLMRequest:
            tool_options=tool_built,
        )

-        logger.debug(f"LLM请求总耗时: {time.time() - start_time}")
+        time_cost = time.time() - start_time
+        logger.debug(f"LLM请求总耗时: {time_cost}")
        logger.debug(f"LLM生成内容: {response}")

        content = response.content

@@ -207,6 +225,7 @@ class LLMRequest:
        if not reasoning_content and content:
            content, extracted_reasoning = self._extract_reasoning(content)
            reasoning_content = extracted_reasoning
+        self._check_slow_request(time_cost, model_info.name)
        if usage := response.usage:
            llm_usage_recorder.record_usage_to_database(
                model_info=model_info,

@@ -214,7 +233,7 @@ class LLMRequest:
                user_id="system",
                request_type=self.request_type,
                endpoint="/chat/completions",
-                time_cost=time.time() - start_time,
+                time_cost=time_cost,
            )
        return content or "", (reasoning_content, model_info.name, tool_calls)
@@ -301,7 +320,7 @@ class LLMRequest:
            message_list=(compressed_messages or message_list),
            tool_options=tool_options,
            max_tokens=self.model_for_task.max_tokens if max_tokens is None else max_tokens,
-            temperature=self.model_for_task.temperature if temperature is None else temperature,
+            temperature=temperature if temperature is not None else (model_info.extra_params or {}).get("temperature", self.model_for_task.temperature),
            response_format=response_format,
            stream_response_handler=stream_response_handler,
            async_response_parser=async_response_parser,
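The replacement line above encodes a three-level temperature precedence: an explicit call-site argument wins, then a per-model override from extra_params, then the task default. A standalone restatement of the rule (function name and test values are illustrative only):

from typing import Optional

def resolve_temperature(call_site: Optional[float], extra_params: Optional[dict], task_default: float) -> float:
    # 1. explicit argument, 2. per-model extra_params override, 3. task config default
    if call_site is not None:
        return call_site
    return (extra_params or {}).get("temperature", task_default)

assert resolve_temperature(0.9, {"temperature": 0.5}, 0.3) == 0.9
assert resolve_temperature(None, {"temperature": 0.5}, 0.3) == 0.5
assert resolve_temperature(None, None, 0.3) == 0.3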
@@ -323,24 +342,19 @@ class LLMRequest:
            )
        except EmptyResponseException as e:
            # Empty reply: usually a transient problem; log separately and retry
+            original_error_info = self._get_original_error_info(e)
            retry_remain -= 1
            if retry_remain <= 0:
-                logger.error(f"模型 '{model_info.name}' 在多次出现空回复后仍然失败。")
+                logger.error(f"模型 '{model_info.name}' 在多次出现空回复后仍然失败。{original_error_info}")
                raise ModelAttemptFailed(f"模型 '{model_info.name}' 重试耗尽", original_exception=e) from e

-            logger.warning(f"模型 '{model_info.name}' 返回空回复(可重试)。剩余重试次数: {retry_remain}")
+            logger.warning(f"模型 '{model_info.name}' 返回空回复(可重试){original_error_info}。剩余重试次数: {retry_remain}")
            await asyncio.sleep(api_provider.retry_interval)

        except NetworkConnectionError as e:
            # Network error: log separately and retry
            # Try to pull the original error from the exception chain to diagnose the root cause
-            original_error_info = ""
-            if e.__cause__:
-                original_error_type = type(e.__cause__).__name__
-                original_error_msg = str(e.__cause__)
-                original_error_info = (
-                    f"\n 底层异常类型: {original_error_type}\n 底层异常信息: {original_error_msg}"
-                )
+            original_error_info = self._get_original_error_info(e)

            retry_remain -= 1
            if retry_remain <= 0:
@@ -356,15 +370,17 @@ class LLMRequest:
            await asyncio.sleep(api_provider.retry_interval)

        except RespNotOkException as e:
+            original_error_info = self._get_original_error_info(e)
+
            # Retryable HTTP errors
            if e.status_code == 429 or e.status_code >= 500:
                retry_remain -= 1
                if retry_remain <= 0:
-                    logger.error(f"模型 '{model_info.name}' 在遇到 {e.status_code} 错误并用尽重试次数后仍然失败。")
+                    logger.error(f"模型 '{model_info.name}' 在遇到 {e.status_code} 错误并用尽重试次数后仍然失败。{original_error_info}")
                    raise ModelAttemptFailed(f"模型 '{model_info.name}' 重试耗尽", original_exception=e) from e

                logger.warning(
-                    f"模型 '{model_info.name}' 遇到可重试的HTTP错误: {str(e)}。剩余重试次数: {retry_remain}"
+                    f"模型 '{model_info.name}' 遇到可重试的HTTP错误: {str(e)}{original_error_info}。剩余重试次数: {retry_remain}"
                )
                await asyncio.sleep(api_provider.retry_interval)
                continue
@@ -377,13 +393,15 @@ class LLMRequest:
                continue

            # Non-retryable HTTP error
-            logger.warning(f"模型 '{model_info.name}' 遇到不可重试的HTTP错误: {str(e)}")
+            logger.warning(f"模型 '{model_info.name}' 遇到不可重试的HTTP错误: {str(e)}{original_error_info}")
            raise ModelAttemptFailed(f"模型 '{model_info.name}' 遇到硬错误", original_exception=e) from e

        except Exception as e:
            logger.error(traceback.format_exc())

-            logger.warning(f"模型 '{model_info.name}' 遇到未知的不可重试错误: {str(e)}")
+            original_error_info = self._get_original_error_info(e)
+
+            logger.warning(f"模型 '{model_info.name}' 遇到未知的不可重试错误: {str(e)}{original_error_info}")
            raise ModelAttemptFailed(f"模型 '{model_info.name}' 遇到硬错误", original_exception=e) from e

    raise ModelAttemptFailed(f"模型 '{model_info.name}' 未被尝试,因为重试次数已配置为0或更少。")
@@ -497,3 +515,14 @@ class LLMRequest:
        content = re.sub(r"(?:<think>)?.*?</think>", "", content, flags=re.DOTALL, count=1).strip()
        reasoning = match[1].strip() if match else ""
        return content, reasoning

+    @staticmethod
+    def _get_original_error_info(e: Exception) -> str:
+        """Return information about the original (chained) error"""
+        if e.__cause__:
+            original_error_type = type(e.__cause__).__name__
+            original_error_msg = str(e.__cause__)
+            return (
+                f"\n 底层异常类型: {original_error_type}\n 底层异常信息: {original_error_msg}"
+            )
+        return ""
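Usage note: the helper only reports anything when the wrapper exception was raised with raise ... from ..., since that is what populates __cause__. A self-contained illustration of the pattern (the class and messages here are stand-ins, not project code):

class WrapperError(Exception):
    pass

def fetch():
    try:
        raise TimeoutError("connect timed out after 30s")
    except TimeoutError as e:
        # "from e" sets __cause__, which _get_original_error_info reads
        raise WrapperError("request failed") from e

try:
    fetch()
except WrapperError as e:
    print(type(e.__cause__).__name__)  # TimeoutError
    print(str(e.__cause__))            # connect timed out after 30s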
@@ -15,7 +15,7 @@ class SearchKnowledgeFromLPMMTool(BaseTool):
    description = "从知识库中搜索相关信息,如果你需要知识,就使用这个工具"
    parameters = [
        ("query", ToolParamType.STRING, "搜索查询关键词", True, None),
-        ("limit", ToolParamType.INTEGER, "希望返回的相关知识条数,默认5", False, 5),
+        ("limit", ToolParamType.INTEGER, "希望返回的相关知识条数,默认5", False, None),
    ]
    available_for_llm = global_config.lpmm_knowledge.enable
@@ -11,8 +11,10 @@ from datetime import datetime
from fastapi import APIRouter, HTTPException
from pydantic import BaseModel
from src.config.config import MMC_VERSION
+from src.common.logger import get_logger

router = APIRouter(prefix="/system", tags=["system"])
+logger = get_logger("webui_system")

# Record the start time
_start_time = time.time()

@@ -39,21 +41,22 @@ async def restart_maibot():
    """
    Restart the MaiBot main program

-    Restart the current process with os.execv; configuration changes take effect after the restart.
+    Request a restart of the current process; configuration changes take effect after the restart.
    Note: this temporarily takes MaiBot offline.
    """
    import asyncio

    try:
        # Log the restart operation
-        print(f"[{datetime.now()}] WebUI 触发重启操作")
+        logger.info("WebUI 触发重启操作")

        # Define the delayed-restart task
        async def delayed_restart():
            await asyncio.sleep(0.5)  # wait 0.5 s so the response is sent first
-            python = sys.executable
-            args = [python] + sys.argv
-            os.execv(python, args)
+            # Exit the current process with os._exit(42) so the external runner script performs the restart
+            # (42 is the agreed-upon restart status code)
+            logger.info("WebUI 请求重启,退出代码 42")
+            os._exit(42)

        # Create a background task to perform the restart
        asyncio.create_task(delayed_restart())
@@ -2,8 +2,8 @@
version = "6.23.5"

#----The following is for developers; if you only deployed MaiBot, you don't need to read it----
-#If you want to modify the config file, increment the version value
-#For new entries, read the notes in src/config/official_configs.py
+# If you want to modify the config file, increment the version value
+# For new entries, read the notes in src/config/official_configs.py
#
# Version format: major.minor.patch, incremented as follows:
# Major: MMC version updates

@@ -23,7 +23,7 @@ alias_names = ["麦叠", "牢麦"] # MaiBot's nicknames
[personality]
# Recommended under 120 characters; describes personality traits and identity
personality = "是一个女大学生,现在在读大二,会刷贴吧。"
-#アイデンティティがない 生まれないらららら
+# アイデンティティがない 生まれないらららら
# Describes MaiBot's speaking style and habits; extend as appropriate when editing
reply_style = "请回复的平淡一些,简短一些,说中文,不要刻意突出自身学科背景。可以参考贴吧,知乎和微博的回复风格。"

@@ -85,11 +85,11 @@ reflect_operator_id = "" # Expression-reflection operator ID, format: platform:id:type (...)
allow_reflect = [] # Chat streams allowed to run expression reflection, format: ["qq:123456:private", "qq:654321:group", ...]; only streams in this list will raise questions and follow up. An empty list allows all chat streams (provided reflect = true)


-[chat] #MaiBot's chat settings
-talk_value = 1 #Chat frequency; lower is quieter, range 0-1
+[chat] # MaiBot's chat settings
+talk_value = 1 # Chat frequency; lower is quieter, range 0-1
mentioned_bot_reply = true # Whether mentioning the bot forces a reply
max_context_size = 30 # Context length
-planner_smooth = 2 #Planner smoothing; larger values reduce planner load and slightly slow reactions, recommended 1-5, 0 disables, must be >= 0
+planner_smooth = 2 # Planner smoothing; larger values reduce planner load and slightly slow reactions, recommended 1-5, 0 disables, must be >= 0

enable_talk_value_rules = true # Whether to enable dynamic talk-frequency rules

@@ -143,8 +143,8 @@ ban_words = [

ban_msgs_regex = [
    # Regexes matched against raw messages; matches are filtered out. Don't edit unless you understand regular expressions
-    #"https?://[^\\s]+", # matches https links
-    #"\\d{4}-\\d{2}-\\d{2}", # matches dates
+    # "https?://[^\\s]+", # matches https links
+    # "\\d{4}-\\d{2}-\\d{2}", # matches dates
]

@@ -173,7 +173,7 @@ embedding_dimension = 1024 # Embedding vector dimension; must match the model's output dimension
keyword_rules = [
    { keywords = ["人机", "bot", "机器", "入机", "robot", "机器人", "ai", "AI"], reaction = "有人说你是人机或者机器人,否定这一事实,攻击或反问他。注意,只有认为你是机器人才回复,否则不要否认" },
    { keywords = ["测试关键词回复", "test"], reaction = "回答测试成功" },
-    #{ keywords = ["你好", "hello"], reaction = "你好,有什么可以帮你?" }
+    # { keywords = ["你好", "hello"], reaction = "你好,有什么可以帮你?" }
    # Add more rules here in the same format
]

@@ -246,7 +246,7 @@ enable = true
chat_prompts = []


-#This system is temporarily removed; the config below has no effect
+# This system is temporarily removed; the config below has no effect
[relationship]
enable_relationship = true # Whether to enable the relationship system
@@ -1,5 +1,5 @@
[inner]
-version = "1.8.1"
+version = "1.8.2"

# Version numbering for this config file follows the same rules as bot_config.toml

@@ -46,7 +46,7 @@ name = "deepseek-v3" # Model name (arbitrary; referenced later in task configs)
api_provider = "DeepSeek" # API provider name (matching a provider configured in api_providers)
price_in = 2.0 # Input price (for API usage statistics, unit: CNY per M tokens) (optional; defaults to 0 if absent)
price_out = 8.0 # Output price (for API usage statistics, unit: CNY per M tokens) (optional; defaults to 0 if absent)
-#force_stream_mode = true # Force streaming output (uncomment if the model does not support non-streaming output; defaults to false if absent)
+# force_stream_mode = true # Force streaming output (uncomment if the model does not support non-streaming output; defaults to false if absent)

[[models]]
model_identifier = "deepseek-ai/DeepSeek-V3.2-Exp"

@@ -56,6 +56,7 @@ price_in = 2.0
price_out = 3.0
[models.extra_params] # Optional extra parameters
enable_thinking = false # Disable thinking
+# temperature = 0.5 # Optional: set a model-specific temperature that overrides the temperature in the task config

[[models]]
model_identifier = "deepseek-ai/DeepSeek-V3.2-Exp"

@@ -65,6 +66,7 @@ price_in = 2.0
price_out = 3.0
[models.extra_params] # Optional extra parameters
enable_thinking = true # Enable thinking
+# temperature = 0.7 # Optional: set a model-specific temperature that overrides the temperature in the task config

[[models]]
model_identifier = "Qwen/Qwen3-Next-80B-A3B-Instruct"

@@ -133,51 +135,62 @@ price_out = 0
model_list = ["siliconflow-deepseek-v3.2"] # Models to use; each entry matches a model name (name) defined above
temperature = 0.2 # Model temperature; 0.1-0.3 recommended for the new V3
max_tokens = 2048 # Maximum output tokens
+slow_threshold = 15.0 # Slow-request threshold in seconds; a warning is logged when waiting for a reply takes longer

[model_task_config.utils_small] # Small model used by several MaiBot components; high volume, so prefer a fast small model
model_list = ["qwen3-30b","qwen3-next-80b"]
temperature = 0.7
max_tokens = 2048
+slow_threshold = 10.0

[model_task_config.tool_use] # Tool-calling model; must support tool calls
model_list = ["qwen3-30b","qwen3-next-80b"]
temperature = 0.7
max_tokens = 800
+slow_threshold = 10.0

[model_task_config.replyer] # Primary reply model; also used by the expressor and expression learning
model_list = ["siliconflow-deepseek-v3.2","siliconflow-deepseek-v3.2-think","siliconflow-glm-4.6","siliconflow-glm-4.6-think"]
temperature = 0.3 # Model temperature; 0.1-0.3 recommended for the new V3
max_tokens = 2048
+slow_threshold = 25.0

[model_task_config.planner] # Decision-making: decides when MaiBot should reply
model_list = ["siliconflow-deepseek-v3.2"]
temperature = 0.3
max_tokens = 800
+slow_threshold = 12.0

[model_task_config.vlm] # Image recognition model
model_list = ["qwen3-vl-30"]
max_tokens = 256
+slow_threshold = 15.0

[model_task_config.voice] # Speech recognition model
model_list = ["sensevoice-small"]
+slow_threshold = 12.0

-#Embedding model
+# Embedding model
[model_task_config.embedding]
model_list = ["bge-m3"]
+slow_threshold = 5.0

-#------------LPMM knowledge-base models------------
+# ------------LPMM knowledge-base models------------

[model_task_config.lpmm_entity_extract] # Entity extraction model
model_list = ["siliconflow-deepseek-v3.2"]
temperature = 0.2
max_tokens = 800
+slow_threshold = 20.0

[model_task_config.lpmm_rdf_build] # RDF building model
model_list = ["siliconflow-deepseek-v3.2"]
temperature = 0.2
max_tokens = 800
+slow_threshold = 20.0

[model_task_config.lpmm_qa] # Q&A model
model_list = ["siliconflow-deepseek-v3.2"]
temperature = 0.7
max_tokens = 800
+slow_threshold = 20.0