mirror of https://github.com/Mai-with-u/MaiBot.git

Merge branch 'dev' of https://github.com/Mai-with-u/MaiBot into dev
commit 188061456c

bot.py

@@ -5,16 +5,22 @@ import time
import platform
import traceback
import shutil
import sys
import subprocess
from dotenv import load_dotenv
from pathlib import Path
from rich.traceback import install
from src.common.logger import initialize_logging, get_logger, shutdown_logging

# 设置工作目录为脚本所在目录
script_dir = os.path.dirname(os.path.abspath(__file__))
os.chdir(script_dir)

env_path = Path(__file__).parent / ".env"
template_env_path = Path(__file__).parent / "template" / "template.env"

if env_path.exists():
    load_dotenv(str(env_path), override=True)
    print("成功加载环境变量配置")
else:
    try:
        if template_env_path.exists():

@@ -28,23 +34,86 @@ else:
        print(f"自动创建 .env 失败: {e}")
        raise

# 最早期初始化日志系统,确保所有后续模块都使用正确的日志格式
from src.common.logger import initialize_logging, get_logger, shutdown_logging  # noqa

initialize_logging()
install(extra_lines=3)
logger = get_logger("main")

# 定义重启退出码
RESTART_EXIT_CODE = 42


def run_runner_process():
    """
    Runner 进程逻辑:作为守护进程运行,负责启动和监控 Worker 进程。
    处理重启请求 (退出码 42) 和 Ctrl+C 信号。
    """
    script_file = sys.argv[0]
    python_executable = sys.executable

    # 设置环境变量,标记子进程为 Worker 进程
    env = os.environ.copy()
    env["MAIBOT_WORKER_PROCESS"] = "1"

    while True:
        logger.info(f"正在启动 {script_file}...")

        # 启动子进程 (Worker)
        # 使用 sys.executable 确保使用相同的 Python 解释器
        cmd = [python_executable, script_file] + sys.argv[1:]

        process = subprocess.Popen(cmd, env=env)

        try:
            # 等待子进程结束
            return_code = process.wait()

            if return_code == RESTART_EXIT_CODE:
                logger.info("检测到重启请求 (退出码 42),正在重启...")
                time.sleep(1)  # 稍作等待
                continue
            else:
                logger.info(f"程序已退出 (退出码 {return_code})")
                sys.exit(return_code)

        except KeyboardInterrupt:
            # 向子进程发送终止信号
            if process.poll() is None:
                # 在 Windows 上,Ctrl+C 通常已经发送给了子进程(如果它们共享控制台)
                # 但为了保险,我们可以尝试 terminate
                try:
                    process.terminate()
                    process.wait(timeout=5)
                except subprocess.TimeoutExpired:
                    logger.warning("子进程未响应,强制关闭...")
                    process.kill()
            sys.exit(0)


# 检查是否是 Worker 进程
# 如果没有设置 MAIBOT_WORKER_PROCESS 环境变量,说明是直接运行的脚本,
# 此时应该作为 Runner 运行。
if os.environ.get("MAIBOT_WORKER_PROCESS") != "1":
    if __name__ == "__main__":
        run_runner_process()
    # 如果作为模块导入,不执行 Runner 逻辑,但也不应该执行下面的 Worker 逻辑
    sys.exit(0)

# 以下是 Worker 进程的逻辑

# 最早期初始化日志系统,确保所有后续模块都使用正确的日志格式
# from src.common.logger import initialize_logging, get_logger, shutdown_logging  # noqa
# initialize_logging()

from src.main import MainSystem  # noqa
from src.manager.async_task_manager import async_task_manager  # noqa

logger = get_logger("main")
# logger = get_logger("main")

install(extra_lines=3)
# install(extra_lines=3)

# 设置工作目录为脚本所在目录
script_dir = os.path.dirname(os.path.abspath(__file__))
os.chdir(script_dir)
# script_dir = os.path.dirname(os.path.abspath(__file__))
# os.chdir(script_dir)
logger.info(f"已设置工作目录为: {script_dir}")

@@ -254,6 +323,15 @@ if __name__ == "__main__":
                logger.error(f"优雅关闭时发生错误: {ge}")
        # 新增:检测外部请求关闭

    except SystemExit as e:
        # 捕获 SystemExit (例如 sys.exit()) 并保留退出代码
        if isinstance(e.code, int):
            exit_code = e.code
        else:
            exit_code = 1 if e.code else 0
        if exit_code == RESTART_EXIT_CODE:
            logger.info("收到重启信号,准备退出并请求重启...")

    except Exception as e:
        logger.error(f"主程序发生异常: {str(e)} {str(traceback.format_exc())}")
        exit_code = 1  # 标记发生错误

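Note on the runner/worker split added above: any code running in the Worker can request a full restart simply by exiting with the agreed exit code 42; the Runner loop sees that code and spawns a fresh Worker. A minimal sketch of the Worker side (the helper name is illustrative and not part of this commit):

import sys

RESTART_EXIT_CODE = 42  # must match the constant defined in bot.py

def request_restart() -> None:
    # Hypothetical helper: exiting with code 42 makes the Runner loop respawn the Worker.
    sys.exit(RESTART_EXIT_CODE)
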
@@ -839,8 +839,6 @@ class DefaultReplyer:
                continue

            timing_logs.append(f"{chinese_name}: {duration:.1f}s")
            if duration > 12:
                logger.warning(f"回复生成前信息获取耗时过长: {chinese_name} 耗时: {duration:.1f}s,请使用更快的模型")
        logger.info(f"回复准备: {'; '.join(timing_logs)}; {almost_zero_str} <0.1s")

        expression_habits_block, selected_expressions = results_dict["expression_habits"]

@@ -760,8 +760,6 @@ class PrivateReplyer:
                continue

            timing_logs.append(f"{chinese_name}: {duration:.1f}s")
            if duration > 12:
                logger.warning(f"回复生成前信息获取耗时过长: {chinese_name} 耗时: {duration:.1f}s,请使用更快的模型")
        logger.info(f"回复准备: {'; '.join(timing_logs)}; {almost_zero_str} <0.1s")

        expression_habits_block, selected_expressions = results_dict["expression_habits"]

@@ -88,6 +88,9 @@ class TaskConfig(ConfigBase):
    temperature: float = 0.3
    """模型温度"""

    slow_threshold: float = 15.0
    """慢请求阈值(秒),超过此值会输出警告日志"""


@dataclass
class ModelTaskConfig(ConfigBase):

@@ -47,6 +47,21 @@ class LLMRequest:
        }
        """模型使用量记录,用于进行负载均衡,对应为(total_tokens, penalty, usage_penalty),惩罚值是为了能在某个模型请求不给力或正在被使用的时候进行调整"""

    def _check_slow_request(self, time_cost: float, model_name: str) -> None:
        """检查请求是否过慢并输出警告日志

        Args:
            time_cost: 请求耗时(秒)
            model_name: 使用的模型名称
        """
        threshold = self.model_for_task.slow_threshold
        if time_cost > threshold:
            request_type_display = self.request_type or "未知任务"
            logger.warning(
                f"LLM请求耗时过长: {request_type_display} 使用模型 {model_name} 耗时 {time_cost:.1f}s(阈值: {threshold}s),请考虑使用更快的模型\n"
                f"    如果你认为该警告出现得过于频繁,请调整model_config.toml中对应任务的slow_threshold至符合你实际情况的合理值"
            )

    async def generate_response_for_image(
        self,
        prompt: str,

@@ -86,6 +101,8 @@ class LLMRequest:
            if not reasoning_content and content:
                content, extracted_reasoning = self._extract_reasoning(content)
                reasoning_content = extracted_reasoning
            time_cost = time.time() - start_time
            self._check_slow_request(time_cost, model_info.name)
            if usage := response.usage:
                llm_usage_recorder.record_usage_to_database(
                    model_info=model_info,

@@ -93,7 +110,7 @@ class LLMRequest:
                    user_id="system",
                    request_type=self.request_type,
                    endpoint="/chat/completions",
-                   time_cost=time.time() - start_time,
+                   time_cost=time_cost,
                )
            return content, (reasoning_content, model_info.name, tool_calls)

@@ -198,7 +215,8 @@ class LLMRequest:
                tool_options=tool_built,
            )

-           logger.debug(f"LLM请求总耗时: {time.time() - start_time}")
+           time_cost = time.time() - start_time
+           logger.debug(f"LLM请求总耗时: {time_cost}")
            logger.debug(f"LLM生成内容: {response}")

            content = response.content

@@ -207,6 +225,7 @@ class LLMRequest:
            if not reasoning_content and content:
                content, extracted_reasoning = self._extract_reasoning(content)
                reasoning_content = extracted_reasoning
            self._check_slow_request(time_cost, model_info.name)
            if usage := response.usage:
                llm_usage_recorder.record_usage_to_database(
                    model_info=model_info,

@@ -214,7 +233,7 @@ class LLMRequest:
                    user_id="system",
                    request_type=self.request_type,
                    endpoint="/chat/completions",
-                   time_cost=time.time() - start_time,
+                   time_cost=time_cost,
                )
            return content or "", (reasoning_content, model_info.name, tool_calls)

@@ -301,7 +320,7 @@ class LLMRequest:
                message_list=(compressed_messages or message_list),
                tool_options=tool_options,
                max_tokens=self.model_for_task.max_tokens if max_tokens is None else max_tokens,
-               temperature=self.model_for_task.temperature if temperature is None else temperature,
+               temperature=temperature if temperature is not None else (model_info.extra_params or {}).get("temperature", self.model_for_task.temperature),
                response_format=response_format,
                stream_response_handler=stream_response_handler,
                async_response_parser=async_response_parser,

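The changed temperature line above establishes a precedence order: an explicit per-call temperature wins, then a per-model temperature set in the model's extra_params, then the task-level default. A minimal standalone sketch of that resolution logic (the function name is illustrative, not part of the codebase):

from typing import Optional

def resolve_temperature(
    explicit: Optional[float],
    extra_params: Optional[dict],
    task_default: float,
) -> float:
    # Mirrors the precedence in the diff: call argument > model extra_params > task config
    if explicit is not None:
        return explicit
    return (extra_params or {}).get("temperature", task_default)

# e.g. resolve_temperature(None, {"temperature": 0.5}, 0.3) -> 0.5
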
@@ -323,24 +342,19 @@ class LLMRequest:
                )
            except EmptyResponseException as e:
                # 空回复:通常为临时问题,单独记录并重试
                original_error_info = self._get_original_error_info(e)
                retry_remain -= 1
                if retry_remain <= 0:
-                   logger.error(f"模型 '{model_info.name}' 在多次出现空回复后仍然失败。")
+                   logger.error(f"模型 '{model_info.name}' 在多次出现空回复后仍然失败。{original_error_info}")
                    raise ModelAttemptFailed(f"模型 '{model_info.name}' 重试耗尽", original_exception=e) from e

-               logger.warning(f"模型 '{model_info.name}' 返回空回复(可重试)。剩余重试次数: {retry_remain}")
+               logger.warning(f"模型 '{model_info.name}' 返回空回复(可重试){original_error_info}。剩余重试次数: {retry_remain}")
                await asyncio.sleep(api_provider.retry_interval)

            except NetworkConnectionError as e:
                # 网络错误:单独记录并重试
                # 尝试从链式异常中获取原始错误信息以诊断具体原因
-               original_error_info = ""
-               if e.__cause__:
-                   original_error_type = type(e.__cause__).__name__
-                   original_error_msg = str(e.__cause__)
-                   original_error_info = (
-                       f"\n 底层异常类型: {original_error_type}\n 底层异常信息: {original_error_msg}"
-                   )
+               original_error_info = self._get_original_error_info(e)

                retry_remain -= 1
                if retry_remain <= 0:

@@ -356,15 +370,17 @@ class LLMRequest:
                await asyncio.sleep(api_provider.retry_interval)

            except RespNotOkException as e:
                original_error_info = self._get_original_error_info(e)

                # 可重试的HTTP错误
                if e.status_code == 429 or e.status_code >= 500:
                    retry_remain -= 1
                    if retry_remain <= 0:
-                       logger.error(f"模型 '{model_info.name}' 在遇到 {e.status_code} 错误并用尽重试次数后仍然失败。")
+                       logger.error(f"模型 '{model_info.name}' 在遇到 {e.status_code} 错误并用尽重试次数后仍然失败。{original_error_info}")
                        raise ModelAttemptFailed(f"模型 '{model_info.name}' 重试耗尽", original_exception=e) from e

                    logger.warning(
-                       f"模型 '{model_info.name}' 遇到可重试的HTTP错误: {str(e)}。剩余重试次数: {retry_remain}"
+                       f"模型 '{model_info.name}' 遇到可重试的HTTP错误: {str(e)}{original_error_info}。剩余重试次数: {retry_remain}"
                    )
                    await asyncio.sleep(api_provider.retry_interval)
                    continue

@@ -377,13 +393,15 @@ class LLMRequest:
                    continue

                # 不可重试的HTTP错误
-               logger.warning(f"模型 '{model_info.name}' 遇到不可重试的HTTP错误: {str(e)}")
+               logger.warning(f"模型 '{model_info.name}' 遇到不可重试的HTTP错误: {str(e)}{original_error_info}")
                raise ModelAttemptFailed(f"模型 '{model_info.name}' 遇到硬错误", original_exception=e) from e

            except Exception as e:
                logger.error(traceback.format_exc())

                original_error_info = self._get_original_error_info(e)

-               logger.warning(f"模型 '{model_info.name}' 遇到未知的不可重试错误: {str(e)}")
+               logger.warning(f"模型 '{model_info.name}' 遇到未知的不可重试错误: {str(e)}{original_error_info}")
                raise ModelAttemptFailed(f"模型 '{model_info.name}' 遇到硬错误", original_exception=e) from e

        raise ModelAttemptFailed(f"模型 '{model_info.name}' 未被尝试,因为重试次数已配置为0或更少。")

@@ -497,3 +515,14 @@ class LLMRequest:
        content = re.sub(r"(?:<think>)?.*?</think>", "", content, flags=re.DOTALL, count=1).strip()
        reasoning = match[1].strip() if match else ""
        return content, reasoning

    @staticmethod
    def _get_original_error_info(e: Exception) -> str:
        """获取原始错误信息"""
        if e.__cause__:
            original_error_type = type(e.__cause__).__name__
            original_error_msg = str(e.__cause__)
            return (
                f"\n 底层异常类型: {original_error_type}\n 底层异常信息: {original_error_msg}"
            )
        return ""

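The new _get_original_error_info helper only reports something when the wrapper exception was raised with "raise ... from original", which is what populates __cause__. A minimal sketch of that interaction (the exception class below is a local stand-in defined for the example, not imported from the project):

class NetworkConnectionError(Exception):
    """Stand-in for the project's wrapper exception."""

def original_error_info(e: Exception) -> str:
    # Same idea as LLMRequest._get_original_error_info: read the chained cause, if any.
    if e.__cause__:
        return f"\n 底层异常类型: {type(e.__cause__).__name__}\n 底层异常信息: {e.__cause__}"
    return ""

try:
    try:
        raise TimeoutError("connect timeout after 30s")
    except TimeoutError as low_level:
        # 'from' fills __cause__ and is what makes the helper useful
        raise NetworkConnectionError("请求失败") from low_level
except NetworkConnectionError as e:
    print(original_error_info(e))
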
@@ -15,7 +15,7 @@ class SearchKnowledgeFromLPMMTool(BaseTool):
    description = "从知识库中搜索相关信息,如果你需要知识,就使用这个工具"
    parameters = [
        ("query", ToolParamType.STRING, "搜索查询关键词", True, None),
-       ("limit", ToolParamType.INTEGER, "希望返回的相关知识条数,默认5", False, 5),
+       ("limit", ToolParamType.INTEGER, "希望返回的相关知识条数,默认5", False, None),
    ]
    available_for_llm = global_config.lpmm_knowledge.enable

@@ -1,7 +1,7 @@
-"""表情包管理 API 路由"""
+""" 表情包管理 API 路由"""

from fastapi import APIRouter, HTTPException, Header, Query, UploadFile, File, Form, Cookie
-from fastapi.responses import FileResponse
+from fastapi.responses import FileResponse, JSONResponse
from pydantic import BaseModel
from typing import Optional, List, Annotated
from src.common.logger import get_logger

@@ -13,9 +13,156 @@ import os
import hashlib
from PIL import Image
import io
from pathlib import Path
import threading
import asyncio
from concurrent.futures import ThreadPoolExecutor

logger = get_logger("webui.emoji")

# ==================== 缩略图缓存配置 ====================
# 缩略图缓存目录
THUMBNAIL_CACHE_DIR = Path("data/emoji_thumbnails")
# 缩略图尺寸 (宽, 高)
THUMBNAIL_SIZE = (200, 200)
# 缩略图质量 (WebP 格式, 1-100)
THUMBNAIL_QUALITY = 80
# 缓存锁,防止并发生成同一缩略图
_thumbnail_locks: dict[str, threading.Lock] = {}
_locks_lock = threading.Lock()
# 缩略图生成专用线程池(避免阻塞事件循环)
_thumbnail_executor = ThreadPoolExecutor(max_workers=2, thread_name_prefix="thumbnail")
# 正在生成中的缩略图哈希集合(防止重复提交任务)
_generating_thumbnails: set[str] = set()
_generating_lock = threading.Lock()


def _get_thumbnail_lock(file_hash: str) -> threading.Lock:
    """获取指定文件哈希的锁,用于防止并发生成同一缩略图"""
    with _locks_lock:
        if file_hash not in _thumbnail_locks:
            _thumbnail_locks[file_hash] = threading.Lock()
        return _thumbnail_locks[file_hash]


def _background_generate_thumbnail(source_path: str, file_hash: str) -> None:
    """
    后台生成缩略图(在线程池中执行)

    生成完成后自动从 generating 集合中移除
    """
    try:
        _generate_thumbnail(source_path, file_hash)
    except Exception as e:
        logger.warning(f"后台生成缩略图失败 {file_hash}: {e}")
    finally:
        with _generating_lock:
            _generating_thumbnails.discard(file_hash)


def _ensure_thumbnail_cache_dir() -> Path:
    """确保缩略图缓存目录存在"""
    THUMBNAIL_CACHE_DIR.mkdir(parents=True, exist_ok=True)
    return THUMBNAIL_CACHE_DIR


def _get_thumbnail_cache_path(file_hash: str) -> Path:
    """获取缩略图缓存路径"""
    return THUMBNAIL_CACHE_DIR / f"{file_hash}.webp"


def _generate_thumbnail(source_path: str, file_hash: str) -> Path:
    """
    生成缩略图并保存到缓存目录

    Args:
        source_path: 原图路径
        file_hash: 文件哈希值,用作缓存文件名

    Returns:
        缩略图路径

    Features:
        - GIF: 提取第一帧作为缩略图
        - 所有格式统一转为 WebP
        - 保持宽高比缩放
    """
    _ensure_thumbnail_cache_dir()
    cache_path = _get_thumbnail_cache_path(file_hash)

    # 使用锁防止并发生成同一缩略图
    lock = _get_thumbnail_lock(file_hash)
    with lock:
        # 双重检查,可能在等待锁时已被其他线程生成
        if cache_path.exists():
            return cache_path

        try:
            with Image.open(source_path) as img:
                # GIF 处理:提取第一帧
                if hasattr(img, 'n_frames') and img.n_frames > 1:
                    img.seek(0)  # 确保在第一帧

                # 转换为 RGB/RGBA(WebP 支持透明度)
                if img.mode in ('P', 'PA'):
                    # 调色板模式转换为 RGBA 以保留透明度
                    img = img.convert('RGBA')
                elif img.mode == 'LA':
                    img = img.convert('RGBA')
                elif img.mode not in ('RGB', 'RGBA'):
                    img = img.convert('RGB')

                # 创建缩略图(保持宽高比)
                img.thumbnail(THUMBNAIL_SIZE, Image.Resampling.LANCZOS)

                # 保存为 WebP 格式
                img.save(cache_path, 'WEBP', quality=THUMBNAIL_QUALITY, method=6)

            logger.debug(f"生成缩略图: {file_hash} -> {cache_path}")

        except Exception as e:
            logger.warning(f"生成缩略图失败 {file_hash}: {e},将返回原图")
            # 生成失败时不创建缓存文件,下次会重试
            raise

    return cache_path


def cleanup_orphaned_thumbnails() -> tuple[int, int]:
    """
    清理孤立的缩略图缓存(原图已不存在的缩略图)

    Returns:
        (清理数量, 保留数量)
    """
    if not THUMBNAIL_CACHE_DIR.exists():
        return 0, 0

    # 获取所有表情包的哈希值
    valid_hashes = set()
    for emoji in Emoji.select(Emoji.emoji_hash):
        valid_hashes.add(emoji.emoji_hash)

    cleaned = 0
    kept = 0

    for cache_file in THUMBNAIL_CACHE_DIR.glob("*.webp"):
        file_hash = cache_file.stem
        if file_hash not in valid_hashes:
            try:
                cache_file.unlink()
                cleaned += 1
                logger.debug(f"清理孤立缩略图: {cache_file.name}")
            except Exception as e:
                logger.warning(f"清理缩略图失败 {cache_file.name}: {e}")
        else:
            kept += 1

    if cleaned > 0:
        logger.info(f"清理孤立缩略图: 删除 {cleaned} 个,保留 {kept} 个")

    return cleaned, kept


# 模块级别的类型别名(解决 B008 ruff 错误)
EmojiFile = Annotated[UploadFile, File(description="表情包图片文件")]
EmojiFiles = Annotated[List[UploadFile], File(description="多个表情包图片文件")]

@@ -472,18 +619,26 @@ async def get_emoji_thumbnail(
    token: Optional[str] = Query(None, description="访问令牌"),
    maibot_session: Optional[str] = Cookie(None),
    authorization: Optional[str] = Header(None),
    original: bool = Query(False, description="是否返回原图"),
):
    """
-   获取表情包缩略图
+   获取表情包缩略图(懒加载生成 + 缓存)

    Args:
        emoji_id: 表情包ID
        token: 访问令牌(通过 query parameter,用于向后兼容)
        maibot_session: Cookie 中的 token
        authorization: Authorization header
        original: 是否返回原图(用于详情页查看原图)

    Returns:
-       表情包图片文件
+       表情包缩略图(WebP 格式)或原图

    Features:
        - 懒加载:首次请求时生成缩略图
        - 缓存:后续请求直接返回缓存
        - GIF 支持:提取第一帧作为缩略图
        - 格式统一:所有缩略图统一为 WebP 格式
    """
    try:
        token_manager = get_token_manager()

@@ -513,19 +668,59 @@ async def get_emoji_thumbnail(
        if not os.path.exists(emoji.full_path):
            raise HTTPException(status_code=404, detail="表情包文件不存在")

-       # 根据格式设置 MIME 类型
-       mime_types = {
-           "png": "image/png",
-           "jpg": "image/jpeg",
-           "jpeg": "image/jpeg",
-           "gif": "image/gif",
-           "webp": "image/webp",
-           "bmp": "image/bmp",
-       }
+       # 如果请求原图,直接返回原文件
+       if original:
+           mime_types = {
+               "png": "image/png",
+               "jpg": "image/jpeg",
+               "jpeg": "image/jpeg",
+               "gif": "image/gif",
+               "webp": "image/webp",
+               "bmp": "image/bmp",
+           }
+           media_type = mime_types.get(emoji.format.lower(), "application/octet-stream")
+           return FileResponse(
+               path=emoji.full_path,
+               media_type=media_type,
+               filename=f"{emoji.emoji_hash}.{emoji.format}"
+           )
-       media_type = mime_types.get(emoji.format.lower(), "application/octet-stream")
-
-       return FileResponse(path=emoji.full_path, media_type=media_type, filename=f"{emoji.emoji_hash}.{emoji.format}")
+
+       # 尝试获取或生成缩略图
+       cache_path = _get_thumbnail_cache_path(emoji.emoji_hash)
+
+       # 检查缓存是否存在
+       if cache_path.exists():
+           # 缓存命中,直接返回
+           return FileResponse(
+               path=str(cache_path),
+               media_type="image/webp",
+               filename=f"{emoji.emoji_hash}_thumb.webp"
+           )
+
+       # 缓存未命中,触发后台生成并返回 202
+       with _generating_lock:
+           if emoji.emoji_hash not in _generating_thumbnails:
+               # 标记为正在生成
+               _generating_thumbnails.add(emoji.emoji_hash)
+               # 提交到线程池后台生成
+               _thumbnail_executor.submit(
+                   _background_generate_thumbnail,
+                   emoji.full_path,
+                   emoji.emoji_hash
+               )
+
+       # 返回 202 Accepted,告诉前端缩略图正在生成中
+       return JSONResponse(
+           status_code=202,
+           content={
+               "status": "generating",
+               "message": "缩略图正在生成中,请稍后重试",
+               "emoji_id": emoji_id,
+           },
+           headers={
+               "Retry-After": "1",  # 建议 1 秒后重试
+           }
+       )

    except HTTPException:
        raise

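Because the endpoint above now answers 202 with a Retry-After header while a thumbnail is still being generated, a caller is expected to poll until it gets a 200 WebP response. A minimal client-side sketch (the base URL and route path are assumptions; adjust them to the actual WebUI deployment):

import time
import requests  # third-party HTTP client, assumed available

def fetch_thumbnail(emoji_id: int, base_url: str = "http://127.0.0.1:8000") -> bytes:
    # Assumed route shape; the real router prefix is not shown in this diff.
    url = f"{base_url}/emoji/{emoji_id}/thumbnail"
    while True:
        resp = requests.get(url, timeout=10)
        if resp.status_code == 202:
            # Thumbnail is still being generated in the background thread pool.
            time.sleep(int(resp.headers.get("Retry-After", "1")))
            continue
        resp.raise_for_status()
        return resp.content  # WebP bytes on cache hit
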
@@ -877,3 +1072,235 @@ async def batch_upload_emoji(
    except Exception as e:
        logger.exception(f"批量上传表情包失败: {e}")
        raise HTTPException(status_code=500, detail=f"批量上传失败: {str(e)}") from e


# ==================== 缩略图缓存管理 API ====================


class ThumbnailCacheStatsResponse(BaseModel):
    """缩略图缓存统计响应"""

    success: bool
    cache_dir: str
    total_count: int
    total_size_mb: float
    emoji_count: int
    coverage_percent: float


class ThumbnailCleanupResponse(BaseModel):
    """缩略图清理响应"""

    success: bool
    message: str
    cleaned_count: int
    kept_count: int


class ThumbnailPreheatResponse(BaseModel):
    """缩略图预热响应"""

    success: bool
    message: str
    generated_count: int
    skipped_count: int
    failed_count: int


@router.get("/thumbnail-cache/stats", response_model=ThumbnailCacheStatsResponse)
async def get_thumbnail_cache_stats(
    maibot_session: Optional[str] = Cookie(None),
    authorization: Optional[str] = Header(None),
):
    """
    获取缩略图缓存统计信息

    Returns:
        缓存目录、缓存数量、总大小、覆盖率等统计信息
    """
    try:
        verify_auth_token(maibot_session, authorization)

        _ensure_thumbnail_cache_dir()

        # 统计缓存文件
        cache_files = list(THUMBNAIL_CACHE_DIR.glob("*.webp"))
        total_count = len(cache_files)
        total_size = sum(f.stat().st_size for f in cache_files)
        total_size_mb = round(total_size / (1024 * 1024), 2)

        # 统计表情包总数
        emoji_count = Emoji.select().count()

        # 计算覆盖率
        coverage_percent = round((total_count / emoji_count * 100) if emoji_count > 0 else 0, 1)

        return ThumbnailCacheStatsResponse(
            success=True,
            cache_dir=str(THUMBNAIL_CACHE_DIR.absolute()),
            total_count=total_count,
            total_size_mb=total_size_mb,
            emoji_count=emoji_count,
            coverage_percent=coverage_percent,
        )

    except HTTPException:
        raise
    except Exception as e:
        logger.exception(f"获取缩略图缓存统计失败: {e}")
        raise HTTPException(status_code=500, detail=f"获取统计失败: {str(e)}") from e


@router.post("/thumbnail-cache/cleanup", response_model=ThumbnailCleanupResponse)
async def cleanup_thumbnail_cache(
    maibot_session: Optional[str] = Cookie(None),
    authorization: Optional[str] = Header(None),
):
    """
    清理孤立的缩略图缓存(原图已删除的表情包对应的缩略图)

    Returns:
        清理结果
    """
    try:
        verify_auth_token(maibot_session, authorization)

        cleaned, kept = cleanup_orphaned_thumbnails()

        return ThumbnailCleanupResponse(
            success=True,
            message=f"清理完成:删除 {cleaned} 个孤立缓存,保留 {kept} 个有效缓存",
            cleaned_count=cleaned,
            kept_count=kept,
        )

    except HTTPException:
        raise
    except Exception as e:
        logger.exception(f"清理缩略图缓存失败: {e}")
        raise HTTPException(status_code=500, detail=f"清理失败: {str(e)}") from e


@router.post("/thumbnail-cache/preheat", response_model=ThumbnailPreheatResponse)
async def preheat_thumbnail_cache(
    limit: int = Query(100, ge=1, le=1000, description="最多预热数量"),
    maibot_session: Optional[str] = Cookie(None),
    authorization: Optional[str] = Header(None),
):
    """
    预热缩略图缓存(提前生成未缓存的缩略图)

    优先处理使用次数高的表情包

    Args:
        limit: 最多预热数量 (1-1000)

    Returns:
        预热结果
    """
    try:
        verify_auth_token(maibot_session, authorization)

        _ensure_thumbnail_cache_dir()

        # 获取使用次数最高的表情包(未缓存的优先)
        emojis = (
            Emoji.select()
            .where(Emoji.is_banned == False)  # noqa: E712 Peewee ORM requires == for boolean comparison
            .order_by(Emoji.usage_count.desc())
            .limit(limit * 2)  # 多查一些,因为有些可能已缓存
        )

        generated = 0
        skipped = 0
        failed = 0

        for emoji in emojis:
            if generated >= limit:
                break

            cache_path = _get_thumbnail_cache_path(emoji.emoji_hash)

            # 已缓存,跳过
            if cache_path.exists():
                skipped += 1
                continue

            # 原文件不存在,跳过
            if not os.path.exists(emoji.full_path):
                failed += 1
                continue

            try:
                # 使用线程池异步生成缩略图,避免阻塞事件循环
                loop = asyncio.get_event_loop()
                await loop.run_in_executor(
                    _thumbnail_executor,
                    _generate_thumbnail,
                    emoji.full_path,
                    emoji.emoji_hash
                )
                generated += 1
            except Exception as e:
                logger.warning(f"预热缩略图失败 {emoji.emoji_hash}: {e}")
                failed += 1

        return ThumbnailPreheatResponse(
            success=True,
            message=f"预热完成:生成 {generated} 个,跳过 {skipped} 个已缓存,失败 {failed} 个",
            generated_count=generated,
            skipped_count=skipped,
            failed_count=failed,
        )

    except HTTPException:
        raise
    except Exception as e:
        logger.exception(f"预热缩略图缓存失败: {e}")
        raise HTTPException(status_code=500, detail=f"预热失败: {str(e)}") from e


@router.delete("/thumbnail-cache/clear", response_model=ThumbnailCleanupResponse)
async def clear_all_thumbnail_cache(
    maibot_session: Optional[str] = Cookie(None),
    authorization: Optional[str] = Header(None),
):
    """
    清空所有缩略图缓存(下次访问时会重新生成)

    Returns:
        清理结果
    """
    try:
        verify_auth_token(maibot_session, authorization)

        if not THUMBNAIL_CACHE_DIR.exists():
            return ThumbnailCleanupResponse(
                success=True,
                message="缓存目录不存在,无需清理",
                cleaned_count=0,
                kept_count=0,
            )

        cleaned = 0
        for cache_file in THUMBNAIL_CACHE_DIR.glob("*.webp"):
            try:
                cache_file.unlink()
                cleaned += 1
            except Exception as e:
                logger.warning(f"删除缓存文件失败 {cache_file.name}: {e}")

        logger.info(f"已清空缩略图缓存: 删除 {cleaned} 个文件")

        return ThumbnailCleanupResponse(
            success=True,
            message=f"已清空所有缩略图缓存:删除 {cleaned} 个文件",
            cleaned_count=cleaned,
            kept_count=0,
        )

    except HTTPException:
        raise
    except Exception as e:
        logger.exception(f"清空缩略图缓存失败: {e}")
        raise HTTPException(status_code=500, detail=f"清空失败: {str(e)}") from e

@@ -11,8 +11,10 @@ from datetime import datetime
from fastapi import APIRouter, HTTPException
from pydantic import BaseModel
from src.config.config import MMC_VERSION
from src.common.logger import get_logger

router = APIRouter(prefix="/system", tags=["system"])
logger = get_logger("webui_system")

# 记录启动时间
_start_time = time.time()

@@ -39,21 +41,22 @@ async def restart_maibot():
    """
    重启麦麦主程序

-   使用 os.execv 重启当前进程,配置更改将在重启后生效。
+   请求重启当前进程,配置更改将在重启后生效。
    注意:此操作会使麦麦暂时离线。
    """
    import asyncio

    try:
        # 记录重启操作
-       print(f"[{datetime.now()}] WebUI 触发重启操作")
+       logger.info("WebUI 触发重启操作")

        # 定义延迟重启的异步任务
        async def delayed_restart():
            await asyncio.sleep(0.5)  # 延迟0.5秒,确保响应已发送
-           python = sys.executable
-           args = [python] + sys.argv
-           os.execv(python, args)
+           # 使用 os._exit(42) 退出当前进程,配合外部 runner 脚本进行重启
+           # 42 是约定的重启状态码
+           logger.info("WebUI 请求重启,退出代码 42")
+           os._exit(42)

        # 创建后台任务执行重启
        asyncio.create_task(delayed_restart())

@@ -2,8 +2,8 @@
version = "6.23.5"

#----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读----
-#如果你想要修改配置文件,请递增version的值
-#如果新增项目,请阅读src/config/official_configs.py中的说明
+# 如果你想要修改配置文件,请递增version的值
+# 如果新增项目,请阅读src/config/official_configs.py中的说明
#
# 版本格式:主版本号.次版本号.修订号,版本号递增规则如下:
# 主版本号:MMC版本更新

@@ -23,7 +23,7 @@ alias_names = ["麦叠", "牢麦"] # 麦麦的别名
[personality]
# 建议120字以内,描述人格特质 和 身份特征
personality = "是一个女大学生,现在在读大二,会刷贴吧。"
-#アイデンティティがない 生まれないらららら
+# アイデンティティがない 生まれないらららら
# 描述麦麦说话的表达风格,表达习惯,如要修改,可以酌情新增内容
reply_style = "请回复的平淡一些,简短一些,说中文,不要刻意突出自身学科背景。可以参考贴吧,知乎和微博的回复风格。"

@@ -85,11 +85,11 @@ reflect_operator_id = "" # 表达反思操作员ID,格式:platform:id:type (
allow_reflect = [] # 允许进行表达反思的聊天流ID列表,格式:["qq:123456:private", "qq:654321:group", ...],只有在此列表中的聊天流才会提出问题并跟踪。如果列表为空,则所有聊天流都可以进行表达反思(前提是 reflect = true)


-[chat] #麦麦的聊天设置
-talk_value = 1 #聊天频率,越小越沉默,范围0-1
+[chat] # 麦麦的聊天设置
+talk_value = 1 # 聊天频率,越小越沉默,范围0-1
mentioned_bot_reply = true # 是否启用提及必回复
max_context_size = 30 # 上下文长度
-planner_smooth = 2 #规划器平滑,增大数值会减小planner负荷,略微降低反应速度,推荐1-5,0为关闭,必须大于等于0
+planner_smooth = 2 # 规划器平滑,增大数值会减小planner负荷,略微降低反应速度,推荐1-5,0为关闭,必须大于等于0

enable_talk_value_rules = true # 是否启用动态发言频率规则

@@ -143,8 +143,8 @@ ban_words = [

ban_msgs_regex = [
    # 需要过滤的消息(原始消息)匹配的正则表达式,匹配到的消息将被过滤,若不了解正则表达式请勿修改
-   #"https?://[^\\s]+", # 匹配https链接
-   #"\\d{4}-\\d{2}-\\d{2}", # 匹配日期
+   # "https?://[^\\s]+", # 匹配https链接
+   # "\\d{4}-\\d{2}-\\d{2}", # 匹配日期
]

@@ -173,7 +173,7 @@ embedding_dimension = 1024 # 嵌入向量维度,应该与模型的输出维度
keyword_rules = [
    { keywords = ["人机", "bot", "机器", "入机", "robot", "机器人", "ai", "AI"], reaction = "有人说你是人机或者机器人,否定这一事实,攻击或反问他。注意,只有认为你是机器人才回复,否则不要否认" },
    { keywords = ["测试关键词回复", "test"], reaction = "回答测试成功" },
-   #{ keywords = ["你好", "hello"], reaction = "你好,有什么可以帮你?" }
+   # { keywords = ["你好", "hello"], reaction = "你好,有什么可以帮你?" }
    # 在此处添加更多规则,格式同上
]

@@ -246,7 +246,7 @@ enable = true
chat_prompts = []


-#此系统暂时移除,无效配置
+# 此系统暂时移除,无效配置
[relationship]
enable_relationship = true # 是否启用关系系统

|
|||
|
|
@ -1,5 +1,5 @@
|
|||
[inner]
|
||||
version = "1.8.1"
|
||||
version = "1.8.2"
|
||||
|
||||
# 配置文件版本号迭代规则同bot_config.toml
|
||||
|
||||
|
|
@@ -46,7 +46,7 @@ name = "deepseek-v3" # 模型名称(可随意命名,在后面
api_provider = "DeepSeek" # API服务商名称(对应在api_providers中配置的服务商名称)
price_in = 2.0 # 输入价格(用于API调用统计,单位:元/ M token)(可选,若无该字段,默认值为0)
price_out = 8.0 # 输出价格(用于API调用统计,单位:元/ M token)(可选,若无该字段,默认值为0)
-#force_stream_mode = true # 强制流式输出模式(若模型不支持非流式输出,请取消该注释,启用强制流式输出,若无该字段,默认值为false)
+# force_stream_mode = true # 强制流式输出模式(若模型不支持非流式输出,请取消该注释,启用强制流式输出,若无该字段,默认值为false)

[[models]]
model_identifier = "deepseek-ai/DeepSeek-V3.2-Exp"

@@ -56,6 +56,7 @@ price_in = 2.0
price_out = 3.0
[models.extra_params] # 可选的额外参数配置
enable_thinking = false # 不启用思考
# temperature = 0.5 # 可选:为该模型单独指定温度,会覆盖任务配置中的温度

[[models]]
model_identifier = "deepseek-ai/DeepSeek-V3.2-Exp"

@@ -65,6 +66,7 @@ price_in = 2.0
price_out = 3.0
[models.extra_params] # 可选的额外参数配置
enable_thinking = true # 启用思考
# temperature = 0.7 # 可选:为该模型单独指定温度,会覆盖任务配置中的温度

[[models]]
model_identifier = "Qwen/Qwen3-Next-80B-A3B-Instruct"

@@ -133,51 +135,62 @@ price_out = 0
model_list = ["siliconflow-deepseek-v3.2"] # 使用的模型列表,每个子项对应上面的模型名称(name)
temperature = 0.2 # 模型温度,新V3建议0.1-0.3
max_tokens = 2048 # 最大输出token数
slow_threshold = 15.0 # 慢请求阈值(秒),模型等待回复时间超过此值会输出警告日志

[model_task_config.utils_small] # 在麦麦的一些组件中使用的小模型,消耗量较大,建议使用速度较快的小模型
model_list = ["qwen3-30b","qwen3-next-80b"]
temperature = 0.7
max_tokens = 2048
slow_threshold = 10.0

[model_task_config.tool_use] #工具调用模型,需要使用支持工具调用的模型
model_list = ["qwen3-30b","qwen3-next-80b"]
temperature = 0.7
max_tokens = 800
slow_threshold = 10.0

[model_task_config.replyer] # 首要回复模型,还用于表达器和表达方式学习
model_list = ["siliconflow-deepseek-v3.2","siliconflow-deepseek-v3.2-think","siliconflow-glm-4.6","siliconflow-glm-4.6-think"]
temperature = 0.3 # 模型温度,新V3建议0.1-0.3
max_tokens = 2048
slow_threshold = 25.0

[model_task_config.planner] #决策:负责决定麦麦该什么时候回复的模型
model_list = ["siliconflow-deepseek-v3.2"]
temperature = 0.3
max_tokens = 800
slow_threshold = 12.0

[model_task_config.vlm] # 图像识别模型
model_list = ["qwen3-vl-30"]
max_tokens = 256
slow_threshold = 15.0

[model_task_config.voice] # 语音识别模型
model_list = ["sensevoice-small"]
slow_threshold = 12.0

-#嵌入模型
+# 嵌入模型
[model_task_config.embedding]
model_list = ["bge-m3"]
slow_threshold = 5.0

-#------------LPMM知识库模型------------
+# ------------LPMM知识库模型------------

[model_task_config.lpmm_entity_extract] # 实体提取模型
model_list = ["siliconflow-deepseek-v3.2"]
temperature = 0.2
max_tokens = 800
slow_threshold = 20.0

[model_task_config.lpmm_rdf_build] # RDF构建模型
model_list = ["siliconflow-deepseek-v3.2"]
temperature = 0.2
max_tokens = 800
slow_threshold = 20.0

[model_task_config.lpmm_qa] # 问答模型
model_list = ["siliconflow-deepseek-v3.2"]
temperature = 0.7
max_tokens = 800
slow_threshold = 20.0

File diff suppressed because one or more lines are too long (repeated for 11 files)

@@ -7,21 +7,21 @@
    <link rel="icon" type="image/x-icon" href="/maimai.ico" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>MaiBot Dashboard</title>
-   <script type="module" crossorigin src="/assets/index-B31Ybn7V.js"></script>
+   <script type="module" crossorigin src="/assets/index-DFcwoEiz.js"></script>
    <link rel="modulepreload" crossorigin href="/assets/react-vendor-Dtc2IqVY.js">
    <link rel="modulepreload" crossorigin href="/assets/router-CWhjJi2n.js">
    <link rel="modulepreload" crossorigin href="/assets/utils-CCeOswSm.js">
-   <link rel="modulepreload" crossorigin href="/assets/radix-core-BlBHu_Lw.js">
-   <link rel="modulepreload" crossorigin href="/assets/radix-extra-Cw1azsjZ.js">
+   <link rel="modulepreload" crossorigin href="/assets/radix-core-C3XKqQJw.js">
+   <link rel="modulepreload" crossorigin href="/assets/radix-extra-BM7iD6Dt.js">
    <link rel="modulepreload" crossorigin href="/assets/charts-Dhri-zxi.js">
-   <link rel="modulepreload" crossorigin href="/assets/icons-Bw5y5Hqz.js">
+   <link rel="modulepreload" crossorigin href="/assets/icons-y1PBa0Co.js">
    <link rel="modulepreload" crossorigin href="/assets/codemirror-BHeANvwm.js">
-   <link rel="modulepreload" crossorigin href="/assets/misc-Ii-X5qWA.js">
+   <link rel="modulepreload" crossorigin href="/assets/misc-DyBU7ISD.js">
    <link rel="modulepreload" crossorigin href="/assets/dnd-Dyi3CnuX.js">
-   <link rel="modulepreload" crossorigin href="/assets/uppy-DSH7n_-V.js">
+   <link rel="modulepreload" crossorigin href="/assets/uppy-BHC3OXBx.js">
    <link rel="modulepreload" crossorigin href="/assets/markdown-A1ShuLvG.js">
    <link rel="modulepreload" crossorigin href="/assets/reactflow-B3n3_Vkw.js">
-   <link rel="stylesheet" crossorigin href="/assets/index-CUrrfy9B.css">
+   <link rel="stylesheet" crossorigin href="/assets/index-ceRg_XiX.css">
  </head>
  <body>
    <div id="root" class="notranslate"></div>