修复神秘小功能

pull/937/head
Bakadax 2025-05-04 21:56:10 +08:00
parent 5125ece27b
commit c567bdbae2
3 changed files with 21 additions and 26 deletions

View File

@@ -277,6 +277,7 @@ class BotConfig:
# enable_think_flow: bool = False # 是否启用思考流程 # enable_think_flow: bool = False # 是否启用思考流程
talk_allowed_private = set() talk_allowed_private = set()
enable_pfc_chatting: bool = False # 是否启用PFC聊天 enable_pfc_chatting: bool = False # 是否启用PFC聊天
api_polling_max_retries: int = 3 # 神秘小功能
# Group Nickname # Group Nickname
enable_nickname_mapping: bool = False # 绰号映射功能总开关 enable_nickname_mapping: bool = False # 绰号映射功能总开关
@@ -706,6 +707,8 @@ class BotConfig:
config.talk_allowed_private = set(str(user) for user in experimental_config.get("talk_allowed_private", [])) config.talk_allowed_private = set(str(user) for user in experimental_config.get("talk_allowed_private", []))
if config.INNER_VERSION in SpecifierSet(">=1.1.0"): if config.INNER_VERSION in SpecifierSet(">=1.1.0"):
config.enable_pfc_chatting = experimental_config.get("pfc_chatting", config.enable_pfc_chatting) config.enable_pfc_chatting = experimental_config.get("pfc_chatting", config.enable_pfc_chatting)
if config.INNER_VERSION in SpecifierSet(">=1.6.1.5"):
config.api_polling_max_retries = experimental_config.get("api_polling_max_retries", config.api_polling_max_retries)
# 版本表达式:>=1.0.0,<2.0.0 # 版本表达式:>=1.0.0,<2.0.0
# 允许字段func: method, support: str, notice: str, necessary: bool # 允许字段func: method, support: str, notice: str, necessary: bool

View File

@@ -344,9 +344,8 @@ class LLMRequest:
**kwargs: Any **kwargs: Any
) -> Dict[str, Any]: ) -> Dict[str, Any]:
"""配置请求参数,合并实例参数和调用时参数""" """配置请求参数,合并实例参数和调用时参数"""
# (代码不变)
default_retry = { default_retry = {
"max_retries": 3, "max_retries": global_config.api_polling_max_retries,
"base_wait": 10, "base_wait": 10,
"retry_codes": [429, 413, 500, 503], "retry_codes": [429, 413, 500, 503],
"abort_codes": [400, 401, 402, 403], "abort_codes": [400, 401, 402, 403],
@@ -372,7 +371,7 @@ class LLMRequest:
if not self.is_gemini and stream_mode: if not self.is_gemini and stream_mode:
payload["stream"] = merged_params.get("stream", stream_mode) payload["stream"] = merged_params.get("stream", stream_mode)
return { return {
@@ -399,7 +398,6 @@ class LLMRequest:
**kwargs: Any **kwargs: Any
): ):
"""统一请求执行入口, 支持列表 key 切换、代理和单次调用参数覆盖""" """统一请求执行入口, 支持列表 key 切换、代理和单次调用参数覆盖"""
# (代码不变)
final_request_type = request_type or kwargs.get('request_type') or self.request_type final_request_type = request_type or kwargs.get('request_type') or self.request_type
api_kwargs = {k: v for k, v in kwargs.items() if k != 'request_type'} api_kwargs = {k: v for k, v in kwargs.items() if k != 'request_type'}
@@ -418,9 +416,9 @@ class LLMRequest:
current_proxy_url = self.proxy_url current_proxy_url = self.proxy_url
logger.debug(f"模型 {self.model_name}: 将通过代理 {current_proxy_url} 发送请求。") logger.debug(f"模型 {self.model_name}: 将通过代理 {current_proxy_url} 发送请求。")
elif self.proxy_url: elif self.proxy_url:
logger.debug(f"模型 {self.model_name}: 配置了代理,但此模型不在 PROXY_MODELS 列表中,将不使用代理。") logger.debug(f"模型 {self.model_name}: 配置了代理,但此模型不在 PROXY_MODELS 列表中,将不使用代理。")
else: else:
logger.debug(f"模型 {self.model_name}: 未配置或不为此模型使用代理。") logger.debug(f"模型 {self.model_name}: 未配置或不为此模型使用代理。")
current_key = None current_key = None
keys_failed_429 = set() keys_failed_429 = set()
@@ -461,7 +459,7 @@ class LLMRequest:
logger.critical(final_error_msg) logger.critical(final_error_msg)
raise PermissionDeniedException(final_error_msg) raise PermissionDeniedException(final_error_msg)
else: else:
raise RuntimeError(f"模型 {self.model_name}: 无法选择 API key (第 {attempt + 1} 次尝试)") raise RuntimeError(f"模型 {self.model_name}: 无法选择 API key (第 {attempt + 1} 次尝试)")
logger.debug(f"模型 {self.model_name}: 尝试使用 Key: ...{current_key[-4:]} (总第 {attempt + 1} 次尝试)") logger.debug(f"模型 {self.model_name}: 尝试使用 Key: ...{current_key[-4:]} (总第 {attempt + 1} 次尝试)")
@@ -527,11 +525,11 @@ class LLMRequest:
if response.status in policy["abort_codes"] or (response.status in policy["retry_codes"] and attempt >= policy["max_retries"] - 1): if response.status in policy["abort_codes"] or (response.status in policy["retry_codes"] and attempt >= policy["max_retries"] - 1):
if attempt >= policy["max_retries"] - 1 and response.status in policy["retry_codes"]: if attempt >= policy["max_retries"] - 1 and response.status in policy["retry_codes"]:
logger.error(f"模型 {self.model_name}: 达到最大重试次数,最后一次尝试仍为可重试错误 {response.status}") logger.error(f"模型 {self.model_name}: 达到最大重试次数,最后一次尝试仍为可重试错误 {response.status}")
await self._handle_error_response(response, attempt, policy, current_key) # await self._handle_error_response(response, attempt, policy, current_key)
await response.read() # await response.read()
final_error_msg = f"请求中止或达到最大重试次数,最终状态码: {response.status}" # final_error_msg = f"请求中止或达到最大重试次数,最终状态码: {response.status}"
logger.error(final_error_msg) # logger.error(final_error_msg)
raise RequestAbortException(final_error_msg, response) # raise RequestAbortException(final_error_msg, response)
response.raise_for_status() response.raise_for_status()
result = {} result = {}
@@ -547,18 +545,15 @@ class LLMRequest:
) )
except _SwitchKeyException: except _SwitchKeyException:
# (代码不变)
last_exception = _SwitchKeyException() last_exception = _SwitchKeyException()
logger.debug("捕获到 _SwitchKeyException立即进行下一次尝试。") logger.debug("捕获到 _SwitchKeyException立即进行下一次尝试。")
continue continue
except PermissionDeniedException as e: except PermissionDeniedException as e:
# (代码不变)
logger.error(f"模型 {self.model_name}: 因权限拒绝 (403) 中止请求: {e}") logger.error(f"模型 {self.model_name}: 因权限拒绝 (403) 中止请求: {e}")
if is_key_list and not available_keys_pool and e.key_identifier: if is_key_list and not available_keys_pool and e.key_identifier:
logger.critical(f" 中止原因是 Key ...{e.key_identifier[-4:]} 触发 403 后已无其他 Key 可用。") logger.critical(f" 中止原因是 Key ...{e.key_identifier[-4:]} 触发 403 后已无其他 Key 可用。")
raise e raise e
except aiohttp.ClientProxyConnectionError as e: except aiohttp.ClientProxyConnectionError as e:
# (代码不变)
logger.error(f"代理连接错误: {e} (代理地址: {current_proxy_url})") logger.error(f"代理连接错误: {e} (代理地址: {current_proxy_url})")
last_exception = e last_exception = e
if attempt >= policy["max_retries"] - 1: if attempt >= policy["max_retries"] - 1:
@@ -568,7 +563,6 @@ class LLMRequest:
await asyncio.sleep(wait_time) await asyncio.sleep(wait_time)
continue continue
except aiohttp.ClientConnectorError as e: except aiohttp.ClientConnectorError as e:
# (代码不变)
logger.error(f"网络连接错误: {e} (URL: {api_url}, 代理: {current_proxy_url})") logger.error(f"网络连接错误: {e} (URL: {api_url}, 代理: {current_proxy_url})")
last_exception = e last_exception = e
if attempt >= policy["max_retries"] - 1: if attempt >= policy["max_retries"] - 1:
@@ -619,7 +613,6 @@ class LLMRequest:
raise rt_error raise rt_error
# --- 循环结束 --- # --- 循环结束 ---
# (代码不变)
logger.error(f"模型 {self.model_name}: 所有重试尝试 ({policy['max_retries']} 次) 均失败。") logger.error(f"模型 {self.model_name}: 所有重试尝试 ({policy['max_retries']} 次) 均失败。")
if last_exception: if last_exception:
if isinstance(last_exception, PermissionDeniedException): if isinstance(last_exception, PermissionDeniedException):
@@ -1112,7 +1105,7 @@ class LLMRequest:
# (代码不变) # (代码不变)
if no_key: if no_key:
if self.is_gemini: if self.is_gemini:
return {"x-goog-api-key": "**********", "Content-Type": "application/json"} return {"x-goog-api-key": "**********", "Content-Type": "application/json"}
else: else:
return {"Authorization": "Bearer **********", "Content-Type": "application/json"} return {"Authorization": "Bearer **********", "Content-Type": "application/json"}
else: else:
@@ -1128,7 +1121,6 @@ class LLMRequest:
async def generate_response(self, prompt: str, user_id: str = "system", **kwargs) -> Tuple: async def generate_response(self, prompt: str, user_id: str = "system", **kwargs) -> Tuple:
"""根据输入的提示生成模型的异步响应,支持覆盖参数""" """根据输入的提示生成模型的异步响应,支持覆盖参数"""
# (代码不变)
endpoint = ":generateContent" if self.is_gemini else "/chat/completions" endpoint = ":generateContent" if self.is_gemini else "/chat/completions"
response = await self._execute_request( response = await self._execute_request(
endpoint=endpoint, endpoint=endpoint,
@@ -1146,7 +1138,6 @@ class LLMRequest:
async def generate_response_for_image(self, prompt: str, image_base64: str, image_format: str, user_id: str = "system", **kwargs) -> Tuple: async def generate_response_for_image(self, prompt: str, image_base64: str, image_format: str, user_id: str = "system", **kwargs) -> Tuple:
"""根据输入的提示和图片生成模型的异步响应,支持覆盖参数""" """根据输入的提示和图片生成模型的异步响应,支持覆盖参数"""
# (代码不变)
endpoint = ":generateContent" if self.is_gemini else "/chat/completions" endpoint = ":generateContent" if self.is_gemini else "/chat/completions"
response = await self._execute_request( response = await self._execute_request(
endpoint=endpoint, endpoint=endpoint,
@@ -1159,13 +1150,13 @@ class LLMRequest:
) )
# _default_response_handler 现在总是返回至少2个值 # _default_response_handler 现在总是返回至少2个值
if len(response) == 3: if len(response) == 3:
return response # content, reasoning, tool_calls (tool_calls 可能为 None) return response # content, reasoning, tool_calls (tool_calls 可能为 None)
elif len(response) == 2: elif len(response) == 2:
content, reasoning = response content, reasoning = response
return content, reasoning # 对于 vision 请求,通常没有 tool_calls return content, reasoning # 对于 vision 请求,通常没有 tool_calls
else: else:
logger.error(f"来自 _default_response_handler 的意外响应格式: {response}") logger.error(f"来自 _default_response_handler 的意外响应格式: {response}")
return "处理响应出错", "" return "处理响应出错", ""
async def generate_response_async(self, prompt: str, user_id: str = "system", request_type: str = "chat", **kwargs) -> Union[str, Tuple]: async def generate_response_async(self, prompt: str, user_id: str = "system", request_type: str = "chat", **kwargs) -> Union[str, Tuple]:

View File

@@ -1,5 +1,5 @@
[inner] [inner]
version = "1.6.1.4" version = "1.6.1.5"
#----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读---- #----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读----
#如果你想要修改配置文件请在修改后将version的值进行变更 #如果你想要修改配置文件请在修改后将version的值进行变更
@@ -200,6 +200,7 @@ enable = true
enable_friend_chat = false # 是否启用好友聊天 enable_friend_chat = false # 是否启用好友聊天
talk_allowed_private = [] # 可以回复消息的QQ号 talk_allowed_private = [] # 可以回复消息的QQ号
pfc_chatting = false # 是否启用PFC聊天该功能仅作用于私聊与回复模式独立 pfc_chatting = false # 是否启用PFC聊天该功能仅作用于私聊与回复模式独立
api_polling_max_retries = 3
#下面的模型若使用硅基流动则不需要更改使用ds官方则改成.env自定义的宏使用自定义模型则选择定位相似的模型自己填写 #下面的模型若使用硅基流动则不需要更改使用ds官方则改成.env自定义的宏使用自定义模型则选择定位相似的模型自己填写
#推理模型 #推理模型