Add <think> tag handling

pull/52/head
KawaiiYusora 2025-03-04 23:16:29 +08:00
parent ba69a84b19
commit 9823f2cb0e
1 changed file with 63 additions and 47 deletions
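The change adds a fallback for models that return their chain of thought inline: when the API response has no separate reasoning_content field, the reasoning text is pulled out of a <think>…</think> block in content, and the block is stripped from the visible reply. A minimal standalone sketch of that logic (the helper name split_think_tags is illustrative; in the commit the code is inlined in two of LLM_request's generate methods, and the exact indentation is not recoverable from the rendered diff):

import re
from typing import Tuple

def split_think_tags(message: dict) -> Tuple[str, str]:
    # Hypothetical helper sketching the logic this commit inlines.
    content = message.get("content", "")
    reasoning_content = message.get("reasoning_content", "")
    if not reasoning_content:
        # Fall back to an inline <think>...</think> block when the API
        # does not return a separate reasoning_content field.
        think_match = re.search(r'<think>(.*?)</think>', content, re.DOTALL)
        if think_match:
            reasoning_content = think_match.group(1).strip()
    # Strip the <think> block from the user-visible reply either way.
    content = re.sub(r'<think>.*?</think>', '', content, flags=re.DOTALL).strip()
    return content, reasoning_content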


@@ -2,6 +2,7 @@ import aiohttp
import asyncio
import requests
import time
+import re
from typing import Tuple, Union
from nonebot import get_driver
from loguru import logger
@@ -10,6 +11,7 @@ from ..chat.config import global_config
driver = get_driver()
config = driver.config

+
class LLM_request:
    def __init__(self, model, **kwargs):
        # 将大写的配置键转换为小写并从config中获取实际值
@@ -28,21 +30,21 @@ class LLM_request:
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }

        # 构建请求体
        data = {
            "model": self.model_name,
            "messages": [{"role": "user", "content": prompt}],
            **self.params
        }

        # 发送请求到完整的chat/completions端点
        api_url = f"{self.base_url.rstrip('/')}/chat/completions"
        logger.info(f"发送请求到URL: {api_url}")  # 记录请求的URL

        max_retries = 3
        base_wait_time = 15

        for retry in range(max_retries):
            try:
                async with aiohttp.ClientSession() as session:
@@ -52,16 +54,16 @@ class LLM_request:
                        logger.warning(f"遇到请求限制(429),等待{wait_time}秒后重试...")
                        await asyncio.sleep(wait_time)
                        continue

                    response.raise_for_status()  # 检查其他响应状态

                    result = await response.json()
                    if "choices" in result and len(result["choices"]) > 0:
                        content = result["choices"][0]["message"]["content"]
                        reasoning_content = result["choices"][0]["message"].get("reasoning_content", "")
                        return content, reasoning_content
                    return "没有返回结果", ""

            except Exception as e:
                if retry < max_retries - 1:  # 如果还有重试机会
                    wait_time = base_wait_time * (2 ** retry)
@@ -70,7 +72,7 @@ class LLM_request:
                else:
                    logger.critical(f"请求失败: {str(e)}", exc_info=True)
                    raise RuntimeError(f"API请求失败: {str(e)}")

        logger.error("达到最大重试次数,请求仍然失败")
        raise RuntimeError("达到最大重试次数API请求仍然失败")

@@ -80,7 +82,7 @@ class LLM_request:
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }

        # 构建请求体
        data = {
            "model": self.model_name,
@@ -103,14 +105,14 @@ class LLM_request:
            ],
            **self.params
        }

        # 发送请求到完整的chat/completions端点
        api_url = f"{self.base_url.rstrip('/')}/chat/completions"
        logger.info(f"发送请求到URL: {api_url}")  # 记录请求的URL

        max_retries = 3
        base_wait_time = 15

        for retry in range(max_retries):
            try:
                async with aiohttp.ClientSession() as session:
@@ -120,16 +122,23 @@ class LLM_request:
                        logger.warning(f"遇到请求限制(429),等待{wait_time}秒后重试...")
                        await asyncio.sleep(wait_time)
                        continue

                    response.raise_for_status()  # 检查其他响应状态

                    result = await response.json()
                    if "choices" in result and len(result["choices"]) > 0:
-                        content = result["choices"][0]["message"]["content"]
-                        reasoning_content = result["choices"][0]["message"].get("reasoning_content", "")
+                        message = result["choices"][0]["message"]
+                        content = message.get("content", "")
+                        think_match = None
+                        reasoning_content = message.get("reasoning_content", "")
+                        if not reasoning_content:
+                            think_match = re.search(r'<think>(.*?)</think>', content, re.DOTALL)
+                        if think_match:
+                            reasoning_content = think_match.group(1).strip()
+                        content = re.sub(r'<think>.*?</think>', '', content, flags=re.DOTALL).strip()
                        return content, reasoning_content
                    return "没有返回结果", ""

            except Exception as e:
                if retry < max_retries - 1:  # 如果还有重试机会
                    wait_time = base_wait_time * (2 ** retry)
@@ -138,7 +147,7 @@ class LLM_request:
                else:
                    logger.critical(f"请求失败: {str(e)}", exc_info=True)
                    raise RuntimeError(f"API请求失败: {str(e)}")

        logger.error("达到最大重试次数,请求仍然失败")
        raise RuntimeError("达到最大重试次数API请求仍然失败")

@@ -148,7 +157,7 @@ class LLM_request:
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }

        # 构建请求体
        data = {
            "model": self.model_name,
@@ -171,33 +180,40 @@ class LLM_request:
            ],
            **self.params
        }

        # 发送请求到完整的chat/completions端点
        api_url = f"{self.base_url.rstrip('/')}/chat/completions"
        logger.info(f"发送请求到URL: {api_url}")  # 记录请求的URL

        max_retries = 2
        base_wait_time = 6

        for retry in range(max_retries):
            try:
                response = requests.post(api_url, headers=headers, json=data, timeout=30)

                if response.status_code == 429:
                    wait_time = base_wait_time * (2 ** retry)
                    logger.warning(f"遇到请求限制(429),等待{wait_time}秒后重试...")
                    time.sleep(wait_time)
                    continue

                response.raise_for_status()  # 检查其他响应状态

                result = response.json()
                if "choices" in result and len(result["choices"]) > 0:
-                    content = result["choices"][0]["message"]["content"]
-                    reasoning_content = result["choices"][0]["message"].get("reasoning_content", "")
+                    message = result["choices"][0]["message"]
+                    content = message.get("content", "")
+                    think_match = None
+                    reasoning_content = message.get("reasoning_content", "")
+                    if not reasoning_content:
+                        think_match = re.search(r'<think>(.*?)</think>', content, re.DOTALL)
+                    if think_match:
+                        reasoning_content = think_match.group(1).strip()
+                    content = re.sub(r'<think>.*?</think>', '', content, flags=re.DOTALL).strip()
                    return content, reasoning_content
                return "没有返回结果", ""

            except Exception as e:
                if retry < max_retries - 1:  # 如果还有重试机会
                    wait_time = base_wait_time * (2 ** retry)
@@ -206,7 +222,7 @@ class LLM_request:
                else:
                    logger.critical(f"请求失败: {str(e)}", exc_info=True)
                    raise RuntimeError(f"API请求失败: {str(e)}")

        logger.error("达到最大重试次数,请求仍然失败")
        raise RuntimeError("达到最大重试次数API请求仍然失败")

@@ -224,36 +240,36 @@ class LLM_request:
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }

        data = {
            "model": model,
            "input": text,
            "encoding_format": "float"
        }

        api_url = f"{self.base_url.rstrip('/')}/embeddings"
        logger.info(f"发送请求到URL: {api_url}")  # 记录请求的URL

        max_retries = 2
        base_wait_time = 6

        for retry in range(max_retries):
            try:
                response = requests.post(api_url, headers=headers, json=data, timeout=30)

                if response.status_code == 429:
                    wait_time = base_wait_time * (2 ** retry)
                    logger.warning(f"遇到请求限制(429),等待{wait_time}秒后重试...")
                    time.sleep(wait_time)
                    continue

                response.raise_for_status()

                result = response.json()
                if 'data' in result and len(result['data']) > 0:
                    return result['data'][0]['embedding']
                return None

            except Exception as e:
                if retry < max_retries - 1:
                    wait_time = base_wait_time * (2 ** retry)
@@ -262,7 +278,7 @@ class LLM_request:
                else:
                    logger.critical(f"embedding请求失败: {str(e)}", exc_info=True)
                    return None

        logger.error("达到最大重试次数embedding请求仍然失败")
        return None

@@ -280,19 +296,19 @@ class LLM_request:
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }

        data = {
            "model": model,
            "input": text,
            "encoding_format": "float"
        }

        api_url = f"{self.base_url.rstrip('/')}/embeddings"
        logger.info(f"发送请求到URL: {api_url}")  # 记录请求的URL

        max_retries = 3
        base_wait_time = 15

        for retry in range(max_retries):
            try:
                async with aiohttp.ClientSession() as session:
@@ -302,14 +318,14 @@ class LLM_request:
                        logger.warning(f"遇到请求限制(429),等待{wait_time}秒后重试...")
                        await asyncio.sleep(wait_time)
                        continue

                    response.raise_for_status()

                    result = await response.json()
                    if 'data' in result and len(result['data']) > 0:
                        return result['data'][0]['embedding']
                    return None

            except Exception as e:
                if retry < max_retries - 1:
                    wait_time = base_wait_time * (2 ** retry)
@@ -318,6 +334,6 @@ class LLM_request:
                else:
                    logger.critical(f"embedding请求失败: {str(e)}", exc_info=True)
                    return None

        logger.error("达到最大重试次数embedding请求仍然失败")
        return None
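For reference, running the split_think_tags sketch from above on a reply that embeds its reasoning in <think> tags (sample data, not from the repository) shows the intended before/after:

message = {"content": "<think>The user is greeting; reply briefly.</think>Hello!"}
content, reasoning = split_think_tags(message)
# content == "Hello!"
# reasoning == "The user is greeting; reply briefly."

When the provider already returns reasoning_content, the regex search is skipped and only the final tag-strip and strip() run, so replies without <think> tags pass through unchanged.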