我的LLM为什么只有一半<think>TAG

pull/63/head
KawaiiYusora 2025-03-06 01:08:26 +08:00
parent 6e3124eae3
commit a445c22250
1 changed file with 13 additions and 10 deletions

View File

@@ -36,6 +36,7 @@ class LLM_request:
data = { data = {
"model": self.model_name, "model": self.model_name,
"messages": [{"role": "user", "content": prompt}], "messages": [{"role": "user", "content": prompt}],
"max_tokens": 8000,
**self.params **self.params
} }
@@ -65,10 +66,10 @@ class LLM_request:
think_match = None think_match = None
reasoning_content = message.get("reasoning_content", "") reasoning_content = message.get("reasoning_content", "")
if not reasoning_content: if not reasoning_content:
think_match = re.search(r'<think>(.*?)</think>', content, re.DOTALL) think_match = re.search(r'(?:<think>)?(.*?)</think>', content, re.DOTALL)
if think_match: if think_match:
reasoning_content = think_match.group(1).strip() reasoning_content = think_match.group(1).strip()
content = re.sub(r'<think>.*?</think>', '', content, flags=re.DOTALL).strip() content = re.sub(r'(?:<think>)?.*?</think>', '', content, flags=re.DOTALL, count=1).strip()
return content, reasoning_content return content, reasoning_content
return "没有返回结果", "" return "没有返回结果", ""
@@ -112,6 +113,7 @@ class LLM_request:
] ]
} }
], ],
"max_tokens": 8000,
**self.params **self.params
} }
@@ -151,10 +153,10 @@ class LLM_request:
think_match = None think_match = None
reasoning_content = message.get("reasoning_content", "") reasoning_content = message.get("reasoning_content", "")
if not reasoning_content: if not reasoning_content:
think_match = re.search(r'<think>(.*?)</think>', content, re.DOTALL) think_match = re.search(r'(?:<think>)?(.*?)</think>', content, re.DOTALL)
if think_match: if think_match:
reasoning_content = think_match.group(1).strip() reasoning_content = think_match.group(1).strip()
content = re.sub(r'<think>.*?</think>', '', content, flags=re.DOTALL).strip() content = re.sub(r'(?:<think>)?.*?</think>', '', content, flags=re.DOTALL, count=1).strip()
return content, reasoning_content return content, reasoning_content
return "没有返回结果", "" return "没有返回结果", ""
@@ -197,6 +199,7 @@ class LLM_request:
] ]
} }
], ],
"max_tokens": 8000,
**self.params **self.params
} }
@@ -226,10 +229,10 @@ class LLM_request:
think_match = None think_match = None
reasoning_content = message.get("reasoning_content", "") reasoning_content = message.get("reasoning_content", "")
if not reasoning_content: if not reasoning_content:
think_match = re.search(r'<think>(.*?)</think>', content, re.DOTALL) think_match = re.search(r'(?:<think>)?(.*?)</think>', content, re.DOTALL)
if think_match: if think_match:
reasoning_content = think_match.group(1).strip() reasoning_content = think_match.group(1).strip()
content = re.sub(r'<think>.*?</think>', '', content, flags=re.DOTALL).strip() content = re.sub(r'(?:<think>)?.*?</think>', '', content, flags=re.DOTALL, count=1).strip()
return content, reasoning_content return content, reasoning_content
return "没有返回结果", "" return "没有返回结果", ""