From bfe4943b187cae5e5b807fdf98582b9f6e553706 Mon Sep 17 00:00:00 2001
From: UnCLAS-Prommer
Date: Thu, 11 Sep 2025 21:12:08 +0800
Subject: [PATCH] A more elegant fix for the problem
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/llm_models/model_client/openai_client.py | 36 +++++++-------------
 1 file changed, 12 insertions(+), 24 deletions(-)

diff --git a/src/llm_models/model_client/openai_client.py b/src/llm_models/model_client/openai_client.py
index c5146c5e..34134a15 100644
--- a/src/llm_models/model_client/openai_client.py
+++ b/src/llm_models/model_client/openai_client.py
@@ -277,12 +277,10 @@ async def _default_stream_response_handler(
         # Guard against empty-choices / usage-only frames
         if not hasattr(event, "choices") or not event.choices:
             if hasattr(event, "usage") and event.usage:
-                # Safely read the usage attributes to handle differences between API versions
-                usage_obj = event.usage
                 _usage_record = (
-                    getattr(usage_obj, 'prompt_tokens', 0) or 0,
-                    getattr(usage_obj, 'completion_tokens', 0) or 0,
-                    getattr(usage_obj, 'total_tokens', 0) or 0,
+                    event.usage.prompt_tokens or 0,
+                    event.usage.completion_tokens or 0,
+                    event.usage.total_tokens or 0,
                 )
             continue  # Skip this frame to avoid accessing choices[0]
         delta = event.choices[0].delta  # Delta content of the current chunk
@@ -302,12 +300,10 @@ async def _default_stream_response_handler(
 
         if event.usage:
             # If usage info is present, store it in the APIResponse object
-            # Safely read the usage attributes to handle differences between API versions
-            usage_obj = event.usage
             _usage_record = (
-                getattr(usage_obj, 'prompt_tokens', 0) or 0,
-                getattr(usage_obj, 'completion_tokens', 0) or 0,
-                getattr(usage_obj, 'total_tokens', 0) or 0,
+                event.usage.prompt_tokens or 0,
+                event.usage.completion_tokens or 0,
+                event.usage.total_tokens or 0,
             )
 
     try:
@@ -374,12 +370,10 @@ def _default_normal_response_parser(
 
     # Extract usage info
     if resp.usage:
-        # Safely read the usage attributes to handle differences between API versions
-        usage_obj = resp.usage
         _usage_record = (
-            getattr(usage_obj, 'prompt_tokens', 0) or 0,
-            getattr(usage_obj, 'completion_tokens', 0) or 0,
-            getattr(usage_obj, 'total_tokens', 0) or 0,
+            resp.usage.prompt_tokens or 0,
+            resp.usage.completion_tokens or 0,
+            resp.usage.total_tokens or 0,
         )
     else:
         _usage_record = None
@@ -557,18 +551,12 @@ class OpenaiClient(BaseClient):
 
         # Parse usage info
         if hasattr(raw_response, "usage"):
-            usage_obj = raw_response.usage
-            # Safely read the usage attributes to handle differences between API versions
-            prompt_tokens = getattr(usage_obj, 'prompt_tokens', 0) or 0
-            completion_tokens = getattr(usage_obj, 'completion_tokens', 0) or 0
-            total_tokens = getattr(usage_obj, 'total_tokens', 0) or 0
-
             response.usage = UsageRecord(
                 model_name=model_info.name,
                 provider_name=model_info.api_provider,
-                prompt_tokens=prompt_tokens,
-                completion_tokens=completion_tokens,
-                total_tokens=total_tokens,
+                prompt_tokens=raw_response.usage.prompt_tokens or 0,
+                completion_tokens=getattr(raw_response.usage, "completion_tokens", 0),
+                total_tokens=raw_response.usage.total_tokens or 0,
            )
 
         return response
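
Reviewer note (a minimal standalone sketch, not part of the patch): the direct attribute access above assumes the chunks come from the official openai Python SDK, where a stream created with stream_options={"include_usage": True} ends with a usage-only chunk whose choices list is empty and whose usage object exposes prompt_tokens, completion_tokens and total_tokens. The model name and message below are placeholder values; the API key is read from the environment.

import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI()  # expects OPENAI_API_KEY in the environment
    stream = await client.chat.completions.create(
        model="gpt-4o-mini",  # placeholder model name
        messages=[{"role": "user", "content": "Say hi"}],
        stream=True,
        stream_options={"include_usage": True},
    )

    usage_record = None
    async for chunk in stream:
        if not chunk.choices:  # usage-only frame: there is no delta to read
            if chunk.usage:
                usage_record = (
                    chunk.usage.prompt_tokens or 0,
                    chunk.usage.completion_tokens or 0,
                    chunk.usage.total_tokens or 0,
                )
            continue  # never touch choices[0] on this frame
        delta = chunk.choices[0].delta
        if delta.content:
            print(delta.content, end="", flush=True)

    print("\nusage:", usage_record)


if __name__ == "__main__":
    asyncio.run(main())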