diff --git a/src/llm_models/model_client/openai_client.py b/src/llm_models/model_client/openai_client.py index c5146c5e..34134a15 100644 --- a/src/llm_models/model_client/openai_client.py +++ b/src/llm_models/model_client/openai_client.py @@ -277,12 +277,10 @@ async def _default_stream_response_handler( # 空 choices / usage-only 帧的防御 if not hasattr(event, "choices") or not event.choices: if hasattr(event, "usage") and event.usage: - # 安全地获取usage属性,处理不同API版本的差异 - usage_obj = event.usage _usage_record = ( - getattr(usage_obj, 'prompt_tokens', 0) or 0, - getattr(usage_obj, 'completion_tokens', 0) or 0, - getattr(usage_obj, 'total_tokens', 0) or 0, + event.usage.prompt_tokens or 0, + event.usage.completion_tokens or 0, + event.usage.total_tokens or 0, ) continue # 跳过本帧,避免访问 choices[0] delta = event.choices[0].delta # 获取当前块的delta内容 @@ -302,12 +300,10 @@ async def _default_stream_response_handler( if event.usage: # 如果有使用情况,则将其存储在APIResponse对象中 - # 安全地获取usage属性,处理不同API版本的差异 - usage_obj = event.usage _usage_record = ( - getattr(usage_obj, 'prompt_tokens', 0) or 0, - getattr(usage_obj, 'completion_tokens', 0) or 0, - getattr(usage_obj, 'total_tokens', 0) or 0, + event.usage.prompt_tokens or 0, + event.usage.completion_tokens or 0, + event.usage.total_tokens or 0, ) try: @@ -374,12 +370,10 @@ def _default_normal_response_parser( # 提取Usage信息 if resp.usage: - # 安全地获取usage属性,处理不同API版本的差异 - usage_obj = resp.usage _usage_record = ( - getattr(usage_obj, 'prompt_tokens', 0) or 0, - getattr(usage_obj, 'completion_tokens', 0) or 0, - getattr(usage_obj, 'total_tokens', 0) or 0, + resp.usage.prompt_tokens or 0, + resp.usage.completion_tokens or 0, + resp.usage.total_tokens or 0, ) else: _usage_record = None @@ -557,18 +551,12 @@ class OpenaiClient(BaseClient): # 解析使用情况 if hasattr(raw_response, "usage"): - usage_obj = raw_response.usage - # 安全地获取usage属性,处理不同API版本的差异 - prompt_tokens = getattr(usage_obj, 'prompt_tokens', 0) or 0 - completion_tokens = getattr(usage_obj, 'completion_tokens', 
0) or 0 - total_tokens = getattr(usage_obj, 'total_tokens', 0) or 0 - response.usage = UsageRecord( model_name=model_info.name, provider_name=model_info.api_provider, - prompt_tokens=prompt_tokens, - completion_tokens=completion_tokens, - total_tokens=total_tokens, + prompt_tokens=raw_response.usage.prompt_tokens or 0, + completion_tokens=raw_response.usage.completion_tokens or 0, + total_tokens=raw_response.usage.total_tokens or 0, ) return response