From 79e8962f6f4c5ba6d2a5b0dc140daf1694a3bb07 Mon Sep 17 00:00:00 2001 From: Ronifue Date: Sat, 29 Nov 2025 18:15:46 +0800 Subject: [PATCH] =?UTF-8?q?feat:=20=E4=BD=BF=E5=BE=97model=5Finfo.extra=5F?= =?UTF-8?q?params=E8=83=BD=E5=A4=9F=E5=8D=95=E7=8B=AC=E6=8C=87=E5=AE=9A?= =?UTF-8?q?=E6=A8=A1=E5=9E=8B=E7=9A=84temperature?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/llm_models/utils_model.py | 2 +- template/model_config_template.toml | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/llm_models/utils_model.py b/src/llm_models/utils_model.py index 4f1725fd..30d06252 100644 --- a/src/llm_models/utils_model.py +++ b/src/llm_models/utils_model.py @@ -301,7 +301,7 @@ class LLMRequest: message_list=(compressed_messages or message_list), tool_options=tool_options, max_tokens=self.model_for_task.max_tokens if max_tokens is None else max_tokens, - temperature=self.model_for_task.temperature if temperature is None else temperature, + temperature=temperature if temperature is not None else (model_info.extra_params or {}).get("temperature", self.model_for_task.temperature), response_format=response_format, stream_response_handler=stream_response_handler, async_response_parser=async_response_parser, diff --git a/template/model_config_template.toml b/template/model_config_template.toml index 07e2af18..f188b551 100644 --- a/template/model_config_template.toml +++ b/template/model_config_template.toml @@ -56,6 +56,7 @@ price_in = 2.0 price_out = 3.0 [models.extra_params] # 可选的额外参数配置 enable_thinking = false # 不启用思考 +# temperature = 0.5 # 可选:为该模型单独指定温度,会覆盖任务配置中的温度 [[models]] model_identifier = "deepseek-ai/DeepSeek-V3.2-Exp" @@ -65,6 +66,7 @@ price_in = 2.0 price_out = 3.0 [models.extra_params] # 可选的额外参数配置 enable_thinking = true # 启用思考 +# temperature = 0.7 # 可选:为该模型单独指定温度,会覆盖任务配置中的温度 [[models]] model_identifier = "Qwen/Qwen3-Next-80B-A3B-Instruct"