diff --git a/src/chat/knowledge/qa_manager.py b/src/chat/knowledge/qa_manager.py
index 1a087a7f..c3def2ed 100644
--- a/src/chat/knowledge/qa_manager.py
+++ b/src/chat/knowledge/qa_manager.py
@@ -22,7 +22,6 @@ class QAManager:
     ):
         self.embed_manager = embed_manager
         self.kg_manager = kg_manager
-        self.qa_model = LLMRequest(model_set=model_config.model_task_config.lpmm_qa, request_type="lpmm.qa")
 
     async def process_query(
         self, question: str
diff --git a/src/config/api_ada_configs.py b/src/config/api_ada_configs.py
index a5d5f059..a74728c0 100644
--- a/src/config/api_ada_configs.py
+++ b/src/config/api_ada_configs.py
@@ -132,9 +132,6 @@ class ModelTaskConfig(ConfigBase):
     lpmm_rdf_build: TaskConfig
     """LPMM RDF build model configuration"""
 
-    lpmm_qa: TaskConfig
-    """LPMM QA model configuration"""
-
    def get_task(self, task_name: str) -> TaskConfig:
        """Get the configuration for the given task"""
        if hasattr(self, task_name):
diff --git a/src/hippo_memorizer/chat_history_summarizer.py b/src/hippo_memorizer/chat_history_summarizer.py
index fddd2100..12952b08 100644
--- a/src/hippo_memorizer/chat_history_summarizer.py
+++ b/src/hippo_memorizer/chat_history_summarizer.py
@@ -848,11 +848,7 @@ class ChatHistorySummarizer:
         )
 
         try:
-            response, _ = await self.summarizer_llm.generate_response_async(
-                prompt=prompt,
-                temperature=0.3,
-                max_tokens=500,
-            )
+            response, _ = await self.summarizer_llm.generate_response_async(prompt=prompt)
 
             # Parse the JSON response
             json_str = response.strip()
diff --git a/template/model_config_template.toml b/template/model_config_template.toml
index 669488f6..27b78df5 100644
--- a/template/model_config_template.toml
+++ b/template/model_config_template.toml
@@ -191,10 +191,4 @@ slow_threshold = 20.0
 model_list = ["siliconflow-deepseek-v3.2"]
 temperature = 0.2
 max_tokens = 800
-slow_threshold = 20.0
-
-[model_task_config.lpmm_qa] # QA model
-model_list = ["siliconflow-deepseek-v3.2"]
-temperature = 0.7
-max_tokens = 800
-slow_threshold = 20.0
+slow_threshold = 20.0
\ No newline at end of file
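Reviewer note: the net effect of the summarizer hunk is that sampling parameters now come from the task-level config (as in template/model_config_template.toml) instead of being hard-coded at each call site. Below is a minimal, self-contained sketch of that pattern; the class and method names mirror the diff but are hypothetical stand-ins, not the project's actual LLMRequest implementation.

import asyncio
from dataclasses import dataclass, field


@dataclass
class TaskConfig:
    # Mirrors a [model_task_config.*] table in the TOML template.
    model_list: list[str] = field(default_factory=lambda: ["siliconflow-deepseek-v3.2"])
    temperature: float = 0.2
    max_tokens: int = 800


class LLMRequestSketch:
    """Hypothetical stand-in for LLMRequest (illustration only)."""

    def __init__(self, task: TaskConfig):
        self.task = task

    async def generate_response_async(self, prompt: str) -> tuple[str, dict]:
        # Sampling parameters are resolved from the task config,
        # so callers pass only the prompt (as the diff now does).
        params = {"temperature": self.task.temperature, "max_tokens": self.task.max_tokens}
        # A real implementation would call the model API here.
        return f"<response to {prompt!r} using {params}>", params


async def main() -> None:
    llm = LLMRequestSketch(TaskConfig())
    response, _ = await llm.generate_response_async(prompt="Summarize the chat history.")
    print(response)


if __name__ == "__main__":
    asyncio.run(main())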