From 11cc7ced1354d695a45e2005209e44b8abdd205a Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Thu, 25 Sep 2025 20:20:04 +0800 Subject: [PATCH] =?UTF-8?q?fix=EF=BC=9A=E4=BF=AE=E5=A4=8D=EF=BC=88?= =?UTF-8?q?=E4=B9=9F=E8=AE=B8=EF=BC=89=E5=90=9E=E5=AD=97=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/replyer/group_generator.py | 4 ++-- src/chat/replyer/prompt/replyer_prompt.py | 2 +- src/chat/replyer/prompt/rewrite_prompt.py | 2 +- src/llm_models/model_client/openai_client.py | 5 +++++ 4 files changed, 9 insertions(+), 4 deletions(-) diff --git a/src/chat/replyer/group_generator.py b/src/chat/replyer/group_generator.py index 9ff77cd0..87bfc670 100644 --- a/src/chat/replyer/group_generator.py +++ b/src/chat/replyer/group_generator.py @@ -951,13 +951,13 @@ class DefaultReplyer: if global_config.debug.show_prompt: logger.info(f"\n{prompt}\n") else: - logger.debug(f"\n{prompt}\n") + logger.debug(f"\nreplyer_Prompt:{prompt}\n") content, (reasoning_content, model_name, tool_calls) = await self.express_model.generate_response_async( prompt ) - logger.info(f"使用{model_name}生成回复内容: {content}") + logger.info(f"使用 {model_name} 生成回复内容: {content}") return content, reasoning_content, model_name, tool_calls async def get_prompt_info(self, message: str, sender: str, target: str): diff --git a/src/chat/replyer/prompt/replyer_prompt.py b/src/chat/replyer/prompt/replyer_prompt.py index 44423362..dd1a434e 100644 --- a/src/chat/replyer/prompt/replyer_prompt.py +++ b/src/chat/replyer/prompt/replyer_prompt.py @@ -86,7 +86,7 @@ def init_replyer_prompt(): 尽量简短一些。{keywords_reaction_prompt}请注意把握聊天内容,不要回复的太有条理,可以有个性。 {reply_style} 请注意不要输出多余内容(包括前后缀,冒号和引号,括号,表情等),只输出回复内容。 -{moderation_prompt}不要输出多余内容(包括前后缀,冒号和引号,括号,表情包,at或 @等 )。 +{moderation_prompt}不要输出多余内容(包括冒号和引号,括号,表情包,at或 @等 )。 """, "private_replyer_self_prompt", ) \ No newline at end of file diff --git a/src/chat/replyer/prompt/rewrite_prompt.py 
b/src/chat/replyer/prompt/rewrite_prompt.py index 70114b11..3118ae88 100644 --- a/src/chat/replyer/prompt/rewrite_prompt.py +++ b/src/chat/replyer/prompt/rewrite_prompt.py @@ -27,7 +27,7 @@ def init_rewrite_prompt(): 你可以完全重组回复,保留最基本的表达含义就好,但重组后保持语意通顺。 {keywords_reaction_prompt} {moderation_prompt} -不要输出多余内容(包括前后缀,冒号和引号,括号,表情包,emoji,at或 @等 ),只输出一条回复就好。 +不要输出多余内容(包括冒号和引号,括号,表情包,emoji,at或 @等 ),只输出一条回复就好。 现在,你说: """, "default_expressor_prompt", diff --git a/src/llm_models/model_client/openai_client.py b/src/llm_models/model_client/openai_client.py index 34134a15..ffee2ad7 100644 --- a/src/llm_models/model_client/openai_client.py +++ b/src/llm_models/model_client/openai_client.py @@ -487,6 +487,9 @@ class OpenaiClient(BaseClient): req_task.cancel() raise ReqAbortException("请求被外部信号中断") await asyncio.sleep(0.1) # 等待0.5秒后再次检查任务&中断信号量状态 + + # Debug: log the raw non-streaming API response + logger.debug(f"OpenAI API响应(非流式): {req_task.result()}") # logger.info(f"OpenAI请求时间: {model_info.model_identifier} {time.time() - start_time} \n{messages}") @@ -507,6 +510,8 @@ class OpenaiClient(BaseClient): total_tokens=usage_record[2], ) + # logger.debug(f"OpenAI API响应: {resp}") + return resp async def get_embedding(