feat:可选,由llm执行回复

pull/1465/head
SengokuCola 2025-12-31 00:43:08 +08:00
parent 67c24f84cd
commit 71a85667e3
5 changed files with 57 additions and 26 deletions

View File

@ -244,12 +244,14 @@ class HeartFChatting:
thinking_id,
actions,
selected_expressions: Optional[List[int]] = None,
quote_message: Optional[bool] = None,
) -> Tuple[Dict[str, Any], str, Dict[str, float]]:
with Timer("回复发送", cycle_timers):
reply_text = await self._send_response(
reply_set=response_set,
message_data=action_message,
selected_expressions=selected_expressions,
quote_message=quote_message,
)
# 获取 platform,如果不存在则从 chat_stream 获取,如果还是 None 则使用默认值
@ -526,15 +528,22 @@ class HeartFChatting:
reply_set: "ReplySetModel",
message_data: "DatabaseMessages",
selected_expressions: Optional[List[int]] = None,
quote_message: Optional[bool] = None,
) -> str:
new_message_count = message_api.count_new_messages(
chat_id=self.chat_stream.stream_id, start_time=self.last_read_time, end_time=time.time()
)
need_reply = new_message_count >= random.randint(2, 3)
if need_reply:
logger.info(f"{self.log_prefix} 从思考到回复,共有{new_message_count}条新消息,使用引用回复")
# 根据 llm_quote 配置决定是否使用 quote_message 参数
if global_config.chat.llm_quote and quote_message is not None:
# 如果配置为 true,使用 LLM 给出的 quote_message 参数决定是否引用回复
need_reply = quote_message
if need_reply:
logger.info(f"{self.log_prefix} LLM 决定使用引用回复")
else:
# 如果配置为 false,使用原来的模式(按新消息数量随机阈值判断)
new_message_count = message_api.count_new_messages(
chat_id=self.chat_stream.stream_id, start_time=self.last_read_time, end_time=time.time()
)
need_reply = new_message_count >= random.randint(2, 3)
if need_reply:
logger.info(f"{self.log_prefix} 从思考到回复,共有{new_message_count}条新消息,使用引用回复")
reply_text = ""
first_replied = False
@ -640,6 +649,7 @@ class HeartFChatting:
# 从 Planner 的 action_data 中提取未知词语列表(仅在 reply 时使用)
unknown_words = None
quote_message = None
if isinstance(action_planner_info.action_data, dict):
uw = action_planner_info.action_data.get("unknown_words")
if isinstance(uw, list):
@ -651,6 +661,17 @@ class HeartFChatting:
cleaned_uw.append(s)
if cleaned_uw:
unknown_words = cleaned_uw
# 从 Planner 的 action_data 中提取 quote_message 参数
qm = action_planner_info.action_data.get("quote_message")
if qm is not None:
# 支持多种格式:true/false, "true"/"false", 1/0
if isinstance(qm, bool):
quote_message = qm
elif isinstance(qm, str):
quote_message = qm.lower() in ("true", "1", "yes")
elif isinstance(qm, (int, float)):
quote_message = bool(qm)
success, llm_response = await generator_api.generate_reply(
chat_stream=self.chat_stream,
@ -682,6 +703,7 @@ class HeartFChatting:
thinking_id=thinking_id,
actions=chosen_action_plan_infos,
selected_expressions=selected_expressions,
quote_message=quote_message,
)
self.last_active_time = time.time()
return {

View File

@ -522,19 +522,32 @@ class ActionPlanner:
# 根据 think_mode 配置决定 reply action 的示例 JSON
# 在 JSON 中直接作为 action 参数携带 unknown_words 和 question
if global_config.chat.think_mode == "classic":
reply_action_example = (
reply_action_example = ""
if global_config.chat.llm_quote:
reply_action_example += "5.如果要明确回复消息使用quote如果消息不多不需要明确回复设置quote为false\n"
reply_action_example += (
'{{"action":"reply", "target_message_id":"消息id(m+数字)", '
'"unknown_words":["词语1","词语2"], '
'"question":"需要查询的问题"}'
'"question":"需要查询的问题"'
)
if global_config.chat.llm_quote:
reply_action_example += ', "quote_message":"如果需要引用该message设置为true"'
reply_action_example += "}"
else:
reply_action_example = (
"5.think_level表示思考深度0表示该回复不需要思考和回忆1表示该回复需要进行回忆和思考\n"
+ '{{"action":"reply", "think_level":数值等级(0或1), '
)
if global_config.chat.llm_quote:
reply_action_example += "6.如果要明确回复消息使用quote如果消息不多不需要明确回复设置quote为false\n"
reply_action_example += (
'{{"action":"reply", "think_level":数值等级(0或1), '
'"target_message_id":"消息id(m+数字)", '
'"unknown_words":["词语1","词语2"], '
'"question":"需要查询的问题"}'
'"question":"需要查询的问题"'
)
if global_config.chat.llm_quote:
reply_action_example += ', "quote_message":"如果需要引用该message设置为true"'
reply_action_example += "}"
planner_prompt_template = await global_prompt_manager.get_prompt_async("planner_prompt")
prompt = planner_prompt_template.format(

View File

@ -125,6 +125,9 @@ class ChatConfig(ConfigBase):
plan_reply_log_max_per_chat: int = 1024
"""每个聊天流最大保存的Plan/Reply日志数量超过此数量时会自动删除最老的日志"""
llm_quote: bool = False
"""是否在 reply action 中启用 quote 参数,启用后 LLM 可以控制是否引用消息"""
def _parse_stream_config_to_chat_id(self, stream_config_str: str) -> Optional[str]:
"""与 ChatStream.get_stream_id 一致地从 "platform:id:type" 生成 chat_id。"""
try:

View File

@ -44,18 +44,6 @@ def get_random_dream_styles(count: int = 2) -> List[str]:
"""从梦境风格列表中随机选择指定数量的风格"""
return random.sample(DREAM_STYLES, min(count, len(DREAM_STYLES)))
def get_dream_summary_model() -> LLMRequest:
"""获取用于生成梦境总结的 utils 模型实例"""
global _dream_summary_model
if _dream_summary_model is None:
_dream_summary_model = LLMRequest(
model_set=model_config.model_task_config.replyer,
request_type="dream.summary",
)
return _dream_summary_model
def init_dream_summary_prompt() -> None:
"""初始化梦境总结的提示词"""
Prompt(
@ -186,7 +174,10 @@ async def generate_dream_summary(
)
# 调用 utils 模型生成梦境
summary_model = get_dream_summary_model()
summary_model = LLMRequest(
model_set=model_config.model_task_config.replyer,
request_type="dream.summary",
)
dream_content, (reasoning, model_name, _) = await summary_model.generate_response_async(
dream_prompt,
temperature=0.8,

View File

@ -1,5 +1,5 @@
[inner]
version = "7.3.2"
version = "7.3.3"
#----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读----
# 如果你想要修改配置文件请递增version的值
@ -118,6 +118,8 @@ think_mode = "dynamic" # 思考模式可选classic默认浅度思考和
plan_reply_log_max_per_chat = 1024 # 每个聊天保存最大的Plan/Reply日志数量超过此数量时会自动删除最老的日志
llm_quote = false # 是否由llm执行引用
enable_talk_value_rules = true # 是否启用动态发言频率规则
# 动态发言频率规则:按时段/按chat_id调整 talk_value优先匹配具体chat再匹配全局