feat: allow configuring the proactive-chat probability

pull/1294/head
SengokuCola 2025-10-04 20:58:15 +08:00
parent d180a2ec07
commit ced8935fe1
7 changed files with 24 additions and 8 deletions


@@ -191,18 +191,22 @@ class HeartFChatting:
             question_probability = 0.005
         else:
             question_probability = 0.001
+        question_probability = question_probability * global_config.chat.auto_chat_value
-        if question_probability > 0 and not self.questioned and len(global_conflict_tracker.get_questions_by_chat_id(self.stream_id)) > 0:  # no reply for a long time, so try speaking proactively; the question probability grows over time
+        if question_probability > 0 and not self.questioned and len(global_conflict_tracker.get_questions_by_chat_id(self.stream_id)) == 0:  # no reply for a long time, so try speaking proactively; the question probability grows over time
+            logger.info(f"{self.log_prefix} No reply for a long time; may speak proactively, probability: {question_probability}")
             if random.random() < question_probability:  # roll against the scaled proactive-chat probability
-                print(f"{self.log_prefix} No reply for a long time; speaking proactively, generating a question")
                 self.questioned = True
+                self.last_active_time = time.time()
+                print(f"{self.log_prefix} No reply for a long time; speaking proactively, generating a question")
                 cycle_timers, thinking_id = self.start_cycle()
                 question_maker = QuestionMaker(self.stream_id)
                 question, conflict_context = await question_maker.make_question()
                 if question and conflict_context:
                     await global_conflict_tracker.track_conflict(question, conflict_context, True, self.stream_id)
+                    await self._lift_question_reply(question, cycle_timers, thinking_id)
-                self.end_cycle(cycle_timers, thinking_id)
+                # self.end_cycle(cycle_timers, thinking_id)

        if len(recent_messages_list) >= 1:
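Taken together, this hunk multiplies a small per-cycle base probability by the new auto_chat_value knob before rolling the dice. A minimal sketch of that gate in isolation (the function name and the flattened parameters are stand-ins for the diff's instance state):

import random

def should_ask_proactively(base_probability: float, auto_chat_value: float,
                           already_questioned: bool, open_questions: int) -> bool:
    # Scale the base chance by the config knob; auto_chat_value = 0 disables proactive chat.
    p = base_probability * auto_chat_value
    # Only fire when nothing is pending: no question asked yet in this stream
    # and no open questions tracked for the chat.
    if p <= 0 or already_questioned or open_questions > 0:
        return False
    return random.random() < p

# With base_probability = 0.005 and auto_chat_value = 0.5,
# roughly one cycle in 400 triggers a proactive question.
print(should_ask_proactively(0.005, 0.5, False, 0))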
@@ -740,6 +744,7 @@ class HeartFChatting:
                 enable_tool=global_config.tool.enable_tool,
                 request_type="replyer",
                 from_plugin=False,
+                reply_time_point=action_planner_info.action_data.get("loop_start_time", time.time()),
             )
             if not success or not llm_response or not llm_response.reply_set:


@@ -469,8 +469,8 @@ class ActionPlanner:
         # Call the LLM
         llm_content, (reasoning_content, _, _) = await self.planner_llm.generate_response_async(prompt=prompt)
-        # logger.info(f"{self.log_prefix} planner raw prompt: {prompt}")
-        # logger.info(f"{self.log_prefix} planner raw response: {llm_content}")
+        logger.info(f"{self.log_prefix} planner raw prompt: {prompt}")
+        logger.info(f"{self.log_prefix} planner raw response: {llm_content}")
         if global_config.debug.show_prompt:
             logger.info(f"{self.log_prefix} planner raw prompt: {prompt}")


@@ -71,6 +71,7 @@ class DefaultReplyer:
         from_plugin: bool = True,
         stream_id: Optional[str] = None,
         reply_message: Optional[DatabaseMessages] = None,
+        reply_time_point: Optional[float] = time.time(),
     ) -> Tuple[bool, LLMGenerationDataModel]:
         # sourcery skip: merge-nested-ifs
         """
@@ -104,6 +105,7 @@
                 enable_tool=enable_tool,
                 reply_message=reply_message,
                 reply_reason=reply_reason,
+                reply_time_point=reply_time_point,
             )
             llm_response.prompt = prompt
             llm_response.selected_expressions = selected_expressions
@@ -544,6 +546,7 @@
         available_actions: Optional[Dict[str, ActionInfo]] = None,
         chosen_actions: Optional[List[ActionPlannerInfo]] = None,
         enable_tool: bool = True,
+        reply_time_point: Optional[float] = time.time(),
     ) -> Tuple[str, List[int]]:
         """
         Build the replyer context
@@ -583,13 +586,13 @@
         message_list_before_now_long = get_raw_msg_before_timestamp_with_chat(
             chat_id=chat_id,
-            timestamp=time.time(),
+            timestamp=reply_time_point,
             limit=global_config.chat.max_context_size * 1,
         )
         message_list_before_short = get_raw_msg_before_timestamp_with_chat(
             chat_id=chat_id,
-            timestamp=time.time(),
+            timestamp=reply_time_point,
             limit=int(global_config.chat.max_context_size * 0.33),
         )
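One Python footnote on the new parameter: a default of time.time() in a def line is evaluated once, when the function object is created, not on every call, so any call that omits reply_time_point would reuse the import-time timestamp. In this diff the callers shown all pass the value explicitly, so the pitfall is latent. A minimal sketch of the usual None-default pattern (a hypothetical standalone function, not this repository's code):

import time
from typing import Optional

def snapshot_point(reply_time_point: Optional[float] = None) -> float:
    # Fall back to "now" at call time rather than at definition time.
    if reply_time_point is None:
        reply_time_point = time.time()
    return reply_time_point

a = snapshot_point()
time.sleep(1)
b = snapshot_point()
assert b > a  # each call gets a fresh timestamp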


@@ -70,6 +70,7 @@ class PrivateReplyer:
         from_plugin: bool = True,
         stream_id: Optional[str] = None,
         reply_message: Optional[DatabaseMessages] = None,
+        reply_time_point: Optional[float] = time.time(),
     ) -> Tuple[bool, LLMGenerationDataModel]:
         # sourcery skip: merge-nested-ifs
         """


@@ -79,6 +79,9 @@ class ChatConfig(ConfigBase):
     mentioned_bot_reply: bool = True
     """Whether a mention of the bot forces a reply"""
+    auto_chat_value: float = 1
+    """Proactive-chat multiplier; the smaller it is, the less likely 麦麦 is to start a conversation"""
     at_bot_inevitable_reply: float = 1
     """Reply boost when the bot is @-mentioned: 1 means a guaranteed (100%) reply, 0 means no extra boost"""


@@ -90,6 +90,7 @@ async def generate_reply(
     enable_chinese_typo: bool = True,
     request_type: str = "generator_api",
     from_plugin: bool = True,
+    reply_time_point: Optional[float] = None,
 ) -> Tuple[bool, Optional["LLMGenerationDataModel"]]:
     """Generate a reply
@@ -109,6 +110,7 @@
         model_set_with_weight: list of model configurations, each element a (TaskConfig, weight) tuple
         request_type: request type (optional), recorded against LLM usage
         from_plugin: whether the call comes from a plugin
+        reply_time_point: the point in time the reply context is anchored to
     Returns:
         Tuple[bool, Optional[LLMGenerationDataModel]]: (success flag, generation data)
     """
@@ -136,6 +138,7 @@
         reply_reason=reply_reason,
         from_plugin=from_plugin,
         stream_id=chat_stream.stream_id if chat_stream else chat_id,
+        reply_time_point=reply_time_point,
     )
     if not success:
         logger.warning("[GeneratorAPI] reply generation failed")
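The net effect of threading reply_time_point through generate_reply is that both context queries in the replyer read messages as of one fixed instant, instead of calling time.time() twice at slightly different moments. A call-site sketch of pinning that instant at the start of a planning loop (the chat_stream argument and the trimmed keyword list are stand-ins for whatever the real caller passes):

import time

async def plan_and_reply(chat_stream):
    # Pin the snapshot point once, before any slow planning work.
    loop_start_time = time.time()
    # ... the planner runs; new messages may arrive in the meantime ...
    # Both the long and the short context windows now end at loop_start_time
    # rather than at two slightly different "now"s.
    success, llm_response = await generate_reply(
        chat_stream=chat_stream,
        reply_time_point=loop_start_time,
    )
    return success, llm_response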


@@ -1,5 +1,5 @@
 [inner]
-version = "6.18.1"
+version = "6.18.2"
 #---- The notes below are for developers; if you have only deployed 麦麦, you do not need to read them ----
 # If you modify this config file, please increment the version value
@@ -77,6 +77,7 @@ expression_groups = [
 talk_value = 1  # chat frequency; the smaller it is, the more silent the bot (range 0-1)
 mentioned_bot_reply = true  # whether a mention of the bot forces a reply
 max_context_size = 30  # context length
+auto_chat_value = 1  # proactive chat; the smaller it is, the less likely 麦麦 is to start a conversation
 planner_smooth = 5  # planner smoothing; larger values reduce planner load but slightly slow reactions (recommended 2-8; 0 disables; must be >= 0)
 # Dynamic talk-frequency rules: adjust talk_value by time window / by chat_id; a specific-chat match takes priority over the global one
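The trailing comment describes the matching order for dynamic talk_value rules: a rule bound to the specific chat_id wins, and a global rule for the same time window is the fallback. A hypothetical matcher for that priority order (the rule shape is invented for illustration; the template's actual rule syntax is truncated above):

def resolve_talk_value(rules: list[dict], chat_id: str, hour: int, default: float = 1.0) -> float:
    def in_window(rule: dict) -> bool:
        start, end = rule["hours"]  # e.g. (23, 7) may wrap past midnight
        return start <= hour < end if start < end else (hour >= start or hour < end)

    # Pass 1: chat-specific rules win.
    for rule in rules:
        if rule.get("chat_id") == chat_id and in_window(rule):
            return rule["talk_value"]
    # Pass 2: global rules (no chat_id) apply next.
    for rule in rules:
        if rule.get("chat_id") is None and in_window(rule):
            return rule["talk_value"]
    return default

rules = [{"chat_id": None, "hours": (0, 24), "talk_value": 0.5},
         {"chat_id": "g123", "hours": (9, 18), "talk_value": 1.0}]
print(resolve_talk_value(rules, "g123", 10))  # 1.0 (specific beats global)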