better: optimize model config and mood

pull/1239/head
SengokuCola 2025-09-13 22:08:29 +08:00
parent 43ac8bee02
commit 9c4681805f
4 changed files with 29 additions and 39 deletions

View File

@@ -72,16 +72,15 @@ class HeartFCMessageReceiver:
chat = message.chat_stream
# 2. Interest calculation and update
- interested_rate, keywords = await _calculate_interest(message)
+ _, keywords = await _calculate_interest(message)
await self.storage.store_message(message, chat)
heartflow_chat: HeartFChatting = await heartflow.get_or_create_heartflow_chat(chat.stream_id) # type: ignore
# subheartflow.add_message_to_normal_chat_cache(message, interested_rate, is_mentioned)
if global_config.mood.enable_mood:
chat_mood = mood_manager.get_mood_by_chat_id(heartflow_chat.stream_id)
- asyncio.create_task(chat_mood.update_mood_by_message(message, interested_rate))
+ asyncio.create_task(chat_mood.update_mood_by_message(message))
# 3. Logging
mes_name = chat.group_info.group_name if chat.group_info else "private chat"
@@ -109,7 +108,7 @@ class HeartFCMessageReceiver:
replace_bot_name=True,
)
logger.info(f"[{mes_name}]{userinfo.user_nickname}:{processed_plain_text}[{interested_rate:.2f}]") # type: ignore
logger.info(f"[{mes_name}]{userinfo.user_nickname}:{processed_plain_text}") # type: ignore
_ = Person.register_person(
platform=message.message_info.platform, # type: ignore
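For orientation, a minimal runnable sketch of the receiver flow after this change. The classes and helper names below are hypothetical stand-ins, not MaiBot's actual code; only the new call shape of update_mood_by_message(message) and the fire-and-forget asyncio.create_task pattern are taken from the diff. The interest calculation still runs for its keywords, but the rate is discarded and no longer forwarded to the mood update.

import asyncio

# Hypothetical stand-ins for the real message/mood objects.
class SketchMood:
    async def update_mood_by_message(self, message: str) -> None:
        # The real method now derives interest from the message itself.
        print(f"mood updated from: {message!r}")

async def calculate_interest(message: str) -> tuple[float, list[str]]:
    # Stand-in for _calculate_interest: returns (interested_rate, keywords).
    return 0.5, [word for word in message.split() if len(word) > 3]

async def on_message(message: str, mood: SketchMood) -> None:
    _, keywords = await calculate_interest(message)  # rate is now discarded
    print("keywords:", keywords)
    # Fire-and-forget mood update, no interested_rate argument any more.
    task = asyncio.create_task(mood.update_mood_by_message(message))
    await task  # awaited here only so the sketch prints before exiting

asyncio.run(on_message("hello there heartflow", SketchMood()))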

View File

@@ -102,9 +102,6 @@ class ModelTaskConfig(ConfigBase):
replyer: TaskConfig
"""normal_chat首要回复模型模型配置"""
emotion: TaskConfig
"""情绪模型配置"""
vlm: TaskConfig
"""视觉语言模型配置"""

View File

@@ -62,11 +62,11 @@ class ChatMood:
self.regression_count: int = 0
- self.mood_model = LLMRequest(model_set=model_config.model_task_config.emotion, request_type="mood")
+ self.mood_model = LLMRequest(model_set=model_config.model_task_config.utils, request_type="mood")
self.last_change_time: float = 0
- async def update_mood_by_message(self, message: MessageRecv, interested_rate: float):
+ async def update_mood_by_message(self, message: MessageRecv):
self.regression_count = 0
during_last_time = message.message_info.time - self.last_change_time # type: ignore
@@ -74,10 +74,9 @@ class ChatMood:
base_probability = 0.05
time_multiplier = 4 * (1 - math.exp(-0.01 * during_last_time))
- if interested_rate <= 0:
-     interest_multiplier = 0
- else:
-     interest_multiplier = 2 * math.pow(interested_rate, 0.25)
+ # Base interest derived from the message length
+ message_length = len(message.message_content.content or "")
+ interest_multiplier = min(2.0, 1.0 + message_length / 100)
logger.debug(
f"base_probability: {base_probability}, time_multiplier: {time_multiplier}, interest_multiplier: {interest_multiplier}"
@@ -90,7 +89,7 @@ class ChatMood:
return
logger.debug(
f"{self.log_prefix} 更新情绪状态,感兴趣度: {interested_rate:.2f}, 更新概率: {update_probability:.2f}"
f"{self.log_prefix} 更新情绪状态,更新概率: {update_probability:.2f}"
)
message_time: float = message.message_info.time # type: ignore
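A worked example of the new probability inputs, with the formulas copied from the hunks above. The final combination into update_probability sits outside the shown hunks, so the product printed below is an assumption: the length-based interest term starts at 1.0 for an empty message and caps at 2.0 from 100 characters up, while the time term still saturates toward 4 for long idle gaps.

import math

BASE_PROBABILITY = 0.05  # as in the hunk above

def time_multiplier(seconds_since_last_change: float) -> float:
    # Saturates toward 4.0 the longer the chat has been quiet.
    return 4 * (1 - math.exp(-0.01 * seconds_since_last_change))

def interest_multiplier(message_text: str) -> float:
    # New rule: 1.0 for an empty message, capped at 2.0 from 100 characters.
    return min(2.0, 1.0 + len(message_text) / 100)

for idle, text in [(10, "ok"), (60, "a" * 50), (600, "b" * 200)]:
    t, i = time_multiplier(idle), interest_multiplier(text)
    # Multiplicative combination is an assumption; the real formula is not in this diff.
    print(f"idle={idle:4d}s len={len(text):3d} time={t:.2f} interest={i:.2f} ~p={BASE_PROBABILITY * t * i:.3f}")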

View File

@@ -1,5 +1,5 @@
[inner]
version = "1.6.0"
version = "1.7.0"
# 配置文件版本号迭代规则同bot_config.toml
@@ -12,14 +12,14 @@ max_retry = 2 # Maximum retry count (per model API)
timeout = 30 # API request timeout (unit: seconds)
retry_interval = 10 # Retry interval (unit: seconds)
- [[api_providers]] # SiliconFlow API provider config
- name = "SiliconFlow"
- base_url = "https://api.siliconflow.cn/v1"
- api_key = "your-siliconflow-api-key"
+ [[api_providers]] # Alibaba BaiLian API provider config
+ name = "BaiLian"
+ base_url = "https://dashscope.aliyuncs.com/compatible-mode/v1"
+ api_key = "your-bailian-key"
client_type = "openai"
max_retry = 2
- timeout = 30
- retry_interval = 10
+ timeout = 15
+ retry_interval = 5
[[api_providers]] # Special case: Google's Gemini uses its own API, which is not OpenAI-compatible; set the client to "gemini"
name = "Google"
@@ -30,14 +30,14 @@ max_retry = 2
timeout = 30
retry_interval = 10
- [[api_providers]] # Alibaba BaiLian API provider config
- name = "BaiLian"
- base_url = "https://dashscope.aliyuncs.com/compatible-mode/v1"
- api_key = "your-bailian-key"
+ [[api_providers]] # SiliconFlow API provider config
+ name = "SiliconFlow"
+ base_url = "https://api.siliconflow.cn/v1"
+ api_key = "your-siliconflow-api-key"
client_type = "openai"
max_retry = 2
- timeout = 15
- retry_interval = 5
+ timeout = 60
+ retry_interval = 10
[[models]] # Model (multiple entries can be configured)
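The provider blocks above only declare max_retry, timeout and retry_interval; how the client consumes them is not part of this diff. A generic, hypothetical sketch of the usual reading (first attempt plus max_retry retries, each with a per-request timeout, waiting retry_interval between attempts):

import time

def call_with_retry(send, payload, *, max_retry=2, timeout=15.0, retry_interval=5.0):
    # Hypothetical client loop; not MaiBot's actual request code.
    last_error = None
    for attempt in range(1 + max_retry):            # first try + max_retry retries
        try:
            return send(payload, timeout=timeout)   # per-request timeout in seconds
        except Exception as err:                    # broad catch: sketch only
            last_error = err
            if attempt < max_retry:
                time.sleep(retry_interval)          # wait before the next attempt
    raise RuntimeError("all attempts failed") from last_error

# Example: a flaky fake sender that succeeds on its second attempt.
calls = {"n": 0}
def fake_send(payload, timeout):
    calls["n"] += 1
    if calls["n"] < 2:
        raise TimeoutError("simulated timeout")
    return {"ok": True, "payload": payload}

print(call_with_retry(fake_send, {"q": "ping"}, max_retry=2, timeout=15, retry_interval=0.1))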
@@ -93,8 +93,8 @@ price_in = 0
price_out = 0
- [model_task_config.utils] # Model used by some of MaiMai's components, e.g. the emoji pack module, the naming module and the relationship module; a required model for MaiMai
- model_list = ["siliconflow-deepseek-v3"] # List of models to use; each entry corresponds to a model name (name) defined above
+ [model_task_config.utils] # Model used by some of MaiMai's components, e.g. the emoji pack module, the naming module, the relationship module, MaiMai's mood changes, etc.; a required model for MaiMai
+ model_list = ["siliconflow-deepseek-v3","qwen3-30b"] # List of models to use; each entry corresponds to a model name (name) defined above
temperature = 0.2 # Model temperature; 0.1-0.3 recommended for the new V3
max_tokens = 800 # Maximum output tokens
@@ -103,6 +103,11 @@ model_list = ["qwen3-8b","qwen3-30b"]
temperature = 0.7
max_tokens = 800
+ [model_task_config.tool_use] # Tool-calling model; must be a model that supports tool calls
+ model_list = ["qwen3-30b"]
+ temperature = 0.7
+ max_tokens = 800
[model_task_config.replyer] # Primary reply model; also used by the expressor and for expression-style learning
model_list = ["siliconflow-deepseek-v3"]
temperature = 0.3 # Model temperature; 0.1-0.3 recommended for the new V3
@@ -113,11 +118,6 @@ model_list = ["siliconflow-deepseek-v3"]
temperature = 0.3
max_tokens = 800
- [model_task_config.emotion] # Handles MaiMai's mood changes
- model_list = ["qwen3-30b"]
- temperature = 0.7
- max_tokens = 800
[model_task_config.vlm] # Image recognition model
model_list = ["qwen2.5-vl-72b"]
max_tokens = 800
@@ -125,11 +125,6 @@ max_tokens = 800
[model_task_config.voice] # Speech recognition model
model_list = ["sensevoice-small"]
- [model_task_config.tool_use] # Tool-calling model; must be a model that supports tool calls
- model_list = ["qwen3-30b"]
- temperature = 0.7
- max_tokens = 800
# Embedding model
[model_task_config.embedding]
model_list = ["bge-m3"]