mirror of https://github.com/Mai-with-u/MaiBot.git
Memory-related changes

parent 97834e7a93
commit c0cb28e10d
```diff
@@ -85,7 +85,7 @@ async def monitor_relationships():
 async def build_memory_task():
     """每30秒执行一次记忆构建"""
     print("\033[1;32m[记忆构建]\033[0m 开始构建记忆...")
-    hippocampus.build_memory(chat_size=12)
+    await hippocampus.build_memory(chat_size=12)
     print("\033[1;32m[记忆构建]\033[0m 记忆构建完成")
 
```
```diff
@@ -37,7 +37,7 @@ class BotConfig:
     talk_frequency_down_groups = set()
     ban_user_id = set()
 
-    build_memory_interval: int = 60 # 记忆构建间隔(秒)
+    build_memory_interval: int = 600 # 记忆构建间隔(秒)
     EMOJI_CHECK_INTERVAL: int = 120 # 表情包检查间隔(分钟)
     EMOJI_REGISTER_INTERVAL: int = 10 # 表情包注册间隔(分钟)
```
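Together, the two hunks above make the periodic memory build asynchronous and less frequent: the task now awaits `hippocampus.build_memory(...)`, and `build_memory_interval` rises from 60 to 600 seconds. (The docstring still says "every 30 seconds", which matches neither value.) A minimal sketch of how such a task might be driven, assuming a plain asyncio loop; `build_memory_loop` and its `interval` parameter are illustrative names, not code from this repo:

```python
import asyncio

async def build_memory_loop(hippocampus, interval: int = 600):
    """Illustrative periodic driver for the now-async build_memory."""
    while True:
        print("[记忆构建] 开始构建记忆...")
        # build_memory is a coroutine after this commit, so it must be
        # awaited; a bare call would only create a coroutine object.
        await hippocampus.build_memory(chat_size=12)
        print("[记忆构建] 记忆构建完成")
        await asyncio.sleep(interval)
```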
```diff
@@ -110,7 +110,7 @@ class LLMResponseGenerator:
             "model": model_name,
             "messages": [{"role": "user", "content": prompt}],
             "stream": False,
-            "max_tokens": 1024,
+            "max_tokens": 2048,
             "temperature": 0.7
         }
 
```
```diff
@@ -118,7 +118,7 @@ class LLMResponseGenerator:
             "model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
             "messages": [{"role": "user", "content": prompt_check}],
             "stream": False,
-            "max_tokens": 1024,
+            "max_tokens": 2048,
             "temperature": 0.7
         }
 
```
```diff
@@ -210,13 +210,13 @@ class LLMResponseGenerator:
             return await self._generate_base_response(
                 message,
                 "deepseek-reasoner",
-                {"temperature": 0.7, "max_tokens": 1024}
+                {"temperature": 0.7, "max_tokens": 2048}
             )
         else:
             return await self._generate_base_response(
                 message,
                 "Pro/deepseek-ai/DeepSeek-R1",
-                {"temperature": 0.7, "max_tokens": 1024}
+                {"temperature": 0.7, "max_tokens": 2048}
             )
 
     async def _generate_v3_response(self, message: Message) -> Optional[str]:
```
```diff
@@ -225,13 +225,13 @@ class LLMResponseGenerator:
             return await self._generate_base_response(
                 message,
                 "deepseek-chat",
-                {"temperature": 0.8, "max_tokens": 1024}
+                {"temperature": 0.8, "max_tokens": 2048}
             )
         else:
             return await self._generate_base_response(
                 message,
                 "Pro/deepseek-ai/DeepSeek-V3",
-                {"temperature": 0.8, "max_tokens": 1024}
+                {"temperature": 0.8, "max_tokens": 2048}
             )
 
     async def _generate_r1_distill_response(self, message: Message) -> Optional[str]:
```
```diff
@@ -239,7 +239,7 @@ class LLMResponseGenerator:
         return await self._generate_base_response(
             message,
             "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
-            {"temperature": 0.7, "max_tokens": 1024}
+            {"temperature": 0.7, "max_tokens": 2048}
         )
 
     async def _get_group_chat_context(self, message: Message) -> str:
```
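The five hunks above are one change applied at every call site: `max_tokens` doubles from 1024 to 2048 while model names and temperatures stay unchanged. The edited dict is an OpenAI-style chat-completion body, so the shared shape can be sketched as below; `build_payload` is an illustrative helper, not a function from this repo:

```python
def build_payload(model_name: str, prompt: str,
                  temperature: float = 0.7, max_tokens: int = 2048) -> dict:
    """Chat-completion request body with the fields shown in the hunks."""
    return {
        "model": model_name,  # e.g. "deepseek-chat" or "Pro/deepseek-ai/DeepSeek-R1"
        "messages": [{"role": "user", "content": prompt}],
        "stream": False,
        "max_tokens": max_tokens,   # raised from 1024 in this commit
        "temperature": temperature,
    }
```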
```diff
@@ -171,7 +171,7 @@ class PromptBuilder:
 
 
         #额外信息要求
-        extra_info = '''但是记得回复平淡一些,简短一些,记住不要输出多余内容(包括前后缀,冒号和引号,括号,表情等),只需要输出回复内容就好,不要输出其他任何内容'''
+        extra_info = '''但是记得回复平淡一些,简短一些,尤其注意在没明确提到时不要过多提及自身的背景, 记住不要输出多余内容(包括前后缀,冒号和引号,括号,表情等),只需要输出回复内容就好,不要输出其他任何内容'''
 
 
 
```
```diff
@@ -188,9 +188,9 @@ class PromptBuilder:
         prompt += f"{prompt_ger}\n"
         prompt += f"{extra_info}\n"
 
-        activate_prompt_check=f"以上是群里正在进行的聊天,昵称为 '{sender_name}' 的用户说的:{message_txt}。引起了你的注意,你和他{relation_prompt},你想要{relation_prompt_2},但是这不一定是合适的时机,请你决定是否要回应这条消息。"
+        activate_prompt_check=f"以上是群里正在进行的聊天,昵称为 '{sender_name}' 的用户说的:{message_txt}。引起了你的注意,你和他{relation_prompt},你想要{relation_prompt_2},但是这不一定是合适的时机,请你决定是否要回应这条消息。不要太受现在做的事情影响,因为摸鱼是很正常的。"
         prompt_personality_check = ''
-        extra_check_info=f"请注意把握群里的聊天内容的基础上,综合群内的氛围,例如,和{global_config.BOT_NICKNAME}相关的话题要积极回复,如果是at自己的消息一定要回复,如果是刚刚理会过的人发送消息,且还在与那个人对话中的话一定要回复,其他话题如果合适搭话也可以回复,如果认为应该回复请输出yes,否则输出no,请注意是决定是否需要回复,而不是编写回复内容,除了yes和no不要输出任何回复内容。"
+        extra_check_info=f"请注意把握群里的聊天内容的基础上,综合群内的氛围,例如,和{global_config.BOT_NICKNAME}相关的话题要积极回复,如果是at自己的消息,无论如何一定要回复,如果是刚刚理会过的人发送消息,且还在与那个人对话中的话一定要回复,其他话题如果合适搭话也可以回复,如果认为应该回复请输出yes,否则输出no,请注意是决定是否需要回复,而不是编写回复内容,除了yes和no不要输出任何回复内容。"
         if personality_choice < 4/6: # 第一种人格
             prompt_personality_check = f'''你的网名叫{global_config.BOT_NICKNAME},{personality[0]},{promt_info_prompt} {activate_prompt_check} {extra_check_info}'''
         elif personality_choice < 1: # 第二种人格
```
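`extra_check_info` instructs the checking model to answer with a bare yes or no (reply / stay silent) and nothing else. A sketch of how that answer might be normalized downstream, assuming a simple prefix check; `parse_reply_decision` and its default-to-no fallback are assumptions, not code from this repo:

```python
def parse_reply_decision(response_text: str) -> bool:
    """Map the checker's free-text answer onto a reply/ignore decision."""
    answer = response_text.strip().lower()
    if answer.startswith("yes"):
        return True
    if answer.startswith("no"):
        return False
    # Models sometimes ignore "output only yes or no"; treating anything
    # unrecognized as a no keeps the bot quiet rather than chatty.
    return False
```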
```diff
@@ -69,7 +69,7 @@ class TopicIdentifier:
             '按', '按照', '把', '被', '比', '比如', '除', '除了', '当', '对', '对于',
             '根据', '关于', '跟', '和', '将', '经', '经过', '靠', '连', '论', '通过',
             '同', '往', '为', '为了', '围绕', '于', '由', '由于', '与', '在', '沿', '沿着',
-            '依', '依照', '以', '因', '因为', '用', '由', '与', '自', '自从'
+            '依', '依照', '以', '因', '因为', '用', '由', '与', '自', '自从','[]'
         }
 
         # 过滤掉停用词和标点符号,只保留名词和动词
```
```diff
@@ -78,7 +78,7 @@ class TopicIdentifier:
             if word not in stop_words and not word.strip() in {
                 '。', ',', '、', ':', ';', '!', '?', '"', '"', ''', ''',
                 '(', ')', '【', '】', '《', '》', '…', '—', '·', '、', '~',
-                '~', '+', '=', '-'
+                '~', '+', '=', '-','[',']'
             }:
                 filtered_words.append(word)
 
```
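Both hunks extend the same keyword filter with bracket tokens: `'[]'` joins the stop words and `'['`, `']'` join the punctuation set, which plausibly drops leftovers of bracketed markup such as CQ codes. The nearby comment says only nouns and verbs are kept, which suggests POS-tagged segmentation with jieba; the helper below is a hedged sketch under that assumption, not the repo's actual function:

```python
import jieba.posseg as pseg  # assumed segmenter; the diff shows only the filter sets

def filter_topic_words(text: str, stop_words: set, punctuation: set) -> list:
    """Keep nouns and verbs that survive the stop-word and punctuation filters."""
    kept = []
    for pair in pseg.cut(text):
        word = pair.word
        if word in stop_words or word.strip() in punctuation:
            continue  # now also drops the '[', ']' and '[]' bracket tokens
        if pair.flag.startswith(('n', 'v')):  # jieba POS tags: n* nouns, v* verbs
            kept.append(word)
    return kept
```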
```diff
@@ -61,7 +61,7 @@ class WillingManager:
         if is_mentioned_bot and user_id == int(964959351):
             reply_probability = 1
 
-        return reply_probability
+        return reply_probability+0.5
 
     def change_reply_willing_sent(self, group_id: int):
         """开始思考后降低群组的回复意愿"""
```
```diff
@@ -72,7 +72,7 @@ class WillingManager:
         """发送消息后提高群组的回复意愿"""
         current_willing = self.group_reply_willing.get(group_id, 0)
         if current_willing < 1:
-            self.group_reply_willing[group_id] = min(1, current_willing + 0.3)
+            self.group_reply_willing[group_id] = min(2, current_willing + 0.8)
 
     async def ensure_started(self):
         """确保衰减任务已启动"""
```
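These two hunks make the bot noticeably more talkative: every computed reply probability gets a flat +0.5 added on return, and the post-send willingness boost grows from +0.3 (capped at 1) to +0.8 (capped at 2). One detail survives the change: the boost still fires only while willingness is below 1, so starting from any value under 1 the result stays under 1.8 and the `min(2, ...)` cap never actually binds. A standalone sketch of the updated rule as the diff shows it:

```python
def boost_willing_after_sent(group_reply_willing: dict, group_id: int) -> None:
    """Post-send boost as changed by this commit (illustrative standalone form)."""
    current = group_reply_willing.get(group_id, 0)
    if current < 1:  # guard unchanged by the commit
        # current < 1 implies current + 0.8 < 1.8, so min(2, ...) is inert
        group_reply_willing[group_id] = min(2, current + 0.8)
```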
```diff
@@ -192,7 +192,7 @@ class Hippocampus:
             chat_text.append(chat_)
         return chat_text
 
-    def build_memory(self,chat_size=12):
+    async def build_memory(self,chat_size=12):
         #最近消息获取频率
         time_frequency = {'near':1,'mid':2,'far':2}
         memory_sample = self.get_memory_sample(chat_size,time_frequency)
```
```diff
@@ -211,10 +211,12 @@ class Hippocampus:
             first_memory = set()
             first_memory = self.memory_compress(input_text, 2.5)
             # 延时防止访问超频
-            # time.sleep(5)
+            # time.sleep(60)
             #将记忆加入到图谱中
             for topic, memory in first_memory:
                 topics = segment_text(topic)
+                if '[' in topic or topic=='':
+                    continue
                 print(f"\033[1;34m话题\033[0m: {topic},节点: {topics}, 记忆: {memory}")
                 for split_topic in topics:
                     self.memory_graph.add_dot(split_topic,memory)
```
```diff
@@ -240,6 +242,8 @@ class Hippocampus:
         # print(topics)
         compressed_memory = set()
         for topic in topics:
+            if topic=='' or '[' in topic:
+                continue
             topic_what_prompt = topic_what(input_text,topic)
             topic_what_response = self.llm_model_small.generate_response(topic_what_prompt)
             compressed_memory.add((topic.strip(), topic_what_response[0])) # 将话题和记忆作为元组存储
```
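The same empty-or-bracketed guard is added in two places, skipping junk topics (empty strings and bracketed fragments, again likely CQ-code debris) before they reach the memory graph or the compression model. Note that in `build_memory` the guard lands after `topics = segment_text(topic)`, so segmentation still runs for topics that are then skipped, and the anti-rate-limit delay stays commented out even though its value changes from 5 to 60 seconds. A hedged sketch of the shared predicate; `is_noise_topic` is an illustrative name, not from this repo:

```python
def is_noise_topic(topic: str) -> bool:
    """True for topics this commit skips: empty strings and bracketed fragments."""
    return topic == '' or '[' in topic

# Usage mirroring the two hunks:
# for topic, memory in first_memory:
#     topics = segment_text(topic)
#     if is_noise_topic(topic):   # could be hoisted above segment_text
#         continue
```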
```diff
@@ -278,6 +278,7 @@ def main():
 
         #将记忆加入到图谱中
         for topic, memory in first_memory:
+            # continue
             topics = segment_text(topic)
             print(f"\033[1;34m话题\033[0m: {topic},节点: {topics}, 记忆: {memory}")
             for split_topic in topics:
```