尝试减少不同群之间的同义词混淆,并允许用户将指定群组的记忆设置为私有。

用户将指定群组的记忆设置为私有,会导致可用的记忆语料严重减少;而未设置私有的群组记忆则会对所有群聊开放。
因此,要么调整其他记忆配置以增强学习/记忆能力,要么增加单一群组的信息流数量,或者增加公开记忆的信息流数量。

记忆检索时会按以下优先级返回记忆:
1. 当前群组的记忆(如果群聊属于某个群组)
2. 当前群聊的记忆(如果不属于任何群组)
3. 公共记忆(无群组/群聊标识的记忆)
4. 其他非私有群组的记忆
5. 跨群记忆连接
同一群组内的主题使用较强连接(相似度×10)
跨群组的相似主题使用较弱连接(相似度×5)
相同群组/群聊的相似主题会获得20%的相似度加成
pull/273/head
Cindy-Master 2025-03-12 17:55:38 +08:00
parent 6ff1601722
commit 1e3ebdcdf0
4 changed files with 18 additions and 28 deletions

View File

@ -135,7 +135,7 @@ class ChatBot:
for pattern in global_config.ban_msgs_regex:
if re.search(pattern, message.raw_message):
logger.info(
f"[{chat.group_info.group_name if chat.group_info.group_id else '私聊'}]{userinfo.user_nickname}:{message.raw_message}"
f"[{chat.group_info.group_name if chat.group_info.group_id else '私聊'}]{message.user_nickname}:{message.raw_message}"
)
logger.info(f"[正则表达式过滤]消息匹配到{pattern}filtered")
return

View File

@ -6,8 +6,6 @@ import random
import time
import traceback
from typing import Optional, Tuple
from PIL import Image
import io
from loguru import logger
from nonebot import get_driver
@ -194,11 +192,11 @@ class EmojiManager:
logger.error(f"获取标签失败: {str(e)}")
return None
async def _check_emoji(self, image_base64: str, image_format: str) -> str:
async def _check_emoji(self, image_base64: str) -> str:
try:
prompt = f'这是一个表情包,请回答这个表情包是否满足\"{global_config.EMOJI_CHECK_PROMPT}\"的要求,是则回答是,否则回答否,不要出现任何其他内容'
content, _ = await self.vlm.generate_response_for_image(prompt, image_base64, image_format)
content, _ = await self.vlm.generate_response_for_image(prompt, image_base64)
logger.debug(f"输出描述: {content}")
return content
@ -239,7 +237,7 @@ class EmojiManager:
image_bytes = base64.b64decode(image_base64)
image_hash = hashlib.md5(image_bytes).hexdigest()
image_format = Image.open(io.BytesIO(image_bytes)).format.lower()
# 检查是否已经注册过
existing_emoji = self.db['emoji'].find_one({'filename': filename})
description = None
@ -280,7 +278,7 @@ class EmojiManager:
if global_config.EMOJI_CHECK:
check = await self._check_emoji(image_base64, image_format)
check = await self._check_emoji(image_base64)
if '' not in check:
os.remove(image_path)
logger.info(f"描述: {description}")

View File

@ -4,8 +4,6 @@ import time
import aiohttp
import hashlib
from typing import Optional, Union
from PIL import Image
import io
from loguru import logger
from nonebot import get_driver
@ -121,7 +119,6 @@ class ImageManager:
# 计算哈希值
image_hash = hashlib.md5(image_bytes).hexdigest()
image_format = Image.open(io.BytesIO(image_bytes)).format.lower()
# 查重
existing = self.db.images.find_one({'hash': image_hash})
@ -130,7 +127,7 @@ class ImageManager:
# 生成文件名和路径
timestamp = int(time.time())
filename = f"{timestamp}_{image_hash[:8]}.{image_format}"
filename = f"{timestamp}_{image_hash[:8]}.jpg"
file_path = os.path.join(self.IMAGE_DIR, filename)
# 保存文件
@ -241,7 +238,6 @@ class ImageManager:
# 计算图片哈希
image_bytes = base64.b64decode(image_base64)
image_hash = hashlib.md5(image_bytes).hexdigest()
image_format = Image.open(io.BytesIO(image_bytes)).format.lower()
# 查询缓存的描述
cached_description = self._get_description_from_db(image_hash, 'emoji')
@ -251,13 +247,13 @@ class ImageManager:
# 调用AI获取描述
prompt = "这是一个表情包,使用中文简洁的描述一下表情包的内容和表情包所表达的情感"
description, _ = await self._llm.generate_response_for_image(prompt, image_base64, image_format)
description, _ = await self._llm.generate_response_for_image(prompt, image_base64)
# 根据配置决定是否保存图片
if global_config.EMOJI_SAVE:
# 生成文件名和路径
timestamp = int(time.time())
filename = f"{timestamp}_{image_hash[:8]}.{image_format}"
filename = f"{timestamp}_{image_hash[:8]}.jpg"
file_path = os.path.join(self.IMAGE_DIR, 'emoji',filename)
try:
@ -297,7 +293,6 @@ class ImageManager:
# 计算图片哈希
image_bytes = base64.b64decode(image_base64)
image_hash = hashlib.md5(image_bytes).hexdigest()
image_format = Image.open(io.BytesIO(image_bytes)).format.lower()
# 查询缓存的描述
cached_description = self._get_description_from_db(image_hash, 'image')
@ -307,7 +302,7 @@ class ImageManager:
# 调用AI获取描述
prompt = "请用中文描述这张图片的内容。如果有文字请把文字都描述出来。并尝试猜测这个图片的含义。最多200个字。"
description, _ = await self._llm.generate_response_for_image(prompt, image_base64, image_format)
description, _ = await self._llm.generate_response_for_image(prompt, image_base64)
print(f"描述是{description}")
@ -319,7 +314,7 @@ class ImageManager:
if global_config.EMOJI_SAVE:
# 生成文件名和路径
timestamp = int(time.time())
filename = f"{timestamp}_{image_hash[:8]}.{image_format}"
filename = f"{timestamp}_{image_hash[:8]}.jpg"
file_path = os.path.join(self.IMAGE_DIR,'image', filename)
try:

View File

@ -104,7 +104,6 @@ class LLM_request:
endpoint: str,
prompt: str = None,
image_base64: str = None,
image_format: str = None,
payload: dict = None,
retry_policy: dict = None,
response_handler: callable = None,
@ -116,7 +115,6 @@ class LLM_request:
endpoint: API端点路径 ( "chat/completions")
prompt: prompt文本
image_base64: 图片的base64编码
image_format: 图片格式
payload: 请求体数据
retry_policy: 自定义重试策略
response_handler: 自定义响应处理器
@ -153,7 +151,7 @@ class LLM_request:
# 构建请求体
if image_base64:
payload = await self._build_payload(prompt, image_base64, image_format)
payload = await self._build_payload(prompt, image_base64)
elif payload is None:
payload = await self._build_payload(prompt)
@ -174,7 +172,7 @@ class LLM_request:
if response.status == 413:
logger.warning("请求体过大,尝试压缩...")
image_base64 = compress_base64_image_by_scale(image_base64)
payload = await self._build_payload(prompt, image_base64, image_format)
payload = await self._build_payload(prompt, image_base64)
elif response.status in [500, 503]:
logger.error(f"错误码: {response.status} - {error_code_mapping.get(response.status)}")
raise RuntimeError("服务器负载过高模型恢复失败QAQ")
@ -296,7 +294,7 @@ class LLM_request:
new_params["max_completion_tokens"] = new_params.pop("max_tokens")
return new_params
async def _build_payload(self, prompt: str, image_base64: str = None, image_format: str = None) -> dict:
async def _build_payload(self, prompt: str, image_base64: str = None) -> dict:
"""构建请求体"""
# 复制一份参数,避免直接修改 self.params
params_copy = await self._transform_parameters(self.params)
@ -308,7 +306,7 @@ class LLM_request:
"role": "user",
"content": [
{"type": "text", "text": prompt},
{"type": "image_url", "image_url": {"url": f"data:image/{image_format.lower()};base64,{image_base64}"}}
{"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{image_base64}"}}
]
}
],
@ -393,14 +391,13 @@ class LLM_request:
)
return content, reasoning_content
async def generate_response_for_image(self, prompt: str, image_base64: str, image_format: str) -> Tuple[str, str]:
async def generate_response_for_image(self, prompt: str, image_base64: str) -> Tuple[str, str]:
"""根据输入的提示和图片生成模型的异步响应"""
content, reasoning_content = await self._execute_request(
endpoint="/chat/completions",
prompt=prompt,
image_base64=image_base64,
image_format=image_format
image_base64=image_base64
)
return content, reasoning_content