mirror of https://github.com/Mai-with-u/MaiBot.git
feat: expressions that have been manually learned are given a higher usage probability
parent e0b28f9708
commit 63c093af63
@@ -18,6 +18,8 @@ from src.chat.planner_actions.action_manager import ActionManager
 from src.chat.heart_flow.hfc_utils import CycleDetail
 from src.express.expression_learner import expression_learner_manager
 from src.chat.frequency_control.frequency_control import frequency_control_manager
+from src.express.reflect_tracker import reflect_tracker_manager
+from src.express.expression_reflector import expression_reflector_manager
 from src.jargon import extract_and_store_jargon
 from src.person_info.person_info import Person
 from src.plugin_system.base.component_types import EventType, ActionInfo
@@ -397,8 +399,9 @@ class HeartFChatting:
         # ReflectTracker Check
         # Before each reply, check the context once to see whether any reflection question has been answered
         # -------------------------------------------------------------------------
-        from src.express.reflect_tracker import reflect_tracker_manager
 
+        reflector = expression_reflector_manager.get_or_create_reflector(self.stream_id)
+        await reflector.check_and_ask()
         tracker = reflect_tracker_manager.get_tracker(self.stream_id)
         if tracker:
             resolved = await tracker.trigger_tracker()
@@ -406,16 +409,8 @@ class HeartFChatting:
                 reflect_tracker_manager.remove_tracker(self.stream_id)
                 logger.info(f"{self.log_prefix} ReflectTracker resolved and removed.")
 
-        # -------------------------------------------------------------------------
-        # Expression Reflection Check
-        # Check whether an expression-reflection question needs to be asked
-        # -------------------------------------------------------------------------
-        from src.express.expression_reflector import expression_reflector_manager
-        reflector = expression_reflector_manager.get_or_create_reflector(self.stream_id)
-        asyncio.create_task(reflector.check_and_ask())
-
         start_time = time.time()
 
         async with global_prompt_manager.async_message_scope(self.chat_stream.context.get_template_name()):
             asyncio.create_task(self.expression_learner.trigger_learning_for_chat())
             asyncio.create_task(
@@ -285,6 +285,14 @@ class ExpressionConfig(ConfigBase):
     reflect_operator_id: str = ""
     """Expression reflection operator ID"""
 
+    allow_reflect: list[str] = field(default_factory=list)
+    """
+    List of chat stream IDs that are allowed to run expression reflection
+    Format: ["qq:123456:private", "qq:654321:group", ...]
+    Only chat streams in this list will ask questions and track them
+    If the list is empty, every chat stream may run expression reflection (provided reflect = true)
+    """
+
     def _parse_stream_config_to_chat_id(self, stream_config_str: str) -> Optional[str]:
         """
         Parse a stream config string and generate the corresponding chat_id
@@ -63,13 +63,15 @@ def format_create_date(timestamp: float) -> str:
 
 def _compute_weights(population: List[Dict]) -> List[float]:
     """
-    Compute a weight from each expression's count, limited to the range 1~3.
-    The higher the count, the higher the weight, at most 3x the base weight.
+    Compute a weight from each expression's count, limited to the range 1~5.
+    The higher the count, the higher the weight, at most 5x the base weight.
+    If an expression has been checked, its weight is additionally multiplied by 3.
     """
     if not population:
         return []
 
     counts = []
+    checked_flags = []
     for item in population:
         count = item.get("count", 1)
         try:
@@ -77,18 +79,29 @@ def _compute_weights(population: List[Dict]) -> List[float]:
         except (TypeError, ValueError):
             count_value = 1.0
         counts.append(max(count_value, 0.0))
+        # Read the checked flag
+        checked = item.get("checked", False)
+        checked_flags.append(bool(checked))
 
     min_count = min(counts)
     max_count = max(counts)
 
     if max_count == min_count:
-        return [1.0 for _ in counts]
+        base_weights = [1.0 for _ in counts]
+    else:
+        base_weights = []
+        for count_value in counts:
+            # Linear mapping into the [1, 5] range
+            normalized = (count_value - min_count) / (max_count - min_count)
+            base_weights.append(1.0 + normalized * 4.0)  # 1~5
 
+    # If checked, multiply the weight by 3
     weights = []
-    for count_value in counts:
-        # Linear mapping into the [1, 3] range
-        normalized = (count_value - min_count) / (max_count - min_count)
-        weights.append(1.0 + normalized * 2.0)  # 1~3
+    for base_weight, checked in zip(base_weights, checked_flags):
+        if checked:
+            weights.append(base_weight * 3.0)
+        else:
+            weights.append(base_weight)
     return weights
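For reference, here is a minimal worked example of the new weighting scheme (a standalone re-implementation for illustration only, not an import from the project): counts are mapped linearly into [1, 5], and expressions whose checked flag is set get a further 3x boost, which is what gives manually learned expressions their higher selection probability.

from typing import Dict, List

def compute_weights_sketch(population: List[Dict]) -> List[float]:
    # Illustrative mirror of the logic in the hunk above.
    if not population:
        return []
    counts = [max(float(item.get("count", 1)), 0.0) for item in population]
    checked_flags = [bool(item.get("checked", False)) for item in population]
    min_count, max_count = min(counts), max(counts)
    if max_count == min_count:
        base_weights = [1.0 for _ in counts]
    else:
        base_weights = [
            1.0 + (c - min_count) / (max_count - min_count) * 4.0  # linear map into [1, 5]
            for c in counts
        ]
    # checked (manually learned) expressions get a further 3x boost
    return [w * 3.0 if checked else w for w, checked in zip(base_weights, checked_flags)]

population = [
    {"count": 1, "checked": False},   # lowest count  -> weight 1.0
    {"count": 11, "checked": False},  # highest count -> weight 5.0
    {"count": 6, "checked": True},    # mid count, checked -> (1 + 0.5 * 4) * 3 = 9.0
]
print(compute_weights_sketch(population))  # [1.0, 5.0, 9.0]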
@@ -37,6 +37,12 @@ class ExpressionReflector:
             logger.info(f"[Expression Reflection] Operator ID not configured, skipping")
             return False
 
+        # Check whether this chat stream is in the allow list
+        allow_reflect = global_config.expression.allow_reflect
+        if allow_reflect and self.chat_id not in allow_reflect:
+            logger.info(f"[Expression Reflection] Chat stream {self.chat_id} is not in the allow list, skipping")
+            return False
+
         # Check when the last question was asked
         current_time = time.time()
         time_since_last_ask = current_time - self.last_ask_time
@@ -140,6 +140,7 @@ class ExpressionSelector:
                 "source_id": expr.chat_id,
                 "create_date": expr.create_date if expr.create_date is not None else expr.last_active_time,
                 "count": expr.count if getattr(expr, "count", None) is not None else 1,
+                "checked": expr.checked if getattr(expr, "checked", None) is not None else False,
             }
             for expr in style_query
         ]
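The dictionaries built above now carry the checked flag through to _compute_weights. How the resulting weights are consumed by the selector is not shown in this diff; a plausible sketch (the helper name and the use of random.choices are assumptions, not the project's actual API) would be:

import random
from typing import Dict, List

def pick_expressions_sketch(population: List[Dict], weights: List[float], k: int = 5) -> List[Dict]:
    # Weighted sampling: expressions with higher count, and especially checked ones,
    # are proportionally more likely to be selected (sampling with replacement here).
    if not population:
        return []
    return random.choices(population, weights=weights, k=min(k, len(population)))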
@@ -791,92 +791,6 @@ def _query_thinking_back(chat_id: str, question: str) -> Optional[Tuple[bool, st
     return None
 
 
-async def _analyze_question_answer(question: str, answer: str, chat_id: str) -> None:
-    """Asynchronously analyze the category of a question/answer pair and store it in the corresponding system
-
-    Args:
-        question: the question
-        answer: the answer
-        chat_id: chat ID
-    """
-    try:
-        # Use the LLM to classify the category
-        analysis_prompt = f"""请分析以下问题和答案的类别:
-
-问题:{question}
-答案:{answer}
-
-类别说明:
-1. 人物信息:有关某个用户的个体信息(如某人的喜好、习惯、经历等)
-2. 黑话:对特定概念、缩写词、谐音词、自创词的解释(如"yyds"、"社死"等)
-3. 其他:除此之外的其他内容
-
-请输出JSON格式:
-{{
-    "category": "人物信息" | "黑话" | "其他",
-    "jargon_keyword": "如果是黑话,提取关键词(如'yyds'),否则为空字符串",
-    "person_name": "如果是人物信息,提取人物名称,否则为空字符串",
-    "memory_content": "如果是人物信息,提取要存储的记忆内容(简短概括),否则为空字符串"
-}}
-
-只输出JSON,不要输出其他内容:"""
-
-        success, response, _, _ = await llm_api.generate_with_model(
-            analysis_prompt,
-            model_config=model_config.model_task_config.utils,
-            request_type="memory.analyze_qa",
-        )
-
-        if not success:
-            logger.error(f"Failed to analyze question and answer: {response}")
-            return
-
-        # Parse the JSON response
-        try:
-            json_pattern = r"```json\s*(.*?)\s*```"
-            matches = re.findall(json_pattern, response, re.DOTALL)
-
-            if matches:
-                json_str = matches[0]
-            else:
-                json_str = response.strip()
-
-            repaired_json = repair_json(json_str)
-            analysis_result = json.loads(repaired_json)
-
-            category = analysis_result.get("category", "").strip()
-
-            if category == "黑话":
-                # Handle jargon
-                jargon_keyword = analysis_result.get("jargon_keyword", "").strip()
-                if jargon_keyword:
-                    from src.jargon.jargon_miner import store_jargon_from_answer
-
-                    await store_jargon_from_answer(jargon_keyword, answer, chat_id)
-                else:
-                    logger.warning(f"Classified as jargon but no keyword was extracted, question: {question[:50]}...")
-
-            elif category == "人物信息":
-                # Handle person info
-                # person_name = analysis_result.get("person_name", "").strip()
-                # memory_content = analysis_result.get("memory_content", "").strip()
-                # if person_name and memory_content:
-                #     from src.person_info.person_info import store_person_memory_from_answer
-                #     await store_person_memory_from_answer(person_name, memory_content, chat_id)
-                # else:
-                #     logger.warning(f"分析为人物信息但未提取到人物名称或记忆内容,问题: {question[:50]}...")
-                pass  # Feature temporarily disabled
-
-            else:
-                logger.info(f"Category is '其他' (other); not storing, question: {question[:50]}...")
-
-        except Exception as e:
-            logger.error(f"Failed to parse the analysis result: {e}, response: {response[:200]}...")
-
-    except Exception as e:
-        logger.error(f"Exception while analyzing question and answer: {e}")
-
-
 def _store_thinking_back(
     chat_id: str, question: str, context: str, found_answer: bool, answer: str, thinking_steps: List[Dict[str, Any]]
 ) -> None:
@@ -1013,8 +927,6 @@ async def _process_single_question(question: str, chat_id: str, context: str, in
         logger.info(f"ReAct Agent timed out; not storing to the database, question: {question[:50]}...")
 
     if found_answer and answer:
-        # Create an async task to analyze the question and answer
-        asyncio.create_task(_analyze_question_answer(question, answer, chat_id))
         return f"问题:{question}\n答案:{answer}"
 
     return None
@@ -1,5 +1,5 @@
 [inner]
-version = "6.23.0"
+version = "6.23.1"
 
 #---- The following notes are for developers; if you have only deployed 麦麦, you do not need to read them ----
 # If you modify this config file, please increment the version value
@@ -82,6 +82,7 @@ expression_groups = [
 
 reflect = false # Whether to enable expression reflection (the bot proactively asks the administrator whether its expressions are appropriate)
 reflect_operator_id = "" # Expression reflection operator ID, format: platform:id:type (e.g. "qq:123456:private" or "qq:654321:group")
+allow_reflect = [] # List of chat stream IDs allowed to run expression reflection, format: ["qq:123456:private", "qq:654321:group", ...]; only chat streams in this list will ask questions and track them. If the list is empty, all chat streams may run expression reflection (provided reflect = true)
 
 
 [chat] # Chat settings for 麦麦
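As a usage note, a filled-in configuration that enables reflection for a single group chat might look like the following; the IDs reuse the placeholder values from the comments above and are not real:

reflect = true
reflect_operator_id = "qq:123456:private"  # the admin chat that receives the reflection questions
allow_reflect = ["qq:654321:group"]        # only this chat stream will generate and track questions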