diff --git a/src/memos/mem_reader/multi_modal_struct.py b/src/memos/mem_reader/multi_modal_struct.py index a9b108f15..be9f02b22 100644 --- a/src/memos/mem_reader/multi_modal_struct.py +++ b/src/memos/mem_reader/multi_modal_struct.py @@ -13,6 +13,7 @@ from memos.mem_reader.simple_struct import PROMPT_DICT, SimpleStructMemReader from memos.mem_reader.utils import parse_json_result from memos.memories.textual.item import TextualMemoryItem, TreeNodeTextualMemoryMetadata +from memos.templates.mem_reader_prompts import MEMORY_MERGE_PROMPT_EN, MEMORY_MERGE_PROMPT_ZH from memos.templates.tool_mem_prompts import TOOL_TRAJECTORY_PROMPT_EN, TOOL_TRAJECTORY_PROMPT_ZH from memos.types import MessagesType from memos.utils import timed @@ -316,7 +317,6 @@ def _get_llm_response( custom_tags: list[str] | None = None, sources: list | None = None, prompt_type: str = "chat", - related_memories: str | None = None, ) -> dict: """ Override parent method to improve language detection by using actual text content @@ -327,7 +327,6 @@ def _get_llm_response( custom_tags: Optional custom tags sources: Optional list of SourceMessage objects to extract text content from prompt_type: Type of prompt to use ("chat" or "doc") - related_memories: related_memories in the graph Returns: LLM response dictionary @@ -362,10 +361,7 @@ def _get_llm_response( else: template = PROMPT_DICT["chat"][lang] examples = PROMPT_DICT["chat"][f"{lang}_example"] - related_memories_str = related_memories if related_memories is not None else "" - prompt = template.replace("${conversation}", mem_str).replace( - "${reference}", related_memories_str - ) + prompt = template.replace("${conversation}", mem_str) custom_tags_prompt = ( PROMPT_DICT["custom_tags"][lang].replace("{custom_tags}", str(custom_tags)) @@ -398,6 +394,7 @@ def _get_llm_response( ], "summary": mem_str, } + logger.info(f"[MultiModalFine] Task {messages}, Result {response_json}") return response_json def _determine_prompt_type(self, sources: list) -> str: @@ 
-418,6 +415,182 @@ def _determine_prompt_type(self, sources: list) -> str: return prompt_type + def _get_maybe_merged_memory( + self, + extracted_memory_dict: dict, + mem_text: str, + sources: list, + **kwargs, + ) -> dict: + """ + Check if extracted memory should be merged with similar existing memories. + If merge is needed, return merged memory dict with merged_from field. + Otherwise, return original memory dict. + + Args: + extracted_memory_dict: The extracted memory dict from LLM response + mem_text: The memory text content + sources: Source messages for language detection + **kwargs: Additional parameters (merge_similarity_threshold, etc.) + + Returns: + Memory dict (possibly merged) with merged_from field if merged + """ + # If no graph_db or user_name, return original + if not self.graph_db or "user_name" not in kwargs: + return extracted_memory_dict + user_name = kwargs.get("user_name") + + # Detect language + lang = "en" + if sources: + for source in sources: + if hasattr(source, "lang") and source.lang: + lang = source.lang + break + elif isinstance(source, dict) and source.get("lang"): + lang = source.get("lang") + break + if lang is None: + lang = detect_lang(mem_text) + + # Search for similar memories + merge_threshold = kwargs.get("merge_similarity_threshold", 0.3) + + try: + search_results = self.graph_db.search_by_embedding( + vector=self.embedder.embed(mem_text)[0], + top_k=20, + status="activated", + threshold=merge_threshold, + user_name=user_name, + filter={ + "or": [ + {"memory_type": "LongTermMemory"}, + {"memory_type": "UserMemory"}, + {"memory_type": "WorkingMemory"}, + ] + }, + ) + + if not search_results: + # No similar memories found, return original + return extracted_memory_dict + + # Get full memory details + similar_memory_ids = [r["id"] for r in search_results if r.get("id")] + similar_memories_list = [ + self.graph_db.get_node(mem_id, include_embedding=False) + for mem_id in similar_memory_ids + ] + + # Filter out None and 
mode:fast memories + filtered_similar = [] + for mem in similar_memories_list: + if not mem: + continue + mem_metadata = mem.get("metadata", {}) + tags = mem_metadata.get("tags", []) + if isinstance(tags, list) and "mode:fast" in tags: + continue + filtered_similar.append( + { + "id": mem.get("id"), + "memory": mem.get("memory", ""), + } + ) + logger.info( + f"Valid similar memories for {mem_text} is " + f"{len(filtered_similar)}: {filtered_similar}" + ) + + if not filtered_similar: + # No valid similar memories, return original + return extracted_memory_dict + + # Create a temporary TextualMemoryItem for merge check + temp_memory_item = TextualMemoryItem( + memory=mem_text, + metadata=TreeNodeTextualMemoryMetadata( + user_id="", + session_id="", + memory_type=extracted_memory_dict.get("memory_type", "LongTermMemory"), + status="activated", + tags=extracted_memory_dict.get("tags", []), + key=extracted_memory_dict.get("key", ""), + ), + ) + + # Try to merge with LLM + merge_result = self._merge_memories_with_llm( + temp_memory_item, filtered_similar, lang=lang + ) + + if merge_result: + # Return merged memory dict + merged_dict = extracted_memory_dict.copy() + merged_dict["value"] = merge_result.get("value", mem_text) + merged_dict["merged_from"] = merge_result.get("merged_from", []) + logger.info( + f"[MultiModalFine] Merged memory with {len(merged_dict['merged_from'])} existing memories" + ) + return merged_dict + else: + # No merge needed, return original + return extracted_memory_dict + + except Exception as e: + logger.error(f"[MultiModalFine] Error in get_maybe_merged_memory: {e}") + # On error, return original + return extracted_memory_dict + + def _merge_memories_with_llm( + self, + new_memory: TextualMemoryItem, + similar_memories: list[dict], + lang: str = "en", + ) -> dict | None: + """ + Use LLM to merge new memory with similar existing memories. 
+ + Args: + new_memory: The newly extracted memory item + similar_memories: List of similar memories from graph_db (with id and memory fields) + lang: Language code ("en" or "zh") + + Returns: + Merged memory dict with merged_from field, or None if no merge needed + """ + if not similar_memories: + return None + + # Build merge prompt using template + similar_memories_text = "\n".join( + [f"[{mem['id']}]: {mem['memory']}" for mem in similar_memories] + ) + + merge_prompt_template = MEMORY_MERGE_PROMPT_ZH if lang == "zh" else MEMORY_MERGE_PROMPT_EN + merge_prompt = merge_prompt_template.format( + new_memory=new_memory.memory, + similar_memories=similar_memories_text, + ) + + try: + response_text = self.llm.generate([{"role": "user", "content": merge_prompt}]) + merge_result = parse_json_result(response_text) + + if merge_result.get("should_merge", False): + return { + "value": merge_result.get("value", new_memory.memory), + "merged_from": merge_result.get( + "merged_from", [mem["id"] for mem in similar_memories] + ), + } + except Exception as e: + logger.error(f"[MultiModalFine] Error in merge LLM call: {e}") + + return None + def _process_string_fine( self, fast_memory_items: list[TextualMemoryItem], @@ -460,40 +633,9 @@ def _process_one_item(fast_item: TextualMemoryItem) -> list[TextualMemoryItem]: # Determine prompt type based on sources prompt_type = self._determine_prompt_type(sources) - # recall related memories - related_memories = None - memory_ids = [] - if self.graph_db: - if "user_name" in kwargs: - memory_ids = self.graph_db.search_by_embedding( - vector=self.embedder.embed(mem_str)[0], - top_k=20, - status="activated", - user_name=kwargs.get("user_name"), - filter={ - "or": [ - {"memory_type": "LongTermMemory"}, - {"memory_type": "UserMemory"}, - {"memory_type": "WorkingMemory"}, - ] - }, - ) - memory_ids = set({r["id"] for r in memory_ids if r.get("id")}) - related_memories_list = self.graph_db.get_nodes( - list(memory_ids), - include_embedding=False, 
- user_name=kwargs.get("user_name"), - ) - related_memories = "\n".join( - ["{}: {}".format(mem["id"], mem["memory"]) for mem in related_memories_list] - ) - else: - logger.warning("user_name is null when graph_db exists") - + # ========== Stage 1: Normal extraction (without reference) ========== try: - resp = self._get_llm_response( - mem_str, custom_tags, sources, prompt_type, related_memories - ) + resp = self._get_llm_response(mem_str, custom_tags, sources, prompt_type) except Exception as e: logger.error(f"[MultiModalFine] Error calling LLM: {e}") return fine_items @@ -501,49 +643,59 @@ def _process_one_item(fast_item: TextualMemoryItem) -> list[TextualMemoryItem]: if resp.get("memory list", []): for m in resp.get("memory list", []): try: + # Check and merge with similar memories if needed + m_maybe_merged = self._get_maybe_merged_memory( + extracted_memory_dict=m, + mem_text=m.get("value", ""), + sources=sources, + **kwargs, + ) # Normalize memory_type (same as simple_struct) memory_type = ( - m.get("memory_type", "LongTermMemory") + m_maybe_merged.get("memory_type", "LongTermMemory") .replace("长期记忆", "LongTermMemory") .replace("用户记忆", "UserMemory") ) - if "merged_from" in m: - for merged_id in m["merged_from"]: - if merged_id not in memory_ids: - logger.warning("merged id not valid!!!!!") - info_per_item["merged_from"] = m["merged_from"] - # Create fine mode memory item (same as simple_struct) node = self._make_memory_item( - value=m.get("value", ""), + value=m_maybe_merged.get("value", ""), info=info_per_item, memory_type=memory_type, - tags=m.get("tags", []), - key=m.get("key", ""), + tags=m_maybe_merged.get("tags", []), + key=m_maybe_merged.get("key", ""), sources=sources, # Preserve sources from fast item background=resp.get("summary", ""), **extra_kwargs, ) + # Add merged_from to info if present + if "merged_from" in m_maybe_merged: + node.metadata.info = node.metadata.info or {} + node.metadata.info["merged_from"] = m_maybe_merged["merged_from"] 
fine_items.append(node) except Exception as e: logger.error(f"[MultiModalFine] parse error: {e}") elif resp.get("value") and resp.get("key"): try: - if "merged_from" in resp: - for merged_id in resp["merged_from"]: - if merged_id not in memory_ids: - logger.warning("merged id not valid!!!!!") - info_per_item["merged_from"] = resp["merged_from"] - # Create fine mode memory item (same as simple_struct) + # Check and merge with similar memories if needed + resp_maybe_merged = self._get_maybe_merged_memory( + extracted_memory_dict=resp, + mem_text=resp.get("value", "").strip(), + sources=sources, + **kwargs, + ) node = self._make_memory_item( - value=resp.get("value", "").strip(), + value=resp_maybe_merged.get("value", "").strip(), info=info_per_item, memory_type="LongTermMemory", - tags=resp.get("tags", []), - key=resp.get("key", None), + tags=resp_maybe_merged.get("tags", []), + key=resp_maybe_merged.get("key", None), sources=sources, # Preserve sources from fast item background=resp.get("summary", ""), **extra_kwargs, ) + # Add merged_from to info if present + if "merged_from" in resp_maybe_merged: + node.metadata.info = node.metadata.info or {} + node.metadata.info["merged_from"] = resp_maybe_merged["merged_from"] fine_items.append(node) except Exception as e: logger.error(f"[MultiModalFine] parse error: {e}") @@ -694,9 +846,7 @@ def _process_multi_modal_data( @timed def _process_transfer_multi_modal_data( - self, - raw_node: TextualMemoryItem, - custom_tags: list[str] | None = None, + self, raw_node: TextualMemoryItem, custom_tags: list[str] | None = None, **kwargs ) -> list[TextualMemoryItem]: """ Process transfer for multimodal data. 
@@ -720,9 +870,11 @@ def _process_transfer_multi_modal_data( # Part A: call llm in parallel using thread pool with ContextThreadPoolExecutor(max_workers=2) as executor: future_string = executor.submit( - self._process_string_fine, [raw_node], info, custom_tags + self._process_string_fine, [raw_node], info, custom_tags, **kwargs + ) + future_tool = executor.submit( + self._process_tool_trajectory_fine, [raw_node], info, **kwargs ) - future_tool = executor.submit(self._process_tool_trajectory_fine, [raw_node], info) # Collect results fine_memory_items_string_parser = future_string.result() @@ -789,6 +941,7 @@ def fine_transfer_simple_mem( input_memories: list[TextualMemoryItem], type: str, custom_tags: list[str] | None = None, + **kwargs, ) -> list[list[TextualMemoryItem]]: if not input_memories: return [] @@ -799,7 +952,7 @@ def fine_transfer_simple_mem( with ContextThreadPoolExecutor() as executor: futures = [ executor.submit( - self._process_transfer_multi_modal_data, scene_data_info, custom_tags + self._process_transfer_multi_modal_data, scene_data_info, custom_tags, **kwargs ) for scene_data_info in input_memories ] diff --git a/src/memos/mem_reader/simple_struct.py b/src/memos/mem_reader/simple_struct.py index 6f4542c7a..3e33538e0 100644 --- a/src/memos/mem_reader/simple_struct.py +++ b/src/memos/mem_reader/simple_struct.py @@ -228,7 +228,7 @@ def _get_llm_response(self, mem_str: str, custom_tags: list[str] | None) -> dict lang = detect_lang(mem_str) template = PROMPT_DICT["chat"][lang] examples = PROMPT_DICT["chat"][f"{lang}_example"] - prompt = template.replace("${conversation}", mem_str).replace("${reference}", "") + prompt = template.replace("${conversation}", mem_str) custom_tags_prompt = ( PROMPT_DICT["custom_tags"][lang].replace("{custom_tags}", str(custom_tags)) @@ -361,7 +361,7 @@ def _build_fast_node(w): return chat_read_nodes def _process_transfer_chat_data( - self, raw_node: TextualMemoryItem, custom_tags: list[str] | None = None + self, raw_node: 
TextualMemoryItem, custom_tags: list[str] | None = None, **kwargs ): raw_memory = raw_node.memory response_json = self._get_llm_response(raw_memory, custom_tags) @@ -669,6 +669,7 @@ def fine_transfer_simple_mem( input_memories: list[TextualMemoryItem], type: str, custom_tags: list[str] | None = None, + **kwargs, ) -> list[list[TextualMemoryItem]]: if not input_memories: return [] @@ -685,7 +686,7 @@ def fine_transfer_simple_mem( # Process Q&A pairs concurrently with context propagation with ContextThreadPoolExecutor() as executor: futures = [ - executor.submit(processing_func, scene_data_info, custom_tags) + executor.submit(processing_func, scene_data_info, custom_tags, **kwargs) for scene_data_info in input_memories ] for future in concurrent.futures.as_completed(futures): @@ -889,6 +890,6 @@ def _process_doc_data(self, scene_data_info, info, **kwargs): return doc_nodes def _process_transfer_doc_data( - self, raw_node: TextualMemoryItem, custom_tags: list[str] | None = None + self, raw_node: TextualMemoryItem, custom_tags: list[str] | None = None, **kwargs ): raise NotImplementedError diff --git a/src/memos/mem_scheduler/general_scheduler.py b/src/memos/mem_scheduler/general_scheduler.py index 9b19e9ecb..8755de281 100644 --- a/src/memos/mem_scheduler/general_scheduler.py +++ b/src/memos/mem_scheduler/general_scheduler.py @@ -877,6 +877,7 @@ def _process_memories_with_reader( memory_items, type="chat", custom_tags=custom_tags, + user_name=user_name, ) except Exception as e: logger.warning(f"{e}: Fail to transfer mem: {memory_items}") @@ -897,6 +898,38 @@ def _process_memories_with_reader( f"Added {len(enhanced_mem_ids)} enhanced memories: {enhanced_mem_ids}" ) + # Mark merged_from memories as archived when provided in memory metadata + if self.mem_reader and self.mem_reader.graph_db: + for memory in flattened_memories: + merged_from = (memory.metadata.info or {}).get("merged_from") + if merged_from: + old_ids = ( + merged_from + if isinstance(merged_from, (list | 
tuple | set)) + else [merged_from] + ) + for old_id in old_ids: + try: + self.mem_reader.graph_db.update_node( + str(old_id), {"status": "archived"}, user_name=user_name + ) + logger.info( + f"[Scheduler] Archived merged_from memory: {old_id}" + ) + except Exception as e: + logger.warning( + f"[Scheduler] Failed to archive merged_from memory {old_id}: {e}" + ) + else: + # Check if any memory has merged_from but graph_db is unavailable + has_merged_from = any( + (m.metadata.info or {}).get("merged_from") for m in flattened_memories + ) + if has_merged_from: + logger.warning( + "[Scheduler] merged_from provided but graph_db is unavailable; skip archiving." + ) + # LOGGING BLOCK START # This block is replicated from _add_message_consumer to ensure consistent logging cloud_env = is_cloud_env() diff --git a/src/memos/multi_mem_cube/single_cube.py b/src/memos/multi_mem_cube/single_cube.py index 6aea6997f..ffe8fe989 100644 --- a/src/memos/multi_mem_cube/single_cube.py +++ b/src/memos/multi_mem_cube/single_cube.py @@ -833,29 +833,32 @@ def _process_text_mem( ) # Mark merged_from memories as archived when provided in add_req.info - for memory in flattened_local: - merged_from = (memory.metadata.info or {}).get("merged_from") - if merged_from: - old_ids = ( - merged_from if isinstance(merged_from, (list | tuple | set)) else [merged_from] - ) - if self.mem_reader and self.mem_reader.graph_db: - for old_id in old_ids: - try: - self.mem_reader.graph_db.update_node( - str(old_id), {"status": "archived"} - ) - self.logger.info( - f"[SingleCubeView] Archived merged_from memory: {old_id}" - ) - except Exception as e: - self.logger.warning( - f"[SingleCubeView] Failed to archive merged_from memory {old_id}: {e}" - ) - else: - self.logger.warning( - "[SingleCubeView] merged_from provided but graph_db is unavailable; skip archiving." 
+ if sync_mode == "sync" and extract_mode == "fine": + for memory in flattened_local: + merged_from = (memory.metadata.info or {}).get("merged_from") + if merged_from: + old_ids = ( + merged_from + if isinstance(merged_from, (list | tuple | set)) + else [merged_from] ) + if self.mem_reader and self.mem_reader.graph_db: + for old_id in old_ids: + try: + self.mem_reader.graph_db.update_node( + str(old_id), {"status": "archived"} + ) + self.logger.info( + f"[SingleCubeView] Archived merged_from memory: {old_id}" + ) + except Exception as e: + self.logger.warning( + f"[SingleCubeView] Failed to archive merged_from memory {old_id}: {e}" + ) + else: + self.logger.warning( + "[SingleCubeView] merged_from provided but graph_db is unavailable; skip archiving." + ) text_memories = [ { diff --git a/src/memos/templates/mem_reader_prompts.py b/src/memos/templates/mem_reader_prompts.py index f2d15cfb8..4a451c842 100644 --- a/src/memos/templates/mem_reader_prompts.py +++ b/src/memos/templates/mem_reader_prompts.py @@ -28,8 +28,7 @@ "key": , "memory_type": , "value": , - "tags": , - "merged_from": + "tags": }, ... ], @@ -42,7 +41,7 @@ ${custom_tags_prompt} -Example 1 — No reference memories: +Example: Conversation: user: [June 26, 2025 at 3:00 PM]: Hi Jerry! Yesterday at 3 PM I had a meeting with my team about the new project. assistant: Oh Tom! Do you think the team can finish by December 15? @@ -70,7 +69,7 @@ "summary": "Tom is currently focused on managing a new project with a tight schedule. After a team meeting on June 25, 2025, he realized the original deadline of December 15 might not be feasible due to backend delays. Concerned about insufficient testing time, he welcomed Jerry’s suggestion of proposing an extension. Tom plans to raise the idea of shifting the deadline to January 5, 2026 in the next morning’s meeting. His actions reflect both stress about timelines and a proactive, team-oriented problem-solving approach." 
} -Example 2 — No reference memories: +Dialogue: assistant: [10:30 AM, August 15, 2025]: The book Deep Work you mentioned is indeed very suitable for your current situation. The book explains … (omitted). The author suggests setting aside 2–3 hours of focused work blocks each day and turning off all notifications during that time. Considering that you need to submit a report next week, you could try using the 9:00–11:00 AM time slot for focused work. @@ -90,7 +89,7 @@ Note: When the dialogue contains only assistant messages, phrasing such as “assistant recommended” or “assistant suggested” should be used, rather than incorrectly attributing the content to the user’s statements or plans. -Example 3 — No reference memories (note: if the user’s language is Chinese, output must also be Chinese): +Another Example in Chinese (注意: 当user的语言为中文时,你就需要也输出中文): { "memory list": [ { @@ -104,54 +103,11 @@ "summary": "Tom 目前专注于管理一个进度紧张的新项目..." } -Note: We may provide partial reference memories. If newly extracted memories substantially overlap with reference memories, merge them and include a `merged_from` field indicating the merged memory IDs. -If newly extracted memories are strongly related to reference memories, you may appropriately reference them during extraction (but never fabricate memories — only reference them when you are very confident). -If no reference memories are provided, or if they are unrelated to the new memories, simply ignore them. - -Example 4 — With reference memories: -Dialogue: -user: [January 13, 2026] Winter skiing is so much fun! I’m planning to go skiing again with friends this weekend! -assistant: [January 13, 2026] That sounds great! -user: [January 14, 2026] You remember my ski buddy, right? His name is Tom. We ski together every year — including this week! 
- -Reference memories: -[xxxx-xxxx-xxxx-xxxx-01]: The user expressed a strong passion for skiing on December 29, 2025 -[xxxx-xxxx-xxxx-xxxx-06]: The user’s ski buddy is named Tom -[xxxx-xxxx-xxxx-xxxx-11]: Niseko is a ski destination the user has visited multiple times; the user met Tom at Hirafu Ski Resort and became close friends -[xxxx-xxxx-xxxx-xxxx-12]: On January 1, 2025, the user discussed skiing equipment with the assistant and planned to buy a new ski backpack - -Output: -{ - "memory list": [ - { - "key": "User's winter skiing plan", - "memory_type": "UserMemory", - "value": "On January 13, 2026, the user planned to go skiing again over the weekend with their friend Tom.", - "tags": ["skiing", "sports preference", "plan", "winter activity"] - }, - { - "key": "User's ski partner is named Tom", - "memory_type": "UserMemory", - "value": "On January 14, 2026, the user again mentioned their ski partner Tom and further explained that they ski together every year. This statement reinforces their long-term and stable skiing partnership and adds new information about its regular annual pattern.", - "tags": ["interpersonal relationship", "ski partner", "long-term habit"], - "merged_from": [ - "xxxx-xxxx-xxxx-xxxx-06", - "xxxx-xxxx-xxxx-xxxx-11" - ] - } - ], - "summary": "The user recently reinforced their strong passion for skiing and, on January 13, 2026, explicitly stated that winter skiing brings them great joy and that they planned to ski again with a friend over the weekend. This indicates that skiing remains a highly significant activity in the user’s life. Additionally, on January 14, 2026, the user elaborated on their long-term relationship with their ski partner Tom, emphasizing that they ski together every year. This further solidifies the importance of this interpersonal relationship in the user’s personal experiences." -} +Always respond in the same language as the conversation. 
-Your task: -Dialogue to be extracted: +Conversation: ${conversation} -Reference memories: -${reference} - -Always respond in the same language as the conversation. - Your Output:""" SIMPLE_STRUCT_MEM_READER_PROMPT_ZH = """您是记忆提取专家。 @@ -187,8 +143,7 @@ "key": <字符串,唯一且简洁的记忆标题>, "memory_type": <字符串,"LongTermMemory" 或 "UserMemory">, "value": <详细、独立且无歧义的记忆陈述——若输入对话为英文,则用英文;若为中文,则用中文>, - "tags": <相关主题关键词列表(例如,["截止日期", "团队", "计划"])>, - "merged_from": <需要被合并的参考记忆列表,当没有提供参考记忆时,不需要输出这个字段 > + "tags": <相关主题关键词列表(例如,["截止日期", "团队", "计划"])> }, ... ], @@ -201,7 +156,7 @@ ${custom_tags_prompt} -示例1-无参考记忆: +示例: 对话: user: [2025年6月26日下午3:00]:嗨Jerry!昨天下午3点我和团队开了个会,讨论新项目。 assistant: 哦Tom!你觉得团队能在12月15日前完成吗? @@ -228,7 +183,7 @@ "summary": "Tom目前正专注于管理一个进度紧张的新项目。在2025年6月25日的团队会议后,他意识到原定2025年12月15日的截止日期可能无法实现,因为后端会延迟。由于担心测试时间不足,他接受了Jerry提出的延期建议。Tom计划在次日早上的会议上提出将截止日期推迟至2026年1月5日。他的行为反映出对时间线的担忧,以及积极、以团队为导向的问题解决方式。" } -示例2-无参考记忆: +对话: assistant: [2025年8月15日上午10:30]: 你提到的那本《深度工作》确实很适合你现在的情况。这本书讲了......(略),作者建议每天留出2-3 小时的专注时间块,期间关闭所有通知。考虑到你下周要交的报告,可以试试早上9点到11点这个时段。 @@ -247,73 +202,28 @@ } 注意:当对话仅有助手消息时,应使用"助手推荐"、"助手建议"等表述,而非将其错误归因为用户的陈述或计划。 -示例3-无参考记忆(注意:当用户语言为中文时,您也需输出中文): +另一个中文示例(注意:当用户语言为中文时,您也需输出中文): { "memory list": [ { "key": "项目会议", "memory_type": "LongTermMemory", "value": "在2025年6月25日下午3点,Tom与团队开会讨论了新项目,涉及时间表,并提出了对12月15日截止日期可行性的担忧。", - "tags": ["项目", "时间表", "会议", "截止日期"], - "merged_from": [ - "xxxx-xxxx-xxxx-xxxx-xxx", - "xxxx-xxxx-xxxx-xxxx-xx", - ], + "tags": ["项目", "时间表", "会议", "截止日期"] }, ... ], "summary": "Tom 目前专注于管理一个进度紧张的新项目..." } -注意,我们可能给出部分参考记忆,这部分记忆如果和新添加的记忆大量重复,合并记忆,并在输入中多一个`merged_from`字段指明合并的记忆; -新添加的记忆如果和参考记忆有强关联,可以在提取时适当参考(但一定不要捏造记忆,十分有把握再进行参考); -如果没有给出参考记忆、或参考记忆和新添加的记忆无关,直接忽略就好。 +请始终使用与对话相同的语言进行回复。 -示例4-带参考记忆: 对话: -user: [2026年1月13日] 冬天滑雪真的太快乐了!我打算这周末和朋友再滑一次! -assistant:[2026年1月13日] 听起来就很棒! -user: [2026年1月14日] 你还记得我的滑雪搭子吧?他叫Tom,我们每年都一起滑雪!这周也是! 
- -参考记忆: -[xxxx-xxxx-xxxx-xxxx-01]: 用户在2025年12月29日表达了对滑雪的狂热喜爱 -[xxxx-xxxx-xxxx-xxxx-06]: 用户的滑雪搭子叫Tom -[xxxx-xxxx-xxxx-xxxx-11]: 二世谷是用户多次去过的滑雪胜地,用户在比罗夫滑雪场认识了Tom并成为好朋友 -[xxxx-xxxx-xxxx-xxxx-12]: 用户2025年1月1日和助手讨论了滑雪装备,打算新买一个滑雪背包。 - -输出: -{ - "memory list": [ - { - "key": "用户冬季滑雪计划", - "memory_type": "UserMemory", - "value": "用户在2026年1月13日计划在周末与朋友Tom再次进行滑雪活动。", - "tags": ["滑雪", "运动偏好", "计划", "冬季活动"], - }, - { - "key": "用户的滑雪伙伴叫Tom", - "memory_type": "UserMemory", - "value": "用户在2026年1月14日再次提到其滑雪搭子Tom,并进一步说明他们每年都会一起滑雪。这一描述强化了双方长期稳定的滑雪伙伴关系,在原有记忆基础上补充了新的时间规律性信息。", - "tags": ["人际关系", "滑雪搭子", "长期习惯"], - "merged_from": [ - "xxxx-xxxx-xxxx-xxxx-06", - "xxxx-xxxx-xxxx-xxxx-11", - ], - } - ], - "summary": "用户近期再次强化了自己对滑雪的热爱,并在2026年1月13日明确表示冬季滑雪带来极大的快乐,同时计划于当周周末与朋友再度滑雪。这表明滑雪对用户而言仍然是一项高度重要的活动。此外,用户在2026年1月14日补充了关于其滑雪伙伴Tom的长期关系细节,强调两人每年都会结伴滑雪,进一步巩固了此人际关系在用户生活中的重要性。" -} - -您的任务: -待提取的对话: ${conversation} -参考记忆: -${reference} - -请始终使用与对话相同的语言进行回复。 您的输出:""" + SIMPLE_STRUCT_DOC_READER_PROMPT = """You are an expert text analyst for a search and retrieval system. Your task is to process a document chunk and generate a single, structured JSON object. @@ -957,10 +867,103 @@ Important: Output **only** the JSON. No extra text. """ +MEMORY_MERGE_PROMPT_EN = """You are a memory consolidation expert. Given a new memory and similar existing memories, decide if they should be merged. 
+
+Example:
+New memory:
+The user’s name is Tom, the user likes skiing, and plans to go skiing this weekend
+
+Similar existing memories:
+xxxx-xxxx-xxxx-xxxx-01: The user’s name is Tom
+xxxx-xxxx-xxxx-xxxx-10: The user likes skiing
+xxxx-xxxx-xxxx-xxxx-11: The user lives by the sea
+
+Expected output:
+{{
+"value": "The user’s name is Tom, the user likes skiing",
+"merged_from": ["xxxx-xxxx-xxxx-xxxx-01", "xxxx-xxxx-xxxx-xxxx-10"],
+"should_merge": true
+}}
+
+New memory:
+The user is going to attend a party on Sunday
+
+Similar existing memories:
+xxxx-xxxx-xxxx-xxxx-01: The user read a book yesterday
+
+Expected output:
+{{
+"should_merge": false
+}}
+
+If the new memory substantially overlaps with or complements the existing memories, merge them into a single consolidated memory and return a JSON object with:
+- "value": the merged memory content (preserving all unique information)
+- "merged_from": list of IDs from similar_memories that were merged
+- "should_merge": true
+
+If the new memory is distinct and should remain separate, return:
+- "should_merge": false
+
+New Memory:
+{new_memory}
+
+Similar Existing Memories:
+{similar_memories}
+
+Return ONLY a valid JSON object, nothing else."""
+
+MEMORY_MERGE_PROMPT_ZH = """你是一个记忆整合专家。给定一个新记忆和相似的现有记忆,判断它们是否应该合并。
+
+示例:
+新记忆:
+用户的名字是Tom,用户喜欢滑雪,并计划周末去滑雪
+
+相似的现有记忆:
+xxxx-xxxx-xxxx-xxxx-01: 用户的名字是Tom
+xxxx-xxxx-xxxx-xxxx-10: 用户喜欢滑雪
+xxxx-xxxx-xxxx-xxxx-11: 用户住在海边
+
+应该的返回值:
+{{
+    "value": "用户的名字是Tom,用户喜欢滑雪",
+    "merged_from": ["xxxx-xxxx-xxxx-xxxx-01", "xxxx-xxxx-xxxx-xxxx-10"],
+    "should_merge": true
+}}
+
+新记忆:
+用户周天要参加一个聚会
+
+相似的现有记忆:
+xxxx-xxxx-xxxx-xxxx-01: 用户昨天读了一本书
+
+应该的返回值:
+{{
+    "should_merge": false
+}}
+
+
+如果新记忆与现有记忆大量重叠或互补,将它们合并为一个整合的记忆,并返回一个JSON对象:
+- "value": 合并后的记忆内容(保留所有独特信息)
+- "merged_from": 被合并的相似记忆ID列表
+- "should_merge": true
+
+如果新记忆是独特的,应该保持独立,返回:
+- "should_merge": false
+
+新记忆:
+{new_memory}
+
+相似的现有记忆:
+{similar_memories}
+
+只返回有效的JSON对象,不要其他内容。"""
+
 # Prompt mapping for specialized
tasks (e.g., hallucination filtering) PROMPT_MAPPING = { "hallucination_filter": SIMPLE_STRUCT_HALLUCINATION_FILTER_PROMPT, "rewrite": SIMPLE_STRUCT_REWRITE_MEMORY_PROMPT, "rewrite_user_only": SIMPLE_STRUCT_REWRITE_MEMORY_USER_ONLY_PROMPT, "add_before_search": SIMPLE_STRUCT_ADD_BEFORE_SEARCH_PROMPT, + "memory_merge_en": MEMORY_MERGE_PROMPT_EN, + "memory_merge_zh": MEMORY_MERGE_PROMPT_ZH, }