Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
31 commits
Select commit Hold shift + click to select a range
f100c1d
feat: timer false
Dec 10, 2025
acfde52
fix: conflict
Dec 11, 2025
c38ed1c
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Dec 11, 2025
a70a4e6
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Dec 11, 2025
dddbfb6
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Dec 17, 2025
8bfa0d3
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Dec 22, 2025
e682526
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Dec 23, 2025
8fc1f27
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Dec 24, 2025
286fb72
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Jan 6, 2026
9d9e0b1
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Jan 7, 2026
de44b80
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Jan 8, 2026
51a782b
fix: knowledge base adopt raw text (#836)
whipser030 Jan 8, 2026
7ffdb52
Feat/optimize cloud service api (#839)
Wang-Daoji Jan 8, 2026
5f811d4
Feat/fix playground bug (#841)
Wang-Daoji Jan 8, 2026
bd0c351
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Jan 8, 2026
8b30a44
refactor&fix: fix a range of bugs in scheduler and revise fine add ap…
tangg555 Jan 9, 2026
cfd9105
feat: delete result
Jan 12, 2026
f3ae66f
Merge branch 'dev' of github.com:MemTensor/MemOS into dev
Jan 12, 2026
d46dbfd
Feat/merge dev 0112 (#854)
CarltonXiang Jan 12, 2026
95d7512
feat: delete result (#852)
CaralHsi Jan 12, 2026
1b853e1
fix: unuse rerank (#855)
whipser030 Jan 13, 2026
893a593
fix: backtrack knowledge retrieval (#857)
whipser030 Jan 13, 2026
c83107b
docs: fix server start cmd
Nyakult Jan 13, 2026
68c9f5a
squashed commit
Jan 13, 2026
fd0f2ad
modify get tool memory, modify search tool memory field
Jan 13, 2026
85c6a63
chore: update python-tests.yml
Jan 13, 2026
4a55da4
Feat/tool mem related (#864)
CarltonXiang Jan 13, 2026
e3deda0
docs: fix server start cmd (#858)
CaralHsi Jan 14, 2026
e4c67a4
fix: Qdrant empty when using neo4j-community (#843)
OhhhhPi Jan 14, 2026
0a9398d
fix: use knowledge embedding score rerank (#867)
whipser030 Jan 14, 2026
dc103a3
fix: optimization_and_fix (#868)
Wang-Daoji Jan 14, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .github/workflows/python-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -11,12 +11,14 @@ on:
branches:
- "main"
- "dev"
- "dev*"
- "feat/*"
- "test"
pull_request:
branches:
- "main"
- "dev"
- "dev*"
- "feat/*"
- "test"

Expand Down
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -204,6 +204,7 @@ cython_debug/
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
.idea/
.trae

# VSCode
.vscode*
Expand Down
1 change: 1 addition & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -211,6 +211,7 @@ Get Free API: [Try API](https://memos-dashboard.openmem.net/quickstart/?source=g
- Launch via the uvicorn command line interface (CLI)
###### Tips: Please ensure that Neo4j and Qdrant are running before executing the following command.
```bash
cd src
uvicorn memos.api.server_api:app --host 0.0.0.0 --port 8001 --workers 1
```
##### For detailed integration steps, see the [`CLI Reference`](https://docs.openmem.net/open_source/getting_started/rest_api_server/#method-3client-install-with-CLI).
Expand Down
1 change: 1 addition & 0 deletions docker/.env.example
Original file line number Diff line number Diff line change
Expand Up @@ -123,6 +123,7 @@ API_SCHEDULER_ON=true
API_SEARCH_WINDOW_SIZE=5
# Specify how many rounds of previous conversations (history) to retrieve and consider during the 'hybrid search' (fast search+asynchronous fine search). This helps provide context aware search results
API_SEARCH_HISTORY_TURNS=5
MEMSCHEDULER_USE_REDIS_QUEUE=false

## Graph / vector stores
# Neo4j database selection mode
Expand Down
8 changes: 5 additions & 3 deletions examples/mem_scheduler/quick_start_examples.py
Original file line number Diff line number Diff line change
Expand Up @@ -146,7 +146,9 @@ def kv_cache_only():

def run_scheduler_example():
# 使用 MemScheduler 加载主 MOS(Memory-Oriented System)配置文件
config = parse_yaml("./examples/data/config/mem_scheduler/memos_config_w_scheduler.yaml")
config = parse_yaml(
f"{BASE_DIR}/examples/data/config/mem_scheduler/memos_config_w_scheduler.yaml"
)
# 将解析出的配置字典传入 MOSConfig 构造器, 构建配置对象
mos_config = MOSConfig(**config)
# 使用配置对象初始化 MOS 系统实例
Expand All @@ -159,12 +161,12 @@ def run_scheduler_example():

# 从 YAML 文件加载 MemCube(记忆立方体)的通用配置
config = GeneralMemCubeConfig.from_yaml_file(
"./examples/data/config/mem_scheduler/mem_cube_config.yaml"
f"{BASE_DIR}/examples/data/config/mem_scheduler/mem_cube_config.yaml"
)
# 定义 MemCube 的唯一标识符
mem_cube_id = "mem_cube_5"
# 定义 MemCube 的本地存储路径(路径中包含用户 ID 和 MemCube ID)
mem_cube_name_or_path = f"./outputs/mem_scheduler/{user_id}/{mem_cube_id}"
mem_cube_name_or_path = f"{BASE_DIR}/outputs/mem_scheduler/{user_id}/{mem_cube_id}"

# 如果该路径已存在, 则先删除旧目录
if Path(mem_cube_name_or_path).exists():
Expand Down
2 changes: 1 addition & 1 deletion examples/mem_scheduler/scheduler_for_async_tasks.py
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ def submit_tasks():
TEST_HANDLER_LABEL = "test_handler"
mem_scheduler.register_handlers({TEST_HANDLER_LABEL: my_test_handler})

# 10s to restart
# 5s to restart
mem_scheduler.orchestrator.tasks_min_idle_ms[TEST_HANDLER_LABEL] = 5_000

tmp_dir = Path("./tmp")
Expand Down
114 changes: 112 additions & 2 deletions src/memos/api/handlers/formatters_handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,13 @@

from typing import Any

from memos.log import get_logger
from memos.templates.instruction_completion import instruct_completion


logger = get_logger(__name__)


def to_iter(running: Any) -> list[Any]:
"""
Normalize running tasks to a list of task objects.
Expand All @@ -29,7 +33,9 @@ def to_iter(running: Any) -> list[Any]:
return list(running) if running else []


def format_memory_item(memory_data: Any, include_embedding: bool = False) -> dict[str, Any]:
def format_memory_item(
memory_data: Any, include_embedding: bool = False, save_sources: bool = True
) -> dict[str, Any]:
"""
Format a single memory item for API response.

Expand All @@ -49,7 +55,8 @@ def format_memory_item(memory_data: Any, include_embedding: bool = False) -> dic
memory["ref_id"] = ref_id
if not include_embedding:
memory["metadata"]["embedding"] = []
memory["metadata"]["sources"] = []
if not save_sources:
memory["metadata"]["sources"] = []
memory["metadata"]["usage"] = []
memory["metadata"]["ref_id"] = ref_id
memory["metadata"]["id"] = memory_id
Expand Down Expand Up @@ -84,6 +91,7 @@ def post_process_pref_mem(
{
"cube_id": mem_cube_id,
"memories": pref_formatted_mem,
"total_nodes": len(pref_formatted_mem),
}
)
pref_instruction, pref_note = instruct_completion(pref_formatted_mem)
Expand Down Expand Up @@ -116,12 +124,114 @@ def post_process_textual_mem(
{
"cube_id": mem_cube_id,
"memories": fact_mem,
"total_nodes": len(fact_mem),
}
)
memories_result["tool_mem"].append(
{
"cube_id": mem_cube_id,
"memories": tool_mem,
"total_nodes": len(tool_mem),
}
)
return memories_result


def separate_knowledge_and_conversation_mem(memories: list[dict[str, Any]]):
    """
    Separate knowledge-base memories from conversation memories.

    An item counts as "knowledge" when its first ``metadata.sources`` entry is
    a file source with non-empty content; everything else — including items
    with a missing or empty ``sources`` list — is treated as conversation
    memory.

    Args:
        memories: Formatted memory items, each expected to carry a
            ``metadata`` dict (``sources`` may be absent or empty).

    Returns:
        Tuple ``(knowledge_mem, conversation_mem)``, each preserving the
        input order of its items.
    """
    knowledge_mem = []
    conversation_mem = []
    for item in memories:
        # Guard against items whose metadata lacks a sources list entirely.
        sources = item.get("metadata", {}).get("sources") or []
        first_source = sources[0] if sources else {}
        # TODO change to memory_type once sources carry an explicit type tag
        if first_source.get("type") == "file" and first_source.get("content", "") != "":
            knowledge_mem.append(item)
        else:
            conversation_mem.append(item)

    logger.info(
        f"Retrieval results number of knowledge_mem: {len(knowledge_mem)}, conversation_mem: {len(conversation_mem)}"
    )
    return knowledge_mem, conversation_mem


def rerank_knowledge_mem(
    reranker: Any,
    query: str,
    text_mem: list[dict[str, Any]],
    top_k: int,
    file_mem_proportion: float = 0.5,
) -> list[dict[str, Any]]:
    """
    Rerank knowledge memories and keep conversation memories.

    Flattens the per-cube memory groups, splits them into knowledge vs.
    conversation items, sorts knowledge items by their stored embedding
    relativity, replaces each knowledge item's distilled text with its raw
    file-source content, deduplicates by that content, and reassembles
    per-cube groups capped at ``top_k`` items overall.

    Args:
        reranker: Reranker instance. Currently unused — model-based rerank is
            disabled; kept in the signature for when it is re-enabled.
        query: Search query. Currently unused for the same reason.
        text_mem: Per-cube groups: ``[{"cube_id": ..., "memories": [...]}]``.
        top_k: Maximum total number of memories to return.
        file_mem_proportion: Minimum fraction of ``top_k`` reserved for
            knowledge (file-backed) memories.

    Returns:
        Per-cube groups in the same shape as ``text_mem``.
    """
    memid2cubeid: dict[str, str] = {}
    memories_list: list[dict[str, Any]] = []
    for memory_group in text_mem:
        cube_id = memory_group["cube_id"]
        memories = memory_group["memories"]
        memories_list.extend(memories)
        for memory in memories:
            memid2cubeid[memory["id"]] = cube_id

    knowledge_mem, conversation_mem = separate_knowledge_and_conversation_mem(memories_list)
    # Reserve at least file_mem_proportion of top_k for knowledge items, but
    # allow more when there are too few conversation items to fill the rest.
    knowledge_mem_top_k = max(int(top_k * file_mem_proportion), int(top_k - len(conversation_mem)))
    # rerank set unuse: model-based rerank disabled; rely on stored relativity
    reranked_knowledge_mem = knowledge_mem

    # Sort by relativity in descending order
    reranked_knowledge_mem = sorted(
        reranked_knowledge_mem,
        key=lambda item: item.get("metadata", {}).get("relativity", 0.0),
        reverse=True,
    )

    # TODO revoke sources replace memory value
    # Replace the distilled memory text with the raw file content, then drop
    # the (potentially large) sources payload from the response.
    for item in reranked_knowledge_mem:
        item["memory"] = item["metadata"]["sources"][0]["content"]
        item["metadata"]["sources"] = []

    for item in conversation_mem:
        item["metadata"]["sources"] = []

    # Deduplicate: keep the first (highest-relativity) occurrence of each
    # distinct memory content.
    original_count = len(reranked_knowledge_mem)
    seen_memories: set[str] = set()
    deduplicated_knowledge_mem = []
    for item in reranked_knowledge_mem:
        memory_content = item.get("memory", "")
        if memory_content and memory_content not in seen_memories:
            seen_memories.add(memory_content)
            deduplicated_knowledge_mem.append(item)
    deduplicated_count = len(deduplicated_knowledge_mem)
    logger.info(
        f"After filtering duplicate knowledge base text from sources, count changed from {original_count} to {deduplicated_count}"
    )

    reranked_knowledge_mem = deduplicated_knowledge_mem[:knowledge_mem_top_k]
    conversation_mem_top_k = top_k - len(reranked_knowledge_mem)

    # Regroup the selected memories by their originating cube.
    cubeid2memories: dict[str, list[dict[str, Any]]] = {}
    for memory in reranked_knowledge_mem + conversation_mem[:conversation_mem_top_k]:
        cubeid2memories.setdefault(memid2cubeid[memory["id"]], []).append(memory)

    return [
        {
            "cube_id": cube_id,
            "memories": memories,
        }
        for cube_id, memories in cubeid2memories.items()
    ]
77 changes: 47 additions & 30 deletions src/memos/api/handlers/memory_handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,11 @@

from typing import TYPE_CHECKING, Any, Literal

from memos.api.handlers.formatters_handler import format_memory_item
from memos.api.handlers.formatters_handler import (
format_memory_item,
post_process_pref_mem,
post_process_textual_mem,
)
from memos.api.product_models import (
DeleteMemoryRequest,
DeleteMemoryResponse,
Expand Down Expand Up @@ -209,54 +213,67 @@ def handle_get_memory(memory_id: str, naive_mem_cube: NaiveMemCube) -> GetMemory
def handle_get_memories(
    get_mem_req: GetMemoryRequest, naive_mem_cube: NaiveMemCube
) -> GetMemoryResponse:
    """
    Retrieve memories (textual, preference, and optionally tool) for a cube.

    Args:
        get_mem_req: Request carrying cube/user ids, pagination, an optional
            node filter, and flags for preference/tool memory inclusion.
        naive_mem_cube: Cube providing ``text_mem`` and, when preference
            memory is enabled, ``pref_mem``.

    Returns:
        GetMemoryResponse whose data holds exactly the ``text_mem``,
        ``pref_mem``, and ``tool_mem`` groups.
    """
    results: dict[str, Any] = {"text_mem": [], "pref_mem": [], "tool_mem": []}
    memories = naive_mem_cube.text_mem.get_all(
        user_name=get_mem_req.mem_cube_id,
        user_id=get_mem_req.user_id,
        page=get_mem_req.page,
        page_size=get_mem_req.page_size,
        filter=get_mem_req.filter,
    )["nodes"]

    results = post_process_textual_mem(results, memories, get_mem_req.mem_cube_id)

    if not get_mem_req.include_tool_memory:
        results["tool_mem"] = []

    format_preferences = []
    if get_mem_req.include_preference and naive_mem_cube.pref_mem is not None:
        filter_params: dict[str, Any] = {}
        if get_mem_req.user_id is not None:
            filter_params["user_id"] = get_mem_req.user_id
        if get_mem_req.mem_cube_id is not None:
            filter_params["mem_cube_id"] = get_mem_req.mem_cube_id

        if get_mem_req.filter is not None:
            # Identity fields must come from the request itself, not the
            # free-form filter, so drop (and report) any that slipped in.
            filter_copy = get_mem_req.filter.copy()
            removed_fields = []
            for key in ("user_id", "mem_cube_id"):
                if key in filter_copy:
                    del filter_copy[key]
                    removed_fields.append(key)

            if removed_fields:
                logger.warning(
                    f"Fields {removed_fields} found in filter will be ignored. "
                    f"Use request-level user_id/mem_cube_id parameters instead."
                )

            filter_params.update(filter_copy)

        preferences, _ = naive_mem_cube.pref_mem.get_memory_by_filter(
            filter_params, page=get_mem_req.page, page_size=get_mem_req.page_size
        )
        # Preference sources are kept out of the formatted payload.
        format_preferences = [format_memory_item(item, save_sources=False) for item in preferences]

    results = post_process_pref_mem(
        results, format_preferences, get_mem_req.mem_cube_id, get_mem_req.include_preference
    )

    # Post-processing may attach extra keys; expose only the three groups.
    filtered_results = {
        key: results.get(key, []) for key in ("text_mem", "pref_mem", "tool_mem")
    }

    return GetMemoryResponse(message="Memories retrieved successfully", data=filtered_results)


def handle_delete_memories(delete_mem_req: DeleteMemoryRequest, naive_mem_cube: NaiveMemCube):
logger.info(
Expand Down
15 changes: 15 additions & 0 deletions src/memos/api/handlers/search_handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,12 @@
using dependency injection for better modularity and testability.
"""

import time

from typing import Any

from memos.api.handlers.base_handler import BaseHandler, HandlerDependencies
from memos.api.handlers.formatters_handler import rerank_knowledge_mem
from memos.api.product_models import APISearchRequest, SearchResponse
from memos.log import get_logger
from memos.memories.textual.tree_text_memory.retrieve.retrieve_utils import (
Expand Down Expand Up @@ -69,6 +72,18 @@ def handle_search_memories(self, search_req: APISearchRequest) -> SearchResponse
# Restore original top_k for downstream logic or response metadata
search_req.top_k = original_top_k

start_time = time.time()
text_mem = results["text_mem"]
results["text_mem"] = rerank_knowledge_mem(
self.reranker,
query=search_req.query,
text_mem=text_mem,
top_k=original_top_k,
file_mem_proportion=0.5,
)
rerank_time = time.time() - start_time

self.logger.info(f"[Knowledge_replace_memory_time] Rerank time: {rerank_time} seconds")
self.logger.info(
f"[SearchHandler] Final search results: count={len(results)} results={results}"
)
Expand Down
4 changes: 3 additions & 1 deletion src/memos/api/product_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -771,7 +771,9 @@ class GetMemoryRequest(BaseRequest):

mem_cube_id: str = Field(..., description="Cube ID")
user_id: str | None = Field(None, description="User ID")
include_preference: bool = Field(True, description="Whether to handle preference memory")
include_preference: bool = Field(True, description="Whether to return preference memory")
include_tool_memory: bool = Field(False, description="Whether to return tool memory")
filter: dict[str, Any] | None = Field(None, description="Filter for the memory")
page: int | None = Field(
None,
description="Page number (starts from 1). If None, exports all data without pagination.",
Expand Down
Loading