Merged
37 commits
dbac035  feat: skill memory (Jan 23, 2026)
36f626a  feat: split task chunks for skill memories (endxxxx, Jan 23, 2026)
ec9316d  fix: refine the returned format from llm and parsing (endxxxx, Jan 23, 2026)
7702670  Merge pull request #4 from Wang-Daoji/feat/split_chunks (Wang-Daoji, Jan 24, 2026)
0d33b1d  feat: add new pack oss (Jan 24, 2026)
3a88419  feat: skill mem pipeline (Jan 25, 2026)
2903152  feat: fill code (Jan 26, 2026)
bd119d6  feat: modify code (Jan 26, 2026)
4173f7b  feat: modify code (Jan 26, 2026)
bccba71  feat: async add skill memory (Jan 27, 2026)
14f85e0  feat: update ollama version (Jan 27, 2026)
b3c79ac  feat: get memory return skill memory (Jan 27, 2026)
76f1975  feat: get api add skill mem (Jan 27, 2026)
687cf9d  feat: get api add skill mem (Jan 27, 2026)
8555b1d  feat: modify env config (Jan 27, 2026)
ae67378  feat: back set oss client (Jan 27, 2026)
793b508  feat: delete tmp skill code (Jan 27, 2026)
1824f5b  feat: merge main (Jan 27, 2026)
e3ef4cc  feat: process new package import error (Jan 27, 2026)
f14fd58  Merge remote-tracking branch 'upstream/dev-20260126-v2.0.4' into feat… (Jan 27, 2026)
6ba55d3  feat: modify oss config (Jan 27, 2026)
85e42d9  feat: modiy prompt and add two api (Jan 28, 2026)
be17f3f  feat: merge dev-20260126-v2.0.4 (Jan 28, 2026)
962f804  feat: modify prompt (Jan 28, 2026)
aeeb27e  feat: merge (Jan 28, 2026)
bbb6e79  feat: modify code (Jan 28, 2026)
a32ec5f  Merge remote-tracking branch 'upstream/dev-20260126-v2.0.4' into feat… (Jan 28, 2026)
dcfb772  feat: add logger (Jan 29, 2026)
bc40783  Merge remote-tracking branch 'upstream/dev-20260126-v2.0.4' into feat… (Jan 29, 2026)
b0946f1  feat: fix bug in memory id (Jan 29, 2026)
0269c12  Merge remote-tracking branch 'upstream/dev-20260126-v2.0.4' into feat… (Jan 29, 2026)
0026443  Merge branch 'main' into feat/skill_memory (Feb 2, 2026)
97c5956  Merge remote-tracking branch 'upstream/dev-20260202-v2.0.5' into feat… (Feb 5, 2026)
a4d8a43  feat: new code (Feb 5, 2026)
f97b7e2  fix: fix name error in polardb and related code (Feb 6, 2026)
fc10547  fix: bug in polardb (Feb 6, 2026)
66116d8  Merge remote-tracking branch 'upstream/dev-20260202-v2.0.5' into feat… (Feb 6, 2026)
74 changes: 74 additions & 0 deletions src/memos/api/handlers/memory_handler.py
@@ -14,6 +14,7 @@
from memos.api.product_models import (
DeleteMemoryRequest,
DeleteMemoryResponse,
GetMemoryDashboardRequest,
GetMemoryRequest,
GetMemoryResponse,
MemoryResponse,
@@ -353,3 +354,76 @@ def handle_delete_memories(delete_mem_req: DeleteMemoryRequest, naive_mem_cube:
message="Memories deleted successfully",
data={"status": "success"},
)


# =============================================================================
# Other handler endpoints (for internal use)
# =============================================================================


def handle_get_memories_dashboard(
get_mem_req: GetMemoryDashboardRequest, naive_mem_cube: NaiveMemCube
) -> GetMemoryResponse:
results: dict[str, Any] = {"text_mem": [], "pref_mem": [], "tool_mem": [], "skill_mem": []}
memories = naive_mem_cube.text_mem.get_all(
user_name=get_mem_req.mem_cube_id,
user_id=get_mem_req.user_id,
page=get_mem_req.page,
page_size=get_mem_req.page_size,
filter=get_mem_req.filter,
)["nodes"]

results = post_process_textual_mem(results, memories, get_mem_req.mem_cube_id)

if not get_mem_req.include_tool_memory:
results["tool_mem"] = []
if not get_mem_req.include_skill_memory:
results["skill_mem"] = []

preferences: list[TextualMemoryItem] = []

format_preferences = []
if get_mem_req.include_preference and naive_mem_cube.pref_mem is not None:
filter_params: dict[str, Any] = {}
if get_mem_req.user_id is not None:
filter_params["user_id"] = get_mem_req.user_id
if get_mem_req.mem_cube_id is not None:
filter_params["mem_cube_id"] = get_mem_req.mem_cube_id
if get_mem_req.filter is not None:
# Check and remove user_id/mem_cube_id from filter if present
filter_copy = get_mem_req.filter.copy()
removed_fields = []

if "user_id" in filter_copy:
filter_copy.pop("user_id")
removed_fields.append("user_id")
if "mem_cube_id" in filter_copy:
filter_copy.pop("mem_cube_id")
removed_fields.append("mem_cube_id")

if removed_fields:
logger.warning(
f"Fields {removed_fields} found in filter will be ignored. "
f"Use request-level user_id/mem_cube_id parameters instead."
)

filter_params.update(filter_copy)

preferences, _ = naive_mem_cube.pref_mem.get_memory_by_filter(
filter_params, page=get_mem_req.page, page_size=get_mem_req.page_size
)
format_preferences = [format_memory_item(item, save_sources=False) for item in preferences]

results = post_process_pref_mem(
results, format_preferences, get_mem_req.mem_cube_id, get_mem_req.include_preference
)

    # Keep only the known memory buckets: text_mem, pref_mem, tool_mem, skill_mem
filtered_results = {
"text_mem": results.get("text_mem", []),
"pref_mem": results.get("pref_mem", []),
"tool_mem": results.get("tool_mem", []),
"skill_mem": results.get("skill_mem", []),
}

return GetMemoryResponse(message="Memories retrieved successfully", data=filtered_results)
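
Note on the filter handling above: request-level user_id/mem_cube_id always take precedence, and duplicate keys inside filter are dropped with a warning. A standalone sketch of that precedence rule (helper name and example values are hypothetical, not part of this PR):

def sanitize_filter(
    filter: dict | None, user_id: str | None, mem_cube_id: str | None
) -> dict:
    # Hypothetical helper mirroring the branch above: request-level
    # user_id/mem_cube_id win over the same keys inside `filter`.
    params: dict = {}
    if user_id is not None:
        params["user_id"] = user_id
    if mem_cube_id is not None:
        params["mem_cube_id"] = mem_cube_id
    if filter is not None:
        params.update(
            {k: v for k, v in filter.items() if k not in ("user_id", "mem_cube_id")}
        )
    return params

# sanitize_filter({"user_id": "u2", "topic": "ml"}, user_id="u1", mem_cube_id=None)
# -> {"user_id": "u1", "topic": "ml"}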
8 changes: 8 additions & 0 deletions src/memos/api/product_models.py
@@ -507,6 +507,8 @@ class APIADDRequest(BaseRequest):
description="Session ID. If not provided, a default session will be used.",
)
task_id: str | None = Field(None, description="Task ID for monitering async tasks")
manager_user_id: str | None = Field(None, description="Manager User ID")
project_id: str | None = Field(None, description="Project ID")

# ==== Multi-cube writing ====
writable_cube_ids: list[str] | None = Field(
@@ -814,6 +816,12 @@ class GetMemoryRequest(BaseRequest):
)


class GetMemoryDashboardRequest(GetMemoryRequest):
"""Request model for getting memories for dashboard."""

mem_cube_id: str | None = Field(None, description="Cube ID")


class DeleteMemoryRequest(BaseRequest):
"""Request model for deleting memories."""

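
Since GetMemoryDashboardRequest only relaxes mem_cube_id to optional, constructing it differs from GetMemoryRequest in just that field. A small sketch; user_id/page/page_size are assumed to be inherited from GetMemoryRequest, as the handler above reads them:

req = GetMemoryDashboardRequest(user_id="user_1", page=1, page_size=20)
assert req.mem_cube_id is None  # optional on the dashboard variant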
11 changes: 11 additions & 0 deletions src/memos/api/routers/server_router.py
@@ -38,6 +38,7 @@
DeleteMemoryResponse,
ExistMemCubeIdRequest,
ExistMemCubeIdResponse,
GetMemoryDashboardRequest,
GetMemoryPlaygroundRequest,
GetMemoryRequest,
GetMemoryResponse,
@@ -456,3 +457,13 @@ def recover_memory_by_record_id(memory_req: RecoverMemoryByRecordIdRequest):
message="Called Successfully",
data={"status": "success"},
)


@router.post(
"/get_memory_dashboard", summary="Get memories for dashboard", response_model=GetMemoryResponse
)
def get_memories_dashboard(memory_req: GetMemoryDashboardRequest):
return handlers.memory_handler.handle_get_memories_dashboard(
get_mem_req=memory_req,
naive_mem_cube=naive_mem_cube,
)
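
A hedged usage sketch for the new route; the host/port and exact field set are assumptions based on GetMemoryDashboardRequest and the handler above, not something this PR pins down:

import requests

resp = requests.post(
    "http://localhost:8000/get_memory_dashboard",  # base URL is an assumption
    json={
        "user_id": "user_1",  # assumed inherited from GetMemoryRequest
        "page": 1,
        "page_size": 20,
        # mem_cube_id may be omitted: the dashboard variant makes it optional
    },
)
print(list(resp.json()["data"].keys()))
# expected: ['text_mem', 'pref_mem', 'tool_mem', 'skill_mem']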
20 changes: 14 additions & 6 deletions src/memos/graph_dbs/polardb.py
@@ -1691,7 +1691,7 @@ def get_context_chain(self, id: str, type: str = "FOLLOWS") -> list[str]:
raise NotImplementedError

@timed
def seach_by_keywords_like(
def search_by_keywords_like(
self,
query_word: str,
scope: str | None = None,
@@ -1761,7 +1761,7 @@ def seach_by_keywords_like(

params = (query_word,)
logger.info(
f"[seach_by_keywords_LIKE start:] user_name: {user_name}, query: {query}, params: {params}"
f"[search_by_keywords_LIKE start:] user_name: {user_name}, query: {query}, params: {params}"
)
conn = None
try:
@@ -1773,16 +1773,18 @@
for row in results:
oldid = row[0]
id_val = str(oldid)
if id_val.startswith('"') and id_val.endswith('"'):
id_val = id_val[1:-1]
output.append({"id": id_val})
logger.info(
f"[seach_by_keywords_LIKE end:] user_name: {user_name}, query: {query}, params: {params} recalled: {output}"
f"[search_by_keywords_LIKE end:] user_name: {user_name}, query: {query}, params: {params} recalled: {output}"
)
return output
finally:
self._return_connection(conn)

@timed
def seach_by_keywords_tfidf(
def search_by_keywords_tfidf(
self,
query_words: list[str],
scope: str | None = None,
@@ -1858,7 +1860,7 @@

params = (tsquery_string,)
logger.info(
f"[seach_by_keywords_TFIDF start:] user_name: {user_name}, query: {query}, params: {params}"
f"[search_by_keywords_TFIDF start:] user_name: {user_name}, query: {query}, params: {params}"
)
conn = None
try:
@@ -1870,10 +1872,12 @@
for row in results:
oldid = row[0]
id_val = str(oldid)
if id_val.startswith('"') and id_val.endswith('"'):
id_val = id_val[1:-1]
output.append({"id": id_val})

logger.info(
f"[seach_by_keywords_TFIDF end:] user_name: {user_name}, query: {query}, params: {params} recalled: {output}"
f"[search_by_keywords_TFIDF end:] user_name: {user_name}, query: {query}, params: {params} recalled: {output}"
)
return output
finally:
@@ -2003,6 +2007,8 @@ def search_by_fulltext(
rank = row[2] # rank score

id_val = str(oldid)
if id_val.startswith('"') and id_val.endswith('"'):
id_val = id_val[1:-1]
score_val = float(rank)

# Apply threshold filter if specified
@@ -2167,6 +2173,8 @@ def search_by_embedding(
oldid = row[3] # old_id
score = row[4]  # score
id_val = str(oldid)
if id_val.startswith('"') and id_val.endswith('"'):
id_val = id_val[1:-1]
score_val = float(score)
score_val = (score_val + 1) / 2 # align to neo4j, Normalized Cosine Score
if threshold is None or score_val >= threshold:
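
The strip-surrounding-quotes guard added above now repeats in four query paths; presumably the graph store can return id values as JSON-quoted strings. One possible consolidation (helper name hypothetical, logic copied from the diff):

def _strip_wrapping_quotes(raw_id: object) -> str:
    # Mirrors the repeated pattern above: drop one layer of literal
    # double quotes that the store may wrap around string ids.
    id_val = str(raw_id)
    if id_val.startswith('"') and id_val.endswith('"'):
        id_val = id_val[1:-1]
    return id_val

# _strip_wrapping_quotes('"abc-123"') -> 'abc-123'
# _strip_wrapping_quotes('abc-123')   -> 'abc-123'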
4 changes: 2 additions & 2 deletions src/memos/mem_feedback/feedback.py
@@ -924,15 +924,15 @@ def process_keyword_replace(
)

must_part = f"{' & '.join(queries)}" if len(queries) > 1 else queries[0]
retrieved_ids = self.graph_store.seach_by_keywords_tfidf(
retrieved_ids = self.graph_store.search_by_keywords_tfidf(
[must_part], user_name=user_name, filter=filter_dict
)
if len(retrieved_ids) < 1:
retrieved_ids = self.graph_store.search_by_fulltext(
queries, top_k=100, user_name=user_name, filter=filter_dict
)
else:
retrieved_ids = self.graph_store.seach_by_keywords_like(
retrieved_ids = self.graph_store.search_by_keywords_like(
f"%{original_word}%", user_name=user_name, filter=filter_dict
)

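
For context on the call above: must_part collapses multiple keywords into one AND-ed tsquery term, so search_by_keywords_tfidf receives a single-element list. A tiny worked sketch (values illustrative):

queries = ["skill", "memory"]
must_part = f"{' & '.join(queries)}" if len(queries) > 1 else queries[0]
# must_part == "skill & memory": every keyword must match;
# it is then passed as [must_part], a one-element list.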
19 changes: 18 additions & 1 deletion src/memos/mem_reader/multi_modal_struct.py
@@ -3,7 +3,7 @@
import re
import traceback

from typing import Any
from typing import TYPE_CHECKING, Any

from memos import log
from memos.configs.mem_reader import MultiModalStructMemReaderConfig
@@ -20,6 +20,10 @@
from memos.utils import timed


if TYPE_CHECKING:
from memos.types.general_types import UserContext


logger = log.get_logger(__name__)


@@ -667,6 +671,12 @@ def _process_one_item(fast_item: TextualMemoryItem) -> list[TextualMemoryItem]:
if file_ids:
extra_kwargs["file_ids"] = file_ids

# Extract manager_user_id and project_id from user_context
user_context: UserContext | None = kwargs.get("user_context")
if user_context:
extra_kwargs["manager_user_id"] = user_context.manager_user_id
extra_kwargs["project_id"] = user_context.project_id

# Determine prompt type based on sources
prompt_type = self._determine_prompt_type(sources)

@@ -782,6 +792,11 @@ def _process_tool_trajectory_fine(

fine_memory_items = []

# Extract manager_user_id and project_id from user_context
user_context: UserContext | None = kwargs.get("user_context")
manager_user_id = user_context.manager_user_id if user_context else None
project_id = user_context.project_id if user_context else None

for fast_item in fast_memory_items:
# Extract memory text (string content)
mem_str = fast_item.memory or ""
@@ -808,6 +823,8 @@
correctness=m.get("correctness", ""),
experience=m.get("experience", ""),
tool_used_status=m.get("tool_used_status", []),
manager_user_id=manager_user_id,
project_id=project_id,
)
fine_memory_items.append(node)
except Exception as e:
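
The user_context plumbing repeated in this file (and in the parsers below) only assumes that UserContext carries manager_user_id and project_id. A minimal self-contained sketch of that contract; the dataclass form is an assumption, the real type lives in memos.types.general_types:

from dataclasses import dataclass

@dataclass
class UserContext:  # sketch only; see memos.types.general_types
    manager_user_id: str | None = None
    project_id: str | None = None

def extract_ownership(**kwargs) -> tuple[str | None, str | None]:
    # Same tolerant idiom as above: a missing user_context yields (None, None).
    user_context: UserContext | None = kwargs.get("user_context")
    manager_user_id = user_context.manager_user_id if user_context else None
    project_id = user_context.project_id if user_context else None
    return manager_user_id, project_id

# extract_ownership() -> (None, None)
# extract_ownership(user_context=UserContext("mgr_1", "proj_1")) -> ("mgr_1", "proj_1")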
13 changes: 12 additions & 1 deletion src/memos/mem_reader/read_multi_modal/assistant_parser.py
@@ -2,7 +2,7 @@

import json

from typing import Any
from typing import TYPE_CHECKING, Any

from memos.embedders.base import BaseEmbedder
from memos.llms.base import BaseLLM
@@ -18,6 +18,10 @@
from .utils import detect_lang


if TYPE_CHECKING:
from memos.types.general_types import UserContext


logger = get_logger(__name__)


@@ -281,6 +285,11 @@ def parse_fast(
user_id = info_.pop("user_id", "")
session_id = info_.pop("session_id", "")

# Extract manager_user_id and project_id from user_context
user_context: UserContext | None = kwargs.get("user_context")
manager_user_id = user_context.manager_user_id if user_context else None
project_id = user_context.project_id if user_context else None

# Create memory item (equivalent to _make_memory_item)
memory_item = TextualMemoryItem(
memory=line,
@@ -298,6 +307,8 @@
confidence=0.99,
type="fact",
info=info_,
manager_user_id=manager_user_id,
project_id=project_id,
),
)

23 changes: 22 additions & 1 deletion src/memos/mem_reader/read_multi_modal/file_content_parser.py
@@ -5,7 +5,7 @@
import re
import tempfile

from typing import Any
from typing import TYPE_CHECKING, Any

from tqdm import tqdm

@@ -34,6 +34,10 @@
from memos.types.openai_chat_completion_types import File


if TYPE_CHECKING:
from memos.types.general_types import UserContext


logger = get_logger(__name__)

# Prompt dictionary for doc processing (shared by simple_struct and file_content_parser)
@@ -451,6 +455,11 @@ def parse_fast(
user_id = info_.pop("user_id", "")
session_id = info_.pop("session_id", "")

# Extract manager_user_id and project_id from user_context
user_context: UserContext | None = kwargs.get("user_context")
manager_user_id = user_context.manager_user_id if user_context else None
project_id = user_context.project_id if user_context else None

# For file content parts, default to LongTermMemory
# (since we don't have role information at this level)
memory_type = "LongTermMemory"
@@ -495,6 +504,8 @@
type="fact",
info=info_,
file_ids=file_ids,
manager_user_id=manager_user_id,
project_id=project_id,
),
)
memory_items.append(memory_item)
@@ -527,6 +538,8 @@
type="fact",
info=info_,
file_ids=file_ids,
manager_user_id=manager_user_id,
project_id=project_id,
),
)
memory_items.append(memory_item)
@@ -644,6 +657,12 @@ def parse_fine(
info_ = info.copy()
user_id = info_.pop("user_id", "")
session_id = info_.pop("session_id", "")

# Extract manager_user_id and project_id from user_context
user_context: UserContext | None = kwargs.get("user_context")
manager_user_id = user_context.manager_user_id if user_context else None
project_id = user_context.project_id if user_context else None

if file_id:
info_["file_id"] = file_id
file_ids = [file_id] if file_id else []
@@ -702,6 +721,8 @@ def _make_memory_item(
type="fact",
info=info_,
file_ids=file_ids,
manager_user_id=manager_user_id,
project_id=project_id,
),
)
