From 4f37c361b37d458dc63520048b9b8b9a3aedfda0 Mon Sep 17 00:00:00 2001
From: CaralHsi
Date: Tue, 20 Jan 2026 11:29:51 +0800
Subject: [PATCH 1/5] feat: refactor & reorganize examples with unified
 structure and updated demos (#903)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* fix bugs: try to fix bugs in _submit_web_logs
* fix bugs: try to address bugs
* fix bugs
* refactor: modify examples
* revise the add operation and fix an unbelievable bug
* address the bug issues
* fix a formatting problem in the doc file
* add a range of new features to the add operation
* address an incompatibility issue in the local scheduler
* feat(scheduler): optimize redis queue consumer group management
  - Proactively ensure consumer groups exist in '_refresh_stream_keys' for newly discovered streams (an idempotent-creation sketch follows this change list).
  - Remove redundant consumer group checks in '_read_new_messages_batch' to improve read performance.
  - Clean up the 'seen_streams' cache when streams are deleted to ensure correct group recreation.
  - This change reduces unnecessary Redis calls during high-frequency polling.
* fix(tests): resolve AttributeError in SimpleStructMemReader tests
  - Import 'parse_json_result' from 'memos.mem_reader.utils' instead of accessing it as an instance attribute.
  - Fixes "AttributeError: 'SimpleStructMemReader' object has no attribute 'parse_json_result'" in 'test_parse_json_result_success' and 'test_parse_json_result_failure'.
  - Remove the incorrect mock assignment of 'parse_json_result' in 'test_process_chat_data'.
* fix(mem_reader): pass info dict to add_before_search for correct user_id usage
  - Update the 'add_before_search' signature in 'SimpleStructMemReader' to accept an 'info' dict.
  - Pass 'info' (containing 'user_id' and 'session_id') to 'self.searcher.search' instead of using empty strings.
  - Add 'test_add_before_search' to 'TestSimpleStructMemReader' to verify the fix and ensure 'searcher.search' receives the correct 'info'.
  - This ensures that memory searches are scoped to the correct user and session.
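To make the consumer-group change above concrete, here is a minimal sketch of idempotent group creation with redis-py. It is not the project's actual `_refresh_stream_keys` code; the function, stream, and group names are illustrative. Doing this once per newly discovered stream is what lets the per-read existence checks be dropped from the hot path.

```python
import redis

def ensure_consumer_group(client: redis.Redis, stream: str, group: str) -> None:
    """Create `group` on `stream` once, so per-read existence checks can be dropped."""
    try:
        # mkstream=True also creates the stream itself if it does not exist yet.
        client.xgroup_create(name=stream, groupname=group, id="0", mkstream=True)
    except redis.exceptions.ResponseError as exc:
        # BUSYGROUP means the group already exists; that is the expected
        # outcome on every call after the first, so it is safe to ignore.
        if "BUSYGROUP" not in str(exc):
            raise

# e.g. invoked once per newly discovered stream during a refresh pass
ensure_consumer_group(redis.Redis(), "memos:stream:demo", "scheduler-group")
```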
* refactor: move add_before_search from mem_reader to SingleCubeView
* address bugs
* fix: fix the qsize bug in the task queue, and accept changes from hotfix/scheduler
* fix: address some issues so the old scheduler example and the kv cache example run
* fix: address the top-level import of the unavailable module 'torch'
* fix: resolve linting errors and make optional dependencies lazy loaded
  - Fix ambiguous characters and commented-out code in examples/mem_scheduler/quick_start_examples.py
  - Fix nested if statements in src/memos/mem_os/core.py
  - Move torch and transformers imports to method scope in src/memos/llms/hf.py to support optional dependencies (a lazy-import sketch appears after this change list)
  - Update tests/llms/test_hf.py to patch the transformers module directly
* refactor: revise the rewrite prompt to improve it
* refactor: update examples
* refactor: update examples for the scheduler
* fix bugs: address the XAUTOCLAIM command being unsupported on Redis servers older than 6.2.0 by adding a manual auto-claim fallback that combines XPENDING + XCLAIM (a sketch of the fallback appears further below)
* refactor: review settings
* refactor: adjust examples so they run better for code debugging
* refactor: review slow add APIs to get better performance on Halumen
* fix bugs: address the issue where status_tracker is still used when user_redis_queue is set to false
* refactor: allow the code to run without RabbitMQ
* refactor: create a _parse_pending_entry helper for the redis queue
* refactor: add a try/except around status_tracker
* fix: revise the textual memory examples so they run normally
* feat: improve MemCube examples with View architecture (#859)
  * feat: improve MemCube examples with View architecture
    - Add load_cube.py: demonstrate loading a MemCube and using SingleCubeView
    - Add dump_cube.py: demonstrate persisting a MemCube to disk
    - Deprecate old examples: move load_from_folder.py, load_from_remote.py, and load_lazily.py to _deprecated/
    - Add _deprecated/README.md with a migration guide
    - All code in English, with ruff check and format passed
    - Use 80-char separator lines consistent with the server_router_api.py style
  * feat: improve MemCube examples with SingleCubeView
    - dump_cube.py: export cube data via graph_store.export_graph()
    - load_cube.py: import and display graph data
    - Add example data in examples/data/mem_cube_tree/
  ---------
  Co-authored-by: glin1993@outlook.com <>
* fix: examples update for core memories (#863)
  * fix: address the issues in the existing examples of core memories
  * refactor: remove deprecated scheduler examples and revise half of the examples
  * refactor: address all issues in the examples
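As a minimal sketch of the lazy-import pattern mentioned above: importing torch/transformers inside the method keeps the module importable when the optional extras are missing. The class and method names here are illustrative, not the exact code in src/memos/llms/hf.py.

```python
class HFLLM:
    """Hugging Face backend whose heavy dependencies are optional."""

    def generate(self, prompt: str) -> str:
        # Imported at call time so `import memos.llms.hf` works without
        # the optional 'torch'/'transformers' extras installed.
        try:
            import torch  # noqa: F401  (availability check only)
            from transformers import pipeline
        except ImportError as exc:
            raise ImportError(
                "This backend needs the optional 'torch' and 'transformers' packages."
            ) from exc
        pipe = pipeline("text-generation", model="gpt2")
        return pipe(prompt, max_new_tokens=32)[0]["generated_text"]
```

This is also why the tests patch the transformers module itself rather than an attribute imported at module load time.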
* fix: feedback mute pref (#876)
  * feedback mute pref
  * mute pref feedback
  ---------
  Co-authored-by: 黑布林 <11641432+heiheiyouyou@user.noreply.gitee.com>
* examples update from tree_textual_memories and preference_textual_mem… (#872)
  examples update from tree_textual_memories and preference_textual_memories
* feat(examples): update basic modules examples and configs (#856)
  * feat(examples): update basic modules examples and configs
  * feat: update MCP examples and internet search
  * docs: api reference
  ---------
  Co-authored-by: nova
  Co-authored-by: CaralHsi
* Examples update (#887)
  * neo4j_example.py
  * neo4j_example.py
  * neo4j_example.py
* Examples memfeedback (#885)
  * update example of feedback
  * update example of mem_feedback
  ---------
  Co-authored-by: 周函数
* fix: examples update for mem_reader (#866)
  * fix: examples update for mem_reader
  * update the readme of the mem_reader examples
  ---------
  Co-authored-by: 周函数
* update example of mem_chat (#892)
  Co-authored-by: 周函数
* fix: example config bug
* docs: delete deprecated examples
* docs: add clear instructions as comments for the examples in basic_modules
* fix: fix bugs in the basic_modules examples
* doc: add clear instructions as comments for the examples in mem_scheduler
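For the manual auto-claim fallback referenced earlier, here is a rough redis-py sketch, approximating XAUTOCLAIM on servers older than Redis 6.2. The function and variable names are illustrative, and the `idle` filter on `xpending_range` assumes a reasonably recent redis-py release.

```python
import redis

def manual_autoclaim(client: redis.Redis, stream: str, group: str,
                     consumer: str, min_idle_ms: int = 60_000, count: int = 50):
    """Approximate XAUTOCLAIM via XPENDING + XCLAIM."""
    # XPENDING (extended form): list entries pending for at least `min_idle_ms`.
    pending = client.xpending_range(
        stream, group, min="-", max="+", count=count, idle=min_idle_ms
    )
    stale_ids = [entry["message_id"] for entry in pending]
    if not stale_ids:
        return []
    # XCLAIM: transfer ownership of the stale entries to this consumer
    # so they can be reprocessed.
    return client.xclaim(
        stream, group, consumer,
        min_idle_time=min_idle_ms, message_ids=stale_ids,
    )
```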
* fix: Fix an example bug caused by using the default SchedulerLocalQueue (which lacks a .show_task_status method)
* revert: "doc: Examples update and related minor bug fix" (#899)
  Revert "doc: Examples update and related minor bug fix"
* doc: Examples update and related minor bug fix (#900)
  Revert "revert: "doc: Examples update and related minor bug fix" (#899)"
  The test failed for reasons unrelated to the commits in this PR, so we revert the revert.
  This reverts commit d0f3bab577786252edc206f17707487d05e0b898.
  Co-authored-by: CaralHsi
* feat: delete useless example (#904)
* fix: fix api examples and add search/chat examples (#906)
* feat: delete useless file; fix: chat_model_list bug (#907)
* feat: delete useless test file (#908)
* feat: add mem_cube_2 because tests need them (#909)
* feat: add back mem_cube because tests use them (#910)

---------

Co-authored-by: chentang
Co-authored-by: Zehao Lin
Co-authored-by: Dubberman <48425266+whipser030@users.noreply.github.com>
Co-authored-by: 黑布林 <11641432+heiheiyouyou@user.noreply.gitee.com>
Co-authored-by: Wenqiang Wei <46308778+endxxxx@users.noreply.github.com>
Co-authored-by: novaleolin <15062791135@163.com>
Co-authored-by: nova
Co-authored-by: caocuilong <39516215+caocuilong@users.noreply.github.com>
Co-authored-by: HanshuZhou <118414084+zhouhanshu@users.noreply.github.com>
Co-authored-by: 周函数
Co-authored-by: Wenqiang Wei
Co-authored-by: bittergreen
---
 docs/openapi.json | 3596 +++++++++++---
 examples/api/product_api.py | 170 -
 examples/api/server_router_api.py | 276 +-
 examples/basic_modules/embedder.py | 14 +-
 examples/basic_modules/llm.py | 8 +
 examples/basic_modules/nebular_example.py | 361 --
 examples/basic_modules/neo4j_example.py | 73 +
 examples/basic_modules/parser.py | 15 -
 examples/basic_modules/qdrant_vec_db.py | 143 -
 examples/basic_modules/reranker.py | 17 +
 .../textual_memory_internet_search_example.py | 307 +-
 .../tree_textual_memory_reasoner.py | 167 -
 .../tree_textual_memory_recall.py | 203 +-
 ...textual_memory_relation_reason_detector.py | 272 +-
 .../tree_textual_memory_task_goal_parser.py | 205 +-
 .../core_memories/general_textual_memory.py | 61 +-
 examples/core_memories/kv_cache_memory.py | 186 +-
 .../core_memories/naive_textual_memory.py | 79 +-
 ...ternet_memoy.py => pref_textual_memory.py} | 111 +-
 examples/core_memories/tree_textual_memory.py | 28 +
 .../core_memories/vllm_kv_cache_memory.py | 2 +-
 examples/data/config/preference_config.json | 53 +
 .../config/simple_struct_reader_config.json | 22 +-
 .../config/tree_config_shared_database.json | 59 +-
 .../data/mem_cube_tree/textual_memory.json | 4219 +++++++++++++++++
 examples/data/mem_scheduler/scene_data.json | 118 -
 examples/mem_api/pipeline_test.py | 178 -
 ...t_w_generated_cube_explicit_memory_only.py | 231 +-
 examples/mem_chat/simple_mem_chat.py | 33 -
 examples/mem_chunk/markdown_chunk.py | 33 -
 examples/mem_cube/_deprecated/README.md | 81 +
 .../{ => _deprecated}/load_from_folder.py | 0
 .../{ => _deprecated}/load_from_remote.py | 0
 .../mem_cube/{ => _deprecated}/load_lazily.py | 0
 examples/mem_cube/dump_cube.py | 137 +
 examples/mem_cube/load_cube.py | 137 +
 examples/mem_feedback/example_feedback.py | 248 +
 examples/mem_mcp/simple_fastmcp_client.py | 69 +-
 examples/mem_mcp/simple_fastmcp_serve.py | 77 +-
 .../chat_w_generated_cube_explicit_memory.py | 180 -
 examples/mem_os/chat_w_scheduler.py | 74 -
 examples/mem_os/cot_usage_example.py | 342 --
 examples/mem_os/easy_memos.py | 21 -
 .../mem_os/locomo_shared_database_memos.py | 203 -
 examples/mem_os/multi_user_memos_example.py | 162 -
 examples/mem_os/naive_memos.py | 30 -
 examples/mem_os/persistent_memos_example.py | 192 -
 examples/mem_os/simple_memos.py | 42 -
 examples/mem_os/simple_openapi_memos.py | 300 --
 .../simple_openapi_memos_neo4j_community.py | 315 --
 examples/mem_os/simple_prefs_memos_product.py | 399 --
 examples/mem_os/simple_treekvcache_memos.py | 46 -
 examples/mem_os/simple_vllm_memos.py | 61 -
 examples/mem_reader/README.md | 90 +
 examples/mem_reader/builders.py | 78 +
 .../compare_simple_vs_multimodal.py | 461 --
 .../mem_reader/multimodal_struct_reader.py | 1042 ----
 examples/mem_reader/parser/__init__.py | 1 -
 examples/mem_reader/parser/config_utils.py | 132 -
 .../parser/example_assistant_parser.py | 94 -
 .../parser/example_file_content_parser.py | 132 -
 .../parser/example_multi_modal_parser.py | 400 --
 .../parser/example_string_parser.py | 66 -
 .../parser/example_system_parser.py | 158 -
 .../parser/example_text_content_parser.py | 72 -
 .../mem_reader/parser/example_tool_parser.py | 101 -
 .../mem_reader/parser/example_user_parser.py | 135 -
 examples/mem_reader/parser/print_utils.py | 11 -
 examples/mem_reader/parser_demos/__init__.py | 0
 examples/mem_reader/parser_demos/_base.py | 76 +
 .../mem_reader/parser_demos/demo_assistant.py | 44 +
 .../parser_demos/demo_file_content.py | 68 +
 .../mem_reader/parser_demos/demo_image.py | 87 +
 .../parser_demos/demo_multi_modal.py | 346 ++
 .../mem_reader/parser_demos/demo_string.py | 30 +
 .../mem_reader/parser_demos/demo_system.py | 41 +
 .../parser_demos/demo_text_content.py | 40 +
 examples/mem_reader/parser_demos/demo_tool.py | 28 +
 examples/mem_reader/parser_demos/demo_user.py | 43 +
 examples/mem_reader/runners/__init__.py | 0
 examples/mem_reader/runners/run_multimodal.py | 139 +
 examples/mem_reader/runners/run_simple.py | 113 +
 examples/mem_reader/samples.py | 773 +++
 examples/mem_reader/settings.py | 107 +
 examples/mem_reader/simple_struct_reader.py | 831 ----
 examples/mem_reader/utils.py | 84 +
 examples/mem_scheduler/api_w_scheduler.py | 163 +-
 .../mem_scheduler/debug_text_mem_replace.py | 109 -
 examples/mem_scheduler/memos_w_scheduler.py | 265 +-
 .../memos_w_scheduler_for_test.py | 88 -
 .../mem_scheduler/quick_start_examples.py | 312 --
 examples/mem_scheduler/rabbitmq_example.py | 60 -
 examples/mem_scheduler/redis_example.py | 12 +-
 ..._for_async_tasks.py => run_async_tasks.py} | 9 +
 examples/mem_scheduler/show_redis_status.py | 52 +-
 examples/mem_scheduler/task_fair_schedule.py | 87 -
 .../mem_user/user_manager_factory_example.py | 111 -
 src/memos/api/config.py | 2 +-
 src/memos/api/handlers/config_builders.py | 2 +-
 src/memos/api/mcp_serve.py | 80 +-
 src/memos/api/start_api.py | 2 +-
 src/memos/cli.py | 2 +-
 src/memos/graph_dbs/neo4j.py | 2 +-
 src/memos/llms/hf.py | 2 +-
 src/memos/mem_feedback/feedback.py | 14 +-
 src/memos/mem_feedback/simple_feedback.py | 2 +
 .../init_components_for_scheduler.py | 2 +-
 .../task_schedule_modules/redis_queue.py | 35 +-
 tests/test_openapi_sync.py | 79 -
 109 files changed, 11885 insertions(+), 9786 deletions(-)
 delete mode 100644 examples/api/product_api.py
 delete mode 100644 examples/basic_modules/nebular_example.py
 delete mode 100644 examples/basic_modules/parser.py
 delete mode 100644 examples/basic_modules/qdrant_vec_db.py
 delete mode 100644 examples/basic_modules/tree_textual_memory_reasoner.py
 rename examples/core_memories/{textual_internet_memoy.py => pref_textual_memory.py} (76%)
 create mode 100644 examples/data/config/preference_config.json
 create mode 100644 examples/data/mem_cube_tree/textual_memory.json
 delete mode 100644 examples/data/mem_scheduler/scene_data.json
 delete mode 100644 examples/mem_api/pipeline_test.py
 delete mode 100644 examples/mem_chat/simple_mem_chat.py
 delete mode 100644 examples/mem_chunk/markdown_chunk.py
 create mode 100644 examples/mem_cube/_deprecated/README.md
 rename examples/mem_cube/{ => _deprecated}/load_from_folder.py (100%)
 rename examples/mem_cube/{ => _deprecated}/load_from_remote.py (100%)
 rename examples/mem_cube/{ => _deprecated}/load_lazily.py (100%)
 create mode 100644 examples/mem_cube/dump_cube.py
 create mode 100644 examples/mem_cube/load_cube.py
 create mode 100644 examples/mem_feedback/example_feedback.py
 delete mode 100644 examples/mem_os/chat_w_generated_cube_explicit_memory.py
 delete mode 100644 examples/mem_os/chat_w_scheduler.py
 delete mode 100644 examples/mem_os/cot_usage_example.py
 delete mode 100644 examples/mem_os/easy_memos.py
 delete mode 100644 examples/mem_os/locomo_shared_database_memos.py
 delete mode 100644 examples/mem_os/multi_user_memos_example.py
 delete mode 100644 examples/mem_os/naive_memos.py
 delete mode 100644 examples/mem_os/persistent_memos_example.py
 delete mode 100644 examples/mem_os/simple_memos.py
 delete mode 100644 examples/mem_os/simple_openapi_memos.py
 delete mode 100644 examples/mem_os/simple_openapi_memos_neo4j_community.py
 delete mode 100644 examples/mem_os/simple_prefs_memos_product.py
 delete mode 100644 examples/mem_os/simple_treekvcache_memos.py
 delete mode 100644 examples/mem_os/simple_vllm_memos.py
 create mode 100644 examples/mem_reader/README.md
 create mode 100644 examples/mem_reader/builders.py
 delete mode 100644 examples/mem_reader/compare_simple_vs_multimodal.py
 delete mode 100644 examples/mem_reader/multimodal_struct_reader.py
 delete mode 100644 examples/mem_reader/parser/__init__.py
 delete mode 100644 examples/mem_reader/parser/config_utils.py
 delete mode 100644 examples/mem_reader/parser/example_assistant_parser.py
 delete mode 100644 examples/mem_reader/parser/example_file_content_parser.py
 delete mode 100644 examples/mem_reader/parser/example_multi_modal_parser.py
 delete mode 100644 examples/mem_reader/parser/example_string_parser.py
 delete mode 100644 examples/mem_reader/parser/example_system_parser.py
 delete mode 100644 examples/mem_reader/parser/example_text_content_parser.py
 delete mode 100644 examples/mem_reader/parser/example_tool_parser.py
 delete mode 100644 examples/mem_reader/parser/example_user_parser.py
 delete mode 100644 examples/mem_reader/parser/print_utils.py
 create mode 100644 examples/mem_reader/parser_demos/__init__.py
 create mode 100644 examples/mem_reader/parser_demos/_base.py
 create mode 100644 examples/mem_reader/parser_demos/demo_assistant.py
 create mode 100644 examples/mem_reader/parser_demos/demo_file_content.py
 create mode 100644 examples/mem_reader/parser_demos/demo_image.py
 create mode 100644 examples/mem_reader/parser_demos/demo_multi_modal.py
 create mode 100644 examples/mem_reader/parser_demos/demo_string.py
 create mode 100644 examples/mem_reader/parser_demos/demo_system.py
 create mode 100644 examples/mem_reader/parser_demos/demo_text_content.py
 create mode 100644 examples/mem_reader/parser_demos/demo_tool.py
 create mode 100644 examples/mem_reader/parser_demos/demo_user.py
 create mode 100644 examples/mem_reader/runners/__init__.py
 create mode 100644 examples/mem_reader/runners/run_multimodal.py
 create mode 100644 examples/mem_reader/runners/run_simple.py
 create mode 100644 examples/mem_reader/samples.py
 create mode 100644 examples/mem_reader/settings.py
 delete mode 100644 examples/mem_reader/simple_struct_reader.py
 create mode 100644 examples/mem_reader/utils.py
 delete mode 100644 examples/mem_scheduler/debug_text_mem_replace.py
 delete mode 100644 examples/mem_scheduler/memos_w_scheduler_for_test.py
 delete mode 100644 examples/mem_scheduler/quick_start_examples.py
 delete mode 100644 examples/mem_scheduler/rabbitmq_example.py
 rename examples/mem_scheduler/{scheduler_for_async_tasks.py => run_async_tasks.py} (90%)
 delete mode 100644 examples/mem_scheduler/task_fair_schedule.py
 delete mode 100644 examples/mem_user/user_manager_factory_example.py
 delete mode 100644 tests/test_openapi_sync.py

diff --git a/docs/openapi.json b/docs/openapi.json
index 46d715147..d9ef710b5 100644
--- a/docs/openapi.json
+++ b/docs/openapi.json
@@ -1,21 +1,24 @@
 {
   "openapi": "3.1.0",
   "info": {
-    "title": "MemOS REST APIs",
-    "description": "A REST API for managing and searching memories using MemOS.",
-    "version": "1.0.0"
+    "title": "MemOS Server REST APIs",
+    "description": "A REST API for managing multiple users with MemOS Server.",
+    "version": "1.0.1"
   },
   "paths": {
-    "/configure": {
+    "/product/search": {
       "post": {
-        "summary": "Configure MemOS",
-        "description": "Set MemOS configuration.",
-        "operationId": "set_config_configure_post",
+        "tags": [
+          "Server API"
+        ],
+        "summary": "Search memories",
+        "description": "Search memories for a specific user.\n\nThis endpoint uses the class-based SearchHandler for better code organization.",
+        "operationId": "search_memories_product_search_post",
         "requestBody": {
           "content": {
             "application/json": {
               "schema": {
-                "$ref": "#/components/schemas/MOSConfig"
+                "$ref": "#/components/schemas/APISearchRequest"
               }
             }
           },
@@ -27,7 +30,7 @@
         "content": {
           "application/json": {
             "schema": {
-              "$ref": "#/components/schemas/ConfigResponse"
+              "$ref": "#/components/schemas/SearchResponse"
             }
           }
         }
@@ -45,33 +48,19 @@
         }
       }
     },
-    "/users": {
-      "get": {
-        "summary": "List all users",
-        "description": "List all active users.",
-        "operationId": "list_users_users_get",
-        "responses": {
-          "200": {
-            "description": "Successful Response",
-            "content": {
-              "application/json": {
-                "schema": {
-                  "$ref": "#/components/schemas/UserListResponse"
-                }
-              }
-            }
-          }
-        }
-      },
+    "/product/add": {
       "post": {
-        "summary": "Create a new user",
-        "description": "Create a new user.",
-        "operationId": "create_user_users_post",
+        "tags": [
+          "Server API"
+        ],
+        "summary": "Add memories",
+        "description": "Add memories for a specific user.\n\nThis endpoint uses the class-based AddHandler for better code organization.",
+        "operationId": "add_memories_product_add_post",
        "requestBody": {
          "content": {
            "application/json": {
              "schema": {
-                "$ref": "#/components/schemas/UserCreate"
+                "$ref": "#/components/schemas/APIADDRequest"
              }
            }
          },
@@ -83,7 +72,7 @@
         "content": {
           "application/json": {
             "schema": {
-              "$ref": "#/components/schemas/UserResponse"
+              "$ref": "#/components/schemas/MemoryResponse"
             }
           }
         }
@@ -101,57 +90,21 @@
         }
       }
     },
-    "/users/me":
{ + "/product/scheduler/allstatus": { "get": { - "summary": "Get current user info", - "description": "Get current user information including accessible cubes.", - "operationId": "get_user_info_users_me_get", - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/UserResponse" - } - } - } - } - } - } - }, - "/mem_cubes": { - "post": { - "summary": "Register a MemCube", - "description": "Register a new MemCube.", - "operationId": "register_mem_cube_mem_cubes_post", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/MemCubeRegister" - } - } - }, - "required": true - }, + "tags": [ + "Server API" + ], + "summary": "Get detailed scheduler status", + "description": "Get detailed scheduler status including running tasks and queue metrics.", + "operationId": "scheduler_allstatus_product_scheduler_allstatus_get", "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/SimpleResponse" - } - } - } - }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" + "$ref": "#/components/schemas/AllStatusResponse" } } } @@ -159,23 +112,28 @@ } } }, - "/mem_cubes/{mem_cube_id}": { - "delete": { - "summary": "Unregister a MemCube", - "description": "Unregister a MemCube.", - "operationId": "unregister_mem_cube_mem_cubes__mem_cube_id__delete", + "/product/scheduler/status": { + "get": { + "tags": [ + "Server API" + ], + "summary": "Get scheduler running status", + "description": "Get scheduler running status.", + "operationId": "scheduler_status_product_scheduler_status_get", "parameters": [ { - "name": "mem_cube_id", - "in": "path", + "name": "user_id", + "in": "query", "required": true, "schema": { "type": "string", - "title": "Mem Cube Id" - } + "description": "User ID", + "title": "User Id" + }, + "description": "User ID" }, { - "name": "user_id", + "name": "task_id", "in": "query", "required": false, "schema": { @@ -187,8 +145,10 @@ "type": "null" } ], - "title": "User Id" - } + "description": "Optional Task ID to query a specific task", + "title": "Task Id" + }, + "description": "Optional Task ID to query a specific task" } ], "responses": { @@ -197,7 +157,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/SimpleResponse" + "$ref": "#/components/schemas/StatusResponse" } } } @@ -215,39 +175,34 @@ } } }, - "/mem_cubes/{cube_id}/share": { - "post": { - "summary": "Share a cube with another user", - "description": "Share a cube with another user.", - "operationId": "share_cube_mem_cubes__cube_id__share_post", + "/product/scheduler/task_queue_status": { + "get": { + "tags": [ + "Server API" + ], + "summary": "Get scheduler task queue status", + "description": "Get scheduler task queue backlog/pending status for a user.", + "operationId": "scheduler_task_queue_status_product_scheduler_task_queue_status_get", "parameters": [ { - "name": "cube_id", - "in": "path", + "name": "user_id", + "in": "query", "required": true, "schema": { "type": "string", - "title": "Cube Id" - } + "description": "User ID whose queue status is requested", + "title": "User Id" + }, + "description": "User ID whose queue status is requested" } ], - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": 
"#/components/schemas/CubeShare" - } - } - } - }, "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/SimpleResponse" + "$ref": "#/components/schemas/TaskQueueResponse" } } } @@ -265,79 +220,42 @@ } } }, - "/memories": { + "/product/scheduler/wait": { "post": { - "summary": "Create memories", - "description": "Store new memories in a MemCube.", - "operationId": "add_memory_memories_post", - "requestBody": { - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/MemoryCreate" - } - } - } - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/SimpleResponse" - } - } + "tags": [ + "Server API" + ], + "summary": "Wait until scheduler is idle for a specific user", + "description": "Wait until scheduler is idle for a specific user.", + "operationId": "scheduler_wait_product_scheduler_wait_post", + "parameters": [ + { + "name": "user_name", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "User Name" } }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - }, - "get": { - "summary": "Get all memories", - "description": "Retrieve all memories from a MemCube.", - "operationId": "get_all_memories_memories_get", - "parameters": [ { - "name": "mem_cube_id", + "name": "timeout_seconds", "in": "query", "required": false, "schema": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "Mem Cube Id" + "type": "number", + "default": 120.0, + "title": "Timeout Seconds" } }, { - "name": "user_id", + "name": "poll_interval", "in": "query", "required": false, "schema": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "User Id" + "type": "number", + "default": 0.5, + "title": "Poll Interval" } } ], @@ -346,9 +264,7 @@ "description": "Successful Response", "content": { "application/json": { - "schema": { - "$ref": "#/components/schemas/MemoryResponse" - } + "schema": {} } } }, @@ -365,44 +281,42 @@ } } }, - "/memories/{mem_cube_id}/{memory_id}": { + "/product/scheduler/wait/stream": { "get": { - "summary": "Get a memory", - "description": "Retrieve a specific memory by ID from a MemCube.", - "operationId": "get_memory_memories__mem_cube_id___memory_id__get", + "tags": [ + "Server API" + ], + "summary": "Stream scheduler progress for a user", + "description": "Stream scheduler progress via Server-Sent Events (SSE).", + "operationId": "scheduler_wait_stream_product_scheduler_wait_stream_get", "parameters": [ { - "name": "mem_cube_id", - "in": "path", + "name": "user_name", + "in": "query", "required": true, "schema": { "type": "string", - "title": "Mem Cube Id" + "title": "User Name" } }, { - "name": "memory_id", - "in": "path", - "required": true, + "name": "timeout_seconds", + "in": "query", + "required": false, "schema": { - "type": "string", - "title": "Memory Id" + "type": "number", + "default": 120.0, + "title": "Timeout Seconds" } }, { - "name": "user_id", + "name": "poll_interval", "in": "query", "required": false, "schema": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "User Id" + "type": "number", + "default": 0.5, + "title": "Poll Interval" } } ], @@ -411,9 +325,7 @@ "description": "Successful Response", 
"content": { "application/json": { - "schema": { - "$ref": "#/components/schemas/MemoryResponse" - } + "schema": {} } } }, @@ -428,67 +340,32 @@ } } } - }, - "put": { - "summary": "Update a memory", - "description": "Update an existing memory in a MemCube.", - "operationId": "update_memory_memories__mem_cube_id___memory_id__put", - "parameters": [ - { - "name": "mem_cube_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "title": "Mem Cube Id" - } - }, - { - "name": "memory_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "title": "Memory Id" - } - }, - { - "name": "user_id", - "in": "query", - "required": false, - "schema": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "User Id" - } - } + } + }, + "/product/chat/complete": { + "post": { + "tags": [ + "Server API" ], + "summary": "Chat with MemOS (Complete Response)", + "description": "Chat with MemOS for a specific user. Returns complete response (non-streaming).\n\nThis endpoint uses the class-based ChatHandler.", + "operationId": "chat_complete_product_chat_complete_post", "requestBody": { - "required": true, "content": { "application/json": { "schema": { - "type": "object", - "additionalProperties": true, - "title": "Updated Memory" + "$ref": "#/components/schemas/APIChatCompleteRequest" } } - } + }, + "required": true }, "responses": { "200": { "description": "Successful Response", "content": { "application/json": { - "schema": { - "$ref": "#/components/schemas/SimpleResponse" - } + "schema": {} } } }, @@ -503,55 +380,32 @@ } } } - }, - "delete": { - "summary": "Delete a memory", - "description": "Delete a specific memory from a MemCube.", - "operationId": "delete_memory_memories__mem_cube_id___memory_id__delete", - "parameters": [ - { - "name": "mem_cube_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "title": "Mem Cube Id" - } - }, - { - "name": "memory_id", - "in": "path", - "required": true, - "schema": { - "type": "string", - "title": "Memory Id" + } + }, + "/product/chat/stream": { + "post": { + "tags": [ + "Server API" + ], + "summary": "Chat with MemOS", + "description": "Chat with MemOS for a specific user. Returns SSE stream.\n\nThis endpoint uses the class-based ChatHandler which internally\ncomposes SearchHandler and AddHandler for a clean architecture.", + "operationId": "chat_stream_product_chat_stream_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ChatRequest" + } } }, - { - "name": "user_id", - "in": "query", - "required": false, - "schema": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "User Id" - } - } - ], + "required": true + }, "responses": { "200": { "description": "Successful Response", "content": { "application/json": { - "schema": { - "$ref": "#/components/schemas/SimpleResponse" - } + "schema": {} } } }, @@ -568,16 +422,19 @@ } } }, - "/search": { + "/product/chat/stream/playground": { "post": { - "summary": "Search memories", - "description": "Search for memories across MemCubes.", - "operationId": "search_memories_search_post", + "tags": [ + "Server API" + ], + "summary": "Chat with MemOS playground", + "description": "Chat with MemOS for a specific user. 
Returns SSE stream.\n\nThis endpoint uses the class-based ChatHandler which internally\ncomposes SearchHandler and AddHandler for a clean architecture.", + "operationId": "chat_stream_playground_product_chat_stream_playground_post", "requestBody": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/SearchRequest" + "$ref": "#/components/schemas/ChatPlaygroundRequest" } } }, @@ -588,8 +445,131 @@ "description": "Successful Response", "content": { "application/json": { - "schema": { - "$ref": "#/components/schemas/SearchResponse" + "schema": {} + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/product/suggestions": { + "post": { + "tags": [ + "Server API" + ], + "summary": "Get suggestion queries", + "description": "Get suggestion queries for a specific user with language preference.", + "operationId": "get_suggestion_queries_product_suggestions_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SuggestionRequest" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SuggestionResponse" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/product/get_all": { + "post": { + "tags": [ + "Server API" + ], + "summary": "Get all memories for user", + "description": "Get all memories or subgraph for a specific user.\n\nIf search_query is provided, returns a subgraph based on the query.\nOtherwise, returns all memories of the specified type.", + "operationId": "get_all_memories_product_get_all_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetMemoryPlaygroundRequest" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MemoryResponse" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/product/get_memory": { + "post": { + "tags": [ + "Server API" + ], + "summary": "Get memories for user", + "operationId": "get_memories_product_get_memory_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetMemoryRequest" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetMemoryResponse" } } } @@ -607,107 +587,2013 @@ } } }, - "/memories/{mem_cube_id}": { - "delete": { - "summary": "Delete all memories", - "description": "Delete all memories from a MemCube.", - "operationId": "delete_all_memories_memories__mem_cube_id__delete", + "/product/get_memory/{memory_id}": { + "get": { + "tags": [ + "Server API" + ], + "summary": "Get memory by id", + "operationId": "get_memory_by_id_product_get_memory__memory_id__get", "parameters": [ { - "name": "mem_cube_id", + "name": "memory_id", "in": "path", "required": true, "schema": { "type": "string", - "title": "Mem 
Cube Id" + "title": "Memory Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetMemoryResponse" + } + } } }, - { - "name": "user_id", - "in": "query", - "required": false, - "schema": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" } - ], - "title": "User Id" + } + } + } + } + } + }, + "/product/delete_memory": { + "post": { + "tags": [ + "Server API" + ], + "summary": "Delete memories for user", + "operationId": "delete_memories_product_delete_memory_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DeleteMemoryRequest" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DeleteMemoryResponse" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/product/feedback": { + "post": { + "tags": [ + "Server API" + ], + "summary": "Feedback memories", + "description": "Feedback memories for a specific user.\n\nThis endpoint uses the class-based FeedbackHandler for better code organization.", + "operationId": "feedback_memories_product_feedback_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/APIFeedbackRequest" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MemoryResponse" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/product/get_user_names_by_memory_ids": { + "post": { + "tags": [ + "Server API" + ], + "summary": "Get user names by memory ids", + "description": "Get user names by memory ids.", + "operationId": "get_user_names_by_memory_ids_product_get_user_names_by_memory_ids_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetUserNamesByMemoryIdsRequest" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetUserNamesByMemoryIdsResponse" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } } } + } + } + }, + "/product/exist_mem_cube_id": { + "post": { + "tags": [ + "Server API" ], + "summary": "Check if mem cube id exists", + "description": "Check if mem cube id exists.", + "operationId": "exist_mem_cube_id_product_exist_mem_cube_id_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ExistMemCubeIdRequest" + } + } + }, + "required": true + }, "responses": { "200": { "description": "Successful Response", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/SimpleResponse" + "$ref": 
"#/components/schemas/ExistMemCubeIdResponse" } } - } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + } + }, + "components": { + "schemas": { + "APIADDRequest": { + "properties": { + "user_id": { + "type": "string", + "title": "User Id", + "description": "User ID" + }, + "session_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Session Id", + "description": "Session ID. If not provided, a default session will be used." + }, + "task_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Task Id", + "description": "Task ID for monitering async tasks" + }, + "writable_cube_ids": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Writable Cube Ids", + "description": "List of cube IDs user can write for multi-cube add" + }, + "async_mode": { + "type": "string", + "enum": [ + "async", + "sync" + ], + "title": "Async Mode", + "description": "Whether to add memory in async mode. Use 'async' to enqueue background add (non-blocking), or 'sync' to add memories in the current call. Default: 'async'.", + "default": "async" + }, + "mode": { + "anyOf": [ + { + "type": "string", + "enum": [ + "fast", + "fine" + ] + }, + { + "type": "null" + } + ], + "title": "Mode", + "description": "(Internal) Add mode used only when async_mode='sync'. If set to 'fast', the handler will use a fast add pipeline. Ignored when async_mode='async'." + }, + "custom_tags": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Custom Tags", + "description": "Custom tags for this add request, e.g. ['Travel', 'family']. These tags can be used as filters in search." + }, + "info": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Info", + "description": "Additional metadata for the add request. All keys can be used as filters in search. Example: {'agent_id': 'xxxxxx', 'app_id': 'xxxx', 'source_type': 'web', 'source_url': 'https://www.baidu.com', 'source_content': '西湖是杭州最著名的景点'}." + }, + "messages": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/ChatCompletionSystemMessageParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionUserMessageParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionAssistantMessageParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionToolMessageParam" + } + ] + }, + "type": "array" + }, + { + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/ChatCompletionContentPartTextParam" + }, + { + "$ref": "#/components/schemas/File" + } + ] + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Messages", + "description": "List of messages to store. Supports: - system / user / assistant messages with 'content' and 'chat_time'; - tool messages including: * tool_description (name, description, parameters), * tool_input (call_id, name, argument), * raw tool messages where content is str or list[str], * tool_output with structured output items (input_text / input_image / input_file, etc.). Also supports pure input items when there is no dialog." 
+ }, + "chat_history": { + "anyOf": [ + { + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/ChatCompletionSystemMessageParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionUserMessageParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionAssistantMessageParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionToolMessageParam" + } + ] + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Chat History", + "description": "Historical chat messages used internally by algorithms. If None, internal stored history will be used; if provided (even an empty list), this value will be used as-is." + }, + "is_feedback": { + "type": "boolean", + "title": "Is Feedback", + "description": "Whether this request represents user feedback. Default: False.", + "default": false + }, + "mem_cube_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Mem Cube Id", + "description": "(Deprecated) Target cube ID for this add request (optional for developer API)." + }, + "memory_content": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Memory Content", + "description": "(Deprecated) Plain memory content to store. Prefer using `messages`." + }, + "doc_path": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Doc Path", + "description": "(Deprecated / internal) Path to document to store." + }, + "source": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Source", + "description": "(Deprecated) Simple source tag of the memory. Prefer using `info.source_type` / `info.source_url`." + }, + "operation": { + "anyOf": [ + { + "items": { + "$ref": "#/components/schemas/PermissionDict" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Operation", + "description": "(Internal) Operation definitions for multi-cube write permissions." + } + }, + "type": "object", + "title": "APIADDRequest", + "description": "Request model for creating memories." 
+ }, + "APIChatCompleteRequest": { + "properties": { + "user_id": { + "type": "string", + "title": "User Id", + "description": "User ID" + }, + "query": { + "type": "string", + "title": "Query", + "description": "Chat query message" + }, + "readable_cube_ids": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Readable Cube Ids", + "description": "List of cube IDs user can read for multi-cube chat" + }, + "writable_cube_ids": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Writable Cube Ids", + "description": "List of cube IDs user can write for multi-cube chat" + }, + "history": { + "anyOf": [ + { + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/ChatCompletionSystemMessageParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionUserMessageParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionAssistantMessageParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionToolMessageParam" + } + ] + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "History", + "description": "Chat history" + }, + "mode": { + "$ref": "#/components/schemas/SearchMode", + "description": "search mode: fast, fine, or mixture", + "default": "fast" + }, + "system_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "System Prompt", + "description": "Base system prompt to use for chat" + }, + "top_k": { + "type": "integer", + "title": "Top K", + "description": "Number of results to return", + "default": 10 + }, + "session_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Session Id", + "description": "Session ID for soft-filtering memories" + }, + "include_preference": { + "type": "boolean", + "title": "Include Preference", + "description": "Whether to handle preference memory", + "default": true + }, + "pref_top_k": { + "type": "integer", + "title": "Pref Top K", + "description": "Number of preference results to return", + "default": 6 + }, + "model_name_or_path": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Model Name Or Path", + "description": "Model name to use for chat" + }, + "max_tokens": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Max Tokens", + "description": "Max tokens to generate" + }, + "temperature": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Temperature", + "description": "Temperature for sampling" + }, + "top_p": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Top P", + "description": "Top-p (nucleus) sampling parameter" + }, + "add_message_on_answer": { + "type": "boolean", + "title": "Add Message On Answer", + "description": "Add dialogs to memory after chat", + "default": true + }, + "filter": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Filter", + "description": "\n Filter for the memory, example:\n {\n \"`and` or `or`\": [\n {\"id\": \"uuid-xxx\"},\n {\"created_at\": {\"gt\": \"2024-01-01\"}},\n ]\n }\n " + }, + "internet_search": { + "type": "boolean", + "title": "Internet Search", + "description": "Whether to use internet search", + "default": false + }, + "threshold": { + "type": "number", + "title": "Threshold", + "description": "Threshold for filtering references", + "default": 0.5 + }, + 
"mem_cube_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Mem Cube Id", + "description": "Cube ID to use for chat" + }, + "moscube": { + "type": "boolean", + "title": "Moscube", + "description": "(Deprecated) Whether to use legacy MemOSCube pipeline", + "default": false + } + }, + "type": "object", + "required": [ + "user_id", + "query" + ], + "title": "APIChatCompleteRequest", + "description": "Request model for chat operations." + }, + "APIFeedbackRequest": { + "properties": { + "user_id": { + "type": "string", + "title": "User Id", + "description": "User ID" + }, + "session_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Session Id", + "description": "Session ID for soft-filtering memories", + "default": "default_session" + }, + "task_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Task Id", + "description": "Task ID for monitering async tasks" + }, + "history": { + "anyOf": [ + { + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/ChatCompletionSystemMessageParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionUserMessageParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionAssistantMessageParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionToolMessageParam" + } + ] + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "History", + "description": "Chat history" + }, + "retrieved_memory_ids": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Retrieved Memory Ids", + "description": "Retrieved memory ids at last turn" + }, + "feedback_content": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Feedback Content", + "description": "Feedback content to process" + }, + "feedback_time": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Feedback Time", + "description": "Feedback time" + }, + "writable_cube_ids": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Writable Cube Ids", + "description": "List of cube IDs user can write for multi-cube add" + }, + "async_mode": { + "type": "string", + "enum": [ + "sync", + "async" + ], + "title": "Async Mode", + "description": "feedback mode: sync or async", + "default": "async" + }, + "corrected_answer": { + "type": "boolean", + "title": "Corrected Answer", + "description": "Whether need return corrected answer", + "default": false + }, + "info": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Info", + "description": "Additional metadata for the add request. All keys can be used as filters in search. Example: {'agent_id': 'xxxxxx', 'app_id': 'xxxx', 'source_type': 'web', 'source_url': 'https://www.baidu.com', 'source_content': 'West Lake is the most famous scenic spot in Hangzhou'}." + }, + "mem_cube_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Mem Cube Id", + "description": "(Deprecated) Single cube ID to search in. Prefer `readable_cube_ids` for multi-cube search." + } + }, + "type": "object", + "required": [ + "user_id", + "history", + "feedback_content" + ], + "title": "APIFeedbackRequest", + "description": "Request model for processing feedback info." 
+ }, + "APISearchRequest": { + "properties": { + "query": { + "type": "string", + "title": "Query", + "description": "User search query" + }, + "user_id": { + "type": "string", + "title": "User Id", + "description": "User ID" + }, + "readable_cube_ids": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Readable Cube Ids", + "description": "List of cube IDs that are readable for this request. Required for algorithm-facing API; optional for developer-facing API." + }, + "mode": { + "$ref": "#/components/schemas/SearchMode", + "description": "Search mode: fast, fine, or mixture.", + "default": "fast" + }, + "session_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Session Id", + "description": "Session ID used as a soft signal to prioritize more relevant memories. Only used for weighting, not as a hard filter." + }, + "top_k": { + "type": "integer", + "minimum": 1.0, + "title": "Top K", + "description": "Number of textual memories to retrieve (top-K). Default: 10.", + "default": 10 + }, + "dedup": { + "anyOf": [ + { + "type": "string", + "enum": [ + "no", + "sim" + ] + }, + { + "type": "null" + } + ], + "title": "Dedup", + "description": "Optional dedup option for textual memories. Use 'no' for no dedup, 'sim' for similarity dedup. If None, default exact-text dedup is applied." + }, + "pref_top_k": { + "type": "integer", + "minimum": 0.0, + "title": "Pref Top K", + "description": "Number of preference memories to retrieve (top-K). Default: 6.", + "default": 6 + }, + "include_preference": { + "type": "boolean", + "title": "Include Preference", + "description": "Whether to retrieve preference memories along with general memories. If enabled, the system will automatically recall user preferences relevant to the query. Default: True.", + "default": true + }, + "search_tool_memory": { + "type": "boolean", + "title": "Search Tool Memory", + "description": "Whether to retrieve tool memories along with general memories. If enabled, the system will automatically recall tool memories relevant to the query. Default: True.", + "default": true + }, + "tool_mem_top_k": { + "type": "integer", + "minimum": 0.0, + "title": "Tool Mem Top K", + "description": "Number of tool memories to retrieve (top-K). Default: 6.", + "default": 6 + }, + "filter": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Filter", + "description": "\n Filter for the memory, example:\n {\n \"`and` or `or`\": [\n {\"id\": \"uuid-xxx\"},\n {\"created_at\": {\"gt\": \"2024-01-01\"}},\n ]\n }\n " + }, + "internet_search": { + "type": "boolean", + "title": "Internet Search", + "description": "Whether to enable internet search in addition to memory search. Primarily used by internal algorithms. Default: False.", + "default": false + }, + "threshold": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Threshold", + "description": "Internal similarity threshold for searching plaintext memories. If None, default thresholds will be applied." 
+ }, + "search_memory_type": { + "type": "string", + "title": "Search Memory Type", + "description": "Type of memory to search: All, WorkingMemory, LongTermMemory, UserMemory, OuterMemory, ToolSchemaMemory, ToolTrajectoryMemory", + "default": "All" + }, + "chat_history": { + "anyOf": [ + { + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/ChatCompletionSystemMessageParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionUserMessageParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionAssistantMessageParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionToolMessageParam" + } + ] + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Chat History", + "description": "Historical chat messages used internally by algorithms. If None, internal stored history may be used; if provided (even an empty list), this value will be used as-is." + }, + "mem_cube_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Mem Cube Id", + "description": "(Deprecated) Single cube ID to search in. Prefer `readable_cube_ids` for multi-cube search." + }, + "moscube": { + "type": "boolean", + "title": "Moscube", + "description": "(Deprecated / internal) Whether to use legacy MemOSCube path.", + "default": false + }, + "operation": { + "anyOf": [ + { + "items": { + "$ref": "#/components/schemas/PermissionDict" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Operation", + "description": "(Internal) Operation definitions for multi-cube read permissions." + }, + "source": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Source", + "description": "Source of the search query [plugin will router diff search]" + } + }, + "type": "object", + "required": [ + "query", + "user_id" + ], + "title": "APISearchRequest", + "description": "Request model for searching memories." + }, + "AllStatusResponse": { + "properties": { + "code": { + "type": "integer", + "title": "Code", + "description": "Response status code", + "default": 200 + }, + "message": { + "type": "string", + "title": "Message", + "default": "Scheduler status summary retrieved successfully" + }, + "data": { + "anyOf": [ + { + "$ref": "#/components/schemas/AllStatusResponseData" + }, + { + "type": "null" + } + ], + "description": "Response data" + } + }, + "type": "object", + "title": "AllStatusResponse", + "description": "Response model for full scheduler status operations." + }, + "AllStatusResponseData": { + "properties": { + "scheduler_summary": { + "$ref": "#/components/schemas/TaskSummary", + "description": "Aggregated status for scheduler-managed tasks" + }, + "all_tasks_summary": { + "$ref": "#/components/schemas/TaskSummary", + "description": "Aggregated status for all tracked tasks" + } + }, + "type": "object", + "required": [ + "scheduler_summary", + "all_tasks_summary" + ], + "title": "AllStatusResponseData", + "description": "Aggregated scheduler status metrics." 
+ }, + "Audio": { + "properties": { + "id": { + "type": "string", + "title": "Id" + } + }, + "type": "object", + "required": [ + "id" + ], + "title": "Audio" + }, + "ChatCompletionAssistantMessageParam": { + "properties": { + "role": { + "type": "string", + "const": "assistant", + "title": "Role" + }, + "audio": { + "anyOf": [ + { + "$ref": "#/components/schemas/Audio" + }, + { + "type": "null" + } + ] + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/ChatCompletionContentPartTextParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionContentPartRefusalParam" + } + ] + }, + "type": "array" + }, + { + "$ref": "#/components/schemas/ChatCompletionContentPartTextParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionContentPartRefusalParam" + }, + { + "type": "null" + } + ], + "title": "Content" + }, + "refusal": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Refusal" + }, + "tool_calls": { + "anyOf": [ + { + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/ChatCompletionMessageFunctionToolCallParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionMessageCustomToolCallParam" + } + ] + }, + "type": "array" + }, + { + "$ref": "#/components/schemas/ChatCompletionMessageFunctionToolCallParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionMessageCustomToolCallParam" + } + ], + "title": "Tool Calls" + }, + "chat_time": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Chat Time" + }, + "message_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Message Id" + } + }, + "type": "object", + "required": [ + "role" + ], + "title": "ChatCompletionAssistantMessageParam" + }, + "ChatCompletionContentPartImageParam": { + "properties": { + "image_url": { + "$ref": "#/components/schemas/ImageURL" + }, + "type": { + "type": "string", + "const": "image_url", + "title": "Type" + } + }, + "type": "object", + "required": [ + "image_url", + "type" + ], + "title": "ChatCompletionContentPartImageParam" + }, + "ChatCompletionContentPartInputAudioParam": { + "properties": { + "input_audio": { + "$ref": "#/components/schemas/InputAudio" + }, + "type": { + "type": "string", + "const": "input_audio", + "title": "Type" + } + }, + "type": "object", + "required": [ + "input_audio", + "type" + ], + "title": "ChatCompletionContentPartInputAudioParam" + }, + "ChatCompletionContentPartRefusalParam": { + "properties": { + "refusal": { + "type": "string", + "title": "Refusal" + }, + "type": { + "type": "string", + "const": "refusal", + "title": "Type" + } + }, + "type": "object", + "required": [ + "refusal", + "type" + ], + "title": "ChatCompletionContentPartRefusalParam" + }, + "ChatCompletionContentPartTextParam": { + "properties": { + "text": { + "type": "string", + "title": "Text" + }, + "type": { + "type": "string", + "const": "text", + "title": "Type" + } + }, + "type": "object", + "required": [ + "text", + "type" + ], + "title": "ChatCompletionContentPartTextParam" + }, + "ChatCompletionMessageCustomToolCallParam": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "custom": { + "$ref": "#/components/schemas/Custom" + }, + "type": { + "type": "string", + "const": "custom", + "title": "Type" + } + }, + "type": "object", + "required": [ + "id", + "custom", + "type" + ], + "title": "ChatCompletionMessageCustomToolCallParam" + }, + "ChatCompletionMessageFunctionToolCallParam": { + 
"properties": { + "id": { + "type": "string", + "title": "Id" + }, + "function": { + "$ref": "#/components/schemas/Function" + }, + "type": { + "type": "string", + "const": "function", + "title": "Type" + } + }, + "type": "object", + "required": [ + "id", + "function", + "type" + ], + "title": "ChatCompletionMessageFunctionToolCallParam" + }, + "ChatCompletionSystemMessageParam": { + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "$ref": "#/components/schemas/ChatCompletionContentPartTextParam" + }, + "type": "array" + }, + { + "$ref": "#/components/schemas/ChatCompletionContentPartTextParam" + } + ], + "title": "Content" + }, + "role": { + "type": "string", + "const": "system", + "title": "Role" + }, + "name": { + "type": "string", + "title": "Name" + }, + "chat_time": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Chat Time" + }, + "message_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Message Id" + } + }, + "type": "object", + "required": [ + "content", + "role" + ], + "title": "ChatCompletionSystemMessageParam" + }, + "ChatCompletionToolMessageParam": { + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/ChatCompletionContentPartTextParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionContentPartImageParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionContentPartInputAudioParam" + }, + { + "$ref": "#/components/schemas/File" + } + ] + }, + "type": "array" + }, + { + "$ref": "#/components/schemas/ChatCompletionContentPartTextParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionContentPartImageParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionContentPartInputAudioParam" + }, + { + "$ref": "#/components/schemas/File" + } + ], + "title": "Content" + }, + "role": { + "type": "string", + "const": "tool", + "title": "Role" + }, + "tool_call_id": { + "type": "string", + "title": "Tool Call Id" + }, + "chat_time": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Chat Time" + }, + "message_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Message Id" + } + }, + "type": "object", + "required": [ + "content", + "role", + "tool_call_id" + ], + "title": "ChatCompletionToolMessageParam" + }, + "ChatCompletionUserMessageParam": { + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/ChatCompletionContentPartTextParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionContentPartImageParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionContentPartInputAudioParam" + }, + { + "$ref": "#/components/schemas/File" + } + ] + }, + "type": "array" + }, + { + "$ref": "#/components/schemas/ChatCompletionContentPartTextParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionContentPartImageParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionContentPartInputAudioParam" + }, + { + "$ref": "#/components/schemas/File" + } + ], + "title": "Content" + }, + "role": { + "type": "string", + "const": "user", + "title": "Role" + }, + "name": { + "type": "string", + "title": "Name" + }, + "chat_time": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Chat Time" + }, + "message_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], 
+ "title": "Message Id" + } + }, + "type": "object", + "required": [ + "content", + "role" + ], + "title": "ChatCompletionUserMessageParam" + }, + "ChatPlaygroundRequest": { + "properties": { + "user_id": { + "type": "string", + "title": "User Id", + "description": "User ID" + }, + "query": { + "type": "string", + "title": "Query", + "description": "Chat query message" + }, + "readable_cube_ids": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Readable Cube Ids", + "description": "List of cube IDs user can read for multi-cube chat" + }, + "writable_cube_ids": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Writable Cube Ids", + "description": "List of cube IDs user can write for multi-cube chat" + }, + "history": { + "anyOf": [ + { + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/ChatCompletionSystemMessageParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionUserMessageParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionAssistantMessageParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionToolMessageParam" + } + ] + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "History", + "description": "Chat history" + }, + "mode": { + "$ref": "#/components/schemas/SearchMode", + "description": "search mode: fast, fine, or mixture", + "default": "fast" + }, + "system_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "System Prompt", + "description": "Base system prompt to use for chat" + }, + "top_k": { + "type": "integer", + "title": "Top K", + "description": "Number of results to return", + "default": 10 + }, + "session_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Session Id", + "description": "Session ID for soft-filtering memories" + }, + "include_preference": { + "type": "boolean", + "title": "Include Preference", + "description": "Whether to handle preference memory", + "default": true + }, + "pref_top_k": { + "type": "integer", + "title": "Pref Top K", + "description": "Number of preference results to return", + "default": 6 + }, + "model_name_or_path": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Model Name Or Path", + "description": "Model name to use for chat" + }, + "max_tokens": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Max Tokens", + "description": "Max tokens to generate" + }, + "temperature": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Temperature", + "description": "Temperature for sampling" + }, + "top_p": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Top P", + "description": "Top-p (nucleus) sampling parameter" + }, + "add_message_on_answer": { + "type": "boolean", + "title": "Add Message On Answer", + "description": "Add dialogs to memory after chat", + "default": true + }, + "filter": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Filter", + "description": "\n Filter for the memory, example:\n {\n \"`and` or `or`\": [\n {\"id\": \"uuid-xxx\"},\n {\"created_at\": {\"gt\": \"2024-01-01\"}},\n ]\n }\n " + }, + "internet_search": { + "type": "boolean", + "title": "Internet Search", + "description": "Whether to use internet search", + "default": false + }, + 
"threshold": { + "type": "number", + "title": "Threshold", + "description": "Threshold for filtering references", + "default": 0.5 + }, + "moscube": { + "type": "boolean", + "title": "Moscube", + "description": "(Deprecated) Whether to use legacy MemOSCube pipeline.", + "default": false + }, + "mem_cube_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Mem Cube Id", + "description": "(Deprecated) Single cube ID to use for chat. Prefer `readable_cube_ids` / `writable_cube_ids` for multi-cube chat." + }, + "beginner_guide_step": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Beginner Guide Step", + "description": "Whether to use beginner guide, option: [first, second]" + } + }, + "type": "object", + "required": [ + "user_id", + "query" + ], + "title": "ChatPlaygroundRequest", + "description": "Request model for chat operations in playground." + }, + "ChatRequest": { + "properties": { + "user_id": { + "type": "string", + "title": "User Id", + "description": "User ID" + }, + "query": { + "type": "string", + "title": "Query", + "description": "Chat query message" + }, + "readable_cube_ids": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Readable Cube Ids", + "description": "List of cube IDs user can read for multi-cube chat" + }, + "writable_cube_ids": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Writable Cube Ids", + "description": "List of cube IDs user can write for multi-cube chat" + }, + "history": { + "anyOf": [ + { + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/ChatCompletionSystemMessageParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionUserMessageParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionAssistantMessageParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionToolMessageParam" + } + ] + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "History", + "description": "Chat history" + }, + "mode": { + "$ref": "#/components/schemas/SearchMode", + "description": "search mode: fast, fine, or mixture", + "default": "fast" + }, + "system_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "System Prompt", + "description": "Base system prompt to use for chat" + }, + "top_k": { + "type": "integer", + "title": "Top K", + "description": "Number of results to return", + "default": 10 + }, + "session_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Session Id", + "description": "Session ID for soft-filtering memories" + }, + "include_preference": { + "type": "boolean", + "title": "Include Preference", + "description": "Whether to handle preference memory", + "default": true + }, + "pref_top_k": { + "type": "integer", + "title": "Pref Top K", + "description": "Number of preference results to return", + "default": 6 + }, + "model_name_or_path": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Model Name Or Path", + "description": "Model name to use for chat" + }, + "max_tokens": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Max Tokens", + "description": "Max tokens to generate" }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } - } - } - } - } - } - 
}, - "/chat": { - "post": { - "summary": "Chat with MemOS", - "description": "Chat with the MemOS system.", - "operationId": "chat_chat_post", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ChatRequest" + "temperature": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" } - } + ], + "title": "Temperature", + "description": "Temperature for sampling" }, - "required": true - }, - "responses": { - "200": { - "description": "Successful Response", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ChatResponse" - } + "top_p": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" } - } + ], + "title": "Top P", + "description": "Top-p (nucleus) sampling parameter" }, - "422": { - "description": "Validation Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HTTPValidationError" - } + "add_message_on_answer": { + "type": "boolean", + "title": "Add Message On Answer", + "description": "Add dialogs to memory after chat", + "default": true + }, + "filter": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" } - } - } - } - } - } - }, - "components": { - "schemas": { - "ChatRequest": { - "properties": { - "user_id": { + ], + "title": "Filter", + "description": "\n Filter for the memory, example:\n {\n \"`and` or `or`\": [\n {\"id\": \"uuid-xxx\"},\n {\"created_at\": {\"gt\": \"2024-01-01\"}},\n ]\n }\n " + }, + "internet_search": { + "type": "boolean", + "title": "Internet Search", + "description": "Whether to use internet search", + "default": false + }, + "threshold": { + "type": "number", + "title": "Threshold", + "description": "Threshold for filtering references", + "default": 0.5 + }, + "moscube": { + "type": "boolean", + "title": "Moscube", + "description": "(Deprecated) Whether to use legacy MemOSCube pipeline.", + "default": false + }, + "mem_cube_id": { "anyOf": [ { "type": "string" @@ -716,42 +2602,112 @@ "type": "null" } ], - "title": "User Id", - "description": "User ID for the request", - "example": "user123" + "title": "Mem Cube Id", + "description": "(Deprecated) Single cube ID to use for chat. Prefer `readable_cube_ids` / `writable_cube_ids` for multi-cube chat." + } + }, + "type": "object", + "required": [ + "user_id", + "query" + ], + "title": "ChatRequest", + "description": "Request model for chat operations.\n\nThis model is used as the algorithm-facing chat interface, while also\nremaining backward compatible with older developer-facing APIs." + }, + "Custom": { + "properties": { + "input": { + "type": "string", + "title": "Input" }, - "query": { + "name": { "type": "string", - "title": "Query", - "description": "Chat query message.", - "example": "What is the latest update?" 
+ "title": "Name" } }, "type": "object", "required": [ - "query" + "input", + "name" ], - "title": "ChatRequest" + "title": "Custom" + }, + "DeleteMemoryRequest": { + "properties": { + "writable_cube_ids": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Writable Cube Ids", + "description": "Writable cube IDs" + }, + "memory_ids": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Memory Ids", + "description": "Memory IDs" + }, + "file_ids": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "File Ids", + "description": "File IDs" + }, + "filter": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Filter", + "description": "Filter for the memory" + } + }, + "type": "object", + "title": "DeleteMemoryRequest", + "description": "Request model for deleting memories." }, - "ChatResponse": { + "DeleteMemoryResponse": { "properties": { "code": { "type": "integer", "title": "Code", "description": "Response status code", - "default": 200, - "example": 200 + "default": 200 }, "message": { "type": "string", "title": "Message", - "description": "Response message", - "example": "Operation successful" + "description": "Response message" }, "data": { "anyOf": [ { - "type": "string" + "additionalProperties": true, + "type": "object" }, { "type": "null" @@ -765,26 +2721,49 @@ "required": [ "message" ], - "title": "ChatResponse", - "description": "Response model for chat operations." + "title": "DeleteMemoryResponse", + "description": "Response model for deleting memories." + }, + "ExistMemCubeIdRequest": { + "properties": { + "mem_cube_id": { + "type": "string", + "title": "Mem Cube Id", + "description": "Mem cube ID" + } + }, + "type": "object", + "required": [ + "mem_cube_id" + ], + "title": "ExistMemCubeIdRequest", + "description": "Request model for checking if mem cube id exists." }, - "ConfigResponse": { + "ExistMemCubeIdResponse": { "properties": { "code": { "type": "integer", "title": "Code", "description": "Response status code", - "default": 200, - "example": 200 + "default": 200 }, "message": { "type": "string", "title": "Message", - "description": "Response message", - "example": "Operation successful" + "description": "Response message" }, "data": { - "type": "null", + "anyOf": [ + { + "additionalProperties": { + "type": "boolean" + }, + "type": "object" + }, + { + "type": "null" + } + ], "title": "Data", "description": "Response data" } @@ -793,247 +2772,124 @@ "required": [ "message" ], - "title": "ConfigResponse", - "description": "Response model for configuration endpoint." + "title": "ExistMemCubeIdResponse", + "description": "Response model for checking if mem cube id exists." 
}, - "CubeShare": { + "File": { "properties": { - "user_id": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "User Id", - "description": "User ID for the request", - "example": "user123" + "file": { + "$ref": "#/components/schemas/FileFile" }, - "target_user_id": { + "type": { "type": "string", - "title": "Target User Id", - "description": "Target user ID to share with", - "example": "user456" + "const": "file", + "title": "Type" } }, "type": "object", "required": [ - "target_user_id" + "file", + "type" ], - "title": "CubeShare" + "title": "File" }, - "HTTPValidationError": { + "FileFile": { "properties": { - "detail": { - "items": { - "$ref": "#/components/schemas/ValidationError" - }, - "type": "array", - "title": "Detail" + "file_data": { + "type": "string", + "title": "File Data" + }, + "file_id": { + "type": "string", + "title": "File Id" + }, + "filename": { + "type": "string", + "title": "Filename" } }, "type": "object", - "title": "HTTPValidationError" + "title": "FileFile" }, - "LLMConfigFactory": { + "Function": { "properties": { - "model_schema": { + "arguments": { "type": "string", - "title": "Model Schema", - "description": "Schema for configuration. This value will be automatically set.", - "default": "NOT_SET" + "title": "Arguments" }, - "backend": { + "name": { "type": "string", - "title": "Backend", - "description": "Backend for LLM" - }, - "config": { - "additionalProperties": true, - "type": "object", - "title": "Config", - "description": "Configuration for the LLM backend" + "title": "Name" } }, - "additionalProperties": false, "type": "object", "required": [ - "backend", - "config" + "arguments", + "name" ], - "title": "LLMConfigFactory", - "description": "Factory class for creating LLM configurations." + "title": "Function" }, - "MOSConfig": { + "GetMemoryPlaygroundRequest": { "properties": { - "model_schema": { - "type": "string", - "title": "Model Schema", - "description": "Schema for configuration. This value will be automatically set.", - "default": "NOT_SET" - }, "user_id": { "type": "string", "title": "User Id", - "description": "User ID for the MOS. This is used to distinguish between different users' memories.", - "default": "root" + "description": "User ID" }, - "session_id": { + "memory_type": { "type": "string", - "title": "Session Id", - "description": "Session ID for the MOS. 
This is used to distinguish between different dialogue", - "default": "8dcdbd62-c231-4678-a3ae-0946b7d9ce14" - }, - "chat_model": { - "$ref": "#/components/schemas/LLMConfigFactory", - "description": "LLM configuration for the chat model in the MOS" - }, - "mem_reader": { - "$ref": "#/components/schemas/MemReaderConfigFactory", - "description": "MemReader configuration for the MOS" - }, - "mem_scheduler": { - "anyOf": [ - { - "$ref": "#/components/schemas/SchedulerConfigFactory" - }, - { - "type": "null" - } + "enum": [ + "text_mem", + "act_mem", + "param_mem", + "para_mem" ], - "description": "Memory scheduler configuration for managing memory operations" - }, - "user_manager": { - "$ref": "#/components/schemas/UserManagerConfigFactory", - "description": "User manager configuration for database operations" - }, - "max_turns_window": { - "type": "integer", - "title": "Max Turns Window", - "description": "Maximum number of turns to keep in the conversation history", - "default": 15 - }, - "top_k": { - "type": "integer", - "title": "Top K", - "description": "Maximum number of memories to retrieve for each query", - "default": 5 + "title": "Memory Type", + "description": "Memory type" }, - "enable_textual_memory": { - "type": "boolean", - "title": "Enable Textual Memory", - "description": "Enable textual memory for the MemChat", - "default": true - }, - "enable_activation_memory": { - "type": "boolean", - "title": "Enable Activation Memory", - "description": "Enable activation memory for the MemChat", - "default": false - }, - "enable_parametric_memory": { - "type": "boolean", - "title": "Enable Parametric Memory", - "description": "Enable parametric memory for the MemChat", - "default": false - }, - "enable_preference_memory": { - "type": "boolean", - "title": "Enable Preference Memory", - "description": "Enable preference memory for the MemChat", - "default": false - }, - "enable_mem_scheduler": { - "type": "boolean", - "title": "Enable Mem Scheduler", - "description": "Enable memory scheduler for automated memory management", - "default": false - }, - "PRO_MODE": { - "type": "boolean", - "title": "Pro Mode", - "description": "Enable PRO mode for complex query decomposition", - "default": false - } - }, - "additionalProperties": false, - "type": "object", - "title": "MOSConfig" - }, - "MemCubeRegister": { - "properties": { - "user_id": { + "mem_cube_ids": { "anyOf": [ { - "type": "string" + "items": { + "type": "string" + }, + "type": "array" }, { "type": "null" } ], - "title": "User Id", - "description": "User ID for the request", - "example": "user123" - }, - "mem_cube_name_or_path": { - "type": "string", - "title": "Mem Cube Name Or Path", - "description": "Name or path of the MemCube to register.", - "example": "/path/to/cube" + "title": "Mem Cube Ids", + "description": "Cube IDs" }, - "mem_cube_id": { + "search_query": { "anyOf": [ { "type": "string" }, - { - "type": "null" - } - ], - "title": "Mem Cube Id", - "description": "ID for the MemCube", - "example": "cube123" - } - }, - "type": "object", - "required": [ - "mem_cube_name_or_path" - ], - "title": "MemCubeRegister" - }, - "MemReaderConfigFactory": { - "properties": { - "model_schema": { - "type": "string", - "title": "Model Schema", - "description": "Schema for configuration. 
This value will be automatically set.", - "default": "NOT_SET" - }, - "backend": { - "type": "string", - "title": "Backend", - "description": "Backend for MemReader" - }, - "config": { - "additionalProperties": true, - "type": "object", - "title": "Config", - "description": "Configuration for the MemReader backend" + { + "type": "null" + } + ], + "title": "Search Query", + "description": "Search query" } }, - "additionalProperties": false, "type": "object", "required": [ - "backend", - "config" + "user_id", + "memory_type" ], - "title": "MemReaderConfigFactory", - "description": "Factory class for creating MemReader configurations." + "title": "GetMemoryPlaygroundRequest", + "description": "Request model for getting memories." }, - "MemoryCreate": { + "GetMemoryRequest": { "properties": { + "mem_cube_id": { + "type": "string", + "title": "Mem Cube Id", + "description": "Cube ID" + }, "user_id": { "anyOf": [ { @@ -1044,92 +2900,124 @@ } ], "title": "User Id", - "description": "User ID for the request", - "example": "user123" + "description": "User ID" }, - "messages": { - "anyOf": [ - { - "items": { - "$ref": "#/components/schemas/Message" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "title": "Messages", - "description": "List of messages to store.", - "example": [ - { - "content": "Hello", - "role": "user" - } - ] + "include_preference": { + "type": "boolean", + "title": "Include Preference", + "description": "Whether to handle preference memory", + "default": true }, - "mem_cube_id": { + "page": { "anyOf": [ { - "type": "string" + "type": "integer" }, { "type": "null" } ], - "title": "Mem Cube Id", - "description": "ID of the memory cube", - "example": "cube123" + "title": "Page", + "description": "Page number (starts from 1). If None, exports all data without pagination." }, - "memory_content": { + "page_size": { "anyOf": [ { - "type": "string" + "type": "integer" }, { "type": "null" } ], - "title": "Memory Content", - "description": "Content to store as memory", - "example": "This is a memory content" + "title": "Page Size", + "description": "Number of items per page. If None, exports all data without pagination." + } + }, + "type": "object", + "required": [ + "mem_cube_id" + ], + "title": "GetMemoryRequest", + "description": "Request model for getting memories." + }, + "GetMemoryResponse": { + "properties": { + "code": { + "type": "integer", + "title": "Code", + "description": "Response status code", + "default": 200 }, - "doc_path": { + "message": { + "type": "string", + "title": "Message", + "description": "Response message" + }, + "data": { "anyOf": [ { - "type": "string" + "additionalProperties": true, + "type": "object" }, { "type": "null" } ], - "title": "Doc Path", - "description": "Path to document to store", - "example": "/path/to/document.txt" + "title": "Data", + "description": "Response data" } }, "type": "object", - "title": "MemoryCreate" + "required": [ + "message" + ], + "title": "GetMemoryResponse", + "description": "Response model for getting memories." }, - "MemoryResponse": { + "GetUserNamesByMemoryIdsRequest": { + "properties": { + "memory_ids": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Memory Ids", + "description": "Memory IDs" + } + }, + "type": "object", + "required": [ + "memory_ids" + ], + "title": "GetUserNamesByMemoryIdsRequest", + "description": "Request model for getting user names by memory ids." 
+ }, + "GetUserNamesByMemoryIdsResponse": { "properties": { "code": { "type": "integer", "title": "Code", "description": "Response status code", - "default": 200, - "example": 200 + "default": 200 }, "message": { "type": "string", "title": "Message", - "description": "Response message", - "example": "Operation successful" + "description": "Response message" }, "data": { "anyOf": [ { - "additionalProperties": true, + "additionalProperties": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, "type": "object" }, { @@ -1144,106 +3032,133 @@ "required": [ "message" ], - "title": "MemoryResponse", - "description": "Response model for memory operations." + "title": "GetUserNamesByMemoryIdsResponse", + "description": "Response model for getting user names by memory ids." }, - "Message": { + "HTTPValidationError": { "properties": { - "role": { + "detail": { + "items": { + "$ref": "#/components/schemas/ValidationError" + }, + "type": "array", + "title": "Detail" + } + }, + "type": "object", + "title": "HTTPValidationError" + }, + "ImageURL": { + "properties": { + "url": { "type": "string", - "title": "Role", - "description": "Role of the message (user or assistant).", - "example": "user" + "title": "Url" }, - "content": { + "detail": { "type": "string", - "title": "Content", - "description": "Message content.", - "example": "Hello, how can I help you?" + "enum": [ + "auto", + "low", + "high" + ], + "title": "Detail" } }, "type": "object", "required": [ - "role", - "content" + "url" ], - "title": "Message" + "title": "ImageURL" }, - "SchedulerConfigFactory": { + "InputAudio": { "properties": { - "model_schema": { + "data": { "type": "string", - "title": "Model Schema", - "description": "Schema for configuration. This value will be automatically set.", - "default": "NOT_SET" + "title": "Data" }, - "backend": { + "format": { "type": "string", - "title": "Backend", - "description": "Backend for scheduler" - }, - "config": { - "additionalProperties": true, - "type": "object", - "title": "Config", - "description": "Configuration for the scheduler backend" + "enum": [ + "wav", + "mp3" + ], + "title": "Format" } }, - "additionalProperties": false, "type": "object", "required": [ - "backend", - "config" + "data", + "format" ], - "title": "SchedulerConfigFactory", - "description": "Factory class for creating scheduler configurations." + "title": "InputAudio" }, - "SearchRequest": { + "MemoryResponse": { "properties": { - "user_id": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "title": "User Id", - "description": "User ID for the request", - "example": "user123" + "code": { + "type": "integer", + "title": "Code", + "description": "Response status code", + "default": 200 }, - "query": { + "message": { "type": "string", - "title": "Query", - "description": "Search query.", - "example": "How to implement a feature?" + "title": "Message", + "description": "Response message" }, - "install_cube_ids": { + "data": { "anyOf": [ { - "items": { - "type": "string" - }, + "items": {}, "type": "array" }, { "type": "null" } ], - "title": "Install Cube Ids", - "description": "List of cube IDs to search in", - "example": [ - "cube123", - "cube456" - ] + "title": "Data", + "description": "Response data" } }, "type": "object", "required": [ - "query" + "message" + ], + "title": "MemoryResponse", + "description": "Response model for memory operations." 
+ },
+ "PermissionDict": {
+ "properties": {
+ "permissions": {
+ "items": {
+ "type": "string",
+ "enum": [
+ "read",
+ "write",
+ "delete",
+ "execute"
+ ]
+ },
+ "type": "array",
+ "title": "Permissions"
+ },
+ "mem_cube_id": {
+ "type": "string",
+ "title": "Mem Cube Id"
+ }
+ },
+ "type": "object",
+ "title": "PermissionDict",
+ "description": "Typed dictionary describing per-cube permission grants."
+ },
+ "SearchMode": {
+ "type": "string",
+ "enum": [
+ "fast",
+ "fine",
+ "mixture"
 ],
- "title": "SearchRequest"
+ "title": "SearchMode",
+ "description": "Enumeration for search modes."
 },
 "SearchResponse": {
 "properties": {
@@ -1251,14 +3166,12 @@
 "type": "integer",
 "title": "Code",
 "description": "Response status code",
- "default": 200,
- "example": 200
+ "default": 200
 },
 "message": {
 "type": "string",
 "title": "Message",
- "description": "Response message",
- "example": "Operation successful"
+ "description": "Response message"
 },
 "data": {
 "anyOf": [
@@ -1281,89 +3194,165 @@
 "title": "SearchResponse",
 "description": "Response model for search operations."
 },
- "SimpleResponse": {
+ "StatusResponse": {
 "properties": {
 "code": {
 "type": "integer",
 "title": "Code",
 "description": "Response status code",
- "default": 200,
- "example": 200
+ "default": 200
 },
 "message": {
 "type": "string",
 "title": "Message",
- "description": "Response message",
- "example": "Operation successful"
+ "default": "Memory get status successfully"
 },
 "data": {
- "type": "null",
+ "anyOf": [
+ {
+ "items": {
+ "$ref": "#/components/schemas/StatusResponseItem"
+ },
+ "type": "array"
+ },
+ {
+ "type": "null"
+ }
+ ],
 "title": "Data",
 "description": "Response data"
 }
 },
 "type": "object",
+ "title": "StatusResponse",
+ "description": "Response model for scheduler status operations."
+ },
+ "StatusResponseItem": {
+ "properties": {
+ "task_id": {
+ "type": "string",
+ "title": "Task Id",
+ "description": "The ID of the task"
+ },
+ "status": {
+ "type": "string",
+ "enum": [
+ "in_progress",
+ "completed",
+ "waiting",
+ "failed",
+ "cancelled"
+ ],
+ "title": "Status",
+ "description": "The current status of the task"
+ }
+ },
+ "type": "object",
 "required": [
- "message"
+ "task_id",
+ "status"
 ],
- "title": "SimpleResponse",
- "description": "Simple response model for operations without data return."
+ "title": "StatusResponseItem",
+ "description": "Individual task status item."
}, - "UserCreate": { + "SuggestionRequest": { "properties": { "user_id": { "type": "string", "title": "User Id", - "description": "User ID", - "example": "user123" + "description": "User ID" }, - "user_name": { + "mem_cube_id": { + "type": "string", + "title": "Mem Cube Id", + "description": "Cube ID" + }, + "language": { + "type": "string", + "enum": [ + "zh", + "en" + ], + "title": "Language", + "description": "Language for suggestions", + "default": "zh" + }, + "message": { "anyOf": [ { "type": "string" }, + { + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/ChatCompletionSystemMessageParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionUserMessageParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionAssistantMessageParam" + }, + { + "$ref": "#/components/schemas/ChatCompletionToolMessageParam" + } + ] + }, + "type": "array" + }, + { + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/ChatCompletionContentPartTextParam" + }, + { + "$ref": "#/components/schemas/File" + } + ] + }, + "type": "array" + }, { "type": "null" } ], - "title": "User Name", - "description": "Name of the user", - "example": "john_doe" - }, - "role": { - "type": "string", - "title": "Role", - "description": "Role of the user", - "default": "user", - "example": "user" + "title": "Message", + "description": "List of messages to store." } }, "type": "object", "required": [ - "user_id" + "user_id", + "mem_cube_id" ], - "title": "UserCreate" + "title": "SuggestionRequest", + "description": "Request model for getting suggestion queries." }, - "UserListResponse": { + "SuggestionResponse": { "properties": { "code": { "type": "integer", "title": "Code", "description": "Response status code", - "default": 200, - "example": 200 + "default": 200 }, "message": { "type": "string", "title": "Message", - "description": "Response message", - "example": "Operation successful" + "description": "Response message" }, "data": { "anyOf": [ { - "items": {}, - "type": "array" + "additionalProperties": { + "items": { + "type": "string" + }, + "type": "array" + }, + "type": "object" }, { "type": "null" @@ -1377,63 +3366,170 @@ "required": [ "message" ], - "title": "UserListResponse", - "description": "Response model for user list operations." + "title": "SuggestionResponse", + "description": "Response model for suggestion operations." 
}, - "UserManagerConfigFactory": { + "TaskQueueData": { "properties": { - "backend": { + "user_id": { "type": "string", - "title": "Backend", - "description": "Backend for user manager", - "default": "sqlite" + "title": "User Id", + "description": "User ID the query is scoped to" + }, + "user_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "User Name", + "description": "User name if available" + }, + "mem_cube_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Mem Cube Id", + "description": "MemCube ID if a single cube is targeted; otherwise None" + }, + "stream_keys": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Stream Keys", + "description": "Matched Redis stream keys for this user" + }, + "users_count": { + "type": "integer", + "title": "Users Count", + "description": "Distinct users currently present in queue streams" + }, + "pending_tasks_count": { + "type": "integer", + "title": "Pending Tasks Count", + "description": "Count of pending (delivered, not acked) tasks" + }, + "remaining_tasks_count": { + "type": "integer", + "title": "Remaining Tasks Count", + "description": "Count of enqueued tasks (xlen)" + }, + "pending_tasks_detail": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Pending Tasks Detail", + "description": "Per-stream pending counts, formatted as '{stream_key}:{count}'" }, - "config": { - "additionalProperties": true, - "type": "object", - "title": "Config", - "description": "Configuration for the user manager backend" + "remaining_tasks_detail": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Remaining Tasks Detail", + "description": "Per-stream remaining counts, formatted as '{stream_key}:{count}'" } }, "type": "object", - "title": "UserManagerConfigFactory", - "description": "Factory for user manager configurations." + "required": [ + "user_id", + "stream_keys", + "users_count", + "pending_tasks_count", + "remaining_tasks_count", + "pending_tasks_detail", + "remaining_tasks_detail" + ], + "title": "TaskQueueData", + "description": "Queue-level metrics for scheduler tasks." }, - "UserResponse": { + "TaskQueueResponse": { "properties": { "code": { "type": "integer", "title": "Code", "description": "Response status code", - "default": 200, - "example": 200 + "default": 200 }, "message": { "type": "string", "title": "Message", - "description": "Response message", - "example": "Operation successful" + "default": "Scheduler task queue status retrieved successfully" }, "data": { "anyOf": [ { - "additionalProperties": true, - "type": "object" + "$ref": "#/components/schemas/TaskQueueData" }, { "type": "null" } ], - "title": "Data", "description": "Response data" } }, "type": "object", - "required": [ - "message" - ], - "title": "UserResponse", - "description": "Response model for user operations." + "title": "TaskQueueResponse", + "description": "Response model for scheduler task queue status." 
+ }, + "TaskSummary": { + "properties": { + "waiting": { + "type": "integer", + "title": "Waiting", + "description": "Number of tasks waiting to run", + "default": 0 + }, + "in_progress": { + "type": "integer", + "title": "In Progress", + "description": "Number of tasks currently running", + "default": 0 + }, + "pending": { + "type": "integer", + "title": "Pending", + "description": "Number of tasks fetched by workers but not yet acknowledged", + "default": 0 + }, + "completed": { + "type": "integer", + "title": "Completed", + "description": "Number of tasks completed", + "default": 0 + }, + "failed": { + "type": "integer", + "title": "Failed", + "description": "Number of tasks failed", + "default": 0 + }, + "cancelled": { + "type": "integer", + "title": "Cancelled", + "description": "Number of tasks cancelled", + "default": 0 + }, + "total": { + "type": "integer", + "title": "Total", + "description": "Total number of tasks counted", + "default": 0 + } + }, + "type": "object", + "title": "TaskSummary", + "description": "Aggregated counts of tasks by status." }, "ValidationError": { "properties": { diff --git a/examples/api/product_api.py b/examples/api/product_api.py deleted file mode 100644 index e364ce483..000000000 --- a/examples/api/product_api.py +++ /dev/null @@ -1,170 +0,0 @@ -#!/usr/bin/env python3 -""" -Simulate full MemOS Product API workflow: -1. Register user -2. Add memory -3. Search memory -4. Chat (stream) -""" - -import json - -import requests - - -BASE_URL = "http://0.0.0.0:8001/product" -HEADERS = {"Content-Type": "application/json"} - -index = "24" -USER_ID = f"memos_user_id_{index}" -USER_NAME = f"memos_user_alice_{index}" -MEM_CUBE_ID = f"memos_cube_id_{index}" -SESSION_ID = f"memos_session_id_{index}" -SESSION_ID2 = f"memos_session_id_{index}_s2" - - -def register_user(): - url = f"{BASE_URL}/users/register" - data = { - "user_id": USER_ID, - "user_name": USER_NAME, - "interests": "memory,retrieval,test", - "mem_cube_id": MEM_CUBE_ID, - } - print(f"[*] Registering user {USER_ID} ...") - resp = requests.post(url, headers=HEADERS, data=json.dumps(data), timeout=30) - print(resp.status_code, resp.text) - return resp.json() - - -def add_memory(): - url = f"{BASE_URL}/add" - data = { - "user_id": USER_ID, - "memory_content": "今天我在测试 MemOS 的记忆添加与检索流程。", - "messages": [{"role": "user", "content": "我今天在做系统测试"}], - "doc_path": None, - "mem_cube_id": MEM_CUBE_ID, - "source": "test_script", - "user_profile": False, - "session_id": SESSION_ID, - } - print("[*] Adding memory ...") - resp = requests.post(url, headers=HEADERS, data=json.dumps(data), timeout=30) - print(resp.status_code, resp.text) - return resp.json() - - -def search_memory(query="系统测试"): - url = f"{BASE_URL}/search" - data = { - "user_id": USER_ID, - "query": query, - "mem_cube_id": MEM_CUBE_ID, - "top_k": 5, - "session_id": SESSION_ID, - } - print("[*] Searching memory ...") - resp = requests.post(url, headers=HEADERS, data=json.dumps(data), timeout=30) - print(resp.status_code, resp.text) - return resp.json() - - -def chat_stream(query: str, session_id: str, history: list | None = None): - url = f"{BASE_URL}/chat" - data = { - "user_id": USER_ID, - "query": query, - "mem_cube_id": MEM_CUBE_ID, - "history": history, - "internet_search": False, - "moscube": False, - "session_id": session_id, - } - - print("[*] Starting streaming chat ...") - - with requests.post(url, headers=HEADERS, data=json.dumps(data), stream=True) as resp: - for raw_line in resp.iter_lines(): - if not raw_line: - continue - line = 
raw_line.decode("utf-8", errors="ignore") - - payload = line.removeprefix("data: ").strip() - if payload == "[DONE]": - print("[done]") - break - - try: - msg = json.loads(payload) - msg_type = msg.get("type") - msg_data = msg.get("data") or msg.get("content") - - if msg_type == "text": - print(msg_data, end="", flush=True) - elif msg_type == "reference": - print(f"\n[参考记忆] {msg_data}") - elif msg_type == "status": - pass - elif msg_type == "suggestion": - print(f"\n[建议] {msg_data}") - elif msg_type == "end": - print("\n[✅ Chat End]") - else: - print(f"\n[{msg_type}] {msg_data}") - except Exception: - try: - print(payload.encode("latin-1").decode("utf-8"), end="") - except Exception: - print(payload) - - -def feedback_memory(feedback_content: str, history: list | None = None): - url = f"{BASE_URL}/feedback" - data = { - "user_id": USER_ID, - "writable_cube_ids": [MEM_CUBE_ID], - "history": history, - "feedback_content": feedback_content, - "async_mode": "sync", - "corrected_answer": "false", - } - - print("[*] Feedbacking memory ...") - resp = requests.post(url, headers=HEADERS, data=json.dumps(data), timeout=30) - print(resp.status_code, resp.text) - return resp.json() - - -if __name__ == "__main__": - print("===== STEP 1: Register User =====") - register_user() - - print("\n===== STEP 2: Add Memory =====") - add_memory() - - print("\n===== STEP 3: Search Memory =====") - search_memory() - - print("\n===== STEP 4: Stream Chat =====") - chat_stream("我很开心,我今天吃了好吃的拉面", SESSION_ID, history=[]) - chat_stream( - "我刚和你说什么", - SESSION_ID, - history=[ - {"role": "user", "content": "我很开心,我今天吃了好吃的拉面"}, - {"role": "assistant", "content": "🉑"}, - ], - ) - - print("\n===== STEP 5: Stream Chat =====") - chat_stream("我刚和你说什么了呢", SESSION_ID2, history=[]) - - print("\n===== STEP 6: Feedback Memory =====") - feedback_memory( - feedback_content="错啦,我今天没有吃拉面", - history=[ - {"role": "user", "content": "我刚和你说什么了呢"}, - {"role": "assistant", "content": "你今天吃了好吃的拉面"}, - ], - ) diff --git a/examples/api/server_router_api.py b/examples/api/server_router_api.py index e7c7dc558..ffa83dd2c 100644 --- a/examples/api/server_router_api.py +++ b/examples/api/server_router_api.py @@ -34,7 +34,7 @@ # Global config # --------------------------------------------------------------------------- -BASE_URL = "http://0.0.0.0:8001/product" +BASE_URL = "http://127.0.0.1:8000/product" HEADERS = {"Content-Type": "application/json"} # You can change these identifiers if your backend requires pre-registered users/cubes. 
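For local testing it can be convenient to override the base URL and identifiers through environment variables instead of editing the file; a minimal sketch under that assumption (the MEMOS_BASE_URL and MEMOS_USER_ID variable names and the fallback user ID are illustrative, not part of this patch):

import os

# Hypothetical env-var overrides; fall back to the example defaults.
BASE_URL = os.getenv("MEMOS_BASE_URL", "http://127.0.0.1:8000/product")
USER_ID = os.getenv("MEMOS_USER_ID", "memos_user_1")  # illustrative default
HEADERS = {"Content-Type": "application/json"}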
@@ -88,7 +88,12 @@ def example_01_string_message_minimal(): payload = { "user_id": USER_ID, "writable_cube_ids": [MEM_CUBE_ID], - "messages": "今天心情不错,喝了咖啡。", + "messages": [ + { + "role": "user", + "content": "今天心情不错,喝了咖啡。", + } + ], } call_add_api("01_string_message_minimal", payload) @@ -108,12 +113,7 @@ def example_02_standard_chat_triplet(): "messages": [ { "role": "system", - "content": [ - { - "type": "text", - "text": "You are a helpful travel assistant.", - } - ], + "content": "You are a helpful travel assistant.", "chat_time": "2025-11-24T10:00:00Z", "message_id": "sys-1", }, @@ -160,17 +160,7 @@ def example_03_assistant_with_tool_calls(): "messages": [ { "role": "assistant", - "content": None, - "tool_calls": [ - { - "id": "tool-call-weather-1", - "type": "function", - "function": { - "name": "get_weather", - "arguments": '{"location": "北京"}', - }, - } - ], + "content": '[Tool Call] get_weather({"location": "北京"})', "chat_time": "2025-11-24T10:12:00Z", "message_id": "assistant-with-call-1", } @@ -195,24 +185,13 @@ def example_03b_tool_message_with_result(): "messages": [ { "role": "assistant", - "content": None, - "tool_calls": [ - { - "id": "tool-call-weather-1", - "type": "function", - "function": { - "name": "get_weather", - "arguments": '{"location": "北京"}', - }, - } - ], + "content": '[Tool Call] get_weather({"location": "北京"})', "chat_time": "2025-11-24T10:12:00Z", "message_id": "assistant-with-call-1", }, { - "role": "tool", - "content": "北京今天天气晴朗,温度25°C,湿度60%。", - "tool_call_id": "tool-call-weather-1", + "role": "system", + "content": "[Tool Result] 北京今天天气晴朗,温度25°C,湿度60%。", "chat_time": "2025-11-24T10:12:05Z", "message_id": "tool-result-1", }, @@ -232,31 +211,44 @@ def example_03c_tool_description_input_output(): - `tool_output`: the result/output from the tool execution. - These are alternative formats for representing tool interactions. 
""" + tool_desc = { + "type": "tool_description", + "name": "get_weather", + "description": "获取指定地点的当前天气信息", + "parameters": { + "type": "object", + "properties": {"location": {"type": "string", "description": "城市名称"}}, + "required": ["location"], + }, + } + tool_input = { + "type": "tool_input", + "call_id": "call_123", + "name": "get_weather", + "argument": {"location": "北京"}, + } + tool_output = { + "type": "tool_output", + "call_id": "call_123", + "name": "get_weather", + "output": {"weather": "晴朗", "temperature": 25, "humidity": 60}, + } + payload = { "user_id": USER_ID, "writable_cube_ids": [MEM_CUBE_ID], "messages": [ { - "type": "tool_description", - "name": "get_weather", - "description": "获取指定地点的当前天气信息", - "parameters": { - "type": "object", - "properties": {"location": {"type": "string", "description": "城市名称"}}, - "required": ["location"], - }, + "role": "system", + "content": f"[Tool Description] {json.dumps(tool_desc, ensure_ascii=False)}", }, { - "type": "tool_input", - "call_id": "call_123", - "name": "get_weather", - "argument": {"location": "北京"}, + "role": "user", + "content": f"[Tool Input] {json.dumps(tool_input, ensure_ascii=False)}", }, { - "type": "tool_output", - "call_id": "call_123", - "name": "get_weather", - "output": {"weather": "晴朗", "temperature": 25, "humidity": 60}, + "role": "system", + "content": f"[Tool Output] {json.dumps(tool_output, ensure_ascii=False)}", }, ], "info": {"source_type": "custom_tool_format"}, @@ -282,20 +274,13 @@ def example_04_extreme_multimodal_single_message(): "messages": [ { "role": "user", - "content": [ - {"type": "text", "text": "请分析下面这些信息:"}, - {"type": "image_url", "image_url": {"url": "https://example.com/x.png"}}, - {"type": "file", "file": {"file_id": "f1", "filename": "xx.pdf"}}, - ], + "content": "请分析下面这些信息:\n[Image: https://example.com/x.png]\n[File: xx.pdf]", "chat_time": "2025-11-24T10:55:00Z", "message_id": "mix-mm-1", }, { "role": "user", - "content": [ - {"type": "text", "text": "请再分析一下下面这些信息:"}, - {"type": "file", "file": {"file_id": "f1", "filename": "xx.pdf"}}, - ], + "content": "请再分析一下下面这些信息:\n[File: xx.pdf]", "chat_time": "2025-11-24T10:55:10Z", "message_id": "mix-mm-2", }, @@ -323,19 +308,7 @@ def example_05_multimodal_text_and_image(): "messages": [ { "role": "user", - "content": [ - { - "type": "text", - "text": "帮我看看这张图片大概是什么内容?", - }, - { - "type": "image_url", - "image_url": { - "url": "https://example.com/mountain_lake.jpg", - "detail": "high", - }, - }, - ], + "content": "帮我看看这张图片大概是什么内容?\n[Image: https://example.com/mountain_lake.jpg]", "chat_time": "2025-11-24T10:20:00Z", "message_id": "mm-img-1", } @@ -361,19 +334,7 @@ def example_06_multimodal_text_and_file(): "messages": [ { "role": "user", - "content": [ - { - "type": "text", - "text": "请阅读这个PDF,总结里面的要点。", - }, - { - "type": "file", - "file": { - "file_id": "file_123", - "filename": "report.pdf", # optional, but recommended - }, - }, - ], + "content": "请阅读这个PDF,总结里面的要点。\n[File: report.pdf (ID: file_123)]", "chat_time": "2025-11-24T10:21:00Z", "message_id": "mm-file-1", } @@ -396,15 +357,7 @@ def example_07_audio_only_message(): "messages": [ { "role": "user", - "content": [ - { - "type": "input_audio", - "input_audio": { - "data": "base64_encoded_audio_here", - "format": "mp3", - }, - } - ], + "content": "[Audio Input: base64_encoded_audio_here (mp3)]", "chat_time": "2025-11-24T10:22:00Z", "message_id": "audio-1", } @@ -431,12 +384,12 @@ def example_08_pure_text_input_items(): "writable_cube_ids": [MEM_CUBE_ID], "messages": [ { - "type": "text", 
- "text": "这是一段独立的文本输入,没有明确的对话上下文。", + "role": "user", + "content": "这是一段独立的文本输入,没有明确的对话上下文。", }, { - "type": "text", - "text": "它依然会被抽取和写入明文记忆。", + "role": "user", + "content": "它依然会被抽取和写入明文记忆。", }, ], "info": {"source_type": "batch_import"}, @@ -453,18 +406,15 @@ def example_09_pure_file_input_by_file_id(): * `file_id`: optional, use when file is already uploaded * `file_data`: optional, use for base64-encoded content * `filename`: optional, but recommended for clarity - - In practice, you need at least `file_id` OR `file_data` to specify the file. + - In practice, you need at least `file_id` OR `file_data` to specify the file. """ payload = { "user_id": USER_ID, "writable_cube_ids": [MEM_CUBE_ID], "messages": [ { - "type": "file", - "file": { - "file_id": "file_uploaded_123", # at least one of file_id/file_data needed - "filename": "document.pdf", # optional - }, + "role": "user", + "content": "[File: document.pdf (ID: file_uploaded_123)]", } ], "info": {"source_type": "file_ingestion"}, @@ -487,11 +437,8 @@ def example_09b_pure_file_input_by_file_data(): "writable_cube_ids": [MEM_CUBE_ID], "messages": [ { - "type": "file", - "file": { - "file_data": "base64_encoded_file_content_here", # at least one of file_id/file_data needed - "filename": "document.pdf", # optional - }, + "role": "user", + "content": "[File: document.pdf (base64_data)]", } ], "info": {"source_type": "file_ingestion_base64"}, @@ -512,11 +459,8 @@ def example_09c_pure_file_input_by_oss_url(): "writable_cube_ids": [MEM_CUBE_ID], "messages": [ { - "type": "file", - "file": { - "file_data": "oss_url", # OSS URL instead of base64 - "filename": "document.pdf", - }, + "role": "user", + "content": "[File: document.pdf (OSS: oss_url)]", } ], "info": {"source_type": "file_ingestion_oss"}, @@ -537,11 +481,8 @@ def example_09d_pure_image_input(): "writable_cube_ids": [MEM_CUBE_ID], "messages": [ { - "type": "image_url", - "image_url": { - "url": "https://example.com/standalone_image.jpg", - "detail": "high", - }, + "role": "user", + "content": "[Image: https://example.com/standalone_image.jpg]", } ], "info": {"source_type": "image_ingestion"}, @@ -561,26 +502,7 @@ def example_10_mixed_text_file_image(): "messages": [ { "role": "user", - "content": [ - { - "type": "text", - "text": "请同时分析这个报告和图表。", - }, - { - "type": "file", - "file": { - "file_id": "file_789", - "filename": "analysis_report.pdf", - }, - }, - { - "type": "image_url", - "image_url": { - "url": "https://example.com/chart.png", - "detail": "auto", - }, - }, - ], + "content": "请同时分析这个报告和图表。\n[File: analysis_report.pdf (ID: file_789)]\n[Image: https://example.com/chart.png]", "chat_time": "2025-11-24T10:23:00Z", "message_id": "mixed-1", } @@ -844,6 +766,82 @@ def example_18_add_with_chat_history(): call_add_api("18_add_with_chat_history", payload) +# =========================================================================== +# 8. Search and Chat examples +# =========================================================================== + + +def example_19_search_memories(): + """ + Search memories using `APISearchRequest`. + + - Searches for memories relevant to a query. + - Demonstrates usage of `readable_cube_ids` for scoping. 
+ """ + payload = { + "user_id": USER_ID, + "query": "What are my hotel preferences?", + "readable_cube_ids": [MEM_CUBE_ID], + "top_k": 5, + "mode": "fast", + "include_preference": True, + } + + print("=" * 80) + print("[*] Example: 19_search_memories") + print("- Payload:") + print(json.dumps(payload, indent=2, ensure_ascii=False)) + + try: + resp = requests.post( + f"{BASE_URL}/search", headers=HEADERS, data=json.dumps(payload), timeout=60 + ) + print("- Response:") + print(resp.status_code, resp.text) + except Exception as e: + print(f"- Request failed with exception: {e!r}") + + print("=" * 80) + print() + + +def example_20_chat_complete(): + """ + Chat completion using `APIChatCompleteRequest`. + + - Sends a chat query to the system. + - System retrieves relevant memories and generates a response. + - please make sure ENABLE_CHAT_API=true in .env or environment variables + - and set up CHAT_MODEL_LIST in .env or environment variables properly with api keys and stuff. + """ + payload = { + "user_id": USER_ID, + "query": "Recommend a hotel for me based on my preferences.", + "readable_cube_ids": [MEM_CUBE_ID], + "writable_cube_ids": [MEM_CUBE_ID], + "mode": "fast", + "top_k": 5, + "add_message_on_answer": True, + } + + print("=" * 80) + print("[*] Example: 20_chat_complete") + print("- Payload:") + print(json.dumps(payload, indent=2, ensure_ascii=False)) + + try: + resp = requests.post( + f"{BASE_URL}/chat/complete", headers=HEADERS, data=json.dumps(payload), timeout=60 + ) + print("- Response:") + print(resp.status_code, resp.text) + except Exception as e: + print(f"- Request failed with exception: {e!r}") + + print("=" * 80) + print() + + # =========================================================================== # Entry point # =========================================================================== @@ -873,3 +871,5 @@ def example_18_add_with_chat_history(): example_16_feedback_add() example_17_family_travel_conversation() example_18_add_with_chat_history() + example_19_search_memories() + example_20_chat_complete() diff --git a/examples/basic_modules/embedder.py b/examples/basic_modules/embedder.py index 7cc7942da..d35dc1797 100644 --- a/examples/basic_modules/embedder.py +++ b/examples/basic_modules/embedder.py @@ -3,7 +3,10 @@ # Scenario 1: Using EmbedderFactory - +# Prerequisites: +# 1. Install Ollama: https://ollama.com/ +# 2. Start Ollama server: `ollama serve` +# 3. Pull the model: `ollama pull nomic-embed-text` config = EmbedderConfigFactory.model_validate( { "backend": "ollama", @@ -33,6 +36,9 @@ # Scenario 3: Using SenTranEmbedder +# Prerequisites: +# 1. Ensure `einops` is installed: `pip install einops` (Required for some HF models like nomic-bert) +# 2. The model `nomic-ai/nomic-embed-text-v1.5` will be downloaded automatically from HuggingFace. config_hf = EmbedderConfigFactory.model_validate( { @@ -49,6 +55,9 @@ print("==" * 20) # === Scenario 4: Using UniversalAPIEmbedder(OpenAI) === +# Prerequisites: +# 1. Set a valid OPENAI_API_KEY +# 2. Ensure the base_url is reachable config_api = EmbedderConfigFactory.model_validate( { @@ -68,6 +77,9 @@ print("Embedding preview:", embedding_api[0][:10]) # === Scenario 5: Using UniversalAPIEmbedder(Azure) === +# Prerequisites: +# 1. Set a valid AZURE_API_KEY +# 2. 
Ensure the base_url is reachable config_api = EmbedderConfigFactory.model_validate( { diff --git a/examples/basic_modules/llm.py b/examples/basic_modules/llm.py index d33fc9544..fb157c991 100644 --- a/examples/basic_modules/llm.py +++ b/examples/basic_modules/llm.py @@ -5,6 +5,10 @@ # Scenario 1: Using LLMFactory with Ollama Backend # This is the most recommended way! 🌟 +# Prerequisites: +# 1. Install Ollama: https://ollama.com/ +# 2. Start Ollama server: `ollama serve` +# 3. Need python ollama package(>=0.5.0,<0.6.0) config = LLMConfigFactory.model_validate( { @@ -46,6 +50,10 @@ # Scenario 3: Using LLMFactory with OpenAI Backend +# Prerequisites: +# 1. You need a valid OpenAI API key to run this scenario. +# 2. Replace 'sk-xxxx' with your actual API key below. + config = LLMConfigFactory.model_validate( { diff --git a/examples/basic_modules/nebular_example.py b/examples/basic_modules/nebular_example.py deleted file mode 100644 index 13f88e3f3..000000000 --- a/examples/basic_modules/nebular_example.py +++ /dev/null @@ -1,361 +0,0 @@ -import json -import os - -from datetime import datetime, timezone - -import numpy as np - -from dotenv import load_dotenv - -from memos.configs.embedder import EmbedderConfigFactory -from memos.configs.graph_db import GraphDBConfigFactory -from memos.embedders.factory import EmbedderFactory -from memos.graph_dbs.factory import GraphStoreFactory -from memos.memories.textual.item import TextualMemoryItem, TreeNodeTextualMemoryMetadata - - -load_dotenv() - - -def show(nebular_data): - from memos.configs.graph_db import Neo4jGraphDBConfig - from memos.graph_dbs.neo4j import Neo4jGraphDB - - tree_config = Neo4jGraphDBConfig.from_json_file("../../examples/data/config/neo4j_config.json") - tree_config.use_multi_db = True - tree_config.db_name = "nebular-show2" - - neo4j_db = Neo4jGraphDB(tree_config) - neo4j_db.clear() - neo4j_db.import_graph(nebular_data) - - -embedder_config = EmbedderConfigFactory.model_validate( - { - "backend": "universal_api", - "config": { - "provider": "openai", - "api_key": os.getenv("OPENAI_API_KEY", "sk-xxxxx"), - "model_name_or_path": "text-embedding-3-large", - "base_url": os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1"), - }, - } -) -embedder = EmbedderFactory.from_config(embedder_config) -embedder_dimension = 3072 - - -def embed_memory_item(memory: str) -> list[float]: - embedding = embedder.embed([memory])[0] - embedding_np = np.array(embedding, dtype=np.float32) - embedding_list = embedding_np.tolist() - return embedding_list - - -def example_shared_db(db_name: str = "shared-traval-group"): - """ - Example: Single(Shared)-DB multi-tenant (logical isolation) - Multiple users' data in the same Neo4j DB with user_name as a tag. 
- """ - # users - user_list = ["travel_member_alice", "travel_member_bob"] - - for user_name in user_list: - # Step 1: Build factory config - config = GraphDBConfigFactory( - backend="nebular", - config={ - "uri": json.loads(os.getenv("NEBULAR_HOSTS", "localhost")), - "user": os.getenv("NEBULAR_USER", "root"), - "password": os.getenv("NEBULAR_PASSWORD", "xxxxxx"), - "space": db_name, - "user_name": user_name, - "use_multi_db": False, - "auto_create": True, - "embedding_dimension": embedder_dimension, - }, - ) - - # Step 2: Instantiate graph store - graph = GraphStoreFactory.from_config(config) - print(f"\n[INFO] Working in shared DB: {db_name}, for user: {user_name}") - graph.clear() - - # Step 3: Create topic node - topic = TextualMemoryItem( - memory="This research addresses long-term multi-UAV navigation for energy-efficient communication coverage.", - metadata=TreeNodeTextualMemoryMetadata( - memory_type="LongTermMemory", - key="Multi-UAV Long-Term Coverage", - hierarchy_level="topic", - type="fact", - memory_time="2024-01-01", - source="file", - sources=["paper://multi-uav-coverage/intro"], - status="activated", - confidence=95.0, - tags=["UAV", "coverage", "multi-agent"], - entities=["UAV", "coverage", "navigation"], - visibility="public", - updated_at=datetime.now().isoformat(), - embedding=embed_memory_item( - "This research addresses long-term " - "multi-UAV navigation for " - "energy-efficient communication " - "coverage." - ), - ), - ) - - graph.add_node( - id=topic.id, memory=topic.memory, metadata=topic.metadata.model_dump(exclude_none=True) - ) - - # Step 4: Add a concept for each user - concept = TextualMemoryItem( - memory=f"Itinerary plan for {user_name}", - metadata=TreeNodeTextualMemoryMetadata( - memory_type="LongTermMemory", - key="Multi-UAV Long-Term Coverage", - hierarchy_level="concept", - type="fact", - memory_time="2024-01-01", - source="file", - sources=["paper://multi-uav-coverage/intro"], - status="activated", - confidence=95.0, - tags=["UAV", "coverage", "multi-agent"], - entities=["UAV", "coverage", "navigation"], - visibility="public", - updated_at=datetime.now().isoformat(), - embedding=embed_memory_item(f"Itinerary plan for {user_name}"), - ), - ) - - graph.add_node( - id=concept.id, - memory=concept.memory, - metadata=concept.metadata.model_dump(exclude_none=True), - ) - - # Link concept to topic - graph.add_edge(source_id=concept.id, target_id=topic.id, type="RELATE_TO") - print(f"[INFO] Added nodes for {user_name}") - - # Step 5: Query and print ALL for verification - print("\n=== Export entire DB (for verification, includes ALL users) ===") - graph = GraphStoreFactory.from_config(config) - all_graph_data = graph.export_graph() - print(str(all_graph_data)[:1000]) - - # Step 6: Search for alice's data only - print("\n=== Search for travel_member_alice ===") - config_alice = GraphDBConfigFactory( - backend="nebular", - config={ - "uri": json.loads(os.getenv("NEBULAR_HOSTS", "localhost")), - "user": os.getenv("NEBULAR_USER", "root"), - "password": os.getenv("NEBULAR_PASSWORD", "xxxxxx"), - "space": db_name, - "user_name": user_list[0], - "auto_create": True, - "embedding_dimension": embedder_dimension, - "use_multi_db": False, - }, - ) - graph_alice = GraphStoreFactory.from_config(config_alice) - nodes = graph_alice.search_by_embedding(vector=embed_memory_item("travel itinerary"), top_k=3) - for node in nodes: - print(str(graph_alice.get_node(node["id"]))[:1000]) - - -def run_user_session( - user_name: str, - db_name: str, - topic_text: str, - 
concept_texts: list[str], - fact_texts: list[str], -): - print(f"\n=== {user_name} starts building their memory graph ===") - - # Manually initialize correct GraphDB class - config = GraphDBConfigFactory( - backend="nebular", - config={ - "uri": json.loads(os.getenv("NEBULAR_HOSTS", "localhost")), - "user": os.getenv("NEBULAR_USER", "root"), - "password": os.getenv("NEBULAR_PASSWORD", "xxxxxx"), - "space": db_name, - "user_name": user_name, - "use_multi_db": False, - "auto_create": True, - "embedding_dimension": embedder_dimension, - }, - ) - graph = GraphStoreFactory.from_config(config) - - # Start with a clean slate for this user - graph.clear() - - now = datetime.now(timezone.utc).isoformat() - - # === Step 1: Create a root topic node (e.g., user's research focus) === - topic = TextualMemoryItem( - memory=topic_text, - metadata=TreeNodeTextualMemoryMetadata( - memory_type="LongTermMemory", - key="Research Topic", - hierarchy_level="topic", - type="fact", - memory_time="2024-01-01", - status="activated", - visibility="public", - tags=["research", "rl"], - updated_at=now, - embedding=embed_memory_item(topic_text), - ), - ) - graph.add_node(topic.id, topic.memory, topic.metadata.model_dump(exclude_none=True)) - - # === Step 2: Create two concept nodes linked to the topic === - concept_items = [] - for i, text in enumerate(concept_texts): - concept = TextualMemoryItem( - memory=text, - metadata=TreeNodeTextualMemoryMetadata( - memory_type="LongTermMemory", - key=f"Concept {i + 1}", - hierarchy_level="concept", - type="fact", - memory_time="2024-01-01", - status="activated", - visibility="public", - updated_at=now, - embedding=embed_memory_item(text), - tags=["concept"], - confidence=90 + i, - ), - ) - graph.add_node(concept.id, concept.memory, concept.metadata.model_dump(exclude_none=True)) - graph.add_edge(topic.id, concept.id, type="PARENT") - concept_items.append(concept) - - # === Step 3: Create supporting facts under each concept === - for i, text in enumerate(fact_texts): - fact = TextualMemoryItem( - memory=text, - metadata=TreeNodeTextualMemoryMetadata( - memory_type="WorkingMemory", - key=f"Fact {i + 1}", - hierarchy_level="fact", - type="fact", - memory_time="2024-01-01", - status="activated", - visibility="public", - updated_at=now, - embedding=embed_memory_item(text), - confidence=85.0, - tags=["fact"], - ), - ) - graph.add_node(fact.id, fact.memory, fact.metadata.model_dump(exclude_none=True)) - graph.add_edge(concept_items[i % len(concept_items)].id, fact.id, type="PARENT") - - # === Step 4: Retrieve memory using semantic search === - vector = embed_memory_item("How is memory retrieved?") - search_result = graph.search_by_embedding(vector, top_k=2) - for r in search_result: - node = graph.get_node(r["id"]) - print("🔍 Search result:", node["memory"]) - - # === Step 5: Tag-based neighborhood discovery === - neighbors = graph.get_neighbors_by_tag(["concept"], exclude_ids=[], top_k=2) - print("📎 Tag-related nodes:", [neighbor["memory"] for neighbor in neighbors]) - - # === Step 6: Retrieve children (facts) of first concept === - children = graph.get_children_with_embeddings(concept_items[0].id) - print("📍 Children of concept:", [child["memory"] for child in children]) - - # === Step 7: Export a local subgraph and grouped statistics === - subgraph = graph.get_subgraph(topic.id, depth=2) - print("📌 Subgraph node count:", len(subgraph["neighbors"])) - - stats = graph.get_grouped_counts(["memory_type", "status"]) - print("📊 Grouped counts:", stats) - - # === Step 8: Demonstrate 
updates and cleanup === - graph.update_node( - concept_items[0].id, {"confidence": 99.0, "created_at": "2025-07-24T20:11:56.375687"} - ) - graph.remove_oldest_memory("WorkingMemory", keep_latest=1) - graph.delete_edge(topic.id, concept_items[0].id, type="PARENT") - graph.delete_node(concept_items[1].id) - - # === Step 9: Export and re-import the entire graph structure === - exported = graph.export_graph() - graph.import_graph(exported) - print("📦 Graph exported and re-imported, total nodes:", len(exported["nodes"])) - - # ==================================== - # 🔍 Step 10: extra function - # ==================================== - print(f"\n=== 🔍 Extra Tests for user: {user_name} ===") - - print(" - Memory count:", graph.get_memory_count("LongTermMemory")) - print(" - Node count:", graph.count_nodes("LongTermMemory")) - print(" - All LongTermMemory items:", graph.get_all_memory_items("LongTermMemory")) - - if len(exported["edges"]) > 0: - n1, n2 = exported["edges"][0]["source"], exported["edges"][0]["target"] - print(" - Edge exists?", graph.edge_exists(n1, n2, exported["edges"][0]["type"])) - print(" - Edges for node:", graph.get_edges(n1)) - - filters = [{"field": "memory_type", "op": "=", "value": "LongTermMemory"}] - print(" - Metadata query result:", graph.get_by_metadata(filters)) - print( - " - Optimization candidates:", graph.get_structure_optimization_candidates("LongTermMemory") - ) - try: - graph.drop_database() - except ValueError as e: - print(" - drop_database raised ValueError as expected:", e) - - -def example_complex_shared_db(db_name: str = "shared-traval-group-complex"): - # User 1: Alice explores structured memory for LLMs - run_user_session( - user_name="alice", - db_name=db_name, - topic_text="Alice studies structured memory and long-term memory optimization in LLMs.", - concept_texts=[ - "Short-term memory can be simulated using WorkingMemory blocks.", - "A structured memory graph improves retrieval precision for agents.", - ], - fact_texts=[ - "Embedding search is used to find semantically similar memory items.", - "User memories are stored as node-edge structures that support hierarchical reasoning.", - ], - ) - - # User 2: Bob focuses on GNN-based reasoning - run_user_session( - user_name="bob", - db_name=db_name, - topic_text="Bob investigates how graph neural networks can support knowledge reasoning.", - concept_texts=[ - "GNNs can learn high-order relations among entities.", - "Attention mechanisms in graphs improve inference precision.", - ], - fact_texts=[ - "GAT outperforms GCN in graph classification tasks.", - "Multi-hop reasoning helps answer complex queries.", - ], - ) - - -if __name__ == "__main__": - print("\n=== Example: Single-DB ===") - example_shared_db(db_name="shared_traval_group-new") - - print("\n=== Example: Single-DB-Complex ===") - example_complex_shared_db(db_name="shared-traval-group-complex-new2") diff --git a/examples/basic_modules/neo4j_example.py b/examples/basic_modules/neo4j_example.py index ea68975cc..e1c0df317 100644 --- a/examples/basic_modules/neo4j_example.py +++ b/examples/basic_modules/neo4j_example.py @@ -27,6 +27,23 @@ def embed_memory_item(memory: str) -> list[float]: return embedder.embed([memory])[0] +def get_neo4j_graph(db_name: str = "paper"): + config = GraphDBConfigFactory( + backend="neo4j", + config={ + "uri": "bolt://xxxx:7687", + "user": "neo4j", + "password": "xxxx", + "db_name": db_name, + "auto_create": True, + "embedding_dimension": 3072, + "use_multi_db": True, + }, + ) + graph = 
GraphStoreFactory.from_config(config)
+    return graph
+
+
 def example_multi_db(db_name: str = "paper"):
     # Step 1: Build factory config
     config = GraphDBConfigFactory(
@@ -537,6 +554,52 @@ def example_complex_shared_db(db_name: str = "shared-traval-group-complex", comm
     )
 
 
+def example_complex_shared_db_search_filter(db):
+    embedding = embed_memory_item(
+        "The reward function combines "
+        "multiple objectives: coverage "
+        "maximization, energy consumption "
+    )
+    print(f"get_node:{db.get_node(id='5364c28e-1e4b-485a-b1d5-1ba11bc5bc8b')}")
+
+    filter_id = {"id": "a269f2bf-f4a2-43b9-aa8d-1cb2a2eb4691"}
+    print(f"==filter_id:{db.search_by_embedding(vector=embedding, filter=filter_id)}")
+
+    filter_and_params = {
+        "and": [{"id": "a269f2bf-f4a2-43b9-aa8d-1cb2a2eb4691"}, {"source": "file123"}]
+    }
+    print(
+        f"==filter_and_params:{db.search_by_embedding(vector=embedding, filter=filter_and_params)}"
+    )
+
+    filter_or_params = {"or": [{"id": "a269f2bf-f4a2-43b9-aa8d-1cb2a2eb4691"}, {"id": "xxxxxxxx"}]}
+    print(f"==filter_or_params:{db.search_by_embedding(vector=embedding, filter=filter_or_params)}")
+    filter_like_params = {
+        "and": [
+            {"memory_type": {"like": "LongTermMemory"}},
+        ]
+    }
+    print(
+        f"==filter_like_params:{db.search_by_embedding(vector=embedding, filter=filter_like_params)}"
+    )
+
+    # Comparison operators map to Cypher as: {"gt": ">", "lt": "<", "gte": ">=", "lte": "<="}
+    filter_gt_params = {
+        "and": [
+            {"created_at": {"gt": "2025-11-29"}},
+        ]
+    }
+    print(f"==filter_gt_params:{db.search_by_embedding(vector=embedding, filter=filter_gt_params)}")
+
+
+def example_complex_shared_db_delete_memory(db):
+    print("delete node")
+    db.delete_node(id="582de45f-8f99-4006-8062-76eea5649d94")
+    print("delete edge")
+    # NOTE: replace source_id/target_id with real node IDs (UUID strings) before running
+    db.delete_edge(source_id=1, target_id=2, type="PARENT", user_name="")
+
+
 if __name__ == "__main__":
     print("\n=== Example: Multi-DB ===")
     example_multi_db(db_name="paper")
@@ -544,8 +607,14 @@ def example_complex_shared_db(db_name: str = "shared-traval-group-complex", comm
     print("\n=== Example: Single-DB ===")
     example_shared_db(db_name="shared-traval-group")
 
     print("\n=== Example: Single-DB-Complex ===")
     example_complex_shared_db(db_name="shared-traval-group-complex-new")
 
     print("\n=== Example: Single-Community-DB-Complex ===")
     example_complex_shared_db(db_name="paper", community=True)
+
+    print("\n=== Example: Single-DB-Complex searchFilter ===")
+    db = get_neo4j_graph(db_name="paper")
+    example_complex_shared_db_search_filter(db)
+
+    example_complex_shared_db_delete_memory(db)
diff --git a/examples/basic_modules/parser.py b/examples/basic_modules/parser.py
deleted file mode 100644
index c063964b7..000000000
--- a/examples/basic_modules/parser.py
+++ /dev/null
@@ -1,15 +0,0 @@
-from memos.configs.parser import ParserConfigFactory
-from memos.parsers.factory import ParserFactory
-
-
-config = ParserConfigFactory.model_validate(
-    {
-        "backend": "markitdown",
-        "config": {},
-    }
-)
-parser = ParserFactory.from_config(config)
-file_path = "README.md"
-markdown_text = parser.parse(file_path)
-print("Markdown text:\n", markdown_text)
-print("==" * 20)
diff --git a/examples/basic_modules/qdrant_vec_db.py b/examples/basic_modules/qdrant_vec_db.py
deleted file mode 100644
index a16cd95ee..000000000
--- a/examples/basic_modules/qdrant_vec_db.py
+++ /dev/null
@@ -1,143 +0,0 @@
-import uuid
-
-from memos import settings
-from memos.configs.vec_db import VectorDBConfigFactory
-from memos.vec_dbs.factory import 
VecDBFactory - - -config = VectorDBConfigFactory.model_validate( - { - "backend": "qdrant", - "config": { - "collection_name": "test_collection", - "vector_dimension": 4, - "distance_metric": "cosine", - "path": str(settings.MEMOS_DIR / "qdrant"), - }, - } -) -vec_db = VecDBFactory.from_config(config) - -# ============== BATCH OPERATIONS ============== -print("\n" + "=" * 50) -print("BATCH DOCUMENT ADDITION") - -# Add batch data -batch_data = [ - { - "id": str(uuid.uuid4()), - "vector": [0.5, 0.6, 0.7, 0.8], - "payload": {"text": "Document A", "category": "type1"}, - }, - { - "id": str(uuid.uuid4()), - "vector": [0.4, 0.5, 0.6, 0.7], - "payload": {"text": "Document B", "category": "type2"}, - }, - { - "id": str(uuid.uuid4()), - "vector": [0.3, 0.4, 0.5, 0.6], - "payload": {"text": "Document C", "category": "type1"}, - }, -] -vec_db.add(batch_data) -print(f"✓ Added {len(batch_data)} documents") - -# ============== SEARCH OPERATIONS ============== -print("\n" + "=" * 50) -print("VECTOR SEARCH") - -# Search for similar items -query_vector = [5.0, 6.0, 7.0, 8.0] -results = vec_db.search(query_vector, top_k=2) -print(f"Query vector: {query_vector}") -print("\nResults:") -for i, result in enumerate(results, 1): - print(f" {i}. ID: {result.id}") - print(f" Score: {result.score}") - print(f" Payload: {result.payload}") - -# ============== COUNT OPERATIONS ============== -print("\n" + "=" * 50) -print("DOCUMENT COUNT") - -# Count documents in collection -count = vec_db.count() -print(f"Total documents in collection: {count}") - -# Count documents with filter -filtered_count = vec_db.count(filter={"category": "type1"}) -print(f"Documents with category 'type1': {filtered_count}") - -# ============== SINGLE DOCUMENT OPERATIONS ============== -print("\n" + "=" * 50) -print("DOCUMENT OPERATIONS") - -# Add a document -doc_id = str(uuid.uuid4()) -vec_db.add( - [ - { - "id": doc_id, - "vector": [0.1, 0.2, 0.3, 0.4], - "payload": {"text": "Original document", "status": "new"}, - } - ] -) -print(f"✓ Added document with ID: {doc_id}") - -# Update document payload -vec_db.update(doc_id, {"payload": {"text": "Updated document", "status": "updated"}}) -print(f"✓ Updated document payload for ID: {doc_id}") - -# Retrieve updated document -result = vec_db.get_by_id(doc_id) -print("\nRetrieved updated document:") -print(f" ID: {doc_id}") -print(f" Payload: {result.payload if result else 'Not found'}") - -# Delete the document -vec_db.delete([doc_id]) -print(f"\n✓ Deleted document with ID: {doc_id}") - -# Verify deletion -result = vec_db.get_by_id(doc_id) -print("\nDocument after deletion:") -print(f" Result: {'Not found' if result is None else result}") - -# ============== COLLECTION OPERATIONS ============== -print("\n" + "=" * 50) -print("COLLECTION OPERATIONS") - -# List all collections in the database -collections = vec_db.list_collections() -print(f"Available collections: {collections}") - -# ============== FILTER OPERATIONS ============== -print("\n" + "=" * 50) -print("FILTER OPERATIONS") - -# Get documents by filter criteria -filter_results = vec_db.get_by_filter({"category": "type1"}) -print("Documents filtered by category 'type1':") -for i, item in enumerate(filter_results, 1): - print(f" {i}. ID: {item.id}") - print(f" Payload: {item.payload}") - -# Get all documents in the collection -all_docs = vec_db.get_all() -print("\nAll documents in the collection:") -for i, item in enumerate(all_docs, 1): - print(f" {i}. 
ID: {item.id}") - print(f" Vector: {item.vector}") - print(f" Payload: {item.payload}") - -# ============== CLEANUP ============== -print("\n" + "=" * 50) -print("CLEANUP") - -# Delete the collection -vec_db.delete_collection("test_collection") -print("✓ Collection deleted") -print(f"Available collections after deletion: {vec_db.list_collections()}") -print("\n" + "=" * 50) diff --git a/examples/basic_modules/reranker.py b/examples/basic_modules/reranker.py index 47bf1405c..e5a869789 100644 --- a/examples/basic_modules/reranker.py +++ b/examples/basic_modules/reranker.py @@ -50,6 +50,7 @@ def show_ranked(title: str, ranked: list[tuple[TextualMemoryItem, float]], top_n def main(): # ------------------------------- # 1) Build the embedder (real vectors) + # You may need to set valid OPENAI_API_KEY and OPENAI_API_BASE in your environment variables. # ------------------------------- embedder_cfg = EmbedderConfigFactory.model_validate( { @@ -62,6 +63,22 @@ def main(): }, } ) + """ + # ------------------------------- + # Optional: Build the embedder (using local sentence-transformers) + # ------------------------------- + # Use a local model so no API key is required. + embedder_cfg = EmbedderConfigFactory.model_validate( + { + "backend": "sentence_transformer", + "config": { + "model_name_or_path": "nomic-ai/nomic-embed-text-v1.5", + "trust_remote_code": True, + }, + } + ) + """ + embedder = EmbedderFactory.from_config(embedder_cfg) # ------------------------------- diff --git a/examples/basic_modules/textual_memory_internet_search_example.py b/examples/basic_modules/textual_memory_internet_search_example.py index f26d3afd8..9007d7e67 100644 --- a/examples/basic_modules/textual_memory_internet_search_example.py +++ b/examples/basic_modules/textual_memory_internet_search_example.py @@ -1,7 +1,49 @@ """ -Example: Using InternetRetrieverFactory with BochaAISearchRetriever +Textual Memory Internet Search Example +======================================= + +This example demonstrates how to use MemOS's InternetRetrieverFactory to search +the web and retrieve relevant information as memory items. + +**What you'll learn:** +- How to initialize an embedder for web content embedding +- How to configure and use BochaAI web search retriever +- How to configure and use Google Custom Search API +- How to chunk and process web content into memory items +- How to retrieve structured information from internet searches + +**Use case:** +When you need to answer questions that require real-time web information +(e.g., "What's in Alibaba's 2024 ESG report?"), this retriever can: +1. Search the web using BochaAI API or Google Custom Search API +2. Fetch and parse web page content +3. Chunk the content into manageable pieces +4. 
Return structured memory items with embeddings + +**Prerequisites:** +- Valid BochaAI API Key (set in environment variable: BOCHA_API_KEY) +- (Optional) Valid Google API Key and Search Engine ID for Google Custom Search + - GOOGLE_API_KEY: Get from https://console.cloud.google.com/ + - GOOGLE_SEARCH_ENGINE_ID: Get from https://programmablesearchengine.google.com/ +- Embedder service running (e.g., Ollama with nomic-embed-text) +- Internet connection for web searches + +Run this example: + # Basic test with BochaAI + export BOCHA_API_KEY='sk-your-bocha-api-key' + python examples/basic_modules/textual_memory_internet_search_example.py + + # Test with both BochaAI and Google + export BOCHA_API_KEY='sk-your-bocha-api-key' + export GOOGLE_API_KEY='your-google-api-key' + export GOOGLE_SEARCH_ENGINE_ID='your-search-engine-id' + python examples/basic_modules/textual_memory_internet_search_example.py """ +import json +import os + +from memos import log from memos.configs.embedder import EmbedderConfigFactory from memos.configs.internet_retriever import InternetRetrieverConfigFactory from memos.embedders.factory import EmbedderFactory @@ -10,43 +52,258 @@ ) -# ========= 1. Create an embedder ========= -embedder_config = EmbedderConfigFactory.model_validate( - { - "backend": "ollama", # Or "sentence_transformer", etc. - "config": { - "model_name_or_path": "nomic-embed-text:latest", - }, - } -) +logger = log.get_logger(__name__) + +# ============================================================================ +# Step 0: Setup - Load configuration files +# ============================================================================ +print("=" * 80) +print("Textual Memory Internet Search Example") +print("=" * 80) + +current_dir = os.path.dirname(os.path.abspath(__file__)) +config_dir = os.path.join(current_dir, "../data/config") + +# Load the shared tree-text memory configuration +config_path = os.path.join(config_dir, "tree_config_shared_database.json") +with open(config_path) as f: + config_data = json.load(f) + +print(f"\n✓ Loaded configuration from: {config_path}") + +# ============================================================================ +# Step 1: Initialize Embedder +# ============================================================================ +print("\n[Step 1] Initializing embedder for web content...") + +# The embedder will convert web content into vector embeddings +embedder_config = EmbedderConfigFactory.model_validate(config_data["embedder"]) embedder = EmbedderFactory.from_config(embedder_config) -# ========= 2. Create retriever config for BochaAI ========= +print(f"✓ Embedder initialized: {embedder_config.backend}") + +# ============================================================================ +# Step 2: Configure Internet Retriever (BochaAI) +# ============================================================================ +print("\n[Step 2] Configuring internet retriever...") + +# Load the simple_struct reader configuration +reader_config_path = os.path.join(config_dir, "simple_struct_reader_config.json") +with open(reader_config_path) as f: + reader_config_data = json.load(f) + +print(f"✓ Loaded reader configuration from: {reader_config_path}") + +# NOTE: You need to set your BochaAI API key here or in environment variable +# For this example, we'll read from environment variable +bocha_api_key = os.environ.get("BOCHA_API_KEY", "sk-your-bocha-api-key-here") + +if bocha_api_key == "sk-your-bocha-api-key-here": + print("⚠️ Warning: Using placeholder API key. 
Set BOCHA_API_KEY environment variable.") + retriever_config = InternetRetrieverConfigFactory.model_validate( { "backend": "bocha", "config": { - "api_key": "sk-xxx", # Your BochaAI API Key - "max_results": 5, - "reader": { # Reader config for chunking web content + "api_key": bocha_api_key, + "max_results": 5, # Maximum number of search results to retrieve + "reader": { + # The reader chunks web content into memory items "backend": "simple_struct", - "config": { # your simple struct reader config - }, + "config": reader_config_data, # Use loaded configuration }, }, } ) -# ========= 3. Build retriever instance via factory ========= +print(f"✓ Retriever configured: {retriever_config.backend}") +print(f" Max results per search: {retriever_config.config.max_results}") + +# ============================================================================ +# Step 3: Create Retriever Instance +# ============================================================================ +print("\n[Step 3] Creating internet retriever instance...") + retriever = InternetRetrieverFactory.from_config(retriever_config, embedder) -# ========= 4. Run BochaAI Web Search ========= -print("=== Scenario 1: Web Search (BochaAI) ===") -query_web = "Alibaba 2024 ESG report" -results_web = retriever.retrieve_from_internet(query_web) +print("✓ Retriever initialized and ready") + +# ============================================================================ +# Step 4: Perform Web Search +# ============================================================================ +print("\n[Step 4] Performing web search...") + +# Define the search query +query = "Alibaba 2024 ESG report" +print(f" 🔍 Query: '{query}'") +print(" ⏳ Searching the web and processing results...\n") + +# Execute the search +# This will: +# 1. Search using BochaAI API +# 2. Fetch web page content +# 3. Parse and chunk the content +# 4. Generate embeddings for each chunk +# 5. Return as TextualMemoryItem objects +results = retriever.retrieve_from_internet(query) + +print("✓ Search completed!") +print(f"✓ Retrieved {len(results)} memory items from web search\n") + +# ============================================================================ +# Step 5: Display Results +# ============================================================================ +print("=" * 80) +print("WEB SEARCH RESULTS") +print("=" * 80) + +if not results: + print("\n❌ No results found.") + print(" This might indicate:") + print(" - Invalid or missing BochaAI API key") + print(" - Network connectivity issues") + print(" - The query returned no relevant web pages") + print(" - The web content couldn't be parsed") +else: + for idx, item in enumerate(results, 1): + print(f"\n[Result #{idx}]") + print("-" * 80) + + # Display the memory content (truncated for readability) + content = item.memory + if len(content) > 300: + print(f"Content: {content[:300]}...") + print(f" (... 
{len(content) - 300} more characters)") + else: + print(f"Content: {content}") + + # Display metadata if available + if hasattr(item, "metadata") and item.metadata: + metadata = item.metadata + if hasattr(metadata, "sources") and metadata.sources: + print(f"Source: {metadata.sources[0] if metadata.sources else 'N/A'}") + + print() + +print("=" * 80) +print("Example completed successfully!") +print("=" * 80) +print("\n💡 Next steps:") +print(" - Set your BochaAI API key in environment variable: export BOCHA_API_KEY='sk-...'") +print(" - Try different search queries to test various topics") +print(" - Adjust max_results in config to control number of results") +print(" - Use the retrieved memory items in your retrieval pipeline") +print(" - Combine internet search with local memory retrieval for hybrid systems\n") + +print("\n⚠️ Note:") +print(" If you see 'No results found', make sure:") +print(" 1. Your BochaAI API key is valid and set correctly") +print(" 2. You have internet connectivity") +print(" 3. The embedder service is running\n") + +# ============================================================================ +# Step 6: Test Google Custom Search API (Optional) +# ============================================================================ +print("\n" + "=" * 80) +print("GOOGLE CUSTOM SEARCH API TEST") +print("=" * 80) + +# NOTE: You need to set your Google API key and Search Engine ID +# Get these from: https://developers.google.com/custom-search/v1/overview +google_api_key = os.environ.get("GOOGLE_API_KEY", "") +google_search_engine_id = os.environ.get("GOOGLE_SEARCH_ENGINE_ID", "") + +if google_api_key and google_search_engine_id: + print("\n[Step 6.1] Configuring Google Custom Search retriever...") + + google_retriever_config = InternetRetrieverConfigFactory.model_validate( + { + "backend": "google", + "config": { + "api_key": google_api_key, + "search_engine_id": google_search_engine_id, + "max_results": 5, # Maximum number of search results to retrieve + "num_per_request": 5, # Number of results per API request (max 10 for Google) + }, + } + ) + + print("✓ Google retriever configured") + print(f" Max results: {google_retriever_config.config.max_results}") + + print("\n[Step 6.2] Creating Google retriever instance...") + google_retriever = InternetRetrieverFactory.from_config(google_retriever_config, embedder) + print("✓ Google retriever initialized") + + print("\n[Step 6.3] Performing Google web search...") + google_query = "Python best practices 2024" + print(f" 🔍 Query: '{google_query}'") + print(" ⏳ Searching via Google Custom Search API...\n") + + google_results = google_retriever.retrieve_from_internet(google_query) + + print("✓ Google search completed!") + print(f"✓ Retrieved {len(google_results)} memory items from Google search\n") + + # Display Google search results + print("=" * 80) + print("GOOGLE SEARCH RESULTS") + print("=" * 80) + + if not google_results: + print("\n❌ No results found from Google.") + print(" This might indicate:") + print(" - Invalid Google API key or Search Engine ID") + print(" - API quota exceeded") + print(" - Network connectivity issues") + else: + for idx, item in enumerate(google_results, 1): + print(f"\n[Google Result #{idx}]") + print("-" * 80) + + # Display the memory content (truncated for readability) + content = item.memory + if len(content) > 300: + print(f"Content: {content[:300]}...") + print(f" (... 
{len(content) - 300} more characters)") + else: + print(f"Content: {content}") + + # Display metadata if available + if hasattr(item, "metadata") and item.metadata: + metadata = item.metadata + if hasattr(metadata, "sources") and metadata.sources: + print(f"Source: {metadata.sources[0] if metadata.sources else 'N/A'}") + + print() -print(f"Retrieved {len(results_web)} memory items.") -for idx, item in enumerate(results_web, 1): - print(f"[{idx}] {item.memory[:500]}...") # preview first 100 chars + print("=" * 80) + print("Google Search Test completed!") + print("=" * 80) +else: + print("\n⏭️ Skipping Google Custom Search API test") + print(" To enable this test, set the following environment variables:") + print(" - GOOGLE_API_KEY: Your Google API key") + print(" - GOOGLE_SEARCH_ENGINE_ID: Your Google Custom Search Engine ID (cx parameter)") + print("\n Get your credentials from:") + print(" https://developers.google.com/custom-search/v1/overview") -print("==" * 20) +print("\n" + "=" * 80) +print("ALL TESTS COMPLETED") +print("=" * 80) +print("\n💡 Summary:") +print(" ✓ Tested BochaAI web search retriever") +if google_api_key and google_search_engine_id: + print(" ✓ Tested Google Custom Search API") +else: + print(" ⏭️ Skipped Google Custom Search API (credentials not set)") +print("\n💡 Quick Start:") +print(" # Set BochaAI API key") +print(" export BOCHA_API_KEY='sk-your-bocha-api-key'") +print(" ") +print(" # Set Google Custom Search credentials (optional)") +print(" export GOOGLE_API_KEY='your-google-api-key'") +print(" export GOOGLE_SEARCH_ENGINE_ID='your-search-engine-id'") +print(" ") +print(" # Run the example") +print(" python examples/basic_modules/textual_memory_internet_search_example.py\n") diff --git a/examples/basic_modules/tree_textual_memory_reasoner.py b/examples/basic_modules/tree_textual_memory_reasoner.py deleted file mode 100644 index 369787458..000000000 --- a/examples/basic_modules/tree_textual_memory_reasoner.py +++ /dev/null @@ -1,167 +0,0 @@ -from memos import log -from memos.configs.embedder import EmbedderConfigFactory -from memos.configs.graph_db import GraphDBConfigFactory -from memos.configs.llm import LLMConfigFactory -from memos.embedders.factory import EmbedderFactory -from memos.graph_dbs.factory import GraphStoreFactory -from memos.llms.factory import LLMFactory -from memos.memories.textual.item import TextualMemoryItem, TreeNodeTextualMemoryMetadata -from memos.memories.textual.tree_text_memory.retrieve.reasoner import MemoryReasoner -from memos.memories.textual.tree_text_memory.retrieve.retrieval_mid_structs import ParsedTaskGoal - - -logger = log.get_logger(__name__) - -embedder_config = EmbedderConfigFactory.model_validate( - { - "backend": "ollama", - "config": { - "model_name_or_path": "nomic-embed-text:latest", - }, - } -) -embedder = EmbedderFactory.from_config(embedder_config) - -# Step 1: Load LLM config and instantiate -config = LLMConfigFactory.model_validate( - { - "backend": "ollama", - "config": { - "model_name_or_path": "qwen3:0.6b", - "temperature": 0.7, - "max_tokens": 1024, - }, - } -) -llm = LLMFactory.from_config(config) - -# Step 1: Prepare a mock ParsedTaskGoal -parsed_goal = ParsedTaskGoal( - memories=[ - "Multi-UAV Long-Term Coverage", - "Coverage Metrics", - "Reward Function Design", - "Energy Model", - "CT and FT Definition", - "Reward Components", - "Energy Cost Components", - ], - keys=["UAV", "coverage", "energy", "reward"], - tags=[], - goal_type="explanation", -) - -query = "How can multiple UAVs coordinate to maximize 
coverage while saving energy?" -query_embedding = embedder.embed([query])[0] - - -# Step 2: Initialize graph store -graph_config = GraphDBConfigFactory( - backend="neo4j", - config={ - "uri": "bolt://localhost:7687", - "user": "neo4j", - "password": "12345678", - "db_name": "user06alice", - "auto_create": True, - }, -) -graph_store = GraphStoreFactory.from_config(graph_config) - -ranked_memories = [ - TextualMemoryItem( - id="a88db9ce-3c77-4e83-8d61-aa9ef95c957e", - memory="Coverage performance is measured using CT (Coverage Time) and FT (Fairness Time) metrics.", - metadata=TreeNodeTextualMemoryMetadata( - user_id=None, - session_id=None, - status="activated", - type="fact", - memory_time="2024-01-01", - source="file", - confidence=91.0, - entities=["CT", "FT"], - tags=["coverage", "fairness", "metrics"], - visibility="public", - updated_at="2025-06-11T11:51:24.438001", - memory_type="LongTermMemory", - key="Coverage Metrics", - value="CT and FT used for long-term area and fairness evaluation", - hierarchy_level="concept", - sources=["paper://multi-uav-coverage/metrics"], - embedding=[0.01] * 768, - ), - ), - TextualMemoryItem( - id="c34f5e6b-2d34-4e6f-8c9b-abcdef123456", - memory="The capital of France is Paris, which is known for the Eiffel Tower.", - metadata=TreeNodeTextualMemoryMetadata( - user_id=None, - session_id=None, - status="activated", - type="fact", - memory_time="2024-01-01", - source="file", - confidence=90.0, - entities=["France", "Paris", "Eiffel Tower"], - tags=["geography", "city", "landmark"], - visibility="public", - updated_at="2025-06-11T11:51:24.438001", - memory_type="LongTermMemory", - key="Geography Fact", - value="Paris is the capital of France", - hierarchy_level="concept", - sources=["wikipedia://paris"], - embedding=[0.03] * 768, - ), - ), - TextualMemoryItem( - id="d56a7b8c-3e45-4f7a-9dab-fedcba654321", - memory="Total energy cost is calculated from both mechanical movement and communication transmission.", - metadata=TreeNodeTextualMemoryMetadata( - user_id=None, - session_id=None, - status="activated", - type="fact", - memory_time="2024-01-01", - source="file", - confidence=89.0, - entities=["movement power", "transmission power"], - tags=["energy", "movement", "transmission"], - visibility="public", - updated_at="2025-06-11T11:51:24.438001", - memory_type="LongTermMemory", - key="Energy Cost Components", - value="Includes movement and communication energy", - hierarchy_level="fact", - sources=["paper://multi-uav-coverage/energy-detail"], - embedding=[0.04] * 768, - ), - ), -] - -# Step 7: Init memory retriever -reasoner = MemoryReasoner(llm=llm) - - -# Step 8: Print retrieved memory items before ranking -print("\n=== Retrieved Memory Items (Before Rerank) ===") -for idx, item in enumerate(ranked_memories): - print(f"[Original #{idx + 1}] ID: {item.id}") - print(f"Memory: {item.memory[:200]}...\n") - -# Step 9: Rerank -reasoned_memories = reasoner.reason( - query=query, - ranked_memories=ranked_memories, - parsed_goal=parsed_goal, -) - -# Step 10: Print ranked reasoned memory items with original positions -print("\n=== Memory Items After Reason (Sorted) ===") -id_to_original_rank = {item.id: i + 1 for i, item in enumerate(ranked_memories)} - -for idx, item in enumerate(reasoned_memories): - original_rank = id_to_original_rank.get(item.id, "-") - print(f"[Reasoned #{idx + 1}] ID: {item.id} (Original #{original_rank})") - print(f"Memory: {item.memory[:200]}...\n") diff --git a/examples/basic_modules/tree_textual_memory_recall.py 
b/examples/basic_modules/tree_textual_memory_recall.py index 935513784..10831cbeb 100644 --- a/examples/basic_modules/tree_textual_memory_recall.py +++ b/examples/basic_modules/tree_textual_memory_recall.py @@ -1,26 +1,152 @@ +""" +Tree Textual Memory Recall Example +=================================== + +This example demonstrates how to use MemOS's GraphMemoryRetriever to recall memories +from a shared graph database. + +**What you'll learn:** +- How to load embedder and graph database configurations +- How to insert memories into the graph store with embeddings +- How to build a ParsedTaskGoal to guide retrieval +- How to retrieve relevant memories using hybrid search + +**Use case:** +You have stored various long-term memories about a user (e.g., "Caroline") +in a graph database, and now you want to answer a natural language question +by retrieving the most relevant memories. + +Run this example: + python examples/basic_modules/tree_textual_memory_recall.py +""" + +import json +import os + from memos import log from memos.configs.embedder import EmbedderConfigFactory from memos.configs.graph_db import GraphDBConfigFactory from memos.embedders.factory import EmbedderFactory from memos.graph_dbs.factory import GraphStoreFactory -from memos.memories.textual.item import TextualMemoryItem +from memos.memories.textual.item import TextualMemoryItem, TreeNodeTextualMemoryMetadata from memos.memories.textual.tree_text_memory.retrieve.recall import GraphMemoryRetriever from memos.memories.textual.tree_text_memory.retrieve.retrieval_mid_structs import ParsedTaskGoal logger = log.get_logger(__name__) -embedder_config = EmbedderConfigFactory.model_validate( - { - "backend": "ollama", - "config": { - "model_name_or_path": "nomic-embed-text:latest", - }, - } -) +# ============================================================================ +# Step 0: Setup - Load configuration files +# ============================================================================ +print("=" * 70) +print("Tree Textual Memory Recall Example") +print("=" * 70) + +current_dir = os.path.dirname(os.path.abspath(__file__)) +config_dir = os.path.join(current_dir, "../data/config") + +# Load the shared tree-text memory configuration +# This config includes both embedder settings and graph database settings +config_path = os.path.join(config_dir, "tree_config_shared_database.json") +with open(config_path) as f: + config_data = json.load(f) + +print(f"\n✓ Loaded configuration from: {config_path}") + +# ============================================================================ +# Step 1: Initialize Embedder +# ============================================================================ +# The embedder converts text into vector embeddings for semantic search +embedder_config = EmbedderConfigFactory.model_validate(config_data["embedder"]) embedder = EmbedderFactory.from_config(embedder_config) -# Step 1: Prepare a mock ParsedTaskGoal +print(f"✓ Initialized embedder: {embedder_config.backend}") + +# ============================================================================ +# Step 2: Initialize Graph Store +# ============================================================================ +# The graph store persists memories and supports both graph queries and vector search +graph_config = GraphDBConfigFactory(**config_data["graph_db"]) +graph_store = GraphStoreFactory.from_config(graph_config) + +print(f"✓ Initialized graph store: {graph_config.backend}") + +# 
============================================================================ +# Step 3: Clean up old mock data (optional) +# ============================================================================ +# If you're running this example multiple times, clean up previous test data +# to avoid duplicates. This is optional in production. +print("\nCleaning up old mock data...") +try: + if hasattr(graph_store, "delete_node_by_prams"): + graph_store.delete_node_by_prams(filter={"key": "LGBTQ support group"}) + graph_store.delete_node_by_prams(filter={"key": "LGBTQ community"}) + print("✓ Old mock data cleaned") + else: + print("⚠ Graph store doesn't support delete_node_by_prams, skipping cleanup") +except Exception as exc: + print(f"⚠ Cleanup warning: {exc}") + +# ============================================================================ +# Step 4: Insert mock memories into the graph store +# ============================================================================ +# In a real application, these would be memories extracted from user conversations +# or documents. Here we use a few hardcoded examples about "Caroline". +print("\nInserting mock memories...") + +mock_memories = [ + { + "memory": "Caroline joined the LGBTQ support group in 2023.", + "tags": ["LGBTQ", "support group"], + "key": "LGBTQ support group", + }, + { + "memory": "Caroline has been an active member of the LGBTQ community since college.", + "tags": ["LGBTQ", "community"], + "key": "LGBTQ community", + }, + { + "memory": "She attended the weekly LGBTQ support group meetings every Friday.", + "tags": ["LGBTQ", "support group", "meetings"], + "key": "LGBTQ support group", + }, +] + +for idx, mem_data in enumerate(mock_memories, 1): + # Generate embedding for this memory + mem_embedding = embedder.embed([mem_data["memory"]])[0] + + # Create a TextualMemoryItem with metadata + item = TextualMemoryItem( + memory=mem_data["memory"], + metadata=TreeNodeTextualMemoryMetadata( + memory_type="LongTermMemory", # Can be ShortTermMemory, LongTermMemory, etc. + key=mem_data["key"], + tags=mem_data["tags"], + embedding=mem_embedding, + sources=[], + ), + ) + + # Add the memory node to the graph store + graph_store.add_node(item.id, item.memory, item.metadata.model_dump()) + print(f" [{idx}/{len(mock_memories)}] Added: {mem_data['memory'][:60]}...") + +print("✓ Mock memories inserted successfully") + +# ============================================================================ +# Step 5: Define a query and retrieval goal +# ============================================================================ +# This is the natural language question we want to answer +query = "When did Caroline go to the LGBTQ support group?" 
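+
+# Optional sanity check (illustrative aside): a plain embedding-only lookup
+# against the graph store, using the same search_by_embedding call as the other
+# graph examples. The hybrid retriever below additionally uses graph structure
+# and the parsed goal hints.
+query_vec_preview = embedder.embed([query])[0]
+vector_only_hits = graph_store.search_by_embedding(vector=query_vec_preview, top_k=3)
+print(f"Vector-only preview returned {len(vector_only_hits)} candidate nodes")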
+print(f"\n{'=' * 70}") +print(f"Query: {query}") +print(f"{'=' * 70}") + +# ParsedTaskGoal provides hints to guide the retrieval process: +# - memories: semantic descriptions of what we're looking for +# - keys: specific keywords to match +# - tags: categorical tags to filter by parsed_goal = ParsedTaskGoal( memories=[ "Caroline's participation in the LGBTQ community", @@ -34,37 +160,46 @@ tags=["LGBTQ", "support group"], ) -# Step 2: Initialize graph store -graph_config = GraphDBConfigFactory( - backend="neo4j", - config={ - "uri": "bolt://localhost:7687", - "user": "neo4j", - "password": "12345678", - "db_name": "caroline", - "auto_create": True, - }, -) -graph_store = GraphStoreFactory.from_config(graph_config) +# ============================================================================ +# Step 6: Perform hybrid retrieval +# ============================================================================ +# The retriever uses both semantic similarity (embeddings) and graph structure +# to find the most relevant memories +print("\nPerforming hybrid retrieval...") -# Step 6: Create embedding for query -query = "When did Caroline go to the LGBTQ support group?" query_embedding = embedder.embed([query])[0] - -# Step 7: Init memory retriever retriever = GraphMemoryRetriever(graph_store=graph_store, embedder=embedder) -# Step 8: Run hybrid retrieval retrieved_items: list[TextualMemoryItem] = retriever.retrieve( query=query, parsed_goal=parsed_goal, - top_k=10, - memory_scope="LongTermMemory", + top_k=10, # Maximum number of memories to retrieve + memory_scope="LongTermMemory", # Filter by memory type query_embedding=[query_embedding], ) -# Step 9: Print retrieved memory items -print("\n=== Retrieved Memory Items ===") -for item in retrieved_items: - print(f"ID: {item.id}") - print(f"Memory: {item.memory}") +print(f"✓ Retrieved {len(retrieved_items)} memories") + +# ============================================================================ +# Step 7: Display results +# ============================================================================ +print(f"\n{'=' * 70}") +print("Retrieved Memory Items:") +print(f"{'=' * 70}\n") + +if not retrieved_items: + print("❌ No memories retrieved.") + print(" This might indicate:") + print(" - The mock data wasn't inserted correctly") + print(" - The query doesn't match any stored memories") + print(" - The retrieval parameters are too restrictive") +else: + for idx, item in enumerate(retrieved_items, 1): + print(f"[{idx}] ID: {item.id}") + print(f" Memory: {item.memory}") + print(f" Tags: {item.metadata.tags if hasattr(item.metadata, 'tags') else 'N/A'}") + print() + +print(f"{'=' * 70}") +print("Example completed successfully!") +print(f"{'=' * 70}\n") diff --git a/examples/basic_modules/tree_textual_memory_relation_reason_detector.py b/examples/basic_modules/tree_textual_memory_relation_reason_detector.py index 72e4deb60..dd6bca67f 100644 --- a/examples/basic_modules/tree_textual_memory_relation_reason_detector.py +++ b/examples/basic_modules/tree_textual_memory_relation_reason_detector.py @@ -1,3 +1,31 @@ +""" +Tree Textual Memory Relation & Reasoning Detector Example +========================================================== + +This example demonstrates how to use MemOS's RelationAndReasoningDetector to +automatically discover relationships between memories and infer new knowledge. 
+ +**What you'll learn:** +- How to initialize embedder, graph store, and LLM for relation detection +- How to create mock memory nodes with rich metadata +- How to detect pairwise relations between memory nodes (e.g., causal, temporal) +- How to infer new facts through multi-hop reasoning chains +- How to generate aggregate concepts from related memories +- How to identify sequential patterns (FOLLOWS relationships) + +**Use case:** +You have stored multiple facts about a user (e.g., "Caroline's work stress", +"joining support group", "improved mental health"). This detector can: +1. Find causal links: "Work stress" → "Joining support group" → "Better mental health" +2. Infer new facts: "Support groups help reduce work-related stress" +3. Build aggregate concepts: "Caroline's stress management journey" + +Run this example: + python examples/basic_modules/tree_textual_memory_relation_reason_detector.py +""" + +import json +import os import uuid from memos import log @@ -16,52 +44,79 @@ logger = log.get_logger(__name__) -# === Step 1: Initialize embedder === -embedder_config = EmbedderConfigFactory.model_validate( - { - "backend": "ollama", - "config": { - "model_name_or_path": "nomic-embed-text:latest", - }, - } -) +# ============================================================================ +# Step 0: Setup - Load configuration files +# ============================================================================ +print("=" * 80) +print("Tree Textual Memory Relation & Reasoning Detector Example") +print("=" * 80) +print("\nThis example will:") +print(" 1. Create a set of related memories about Caroline") +print(" 2. Detect causal and temporal relationships between them") +print(" 3. Infer new knowledge through reasoning chains") +print(" 4. Generate aggregate concepts") +print("=" * 80) + +current_dir = os.path.dirname(os.path.abspath(__file__)) +config_dir = os.path.join(current_dir, "../data/config") + +# Load the shared tree-text memory configuration +# This includes embedder, graph DB, and LLM configurations +config_path = os.path.join(config_dir, "tree_config_shared_database.json") +with open(config_path) as f: + config_data = json.load(f) + +print(f"\n✓ Loaded configuration from: {config_path}") + +# ============================================================================ +# Step 1: Initialize Embedder +# ============================================================================ +print("\n[Step 1] Initializing embedder...") + +embedder_config = EmbedderConfigFactory.model_validate(config_data["embedder"]) embedder = EmbedderFactory.from_config(embedder_config) -# === Step 2: Initialize Neo4j GraphStore === -graph_config = GraphDBConfigFactory( - backend="neo4j", - config={ - "uri": "bolt://localhost:7687", - "user": "neo4j", - "password": "12345678", - "db_name": "lucy4", - "auto_create": True, - }, -) +print(f"✓ Embedder initialized: {embedder_config.backend}") + +# ============================================================================ +# Step 2: Initialize Graph Store +# ============================================================================ +print("\n[Step 2] Initializing graph database...") + +# Load graph database configuration from the config file +graph_config = GraphDBConfigFactory(**config_data["graph_db"]) graph_store = GraphStoreFactory.from_config(graph_config) -# === Step 3: Initialize LLM for pairwise relation detection === -# Step 1: Load LLM config and instantiate -config = LLMConfigFactory.model_validate( - { - "backend": "ollama", - "config": { - 
"model_name_or_path": "qwen3:0.6b", - "temperature": 0.7, - "max_tokens": 1024, - }, - } -) -llm = LLMFactory.from_config(config) +print(f"✓ Graph store initialized: {graph_config.backend}") +print(f" Connected to: {graph_config.config.get('uri', 'N/A')}") +print(f" Database: {graph_config.config.get('db_name', 'N/A')}") + +# ============================================================================ +# Step 3: Initialize LLM +# ============================================================================ +print("\n[Step 3] Initializing LLM for relation detection...") -# === Step 4: Create a mock GraphDBNode to test relation detection === +# The LLM analyzes pairs of memories to detect semantic relationships +# (e.g., "causes", "leads to", "happens before", etc.) +# We use the extractor_llm from the config file +llm_config = LLMConfigFactory.model_validate(config_data["extractor_llm"]) +llm = LLMFactory.from_config(llm_config) +print(f"✓ LLM initialized: {llm_config.backend}") + +# ============================================================================ +# Step 4: Create Mock Memory Nodes +# ============================================================================ +print("\n[Step 4] Creating mock memory nodes...") +print(" Building a scenario about Caroline's stress and support journey...\n") + +# Node A: Caroline's work stress node_a = GraphDBNode( id=str(uuid.uuid4()), memory="Caroline faced increased workload stress during the project deadline.", metadata=TreeNodeTextualMemoryMetadata( memory_type="LongTermMemory", - embedding=[0.1] * 10, + embedding=[0.1] * 10, # Placeholder embedding (real one will be generated) key="Workload stress", tags=["stress", "workload"], type="fact", @@ -70,7 +125,7 @@ updated_at="2024-06-28T09:00:00Z", ), ) - +# Node B: Improved mental health after joining support group node_b = GraphDBNode( id=str(uuid.uuid4()), memory="After joining the support group, Caroline reported improved mental health.", @@ -85,7 +140,9 @@ updated_at="2024-07-10T12:00:00Z", ), ) +print(" ✓ Node B: Improved mental health") +# Node C: General research about support groups node_c = GraphDBNode( id=str(uuid.uuid4()), memory="Peer support groups are effective in reducing stress for LGBTQ individuals.", @@ -100,8 +157,9 @@ updated_at="2024-06-29T14:00:00Z", ), ) +print(" ✓ Node C: Support group benefits") -# === D: Work pressure ➜ stress === +# Node D: Work pressure → stress (causal chain element) node_d = GraphDBNode( id=str(uuid.uuid4()), memory="Excessive work pressure increases stress levels among employees.", @@ -116,8 +174,9 @@ updated_at="2024-06-15T08:00:00Z", ), ) +print(" ✓ Node D: Work pressure → stress") -# === E: Stress ➜ poor sleep === +# Node E: Stress → poor sleep (causal chain element) node_e = GraphDBNode( id=str(uuid.uuid4()), memory="High stress levels often result in poor sleep quality.", @@ -132,8 +191,9 @@ updated_at="2024-06-18T10:00:00Z", ), ) +print(" ✓ Node E: Stress → poor sleep") -# === F: Poor sleep ➜ low performance === +# Node F: Poor sleep → low performance (causal chain element) node_f = GraphDBNode( id=str(uuid.uuid4()), memory="Employees with poor sleep show reduced work performance.", @@ -148,7 +208,10 @@ updated_at="2024-06-20T12:00:00Z", ), ) +print(" ✓ Node F: Poor sleep → low performance") +# Main Node: The central fact we want to analyze +# This node will be used as the "anchor" to find related memories node = GraphDBNode( id="a88db9ce-3c77-4e83-8d61-aa9ef95c957e", memory="Caroline joined an LGBTQ support group to cope with work-related 
stress.", @@ -156,7 +219,7 @@ memory_type="LongTermMemory", embedding=embedder.embed( ["Caroline joined an LGBTQ support group to cope with work-related stress."] - )[0], + )[0], # Generate real embedding for the main node key="Caroline LGBTQ stress", tags=["LGBTQ", "support group", "stress"], type="fact", @@ -165,49 +228,122 @@ updated_at="2024-07-01T10:00:00Z", ), ) +print(" ✓ Main Node: Caroline's support group action\n") +# ============================================================================ +# Step 5: Insert Nodes into Graph Store +# ============================================================================ +print("[Step 5] Inserting all nodes into graph database...") -for n in [node, node_a, node_b, node_c, node_d, node_e, node_f]: +all_nodes = [node, node_a, node_b, node_c, node_d, node_e, node_f] +for n in all_nodes: graph_store.add_node(n.id, n.memory, n.metadata.dict()) +print(f"✓ Successfully inserted {len(all_nodes)} memory nodes into the graph\n") + +# ============================================================================ +# Step 6: Initialize Relation & Reasoning Detector +# ============================================================================ +print("[Step 6] Initializing RelationAndReasoningDetector...") -# === Step 5: Initialize RelationDetector and run detection === relation_detector = RelationAndReasoningDetector( - graph_store=graph_store, llm=llm, embedder=embedder + graph_store=graph_store, + llm=llm, + embedder=embedder, ) +print("✓ Detector initialized and ready\n") + +# ============================================================================ +# Step 7: Run Relation Detection & Reasoning +# ============================================================================ +print("[Step 7] Running relation detection and reasoning...") +print(f" Analyzing relationships for: '{node.memory[:60]}...'\n") + +# This will: +# 1. Find semantically similar nodes using embeddings +# 2. Detect pairwise relations (causal, temporal, etc.) using LLM +# 3. Infer new facts through multi-hop reasoning +# 4. Generate aggregate concepts +# 5. 
Identify sequential patterns results = relation_detector.process_node( node=node, - exclude_ids=[node.id], # Exclude self when searching for neighbors - top_k=5, + exclude_ids=[node.id], # Don't compare the node with itself + top_k=5, # Consider top 5 most similar nodes ) -# === Step 6: Print detected relations === -print("\n=== Detected Global Relations ===") +print("✓ Analysis complete!\n") + +# ============================================================================ +# Step 8: Display Results +# ============================================================================ +print("=" * 80) +print("ANALYSIS RESULTS") +print("=" * 80) + +# Display detected pairwise relations +print("\n📊 [1] Detected Pairwise Relations") +print("-" * 80) +if results["relations"]: + for idx, rel in enumerate(results["relations"], 1): + print(f"\n Relation #{idx}:") + print(f" Source: {rel['source_id'][:8]}...") + print(f" Target: {rel['target_id'][:8]}...") + print(f" Type: {rel['relation_type']}") +else: + print(" ❌ No pairwise relations detected") + print(" Try adjusting similarity threshold or adding more related nodes") +# Display inferred new facts +print("\n\n💡 [2] Inferred New Facts (through reasoning)") +print("-" * 80) +if results["inferred_nodes"]: + for idx, inferred_node in enumerate(results["inferred_nodes"], 1): + print(f"\n Inferred Fact #{idx}:") + print(f" 💬 {inferred_node.memory}") + print(f" 📌 Sources: {inferred_node.metadata.sources}") + print(f" 🏷️ Key: {inferred_node.metadata.key}") +else: + print(" ℹ️ No new facts inferred") + print(" This is normal if relations are simple or insufficient for reasoning") -# === Step 6: Pretty-print detected results === -print("\n=== Detected Pairwise Relations ===") -for rel in results["relations"]: - print(f" Source ID: {rel['source_id']}") - print(f" Target ID: {rel['target_id']}") - print(f" Relation Type: {rel['relation_type']}") - print("------") +# Display sequence links (temporal ordering) +print("\n\n⏱️ [3] Sequence Links (FOLLOWS relationships)") +print("-" * 80) +if results["sequence_links"]: + for idx, link in enumerate(results["sequence_links"], 1): + print(f" {idx}. {link['from_id'][:8]}... 
→ {link['to_id'][:8]}...")
+else:
+    print("   ℹ️ No sequential patterns detected")
 
-print("\n=== Inferred Nodes ===")
-for node in results["inferred_nodes"]:
-    print(f"  New Fact: {node.memory}")
-    print(f"  Sources: {node.metadata.sources}")
-    print("------")
+# Display aggregate concepts
+print("\n\n🎯 [4] Aggregate Concepts")
+print("-" * 80)
+if results["aggregate_nodes"]:
+    for idx, agg in enumerate(results["aggregate_nodes"], 1):
+        print(f"\n  Concept #{idx}:")
+        print(f"    📖 {agg.memory}")
+        print(f"    🔑 Key: {agg.metadata.key}")
+        print(f"    📎 Aggregates from: {agg.metadata.sources}")
+else:
+    print("   ℹ️ No aggregate concepts generated")
+    print("   Aggregates are created when multiple related memories share themes")
 
-print("\n=== Sequence Links (FOLLOWS) ===")
-for link in results["sequence_links"]:
-    print(f"  From: {link['from_id']} -> To: {link['to_id']}")
-    print("------")
+print("\n" + "=" * 80)
+print("Example completed successfully!")
+print("=" * 80)
+print("\n💡 Next steps:")
+print("   - Modify the mock memories to test different scenarios")
+print("   - Adjust top_k parameter to control how many neighbors are considered")
+print("   - Experiment with different LLM models for relation detection")
+print("   - Check the Neo4j database to visualize the created graph\n")
 
-print("\n=== Aggregate Concepts ===")
-for agg in results["aggregate_nodes"]:
-    print(f"  Concept Key: {agg.metadata.key}")
-    print(f"  Concept Memory: {agg.memory}")
-    print(f"  Sources: {agg.metadata.sources}")
-    print("------")
diff --git a/examples/basic_modules/tree_textual_memory_task_goal_parser.py b/examples/basic_modules/tree_textual_memory_task_goal_parser.py
index c36d47cf1..11c2c8087 100644
--- a/examples/basic_modules/tree_textual_memory_task_goal_parser.py
+++ b/examples/basic_modules/tree_textual_memory_task_goal_parser.py
@@ -1,52 +1,185 @@
+"""
+Tree Textual Memory Task Goal Parser Example
+=============================================
+
+This example demonstrates how to use MemOS's TaskGoalParser to parse natural
+language queries into structured retrieval goals.
+
+**What you'll learn:**
+- How to initialize an LLM for task parsing
+- How to parse a natural language query into structured components
+- The difference between "fast" and "fine" parsing modes
+- How the parser extracts memories, keys, tags, and goal types
+
+**Use case:**
+When a user asks "When did Caroline go to the LGBTQ support group?", you need to:
+1. Extract semantic descriptions (memories to look for)
+2. Identify key phrases and keywords
+3. Determine relevant tags for filtering
+4. Classify the goal type (retrieval, update, etc.)
+
+The TaskGoalParser does this automatically using an LLM.
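+
+A rough sketch of the expected output (illustrative values only; the exact
+fields are defined by ParsedTaskGoal in your MemOS version):
+
+    result = parser.parse(task, mode="fast")
+    result.memories   # e.g. ["Caroline attended an LGBTQ support group"]
+    result.keys       # e.g. ["Caroline", "LGBTQ support group"]
+    result.tags       # e.g. ["LGBTQ", "support group"]
+    result.goal_type  # e.g. "retrieval"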
+ +Run this example: + python examples/basic_modules/tree_textual_memory_task_goal_parser.py +""" + +import json +import os import time +from memos import log from memos.configs.llm import LLMConfigFactory from memos.llms.factory import LLMFactory from memos.memories.textual.tree_text_memory.retrieve.task_goal_parser import TaskGoalParser -# Step 1: Load LLM config and instantiate -config = LLMConfigFactory.model_validate( - { - "backend": "ollama", - "config": { - "model_name_or_path": "qwen3:0.6b", - "temperature": 0.7, - "max_tokens": 1024, - "remove_think_prefix": True, - }, - } -) -llm = LLMFactory.from_config(config) +logger = log.get_logger(__name__) + +# ============================================================================ +# Step 0: Setup - Load configuration files +# ============================================================================ +print("=" * 80) +print("Tree Textual Memory Task Goal Parser Example") +print("=" * 80) + +current_dir = os.path.dirname(os.path.abspath(__file__)) +config_dir = os.path.join(current_dir, "../data/config") + +# Load the shared tree-text memory configuration +config_path = os.path.join(config_dir, "tree_config_shared_database.json") +with open(config_path) as f: + config_data = json.load(f) + +print(f"\n✓ Loaded configuration from: {config_path}") + +# ============================================================================ +# Step 1: Initialize LLM for Task Parsing +# ============================================================================ +print("\n[Step 1] Initializing LLM for task goal parsing...") + +# The LLM will analyze the natural language query and extract structured information +# We use the extractor_llm from the config file +llm_config = LLMConfigFactory.model_validate(config_data["extractor_llm"]) +llm = LLMFactory.from_config(llm_config) + +print(f"✓ LLM initialized: {llm_config.backend}") -# Task input +# ============================================================================ +# Step 2: Define a natural language task/query +# ============================================================================ +# This is the user's question that needs to be parsed task = "When did Caroline go to the LGBTQ support group?" -parser = TaskGoalParser(llm, mode="fast") +print("\n[Step 2] Task to parse:") +print(f" 📝 '{task}'") +print() -time_init = time.time() -# Parse task goal -result = parser.parse(task) +# ============================================================================ +# Step 3: Parse using FAST mode +# ============================================================================ +print("[Step 3] Parsing with FAST mode...") +print(" (Fast mode uses a simpler prompt for quick parsing)") -# Print output -print("=== Parsed Result ===") -print("memories:", result.memories) -print("keys:", result.keys) -print("tags:", result.tags) -print("goal_type:", result.goal_type) -print("time:", time.time() - time_init) +parser = TaskGoalParser(llm) -parser = TaskGoalParser(llm, mode="fine") +time_start = time.time() +result_fast = parser.parse(task, mode="fast") +time_fast = time.time() - time_start -time_init = time.time() +print(f"✓ Fast mode parsing completed in {time_fast:.3f}s\n") -# Parse task goal -result = parser.parse(task) +# Display fast mode results +print("=" * 80) +print("FAST MODE RESULTS") +print("=" * 80) +print("\n📋 Memories (semantic descriptions):") +if result_fast.memories: + for idx, mem in enumerate(result_fast.memories, 1): + print(f" {idx}. 
{mem}") +else: + print(" (None extracted)") + +print("\n🔑 Keys (important keywords):") +if result_fast.keys: + for idx, key in enumerate(result_fast.keys, 1): + print(f" {idx}. {key}") +else: + print(" (None extracted)") + +print("\n🏷️ Tags (categorical labels):") +if result_fast.tags: + print(f" {', '.join(result_fast.tags)}") +else: + print(" (None extracted)") + +print(f"\n🎯 Goal Type: {result_fast.goal_type}") +print(f"⏱️ Processing Time: {time_fast:.3f}s") + +# ============================================================================ +# Step 4: Parse using FINE mode +# ============================================================================ +print(f"\n{'=' * 80}") +print("[Step 4] Parsing with FINE mode...") +print(" (Fine mode uses more detailed prompts for better accuracy)") + +time_start = time.time() +result_fine = parser.parse(task, mode="fine") +time_fine = time.time() - time_start + +print(f"✓ Fine mode parsing completed in {time_fine:.3f}s\n") + +# Display fine mode results +print("=" * 80) +print("FINE MODE RESULTS") +print("=" * 80) +print("\n📋 Memories (semantic descriptions):") +if result_fine.memories: + for idx, mem in enumerate(result_fine.memories, 1): + print(f" {idx}. {mem}") +else: + print(" (None extracted)") + +print("\n🔑 Keys (important keywords):") +if result_fine.keys: + for idx, key in enumerate(result_fine.keys, 1): + print(f" {idx}. {key}") +else: + print(" (None extracted)") + +print("\n🏷️ Tags (categorical labels):") +if result_fine.tags: + print(f" {', '.join(result_fine.tags)}") +else: + print(" (None extracted)") + +print(f"\n🎯 Goal Type: {result_fine.goal_type}") +print(f"⏱️ Processing Time: {time_fine:.3f}s") + +# ============================================================================ +# Step 5: Compare Results +# ============================================================================ +print(f"\n{'=' * 80}") +print("COMPARISON") +print("=" * 80) +print("\nSpeed:") +print(f" Fast mode: {time_fast:.3f}s") +print(f" Fine mode: {time_fine:.3f}s") +print(f" Difference: {abs(time_fast - time_fine):.3f}s") + +print("\nExtracted Components:") +print( + f" Fast mode: {len(result_fast.memories)} memories, {len(result_fast.keys)} keys, {len(result_fast.tags)} tags" +) +print( + f" Fine mode: {len(result_fine.memories)} memories, {len(result_fine.keys)} keys, {len(result_fine.tags)} tags" +) -# Print output -print("=== Parsed Result ===") -print("memories:", result.memories) -print("keys:", result.keys) -print("tags:", result.tags) -print("goal_type:", result.goal_type) -print("time:", time.time() - time_init) +print(f"\n{'=' * 80}") +print("Example completed successfully!") +print("=" * 80) +print("\n💡 Next steps:") +print(" - Try different queries to see how the parser handles various inputs") +print(" - Use the parsed result as input for GraphMemoryRetriever") +print(" - Experiment with 'fast' vs 'fine' mode based on your accuracy/speed needs") +print(" - The parsed ParsedTaskGoal can be passed directly to retrieval functions\n") diff --git a/examples/core_memories/general_textual_memory.py b/examples/core_memories/general_textual_memory.py index f71e2ef2e..d5c765b01 100644 --- a/examples/core_memories/general_textual_memory.py +++ b/examples/core_memories/general_textual_memory.py @@ -1,14 +1,25 @@ +import os +import pprint + from memos.configs.memory import MemoryConfigFactory from memos.memories.factory import MemoryFactory +# Initialize the memory configuration +# This configuration specifies the extractor, vector database, and embedder 
backend. +# Here we use OpenAI for extraction, Qdrant for vector storage, and Ollama for embedding. config = MemoryConfigFactory( backend="general_text", config={ "extractor_llm": { - "backend": "ollama", + "backend": "openai", "config": { - "model_name_or_path": "qwen3:0.6b", + "model_name_or_path": "gpt-4o-mini", + "api_key": os.environ.get("OPENAI_API_KEY"), + "api_base": os.environ.get( + "OPENAI_BASE_URL", + os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1"), + ), "temperature": 0.0, "remove_think_prefix": True, "max_tokens": 8192, @@ -30,6 +41,8 @@ }, }, ) + +# Create the memory instance from the configuration m = MemoryFactory.from_config(config) example_memories = [ @@ -52,20 +65,25 @@ }, }, ] + example_id = "a19b6caa-5d59-42ad-8c8a-e4f7118435b4" print("===== Extract memories =====") +# Extract memories from a conversation +# The extractor LLM processes the conversation to identify relevant information. memories = m.extract( [ {"role": "user", "content": "I love tomatoes."}, {"role": "assistant", "content": "Great! Tomatoes are delicious."}, ] ) -print(memories) +pprint.pprint(memories) print() print("==== Add memories ====") +# Add the extracted memories to the memory store m.add(memories) +# Add a manually created memory item m.add( [ { @@ -80,19 +98,27 @@ } ] ) -print(m.get_all()) +print("All memories after addition:") +pprint.pprint(m.get_all()) print() + print("==== Search memories ====") +# Search for memories related to a query search_results = m.search("Tell me more about the user", top_k=2) -print(search_results) +pprint.pprint(search_results) print() print("==== Get memories ====") -print(m.get(example_id)) -print(m.get_by_ids([example_id])) +# Retrieve a specific memory by its ID +print(f"Memory with ID {example_id}:") +pprint.pprint(m.get(example_id)) +# Retrieve multiple memories by IDs +print(f"Memories by IDs [{example_id}]:") +pprint.pprint(m.get_by_ids([example_id])) print() print("==== Update memories ====") +# Update an existing memory m.update( example_id, { @@ -106,19 +132,26 @@ }, }, ) -print(m.get(example_id)) +print(f"Memory after update (ID {example_id}):") +pprint.pprint(m.get(example_id)) +print() + +print("==== Dump memory ====") +# Dump the current state of memory to a file +m.dump("tmp/general_mem") +print("Memory dumped to 'tmp/general_mem'.") print() print("==== Delete memories ====") +# Delete a memory by its ID m.delete([example_id]) -print(m.get_all()) +print("All memories after deletion:") +pprint.pprint(m.get_all()) print() print("==== Delete all memories ====") +# Clear all memories from the store m.delete_all() -print(m.get_all()) +print("All memories after delete_all:") +pprint.pprint(m.get_all()) print() - -print("==== Dump memory ====") -m.dump("tmp/mem") -print("Memory dumped to 'tmp/mem'.") diff --git a/examples/core_memories/kv_cache_memory.py b/examples/core_memories/kv_cache_memory.py index dbe3ac4d7..470ad56a7 100644 --- a/examples/core_memories/kv_cache_memory.py +++ b/examples/core_memories/kv_cache_memory.py @@ -1,68 +1,132 @@ +import json + +from transformers import DynamicCache + from memos.configs.memory import MemoryConfigFactory +from memos.memories.activation.item import KVCacheItem from memos.memories.factory import MemoryFactory -# ===== Example: Use factory and HFLLM to build and manage KVCacheMemory ===== +def get_cache_info(cache): + if not cache: + return None + + num_layers = 0 + total_size_bytes = 0 + + if hasattr(cache, "layers"): + num_layers = len(cache.layers) + for layer in cache.layers: + if 
hasattr(layer, "key_cache") and layer.key_cache is not None: + total_size_bytes += layer.key_cache.nelement() * layer.key_cache.element_size() + if hasattr(layer, "value_cache") and layer.value_cache is not None: + total_size_bytes += layer.value_cache.nelement() * layer.value_cache.element_size() + + if hasattr(layer, "keys") and layer.keys is not None: + total_size_bytes += layer.keys.nelement() * layer.keys.element_size() + if hasattr(layer, "values") and layer.values is not None: + total_size_bytes += layer.values.nelement() * layer.values.element_size() + + elif hasattr(cache, "key_cache") and hasattr(cache, "value_cache"): + num_layers = len(cache.key_cache) + for k, v in zip(cache.key_cache, cache.value_cache, strict=False): + if k is not None: + total_size_bytes += k.nelement() * k.element_size() + if v is not None: + total_size_bytes += v.nelement() * v.element_size() + + return { + "num_layers": num_layers, + "size_bytes": total_size_bytes, + "size_mb": f"{total_size_bytes / (1024 * 1024):.2f} MB", + } + + +def serialize_item(obj): + if isinstance(obj, list): + return [serialize_item(x) for x in obj] + + if isinstance(obj, KVCacheItem): + return { + "id": obj.id, + "metadata": obj.metadata, + "records": obj.records.model_dump() + if hasattr(obj.records, "model_dump") + else obj.records, + "memory": get_cache_info(obj.memory), + } + + if isinstance(obj, DynamicCache): + return get_cache_info(obj) -# 1. Create config for KVCacheMemory (using HuggingFace backend) -config = MemoryConfigFactory( - backend="kv_cache", - config={ - "extractor_llm": { - "backend": "huggingface", - "config": { - "model_name_or_path": "Qwen/Qwen3-0.6B", # Use a valid HuggingFace model name - "max_tokens": 32, - "add_generation_prompt": True, - "remove_think_prefix": True, + return str(obj) + + +if __name__ == "__main__": + # ===== Example: Use factory and HFLLM to build and manage KVCacheMemory ===== + + # 1. Create config for KVCacheMemory (using HuggingFace backend) + config = MemoryConfigFactory( + backend="kv_cache", + config={ + "extractor_llm": { + "backend": "huggingface", + "config": { + "model_name_or_path": "Qwen/Qwen3-0.6B", # Use a valid HuggingFace model name + "max_tokens": 32, + "add_generation_prompt": True, + "remove_think_prefix": True, + }, }, }, - }, -) - -# 2. Instantiate KVCacheMemory using the factory -kv_mem = MemoryFactory.from_config(config) - -# 3. Extract a KVCacheItem (DynamicCache) from a prompt (uses HFLLM.build_kv_cache internally) -prompt = [ - {"role": "user", "content": "What is MemOS?"}, - {"role": "assistant", "content": "MemOS is a memory operating system for LLMs."}, -] -print("===== Extract KVCacheItem =====") -cache_item = kv_mem.extract(prompt) -print(cache_item) -print() - -# 4. Add the extracted KVCacheItem -print("===== Add KVCacheItem =====") -kv_mem.add([cache_item]) -print(kv_mem.get_all()) -print() - -# 5. Get by id -print("===== Get KVCacheItem by id =====") -retrieved = kv_mem.get(cache_item.id) -print(retrieved) -print() - -# 6. Merge caches (simulate with two items) -print("===== Merge DynamicCache =====") -item2 = kv_mem.extract([{"role": "user", "content": "Tell me a joke."}]) -kv_mem.add([item2]) -merged_cache = kv_mem.get_cache([cache_item.id, item2.id]) -print(merged_cache) -print() - -# 7. Delete one -print("===== Delete one KVCacheItem =====") -kv_mem.delete([cache_item.id]) -print(kv_mem.get_all()) -print() - -# 8. 
Dump and load -print("===== Dump and Load KVCacheMemory =====") -kv_mem.dump("tmp/kv_mem") -print("Memory dumped to 'tmp/kv_mem'.") -kv_mem.delete_all() -kv_mem.load("tmp/kv_mem") -print("Memory loaded from 'tmp/kv_mem':", kv_mem.get_all()) + ) + + # 2. Instantiate KVCacheMemory using the factory + kv_mem = MemoryFactory.from_config(config) + + # 3. Extract a KVCacheItem (DynamicCache) from a prompt (uses HFLLM.build_kv_cache internally) + prompt = [ + {"role": "user", "content": "What is MemOS?"}, + {"role": "assistant", "content": "MemOS is a memory operating system for LLMs."}, + ] + print("===== Extract KVCacheItem =====") + cache_item = kv_mem.extract(prompt) + print(json.dumps(serialize_item(cache_item), indent=2, default=str)) + print() + + # 4. Add the extracted KVCacheItem + print("===== Add KVCacheItem =====") + kv_mem.add([cache_item]) + print(json.dumps(serialize_item(kv_mem.get_all()), indent=2, default=str)) + print() + + # 5. Get by id + print("===== Get KVCacheItem by id =====") + retrieved = kv_mem.get(cache_item.id) + print(json.dumps(serialize_item(retrieved), indent=2, default=str)) + print() + + # 6. Merge caches (simulate with two items) + print("===== Merge DynamicCache =====") + item2 = kv_mem.extract([{"role": "user", "content": "Tell me a joke."}]) + kv_mem.add([item2]) + merged_cache = kv_mem.get_cache([cache_item.id, item2.id]) + print(json.dumps(serialize_item(merged_cache), indent=2, default=str)) + print() + + # 7. Delete one + print("===== Delete one KVCacheItem =====") + kv_mem.delete([cache_item.id]) + print(json.dumps(serialize_item(kv_mem.get_all()), indent=2, default=str)) + print() + + # 8. Dump and load + print("===== Dump and Load KVCacheMemory =====") + kv_mem.dump("tmp/kv_mem") + print("Memory dumped to 'tmp/kv_mem'.") + kv_mem.delete_all() + kv_mem.load("tmp/kv_mem") + print( + "Memory loaded from 'tmp/kv_mem':", + json.dumps(serialize_item(kv_mem.get_all()), indent=2, default=str), + ) diff --git a/examples/core_memories/naive_textual_memory.py b/examples/core_memories/naive_textual_memory.py index ff61334dc..ab73060c7 100644 --- a/examples/core_memories/naive_textual_memory.py +++ b/examples/core_memories/naive_textual_memory.py @@ -1,53 +1,77 @@ +import json +import os import uuid from memos.configs.memory import MemoryConfigFactory from memos.memories.factory import MemoryFactory +def print_result(title, result): + """Helper function: Pretty print the result.""" + print(f"\n{'=' * 10} {title} {'=' * 10}") + if isinstance(result, list | dict): + print(json.dumps(result, indent=2, ensure_ascii=False, default=str)) + else: + print(result) + + +# Configure memory backend with OpenAI extractor config = MemoryConfigFactory( backend="naive_text", config={ "extractor_llm": { - "backend": "ollama", + "backend": "openai", "config": { - "model_name_or_path": "qwen3:0.6b", + "model_name_or_path": "gpt-4o-mini", + "api_key": os.environ.get("OPENAI_API_KEY"), + "api_base": os.environ.get( + "OPENAI_BASE_URL", + os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1"), + ), "temperature": 0.0, "remove_think_prefix": True, }, } }, ) + +# Create memory instance m = MemoryFactory.from_config(config) -print("===== Extract memories =====") +# Extract memories from a simulated conversation memories = m.extract( [ {"role": "user", "content": "I love tomatoes."}, {"role": "assistant", "content": "Great! 
Tomatoes are delicious."}, ] ) -print(memories) -print() +print_result("Extract memories", memories) + -print("==== Add memories ====") +# Add the extracted memories to storage m.add(memories) + +# Manually create a memory item and add it example_id = str(uuid.uuid4()) -m.add([{"id": example_id, "memory": "User is Chinese.", "metadata": {"type": "opinion"}}]) -print(m.get_all()) -print() +manual_memory = [{"id": example_id, "memory": "User is Chinese.", "metadata": {"type": "opinion"}}] +m.add(manual_memory) + +# Print all current memories +print_result("Add memories (Check all after adding)", m.get_all()) -print("==== Search memories ====") + +# Search for relevant memories based on the query search_results = m.search("Tell me more about the user", top_k=2) -print(search_results) -print() +print_result("Search memories", search_results) + + +# Get specific memory item by ID +memory_item = m.get(example_id) +print_result("Get memory", memory_item) -print("==== Get memories ====") -memories = m.get(example_id) -print(memories) -print() -print("==== Update memories ====") +# Update the memory content for the specified ID m.update( example_id, { @@ -56,15 +80,22 @@ "metadata": {"type": "opinion", "confidence": 85}, }, ) -print(m.get(example_id)) +updated_memory = m.get(example_id) +print_result("Update memory", updated_memory) + + +print("==== Dump memory ====") +# Dump the current state of memory to a file +m.dump("tmp/naive_mem") +print("Memory dumped to 'tmp/naive_mem'.") print() -print("==== Delete memories ====") + +# Delete memory with the specified ID m.delete([example_id]) -print(m.get_all()) -print() +print_result("Delete memory (Check all after deleting)", m.get_all()) -print("==== Delete all memories ====") + +# Delete all memories in storage m.delete_all() -print(m.get_all()) -print() +print_result("Delete all", m.get_all()) diff --git a/examples/core_memories/textual_internet_memoy.py b/examples/core_memories/pref_textual_memory.py similarity index 76% rename from examples/core_memories/textual_internet_memoy.py rename to examples/core_memories/pref_textual_memory.py index 21979e890..ca9d471b9 100644 --- a/examples/core_memories/textual_internet_memoy.py +++ b/examples/core_memories/pref_textual_memory.py @@ -1,52 +1,18 @@ +import time + from memos import log -from memos.configs.embedder import EmbedderConfigFactory -from memos.configs.internet_retriever import InternetRetrieverConfigFactory -from memos.configs.mem_reader import SimpleStructMemReaderConfig -from memos.configs.memory import TreeTextMemoryConfig -from memos.embedders.factory import EmbedderFactory -from memos.mem_reader.simple_struct import SimpleStructMemReader -from memos.memories.textual.tree import TreeTextMemory +from memos.configs.memory import PreferenceTextMemoryConfig +from memos.memories.textual.preference import PreferenceTextMemory logger = log.get_logger(__name__) - -embedder_config = EmbedderConfigFactory.model_validate( - { - "backend": "ollama", - "config": { - "model_name_or_path": "nomic-embed-text:latest", - }, - } -) -embedder = EmbedderFactory.from_config(embedder_config) - - -def embed_memory_item(memory: str) -> list[float]: - return embedder.embed([memory])[0] - - -# google or xinyu pelase change tree_config_with_internet api key or token or url -tree_config = TreeTextMemoryConfig.from_json_file("examples/data/config/tree_config.json") -interconfig = InternetRetrieverConfigFactory( - backend="xinyu", - config={ - "api_key": "google or xinyu token", - "search_engine_id": "google engine id or 
xinyu url", - "max_results": 15, - "num_per_request": 10, - }, +preference_config = PreferenceTextMemoryConfig.from_json_file( + "examples/data/config/preference_config.json" ) -tree_config.internet_retriever = interconfig +my_preference_textual_memory = PreferenceTextMemory(preference_config) +my_preference_textual_memory.delete_all() -my_tree_textual_memory = TreeTextMemory(tree_config) -my_tree_textual_memory.delete_all() - -# Create a memory reader instance -reader_config = SimpleStructMemReaderConfig.from_json_file( - "examples/data/config/simple_struct_reader_config.json" -) -reader = SimpleStructMemReader(reader_config) scene_data = [ [ @@ -191,51 +157,36 @@ def embed_memory_item(memory: str) -> list[float]: ], ] -# Acquiring memories -memory = reader.get_memory(scene_data, type="chat", info={"user_id": "1234", "session_id": "2222"}) - -for m_list in memory: - my_tree_textual_memory.add(m_list) - -results = my_tree_textual_memory.search( - "Talk about the user's childhood story?", - top_k=10, - info={"query": "Talk about the user's childhood story?", "user_id": "111", "session": "2234"}, +memories = my_preference_textual_memory.get_memory( + scene_data, type="chat", info={"user_id": "1234", "session_id": "2222"} ) -for i, r in enumerate(results): - r = r.to_dict() - print(f"{i}'th similar result is: " + str(r["memory"])) -print(f"Successfully search {len(results)} memories") -# find related nodes -related_nodes = my_tree_textual_memory.get_relevant_subgraph("Painting") +added_ids = my_preference_textual_memory.add(memories) -# get current memory_size -print(f"Current Memory Size is {my_tree_textual_memory.get_current_memory_size()}") +time.sleep(10) -logger.info("Start doc search example...") -# Processing Documents -doc_paths = [ - "./text1.txt", - "./text2.txt", -] -# Acquiring memories from documents -doc_memory = reader.get_memory(doc_paths, "doc", info={"user_id": "1111", "session_id": "2222"}) - -for m_list in doc_memory: - my_tree_textual_memory.add(m_list) +init_time = time.time() +# search preference memories +results = my_preference_textual_memory.search("Talk about childhood story of the user", top_k=10) -results = my_tree_textual_memory.search( - "Tell me about what memos consist of?", - top_k=30, - info={"query": "Tell me about what memos consist of?", "user_id": "111", "session": "2234"}, -) for i, r in enumerate(results): r = r.to_dict() print(f"{i}'th similar result is: " + str(r["memory"])) -print(f"Successfully search {len(results)} memories") +print(f"Successfully search {len(results)} memories in {round(time.time() - init_time)}s") +# get all preference memories +all_preference_memories = my_preference_textual_memory.get_all() +for key, value in all_preference_memories.items(): + for i, m in enumerate(value): + print(f"{i}'th {key} memory is: " + str(m.memory)) + +# use filter to get all implicit preference memories +all_implicit_memories = my_preference_textual_memory.get_memory_by_filter( + {"preference_type": "implicit_preference"} +) +for i, m in enumerate(all_implicit_memories[0]): + print(f"{i}'th filtered memory is: " + str(m.memory)) -# my_tree_textual_memory.dump -my_tree_textual_memory.dump("tmp/my_tree_textual_memory") -my_tree_textual_memory.drop() +# dump preference memories +dumped_memories_dir = "tmp/my_preference_textual_memory" +my_preference_textual_memory.dump(dumped_memories_dir) diff --git a/examples/core_memories/tree_textual_memory.py b/examples/core_memories/tree_textual_memory.py index d2e197e5b..641335495 100644 --- 
diff --git a/examples/core_memories/tree_textual_memory.py b/examples/core_memories/tree_textual_memory.py
index d2e197e5b..641335495 100644
--- a/examples/core_memories/tree_textual_memory.py
+++ b/examples/core_memories/tree_textual_memory.py
@@ -3,6 +3,7 @@
 from memos import log
 from memos.configs.mem_reader import SimpleStructMemReaderConfig
 from memos.configs.memory import TreeTextMemoryConfig
+from memos.mem_reader.multi_modal_struct import MultiModalStructMemReader
 from memos.mem_reader.simple_struct import SimpleStructMemReader
 from memos.memories.textual.tree import TreeTextMemory
@@ -240,6 +241,33 @@
     top_k=30,
     info={"query": "Tell me about what memos consist of?", "user_id": "111", "session": "2234"},
 )
+
+for i, r in enumerate(results):
+    r = r.to_dict()
+    print(f"{i}'th similar result is: " + str(r["memory"]))
+print(f"Successfully searched {len(results)} memories")
+
+logger.info("Start multi-modal memory search example...")
+
+multi_modal_reader = MultiModalStructMemReader(reader_config)
+doc_paths = ["examples/data/one_page_example.pdf"]
+multi_modal_memory = multi_modal_reader.get_memory(
+    doc_paths, "doc", info={"user_id": "1111", "session_id": "2222"}
+)
+
+for m_list in multi_modal_memory:
+    added_ids = my_tree_textual_memory.add(m_list)
+    my_tree_textual_memory.memory_manager.wait_reorganizer()
+
+results = my_tree_textual_memory.search(
+    "Give me one poem from Tagore's 'Stray birds'",
+    top_k=30,
+    info={
+        "query": "Give me one poem from Tagore's 'Stray birds'",
+        "user_id": "111",
+        "session": "2234",
+    },
+)
 for i, r in enumerate(results):
     r = r.to_dict()
     print(f"{i}'th similar result is: " + str(r["memory"]))
diff --git a/examples/core_memories/vllm_kv_cache_memory.py b/examples/core_memories/vllm_kv_cache_memory.py
index 65ae64911..7122200f6 100644
--- a/examples/core_memories/vllm_kv_cache_memory.py
+++ b/examples/core_memories/vllm_kv_cache_memory.py
@@ -20,7 +20,7 @@ def main():
         "extractor_llm": {
             "backend": "vllm",
             "config": {
-                "model_name_or_path": "/mnt/afs/models/hf_models/Qwen2.5-7B",
+                "model_name_or_path": "Qwen/Qwen3-0.6B",
                 "api_base": "http://localhost:8088/v1",
                 "temperature": 0.7,
                 "max_tokens": 1024,
diff --git a/examples/data/config/preference_config.json b/examples/data/config/preference_config.json
new file mode 100644
index 000000000..4db7475a0
--- /dev/null
+++ b/examples/data/config/preference_config.json
@@ -0,0 +1,53 @@
+{
+    "extractor_llm": {
+        "backend": "ollama",
+        "config": {
+            "model_name_or_path": "qwen3:0.6b",
+            "temperature": 0.0,
+            "remove_think_prefix": true,
+            "max_tokens": 8192
+        }
+    },
+    "vector_db": {
+        "backend": "milvus",
+        "config": {
+            "collection_name": [
+                "explicit_preference",
+                "implicit_preference"
+            ],
+            "vector_dimension": 768,
+            "distance_metric": "cosine",
+            "uri": "./milvus_demo.db"
+        }
+    },
+    "embedder": {
+        "backend": "ollama",
+        "config": {
+            "model_name_or_path": "nomic-embed-text:latest"
+        }
+    },
+    "reranker": {
+        "backend": "cosine_local",
+        "config": {
+            "level_weights": {
+                "topic": 1.0,
+                "concept": 1.0,
+                "fact": 1.0
+            },
+            "level_field": "background"
+        }
+    },
+    "extractor": {
+        "backend": "naive",
+        "config": {}
+    },
+    "adder": {
+        "backend": "naive",
+        "config": {}
+    },
+    "retriever": {
+        "backend": "naive",
+        "config": {}
+    },
+    "memory_filename": "preference_textual_memories.json"
+}
diff --git a/examples/data/config/simple_struct_reader_config.json b/examples/data/config/simple_struct_reader_config.json
index 8a27de457..abfcdfa92 100644
--- a/examples/data/config/simple_struct_reader_config.json
+++ b/examples/data/config/simple_struct_reader_config.json
@@ -1,24 +1,30 @@
 {
     "llm": {
-        "backend": "ollama",
+        "backend": "openai",
         "config": {
-            
"model_name_or_path": "qwen3:0.6b", - "temperature": 0.0, - "remove_think_prefix": true, - "max_tokens": 8192 + "model_name_or_path": "gpt-4o-mini", + "temperature": 0.8, + "max_tokens": 1024, + "top_p": 0.9, + "top_k": 50, + "api_key": "sk-xxx", + "api_base": "http://" } }, "embedder": { - "backend": "ollama", + "backend": "universal_api", "config": { - "model_name_or_path": "nomic-embed-text:latest" + "provider": "openai", + "api_key": "EMPTY", + "model_name_or_path": "bge-m3", + "base_url": "http://" } }, "chunker": { "backend": "sentence", "config": { "tokenizer_or_token_counter": "gpt2", - "chunk_size": 512, + "chunk_size": 2048, "chunk_overlap": 128, "min_sentences_per_chunk": 1 } diff --git a/examples/data/config/tree_config_shared_database.json b/examples/data/config/tree_config_shared_database.json index 914f3c725..6dd1a9220 100644 --- a/examples/data/config/tree_config_shared_database.json +++ b/examples/data/config/tree_config_shared_database.json @@ -1,40 +1,63 @@ { "extractor_llm": { - "backend": "ollama", + "backend": "openai", "config": { - "model_name_or_path": "qwen3:0.6b", - "temperature": 0.0, - "remove_think_prefix": true, - "max_tokens": 8192 + "model_name_or_path": "gpt-4o", + "temperature": 0.8, + "max_tokens": 1024, + "top_p": 0.9, + "top_k": 50, + "api_key": "sk-xxx", + "api_base": "http://" } }, "dispatcher_llm": { - "backend": "ollama", + "backend": "openai", "config": { - "model_name_or_path": "qwen3:0.6b", - "temperature": 0.0, - "remove_think_prefix": true, - "max_tokens": 8192 + "model_name_or_path": "gpt-4o", + "temperature": 0.8, + "max_tokens": 1024, + "top_p": 0.9, + "top_k": 50, + "api_key": "sk-xxx", + "api_base": "http://" } }, "embedder": { - "backend": "ollama", + "backend": "universal_api", "config": { - "model_name_or_path": "nomic-embed-text:latest" + "provider": "openai", + "api_key": "EMPTY", + "model_name_or_path": "bge-m3", + "base_url": "http://" + } + }, + "reranker": { + "backend": "http_bge", + "config": { + "url": "http://", + "model": "bge-reranker-v2-m3", + "timeout": 10 } }, "graph_db": { "backend": "neo4j", "config": { - "uri": "bolt://localhost:7687", + "uri": "neo4j://127.0.0.1:7687", "user": "neo4j", "password": "12345678", - "db_name": "shared-tree-textual-memory", - "user_name": "alice", + "db_name": "neo4j", + "user_name": "xxx", "auto_create": true, - "use_multi_db": false, - "embedding_dimension": 768 + "use_multi_db": true, + "embedding_dimension": 1024 } }, - "reorganize": true + "reorganize": false, + "memory_size": { + "WorkingMemory": 200, + "LongTermMemory": 20000, + "UserMemory": 30000 + }, + "memory_filename": "tree_textual_memories.json" } diff --git a/examples/data/mem_cube_tree/textual_memory.json b/examples/data/mem_cube_tree/textual_memory.json new file mode 100644 index 000000000..91f426ca2 --- /dev/null +++ b/examples/data/mem_cube_tree/textual_memory.json @@ -0,0 +1,4219 @@ +{ + "nodes": [ + { + "id": "c1e2bfb3-4dd7-40f9-a347-a25a72223b2c", + "memory": "On January 13, 2026, at 8:51 PM, the user added another memory to demonstrate the concept of persistence.", + "metadata": { + "sources": [ + "\"{\\\"type\\\": \\\"chat\\\", \\\"role\\\": \\\"user\\\", \\\"chat_time\\\": \\\"08:51 PM on 13 January, 2026\\\", \\\"content\\\": \\\"This is a test memory for dump example\\\", \\\"lang\\\": \\\"en\\\"}\"", + "\"{\\\"type\\\": \\\"chat\\\", \\\"role\\\": \\\"user\\\", \\\"chat_time\\\": \\\"08:51 PM on 13 January, 2026\\\", \\\"content\\\": \\\"Another memory to demonstrate persistence\\\", \\\"lang\\\": \\\"en\\\"}\"" 
+ ], + "confidence": 0.99, + "usage": [], + "created_at": "2026-01-13T20:51:42.394870000+00:00", + "session_id": "default_session", + "memory_type": "UserMemory", + "type": "fact", + "tags": [ + "persistence", + "memory", + "demonstration" + ], + "updated_at": "2026-01-13T20:51:42.396309000+00:00", + "user_id": "example_user", + "background": "On January 13, 2026, at 8:51 PM, the user engaged in a memory demonstration by creating a test memory example and another memory to illustrate the concept of persistence. These actions reflect the user's interest in exploring and demonstrating memory-related concepts.", + "file_ids": [], + "embedding": [ + -0.022425159811973572, + 0.01004026923328638, + 0.013415387831628323, + -0.013093948364257812, + -0.04768655076622963, + -0.047837819904088974, + 0.02147974818944931, + 0.019097311422228813, + 0.005332120228558779, + -0.005180854815989733, + 0.0216499213129282, + -0.006977136246860027, + -0.014067722484469414, + 0.014010997489094734, + -0.01164746843278408, + 0.037948813289403915, + 0.049539558589458466, + -0.028778323903679848, + -0.0363227054476738, + -0.006282258778810501, + 0.03201163187623024, + -0.043678008019924164, + 0.06640569865703583, + 0.04734620451927185, + -0.03552856296300888, + 0.008948319591581821, + -0.018199170008301735, + 0.015788370743393898, + 0.08047342300415039, + 0.04632515832781792, + -0.01887986622750759, + 0.00966683216392994, + -0.02516685239970684, + -0.00974719226360321, + -0.01631780155003071, + -0.01085332315415144, + 0.0284947007894516, + 0.012914320454001427, + -0.02427816577255726, + 0.006589517462998629, + -0.013954272493720055, + -0.0017856458434835076, + 0.004043997265398502, + -0.004202353768050671, + 0.032805778086185455, + -0.00395891023799777, + -0.002543156733736396, + -0.018889321014285088, + -0.02066669426858425, + -0.006225534249097109, + 0.008262895978987217, + 0.0266795102506876, + 0.055325478315353394, + -0.019683465361595154, + -0.013576108030974865, + 0.015372389927506447, + -0.05816170945763588, + -0.00842361617833376, + -0.009548655711114407, + -0.0019262757850810885, + -0.047535285353660583, + 0.026603877544403076, + -0.0035098399966955185, + -0.030290981754660606, + -0.015334573574364185, + 0.04798908531665802, + 0.028400158509612083, + -0.00544084282591939, + 0.008106903173029423, + -0.03639834001660347, + 0.011515110731124878, + 0.042278800159692764, + -0.07343956083059311, + -0.02628243714570999, + -0.08886867761611938, + 0.02773837186396122, + -0.0017962816637009382, + -0.007761827670037746, + -0.011363845318555832, + 0.031860366463661194, + 0.017783189192414284, + -0.026849685236811638, + 0.04144683852791786, + -0.00212244875729084, + 0.03042333945631981, + 0.026755142956972122, + -0.04500158503651619, + 0.03989636152982712, + 0.04318639263510704, + -0.03339193016290665, + 0.025412660092115402, + -0.01126930397003889, + 0.027587104588747025, + -0.03471550717949867, + -0.006041178945451975, + -0.007709830068051815, + -0.02686859294772148, + 0.0020196351688355207, + 0.05143038183450699, + 0.029402295127511024, + 0.059598736464977264, + 0.01989145576953888, + 0.013944818638265133, + -0.010550791397690773, + 0.011004588566720486, + -0.02439161390066147, + 0.05952310189604759, + -0.0021389934699982405, + -0.011723102070391178, + 0.007941455580294132, + 0.010248259641230106, + 0.029893908649683, + 0.033032674342393875, + 0.008130538277328014, + 0.025242485105991364, + -0.009198852814733982, + 0.04307294636964798, + 0.005294303875416517, + -0.030272074043750763, + -0.004263805691152811, + 
0.04027452692389488, + 0.017197033390402794, + 0.06016598269343376, + -0.025620650500059128, + -0.02253860794007778, + -0.015088766813278198, + -0.007000771816819906, + -0.018870411440730095, + -0.015325118787586689, + -0.0027582377661019564, + 0.025734098628163338, + -0.017972271889448166, + 0.014824051409959793, + -0.025072310119867325, + -0.05165728181600571, + -0.00673132948577404, + 0.004667968954890966, + 0.03293813392519951, + 0.02083686739206314, + -0.03785427287220955, + 0.013188489712774754, + -0.02533702552318573, + 0.002592790871858597, + -0.03611471503973007, + 0.022084811702370644, + -0.007724011316895485, + 0.0028811413794755936, + 0.007497112732380629, + 0.026755142956972122, + 0.014861867763102055, + -0.012413252145051956, + 0.014880775474011898, + -0.03142547607421875, + -0.03377009555697441, + -0.024202533066272736, + 0.0020539062097668648, + 0.02323821373283863, + 0.09204525500535965, + -0.03365664556622505, + -0.031633466482162476, + -0.005308485124260187, + -0.06735111027956009, + -0.06126265972852707, + 0.04545538127422333, + 0.03220071271061897, + 0.028343435376882553, + 0.010843869298696518, + 0.02891068160533905, + -0.034488607197999954, + 0.006887322291731834, + -0.0065280660055577755, + -0.003923457581549883, + 0.02036416158080101, + 0.008220352232456207, + 0.0012183990329504013, + 0.0293266624212265, + 0.04390490800142288, + 0.01639343425631523, + -0.0038076445925980806, + -0.04159810394048691, + -0.057556647807359695, + -0.01890822872519493, + 0.04326202720403671, + -0.025355935096740723, + -0.045379746705293655, + -0.014578244648873806, + -0.0410686731338501, + 0.03469659760594368, + 0.01117476262152195, + 0.036190349608659744, + 0.020439794287085533, + -0.013093948364257812, + -0.03879968449473381, + 0.005251760594546795, + -0.028040902689099312, + -0.028229985386133194, + 0.005209216848015785, + -0.00709531269967556, + -0.029875000938773155, + 0.0010895866435021162, + -0.012006725184619427, + 0.07343956083059311, + 0.010541337542235851, + 0.02246297523379326, + -0.02633916214108467, + -0.020401978865265846, + -0.01178928092122078, + 0.025715190917253494, + 0.01966455765068531, + -0.07551946491003036, + -0.02388109266757965, + 0.005265941843390465, + -0.0036445611622184515, + -0.004528520628809929, + 0.018482793122529984, + -0.04511503502726555, + 0.012120174244046211, + 0.010635878890752792, + -0.008995589800179005, + -0.01553311012685299, + -0.030158624053001404, + 0.060014717280864716, + -0.056913767009973526, + 0.004339438397437334, + 0.032975949347019196, + -0.008612697944045067, + 0.0023138944525271654, + 0.03199272230267525, + 0.004360710270702839, + -0.008277077227830887, + 0.018066812306642532, + 0.02879723161458969, + 0.02745474874973297, + -0.010200989432632923, + 0.022746598348021507, + -0.03348647430539131, + -0.04084177315235138, + -0.006783327087759972, + 0.005696103908121586, + 0.0018281893571838737, + -0.01589236594736576, + -0.03482895717024803, + 0.0062491693533957005, + 0.0023871639277786016, + -0.034091535955667496, + -0.03195490688085556, + -0.014294620603322983, + 0.01899331621825695, + 0.017537381500005722, + 0.006069541443139315, + 0.03144438564777374, + 0.02019398845732212, + -0.06375854462385178, + -0.032087262719869614, + 0.005927729420363903, + 0.02123394049704075, + 0.034091535955667496, + -0.010465704835951328, + 0.041219938546419144, + 0.03170910105109215, + 0.03439406678080559, + -0.024996677413582802, + -0.0038998222444206476, + 0.03887531906366348, + 0.041219938546419144, + -0.01626107655465603, + -0.01804790459573269, + 
-0.0051997629925608635, + 0.023502927273511887, + -0.026547152549028397, + 0.029572470113635063, + -0.03140656650066376, + -0.023105856031179428, + 0.017924999818205833, + -0.005507021676748991, + -0.010134810581803322, + -0.024315981194376945, + -0.007908366620540619, + -0.024259258061647415, + 0.0360390841960907, + 0.02802199497818947, + -0.03195490688085556, + 0.023200396448373795, + -0.018123537302017212, + 0.022576425224542618, + -0.0413333885371685, + -0.03955601528286934, + 5.5653712479397655e-05, + 0.0209503173828125, + -0.04159810394048691, + -0.024070175364613533, + 0.02679296024143696, + 0.06493085622787476, + -0.033789005130529404, + -0.01567492075264454, + -0.02656606025993824, + -0.005653560161590576, + -0.16155190765857697, + 0.010730419307947159, + -0.021025950089097023, + 0.028532516211271286, + -0.010900593362748623, + -0.008319620974361897, + -0.03637943044304848, + -0.01583564095199108, + -0.04239225015044212, + 0.07003607600927353, + -0.03594454377889633, + -0.04863196238875389, + 0.027360206469893456, + 0.021252848207950592, + -0.02497776970267296, + -0.004530884325504303, + -0.02703876607120037, + -0.019815823063254356, + -0.03985854610800743, + -0.030877137556672096, + 0.012186353094875813, + -0.01614762656390667, + 0.03165237605571747, + -0.035131487995386124, + -0.008636333048343658, + 0.008645787835121155, + 0.002212262712419033, + -0.009577018208801746, + -0.055438924580812454, + 0.006929865572601557, + -0.010834414511919022, + 0.03229525312781334, + -0.011987816542387009, + -0.015410206280648708, + 0.02745474874973297, + 0.05207326263189316, + 0.009000317193567753, + -0.01176091842353344, + 0.023068038746714592, + -0.030971677973866463, + 0.026849685236811638, + 0.05237579345703125, + -0.024561788886785507, + 0.0284947007894516, + 0.020874684676527977, + -0.02405126579105854, + -0.020685601979494095, + 0.0006671059527434409, + -0.049653008580207825, + -0.010815506801009178, + -0.02312476374208927, + -0.028475793078541756, + -0.01626107655465603, + -0.008579608984291553, + -0.02042088657617569, + 0.011486749164760113, + -0.0293266624212265, + 0.013538291677832603, + -0.035377293825149536, + 0.009870095178484917, + -0.012413252145051956, + 0.00476250983774662, + -0.0022571696899831295, + 0.006447705905884504, + -0.04447215422987938, + 0.016828322783112526, + 0.033505380153656006, + -0.04341329261660576, + 7.6445379818324e-05, + -0.034791141748428345, + 0.05759446322917938, + -0.006835324689745903, + 0.002079905243590474, + -0.01290486566722393, + 0.010768236592411995, + 0.07635142654180527, + -0.011609652079641819, + -0.04632515832781792, + 0.01966455765068531, + -0.10089430958032608, + 0.019929273054003716, + 0.050711870193481445, + -0.0033467565663158894, + 0.00785164162516594, + -0.03653069585561752, + 0.022368434816598892, + 0.006537519861012697, + 0.004381982143968344, + 0.03955601528286934, + 0.2114696353673935, + 0.031690191477537155, + -0.05438006669282913, + 0.013112856075167656, + 0.041219938546419144, + -0.005327393300831318, + -0.01223362423479557, + 0.025242485105991364, + -0.056270889937877655, + -0.046552058309316635, + 0.015154944732785225, + -0.016459612175822258, + 0.00955338217318058, + 0.013727374374866486, + 0.03424280136823654, + 0.004329984541982412, + -0.024675238877534866, + 0.015438568778336048, + 0.08584335446357727, + 0.012999407015740871, + 0.016686512157320976, + -0.009118493646383286, + -0.004190536215901375, + 0.005601562559604645, + -0.034318435937166214, + -0.01955110765993595, + -0.004864141810685396, + 0.045606646686792374, + 
0.02949683740735054, + 0.053396835923194885, + -0.01936202682554722, + 0.030688054859638214, + -0.002644788473844528, + 0.0024911591317504644, + -0.0174617487937212, + -0.014861867763102055, + -0.025488292798399925, + -0.06754019111394882, + -0.011590744368731976, + 0.018123537302017212, + 0.02720894105732441, + -0.009737737476825714, + 0.0340726263821125, + 0.0259799063205719, + -0.01553311012685299, + -0.011316575109958649, + 0.017471203580498695, + -0.03048006445169449, + -0.04371582344174385, + -0.022954588755965233, + -0.022028086706995964, + 0.024372706189751625, + 0.02620680443942547, + -0.023086946457624435, + 0.019967088475823402, + -0.03685213625431061, + -0.005171400494873524, + -0.010541337542235851, + 0.0012302165850996971, + 0.06027943268418312, + 0.029875000938773155, + 0.03199272230267525, + -0.0051146759651601315, + 0.03204944729804993, + -0.0076814680360257626, + 0.0007687376928515732, + 0.029515745118260384, + -0.02446724846959114, + 0.03348647430539131, + 0.02340838685631752, + 0.03902658447623253, + 0.013377571478486061, + 0.01896495372056961, + 0.020515428856015205, + 0.010938410647213459, + -0.0005574973183684051, + 0.02427816577255726, + 0.021876821294426918, + 0.016128718852996826, + 0.05600617453455925, + -0.011921637691557407, + -0.02773837186396122, + -0.07733465731143951, + 0.007166218478232622, + 0.020969225093722343, + -0.04950174316763878, + -0.04844288155436516, + 0.07752373814582825, + -0.0036587421782314777, + -0.03898876905441284, + -0.01647852174937725, + 0.049010127782821655, + -0.0048286886885762215, + -0.019626740366220474, + 0.05732974782586098, + -0.020628876984119415, + 0.07241851836442947, + 0.0036209258250892162, + -0.036549605429172516, + -0.016610879451036453, + -0.0497286394238472, + -0.026225712150335312, + 0.01734829880297184, + -0.0354718379676342, + -0.026433704420924187, + -0.005355755798518658, + -0.023673102259635925, + -0.0011675831628963351, + -0.0009330029133707285, + 0.005828461609780788, + 0.01135439146310091, + 0.011278758756816387, + -0.059371836483478546, + 0.010787144303321838, + 0.01670541986823082, + 0.008470886386930943, + -0.005596835631877184, + 0.005970273166894913, + -0.016270531341433525, + 0.026055539026856422, + 0.014701147563755512, + 0.004741238430142403, + 0.02008053846657276, + 0.05604398995637894, + 0.004296895116567612, + -0.002104722196236253, + 0.016043631359934807, + -0.019182397052645683, + -0.009676286019384861, + -0.01740502379834652, + 0.02580973133444786, + -0.019570017233490944, + 0.04764873534440994, + -0.022746598348021507, + 0.04935047775506973, + -0.009510839357972145, + 0.014578244648873806, + 0.017660286277532578, + -0.0019392751855775714, + 0.02645261213183403, + 0.008243987336754799, + -0.06999826431274414, + -0.00033325751428492367, + 0.010739874094724655, + 0.013169581070542336, + 0.03482895717024803, + -0.0050059533677995205, + -0.01709303818643093, + -0.021971361711621284, + 0.016941772773861885, + 0.0410686731338501, + 0.026698417961597443, + 0.04243006557226181, + -0.049426108598709106, + -0.03530166298151016, + -0.021631013602018356, + -0.038648419082164764, + 0.022860048338770866, + 0.02896740660071373, + -0.0329570434987545, + 0.0008916411316022277, + 0.025677373632788658, + 0.05101440101861954, + 0.0709436759352684, + 0.057443197816610336, + -0.0075018396601080894, + -0.019702374935150146, + 0.008073813281953335, + -0.02323821373283863, + 0.0043016220442950726, + 0.005232852417975664, + -0.018671875819563866, + -0.01896495372056961, + 0.012696875259280205, + -0.003824189305305481, + 
+          ... (remainder of this record's embedding floats elided for readability) ...
+        ],
+        "key": "Persistence Demonstration",
+        "status": "activated"
+      }
+    },
+    {
+      "id": "76374204-35a7-49af-a437-29410ae353e3",
+      "memory": "On January 13, 2026, at 8:51 PM, the user added another memory to demonstrate the concept of persistence.",
+      "metadata": {
+        "sources": [
+          "\"{\\\"type\\\": \\\"chat\\\", \\\"role\\\": \\\"user\\\", \\\"chat_time\\\": \\\"08:51 PM on 13 January, 2026\\\", \\\"content\\\": \\\"This is a test memory for dump example\\\", \\\"lang\\\": \\\"en\\\"}\"",
+          "\"{\\\"type\\\": \\\"chat\\\", \\\"role\\\": \\\"user\\\", \\\"chat_time\\\": \\\"08:51 PM on 13 January, 2026\\\", \\\"content\\\": \\\"Another memory to demonstrate persistence\\\", \\\"lang\\\": \\\"en\\\"}\""
+        ],
+        "confidence": 0.99,
+        "usage": [],
+        "created_at": "2026-01-13T20:51:42.394870000+00:00",
+        "session_id": "default_session",
+        "memory_type": "WorkingMemory",
+        "type": "fact",
+        "tags": [
+          "persistence",
+          "memory",
+          "demonstration"
+        ],
+        "updated_at": "2026-01-13T20:51:42.396281000+00:00",
+        "user_id": "example_user",
+        "background": "On January 13, 2026, at 8:51 PM, the user engaged in a memory demonstration by creating a test memory example and another memory to illustrate the concept of persistence. These actions reflect the user's interest in exploring and demonstrating memory-related concepts.",
+        "file_ids": [],
+        "embedding": [
+          ... (embedding floats elided for readability) ...
+        ],
+        "key": "Persistence Demonstration",
+        "status": "activated"
+      }
+    },
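For readers skimming this example data: each dumped record follows the same flat JSON shape (id, memory, and a metadata object carrying sources, tags, timestamps, an embedding vector, a key, and a status). A minimal sketch of how such a dump could be inspected offline, assuming the dump is a plain JSON list of records; the file name under examples/data/mem_cube_tree/ is hypothetical, not something this patch fixes:

    import json
    from pathlib import Path

    # Hypothetical file name; this patch adds example data under
    # examples/data/mem_cube_tree/ but the exact layout is an assumption.
    DUMP_PATH = Path("examples/data/mem_cube_tree/textual_memory.json")

    def summarize_records(path: Path) -> None:
        """Print a one-line summary per dumped memory record.

        Assumes the dump is a JSON list of records shaped like the
        ones shown above: {"id": ..., "memory": ..., "metadata": {...}}.
        """
        records = json.loads(path.read_text(encoding="utf-8"))
        for rec in records:
            meta = rec.get("metadata", {})
            embedding = meta.get("embedding") or []
            print(
                f"{rec['id'][:8]}  {meta.get('memory_type', '?'):<14} "
                f"dim={len(embedding):<4} tags={meta.get('tags', [])} "
                f"| {rec['memory'][:60]}"
            )

    if __name__ == "__main__":
        summarize_records(DUMP_PATH)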
+    {
+      "id": "6c0c977f-2e75-4e3a-a661-88674da5b14e",
+      "memory": "On January 13, 2026, at 8:51 PM, the user created a test memory for a demonstration purpose.",
+      "metadata": {
+        "sources": [
+          "\"{\\\"type\\\": \\\"chat\\\", \\\"role\\\": \\\"user\\\", \\\"chat_time\\\": \\\"08:51 PM on 13 January, 2026\\\", \\\"content\\\": \\\"This is a test memory for dump example\\\", \\\"lang\\\": \\\"en\\\"}\"",
+          "\"{\\\"type\\\": \\\"chat\\\", \\\"role\\\": \\\"user\\\", \\\"chat_time\\\": \\\"08:51 PM on 13 January, 2026\\\", \\\"content\\\": \\\"Another memory to demonstrate persistence\\\", \\\"lang\\\": \\\"en\\\"}\""
+        ],
+        "confidence": 0.99,
+        "usage": [],
+        "created_at": "2026-01-13T20:51:42.359643000+00:00",
+        "session_id": "default_session",
+        "memory_type": "UserMemory",
+        "type": "fact",
+        "tags": [
+          "test",
+          "memory",
+          "demonstration"
+        ],
+        "updated_at": "2026-01-13T20:51:42.396236000+00:00",
+        "user_id": "example_user",
+        "background": "On January 13, 2026, at 8:51 PM, the user engaged in a memory demonstration by creating a test memory example and another memory to illustrate the concept of persistence. These actions reflect the user's interest in exploring and demonstrating memory-related concepts.",
+        "file_ids": [],
+        "embedding": [
+          ... (embedding floats elided for readability) ...
+        ],
+        "key": "Test Memory Example",
+        "status": "activated"
+      }
+    },
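Note that the record above ("6c0c977f...", UserMemory) and the one that follows ("3178a22f...", WorkingMemory) carry the same memory text and created_at but live in different memory types. A quick, hedged way to spot such cross-type twins in a dump, under the same schema assumptions as the earlier sketch:

    from collections import defaultdict

    def group_by_text(records: list[dict]) -> dict[str, list[str]]:
        """Map identical memory texts to the memory_types holding them."""
        groups: dict[str, list[str]] = defaultdict(list)
        for rec in records:
            groups[rec["memory"]].append(rec["metadata"]["memory_type"])
        return dict(groups)

    # Toy demo mirroring the two records above (fields trimmed):
    demo = [
        {"memory": "test memory", "metadata": {"memory_type": "UserMemory"}},
        {"memory": "test memory", "metadata": {"memory_type": "WorkingMemory"}},
    ]
    print(group_by_text(demo))  # {'test memory': ['UserMemory', 'WorkingMemory']}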
These actions reflect the user's interest in exploring and demonstrating memory-related concepts.", + "file_ids": [], + "embedding": [ + -0.023225771263241768, + 0.009711744263768196, + 0.01401975005865097, + -0.023113388568162918, + -0.044953107833862305, + -0.025454696267843246, + 0.004780950024724007, + 0.012184164486825466, + 0.003397237276658416, + 0.006766378879547119, + 0.033059265464544296, + -0.0018625102238729596, + -0.00135678774677217, + -0.0159396231174469, + -0.0017864176770672202, + 0.03302180394530296, + 0.05941302329301834, + -0.013467201963067055, + -0.011341294273734093, + -0.03324656933546066, + 0.01599581353366375, + -0.05263259634375572, + 0.07222465425729752, + 0.022851161658763885, + -0.007220592815428972, + 0.0017969536129385233, + -0.012418295256793499, + 0.013214340433478355, + 0.08645980805158615, + 0.0018215373856946826, + 0.02071589045226574, + -0.028620144352316856, + -0.015490091405808926, + -0.025454696267843246, + -0.04469088092446327, + -0.02105303853750229, + 0.027945848181843758, + 3.424162423470989e-05, + -0.028526492416858673, + -0.019479678943753242, + -0.008882921189069748, + 0.012596234679222107, + 0.015480726025998592, + 0.006949000991880894, + 0.015499456785619259, + 0.014825159683823586, + -0.012549408711493015, + -0.02382514625787735, + -0.025342313572764397, + -0.013364183716475964, + 0.002884491113945842, + 0.03133606165647507, + 0.04525279253721237, + -0.037179965525865555, + 0.027009325101971626, + 0.021689873188734055, + -0.05034748092293739, + -0.043342288583517075, + -0.019816827028989792, + 0.007492184638977051, + -0.03575645014643669, + 0.007862110622227192, + 0.0021774161141365767, + 0.01226845197379589, + -0.013504662550985813, + 0.03937143087387085, + 0.0068834442645311356, + 0.0019292375072836876, + -0.00272059952840209, + -0.023169580847024918, + 0.0013368866639211774, + 0.04581470787525177, + -0.05514247715473175, + -0.01210924331098795, + -0.0809905156493187, + 0.01955460198223591, + 0.0017419328214600682, + -0.015677396208047867, + -0.009426104836165905, + 0.021408917382359505, + 0.03892189636826515, + -0.022476553916931152, + 0.01390736736357212, + -0.0002572511730249971, + 0.047350604087114334, + 0.01260560005903244, + -0.06349626183509827, + 0.057502515614032745, + 0.040345411747694016, + -0.04274291172623634, + 0.028788719326257706, + 0.0003658293280750513, + 0.010807476006448269, + -0.02504262700676918, + 0.004296299535781145, + 0.010404771193861961, + -0.05158368870615959, + 0.003305926453322172, + 0.029069675132632256, + 0.011032241396605968, + 0.05813935026526451, + 0.022532744333148003, + 0.02037874236702919, + -0.017878225073218346, + -0.0010945613030344248, + -0.028788719326257706, + 0.02927570976316929, + 0.014375628903508186, + -0.009627456776797771, + 0.0005191849777474999, + 0.001597942435182631, + 0.04281783476471901, + 0.04918619245290756, + 0.01210924331098795, + 0.04203115403652191, + 0.013766888529062271, + 0.013485931791365147, + -0.01013317983597517, + -0.05083446949720383, + -0.014778333716094494, + 0.017484884709119797, + 0.014703411608934402, + 0.038341253995895386, + -0.03693646937608719, + -0.06076161563396454, + -0.014787699095904827, + -0.01294274814426899, + -0.023019736632704735, + -0.0010770015651360154, + -0.004511700011789799, + 0.016042640432715416, + -0.000730487983673811, + 0.00883141253143549, + -0.008058780804276466, + -0.026971863582730293, + -0.021614952012896538, + 0.011247641406953335, + 0.05281990021467209, + -0.010498423129320145, + -0.03399578854441643, + 0.020341280847787857, + 
-0.003252076217904687, + -0.008681568317115307, + -0.026859480887651443, + 0.015930257737636566, + -0.02498643472790718, + 0.005469294730573893, + -0.003645415883511305, + 0.02721535973250866, + 0.001160117913968861, + -0.01229654811322689, + 0.019891750067472458, + -0.02288862317800522, + -0.04469088092446327, + -0.022851161658763885, + -0.003551763715222478, + 0.01916126161813736, + 0.06192290410399437, + -0.01468468178063631, + 0.012624330818653107, + -0.0015429217601194978, + -0.048511892557144165, + -0.06076161563396454, + 0.0465264655649662, + 0.02032255008816719, + 0.02860141359269619, + 0.013457836583256721, + 0.025248661637306213, + -0.0460769347846508, + 0.03639328479766846, + 0.002751036547124386, + -0.008306959643959999, + -0.008747125044465065, + -0.008157115429639816, + -0.008611329831182957, + 0.033845942467451096, + 0.038734592497348785, + -0.000227984826778993, + -0.01032984908670187, + -0.0321040116250515, + -0.05158368870615959, + 0.0031490586698055267, + 0.030287155881524086, + 0.013794984668493271, + -0.04135685786604881, + -0.003430015640333295, + -0.012390200048685074, + 0.004427412524819374, + 0.015049925073981285, + 0.01107906736433506, + 0.008161798119544983, + -0.03180432319641113, + -0.000778484798502177, + 0.00315842404961586, + -0.005722155794501305, + -0.027908386662602425, + -0.0014211736852303147, + 0.0008768197149038315, + -0.02448071353137493, + 0.005408420693129301, + -0.011060337536036968, + 0.035737719386816025, + 0.015668030828237534, + 0.02127780392765999, + -0.01746615581214428, + -0.013823079876601696, + -0.01416022889316082, + 0.0026854798197746277, + 0.0031677891965955496, + -0.0812152773141861, + -0.024836590513586998, + 0.0003201738290954381, + -0.016014544293284416, + -0.011032241396605968, + 0.02238290011882782, + -0.013261166401207447, + -0.0022523379884660244, + -0.005698742810636759, + 0.012764809653162956, + -0.015499456785619259, + -0.05371896177530289, + 0.04941095784306526, + -0.017447425052523613, + 0.030343346297740936, + 0.030811607837677002, + -0.01826220005750656, + 0.008536407724022865, + 0.05016017332673073, + 0.02848903089761734, + -0.0326659232378006, + 0.026110263541340828, + 0.018721096217632294, + 0.012015590444207191, + -0.007585836574435234, + 0.03513834625482559, + -0.020847003906965256, + -0.04154416173696518, + 0.0050150807946920395, + 0.02605407126247883, + 0.0016857414739206433, + -0.01627677120268345, + 0.004181575495749712, + 0.01961079239845276, + 0.018365217372775078, + -0.04154416173696518, + -0.026241375133395195, + 0.0030554065015167, + 0.01035794522613287, + 0.02227051742374897, + 0.01488135103136301, + 0.01074191927909851, + 0.007899572141468525, + -0.05918825790286064, + -0.001568676088936627, + 0.01715710200369358, + 0.009051495231688023, + 0.029032215476036072, + -0.006073351949453354, + 0.03877205401659012, + -0.0013439105823636055, + 0.04064510017633438, + -0.03774188086390495, + 0.025810575112700462, + 0.03341514244675636, + 0.022944815456867218, + -0.019854288548231125, + -0.019535871222615242, + -0.013411009684205055, + 0.014591028913855553, + -0.008742442354559898, + 0.02993127703666687, + -0.05525486171245575, + -0.02365657314658165, + 0.03487611934542656, + -0.015705492347478867, + 0.008349102921783924, + -0.02970651164650917, + 5.427992618933786e-06, + -0.020584776997566223, + 0.038846977055072784, + 0.016632650047540665, + -0.026409950107336044, + 0.031298600137233734, + -0.0122216260060668, + 0.0071503533981740475, + -0.017335042357444763, + -0.027796003967523575, + -0.027140438556671143, + 
0.03952127322554588, + -0.019704444333910942, + -0.019460948184132576, + 0.01024556253105402, + 0.06357118487358093, + -0.04360451176762581, + -0.018206007778644562, + -0.021952100098133087, + 0.00883141253143549, + -0.15463869273662567, + -0.008232037536799908, + -0.013879271224141121, + -0.0018063188763335347, + -0.014413089491426945, + 0.016632650047540665, + -0.04682615399360657, + -0.0486617386341095, + -0.010479693301022053, + 0.08968144655227661, + -0.005267942324280739, + -0.04244322329759598, + 0.017306946218013763, + 0.026353757828474045, + -0.025866767391562462, + -0.005651916842907667, + -0.03865967318415642, + -0.027908386662602425, + -0.024312138557434082, + -0.011931303888559341, + 0.010030161589384079, + -0.02148383855819702, + 0.05424341559410095, + -0.01921745389699936, + -0.002258191118016839, + -0.00631684809923172, + 0.04941095784306526, + 0.009739839471876621, + -0.020734621211886406, + 0.002176245441660285, + -0.014169593341648579, + 0.04128193482756615, + -0.006579074542969465, + -0.005895412527024746, + -0.008999986574053764, + 0.05971270799636841, + -0.0005150876822881401, + 0.007473453879356384, + 0.011453676968812943, + -0.020416202023625374, + 0.015396438539028168, + 0.066230908036232, + -0.03858475014567375, + 0.045964550226926804, + 0.03455770015716553, + 0.0163891538977623, + -0.004703687038272619, + -0.012184164486825466, + -0.0476502925157547, + -0.011369389481842518, + -0.007332975510507822, + -0.020022863522171974, + -0.015096751973032951, + -0.036505669355392456, + -0.020528584718704224, + -0.013401645235717297, + -0.054955173283815384, + 0.017035353928804398, + -0.08526106178760529, + 0.021614952012896538, + -0.0310363732278347, + -0.05192083865404129, + -0.010760650038719177, + 0.040345411747694016, + -0.02970651164650917, + 0.00763734569773078, + 0.04030795395374298, + -0.06394579261541367, + -0.0025051990523934364, + -0.03637455403804779, + 0.04847443476319313, + -0.018449503928422928, + 0.012736713513731956, + 0.001112121157348156, + 0.021352725103497505, + 0.06166067719459534, + 0.002046302892267704, + -0.03247861936688423, + 0.011013510636985302, + -0.08885730803012848, + -0.027796003967523575, + 0.053681500256061554, + -0.015452630817890167, + -0.03388340398669243, + -0.01715710200369358, + 0.00902808178216219, + -0.01121954619884491, + 0.008096241392195225, + 0.04113209247589111, + 0.20933163166046143, + -0.0004925525863654912, + -0.0620727464556694, + -0.013055131770670414, + 0.03787299245595932, + -0.010076988488435745, + -0.053606580942869186, + 0.01327053178101778, + -0.04233084246516228, + -0.04536517709493637, + 0.020285088568925858, + -0.0007509744027629495, + 0.01352339331060648, + 0.010208101011812687, + 0.0115192336961627, + -0.0243870597332716, + -0.01263369619846344, + -0.007501549553126097, + 0.08908206969499588, + 0.016585823148489, + 0.035793911665678024, + -0.014338168315589428, + -0.001372006256133318, + -0.001101585221476853, + -0.04525279253721237, + -0.02648487128317356, + -0.00762329762801528, + 0.07971683889627457, + -0.012708617374300957, + 0.056565992534160614, + -0.018917765468358994, + 0.011697173118591309, + 0.007997906766831875, + 0.004799680784344673, + 0.023469267413020134, + 0.009664918296039104, + 0.007894889451563358, + -0.04281783476471901, + -0.017569173127412796, + 0.02921951934695244, + 0.040794942528009415, + -0.023787684738636017, + 0.013841810636222363, + 0.04124447703361511, + -0.002535636071115732, + 0.01271798275411129, + 0.011978129856288433, + -0.02232670970261097, + -0.04135685786604881, + 
-0.002619923325255513, + -0.03480119630694389, + 0.016838684678077698, + 0.0310363732278347, + -0.016005178913474083, + 0.013682601973414421, + -0.006705504842102528, + -0.011097798123955727, + -0.02687821164727211, + 0.002102494239807129, + 0.03644947707653046, + 0.03888443857431412, + 0.0315420962870121, + -0.028751257807016373, + 0.051995761692523956, + -0.014937542378902435, + 0.0018285613041371107, + 0.0020779105834662914, + -0.05240783095359802, + 0.03425801172852516, + 0.026953132823109627, + 0.06267212331295013, + 0.010020796209573746, + 0.03259100392460823, + 0.0234318058937788, + 0.008007272146642208, + 0.012689887546002865, + 0.03985841944813728, + 0.05488025024533272, + 0.021858448162674904, + 0.07214973866939545, + 0.0159115269780159, + -0.020453663542866707, + -0.04027049243450165, + 0.008372516371309757, + 0.005876682233065367, + -0.05701552331447601, + -0.024143563583493233, + 0.052445292472839355, + -0.024518173187971115, + -0.03463262319564819, + -0.0329468809068203, + 0.045739784836769104, + -0.025510888546705246, + -0.027833465486764908, + 0.048174746334552765, + 0.0156586654484272, + 0.04124447703361511, + -0.0003596833848860115, + -0.029743971303105354, + 0.0014972662320360541, + -0.05083446949720383, + -0.02843284048140049, + 0.013663871213793755, + -0.0015663348603993654, + -0.010320483706891537, + -0.03315291553735733, + -0.028414109721779823, + -0.003565811552107334, + 0.008011954836547375, + 0.012764809653162956, + -0.0057455687783658504, + 0.03433293476700783, + -0.07361071556806564, + -0.04446611553430557, + -0.0036758529022336006, + 0.003003897611051798, + -0.014619125053286552, + 0.009084274061024189, + -0.009491661563515663, + 0.047425527125597, + 0.009936509653925896, + 0.00795576348900795, + 0.010620171204209328, + 0.05540470406413078, + 0.009861587546765804, + -0.022364171221852303, + -0.0022628738079220057, + -0.012006225995719433, + 0.004530430305749178, + -0.01372006256133318, + 0.026615984737873077, + -0.036187250167131424, + 0.04094478860497475, + -0.03877205401659012, + 0.02732774242758751, + 0.014862621203064919, + 0.021858448162674904, + 0.01667010970413685, + -0.01502182986587286, + 0.033621177077293396, + 0.0031748132314532995, + -0.09402691572904587, + -0.000989787746220827, + 0.027346473187208176, + 0.025585809722542763, + 0.01667947508394718, + 0.024181025102734566, + -0.02427467703819275, + -0.01249321736395359, + 0.02448071353137493, + 0.03933396935462952, + 0.01710091158747673, + 0.02449944242835045, + -0.05960032716393471, + -0.03335895016789436, + -0.039071742445230484, + -0.04547755792737007, + 0.017475519329309464, + 0.03644947707653046, + -0.04236830398440361, + -0.006752331275492907, + 0.031841784715652466, + 0.03397705778479576, + 0.08226418495178223, + 0.0621851310133934, + -0.04214353859424591, + -0.011397485621273518, + 0.030006198212504387, + 0.0031162805389612913, + -0.023356884717941284, + 0.006532248109579086, + -0.005928190890699625, + -0.016407884657382965, + 0.021184151992201805, + 0.0018777287332341075, + -0.047425527125597, + -0.04903634637594223, + 0.026840750128030777, + -0.021502569317817688, + -0.01077001541852951, + -0.008461485616862774, + -0.02521120011806488, + -0.037685688585042953, + -0.011650347150862217, + 0.02054731547832489, + 0.021090500056743622, + 0.01893649622797966, + -0.028357917442917824, + 0.012689887546002865, + -0.001998306019231677, + -0.08511121571063995, + 0.06967731565237045, + 0.030399538576602936, + -0.015471360646188259, + 0.04203115403652191, + -0.043791819363832474, + 0.02854522317647934, 
+ 0.010301753878593445, + 0.011388120241463184, + 0.022233057767152786, + -0.012661791406571865, + 0.0016037956811487675, + -0.019685715436935425, + -0.01944221928715706, + -0.019292375072836876, + -0.012905287556350231, + 0.009908413514494896, + 0.0161643885076046, + -0.0068834442645311356, + 0.02015397697687149, + -0.018589982762932777, + -0.019966671243309975, + 0.0020123538561165333, + 0.014328802935779095, + -0.050722088664770126, + -0.014890716411173344, + 0.07177512347698212, + 0.044503577053546906, + -0.011921938508749008, + 0.010320483706891537, + -0.014478646218776703, + 0.01338291447609663, + 0.010526519268751144, + -0.01812172122299671, + -0.02142764814198017, + 0.01088239811360836, + 0.00478329136967659, + -0.05225798487663269, + 0.02693440392613411, + 0.022682588547468185, + -0.005764299537986517, + -0.02238290011882782, + -0.024087373167276382, + -0.019311105832457542, + 0.05296974256634712, + -0.0651070848107338, + 0.020809542387723923, + 0.015780413523316383, + -0.024780400097370148, + -0.0027042103465646505, + -0.02914459817111492, + 0.014207054860889912, + 0.020079053938388824, + 0.04768775403499603, + 0.0315982885658741, + 0.03526945784687996, + 0.013439105823636055, + 0.012886556796729565, + 0.007670123595744371, + 0.023974990472197533, + 0.017597267404198647, + 0.007866793312132359, + -0.048174746334552765, + -0.0489988848567009, + 0.040457796305418015, + 0.08016636967658997, + 0.013682601973414421, + -0.018046800047159195, + -0.06683028489351273, + -0.034351665526628494, + -0.006799157243221998, + 0.0028048865497112274, + -0.020341280847787857, + 0.01605200581252575, + -0.04637662321329117, + -0.06993953883647919, + 0.017569173127412796, + -0.06008731946349144, + -0.006916222628206015, + -0.03309672325849533, + -0.027758542448282242, + 0.004675591364502907, + -0.00883141253143549, + -0.03899681940674782, + 0.07012684643268585, + -0.0009002327569760382, + -0.02620391547679901, + -0.025023896247148514, + 0.06802903115749359, + 0.030193503946065903, + -0.00706606637686491, + 0.052332907915115356, + 0.02976270206272602, + 0.03502596169710159, + -0.012886556796729565, + -3.263197504566051e-05, + 0.019179992377758026, + -0.02637248858809471, + 0.017278850078582764, + -0.06031208485364914, + -0.012249721214175224, + 0.00553953368216753, + -0.029182057827711105, + 0.040907327085733414, + -0.010788745246827602, + 0.01755044236779213, + -0.042293380945920944, + 0.02043493278324604, + -0.0234318058937788, + 0.02427467703819275, + -0.030455728992819786, + 0.011584790423512459, + -0.045515019446611404, + -0.015181038528680801, + 0.0005320621421560645, + 0.00795576348900795, + -0.01710091158747673, + 0.017082180827856064, + 0.029687780886888504, + 0.003418309148401022, + 0.020734621211886406, + -0.07994160801172256, + 0.007215910125523806, + 0.022457823157310486, + -0.0006052280077710748, + -0.015817875042557716, + 0.02682201936841011, + -0.058176811784505844, + 0.05413103103637695, + -0.004389951936900616, + 0.03639328479766846, + 0.011172720231115818, + -0.0489988848567009, + -0.005675329826772213, + -0.031111296266317368, + -0.0019268961623311043, + -0.0032825132366269827, + -0.017981242388486862, + 0.036955200135707855, + -0.03377101942896843, + -0.030418269336223602, + 0.003273148089647293, + -0.02464928664267063, + -0.0011630445951595902, + 0.04394166171550751, + 0.020416202023625374, + 0.021034307777881622, + 0.0008352614822797477, + 0.05083446949720383, + -0.03319037705659866, + -0.009290308691561222, + -0.02464928664267063, + 0.04083240404725075, + -0.049785565584897995, 
+ -0.031298600137233734, + 0.02560454048216343, + -0.01760663278400898, + 0.014263246208429337, + -0.01757853850722313, + -0.00598438223823905, + -0.005591042805463076, + 0.02594168856739998, + -0.012867826968431473, + -0.0477626770734787, + 0.033902134746313095, + -0.025061357766389847, + -0.05274497717618942, + 0.031523365527391434, + 0.03970857709646225, + 0.01596771739423275, + 0.013888636603951454, + 0.03877205401659012, + -0.009739839471876621, + -0.009786665439605713, + -0.04761283099651337, + -0.008143068291246891, + 0.032684653997421265, + 0.03822886943817139, + 0.009163877926766872, + -0.028170613572001457, + 0.005843903869390488, + 0.020753350108861923, + -0.007281466852873564, + -0.014216420240700245, + -0.014057210646569729, + -0.006424548104405403, + -0.011172720231115818, + -0.03332148864865303, + 0.03974603861570358, + 0.013757524080574512, + -0.015012464486062527, + 0.0028165930416435003, + 0.01327053178101778, + -0.019910480827093124, + 0.006625900510698557, + -0.022626396268606186, + -0.011912573128938675, + -0.02537977509200573, + -0.024574365466833115, + 0.007300197146832943, + -0.023169580847024918, + -0.047425527125597, + 0.0003184178494848311, + 0.02537977509200573, + -0.16213087737560272, + 0.02176479622721672, + 0.0014211736852303147, + 0.0010805134661495686, + -0.021952100098133087, + -0.02803950011730194, + -0.036280903965234756, + -0.023281963542103767, + 0.028245534747838974, + -0.02571692317724228, + 0.011734633706510067, + -0.03586883470416069, + 0.035793911665678024, + -0.030736686661839485, + -0.02288862317800522, + 0.02682201936841011, + 0.012006225995719433, + 0.04476580023765564, + -0.06004985794425011, + 0.09335261583328247, + -0.005075954832136631, + -0.030418269336223602, + -0.004537454340606928, + 0.005057224538177252, + -0.048624277114868164, + -0.0246118251234293, + 0.031017644330859184, + 0.02798330970108509, + -0.03510088473558426, + 0.03560660779476166, + -0.02120288275182247, + 0.031298600137233734, + 0.02449944242835045, + 0.05858888104557991, + 0.03335895016789436, + -0.01662328466773033, + 0.011725268326699734, + -0.04349213093519211, + -0.015340247191488743, + -0.00708011444658041, + -0.05031001940369606, + 0.01857125200331211, + -0.009627456776797771, + -0.015265326015651226, + 0.03616851940751076, + 0.06383340805768967, + -0.027833465486764908, + 0.009430787526071072, + -0.037685688585042953, + 0.02260766737163067, + -0.012202895246446133, + 0.021614952012896538, + -0.009009351953864098, + -0.009674282744526863, + -0.02116542123258114, + -0.017953146249055862, + -0.01185638178139925, + -0.013869906775653362, + -0.011453676968812943, + 0.016951067373156548, + -0.01976063661277294, + -0.012540043331682682, + -0.0244432520121336, + -0.042068615555763245, + -0.018955226987600327, + 0.011697173118591309, + -0.06926524639129639, + 0.0025239295791834593, + -0.015920892357826233, + 0.009945875033736229, + -0.035119615495204926, + 0.027009325101971626, + 0.02015397697687149, + -0.009777300991117954, + -0.015977082774043083, + 0.05308212712407112, + 0.06327150017023087, + 0.021558761596679688, + 0.017091546207666397, + -0.01766282506287098, + -0.013336088508367538, + 0.005932873580604792, + -0.0006385916494764388, + -0.015649300068616867, + 0.015049925073981285, + 0.03538184240460396, + -0.005164924543350935, + 0.003172471886500716, + -0.03903428092598915, + -0.01271798275411129, + -0.09005605429410934, + -0.008620694279670715, + 0.045664865523576736, + 0.003343387274071574, + -0.0031935435254126787, + 0.08196450024843216, + -0.023019736632704735, 
+ -0.03611232712864876, + -0.037123773247003555, + -0.014216420240700245, + -0.009983335621654987, + 0.02026635967195034, + 0.00901871733367443, + 0.027964578941464424, + 0.009561900049448013, + 0.0246118251234293, + -0.01416022889316082, + -0.039558734744787216, + 0.006003112997859716, + 0.024087373167276382, + -0.036131057888269424, + -0.04008318483829498, + -0.04064510017633438, + -0.01315814908593893, + -0.08001653105020523, + -0.01819664239883423, + -0.020228898152709007, + 0.014759602956473827, + -0.05536724254488945, + -0.021577490493655205, + 0.035962484776973724, + 0.008822047151625156, + 0.01771901547908783, + -0.027908386662602425, + 0.019854288548231125, + 0.01024556253105402, + 0.029800163581967354, + 0.002376427175477147, + -0.007960446178913116, + 0.005197702907025814, + -0.019236182793974876, + 0.000641518272459507, + 0.04941095784306526, + 0.059113334864377975, + -0.0018589982064440846, + -0.006073351949453354, + -0.012024955824017525, + 0.013420375064015388, + -0.05192083865404129, + -0.03547549247741699, + -0.011425580829381943, + -0.008737759664654732, + 0.024836590513586998, + -0.014075941406190395, + -0.008587916381657124, + 0.013195609673857689, + 0.032628461718559265, + 0.003600931027904153, + -0.021633682772517204, + -0.0024490077048540115, + 0.018814748153090477, + -0.010835572145879269, + 0.036730434745550156, + 0.056116461753845215, + -0.047238223254680634, + 0.013729427941143513, + -0.006794474553316832, + 0.02987508475780487, + 0.008246085606515408, + 0.032347507774829865, + -0.04191877320408821, + 0.03465135395526886, + -0.061061304062604904, + -0.030792877078056335, + 0.013111323118209839, + 0.03854728862643242, + 0.03127986937761307, + 0.006546296179294586, + -0.062447357922792435, + 0.021970830857753754, + 0.021408917382359505, + 0.04547755792737007, + -0.034070707857608795, + -0.003069454338401556, + 0.006246608681976795, + -0.03761076554656029, + 0.017119640484452248, + 0.03736726939678192, + 0.018608713522553444, + -0.030774148181080818, + 0.021408917382359505, + -0.015321517363190651, + 0.019704444333910942, + 0.009730474092066288, + 0.02082827314734459, + 0.017110276967287064, + -0.012146703898906708, + 0.018589982762932777, + -0.006349626462906599, + 0.01249321736395359, + 0.025248661637306213, + -0.02309465780854225, + -0.053681500256061554, + -0.03335895016789436, + 0.04158162325620651, + -0.02916332706809044, + -0.02815188281238079, + 0.026110263541340828, + -0.014825159683823586, + 0.06844110786914825, + -0.04536517709493637, + -0.03854728862643242, + 0.013261166401207447, + -0.049785565584897995, + 0.008058780804276466, + 0.020453663542866707, + -0.010535884648561478, + 0.019517140462994576, + -0.04693853482604027, + 0.016342326998710632, + -0.020640967413783073, + -0.01554628275334835, + 0.017896955832839012, + 0.012184164486825466, + -0.02148383855819702, + 0.01065763272345066, + 0.0030835021752864122, + 0.019348565489053726, + -0.02071589045226574, + 0.05364403873682022, + -0.017054084688425064, + -0.004036414436995983, + 0.026335028931498528, + -0.03764822706580162, + -0.0230384673923254, + 0.00017208610370289534, + -0.0007825820939615369, + 0.0464140810072422, + 0.02110922895371914, + 0.048849042505025864, + 0.021633682772517204, + 0.024349600076675415, + -0.02910713665187359, + 0.022982275113463402, + -0.01966698467731476, + -0.011921938508749008, + 0.027065515518188477, + 0.017232025042176247, + -0.03560660779476166, + -0.01119145005941391, + 0.0029125867877155542, + -0.019123800098896027, + -0.04903634637594223, + -0.02921951934695244 + 
], + "key": "Test Memory Example", + "status": "activated" + } + } + ], + "edges": [], + "total_nodes": 4, + "total_edges": 0 +} \ No newline at end of file diff --git a/examples/data/mem_scheduler/scene_data.json b/examples/data/mem_scheduler/scene_data.json deleted file mode 100644 index 94f95c9ba..000000000 --- a/examples/data/mem_scheduler/scene_data.json +++ /dev/null @@ -1,118 +0,0 @@ -[ - [ - { - "role": "user", - "content": "I'm feeling a bit down today." - }, - { - "role": "assistant", - "content": "I'm sorry to hear that. Do you want to talk about what's been going on?" - }, - { - "role": "user", - "content": "It's just been a tough couple of days, you know? Everything feels a bit overwhelming, and I just can't seem to shake it off." - }, - { - "role": "assistant", - "content": "It sounds like you're going through a lot right now. Sometimes it helps to talk things out. Is there something specific that's been weighing on you, or is it more of a general feeling?" - }, - { - "role": "user", - "content": "It's a mix, I guess. Work's been really stressful, and on top of that, I've been feeling kinda disconnected from the people around me." - }, - { - "role": "assistant", - "content": "That can be really draining, especially when you're feeling isolated on top of the stress. Do you think there's something from your past that's contributing to how you're feeling now? Sometimes our emotions are tied to older experiences." - }, - { - "role": "user", - "content": "Yeah, now that you mention it... I've been thinking a lot about my childhood lately. I didn't have the easiest upbringing. My parents were always busy, and I often felt like I had to figure things out on my own. I remember one winter, I got really sick with a high fever. My parents were both working, and I didn't want to bother them. I just lay in bed, crying by myself, not knowing what to do." - }, - { - "role": "assistant", - "content": "That must have been very lonely. Growing up without that support can really make you feel like you have to handle everything on your own. How do you think that experience has affected you now?" - }, - { - "role": "user", - "content": "Yeah, it was really lonely. I remember spending a lot of time doing things on my own, like drawing, because it was my only way to escape. I used to draw animals, and I created my own little world on paper. It was peaceful in a way, just me and my imagination, with no outside distractions." - }, - { - "role": "assistant", - "content": "Drawing sounds like a beautiful way to express yourself and find some peace in that solitude. It must have been a very quiet and private world for you. Do you think that sense of loneliness from back then has shaped how you connect with others today?" - }, - { - "role": "user", - "content": "I think it has. I've always found it hard to let others in. I feel like I have to handle everything on my own. If I rely on someone else, I feel vulnerable, and I start to feel that loneliness again." - }, - { - "role": "assistant", - "content": "It sounds like this was a protective mechanism that helped you cope with your feelings of loneliness back then. But as you've grown, it seems like this pattern might not be serving you as well anymore. Have you thought about trying to break this cycle, or finding new ways to connect with others?" - }, - { - "role": "user", - "content": "I've thought about it, though it's hard. But I also know I can't keep going like this forever. Maybe it's time to seek some help." 
- }, - { - "role": "assistant", - "content": "It's a big step, but a positive one. Exploring your emotions and past experiences could really help you understand how they've shaped your present. Therapy could be a great place to start, as it would help you connect with both yourself and others. Would you be open to trying it?" - }, - { - "role": "user", - "content": "I'm open to trying. Thank you for encouraging me. It feels like it might be a good starting point for me." - }, - { - "role": "assistant", - "content": "I'm really glad to hear that. Taking that first step is always difficult, but you've made an important decision. If you need help finding the right therapist, or just want to talk, I'm here for you." - } - ], - [ - { - "role": "user", - "content": "This week I've been feeling a bit off, especially when playing tennis. My body just doesn't feel right." - }, - { - "role": "assistant", - "content": "It sounds like you've been having some physical discomfort lately. Where specifically are you feeling unwell? Is there something particular about tennis that's been difficult?" - }, - { - "role": "user", - "content": "I just feel a bit fatigued, and every time I play tennis, my movements feel off. I used to play pretty well, but lately, I've been feeling weak and my movements aren't as coordinated as they used to be." - }, - { - "role": "assistant", - "content": "Physical discomfort can definitely affect performance, especially in a sport like tennis that requires quick reactions and coordination. Have you noticed anything specific that might be causing these changes? Could it be overtraining, or is there another physical issue making you feel off?" - }, - { - "role": "user", - "content": "I think it might be due to stress and lack of sleep recently. I just feel drained all the time. Plus, I've been frustrated with my tennis performance, which makes me feel even worse, like I'm stuck in a vicious cycle." - }, - { - "role": "assistant", - "content": "Stress and poor sleep quality can definitely affect your physical performance, especially in sports. When you're also feeling down about your performance, it's easy to fall into a negative cycle. Have you tried anything to relieve some of the stress, or to improve your sleep?" - }, - { - "role": "user", - "content": "I've tried some relaxation techniques, like deep breathing and simple meditation, but it doesn't seem to help much. I still can't focus during tennis, and I feel like my mind is distracted by other things." - }, - { - "role": "assistant", - "content": "Relaxation techniques like deep breathing and meditation can help reduce stress, but sometimes we need a more structured approach to improve both our physical and mental state. For example, you might want to focus on specific aspects of your tennis training, or adjust your rest and recovery time. Have you thought about setting smaller goals for yourself? Like focusing on a specific tennis move each day, rather than expecting perfection right away? That might help you gradually regain confidence." - }, - { - "role": "user", - "content": "That's a good idea. I think I've been expecting too much from myself, which just makes me more anxious. Maybe I can start with small practice sessions and gradually find my rhythm again." - }, - { - "role": "assistant", - "content": "Exactly! Setting small and clear goals will help reduce pressure and give you a sense of accomplishment. 
And don't forget that rest is just as important—taking time to recover will help you feel better physically and mentally. Both physical and mental recovery are key to improving your performance and health." - }, - { - "role": "user", - "content": "Thanks, I feel like I need to rethink a few things. Maybe I really need to be kinder to myself and take things step by step." - }, - { - "role": "assistant", - "content": "You're on the right track. Rethinking things and giving yourself space and patience is so important. You've already taken the first step, now just keep moving forward, one step at a time. If you need anything, I'm always here to help." - } - ] -] diff --git a/examples/mem_api/pipeline_test.py b/examples/mem_api/pipeline_test.py deleted file mode 100644 index cd7b3bee3..000000000 --- a/examples/mem_api/pipeline_test.py +++ /dev/null @@ -1,178 +0,0 @@ -""" -Pipeline test script for MemOS Server API functions. -This script directly tests add and search functionalities without going through the API layer. -If you want to start server_api set .env to MemOS/.env and run: -uvicorn memos.api.server_api:app --host 0.0.0.0 --port 8002 --workers 4 -""" - -from typing import Any - -from dotenv import load_dotenv - -# Import directly from server_router to reuse initialized components -from memos.api.routers.server_router import ( - _create_naive_mem_cube, - mem_reader, -) -from memos.log import get_logger - - -# Load environment variables -load_dotenv() - -logger = get_logger(__name__) - - -def test_add_memories( - messages: list[dict[str, str]], - user_id: str, - mem_cube_id: str, - session_id: str = "default_session", -) -> list[str]: - """ - Test adding memories to the system. - - Args: - messages: List of message dictionaries with 'role' and 'content' - user_id: User identifier - mem_cube_id: Memory cube identifier - session_id: Session identifier - - Returns: - List of memory IDs that were added - """ - logger.info(f"Testing add memories for user: {user_id}, mem_cube: {mem_cube_id}") - - # Create NaiveMemCube using server_router function - naive_mem_cube = _create_naive_mem_cube() - - # Extract memories from messages using server_router's mem_reader - memories = mem_reader.get_memory( - [messages], - type="chat", - info={ - "user_id": user_id, - "session_id": session_id, - }, - ) - - # Flatten memory list - flattened_memories = [mm for m in memories for mm in m] - - # Add memories to the system - mem_id_list: list[str] = naive_mem_cube.text_mem.add( - flattened_memories, - user_name=mem_cube_id, - ) - - logger.info(f"Added {len(mem_id_list)} memories: {mem_id_list}") - - # Print details of added memories - for memory_id, memory in zip(mem_id_list, flattened_memories, strict=False): - logger.info(f" - ID: {memory_id}") - logger.info(f" Memory: {memory.memory}") - logger.info(f" Type: {memory.metadata.memory_type}") - - return mem_id_list - - -def test_search_memories( - query: str, - user_id: str, - mem_cube_id: str, - session_id: str = "default_session", - top_k: int = 5, - mode: str = "fast", - internet_search: bool = False, - moscube: bool = False, - chat_history: list | None = None, -) -> list[Any]: - """ - Test searching memories from the system. 
- - Args: - query: Search query text - user_id: User identifier - mem_cube_id: Memory cube identifier - session_id: Session identifier - top_k: Number of top results to return - mode: Search mode - internet_search: Whether to enable internet search - moscube: Whether to enable moscube search - chat_history: Chat history for context - - Returns: - List of search results - """ - - # Create NaiveMemCube using server_router function - naive_mem_cube = _create_naive_mem_cube() - - # Prepare search filter - search_filter = {"session_id": session_id} if session_id != "default_session" else None - - search_results = naive_mem_cube.text_mem.search( - query=query, - user_name=mem_cube_id, - top_k=top_k, - mode=mode, - manual_close_internet=not internet_search, - moscube=moscube, - search_filter=search_filter, - info={ - "user_id": user_id, - "session_id": session_id, - "chat_history": chat_history or [], - }, - ) - - # Print search results - for idx, result in enumerate(search_results, 1): - logger.info(f"\n Result {idx}:") - logger.info(f" ID: {result.id}") - logger.info(f" Memory: {result.memory}") - logger.info(f" Score: {getattr(result, 'score', 'N/A')}") - logger.info(f" Type: {result.metadata.memory_type}") - - return search_results - - -def main(): - # Test parameters - user_id = "test_user_123" - mem_cube_id = "test_cube_123" - session_id = "test_session_001" - - test_messages = [ - {"role": "user", "content": "Where should I go for Christmas?"}, - { - "role": "assistant", - "content": "There are many places to visit during Christmas, such as the Bund and Disneyland in Shanghai.", - }, - {"role": "user", "content": "What about New Year's Eve?"}, - { - "role": "assistant", - "content": "For New Year's Eve, you could visit Times Square in New York or watch fireworks at the Sydney Opera House.", - }, - ] - - memory_ids = test_add_memories( - messages=test_messages, user_id=user_id, mem_cube_id=mem_cube_id, session_id=session_id - ) - - logger.info(f"\nSuccessfully added {len(memory_ids)} memories!") - - search_queries = [ - "How to enjoy Christmas?", - "Where to celebrate New Year?", - "What are good places to visit during holidays?", - ] - - for query in search_queries: - logger.info("\n" + "-" * 80) - results = test_search_memories(query=query, user_id=user_id, mem_cube_id=mem_cube_id) - print(f"Query: '{query}' returned {len(results)} results") - - -if __name__ == "__main__": - main() diff --git a/examples/mem_chat/chat_w_generated_cube_explicit_memory_only.py b/examples/mem_chat/chat_w_generated_cube_explicit_memory_only.py index a1cce6e8a..2c72fd192 100644 --- a/examples/mem_chat/chat_w_generated_cube_explicit_memory_only.py +++ b/examples/mem_chat/chat_w_generated_cube_explicit_memory_only.py @@ -1,127 +1,148 @@ +import os +import sys + + +# Add the project's src directory to the Python path so that memos modules can be imported +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../../src"))) + from memos.configs.mem_chat import MemChatConfigFactory from memos.configs.mem_cube import GeneralMemCubeConfig from memos.mem_chat.factory import MemChatFactory from memos.mem_cube.general import GeneralMemCube -mem_chat_config = MemChatConfigFactory.model_validate( - { - "backend": "simple", - "config": { - "user_id": "user_123", - "chat_llm": { - "backend": "ollama", - "config": { - "model_name_or_path": "qwen3:1.7b", - "temperature": 0.0, - "remove_think_prefix": True, - "max_tokens": 4096, - }, - }, - "max_turns_window": 20, - "top_k": 5, - "enable_textual_memory": True, - 
"enable_activation_memory": False, - "enable_parametric_memory": False, - }, - } -) -mem_chat = MemChatFactory.from_config(mem_chat_config) - -# Initialize_memory_cube -config = GeneralMemCubeConfig.model_validate( - { - "user_id": "user03alice", - "cube_id": "user03alice/mem_cube_tree", - "text_mem": { - "backend": "tree_text", - "config": { - "extractor_llm": { - "backend": "ollama", - "config": { - "model_name_or_path": "qwen3:1.7b", - "temperature": 0.0, - "remove_think_prefix": True, - "max_tokens": 8192, - }, - }, - "dispatcher_llm": { - "backend": "ollama", - "config": { - "model_name_or_path": "qwen3:1.7b", - "temperature": 0.0, - "remove_think_prefix": True, - "max_tokens": 8192, - }, - }, - "graph_db": { - "backend": "neo4j", - "config": { - "uri": "bolt://localhost:7687", - "user": "neo4j", - "password": "12345678", - "db_name": "user03alice_mem_cube_3", - "auto_create": True, - }, - }, - "embedder": { - "backend": "ollama", - "config": { - "model_name_or_path": "nomic-embed-text:latest", - }, - }, - }, - }, - "act_mem": { - "backend": "kv_cache", +def get_mem_chat_config() -> MemChatConfigFactory: + """ + Generates the configuration object for MemChat. + + MemChat is the top-level component for user interaction, responsible for managing the conversation flow, + invoking the LLM, and interacting with the memory module. + The configuration includes: + - user_id: User identifier + - chat_llm: LLM configuration used for chat (uses OpenAI compatible interface here) + - max_turns_window: Size of the conversation history window + - enable_textual_memory: Whether to enable textual memory (Explicit Memory) + """ + return MemChatConfigFactory.model_validate( + { + "backend": "simple", "config": { - "memory_filename": "activation_memory.pickle", - "extractor_llm": { - "backend": "huggingface", + "user_id": "user_123", + "chat_llm": { + "backend": "openai", "config": { - "model_name_or_path": "Qwen/Qwen3-1.7B", + # Prioritize getting sensitive information and model configuration from environment variables + "model_name_or_path": os.getenv("MOS_CHAT_MODEL", "gpt-4o"), "temperature": 0.8, "max_tokens": 1024, "top_p": 0.9, "top_k": 50, - "add_generation_prompt": True, - "remove_think_prefix": False, + "api_key": os.getenv("OPENAI_API_KEY"), + "api_base": os.getenv("OPENAI_API_BASE"), }, }, + "max_turns_window": 20, + "top_k": 5, + # Enable textual memory functionality, allowing the system to retrieve and store explicit memories + "enable_textual_memory": True, + # This example demonstrates only explicit memory, so activation memory and parametric memory are disabled + "enable_activation_memory": False, + "enable_parametric_memory": False, }, - }, - "para_mem": { - "backend": "lora", - "config": { - "memory_filename": "parametric_memory.adapter", - "extractor_llm": { - "backend": "huggingface", - "config": { - "model_name_or_path": "Qwen/Qwen3-1.7B", - "temperature": 0.8, - "max_tokens": 1024, - "top_p": 0.9, - "top_k": 50, - "add_generation_prompt": True, - "remove_think_prefix": False, + } + ) + + +def get_mem_cube_config() -> GeneralMemCubeConfig: + """ + Generates the configuration object for GeneralMemCube. + + MemCube (Memory Cube) is the core storage and management unit for memory. + GeneralMemCube is a general implementation of the memory cube, supporting extraction, vectorized storage, and retrieval of textual memory. 
+ The configuration includes: + - user_id / cube_id: Identifiers for the user and the cube to which the memory belongs + - text_mem: Specific configuration for textual memory + - extractor_llm: LLM used to extract memory fragments from the conversation + - vector_db: Database used to store memory vectors (uses Qdrant here) + - embedder: Model used to generate text vectors (uses OpenAI compatible interface here) + """ + return GeneralMemCubeConfig.model_validate( + { + "user_id": "user03alice", + "cube_id": "user03alice/mem_cube_tree", + "text_mem": { + "backend": "general_text", + "config": { + "cube_id": "user03alice/mem_cube_general", + "memory_filename": "textual_memory.json", + "extractor_llm": { + "backend": "openai", + "config": { + "model_name_or_path": os.getenv("MOS_CHAT_MODEL", "gpt-4o"), + "temperature": 0.8, + "max_tokens": 1024, + "top_p": 0.9, + "top_k": 50, + "api_key": os.getenv("OPENAI_API_KEY"), + "api_base": os.getenv("OPENAI_API_BASE"), + }, + }, + "vector_db": { + "backend": "qdrant", + "config": { + "collection_name": "user03alice_mem_cube_general", + "vector_dimension": 1024, + "distance_metric": "cosine", + }, + }, + "embedder": { + "backend": os.getenv("MOS_EMBEDDER_BACKEND", "universal_api"), + "config": { + "provider": "openai", + "api_key": os.getenv("MOS_EMBEDDER_API_KEY", "EMPTY"), + "model_name_or_path": os.getenv("MOS_EMBEDDER_MODEL", "bge-m3"), + "base_url": os.getenv("MOS_EMBEDDER_API_BASE"), + }, }, }, }, - }, - }, -) + } + ) + + +def main(): + """ + Main program entry point: + 1. Initialize MemChat (Conversation Controller) + 2. Initialize MemCube (Memory Storage) + 3. Mount MemCube to MemChat + 4. Start the chat loop + 5. Save memory after the chat ends + """ + print("Initializing MemChat...") + mem_chat_config = get_mem_chat_config() + mem_chat = MemChatFactory.from_config(mem_chat_config) + + print("Initializing MemCube...") + mem_cube_config = get_mem_cube_config() + mem_cube = GeneralMemCube(mem_cube_config) -# Initialize the MemCube with the configuration -mem_cube = GeneralMemCube(config) + # Mount the initialized memory cube onto the chat system + # This allows MemChat to perform memory retrieval (search) and organization (organize) via mem_cube during the conversation + mem_chat.mem_cube = mem_cube -# TODO: Read memory and prepare data -# Hope to read user docs and save in a file + print("Starting Chat Session...") + try: + mem_chat.run() + except KeyboardInterrupt: + print("\nChat session interrupted.") + finally: + # Ensure memory is persisted to disk before the program exits + # The dump method saves the in-memory memory state to the specified path + print("Saving memory cube...") + mem_chat.mem_cube.dump("new_cube_path") + print("Memory cube saved to 'new_cube_path'.") -# TODO: Organize MemoryCube -# Call Tree.add() -# save in memory cube -# chat and search and organize -mem_chat.mem_cube = mem_cube -mem_chat.run() -mem_chat.mem_cube.dump("new_cube_path") +if __name__ == "__main__": + main() diff --git a/examples/mem_chat/simple_mem_chat.py b/examples/mem_chat/simple_mem_chat.py deleted file mode 100644 index a6b4f5cbe..000000000 --- a/examples/mem_chat/simple_mem_chat.py +++ /dev/null @@ -1,33 +0,0 @@ -from memos.configs.mem_chat import MemChatConfigFactory -from memos.mem_chat.factory import MemChatFactory -from memos.mem_cube.general import GeneralMemCube - - -config = MemChatConfigFactory.model_validate( - { - "backend": "simple", - "config": { - "user_id": "user_123", - "chat_llm": { - "backend": "huggingface", - "config": { - 
"model_name_or_path": "Qwen/Qwen3-1.7B", - "temperature": 0.1, - "remove_think_prefix": True, - "max_tokens": 4096, - }, - }, - "max_turns_window": 20, - "top_k": 5, - "enable_textual_memory": True, - "enable_activation_memory": False, - "enable_parametric_memory": False, - }, - } -) -mem_chat = MemChatFactory.from_config(config) -mem_chat.mem_cube = GeneralMemCube.init_from_dir("examples/data/mem_cube_2") - -mem_chat.run() - -mem_chat.mem_cube.dump("tmp/mem_cube") diff --git a/examples/mem_chunk/markdown_chunk.py b/examples/mem_chunk/markdown_chunk.py deleted file mode 100644 index ce7d2b9ae..000000000 --- a/examples/mem_chunk/markdown_chunk.py +++ /dev/null @@ -1,33 +0,0 @@ -from memos.chunkers import ChunkerFactory -from memos.configs.chunker import ChunkerConfigFactory - - -config = ChunkerConfigFactory.model_validate( - { - "backend": "markdown", - "config": { - "chunk_size": 1000, - "chunk_overlap": 100, - "recursive": True, - }, - } -) - -chunker = ChunkerFactory.from_config(config) - -text = """ -# Header 1 -This is the first sentence. This is the second sentence. -And here's a third one with some additional context. - -# Header 2 -This is the fourth sentence. This is the fifth sentence. -And here's a sixth one with some additional context. - -# Header 3 -This is the seventh sentence. This is the eighth sentence. -And here's a ninth one with some additional context. -""" -chunks = chunker.chunk(text) -for chunk in chunks: - print("doc:", chunk) diff --git a/examples/mem_cube/_deprecated/README.md b/examples/mem_cube/_deprecated/README.md new file mode 100644 index 000000000..da02eebef --- /dev/null +++ b/examples/mem_cube/_deprecated/README.md @@ -0,0 +1,81 @@ +# Deprecated Examples + +⚠️ **These examples are deprecated and no longer maintained.** + +## Why deprecated? + +These examples demonstrate old APIs that directly access MemCube internals (e.g., `mem_cube.text_mem.get_all()`), which is no longer the recommended approach. 
+ +## Current Best Practice + +**Use `SingleCubeView` / `CompositeCubeView` for all add/search operations.** + +The new View architecture provides: +- ✅ Unified API interface +- ✅ Multi-cube support +- ✅ Better integration with MemOS Server +- ✅ Consistent result format with `cube_id` tracking + +## Updated Examples + +See the following files in the parent directory: +- **`../load_cube.py`** - Load MemCube and operate via SingleCubeView +- **`../dump_cube.py`** - Persist MemCube to disk + +## Migration Guide + +### Old approach (deprecated): +```python +mem_cube = GeneralMemCube.init_from_dir("examples/data/mem_cube_2") +items = mem_cube.text_mem.get_all() # ❌ Direct access +for item in items: + print(item) +``` + +### New approach (recommended): +```python +import json +from memos.api.handlers import init_server +from memos.api.product_models import APISearchRequest +from memos.multi_mem_cube.single_cube import SingleCubeView +from memos.log import get_logger + +logger = get_logger(__name__) + +# Initialize server (uses .env configuration) +components = init_server() +naive = components["naive_mem_cube"] + +# Create View +view = SingleCubeView( + cube_id="my_cube", + naive_mem_cube=naive, + mem_reader=components["mem_reader"], + mem_scheduler=components["mem_scheduler"], + logger=logger, + searcher=components["searcher"], + feedback_server=components["feedback_server"], +) + +# Load data from exported JSON +with open("examples/data/mem_cube_tree/textual_memory.json") as f: + json_data = json.load(f) +naive.text_mem.graph_store.import_graph(json_data, user_name="my_cube") + +# Use View API for search +results = view.search_memories(APISearchRequest( + user_id="user", + readable_cube_ids=["my_cube"], + query="your query here", +)) +for group in results.get("text_mem", []): + for mem in group.get("memories", []): + print(mem.get("metadata", {}).get("memory", "N/A")) +``` + +> **Note on Embeddings**: The sample data uses **bge-m3** model with **1024 dimensions**. +> Ensure your environment uses the same embedding configuration for accurate search. + +--- + +For more information, see the [MemCube documentation](https://memos-doc.memoryos.ai/open_source/modules/mem_cube). diff --git a/examples/mem_cube/load_from_folder.py b/examples/mem_cube/_deprecated/load_from_folder.py similarity index 100% rename from examples/mem_cube/load_from_folder.py rename to examples/mem_cube/_deprecated/load_from_folder.py diff --git a/examples/mem_cube/load_from_remote.py b/examples/mem_cube/_deprecated/load_from_remote.py similarity index 100% rename from examples/mem_cube/load_from_remote.py rename to examples/mem_cube/_deprecated/load_from_remote.py diff --git a/examples/mem_cube/load_lazily.py b/examples/mem_cube/_deprecated/load_lazily.py similarity index 100% rename from examples/mem_cube/load_lazily.py rename to examples/mem_cube/_deprecated/load_lazily.py diff --git a/examples/mem_cube/dump_cube.py b/examples/mem_cube/dump_cube.py new file mode 100644 index 000000000..5a2967ff9 --- /dev/null +++ b/examples/mem_cube/dump_cube.py @@ -0,0 +1,137 @@ +""" +MemCube dump example using SingleCubeView. + +Demonstrates: +1. Initialize server and create SingleCubeView with NEW cube_id +2. Add memories via View +3. Dump ONLY this cube's data to directory + +Requirements: + - MemOS service environment (.env configured) + - Neo4j graph database (set NEO4J_BACKEND=neo4j in .env) + +Note on Embeddings: + This example exports embeddings along with memory data. + The sample data uses: bge-m3 model, 1024 dimensions. 
+ If your environment uses a different embedding model or dimension, + you may need to re-embed the data after import, or the semantic + search results may be inaccurate or fail. +""" + +import contextlib +import json +import os +import shutil + +from memos.api.handlers import init_server +from memos.api.product_models import APIADDRequest +from memos.log import get_logger +from memos.multi_mem_cube.single_cube import SingleCubeView + + +logger = get_logger(__name__) + +# NEW cube_id to avoid dumping existing data +EXAMPLE_CUBE_ID = "example_dump_cube" +EXAMPLE_USER_ID = "example_user" + +# ============================================================================= +# Step 1: Initialize server +# ============================================================================= +print("=" * 60) +print("Step 1: Initialize server") +print("=" * 60) + +components = init_server() +print("✓ Server initialized") + +# ============================================================================= +# Step 2: Create SingleCubeView with NEW cube_id +# ============================================================================= +print("\n" + "=" * 60) +print(f"Step 2: Create SingleCubeView (cube_id={EXAMPLE_CUBE_ID})") +print("=" * 60) + +naive = components["naive_mem_cube"] +view = SingleCubeView( + cube_id=EXAMPLE_CUBE_ID, # NEW cube_id + naive_mem_cube=naive, + mem_reader=components["mem_reader"], + mem_scheduler=components["mem_scheduler"], + logger=logger, + searcher=components["searcher"], + feedback_server=components["feedback_server"], +) +print("✓ SingleCubeView created") + +# ============================================================================= +# Step 3: Add memories via View +# ============================================================================= +print("\n" + "=" * 60) +print("Step 3: Add memories via SingleCubeView") +print("=" * 60) + +result = view.add_memories( + APIADDRequest( + user_id=EXAMPLE_USER_ID, + writable_cube_ids=[EXAMPLE_CUBE_ID], + messages=[ + {"role": "user", "content": "This is a test memory for dump example"}, + {"role": "user", "content": "Another memory to demonstrate persistence"}, + ], + async_mode="sync", + ) +) +print(f"✓ Added {len(result)} memories") + +# ============================================================================= +# Step 4: Dump ONLY this cube's data +# ============================================================================= +print("\n" + "=" * 60) +print("Step 4: Dump cube data (filtered by cube_id)") +print("=" * 60) + +output_dir = "tmp/mem_cube_dump" +if os.path.exists(output_dir): + shutil.rmtree(output_dir) +os.makedirs(output_dir, exist_ok=True) + +# Export only this cube's data using user_name filter +text_mem = naive.text_mem +json_data = text_mem.graph_store.export_graph( + include_embedding=True, # Include embeddings for semantic search + user_name=EXAMPLE_CUBE_ID, # Filter by cube_id +) + +# Fix embedding format: parse string to list for import compatibility +# (export_graph stores embedding as string in metadata, but add_node expects list) +for node in json_data.get("nodes", []): + metadata = node.get("metadata", {}) + if "embedding" in metadata and isinstance(metadata["embedding"], str): + with contextlib.suppress(json.JSONDecodeError): + metadata["embedding"] = json.loads(metadata["embedding"]) + +print(f"✓ Exported {len(json_data.get('nodes', []))} nodes") + +# Save to file +memory_file = os.path.join(output_dir, "textual_memory.json") +with open(memory_file, "w", encoding="utf-8") as f: + 
json.dump(json_data, f, indent=2, ensure_ascii=False) +print(f"✓ Saved to: {memory_file}") + +# Save config (user can modify sensitive fields before sharing) +config = components["default_cube_config"].model_copy(deep=True) +config.user_id = EXAMPLE_USER_ID +config.cube_id = EXAMPLE_CUBE_ID +config_file = os.path.join(output_dir, "config.json") +config.to_json_file(config_file) +print(f"✓ Config saved to: {config_file}") + +# ============================================================================= +# Done +# ============================================================================= +print("\n" + "=" * 60) +print("✅ Example completed!") +print("=" * 60) +print(f"\nDumped to: {output_dir}") +print("Run load_cube.py to load this data") diff --git a/examples/mem_cube/load_cube.py b/examples/mem_cube/load_cube.py new file mode 100644 index 000000000..c7552a4a2 --- /dev/null +++ b/examples/mem_cube/load_cube.py @@ -0,0 +1,137 @@ +""" +MemCube load example using SingleCubeView. + +Demonstrates: +1. Initialize server and create SingleCubeView +2. Load memories from dump via graph_store.import_graph() +3. Display loaded memories +4. Search loaded memories (semantic search) + +Requirements: + - MemOS service environment (.env configured) + - Neo4j graph database (set NEO4J_BACKEND=neo4j in .env) + +Note on Embeddings: + The sample data (examples/data/mem_cube_tree) uses: bge-m3 model, 1024 dimensions. + For semantic search to work correctly, your environment must use the same + embedding model and dimension. If different, search results may be inaccurate. +""" + +import json +import os + +from memos.api.handlers import init_server +from memos.api.product_models import APISearchRequest +from memos.log import get_logger +from memos.multi_mem_cube.single_cube import SingleCubeView + + +logger = get_logger(__name__) + +EXAMPLE_CUBE_ID = "example_dump_cube" +EXAMPLE_USER_ID = "example_user" + +# ============================================================================= +# Step 1: Initialize server +# ============================================================================= +print("=" * 60) +print("Step 1: Initialize server") +print("=" * 60) + +components = init_server() +print("✓ Server initialized") + +# ============================================================================= +# Step 2: Create SingleCubeView +# ============================================================================= +print("\n" + "=" * 60) +print(f"Step 2: Create SingleCubeView (cube_id={EXAMPLE_CUBE_ID})") +print("=" * 60) + +naive = components["naive_mem_cube"] +view = SingleCubeView( + cube_id=EXAMPLE_CUBE_ID, + naive_mem_cube=naive, + mem_reader=components["mem_reader"], + mem_scheduler=components["mem_scheduler"], + logger=logger, + searcher=components["searcher"], + feedback_server=components["feedback_server"], +) +print("✓ SingleCubeView created") + +# ============================================================================= +# Step 3: Load memories from dump +# ============================================================================= +print("\n" + "=" * 60) +print("Step 3: Load memories from dump") +print("=" * 60) + +load_dir = "examples/data/mem_cube_tree" +memory_file = os.path.join(load_dir, "textual_memory.json") + +if not os.path.exists(memory_file): + print(f"❌ File not found: {memory_file}") + print(" Run dump_cube.py first to create data!") + exit(1) + +with open(memory_file, encoding="utf-8") as f: + json_data = json.load(f) + +# Import graph data into graph_store +text_mem = 
+text_mem.graph_store.import_graph(json_data, user_name=EXAMPLE_CUBE_ID)
+
+nodes = json_data.get("nodes", [])
+edges = json_data.get("edges", [])
+print(f"✓ Imported {len(nodes)} nodes, {len(edges)} edges")
+
+# =============================================================================
+# Step 4: Display loaded memories
+# =============================================================================
+print("\n" + "=" * 60)
+print("Step 4: Display loaded memories")
+print("=" * 60)
+
+print(f"\nLoaded {len(nodes)} memories:")
+for i, node in enumerate(nodes, 1):
+    metadata = node.get("metadata", {})
+    memory_text = node.get("memory", "N/A")
+    mem_type = metadata.get("memory_type", "unknown")
+    print(f"\n  [{i}] Type: {mem_type}")
+    print(f"      Content: {memory_text[:70]}...")
+
+# =============================================================================
+# Step 5: Search loaded memories
+# =============================================================================
+print("\n" + "=" * 60)
+print("Step 5: Search loaded memories")
+print("=" * 60)
+
+query = "test memory dump persistence demonstration"
+print(f'Query: "{query}"')
+
+search_result = view.search_memories(
+    APISearchRequest(
+        user_id=EXAMPLE_USER_ID,
+        readable_cube_ids=[EXAMPLE_CUBE_ID],
+        query=query,
+    )
+)
+
+text_mem_results = search_result.get("text_mem", [])
+memories = []
+for group in text_mem_results:
+    memories.extend(group.get("memories", []))
+
+print(f"\n✓ Found {len(memories)} relevant memories:")
+for i, mem in enumerate(memories[:3], 1):
+    content = mem.get("metadata", {}).get("memory", "N/A")[:70]
+    print(f"  [{i}] {content}...")
+
+# =============================================================================
+# Done
+# =============================================================================
+print("\n" + "=" * 60)
+print("✅ Example completed!")
+print("=" * 60)
diff --git a/examples/mem_feedback/example_feedback.py b/examples/mem_feedback/example_feedback.py
new file mode 100644
index 000000000..8f4446863
--- /dev/null
+++ b/examples/mem_feedback/example_feedback.py
@@ -0,0 +1,248 @@
+import json
+import os
+import sys
+
+
+# Add the src directory to the Python path so memos modules can be imported
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../../src")))
+
+
+def init_components():
+    """
+    Initialize MemOS core components.
+
+    This function builds and configures all the basic components required to
+    run MemOS:
+    1. LLM (Large Language Model): handles natural language understanding and generation (e.g., GPT-4o).
+    2. Embedder: converts text into vector representations for semantic search and similarity calculation.
+    3. GraphDB (Neo4j): graph database for persistent storage of memory nodes and their relationships.
+    4. MemoryManager: handles memory CRUD operations.
+    5. MemReader: parses and processes input text.
+    6. Reranker: refines the ordering of retrieval results.
+    7. Searcher: integrates retrieval and reranking logic.
+    8. FeedbackServer (SimpleMemFeedback): feedback service core, responsible for processing user feedback and updating memory.
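+
+    Note: Connection settings are read from environment variables / .env
+    (e.g., OPENAI_API_KEY, OPENAI_API_BASE, MOS_EMBEDDER_*, NEO4J_*); the
+    os.getenv defaults below are illustrative only.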
+ + Returns: + tuple: (feedback_server, memory_manager, embedder) + """ + # Lazy import to avoid E402 (module level import not at top of file) + from memos.configs.embedder import EmbedderConfigFactory + from memos.configs.graph_db import GraphDBConfigFactory + from memos.configs.llm import LLMConfigFactory + from memos.configs.mem_reader import MemReaderConfigFactory + from memos.configs.reranker import RerankerConfigFactory + from memos.embedders.factory import EmbedderFactory + from memos.graph_dbs.factory import GraphStoreFactory + from memos.llms.factory import LLMFactory + from memos.mem_feedback.simple_feedback import SimpleMemFeedback + from memos.mem_reader.factory import MemReaderFactory + from memos.memories.textual.tree_text_memory.organize.manager import MemoryManager + from memos.memories.textual.tree_text_memory.retrieve.searcher import Searcher + from memos.reranker.factory import RerankerFactory + + print("Initializing MemOS Components...") + + # 1. LLM: Configure Large Language Model, using OpenAI compatible interface + llm_config = LLMConfigFactory.model_validate( + { + "backend": "openai", + "config": { + "model_name_or_path": os.getenv("MOS_CHAT_MODEL", "gpt-4o"), + "temperature": 0.8, + "max_tokens": 1024, + "top_p": 0.9, + "top_k": 50, + "api_key": os.getenv("OPENAI_API_KEY"), + "api_base": os.getenv("OPENAI_API_BASE"), + }, + } + ) + llm = LLMFactory.from_config(llm_config) + + # 2. Embedder: Configure embedding model for generating text vectors + embedder_config = EmbedderConfigFactory.model_validate( + { + "backend": os.getenv("MOS_EMBEDDER_BACKEND", "universal_api"), + "config": { + "provider": "openai", + "api_key": os.getenv("MOS_EMBEDDER_API_KEY", "EMPTY"), + "model_name_or_path": os.getenv("MOS_EMBEDDER_MODEL", "bge-m3"), + "base_url": os.getenv("MOS_EMBEDDER_API_BASE"), + }, + } + ) + embedder = EmbedderFactory.from_config(embedder_config) + + # 3. GraphDB: Configure Neo4j graph database connection + graph_db = GraphStoreFactory.from_config( + GraphDBConfigFactory.model_validate( + { + "backend": "neo4j", + "config": { + "uri": os.getenv("NEO4J_URI", "neo4j://127.0.0.1:7687"), + "user": os.getenv("NEO4J_USER", "neo4j"), + "password": os.getenv("NEO4J_PASSWORD", "12345678"), + "db_name": os.getenv("NEO4J_DB_NAME", "neo4j"), + "user_name": "zhs", + "auto_create": True, + "use_multi_db": False, + "embedding_dimension": int(os.getenv("EMBEDDING_DIMENSION", "1024")), + }, + } + ) + ) + + # Clear test data for specific user to ensure a clean environment for each run + graph_db.clear(user_name="cube_id_001_0115") + + # 4. MemoryManager: Core memory management, coordinating storage and retrieval + memory_manager = MemoryManager(graph_db, embedder, llm, is_reorganize=False) + + # 5. MemReader: Configure memory reader, including chunking strategy + mem_reader = MemReaderFactory.from_config( + MemReaderConfigFactory.model_validate( + { + "backend": "simple_struct", + "config": { + "llm": llm_config.model_dump(), + "embedder": embedder_config.model_dump(), + "chunker": { + "backend": "sentence", + "config": { + "tokenizer_or_token_counter": "gpt2", + "chunk_size": 512, + "chunk_overlap": 128, + "min_sentences_per_chunk": 1, + }, + }, + }, + } + ) + ) + + # 6. 
Reranker: Configure reranker to improve retrieval relevance
+    mem_reranker = RerankerFactory.from_config(
+        RerankerConfigFactory.model_validate(
+            {
+                "backend": os.getenv("MOS_RERANKER_BACKEND", "cosine_local"),
+                "config": {
+                    "level_weights": {"topic": 1.0, "concept": 1.0, "fact": 1.0},
+                    "level_field": "background",
+                },
+            }
+        )
+    )
+
+    # 7. Searcher: integrates retrieval and reranking
+    searcher = Searcher(llm, graph_db, embedder, mem_reranker)
+
+    # 8. Feedback Server: Initialize the feedback service, the core of this example
+    feedback_server = SimpleMemFeedback(
+        llm=llm,
+        embedder=embedder,
+        graph_store=graph_db,
+        memory_manager=memory_manager,
+        mem_reader=mem_reader,
+        searcher=searcher,
+        reranker=mem_reranker,
+        pref_mem=None,
+    )
+
+    return feedback_server, memory_manager, embedder
+
+
+def main():
+    """
+    Main program flow:
+    1. Initialize components.
+    2. Simulate a conversation scenario and an existing (possibly incorrect) memory.
+    3. Receive user feedback (the correction).
+    4. Process the feedback and update the memory store.
+    5. Display the processing results.
+    """
+    # Load dotenv in main to avoid affecting module import order
+    from dotenv import load_dotenv
+
+    load_dotenv()
+
+    # Lazy import to avoid E402
+    from memos.mem_feedback.utils import make_mem_item
+
+    feedback_server, memory_manager, embedder = init_components()
+    print("-" * 50)
+    print("Initialization Done. Processing Feedback...")
+    print("-" * 50)
+
+    # 1. Simulate Chat History
+    # A conversation between user and assistant in which the assistant makes a
+    # claim about the user's preferences.
+    history = [
+        {"role": "user", "content": "Which fruits do I like, and which do I dislike?"},
+        {"role": "assistant", "content": "You like apples and dislike bananas."},
+    ]
+
+    # 2. Simulate Initial Memory
+    # We manually add a memory to the database, representing what the system
+    # currently believes to be a "fact". This memory ("You like apples and
+    # dislike bananas.") is what we will later correct via feedback.
+    mem_text = "You like apples and dislike bananas."
+    memory_manager.add(
+        [
+            make_mem_item(
+                mem_text,
+                user_id="user_id_001",
+                user_name="cube_id_001_0115",
+                session_id="session_id",
+                tags=["fact"],
+                key="food_preference",
+                sources=[{"type": "chat"}],
+                background="init from chat history",
+                embedding=embedder.embed([mem_text])[0],  # Generate embedding for subsequent retrieval
+                info={
+                    "user_id": "user_id_001",
+                    "user_name": "cube_id_001_0115",
+                    "session_id": "session_id",
+                },
+            )
+        ],
+        user_name="cube_id_001_0115",
+        mode="sync",
+    )
+
+    # 3. Feedback Input
+    # The user points out that the stored memory is incorrect and provides the
+    # correct information.
+    feedback_content = "That's wrong. What I actually like is mangosteen."
+
+    print("\nChat History:")
+    print(json.dumps(history, ensure_ascii=False, indent=2))
+    print("\nFeedback Input:")
+    print(feedback_content)
+
+    # 4. Process Feedback
+    # Core step: call feedback_server to process the user's correction.
+    # The system analyzes the feedback, retrieves relevant memories, and
+    # generates update operations (e.g., add, modify, or archive old memories).
+    res = feedback_server.process_feedback(
+        user_id="user_id_001",
+        user_name="cube_id_001_0115",
+        session_id="session_id",
+        chat_history=history,
+        feedback_content=feedback_content,
+        feedback_time="",
+        async_mode="sync",
+        corrected_answer="",
+        task_id="task_id",
+        info={},
+    )
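+
+    # A minimal sketch of inspecting the result programmatically. The result
+    # schema is not pinned down in this example, so the dict-of-operation-lists
+    # shape assumed below is guarded at every access:
+    if isinstance(res, dict):
+        for op, items in res.items():
+            if isinstance(items, list):
+                print(f"  {op}: {len(items)} memory operation(s)")
+
+    # 5. Feedback Result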
+    print("\n" + "=" * 50)
+    print("Feedback Result")
+    print("=" * 50)
+
+    # Print feedback processing results, including added or updated memory
+    # operations (add/update)
+    print(json.dumps(res, ensure_ascii=False, indent=4, default=str))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/mem_mcp/simple_fastmcp_client.py b/examples/mem_mcp/simple_fastmcp_client.py
index 1981e3066..e5c186b3b 100644
--- a/examples/mem_mcp/simple_fastmcp_client.py
+++ b/examples/mem_mcp/simple_fastmcp_client.py
@@ -12,66 +12,43 @@ async def main():
     print("Working FastMCP Client")
     print("=" * 40)
 
-    async with Client("http://127.0.0.1:8000/mcp") as client:
-        print("Connected to MOS MCP server!")
+    # Connect to MCP server via HTTP
+    # FastMCP HTTP endpoint is at /mcp (not /mcp/v1)
+    async with Client("http://localhost:8002/mcp") as client:
+        print("Connected to MCP server")
 
-        print("Available tools:")
-        tools = await client.list_tools()
-        for tool in tools:
-            print("**" * 20)
-            print(f"  - {tool.name}: {tool.description}")
+        print("\nTesting tool calls via Server API...")
 
-        print("Available resources:")
-        resources = await client.list_resources()
-        for resource in resources:
-            print(f"  - {resource.uri}: {resource.description}")
+        # Note: 'create_user' and 'get_user_info' are not supported by the Server API.
+        # We assume the user already exists or the Server API handles it implicitly.
+        # Using a demo user ID.
+        user_id = "fastmcp_demo_user"
 
-        print("Testing tool calls...")
-
-        print("  Getting user info...")
-        result = await client.call_tool("get_user_info", {})
-        print(f"  Result: {result.content[0].text}")
-
-        print("  Creating user...")
-        result = await client.call_tool(
-            "create_user",
-            {"user_id": "fastmcp_user", "role": "USER", "user_name": "FastMCP Test User"},
-        )
-        print(f"Result: {result.content[0].text}")
-
-        print("  register cube...")
+        print("\n  1. Adding memory...")
         result = await client.call_tool(
-            "register_cube",
-            {
-                "cube_name_or_path": "cube_default_user",
-                "user_id": "fastmcp_user",
-                "cube_id": "fastmcp_user",
+            "add_memory",
+            arguments={
+                "memory_content": "MemOS is a great tool for memory management.",
+                "user_id": user_id,
             },
         )
         print(f"  Result: {result}")
 
-        print("  Adding memory...")
+        print("\n  2. Searching memories...")
         result = await client.call_tool(
-            "add_memory",
-            {
-                "memory_content": "This is a test memory from FastMCP client.",
-                "cube_id": "fastmcp_user",
-                "user_id": "fastmcp_user",
-            },
+            "search_memories",
+            arguments={"query": "MemOS", "user_id": user_id},
        )
-        print(f"  Result: {result.content[0].text}")
+        print(f"  Result: {result}")
 
-        print("  Searching memories...")
+        print("\n  3. Chatting...")
         result = await client.call_tool(
-            "search_memories", {"query": "test memory", "user_id": "fastmcp_user"}
+            "chat",
+            arguments={"query": "What is MemOS?", "user_id": user_id},
         )
-        print(f"  Result: {result.content[0].text[:200]}...")
+        print(f"  Result: {result}")
 
-        print("  Testing chat...")
-        result = await client.call_tool(
-            "chat", {"query": "Hello! Tell me about yourself.", "user_id": "fastmcp_user"}
-        )
-        print(f"  Result: {result.content[0].text[:200]}...")
+        print("\n✓ All tests completed!")
 
 
 if __name__ == "__main__":
diff --git a/examples/mem_mcp/simple_fastmcp_serve.py b/examples/mem_mcp/simple_fastmcp_serve.py
index 78c05cd05..55ad4d84d 100644
--- a/examples/mem_mcp/simple_fastmcp_serve.py
+++ b/examples/mem_mcp/simple_fastmcp_serve.py
@@ -1,18 +1,67 @@
 import argparse
+import json
 import os
 
-from memos.api.mcp_serve import MOSMCPStdioServer
+import requests
+from dotenv import load_dotenv
+from fastmcp import FastMCP
+
+
+load_dotenv()
+
+# Configuration
+# This points to the Server API base URL (e.g., started via server_api.py)
+API_BASE_URL = os.getenv("MEMOS_API_BASE_URL", "http://localhost:8001/product")
+
+# Create MCP Server
+mcp = FastMCP("MemOS MCP via Server API")
 
-if __name__ == "__main__":
-    import argparse
 
-    from dotenv import load_dotenv
+@mcp.tool()
+def add_memory(memory_content: str, user_id: str, cube_id: str | None = None):
+    """Add memory using the Server API."""
+    payload = {
+        "user_id": user_id,
+        "messages": memory_content,
+        "writable_cube_ids": [cube_id] if cube_id else None,
+    }
+    try:
+        resp = requests.post(f"{API_BASE_URL}/add", json=payload, timeout=60)
+        resp.raise_for_status()
+        return resp.json()["message"]
+    except Exception as e:
+        return f"Error: {e}"
 
-    load_dotenv()
 
+@mcp.tool()
+def search_memories(query: str, user_id: str, cube_ids: str | None = None):
+    """Search memories using the Server API."""
+    # Wrap the single cube id in a list to match the add_memory payload shape
+    payload = {
+        "query": query,
+        "user_id": user_id,
+        "readable_cube_ids": [cube_ids] if cube_ids else None,
+    }
+    try:
+        resp = requests.post(f"{API_BASE_URL}/search", json=payload, timeout=60)
+        resp.raise_for_status()
+        # The Server API search response largely matches the product API structure
+        return json.dumps(resp.json()["data"], ensure_ascii=False)
+    except Exception as e:
+        return f"Error: {e}"
+
+
+@mcp.tool()
+def chat(query: str, user_id: str):
+    """Chat using the Server API."""
+    payload = {"query": query, "user_id": user_id}
+    try:
+        resp = requests.post(f"{API_BASE_URL}/chat/complete", json=payload, timeout=60)
+        resp.raise_for_status()
+        return resp.json()["data"]["response"]
+    except Exception as e:
+        return f"Error: {e}"
+
+
+if __name__ == "__main__":
     # Parse command line arguments
-    parser = argparse.ArgumentParser(description="MOS MCP Server")
+    parser = argparse.ArgumentParser(description="MOS MCP Server via API")
     parser.add_argument(
         "--transport",
         choices=["stdio", "http", "sse"],
@@ -24,14 +73,8 @@
     args = parser.parse_args()
 
-    # Set environment variables
-    os.environ["OPENAI_API_BASE"] = os.getenv("OPENAI_API_BASE")
-    os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
-    os.environ["MOS_TEXT_MEM_TYPE"] = "tree_text"  # "tree_text" need set neo4j
-    os.environ["NEO4J_URI"] = os.getenv("NEO4J_URI")
-    os.environ["NEO4J_USER"] = os.getenv("NEO4J_USER")
-    os.environ["NEO4J_PASSWORD"] = os.getenv("NEO4J_PASSWORD")
-
-    # Create and run MCP server
-    server = MOSMCPStdioServer()
-    server.run(transport=args.transport, host=args.host, port=args.port)
+    # For stdio transport, don't pass host and port
+    if args.transport == "stdio":
+        mcp.run(transport=args.transport)
+    else:
+        mcp.run(transport=args.transport, host=args.host, port=args.port)
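+
+# Usage sketch (assumed workflow; adjust hosts/ports to your deployment):
+#   1. Start the MemOS Server API (this bridge defaults to
+#      http://localhost:8001/product).
+#   2. Run this bridge: python simple_fastmcp_serve.py --transport http --port 8002
+#   3. Point simple_fastmcp_client.py at http://localhost:8002/mcp
diff --git a/examples/mem_os/chat_w_generated_cube_explicit_memory.py b/examples/mem_os/chat_w_generated_cube_explicit_memory.py
deleted file mode 100644
index d19a68486..000000000
--- a/examples/mem_os/chat_w_generated_cube_explicit_memory.py
+++ /dev/null
@@ -1,180 +0,0 @@
-from memos.configs.mem_cube import 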
GeneralMemCubeConfig -from memos.configs.mem_os import MOSConfig -from memos.mem_cube.general import GeneralMemCube -from memos.mem_os.main import MOS - - -# 1. Create Mos Config -config = { - "user_id": "user03alice", - "chat_model": { - "backend": "huggingface", - "config": { - "model_name_or_path": "Qwen/Qwen3-1.7B", - "temperature": 0.1, - "remove_think_prefix": True, - "max_tokens": 4096, - }, - }, - "mem_reader": { - "backend": "simple_struct", - "config": { - "llm": { - "backend": "ollama", - "config": { - "model_name_or_path": "qwen3:0.6b", - "temperature": 0.0, - "remove_think_prefix": True, - "max_tokens": 8192, - }, - }, - "embedder": { - "backend": "ollama", - "config": { - "model_name_or_path": "nomic-embed-text:latest", - }, - }, - "chunker": { - "backend": "sentence", - "config": { - "tokenizer_or_token_counter": "gpt2", - "chunk_size": 512, - "chunk_overlap": 128, - "min_sentences_per_chunk": 1, - }, - }, - }, - }, - "max_turns_window": 20, - "top_k": 5, - "enable_textual_memory": True, - "enable_activation_memory": False, - "enable_parametric_memory": False, -} - -mos_config = MOSConfig(**config) -mos = MOS(mos_config) - -# 2. Initialize_memory_cube -config = GeneralMemCubeConfig.model_validate( - { - "user_id": "user03alice", - "cube_id": "user03alice/mem_cube_tree", - "text_mem": { - "backend": "tree_text", - "config": { - "extractor_llm": { - "backend": "ollama", - "config": { - "model_name_or_path": "qwen3:1.7b", - "temperature": 0.0, - "remove_think_prefix": True, - "max_tokens": 8192, - }, - }, - "dispatcher_llm": { - "backend": "ollama", - "config": { - "model_name_or_path": "qwen3:1.7b", - "temperature": 0.0, - "remove_think_prefix": True, - "max_tokens": 8192, - }, - }, - "graph_db": { - "backend": "neo4j", - "config": { - "uri": "bolt://localhost:7687", - "user": "neo4j", - "password": "12345678", - "db_name": "user03alice11", - "auto_create": True, - }, - }, - "embedder": { - "backend": "ollama", - "config": { - "model_name_or_path": "nomic-embed-text:latest", - }, - }, - }, - }, - "act_mem": { - "backend": "kv_cache", - "config": { - "memory_filename": "activation_memory.pickle", - "extractor_llm": { - "backend": "huggingface", - "config": { - "model_name_or_path": "Qwen/Qwen3-1.7B", - "temperature": 0.8, - "max_tokens": 1024, - "top_p": 0.9, - "top_k": 50, - "add_generation_prompt": True, - "remove_think_prefix": False, - }, - }, - }, - }, - "para_mem": { - "backend": "lora", - "config": { - "memory_filename": "parametric_memory.adapter", - "extractor_llm": { - "backend": "huggingface", - "config": { - "model_name_or_path": "Qwen/Qwen3-1.7B", - "temperature": 0.8, - "max_tokens": 1024, - "top_p": 0.9, - "top_k": 50, - "add_generation_prompt": True, - "remove_think_prefix": False, - }, - }, - }, - }, - }, -) - -# 3. Initialize the MemCube with the configuration -mem_cube = GeneralMemCube(config) -try: - mem_cube.dump("/tmp/user03alice/mem_cube_5") -except Exception as e: - print(e) - -# 4. Register the MemCube explicitly -mos.register_mem_cube("/tmp/user03alice/mem_cube_5", "user03alice") - -# 5. add, get, search memory -mos.add(memory_content="I like playing football.") - -get_all_results = mos.get_all() -print(f"Get all results after add memory: {get_all_results}") - -# 6. add mesaages -messages = [ - {"role": "user", "content": "I like playing football."}, - {"role": "assistant", "content": "yes football is my favorite game."}, -] -mos.add(messages) -get_all_results = mos.get_all() -print(f"Get all results after add mesaages: {get_all_results}") - -# 6. 
add doc -mos.add(doc_path="./examples/data") -get_all_results = mos.get_all() -print(f"Get all results after add doc: {get_all_results}") - -search_results = mos.search(query="my favorite football game") -print(f"Search results: {search_results}") - -# .chat -while True: - user_input = input("👤 [You] ").strip() - print() - response = mos.chat(user_input) - print(f"🤖 [Assistant] {response}\n") -print("📢 [System] MemChat has stopped.") diff --git a/examples/mem_os/chat_w_scheduler.py b/examples/mem_os/chat_w_scheduler.py deleted file mode 100644 index 28c4c31a9..000000000 --- a/examples/mem_os/chat_w_scheduler.py +++ /dev/null @@ -1,74 +0,0 @@ -import shutil -import sys - -from pathlib import Path - -from memos.configs.mem_cube import GeneralMemCubeConfig -from memos.configs.mem_os import MOSConfig -from memos.configs.mem_scheduler import AuthConfig -from memos.mem_cube.general import GeneralMemCube -from memos.mem_os.main import MOS - - -FILE_PATH = Path(__file__).absolute() -BASE_DIR = FILE_PATH.parent.parent.parent -sys.path.insert(0, str(BASE_DIR)) # Enable execution from any working directory - - -# set configs -mos_config = MOSConfig.from_yaml_file( - f"{BASE_DIR}/examples/data/config/mem_scheduler/memos_config_w_scheduler.yaml" -) - -mem_cube_config = GeneralMemCubeConfig.from_yaml_file( - f"{BASE_DIR}/examples/data/config/mem_scheduler/mem_cube_config_neo4j.yaml" -) - -# default local graphdb uri -if AuthConfig.default_config_exists(): - auth_config = AuthConfig.from_local_config() - - mos_config.mem_reader.config.llm.config.api_key = auth_config.openai.api_key - mos_config.mem_reader.config.llm.config.api_base = auth_config.openai.base_url - - mem_cube_config.text_mem.config.graph_db.config.uri = auth_config.graph_db.uri - mem_cube_config.text_mem.config.graph_db.config.user = auth_config.graph_db.user - mem_cube_config.text_mem.config.graph_db.config.password = auth_config.graph_db.password - mem_cube_config.text_mem.config.graph_db.config.db_name = auth_config.graph_db.db_name - mem_cube_config.text_mem.config.graph_db.config.auto_create = auth_config.graph_db.auto_create - -# Initialization -mos = MOS(mos_config) - -user_id = "user_1" -mos.create_user(user_id) - -mem_cube_id = "mem_cube_5" -mem_cube_name_or_path = f"{BASE_DIR}/outputs/mem_scheduler/{user_id}/{mem_cube_id}" - -if Path(mem_cube_name_or_path).exists(): - shutil.rmtree(mem_cube_name_or_path) - print(f"{mem_cube_name_or_path} is not empty, and has been removed.") - -mem_cube = GeneralMemCube(mem_cube_config) -mem_cube.dump(mem_cube_name_or_path) -mos.register_mem_cube( - mem_cube_name_or_path=mem_cube_name_or_path, mem_cube_id=mem_cube_id, user_id=user_id -) - -messages = [ - {"role": "user", "content": "I like playing football."}, - {"role": "assistant", "content": "I like playing football too."}, -] -mos.add(messages, user_id=user_id, mem_cube_id=mem_cube_id) - - -while True: - user_input = input("👤 [You] ").strip() - print() - response = mos.chat(user_input, user_id=user_id) - retrieved_memories = mos.get_all(mem_cube_id=mem_cube_id, user_id=user_id) - print(f"🤖 [Assistant] {response}\n") - for node in retrieved_memories["text_mem"][0]["memories"]["nodes"]: - if node["metadata"]["memory_type"] == "WorkingMemory": - print(f"🤖 [Assistant]working mem : {node['memory']}\n") diff --git a/examples/mem_os/cot_usage_example.py b/examples/mem_os/cot_usage_example.py deleted file mode 100644 index e3d10fa75..000000000 --- a/examples/mem_os/cot_usage_example.py +++ /dev/null @@ -1,342 +0,0 @@ -#!/usr/bin/env python3 -""" 
-MemOS CoT (Chain of Thought) Usage Example -This example demonstrates how to use CoT functionality with tree textual memory. -It shows how to: -1. Decompose complex questions into sub-questions -2. Get answers for sub-questions using tree_textual_memory -3. Use JSON configuration files with environment variable overrides -""" - -import json -import os - -# Load environment variables -from dotenv import load_dotenv - -from memos.configs.llm import LLMConfigFactory -from memos.configs.mem_reader import SimpleStructMemReaderConfig -from memos.configs.memory import TreeTextMemoryConfig -from memos.mem_os.main import MOS -from memos.mem_reader.simple_struct import SimpleStructMemReader -from memos.memories.textual.tree import TreeTextMemory - - -load_dotenv() - - -def load_and_modify_config(config_path: str) -> dict: - """Load JSON config and modify it with environment variables.""" - with open(config_path) as f: - config = json.load(f) - - # Get environment variables - openai_api_key = os.getenv("OPENAI_API_KEY") - openai_base_url = os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1") - - # Modify config to use ollama for embedder and gpt-4o-mini for LLMs - if "embedder" in config: - config["embedder"] = { - "backend": "ollama", - "config": {"model_name_or_path": "nomic-embed-text:latest"}, - } - - # Modify LLM configs to use gpt-4o-mini - if "llm" in config: - config["llm"] = { - "backend": "openai", - "config": { - "model_name_or_path": "gpt-4o-mini", - "api_key": openai_api_key, - "api_base": openai_base_url, - "temperature": 0.5, - "remove_think_prefix": True, - "max_tokens": 8192, - }, - } - - if "extractor_llm" in config: - config["extractor_llm"] = { - "backend": "openai", - "config": { - "model_name_or_path": "gpt-4o-mini", - "api_key": openai_api_key, - "api_base": openai_base_url, - "temperature": 0.5, - "remove_think_prefix": True, - "max_tokens": 8192, - }, - } - - if "dispatcher_llm" in config: - config["dispatcher_llm"] = { - "backend": "openai", - "config": { - "model_name_or_path": "gpt-4o-mini", - "api_key": openai_api_key, - "api_base": openai_base_url, - "temperature": 0.5, - "remove_think_prefix": True, - "max_tokens": 8192, - }, - } - - # Modify graph_db config if present - if "graph_db" in config: - neo4j_uri = os.getenv("NEO4J_URI", "bolt://localhost:7687") - neo4j_user = os.getenv("NEO4J_USER", "neo4j") - neo4j_password = os.getenv("NEO4J_PASSWORD", "12345678") - - config["graph_db"] = { - "backend": "neo4j", - "config": { - "uri": neo4j_uri, - "user": neo4j_user, - "password": neo4j_password, - "db_name": "testlcy", - "auto_create": True, - "embedding_dimension": 768, - }, - } - - return config - - -def setup_llm_config(): - """Setup LLM configuration for CoT operations.""" - # Get environment variables - openai_api_key = os.getenv("OPENAI_API_KEY") - openai_base_url = os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1") - # Use ollama with gpt-4o-mini model - return LLMConfigFactory( - backend="openai", - config={ - "model_name_or_path": "gpt-4o-mini", - "api_key": openai_api_key, - "api_base": openai_base_url, - "temperature": 0.5, - "remove_think_prefix": True, - "max_tokens": 8192, - }, - ) - - -def create_tree_memory(): - """Create a tree textual memory with sample data.""" - print("Creating tree textual memory...") - - # Load and modify configurations - tree_config_dict = load_and_modify_config("examples/data/config/tree_config.json") - reader_config_dict = load_and_modify_config( - "examples/data/config/simple_struct_reader_config.json" - ) - - # 
Create config objects - tree_config = TreeTextMemoryConfig.model_validate(tree_config_dict) - reader_config = SimpleStructMemReaderConfig.model_validate(reader_config_dict) - - # Create tree memory - tree_memory = TreeTextMemory(tree_config) - tree_memory.delete_all() # Clear existing data - - # Create memory reader - reader = SimpleStructMemReader(reader_config) - - # Sample conversation data - sample_conversations = [ - [ - {"role": "user", "content": "Tell me about China and its capital."}, - { - "role": "assistant", - "content": "China is a country in East Asia. Beijing is its capital city.", - }, - {"role": "user", "content": "Who is Lang Ping?"}, - { - "role": "assistant", - "content": "Lang Ping is a famous Chinese volleyball coach and former player.", - }, - {"role": "user", "content": "What about Madagascar?"}, - { - "role": "assistant", - "content": "Madagascar is an island country in the Indian Ocean. It's known for its unique wildlife.", - }, - {"role": "user", "content": "Tell me about trade between China and Madagascar."}, - { - "role": "assistant", - "content": "China and Madagascar have developed trade relations, particularly in agriculture and mining.", - }, - {"role": "user", "content": "What about the essential oil industry in Madagascar?"}, - { - "role": "assistant", - "content": "The essential oil industry is growing in Madagascar, especially on Nosy Be Island where vanilla and ylang-ylang are produced.", - }, - ] - ] - - # Acquire memories using the reader - memories = reader.get_memory( - sample_conversations, type="chat", info={"user_id": "cot_user", "session_id": "cot_session"} - ) - - # Add memories to tree structure - for memory_list in memories: - tree_memory.add(memory_list) - - print("✓ Added sample conversations to tree memory") - return tree_memory - - -def cot_decompose(): - """Test the cot_decompose functionality.""" - print("\n=== Testing CoT Decomposition ===") - - # Setup LLM config - llm_config = setup_llm_config() - - # Test questions - test_questions = [ - "Who is the current head coach of the gymnastics team in the capital of the country that Lang Ping represents?", - "What is the weather like today?", - "How did the trade relationship between Madagascar and China develop, and how does this relationship affect the market expansion of the essential oil industry on Nosy Be Island?", - ] - - for i, question in enumerate(test_questions, 1): - print(f"\nTest {i}: {question}") - result = MOS.cot_decompose(question, llm_config) - print(f"✓ Decomposition result: {result}") - - if result.get("is_complex", False): - sub_questions = result.get("sub_questions", []) - print(f"✓ Found {len(sub_questions)} sub-questions:") - for j, sub_q in enumerate(sub_questions, 1): - print(f" {j}. {sub_q}") - else: - print("✓ Question is not complex, no decomposition needed.") - - return llm_config - - -def get_sub_answers_with_tree_memory(): - """Test get_sub_answers with tree textual memory.""" - print("\n=== Testing get_sub_answers with Tree Textual Memory ===") - - # Setup - llm_config = setup_llm_config() - tree_memory = create_tree_memory() - - # Test sub-questions - sub_questions = [ - "Which country does Lang Ping represent in volleyball?", - "What is the capital of this country?", - "Who is the current head coach of the gymnastics team in this capital?", - ] - - print("Sub-questions to answer:") - for i, q in enumerate(sub_questions, 1): - print(f" {i}. 
{q}") - print("\nGenerating answers using tree memory and LLM...") - sub_questions, sub_answers = MOS.get_sub_answers( - sub_questions=sub_questions, search_engine=tree_memory, llm_config=llm_config, top_k=3 - ) - - print("✓ Generated answers:") - for i, (question, answer) in enumerate(zip(sub_questions, sub_answers, strict=False), 1): - print(f"\n Sub-question {i}: {question}") - print(f" Answer: {answer}") - - -def complete_cot_workflow(): - """Test the complete CoT workflow from decomposition to final synthesis.""" - print("\n=== Testing Complete CoT Workflow ===") - - # Setup - llm_config = setup_llm_config() - tree_memory = create_tree_memory() - - # Complex question - complex_question = "How did the trade relationship between Madagascar and China develop, and how does this relationship affect the market expansion of the essential oil industry on Nosy Be Island?" - - print(f"Original question: {complex_question}") - - try: - # Step 1: Decompose the question - print("\n1. Decomposing question...") - decomposition_result = MOS.cot_decompose(complex_question, llm_config) - print(f"✓ Decomposition result: {decomposition_result}") - - if not decomposition_result.get("is_complex", False): - print("Question is not complex, no decomposition needed.") - return - - sub_questions = decomposition_result.get("sub_questions", []) - print(f"✓ Found {len(sub_questions)} sub-questions:") - for i, q in enumerate(sub_questions, 1): - print(f" {i}. {q}") - - # Step 2: Get answers for sub-questions - print("\n2. Getting answers for sub-questions...") - sub_questions, sub_answers = MOS.get_sub_answers( - sub_questions=sub_questions, search_engine=tree_memory, llm_config=llm_config, top_k=3 - ) - - print("✓ Generated answers:") - for i, (question, answer) in enumerate(zip(sub_questions, sub_answers, strict=False), 1): - print(f"\n Sub-question {i}: {question}") - print(f" Answer: {answer}") - - # Step 3: Generate final synthesis - print("\n3. Generating final synthesis...") - # Build the sub-questions and answers text - qa_text = "" - for i, (question, answer) in enumerate(zip(sub_questions, sub_answers, strict=False), 1): - qa_text += f"Q{i}: {question}\nA{i}: {answer}\n\n" - - synthesis_prompt = f"""You are an expert at synthesizing information from multiple sources to provide comprehensive answers. - -Sub-questions and their answers: -{qa_text} -Please synthesize these answers into a comprehensive response that: -1. Addresses the original question completely -2. Integrates information from all sub-questions -3. Provides clear reasoning and connections -4. 
Is well-structured and easy to understand - -Original question: {complex_question} - -Your response:""" - - # Generate final answer - from memos.llms.factory import LLMFactory - - llm = LLMFactory.from_config(llm_config) - messages = [ - { - "role": "system", - "content": "You are a helpful assistant that synthesizes information from multiple sources.", - }, - {"role": "user", "content": synthesis_prompt}, - ] - - final_answer = llm.generate(messages) - print(f"\n✓ Final synthesized answer:\n{final_answer}") - - except Exception as e: - print(f"✗ Error in complete workflow: {e}") - - -def main(): - """Main function to run the CoT example.""" - print("MemOS CoT (Chain of Thought) Usage Example") - print("=" * 60) - - # Run the examples - cot_decompose() - get_sub_answers_with_tree_memory() - complete_cot_workflow() - - print("\n" + "=" * 60) - print("✓ All examples completed successfully!") - - -if __name__ == "__main__": - main() diff --git a/examples/mem_os/easy_memos.py b/examples/mem_os/easy_memos.py deleted file mode 100644 index a2e8014ed..000000000 --- a/examples/mem_os/easy_memos.py +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env python3 -""" -Simple test script for MOS.simple() functionality. -""" - -import os - -from memos.mem_os.main import MOS - - -# Set environment variables for testing -os.environ["OPENAI_API_BASE"] = "http://xxxxxxxxx" -os.environ["OPENAI_API_KEY"] = "sk-xxxxxxxxxx" -os.environ["MOS_TEXT_MEM_TYPE"] = "general_text" # "tree_text" need set neo4j - - -memory = MOS.simple() -print("MOS.simple() works!") -memory.add(memory_content="my favorite color is blue") -print(memory.chat("what is my favorite color?")) -# Your favorite color is blue! diff --git a/examples/mem_os/locomo_shared_database_memos.py b/examples/mem_os/locomo_shared_database_memos.py deleted file mode 100644 index 97efe1fe6..000000000 --- a/examples/mem_os/locomo_shared_database_memos.py +++ /dev/null @@ -1,203 +0,0 @@ -import json -import os - -from dotenv import load_dotenv - -from memos import log -from memos.configs.mem_cube import GeneralMemCubeConfig -from memos.configs.mem_os import MOSConfig -from memos.mem_cube.general import GeneralMemCube -from memos.mem_os.product import MOSProduct - - -load_dotenv() - - -logger = log.get_logger(__name__) - - -# === Load conversation === -with open("evaluation/data/locomo/locomo10.json", encoding="utf-8") as f: - conversation = json.load(f) - data = conversation[3] - speaker_a = data["conversation"]["speaker_a"] - speaker_b = data["conversation"]["speaker_b"] - conversation_i = data["conversation"] - -db_name = "shared-db-locomo-case" - -openapi_config = { - "model_name_or_path": "gpt-4o-mini", - "temperature": 0.8, - "max_tokens": 1024, - "api_key": "your-api-key-here", - "api_base": "https://api.openai.com/v1", -} - - -# === Create MOS Config === -def get_user_configs(user_name): - mos_config = MOSConfig( - user_id=user_name, - chat_model={"backend": "openai", "config": openapi_config}, - mem_reader={ - "backend": "simple_struct", - "config": { - "llm": {"backend": "openai", "config": openapi_config}, - "embedder": { - "backend": "universal_api", - "config": { - "provider": "openai", - "api_key": openapi_config["api_key"], - "model_name_or_path": "text-embedding-3-large", - "base_url": openapi_config["api_base"], - }, - }, - "chunker": { - "backend": "sentence", - "config": { - "tokenizer_or_token_counter": "gpt2", - "chunk_size": 512, - "chunk_overlap": 128, - "min_sentences_per_chunk": 1, - }, - }, - }, - }, - enable_textual_memory=True, - 
enable_activation_memory=False, - enable_parametric_memory=False, - top_k=5, - max_turns_window=20, - ) - - return mos_config - - -# === Get Memory Cube Config === -def get_mem_cube_config(user_name): - neo4j_uri = os.getenv("NEO4J_URI", "bolt://localhost:7687") - neo4j_config = { - "uri": neo4j_uri, - "user": "neo4j", - "password": "12345678", - "db_name": db_name, - "user_name": "will be updated", - "use_multi_db": False, - "embedding_dimension": 3072, - "auto_create": True, - } - cube_config = GeneralMemCubeConfig.model_validate( - { - "user_id": user_name, - "cube_id": f"{user_name}_cube", - "text_mem": { - "backend": "tree_text", - "config": { - "extractor_llm": {"backend": "openai", "config": openapi_config}, - "dispatcher_llm": {"backend": "openai", "config": openapi_config}, - "graph_db": {"backend": "neo4j", "config": neo4j_config}, - "embedder": { - "backend": "universal_api", - "config": { - "provider": "openai", - "api_key": openapi_config["api_key"], - "model_name_or_path": "text-embedding-3-large", - "base_url": openapi_config["api_base"], - }, - }, - "reorganize": True, - }, - }, - } - ) - - mem_cube = GeneralMemCube(cube_config) - return mem_cube - - -# === Initialize MOSProduct === -root_config = get_user_configs(user_name="system") -mos_product = MOSProduct(default_config=root_config) - - -# === Register both users === -users = {} -for speaker in [speaker_a, speaker_b]: - user_id = speaker.lower() + "_test" - config = get_user_configs(user_id) - mem_cube = get_mem_cube_config(user_id) - result = mos_product.user_register( - user_id=user_id, - user_name=speaker, - interests=f"I'm {speaker}", - default_mem_cube=mem_cube, - ) - users[speaker] = {"user_id": user_id, "default_cube_id": result["default_cube_id"]} - print(f"✅ Registered: {speaker} -> {result}") - -# === Process conversation, add to both roles === -i = 1 -MAX_CONVERSATION_FOR_TEST = 3 -while ( - f"session_{i}_date_time" in conversation_i and f"session_{i}" in conversation_i -) and i < MAX_CONVERSATION_FOR_TEST: - session_i = conversation_i[f"session_{i}"] - session_time = conversation_i[f"session_{i}_date_time"] - - print(f"\n=== Processing Session {i} | Time: {session_time} ===") - - role1_msgs, role2_msgs = [], [] - - for m in session_i: - if m["speaker"] == speaker_a: - role1_msgs.append( - { - "role": "user", - "content": f"{m['speaker']}:{m['text']}", - "chat_time": session_time, - } - ) - role2_msgs.append( - { - "role": "assistant", - "content": f"{m['speaker']}:{m['text']}", - "chat_time": session_time, - } - ) - elif m["speaker"] == speaker_b: - role1_msgs.append( - { - "role": "assistant", - "content": f"{m['speaker']}:{m['text']}", - "chat_time": session_time, - } - ) - role2_msgs.append( - { - "role": "user", - "content": f"{m['speaker']}:{m['text']}", - "chat_time": session_time, - } - ) - - print(f"\n[Session {i}] {speaker_a} will add {len(role1_msgs)} messages.") - print(f"[Session {i}] {speaker_b} will add {len(role2_msgs)} messages.") - - mos_product.add( - user_id=users[speaker_a]["user_id"], - messages=role1_msgs, - mem_cube_id=users[speaker_a]["default_cube_id"], - ) - mos_product.add( - user_id=users[speaker_b]["user_id"], - messages=role2_msgs, - mem_cube_id=users[speaker_b]["default_cube_id"], - ) - - print(f"[Session {i}] Added messages for both roles") - - i += 1 - -print("\n✅ All messages added for both roles.\n") -mos_product.mem_reorganizer_off() diff --git a/examples/mem_os/multi_user_memos_example.py b/examples/mem_os/multi_user_memos_example.py deleted file mode 100644 index 
ac7e68610..000000000 --- a/examples/mem_os/multi_user_memos_example.py +++ /dev/null @@ -1,162 +0,0 @@ -""" -Example demonstrating how to use MOSProduct for multi-user scenarios. -""" - -import os - -from memos.configs.mem_cube import GeneralMemCubeConfig -from memos.configs.mem_os import MOSConfig -from memos.mem_cube.general import GeneralMemCube -from memos.mem_os.product import MOSProduct - - -def get_config(user_name): - openapi_config = { - "model_name_or_path": "gpt-4o-mini", - "temperature": 0.8, - "max_tokens": 1024, - "top_p": 0.9, - "top_k": 50, - "remove_think_prefix": True, - "api_key": os.getenv("OPENAI_API_KEY"), - "api_base": os.getenv("OPENAI_API_BASE"), - } - # Create a default configuration - default_config = MOSConfig( - user_id="root", - chat_model={"backend": "openai", "config": openapi_config}, - mem_reader={ - "backend": "simple_struct", - "config": { - "llm": { - "backend": "openai", - "config": openapi_config, - }, - "embedder": { - "backend": "universal_api", - "config": { - "provider": os.getenv("MOS_EMBEDDER_PROVIDER", "openai"), - "api_key": os.getenv("MOS_EMBEDDER_API_KEY", "sk-xxxx"), - "model_name_or_path": os.getenv( - "MOS_EMBEDDER_MODEL", "text-embedding-3-large" - ), - "base_url": os.getenv("MOS_EMBEDDER_API_BASE", "http://openai.com"), - }, - }, - "chunker": { - "backend": "sentence", - "config": { - "tokenizer_or_token_counter": "gpt2", - "chunk_size": 512, - "chunk_overlap": 128, - "min_sentences_per_chunk": 1, - }, - }, - }, - }, - user_manager={ - "backend": "mysql", - "config": { - "host": os.getenv("MYSQL_HOST", "localhost"), - "port": int(os.getenv("MYSQL_PORT", "3306")), - "username": os.getenv("MYSQL_USERNAME", "root"), - "password": os.getenv("MYSQL_PASSWORD", "12345678"), - "database": os.getenv("MYSQL_DATABASE", "memos_users"), - "charset": os.getenv("MYSQL_CHARSET", "utf8mb4"), - }, - }, - enable_textual_memory=True, - enable_activation_memory=False, - top_k=5, - max_turns_window=20, - ) - default_cube_config = GeneralMemCubeConfig.model_validate( - { - "user_id": user_name, - "cube_id": f"{user_name}_default_cube", - "text_mem": { - "backend": "tree_text", - "config": { - "extractor_llm": {"backend": "openai", "config": openapi_config}, - "dispatcher_llm": {"backend": "openai", "config": openapi_config}, - "graph_db": { - "backend": "neo4j", - "config": { - "uri": os.getenv("NEO4J_URI", "bolt://localhost:7687"), - "user": os.getenv("NEO4J_USER", "neo4j"), - "password": os.getenv("NEO4J_PASSWORD", "12345678"), - "db_name": os.getenv( - "NEO4J_DB_NAME", "shared-tree-textual-memory-test" - ), - "user_name": f"memos{user_name.replace('-', '')}", - "embedding_dimension": int(os.getenv("EMBEDDING_DIMENSION", 768)), - "use_multi_db": False, - "auto_create": True, - }, - }, - "embedder": { - "backend": "universal_api", - "config": { - "provider": os.getenv("MOS_EMBEDDER_PROVIDER", "openai"), - "api_key": os.getenv("MOS_EMBEDDER_API_KEY", "sk-xxxx"), - "model_name_or_path": os.getenv( - "MOS_EMBEDDER_MODEL", "text-embedding-3-large" - ), - "base_url": os.getenv("MOS_EMBEDDER_API_BASE", "http://openai.com"), - }, - }, - }, - }, - "act_mem": {}, - "para_mem": {}, - } - ) - default_mem_cube = GeneralMemCube(default_cube_config) - return default_config, default_mem_cube - - -def main(): - default_config, default_mem_cube = get_config(user_name="alice") - # Initialize MOSProduct with default config - mos_product = MOSProduct(default_config=default_config) - - # Register first user with default config - result1 = mos_product.user_register( - 
user_id="alice", - user_name="alice", - interests="I'm interested in machine learning and AI research.", - default_mem_cube=default_mem_cube, - ) - print(f"User registration result: {result1}") - - # Chat with Alice - print("\n=== Chatting with Alice ===") - for response_chunk in mos_product.chat(query="What are my interests?", user_id="alice"): - print(response_chunk, end="") - - # Add memory for Alice - mos_product.add( - user_id="alice", - memory_content="I attended a machine learning conference last week.", - mem_cube_id=result1["default_cube_id"], - ) - - # Search memories for Alice - search_result = mos_product.search(query="conference", user_id="alice") - print(f"\nSearch result for Alice: {search_result}") - - # Search memories for Alice - search_result = mos_product.get_all(user_id="alice", memory_type="text_mem") - print(f"\nSearch result for Alice: {search_result}") - - # List all users - users = mos_product.list_users() - print(f"\nAll registered users: {users}") - - # Get user info - alice_info = mos_product.get_user_info("alice") - print(f"\nAlice's info: {alice_info}") - - -if __name__ == "__main__": - main() diff --git a/examples/mem_os/naive_memos.py b/examples/mem_os/naive_memos.py deleted file mode 100644 index 5d0039bb9..000000000 --- a/examples/mem_os/naive_memos.py +++ /dev/null @@ -1,30 +0,0 @@ -import uuid - -from memos.configs.mem_os import MOSConfig -from memos.mem_os.main import MOS - - -# init MOS -mos_config = MOSConfig.from_json_file("examples/data/config/simple_memos_config.json") -memory = MOS(mos_config) - -# create user -user_id = str(uuid.uuid4()) -memory.create_user(user_id=user_id) - -# register cube for user -memory.register_mem_cube("examples/data/mem_cube_2", user_id=user_id) - -# add memory for user -memory.add( - messages=[ - {"role": "user", "content": "I like playing football."}, - {"role": "assistant", "content": "I like playing football too."}, - ], - user_id=user_id, -) - -# Later, when you want to retrieve memory for user -retrieved_memories = memory.search(query="What do you like?", user_id=user_id) -# output text_memories: I like playing football, act_memories, para_memories -print(f"text_memories: {retrieved_memories['text_mem']}") diff --git a/examples/mem_os/persistent_memos_example.py b/examples/mem_os/persistent_memos_example.py deleted file mode 100644 index 16353be6a..000000000 --- a/examples/mem_os/persistent_memos_example.py +++ /dev/null @@ -1,192 +0,0 @@ -""" -Example demonstrating persistent user management in MemOS. - -This example shows how to use the PersistentUserManager to maintain -user configurations across service restarts. 
-""" - -import os -import tempfile - -from memos.configs.mem_os import MOSConfig -from memos.mem_os.product import MOSProduct -from memos.mem_user.persistent_user_manager import PersistentUserManager, UserRole - - -def create_sample_config(user_id: str) -> MOSConfig: - """Create a sample configuration for a user.""" - return MOSConfig( - user_id=user_id, - chat_model={ - "backend": "openai", - "config": { - "model_name_or_path": "gpt-3.5-turbo", - "api_key": "your-api-key-here", - "temperature": 0.7, - }, - }, - mem_reader={ - "backend": "naive", - "config": { - "llm": { - "backend": "openai", - "config": { - "model_name_or_path": "gpt-3.5-turbo", - "api_key": "your-api-key-here", - }, - }, - "embedder": { - "backend": "ollama", - "config": { - "model_name_or_path": "nomic-embed-text:latest", - }, - }, - }, - }, - enable_textual_memory=True, - enable_activation_memory=False, - top_k=5, - max_turns_window=20, - ) - - -def demonstrate_persistence(): - """Demonstrate the persistence functionality.""" - print("=== MemOS Persistent User Management Demo ===\n") - - # Create a temporary database for this demo - temp_dir = tempfile.mkdtemp() - db_path = os.path.join(temp_dir, "demo_memos.db") - - try: - # Step 1: Create a persistent user manager - print("1. Creating PersistentUserManager...") - user_manager = PersistentUserManager(db_path=db_path) - print(f" Database created at: {db_path}") - - # Step 2: Create some sample configurations - print("\n2. Creating sample user configurations...") - user_configs = {} - for i in range(3): - user_id = f"user_{i + 1}" - user_name = f"User {i + 1}" - config = create_sample_config(user_id) - user_configs[user_id] = config - - # Create user with configuration - created_id = user_manager.create_user_with_config( - user_name, config, UserRole.USER, user_id - ) - print(f" Created user: {user_name} (ID: {created_id})") - - # Step 3: Verify configurations are saved - print("\n3. Verifying configurations are saved...") - for user_id in user_configs: - config = user_manager.get_user_config(user_id) - if config: - print(f" ✓ Configuration found for {user_id}") - print(f" - Textual memory enabled: {config.enable_textual_memory}") - print(f" - Top-k: {config.top_k}") - else: - print(f" ✗ Configuration not found for {user_id}") - - # Step 4: Simulate service restart by creating a new manager instance - print("\n4. Simulating service restart...") - print(" Creating new PersistentUserManager instance...") - new_user_manager = PersistentUserManager(db_path=db_path) - - # Step 5: Verify configurations are restored - print("\n5. Verifying configurations are restored after restart...") - for user_id in user_configs: - config = new_user_manager.get_user_config(user_id) - if config: - print(f" ✓ Configuration restored for {user_id}") - else: - print(f" ✗ Configuration not restored for {user_id}") - - # Step 6: Create MOSProduct and demonstrate restoration - print("\n6. Creating MOSProduct with persistent user manager...") - default_config = create_sample_config("default_user") - mos_product = MOSProduct(default_config=default_config) - - # The MOSProduct should automatically restore user instances - print(f" Active user instances: {len(mos_product.user_instances)}") - for user_id in mos_product.user_instances: - print(f" - {user_id}") - - # Step 7: Demonstrate configuration update - print("\n7. 
Demonstrating configuration update...") - user_id = "user_1" - original_config = user_manager.get_user_config(user_id) - if original_config: - # Update configuration - updated_config = original_config.model_copy(deep=True) - updated_config.top_k = 10 - updated_config.enable_activation_memory = True - - success = user_manager.save_user_config(user_id, updated_config) - if success: - print(f" ✓ Updated configuration for {user_id}") - print(f" - New top-k: {updated_config.top_k}") - print(f" - Activation memory: {updated_config.enable_activation_memory}") - else: - print(f" ✗ Failed to update configuration for {user_id}") - - # Step 8: List all configurations - print("\n8. Listing all user configurations...") - all_configs = user_manager.list_user_configs() - print(f" Total configurations: {len(all_configs)}") - for user_id, config in all_configs.items(): - print( - f" - {user_id}: top_k={config.top_k}, textual_memory={config.enable_textual_memory}" - ) - - print("\n=== Demo completed successfully! ===") - print(f"Database file: {db_path}") - print("You can inspect this file to see the persistent data.") - - except Exception as e: - print(f"Error during demo: {e}") - raise - finally: - # Cleanup - if os.path.exists(db_path): - os.remove(db_path) - if os.path.exists(temp_dir): - os.rmdir(temp_dir) - - -def demonstrate_api_usage(): - """Demonstrate how the API would work with persistence.""" - print("\n=== API Usage Example ===") - print(""" - With the new persistent system, your API calls would work like this: - - 1. Register a user (configuration is automatically saved): - POST /product/users/register - { - "user_id": "john_doe", - "user_name": "John Doe", - "interests": "AI, machine learning, programming" - } - - 2. Get user configuration: - GET /product/users/john_doe/config - - 3. Update user configuration: - PUT /product/users/john_doe/config - { - "user_id": "john_doe", - "enable_activation_memory": true, - "top_k": 10, - ... - } - - 4. After service restart, all user instances are automatically restored - and the user can immediately use the system without re-registration. 
- """) - - -if __name__ == "__main__": - demonstrate_persistence() - demonstrate_api_usage() diff --git a/examples/mem_os/simple_memos.py b/examples/mem_os/simple_memos.py deleted file mode 100644 index be765e837..000000000 --- a/examples/mem_os/simple_memos.py +++ /dev/null @@ -1,42 +0,0 @@ -from memos.configs.mem_os import MOSConfig -from memos.mem_os.main import MOS - - -# init MOSConfig -mos_config = MOSConfig.from_json_file("examples/data/config/simple_memos_config.json") -mos = MOS(mos_config) - -# create user id -user_id = "lcy1" -mos.create_user(user_id=user_id) -users = mos.list_users() -print("\nAll users:") -for user in users: - print(f" - {user['user_name']} ({user['user_id']}) - Role: {user['role']}") - - -# load exist mem_cube from local -mos.register_mem_cube("examples/data/mem_cube_2", user_id=user_id) - -mos.add(memory_content="I like playing football.", user_id=user_id) - -get_all_results = mos.get_all(user_id=user_id) -print(f"Get all results for user : {get_all_results}") - -get_results = mos.get( - mem_cube_id="examples/data/mem_cube_2", - memory_id=get_all_results["text_mem"][0]["memories"][0].id, - user_id=user_id, -) -print(f"Get memories for user : {get_results}") - -search_results = mos.search(query="my favorite football game", user_id=user_id) -print(f"Search results for user : {search_results}") - - -while True: - user_input = input("👤 [You] ").strip() - print() - response = mos.chat(user_input, user_id=user_id) - print(f"🤖 [Assistant] {response}\n") -print("📢 [System] MemChat has stopped.") diff --git a/examples/mem_os/simple_openapi_memos.py b/examples/mem_os/simple_openapi_memos.py deleted file mode 100644 index a9faa3dc8..000000000 --- a/examples/mem_os/simple_openapi_memos.py +++ /dev/null @@ -1,300 +0,0 @@ -import os -import time -import uuid - -from datetime import datetime - -from dotenv import load_dotenv - -from memos.configs.mem_cube import GeneralMemCubeConfig -from memos.configs.mem_os import MOSConfig -from memos.mem_cube.general import GeneralMemCube -from memos.mem_os.main import MOS - - -load_dotenv() - -# 1. 
Create MOS Config and set openai config -print(f"🚀 [{datetime.now().strftime('%H:%M:%S')}] Starting to create MOS configuration...") -start_time = time.time() - -user_name = str(uuid.uuid4()) -print(user_name) - -# 1.1 Set openai config -openapi_config = { - "model_name_or_path": "gpt-4o-mini", - "temperature": 0.8, - "max_tokens": 1024, - "top_p": 0.9, - "top_k": 50, - "remove_think_prefix": True, - "api_key": os.getenv("OPENAI_API_KEY", "sk-xxxxx"), - "api_base": os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1"), -} -# 1.2 Set neo4j config -neo4j_uri = os.getenv("NEO4J_URI", "bolt://localhost:7687") - -# 1.3 Create MOS Config -config = { - "user_id": user_name, - "chat_model": { - "backend": "openai", - "config": openapi_config, - }, - "mem_reader": { - "backend": "simple_struct", - "config": { - "llm": { - "backend": "openai", - "config": openapi_config, - }, - "embedder": { - "backend": "ollama", - "config": { - "model_name_or_path": "nomic-embed-text:latest", - }, - }, - "chunker": { - "backend": "sentence", - "config": { - "tokenizer_or_token_counter": "gpt2", - "chunk_size": 512, - "chunk_overlap": 128, - "min_sentences_per_chunk": 1, - }, - }, - }, - }, - "max_turns_window": 20, - "top_k": 5, - "enable_textual_memory": True, - "enable_activation_memory": False, - "enable_parametric_memory": False, -} - -mos_config = MOSConfig(**config) -# you can set PRO_MODE to True to enable CoT enhancement mos_config.PRO_MODE = True -mos = MOS(mos_config) - -print( - f"✅ [{datetime.now().strftime('%H:%M:%S')}] MOS configuration created successfully, time elapsed: {time.time() - start_time:.2f}s\n" -) - -# 2. Initialize memory cube -print(f"🚀 [{datetime.now().strftime('%H:%M:%S')}] Starting to initialize MemCube configuration...") -start_time = time.time() - -config = GeneralMemCubeConfig.model_validate( - { - "user_id": user_name, - "cube_id": f"{user_name}", - "text_mem": { - "backend": "tree_text", - "config": { - "extractor_llm": { - "backend": "openai", - "config": openapi_config, - }, - "dispatcher_llm": { - "backend": "openai", - "config": openapi_config, - }, - "graph_db": { - "backend": "neo4j", - "config": { - "uri": neo4j_uri, - "user": "neo4j", - "password": "12345678", - "db_name": f"db{user_name.replace('-', '')}", - "auto_create": True, - }, - }, - "embedder": { - "backend": "ollama", - "config": { - "model_name_or_path": "nomic-embed-text:latest", - }, - }, - }, - }, - "act_mem": {}, - "para_mem": {}, - }, -) - -print( - f"✅ [{datetime.now().strftime('%H:%M:%S')}] MemCube configuration initialization completed, time elapsed: {time.time() - start_time:.2f}s\n" -) - -# 3. Initialize the MemCube with the configuration -print(f"🚀 [{datetime.now().strftime('%H:%M:%S')}] Starting to create MemCube instance...") -start_time = time.time() - -mem_cube = GeneralMemCube(config) -try: - mem_cube.dump(f"/tmp/{user_name}/") - print( - f"✅ [{datetime.now().strftime('%H:%M:%S')}] MemCube created and saved successfully, time elapsed: {time.time() - start_time:.2f}s\n" - ) -except Exception as e: - print( - f"❌ [{datetime.now().strftime('%H:%M:%S')}] MemCube save failed: {e}, time elapsed: {time.time() - start_time:.2f}s\n" - ) - -# 4. Register the MemCube -print(f"🚀 [{datetime.now().strftime('%H:%M:%S')}] Starting to register MemCube...") -start_time = time.time() - -mos.register_mem_cube(f"/tmp/{user_name}", mem_cube_id=user_name) - -print( - f"✅ [{datetime.now().strftime('%H:%M:%S')}] MemCube registration completed, time elapsed: {time.time() - start_time:.2f}s\n" -) - -# 5. 
Add, get, search memory -print(f"🚀 [{datetime.now().strftime('%H:%M:%S')}] Starting to add single memory...") -start_time = time.time() - -mos.add(memory_content="I like playing football.") - -print( - f"✅ [{datetime.now().strftime('%H:%M:%S')}] Single memory added successfully, time elapsed: {time.time() - start_time:.2f}s" -) - -print(f"🚀 [{datetime.now().strftime('%H:%M:%S')}] Starting to get all memories...") -start_time = time.time() - -get_all_results = mos.get_all() - - -# Filter out embedding fields, keeping only necessary fields -def filter_memory_data(memories_data): - filtered_data = {} - for key, value in memories_data.items(): - if key == "text_mem": - filtered_data[key] = [] - for mem_group in value: - # Check if it's the new data structure (list of TextualMemoryItem objects) - if "memories" in mem_group and isinstance(mem_group["memories"], list): - # New data structure: directly a list of TextualMemoryItem objects - filtered_memories = [] - for memory_item in mem_group["memories"]: - # Create filtered dictionary - filtered_item = { - "id": memory_item.id, - "memory": memory_item.memory, - "metadata": {}, - } - # Filter metadata, excluding embedding - if hasattr(memory_item, "metadata") and memory_item.metadata: - for attr_name in dir(memory_item.metadata): - if not attr_name.startswith("_") and attr_name != "embedding": - attr_value = getattr(memory_item.metadata, attr_name) - if not callable(attr_value): - filtered_item["metadata"][attr_name] = attr_value - filtered_memories.append(filtered_item) - - filtered_group = { - "cube_id": mem_group.get("cube_id", ""), - "memories": filtered_memories, - } - filtered_data[key].append(filtered_group) - else: - # Old data structure: dictionary with nodes and edges - filtered_group = { - "memories": {"nodes": [], "edges": mem_group["memories"].get("edges", [])} - } - for node in mem_group["memories"].get("nodes", []): - filtered_node = { - "id": node.get("id"), - "memory": node.get("memory"), - "metadata": { - k: v - for k, v in node.get("metadata", {}).items() - if k != "embedding" - }, - } - filtered_group["memories"]["nodes"].append(filtered_node) - filtered_data[key].append(filtered_group) - else: - filtered_data[key] = value - return filtered_data - - -filtered_results = filter_memory_data(get_all_results) -print(f"Get all results after add memory: {filtered_results['text_mem'][0]['memories']}") - -print( - f"✅ [{datetime.now().strftime('%H:%M:%S')}] Get all memories completed, time elapsed: {time.time() - start_time:.2f}s\n" -) - -# 6. Add messages -print(f"🚀 [{datetime.now().strftime('%H:%M:%S')}] Starting to add conversation messages...") -start_time = time.time() - -messages = [ - {"role": "user", "content": "I like playing football."}, - {"role": "assistant", "content": "yes football is my favorite game."}, -] -mos.add(messages) - -print( - f"✅ [{datetime.now().strftime('%H:%M:%S')}] Conversation messages added successfully, time elapsed: {time.time() - start_time:.2f}s" -) - -print( - f"🚀 [{datetime.now().strftime('%H:%M:%S')}] Starting to get all memories (after adding messages)..." -) -start_time = time.time() - -get_all_results = mos.get_all() -filtered_results = filter_memory_data(get_all_results) -print(f"Get all results after add messages: {filtered_results}") - -print( - f"✅ [{datetime.now().strftime('%H:%M:%S')}] Get all memories completed, time elapsed: {time.time() - start_time:.2f}s\n" -) - -# 7. 
Add document -print(f"🚀 [{datetime.now().strftime('%H:%M:%S')}] Starting to add document...") -start_time = time.time() -## 7.1 add pdf for ./tmp/data if use doc mem mos.add(doc_path="./tmp/data/") -start_time = time.time() - -get_all_results = mos.get_all() -filtered_results = filter_memory_data(get_all_results) -print(f"Get all results after add doc: {filtered_results}") - -print( - f"✅ [{datetime.now().strftime('%H:%M:%S')}] Get all memories completed, time elapsed: {time.time() - start_time:.2f}s\n" -) - -# 8. Search -print(f"🚀 [{datetime.now().strftime('%H:%M:%S')}] Starting to search memories...") -start_time = time.time() - -search_results = mos.search(query="my favorite football game", user_id=user_name) -filtered_search_results = filter_memory_data(search_results) -print(f"Search results: {filtered_search_results}") - -print( - f"✅ [{datetime.now().strftime('%H:%M:%S')}] Memory search completed, time elapsed: {time.time() - start_time:.2f}s\n" -) - -# 9. Chat -print(f"🎯 [{datetime.now().strftime('%H:%M:%S')}] Starting chat mode...") -while True: - user_input = input("👤 [You] ").strip() - if user_input.lower() in ["quit", "exit"]: - break - - print() - chat_start_time = time.time() - response = mos.chat(user_input) - chat_duration = time.time() - chat_start_time - - print(f"🤖 [Assistant] {response}") - print(f"⏱️ [Response time: {chat_duration:.2f}s]\n") - -print("📢 [System] MemChat has stopped.") diff --git a/examples/mem_os/simple_openapi_memos_neo4j_community.py b/examples/mem_os/simple_openapi_memos_neo4j_community.py deleted file mode 100644 index aad1b8c77..000000000 --- a/examples/mem_os/simple_openapi_memos_neo4j_community.py +++ /dev/null @@ -1,315 +0,0 @@ -import os -import time -import uuid - -from datetime import datetime - -from dotenv import load_dotenv - -from memos.configs.mem_cube import GeneralMemCubeConfig -from memos.configs.mem_os import MOSConfig -from memos.mem_cube.general import GeneralMemCube -from memos.mem_os.main import MOS - - -load_dotenv() - -# 1. 
Create MOS Config and set openai config -print(f"🚀 [{datetime.now().strftime('%H:%M:%S')}] Starting to create MOS configuration...") -start_time = time.time() - -user_name = str(uuid.uuid4()) -print(user_name) - -# 1.1 Set openai config -openapi_config = { - "model_name_or_path": "gpt-4o-mini", - "temperature": 0.8, - "max_tokens": 1024, - "top_p": 0.9, - "top_k": 50, - "remove_think_prefix": True, - "api_key": os.getenv("OPENAI_API_KEY", "sk-xxxxx"), - "api_base": os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1"), -} -embedder_config = { - "backend": "universal_api", - "config": { - "provider": "openai", - "api_key": os.getenv("OPENAI_API_KEY", "sk-xxxxx"), - "model_name_or_path": "text-embedding-3-large", - "base_url": os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1"), - }, -} -EMBEDDING_DIMENSION = 3072 - -# 1.2 Set neo4j config -neo4j_uri = os.getenv("NEO4J_URI", "bolt://localhost:7687") - -# 1.3 Create MOS Config -config = { - "user_id": user_name, - "chat_model": { - "backend": "openai", - "config": openapi_config, - }, - "mem_reader": { - "backend": "simple_struct", - "config": { - "llm": { - "backend": "openai", - "config": openapi_config, - }, - "embedder": embedder_config, - "chunker": { - "backend": "sentence", - "config": { - "tokenizer_or_token_counter": "gpt2", - "chunk_size": 512, - "chunk_overlap": 128, - "min_sentences_per_chunk": 1, - }, - }, - }, - }, - "max_turns_window": 20, - "top_k": 5, - "enable_textual_memory": True, - "enable_activation_memory": False, - "enable_parametric_memory": False, -} - -mos_config = MOSConfig(**config) -# you can set PRO_MODE to True to enable CoT enhancement mos_config.PRO_MODE = True -mos = MOS(mos_config) - -print( - f"✅ [{datetime.now().strftime('%H:%M:%S')}] MOS configuration created successfully, time elapsed: {time.time() - start_time:.2f}s\n" -) - -# 2. Initialize memory cube -print(f"🚀 [{datetime.now().strftime('%H:%M:%S')}] Starting to initialize MemCube configuration...") -start_time = time.time() - -config = GeneralMemCubeConfig.model_validate( - { - "user_id": user_name, - "cube_id": f"{user_name}", - "text_mem": { - "backend": "tree_text", - "config": { - "extractor_llm": { - "backend": "openai", - "config": openapi_config, - }, - "dispatcher_llm": { - "backend": "openai", - "config": openapi_config, - }, - "embedder": embedder_config, - "graph_db": { - "backend": "neo4j-community", - "config": { - "uri": neo4j_uri, - "user": "neo4j", - "password": "12345678", - "db_name": "neo4j", - "user_name": "alice", - "use_multi_db": False, - "auto_create": False, - "embedding_dimension": EMBEDDING_DIMENSION, - "vec_config": { - "backend": "qdrant", - "config": { - "collection_name": "neo4j_vec_db", - "vector_dimension": EMBEDDING_DIMENSION, - "distance_metric": "cosine", - "host": "localhost", - "port": 6333, - }, - }, - }, - }, - "reorganize": True, - }, - }, - "act_mem": {}, - "para_mem": {}, - }, -) - -print( - f"✅ [{datetime.now().strftime('%H:%M:%S')}] MemCube configuration initialization completed, time elapsed: {time.time() - start_time:.2f}s\n" -) - -# 3. 
Initialize the MemCube with the configuration -print(f"🚀 [{datetime.now().strftime('%H:%M:%S')}] Starting to create MemCube instance...") -start_time = time.time() - -mem_cube = GeneralMemCube(config) -try: - mem_cube.dump(f"/tmp/{user_name}/") - print( - f"✅ [{datetime.now().strftime('%H:%M:%S')}] MemCube created and saved successfully, time elapsed: {time.time() - start_time:.2f}s\n" - ) -except Exception as e: - print( - f"❌ [{datetime.now().strftime('%H:%M:%S')}] MemCube save failed: {e}, time elapsed: {time.time() - start_time:.2f}s\n" - ) - -# 4. Register the MemCube -print(f"🚀 [{datetime.now().strftime('%H:%M:%S')}] Starting to register MemCube...") -start_time = time.time() - -mos.register_mem_cube(f"/tmp/{user_name}", mem_cube_id=user_name) - -print( - f"✅ [{datetime.now().strftime('%H:%M:%S')}] MemCube registration completed, time elapsed: {time.time() - start_time:.2f}s\n" -) - -# 5. Add, get, search memory -print(f"🚀 [{datetime.now().strftime('%H:%M:%S')}] Starting to add single memory...") -start_time = time.time() - -mos.add(memory_content="I like playing football.") - -print( - f"✅ [{datetime.now().strftime('%H:%M:%S')}] Single memory added successfully, time elapsed: {time.time() - start_time:.2f}s" -) - -print(f"🚀 [{datetime.now().strftime('%H:%M:%S')}] Starting to get all memories...") -start_time = time.time() - -get_all_results = mos.get_all() - - -# Filter out embedding fields, keeping only necessary fields -def filter_memory_data(memories_data): - filtered_data = {} - for key, value in memories_data.items(): - if key == "text_mem": - filtered_data[key] = [] - for mem_group in value: - # Check if it's the new data structure (list of TextualMemoryItem objects) - if "memories" in mem_group and isinstance(mem_group["memories"], list): - # New data structure: directly a list of TextualMemoryItem objects - filtered_memories = [] - for memory_item in mem_group["memories"]: - # Create filtered dictionary - filtered_item = { - "id": memory_item.id, - "memory": memory_item.memory, - "metadata": {}, - } - # Filter metadata, excluding embedding - if hasattr(memory_item, "metadata") and memory_item.metadata: - for attr_name in dir(memory_item.metadata): - if not attr_name.startswith("_") and attr_name != "embedding": - attr_value = getattr(memory_item.metadata, attr_name) - if not callable(attr_value): - filtered_item["metadata"][attr_name] = attr_value - filtered_memories.append(filtered_item) - - filtered_group = { - "cube_id": mem_group.get("cube_id", ""), - "memories": filtered_memories, - } - filtered_data[key].append(filtered_group) - else: - # Old data structure: dictionary with nodes and edges - filtered_group = { - "memories": {"nodes": [], "edges": mem_group["memories"].get("edges", [])} - } - for node in mem_group["memories"].get("nodes", []): - filtered_node = { - "id": node.get("id"), - "memory": node.get("memory"), - "metadata": { - k: v - for k, v in node.get("metadata", {}).items() - if k != "embedding" - }, - } - filtered_group["memories"]["nodes"].append(filtered_node) - filtered_data[key].append(filtered_group) - else: - filtered_data[key] = value - return filtered_data - - -filtered_results = filter_memory_data(get_all_results) -print(f"Get all results after add memory: {filtered_results['text_mem'][0]['memories']}") - -print( - f"✅ [{datetime.now().strftime('%H:%M:%S')}] Get all memories completed, time elapsed: {time.time() - start_time:.2f}s\n" -) - -# 6. 
Add messages -print(f"🚀 [{datetime.now().strftime('%H:%M:%S')}] Starting to add conversation messages...") -start_time = time.time() - -messages = [ - {"role": "user", "content": "I like playing football."}, - {"role": "assistant", "content": "yes football is my favorite game."}, -] -mos.add(messages) - -print( - f"✅ [{datetime.now().strftime('%H:%M:%S')}] Conversation messages added successfully, time elapsed: {time.time() - start_time:.2f}s" -) - -print( - f"🚀 [{datetime.now().strftime('%H:%M:%S')}] Starting to get all memories (after adding messages)..." -) -start_time = time.time() - -get_all_results = mos.get_all() -filtered_results = filter_memory_data(get_all_results) -print(f"Get all results after add messages: {filtered_results}") - -print( - f"✅ [{datetime.now().strftime('%H:%M:%S')}] Get all memories completed, time elapsed: {time.time() - start_time:.2f}s\n" -) - -# 7. Add document -print(f"🚀 [{datetime.now().strftime('%H:%M:%S')}] Starting to add document...") -start_time = time.time() -## 7.1 add pdf for ./tmp/data if use doc mem mos.add(doc_path="./tmp/data/") -start_time = time.time() - -get_all_results = mos.get_all() -filtered_results = filter_memory_data(get_all_results) -print(f"Get all results after add doc: {filtered_results}") - -print( - f"✅ [{datetime.now().strftime('%H:%M:%S')}] Get all memories completed, time elapsed: {time.time() - start_time:.2f}s\n" -) - -# 8. Search -print(f"🚀 [{datetime.now().strftime('%H:%M:%S')}] Starting to search memories...") -start_time = time.time() - -search_results = mos.search(query="my favorite football game", user_id=user_name) -filtered_search_results = filter_memory_data(search_results) -print(f"Search results: {filtered_search_results}") - -print( - f"✅ [{datetime.now().strftime('%H:%M:%S')}] Memory search completed, time elapsed: {time.time() - start_time:.2f}s\n" -) - -# 9. 
Chat -print(f"🎯 [{datetime.now().strftime('%H:%M:%S')}] Starting chat mode...") -while True: - user_input = input("👤 [You] ").strip() - if user_input.lower() in ["quit", "exit"]: - break - - print() - chat_start_time = time.time() - response = mos.chat(user_input) - chat_duration = time.time() - chat_start_time - - print(f"🤖 [Assistant] {response}") - print(f"⏱️ [Response time: {chat_duration:.2f}s]\n") - -print("📢 [System] MemChat has stopped.") diff --git a/examples/mem_os/simple_prefs_memos_product.py b/examples/mem_os/simple_prefs_memos_product.py deleted file mode 100644 index 40ec920f5..000000000 --- a/examples/mem_os/simple_prefs_memos_product.py +++ /dev/null @@ -1,399 +0,0 @@ -from memos.configs.mem_cube import GeneralMemCubeConfig -from memos.configs.mem_os import MOSConfig -from memos.mem_cube.general import GeneralMemCube -from memos.mem_os.product import MOSProduct - - -def get_config(user_id: str): - llm_config = { - "backend": "openai", - "config": { - "model_name_or_path": "gpt-4o-mini", - "api_key": "sk-xxxxx", - "api_base": "http://xxxx/v1", - "temperature": 0.1, - "remove_think_prefix": True, - "max_tokens": 4096, - }, - } - - embedder_config = { - "backend": "ollama", - "config": {"model_name_or_path": "nomic-embed-text:latest"}, - } - - # init MOS - mos_config = { - "user_id": user_id, - "chat_model": llm_config, - "mem_reader": { - "backend": "simple_struct", - "config": { - "llm": llm_config, - "embedder": embedder_config, - "chunker": { - "backend": "sentence", - "config": { - "tokenizer_or_token_counter": "gpt2", - "chunk_size": 512, - "chunk_overlap": 128, - "min_sentences_per_chunk": 1, - }, - }, - }, - }, - "max_turns_window": 20, - "top_k": 5, - "enable_textual_memory": True, - "enable_activation_memory": False, - "enable_parametric_memory": False, - "enable_preference_memory": True, - } - - cube_config = { - "model_schema": "memos.configs.mem_cube.GeneralMemCubeConfig", - "user_id": user_id, - "cube_id": f"{user_id}/mem_cube", - "text_mem": { - "backend": "tree_text", - "config": { - "cube_id": f"{user_id}/mem_cube", - "extractor_llm": llm_config, - "dispatcher_llm": llm_config, - "graph_db": { - "backend": "neo4j", - "config": { - "uri": "bolt://localhost:7687", - "user": "neo4j", - "password": "12345678", - "db_name": "neo4j", - "user_name": "memosneo4j", - "embedding_dimension": 768, - "use_multi_db": False, - "auto_create": False, - }, - }, - "embedder": embedder_config, - }, - }, - "act_mem": {"backend": "uninitialized", "config": {}}, - "para_mem": {"backend": "uninitialized", "config": {}}, - "pref_mem": { - "backend": "pref_text", - "config": { - "cube_id": f"{user_id}/mem_cube", - "extractor_llm": llm_config, - "vector_db": { - "backend": "milvus", - "config": { - "collection_name": [ - "explicit_preference", - "implicit_preference", - ], - "vector_dimension": 768, - "distance_metric": "cosine", - "uri": "./milvus_demo.db", - }, - }, - "embedder": embedder_config, - "extractor": {"backend": "naive", "config": {}}, - "adder": {"backend": "naive", "config": {}}, - "retriever": {"backend": "naive", "config": {}}, - }, - }, - } - mos_config = MOSConfig(**mos_config) - cube_config = GeneralMemCubeConfig.model_validate(cube_config) - - return mos_config, cube_config - - -user_id = "superman" -mos_config, mem_cube_config = get_config(user_id) - -memory = MOSProduct(default_config=mos_config) - -# register user and mem cube -mem_cube = GeneralMemCube(mem_cube_config) - -register_result = memory.user_register( - user_id=user_id, user_name=user_id, 
default_mem_cube=mem_cube -) -print(f"register_result: {register_result}") - -# add memory for user -memory.add( - messages=[ - # Sports and hobbies - {"role": "user", "content": "I like playing football."}, - { - "role": "assistant", - "content": "I like playing football too. It's a great way to stay active and work as a team.", - }, - {"role": "user", "content": "Do you enjoy watching basketball games?"}, - { - "role": "assistant", - "content": "Yes, I find basketball exciting to watch, especially the fast-paced action and strategic plays.", - }, - {"role": "user", "content": "I'm learning to play tennis this summer."}, - { - "role": "assistant", - "content": "That's wonderful! Tennis is a fantastic sport that improves both physical fitness and mental focus.", - }, - # Food and cooking - {"role": "user", "content": "What's your favorite type of cuisine?"}, - { - "role": "assistant", - "content": "I really enjoy Italian cuisine, especially pasta dishes and authentic pizza.", - }, - {"role": "user", "content": "I love cooking Asian food at home."}, - { - "role": "assistant", - "content": "Asian cooking is amazing! The balance of flavors and techniques is so interesting to learn.", - }, - {"role": "user", "content": "Have you ever tried making homemade bread?"}, - { - "role": "assistant", - "content": "Yes, there's something magical about the process of making bread from scratch. The smell is incredible!", - }, - # Travel and places - {"role": "user", "content": "I'm planning a trip to Japan next year."}, - { - "role": "assistant", - "content": "Japan is an incredible destination! The culture, food, and technology blend is fascinating.", - }, - {"role": "user", "content": "What's the most beautiful place you've ever seen?"}, - { - "role": "assistant", - "content": "I find mountain landscapes particularly breathtaking, especially during sunrise or sunset.", - }, - {"role": "user", "content": "I prefer beach vacations over city trips."}, - { - "role": "assistant", - "content": "Beach vacations are so relaxing! The sound of waves and warm sand is perfect for unwinding.", - }, - # Technology and innovation - {"role": "user", "content": "What do you think about artificial intelligence?"}, - { - "role": "assistant", - "content": "AI is transforming many aspects of our lives. It's exciting to see how it can help solve complex problems.", - }, - {"role": "user", "content": "I'm learning to code in Python."}, - { - "role": "assistant", - "content": "Python is a great language to start with! It's versatile and has a wonderful community.", - }, - {"role": "user", "content": "Do you use any productivity apps?"}, - { - "role": "assistant", - "content": "I find task management apps really helpful for organizing daily activities and staying focused.", - }, - # Music and entertainment - {"role": "user", "content": "What kind of music do you enjoy?"}, - { - "role": "assistant", - "content": "I appreciate many genres, but I particularly enjoy jazz and classical music for their complexity.", - }, - {"role": "user", "content": "I play the guitar in my free time."}, - { - "role": "assistant", - "content": "That's fantastic! 
Playing an instrument is such a rewarding creative outlet.", - }, - {"role": "user", "content": "Have you seen any good movies lately?"}, - { - "role": "assistant", - "content": "I enjoy films that make you think deeply about human nature and society.", - }, - # Books and learning - {"role": "user", "content": "Do you read books regularly?"}, - { - "role": "assistant", - "content": "Yes, reading is one of my favorite ways to learn and explore new ideas.", - }, - {"role": "user", "content": "I'm currently reading a science fiction novel."}, - { - "role": "assistant", - "content": "Science fiction is great for exploring possibilities and imagining different futures.", - }, - {"role": "user", "content": "What's your favorite subject to learn about?"}, - { - "role": "assistant", - "content": "I'm fascinated by psychology and how the human mind works.", - }, - # Nature and environment - {"role": "user", "content": "I love hiking in the mountains."}, - { - "role": "assistant", - "content": "Hiking is wonderful! It's a great way to connect with nature and get exercise.", - }, - {"role": "user", "content": "Are you concerned about climate change?"}, - { - "role": "assistant", - "content": "Yes, it's one of the most important challenges of our time that requires collective action.", - }, - {"role": "user", "content": "I try to recycle and reduce waste at home."}, - { - "role": "assistant", - "content": "That's commendable! Every small action contributes to environmental protection.", - }, - # Work and career - {"role": "user", "content": "What do you do for work?"}, - { - "role": "assistant", - "content": "I work in technology, helping people solve problems and improve their daily lives.", - }, - {"role": "user", "content": "I'm considering a career change."}, - { - "role": "assistant", - "content": "Career changes can be challenging but also very rewarding when you find the right fit.", - }, - {"role": "user", "content": "Do you work from home or in an office?"}, - { - "role": "assistant", - "content": "I work remotely, which gives me flexibility in how I organize my day.", - }, - # Health and wellness - {"role": "user", "content": "I've started doing yoga in the morning."}, - { - "role": "assistant", - "content": "Yoga is excellent for both physical and mental well-being. Great way to start the day!", - }, - {"role": "user", "content": "How do you manage stress?"}, - { - "role": "assistant", - "content": "I find meditation and regular exercise help me stay balanced and focused.", - }, - {"role": "user", "content": "I'm trying to eat more vegetables."}, - { - "role": "assistant", - "content": "That's a great health goal! 
Vegetables provide so many essential nutrients.", - }, - # Art and creativity - {"role": "user", "content": "Do you enjoy painting or drawing?"}, - { - "role": "assistant", - "content": "I appreciate visual arts, though I'm more of an observer than a creator.", - }, - {"role": "user", "content": "I'm learning photography."}, - { - "role": "assistant", - "content": "Photography is a beautiful way to capture moments and express your perspective.", - }, - {"role": "user", "content": "What's your favorite type of art?"}, - { - "role": "assistant", - "content": "I'm drawn to abstract art because it allows for personal interpretation.", - }, - # Family and relationships - {"role": "user", "content": "I have two siblings."}, - { - "role": "assistant", - "content": "Having siblings can be wonderful - they're often our first friends and lifelong companions.", - }, - {"role": "user", "content": "Do you have any pets?"}, - { - "role": "assistant", - "content": "I don't have pets, but I think they bring so much joy and companionship to people's lives.", - }, - {"role": "user", "content": "I'm planning to visit my grandparents this weekend."}, - { - "role": "assistant", - "content": "That's lovely! Grandparents often have such interesting stories and wisdom to share.", - }, - # Education and growth - {"role": "user", "content": "I'm taking an online course in data science."}, - { - "role": "assistant", - "content": "Data science is a fascinating field with many applications across different industries.", - }, - {"role": "user", "content": "What's the best way to learn a new language?"}, - { - "role": "assistant", - "content": "Immersion and consistent practice are key, along with finding methods that work for your learning style.", - }, - {"role": "user", "content": "I enjoy attending workshops and conferences."}, - { - "role": "assistant", - "content": "Workshops are great for hands-on learning and networking with like-minded people.", - }, - # Hobbies and interests - {"role": "user", "content": "I collect vintage postcards."}, - { - "role": "assistant", - "content": "What a unique hobby! Postcards offer glimpses into different times and places.", - }, - {"role": "user", "content": "Do you garden?"}, - { - "role": "assistant", - "content": "I appreciate gardening, though I'm more of a plant observer than a gardener myself.", - }, - {"role": "user", "content": "I love solving puzzles and brain teasers."}, - { - "role": "assistant", - "content": "Puzzles are excellent for keeping the mind sharp and developing problem-solving skills.", - }, - # Future and goals - {"role": "user", "content": "What are your goals for next year?"}, - { - "role": "assistant", - "content": "I aim to continue learning and growing, helping more people with their challenges.", - }, - {"role": "user", "content": "I want to start my own business someday."}, - { - "role": "assistant", - "content": "Entrepreneurship is exciting! 
It requires courage, planning, and persistence.",
-        },
-        {"role": "user", "content": "Do you believe in setting New Year's resolutions?"},
-        {
-            "role": "assistant",
-            "content": "I think any time is good for setting positive goals and making positive changes.",
-        },
-        # Philosophy and life
-        {"role": "user", "content": "What's the meaning of life to you?"},
-        {
-            "role": "assistant",
-            "content": "I believe it's about learning, growing, and making positive connections with others.",
-        },
-        {"role": "user", "content": "I try to practice gratitude daily."},
-        {
-            "role": "assistant",
-            "content": "Gratitude is such a powerful practice that can transform your perspective on life.",
-        },
-        {"role": "user", "content": "What's your biggest fear?"},
-        {
-            "role": "assistant",
-            "content": "I think fear of not being able to help when someone really needs it would be challenging.",
-        },
-        # Technology and daily life
-        {"role": "user", "content": "How has technology changed your daily routine?"},
-        {
-            "role": "assistant",
-            "content": "Technology has made many tasks more efficient and connected me with people worldwide.",
-        },
-        {"role": "user", "content": "I'm trying to reduce my screen time."},
-        {
-            "role": "assistant",
-            "content": "That's a healthy goal! Finding balance with technology is important for well-being.",
-        },
-        {"role": "user", "content": "Do you use social media?"},
-        {
-            "role": "assistant",
-            "content": "I think social media can be a great tool for connection when used mindfully.",
-        },
-        # Final thoughts
-        {"role": "user", "content": "This has been a great conversation!"},
-        {
-            "role": "assistant",
-            "content": "I've really enjoyed our chat too! It's wonderful to explore so many different topics together.",
-        },
-    ],
-    user_id=user_id,
-    mem_cube_id=register_result["default_cube_id"],
-)
-
-retrieved_memories = memory.search(query="What do you like?", user_id=user_id)
-print(
-    f"len_pref_memories: {len(retrieved_memories['pref_mem'][0]['memories'])}"
-    if retrieved_memories["pref_mem"]
-    else "len_pref_memories: 0"
-)
diff --git a/examples/mem_os/simple_treekvcache_memos.py b/examples/mem_os/simple_treekvcache_memos.py
deleted file mode 100644
index 2f3408afd..000000000
--- a/examples/mem_os/simple_treekvcache_memos.py
+++ /dev/null
@@ -1,46 +0,0 @@
-import os
-
-from memos.configs.mem_cube import GeneralMemCubeConfig
-from memos.configs.mem_os import MOSConfig
-from memos.mem_cube.general import GeneralMemCube
-from memos.mem_os.main import MOS
-
-
-# 1. init MOSConfig for the default user
-# note: the kv cache requires the chat LLM to use the huggingface backend,
-# and a GPU must be selected
-os.environ["CUDA_VISIBLE_DEVICES"] = "1"
-user_id = "root"
-cube_id = "root/mem_cube_kv_cache"
-tmp_cube_path = "/tmp/default/mem_cube_5"
-
-mos_config = MOSConfig.from_json_file("examples/data/config/simple_treekvcache_memos_config.json")
-mos = MOS(mos_config)
-
-
-# 2. Initialize memory cube
-cube_config = GeneralMemCubeConfig.from_json_file(
-    "examples/data/config/simple_treekvcache_cube_config.json"
-)
-
-# 3. Initialize the MemCube with the configuration and dump cube
-mem_cube = GeneralMemCube(cube_config)
-try:
-    mem_cube.dump(tmp_cube_path)
-except Exception as e:
-    print(e)
-
-# 4. Register the MemCube explicitly
-mos.register_mem_cube(tmp_cube_path, mem_cube_id=cube_id, user_id=user_id)
-
-# 5. Extract kv memory and add kv cache_mem
-extract_kvmem = mos.mem_cubes[cube_id].act_mem.extract("I like football")
-mos.mem_cubes[cube_id].act_mem.add([extract_kvmem])
-
-# 6. Chat
-while True:
-    user_input = input("👤 [You] ").strip()
-    if user_input.lower() in ["quit", "exit"]:
-        break
-    print()
-    response = mos.chat(user_input)
-    print(f"🤖 [Assistant] {response}\n")
-print("📢 [System] MemChat has stopped.")
diff --git a/examples/mem_os/simple_vllm_memos.py b/examples/mem_os/simple_vllm_memos.py
deleted file mode 100644
index ffa7a3a24..000000000
--- a/examples/mem_os/simple_vllm_memos.py
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/env python3
-"""
-Simple example demonstrating how to use VLLMLLM with an existing vLLM server.
-Requires a vLLM server to be running.
-"""
-
-from typing import TYPE_CHECKING
-
-from memos.configs.llm import VLLMLLMConfig
-from memos.llms.vllm import VLLMLLM
-
-
-if TYPE_CHECKING:
-    from memos.types import MessageDict
-
-
-def main():
-    """Main function demonstrating VLLMLLM usage."""
-
-    # Configuration for connecting to an existing vLLM server
-    config = VLLMLLMConfig(
-        model_name_or_path="/mnt/afs/models/hf_models/Qwen2.5-7B",  # MUST MATCH the --model arg of the vLLM server
-        api_key="",  # Not needed for a local server
-        api_base="http://localhost:8088/v1",  # vLLM server address with /v1
-        temperature=0.7,
-        max_tokens=512,
-        top_p=0.9,
-        model_schema="memos.configs.llm.VLLMLLMConfig",
-    )
-
-    # Initialize VLLM LLM
-    print("Initializing VLLM LLM...")
-    llm = VLLMLLM(config)
-
-    # Test messages for KV cache building
-    print("\nBuilding KV cache for system messages...")
-    system_messages: list[MessageDict] = [
-        {"role": "system", "content": "You are a helpful AI assistant."},
-        {"role": "user", "content": "Hello! Can you tell me about vLLM?"},
-    ]
-    try:
-        prompt = llm.build_vllm_kv_cache(system_messages)
-        print(f"✓ KV cache built successfully for prompt: '{prompt[:100]}...'")
-    except Exception as e:
-        print(f"✗ Failed to build KV cache: {e}")
-
-    # Test with different messages for generation
-    print("\nGenerating response...")
-    user_messages: list[MessageDict] = [
-        {"role": "system", "content": "You are a helpful AI assistant. Please introduce yourself."},
-        {"role": "user", "content": "What are the benefits of using vLLM?"},
-    ]
-    try:
-        response = llm.generate(user_messages)
-        print(f"Response: {response}")
-    except Exception as e:
-        print(f"Error generating response: {e}")
-
-
-if __name__ == "__main__":
-    main()
diff --git a/examples/mem_reader/README.md b/examples/mem_reader/README.md
new file mode 100644
index 000000000..3677d0502
--- /dev/null
+++ b/examples/mem_reader/README.md
@@ -0,0 +1,90 @@
+# MemReader Examples
+
+This directory contains examples and sample code demonstrating how to use the `MemReader` module in MemOS. `MemReader` is responsible for parsing various types of input data (text, chat history, files, images) into structured memory formats.
+
+## 📂 Directory Structure
+
+```text
+examples/mem_reader/
+├── builders.py            # Factory functions to initialize Reader components
+├── parser_demos/          # Demos for individual parser components
+│   ├── demo_image.py      # Example: Parsing image content
+│   ├── demo_string.py     # Example: Parsing string content
+│   └── ...                # Other specific parser demos
+├── runners/               # Main execution scripts for running examples
+│   ├── run_simple.py      # Runner for SimpleStructMemReader
+│   └── run_multimodal.py  # Runner for MultiModalStructMemReader
+├── samples.py             # Sample data (chat logs, test cases)
+├── settings.py            # Configuration management (loads from .env)
+└── utils.py               # Utility functions (printing, formatting)
+```
+
+## 🚀 Getting Started
+
+### 1. Configuration
+
+Before running the examples, ensure you have configured your environment variables. Copy the `.env.example` file in the project root to `.env` and fill in the necessary API keys.
+
+The `settings.py` file loads these configurations. Key variables include:
+- `OPENAI_API_KEY`: For LLM and Embeddings.
+- `MOS_CHAT_MODEL`: Default model for chat (e.g., `gpt-4o`).
+- `MOS_EMBEDDER_MODEL`: Model for embeddings.
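+
+For reference, a minimal `.env` for the default OpenAI-plus-Ollama setup might look like the sketch below. The values are placeholders, and the exact variable set is defined in `settings.py`; the commented lines show one way to switch the reader to a local Ollama backend instead (see Customization below):
+
+```bash
+OPENAI_API_KEY=sk-xxxxx
+OPENAI_API_BASE=https://api.openai.com/v1
+MOS_CHAT_MODEL=gpt-4o
+MOS_EMBEDDER_MODEL=nomic-embed-text:latest
+# Optional: use a local Ollama backend for the mem reader instead of OpenAI
+# MEM_READER_LLM_BACKEND=ollama
+# OLLAMA_API_BASE=http://localhost:11434
+```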
+
+### 2. Running Examples
+
+We provide two main runners to demonstrate different capabilities:
+
+#### A. Simple Reader (`run_simple.py`)
+
+Demonstrates the `SimpleStructMemReader`, which is optimized for text-based chat history and documents.
+
+**Features:**
+- **Fast Mode**: Quick parsing without LLM (regex/rule-based).
+- **Fine Mode**: Detailed parsing using LLM.
+- **Transfer**: Converting Fast memories to Fine memories.
+- **Document Parsing**: Reading text files.
+
+**Usage:**
+```bash
+python -m examples.mem_reader.runners.run_simple
+```
+
+#### B. Multimodal Reader (`run_multimodal.py`)
+
+Demonstrates the `MultiModalStructMemReader`, which handles complex inputs like images, files, and mixed content types.
+
+**Features:**
+- Supports **String**, **Multimodal**, and **Raw** input types.
+- Configurable output format (Text/JSON).
+- Selectable test cases.
+
+**Usage:**
+```bash
+# Run all examples in 'fine' mode
+python -m examples.mem_reader.runners.run_multimodal --example all --mode fine
+
+# Run specific example (e.g., multimodal inputs)
+python -m examples.mem_reader.runners.run_multimodal --example multimodal
+
+# View help for more options
+python -m examples.mem_reader.runners.run_multimodal --help
+```
+
+### 3. Parser Demos
+
+If you want to understand how specific parsers work internally (e.g., how the system parses a User message vs. an Assistant message), check the `parser_demos/` directory.
+
+**Usage:**
+```bash
+python -m examples.mem_reader.parser_demos.demo_user
+python -m examples.mem_reader.parser_demos.demo_image
+```
+
+## 🧩 Key Components
+
+- **`SimpleStructMemReader`**: Best for standard text-based chat applications. It's lightweight and efficient.
+- **`MultiModalStructMemReader`**: Designed for advanced agents that handle images, file attachments, and complex tool interactions.
+
+## 🛠️ Customization
+
+You can modify `settings.py` or `builders.py` to change the underlying LLM backend (e.g., switching from OpenAI to Ollama) or adjust chunking strategies.
diff --git a/examples/mem_reader/builders.py b/examples/mem_reader/builders.py
new file mode 100644
index 000000000..89c39a5de
--- /dev/null
+++ b/examples/mem_reader/builders.py
@@ -0,0 +1,78 @@
+"""Builder functions for initializing MemReader components.
+
+This module provides factory functions to create configured instances of
+LLMs, Embedders, and MemReaders, simplifying the setup process in examples.
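+
+Typical usage (a minimal sketch; `scene_data` stands in for chat messages
+shaped like the samples in `samples.py`, and the `get_memory` call follows
+the pattern used by the runner scripts):
+
+    from examples.mem_reader.builders import build_simple_reader
+
+    reader = build_simple_reader()
+    info = {"user_id": "demo_user", "session_id": "demo_session"}  # placeholder ids
+    memories = reader.get_memory(scene_data, "chat", info, mode="fast")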
+""" + +from typing import Any + +from memos.configs.embedder import EmbedderConfigFactory +from memos.configs.llm import LLMConfigFactory +from memos.configs.mem_reader import ( + MultiModalStructMemReaderConfig, + SimpleStructMemReaderConfig, +) +from memos.configs.parser import ParserConfigFactory +from memos.embedders.factory import EmbedderFactory +from memos.llms.factory import LLMFactory +from memos.mem_reader.multi_modal_struct import MultiModalStructMemReader +from memos.mem_reader.simple_struct import SimpleStructMemReader +from memos.parsers.factory import ParserFactory + +from .settings import get_embedder_config, get_llm_config, get_reader_config + + +def build_llm_and_embedder() -> tuple[Any, Any]: + """Initialize and return configured LLM and Embedder instances.""" + llm_config_dict = get_llm_config() + embedder_config_dict = get_embedder_config() + + llm_config = LLMConfigFactory.model_validate(llm_config_dict) + embedder_config = EmbedderConfigFactory.model_validate(embedder_config_dict) + + llm = LLMFactory.from_config(llm_config) + embedder = EmbedderFactory.from_config(embedder_config) + + return embedder, llm + + +def build_file_parser() -> Any: + """Initialize and return a configured file parser (MarkItDown). + + Returns: + Configured parser instance or None if initialization fails. + """ + try: + parser_config = ParserConfigFactory.model_validate( + { + "backend": "markitdown", + "config": {}, + } + ) + return ParserFactory.from_config(parser_config) + except Exception as e: + print(f"⚠️ Warning: Could not initialize file parser: {e}") + return None + + +def build_simple_reader() -> SimpleStructMemReader: + """Initialize and return a configured SimpleStructMemReader. + + Returns: + Configured SimpleStructMemReader instance. + """ + config_dict = get_reader_config() + # Simple reader doesn't need file parser + config = SimpleStructMemReaderConfig(**config_dict) + return SimpleStructMemReader(config) + + +def build_multimodal_reader() -> MultiModalStructMemReader: + """Initialize and return a configured MultiModalStructMemReader. + + Returns: + Configured MultiModalStructMemReader instance. + """ + config_dict = get_reader_config() + config = MultiModalStructMemReaderConfig(**config_dict) + return MultiModalStructMemReader(config) diff --git a/examples/mem_reader/compare_simple_vs_multimodal.py b/examples/mem_reader/compare_simple_vs_multimodal.py deleted file mode 100644 index fa12ac211..000000000 --- a/examples/mem_reader/compare_simple_vs_multimodal.py +++ /dev/null @@ -1,461 +0,0 @@ -"""Compare SimpleStructMemReader and MultiModalStructMemReader outputs. - -This example demonstrates the differences between simple_struct and multi_modal_struct -in both fast and fine modes. 
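-
-Usage (a typical invocation, assuming the repo root as the working directory):
-    python examples/mem_reader/compare_simple_vs_multimodal.py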
-""" - -import os -import sys - -from pathlib import Path - -from dotenv import load_dotenv - -from memos.configs.mem_reader import ( - MultiModalStructMemReaderConfig, - SimpleStructMemReaderConfig, -) -from memos.memories.textual.item import TextualMemoryItem - - -# Add src directory to path -project_root = Path(__file__).parent.parent.parent -src_path = project_root / "src" -if str(src_path) not in sys.path: - sys.path.insert(0, str(src_path)) - -# Load environment variables -load_dotenv() - - -def get_reader_config() -> dict: - """Get reader configuration from environment variables.""" - openai_api_key = os.getenv("OPENAI_API_KEY") - openai_base_url = os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1") - ollama_api_base = os.getenv("OLLAMA_API_BASE", "http://localhost:11434") - - # LLM config - llm_backend = os.getenv("MEM_READER_LLM_BACKEND", "openai") - if llm_backend == "ollama": - llm_config = { - "backend": "ollama", - "config": { - "model_name_or_path": os.getenv("MEM_READER_LLM_MODEL", "qwen3:0.6b"), - "api_base": ollama_api_base, - "temperature": float(os.getenv("MEM_READER_LLM_TEMPERATURE", "0.0")), - "remove_think_prefix": os.getenv( - "MEM_READER_LLM_REMOVE_THINK_PREFIX", "true" - ).lower() - == "true", - "max_tokens": int(os.getenv("MEM_READER_LLM_MAX_TOKENS", "8192")), - }, - } - else: # openai - llm_config = { - "backend": "openai", - "config": { - "model_name_or_path": os.getenv("MEM_READER_LLM_MODEL", "gpt-4o-mini"), - "api_key": openai_api_key or os.getenv("MEMRADER_API_KEY", "EMPTY"), - "api_base": openai_base_url, - "temperature": float(os.getenv("MEM_READER_LLM_TEMPERATURE", "0.5")), - "remove_think_prefix": os.getenv( - "MEM_READER_LLM_REMOVE_THINK_PREFIX", "true" - ).lower() - == "true", - "max_tokens": int(os.getenv("MEM_READER_LLM_MAX_TOKENS", "8192")), - }, - } - - # Embedder config - embedder_backend = os.getenv( - "MEM_READER_EMBEDDER_BACKEND", os.getenv("MOS_EMBEDDER_BACKEND", "ollama") - ) - if embedder_backend == "universal_api": - embedder_config = { - "backend": "universal_api", - "config": { - "provider": os.getenv( - "MEM_READER_EMBEDDER_PROVIDER", - os.getenv("MOS_EMBEDDER_PROVIDER", "openai"), - ), - "api_key": os.getenv( - "MEM_READER_EMBEDDER_API_KEY", - os.getenv("MOS_EMBEDDER_API_KEY", openai_api_key or "sk-xxxx"), - ), - "model_name_or_path": os.getenv( - "MEM_READER_EMBEDDER_MODEL", - os.getenv("MOS_EMBEDDER_MODEL", "text-embedding-3-large"), - ), - "base_url": os.getenv( - "MEM_READER_EMBEDDER_API_BASE", - os.getenv("MOS_EMBEDDER_API_BASE", openai_base_url), - ), - }, - } - else: # ollama - embedder_config = { - "backend": "ollama", - "config": { - "model_name_or_path": os.getenv( - "MEM_READER_EMBEDDER_MODEL", - os.getenv("MOS_EMBEDDER_MODEL", "nomic-embed-text:latest"), - ), - "api_base": ollama_api_base, - }, - } - - return { - "llm": llm_config, - "embedder": embedder_config, - "chunker": { - "backend": "sentence", - "config": { - "tokenizer_or_token_counter": "gpt2", - "chunk_size": 512, - "chunk_overlap": 128, - "min_sentences_per_chunk": 1, - }, - }, - } - - -def print_memory_item(item: TextualMemoryItem, prefix: str = "", max_length: int = 500): - """Print a memory item in a readable format.""" - print(f"{prefix}Memory ID: {item.id}") - print(f"{prefix}Memory Type: {item.metadata.memory_type}") - print(f"{prefix}Tags: {item.metadata.tags}") - memory_preview = ( - item.memory[:max_length] + "..." 
if len(item.memory) > max_length else item.memory - ) - print(f"{prefix}Memory: {memory_preview}") - print(f"{prefix}Key: {item.metadata.key}") - if item.metadata.background: - bg_preview = ( - item.metadata.background[:max_length] + "..." - if len(item.metadata.background) > max_length - else item.metadata.background - ) - print(f"{prefix}Background: {bg_preview}") - print(f"{prefix}Sources count: {len(item.metadata.sources) if item.metadata.sources else 0}") - print() - - -def compare_readers(): - """Compare SimpleStructMemReader and MultiModalStructMemReader.""" - print("=" * 80) - print("Comparing SimpleStructMemReader vs MultiModalStructMemReader") - print("=" * 80) - print() - - # Test data - simple chat messages - scene_data = [ - [ - {"role": "user", "chat_time": "3 May 2025", "content": "I'm feeling a bit down today."}, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "I'm sorry to hear that. Do you want to talk about what's been going on?", - }, - { - "role": "user", - "chat_time": "3 May 2025", - "content": "It's just been a tough couple of days, you know? Everything feels a bit overwhelming, and I just can't seem to shake it off.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "It sounds like you're going through a lot right now. Sometimes it helps to talk things out. Is there something specific that's been weighing on you, or is it more of a general feeling?", - }, - { - "role": "user", - "chat_time": "3 May 2025", - "content": "It's a mix, I guess. Work's been really stressful, and on top of that, I've been feeling kinda disconnected from the people around me.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "That can be really draining, especially when you're feeling isolated on top of the stress. Do you think there's something from your past that's contributing to how you're feeling now? Sometimes our emotions are tied to older experiences.", - }, - { - "role": "user", - "chat_time": "3 May 2025", - "content": "Yeah, now that you mention it… I've been thinking a lot about my childhood lately. I didn't have the easiest upbringing. My parents were always busy, and I often felt like I had to figure things out on my own. I remember one winter, I got really sick with a high fever. My parents were both working, and I didn't want to bother them. I just lay in bed, crying by myself, not knowing what to do.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "That must have been very lonely. Growing up without that support can really make you feel like you have to handle everything on your own. How do you think that experience has affected you now?", - }, - { - "role": "user", - "chat_time": "3 May 2025", - "content": "Yeah, it was really lonely. I remember spending a lot of time doing things on my own, like drawing, because it was my only way to escape. I used to draw animals, and I created my own little world on paper. It was peaceful in a way, just me and my imagination, with no outside distractions.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "Drawing sounds like a beautiful way to express yourself and find some peace in that solitude. It must have been a very quiet and private world for you. Do you think that sense of loneliness from back then has shaped how you connect with others today?", - }, - { - "role": "user", - "chat_time": "3 May 2025", - "content": "I think it has. I've always found it hard to let others in. 
I feel like I have to handle everything on my own. If I rely on someone else, I feel vulnerable, and I start to feel that loneliness again.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "It sounds like this was a protective mechanism that helped you cope with your feelings of loneliness back then. But as you've grown, it seems like this pattern might not be serving you as well anymore. Have you thought about trying to break this cycle, or finding new ways to connect with others?", - }, - { - "role": "user", - "chat_time": "3 May 2025", - "content": "I've thought about it, though it's hard. But I also know I can't keep going like this forever. Maybe it's time to seek some help.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "It's a big step, but a positive one. Exploring your emotions and past experiences could really help you understand how they've shaped your present. Therapy could be a great place to start, as it would help you connect with both yourself and others. Would you be open to trying it?", - }, - { - "role": "user", - "chat_time": "3 May 2025", - "content": "I'm open to trying. Thank you for encouraging me. It feels like it might be a good starting point for me.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "I'm really glad to hear that. Taking that first step is always difficult, but you've made an important decision. If you need help finding the right therapist, or just want to talk, I'm here for you.", - }, - ], - [ - { - "role": "user", - "chat_time": "3 May 2025", - "content": "This week I've been feeling a bit off, especially when playing tennis. My body just doesn't feel right.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "It sounds like you've been having some physical discomfort lately. Where specifically are you feeling unwell? Is there something particular about tennis that's been difficult?", - }, - { - "role": "user", - "chat_time": "3 May 2025", - "content": "I just feel a bit fatigued, and every time I play tennis, my movements feel off. I used to play pretty well, but lately, I've been feeling weak and my movements aren't as coordinated as they used to be.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "Physical discomfort can definitely affect performance, especially in a sport like tennis that requires quick reactions and coordination. Have you noticed anything specific that might be causing these changes? Could it be overtraining, or is there another physical issue making you feel off?", - }, - { - "role": "user", - "chat_time": "3 May 2025", - "content": "I think it might be due to stress and lack of sleep recently. I just feel drained all the time. Plus, I've been frustrated with my tennis performance, which makes me feel even worse, like I'm stuck in a vicious cycle.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "Stress and poor sleep quality can definitely affect your physical performance, especially in sports. When you're also feeling down about your performance, it's easy to fall into a negative cycle. Have you tried anything to relieve some of the stress, or to improve your sleep?", - }, - { - "role": "user", - "chat_time": "3 May 2025", - "content": "I've tried some relaxation techniques, like deep breathing and simple meditation, but it doesn't seem to help much. 
I still can't focus during tennis, and I feel like my mind is distracted by other things.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "Relaxation techniques like deep breathing and meditation can help reduce stress, but sometimes we need a more structured approach to improve both our physical and mental state. For example, you might want to focus on specific aspects of your tennis training, or adjust your rest and recovery time. Have you thought about setting smaller goals for yourself? Like focusing on a specific tennis move each day, rather than expecting perfection right away? That might help you gradually regain confidence.", - }, - { - "role": "user", - "chat_time": "3 May 2025", - "content": "That's a good idea. I think I've been expecting too much from myself, which just makes me more anxious. Maybe I can start with small practice sessions and gradually find my rhythm again.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "Exactly! Setting small and clear goals will help reduce pressure and give you a sense of accomplishment. And don't forget that rest is just as important—taking time to recover will help you feel better physically and mentally. Both physical and mental recovery are key to improving your performance and health.", - }, - { - "role": "user", - "chat_time": "3 May 2025", - "content": "Thanks, I feel like I need to rethink a few things. Maybe I really need to be kinder to myself and take things step by step.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "You're on the right track. Rethinking things and giving yourself space and patience is so important. You've already taken the first step, now just keep moving forward, one step at a time. If you need anything, I'm always here to help.", - }, - ], - [ - { - "role": "user", - "chat_time": "3 May 2025", - "content": "Yesterday at 3 PM I had a meeting with my team about the new project. We discussed the timeline and I'm concerned we won't meet the deadline of December 15th.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "I understand your concern about the project timeline. What specific challenges do you think might prevent your team from meeting the December 15th deadline?", - }, - { - "role": "user", - "chat_time": "3 May 2025", - "content": "We have too many requirements to implement in just two weeks. The backend team needs at least until December 10th to finish their part, which leaves almost no time for testing before the deadline.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "That's definitely a tight timeline. Have you considered prioritizing the requirements to focus on the most critical features first? You might also want to discuss with stakeholders about possibly extending the deadline or reducing the scope for this initial release.", - }, - { - "role": "user", - "chat_time": "3 May 2025", - "content": "That's a good point. We have a follow-up meeting scheduled for tomorrow morning at 9:30 AM. I'll suggest we prioritize features and maybe propose pushing the deadline to January 5th next year.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "That sounds like a solid approach. Coming to the meeting with both a prioritization strategy and a specific alternative deadline shows you've thought through the problem carefully. 
It might also help to bring some data about similar past projects and how long they took to complete, if you have that information available.", - }, - ], - ] - - info = {"user_id": "test_user", "session_id": "test_session"} - - # Get config - config_dict = get_reader_config() - - # Initialize readers - print("Initializing readers...") - simple_config = SimpleStructMemReaderConfig(**config_dict) - multimodal_config = MultiModalStructMemReaderConfig(**config_dict) - - from memos.mem_reader.multi_modal_struct import MultiModalStructMemReader - from memos.mem_reader.simple_struct import SimpleStructMemReader - - simple_reader = SimpleStructMemReader(simple_config) - multimodal_reader = MultiModalStructMemReader(multimodal_config) - print("Readers initialized.\n") - print(f"Using LLM: {config_dict['llm']['backend']}") - print(f"Using Embedder: {config_dict['embedder']['backend']}") - print() - - # Test FAST mode - print("=" * 80) - print("FAST MODE COMPARISON") - print("=" * 80) - print() - - print("-" * 80) - print("SimpleStructMemReader (FAST):") - print("-" * 80) - try: - simple_fast = simple_reader.get_memory(scene_data, "chat", info, mode="fast") - if simple_fast and len(simple_fast) > 0: - for scene_idx, scene_memories in enumerate(simple_fast): - print(f"\nScene {scene_idx + 1}:") - for item_idx, item in enumerate(scene_memories): - print_memory_item(item, prefix=f" [{item_idx + 1}] ") - else: - print(" No memories generated.") - except Exception as e: - print(f" Error: {e}") - import traceback - - traceback.print_exc() - - print("\n" + "-" * 80) - print("MultiModalStructMemReader (FAST):") - print("-" * 80) - try: - multimodal_fast = multimodal_reader.get_memory(scene_data, "chat", info, mode="fast") - if multimodal_fast and len(multimodal_fast) > 0: - for scene_idx, scene_memories in enumerate(multimodal_fast): - print(f"\nScene {scene_idx + 1}:") - for item_idx, item in enumerate(scene_memories): - print_memory_item(item, prefix=f" [{item_idx + 1}] ") - else: - print(" No memories generated.") - except Exception as e: - print(f" Error: {e}") - import traceback - - traceback.print_exc() - - # Test FINE mode - print("\n" + "=" * 80) - print("FINE MODE COMPARISON") - print("=" * 80) - print() - - print("-" * 80) - print("SimpleStructMemReader (FINE):") - print("-" * 80) - try: - simple_fine = simple_reader.get_memory(scene_data, "chat", info, mode="fine") - if simple_fine and len(simple_fine) > 0: - for scene_idx, scene_memories in enumerate(simple_fine): - print(f"\nScene {scene_idx + 1}:") - for item_idx, item in enumerate(scene_memories): - print_memory_item(item, prefix=f" [{item_idx + 1}] ") - else: - print(" No memories generated.") - except Exception as e: - print(f" Error: {e}") - import traceback - - traceback.print_exc() - - print("\n" + "-" * 80) - print("MultiModalStructMemReader (FINE):") - print("-" * 80) - try: - multimodal_fine = multimodal_reader.get_memory(scene_data, "chat", info, mode="fine") - if multimodal_fine and len(multimodal_fine) > 0: - for scene_idx, scene_memories in enumerate(multimodal_fine): - print(f"\nScene {scene_idx + 1}:") - for item_idx, item in enumerate(scene_memories): - print_memory_item(item, prefix=f" [{item_idx + 1}] ") - else: - print(" No memories generated.") - except Exception as e: - print(f" Error: {e}") - import traceback - - traceback.print_exc() - - # Summary comparison - print("\n" + "=" * 80) - print("SUMMARY") - print("=" * 80) - print() - - def count_memories(memories_list): - """Count total memories across all scenes.""" - if 
not memories_list: - return 0 - return sum(len(scene) for scene in memories_list if scene) - - simple_fast_count = count_memories(simple_fast) if "simple_fast" in locals() else 0 - multimodal_fast_count = count_memories(multimodal_fast) if "multimodal_fast" in locals() else 0 - simple_fine_count = count_memories(simple_fine) if "simple_fine" in locals() else 0 - multimodal_fine_count = count_memories(multimodal_fine) if "multimodal_fine" in locals() else 0 - - print(f"SimpleStructMemReader FAST: {simple_fast_count} memories") - print(f"MultiModalStructMemReader FAST: {multimodal_fast_count} memories") - print(f"SimpleStructMemReader FINE: {simple_fine_count} memories") - print(f"MultiModalStructMemReader FINE: {multimodal_fine_count} memories") - print() - - print("Key Differences:") - print("1. Both readers should produce similar results for simple text messages") - print("2. MultiModalStructMemReader can handle multimodal content (images, files, etc.)") - print("3. FINE mode uses LLM to extract structured memories from aggregated windows") - print("4. FAST mode directly aggregates messages into windows without LLM processing") - - -if __name__ == "__main__": - compare_readers() diff --git a/examples/mem_reader/multimodal_struct_reader.py b/examples/mem_reader/multimodal_struct_reader.py deleted file mode 100644 index 790b13f85..000000000 --- a/examples/mem_reader/multimodal_struct_reader.py +++ /dev/null @@ -1,1042 +0,0 @@ -#!/usr/bin/env python3 -""" -MultiModalStructMemReader Example Script - -This script demonstrates various use cases for MultiModalStructMemReader, -including different message types, modes (fast/fine), and output formats. - -Usage: - python multimodal_struct_reader.py --example all - python multimodal_struct_reader.py --example string_message --mode fast - python multimodal_struct_reader.py --example multimodal --format json -""" - -import argparse -import json -import os -import sys -import time - -from pathlib import Path -from typing import Any - -from dotenv import load_dotenv - -from memos.configs.mem_reader import MultiModalStructMemReaderConfig -from memos.mem_reader.multi_modal_struct import MultiModalStructMemReader -from memos.memories.textual.item import TextualMemoryItem - - -# Add src directory to path -src_path = Path(__file__).parent.parent.parent / "src" -sys.path.insert(0, str(src_path)) - -# Load environment variables -load_dotenv() - - -# ============================================================================ -# Test Case Definitions -# ============================================================================ - - -class TestCase: - """Base class for test cases.""" - - def __init__( - self, - name: str, - description: str, - scene_data: Any, - expected_count: dict[str, int] | None = None, - ): - """ - Initialize a test case. 
- - Args: - name: Test case name - description: Test case description - scene_data: Scene data to test - expected_count: Expected memory count for each mode (optional) - """ - self.name = name - self.description = description - self.scene_data = scene_data - self.expected_count = expected_count or {} - - def get_info(self) -> dict[str, Any]: - """Get info dict for this test case.""" - return { - "user_id": "test_user", - "session_id": f"session_{self.name}", - "test_case": self.name, - } - - -# String message test cases -STRING_MESSAGE_CASES = [ - TestCase( - name="string_simple", - description="Simple string message", - scene_data=["今天心情不错,喝了咖啡。"], - expected_count={"fast": 1, "fine": 1}, # StringParser returns [] in - # fast mode - ), - TestCase( - name="string_multiple", - description="Multiple string messages", - scene_data=[ - "这是第一条消息。", - "这是第二条消息。", - "这是第三条消息。", - ], - ), -] - -# Standard chat message test cases -CHAT_MESSAGE_CASES = [ - TestCase( - name="chat_simple", - description="Simple chat conversation", - scene_data=[ - [ - { - "role": "user", - "content": "Hello, how are you?", - "chat_time": "2025-01-01T10:00:00Z", - }, - { - "role": "assistant", - "content": "I'm doing well, thank you!", - "chat_time": "2025-01-01T10:00:01Z", - }, - ] - ], - ), - TestCase( - name="chat_with_system", - description="Chat with system message", - scene_data=[ - [ - { - "role": "system", - "content": [{"type": "text", "text": "You are a helpful assistant."}], - "chat_time": "2025-01-01T10:00:00Z", - }, - { - "role": "user", - "content": "What's the weather?", - "chat_time": "2025-01-01T10:00:01Z", - }, - { - "role": "assistant", - "content": "I don't have access to weather data.", - "chat_time": "2025-01-01T10:00:02Z", - }, - ] - ], - ), - TestCase( - name="chat_long_conversation", - description="Long conversation with multiple turns", - scene_data=[ - [ - { - "role": "user", - "chat_time": "3 May 2025", - "content": "I'm feeling a bit down today.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "I'm sorry to hear that. 
Do you want to talk about what's been going on?", - }, - { - "role": "user", - "chat_time": "3 May 2025", - "content": "It's just been a tough couple of days.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "It sounds like you're going through a lot right now.", - }, - ] - ], - ), - TestCase( - name="chat_with_list_content", - description="", - scene_data=[ - [ - { - "role": "user", - "content": [ - { - "type": "text", - "text": "我是测试base64", - }, - { - "type": "file", - "file": { - "file_data": "Hello World", - "filename": "2102b64c-25a2-481c-a940-4325496baf39.txt", - "file_id": "90ee1bcf-5295-4b75-91a4-23fe1f7ab30a", - }, - }, - { - "type": "image_url", - "image_url": { - "url": "https://play-groud-test-1.oss-cn-shanghai.aliyuncs.com/algorithmImages/2025/12/01/ce545319ba6d4d21a0aebcb75337acc3.jpeg" - }, - }, - ], - "message_id": "1995458892790317057", - } - ] - ], - ), -] - -# Tool-related test cases -TOOL_MESSAGE_CASES = [ - TestCase( - name="tool_assistant_with_calls", - description="Assistant message with tool_calls", - scene_data=[ - [ - { - "role": "assistant", - "content": None, - "tool_calls": [ - { - "id": "tool-call-weather-1", - "type": "function", - "function": { - "name": "get_weather", - "arguments": '{"location": "北京"}', - }, - } - ], - "chat_time": "2025-11-24T10:12:00Z", - "message_id": "assistant-with-call-1", - } - ] - ], - ), - TestCase( - name="tool_with_result", - description="Tool call with result message", - scene_data=[ - [ - { - "role": "assistant", - "content": None, - "tool_calls": [ - { - "id": "tool-call-weather-1", - "type": "function", - "function": { - "name": "get_weather", - "arguments": '{"location": "北京"}', - }, - } - ], - "chat_time": "2025-11-24T10:12:00Z", - }, - { - "role": "tool", - "content": "北京今天天气晴朗,温度25°C,湿度60%。", - "tool_call_id": "tool-call-weather-1", - "chat_time": "2025-11-24T10:12:05Z", - }, - ] - ], - ), - TestCase( - name="tool_custom_format", - description="Custom tool format (tool_description, tool_input, tool_output)", - scene_data=[ - [ - { - "type": "tool_description", - "name": "get_weather", - "description": "获取指定地点的当前天气信息", - "parameters": { - "type": "object", - "properties": {"location": {"type": "string", "description": "城市名称"}}, - "required": ["location"], - }, - }, - { - "type": "tool_input", - "call_id": "call_123", - "name": "get_weather", - "argument": {"location": "北京"}, - }, - { - "type": "tool_output", - "call_id": "call_123", - "name": "get_weather", - "output": {"weather": "晴朗", "temperature": 25, "humidity": 60}, - }, - ] - ], - ), -] - -# Multimodal message test cases -MULTIMODAL_MESSAGE_CASES = [ - TestCase( - name="multimodal_text_image", - description="User message with text and image", - scene_data=[ - [ - { - "role": "user", - "content": [ - {"type": "text", "text": "帮我看看这张图片大概是什么内容?"}, - { - "type": "image_url", - "image_url": { - "url": "https://example.com/mountain_lake.jpg", - "detail": "high", - }, - }, - ], - "chat_time": "2025-11-24T10:20:00Z", - "message_id": "mm-img-1", - } - ] - ], - ), - TestCase( - name="multimodal_text_file", - description="User message with text and file", - scene_data=[ - [ - { - "role": "user", - "content": [ - {"type": "text", "text": "请阅读这个PDF,总结里面的要点。"}, - {"type": "file", "file": {"file_id": "file_123", "filename": "report.pdf"}}, - ], - "chat_time": "2025-11-24T10:21:00Z", - "message_id": "mm-file-1", - } - ] - ], - ), - TestCase( - name="oss_text_file", - description="User message with text and file", - scene_data=[ - [ - { - "role": "user", 
- "content": [ - {"type": "text", "text": "请阅读这个PDF,总结里面的要点。"}, - { - "type": "file", - "file": { - "file_id": "file_123", - "filename": "report.pdf", - "file_data": "@http://139.196.232.20:9090/graph-test/algorithm/2025_11_13/1763043889_1763043782_PM1%E8%BD%A6%E9%97%B4PMT%E9%9D%B4%E5%8E%8B%E8%BE%B9%E5%8E%8B%E5%8E%8B%E5%8A%9B%E6%97%A0%E6%B3%95%E5%BB%BA%E7%AB%8B%E6%95%85%E9%9A%9C%E6%8A%A5%E5%91%8A20240720.md", - }, - }, - ], - "chat_time": "2025-11-24T10:21:00Z", - "message_id": "mm-file-1", - } - ] - ], - ), - TestCase( - name="pure_data_file", - description="User message with text and file", - scene_data=[ - [ - { - "role": "user", - "content": [ - {"type": "text", "text": "请阅读这个PDF,总结里面的要点。"}, - { - "type": "file", - "file": { - "file_id": "file_123", - "filename": "report.pdf", - "file_data": "明文记忆是系统与用户对话、操作等交互中动态习得,以及外部提供的、可显式管理的结构化知识形态,通常以文档、提示模板、图结构或用户规则等形式存在。它具备编辑性、可共享性与治理友好性,适合存储需要频繁修改、可审计或多方协同使用的信息。 在 MemOS 中,明文记忆可用于动态生成推理上下文、个性化偏好注入、多代理协作共享等场景,成为连接人类输入与模型认知的关键桥梁。激活记忆是指模型在推理过程中产生的瞬时性认知状态,包括 KV cache、隐藏层激活、注意力权重等中间张量结构。它通常用于维持上下文连续性、对话一致性与行为风格控制。 MemOS 将激活记忆抽象为可调度资源,支持按需唤醒、延迟卸载与结构变换。例如,某些上下文状态可以被压缩为“半结构化记忆片段”用于未来复用,也可以在任务级别转化为参数化模块,支持短期记忆的长期化演进。这一机制为模型行为一致性、风格保持与状态持续性提供了基础。", - }, - }, - ], - "chat_time": "2025-11-24T10:21:00Z", - "message_id": "mm-file-1", - } - ] - ], - ), - TestCase( - name="local_data_file", - description="User message with text and file", - scene_data=[ - [ - { - "role": "user", - "content": [ - {"type": "text", "text": "请阅读这个PDF,总结里面的要点。"}, - { - "type": "file", - "file": { - "file_id": "file_123", - "filename": "report.pdf", - "file_data": "./my_local_file/report.pdf", - }, - }, - ], - "chat_time": "2025-11-24T10:21:00Z", - "message_id": "mm-file-1", - } - ] - ], - ), - TestCase( - name="internet_file", - description="User message with text and file", - scene_data=[ - [ - { - "role": "user", - "content": [ - {"type": "text", "text": "请阅读这个PDF,总结里面的要点。"}, - { - "type": "file", - "file": { - "file_id": "file_123", - "filename": "report.pdf", - "file_data": "https://upload.wikimedia.org/wikipedia/commons/c/cb/NLC416-16jh004830-88775_%E7%B4%85%E6%A8%93%E5%A4%A2.pdf", - }, - }, - ], - "chat_time": "2025-11-24T10:21:00Z", - "message_id": "mm-file-1", - } - ] - ], - ), - TestCase( - name="multimodal_mixed", - description="Mixed multimodal message (text + file + image)", - scene_data=[ - [ - { - "role": "user", - "content": [ - {"type": "text", "text": "请同时分析这个报告和图表。"}, - { - "type": "file", - "file": {"file_id": "file_789", "filename": "analysis_report.pdf"}, - }, - { - "type": "image_url", - "image_url": {"url": "https://example.com/chart.png", "detail": "auto"}, - }, - ], - "chat_time": "2025-11-24T10:23:00Z", - "message_id": "mixed-1", - } - ] - ], - ), - TestCase( - name="multimodal_audio", - description="Audio-only message", - scene_data=[ - [ - { - "role": "user", - "content": [ - { - "type": "input_audio", - "input_audio": {"data": "base64_encoded_audio_here", "format": "mp3"}, - } - ], - "chat_time": "2025-11-24T10:22:00Z", - "message_id": "audio-1", - } - ] - ], - ), -] - -# Raw input item test cases -RAW_INPUT_CASES = [ - TestCase( - name="raw_text_items", - description="Pure text input items without dialog context", - scene_data=[ - [ - {"type": "text", "text": "这是一段独立的文本输入,没有明确的对话上下文。"}, - {"type": "text", "text": "它依然会被抽取和写入明文记忆。"}, - ] - ], - ), - TestCase( - name="raw_file_item", - description="Pure file input by file_id", - scene_data=[ - [{"type": "file", "file": {"file_id": "file_uploaded_123", "filename": "document.pdf"}}] - ], - ), - # File 
parameter test cases - covering all combinations - TestCase( - name="file_only_file_id", - description="File with only file_id parameter", - scene_data=[[{"type": "file", "file": {"file_id": "file_only_id_123"}}]], - ), - TestCase( - name="file_only_filename", - description="File with only filename parameter", - scene_data=[[{"type": "file", "file": {"filename": "document_only.pdf"}}]], - ), - TestCase( - name="file_only_file_data_base64", - description="File with only file_data (base64 encoded)", - scene_data=[ - [ - { - "type": "file", - "file": { - "file_data": "data:application/pdf;base64,JVBERi0xLjQKJdPr6eEKMSAwIG9iago8PAovVHlwZSAvQ2F0YWxvZwovUGFnZXMgMiAwIFIKPj4KZW5kb2JqCjIgMCBvYmoKPDwKL1R5cGUgL1BhZ2VzCi9LaWRzIFszIDAgUl0KL0NvdW50IDEKPD4KZW5kb2JqCjMgMCBvYmoKPDwKL1R5cGUgL1BhZ2UKL1BhcmVudCAyIDAgUgovTWVkaWFCb3ggWzAgMCA2MTIgNzkyXQovUmVzb3VyY2VzIDw8Ci9Gb250IDw8Ci9GMSA0IDAgUgo+Pgo+PgovQ29udGVudHMgNSAwIFIKPj4KZW5kb2JqCjQgMCBvYmoKPDwKL1R5cGUgL0ZvbnQKL1N1YnR5cGUgL1R5cGUxCi9CYXNlRm9udCAvSGVsdmV0aWNhCj4+CmVuZG9iag==" - }, - } - ] - ], - ), - TestCase( - name="file_only_file_data_url", - description="File with only file_data (URL)", - scene_data=[ - [ - { - "type": "file", - "file": {"file_data": "https://example.com/documents/report.pdf"}, - } - ] - ], - ), - TestCase( - name="file_only_file_data_text", - description="File with only file_data (plain text content)", - scene_data=[ - [ - { - "type": "file", - "file": { - "file_data": "This is a plain text file content. It contains multiple lines.\nLine 2 of the file.\nLine 3 of the file." - }, - } - ] - ], - ), - TestCase( - name="file_file_data_and_file_id", - description="File with file_data and file_id", - scene_data=[ - [ - { - "type": "file", - "file": { - "file_data": "https://example.com/documents/data.pdf", - "file_id": "file_with_data_123", - }, - } - ] - ], - ), - TestCase( - name="file_file_data_and_filename", - description="File with file_data and filename", - scene_data=[ - [ - { - "type": "file", - "file": { - "file_data": "This is file content with filename.", - "filename": "content_with_name.txt", - }, - } - ] - ], - ), - TestCase( - name="file_file_id_and_filename", - description="File with file_id and filename (existing case)", - scene_data=[ - [{"type": "file", "file": {"file_id": "file_uploaded_123", "filename": "document.pdf"}}] - ], - ), - TestCase( - name="file_all_parameters", - description="File with all parameters (file_data, file_id, filename)", - scene_data=[ - [ - { - "type": "file", - "file": { - "file_data": "https://example.com/documents/complete.pdf", - "file_id": "file_complete_123", - "filename": "complete_document.pdf", - }, - } - ] - ], - ), - TestCase( - name="file_no_parameters", - description="File with no parameters (should return [File: unknown])", - scene_data=[[{"type": "file", "file": {}}]], - ), -] - -# Assistant message test cases -ASSISTANT_MESSAGE_CASES = [ - TestCase( - name="assistant_with_refusal", - description="Assistant message with refusal", - scene_data=[ - [ - { - "role": "assistant", - "content": [{"type": "text", "text": "I can help you with that."}], - "refusal": "I cannot provide information about that topic.", - "chat_time": "2025-11-24T10:30:00Z", - } - ] - ], - ), - TestCase( - name="assistant_with_audio", - description="Assistant message with audio", - scene_data=[ - [ - { - "role": "assistant", - "content": "Here's the audio response.", - "audio": {"id": "audio_response_123"}, - "chat_time": "2025-11-24T10:31:00Z", - } - ] - ], - ), -] - -# All test cases organized by category 
-TEST_CASES = { - "string": STRING_MESSAGE_CASES, - "chat": CHAT_MESSAGE_CASES, - "tool": TOOL_MESSAGE_CASES, - "multimodal": MULTIMODAL_MESSAGE_CASES, - "raw": RAW_INPUT_CASES, - "assistant": ASSISTANT_MESSAGE_CASES, -} - -# Flattened list of all test cases -ALL_TEST_CASES = {case.name: case for cases in TEST_CASES.values() for case in cases} - - -# ============================================================================ -# Utility Functions -# ============================================================================ - - -def print_textual_memory_item(item: TextualMemoryItem, prefix: str = "", max_length: int = 500): - """Print a memory item in a readable format.""" - print(f"{prefix}Memory ID: {item.id}") - print(f"{prefix}Memory Type: {item.metadata.memory_type}") - if item.metadata.tags: - print(f"{prefix}Tags: {item.metadata.tags}") - memory_preview = ( - item.memory[:max_length] + "..." if len(item.memory) > max_length else item.memory - ) - print(f"{prefix}Memory: {memory_preview}") - if item.metadata.key: - print(f"{prefix}Key: {item.metadata.key}") - if item.metadata.sources: - sources_count = len(item.metadata.sources) if isinstance(item.metadata.sources, list) else 1 - print(f"{prefix}Sources count: {sources_count}") - print() - - -def print_textual_memory_item_json(item: TextualMemoryItem, indent: int = 2): - """Print a memory item as formatted JSON.""" - data = item.to_dict() - if "metadata" in data and "embedding" in data["metadata"]: - embedding = data["metadata"]["embedding"] - if embedding: - data["metadata"]["embedding"] = f"[vector of {len(embedding)} dimensions]" - print(json.dumps(data, indent=indent, ensure_ascii=False)) - - -def get_reader_config() -> dict[str, Any]: - """ - Get reader configuration from environment variables. 
- - Returns: - Configuration dictionary for MultiModalStructMemReaderConfig - """ - openai_api_key = os.getenv("OPENAI_API_KEY") - openai_base_url = os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1") - ollama_api_base = os.getenv("OLLAMA_API_BASE", "http://localhost:11434") - - # Get LLM backend and config - llm_backend = os.getenv("MEM_READER_LLM_BACKEND", "openai") - if llm_backend == "ollama": - llm_config = { - "backend": "ollama", - "config": { - "model_name_or_path": os.getenv("MEM_READER_LLM_MODEL", "qwen3:0.6b"), - "api_base": ollama_api_base, - "temperature": float(os.getenv("MEM_READER_LLM_TEMPERATURE", "0.0")), - "remove_think_prefix": os.getenv( - "MEM_READER_LLM_REMOVE_THINK_PREFIX", "true" - ).lower() - == "true", - "max_tokens": int(os.getenv("MEM_READER_LLM_MAX_TOKENS", "8192")), - }, - } - else: # openai - llm_config = { - "backend": "openai", - "config": { - "model_name_or_path": os.getenv("MEM_READER_LLM_MODEL", "gpt-4o-mini"), - "api_key": openai_api_key or os.getenv("MEMRADER_API_KEY", "EMPTY"), - "api_base": openai_base_url, - "temperature": float(os.getenv("MEM_READER_LLM_TEMPERATURE", "0.5")), - "remove_think_prefix": os.getenv( - "MEM_READER_LLM_REMOVE_THINK_PREFIX", "true" - ).lower() - == "true", - "max_tokens": int(os.getenv("MEM_READER_LLM_MAX_TOKENS", "8192")), - }, - } - - # Get embedder backend and config - embedder_backend = os.getenv( - "MEM_READER_EMBEDDER_BACKEND", os.getenv("MOS_EMBEDDER_BACKEND", "ollama") - ) - if embedder_backend == "universal_api": - embedder_config = { - "backend": "universal_api", - "config": { - "provider": os.getenv( - "MEM_READER_EMBEDDER_PROVIDER", os.getenv("MOS_EMBEDDER_PROVIDER", "openai") - ), - "api_key": os.getenv( - "MEM_READER_EMBEDDER_API_KEY", - os.getenv("MOS_EMBEDDER_API_KEY", openai_api_key or "sk-xxxx"), - ), - "model_name_or_path": os.getenv( - "MEM_READER_EMBEDDER_MODEL", - os.getenv("MOS_EMBEDDER_MODEL", "text-embedding-3-large"), - ), - "base_url": os.getenv( - "MEM_READER_EMBEDDER_API_BASE", - os.getenv("MOS_EMBEDDER_API_BASE", openai_base_url), - ), - }, - } - else: # ollama - embedder_config = { - "backend": "ollama", - "config": { - "model_name_or_path": os.getenv( - "MEM_READER_EMBEDDER_MODEL", - os.getenv("MOS_EMBEDDER_MODEL", "nomic-embed-text:latest"), - ), - "api_base": ollama_api_base, - }, - } - - # Get direct markdown hostnames from environment variable - direct_markdown_hostnames = None - env_hostnames = os.getenv("FILE_PARSER_DIRECT_MARKDOWN_HOSTNAMES", "139.196.232.20") - if env_hostnames: - direct_markdown_hostnames = [h.strip() for h in env_hostnames.split(",") if h.strip()] - - return { - "llm": llm_config, - "embedder": embedder_config, - "chunker": { - "backend": "sentence", - "config": { - "tokenizer_or_token_counter": "gpt2", - "chunk_size": 512, - "chunk_overlap": 128, - "min_sentences_per_chunk": 1, - }, - }, - "direct_markdown_hostnames": direct_markdown_hostnames, - } - - -def count_memories(memory_results: list[list[TextualMemoryItem]]) -> int: - """Count total number of memory items across all scenes.""" - return sum(len(mem_list) for mem_list in memory_results) - - -# ============================================================================ -# Main Functions -# ============================================================================ - - -def run_test_case( - test_case: TestCase, reader: MultiModalStructMemReader, mode: str = "fast", format: str = "text" -): - """ - Run a single test case. 
- - Args: - test_case: Test case to run - reader: MultiModalStructMemReader instance - mode: Processing mode ("fast" or "fine") - format: Output format ("text" or "json") - """ - print(f"\n{'=' * 80}") - print(f"Test Case: {test_case.name}") - print(f"Description: {test_case.description}") - print(f"Mode: {mode.upper()}") - print(f"{'=' * 80}\n") - - info = test_case.get_info() - start_time = time.time() - - try: - memory_results = reader.get_memory(test_case.scene_data, type="chat", info=info, mode=mode) - elapsed_time = time.time() - start_time - - total_count = count_memories(memory_results) - print(f"✅ Completed in {elapsed_time:.2f}s") - print(f"📊 Generated {total_count} memory items across {len(memory_results)} scenes\n") - - # Check expected count if provided - if test_case.expected_count and mode in test_case.expected_count: - expected = test_case.expected_count[mode] - if total_count == expected: - print(f"✅ Expected count matches: {expected}") - else: - print(f"⚠️ Expected {expected}, got {total_count}") - - # Print sample results - print("\nSample Results:") - print("-" * 80) - for scene_idx, mem_list in enumerate(memory_results[:3]): # Show first 3 scenes - if not mem_list: - continue - print(f"\nScene {scene_idx + 1}:") - for item_idx, item in enumerate(mem_list[:2]): # Show first 2 items per scene - print(f"\n [Item {item_idx + 1}]") - if format == "json": - print_textual_memory_item_json(item, indent=4) - else: - print_textual_memory_item(item, prefix=" ", max_length=300) - - except Exception as e: - print(f"❌ Error: {e}") - import traceback - - traceback.print_exc() - - -def run_all_test_cases(reader: MultiModalStructMemReader, mode: str = "fast", format: str = "text"): - """Run all test cases.""" - print(f"\n{'=' * 80}") - print(f"Running All Test Cases (Mode: {mode.upper()})") - print(f"{'=' * 80}\n") - - total_cases = len(ALL_TEST_CASES) - for idx, (name, test_case) in enumerate(ALL_TEST_CASES.items(), 1): - print(f"\n[{idx}/{total_cases}] Running: {name}") - run_test_case(test_case, reader, mode=mode, format=format) - - -def run_category( - category: str, reader: MultiModalStructMemReader, mode: str = "fast", format: str = "text" -): - """Run all test cases in a category.""" - if category not in TEST_CASES: - print(f"❌ Unknown category: {category}") - print(f"Available categories: {', '.join(TEST_CASES.keys())}") - return - - cases = TEST_CASES[category] - print(f"\n{'=' * 80}") - print(f"Running Category: {category.upper()} ({len(cases)} test cases)") - print(f"Mode: {mode.upper()}") - print(f"{'=' * 80}\n") - - for idx, test_case in enumerate(cases, 1): - print(f"\n[{idx}/{len(cases)}] {test_case.name}") - run_test_case(test_case, reader, mode=mode, format=format) - - -def compare_modes(test_case: TestCase, reader: MultiModalStructMemReader, format: str = "text"): - """Compare fast and fine modes for a test case.""" - print(f"\n{'=' * 80}") - print(f"Comparing Fast vs Fine Mode: {test_case.name}") - print(f"{'=' * 80}\n") - - info = test_case.get_info() - - # Fast mode - print("⚡ FAST Mode:") - print("-" * 80) - start_time = time.time() - fast_results = reader.get_memory(test_case.scene_data, type="chat", info=info, mode="fast") - fast_time = time.time() - start_time - fast_count = count_memories(fast_results) - print(f"Time: {fast_time:.2f}s, Items: {fast_count}") - - # Fine mode - print("\n🔄 FINE Mode:") - print("-" * 80) - start_time = time.time() - fine_results = reader.get_memory(test_case.scene_data, type="chat", info=info, mode="fine") - fine_time = time.time() 
- start_time - fine_count = count_memories(fine_results) - print(f"Time: {fine_time:.2f}s, Items: {fine_count}") - - # Comparison - print("\n📈 Comparison:") - print(f" Fast: {fast_time:.2f}s, {fast_count} items") - print(f" Fine: {fine_time:.2f}s, {fine_count} items") - if fast_time > 0: - print(f" Speed: {fine_time / fast_time:.1f}x difference") - - # Show samples - if format == "text": - print("\n--- Fast Mode Sample (first item) ---") - if fast_results and fast_results[0]: - print_textual_memory_item(fast_results[0][0], prefix=" ", max_length=300) - - print("\n--- Fine Mode Sample (first item) ---") - if fine_results and fine_results[0]: - print_textual_memory_item(fine_results[0][0], prefix=" ", max_length=300) - - -def list_test_cases(): - """List all available test cases.""" - print("\n" + "=" * 80) - print("Available Test Cases") - print("=" * 80 + "\n") - - for category, cases in TEST_CASES.items(): - print(f"📁 {category.upper()} ({len(cases)} cases):") - for case in cases: - print(f" • {case.name}: {case.description}") - print() - - -def main(): - """Main entry point.""" - parser = argparse.ArgumentParser( - description="Test MultiModalStructMemReader with various use cases", - formatter_class=argparse.RawDescriptionHelpFormatter, - epilog=""" -Examples: - # Run all test cases in fast mode - python multimodal_struct_reader.py --example all --mode fast - - # Run a specific test case - python multimodal_struct_reader.py --example chat_simple --mode fine - - # Run a category of test cases - python multimodal_struct_reader.py --example multimodal --mode fast - - # Compare fast vs fine mode - python multimodal_struct_reader.py --example chat_simple --compare - - # List all available test cases - python multimodal_struct_reader.py --list - - # Output in JSON format - python multimodal_struct_reader.py --example chat_simple --format json - """, - ) - - parser.add_argument( - "--example", - type=str, - default="oss_text_file", - help="Test case name, category name, or 'all' to run all cases (default: all)", - ) - parser.add_argument( - "--mode", - choices=["fast", "fine"], - default="fine", - help="Processing mode: fast (quick) or fine (with LLM) (default: fast)", - ) - parser.add_argument( - "--format", - choices=["text", "json"], - default="text", - help="Output format: text (readable) or json (structured) (default: text)", - ) - parser.add_argument( - "--compare", - action="store_true", - help="Compare fast and fine modes (only works with specific test case)", - ) - parser.add_argument( - "--list", - action="store_true", - help="List all available test cases and exit", - ) - parser.add_argument( - "--max-memory-length", - type=int, - default=500, - help="Maximum length of memory content to display (default: 500)", - ) - - args = parser.parse_args() - - # List test cases and exit - if args.list: - list_test_cases() - return - - # Initialize reader - print("Initializing MultiModalStructMemReader...") - try: - config_dict = get_reader_config() - reader_config = MultiModalStructMemReaderConfig.model_validate(config_dict) - reader = MultiModalStructMemReader(reader_config) - print("✅ Reader initialized\n") - except Exception as e: - print(f"❌ Failed to initialize reader: {e}") - import traceback - - traceback.print_exc() - return - - # Run test cases - if args.example == "all": - run_all_test_cases(reader, mode=args.mode, format=args.format) - elif args.example in ALL_TEST_CASES: - test_case = ALL_TEST_CASES[args.example] - if args.compare: - compare_modes(test_case, reader, 
format=args.format) - else: - run_test_case(test_case, reader, mode=args.mode, format=args.format) - elif args.example in TEST_CASES: - run_category(args.example, reader, mode=args.mode, format=args.format) - else: - print(f"❌ Unknown test case or category: {args.example}") - print("\nAvailable options:") - print(" Categories:", ", ".join(TEST_CASES.keys())) - print(" Test cases:", ", ".join(ALL_TEST_CASES.keys())) - print("\nUse --list to see all available test cases") - - -if __name__ == "__main__": - main() diff --git a/examples/mem_reader/parser/__init__.py b/examples/mem_reader/parser/__init__.py deleted file mode 100644 index 3a947ae89..000000000 --- a/examples/mem_reader/parser/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Parser examples for different message types.""" diff --git a/examples/mem_reader/parser/config_utils.py b/examples/mem_reader/parser/config_utils.py deleted file mode 100644 index 225b8b5b4..000000000 --- a/examples/mem_reader/parser/config_utils.py +++ /dev/null @@ -1,132 +0,0 @@ -"""Shared configuration utilities for parser examples. - -This module provides configuration functions that match the configuration -logic in examples/mem_reader/multimodal_struct_reader.py. -""" - -import os - -from typing import Any - -from memos.configs.embedder import EmbedderConfigFactory -from memos.configs.llm import LLMConfigFactory -from memos.embedders.factory import EmbedderFactory -from memos.llms.factory import LLMFactory - - -def get_reader_config() -> dict[str, Any]: - """ - Get reader configuration from environment variables. - - Returns a dictionary that can be used to create MultiModalStructMemReaderConfig. - Matches the configuration logic in examples/mem_reader/multimodal_struct_reader.py. - - Returns: - Configuration dictionary with llm, embedder, and chunker configs - """ - openai_api_key = os.getenv("OPENAI_API_KEY") - openai_base_url = os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1") - ollama_api_base = os.getenv("OLLAMA_API_BASE", "http://localhost:11434") - - # Get LLM backend and config - llm_backend = os.getenv("MEM_READER_LLM_BACKEND", "openai") - if llm_backend == "ollama": - llm_config = { - "backend": "ollama", - "config": { - "model_name_or_path": os.getenv("MEM_READER_LLM_MODEL", "qwen3:0.6b"), - "api_base": ollama_api_base, - "temperature": float(os.getenv("MEM_READER_LLM_TEMPERATURE", "0.0")), - "remove_think_prefix": os.getenv( - "MEM_READER_LLM_REMOVE_THINK_PREFIX", "true" - ).lower() - == "true", - "max_tokens": int(os.getenv("MEM_READER_LLM_MAX_TOKENS", "8192")), - }, - } - else: # openai - llm_config = { - "backend": "openai", - "config": { - "model_name_or_path": os.getenv("MEM_READER_LLM_MODEL", "gpt-4o-mini"), - "api_key": openai_api_key or os.getenv("MEMRADER_API_KEY", "EMPTY"), - "api_base": openai_base_url, - "temperature": float(os.getenv("MEM_READER_LLM_TEMPERATURE", "0.5")), - "remove_think_prefix": os.getenv( - "MEM_READER_LLM_REMOVE_THINK_PREFIX", "true" - ).lower() - == "true", - "max_tokens": int(os.getenv("MEM_READER_LLM_MAX_TOKENS", "8192")), - }, - } - - # Get embedder backend and config - embedder_backend = os.getenv( - "MEM_READER_EMBEDDER_BACKEND", os.getenv("MOS_EMBEDDER_BACKEND", "ollama") - ) - if embedder_backend == "universal_api": - embedder_config = { - "backend": "universal_api", - "config": { - "provider": os.getenv( - "MEM_READER_EMBEDDER_PROVIDER", os.getenv("MOS_EMBEDDER_PROVIDER", "openai") - ), - "api_key": os.getenv( - "MEM_READER_EMBEDDER_API_KEY", - os.getenv("MOS_EMBEDDER_API_KEY", openai_api_key or 
"sk-xxxx"), - ), - "model_name_or_path": os.getenv( - "MEM_READER_EMBEDDER_MODEL", - os.getenv("MOS_EMBEDDER_MODEL", "text-embedding-3-large"), - ), - "base_url": os.getenv( - "MEM_READER_EMBEDDER_API_BASE", - os.getenv("MOS_EMBEDDER_API_BASE", openai_base_url), - ), - }, - } - else: # ollama - embedder_config = { - "backend": "ollama", - "config": { - "model_name_or_path": os.getenv( - "MEM_READER_EMBEDDER_MODEL", - os.getenv("MOS_EMBEDDER_MODEL", "nomic-embed-text:latest"), - ), - "api_base": ollama_api_base, - }, - } - - return { - "llm": llm_config, - "embedder": embedder_config, - "chunker": { - "backend": "sentence", - "config": { - "tokenizer_or_token_counter": "gpt2", - "chunk_size": 512, - "chunk_overlap": 128, - "min_sentences_per_chunk": 1, - }, - }, - } - - -def init_embedder_and_llm(): - """ - Initialize embedder and LLM from environment variables. - - Returns: - Tuple of (embedder, llm) instances - """ - config_dict = get_reader_config() - - # Initialize embedder - embedder_config = EmbedderConfigFactory.model_validate(config_dict["embedder"]) - embedder = EmbedderFactory.from_config(embedder_config) - - # Initialize LLM - llm_config = LLMConfigFactory.model_validate(config_dict["llm"]) - llm = LLMFactory.from_config(llm_config) - - return embedder, llm diff --git a/examples/mem_reader/parser/example_assistant_parser.py b/examples/mem_reader/parser/example_assistant_parser.py deleted file mode 100644 index a77f04a68..000000000 --- a/examples/mem_reader/parser/example_assistant_parser.py +++ /dev/null @@ -1,94 +0,0 @@ -"""Example demonstrating AssistantParser usage. - -AssistantParser handles assistant messages in chat conversations. -""" - -import sys - -from pathlib import Path - -from dotenv import load_dotenv - -from memos.mem_reader.read_multi_modal.assistant_parser import AssistantParser - - -# Handle imports for both script and module usage -try: - from .config_utils import init_embedder_and_llm -except ImportError: - # When running as script, add parent directory to path - sys.path.insert(0, str(Path(__file__).parent)) - from config_utils import init_embedder_and_llm - -# Load environment variables -load_dotenv() - - -def main(): - """Demonstrate AssistantParser usage.""" - print("=== AssistantParser Example ===\n") - - # 1. Initialize embedder and LLM (using shared config) - embedder, llm = init_embedder_and_llm() - - # 3. Create AssistantParser - parser = AssistantParser(embedder=embedder, llm=llm) - - # 4. Example assistant messages - assistant_messages = [ - { - "role": "assistant", - "content": "I'm sorry to hear that you're feeling down. 
Would you like to talk about what's been going on?", - "chat_time": "2025-01-15T10:00:30", - "message_id": "msg_001", - }, - { - "role": "assistant", - "content": "Based on the document you provided, I can see several key points: 1) The project timeline, 2) Budget considerations, and 3) Resource allocation.", - "chat_time": "2025-01-15T10:05:30", - "message_id": "msg_002", - }, - { - "role": "assistant", - "content": "Here's a Python solution for your problem:\n```python\ndef solve_problem():\n return 'solution'\n```", - "chat_time": "2025-01-15T10:10:30", - "message_id": "msg_003", - }, - ] - - print("📝 Processing assistant messages:\n") - for i, message in enumerate(assistant_messages, 1): - print(f"Assistant Message {i}:") - print(f" Content: {message['content'][:60]}...") - - # Create source from assistant message - info = {"user_id": "user1", "session_id": "session1"} - source = parser.create_source(message, info) - - print(" ✅ Created SourceMessage:") - print(f" - Type: {source.type}") - print(f" - Role: {source.role}") - print(f" - Content: {source.content[:60]}...") - print(f" - Chat Time: {source.chat_time}") - print(f" - Message ID: {source.message_id}") - print() - - # Parse in fast mode - memory_items = parser.parse_fast(message, info) - print(f" 📊 Fast mode generated {len(memory_items)} memory item(s)") - if memory_items: - print(f" - Memory: {memory_items[0].memory[:60]}...") - print(f" - Memory Type: {memory_items[0].metadata.memory_type}") - print(f" - Tags: {memory_items[0].metadata.tags}") - print() - - # Rebuild assistant message from source - rebuilt = parser.rebuild_from_source(source) - print(f" 🔄 Rebuilt message: role={rebuilt['role']}, content={rebuilt['content'][:40]}...") - print() - - print("✅ AssistantParser example completed!") - - -if __name__ == "__main__": - main() diff --git a/examples/mem_reader/parser/example_file_content_parser.py b/examples/mem_reader/parser/example_file_content_parser.py deleted file mode 100644 index 06071a70c..000000000 --- a/examples/mem_reader/parser/example_file_content_parser.py +++ /dev/null @@ -1,132 +0,0 @@ -"""Example demonstrating FileContentParser usage. - -FileContentParser handles file content parts in multimodal messages (RawMessageList). -""" - -import sys - -from pathlib import Path - -from dotenv import load_dotenv - -from memos.configs.parser import ParserConfigFactory -from memos.mem_reader.read_multi_modal.file_content_parser import FileContentParser -from memos.parsers.factory import ParserFactory - - -# Handle imports for both script and module usage -try: - from .config_utils import init_embedder_and_llm -except ImportError: - # When running as script, add parent directory to path - sys.path.insert(0, str(Path(__file__).parent)) - from config_utils import init_embedder_and_llm - -# Load environment variables -load_dotenv() - - -def main(): - """Demonstrate FileContentParser usage.""" - print("=== FileContentParser Example ===\n") - - # 1. Initialize embedder and LLM (using shared config) - embedder, llm = init_embedder_and_llm() - - # 3. Initialize parser for file content parsing (optional) - try: - parser_config = ParserConfigFactory.model_validate( - { - "backend": "markitdown", - "config": {}, - } - ) - file_parser = ParserFactory.from_config(parser_config) - except Exception as e: - print(f"⚠️ Warning: Could not initialize file parser: {e}") - print(" FileContentParser will work without a parser, but file parsing will be limited.") - file_parser = None - - # 4. 
Create FileContentParser - parser = FileContentParser(embedder=embedder, llm=llm, parser=file_parser) - - # 5. Example file content parts - file_content_parts = [ - { - "type": "file", - "file": { - "filename": "document.pdf", - "file_id": "file_123", - "file_data": "This is the content extracted from the PDF file...", - }, - }, - { - "type": "file", - "file": { - "filename": "report.docx", - "file_id": "file_456", - "file_data": "Report content: Analysis of Q4 performance...", - }, - }, - { - "type": "file", - "file": { - "filename": "data.csv", - "file_id": "file_789", - "path": "/path/to/data.csv", # Alternative: using path instead of file_data - }, - }, - ] - - print("📝 Processing file content parts:\n") - for i, part in enumerate(file_content_parts, 1): - print(f"File Content Part {i}:") - file_info = part.get("file", {}) - print(f" Filename: {file_info.get('filename', 'unknown')}") - print(f" File ID: {file_info.get('file_id', 'N/A')}") - - # Create source from file content part - info = {"user_id": "user1", "session_id": "session1"} - source = parser.create_source(part, info) - - print(" ✅ Created SourceMessage:") - print(f" - Type: {source.type}") - print(f" - Doc Path: {source.doc_path}") - if source.content: - print(f" - Content: {source.content[:60]}...") - if hasattr(source, "original_part") and source.original_part: - print(" - Has original_part: Yes") - print() - - # Rebuild file content part from source - rebuilt = parser.rebuild_from_source(source) - print(" 🔄 Rebuilt part:") - print(f" - Type: {rebuilt['type']}") - print(f" - Filename: {rebuilt['file'].get('filename', 'N/A')}") - print() - - # 6. Example with actual file path (if parser is available) - if file_parser: - print("📄 Testing file parsing with actual file path:\n") - # Note: This is just an example - actual file parsing would require a real file - example_file_part = { - "type": "file", - "file": { - "filename": "example.txt", - "path": "examples/mem_reader/text1.txt", # Using existing test file - }, - } - - try: - source = parser.create_source(example_file_part, info) - print(f" ✅ Created SourceMessage for file: {source.doc_path}") - # The parser would parse the file content if the file exists - except Exception as e: - print(f" ⚠️ File parsing note: {e}") - print() - - print("✅ FileContentParser example completed!") - - -if __name__ == "__main__": - main() diff --git a/examples/mem_reader/parser/example_multi_modal_parser.py b/examples/mem_reader/parser/example_multi_modal_parser.py deleted file mode 100644 index 3638d8d5e..000000000 --- a/examples/mem_reader/parser/example_multi_modal_parser.py +++ /dev/null @@ -1,400 +0,0 @@ -"""Example demonstrating MultiModalParser parser selection. - -This example verifies that different input types correctly return -the corresponding parser instances. 
- -MessagesType Definition (from src/memos/types/general_types.py): - MessagesType = str | MessageList | RawMessageList - - Where: - - str: Simple string messages - - MessageList: list[ChatCompletionMessageParam] - ChatCompletionMessageParam = ( - ChatCompletionSystemMessageParam | - ChatCompletionUserMessageParam | - ChatCompletionAssistantMessageParam | - ChatCompletionToolMessageParam - ) - - RawMessageList: list[RawMessageDict] - RawMessageDict = ChatCompletionContentPartTextParam | File - - Note: User/Assistant messages can have multimodal content (list of parts): - - {"type": "text", "text": "..."} - - {"type": "file", "file": {...}} - - {"type": "image_url", "image_url": {...}} - - {"type": "input_audio", "input_audio": {...}} -""" - -import sys - -from pathlib import Path - -from dotenv import load_dotenv - -from memos.mem_reader.read_multi_modal.multi_modal_parser import MultiModalParser - - -# Add src directory to path for imports -project_root = Path(__file__).parent.parent.parent.parent -src_path = project_root / "src" -if str(src_path) not in sys.path: - sys.path.insert(0, str(src_path)) - - -# Handle imports for both script and module usage -try: - from .config_utils import init_embedder_and_llm -except ImportError: - # When running as script, add parent directory to path - sys.path.insert(0, str(Path(__file__).parent)) - from config_utils import init_embedder_and_llm - -# Load environment variables -load_dotenv() - - -def parser_selection(): - """Test that different input types return the correct parser.""" - print("=== MultiModalParser Parser Selection Test ===\n") - - # 1. Initialize embedder and LLM - embedder, llm = init_embedder_and_llm() - - # 2. Create MultiModalParser - parser = MultiModalParser(embedder=embedder, llm=llm) - - # 3. 
Test cases: different input types - test_cases = [ - # String input -> StringParser - { - "name": "String input", - "message": "This is a simple string message", - "expected_parser_type": "StringParser", - }, - # RawMessageList: text type -> TextContentParser - { - "name": "Text content part (RawMessageList)", - "message": {"type": "text", "text": "This is a text content part"}, - "expected_parser_type": "TextContentParser", - }, - # RawMessageList: file type -> FileContentParser - { - "name": "File content part (RawMessageList)", - "message": { - "type": "file", - "file": { - "filename": "example.pdf", - "file_data": "File content here", - }, - }, - "expected_parser_type": "FileContentParser", - }, - # RawMessageList: image_url type -> None (type_parsers uses "image" key, not "image_url") - { - "name": "Image content part (RawMessageList - image_url type)", - "message": { - "type": "image_url", - "image_url": { - "url": "https://example.com/image.jpg", - "detail": "auto", - }, - }, - "expected_parser_type": None, # type_parsers has "image" key, but message has "image_url" type - "should_return_none": True, - }, - # RawMessageList: input_audio type -> None (type_parsers uses "audio" key, not "input_audio") - { - "name": "Audio content part (RawMessageList - input_audio type)", - "message": { - "type": "input_audio", - "input_audio": { - "data": "base64_encoded_audio_data", - "format": "mp3", - }, - }, - "expected_parser_type": None, # type_parsers has "audio" key, but message has "input_audio" type - "should_return_none": True, - }, - # MessageList: system role -> SystemParser - { - "name": "System message", - "message": { - "role": "system", - "content": "You are a helpful assistant.", - }, - "expected_parser_type": "SystemParser", - }, - # MessageList: user role -> UserParser - { - "name": "User message (simple)", - "message": { - "role": "user", - "content": "Hello, how are you?", - }, - "expected_parser_type": "UserParser", - }, - # MessageList: user role with multimodal content -> UserParser - { - "name": "User message (multimodal with text and file)", - "message": { - "role": "user", - "content": [ - {"type": "text", "text": "What's in this image?"}, - {"type": "file", "file": {"filename": "image.jpg", "file_data": ""}}, - ], - }, - "expected_parser_type": "UserParser", - }, - # MessageList: user role with image_url content -> UserParser - { - "name": "User message (with image_url)", - "message": { - "role": "user", - "content": [ - {"type": "text", "text": "What's in this image?"}, - { - "type": "image_url", - "image_url": {"url": "https://example.com/image.jpg"}, - }, - ], - }, - "expected_parser_type": "UserParser", - }, - # MessageList: user role with input_audio content -> UserParser - { - "name": "User message (with input_audio)", - "message": { - "role": "user", - "content": [ - {"type": "text", "text": "Listen to this audio"}, - { - "type": "input_audio", - "input_audio": {"data": "base64_data", "format": "wav"}, - }, - ], - }, - "expected_parser_type": "UserParser", - }, - # MessageList: assistant role -> AssistantParser - { - "name": "Assistant message (simple)", - "message": { - "role": "assistant", - "content": "I'm doing well, thank you!", - }, - "expected_parser_type": "AssistantParser", - }, - # MessageList: assistant role with tool_calls -> AssistantParser - { - "name": "Assistant message (with tool_calls)", - "message": { - "role": "assistant", - "content": None, - "tool_calls": [ - { - "id": "call_123", - "type": "function", - "function": { - "name": "get_weather", 
- "arguments": '{"location": "Beijing"}', - }, - } - ], - }, - "expected_parser_type": "AssistantParser", - }, - # MessageList: tool role -> ToolParser - { - "name": "Tool message", - "message": { - "role": "tool", - "content": "Tool execution result", - "tool_call_id": "call_123", - }, - "expected_parser_type": "ToolParser", - }, - ] - - print("Testing parser selection for different input types:\n") - all_passed = True - - for i, test_case in enumerate(test_cases, 1): - message = test_case["message"] - expected_type = test_case.get("expected_parser_type") - test_name = test_case["name"] - should_return_none = test_case.get("should_return_none", False) - - # Get parser using internal method - selected_parser = parser._get_parser(message) - - # Handle cases where None is expected - if should_return_none or expected_type is None: - if selected_parser is None: - print(f"✅ Test {i}: {test_name}") - print(" Expected: None (parser not implemented yet or not found)") - print(" Got: None") - if expected_type: - print(f" Note: {expected_type} is not yet implemented") - else: - print(f"⚠️ Test {i}: {test_name}") - print(" Expected: None") - print(f" Got: {type(selected_parser).__name__}") - print(" Note: Parser found but may not be fully implemented") - print() - continue - - # Check if parser was found - if selected_parser is None: - print(f"❌ Test {i}: {test_name}") - print(f" Expected: {expected_type}") - print(" Got: None (parser not found)") - print(f" Message: {message}\n") - all_passed = False - continue - - # Get actual parser type name - actual_type = type(selected_parser).__name__ - - # Verify parser type - if actual_type == expected_type: - print(f"✅ Test {i}: {test_name}") - print(f" Expected: {expected_type}") - print(f" Got: {actual_type}") - print(f" Parser instance: {selected_parser}") - else: - print(f"❌ Test {i}: {test_name}") - print(f" Expected: {expected_type}") - print(f" Got: {actual_type}") - print(f" Message: {message}") - all_passed = False - print() - - # Test edge cases - print("\n=== Testing Edge Cases ===\n") - - edge_cases = [ - { - "name": "Unknown message type (not dict, not str)", - "message": 12345, - "should_return_none": True, - }, - { - "name": "Dict without type or role", - "message": {"content": "Some content"}, - "should_return_none": True, - }, - { - "name": "Unknown type in RawMessageList", - "message": {"type": "unknown_type", "data": "some data"}, - "should_return_none": True, - }, - { - "name": "Unknown role in MessageList", - "message": {"role": "unknown_role", "content": "some content"}, - "should_return_none": True, - }, - { - "name": "List of messages (MessageList - not handled by _get_parser)", - "message": [ - {"role": "user", "content": "Message 1"}, - {"role": "assistant", "content": "Message 2"}, - ], - "should_return_none": True, # Lists are handled in parse(), not _get_parser() - }, - { - "name": "List of RawMessageList items (not handled by _get_parser)", - "message": [ - {"type": "text", "text": "Text content 1"}, - {"type": "file", "file": {"filename": "doc.pdf", "file_data": ""}}, - ], - "should_return_none": True, # Lists are handled in parse(), not _get_parser() - }, - ] - - for i, test_case in enumerate(edge_cases, 1): - message = test_case["message"] - should_return_none = test_case["should_return_none"] - test_name = test_case["name"] - - selected_parser = parser._get_parser(message) - - if should_return_none: - if selected_parser is None: - print(f"✅ Edge Case {i}: {test_name}") - print(" Correctly returned None") - else: - print(f"❌ 
Edge Case {i}: {test_name}") - print(" Expected: None") - print(f" Got: {type(selected_parser).__name__}") - all_passed = False - else: - if selected_parser is not None: - print(f"✅ Edge Case {i}: {test_name}") - print(f" Got parser: {type(selected_parser).__name__}") - else: - print(f"❌ Edge Case {i}: {test_name}") - print(" Expected: Parser") - print(" Got: None") - all_passed = False - print() - - # Summary - print("=" * 60) - if all_passed: - print("✅ All tests passed! Parser selection is working correctly.") - else: - print("❌ Some tests failed. Please check the output above.") - print("=" * 60) - - -def parser_instances(): - """Test that parser instances are correctly initialized.""" - print("\n=== Parser Instance Verification ===\n") - - embedder, llm = init_embedder_and_llm() - parser = MultiModalParser(embedder=embedder, llm=llm) - - # Verify all parser instances are initialized - parsers_to_check = { - "string_parser": "StringParser", - "system_parser": "SystemParser", - "user_parser": "UserParser", - "assistant_parser": "AssistantParser", - "tool_parser": "ToolParser", - "text_content_parser": "TextContentParser", - "file_content_parser": "FileContentParser", - } - - print("Checking parser instance initialization:\n") - all_initialized = True - - for attr_name, expected_type in parsers_to_check.items(): - parser_instance = getattr(parser, attr_name, None) - if parser_instance is None: - print(f"❌ {attr_name}: Not initialized") - all_initialized = False - else: - actual_type = type(parser_instance).__name__ - if actual_type == expected_type: - print(f"✅ {attr_name}: {actual_type}") - else: - print(f"❌ {attr_name}: Expected {expected_type}, got {actual_type}") - all_initialized = False - - print() - if all_initialized: - print("✅ All parser instances are correctly initialized!") - else: - print("❌ Some parser instances are missing or incorrect.") - print() - - -def main(): - """Run all tests.""" - parser_selection() - parser_instances() - print("\n✅ MultiModalParser example completed!") - - -if __name__ == "__main__": - main() diff --git a/examples/mem_reader/parser/example_string_parser.py b/examples/mem_reader/parser/example_string_parser.py deleted file mode 100644 index 3ec658a0e..000000000 --- a/examples/mem_reader/parser/example_string_parser.py +++ /dev/null @@ -1,66 +0,0 @@ -"""Example demonstrating StringParser usage. - -StringParser handles simple string messages that need to be converted to memory items. -""" - -import sys - -from pathlib import Path - -from dotenv import load_dotenv - -from memos.mem_reader.read_multi_modal.string_parser import StringParser - - -# Handle imports for both script and module usage -try: - from .config_utils import init_embedder_and_llm -except ImportError: - # When running as script, add parent directory to path - sys.path.insert(0, str(Path(__file__).parent)) - from config_utils import init_embedder_and_llm - -# Load environment variables -load_dotenv() - - -def main(): - """Demonstrate StringParser usage.""" - print("=== StringParser Example ===\n") - - # 1. Initialize embedder and LLM (using shared config) - embedder, llm = init_embedder_and_llm() - - # 3. Create StringParser - parser = StringParser(embedder=embedder, llm=llm) - - # 4. 
Example string messages - string_messages = [ - "This is a simple text message that needs to be parsed.", - "Another string message for processing.", - "StringParser handles plain text strings and converts them to SourceMessage objects.", - ] - - print("📝 Processing string messages:\n") - for i, message in enumerate(string_messages, 1): - print(f"Message {i}: {message[:50]}...") - - # Create source from string - info = {"user_id": "user1", "session_id": "session1"} - source = parser.create_source(message, info) - - print(" ✅ Created SourceMessage:") - print(f" - Type: {source.type}") - print(f" - Content: {source.content[:50]}...") - print() - - # Rebuild string from source - rebuilt = parser.rebuild_from_source(source) - print(f" 🔄 Rebuilt string: {rebuilt[:50]}...") - print() - - print("✅ StringParser example completed!") - - -if __name__ == "__main__": - main() diff --git a/examples/mem_reader/parser/example_system_parser.py b/examples/mem_reader/parser/example_system_parser.py deleted file mode 100644 index bc684a32b..000000000 --- a/examples/mem_reader/parser/example_system_parser.py +++ /dev/null @@ -1,158 +0,0 @@ -"""Example demonstrating SystemParser usage. - -SystemParser handles system messages in chat conversations. -Note: System messages support multimodal content, but only text parts are allowed -(not file, image_url, or input_audio like user messages). -""" - -import sys - -from pathlib import Path - -from dotenv import load_dotenv - - -try: - from .print_utils import pretty_print_dict -except ImportError: - # Fallback if print_utils is not available - def pretty_print_dict(d): - import json - - print(json.dumps(d, indent=2, ensure_ascii=False)) - - -from memos.mem_reader.read_multi_modal.system_parser import SystemParser - - -# Handle imports for both script and module usage -try: - from .config_utils import init_embedder_and_llm -except ImportError: - # When running as script, add parent directory to path - sys.path.insert(0, str(Path(__file__).parent)) - from config_utils import init_embedder_and_llm - -# Load environment variables -load_dotenv() - - -def main(): - """Demonstrate SystemParser usage.""" - print("=== SystemParser Example ===\n") - - # 1. Initialize embedder and LLM (using shared config) - embedder, llm = init_embedder_and_llm() - - # 3. Create SystemParser - parser = SystemParser(embedder=embedder, llm=llm) - - # 4. Example system messages (simple text) - simple_system_message = { - "role": "system", - "content": "You are a helpful assistant that provides clear and concise answers.", - "chat_time": "2025-01-15T10:00:00", - "message_id": "msg_001", - } - - print("📝 Example 1: Simple text system message\n") - pretty_print_dict(simple_system_message) - - info = {"user_id": "user1", "session_id": "session1"} - source = parser.create_source(simple_system_message, info) - - print(" ✅ Created SourceMessage:") - print(f" - Type: {source.type}") - print(f" - Role: {source.role}") - print(f" - Content: {source.content[:60]}...") - print(f" - Chat Time: {source.chat_time}") - print(f" - Message ID: {source.message_id}") - print() - - # Parse in fast mode - memory_items = parser.parse_fast(simple_system_message, info) - print(f" 📊 Fast mode generated {len(memory_items)} memory item(s)") - if memory_items: - print(f" - Memory: {memory_items[0].memory[:60]}...") - print(f" - Memory Type: {memory_items[0].metadata.memory_type}") - print(f" - Tags: {memory_items[0].metadata.tags}") - print() - - # 5. 
Example multimodal system message (multiple text parts) - # Note: System messages only support text parts, not file/image/audio - multimodal_system_message = { - "role": "system", - "content": [ - {"type": "text", "text": "You are a helpful assistant."}, - {"type": "text", "text": "Always provide clear and concise answers."}, - {"type": "text", "text": "If you don't know something, say so."}, - ], - "chat_time": "2025-01-15T10:05:00", - "message_id": "msg_002", - } - - print("📝 Example 2: Multimodal system message (multiple text parts)\n") - pretty_print_dict(multimodal_system_message) - print(f"Message contains {len(multimodal_system_message['content'])} text parts") - - sources = parser.create_source(multimodal_system_message, info) - if isinstance(sources, list): - print(f" ✅ Created {len(sources)} SourceMessage(s):") - for i, src in enumerate(sources, 1): - print(f" [{i}] Type: {src.type}, Role: {src.role}") - print(f" Content: {src.content[:50]}...") - else: - print(f" ✅ Created SourceMessage: Type={sources.type}") - print() - - # Parse in fast mode - memory_items = parser.parse_fast(multimodal_system_message, info) - print(f" 📊 Fast mode generated {len(memory_items)} memory item(s)") - if memory_items: - print(f" - Memory: {memory_items[0].memory[:60]}...") - print(f" - Memory Type: {memory_items[0].metadata.memory_type}") - print(f" - Tags: {memory_items[0].metadata.tags}") - # Show sources from memory item - if memory_items[0].metadata.sources: - print(f" - Sources: {len(memory_items[0].metadata.sources)} SourceMessage(s)") - print() - - # 6. Example with structured system instructions - structured_system_message = { - "role": "system", - "content": [ - { - "type": "text", - "text": "You are a coding assistant specialized in Python programming.", - }, - {"type": "text", "text": "Always write clean, well-documented code."}, - {"type": "text", "text": "Explain your reasoning when providing solutions."}, - ], - "chat_time": "2025-01-15T10:10:00", - "message_id": "msg_003", - } - - print("📝 Example 3: Structured system instructions (multiple text parts)\n") - pretty_print_dict(structured_system_message) - - sources = parser.create_source(structured_system_message, info) - if isinstance(sources, list): - print(f" ✅ Created {len(sources)} SourceMessage(s):") - for i, src in enumerate(sources, 1): - print(f" [{i}] Type: {src.type}, Role: {src.role}") - print(f" Content: {src.content[:50]}...") - print() - - # Rebuild examples - print("🔄 Rebuilding messages from sources:\n") - if isinstance(sources, list) and sources: - rebuilt = parser.rebuild_from_source(sources[0]) - else: - rebuilt = parser.rebuild_from_source(source) - if rebuilt: - pretty_print_dict(rebuilt) - print("✅ SystemParser example completed!") - - -if __name__ == "__main__": - main() diff --git a/examples/mem_reader/parser/example_text_content_parser.py b/examples/mem_reader/parser/example_text_content_parser.py deleted file mode 100644 index 1eb64d033..000000000 --- a/examples/mem_reader/parser/example_text_content_parser.py +++ /dev/null @@ -1,72 +0,0 @@ -"""Example demonstrating TextContentParser usage. - -TextContentParser handles text content parts in multimodal messages (RawMessageList). 
-""" - -import sys - -from pathlib import Path - -from dotenv import load_dotenv - -from memos.mem_reader.read_multi_modal.text_content_parser import TextContentParser - - -# Handle imports for both script and module usage -try: - from .config_utils import init_embedder_and_llm -except ImportError: - # When running as script, add parent directory to path - sys.path.insert(0, str(Path(__file__).parent)) - from config_utils import init_embedder_and_llm - -# Load environment variables -load_dotenv() - - -def main(): - """Demonstrate TextContentParser usage.""" - print("=== TextContentParser Example ===\n") - - # 1. Initialize embedder and LLM (using shared config) - embedder, llm = init_embedder_and_llm() - - # 3. Create TextContentParser - parser = TextContentParser(embedder=embedder, llm=llm) - - # 4. Example text content parts - text_content_parts = [ - {"type": "text", "text": "This is a simple text content part."}, - {"type": "text", "text": "TextContentParser handles text parts in multimodal messages."}, - { - "type": "text", - "text": "This parser is used when processing RawMessageList items that contain text content.", - }, - ] - - print("📝 Processing text content parts:\n") - for i, part in enumerate(text_content_parts, 1): - print(f"Text Content Part {i}:") - print(f" Text: {part['text'][:60]}...") - - # Create source from text content part - info = {"user_id": "user1", "session_id": "session1"} - source = parser.create_source(part, info) - - print(" ✅ Created SourceMessage:") - print(f" - Type: {source.type}") - print(f" - Content: {source.content[:60]}...") - if hasattr(source, "original_part") and source.original_part: - print(" - Has original_part: Yes") - print() - - # Rebuild text content part from source - rebuilt = parser.rebuild_from_source(source) - print(f" 🔄 Rebuilt part: type={rebuilt['type']}, text={rebuilt['text'][:40]}...") - print() - - print("✅ TextContentParser example completed!") - - -if __name__ == "__main__": - main() diff --git a/examples/mem_reader/parser/example_tool_parser.py b/examples/mem_reader/parser/example_tool_parser.py deleted file mode 100644 index bf3f4e333..000000000 --- a/examples/mem_reader/parser/example_tool_parser.py +++ /dev/null @@ -1,101 +0,0 @@ -"""Example demonstrating ToolParser usage. - -ToolParser handles tool/function call messages in chat conversations. -""" - -import sys - -from pathlib import Path - -from dotenv import load_dotenv - -from memos.mem_reader.read_multi_modal.tool_parser import ToolParser - - -# Handle imports for both script and module usage -try: - from .config_utils import init_embedder_and_llm -except ImportError: - # When running as script, add parent directory to path - sys.path.insert(0, str(Path(__file__).parent)) - from config_utils import init_embedder_and_llm - -# Load environment variables -load_dotenv() - - -def main(): - """Demonstrate ToolParser usage.""" - print("=== ToolParser Example ===\n") - - # 1. Initialize embedder and LLM (using shared config) - embedder, llm = init_embedder_and_llm() - - # 3. Create ToolParser - parser = ToolParser(embedder=embedder, llm=llm) - - # 4. 
Example tool messages - tool_messages = [ - { - "role": "tool", - "content": '{"result": "Weather in New York: 72°F, sunny"}', - "tool_call_id": "call_abc123", - "chat_time": "2025-01-15T10:00:30", - "message_id": "msg_001", - }, - { - "role": "tool", - "content": '{"status": "success", "data": {"items": [1, 2, 3]}}', - "tool_call_id": "call_def456", - "chat_time": "2025-01-15T10:05:30", - "message_id": "msg_002", - }, - { - "role": "tool", - "content": "Database query executed successfully. Retrieved 5 records.", - "tool_call_id": "call_ghi789", - "chat_time": "2025-01-15T10:10:30", - "message_id": "msg_003", - }, - ] - - print("📝 Processing tool messages:\n") - for i, message in enumerate(tool_messages, 1): - print(f"Tool Message {i}:") - print(f" Content: {message['content'][:60]}...") - print(f" Tool Call ID: {message['tool_call_id']}") - - # Create source from tool message - info = {"user_id": "user1", "session_id": "session1"} - source = parser.create_source(message, info) - - print(" ✅ Created SourceMessage:") - print(f" - Type: {source.type}") - print(f" - Role: {source.role}") - print(f" - Content: {source.content[:60]}...") - print(f" - Chat Time: {source.chat_time}") - print(f" - Message ID: {source.message_id}") - print() - - # Parse in fast mode - memory_items = parser.parse_fast(message, info) - print(f" 📊 Fast mode generated {len(memory_items)} memory item(s)") - if memory_items: - print(f" - Memory: {memory_items[0].memory[:60]}...") - print(f" - Memory Type: {memory_items[0].metadata.memory_type}") - print(f" - Tags: {memory_items[0].metadata.tags}") - print() - - # Rebuild tool message from source - rebuilt = parser.rebuild_from_source(source) - print(" 🔄 Rebuilt message:") - print(f" - Role: {rebuilt['role']}") - print(f" - Tool Call ID: {rebuilt.get('tool_call_id', 'N/A')}") - print(f" - Content: {rebuilt['content'][:40]}...") - print() - - print("✅ ToolParser example completed!") - - -if __name__ == "__main__": - main() diff --git a/examples/mem_reader/parser/example_user_parser.py b/examples/mem_reader/parser/example_user_parser.py deleted file mode 100644 index 78a75b94f..000000000 --- a/examples/mem_reader/parser/example_user_parser.py +++ /dev/null @@ -1,135 +0,0 @@ -"""Example demonstrating UserParser usage. - -UserParser handles user messages, including multimodal messages with text, files, images, etc. -""" - -import sys - -from pathlib import Path - -from dotenv import load_dotenv -from print_utils import pretty_print_dict - -from memos.mem_reader.read_multi_modal.user_parser import UserParser - - -# Handle imports for both script and module usage -try: - from .config_utils import init_embedder_and_llm -except ImportError: - # When running as script, add parent directory to path - sys.path.insert(0, str(Path(__file__).parent)) - from config_utils import init_embedder_and_llm - -# Load environment variables -load_dotenv() - - -def main(): - """Demonstrate UserParser usage.""" - print("=== UserParser Example ===\n") - - # 1. Initialize embedder and LLM (using shared config) - embedder, llm = init_embedder_and_llm() - - # 3. Create UserParser - parser = UserParser(embedder=embedder, llm=llm) - - # 4. Example user messages (simple text) - simple_user_message = { - "role": "user", - "content": "I'm feeling a bit down today. 
Can you help me?", - "chat_time": "2025-01-15T10:00:00", - "message_id": "msg_001", - } - - print("📝 Example 1: Simple text user message\n") - pretty_print_dict(simple_user_message) - - info = {"user_id": "user1", "session_id": "session1"} - # Parse in fast mode - memory_items = parser.parse_fast(simple_user_message, info) - print(f" 📊 Fast mode generated {len(memory_items)} memory item(s)") - if memory_items: - print(f" - Memory: {memory_items[0].memory[:60]}...") - print(f" - Memory Type: {memory_items[0].metadata.memory_type}") - print() - - # 5. Example multimodal user message (text + file) - multimodal_user_message = { - "role": "user", - "content": [ - {"type": "text", "text": "Please analyze this document:"}, - { - "type": "file", - "file": { - "filename": "report.pdf", - "file_id": "file_123", - "file_data": "This is the content of the PDF file...", - }, - }, - ], - "chat_time": "2025-01-15T10:05:00", - "message_id": "msg_002", - } - - print("📝 Example 2: Multimodal user message (text + file)\n") - pretty_print_dict(multimodal_user_message) - print(f"Message contains {len(multimodal_user_message['content'])} parts") - - # Parse in fast mode - memory_items = parser.parse_fast(multimodal_user_message, info) - print(f" 📊 Fast mode generated {len(memory_items)} memory item(s)") - for memory_item in memory_items: - sources = memory_item.metadata.sources - print(f" ✅ Created {len(sources)} SourceMessage(s):") - for i, src in enumerate(sources, 1): - print(f" [{i}] Type: {src.type}, Role: {src.role}") - if src.type == "text": - print(f" Content: {src.content[:50]}...") - elif src.type == "file": - print(f" Doc Path: {src.doc_path}") - print() - - # 6. Example with image_url (future support) - image_user_message = { - "role": "user", - "content": [ - {"type": "text", "text": "What's in this image?"}, - { - "type": "image_url", - "image_url": {"url": "https://example.com/image.jpg"}, - }, - ], - "chat_time": "2025-01-15T10:10:00", - "message_id": "msg_003", - } - print("📝 Example 3: User message with image\n") - print(f"Message contains {len(image_user_message['content'])} parts") - pretty_print_dict(image_user_message) - - # Parse in fast mode - memory_items = parser.parse_fast(image_user_message, info) - print(f" 📊 Fast mode generated {len(memory_items)} memory item(s)") - for memory_item in memory_items: - sources = memory_item.metadata.sources - print(f" ✅ Created {len(sources)} SourceMessage(s):") - for i, src in enumerate(sources, 1): - print(f" [{i}] Type: {src.type}, Role: {src.role}") - if src.type == "text": - print(f" Content: {src.content[:50]}...") - elif src.type == "file": - print(f" Doc Path: {src.doc_path}") - elif src.type == "image": - print(f" Image Path: {src.image_path}") - - # Rebuild examples - print("🔄 Rebuilding messages from sources:\n") - rebuilt_simple = parser.rebuild_from_source(sources[1]) - if rebuilt_simple: - pretty_print_dict(rebuilt_simple) - print("✅ UserParser example completed!") - - -if __name__ == "__main__": - main() diff --git a/examples/mem_reader/parser/print_utils.py b/examples/mem_reader/parser/print_utils.py deleted file mode 100644 index 5eba1fa76..000000000 --- a/examples/mem_reader/parser/print_utils.py +++ /dev/null @@ -1,11 +0,0 @@ -import pprint - - -def pretty_print_dict(d: dict): - text = pprint.pformat(d, indent=2, width=120) - border = "═" * (max(len(line) for line in text.split("\n")) + 4) - - print(f"╔{border}╗") - for line in text.split("\n"): - print(f"║ {line.ljust(len(border) - 2)} ║") - print(f"╚{border}╝") diff --git 
a/examples/mem_reader/parser_demos/__init__.py b/examples/mem_reader/parser_demos/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/examples/mem_reader/parser_demos/_base.py b/examples/mem_reader/parser_demos/_base.py new file mode 100644 index 000000000..883578a21 --- /dev/null +++ b/examples/mem_reader/parser_demos/_base.py @@ -0,0 +1,76 @@ +"""Base class and utilities for parser demos.""" + +from typing import Any + +from examples.mem_reader.builders import build_llm_and_embedder +from examples.mem_reader.utils import pretty_print_dict +from memos.memories.textual.item import SourceMessage + + +class BaseParserDemo: + """Base class for all parser demos.""" + + def __init__(self): + print(f"\n🚀 Initializing {self.__class__.__name__}...") + self.embedder, self.llm = build_llm_and_embedder() + self.parser = self.create_parser() + print("✅ Initialization complete.\n") + + def create_parser(self): + """Create and return the specific parser instance.""" + raise NotImplementedError + + def run(self): + """Run the main demo logic.""" + raise NotImplementedError + + def demo_source_creation( + self, message: Any, info: dict, **kwargs + ) -> SourceMessage | list[SourceMessage]: + """Demonstrate creating a SourceMessage from raw input.""" + print(f"📝 Creating SourceMessage from: {str(message)[:100]}...") + source = self.parser.create_source(message, info, **kwargs) + + if isinstance(source, list): + print(f" ✅ Created {len(source)} SourceMessage(s)") + for i, s in enumerate(source): + print(f" [{i}] Type: {s.type}, Role: {getattr(s, 'role', 'N/A')}") + else: + print(" ✅ Created SourceMessage:") + print(f" - Type: {source.type}") + if hasattr(source, "role"): + print(f" - Role: {source.role}") + if source.content: + print(f" - Content: {str(source.content)[:60]}...") + + return source + + def demo_rebuild(self, source: SourceMessage | list[SourceMessage]): + """Demonstrate rebuilding raw message from SourceMessage.""" + print("\n🔄 Rebuilding message from source...") + + # Handle list of sources (take first one for demo if it's a list) + src_to_rebuild = source[0] if isinstance(source, list) else source + + rebuilt = self.parser.rebuild_from_source(src_to_rebuild) + print(" ✅ Rebuilt result:") + if isinstance(rebuilt, dict): + pretty_print_dict(rebuilt) + else: + print(f" {rebuilt}") + + def demo_parse_fast(self, message: Any, info: dict): + """Demonstrate fast parsing (if supported).""" + if not hasattr(self.parser, "parse_fast"): + return + + print("\n⚡️ Running parse_fast...") + try: + memory_items = self.parser.parse_fast(message, info) + print(f" 📊 Generated {len(memory_items)} memory item(s)") + if memory_items: + item = memory_items[0] + print(f" - Memory: {item.memory[:60]}...") + print(f" - Type: {item.metadata.memory_type}") + except Exception as e: + print(f" ⚠️ parse_fast not applicable or failed: {e}") diff --git a/examples/mem_reader/parser_demos/demo_assistant.py b/examples/mem_reader/parser_demos/demo_assistant.py new file mode 100644 index 000000000..7c6a0a1e8 --- /dev/null +++ b/examples/mem_reader/parser_demos/demo_assistant.py @@ -0,0 +1,44 @@ +"""Demo for AssistantParser.""" + +from examples.mem_reader.samples import ASSISTANT_MESSAGE_CASES +from memos.mem_reader.read_multi_modal.assistant_parser import AssistantParser + +from ._base import BaseParserDemo + + +class AssistantParserDemo(BaseParserDemo): + def create_parser(self): + parser = AssistantParser(embedder=self.embedder, llm=self.llm) + + # Workaround: AssistantParser.rebuild_from_source is empty 
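Every concrete demo below follows the same two-hook contract from BaseParserDemo: override create_parser() and run(), and reuse the shared demo_* helpers. A minimal sketch of that pattern (hypothetical MinimalDemo, reusing the StringParser added in this PR):

from memos.mem_reader.read_multi_modal.string_parser import StringParser

from ._base import BaseParserDemo


class MinimalDemo(BaseParserDemo):
    def create_parser(self):
        # Required hook: return the parser instance under demonstration.
        return StringParser(embedder=self.embedder, llm=self.llm)

    def run(self):
        info = {"user_id": "user1", "session_id": "session1"}
        msg = "Hello from a minimal demo."
        source = self.demo_source_creation(msg, info)  # create_source + summary prints
        self.demo_rebuild(source)                      # rebuild_from_source round-trip
        self.demo_parse_fast(msg, info)                # parse_fast, if the parser supports it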
in src. + # Patch it to return content for demo visualization, aligning with legacy behavior. + original_rebuild = parser.rebuild_from_source + + def patched_rebuild(source): + if source.role == "assistant": + # Only handling simple text content as per legacy example scope + return { + "role": "assistant", + "content": source.content, + } + return original_rebuild(source) + + parser.rebuild_from_source = patched_rebuild + return parser + + def run(self): + print("=== AssistantParser Demo ===") + + info = {"user_id": "user1", "session_id": "session1"} + + for case in ASSISTANT_MESSAGE_CASES: + print(f"\n--- Case: {case.description} ---") + for msg in case.scene_data: + source = self.demo_source_creation(msg, info) + self.demo_rebuild(source) + self.demo_parse_fast(msg, info) + + +if __name__ == "__main__": + demo = AssistantParserDemo() + demo.run() diff --git a/examples/mem_reader/parser_demos/demo_file_content.py b/examples/mem_reader/parser_demos/demo_file_content.py new file mode 100644 index 000000000..f0d11b15f --- /dev/null +++ b/examples/mem_reader/parser_demos/demo_file_content.py @@ -0,0 +1,68 @@ +"""Demo for FileContentParser.""" + +from examples.mem_reader.builders import build_file_parser +from examples.mem_reader.samples import FILE_CONTENT_PARTS, FILE_CONTENT_REAL_FILE_PART +from memos.mem_reader.read_multi_modal.file_content_parser import FileContentParser + +from ._base import BaseParserDemo + + +class FileContentParserDemo(BaseParserDemo): + def create_parser(self): + # Initialize the underlying file parser (MarkItDown) + file_parser_impl = build_file_parser() + + return FileContentParser( + embedder=self.embedder, + llm=self.llm, + parser=file_parser_impl, + ) + + def run(self): + print("=== FileContentParser Demo ===") + + info = {"user_id": "user1", "session_id": "session1"} + + print("📝 Processing file content parts:\n") + for i, part in enumerate(FILE_CONTENT_PARTS, 1): + print(f"File Content Part {i}:") + file_info = part.get("file", {}) + print(f" Filename: {file_info.get('filename', 'unknown')}") + print(f" File ID: {file_info.get('file_id', 'N/A')}") + + # Create source from file content part + source = self.parser.create_source(part, info) + + print(" ✅ Created SourceMessage:") + print(f" - Type: {source.type}") + print(f" - Doc Path: {source.doc_path}") + if source.content: + print(f" - Content: {source.content[:60]}...") + if hasattr(source, "original_part") and source.original_part: + print(" - Has original_part: Yes") + print() + + # Rebuild file content part from source + rebuilt = self.parser.rebuild_from_source(source) + print(" 🔄 Rebuilt part:") + print(f" - Type: {rebuilt.get('type')}") + print(f" - Filename: {rebuilt.get('file', {}).get('filename', 'N/A')}") + + print() + + # 6. 
Example with actual file path (if parser is available) + if getattr(self.parser, "parser", None): + print("📄 Testing file parsing with actual file path:\n") + + try: + source = self.parser.create_source(FILE_CONTENT_REAL_FILE_PART, info) + print(f" ✅ Created SourceMessage for file: {source.doc_path}") + # The parser would parse the file content if the file exists + except Exception as e: + print(f" ⚠️ File parsing note: {e}") + print() + + +if __name__ == "__main__": + demo = FileContentParserDemo() + demo.run() diff --git a/examples/mem_reader/parser_demos/demo_image.py b/examples/mem_reader/parser_demos/demo_image.py new file mode 100644 index 000000000..a3ff3b334 --- /dev/null +++ b/examples/mem_reader/parser_demos/demo_image.py @@ -0,0 +1,87 @@ +"""Demo for ImageParser.""" + +import base64 +import copy + +from pathlib import Path + +from examples.mem_reader.samples import IMAGE_MESSAGE_CASES +from memos.mem_reader.read_multi_modal.image_parser import ImageParser + +from ._base import BaseParserDemo + + +class ImageParserDemo(BaseParserDemo): + def create_parser(self): + return ImageParser(embedder=self.embedder, llm=self.llm) + + def run(self): + print("🚀 Initializing ImageParserDemo...") + print("✅ Initialization complete.") + print("=== ImageParser Demo ===\n") + + info = {"user_id": "user1", "session_id": "session1"} + + test_cases = copy.deepcopy(IMAGE_MESSAGE_CASES) + + # Add Local Image (Base64) if exists + local_img_path = Path(__file__).parent.parent / "test_image.png" + if local_img_path.exists(): + with open(local_img_path, "rb") as f: + b64_data = base64.b64encode(f.read()).decode("utf-8") + test_cases.append( + { + "type": "image_url", + "image_url": { + "url": f"data:image/png;base64,{b64_data}", + "detail": "auto", + }, + "_note": "Local Image (Base64)", + } + ) + + for i, msg in enumerate(test_cases, 1): + print(f"--- Case {i}: Image URL message ---") + + # 1. Create SourceMessage + print(f"📝 Creating SourceMessage from: {msg}") + source = self.parser.create_source(msg, info) + print(" ✅ Created SourceMessage:") + print(f" - Type: {source.type}") + print(f" - URL: {getattr(source, 'url', 'N/A')}") + + # 2. Rebuild from Source + print("🔄 Rebuilding message from source...") + rebuilt = self.parser.rebuild_from_source(source) + print(f" ✅ Rebuilt result: {rebuilt}") + + # 3. Fast Parse (Expected Empty) + print("⚡️ Running parse_fast (expecting empty)...") + fast_results = self.parser.parse_fast(msg, info) + if not fast_results: + print(" ✅ Got empty list as expected (images require fine mode).") + else: + print(f" ⚠️ Unexpected fast results: {len(fast_results)} items") + + # 4. Fine Parse (Vision Model) + print("🧠 Running parse_fine (Vision Model)...") + # Note: This might fail if the configured LLM doesn't support vision or if the URL is unreachable + try: + fine_results = self.parser.parse_fine(msg, info) + if not fine_results: + print( + " ⚠️ No memories generated (LLM might not support vision or image inaccessible)." 
+ ) + else: + print(f" 📊 Generated {len(fine_results)} memory item(s):") + for item in fine_results: + print(f" - Memory: {item.memory[:100]}...") + except Exception as e: + print(f" ❌ Error during fine parsing: {e}") + + print() + + +if __name__ == "__main__": + demo = ImageParserDemo() + demo.run() diff --git a/examples/mem_reader/parser_demos/demo_multi_modal.py b/examples/mem_reader/parser_demos/demo_multi_modal.py new file mode 100644 index 000000000..3302ef073 --- /dev/null +++ b/examples/mem_reader/parser_demos/demo_multi_modal.py @@ -0,0 +1,346 @@ +"""Demo for MultiModalParser.""" + +from examples.mem_reader.builders import build_file_parser +from memos.mem_reader.read_multi_modal.multi_modal_parser import MultiModalParser + +from ._base import BaseParserDemo + + +class MultiModalParserDemo(BaseParserDemo): + def create_parser(self): + file_parser = build_file_parser() + return MultiModalParser(embedder=self.embedder, llm=self.llm, parser=file_parser) + + def run(self): + self.parser_selection() + self.parser_instances() + print("\n✅ MultiModalParser example completed!") + + def parser_selection(self): + """Test that different input types return the correct parser.""" + print("=== MultiModalParser Parser Selection Test ===\n") + + # Test cases: different input types + test_cases = [ + # String input -> StringParser + { + "name": "String input", + "message": "This is a simple string message", + "expected_parser_type": "StringParser", + }, + # RawMessageList: text type -> TextContentParser + { + "name": "Text content part (RawMessageList)", + "message": {"type": "text", "text": "This is a text content part"}, + "expected_parser_type": "TextContentParser", + }, + # RawMessageList: file type -> FileContentParser + { + "name": "File content part (RawMessageList)", + "message": { + "type": "file", + "file": { + "filename": "example.pdf", + "file_data": "File content here", + }, + }, + "expected_parser_type": "FileContentParser", + }, + # RawMessageList: image_url type -> ImageParser + { + "name": "Image content part (RawMessageList - image_url type)", + "message": { + "type": "image_url", + "image_url": { + "url": "https://example.com/image.jpg", + "detail": "auto", + }, + }, + "expected_parser_type": "ImageParser", + }, + # RawMessageList: input_audio type -> None (type_parsers uses "audio" key, not "input_audio") + { + "name": "Audio content part (RawMessageList - input_audio type)", + "message": { + "type": "input_audio", + "input_audio": { + "data": "base64_encoded_audio_data", + "format": "mp3", + }, + }, + "expected_parser_type": None, # type_parsers has "audio" key, but message has "input_audio" type + "should_return_none": True, + }, + # MessageList: system role -> SystemParser + { + "name": "System message", + "message": { + "role": "system", + "content": "You are a helpful assistant.", + }, + "expected_parser_type": "SystemParser", + }, + # MessageList: user role -> UserParser + { + "name": "User message (simple)", + "message": { + "role": "user", + "content": "Hello, how are you?", + }, + "expected_parser_type": "UserParser", + }, + # MessageList: user role with multimodal content -> UserParser + { + "name": "User message (multimodal with text and file)", + "message": { + "role": "user", + "content": [ + {"type": "text", "text": "What's in this image?"}, + {"type": "file", "file": {"filename": "image.jpg", "file_data": ""}}, + ], + }, + "expected_parser_type": "UserParser", + }, + # MessageList: user role with image_url content -> UserParser + { + "name": "User message (with 
image_url)", + "message": { + "role": "user", + "content": [ + {"type": "text", "text": "What's in this image?"}, + { + "type": "image_url", + "image_url": {"url": "https://example.com/image.jpg"}, + }, + ], + }, + "expected_parser_type": "UserParser", + }, + # MessageList: user role with input_audio content -> UserParser + { + "name": "User message (with input_audio)", + "message": { + "role": "user", + "content": [ + {"type": "text", "text": "Listen to this audio"}, + { + "type": "input_audio", + "input_audio": {"data": "base64_data", "format": "wav"}, + }, + ], + }, + "expected_parser_type": "UserParser", + }, + # MessageList: assistant role -> AssistantParser + { + "name": "Assistant message (simple)", + "message": { + "role": "assistant", + "content": "I'm doing well, thank you!", + }, + "expected_parser_type": "AssistantParser", + }, + # MessageList: assistant role with tool_calls -> AssistantParser + { + "name": "Assistant message (with tool_calls)", + "message": { + "role": "assistant", + "content": None, + "tool_calls": [ + { + "id": "call_123", + "type": "function", + "function": { + "name": "get_weather", + "arguments": '{"location": "Beijing"}', + }, + } + ], + }, + "expected_parser_type": "AssistantParser", + }, + # MessageList: tool role -> ToolParser + { + "name": "Tool message", + "message": { + "role": "tool", + "content": "Tool execution result", + "tool_call_id": "call_123", + }, + "expected_parser_type": "ToolParser", + }, + ] + + print("Testing parser selection for different input types:\n") + all_passed = True + + for i, test_case in enumerate(test_cases, 1): + message = test_case["message"] + expected_type = test_case.get("expected_parser_type") + test_name = test_case["name"] + should_return_none = test_case.get("should_return_none", False) + + # Get parser using internal method + selected_parser = self.parser._get_parser(message) + + # Handle cases where None is expected + if should_return_none or expected_type is None: + if selected_parser is None: + print(f"✅ Test {i}: {test_name}") + print(" Expected: None (parser not implemented yet or not found)") + print(" Got: None") + if expected_type: + print(f" Note: {expected_type} is not yet implemented") + else: + print(f"⚠️ Test {i}: {test_name}") + print(" Expected: None") + print(f" Got: {type(selected_parser).__name__}") + print(" Note: Parser found but may not be fully implemented") + print() + continue + + # Check if parser was found + if selected_parser is None: + print(f"❌ Test {i}: {test_name}") + print(f" Expected: {expected_type}") + print(" Got: None (parser not found)") + print(f" Message: {message}\n") + all_passed = False + continue + + # Get actual parser type name + actual_type = type(selected_parser).__name__ + + # Verify parser type + if actual_type == expected_type: + print(f"✅ Test {i}: {test_name}") + print(f" Expected: {expected_type}") + print(f" Got: {actual_type}") + print(f" Parser instance: {selected_parser}") + else: + print(f"❌ Test {i}: {test_name}") + print(f" Expected: {expected_type}") + print(f" Got: {actual_type}") + print(f" Message: {message}") + all_passed = False + print() + + # Test edge cases + print("\n=== Testing Edge Cases ===\n") + + edge_cases = [ + { + "name": "Unknown message type (not dict, not str)", + "message": 12345, + "should_return_none": True, + }, + { + "name": "Dict without type or role", + "message": {"content": "Some content"}, + "should_return_none": True, + }, + { + "name": "Unknown type in RawMessageList", + "message": {"type": "unknown_type", "data": 
"some data"}, + "should_return_none": True, + }, + { + "name": "Unknown role in MessageList", + "message": {"role": "unknown_role", "content": "some content"}, + "should_return_none": True, + }, + { + "name": "List of messages (MessageList - not handled by _get_parser)", + "message": [ + {"role": "user", "content": "Message 1"}, + {"role": "assistant", "content": "Message 2"}, + ], + "should_return_none": True, # Lists are handled in parse(), not _get_parser() + }, + { + "name": "List of RawMessageList items (not handled by _get_parser)", + "message": [ + {"type": "text", "text": "Text content 1"}, + {"type": "file", "file": {"filename": "doc.pdf", "file_data": ""}}, + ], + "should_return_none": True, # Lists are handled in parse(), not _get_parser() + }, + ] + + for i, test_case in enumerate(edge_cases, 1): + message = test_case["message"] + should_return_none = test_case["should_return_none"] + test_name = test_case["name"] + + selected_parser = self.parser._get_parser(message) + + if should_return_none: + if selected_parser is None: + print(f"✅ Edge Case {i}: {test_name}") + print(" Correctly returned None") + else: + print(f"❌ Edge Case {i}: {test_name}") + print(" Expected: None") + print(f" Got: {type(selected_parser).__name__}") + all_passed = False + else: + if selected_parser is not None: + print(f"✅ Edge Case {i}: {test_name}") + print(f" Got parser: {type(selected_parser).__name__}") + else: + print(f"❌ Edge Case {i}: {test_name}") + print(" Expected: Parser") + print(" Got: None") + all_passed = False + print() + + # Summary + print("=" * 60) + if all_passed: + print("✅ All tests passed! Parser selection is working correctly.") + else: + print("❌ Some tests failed. Please check the output above.") + print("=" * 60) + + def parser_instances(self): + """Test that parser instances are correctly initialized.""" + print("\n=== Parser Instance Verification ===\n") + + # Verify all parser instances are initialized + parsers_to_check = { + "string_parser": "StringParser", + "system_parser": "SystemParser", + "user_parser": "UserParser", + "assistant_parser": "AssistantParser", + "tool_parser": "ToolParser", + "text_content_parser": "TextContentParser", + "file_content_parser": "FileContentParser", + } + + print("Checking parser instance initialization:\n") + all_initialized = True + + for attr_name, expected_type in parsers_to_check.items(): + parser_instance = getattr(self.parser, attr_name, None) + if parser_instance is None: + print(f"❌ {attr_name}: Not initialized") + all_initialized = False + else: + actual_type = type(parser_instance).__name__ + if actual_type == expected_type: + print(f"✅ {attr_name}: {actual_type}") + else: + print(f"❌ {attr_name}: Expected {expected_type}, got {actual_type}") + all_initialized = False + + print() + if all_initialized: + print("✅ All parser instances are correctly initialized!") + else: + print("❌ Some parser instances are missing or incorrect.") + print() + + +if __name__ == "__main__": + demo = MultiModalParserDemo() + demo.run() diff --git a/examples/mem_reader/parser_demos/demo_string.py b/examples/mem_reader/parser_demos/demo_string.py new file mode 100644 index 000000000..e6bf99f83 --- /dev/null +++ b/examples/mem_reader/parser_demos/demo_string.py @@ -0,0 +1,30 @@ +"""Demo for StringParser.""" + +from examples.mem_reader.samples import STRING_MESSAGE_CASES +from memos.mem_reader.read_multi_modal.string_parser import StringParser + +from ._base import BaseParserDemo + + +class StringParserDemo(BaseParserDemo): + def create_parser(self): + 
return StringParser(embedder=self.embedder, llm=self.llm) + + def run(self): + print("=== StringParser Demo ===") + + info = {"user_id": "user1", "session_id": "session1"} + + for case in STRING_MESSAGE_CASES: + print(f"\n--- Case: {case.description} ---") + print("📝 Processing string messages:\n") + for i, msg in enumerate(case.scene_data, 1): + print(f"Message {i}: {msg[:50]}...") + source = self.demo_source_creation(msg, info) + self.demo_rebuild(source) + print() + + +if __name__ == "__main__": + demo = StringParserDemo() + demo.run() diff --git a/examples/mem_reader/parser_demos/demo_system.py b/examples/mem_reader/parser_demos/demo_system.py new file mode 100644 index 000000000..8655833ef --- /dev/null +++ b/examples/mem_reader/parser_demos/demo_system.py @@ -0,0 +1,41 @@ +"""Demo for SystemParser.""" + +from examples.mem_reader.samples import SYSTEM_MESSAGE_CASES +from memos.mem_reader.read_multi_modal.system_parser import SystemParser + +from ._base import BaseParserDemo + + +class SystemParserDemo(BaseParserDemo): + def create_parser(self): + return SystemParser(embedder=self.embedder, llm=self.llm) + + def run(self): + print("=== SystemParser Demo ===") + + info = {"user_id": "user1", "session_id": "session1"} + + for case in SYSTEM_MESSAGE_CASES: + print(f"\n--- Case: {case.description} ---") + for msg in case.scene_data: + # Workaround: SystemParser in src only supports str/dict content, not list. + # Since we cannot modify src, we flatten list content here. + msg_to_process = msg + if isinstance(msg.get("content"), list): + msg_to_process = msg.copy() + content_list = msg["content"] + merged_text = "".join( + part.get("text", "") + for part in content_list + if isinstance(part, dict) and part.get("type") == "text" + ) + msg_to_process["content"] = merged_text + + source = self.demo_source_creation(msg_to_process, info) + self.demo_rebuild(source) + self.demo_parse_fast(msg_to_process, info) + + +if __name__ == "__main__": + demo = SystemParserDemo() + demo.run() diff --git a/examples/mem_reader/parser_demos/demo_text_content.py b/examples/mem_reader/parser_demos/demo_text_content.py new file mode 100644 index 000000000..4c27c6422 --- /dev/null +++ b/examples/mem_reader/parser_demos/demo_text_content.py @@ -0,0 +1,40 @@ +"""Demo for TextContentParser.""" + +from examples.mem_reader.samples import TEXT_CONTENT_PARTS +from memos.mem_reader.read_multi_modal.text_content_parser import TextContentParser + +from ._base import BaseParserDemo + + +class TextContentParserDemo(BaseParserDemo): + def create_parser(self): + return TextContentParser(embedder=self.embedder, llm=self.llm) + + def run(self): + print("=== TextContentParser Demo ===") + + info = {"user_id": "user1", "session_id": "session1"} + + for i, part in enumerate(TEXT_CONTENT_PARTS, 1): + print(f"\n--- Part {i} ---") + source = self.demo_source_creation(part, info) + + # Legacy example attempts to rebuild and access dict keys directly. + # Since current source returns None, we must handle it safely in the demo. 
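To make the SystemParser flattening workaround above concrete, here is a hypothetical before/after of the transformation applied in SystemParserDemo.run(). Note that the bare "".join concatenates the text parts with no separator, so sentences run together unless the sample texts carry their own trailing spacing:

msg = {
    "role": "system",
    "content": [
        {"type": "text", "text": "You are a helpful assistant."},
        {"type": "text", "text": "Always provide clear and concise answers."},
    ],
}

flattened = msg.copy()
flattened["content"] = "".join(
    part.get("text", "")
    for part in msg["content"]
    if isinstance(part, dict) and part.get("type") == "text"
)
# flattened["content"] is now:
# "You are a helpful assistant.Always provide clear and concise answers."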
+ print("\n🔄 Rebuilding from source...") + rebuilt = self.parser.rebuild_from_source(source) + if rebuilt: + print(" ✅ Rebuilt result:") + if isinstance(rebuilt, dict): + from examples.mem_reader.utils import pretty_print_dict + + pretty_print_dict(rebuilt) + else: + print(f" {rebuilt}") + else: + print(" ⚠️ Rebuilt result is None (not implemented in source)") + + +if __name__ == "__main__": + demo = TextContentParserDemo() + demo.run() diff --git a/examples/mem_reader/parser_demos/demo_tool.py b/examples/mem_reader/parser_demos/demo_tool.py new file mode 100644 index 000000000..991f21751 --- /dev/null +++ b/examples/mem_reader/parser_demos/demo_tool.py @@ -0,0 +1,28 @@ +"""Demo for ToolParser.""" + +from examples.mem_reader.samples import TOOL_MESSAGE_CASES +from memos.mem_reader.read_multi_modal.tool_parser import ToolParser + +from ._base import BaseParserDemo + + +class ToolParserDemo(BaseParserDemo): + def create_parser(self): + return ToolParser(embedder=self.embedder, llm=self.llm) + + def run(self): + print("=== ToolParser Demo ===") + + info = {"user_id": "user1", "session_id": "session1"} + + for case in TOOL_MESSAGE_CASES: + print(f"\n--- Case: {case.description} ---") + for msg in case.scene_data: + source = self.demo_source_creation(msg, info) + self.demo_rebuild(source) + self.demo_parse_fast(msg, info) + + +if __name__ == "__main__": + demo = ToolParserDemo() + demo.run() diff --git a/examples/mem_reader/parser_demos/demo_user.py b/examples/mem_reader/parser_demos/demo_user.py new file mode 100644 index 000000000..16dafcb2d --- /dev/null +++ b/examples/mem_reader/parser_demos/demo_user.py @@ -0,0 +1,43 @@ +"""Demo for UserParser.""" + +from examples.mem_reader.samples import USER_MESSAGE_CASES +from memos.mem_reader.read_multi_modal.user_parser import UserParser + +from ._base import BaseParserDemo + + +class UserParserDemo(BaseParserDemo): + def create_parser(self): + return UserParser(embedder=self.embedder, llm=self.llm) + + def run(self): + print("=== UserParser Demo ===") + + info = {"user_id": "user1", "session_id": "session1"} + + for case in USER_MESSAGE_CASES: + print(f"\n--- Case: {case.description} ---") + for msg in case.scene_data: + sources = self.demo_source_creation(msg, info) + + # Rebuild all sources to show full multimodal support + if isinstance(sources, list): + for i, src in enumerate(sources): + print(f"\n🔄 Rebuilding source part {i + 1} ({src.type})...") + rebuilt = self.parser.rebuild_from_source(src) + print(" ✅ Rebuilt result:") + if isinstance(rebuilt, dict): + from examples.mem_reader.utils import pretty_print_dict + + pretty_print_dict(rebuilt) + else: + print(f" {rebuilt}") + else: + self.demo_rebuild(sources) + + self.demo_parse_fast(msg, info) + + +if __name__ == "__main__": + demo = UserParserDemo() + demo.run() diff --git a/examples/mem_reader/runners/__init__.py b/examples/mem_reader/runners/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/examples/mem_reader/runners/run_multimodal.py b/examples/mem_reader/runners/run_multimodal.py new file mode 100644 index 000000000..e4176e898 --- /dev/null +++ b/examples/mem_reader/runners/run_multimodal.py @@ -0,0 +1,139 @@ +"""Runner for MultiModalStructMemReader.""" + +import argparse +import json +import time +import traceback + +from examples.mem_reader.builders import build_multimodal_reader +from examples.mem_reader.samples import ( + MULTIMODAL_MESSAGE_CASES, + RAW_INPUT_CASES, + STRING_MESSAGE_CASES, +) +from examples.mem_reader.utils import print_memory_item + + +# Map 
example names to test cases +EXAMPLE_MAP = { + "string_message": STRING_MESSAGE_CASES, + "multimodal": MULTIMODAL_MESSAGE_CASES, + "raw_input": RAW_INPUT_CASES, +} + + +def run_multimodal_reader(): + """Run MultiModalStructMemReader with sample data.""" + parser = argparse.ArgumentParser(description="MultiModalStructMemReader Example") + parser.add_argument( + "--example", + type=str, + default="all", + choices=[*list(EXAMPLE_MAP.keys()), "all"], + help="Example to run", + ) + parser.add_argument( + "--mode", + type=str, + default="fine", + choices=["fast", "fine"], + help="Processing mode (fast/fine)", + ) + parser.add_argument( + "--format", + type=str, + default="text", + choices=["text", "json"], + help="Output format", + ) + + args = parser.parse_args() + + print("🚀 Initializing MultiModalStructMemReader...") + reader = build_multimodal_reader() + print("✅ Initialization complete.") + + # Select test cases + if args.example == "all": + test_cases = [] + for cases in EXAMPLE_MAP.values(): + test_cases.extend(cases) + else: + test_cases = EXAMPLE_MAP[args.example] + + print(f"📋 Running {len(test_cases)} test cases in '{args.mode}' mode...\n") + + results = [] + + for i, case in enumerate(test_cases): + print(f"🔹 Case {i + 1}: {case.name} - {case.description}") + + info = case.get_info() + scene_data = case.scene_data + + # Data structure adaptation logic + # Ensure scene_data is List[List[dict]] if it looks like a single conversation + # Most samples in samples.py are wrapped in [], so they are List[List[dict]]. + # Except STRING_MESSAGE_CASES which are List[str]. + if ( + isinstance(scene_data, list) + and len(scene_data) > 0 + and not isinstance(scene_data[0], list) + and not isinstance(scene_data[0], str) + ): + scene_data = [scene_data] + + try: + start_time = time.time() + + # Determine input type + input_type = "chat" + if case in EXAMPLE_MAP["string_message"]: + input_type = "string" + elif case in EXAMPLE_MAP["raw_input"]: + input_type = "raw" + + memories = reader.get_memory( + scene_data, + type=input_type, + mode=args.mode, + info=info, + ) + duration = time.time() - start_time + + result_entry = { + "case": case.name, + "description": case.description, + "duration_seconds": round(duration, 4), + "memory_count": sum(len(m) for m in memories), + "memories": [], + } + + print( + f" ✅ Processed in {duration:.4f}s. Extracted {result_entry['memory_count']} memories." 
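For reference, the argparse setup above makes the runner directly invocable; hypothetical command lines (module path inferred from this diff's file location):

python -m examples.mem_reader.runners.run_multimodal --example multimodal --mode fast
python -m examples.mem_reader.runners.run_multimodal --example all --mode fine --format json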
+ ) + + # Flatten memories for display/output + flat_memories = [item for sublist in memories for item in sublist] + + if args.format == "json": + # Convert TextualMemoryItem to dict + result_entry["memories"] = [ + m.to_dict() if hasattr(m, "to_dict") else str(m) for m in flat_memories + ] + results.append(result_entry) + else: + for item in flat_memories: + print_memory_item(item, indent=6) + print() + + except Exception as e: + print(f" ❌ Error: {e}") + traceback.print_exc() + + if args.format == "json": + print(json.dumps(results, indent=2, ensure_ascii=False)) + + +if __name__ == "__main__": + run_multimodal_reader() diff --git a/examples/mem_reader/runners/run_simple.py b/examples/mem_reader/runners/run_simple.py new file mode 100644 index 000000000..21ecd73a9 --- /dev/null +++ b/examples/mem_reader/runners/run_simple.py @@ -0,0 +1,113 @@ +"""Runner for SimpleStructMemReader.""" + +import time + +from examples.mem_reader.samples import SIMPLE_CHAT_SCENE +from examples.mem_reader.settings import get_reader_config +from examples.mem_reader.utils import print_memory_item +from memos.configs.mem_reader import SimpleStructMemReaderConfig +from memos.mem_reader.simple_struct import SimpleStructMemReader + + +def _print_memory_sets(title: str, memories): + """memories: list[list[TextualMemoryItem]]""" + total = sum(len(mem_list) for mem_list in memories) + print(f"\n{title}") + print(f"📊 Total memory items: {total}") + print(f"✅ Extracted {len(memories)} memory sets.") + for i, memory_list in enumerate(memories): + print(f"\n--- Window/Conversation {i + 1} Memories ({len(memory_list)} items) ---") + for item in memory_list: + print_memory_item(item, indent=2) + + +def run_simple_reader(): + """Run SimpleStructMemReader with sample data.""" + print("🚀 Initializing SimpleStructMemReader from JSON config...") + + # Use settings config instead of hardcoded JSON + reader_config = SimpleStructMemReaderConfig(**get_reader_config()) + reader = SimpleStructMemReader(reader_config) + print("✅ Initialization complete.") + + info = {"user_id": "simple_user", "session_id": "simple_session"} + + print("\n📝 Processing Simple Chat Scene...") + # SIMPLE_CHAT_SCENE: list[list[dict]] (multiple conversations) + + try: + # 1) FINE + print("\n🔄 Testing FINE mode (with LLM)...") + t0 = time.time() + fine_memory = reader.get_memory( + SIMPLE_CHAT_SCENE, + type="chat", + info=info, + mode="fine", + ) + fine_time = time.time() - t0 + print(f"⏱️ Fine mode time: {fine_time:.2f}s") + _print_memory_sets("=== FINE Mode Results ===", fine_memory) + + # 2) FAST + print("\n⚡ Testing FAST mode (no LLM)...") + t0 = time.time() + fast_memory = reader.get_memory( + SIMPLE_CHAT_SCENE, + type="chat", + info=info, + mode="fast", + ) + fast_time = time.time() - t0 + print(f"⏱️ Fast mode time: {fast_time:.2f}s") + _print_memory_sets("=== FAST Mode Results ===", fast_memory) + + # 3) Transfer: FAST -> FINE + # fine_transfer_simple_mem expects a flat list[TextualMemoryItem] + print("\n🔁 Transfer FAST memories -> FINE...") + flat_fast_items = [item for mem_list in fast_memory for item in mem_list] + + t0 = time.time() + transferred = reader.fine_transfer_simple_mem(flat_fast_items, type="chat") + transfer_time = time.time() - t0 + + print(f"⏱️ Transfer time: {transfer_time:.2f}s") + _print_memory_sets("=== TRANSFER Results (FAST -> FINE) ===", transferred) + + # 4) Documents (Fine only) + print("\n📄 Processing Documents (Fine Mode Only)...") + doc_paths = [ + "text1.txt", + "text2.txt", + ] + + try: + t0 = time.time() + doc_memory = 
reader.get_memory( + doc_paths, + type="doc", + info={"user_id": "doc_user", "session_id": "doc_session"}, + mode="fine", + ) + doc_time = time.time() - t0 + print(f"⏱️ Doc fine mode time: {doc_time:.2f}s") + _print_memory_sets("=== DOC Mode Results (FINE) ===", doc_memory) + except Exception as e: + print(f"⚠️ Document processing failed: {e}") + print(" (This is expected if document files don't exist)") + + # 5) Summary (no speedup) + print("\n📈 Summary") + print(f" Fine: {fine_time:.2f}s") + print(f" Fast: {fast_time:.2f}s") + print(f" Transfer: {transfer_time:.2f}s") + + except Exception as e: + print(f"❌ Error during processing: {e}") + import traceback + + traceback.print_exc() + + +if __name__ == "__main__": + run_simple_reader() diff --git a/examples/mem_reader/samples.py b/examples/mem_reader/samples.py new file mode 100644 index 000000000..c29177c58 --- /dev/null +++ b/examples/mem_reader/samples.py @@ -0,0 +1,773 @@ +"""Sample data for MemReader examples. + +This module contains test cases and sample data for various MemReader scenarios, +including simple chat, multimodal messages, file content, and tool usage. +""" + +from dataclasses import dataclass, field +from typing import Any + + +@dataclass +class TestCase: + """Base class for test cases.""" + + name: str + description: str + scene_data: Any + expected_count: dict[str, int] = field(default_factory=dict) + + def get_info(self) -> dict[str, Any]: + """Get info dict for this test case.""" + return { + "user_id": "test_user", + "session_id": f"session_{self.name}", + "test_case": self.name, + } + + +# ============================================================================ +# 1. Simple Chat Samples (for SimpleStructMemReader) +# ============================================================================ + +SIMPLE_CHAT_SCENE = [ + [ + {"role": "user", "chat_time": "3 May 2025", "content": "I'm feeling a bit down today."}, + { + "role": "assistant", + "chat_time": "3 May 2025", + "content": "I'm sorry to hear that. Do you want to talk about what's been going on?", + }, + { + "role": "user", + "chat_time": "3 May 2025", + "content": "It's just been a tough couple of days, you know? Everything feels a bit overwhelming, and I just can't seem to shake it off.", + }, + { + "role": "assistant", + "chat_time": "3 May 2025", + "content": "It sounds like you're going through a lot right now. Sometimes it helps to talk things out. Is there something specific that's been weighing on you, or is it more of a general feeling?", + }, + { + "role": "user", + "chat_time": "3 May 2025", + "content": "It's a mix, I guess. Work's been really stressful, and on top of that, I've been feeling kinda disconnected from the people around me.", + }, + { + "role": "assistant", + "chat_time": "3 May 2025", + "content": "That can be really draining, especially when you're feeling isolated on top of the stress. Do you think there's something from your past that's contributing to how you're feeling now? Sometimes our emotions are tied to older experiences.", + }, + { + "role": "user", + "chat_time": "3 May 2025", + "content": "Yeah, now that you mention it… I've been thinking a lot about my childhood lately. I didn't have the easiest upbringing. My parents were always busy, and I often felt like I had to figure things out on my own. I remember one winter, I got really sick with a high fever. My parents were both working, and I didn't want to bother them. 
I just lay in bed, crying by myself, not knowing what to do.", + }, + { + "role": "assistant", + "chat_time": "3 May 2025", + "content": "That must have been very lonely. Growing up without that support can really make you feel like you have to handle everything on your own. How do you think that experience has affected you now?", + }, + { + "role": "user", + "chat_time": "3 May 2025", + "content": "Yeah, it was really lonely. I remember spending a lot of time doing things on my own, like drawing, because it was my only way to escape. I used to draw animals, and I created my own little world on paper. It was peaceful in a way, just me and my imagination, with no outside distractions.", + }, + { + "role": "assistant", + "chat_time": "3 May 2025", + "content": "Drawing sounds like a beautiful way to express yourself and find some peace in that solitude. It must have been a very quiet and private world for you. Do you think that sense of loneliness from back then has shaped how you connect with others today?", + }, + { + "role": "user", + "chat_time": "3 May 2025", + "content": "I think it has. I've always found it hard to let others in. I feel like I have to handle everything on my own. If I rely on someone else, I feel vulnerable, and I start to feel that loneliness again.", + }, + { + "role": "assistant", + "chat_time": "3 May 2025", + "content": "It sounds like this was a protective mechanism that helped you cope with your feelings of loneliness back then. But as you've grown, it seems like this pattern might not be serving you as well anymore. Have you thought about trying to break this cycle, or finding new ways to connect with others?", + }, + ] +] + +# ============================================================================ +# 2. String Message Cases +# ============================================================================ + +STRING_MESSAGE_CASES = [ + TestCase( + name="string_simple", + description="Simple string message", + scene_data=[ + "This is a simple text message that needs to be parsed.", + "Another string message for processing.", + "StringParser handles plain text strings and converts them to SourceMessage objects.", + ], + expected_count={"fast": 1, "fine": 1}, + ), + TestCase( + name="string_multiple", + description="Multiple string messages", + scene_data=[ + "这是第一条消息。", + "这是第二条消息。", + "这是第三条消息。", + ], + ), +] + +# ============================================================================ +# 3. Chat Message Cases (Standard & Multimodal) +# ============================================================================ + +CHAT_MESSAGE_CASES = [ + TestCase( + name="chat_simple", + description="Simple chat conversation", + scene_data=[ + [ + { + "role": "user", + "content": "Hello, how are you? 
I'm planning to learn Python next week.", + "chat_time": "2025-01-01T10:00:00Z", + "message_id": "chat_simple_u1", + }, + { + "role": "assistant", + "content": "I'm doing well, thank you!", + "chat_time": "2025-01-01T10:00:01Z", + "message_id": "chat_simple_a1", + }, + ] + ], + ), + TestCase( + name="chat_with_system", + description="Chat with system message", + scene_data=[ + [ + { + "role": "system", + "content": "You are a helpful assistant.", + "chat_time": "2025-01-01T10:00:00Z", + "message_id": "chat_sys_s1", + }, + { + "role": "user", + "content": "What's the weather?", + "chat_time": "2025-01-01T10:00:01Z", + "message_id": "chat_sys_u1", + }, + { + "role": "assistant", + "content": "I don't have access to weather data.", + "chat_time": "2025-01-01T10:00:02Z", + "message_id": "chat_sys_a1", + }, + ] + ], + ), + TestCase( + name="chat_multimodal_complex", + description="Complex multimodal chat with text, file, and image", + scene_data=[ + [ + { + "role": "user", + "content": [ + {"type": "text", "text": "我是测试base64"}, + { + "type": "file", + "file": { + "file_data": "Hello World", + "filename": "example.txt", + "file_id": "file_123", + }, + }, + { + "type": "image_url", + "image_url": { + "url": "https://statics.memtensor.com.cn/memos/memos-banner.gif", + "detail": "auto", + }, + }, + ], + "chat_time": "2025-01-01T10:00:03Z", + "message_id": "chat_mm_u1", + } + ] + ], + ), +] + + +# ============================================================================ +# 4. Tool Message Cases +# ============================================================================ + +TOOL_MESSAGE_CASES = [ + TestCase( + name="tool_weather", + description="Weather tool result", + scene_data=[ + { + "role": "user", + "content": "I'm planning a hiking trip to New York this weekend, can you check the weather?", + "chat_time": "2025-01-15T10:00:00", + "message_id": "msg_000", + }, + { + "role": "tool", + "content": '{"result": "Weather in New York: 72°F, sunny"}', + "tool_call_id": "call_abc123", + "chat_time": "2025-01-15T10:00:30", + "message_id": "msg_001", + }, + ], + ), + TestCase( + name="tool_data", + description="Data API result", + scene_data=[ + { + "role": "user", + "content": "Please retrieve my saved reading list items.", + "chat_time": "2025-01-15T10:05:00", + "message_id": "msg_000_2", + }, + { + "role": "tool", + "content": '{"status": "success", "data": {"items": ["The Great Gatsby", "1984", "Python Crash Course"]}}', + "tool_call_id": "call_def456", + "chat_time": "2025-01-15T10:05:30", + "message_id": "msg_002", + }, + ], + ), + TestCase( + name="tool_db", + description="Database query result", + scene_data=[ + { + "role": "user", + "content": "Did I complete the registration for the upcoming workshop?", + "chat_time": "2025-01-15T10:10:00", + "message_id": "msg_000_3", + }, + { + "role": "tool", + "content": "Database query executed successfully. Found registration record for user_id=123: status=confirmed.", + "tool_call_id": "call_ghi789", + "chat_time": "2025-01-15T10:10:30", + "message_id": "msg_003", + }, + ], + ), +] + +# ============================================================================ +# 5. 
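In a full OpenAI-style exchange, each tool result's tool_call_id answers a preceding assistant message carrying tool_calls; the cases above omit that middle message for brevity. A hypothetical complete triplet, reusing call_abc123 from the weather case and the tool_calls shape from the parser-selection tests:

messages = [
    {"role": "user", "content": "Check the weather in New York."},
    {
        "role": "assistant",
        "content": None,
        "tool_calls": [
            {
                "id": "call_abc123",
                "type": "function",
                "function": {"name": "get_weather", "arguments": '{"location": "New York"}'},
            }
        ],
    },
    {
        "role": "tool",
        "tool_call_id": "call_abc123",
        "content": '{"result": "Weather in New York: 72°F, sunny"}',
    },
]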
File Content Samples (for FileContentParser Demo) +# ============================================================================ + +FILE_CONTENT_PARTS = [ + { + "type": "file", + "file": { + "filename": "document.pdf", + "file_id": "file_123", + "file_data": "This is the content extracted from the PDF file...", + }, + }, + { + "type": "file", + "file": { + "filename": "report.docx", + "file_id": "file_456", + "file_data": "Report content: Analysis of Q4 performance...", + }, + }, + { + "type": "file", + "file": { + "filename": "data.csv", + "file_id": "file_789", + "path": "/path/to/data.csv", + }, + }, +] + +FILE_CONTENT_REAL_FILE_PART = { + "type": "file", + "file": { + "filename": "example.txt", + "path": "examples/mem_reader/text1.txt", + }, +} + +# ============================================================================ +# 6. Text Content Samples (for TextContentParser Demo) +# ============================================================================ + +TEXT_CONTENT_PARTS = [ + {"type": "text", "text": "This is a simple text content part."}, + {"type": "text", "text": "TextContentParser handles text parts in multimodal messages."}, +] + +# ============================================================================ +# 7. System Message Samples (for SystemParser Demo) +# ============================================================================ + +SYSTEM_MESSAGE_CASES = [ + TestCase( + name="system_simple", + description="Simple text system message", + scene_data=[ + { + "role": "system", + "content": "You are a helpful assistant that provides clear and concise answers.", + "chat_time": "2025-01-15T10:00:00", + "message_id": "msg_001", + } + ], + ), + TestCase( + name="system_multimodal", + description="Multimodal system message (multiple text parts)", + scene_data=[ + { + "role": "system", + "content": [ + {"type": "text", "text": "You are a helpful assistant."}, + {"type": "text", "text": "Always provide clear and concise answers."}, + {"type": "text", "text": "If you don't know something, say so."}, + ], + "chat_time": "2025-01-15T10:05:00", + "message_id": "msg_002", + } + ], + ), + TestCase( + name="system_structured", + description="Structured system instructions (multiple text parts)", + scene_data=[ + { + "role": "system", + "content": [ + { + "type": "text", + "text": "You are a coding assistant specialized in Python programming.", + }, + {"type": "text", "text": "Always write clean, well-documented code."}, + {"type": "text", "text": "Explain your reasoning when providing solutions."}, + ], + "chat_time": "2025-01-15T10:10:00", + "message_id": "msg_003", + } + ], + ), +] + +# ============================================================================ +# 8. User Message Samples (for UserParser Demo) +# ============================================================================ + +USER_MESSAGE_CASES = [ + TestCase( + name="user_simple", + description="Simple text user message", + scene_data=[ + { + "role": "user", + "content": "I'm feeling a bit down today. 
Can you help me?", + "chat_time": "2025-01-15T10:00:00", + "message_id": "msg_001", + } + ], + ), + TestCase( + name="user_multimodal", + description="Multimodal user message (text + file)", + scene_data=[ + { + "role": "user", + "content": [ + {"type": "text", "text": "Please analyze this document:"}, + { + "type": "file", + "file": { + "filename": "report.pdf", + "file_id": "file_123", + "file_data": "This is the content of the PDF file...", + }, + }, + ], + "chat_time": "2025-01-15T10:05:00", + "message_id": "msg_002", + } + ], + ), + TestCase( + name="user_image", + description="User message with image", + scene_data=[ + { + "role": "user", + "content": [ + {"type": "text", "text": "What's in this image?"}, + {"type": "image_url", "image_url": {"url": "https://example.com/image.jpg"}}, + ], + "chat_time": "2025-01-15T10:10:00", + "message_id": "msg_003", + } + ], + ), +] + +# ============================================================================ +# 9. Assistant Message Samples (for AssistantParser Demo) +# ============================================================================ + +ASSISTANT_MESSAGE_CASES = [ + TestCase( + name="assistant_simple", + description="Simple support message", + scene_data=[ + { + "role": "assistant", + "content": "I'm sorry to hear that you're feeling down. Would you like to talk about what's been going on?", + "chat_time": "2025-01-15T10:00:30", + "message_id": "msg_001", + } + ], + ), + TestCase( + name="assistant_analysis", + description="Document analysis response", + scene_data=[ + { + "role": "assistant", + "content": "Based on the document you provided, I can see several key points: 1) The project timeline, 2) Budget considerations, and 3) Resource allocation.", + "chat_time": "2025-01-15T10:05:30", + "message_id": "msg_002", + } + ], + ), + TestCase( + name="assistant_code", + description="Code solution", + scene_data=[ + { + "role": "assistant", + "content": "Here's a Python solution for your problem:\n```python\ndef solve_problem():\n return 'solution'\n```", + "chat_time": "2025-01-15T10:10:30", + "message_id": "msg_003", + } + ], + ), +] + +# ============================================================================ +# 10. Image Samples (for ImageParser Demo) +# ============================================================================ + +IMAGE_MESSAGE_CASES = [ + { + "type": "image_url", + "image_url": { + "url": "https://statics.memtensor.com.cn/memos/memos-banner.gif", + "detail": "auto", + }, + "_note": "Real Image (MemOS Banner)", + }, + { + "type": "image", + "image_url": "https://example.com/image2.png", + "_note": "Dummy Image (Negative Test)", + }, +] + +# ============================================================================ +# 11. 
Multimodal Message Cases (from Legacy) +# ============================================================================ + +MULTIMODAL_MESSAGE_CASES = [ + TestCase( + name="multimodal_text_image", + description="User message with text and image", + scene_data=[ + [ + { + "role": "user", + "content": [ + {"type": "text", "text": "帮我看看这张图片大概是什么内容?"}, + { + "type": "image_url", + "image_url": { + "url": "https://example.com/mountain_lake.jpg", + "detail": "high", + }, + }, + ], + "chat_time": "2025-11-24T10:20:00Z", + "message_id": "mm-img-1", + } + ] + ], + ), + TestCase( + name="multimodal_text_file", + description="User message with text and file", + scene_data=[ + [ + { + "role": "user", + "content": [ + {"type": "text", "text": "请阅读这个PDF,总结里面的要点。"}, + {"type": "file", "file": {"file_id": "file_123", "filename": "report.pdf"}}, + ], + "chat_time": "2025-11-24T10:21:00Z", + "message_id": "mm-file-1", + } + ] + ], + ), + TestCase( + name="oss_text_file", + description="User message with text and file", + scene_data=[ + [ + { + "role": "user", + "content": [ + {"type": "text", "text": "请阅读这个PDF,总结里面的要点。"}, + { + "type": "file", + "file": { + "file_id": "file_123", + "filename": "report.pdf", + "file_data": "@http://139.196.232.20:9090/graph-test/algorithm/2025_11_13/1763043889_1763043782_PM1%E8%BD%A6%E9%97%B4PMT%E9%9D%B4%E5%8E%8B%E8%BE%B9%E5%8E%8B%E5%8E%8B%E5%8A%9B%E6%97%A0%E6%B3%95%E5%BB%BA%E7%AB%8B%E6%95%85%E9%9A%9C%E6%8A%A5%E5%91%8A20240720.md", + }, + }, + ], + "chat_time": "2025-11-24T10:21:00Z", + "message_id": "mm-file-1", + } + ] + ], + ), + TestCase( + name="pure_data_file", + description="User message with text and file", + scene_data=[ + [ + { + "role": "user", + "content": [ + {"type": "text", "text": "请阅读这个PDF,总结里面的要点。"}, + { + "type": "file", + "file": { + "file_id": "file_123", + "filename": "report.pdf", + "file_data": "明文记忆是系统与用户对话、操作等交互中动态习得,以及外部提供的、可显式管理的结构化知识形态,通常以文档、提示模板、图结构或用户规则等形式存在。它具备编辑性、可共享性与治理友好性,适合存储需要频繁修改、可审计或多方协同使用的信息。 在 MemOS 中,明文记忆可用于动态生成推理上下文、个性化偏好注入、多代理协作共享等场景,成为连接人类输入与模型认知的关键桥梁。激活记忆是指模型在推理过程中产生的瞬时性认知状态,包括 KV cache、隐藏层激活、注意力权重等中间张量结构。它通常用于维持上下文连续性、对话一致性与行为风格控制。 MemOS 将激活记忆抽象为可调度资源,支持按需唤醒、延迟卸载与结构变换。例如,某些上下文状态可以被压缩为“半结构化记忆片段”用于未来复用,也可以在任务级别转化为参数化模块,支持短期记忆的长期化演进。这一机制为模型行为一致性、风格保持与状态持续性提供了基础。", + }, + }, + ], + "chat_time": "2025-11-24T10:21:00Z", + "message_id": "mm-file-1", + } + ] + ], + ), + TestCase( + name="local_data_file", + description="User message with text and file", + scene_data=[ + [ + { + "role": "user", + "content": [ + {"type": "text", "text": "请阅读这个PDF,总结里面的要点。"}, + { + "type": "file", + "file": { + "file_id": "file_123", + "filename": "report.pdf", + "file_data": "./my_local_file/report.pdf", + }, + }, + ], + "chat_time": "2025-11-24T10:21:00Z", + "message_id": "mm-file-1", + } + ] + ], + ), + TestCase( + name="internet_file", + description="User message with text and file", + scene_data=[ + [ + { + "role": "user", + "content": [ + {"type": "text", "text": "请阅读这个PDF,总结里面的要点。"}, + { + "type": "file", + "file": { + "file_id": "file_123", + "filename": "report.pdf", + "file_data": "https://upload.wikimedia.org/wikipedia/commons/c/cb/NLC416-16jh004830-88775_%E7%B4%85%E6%A8%93%E5%A4%A2.pdf", + }, + }, + ], + "chat_time": "2025-11-24T10:21:00Z", + "message_id": "mm-file-1", + } + ] + ], + ), + TestCase( + name="multimodal_mixed", + description="Mixed multimodal message (text + file + image)", + scene_data=[ + [ + { + "role": "user", + "content": [ + {"type": "text", "text": "请同时分析这个报告和图表。"}, + { + "type": "file", + "file": {"file_id": "file_789", 
"filename": "analysis_report.pdf"}, + }, + { + "type": "image_url", + "image_url": {"url": "https://example.com/chart.png", "detail": "auto"}, + }, + ], + "chat_time": "2025-11-24T10:23:00Z", + "message_id": "mixed-1", + } + ] + ], + ), + TestCase( + name="multimodal_audio", + description="Audio-only message", + scene_data=[ + [ + { + "role": "user", + "content": [ + { + "type": "input_audio", + "input_audio": {"data": "base64_encoded_audio_here", "format": "mp3"}, + } + ], + "chat_time": "2025-11-24T10:22:00Z", + "message_id": "audio-1", + } + ] + ], + ), +] + +# ============================================================================ +# 12. Raw Input Cases (from Legacy) +# ============================================================================ + +RAW_INPUT_CASES = [ + TestCase( + name="raw_text_items", + description="Pure text input items without dialog context", + scene_data=[ + [ + {"type": "text", "text": "这是一段独立的文本输入,没有明确的对话上下文。"}, + {"type": "text", "text": "它依然会被抽取和写入明文记忆。"}, + ] + ], + ), + TestCase( + name="raw_file_item", + description="Pure file input by file_id", + scene_data=[ + [{"type": "file", "file": {"file_id": "file_uploaded_123", "filename": "document.pdf"}}] + ], + ), + TestCase( + name="file_only_file_id", + description="File with only file_id parameter", + scene_data=[[{"type": "file", "file": {"file_id": "file_only_id_123"}}]], + ), + TestCase( + name="file_only_filename", + description="File with only filename parameter", + scene_data=[[{"type": "file", "file": {"filename": "document_only.pdf"}}]], + ), + TestCase( + name="file_only_file_data_base64", + description="File with only file_data (base64 encoded)", + scene_data=[ + [ + { + "type": "file", + "file": { + "file_data": "data:application/pdf;base64,JVBERi0xLjQKJdPr6eEKMSAwIG9iago8PAovVHlwZSAvQ2F0YWxvZwovUGFnZXMgMiAwIFIKPj4KZW5kb2JqCjIgMCBvYmoKPDwKL1R5cGUgL1BhZ2VzCi9LaWRzIFszIDAgUl0KL0NvdW50IDEKPD4KZW5kb2JqCjMgMCBvYmoKPDwKL1R5cGUgL1BhZ2UKL1BhcmVudCAyIDAgUgovTWVkaWFCb3ggWzAgMCA2MTIgNzkyXQovUmVzb3VyY2VzIDw8Ci9Gb250IDw8Ci9GMSA0IDAgUgo+Pgo+PgovQ29udGVudHMgNSAwIFIKPj4KZW5kb2JqCjQgMCBvYmoKPDwKL1R5cGUgL0ZvbnQKL1N1YnR5cGUgL1R5cGUxCi9CYXNlRm9udCAvSGVsdmV0aWNhCj4+CmVuZG9iag==" + }, + } + ] + ], + ), + TestCase( + name="file_only_file_data_url", + description="File with only file_data (URL)", + scene_data=[ + [{"type": "file", "file": {"file_data": "https://example.com/documents/report.pdf"}}] + ], + ), + TestCase( + name="file_only_file_data_text", + description="File with only file_data (plain text content)", + scene_data=[ + [ + { + "type": "file", + "file": { + "file_data": "This is a plain text file content. It contains multiple lines.\nLine 2 of the file.\nLine 3 of the file." + }, + } + ] + ], + ), + TestCase( + name="file_file_data_and_file_id", + description="File with file_data and file_id", + scene_data=[ + [ + { + "type": "file", + "file": { + "file_data": "https://example.com/documents/data.pdf", + "file_id": "file_with_data_123", + }, + } + ] + ], + ), + TestCase( + name="file_file_data_and_filename", + description="File with file_data and filename", + scene_data=[ + [ + { + "type": "file", + "file": { + "file_data": "This is file content with filename.", + "filename": "content_file.txt", + }, + } + ] + ], + ), +] diff --git a/examples/mem_reader/settings.py b/examples/mem_reader/settings.py new file mode 100644 index 000000000..0623a91de --- /dev/null +++ b/examples/mem_reader/settings.py @@ -0,0 +1,107 @@ +"""Configuration settings for MemReader examples. 
+ +This module handles environment variables and default configurations for +LLMs, Embedders, and Chunkers used in the examples. +""" + +import os + +from typing import Any + +from dotenv import load_dotenv + + +# Load environment variables from .env file +load_dotenv() + + +def get_llm_config() -> dict[str, Any]: + """Get LLM configuration from environment variables.""" + openai_api_key = os.getenv("OPENAI_API_KEY") + openai_base_url = os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1") + ollama_api_base = os.getenv("OLLAMA_API_BASE", "http://localhost:11434") + + # Use MEMRADER_ variables from .env as primary source + reader_model = os.getenv("MEMRADER_MODEL", os.getenv("MOS_CHAT_MODEL", "gpt-4o-mini")) + reader_api_key = os.getenv("MEMRADER_API_KEY", openai_api_key) + reader_api_base = os.getenv("MEMRADER_API_BASE", openai_base_url) + + # Check for specific MemReader backend override, otherwise assume openai if keys present + llm_backend = os.getenv("MEMRADER_LLM_BACKEND", "openai") + + if llm_backend == "ollama": + return { + "backend": "ollama", + "config": { + "model_name_or_path": reader_model, + "api_base": ollama_api_base, + "temperature": float(os.getenv("MEMRADER_TEMPERATURE", "0.0")), + "remove_think_prefix": os.getenv("MEMRADER_REMOVE_THINK_PREFIX", "true").lower() + == "true", + "max_tokens": int(os.getenv("MEMRADER_MAX_TOKENS", "8192")), + }, + } + else: # openai + return { + "backend": "openai", + "config": { + "model_name_or_path": reader_model, + "api_key": reader_api_key or "EMPTY", + "api_base": reader_api_base, + "temperature": float(os.getenv("MEMRADER_TEMPERATURE", "0.5")), + "remove_think_prefix": os.getenv("MEMRADER_REMOVE_THINK_PREFIX", "true").lower() + == "true", + "max_tokens": int(os.getenv("MEMRADER_MAX_TOKENS", "8192")), + }, + } + + +def get_embedder_config() -> dict[str, Any]: + """Get Embedder configuration from environment variables.""" + openai_api_key = os.getenv("OPENAI_API_KEY") + openai_base_url = os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1") + ollama_api_base = os.getenv("OLLAMA_API_BASE", "http://localhost:11434") + + # .env uses MOS_EMBEDDER_BACKEND + embedder_backend = os.getenv("MOS_EMBEDDER_BACKEND", "ollama") + + if embedder_backend == "universal_api": + return { + "backend": "universal_api", + "config": { + "provider": os.getenv("MOS_EMBEDDER_PROVIDER", "openai"), + "api_key": os.getenv("MOS_EMBEDDER_API_KEY", openai_api_key or "sk-xxxx"), + "model_name_or_path": os.getenv("MOS_EMBEDDER_MODEL", "text-embedding-3-large"), + "base_url": os.getenv("MOS_EMBEDDER_API_BASE", openai_base_url), + }, + } + else: # ollama + return { + "backend": "ollama", + "config": { + "model_name_or_path": os.getenv("MOS_EMBEDDER_MODEL", "nomic-embed-text:latest"), + "api_base": ollama_api_base, + }, + } + + +def get_chunker_config() -> dict[str, Any]: + """Get Chunker configuration from environment variables.""" + return { + "backend": "sentence", + "config": { + "tokenizer_or_token_counter": "gpt2", + "chunk_size": 512, + "chunk_overlap": 128, + "min_sentences_per_chunk": 1, + }, + } + + +def get_reader_config() -> dict[str, Any]: + """Get full reader configuration.""" + return { + "llm": get_llm_config(), + "embedder": get_embedder_config(), + "chunker": get_chunker_config(), + } diff --git a/examples/mem_reader/simple_struct_reader.py b/examples/mem_reader/simple_struct_reader.py deleted file mode 100644 index 72dc5fd05..000000000 --- a/examples/mem_reader/simple_struct_reader.py +++ /dev/null @@ -1,831 +0,0 @@ -import argparse -import json 
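A minimal sketch of consuming the getters above, assuming it is run from examples/mem_reader/ so that `settings` is importable; every value set below is a placeholder, not a repository default:

import os

from settings import get_reader_config  # the module introduced above

# The getters read the environment at call time, so defaults may be set here
# (setdefault leaves anything already provided via .env untouched).
os.environ.setdefault("MEMRADER_LLM_BACKEND", "openai")
os.environ.setdefault("MEMRADER_MODEL", "gpt-4o-mini")
os.environ.setdefault("MEMRADER_API_KEY", "sk-placeholder")  # placeholder, not a real key
os.environ.setdefault("MOS_EMBEDDER_BACKEND", "ollama")
os.environ.setdefault("MOS_EMBEDDER_MODEL", "nomic-embed-text:latest")
os.environ.setdefault("OLLAMA_API_BASE", "http://localhost:11434")

config = get_reader_config()
print(config["llm"]["backend"], config["embedder"]["backend"])  # e.g. "openai ollama"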
-import os -import time - -from typing import Any - -from dotenv import load_dotenv - -from memos.configs.mem_reader import SimpleStructMemReaderConfig -from memos.mem_reader.simple_struct import SimpleStructMemReader -from memos.memories.textual.item import ( - SourceMessage, - TextualMemoryItem, - TreeNodeTextualMemoryMetadata, -) - - -# Load environment variables from .env file -load_dotenv() - - -def print_textual_memory_item( - item: TextualMemoryItem, max_memory_length: int = 200, indent: int = 0 -): - """ - Print a TextualMemoryItem in a structured format. - - Args: - item: The TextualMemoryItem to print - max_memory_length: Maximum length of memory content to display - indent: Number of spaces for indentation - """ - indent_str = " " * indent - print(f"{indent_str}{'=' * 80}") - print(f"{indent_str}TextualMemoryItem") - print(f"{indent_str}{'=' * 80}") - print(f"{indent_str}ID: {item.id}") - print( - f"{indent_str}Memory: {item.memory[:max_memory_length]}{'...' if len(item.memory) > max_memory_length else ''}" - ) - print(f"{indent_str}Memory Length: {len(item.memory)} characters") - - # Print metadata - if hasattr(item.metadata, "user_id"): - print(f"{indent_str}User ID: {item.metadata.user_id}") - if hasattr(item.metadata, "session_id"): - print(f"{indent_str}Session ID: {item.metadata.session_id}") - if hasattr(item.metadata, "memory_type"): - print(f"{indent_str}Memory Type: {item.metadata.memory_type}") - if hasattr(item.metadata, "type"): - print(f"{indent_str}Type: {item.metadata.type}") - if hasattr(item.metadata, "key") and item.metadata.key: - print(f"{indent_str}Key: {item.metadata.key}") - if hasattr(item.metadata, "tags") and item.metadata.tags: - print(f"{indent_str}Tags: {', '.join(item.metadata.tags)}") - if hasattr(item.metadata, "confidence"): - print(f"{indent_str}Confidence: {item.metadata.confidence}") - if hasattr(item.metadata, "status"): - print(f"{indent_str}Status: {item.metadata.status}") - if hasattr(item.metadata, "background") and item.metadata.background: - bg_preview = ( - item.metadata.background[:100] + "..." - if len(item.metadata.background) > 100 - else item.metadata.background - ) - print(f"{indent_str}Background: {bg_preview}") - if hasattr(item.metadata, "sources") and item.metadata.sources: - print(f"{indent_str}Sources ({len(item.metadata.sources)}):") - for i, source in enumerate(item.metadata.sources): - source_info = [] - if hasattr(source, "type"): - source_info.append(f"type={source.type}") - if hasattr(source, "role"): - source_info.append(f"role={source.role}") - if hasattr(source, "doc_path"): - source_info.append(f"doc_path={source.doc_path}") - if hasattr(source, "chat_time"): - source_info.append(f"chat_time={source.chat_time}") - if hasattr(source, "index") and source.index is not None: - source_info.append(f"index={source.index}") - print(f"{indent_str} [{i + 1}] {', '.join(source_info)}") - if hasattr(item.metadata, "created_at"): - print(f"{indent_str}Created At: {item.metadata.created_at}") - if hasattr(item.metadata, "updated_at"): - print(f"{indent_str}Updated At: {item.metadata.updated_at}") - if hasattr(item.metadata, "embedding") and item.metadata.embedding: - print(f"{indent_str}Embedding: [vector of {len(item.metadata.embedding)} dimensions]") - print(f"{indent_str}{'=' * 80}\n") - - -def print_textual_memory_item_json(item: TextualMemoryItem, indent: int = 2): - """ - Print a TextualMemoryItem as formatted JSON. 
- - Args: - item: The TextualMemoryItem to print - indent: JSON indentation level - """ - # Convert to dict and exclude embedding for readability - data = item.to_dict() - if "metadata" in data and "embedding" in data["metadata"]: - embedding = data["metadata"]["embedding"] - if embedding: - data["metadata"]["embedding"] = f"[vector of {len(embedding)} dimensions]" - - print(json.dumps(data, indent=indent, ensure_ascii=False)) - - -def get_reader_config() -> dict[str, Any]: - """ - Get reader configuration from environment variables. - - Returns a dictionary that can be used to create SimpleStructMemReaderConfig. - Similar to APIConfig.get_reader_config() in server_router_api.py. - - Returns: - Configuration dictionary for SimpleStructMemReaderConfig - """ - openai_api_key = os.getenv("OPENAI_API_KEY") - openai_base_url = os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1") - ollama_api_base = os.getenv("OLLAMA_API_BASE", "http://localhost:11434") - - # Get LLM backend and config - llm_backend = os.getenv("MEM_READER_LLM_BACKEND", "openai") - if llm_backend == "ollama": - llm_config = { - "backend": "ollama", - "config": { - "model_name_or_path": os.getenv("MEM_READER_LLM_MODEL", "qwen3:0.6b"), - "api_base": ollama_api_base, - "temperature": float(os.getenv("MEM_READER_LLM_TEMPERATURE", "0.0")), - "remove_think_prefix": os.getenv( - "MEM_READER_LLM_REMOVE_THINK_PREFIX", "true" - ).lower() - == "true", - "max_tokens": int(os.getenv("MEM_READER_LLM_MAX_TOKENS", "8192")), - }, - } - else: # openai - llm_config = { - "backend": "openai", - "config": { - "model_name_or_path": os.getenv("MEM_READER_LLM_MODEL", "gpt-4o-mini"), - "api_key": openai_api_key or os.getenv("MEMRADER_API_KEY", "EMPTY"), - "api_base": openai_base_url, - "temperature": float(os.getenv("MEM_READER_LLM_TEMPERATURE", "0.5")), - "remove_think_prefix": os.getenv( - "MEM_READER_LLM_REMOVE_THINK_PREFIX", "true" - ).lower() - == "true", - "max_tokens": int(os.getenv("MEM_READER_LLM_MAX_TOKENS", "8192")), - }, - } - - # Get embedder backend and config - embedder_backend = os.getenv( - "MEM_READER_EMBEDDER_BACKEND", os.getenv("MOS_EMBEDDER_BACKEND", "ollama") - ) - if embedder_backend == "universal_api": - embedder_config = { - "backend": "universal_api", - "config": { - "provider": os.getenv( - "MEM_READER_EMBEDDER_PROVIDER", os.getenv("MOS_EMBEDDER_PROVIDER", "openai") - ), - "api_key": os.getenv( - "MEM_READER_EMBEDDER_API_KEY", - os.getenv("MOS_EMBEDDER_API_KEY", openai_api_key or "sk-xxxx"), - ), - "model_name_or_path": os.getenv( - "MEM_READER_EMBEDDER_MODEL", - os.getenv("MOS_EMBEDDER_MODEL", "text-embedding-3-large"), - ), - "base_url": os.getenv( - "MEM_READER_EMBEDDER_API_BASE", - os.getenv("MOS_EMBEDDER_API_BASE", openai_base_url), - ), - }, - } - else: # ollama - embedder_config = { - "backend": "ollama", - "config": { - "model_name_or_path": os.getenv( - "MEM_READER_EMBEDDER_MODEL", - os.getenv("MOS_EMBEDDER_MODEL", "nomic-embed-text:latest"), - ), - "api_base": ollama_api_base, - }, - } - - return { - "llm": llm_config, - "embedder": embedder_config, - "chunker": { - "backend": "sentence", - "config": { - "tokenizer_or_token_counter": "gpt2", - "chunk_size": 512, - "chunk_overlap": 128, - "min_sentences_per_chunk": 1, - }, - }, - } - - -def main(): - # Parse command line arguments - parser = argparse.ArgumentParser(description="Test Mem-Reader with structured output") - parser.add_argument( - "--format", - choices=["text", "json"], - default="text", - help="Output format: 'text' for structured text, 'json' for 
JSON format (default: text)", - ) - parser.add_argument( - "--max-memory-length", - type=int, - default=200, - help="Maximum length of memory content to display in text format (default: 200)", - ) - args = parser.parse_args() - - # 1. Create Configuration from environment variables or JSON file - # Try to get config from environment variables first - openai_api_key = os.getenv("OPENAI_API_KEY") - if openai_api_key: - # Use environment variables (similar to server_router_api.py) - config_dict = get_reader_config() - reader_config = SimpleStructMemReaderConfig.model_validate(config_dict) - else: - # Fall back to JSON file - reader_config = SimpleStructMemReaderConfig.from_json_file( - "examples/data/config/simple_struct_reader_config.json" - ) - reader = SimpleStructMemReader(reader_config) - - # 2. Define scene data - scene_data = [ - [ - {"role": "user", "chat_time": "3 May 2025", "content": "I'm feeling a bit down today."}, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "I'm sorry to hear that. Do you want to talk about what's been going on?", - }, - { - "role": "user", - "chat_time": "3 May 2025", - "content": "It's just been a tough couple of days, you know? Everything feels a bit overwhelming, and I just can't seem to shake it off.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "It sounds like you're going through a lot right now. Sometimes it helps to talk things out. Is there something specific that's been weighing on you, or is it more of a general feeling?", - }, - { - "role": "user", - "chat_time": "3 May 2025", - "content": "It's a mix, I guess. Work's been really stressful, and on top of that, I've been feeling kinda disconnected from the people around me.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "That can be really draining, especially when you're feeling isolated on top of the stress. Do you think there's something from your past that's contributing to how you're feeling now? Sometimes our emotions are tied to older experiences.", - }, - { - "role": "user", - "chat_time": "3 May 2025", - "content": "Yeah, now that you mention it… I've been thinking a lot about my childhood lately. I didn't have the easiest upbringing. My parents were always busy, and I often felt like I had to figure things out on my own. I remember one winter, I got really sick with a high fever. My parents were both working, and I didn't want to bother them. I just lay in bed, crying by myself, not knowing what to do.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "That must have been very lonely. Growing up without that support can really make you feel like you have to handle everything on your own. How do you think that experience has affected you now?", - }, - { - "role": "user", - "chat_time": "3 May 2025", - "content": "Yeah, it was really lonely. I remember spending a lot of time doing things on my own, like drawing, because it was my only way to escape. I used to draw animals, and I created my own little world on paper. It was peaceful in a way, just me and my imagination, with no outside distractions.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "Drawing sounds like a beautiful way to express yourself and find some peace in that solitude. It must have been a very quiet and private world for you. 
Do you think that sense of loneliness from back then has shaped how you connect with others today?", - }, - { - "role": "user", - "chat_time": "3 May 2025", - "content": "I think it has. I've always found it hard to let others in. I feel like I have to handle everything on my own. If I rely on someone else, I feel vulnerable, and I start to feel that loneliness again.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "It sounds like this was a protective mechanism that helped you cope with your feelings of loneliness back then. But as you've grown, it seems like this pattern might not be serving you as well anymore. Have you thought about trying to break this cycle, or finding new ways to connect with others?", - }, - { - "role": "user", - "chat_time": "3 May 2025", - "content": "I've thought about it, though it's hard. But I also know I can't keep going like this forever. Maybe it's time to seek some help.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "It's a big step, but a positive one. Exploring your emotions and past experiences could really help you understand how they've shaped your present. Therapy could be a great place to start, as it would help you connect with both yourself and others. Would you be open to trying it?", - }, - { - "role": "user", - "chat_time": "3 May 2025", - "content": "I'm open to trying. Thank you for encouraging me. It feels like it might be a good starting point for me.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "I'm really glad to hear that. Taking that first step is always difficult, but you've made an important decision. If you need help finding the right therapist, or just want to talk, I'm here for you.", - }, - ], - [ - { - "role": "user", - "chat_time": "3 May 2025", - "content": "This week I've been feeling a bit off, especially when playing tennis. My body just doesn't feel right.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "It sounds like you've been having some physical discomfort lately. Where specifically are you feeling unwell? Is there something particular about tennis that's been difficult?", - }, - { - "role": "user", - "chat_time": "3 May 2025", - "content": "I just feel a bit fatigued, and every time I play tennis, my movements feel off. I used to play pretty well, but lately, I've been feeling weak and my movements aren't as coordinated as they used to be.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "Physical discomfort can definitely affect performance, especially in a sport like tennis that requires quick reactions and coordination. Have you noticed anything specific that might be causing these changes? Could it be overtraining, or is there another physical issue making you feel off?", - }, - { - "role": "user", - "chat_time": "3 May 2025", - "content": "I think it might be due to stress and lack of sleep recently. I just feel drained all the time. Plus, I've been frustrated with my tennis performance, which makes me feel even worse, like I'm stuck in a vicious cycle.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "Stress and poor sleep quality can definitely affect your physical performance, especially in sports. When you're also feeling down about your performance, it's easy to fall into a negative cycle. 
Have you tried anything to relieve some of the stress, or to improve your sleep?", - }, - { - "role": "user", - "chat_time": "3 May 2025", - "content": "I've tried some relaxation techniques, like deep breathing and simple meditation, but it doesn't seem to help much. I still can't focus during tennis, and I feel like my mind is distracted by other things.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "Relaxation techniques like deep breathing and meditation can help reduce stress, but sometimes we need a more structured approach to improve both our physical and mental state. For example, you might want to focus on specific aspects of your tennis training, or adjust your rest and recovery time. Have you thought about setting smaller goals for yourself? Like focusing on a specific tennis move each day, rather than expecting perfection right away? That might help you gradually regain confidence.", - }, - { - "role": "user", - "chat_time": "3 May 2025", - "content": "That's a good idea. I think I've been expecting too much from myself, which just makes me more anxious. Maybe I can start with small practice sessions and gradually find my rhythm again.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "Exactly! Setting small and clear goals will help reduce pressure and give you a sense of accomplishment. And don't forget that rest is just as important—taking time to recover will help you feel better physically and mentally. Both physical and mental recovery are key to improving your performance and health.", - }, - { - "role": "user", - "chat_time": "3 May 2025", - "content": "Thanks, I feel like I need to rethink a few things. Maybe I really need to be kinder to myself and take things step by step.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "You're on the right track. Rethinking things and giving yourself space and patience is so important. You've already taken the first step, now just keep moving forward, one step at a time. If you need anything, I'm always here to help.", - }, - ], - [ - { - "role": "user", - "chat_time": "3 May 2025", - "content": "Yesterday at 3 PM I had a meeting with my team about the new project. We discussed the timeline and I'm concerned we won't meet the deadline of December 15th.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "I understand your concern about the project timeline. What specific challenges do you think might prevent your team from meeting the December 15th deadline?", - }, - { - "role": "user", - "chat_time": "3 May 2025", - "content": "We have too many requirements to implement in just two weeks. The backend team needs at least until December 10th to finish their part, which leaves almost no time for testing before the deadline.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "That's definitely a tight timeline. Have you considered prioritizing the requirements to focus on the most critical features first? You might also want to discuss with stakeholders about possibly extending the deadline or reducing the scope for this initial release.", - }, - { - "role": "user", - "chat_time": "3 May 2025", - "content": "That's a good point. We have a follow-up meeting scheduled for tomorrow morning at 9:30 AM. 
I'll suggest we prioritize features and maybe propose pushing the deadline to January 5th next year.", - }, - { - "role": "assistant", - "chat_time": "3 May 2025", - "content": "That sounds like a solid approach. Coming to the meeting with both a prioritization strategy and a specific alternative deadline shows you've thought through the problem carefully. It might also help to bring some data about similar past projects and how long they took to complete, if you have that information available.", - }, - ], - ] - - print("=== Mem-Reader Fast vs Fine Mode Comparison ===\n") - - # 3. Test Fine Mode (default) - print("🔄 Testing FINE mode (default, with LLM processing)...") - start_time = time.time() - fine_memory = reader.get_memory( - scene_data, type="chat", info={"user_id": "user1", "session_id": "session1"}, mode="fine" - ) - fine_time = time.time() - start_time - print(f"✅ Fine mode completed in {fine_time:.2f} seconds") - print(f"📊 Fine mode generated {sum(len(mem_list) for mem_list in fine_memory)} memory items") - - # 4. Test Fast Mode - print("\n⚡ Testing FAST mode (quick processing, no LLM calls)...") - start_time = time.time() - fast_memory = reader.get_memory( - scene_data, type="chat", info={"user_id": "user1", "session_id": "session1"}, mode="fast" - ) - fast_time = time.time() - start_time - print(f"✅ Fast mode completed in {fast_time:.2f} seconds") - print(f"📊 Fast mode generated {sum(len(mem_list) for mem_list in fast_memory)} memory items") - - # 5. Performance Comparison - print("\n📈 Performance Comparison:") - print(f" Fine mode: {fine_time:.2f}s") - print(f" Fast mode: {fast_time:.2f}s") - print(f" Speed improvement: {fine_time / fast_time:.1f}x faster") - - # 6. Show sample results from both modes - print("\n🔍 Sample Results Comparison:") - print("\n--- FINE Mode Results (first 3 items) ---") - for i, mem_list in enumerate(fine_memory[:3]): - for j, mem_item in enumerate(mem_list[:2]): # Show first 2 items from each list - print(f"\n[Scene {i}][Item {j}]") - if args.format == "json": - print_textual_memory_item_json(mem_item, indent=2) - else: - print_textual_memory_item( - mem_item, max_memory_length=args.max_memory_length, indent=2 - ) - - print("\n--- FAST Mode Results (first 3 items) ---") - for i, mem_list in enumerate(fast_memory[:3]): - for j, mem_item in enumerate(mem_list[:2]): # Show first 2 items from each list - print(f"\n[Scene {i}][Item {j}]") - if args.format == "json": - print_textual_memory_item_json(mem_item, indent=2) - else: - print_textual_memory_item( - mem_item, max_memory_length=args.max_memory_length, indent=2 - ) - - # 7. Example of transfer fast mode result into fine result - fast_mode_memories = [ - TextualMemoryItem( - id="4553141b-3a33-4548-b779-e677ec797a9f", - memory="user: Nate:Oh cool! I might check that one out some time soon! I do love watching classics.\nassistant: Joanna:Yep, that movie is awesome. I first watched it around 3 years ago. I even went out and got a physical copy!\nuser: Nate:Sounds cool! Have you seen it a lot? sounds like you know the movie well!\nassistant: Joanna:A few times. It's one of my favorites! I really like the idea and the acting.\nuser: Nate:Cool! I'll definitely check it out. Thanks for the recommendation!\nassistant: Joanna:No problem, Nate! 
Let me know if you like it!\n", - metadata=TreeNodeTextualMemoryMetadata( - user_id="nate_test", - session_id="root_session", - status="activated", - type="fact", - key="user: Nate:Oh cool", - confidence=0.9900000095367432, - source=None, - tags=["mode:fast", "lang:en", "role:assistant", "role:user"], - visibility=None, - updated_at="2025-10-16T17:16:30.094877+08:00", - memory_type="LongTermMemory", - sources=[ - SourceMessage( - type="chat", - role="user", - chat_time="7:31 pm on 21 January, 2022", - message_id=None, - content=None, - doc_path=None, - index=0, - ), - SourceMessage( - type="chat", - role="assistant", - chat_time="7:31 pm on 21 January, 2022", - message_id=None, - content=None, - doc_path=None, - index=1, - ), - SourceMessage( - type="chat", - role="user", - chat_time="7:31 pm on 21 January, 2022", - message_id=None, - content=None, - doc_path=None, - index=2, - ), - SourceMessage( - type="chat", - role="assistant", - chat_time="7:31 pm on 21 January, 2022", - message_id=None, - content=None, - doc_path=None, - index=3, - ), - SourceMessage( - type="chat", - role="user", - chat_time="7:31 pm on 21 January, 2022", - message_id=None, - content=None, - doc_path=None, - index=4, - ), - SourceMessage( - type="chat", - role="assistant", - chat_time="7:31 pm on 21 January, 2022", - message_id=None, - content=None, - doc_path=None, - index=5, - ), - ], - embedding=None, - created_at="2025-10-16T17:16:30.094919+08:00", - usage=[], - background="", - ), - ), - TextualMemoryItem( - id="752e42fa-92b6-491a-a430-6864a7730fba", - memory="user: Nate:It was! How about you? Do you have any hobbies you love?\nassistant: Joanna:Yeah! Besides writing, I also enjoy reading, watching movies, and exploring nature. Anything else you enjoy doing, Nate?\nuser: Nate:Playing video games and watching movies are my main hobbies.\nassistant: Joanna:Cool, Nate! So we both have similar interests. What type of movies do you like best?\nuser: Nate:I love action and sci-fi movies, the effects are so cool! What about you, what's your favorite genre?\nassistant: Joanna:I'm all about dramas and romcoms. I love getting immersed in the feelings and plots.\nuser: Nate:Wow, movies can be so powerful! Do you have any recommendations for me?\nassistant: Joanna:Yeah, totally! Have you seen this romantic drama that's all about memory and relationships? It's such a good one.\nuser: Nate:Oh cool! I might check that one out some time soon! I do love watching classics.\nassistant: Joanna:Yep, that movie is awesome. I first watched it around 3 years ago. 
I even went out and got a physical copy!\n", - metadata=TreeNodeTextualMemoryMetadata( - user_id="nate_test", - session_id="root_session", - status="activated", - type="fact", - key="user: Nate:It was", - confidence=0.9900000095367432, - source=None, - tags=["mode:fast", "lang:en", "role:assistant", "role:user"], - visibility=None, - updated_at="2025-10-16T17:16:30.095726+08:00", - memory_type="LongTermMemory", - sources=[ - SourceMessage( - type="chat", - role="user", - chat_time="7:31 pm on 21 January, 2022", - message_id=None, - content=None, - doc_path=None, - index=0, - ), - SourceMessage( - type="chat", - role="assistant", - chat_time="7:31 pm on 21 January, 2022", - message_id=None, - content=None, - doc_path=None, - index=1, - ), - SourceMessage( - type="chat", - role="user", - chat_time="7:31 pm on 21 January, 2022", - message_id=None, - content=None, - doc_path=None, - index=2, - ), - SourceMessage( - type="chat", - role="assistant", - chat_time="7:31 pm on 21 January, 2022", - message_id=None, - content=None, - doc_path=None, - index=3, - ), - SourceMessage( - type="chat", - role="user", - chat_time="7:31 pm on 21 January, 2022", - message_id=None, - content=None, - doc_path=None, - index=4, - ), - SourceMessage( - type="chat", - role="assistant", - chat_time="7:31 pm on 21 January, 2022", - message_id=None, - content=None, - doc_path=None, - index=5, - ), - SourceMessage( - type="chat", - role="user", - chat_time="7:31 pm on 21 January, 2022", - message_id=None, - content=None, - doc_path=None, - index=6, - ), - SourceMessage( - type="chat", - role="assistant", - chat_time="7:31 pm on 21 January, 2022", - message_id=None, - content=None, - doc_path=None, - index=7, - ), - SourceMessage( - type="chat", - role="user", - chat_time="7:31 pm on 21 January, 2022", - message_id=None, - content=None, - doc_path=None, - index=8, - ), - SourceMessage( - type="chat", - role="assistant", - chat_time="7:31 pm on 21 January, 2022", - message_id=None, - content=None, - doc_path=None, - index=9, - ), - ], - embedding=None, - created_at="2025-10-16T17:16:30.095767+08:00", - usage=[], - background="", - ), - ), - TextualMemoryItem( - id="c9cf448c-deee-43a8-bafd-eb15fde535b2", - memory="user: Nate:Hey Joanna! Long time no see! What's up? Anything fun going on?\nassistant: Joanna:Hey Nate! Long time no see! I've been working on a project lately - it's been pretty cool. What about you - any fun projects or hobbies?\nuser: Nate:Hey Joanna! That's cool! I won my first video game tournament last week - so exciting!\nassistant: Joanna:Wow Nate! Congrats on winning! Tell me more - what game was it?\nuser: Nate:Thanks! it's a team shooter game.\nassistant: Joanna:Wow, great job! What was is called?\nuser: Nate:The game was called Counter-Strike: Global Offensive, and me and my team had a blast to the very end!\nassistant: Joanna:Cool, Nate! Sounds like a fun experience, even if I'm not into games.\nuser: Nate:It was! How about you? Do you have any hobbies you love?\nassistant: Joanna:Yeah! Besides writing, I also enjoy reading, watching movies, and exploring nature. 
Anything else you enjoy doing, Nate?\n", - metadata=TreeNodeTextualMemoryMetadata( - user_id="nate_test", - session_id="root_session", - status="activated", - type="fact", - key="user: Nate:Hey Joanna", - confidence=0.9900000095367432, - source=None, - tags=["mode:fast", "lang:en", "role:assistant", "role:user"], - visibility=None, - updated_at="2025-10-16T17:16:30.098208+08:00", - memory_type="LongTermMemory", - sources=[ - SourceMessage( - type="chat", - role="user", - chat_time="7:31 pm on 21 January, 2022", - message_id=None, - content=None, - doc_path=None, - index=0, - ), - SourceMessage( - type="chat", - role="assistant", - chat_time="7:31 pm on 21 January, 2022", - message_id=None, - content=None, - doc_path=None, - index=1, - ), - SourceMessage( - type="chat", - role="user", - chat_time="7:31 pm on 21 January, 2022", - message_id=None, - content=None, - doc_path=None, - index=2, - ), - SourceMessage( - type="chat", - role="assistant", - chat_time="7:31 pm on 21 January, 2022", - message_id=None, - content=None, - doc_path=None, - index=3, - ), - SourceMessage( - type="chat", - role="user", - chat_time="7:31 pm on 21 January, 2022", - message_id=None, - content=None, - doc_path=None, - index=4, - ), - SourceMessage( - type="chat", - role="assistant", - chat_time="7:31 pm on 21 January, 2022", - message_id=None, - content=None, - doc_path=None, - index=5, - ), - SourceMessage( - type="chat", - role="user", - chat_time="7:31 pm on 21 January, 2022", - message_id=None, - content=None, - doc_path=None, - index=6, - ), - SourceMessage( - type="chat", - role="assistant", - chat_time="7:31 pm on 21 January, 2022", - message_id=None, - content=None, - doc_path=None, - index=7, - ), - SourceMessage( - type="chat", - role="user", - chat_time="7:31 pm on 21 January, 2022", - message_id=None, - content=None, - doc_path=None, - index=8, - ), - SourceMessage( - type="chat", - role="assistant", - chat_time="7:31 pm on 21 January, 2022", - message_id=None, - content=None, - doc_path=None, - index=9, - ), - ], - embedding=None, - created_at="2025-10-16T17:16:30.098246+08:00", - usage=[], - background="", - ), - ), - ] - fine_memories = reader.fine_transfer_simple_mem(fast_mode_memories, type="chat") - print("\n--- Transfer Mode Results (first 3 items) ---") - for i, mem_list in enumerate(fine_memories[:3]): - for j, mem_item in enumerate(mem_list[:2]): # Show first 2 items from each list - print(f"\n[Scene {i}][Item {j}]") - if args.format == "json": - print_textual_memory_item_json(mem_item, indent=2) - else: - print_textual_memory_item( - mem_item, max_memory_length=args.max_memory_length, indent=2 - ) - - # 7. Example of processing documents (only in fine mode) - print("\n=== Processing Documents (Fine Mode Only) ===") - # Example document paths (you should replace these with actual document paths) - doc_paths = [ - "text1.txt", - "text2.txt", - ] - - try: - # 6. 
Acquiring memories from documents - doc_memory = reader.get_memory( - doc_paths, - "doc", - info={ - "user_id": "1111", - "session_id": "2222", - }, - mode="fine", - ) - total_items = sum(len(mem_list) for mem_list in doc_memory) - print(f"\n📄 Document Memory generated {total_items} items") - - # Print structured document memory items - if doc_memory: - print("\n--- Document Memory Items (first 3) ---") - for i, mem_list in enumerate(doc_memory[:3]): - for j, mem_item in enumerate(mem_list[:3]): # Show first 3 items from each document - print(f"\n[Document {i}][Item {j}]") - if args.format == "json": - print_textual_memory_item_json(mem_item, indent=2) - else: - print_textual_memory_item( - mem_item, max_memory_length=args.max_memory_length, indent=2 - ) - except Exception as e: - print(f"⚠️ Document processing failed: {e}") - print(" (This is expected if document files don't exist)") - - print("\n🎯 Summary:") - print(f" • Fast mode: {fast_time:.2f}s - Quick processing, no LLM calls") - print(f" • Fine mode: {fine_time:.2f}s - Full LLM processing for better understanding") - print(" • Use fast mode for: Real-time applications, high-throughput scenarios") - print(" • Use fine mode for: Quality analysis, detailed memory extraction") - - -if __name__ == "__main__": - main() diff --git a/examples/mem_reader/utils.py b/examples/mem_reader/utils.py new file mode 100644 index 000000000..0ac3e4d6a --- /dev/null +++ b/examples/mem_reader/utils.py @@ -0,0 +1,84 @@ +"""Utility functions for MemReader examples.""" + +import json +import pprint + +from typing import Any + +from memos.memories.textual.item import TextualMemoryItem + + +def _truncate(s: str, max_len: int | None) -> str: + if max_len is None or len(s) <= max_len: + return s + return s[:max_len] + "..." + + +def sanitize_for_print(obj: Any, *, max_str_len: int | None = 500) -> Any: + """ + Recursively sanitize data for pretty printing: + - Long strings are truncated + - Strings keep real newlines (so box printer can render multi-line) + """ + if isinstance(obj, str): + return _truncate(obj, max_str_len) + if isinstance(obj, dict): + return {k: sanitize_for_print(v, max_str_len=max_str_len) for k, v in obj.items()} + if isinstance(obj, list): + return [sanitize_for_print(v, max_str_len=max_str_len) for v in obj] + if isinstance(obj, tuple): + return tuple(sanitize_for_print(v, max_str_len=max_str_len) for v in obj) + return obj + + +def pretty_print_dict(d: dict, *, max_str_len: int | None = 500): + """Print a dictionary in a pretty bordered box (handles multiline strings).""" + d2 = sanitize_for_print(d, max_str_len=max_str_len) + + # Prefer JSON formatting if possible, fallback to pprint + try: + text = json.dumps(d2, indent=2, ensure_ascii=False) + except (TypeError, ValueError): + text = pprint.pformat(d2, indent=2, width=120) + + # Expand the JSON/pprint output into lines + lines: list[str] = [] + for line in text.splitlines(): + # If a line itself contains literal "\n" sequences (rare), leave it; + # real newlines are already split by splitlines(). 
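+        #   (each kept element of "lines" therefore maps to exactly one
+        #   display row in the bordered box printed below)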
+        lines.append(line)
+
+    # Prevent extremely wide boxes (optional safety)
+    max_len = max(len(line) for line in lines) if lines else 0
+    border = "═" * (max_len + 4)
+
+    print(f"╔{border}╗")
+    for line in lines:
+        print(f"║ {line.ljust(max_len)} ║")
+    print(f"╚{border}╝")
+
+
+def print_memory_item(
+    item: TextualMemoryItem,
+    indent: int = 0,
+    max_memory_length: int | None = 300,  # None = no truncation
+):
+    """Print a TextualMemoryItem in a structured format."""
+    prefix = " " * indent
+    print(f"{prefix}--- Memory Item ---")
+    print(f"{prefix}Type: {item.metadata.memory_type}")
+
+    mem = item.memory or ""
+    mem_preview = mem if max_memory_length is None else _truncate(mem, max_memory_length)
+    print(f"{prefix}Memory: {mem_preview}")
+
+    if item.metadata.tags:
+        print(f"{prefix}Tags: {item.metadata.tags}")
+
+    if item.metadata.confidence is not None:
+        print(f"{prefix}Confidence: {item.metadata.confidence}")
+
+    if hasattr(item.metadata, "sources") and item.metadata.sources:
+        print(f"{prefix}Sources ({len(item.metadata.sources)}):")
+        for source in item.metadata.sources:
+            print(f"{prefix} - {source.type} (role: {getattr(source, 'role', 'N/A')})")
diff --git a/examples/mem_scheduler/api_w_scheduler.py b/examples/mem_scheduler/api_w_scheduler.py
index 871dd0258..b02161c12 100644
--- a/examples/mem_scheduler/api_w_scheduler.py
+++ b/examples/mem_scheduler/api_w_scheduler.py
@@ -1,85 +1,112 @@
+"""
+# Prerequisites & Configuration
+# To run this script, you must have the following services
+# running and configured in your .env file (or environment variables):
+# 1. Redis (Required for TaskStatusTracker and Scheduler Queue)
+# 2. Graph Database (Required for Memory Storage)
+# 3. Vector Database (Required if using Neo4j Community or Preference Memory)
+"""
+
+import sys
+
+from pathlib import Path
 from time import sleep
-from memos.api.handlers.scheduler_handler import (
+
+FILE_PATH = Path(__file__).absolute()
+BASE_DIR = FILE_PATH.parent.parent.parent
+sys.path.insert(0, str(BASE_DIR))  # Enable execution from any working directory
+
+from memos.api.handlers.scheduler_handler import (  # noqa: E402
     handle_scheduler_status,
     handle_scheduler_wait,
 )
-from memos.api.routers.server_router import mem_scheduler, status_tracker
-from memos.mem_scheduler.schemas.message_schemas import ScheduleMessageItem
-
-
-# Debug: Print scheduler configuration
-print("=== Scheduler Configuration Debug ===")
-print(f"Scheduler type: {type(mem_scheduler).__name__}")
-print(f"Config: {mem_scheduler.config}")
-print(f"use_redis_queue: {mem_scheduler.use_redis_queue}")
-print(f"Queue type: {type(mem_scheduler.memos_message_queue).__name__}")
-print(f"Queue maxsize: {getattr(mem_scheduler.memos_message_queue, 'maxsize', 'N/A')}")
-print("=====================================\n")
-
-queue = mem_scheduler.memos_message_queue
-queue.clear()
-
-
-# 1. Define a handler function
-def my_test_handler(messages: list[ScheduleMessageItem]):
-    print(f"My test handler received {len(messages)} messages:")
-    for msg in messages:
-        print(f" my_test_handler - {msg.item_id}: {msg.content}")
-        user_status_running = handle_scheduler_status(
-            user_id=msg.user_id, status_tracker=status_tracker
-        )
-        print("[Monitor] Status after submit:", user_status_running)
+from memos.api.routers.server_router import mem_scheduler, status_tracker  # noqa: E402
+from memos.mem_scheduler.schemas.message_schemas import ScheduleMessageItem  # noqa: E402
-# 2.
Register the handler TEST_HANDLER_LABEL = "test_handler" TEST_USER_ID = "test_user" -mem_scheduler.register_handlers({TEST_HANDLER_LABEL: my_test_handler}) - -# 2.1 Monitor global scheduler status before submitting tasks -global_status_before = handle_scheduler_status(user_id=TEST_USER_ID, status_tracker=status_tracker) -print("[Monitor] Global status before submit:", global_status_before) - -# 3. Create messages -messages_to_send = [ - ScheduleMessageItem( - item_id=f"test_item_{i}", - user_id=TEST_USER_ID, - mem_cube_id="test_mem_cube", - label=TEST_HANDLER_LABEL, - content=f"This is test message {i}", +USER_MEM_CUBE = "test_mem_cube" + + +def run_with_scheduler_api(): + # Debug: Print scheduler configuration + print("=== Scheduler Configuration Debug ===") + print(f"Scheduler type: {type(mem_scheduler).__name__}") + print(f"Config: {mem_scheduler.config}") + print(f"use_redis_queue: {mem_scheduler.use_redis_queue}") + print(f"Queue type: {type(mem_scheduler.memos_message_queue).__name__}") + print(f"Queue maxsize: {getattr(mem_scheduler.memos_message_queue, 'maxsize', 'N/A')}") + print("=====================================\n") + + queue = mem_scheduler.memos_message_queue + queue.clear() + + # 1. Define a handler function + def my_test_handler(messages: list[ScheduleMessageItem]): + print(f"My test handler received {len(messages)} messages:") + for msg in messages: + print(f" my_test_handler - {msg.item_id}: {msg.content}") + user_status_running = handle_scheduler_status( + user_id=msg.user_id, status_tracker=status_tracker + ) + print("[Monitor] Status after submit:", user_status_running) + + # 2. Register the handler + mem_scheduler.register_handlers({TEST_HANDLER_LABEL: my_test_handler}) + + # 2.1 Monitor global scheduler status before submitting tasks + global_status_before = handle_scheduler_status( + user_id=TEST_USER_ID, status_tracker=status_tracker ) - for i in range(5) -] + print("[Monitor] Global status before submit:", global_status_before) + + # 3. Create messages + messages_to_send = [ + ScheduleMessageItem( + item_id=f"test_item_{i}", + user_id=TEST_USER_ID, + mem_cube_id="test_mem_cube", + label=TEST_HANDLER_LABEL, + content=f"This is test message {i}", + ) + for i in range(5) + ] -# 5. Submit messages -for mes in messages_to_send: - print(f"Submitting message {mes.item_id} to the scheduler...") - mem_scheduler.submit_messages([mes]) - sleep(1) + # 5. Submit messages + for mes in messages_to_send: + print(f"Submitting message {mes.item_id} to the scheduler...") + mem_scheduler.submit_messages([mes]) + sleep(1) -# 5.1 Monitor status for specific mem_cube while running -USER_MEM_CUBE = "test_mem_cube" + # 5.1 Monitor status for specific mem_cube while running + # 6. Wait for messages to be processed (limited to 100 checks) -# 6. 
Wait for messages to be processed (limited to 100 checks) + user_status_running = handle_scheduler_status( + user_id=TEST_USER_ID, status_tracker=status_tracker + ) + print(f"[Monitor] Status for {USER_MEM_CUBE} after submit:", user_status_running) + + # 6.1 Wait until idle for specific mem_cube via handler + wait_result = handle_scheduler_wait( + user_name=TEST_USER_ID, + status_tracker=status_tracker, + timeout_seconds=120.0, + poll_interval=0.5, + ) + print(f"[Monitor] Wait result for {USER_MEM_CUBE}:", wait_result) -user_status_running = handle_scheduler_status(user_id=TEST_USER_ID, status_tracker=status_tracker) -print(f"[Monitor] Status for {USER_MEM_CUBE} after submit:", user_status_running) + # 6.2 Monitor global scheduler status after processing + global_status_after = handle_scheduler_status( + user_id=TEST_USER_ID, status_tracker=status_tracker + ) + print("[Monitor] Global status after processing:", global_status_after) -# 6.1 Wait until idle for specific mem_cube via handler -wait_result = handle_scheduler_wait( - user_name=TEST_USER_ID, - status_tracker=status_tracker, - timeout_seconds=120.0, - poll_interval=0.5, -) -print(f"[Monitor] Wait result for {USER_MEM_CUBE}:", wait_result) + # 7. Stop the scheduler + print("Stopping the scheduler...") + mem_scheduler.stop() -# 6.2 Monitor global scheduler status after processing -global_status_after = handle_scheduler_status(user_id=TEST_USER_ID, status_tracker=status_tracker) -print("[Monitor] Global status after processing:", global_status_after) -# 7. Stop the scheduler -print("Stopping the scheduler...") -mem_scheduler.stop() +if __name__ == "__main__": + run_with_scheduler_api() diff --git a/examples/mem_scheduler/debug_text_mem_replace.py b/examples/mem_scheduler/debug_text_mem_replace.py deleted file mode 100644 index a5de8e572..000000000 --- a/examples/mem_scheduler/debug_text_mem_replace.py +++ /dev/null @@ -1,109 +0,0 @@ -import json -import shutil -import sys - -from pathlib import Path - -from memos_w_scheduler_for_test import init_task - -from memos.configs.mem_cube import GeneralMemCubeConfig -from memos.configs.mem_os import MOSConfig -from memos.configs.mem_scheduler import AuthConfig -from memos.log import get_logger -from memos.mem_cube.general import GeneralMemCube -from memos.mem_scheduler.analyzer.mos_for_test_scheduler import MOSForTestScheduler - - -FILE_PATH = Path(__file__).absolute() -BASE_DIR = FILE_PATH.parent.parent.parent -sys.path.insert(0, str(BASE_DIR)) - -# Enable execution from any working directory - -logger = get_logger(__name__) - -if __name__ == "__main__": - # set up data - conversations, questions = init_task() - - # set configs - mos_config = MOSConfig.from_yaml_file( - f"{BASE_DIR}/examples/data/config/mem_scheduler/memos_config_w_optimized_scheduler.yaml" - ) - - mem_cube_config = GeneralMemCubeConfig.from_yaml_file( - f"{BASE_DIR}/examples/data/config/mem_scheduler/mem_cube_config_neo4j.yaml" - ) - - # default local graphdb uri - if AuthConfig.default_config_exists(): - auth_config = AuthConfig.from_local_config() - - mos_config.mem_reader.config.llm.config.api_key = auth_config.openai.api_key - mos_config.mem_reader.config.llm.config.api_base = auth_config.openai.base_url - - mem_cube_config.text_mem.config.graph_db.config.uri = auth_config.graph_db.uri - mem_cube_config.text_mem.config.graph_db.config.user = auth_config.graph_db.user - mem_cube_config.text_mem.config.graph_db.config.password = auth_config.graph_db.password - mem_cube_config.text_mem.config.graph_db.config.db_name = 
auth_config.graph_db.db_name - mem_cube_config.text_mem.config.graph_db.config.auto_create = ( - auth_config.graph_db.auto_create - ) - - # Initialization - mos = MOSForTestScheduler(mos_config) - - user_id = "user_1" - mos.create_user(user_id) - - mem_cube_id = "mem_cube_5" - mem_cube_name_or_path = f"{BASE_DIR}/outputs/mem_scheduler/{user_id}/{mem_cube_id}" - - if Path(mem_cube_name_or_path).exists(): - shutil.rmtree(mem_cube_name_or_path) - print(f"{mem_cube_name_or_path} is not empty, and has been removed.") - - mem_cube = GeneralMemCube(mem_cube_config) - mem_cube.dump(mem_cube_name_or_path) - mos.register_mem_cube( - mem_cube_name_or_path=mem_cube_name_or_path, mem_cube_id=mem_cube_id, user_id=user_id - ) - - mos.add(conversations, user_id=user_id, mem_cube_id=mem_cube_id) - - # Add interfering conversations - file_path = Path(f"{BASE_DIR}/examples/data/mem_scheduler/scene_data.json") - scene_data = json.load(file_path.open("r", encoding="utf-8")) - mos.add(scene_data[0], user_id=user_id, mem_cube_id=mem_cube_id) - mos.add(scene_data[1], user_id=user_id, mem_cube_id=mem_cube_id) - - # Test the replace_working_memory functionality - print("\n--- Testing replace_working_memory ---") - - # Get current working memories - text_mem_base = mem_cube.text_mem - if text_mem_base is not None: - working_memories_before = text_mem_base.get_working_memory() - print(f"Working memories before replacement: {len(working_memories_before)}") - - # Create filtered memories (simulate what the scheduler would do) - # Keep only memories related to Max - filtered_memories = [working_memories_before[1], working_memories_before[4]] - - text_mem_base.replace_working_memory(memories=filtered_memories) - - # Check working memory after replacement - working_memories_after = text_mem_base.get_working_memory() - print(f"Working memories after replacement: {len(working_memories_after)}") - - if len(working_memories_after) == len(filtered_memories): - print("✅ SUCCESS: Working memory count matches filtered memories") - else: - print( - f"❌ FAILED: Expected {len(filtered_memories)}, got {len(working_memories_after)}" - ) - - else: - print("❌ text_mem is None - not properly initialized") - - mos.mem_scheduler.stop() diff --git a/examples/mem_scheduler/memos_w_scheduler.py b/examples/mem_scheduler/memos_w_scheduler.py index ef7d853df..b7250a677 100644 --- a/examples/mem_scheduler/memos_w_scheduler.py +++ b/examples/mem_scheduler/memos_w_scheduler.py @@ -1,32 +1,43 @@ -import re -import shutil +# Prerequisites & Configuration +# To run this script, you must have the following services +# running and configured in your .env file (or environment variables): +# 1. Redis (Required for TaskStatusTracker and Scheduler Queue) +# 2. Graph Database (Required for Memory Storage) +# 3. 
Vector Database (Required if using Neo4j Community or Preference Memory) + +import asyncio +import json +import os import sys +import time -from datetime import datetime from pathlib import Path -from memos.configs.mem_cube import GeneralMemCubeConfig -from memos.configs.mem_os import MOSConfig -from memos.configs.mem_scheduler import AuthConfig -from memos.log import get_logger -from memos.mem_cube.general import GeneralMemCube -from memos.mem_os.main import MOS -from memos.mem_scheduler.schemas.message_schemas import ScheduleLogForWebItem -from memos.mem_scheduler.schemas.task_schemas import ( - ADD_TASK_LABEL, - ANSWER_TASK_LABEL, - MEM_ARCHIVE_TASK_LABEL, - MEM_ORGANIZE_TASK_LABEL, - MEM_UPDATE_TASK_LABEL, - QUERY_TASK_LABEL, -) -from memos.mem_scheduler.utils.filter_utils import transform_name_to_key - +# Setup paths before imports that depend on them FILE_PATH = Path(__file__).absolute() BASE_DIR = FILE_PATH.parent.parent.parent sys.path.insert(0, str(BASE_DIR)) # Enable execution from any working directory +# Set environment variables before importing server_router to ensure components are initialized correctly +os.environ["ENABLE_CHAT_API"] = "true" + +from memos.api.product_models import APIADDRequest, ChatPlaygroundRequest # noqa: E402 + +# Import from server_router for initialization +from memos.api.routers.server_router import ( # noqa: E402 + add_handler, + chat_stream_playground, + mem_scheduler, +) +from memos.log import get_logger # noqa: E402 +from memos.mem_scheduler.schemas.message_schemas import ScheduleMessageItem # noqa: E402 +from memos.mem_scheduler.schemas.task_schemas import ( # noqa: E402 + MEM_UPDATE_TASK_LABEL, + QUERY_TASK_LABEL, +) + + logger = get_logger(__name__) @@ -73,150 +84,106 @@ def init_task(): return conversations, questions -def _truncate_with_rules(text: str) -> str: - has_cjk = bool(re.search(r"[\u4e00-\u9fff]", text)) - limit = 32 if has_cjk else 64 - normalized = text.strip().replace("\n", " ") - if len(normalized) <= limit: - return normalized - return normalized[:limit] + "..." 
- - -def _format_title(ts: datetime, title_text: str) -> str: - return f"{ts.astimezone().strftime('%H:%M:%S')} {title_text}" - - -def _cube_display_from(mem_cube_id: str) -> str: - if "public" in (mem_cube_id or "").lower(): - return "PublicMemCube" - return "UserMemCube" - - -_TYPE_SHORT = { - "LongTermMemory": "LTM", - "UserMemory": "User", - "WorkingMemory": "Working", - "ActivationMemory": "Activation", - "ParameterMemory": "Parameter", - "TextMemory": "Text", - "UserInput": "Input", - "NotApplicable": "NA", -} - - -def _format_entry(item: ScheduleLogForWebItem) -> tuple[str, str]: - cube_display = getattr(item, "memcube_name", None) or _cube_display_from(item.mem_cube_id) - label = item.label - content = item.log_content or "" - memcube_content = getattr(item, "memcube_log_content", None) or [] - memory_len = getattr(item, "memory_len", None) or len(memcube_content) or 1 - - def _first_content() -> str: - if memcube_content: - return memcube_content[0].get("content", "") or content - return content - - if label in ("addMessage", QUERY_TASK_LABEL, ANSWER_TASK_LABEL): - target_cube = cube_display.replace("MemCube", "") - title = _format_title(item.timestamp, f"addMessages to {target_cube} MemCube") - return title, _truncate_with_rules(_first_content()) - - if label in ("addMemory", ADD_TASK_LABEL): - title = _format_title(item.timestamp, f"{cube_display} added {memory_len} memories") - return title, _truncate_with_rules(_first_content()) - - if label in ("updateMemory", MEM_UPDATE_TASK_LABEL): - title = _format_title(item.timestamp, f"{cube_display} updated {memory_len} memories") - return title, _truncate_with_rules(_first_content()) - - if label in ("archiveMemory", MEM_ARCHIVE_TASK_LABEL): - title = _format_title(item.timestamp, f"{cube_display} archived {memory_len} memories") - return title, _truncate_with_rules(_first_content()) - - if label in ("mergeMemory", MEM_ORGANIZE_TASK_LABEL): - title = _format_title(item.timestamp, f"{cube_display} merged {memory_len} memories") - merged = [c for c in memcube_content if c.get("type") == "merged"] - post = [c for c in memcube_content if c.get("type") == "postMerge"] - parts = [] - if merged: - parts.append("Merged: " + " | ".join(c.get("content", "") for c in merged)) - if post: - parts.append("Result: " + " | ".join(c.get("content", "") for c in post)) - detail = " ".join(parts) if parts else _first_content() - return title, _truncate_with_rules(detail) - - if label == "scheduleMemory": - title = _format_title(item.timestamp, f"{cube_display} scheduled {memory_len} memories") - if memcube_content: - return title, _truncate_with_rules(memcube_content[0].get("content", "")) - key = transform_name_to_key(content) - from_short = _TYPE_SHORT.get(item.from_memory_type, item.from_memory_type) - to_short = _TYPE_SHORT.get(item.to_memory_type, item.to_memory_type) - return title, _truncate_with_rules(f"[{from_short}→{to_short}] {key}: {content}") - - title = _format_title(item.timestamp, f"{cube_display} event") - return title, _truncate_with_rules(_first_content()) - - -def run_with_scheduler_init(): - print("==== run_with_automatic_scheduler_init ====") - conversations, questions = init_task() - - # set configs - mos_config = MOSConfig.from_yaml_file( - f"{BASE_DIR}/examples/data/config/mem_scheduler/memos_config_w_optimized_scheduler.yaml" - ) - - mem_cube_config = GeneralMemCubeConfig.from_yaml_file( - f"{BASE_DIR}/examples/data/config/mem_scheduler/mem_cube_config_neo4j.yaml" - ) - - # default local graphdb uri - if 
AuthConfig.default_config_exists(): - auth_config = AuthConfig.from_local_config() +working_memories = [] + + +# Define custom query handler function +def custom_query_handler(messages: list[ScheduleMessageItem]): + for msg in messages: + # Print user input content + print(f"\n[scheduler] User input query: {msg.content}") + # Manually construct a new message with MEM_UPDATE label to trigger memory update + new_msg = msg.model_copy(update={"label": MEM_UPDATE_TASK_LABEL}) + # Submit the message to the scheduler for processing + mem_scheduler.submit_messages([new_msg]) + + +# Define custom memory update handler function +def custom_mem_update_handler(messages: list[ScheduleMessageItem]): + global working_memories + search_args = {} + top_k = 2 + for msg in messages: + # Search for memories relevant to the current content in text memory (return top_k=2) + results = mem_scheduler.retriever.search( + query=msg.content, + user_id=msg.user_id, + mem_cube_id=msg.mem_cube_id, + mem_cube=mem_scheduler.current_mem_cube, + top_k=top_k, + method=mem_scheduler.search_method, + search_args=search_args, + ) + working_memories.extend(results) + working_memories = working_memories[-5:] + for mem in results: + print(f"\n[scheduler] Retrieved memory: {mem.memory}") - mos_config.mem_reader.config.llm.config.api_key = auth_config.openai.api_key - mos_config.mem_reader.config.llm.config.api_base = auth_config.openai.base_url - mem_cube_config.text_mem.config.graph_db.config.uri = auth_config.graph_db.uri - mem_cube_config.text_mem.config.graph_db.config.user = auth_config.graph_db.user - mem_cube_config.text_mem.config.graph_db.config.password = auth_config.graph_db.password - mem_cube_config.text_mem.config.graph_db.config.db_name = auth_config.graph_db.db_name - mem_cube_config.text_mem.config.graph_db.config.auto_create = ( - auth_config.graph_db.auto_create - ) +async def run_with_scheduler(): + print("==== run_with_automatic_scheduler_init ====") + conversations, questions = init_task() - # Initialization - mos = MOS(mos_config) + # Initialization using server_router components + # Configs are loaded via environment variables in init_server() user_id = "user_1" - mos.create_user(user_id) - mem_cube_id = "mem_cube_5" - mem_cube_name_or_path = f"{BASE_DIR}/outputs/mem_scheduler/{user_id}/{mem_cube_id}" - if Path(mem_cube_name_or_path).exists(): - shutil.rmtree(mem_cube_name_or_path) - print(f"{mem_cube_name_or_path} is not empty, and has been removed.") + print(f"Adding conversations for user {user_id}...") - mem_cube = GeneralMemCube(mem_cube_config) - mem_cube.dump(mem_cube_name_or_path) - mos.register_mem_cube( - mem_cube_name_or_path=mem_cube_name_or_path, mem_cube_id=mem_cube_id, user_id=user_id + # Use add_handler to add memories + add_req = APIADDRequest( + user_id=user_id, + writable_cube_ids=[mem_cube_id], + messages=conversations, + async_mode="sync", # Use sync mode for immediate addition in this example ) - - mos.add(conversations, user_id=user_id, mem_cube_id=mem_cube_id) - mos.mem_scheduler.current_mem_cube = mem_cube + add_handler.handle_add_memories(add_req) for item in questions: print("===== Chat Start =====") query = item["question"] print(f"Query:\n {query}\n") - response = mos.chat(query=query, user_id=user_id) - print(f"Answer:\n {response}\n") - mos.mem_scheduler.stop() + # Use chat_handler to chat + chat_req = ChatPlaygroundRequest( + user_id=user_id, + query=query, + readable_cube_ids=[mem_cube_id], + writable_cube_ids=[mem_cube_id], + ) + response = chat_stream_playground(chat_req) + 
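+        # The playground endpoint streams Server-Sent Events: frames are
+        # separated by blank lines ("\n\n"), each payload line starts with
+        # "data: " followed by a JSON object, and chunks with type == "text"
+        # carry the incremental answer tokens accumulated below.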
+ answer = "" + buffer = "" + async for chunk in response.body_iterator: + if isinstance(chunk, bytes): + chunk = chunk.decode("utf-8") + buffer += chunk + while "\n\n" in buffer: + msg, buffer = buffer.split("\n\n", 1) + for line in msg.split("\n"): + if line.startswith("data: "): + json_str = line[6:] + try: + data = json.loads(json_str) + if data.get("type") == "text": + answer += data["data"] + except json.JSONDecodeError: + pass + print(f"\nAnswer: {answer}") if __name__ == "__main__": - run_with_scheduler_init() + mem_scheduler.register_handlers( + { + QUERY_TASK_LABEL: custom_query_handler, # Query task + MEM_UPDATE_TASK_LABEL: custom_mem_update_handler, # Memory update task + } + ) + + asyncio.run(run_with_scheduler()) + + time.sleep(20) + mem_scheduler.stop() diff --git a/examples/mem_scheduler/memos_w_scheduler_for_test.py b/examples/mem_scheduler/memos_w_scheduler_for_test.py deleted file mode 100644 index 2e135f127..000000000 --- a/examples/mem_scheduler/memos_w_scheduler_for_test.py +++ /dev/null @@ -1,88 +0,0 @@ -import json -import shutil -import sys - -from pathlib import Path - -from memos_w_scheduler import init_task - -from memos.configs.mem_cube import GeneralMemCubeConfig -from memos.configs.mem_os import MOSConfig -from memos.configs.mem_scheduler import AuthConfig -from memos.log import get_logger -from memos.mem_cube.general import GeneralMemCube -from memos.mem_scheduler.analyzer.mos_for_test_scheduler import MOSForTestScheduler - - -FILE_PATH = Path(__file__).absolute() -BASE_DIR = FILE_PATH.parent.parent.parent -sys.path.insert(0, str(BASE_DIR)) - -# Enable execution from any working directory - -logger = get_logger(__name__) - -if __name__ == "__main__": - # set up data - conversations, questions = init_task() - - # set configs - mos_config = MOSConfig.from_yaml_file( - f"{BASE_DIR}/examples/data/config/mem_scheduler/memos_config_w_optimized_scheduler.yaml" - ) - - mem_cube_config = GeneralMemCubeConfig.from_yaml_file( - f"{BASE_DIR}/examples/data/config/mem_scheduler/mem_cube_config_neo4j.yaml" - ) - - # default local graphdb uri - if AuthConfig.default_config_exists(): - auth_config = AuthConfig.from_local_config() - - mos_config.mem_reader.config.llm.config.api_key = auth_config.openai.api_key - mos_config.mem_reader.config.llm.config.api_base = auth_config.openai.base_url - - mem_cube_config.text_mem.config.graph_db.config.uri = auth_config.graph_db.uri - mem_cube_config.text_mem.config.graph_db.config.user = auth_config.graph_db.user - mem_cube_config.text_mem.config.graph_db.config.password = auth_config.graph_db.password - mem_cube_config.text_mem.config.graph_db.config.db_name = auth_config.graph_db.db_name - mem_cube_config.text_mem.config.graph_db.config.auto_create = ( - auth_config.graph_db.auto_create - ) - - # Initialization - mos = MOSForTestScheduler(mos_config) - - user_id = "user_1" - mos.create_user(user_id) - - mem_cube_id = "mem_cube_5" - mem_cube_name_or_path = f"{BASE_DIR}/outputs/mem_scheduler/{user_id}/{mem_cube_id}" - - if Path(mem_cube_name_or_path).exists(): - shutil.rmtree(mem_cube_name_or_path) - print(f"{mem_cube_name_or_path} is not empty, and has been removed.") - - mem_cube = GeneralMemCube(mem_cube_config) - mem_cube.dump(mem_cube_name_or_path) - mos.register_mem_cube( - mem_cube_name_or_path=mem_cube_name_or_path, mem_cube_id=mem_cube_id, user_id=user_id - ) - mos.mem_scheduler.current_mem_cube = mem_cube - - mos.add(conversations, user_id=user_id, mem_cube_id=mem_cube_id) - - # Add interfering conversations - file_path = 
Path(f"{BASE_DIR}/examples/data/mem_scheduler/scene_data.json") - scene_data = json.load(file_path.open("r", encoding="utf-8")) - mos.add(scene_data[0], user_id=user_id, mem_cube_id=mem_cube_id) - mos.add(scene_data[1], user_id=user_id, mem_cube_id=mem_cube_id) - - for item in questions: - print("===== Chat Start =====") - query = item["question"] - print(f"Query:\n {query}\n") - response = mos.chat(query=query, user_id=user_id) - print(f"Answer:\n {response}\n") - - mos.mem_scheduler.stop() diff --git a/examples/mem_scheduler/quick_start_examples.py b/examples/mem_scheduler/quick_start_examples.py deleted file mode 100644 index 724663be6..000000000 --- a/examples/mem_scheduler/quick_start_examples.py +++ /dev/null @@ -1,312 +0,0 @@ -import json -import shutil -import sys -import uuid - -from pathlib import Path - -from transformers import DynamicCache - -from memos.configs.mem_cube import GeneralMemCubeConfig -from memos.configs.mem_os import MOSConfig -from memos.configs.memory import MemoryConfigFactory -from memos.mem_cube.general import GeneralMemCube -from memos.mem_os.main import MOS -from memos.mem_scheduler.schemas.message_schemas import ScheduleMessageItem -from memos.mem_scheduler.schemas.task_schemas import ( - ANSWER_TASK_LABEL, - MEM_UPDATE_TASK_LABEL, - QUERY_TASK_LABEL, -) -from memos.mem_scheduler.utils.db_utils import get_utc_now -from memos.mem_scheduler.utils.misc_utils import parse_yaml -from memos.memories.activation.item import KVCacheItem -from memos.memories.factory import MemoryFactory - - -FILE_PATH = Path(__file__).absolute() -BASE_DIR = FILE_PATH.parent.parent.parent -sys.path.insert(0, str(BASE_DIR)) # Enable execution from any working directory - - -def get_cache_info(cache): - if not cache: - return None - - num_layers = 0 - total_size_bytes = 0 - - if hasattr(cache, "layers"): - num_layers = len(cache.layers) - for layer in cache.layers: - if hasattr(layer, "key_cache") and layer.key_cache is not None: - total_size_bytes += layer.key_cache.nelement() * layer.key_cache.element_size() - if hasattr(layer, "value_cache") and layer.value_cache is not None: - total_size_bytes += layer.value_cache.nelement() * layer.value_cache.element_size() - - if hasattr(layer, "keys") and layer.keys is not None: - total_size_bytes += layer.keys.nelement() * layer.keys.element_size() - if hasattr(layer, "values") and layer.values is not None: - total_size_bytes += layer.values.nelement() * layer.values.element_size() - - elif hasattr(cache, "key_cache") and hasattr(cache, "value_cache"): - num_layers = len(cache.key_cache) - for k, v in zip(cache.key_cache, cache.value_cache, strict=False): - if k is not None: - total_size_bytes += k.nelement() * k.element_size() - if v is not None: - total_size_bytes += v.nelement() * v.element_size() - - return { - "num_layers": num_layers, - "size_bytes": total_size_bytes, - "size_mb": f"{total_size_bytes / (1024 * 1024):.2f} MB", - } - - -def serialize_item(obj): - if isinstance(obj, list): - return [serialize_item(x) for x in obj] - - if isinstance(obj, KVCacheItem): - return { - "id": obj.id, - "metadata": obj.metadata, - "records": obj.records.model_dump() - if hasattr(obj.records, "model_dump") - else obj.records, - "memory": get_cache_info(obj.memory), - } - - if isinstance(obj, DynamicCache): - return get_cache_info(obj) - - return str(obj) - - -def kv_cache_only(): - # 为 KVCacheMemory(HuggingFace 后端)创建配置 - config = MemoryConfigFactory( - backend="kv_cache", - config={ - "extractor_llm": { - "backend": "huggingface", - "config": { - 
"model_name_or_path": "Qwen/Qwen3-0.6B", - "max_tokens": 32, - "add_generation_prompt": True, - "remove_think_prefix": True, - }, - }, - }, - ) - - # 实例化 KVCacheMemory - kv_mem = MemoryFactory.from_config(config) - - # 提取一个 KVCacheItem(DynamicCache) - prompt = [ - {"role": "user", "content": "What is MemOS?"}, - {"role": "assistant", "content": "MemOS is a memory operating system for LLMs."}, - ] - print("===== Extract KVCacheItem =====") - cache_item = kv_mem.extract(prompt) - print(json.dumps(serialize_item(cache_item), indent=2, default=str)) - - # 将缓存添加到内存中 - kv_mem.add([cache_item]) - print("All caches:") - print(json.dumps(serialize_item(kv_mem.get_all()), indent=2, default=str)) - - # 通过 ID 获取 - retrieved = kv_mem.get(cache_item.id) - print("Retrieved:") - print(json.dumps(serialize_item(retrieved), indent=2, default=str)) - - # 合并缓存 - item2 = kv_mem.extract([{"role": "user", "content": "Tell me a joke."}]) - kv_mem.add([item2]) - merged = kv_mem.get_cache([cache_item.id, item2.id]) - print("Merged cache:") - print(json.dumps(serialize_item(merged), indent=2, default=str)) - - # 删除其中一个 - kv_mem.delete([cache_item.id]) - print("After delete:") - print(json.dumps(serialize_item(kv_mem.get_all()), indent=2, default=str)) - - # 导出和加载缓存 - kv_mem.dump("tmp/kv_mem") - print("Dumped to tmp/kv_mem") - kv_mem.delete_all() - kv_mem.load("tmp/kv_mem") - print("Loaded caches:") - print(json.dumps(serialize_item(kv_mem.get_all()), indent=2, default=str)) - - -def run_scheduler_example(): - # 使用 MemScheduler 加载主 MOS(Memory-Oriented System)配置文件 - config = parse_yaml( - f"{BASE_DIR}/examples/data/config/mem_scheduler/memos_config_w_scheduler.yaml" - ) - # 将解析出的配置字典传入 MOSConfig 构造器, 构建配置对象 - mos_config = MOSConfig(**config) - # 使用配置对象初始化 MOS 系统实例 - mos = MOS(mos_config) - - # 生成一个唯一的动态用户 ID(使用 UUID4) - user_id = str(uuid.uuid4()) - # 在 MOS 系统中为该用户创建账户 - mos.create_user(user_id=user_id) - - # 从 YAML 文件加载 MemCube(记忆立方体)的通用配置 - config = GeneralMemCubeConfig.from_yaml_file( - f"{BASE_DIR}/examples/data/config/mem_scheduler/mem_cube_config.yaml" - ) - # 定义 MemCube 的唯一标识符 - mem_cube_id = "mem_cube_5" - # 定义 MemCube 的本地存储路径(路径中包含用户 ID 和 MemCube ID) - mem_cube_name_or_path = f"{BASE_DIR}/outputs/mem_scheduler/{user_id}/{mem_cube_id}" - - # 如果该路径已存在, 则先删除旧目录 - if Path(mem_cube_name_or_path).exists(): - shutil.rmtree(mem_cube_name_or_path) - print(f"{mem_cube_name_or_path} 目录非空,已被删除。") - - # 根据加载的配置创建一个新的 MemCube 实例 - mem_cube = GeneralMemCube(config) - # 将该 MemCube 实例序列化并保存到指定路径 - mem_cube.dump(mem_cube_name_or_path) - - # 在 MOS 系统中为当前用户注册这个 MemCube - mos.register_mem_cube( - mem_cube_name_or_path=mem_cube_name_or_path, mem_cube_id=mem_cube_id, user_id=user_id - ) - - # 定义一个辅助函数, 用于获取缓存(如 KV Cache)的内存信息 - def get_cache_info(cache): - # 如果缓存为空, 则直接返回 None - if not cache: - return None - - num_layers = 0 # 记录缓存的层数 - total_size_bytes = 0 # 记录总字节数 - - # 情况一: 缓存结构包含 layers 属性(如 HuggingFace 的缓存格式) - if hasattr(cache, "layers"): - num_layers = len(cache.layers) - for layer in cache.layers: - # 统计 key_cache 的内存占用(如果存在) - if hasattr(layer, "key_cache") and layer.key_cache is not None: - total_size_bytes += layer.key_cache.nelement() * layer.key_cache.element_size() - # 统计 value_cache 的内存占用(如果存在) - if hasattr(layer, "value_cache") and layer.value_cache is not None: - total_size_bytes += ( - layer.value_cache.nelement() * layer.value_cache.element_size() - ) - - # 兼容其他可能的缓存命名方式(如 keys/values) - if hasattr(layer, "keys") and layer.keys is not None: - total_size_bytes += layer.keys.nelement() * layer.keys.element_size() - 
if hasattr(layer, "values") and layer.values is not None: - total_size_bytes += layer.values.nelement() * layer.values.element_size() - - # 情况二: 缓存结构直接包含 key_cache 和 value_cache 列表(如某些自定义格式) - elif hasattr(cache, "key_cache") and hasattr(cache, "value_cache"): - num_layers = len(cache.key_cache) - for k, v in zip(cache.key_cache, cache.value_cache, strict=False): - if k is not None: - total_size_bytes += k.nelement() * k.element_size() - if v is not None: - total_size_bytes += v.nelement() * v.element_size() - - # 返回结构化的缓存信息, 包括层数, 字节数和以 MB 为单位的可读格式 - return { - "num_layers": num_layers, - "size_bytes": total_size_bytes, - "size_mb": f"{total_size_bytes / (1024 * 1024):.2f} MB", - } - - # 定义自定义的查询(query)处理函数 - def custom_query_handler(messages: list[ScheduleMessageItem]): - for msg in messages: - # 打印用户输入内容 - print(f"\n[scheduler] 用户输入了查询:{msg.content}") - # 手动构造一个带有 MEM_UPDATE 标签的新消息, 用于触发记忆更新 - new_msg = msg.model_copy(update={"label": MEM_UPDATE_TASK_LABEL}) - # 将该消息提交给调度器处理 - mos.mem_scheduler.submit_messages([new_msg]) - - # 定义自定义的回答(answer)处理函数 - def custom_answer_handler(messages: list[ScheduleMessageItem]): - for msg in messages: - # 打印 LLM 的回复内容 - print(f"\n[scheduler] LLM 回复了答案:{msg.content}") - - # 定义自定义的记忆更新(mem_update)处理函数 - def custom_mem_update_handler(messages: list[ScheduleMessageItem]): - for msg in messages: - mem_cube = mos.mem_cubes.get(msg.mem_cube_id) - kv_mem = mem_cube.act_mem - # 如果该 MemCube 配置了文本记忆(TreeTextMemory / NaiveTextMemory) - if mem_cube and mem_cube.text_mem: - # 在文本记忆中搜索与当前内容相关的记忆(返回 top_k=3 条) - results = mem_cube.text_mem.search(msg.content, top_k=3) - for mem in results: - print(f"\n[scheduler] 检索到的记忆:{mem.memory}") - print("\n[scheduler] 转换为激活记忆......") - # 从文本记忆中提取对应的 KV 缓存项 - cache_item = kv_mem.extract(mem.memory) - # 附加元信息 - cache_item.records.text_memories = [mem.memory] - cache_item.records.timestamp = get_utc_now() - # 将该缓存项添加到激活记忆中 - kv_mem.add([cache_item]) - print("\n[scheduler] 完成!") - - # 将上述三个自定义处理器注册到调度器的分发器中, 分别对应不同任务标签 - mos.mem_scheduler.dispatcher.register_handlers( - { - QUERY_TASK_LABEL: custom_query_handler, # 查询任务 - ANSWER_TASK_LABEL: custom_answer_handler, # 回答任务 - MEM_UPDATE_TASK_LABEL: custom_mem_update_handler, # 记忆更新任务 - } - ) - - # 初始添加两条测试消息(用户和助手的对话)到系统中 - messages = [ - {"role": "user", "content": "I like playing football."}, - {"role": "assistant", "content": "I like playing football too."}, - ] - mos.add(messages, user_id=user_id, mem_cube_id=mem_cube_id) - - # 进入聊天循环: 展示 TreeTextMemory 的记忆节点结构 + KV Cache 的状态 - while True: - # 获取用户输入并去除首尾空格 - user_input = input("👤 [You] ").strip() - print() - # 调用 MOS 系统进行聊天响应 - response = mos.chat(user_input, user_id=user_id) - # 获取该用户当前 MemCube 中的所有记忆内容 - retrieved_memories = mos.get_all(mem_cube_id=mem_cube_id, user_id=user_id) - - # 打印助手的回复 - print(f"🤖 [Assistant] {response}") - - # 获取文本记忆部分 - TreeTextMemory - memories = retrieved_memories["text_mem"][0]["memories"] - for mem in memories: - print(f"[文本记忆] {mem.memory}") - - # 获取对应的 MemCube 和其激活记忆(KV Cache) - mem_cube = mos.mem_scheduler.mem_cube - kv_mem = mem_cube.act_mem - # 遍历所有激活记忆项, 打印其缓存信息和记录 - for cache_item in kv_mem.get_all(): - print(f"[激活记忆] {get_cache_info(cache_item.memory)} (记录:{cache_item.records})") - - -if __name__ == "__main__": - kv_cache_only() - - run_scheduler_example() diff --git a/examples/mem_scheduler/rabbitmq_example.py b/examples/mem_scheduler/rabbitmq_example.py deleted file mode 100644 index 5e40eaad5..000000000 --- a/examples/mem_scheduler/rabbitmq_example.py +++ /dev/null @@ -1,60 +0,0 @@ -import 
threading -import time - -from memos.configs.mem_scheduler import AuthConfig -from memos.mem_scheduler.webservice_modules.rabbitmq_service import RabbitMQSchedulerModule - - -def publish_message(rabbitmq_module, message): - """Function to publish a message.""" - rabbitmq_module.rabbitmq_publish_message(message) - print(f"Published message: {message}\n") - - -def main(): - # Initialize RabbitMQ module - rabbitmq_module = RabbitMQSchedulerModule() - rabbitmq_module.rabbit_queue_name = "test" - - # Initialize from configuration dictionary - if not AuthConfig.default_config_exists(): - print("Please set configs for rabbitmq.") - return - else: - rabbitmq_module.initialize_rabbitmq(config=AuthConfig.from_local_config().rabbitmq) - - try: - rabbitmq_module.wait_for_connection_ready() - - # === Publish some test messages === - # List to hold thread references - threads = [] - - # Publish some test messages using multiple threads - for i in range(3): - message = {"type": "test", "data": f"Message {i}", "timestamp": time.time()} - thread = threading.Thread(target=publish_message, args=(rabbitmq_module, message)) - thread.start() - threads.append(thread) - - # Start consumer - rabbitmq_module.rabbitmq_start_consuming() - - # Join threads to ensure all messages are published before proceeding - for thread in threads: - thread.join() - - except KeyboardInterrupt: - print("\nProgram interrupted by user") - - finally: - # Give some time for cleanup - time.sleep(3) - - # Close connections - rabbitmq_module.rabbitmq_close() - print("RabbitMQ connection closed") - - -if __name__ == "__main__": - main() diff --git a/examples/mem_scheduler/redis_example.py b/examples/mem_scheduler/redis_example.py index be6f20bed..00989c424 100644 --- a/examples/mem_scheduler/redis_example.py +++ b/examples/mem_scheduler/redis_example.py @@ -1,5 +1,13 @@ -import asyncio +# Prerequisites: +# 1. Ensure a Redis server is running locally on the default port (6379). +# You can start it with: `redis-server` +# On macOS with Homebrew: `/opt/homebrew/bin/redis-server` or `brew services start redis` +# On Linux: `sudo service redis-server start` +# 2. If Redis is running on a different host/port, update the configuration or environment variables accordingly. + + import sys +import time from datetime import datetime from pathlib import Path @@ -64,7 +72,7 @@ def service_run(): print( f"Added: {res}", ) - asyncio.sleep(0.5) + time.sleep(0.5) mem_scheduler.redis_stop_listening() diff --git a/examples/mem_scheduler/scheduler_for_async_tasks.py b/examples/mem_scheduler/run_async_tasks.py similarity index 90% rename from examples/mem_scheduler/scheduler_for_async_tasks.py rename to examples/mem_scheduler/run_async_tasks.py index 7f544c3da..fbb5a7bcc 100644 --- a/examples/mem_scheduler/scheduler_for_async_tasks.py +++ b/examples/mem_scheduler/run_async_tasks.py @@ -1,3 +1,12 @@ +""" +# Prerequisites & Configuration +# To run this script, you must have the following services +# running and configured in your .env file (or environment variables): +# 1. Redis (Required for TaskStatusTracker and Scheduler Queue) +# 2. Graph Database (Required for Memory Storage) +# 3. 
Vector Database (Required if using Neo4j Community or Preference Memory) +""" + from pathlib import Path from time import sleep diff --git a/examples/mem_scheduler/show_redis_status.py b/examples/mem_scheduler/show_redis_status.py index 04e79ca97..94a915588 100644 --- a/examples/mem_scheduler/show_redis_status.py +++ b/examples/mem_scheduler/show_redis_status.py @@ -1,56 +1,67 @@ +""" +# Prerequisites: +# 1. Ensure a Redis server is running locally on the default port (6379). +# You can start it with: `redis-server` +# On macOS with Homebrew: `/opt/homebrew/bin/redis-server` or `brew services start redis` +# On Linux: `sudo service redis-server start` +# 2. If Redis is running on a different host/port, update the configuration or environment variables accordingly. +""" + import time -from memos.api.routers.server_router import mem_scheduler +from memos.mem_scheduler.task_schedule_modules.orchestrator import SchedulerOrchestrator from memos.mem_scheduler.task_schedule_modules.redis_queue import SchedulerRedisQueue -queue = mem_scheduler.memos_message_queue.memos_message_queue +# Explicitly initialize Redis queue for monitoring +queue = SchedulerRedisQueue( + max_len=None, + consumer_group="scheduler_group", + consumer_name="monitor_consumer", + orchestrator=SchedulerOrchestrator(), +) -def fetch_status(queue: SchedulerRedisQueue) -> dict[str, dict[str, int]]: +def fetch_status( + queue: SchedulerRedisQueue, stream_key_prefix: str | None = None +) -> dict[str, dict[str, int]]: """Fetch and print per-user Redis queue status using built-in API. - Returns a dict mapping user_id -> {"pending": int, "remaining": int}. + Returns a dict mapping user_id -> {"remaining": int}. """ # This method will also print a summary and per-user counts. - return queue.show_task_status() + return queue.show_task_status(stream_key_prefix=stream_key_prefix) def print_diff(prev: dict[str, dict[str, int]], curr: dict[str, dict[str, int]]) -> None: """Print aggregated totals and per-user changes compared to previous snapshot.""" ts = time.strftime("%Y-%m-%d %H:%M:%S") - tot_p_prev = sum(v.get("pending", 0) for v in prev.values()) if prev else 0 tot_r_prev = sum(v.get("remaining", 0) for v in prev.values()) if prev else 0 - tot_p_curr = sum(v.get("pending", 0) for v in curr.values()) tot_r_curr = sum(v.get("remaining", 0) for v in curr.values()) - dp_tot = tot_p_curr - tot_p_prev dr_tot = tot_r_curr - tot_r_prev - print(f"[{ts}] Total pending={tot_p_curr} ({dp_tot:+d}), remaining={tot_r_curr} ({dr_tot:+d})") + print(f"[{ts}] Total remaining={tot_r_curr} ({dr_tot:+d})") # Print per-user deltas (current counts are already printed by show_task_status) all_uids = sorted(set(prev.keys()) | set(curr.keys())) for uid in all_uids: - p_prev = prev.get(uid, {}).get("pending", 0) r_prev = prev.get(uid, {}).get("remaining", 0) - p_curr = curr.get(uid, {}).get("pending", 0) r_curr = curr.get(uid, {}).get("remaining", 0) - dp = p_curr - p_prev dr = r_curr - r_prev # Only print when there is any change to reduce noise - if dp != 0 or dr != 0: - print(f" Δ {uid}: pending={dp:+d}, remaining={dr:+d}") + if dr != 0: + print(f" Δ {uid}: remaining={dr:+d}") # Note: queue.show_task_status() handles printing per-user counts internally. 
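+# The monitor is a simple polling loop: each interval it takes a fresh snapshot
+# of per-user "remaining" counts, prints the aggregate delta against the
+# previous snapshot, and echoes per-user lines only when their counts changed.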
-def main(interval_sec: float = 5.0) -> None: +def main(interval_sec: float = 5.0, stream_key_prefix: str | None = None) -> None: prev: dict[str, dict[str, int]] = {} while True: try: - curr = fetch_status(queue) + curr = fetch_status(queue, stream_key_prefix=stream_key_prefix) print_diff(prev, curr) print(f"stream_cache ({len(queue._stream_keys_cache)}): {queue._stream_keys_cache}") prev = curr @@ -64,4 +75,11 @@ def main(interval_sec: float = 5.0) -> None: if __name__ == "__main__": - main() + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument("--interval", type=float, default=1.0) + parser.add_argument("--prefix", type=str, default=None) + args = parser.parse_args() + + main(interval_sec=args.interval, stream_key_prefix=args.prefix) diff --git a/examples/mem_scheduler/task_fair_schedule.py b/examples/mem_scheduler/task_fair_schedule.py deleted file mode 100644 index 8b02b1931..000000000 --- a/examples/mem_scheduler/task_fair_schedule.py +++ /dev/null @@ -1,87 +0,0 @@ -import sys - -from collections import defaultdict -from pathlib import Path - -from memos.api.routers.server_router import mem_scheduler -from memos.mem_scheduler.schemas.message_schemas import ScheduleMessageItem - - -FILE_PATH = Path(__file__).absolute() -BASE_DIR = FILE_PATH.parent.parent.parent -sys.path.insert(0, str(BASE_DIR)) - - -def make_message(user_id: str, mem_cube_id: str, label: str, idx: int | str) -> ScheduleMessageItem: - return ScheduleMessageItem( - item_id=f"{user_id}:{mem_cube_id}:{label}:{idx}", - user_id=user_id, - mem_cube_id=mem_cube_id, - label=label, - content=f"msg-{idx} for {user_id}/{mem_cube_id}/{label}", - ) - - -def seed_messages_for_test_fairness(queue, combos, per_stream): - # send overwhelm message by one user - (u, c, label) = combos[0] - task_target = 100 - print(f"{u}:{c}:{label} submit {task_target} messages") - for i in range(task_target): - msg = make_message(u, c, label, f"overwhelm_{i}") - queue.submit_messages(msg) - - for u, c, label in combos: - print(f"{u}:{c}:{label} submit {per_stream} messages") - for i in range(per_stream): - msg = make_message(u, c, label, i) - queue.submit_messages(msg) - print("======= seed_messages Done ===========") - - -def count_by_stream(messages): - counts = defaultdict(int) - for m in messages: - key = f"{m.user_id}:{m.mem_cube_id}:{m.label}" - counts[key] += 1 - return counts - - -def run_fair_redis_schedule(batch_size: int = 3): - print("=== Redis Fairness Demo ===") - print(f"use_redis_queue: {mem_scheduler.use_redis_queue}") - mem_scheduler.consume_batch = batch_size - queue = mem_scheduler.memos_message_queue - - # Isolate and clear queue - queue.clear() - - # Define multiple streams: (user_id, mem_cube_id, task_label) - combos = [ - ("u1", "u1", "labelX"), - ("u1", "u1", "labelY"), - ("u2", "u2", "labelX"), - ("u2", "u2", "labelY"), - ] - per_stream = 5 - - # Seed messages evenly across streams - seed_messages_for_test_fairness(queue, combos, per_stream) - - # Compute target batch size (fair split across streams) - print(f"Request batch_size={batch_size} for {len(combos)} streams") - - for _ in range(len(combos)): - # Fetch one brokered pack - msgs = queue.get_messages(batch_size=batch_size) - print(f"Fetched {len(msgs)} messages in first pack") - - # Check fairness: counts per stream - counts = count_by_stream(msgs) - for k in sorted(counts): - print(f"{k}: {counts[k]}") - - -if __name__ == "__main__": - # task 1 fair redis schedule - run_fair_redis_schedule() diff --git 
a/examples/mem_user/user_manager_factory_example.py b/examples/mem_user/user_manager_factory_example.py deleted file mode 100644 index ea50c30c9..000000000 --- a/examples/mem_user/user_manager_factory_example.py +++ /dev/null @@ -1,111 +0,0 @@ -"""Example demonstrating the use of UserManagerFactory with different backends.""" - -from memos.configs.mem_user import UserManagerConfigFactory -from memos.mem_user.factory import UserManagerFactory -from memos.mem_user.persistent_factory import PersistentUserManagerFactory - - -def example_sqlite_default(): - """Example: Create SQLite user manager with default settings.""" - print("=== SQLite Default Example ===") - - # Method 1: Using factory with minimal config - user_manager = UserManagerFactory.create_sqlite() - - # Method 2: Using config factory (equivalent) - UserManagerConfigFactory( - backend="sqlite", - config={}, # Uses all defaults - ) - - print(f"Created user manager: {type(user_manager).__name__}") - print(f"Database path: {user_manager.db_path}") - - # Test basic operations - users = user_manager.list_users() - print(f"Initial users: {[user.user_name for user in users]}") - - user_manager.close() - - -def example_sqlite_custom(): - """Example: Create SQLite user manager with custom settings.""" - print("\n=== SQLite Custom Example ===") - - config_factory = UserManagerConfigFactory( - backend="sqlite", config={"db_path": "/tmp/custom_memos.db", "user_id": "admin"} - ) - - user_manager = UserManagerFactory.from_config(config_factory) - print(f"Created user manager: {type(user_manager).__name__}") - print(f"Database path: {user_manager.db_path}") - - # Test operations - user_id = user_manager.create_user("test_user") - print(f"Created user: {user_id}") - - user_manager.close() - - -def example_mysql(): - """Example: Create MySQL user manager.""" - print("\n=== MySQL Example ===") - - # Method 1: Using factory with parameters - try: - user_manager = UserManagerFactory.create_mysql( - host="localhost", - port=3306, - username="root", - password="your_password", # Replace with actual password - database="test_memos_users", - ) - - print(f"Created user manager: {type(user_manager).__name__}") - print(f"Connection URL: {user_manager.connection_url}") - - # Test operations - users = user_manager.list_users() - print(f"Users: {[user.user_name for user in users]}") - - user_manager.close() - - except Exception as e: - print(f"MySQL connection failed (expected if not set up): {e}") - - -def example_persistent_managers(): - """Example: Create persistent user managers with configuration storage.""" - print("\n=== Persistent User Manager Examples ===") - - # SQLite persistent manager - config_factory = UserManagerConfigFactory(backend="sqlite", config={}) - - persistent_manager = PersistentUserManagerFactory.from_config(config_factory) - print(f"Created persistent manager: {type(persistent_manager).__name__}") - - # Test config operations - from memos.configs.mem_os import MOSConfig - - # Create a sample config (you might need to adjust this based on MOSConfig structure) - try: - # This is a simplified example - adjust based on actual MOSConfig requirements - sample_config = MOSConfig() # Use default config - - # Save user config - success = persistent_manager.save_user_config("test_user", sample_config) - print(f"Config saved: {success}") - - # Retrieve user config - retrieved_config = persistent_manager.get_user_config("test_user") - print(f"Config retrieved: {retrieved_config is not None}") - - except Exception as e: - print(f"Config 
operations failed: {e}") - - persistent_manager.close() - - -if __name__ == "__main__": - # Run all examples - example_sqlite_default() diff --git a/src/memos/api/config.py b/src/memos/api/config.py index daf9b6cfe..204b0961c 100644 --- a/src/memos/api/config.py +++ b/src/memos/api/config.py @@ -23,7 +23,7 @@ # Load environment variables -load_dotenv() +load_dotenv(override=True) logger = logging.getLogger(__name__) diff --git a/src/memos/api/handlers/config_builders.py b/src/memos/api/handlers/config_builders.py index fb3df80c2..fce789e2a 100644 --- a/src/memos/api/handlers/config_builders.py +++ b/src/memos/api/handlers/config_builders.py @@ -89,7 +89,7 @@ def build_chat_llm_config() -> list[dict[str, Any]]: Returns: Validated chat LLM configuration dictionary """ - configs = json.loads(os.getenv("CHAT_MODEL_LIST")) + configs = json.loads(os.getenv("CHAT_MODEL_LIST", "[]")) return [ { "config_class": LLMConfigFactory.model_validate( diff --git a/src/memos/api/mcp_serve.py b/src/memos/api/mcp_serve.py index 838c2a76a..ce2e41390 100644 --- a/src/memos/api/mcp_serve.py +++ b/src/memos/api/mcp_serve.py @@ -61,6 +61,19 @@ def load_default_config(user_id="default_user"): "SCHEDULER_TOP_N": "scheduler_top_n", } + # Fields that should always be kept as strings (not converted to numbers) + string_only_fields = { + "openai_api_key", + "openai_api_base", + "neo4j_uri", + "neo4j_user", + "neo4j_password", + "neo4j_db_name", + "text_mem_type", + "model_name", + "embedder_model", + } + kwargs = {"user_id": user_id} for env_key, param_key in env_mapping.items(): val = os.getenv(env_key) @@ -74,6 +87,9 @@ def load_default_config(user_id="default_user"): # Handle boolean conversions if val.lower() in ("true", "false"): kwargs[param_key] = val.lower() == "true" + # Keep certain fields as strings + elif param_key in string_only_fields: + kwargs[param_key] = val else: # Try numeric conversions (int first, then float) try: @@ -93,6 +109,10 @@ def load_default_config(user_id="default_user"): openai_api_base = kwargs.pop("openai_api_base", "https://api.openai.com/v1") text_mem_type = kwargs.pop("text_mem_type", "tree_text") + # Ensure embedder_model has a default value if not set + if "embedder_model" not in kwargs: + kwargs["embedder_model"] = os.getenv("EMBEDDER_MODEL", "nomic-embed-text:latest") + config, cube = get_default( openai_api_key=openai_api_key, openai_api_base=openai_api_base, @@ -110,6 +130,21 @@ def __init__(self): self.mos_core.register_mem_cube(cube) self._setup_tools() + +class MOSMCPServer: + """MCP Server that accepts an existing MOS instance.""" + + def __init__(self, mos_instance: MOS | None = None): + self.mcp = FastMCP("MOS Memory System") + if mos_instance is None: + # Fall back to creating from default config + config, cube = load_default_config() + self.mos_core = MOS(config=config) + self.mos_core.register_mem_cube(cube) + else: + self.mos_core = mos_instance + self._setup_tools() + def _setup_tools(self): """Setup MCP tools""" @@ -133,7 +168,10 @@ async def chat(query: str, user_id: str | None = None) -> str: response = self.mos_core.chat(query, user_id) return response except Exception as e: - return f"Chat error: {e!s}" + import traceback + + error_details = traceback.format_exc() + return f"Chat error: {e!s}\nTraceback:\n{error_details}" @self.mcp.tool() async def create_user( @@ -261,7 +299,10 @@ async def search_memories( result = self.mos_core.search(query, user_id, cube_ids) return result except Exception as e: - return {"error": str(e)} + import traceback + + error_details 
= traceback.format_exc() + return {"error": str(e), "traceback": error_details} @self.mcp.tool() async def add_memory( @@ -527,23 +568,24 @@ async def control_memory_scheduler(action: str) -> str: except Exception as e: return f"Error controlling memory scheduler: {e!s}" - def run(self, transport: str = "stdio", **kwargs): - """Run MCP server with specified transport""" - if transport == "stdio": - # Run stdio mode (default for local usage) - self.mcp.run(transport="stdio") - elif transport == "http": - # Run HTTP mode - host = kwargs.get("host", "localhost") - port = kwargs.get("port", 8000) - asyncio.run(self.mcp.run_http_async(host=host, port=port)) - elif transport == "sse": - # Run SSE mode (deprecated but still supported) - host = kwargs.get("host", "localhost") - port = kwargs.get("port", 8000) - self.mcp.run(transport="sse", host=host, port=port) - else: - raise ValueError(f"Unsupported transport: {transport}") + +def _run_mcp(self, transport: str = "stdio", **kwargs): + if transport == "stdio": + self.mcp.run(transport="stdio") + elif transport == "http": + host = kwargs.get("host", "localhost") + port = kwargs.get("port", 8000) + asyncio.run(self.mcp.run_http_async(host=host, port=port)) + elif transport == "sse": + host = kwargs.get("host", "localhost") + port = kwargs.get("port", 8000) + self.mcp.run(transport="sse", host=host, port=port) + else: + raise ValueError(f"Unsupported transport: {transport}") + + +MOSMCPStdioServer.run = _run_mcp +MOSMCPServer.run = _run_mcp # Usage example diff --git a/src/memos/api/start_api.py b/src/memos/api/start_api.py index cbcdf6ce2..24a36f017 100644 --- a/src/memos/api/start_api.py +++ b/src/memos/api/start_api.py @@ -20,7 +20,7 @@ logger = logging.getLogger(__name__) # Load environment variables -load_dotenv() +load_dotenv(override=True) T = TypeVar("T") diff --git a/src/memos/cli.py b/src/memos/cli.py index fb3475ff3..092f2d276 100644 --- a/src/memos/cli.py +++ b/src/memos/cli.py @@ -13,7 +13,7 @@ def export_openapi(output: str) -> bool: """Export OpenAPI schema to JSON file.""" - from memos.api.start_api import app + from memos.api.server_api import app # Create directory if it doesn't exist if os.path.dirname(output): diff --git a/src/memos/graph_dbs/neo4j.py b/src/memos/graph_dbs/neo4j.py index 8698b6f73..d0ae5a555 100644 --- a/src/memos/graph_dbs/neo4j.py +++ b/src/memos/graph_dbs/neo4j.py @@ -1705,7 +1705,7 @@ def _parse_node(self, node_data: dict[str, Any]) -> dict[str, Any]: node.pop("user_name", None) # serialization - if node["sources"]: + if node.get("sources"): for idx in range(len(node["sources"])): if not ( isinstance(node["sources"][idx], str) diff --git a/src/memos/llms/hf.py b/src/memos/llms/hf.py index b5fc4ba13..0dd841c1a 100644 --- a/src/memos/llms/hf.py +++ b/src/memos/llms/hf.py @@ -51,7 +51,7 @@ def __init__(self, config: HFLLMConfig): self.config.model_name_or_path, torch_dtype="auto", device_map="auto" ) self.tokenizer = AutoTokenizer.from_pretrained( - self.config.model_name_or_path, use_fast=True + self.config.model_name_or_path, use_fast=True, force_download=True ) # Logits processors for sampling diff --git a/src/memos/mem_feedback/feedback.py b/src/memos/mem_feedback/feedback.py index 1d199c6cb..e38318a64 100644 --- a/src/memos/mem_feedback/feedback.py +++ b/src/memos/mem_feedback/feedback.py @@ -96,6 +96,7 @@ def __init__(self, config: MemFeedbackConfig): self.searcher: Searcher = None self.reranker = None self.pref_mem: SimplePreferenceTextMemory = None + self.pref_feedback: bool = False self.DB_IDX_READY = 
False @require_python_package( @@ -630,11 +631,14 @@ def _retrieve(self, query: str, info=None, top_k=20, user_name=None): ) retrieved_mems = [item[0] for item in retrieved_mems if float(item[1]) > 0.01] - pref_info = {} - if "user_id" in info: - pref_info = {"user_id": info["user_id"]} - retrieved_prefs = self.pref_mem.search(query, top_k, pref_info) - return retrieved_mems + retrieved_prefs + if self.pref_feedback: + pref_info = {} + if "user_id" in info: + pref_info = {"user_id": info["user_id"]} + retrieved_prefs = self.pref_mem.search(query, top_k, pref_info) + return retrieved_mems + retrieved_prefs + else: + return retrieved_mems def _vec_query(self, new_memories_embedding: list[float], user_name=None): """Vector retrieval query""" diff --git a/src/memos/mem_feedback/simple_feedback.py b/src/memos/mem_feedback/simple_feedback.py index e32f939c7..2ac0a0a39 100644 --- a/src/memos/mem_feedback/simple_feedback.py +++ b/src/memos/mem_feedback/simple_feedback.py @@ -25,6 +25,7 @@ def __init__( searcher: Searcher, reranker: BaseReranker, pref_mem: SimplePreferenceTextMemory, + pref_feedback: bool = False, ): self.llm = llm self.embedder = embedder @@ -36,3 +37,4 @@ def __init__( self.pref_mem = pref_mem self.reranker = reranker self.DB_IDX_READY = False + self.pref_feedback = pref_feedback diff --git a/src/memos/mem_scheduler/general_modules/init_components_for_scheduler.py b/src/memos/mem_scheduler/general_modules/init_components_for_scheduler.py index 3a12a9c79..903088a4c 100644 --- a/src/memos/mem_scheduler/general_modules/init_components_for_scheduler.py +++ b/src/memos/mem_scheduler/general_modules/init_components_for_scheduler.py @@ -109,7 +109,7 @@ def build_chat_llm_config() -> list[dict[str, Any]]: Returns: Validated chat LLM configuration dictionary """ - configs = json.loads(os.getenv("CHAT_MODEL_LIST")) + configs = json.loads(os.getenv("CHAT_MODEL_LIST", "[]")) return [ { "config_class": LLMConfigFactory.model_validate( diff --git a/src/memos/mem_scheduler/task_schedule_modules/redis_queue.py b/src/memos/mem_scheduler/task_schedule_modules/redis_queue.py index 1c9683542..d570dccdd 100644 --- a/src/memos/mem_scheduler/task_schedule_modules/redis_queue.py +++ b/src/memos/mem_scheduler/task_schedule_modules/redis_queue.py @@ -983,34 +983,21 @@ def qsize(self) -> dict: logger.error(f"Failed to get Redis queue size: {e}", stack_info=True) return {} - def show_task_status(self) -> dict[str, dict[str, int]]: - stream_keys = self.get_stream_keys(stream_key_prefix=self.stream_key_prefix) + def show_task_status(self, stream_key_prefix: str | None = None) -> dict[str, dict[str, int]]: + effective_prefix = ( + stream_key_prefix if stream_key_prefix is not None else self.stream_key_prefix + ) + stream_keys = self.get_stream_keys(stream_key_prefix=effective_prefix) if not stream_keys: - logger.info("No Redis streams found for the configured prefix") + logger.info(f"No Redis streams found for the configured prefix: {effective_prefix}") return {} - consumer_group = self.consumer_group or "scheduler_group" - grouped: dict[str, dict[str, int]] = {} for sk in stream_keys: uid = sk if uid not in grouped: - grouped[uid] = {"pending": 0, "remaining": 0} - - # Pending count via XPENDING - pending_count = 0 - try: - pending_info = self._redis_conn.xpending(sk, consumer_group) - # redis-py may return a tuple-like [count, ...] 
- if pending_info: - try: - pending_count = int(pending_info[0]) - except Exception: - # Fallback if structure differs - pending_count = int(getattr(pending_info, "count", 0) or 0) - except Exception as e: - logger.debug(f"XPENDING failed for '{sk}': {e}") + grouped[uid] = {"remaining": 0} # Remaining count via XLEN remaining_count = 0 @@ -1019,20 +1006,16 @@ def show_task_status(self) -> dict[str, dict[str, int]]: except Exception as e: logger.debug(f"XLEN failed for '{sk}': {e}") - grouped[uid]["pending"] += pending_count grouped[uid]["remaining"] += remaining_count # Pretty-print summary try: - total_pending = sum(v.get("pending", 0) for v in grouped.values()) total_remaining = sum(v.get("remaining", 0) for v in grouped.values()) - header = f"Task Queue Status by user_id | pending={total_pending}, remaining={total_remaining}" + header = f"Task Queue Status by user_id | remaining={total_remaining}" print(header) for uid in sorted(grouped.keys()): counts = grouped[uid] - print( - f"- {uid}: pending={counts.get('pending', 0)}, remaining={counts.get('remaining', 0)}" - ) + print(f"- {uid}: remaining={counts.get('remaining', 0)}") except Exception: # Printing is best-effort; return grouped regardless pass diff --git a/tests/test_openapi_sync.py b/tests/test_openapi_sync.py deleted file mode 100644 index 90d44dcf0..000000000 --- a/tests/test_openapi_sync.py +++ /dev/null @@ -1,79 +0,0 @@ -import json - -from pathlib import Path - -from memos.api.start_api import app - - -OPENAPI_JSON_PATH = Path(__file__).parent.parent / "docs" / "openapi.json" - - -def test_openapi_json_up_to_date(): - """ - Ensure docs/openapi.json is up to date. - If not, run: `make openapi` to regenerate it. - """ - - assert OPENAPI_JSON_PATH.exists(), ( - f"{OPENAPI_JSON_PATH} does not exist. Please run: `make openapi` to regenerate it." 
- ) - - # Get current OpenAPI schema and existing file - current_openapi = app.openapi() - with open(OPENAPI_JSON_PATH) as f: - existing_openapi = json.load(f) - - # Remove dynamic values like UUIDs in defaults to avoid false positives - def normalize(obj): - if isinstance(obj, dict): - return {k: normalize(v) for k, v in obj.items() if k != "default"} - elif isinstance(obj, list): - return [normalize(item) for item in obj] - return obj - - # Compare normalized structures - normalized_current = normalize(current_openapi) - normalized_existing = normalize(existing_openapi) - - if normalized_current == normalized_existing: - return # Test passes - - # Generate helpful error message with specific differences - def find_diffs(current, existing, path=""): - diffs = [] - if type(current) is not type(existing): - diffs.append( - f"Type mismatch at {path}: {type(current).__name__} vs {type(existing).__name__}" - ) - elif isinstance(current, dict): - all_keys = set(current.keys()) | set(existing.keys()) - for key in sorted(all_keys): - key_path = f"{path}.{key}" if path else key - if key not in current: - diffs.append(f"Missing: {key_path}") - elif key not in existing: - diffs.append(f"Added: {key_path}") - else: - diffs.extend(find_diffs(current[key], existing[key], key_path)) - elif isinstance(current, list): - if len(current) != len(existing): - diffs.append(f"Array length differs at {path}: {len(current)} vs {len(existing)}") - for i, (curr_item, exist_item) in enumerate(zip(current, existing, strict=False)): - diffs.extend(find_diffs(curr_item, exist_item, f"{path}[{i}]")) - elif current != existing: - diffs.append(f"Value differs at {path}") - return diffs - - differences = find_diffs(normalized_current, normalized_existing) - - # Format error message - max_diffs = 5 - diff_preview = "\n".join(f" - {diff}" for diff in differences[:max_diffs]) - if len(differences) > max_diffs: - diff_preview += f"\n ... and {len(differences) - max_diffs} more" - - raise AssertionError( - f"OpenAPI schema is out of date ({len(differences)} differences found):\n" - f"{diff_preview}\n\n" - f"To fix, run: `make openapi` to regenerate it." 
- ) From 8d9cc27fd5754250367bb39051643a8bf3a8c152 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=BB=91=E5=B8=83=E6=9E=97?= <11641432+heiheiyouyou@user.noreply.gitee.com> Date: Wed, 21 Jan 2026 09:58:34 +0800 Subject: [PATCH 2/5] fix rerank too long --- src/memos/api/config.py | 2 ++ src/memos/reranker/factory.py | 4 ++++ src/memos/reranker/http_bge.py | 8 ++++++++ src/memos/reranker/http_bge_strategy.py | 9 +++++++++ 4 files changed, 23 insertions(+) diff --git a/src/memos/api/config.py b/src/memos/api/config.py index 204b0961c..a3bf25be0 100644 --- a/src/memos/api/config.py +++ b/src/memos/api/config.py @@ -410,6 +410,8 @@ def get_feedback_reranker_config() -> dict[str, Any]: "url": os.getenv("MOS_RERANKER_URL", "localhost:8000/v1/rerank"), "model": os.getenv("MOS_FEEDBACK_RERANKER_MODEL", "bge-reranker-v2-m3"), "timeout": 10, + "max_query_tokens": int(os.getenv("MOS_RERANKER_MAX_TOKENS", 8000)), + "concate_len": int(os.getenv("MOS_RERANKER_CONCAT_LEN", 1000)), "headers_extra": json.loads(os.getenv("MOS_RERANKER_HEADERS_EXTRA", "{}")), "rerank_source": os.getenv("MOS_RERANK_SOURCE"), "reranker_strategy": os.getenv("MOS_RERANKER_STRATEGY", "single_turn"), diff --git a/src/memos/reranker/factory.py b/src/memos/reranker/factory.py index 1440704a6..21282f613 100644 --- a/src/memos/reranker/factory.py +++ b/src/memos/reranker/factory.py @@ -42,6 +42,8 @@ def from_config(cfg: RerankerConfigFactory | None) -> BaseReranker | None: reranker_url=c.get("url") or c.get("endpoint") or c.get("reranker_url"), model=c.get("model", "bge-reranker-v2-m3"), timeout=int(c.get("timeout", 10)), + max_query_tokens=min(max(c.get("max_query_tokens", 8000), 100), 8000), + concate_len=min(max(c.get("concate_len", 1000), 4), 8000), headers_extra=headers_extra, rerank_source=c.get("rerank_source"), ) @@ -60,6 +62,8 @@ def from_config(cfg: RerankerConfigFactory | None) -> BaseReranker | None: reranker_url=c.get("url") or c.get("endpoint") or c.get("reranker_url"), model=c.get("model", "bge-reranker-v2-m3"), timeout=int(c.get("timeout", 10)), + max_query_tokens=min(max(c.get("max_query_tokens", 8000), 100), 8000), + concate_len=min(max(c.get("concate_len", 1000), 4), 8000), headers_extra=headers_extra, rerank_source=c.get("rerank_source"), reranker_strategy=c.get("reranker_strategy"), diff --git a/src/memos/reranker/http_bge.py b/src/memos/reranker/http_bge.py index 32034cf6d..b100badd5 100644 --- a/src/memos/reranker/http_bge.py +++ b/src/memos/reranker/http_bge.py @@ -80,6 +80,8 @@ def __init__( token: str = "", model: str = "bge-reranker-v2-m3", timeout: int = 10, + max_query_tokens: int | None = None, + concate_len: int | None = None, headers_extra: dict | None = None, rerank_source: str | None = None, boost_weights: dict[str, float] | None = None, @@ -107,6 +109,8 @@ def __init__( self.token = token or "" self.model = model self.timeout = timeout + self.max_query_tokens = max_query_tokens + self.concate_len = concate_len self.headers_extra = headers_extra or {} self.rerank_source = rerank_source @@ -155,6 +159,10 @@ def rerank( Re-ranked items with scores, sorted descending by score. 
""" + if self.max_query_tokens and len(query) > self.max_query_tokens: + single_concate_len = self.concate_len // 2 + query = query[:single_concate_len] + "\n" + query[-single_concate_len:] + if not graph_results: return [] diff --git a/src/memos/reranker/http_bge_strategy.py b/src/memos/reranker/http_bge_strategy.py index b0567698c..9a1bb21f8 100644 --- a/src/memos/reranker/http_bge_strategy.py +++ b/src/memos/reranker/http_bge_strategy.py @@ -80,6 +80,8 @@ def __init__( token: str = "", model: str = "bge-reranker-v2-m3", timeout: int = 10, + max_query_tokens: int | None = None, + concate_len: int | None = None, headers_extra: dict | None = None, rerank_source: str | None = None, boost_weights: dict[str, float] | None = None, @@ -108,6 +110,8 @@ def __init__( self.token = token or "" self.model = model self.timeout = timeout + self.max_query_tokens = max_query_tokens + self.concate_len = concate_len self.headers_extra = headers_extra or {} self.boost_weights = ( @@ -149,6 +153,11 @@ def rerank( list[tuple[TextualMemoryItem, float]] Re-ranked items with scores, sorted descending by score. """ + + if self.self.max_query_tokens and len(query) > self.max_query_tokens: + single_concate_len = self.concate_len // 2 + query = query[:single_concate_len] + "\n" + query[-single_concate_len:] + if not graph_results: return [] From d0b0d71a0152bc05d63be5763c232ed029c21e64 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=BB=91=E5=B8=83=E6=9E=97?= <11641432+heiheiyouyou@user.noreply.gitee.com> Date: Wed, 21 Jan 2026 10:25:04 +0800 Subject: [PATCH 3/5] fix rerank config for too long --- src/memos/reranker/http_bge_strategy.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/memos/reranker/http_bge_strategy.py b/src/memos/reranker/http_bge_strategy.py index 9a1bb21f8..871ac5ff0 100644 --- a/src/memos/reranker/http_bge_strategy.py +++ b/src/memos/reranker/http_bge_strategy.py @@ -153,8 +153,7 @@ def rerank( list[tuple[TextualMemoryItem, float]] Re-ranked items with scores, sorted descending by score. 
""" - - if self.self.max_query_tokens and len(query) > self.max_query_tokens: + if self.max_query_tokens and len(query) > self.max_query_tokens: single_concate_len = self.concate_len // 2 query = query[:single_concate_len] + "\n" + query[-single_concate_len:] From d19ba68d771ce1c4026e4920f191742b1f261bcf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=BB=91=E5=B8=83=E6=9E=97?= <11641432+heiheiyouyou@user.noreply.gitee.com> Date: Wed, 21 Jan 2026 15:30:10 +0800 Subject: [PATCH 4/5] json load warning --- src/memos/mem_reader/read_multi_modal/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/memos/mem_reader/read_multi_modal/utils.py b/src/memos/mem_reader/read_multi_modal/utils.py index d3d97b4e6..a6d910e54 100644 --- a/src/memos/mem_reader/read_multi_modal/utils.py +++ b/src/memos/mem_reader/read_multi_modal/utils.py @@ -97,7 +97,7 @@ def _cheap_close(t: str) -> str: return json.loads(s) except json.JSONDecodeError: pass - logger.error(f"[JSONParse] Failed to decode JSON: {e}\nRaw: {response_text}") + logger.warning(f"[JSONParse] Failed to decode JSON: {e}\nRaw: {response_text}") return {} From 75d7bddd919a0dbdd6c34b876af1f84cee7f8b1b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=BB=91=E5=B8=83=E6=9E=97?= <11641432+heiheiyouyou@user.noreply.gitee.com> Date: Wed, 21 Jan 2026 15:44:35 +0800 Subject: [PATCH 5/5] json load warning --- .../read_multi_modal/file_content_parser.py | 23 ++++++++++++++----- .../mem_reader/read_multi_modal/utils.py | 2 +- 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/src/memos/mem_reader/read_multi_modal/file_content_parser.py b/src/memos/mem_reader/read_multi_modal/file_content_parser.py index fbc704d0b..66f5fd8d8 100644 --- a/src/memos/mem_reader/read_multi_modal/file_content_parser.py +++ b/src/memos/mem_reader/read_multi_modal/file_content_parser.py @@ -74,12 +74,23 @@ def _get_doc_llm_response(self, chunk_text: str, custom_tags: list[str] | None = prompt = prompt.replace("{custom_tags_prompt}", custom_tags_prompt) messages = [{"role": "user", "content": prompt}] - try: - response_text = self.llm.generate(messages) - response_json = parse_json_result(response_text) - except Exception as e: - logger.error(f"[FileContentParser] LLM generation error: {e}") - response_json = {} + max_retries = 3 + for attempt in range(max_retries): + try: + response_text = self.llm.generate(messages) + response_json = parse_json_result(response_text) + return response_json + except Exception as e: + if attempt < max_retries - 1: + logger.warning( + f"[FileContentParser] LLM generation error (attempt {attempt + 1}/{max_retries}): {e}." + ) + else: + logger.error( + f"[FileContentParser] LLM generation error after {max_retries} attempts: {e}" + ) + response_json = {} + return response_json def _handle_url(self, url_str: str, filename: str) -> tuple[str, str | None, bool]: diff --git a/src/memos/mem_reader/read_multi_modal/utils.py b/src/memos/mem_reader/read_multi_modal/utils.py index a6d910e54..d3d97b4e6 100644 --- a/src/memos/mem_reader/read_multi_modal/utils.py +++ b/src/memos/mem_reader/read_multi_modal/utils.py @@ -97,7 +97,7 @@ def _cheap_close(t: str) -> str: return json.loads(s) except json.JSONDecodeError: pass - logger.warning(f"[JSONParse] Failed to decode JSON: {e}\nRaw: {response_text}") + logger.error(f"[JSONParse] Failed to decode JSON: {e}\nRaw: {response_text}") return {}