From cbd6f3145849d71a18506bf7f5f523a8d2a4ee52 Mon Sep 17 00:00:00 2001
From: CaralHsi
Date: Mon, 19 Jan 2026 11:52:26 +0800
Subject: [PATCH] Revert "doc: Examples update and related minor bug fix"

---
 examples/basic_modules/embedder.py            |  14 +-
 examples/basic_modules/llm.py                 |   8 -
 examples/basic_modules/nebular_example.py     | 363 ++++++++++++++++++
 examples/basic_modules/parser.py              |  15 +
 examples/basic_modules/reranker.py            |  17 -
 .../tree_textual_memory_reasoner.py           | 171 ++++++++
 examples/mem_scheduler/api_w_scheduler.py     |   9 -
 examples/mem_scheduler/memos_w_scheduler.py   |   7 -
 examples/mem_scheduler/redis_example.py       |   8 -
 examples/mem_scheduler/run_async_tasks.py     |   9 -
 examples/mem_scheduler/show_redis_status.py   |  19 +-
 src/memos/graph_dbs/neo4j.py                  |   2 +-
 12 files changed, 553 insertions(+), 89 deletions(-)
 create mode 100644 examples/basic_modules/nebular_example.py
 create mode 100644 examples/basic_modules/parser.py
 create mode 100644 examples/basic_modules/tree_textual_memory_reasoner.py

diff --git a/examples/basic_modules/embedder.py b/examples/basic_modules/embedder.py
index d35dc1797..7cc7942da 100644
--- a/examples/basic_modules/embedder.py
+++ b/examples/basic_modules/embedder.py
@@ -3,10 +3,7 @@
 
 # Scenario 1: Using EmbedderFactory
 
-# Prerequisites:
-# 1. Install Ollama: https://ollama.com/
-# 2. Start Ollama server: `ollama serve`
-# 3. Pull the model: `ollama pull nomic-embed-text`
+
 config = EmbedderConfigFactory.model_validate(
     {
         "backend": "ollama",
@@ -36,9 +33,6 @@
 
 # Scenario 3: Using SenTranEmbedder
 
-# Prerequisites:
-# 1. Ensure `einops` is installed: `pip install einops` (Required for some HF models like nomic-bert)
-# 2. The model `nomic-ai/nomic-embed-text-v1.5` will be downloaded automatically from HuggingFace.
 
 config_hf = EmbedderConfigFactory.model_validate(
     {
@@ -55,9 +49,6 @@
 print("==" * 20)
 
 # === Scenario 4: Using UniversalAPIEmbedder(OpenAI) ===
-# Prerequisites:
-# 1. Set a valid OPENAI_API_KEY
-# 2. Ensure the base_url is reachable
 
 config_api = EmbedderConfigFactory.model_validate(
     {
@@ -77,9 +68,6 @@
 print("Embedding preview:", embedding_api[0][:10])
 
 # === Scenario 5: Using UniversalAPIEmbedder(Azure) ===
-# Prerequisites:
-# 1. Set a valid AZURE_API_KEY
-# 2. Ensure the base_url is reachable
 
 config_api = EmbedderConfigFactory.model_validate(
     {
diff --git a/examples/basic_modules/llm.py b/examples/basic_modules/llm.py
index fb157c991..d33fc9544 100644
--- a/examples/basic_modules/llm.py
+++ b/examples/basic_modules/llm.py
@@ -5,10 +5,6 @@
 
 # Scenario 1: Using LLMFactory with Ollama Backend
 # This is the most recommended way! 🌟
-# Prerequisites:
-# 1. Install Ollama: https://ollama.com/
-# 2. Start Ollama server: `ollama serve`
-# 3. Need python ollama package(>=0.5.0,<0.6.0)
 
 config = LLMConfigFactory.model_validate(
     {
@@ -50,10 +46,6 @@
 
 
 # Scenario 3: Using LLMFactory with OpenAI Backend
-# Prerequisites:
-# 1. You need a valid OpenAI API key to run this scenario.
-# 2. Replace 'sk-xxxx' with your actual API key below.
-
 config = LLMConfigFactory.model_validate(
     {
diff --git a/examples/basic_modules/nebular_example.py b/examples/basic_modules/nebular_example.py
new file mode 100644
index 000000000..13f88e3f3
--- /dev/null
+++ b/examples/basic_modules/nebular_example.py
@@ -0,0 +1,363 @@
+import json
+import os
+
+from datetime import datetime, timezone
+
+import numpy as np
+
+from dotenv import load_dotenv
+
+from memos.configs.embedder import EmbedderConfigFactory
+from memos.configs.graph_db import GraphDBConfigFactory
+from memos.embedders.factory import EmbedderFactory
+from memos.graph_dbs.factory import GraphStoreFactory
+from memos.memories.textual.item import TextualMemoryItem, TreeNodeTextualMemoryMetadata
+
+
+load_dotenv()
+
+
+def show(nebular_data):
+    from memos.configs.graph_db import Neo4jGraphDBConfig
+    from memos.graph_dbs.neo4j import Neo4jGraphDB
+
+    tree_config = Neo4jGraphDBConfig.from_json_file("../../examples/data/config/neo4j_config.json")
+    tree_config.use_multi_db = True
+    tree_config.db_name = "nebular-show2"
+
+    neo4j_db = Neo4jGraphDB(tree_config)
+    neo4j_db.clear()
+    neo4j_db.import_graph(nebular_data)
+
+
+embedder_config = EmbedderConfigFactory.model_validate(
+    {
+        "backend": "universal_api",
+        "config": {
+            "provider": "openai",
+            "api_key": os.getenv("OPENAI_API_KEY", "sk-xxxxx"),
+            "model_name_or_path": "text-embedding-3-large",
+            "base_url": os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1"),
+        },
+    }
+)
+embedder = EmbedderFactory.from_config(embedder_config)
+embedder_dimension = 3072
+
+
+def embed_memory_item(memory: str) -> list[float]:
+    embedding = embedder.embed([memory])[0]
+    embedding_np = np.array(embedding, dtype=np.float32)
+    embedding_list = embedding_np.tolist()
+    return embedding_list
+
+
+def example_shared_db(db_name: str = "shared-traval-group"):
+    """
+    Example: Single(Shared)-DB multi-tenant (logical isolation)
+    Multiple users' data live in the same graph space, with user_name as a tag.
+    """
+    # users
+    user_list = ["travel_member_alice", "travel_member_bob"]
+
+    for user_name in user_list:
+        # Step 1: Build factory config
+        config = GraphDBConfigFactory(
+            backend="nebular",
+            config={
+                # NOTE: the default must be valid JSON for json.loads()
+                "uri": json.loads(os.getenv("NEBULAR_HOSTS", '["localhost"]')),
+                "user": os.getenv("NEBULAR_USER", "root"),
+                "password": os.getenv("NEBULAR_PASSWORD", "xxxxxx"),
+                "space": db_name,
+                "user_name": user_name,
+                "use_multi_db": False,
+                "auto_create": True,
+                "embedding_dimension": embedder_dimension,
+            },
+        )
+
+        # Step 2: Instantiate graph store
+        graph = GraphStoreFactory.from_config(config)
+        print(f"\n[INFO] Working in shared DB: {db_name}, for user: {user_name}")
+        graph.clear()
+
+        # Step 3: Create topic node
+        topic = TextualMemoryItem(
+            memory="This research addresses long-term multi-UAV navigation for energy-efficient communication coverage.",
+            metadata=TreeNodeTextualMemoryMetadata(
+                memory_type="LongTermMemory",
+                key="Multi-UAV Long-Term Coverage",
+                hierarchy_level="topic",
+                type="fact",
+                memory_time="2024-01-01",
+                source="file",
+                sources=["paper://multi-uav-coverage/intro"],
+                status="activated",
+                confidence=95.0,
+                tags=["UAV", "coverage", "multi-agent"],
+                entities=["UAV", "coverage", "navigation"],
+                visibility="public",
+                updated_at=datetime.now().isoformat(),
+                embedding=embed_memory_item(
+                    "This research addresses long-term "
+                    "multi-UAV navigation for "
+                    "energy-efficient communication "
+                    "coverage."
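+                    # NOTE: adjacent string literals concatenate, so the text
+                    # embedded here matches the node's `memory` field above.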
+                ),
+            ),
+        )
+
+        graph.add_node(
+            id=topic.id, memory=topic.memory, metadata=topic.metadata.model_dump(exclude_none=True)
+        )
+
+        # Step 4: Add a concept for each user
+        concept = TextualMemoryItem(
+            memory=f"Itinerary plan for {user_name}",
+            metadata=TreeNodeTextualMemoryMetadata(
+                memory_type="LongTermMemory",
+                key="Multi-UAV Long-Term Coverage",
+                hierarchy_level="concept",
+                type="fact",
+                memory_time="2024-01-01",
+                source="file",
+                sources=["paper://multi-uav-coverage/intro"],
+                status="activated",
+                confidence=95.0,
+                tags=["UAV", "coverage", "multi-agent"],
+                entities=["UAV", "coverage", "navigation"],
+                visibility="public",
+                updated_at=datetime.now().isoformat(),
+                embedding=embed_memory_item(f"Itinerary plan for {user_name}"),
+            ),
+        )
+
+        graph.add_node(
+            id=concept.id,
+            memory=concept.memory,
+            metadata=concept.metadata.model_dump(exclude_none=True),
+        )
+
+        # Link concept to topic
+        graph.add_edge(source_id=concept.id, target_id=topic.id, type="RELATE_TO")
+        print(f"[INFO] Added nodes for {user_name}")
+
+    # Step 5: Query and print ALL for verification
+    print("\n=== Export entire DB (for verification, includes ALL users) ===")
+    graph = GraphStoreFactory.from_config(config)
+    all_graph_data = graph.export_graph()
+    print(str(all_graph_data)[:1000])
+
+    # Step 6: Search for alice's data only
+    print("\n=== Search for travel_member_alice ===")
+    config_alice = GraphDBConfigFactory(
+        backend="nebular",
+        config={
+            "uri": json.loads(os.getenv("NEBULAR_HOSTS", '["localhost"]')),
+            "user": os.getenv("NEBULAR_USER", "root"),
+            "password": os.getenv("NEBULAR_PASSWORD", "xxxxxx"),
+            "space": db_name,
+            "user_name": user_list[0],
+            "auto_create": True,
+            "embedding_dimension": embedder_dimension,
+            "use_multi_db": False,
+        },
+    )
+    graph_alice = GraphStoreFactory.from_config(config_alice)
+    nodes = graph_alice.search_by_embedding(vector=embed_memory_item("travel itinerary"), top_k=3)
+    for node in nodes:
+        print(str(graph_alice.get_node(node["id"]))[:1000])
+
+
+def run_user_session(
+    user_name: str,
+    db_name: str,
+    topic_text: str,
+    concept_texts: list[str],
+    fact_texts: list[str],
+):
+    print(f"\n=== {user_name} starts building their memory graph ===")
+
+    # Manually initialize correct GraphDB class
+    config = GraphDBConfigFactory(
+        backend="nebular",
+        config={
+            "uri": json.loads(os.getenv("NEBULAR_HOSTS", '["localhost"]')),
+            "user": os.getenv("NEBULAR_USER", "root"),
+            "password": os.getenv("NEBULAR_PASSWORD", "xxxxxx"),
+            "space": db_name,
+            "user_name": user_name,
+            "use_multi_db": False,
+            "auto_create": True,
+            "embedding_dimension": embedder_dimension,
+        },
+    )
+    graph = GraphStoreFactory.from_config(config)
+
+    # Start with a clean slate for this user
+    graph.clear()
+
+    now = datetime.now(timezone.utc).isoformat()
+
+    # === Step 1: Create a root topic node (e.g., user's research focus) ===
+    topic = TextualMemoryItem(
+        memory=topic_text,
+        metadata=TreeNodeTextualMemoryMetadata(
+            memory_type="LongTermMemory",
+            key="Research Topic",
+            hierarchy_level="topic",
+            type="fact",
+            memory_time="2024-01-01",
+            status="activated",
+            visibility="public",
+            tags=["research", "rl"],
+            updated_at=now,
+            embedding=embed_memory_item(topic_text),
+        ),
+    )
+    graph.add_node(topic.id, topic.memory, topic.metadata.model_dump(exclude_none=True))
+
+    # === Step 2: Create two concept nodes linked to the topic ===
+    concept_items = []
+    for i, text in enumerate(concept_texts):
+        concept = TextualMemoryItem(
+            memory=text,
+            metadata=TreeNodeTextualMemoryMetadata(
+                memory_type="LongTermMemory",
+                key=f"Concept {i + 1}",
+                hierarchy_level="concept",
+                type="fact",
+                memory_time="2024-01-01",
+                status="activated",
+                visibility="public",
+                updated_at=now,
+                embedding=embed_memory_item(text),
+                tags=["concept"],
+                confidence=90 + i,
+            ),
+        )
+        graph.add_node(concept.id, concept.memory, concept.metadata.model_dump(exclude_none=True))
+        graph.add_edge(topic.id, concept.id, type="PARENT")
+        concept_items.append(concept)
+
+    # === Step 3: Create supporting facts under each concept ===
+    for i, text in enumerate(fact_texts):
+        fact = TextualMemoryItem(
+            memory=text,
+            metadata=TreeNodeTextualMemoryMetadata(
+                memory_type="WorkingMemory",
+                key=f"Fact {i + 1}",
+                hierarchy_level="fact",
+                type="fact",
+                memory_time="2024-01-01",
+                status="activated",
+                visibility="public",
+                updated_at=now,
+                embedding=embed_memory_item(text),
+                confidence=85.0,
+                tags=["fact"],
+            ),
+        )
+        graph.add_node(fact.id, fact.memory, fact.metadata.model_dump(exclude_none=True))
+        graph.add_edge(concept_items[i % len(concept_items)].id, fact.id, type="PARENT")
+
+    # === Step 4: Retrieve memory using semantic search ===
+    vector = embed_memory_item("How is memory retrieved?")
+    search_result = graph.search_by_embedding(vector, top_k=2)
+    for r in search_result:
+        node = graph.get_node(r["id"])
+        print("🔍 Search result:", node["memory"])
+
+    # === Step 5: Tag-based neighborhood discovery ===
+    neighbors = graph.get_neighbors_by_tag(["concept"], exclude_ids=[], top_k=2)
+    print("📎 Tag-related nodes:", [neighbor["memory"] for neighbor in neighbors])
+
+    # === Step 6: Retrieve children (facts) of first concept ===
+    children = graph.get_children_with_embeddings(concept_items[0].id)
+    print("📍 Children of concept:", [child["memory"] for child in children])
+
+    # === Step 7: Export a local subgraph and grouped statistics ===
+    subgraph = graph.get_subgraph(topic.id, depth=2)
+    print("📌 Subgraph node count:", len(subgraph["neighbors"]))
+
+    stats = graph.get_grouped_counts(["memory_type", "status"])
+    print("📊 Grouped counts:", stats)
+
+    # === Step 8: Demonstrate updates and cleanup ===
+    graph.update_node(
+        concept_items[0].id, {"confidence": 99.0, "created_at": "2025-07-24T20:11:56.375687"}
+    )
+    graph.remove_oldest_memory("WorkingMemory", keep_latest=1)
+    graph.delete_edge(topic.id, concept_items[0].id, type="PARENT")
+    graph.delete_node(concept_items[1].id)
+
+    # === Step 9: Export and re-import the entire graph structure ===
+    exported = graph.export_graph()
+    graph.import_graph(exported)
+    print("📦 Graph exported and re-imported, total nodes:", len(exported["nodes"]))
+
+    # ====================================
+    # 🔍 Step 10: extra checks
+    # ====================================
+    print(f"\n=== 🔍 Extra Tests for user: {user_name} ===")
+
+    print(" - Memory count:", graph.get_memory_count("LongTermMemory"))
+    print(" - Node count:", graph.count_nodes("LongTermMemory"))
+    print(" - All LongTermMemory items:", graph.get_all_memory_items("LongTermMemory"))
+
+    if len(exported["edges"]) > 0:
+        n1, n2 = exported["edges"][0]["source"], exported["edges"][0]["target"]
+        print(" - Edge exists?", graph.edge_exists(n1, n2, exported["edges"][0]["type"]))
+        print(" - Edges for node:", graph.get_edges(n1))
+
+    filters = [{"field": "memory_type", "op": "=", "value": "LongTermMemory"}]
+    print(" - Metadata query result:", graph.get_by_metadata(filters))
+    print(
+        " - Optimization candidates:", graph.get_structure_optimization_candidates("LongTermMemory")
+    )
+    try:
+        graph.drop_database()
+    except ValueError as e:
+        print(" - drop_database raised ValueError as expected:", e)
+
+
+def example_complex_shared_db(db_name: str = "shared-traval-group-complex"):
+    # User 1: Alice explores structured memory for LLMs
+    run_user_session(
+        user_name="alice",
+        db_name=db_name,
+        topic_text="Alice studies structured memory and long-term memory optimization in LLMs.",
+        concept_texts=[
+            "Short-term memory can be simulated using WorkingMemory blocks.",
+            "A structured memory graph improves retrieval precision for agents.",
+        ],
+        fact_texts=[
+            "Embedding search is used to find semantically similar memory items.",
+            "User memories are stored as node-edge structures that support hierarchical reasoning.",
+        ],
+    )
+
+    # User 2: Bob focuses on GNN-based reasoning
+    run_user_session(
+        user_name="bob",
+        db_name=db_name,
+        topic_text="Bob investigates how graph neural networks can support knowledge reasoning.",
+        concept_texts=[
+            "GNNs can learn high-order relations among entities.",
+            "Attention mechanisms in graphs improve inference precision.",
+        ],
+        fact_texts=[
+            "GAT outperforms GCN in graph classification tasks.",
+            "Multi-hop reasoning helps answer complex queries.",
+        ],
+    )
+
+
+if __name__ == "__main__":
+    print("\n=== Example: Single-DB ===")
+    example_shared_db(db_name="shared_traval_group-new")
+
+    print("\n=== Example: Single-DB-Complex ===")
+    example_complex_shared_db(db_name="shared-traval-group-complex-new2")
diff --git a/examples/basic_modules/parser.py b/examples/basic_modules/parser.py
new file mode 100644
index 000000000..c063964b7
--- /dev/null
+++ b/examples/basic_modules/parser.py
@@ -0,0 +1,15 @@
+from memos.configs.parser import ParserConfigFactory
+from memos.parsers.factory import ParserFactory
+
+
+config = ParserConfigFactory.model_validate(
+    {
+        "backend": "markitdown",
+        "config": {},
+    }
+)
+parser = ParserFactory.from_config(config)
+file_path = "README.md"
+markdown_text = parser.parse(file_path)
+print("Markdown text:\n", markdown_text)
+print("==" * 20)
diff --git a/examples/basic_modules/reranker.py b/examples/basic_modules/reranker.py
index e5a869789..47bf1405c 100644
--- a/examples/basic_modules/reranker.py
+++ b/examples/basic_modules/reranker.py
@@ -50,7 +50,6 @@ def show_ranked(title: str, ranked: list[tuple[TextualMemoryItem, float]], top_n
 def main():
     # -------------------------------
     # 1) Build the embedder (real vectors)
-    # You may need to set valid OPENAI_API_KEY and OPENAI_API_BASE in your environment variables.
     # -------------------------------
     embedder_cfg = EmbedderConfigFactory.model_validate(
         {
@@ -63,22 +62,6 @@ def main():
             },
         }
     )
-    """
-    # -------------------------------
-    # Optional: Build the embedder (using local sentence-transformers)
-    # -------------------------------
-    # Use a local model so no API key is required.
-    embedder_cfg = EmbedderConfigFactory.model_validate(
-        {
-            "backend": "sentence_transformer",
-            "config": {
-                "model_name_or_path": "nomic-ai/nomic-embed-text-v1.5",
-                "trust_remote_code": True,
-            },
-        }
-    )
-    """
-
     embedder = EmbedderFactory.from_config(embedder_cfg)
 
     # -------------------------------
diff --git a/examples/basic_modules/tree_textual_memory_reasoner.py b/examples/basic_modules/tree_textual_memory_reasoner.py
new file mode 100644
index 000000000..369787458
--- /dev/null
+++ b/examples/basic_modules/tree_textual_memory_reasoner.py
@@ -0,0 +1,171 @@
+from memos import log
+from memos.configs.embedder import EmbedderConfigFactory
+from memos.configs.graph_db import GraphDBConfigFactory
+from memos.configs.llm import LLMConfigFactory
+from memos.embedders.factory import EmbedderFactory
+from memos.graph_dbs.factory import GraphStoreFactory
+from memos.llms.factory import LLMFactory
+from memos.memories.textual.item import TextualMemoryItem, TreeNodeTextualMemoryMetadata
+from memos.memories.textual.tree_text_memory.retrieve.reasoner import MemoryReasoner
+from memos.memories.textual.tree_text_memory.retrieve.retrieval_mid_structs import ParsedTaskGoal
+
+
+logger = log.get_logger(__name__)
+
+embedder_config = EmbedderConfigFactory.model_validate(
+    {
+        "backend": "ollama",
+        "config": {
+            "model_name_or_path": "nomic-embed-text:latest",
+        },
+    }
+)
+embedder = EmbedderFactory.from_config(embedder_config)
+
+# Step 1: Load LLM config and instantiate
+config = LLMConfigFactory.model_validate(
+    {
+        "backend": "ollama",
+        "config": {
+            "model_name_or_path": "qwen3:0.6b",
+            "temperature": 0.7,
+            "max_tokens": 1024,
+        },
+    }
+)
+llm = LLMFactory.from_config(config)
+
+# Step 2: Prepare a mock ParsedTaskGoal
+parsed_goal = ParsedTaskGoal(
+    memories=[
+        "Multi-UAV Long-Term Coverage",
+        "Coverage Metrics",
+        "Reward Function Design",
+        "Energy Model",
+        "CT and FT Definition",
+        "Reward Components",
+        "Energy Cost Components",
+    ],
+    keys=["UAV", "coverage", "energy", "reward"],
+    tags=[],
+    goal_type="explanation",
+)
+
+query = "How can multiple UAVs coordinate to maximize coverage while saving energy?"
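+# NOTE: the query embedding is precomputed here for illustration; a fuller
+# pipeline would pass it to the graph store's search_by_embedding() (initialized
+# below) to retrieve candidate memories before reasoning over them.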
+query_embedding = embedder.embed([query])[0]
+
+
+# Step 3: Initialize graph store
+graph_config = GraphDBConfigFactory(
+    backend="neo4j",
+    config={
+        "uri": "bolt://localhost:7687",
+        "user": "neo4j",
+        "password": "12345678",
+        "db_name": "user06alice",
+        "auto_create": True,
+    },
+)
+graph_store = GraphStoreFactory.from_config(graph_config)
+
+# Step 4: Prepare mock ranked memories (as if returned by a retriever)
+ranked_memories = [
+    TextualMemoryItem(
+        id="a88db9ce-3c77-4e83-8d61-aa9ef95c957e",
+        memory="Coverage performance is measured using CT (Coverage Time) and FT (Fairness Time) metrics.",
+        metadata=TreeNodeTextualMemoryMetadata(
+            user_id=None,
+            session_id=None,
+            status="activated",
+            type="fact",
+            memory_time="2024-01-01",
+            source="file",
+            confidence=91.0,
+            entities=["CT", "FT"],
+            tags=["coverage", "fairness", "metrics"],
+            visibility="public",
+            updated_at="2025-06-11T11:51:24.438001",
+            memory_type="LongTermMemory",
+            key="Coverage Metrics",
+            value="CT and FT used for long-term area and fairness evaluation",
+            hierarchy_level="concept",
+            sources=["paper://multi-uav-coverage/metrics"],
+            embedding=[0.01] * 768,
+        ),
+    ),
+    TextualMemoryItem(
+        id="c34f5e6b-2d34-4e6f-8c9b-abcdef123456",
+        memory="The capital of France is Paris, which is known for the Eiffel Tower.",
+        metadata=TreeNodeTextualMemoryMetadata(
+            user_id=None,
+            session_id=None,
+            status="activated",
+            type="fact",
+            memory_time="2024-01-01",
+            source="file",
+            confidence=90.0,
+            entities=["France", "Paris", "Eiffel Tower"],
+            tags=["geography", "city", "landmark"],
+            visibility="public",
+            updated_at="2025-06-11T11:51:24.438001",
+            memory_type="LongTermMemory",
+            key="Geography Fact",
+            value="Paris is the capital of France",
+            hierarchy_level="concept",
+            sources=["wikipedia://paris"],
+            embedding=[0.03] * 768,
+        ),
+    ),
+    TextualMemoryItem(
+        id="d56a7b8c-3e45-4f7a-9dab-fedcba654321",
+        memory="Total energy cost is calculated from both mechanical movement and communication transmission.",
+        metadata=TreeNodeTextualMemoryMetadata(
+            user_id=None,
+            session_id=None,
+            status="activated",
+            type="fact",
+            memory_time="2024-01-01",
+            source="file",
+            confidence=89.0,
+            entities=["movement power", "transmission power"],
+            tags=["energy", "movement", "transmission"],
+            visibility="public",
+            updated_at="2025-06-11T11:51:24.438001",
+            memory_type="LongTermMemory",
+            key="Energy Cost Components",
+            value="Includes movement and communication energy",
+            hierarchy_level="fact",
+            sources=["paper://multi-uav-coverage/energy-detail"],
+            embedding=[0.04] * 768,
+        ),
+    ),
+]
+
+# Step 5: Initialize the memory reasoner
+reasoner = MemoryReasoner(llm=llm)
+
+
+# Step 6: Print retrieved memory items before reasoning
+print("\n=== Retrieved Memory Items (Before Reason) ===")
+for idx, item in enumerate(ranked_memories):
+    print(f"[Original #{idx + 1}] ID: {item.id}")
+    print(f"Memory: {item.memory[:200]}...\n")
+
+# Step 7: Reason over the ranked memories
+reasoned_memories = reasoner.reason(
+    query=query,
+    ranked_memories=ranked_memories,
+    parsed_goal=parsed_goal,
+)
+
+# Step 8: Print reasoned memory items with original positions
+print("\n=== Memory Items After Reason (Sorted) ===")
+id_to_original_rank = {item.id: i + 1 for i, item in enumerate(ranked_memories)}
+
+for idx, item in enumerate(reasoned_memories):
+    original_rank = id_to_original_rank.get(item.id, "-")
+    print(f"[Reasoned #{idx + 1}] ID: {item.id} (Original #{original_rank})")
+    print(f"Memory: {item.memory[:200]}...\n")
diff --git a/examples/mem_scheduler/api_w_scheduler.py b/examples/mem_scheduler/api_w_scheduler.py
index b02161c12..ba3d64bf1 100644
--- a/examples/mem_scheduler/api_w_scheduler.py
+++ b/examples/mem_scheduler/api_w_scheduler.py
@@ -1,12 +1,3 @@
-"""
-# Prerequisites & Configuration
-# To run this script, you must have the following services
-# running and configured in your .env file (or environment variables):
-# 1. Redis (Required for TaskStatusTracker and Scheduler Queue)
-# 2. Graph Database (Required for Memory Storage)
-# 3. Vector Database (Required if using Neo4j Community or Preference Memory)
-"""
-
 import sys
 
 from pathlib import Path
diff --git a/examples/mem_scheduler/memos_w_scheduler.py b/examples/mem_scheduler/memos_w_scheduler.py
index b7250a677..55e966b5a 100644
--- a/examples/mem_scheduler/memos_w_scheduler.py
+++ b/examples/mem_scheduler/memos_w_scheduler.py
@@ -1,10 +1,3 @@
-# Prerequisites & Configuration
-# To run this script, you must have the following services
-# running and configured in your .env file (or environment variables):
-# 1. Redis (Required for TaskStatusTracker and Scheduler Queue)
-# 2. Graph Database (Required for Memory Storage)
-# 3. Vector Database (Required if using Neo4j Community or Preference Memory)
-
 import asyncio
 import json
 import os
diff --git a/examples/mem_scheduler/redis_example.py b/examples/mem_scheduler/redis_example.py
index 00989c424..9f7ca4139 100644
--- a/examples/mem_scheduler/redis_example.py
+++ b/examples/mem_scheduler/redis_example.py
@@ -1,11 +1,3 @@
-# Prerequisites:
-# 1. Ensure a Redis server is running locally on the default port (6379).
-#    You can start it with: `redis-server`
-#    On macOS with Homebrew: `/opt/homebrew/bin/redis-server` or `brew services start redis`
-#    On Linux: `sudo service redis-server start`
-# 2. If Redis is running on a different host/port, update the configuration or environment variables accordingly.
-
-
 import sys
 import time
 
diff --git a/examples/mem_scheduler/run_async_tasks.py b/examples/mem_scheduler/run_async_tasks.py
index fbb5a7bcc..7f544c3da 100644
--- a/examples/mem_scheduler/run_async_tasks.py
+++ b/examples/mem_scheduler/run_async_tasks.py
@@ -1,12 +1,3 @@
-"""
-# Prerequisites & Configuration
-# To run this script, you must have the following services
-# running and configured in your .env file (or environment variables):
-# 1. Redis (Required for TaskStatusTracker and Scheduler Queue)
-# 2. Graph Database (Required for Memory Storage)
-# 3. Vector Database (Required if using Neo4j Community or Preference Memory)
-"""
-
 from pathlib import Path
 from time import sleep
 
diff --git a/examples/mem_scheduler/show_redis_status.py b/examples/mem_scheduler/show_redis_status.py
index 94a915588..d88e86655 100644
--- a/examples/mem_scheduler/show_redis_status.py
+++ b/examples/mem_scheduler/show_redis_status.py
@@ -1,25 +1,10 @@
-"""
-# Prerequisites:
-# 1. Ensure a Redis server is running locally on the default port (6379).
-#    You can start it with: `redis-server`
-#    On macOS with Homebrew: `/opt/homebrew/bin/redis-server` or `brew services start redis`
-#    On Linux: `sudo service redis-server start`
-# 2. If Redis is running on a different host/port, update the configuration or environment variables accordingly.
-""" - import time -from memos.mem_scheduler.task_schedule_modules.orchestrator import SchedulerOrchestrator +from memos.api.routers.server_router import mem_scheduler from memos.mem_scheduler.task_schedule_modules.redis_queue import SchedulerRedisQueue -# Explicitly initialize Redis queue for monitoring -queue = SchedulerRedisQueue( - max_len=None, - consumer_group="scheduler_group", - consumer_name="monitor_consumer", - orchestrator=SchedulerOrchestrator(), -) +queue = mem_scheduler.memos_message_queue.memos_message_queue def fetch_status( diff --git a/src/memos/graph_dbs/neo4j.py b/src/memos/graph_dbs/neo4j.py index 2cf9ae32f..64aedc8f4 100644 --- a/src/memos/graph_dbs/neo4j.py +++ b/src/memos/graph_dbs/neo4j.py @@ -1687,7 +1687,7 @@ def _parse_node(self, node_data: dict[str, Any]) -> dict[str, Any]: node.pop("user_name", None) # serialization - if node.get("sources"): + if node["sources"]: for idx in range(len(node["sources"])): if not ( isinstance(node["sources"][idx], str)