2 changes: 2 additions & 0 deletions src/memos/mem_reader/multi_modal_struct.py
@@ -646,6 +646,7 @@ def _merge_memories_with_llm(

return None

+@timed
def _process_string_fine(
self,
fast_memory_items: list[TextualMemoryItem],
@@ -883,6 +884,7 @@ def _get_llm_tool_trajectory_response(self, mem_str: str) -> dict:
logger.error(f"[MultiModalFine] Error calling LLM for tool trajectory: {e}")
return []

+@timed
def _process_tool_trajectory_fine(
self, fast_memory_items: list[TextualMemoryItem], info: dict[str, Any], **kwargs
) -> list[TextualMemoryItem]:
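The decorator applied above comes from `memos.utils.timed`, whose implementation is not part of this diff. As a minimal sketch, assuming it simply measures and logs the wall-clock duration of the wrapped callable, it might look like this:

```python
# Sketch only: the real memos.utils.timed is not shown in this PR, so this is an
# assumed implementation that logs how long the wrapped function takes to run.
import functools
import logging
import time

logger = logging.getLogger(__name__)


def timed(func):
    """Log the wall-clock execution time of the decorated callable."""

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = time.perf_counter() - start
            logger.info("[timed] %s took %.3fs", func.__qualname__, elapsed)

    return wrapper
```

On that assumption, each decorated parser and extraction step would emit one log line per call with its elapsed time.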
4 changes: 4 additions & 0 deletions src/memos/mem_reader/read_multi_modal/multi_modal_parser.py
@@ -11,6 +11,7 @@
from memos.log import get_logger
from memos.memories.textual.item import SourceMessage, TextualMemoryItem
from memos.types import MessagesType
+from memos.utils import timed

from .assistant_parser import AssistantParser
from .base import BaseMessageParser
@@ -120,6 +121,7 @@ def _get_parser(self, message: Any) -> BaseMessageParser | None:
logger.warning(f"[MultiModalParser] Could not determine parser for message: {message}")
return None

+@timed
def parse(
self,
message: MessagesType,
@@ -157,6 +159,7 @@ def parse(
logger.error(f"[MultiModalParser] Error parsing message: {e}")
return []

+@timed
def parse_batch(
self,
messages: list[MessagesType],
@@ -182,6 +185,7 @@ def parse_batch(
results.append(items)
return results

+@timed
def process_transfer(
self,
source: SourceMessage,
(changes to a third file; file path not shown in this view)
@@ -36,6 +36,7 @@
TOOL_GENERATION_PROMPT,
)
from memos.types import MessageList
+from memos.utils import timed


load_dotenv()
@@ -64,6 +65,7 @@ def _generate_content_by_llm(llm: BaseLLM, prompt_template: str, **kwargs) -> An
return {} if "json" in prompt_template.lower() else ""


+@timed
def _batch_extract_skills(
task_chunks: dict[str, MessageList],
related_memories_map: dict[str, list[TextualMemoryItem]],
@@ -97,6 +99,7 @@ def _batch_extract_skills(
return results


+@timed
def _batch_generate_skill_details(
raw_skills_data: list[tuple[dict[str, Any], str, MessageList]],
related_skill_memories_map: dict[str, list[TextualMemoryItem]],
@@ -756,6 +759,7 @@ def _delete_skills(
logger.warning(f"Error deleting local file: {e}")


+@timed
def _write_skills_to_file(
skill_memory: dict[str, Any], info: dict[str, Any], skills_dir_config: dict[str, Any]
) -> str:
@@ -1000,6 +1004,7 @@ def _get_skill_file_storage_location() -> str:
return "LOCAL"


+@timed
def process_skill_memory_fine(
fast_memory_items: list[TextualMemoryItem],
info: dict[str, Any],
@@ -1064,6 +1069,7 @@ def process_skill_memory_fine(
)
related_skill_memories_by_task[task_name] = []

+@timed
def _simple_extract():
# simple extract skill memory, only one stage
memories = []
@@ -1096,6 +1102,7 @@ def _simple_extract():
)
return memories

+@timed
def _full_extract():
# full extract skill memory, include two stage
raw_extraction_results = _batch_extract_skills(
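One design note visible in the last two hunks: `@timed` is applied not only to module-level functions and methods, but also to the nested helpers `_simple_extract` and `_full_extract` inside `process_skill_memory_fine`, so each extraction path is timed separately from the enclosing call. A small illustrative sketch of that pattern (names other than `timed` are hypothetical), assuming `timed` behaves as outlined above:

```python
# Illustrative sketch: a decorated nested helper is timed on its own, in addition
# to the timing of the enclosing decorated function, mirroring how
# _simple_extract / _full_extract are wrapped inside process_skill_memory_fine.
from memos.utils import timed  # the import added by this PR


@timed
def process(items: list[str]) -> list[str]:
    @timed
    def _inner_stage() -> list[str]:
        return [item.upper() for item in items]  # stand-in for per-stage work

    staged = _inner_stage()  # logged as its own timing entry
    return staged            # the outer call logs the total duration
```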