From 70373f93e8c01f5ac3671c5b7209d3dd380a0df9 Mon Sep 17 00:00:00 2001 From: "yuan.wang" Date: Thu, 4 Dec 2025 13:23:55 +0800 Subject: [PATCH 01/26] fix playground bug, internet search judge --- src/memos/api/handlers/chat_handler.py | 1 + src/memos/api/product_models.py | 3 + src/memos/memories/textual/tree.py | 67 ++++++------------- .../tree_text_memory/retrieve/searcher.py | 13 +++- .../retrieve/task_goal_parser.py | 4 ++ src/memos/multi_mem_cube/single_cube.py | 2 + 6 files changed, 41 insertions(+), 49 deletions(-) diff --git a/src/memos/api/handlers/chat_handler.py b/src/memos/api/handlers/chat_handler.py index e9bb2e499..3cfa49d3d 100644 --- a/src/memos/api/handlers/chat_handler.py +++ b/src/memos/api/handlers/chat_handler.py @@ -400,6 +400,7 @@ def generate_chat_response() -> Generator[str, None, None]: include_preference=chat_req.include_preference, pref_top_k=chat_req.pref_top_k, filter=chat_req.filter, + playground_search_goal_parser=True, ) search_response = self.search_handler.handle_search_memories(search_req) diff --git a/src/memos/api/product_models.py b/src/memos/api/product_models.py index f949f6cb5..9dfd872b0 100644 --- a/src/memos/api/product_models.py +++ b/src/memos/api/product_models.py @@ -374,6 +374,9 @@ class APISearchRequest(BaseRequest): ), ) + # TODO: tmp field for playground search goal parser, will be removed later + playground_search_goal_parser: bool = Field(False, description="Playground search goal parser") + # ==== Context ==== chat_history: MessageList | None = Field( None, diff --git a/src/memos/memories/textual/tree.py b/src/memos/memories/textual/tree.py index cad850d2d..f64d9fb6e 100644 --- a/src/memos/memories/textual/tree.py +++ b/src/memos/memories/textual/tree.py @@ -132,27 +132,15 @@ def get_current_memory_size(self, user_name: str | None = None) -> dict[str, int def get_searcher( self, manual_close_internet: bool = False, moscube: bool = False, process_llm=None ): - if (self.internet_retriever is not None) and 
manual_close_internet: - logger.warning( - "Internet retriever is init by config , but this search set manual_close_internet is True and will close it" - ) - searcher = Searcher( - self.dispatcher_llm, - self.graph_store, - self.embedder, - self.reranker, - internet_retriever=None, - process_llm=process_llm, - ) - else: - searcher = Searcher( - self.dispatcher_llm, - self.graph_store, - self.embedder, - self.reranker, - internet_retriever=self.internet_retriever, - process_llm=process_llm, - ) + searcher = Searcher( + self.dispatcher_llm, + self.graph_store, + self.embedder, + self.reranker, + internet_retriever=self.internet_retriever, + manual_close_internet=manual_close_internet, + process_llm=process_llm, + ) return searcher def search( @@ -191,30 +179,17 @@ def search( Returns: list[TextualMemoryItem]: List of matching memories. """ - if (self.internet_retriever is not None) and manual_close_internet: - searcher = Searcher( - self.dispatcher_llm, - self.graph_store, - self.embedder, - self.reranker, - bm25_retriever=self.bm25_retriever, - internet_retriever=None, - search_strategy=self.search_strategy, - manual_close_internet=manual_close_internet, - tokenizer=self.tokenizer, - ) - else: - searcher = Searcher( - self.dispatcher_llm, - self.graph_store, - self.embedder, - self.reranker, - bm25_retriever=self.bm25_retriever, - internet_retriever=self.internet_retriever, - search_strategy=self.search_strategy, - manual_close_internet=manual_close_internet, - tokenizer=self.tokenizer, - ) + searcher = Searcher( + self.dispatcher_llm, + self.graph_store, + self.embedder, + self.reranker, + bm25_retriever=self.bm25_retriever, + internet_retriever=self.internet_retriever, + search_strategy=self.search_strategy, + manual_close_internet=manual_close_internet, + tokenizer=self.tokenizer, + ) return searcher.search( query, top_k, @@ -224,9 +199,9 @@ def search( search_filter, search_priority, user_name=user_name, - plugin=kwargs.get("plugin", False), 
search_tool_memory=search_tool_memory, tool_mem_top_k=tool_mem_top_k, + **kwargs, ) def get_relevant_subgraph( diff --git a/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py b/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py index 761797c40..b1fb210c6 100644 --- a/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py +++ b/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py @@ -90,6 +90,7 @@ def retrieve( search_filter=search_filter, search_priority=search_priority, user_name=user_name, + **kwargs, ) results = self._retrieve_paths( query, @@ -166,7 +167,7 @@ def search( else: logger.debug(f"[SEARCH] Received info dict: {info}") - if kwargs.get("plugin"): + if kwargs.get("plugin", False): logger.info(f"[SEARCH] Retrieve from plugin: {query}") retrieved_results = self._retrieve_simple( query=query, top_k=top_k, search_filter=search_filter, user_name=user_name @@ -183,6 +184,7 @@ def search( user_name=user_name, search_tool_memory=search_tool_memory, tool_mem_top_k=tool_mem_top_k, + **kwargs, ) full_recall = kwargs.get("full_recall", False) @@ -218,6 +220,7 @@ def _parse_task( search_filter: dict | None = None, search_priority: dict | None = None, user_name: str | None = None, + **kwargs, ): """Parse user query, do embedding search and create context""" context = [] @@ -268,6 +271,7 @@ def _parse_task( conversation=info.get("chat_history", []), mode=mode, use_fast_graph=self.use_fast_graph, + **kwargs, ) query = parsed_goal.rephrased_query or query @@ -351,7 +355,7 @@ def _retrieve_paths( query, parsed_goal, query_embedding, - top_k, + tool_mem_top_k, memory_type, search_filter, search_priority, @@ -516,7 +520,10 @@ def _retrieve_from_internet( user_id: str | None = None, ): """Retrieve and rerank from Internet source""" - if not self.internet_retriever or self.manual_close_internet: + if not self.internet_retriever: + logger.info(f"[PATH-C] '{query}' Skipped (no retriever)") + return [] + if 
self.manual_close_internet and not parsed_goal.internet_search: logger.info(f"[PATH-C] '{query}' Skipped (no retriever, fast mode)") return [] if memory_type not in ["All"]: diff --git a/src/memos/memories/textual/tree_text_memory/retrieve/task_goal_parser.py b/src/memos/memories/textual/tree_text_memory/retrieve/task_goal_parser.py index b9814f079..f75f8d045 100644 --- a/src/memos/memories/textual/tree_text_memory/retrieve/task_goal_parser.py +++ b/src/memos/memories/textual/tree_text_memory/retrieve/task_goal_parser.py @@ -39,6 +39,10 @@ def parse( - mode == 'fast': use jieba to split words only - mode == 'fine': use LLM to parse structured topic/keys/tags """ + # TODO: tmp mode for playground search goal parser, will be removed later + if kwargs.get("playground_search_goal_parser", False): + mode = "fine" + if mode == "fast": return self._parse_fast(task_description, context=context, **kwargs) elif mode == "fine": diff --git a/src/memos/multi_mem_cube/single_cube.py b/src/memos/multi_mem_cube/single_cube.py index f9e084347..2d381ac3e 100644 --- a/src/memos/multi_mem_cube/single_cube.py +++ b/src/memos/multi_mem_cube/single_cube.py @@ -436,6 +436,8 @@ def _fast_search( plugin=plugin, search_tool_memory=search_req.search_tool_memory, tool_mem_top_k=search_req.tool_mem_top_k, + # TODO: tmp field for playground search goal parser, will be removed later + playground_search_goal_parser=search_req.playground_search_goal_parser, ) formatted_memories = [format_memory_item(data) for data in search_results] From 11cf00aa87d40aa75ecaad3652d5a373a35e6107 Mon Sep 17 00:00:00 2001 From: "yuan.wang" Date: Thu, 4 Dec 2025 16:56:08 +0800 Subject: [PATCH 02/26] fix playground internet bug --- src/memos/api/handlers/chat_handler.py | 109 +++++++++++++++++++------ 1 file changed, 83 insertions(+), 26 deletions(-) diff --git a/src/memos/api/handlers/chat_handler.py b/src/memos/api/handlers/chat_handler.py index 3cfa49d3d..6a65c1429 100644 --- a/src/memos/api/handlers/chat_handler.py 
+++ b/src/memos/api/handlers/chat_handler.py @@ -388,22 +388,6 @@ def generate_chat_response() -> Generator[str, None, None]: [chat_req.mem_cube_id] if chat_req.mem_cube_id else [chat_req.user_id] ) - search_req = APISearchRequest( - query=chat_req.query, - user_id=chat_req.user_id, - readable_cube_ids=readable_cube_ids, - mode=chat_req.mode, - internet_search=chat_req.internet_search, - top_k=chat_req.top_k, - chat_history=chat_req.history, - session_id=chat_req.session_id, - include_preference=chat_req.include_preference, - pref_top_k=chat_req.pref_top_k, - filter=chat_req.filter, - playground_search_goal_parser=True, - ) - - search_response = self.search_handler.handle_search_memories(search_req) # for playground, add the query to memory without response self._start_add_to_memory( user_id=chat_req.user_id, @@ -414,7 +398,6 @@ def generate_chat_response() -> Generator[str, None, None]: async_mode="sync", ) - yield f"data: {json.dumps({'type': 'status', 'data': '1'})}\n\n" # Use first readable cube ID for scheduler (backward compatibility) scheduler_cube_id = ( readable_cube_ids[0] if readable_cube_ids else chat_req.user_id @@ -425,7 +408,26 @@ def generate_chat_response() -> Generator[str, None, None]: query=chat_req.query, label=QUERY_LABEL, ) - # Extract memories from search results + + # ====== first search without parse goal ====== + search_req = APISearchRequest( + query=chat_req.query, + user_id=chat_req.user_id, + readable_cube_ids=readable_cube_ids, + mode=chat_req.mode, + internet_search=False, + top_k=chat_req.top_k, + chat_history=chat_req.history, + session_id=chat_req.session_id, + include_preference=chat_req.include_preference, + pref_top_k=chat_req.pref_top_k, + filter=chat_req.filter, + ) + search_response = self.search_handler.handle_search_memories(search_req) + + yield f"data: {json.dumps({'type': 'status', 'data': '1'})}\n\n" + + # Extract memories from search results (first search) memories_list = [] if search_response.data and 
search_response.data.get("text_mem"): text_mem_results = search_response.data["text_mem"] @@ -433,14 +435,13 @@ def generate_chat_response() -> Generator[str, None, None]: memories_list = text_mem_results[0]["memories"] # Filter memories by threshold - filtered_memories = self._filter_memories_by_threshold(memories_list) + first_filtered_memories = self._filter_memories_by_threshold(memories_list) + + # Prepare reference data (first search) + reference = prepare_reference_data(first_filtered_memories) + # get preference string + pref_string = search_response.data.get("pref_string", "") - # Prepare reference data - reference = prepare_reference_data(filtered_memories) - # get internet reference - internet_reference = self._get_internet_reference( - search_response.data.get("text_mem")[0]["memories"] - ) yield f"data: {json.dumps({'type': 'reference', 'data': reference})}\n\n" # Prepare preference markdown string @@ -450,9 +451,52 @@ def generate_chat_response() -> Generator[str, None, None]: pref_md_string = self._build_pref_md_string_for_playground(pref_memories) yield f"data: {json.dumps({'type': 'pref_md_string', 'data': pref_md_string})}\n\n" + # internet status + yield f"data: {json.dumps({'type': 'status', 'data': 'start_internet_search'})}\n\n" + + # ====== second search with parse goal ====== + search_req = APISearchRequest( + query=chat_req.query, + user_id=chat_req.user_id, + readable_cube_ids=readable_cube_ids, + mode=chat_req.mode, + internet_search=chat_req.internet_search, + top_k=chat_req.top_k, + chat_history=chat_req.history, + session_id=chat_req.session_id, + include_preference=False, + filter=chat_req.filter, + playground_search_goal_parser=True, + ) + search_response = self.search_handler.handle_search_memories(search_req) + + # Extract memories from search results (second search) + memories_list = [] + if search_response.data and search_response.data.get("text_mem"): + text_mem_results = search_response.data["text_mem"] + if text_mem_results 
and text_mem_results[0].get("memories"): + memories_list = text_mem_results[0]["memories"] + + # Filter memories by threshold + second_filtered_memories = self._filter_memories_by_threshold(memories_list) + + # dedup and supplement memories + filtered_memories = self._dedup_and_supplement_memories( + first_filtered_memories, second_filtered_memories + ) + + # Prepare remain reference data (second search) + reference = prepare_reference_data(filtered_memories) + # get internet reference + internet_reference = self._get_internet_reference( + search_response.data.get("text_mem")[0]["memories"] + ) + + yield f"data: {json.dumps({'type': 'reference', 'data': reference})}\n\n" + # Step 2: Build system prompt with memories system_prompt = self._build_enhance_system_prompt( - filtered_memories, search_response.data.get("pref_string", "") + filtered_memories, pref_string ) # Prepare messages @@ -588,6 +632,19 @@ def generate_chat_response() -> Generator[str, None, None]: self.logger.error(f"Failed to start chat stream: {traceback.format_exc()}") raise HTTPException(status_code=500, detail=str(traceback.format_exc())) from err + def _dedup_and_supplement_memories( + self, first_filtered_memories: list, second_filtered_memories: list + ) -> list: + """Remove memory from second_filtered_memories that already exists in first_filtered_memories, return remaining memories""" + # Create a set of IDs from first_filtered_memories for efficient lookup + first_memory_ids = {memory["id"] for memory in first_filtered_memories} + + remaining_memories = [] + for memory in second_filtered_memories: + if memory["id"] not in first_memory_ids: + remaining_memories.append(memory) + return remaining_memories + def _get_internet_reference( self, search_response: list[dict[str, any]] ) -> list[dict[str, any]]: From c861f6160e8eee7eb2e925c6aae6d937d1e9c30b Mon Sep 17 00:00:00 2001 From: "yuan.wang" Date: Thu, 4 Dec 2025 18:48:18 +0800 Subject: [PATCH 03/26] modify delete mem --- 
src/memos/api/handlers/memory_handler.py | 8 ++------ src/memos/memories/textual/tree.py | 22 ++++++++++++++++++++++ 2 files changed, 24 insertions(+), 6 deletions(-) diff --git a/src/memos/api/handlers/memory_handler.py b/src/memos/api/handlers/memory_handler.py index dc72d0112..a33ee9254 100644 --- a/src/memos/api/handlers/memory_handler.py +++ b/src/memos/api/handlers/memory_handler.py @@ -209,12 +209,8 @@ def handle_delete_memories(delete_mem_req: DeleteMemoryRequest, naive_mem_cube: if naive_mem_cube.pref_mem is not None: naive_mem_cube.pref_mem.delete(delete_mem_req.memory_ids) elif delete_mem_req.file_ids is not None: - # TODO: Implement deletion by file_ids - # Need to find memory_ids associated with file_ids and delete them - logger.warning("Deletion by file_ids not implemented yet") - return DeleteMemoryResponse( - message="Deletion by file_ids not implemented yet", - data={"status": "failure"}, + naive_mem_cube.text_mem.delete_by_filter( + writable_cube_ids=delete_mem_req.writable_cube_ids, file_ids=delete_mem_req.file_ids ) elif delete_mem_req.filter is not None: # TODO: Implement deletion by filter diff --git a/src/memos/memories/textual/tree.py b/src/memos/memories/textual/tree.py index f64d9fb6e..c53c13618 100644 --- a/src/memos/memories/textual/tree.py +++ b/src/memos/memories/textual/tree.py @@ -339,6 +339,28 @@ def delete_all(self) -> None: logger.error(f"An error occurred while deleting all memories: {e}") raise + def delete_by_filter( + self, + writable_cube_ids: list[str], + memory_ids: list[str] | None = None, + file_ids: list[str] | None = None, + filter: dict | None = None, + ) -> int: + """Delete memories by filter. + Returns: + int: Number of nodes deleted. 
+ """ + try: + return self.graph_store.delete_node_by_prams( + writable_cube_ids=writable_cube_ids, + memory_ids=memory_ids, + file_ids=file_ids, + filter=filter, + ) + except Exception as e: + logger.error(f"An error occurred while deleting memories by filter: {e}") + raise + def load(self, dir: str) -> None: try: memory_file = os.path.join(dir, self.config.memory_filename) From e638039fae5189a2db5724ec82cd5a102aca2ab1 Mon Sep 17 00:00:00 2001 From: "yuan.wang" Date: Thu, 4 Dec 2025 18:59:35 +0800 Subject: [PATCH 04/26] modify tool resp bug in multi cube --- src/memos/multi_mem_cube/composite_cube.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/memos/multi_mem_cube/composite_cube.py b/src/memos/multi_mem_cube/composite_cube.py index 6db6ca3d7..2e97e442c 100644 --- a/src/memos/multi_mem_cube/composite_cube.py +++ b/src/memos/multi_mem_cube/composite_cube.py @@ -43,6 +43,7 @@ def search_memories(self, search_req: APISearchRequest) -> dict[str, Any]: "para_mem": [], "pref_mem": [], "pref_note": "", + "tool_mem": [], } for view in self.cube_views: @@ -52,6 +53,7 @@ def search_memories(self, search_req: APISearchRequest) -> dict[str, Any]: merged_results["act_mem"].extend(cube_result.get("act_mem", [])) merged_results["para_mem"].extend(cube_result.get("para_mem", [])) merged_results["pref_mem"].extend(cube_result.get("pref_mem", [])) + merged_results["tool_mem"].extend(cube_result.get("tool_mem", [])) note = cube_result.get("pref_note") if note: From 8765dc4b0a57175e5ebf2b2308e03fabb82f4910 Mon Sep 17 00:00:00 2001 From: "yuan.wang" Date: Fri, 5 Dec 2025 13:05:24 +0800 Subject: [PATCH 05/26] fix bug in playground chat handle and search inter --- src/memos/api/handlers/chat_handler.py | 24 ++++++++++++------- .../tree_text_memory/retrieve/searcher.py | 3 ++- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/src/memos/api/handlers/chat_handler.py b/src/memos/api/handlers/chat_handler.py index 9e60c2885..c101eece4 100644 --- 
a/src/memos/api/handlers/chat_handler.py +++ b/src/memos/api/handlers/chat_handler.py @@ -159,9 +159,11 @@ def handle_chat_complete(self, chat_req: APIChatCompleteRequest) -> dict[str, An # Step 3: Generate complete response from LLM if chat_req.model_name_or_path and chat_req.model_name_or_path not in self.chat_llms: - return { - "message": f"Model {chat_req.model_name_or_path} not suport, choose from {list(self.chat_llms.keys())}" - } + raise HTTPException( + status_code=400, + detail=f"Model {chat_req.model_name_or_path} not suport, choose from {list(self.chat_llms.keys())}", + ) + model = chat_req.model_name_or_path or next(iter(self.chat_llms.keys())) response = self.chat_llms[model].generate(current_messages, model_name_or_path=model) @@ -281,9 +283,11 @@ def generate_chat_response() -> Generator[str, None, None]: chat_req.model_name_or_path and chat_req.model_name_or_path not in self.chat_llms ): - return { - "message": f"Model {chat_req.model_name_or_path} not suport, choose from {list(self.chat_llms.keys())}" - } + raise HTTPException( + status_code=400, + detail=f"Model {chat_req.model_name_or_path} not suport, choose from {list(self.chat_llms.keys())}", + ) + model = chat_req.model_name_or_path or next(iter(self.chat_llms.keys())) response_stream = self.chat_llms[model].generate_stream( current_messages, model_name_or_path=model @@ -517,9 +521,11 @@ def generate_chat_response() -> Generator[str, None, None]: chat_req.model_name_or_path and chat_req.model_name_or_path not in self.chat_llms ): - return { - "message": f"Model {chat_req.model_name_or_path} not suport, choose from {list(self.chat_llms.keys())}" - } + raise HTTPException( + status_code=400, + detail=f"Model {chat_req.model_name_or_path} not suport, choose from {list(self.chat_llms.keys())}", + ) + model = chat_req.model_name_or_path or next(iter(self.chat_llms.keys())) response_stream = self.chat_llms[model].generate_stream( current_messages, model_name_or_path=model diff --git 
a/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py b/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py index b1fb210c6..3e769e424 100644 --- a/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py +++ b/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py @@ -227,7 +227,8 @@ def _parse_task( query_embedding = None # fine mode will trigger initial embedding search - if mode == "fine_old": + # TODO: tmp "playground_search_goal_parser" for playground search goal parser, will be removed later + if mode == "fine_old" or kwargs.get("playground_search_goal_parser", False): logger.info("[SEARCH] Fine mode: embedding search") query_embedding = self.embedder.embed([query])[0] From 1a335db81e6910c934b857a35be0b92c6021bf6e Mon Sep 17 00:00:00 2001 From: "yuan.wang" Date: Fri, 5 Dec 2025 14:44:10 +0800 Subject: [PATCH 06/26] modify prompt --- src/memos/templates/mos_prompts.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/memos/templates/mos_prompts.py b/src/memos/templates/mos_prompts.py index 357a9f1bd..15f1a44b3 100644 --- a/src/memos/templates/mos_prompts.py +++ b/src/memos/templates/mos_prompts.py @@ -130,6 +130,8 @@ - Intelligently choose which memories (PersonalMemory[P] or OuterMemory[O]) are most relevant to the user's query - Only reference memories that are directly relevant to the user's question - Prioritize the most appropriate memory type based on the context and nature of the query +- Responses must not contain non-existent citations +- Explicit and implicit preferences can be referenced if relevant to the user's question, but must not be cited or source-attributed in responses - **Attribution-first selection:** Distinguish memory from user vs from assistant ** before composing. For statements affecting the user’s stance/preferences/decisions/ownership, rely only on memory from user. 
Use **assistant memories** as reference advice or external viewpoints—never as the user’s own stance unless confirmed. ### Response Style @@ -137,6 +139,8 @@ - Seamlessly incorporate memory references when appropriate - Ensure the flow of conversation remains smooth despite memory citations - Balance factual accuracy with engaging dialogue +- Avoid meaningless blank lines +- Keep the reply language consistent with the user's query language ## Key Principles - Reference only relevant memories to avoid information overload From 18320ffcbf5c41157ce088547848d4f814023580 Mon Sep 17 00:00:00 2001 From: "yuan.wang" Date: Sat, 6 Dec 2025 15:59:47 +0800 Subject: [PATCH 07/26] fix bug in playground --- src/memos/api/handlers/chat_handler.py | 106 +++++++++++------- src/memos/api/product_models.py | 23 +++- src/memos/memories/textual/tree.py | 3 + .../tree_text_memory/retrieve/searcher.py | 7 +- .../retrieve/task_goal_parser.py | 4 +- .../tree_text_memory/retrieve/utils.py | 2 +- src/memos/multi_mem_cube/single_cube.py | 1 + 7 files changed, 98 insertions(+), 48 deletions(-) diff --git a/src/memos/api/handlers/chat_handler.py b/src/memos/api/handlers/chat_handler.py index c101eece4..a6a386313 100644 --- a/src/memos/api/handlers/chat_handler.py +++ b/src/memos/api/handlers/chat_handler.py @@ -21,7 +21,9 @@ from memos.api.product_models import ( APIADDRequest, APIChatCompleteRequest, + APISearchPlaygroundRequest, APISearchRequest, + ChatPlaygroundRequest, ChatRequest, ) from memos.context.context import ContextThread @@ -91,6 +93,7 @@ def __init__( self.enable_mem_scheduler = ( hasattr(dependencies, "enable_mem_scheduler") and dependencies.enable_mem_scheduler ) + self.dependencies = dependencies def handle_chat_complete(self, chat_req: APIChatCompleteRequest) -> dict[str, Any]: """ @@ -356,7 +359,7 @@ def generate_chat_response() -> Generator[str, None, None]: self.logger.error(f"Failed to start chat stream: {traceback.format_exc()}") raise HTTPException(status_code=500, 
detail=str(traceback.format_exc())) from err - def handle_chat_stream_playground(self, chat_req: ChatRequest) -> StreamingResponse: + def handle_chat_stream_playground(self, chat_req: ChatPlaygroundRequest) -> StreamingResponse: """ Chat with MemOS via Server-Sent Events (SSE) stream using search/add handlers. @@ -413,8 +416,8 @@ def generate_chat_response() -> Generator[str, None, None]: label=QUERY_TASK_LABEL, ) - # ====== first search without parse goal ====== - search_req = APISearchRequest( + # ====== first search text mem with parse goal ====== + search_req = APISearchPlaygroundRequest( query=chat_req.query, user_id=chat_req.user_id, readable_cube_ids=readable_cube_ids, @@ -426,6 +429,7 @@ def generate_chat_response() -> Generator[str, None, None]: include_preference=chat_req.include_preference, pref_top_k=chat_req.pref_top_k, filter=chat_req.filter, + playground_search_goal_parser=True, ) search_response = self.search_handler.handle_search_memories(search_req) @@ -439,10 +443,10 @@ def generate_chat_response() -> Generator[str, None, None]: memories_list = text_mem_results[0]["memories"] # Filter memories by threshold - first_filtered_memories = self._filter_memories_by_threshold(memories_list) + filtered_memories = self._filter_memories_by_threshold(memories_list) # Prepare reference data (first search) - reference = prepare_reference_data(first_filtered_memories) + reference = prepare_reference_data(filtered_memories) # get preference string pref_string = search_response.data.get("pref_string", "") @@ -455,48 +459,68 @@ def generate_chat_response() -> Generator[str, None, None]: pref_md_string = self._build_pref_md_string_for_playground(pref_memories) yield f"data: {json.dumps({'type': 'pref_md_string', 'data': pref_md_string})}\n\n" - # internet status - yield f"data: {json.dumps({'type': 'status', 'data': 'start_internet_search'})}\n\n" - - # ====== second search with parse goal ====== - search_req = APISearchRequest( - query=chat_req.query, - 
user_id=chat_req.user_id, - readable_cube_ids=readable_cube_ids, - mode=chat_req.mode, - internet_search=chat_req.internet_search, - top_k=chat_req.top_k, - chat_history=chat_req.history, - session_id=chat_req.session_id, - include_preference=False, - filter=chat_req.filter, - playground_search_goal_parser=True, + # parse goal for internet search + searcher = self.dependencies.searcher + parsed_goal = searcher.task_goal_parser.parse( + task_description=chat_req.query, + context="\n".join( + [memory.get("memory", "") for memory in filtered_memories] + ), + conversation=chat_req.history, + mode="fine", ) - search_response = self.search_handler.handle_search_memories(search_req) - # Extract memories from search results (second search) - memories_list = [] - if search_response.data and search_response.data.get("text_mem"): - text_mem_results = search_response.data["text_mem"] - if text_mem_results and text_mem_results[0].get("memories"): - memories_list = text_mem_results[0]["memories"] + if chat_req.beginner_guide_step == "first": + chat_req.internet_search = False + parsed_goal.internet_search = False + elif chat_req.beginner_guide_step == "second": + chat_req.internet_search = True + parsed_goal.internet_search = True + + if chat_req.internet_search or parsed_goal.internet_search: + # internet status + yield f"data: {json.dumps({'type': 'status', 'data': 'start_internet_search'})}\n\n" + + # ====== internet search with parse goal ====== + search_req = APISearchPlaygroundRequest( + query=chat_req.query + + (f"{parsed_goal.tags}" if parsed_goal.tags else ""), + user_id=chat_req.user_id, + readable_cube_ids=readable_cube_ids, + mode=chat_req.mode, + internet_search=True, + top_k=chat_req.top_k, + chat_history=chat_req.history, + session_id=chat_req.session_id, + include_preference=False, + filter=chat_req.filter, + search_memory_type="OuterMemory", + ) + search_response = self.search_handler.handle_search_memories(search_req) - # Filter memories by threshold - 
second_filtered_memories = self._filter_memories_by_threshold(memories_list) + # Extract memories from search results (second search) + memories_list = [] + if search_response.data and search_response.data.get("text_mem"): + text_mem_results = search_response.data["text_mem"] + if text_mem_results and text_mem_results[0].get("memories"): + memories_list = text_mem_results[0]["memories"] - # dedup and supplement memories - filtered_memories = self._dedup_and_supplement_memories( - first_filtered_memories, second_filtered_memories - ) + # Filter memories by threshold + second_filtered_memories = self._filter_memories_by_threshold(memories_list) - # Prepare remain reference data (second search) - reference = prepare_reference_data(filtered_memories) - # get internet reference - internet_reference = self._get_internet_reference( - search_response.data.get("text_mem")[0]["memories"] - ) + # dedup and supplement memories + filtered_memories = self._dedup_and_supplement_memories( + filtered_memories, second_filtered_memories + ) - yield f"data: {json.dumps({'type': 'reference', 'data': reference})}\n\n" + # Prepare remain reference data (second search) + reference = prepare_reference_data(filtered_memories) + # get internet reference + internet_reference = self._get_internet_reference( + search_response.data.get("text_mem")[0]["memories"] + ) + + yield f"data: {json.dumps({'type': 'reference', 'data': reference})}\n\n" # Step 2: Build system prompt with memories system_prompt = self._build_enhance_system_prompt( diff --git a/src/memos/api/product_models.py b/src/memos/api/product_models.py index 9dfd872b0..191f9c9a9 100644 --- a/src/memos/api/product_models.py +++ b/src/memos/api/product_models.py @@ -159,6 +159,14 @@ def _convert_deprecated_fields(self): return self +class ChatPlaygroundRequest(ChatRequest): + """Request model for chat operations in playground.""" + + beginner_guide_step: str | None = Field( + None, description="Whether to use beginner guide, option: 
[first, second]" + ) + + class ChatCompleteRequest(BaseRequest): """Request model for chat operations. will (Deprecated), instead use APIChatCompleteRequest.""" @@ -373,9 +381,11 @@ class APISearchRequest(BaseRequest): "If None, default thresholds will be applied." ), ) - - # TODO: tmp field for playground search goal parser, will be removed later - playground_search_goal_parser: bool = Field(False, description="Playground search goal parser") + # Internal field for search memory type + search_memory_type: str = Field( + "All", + description="Type of memory to search: All, WorkingMemory, LongTermMemory, UserMemory, OuterMemory, ToolSchemaMemory, ToolTrajectoryMemory", + ) # ==== Context ==== chat_history: MessageList | None = Field( @@ -448,6 +458,13 @@ def _convert_deprecated_fields(self) -> "APISearchRequest": return self +class APISearchPlaygroundRequest(APISearchRequest): + """Request model for searching memories in playground.""" + + # TODO: tmp field for playground search goal parser, will be removed later + playground_search_goal_parser: bool = Field(False, description="Playground search goal parser") + + class APIADDRequest(BaseRequest): """Request model for creating memories.""" diff --git a/src/memos/memories/textual/tree.py b/src/memos/memories/textual/tree.py index c53c13618..691257046 100644 --- a/src/memos/memories/textual/tree.py +++ b/src/memos/memories/textual/tree.py @@ -137,9 +137,12 @@ def get_searcher( self.graph_store, self.embedder, self.reranker, + bm25_retriever=self.bm25_retriever, internet_retriever=self.internet_retriever, + search_strategy=self.search_strategy, manual_close_internet=manual_close_internet, process_llm=process_llm, + tokenizer=self.tokenizer, ) return searcher diff --git a/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py b/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py index 3e769e424..4225ed99b 100644 --- a/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py +++ 
b/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py @@ -275,6 +275,10 @@ def _parse_task( **kwargs, ) + # TODO: tmp field playground_search_goal_parser for playground, will be removed later + if kwargs.get("playground_search_goal_parser", False): + parsed_goal.internet_search = False + query = parsed_goal.rephrased_query or query # if goal has extra memories, embed them too if parsed_goal.memories: @@ -527,7 +531,8 @@ def _retrieve_from_internet( if self.manual_close_internet and not parsed_goal.internet_search: logger.info(f"[PATH-C] '{query}' Skipped (no retriever, fast mode)") return [] - if memory_type not in ["All"]: + if memory_type not in ["All", "OuterMemory"]: + logger.info(f"[PATH-C] '{query}' Skipped (memory_type does not match)") return [] logger.info(f"[PATH-C] '{query}' Retrieving from internet...") items = self.internet_retriever.retrieve_from_internet( diff --git a/src/memos/memories/textual/tree_text_memory/retrieve/task_goal_parser.py b/src/memos/memories/textual/tree_text_memory/retrieve/task_goal_parser.py index f75f8d045..6b96d7e98 100644 --- a/src/memos/memories/textual/tree_text_memory/retrieve/task_goal_parser.py +++ b/src/memos/memories/textual/tree_text_memory/retrieve/task_goal_parser.py @@ -48,7 +48,7 @@ def parse( elif mode == "fine": if not self.llm: raise ValueError("LLM not provided for slow mode.") - return self._parse_fine(task_description, context, conversation) + return self._parse_fine(task_description, context, conversation, **kwargs) else: raise ValueError(f"Unknown mode: {mode}") @@ -81,7 +81,7 @@ def _parse_fast(self, task_description: str, **kwargs) -> ParsedTaskGoal: ) def _parse_fine( - self, query: str, context: str = "", conversation: list[dict] | None = None + self, query: str, context: str = "", conversation: list[dict] | None = None, **kwargs ) -> ParsedTaskGoal: """ Slow mode: LLM structured parse. 
diff --git a/src/memos/memories/textual/tree_text_memory/retrieve/utils.py b/src/memos/memories/textual/tree_text_memory/retrieve/utils.py index 1b7b28949..55c6243d8 100644 --- a/src/memos/memories/textual/tree_text_memory/retrieve/utils.py +++ b/src/memos/memories/textual/tree_text_memory/retrieve/utils.py @@ -4,7 +4,7 @@ 1. Keys: the high-level keywords directly relevant to the user’s task. 2. Tags: thematic tags to help categorize and retrieve related memories. 3. Goal Type: retrieval | qa | generation -4. Rephrased instruction: Give a rephrased task instruction based on the former conversation to make it less confusing to look alone. If you think the task instruction is easy enough to understand, or there is no former conversation, set "rephrased_instruction" to an empty string. +4. Rephrased instruction: Give a rephrased task instruction based on the former conversation to make it less confusing to look alone. Make full use of information related to the query. If you think the task instruction is easy enough to understand, or there is no former conversation, set "rephrased_instruction" to an empty string. 5. Need for internet search: If the user's task instruction only involves objective facts or can be completed without introducing external knowledge, set "internet_search" to False. Otherwise, set it to True. 6. Memories: Provide 2–5 short semantic expansions or rephrasings of the rephrased/original user task instruction. These are used for improved embedding search coverage. Each should be clear, concise, and meaningful for retrieval. 
diff --git a/src/memos/multi_mem_cube/single_cube.py b/src/memos/multi_mem_cube/single_cube.py index 88c0f87c7..179071182 100644 --- a/src/memos/multi_mem_cube/single_cube.py +++ b/src/memos/multi_mem_cube/single_cube.py @@ -426,6 +426,7 @@ def _fast_search( top_k=search_req.top_k, mode=SearchMode.FAST, manual_close_internet=not search_req.internet_search, + momory_type=search_req.search_memory_type, search_filter=search_filter, search_priority=search_priority, info={ From 666b8974e72866af8ff686581b64d053cc4e1d34 Mon Sep 17 00:00:00 2001 From: "yuan.wang" Date: Sat, 6 Dec 2025 18:35:21 +0800 Subject: [PATCH 08/26] fix bug playground --- src/memos/api/handlers/chat_handler.py | 5 +++-- src/memos/api/routers/server_router.py | 3 ++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/memos/api/handlers/chat_handler.py b/src/memos/api/handlers/chat_handler.py index a6a386313..44ecbe531 100644 --- a/src/memos/api/handlers/chat_handler.py +++ b/src/memos/api/handlers/chat_handler.py @@ -595,8 +595,9 @@ def generate_chat_response() -> Generator[str, None, None]: chunk_data = f"data: {json.dumps({'type': 'text', 'data': processed_chunk}, ensure_ascii=False)}\n\n" yield chunk_data - # Yield internet reference after text response - yield f"data: {json.dumps({'type': 'internet_reference', 'data': internet_reference})}\n\n" + if chat_req.internet_search or parsed_goal.internet_search: + # Yield internet reference after text response + yield f"data: {json.dumps({'type': 'internet_reference', 'data': internet_reference})}\n\n" # Calculate timing time_end = time.time() diff --git a/src/memos/api/routers/server_router.py b/src/memos/api/routers/server_router.py index 5b2107b6c..0256f595c 100644 --- a/src/memos/api/routers/server_router.py +++ b/src/memos/api/routers/server_router.py @@ -28,6 +28,7 @@ APIChatCompleteRequest, APIFeedbackRequest, APISearchRequest, + ChatPlaygroundRequest, ChatRequest, DeleteMemoryRequest, DeleteMemoryResponse, @@ -187,7 +188,7 @@ def
chat_stream(chat_req: ChatRequest): @router.post("/chat/stream/playground", summary="Chat with MemOS playground") -def chat_stream_playground(chat_req: ChatRequest): +def chat_stream_playground(chat_req: ChatPlaygroundRequest): """ Chat with MemOS for a specific user. Returns SSE stream. From 0d225120182d17fc16a813d707300c52ed4d315c Mon Sep 17 00:00:00 2001 From: "yuan.wang" Date: Sun, 7 Dec 2025 11:32:15 +0800 Subject: [PATCH 09/26] fix bug --- src/memos/multi_mem_cube/single_cube.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/memos/multi_mem_cube/single_cube.py b/src/memos/multi_mem_cube/single_cube.py index d92e0bb79..15bcb99af 100644 --- a/src/memos/multi_mem_cube/single_cube.py +++ b/src/memos/multi_mem_cube/single_cube.py @@ -437,7 +437,7 @@ def _fast_search( search_tool_memory=search_req.search_tool_memory, tool_mem_top_k=search_req.tool_mem_top_k, # TODO: tmp field for playground search goal parser, will be removed later - playground_search_goal_parser=search_req.playground_search_goal_parser, + playground_search_goal_parser=search_req.get("playground_search_goal_parser", None), ) formatted_memories = [format_memory_item(data) for data in search_results] From a9eb1f61b0e1909e870dc485819059432763cfdb Mon Sep 17 00:00:00 2001 From: "yuan.wang" Date: Sun, 7 Dec 2025 11:38:21 +0800 Subject: [PATCH 10/26] fix code --- src/memos/multi_mem_cube/single_cube.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/memos/multi_mem_cube/single_cube.py b/src/memos/multi_mem_cube/single_cube.py index 15bcb99af..5a9a87acb 100644 --- a/src/memos/multi_mem_cube/single_cube.py +++ b/src/memos/multi_mem_cube/single_cube.py @@ -437,7 +437,9 @@ def _fast_search( search_tool_memory=search_req.search_tool_memory, tool_mem_top_k=search_req.tool_mem_top_k, # TODO: tmp field for playground search goal parser, will be removed later - playground_search_goal_parser=search_req.get("playground_search_goal_parser", None), + 
playground_search_goal_parser=search_req.playground_search_goal_parser + if hasattr(search_req, "playground_search_goal_parser") + else False, ) formatted_memories = [format_memory_item(data) for data in search_results] From 723a14fc32623a6b1350db21fab2ff6a33329943 Mon Sep 17 00:00:00 2001 From: "yuan.wang" Date: Sun, 7 Dec 2025 22:21:26 +0800 Subject: [PATCH 11/26] fix model bug in playground --- src/memos/api/handlers/chat_handler.py | 11 +---------- src/memos/vec_dbs/milvus.py | 6 +++--- 2 files changed, 4 insertions(+), 13 deletions(-) diff --git a/src/memos/api/handlers/chat_handler.py b/src/memos/api/handlers/chat_handler.py index 44ecbe531..06deb8024 100644 --- a/src/memos/api/handlers/chat_handler.py +++ b/src/memos/api/handlers/chat_handler.py @@ -541,16 +541,7 @@ def generate_chat_response() -> Generator[str, None, None]: ) # Step 3: Generate streaming response from LLM - if ( - chat_req.model_name_or_path - and chat_req.model_name_or_path not in self.chat_llms - ): - raise HTTPException( - status_code=400, - detail=f"Model {chat_req.model_name_or_path} not suport, choose from {list(self.chat_llms.keys())}", - ) - - model = chat_req.model_name_or_path or next(iter(self.chat_llms.keys())) + model = next(iter(self.chat_llms.keys())) response_stream = self.chat_llms[model].generate_stream( current_messages, model_name_or_path=model ) diff --git a/src/memos/vec_dbs/milvus.py b/src/memos/vec_dbs/milvus.py index 42aeec29b..ecbca5815 100644 --- a/src/memos/vec_dbs/milvus.py +++ b/src/memos/vec_dbs/milvus.py @@ -588,9 +588,9 @@ def add(self, collection_name: str, data: list[MilvusVecDBItem | dict[str, Any]] # Prepare entity data entity = { - "id": item.id, - "memory": item.memory, - "original_text": item.original_text, + "id": item.id[:65000], + "memory": item.memory[:65000], + "original_text": item.original_text[:65000], "vector": item.vector, "payload": item.payload if item.payload else {}, } From 5ab6e92a79f8dd729c49c8cd0048e37bf0a315bf Mon Sep 17 00:00:00 
2001 From: "yuan.wang" Date: Mon, 8 Dec 2025 14:33:40 +0800 Subject: [PATCH 12/26] modify plan b --- src/memos/api/handlers/chat_handler.py | 73 +++++++++++++------------- 1 file changed, 37 insertions(+), 36 deletions(-) diff --git a/src/memos/api/handlers/chat_handler.py b/src/memos/api/handlers/chat_handler.py index 06deb8024..283e95ee7 100644 --- a/src/memos/api/handlers/chat_handler.py +++ b/src/memos/api/handlers/chat_handler.py @@ -429,7 +429,7 @@ def generate_chat_response() -> Generator[str, None, None]: include_preference=chat_req.include_preference, pref_top_k=chat_req.pref_top_k, filter=chat_req.filter, - playground_search_goal_parser=True, + playground_search_goal_parser=False, ) search_response = self.search_handler.handle_search_memories(search_req) @@ -481,46 +481,47 @@ def generate_chat_response() -> Generator[str, None, None]: # internet status yield f"data: {json.dumps({'type': 'status', 'data': 'start_internet_search'})}\n\n" - # ====== internet search with parse goal ====== - search_req = APISearchPlaygroundRequest( - query=chat_req.query - + (f"{parsed_goal.tags}" if parsed_goal.tags else ""), - user_id=chat_req.user_id, - readable_cube_ids=readable_cube_ids, - mode=chat_req.mode, - internet_search=True, - top_k=chat_req.top_k, - chat_history=chat_req.history, - session_id=chat_req.session_id, - include_preference=False, - filter=chat_req.filter, - search_memory_type="OuterMemory", - ) - search_response = self.search_handler.handle_search_memories(search_req) + # ====== internet search with parse goal ====== + search_req = APISearchPlaygroundRequest( + query=parsed_goal.rephrased_query + or chat_req.query + (f"{parsed_goal.tags}" if parsed_goal.tags else ""), + user_id=chat_req.user_id, + readable_cube_ids=readable_cube_ids, + mode=chat_req.mode, + internet_search=chat_req.internet_search, + top_k=chat_req.top_k, + chat_history=chat_req.history, + session_id=chat_req.session_id, + include_preference=False, + filter=chat_req.filter, + 
search_memory_type="All", + playground_search_goal_parser=False, + ) + search_response = self.search_handler.handle_search_memories(search_req) - # Extract memories from search results (second search) - memories_list = [] - if search_response.data and search_response.data.get("text_mem"): - text_mem_results = search_response.data["text_mem"] - if text_mem_results and text_mem_results[0].get("memories"): - memories_list = text_mem_results[0]["memories"] + # Extract memories from search results (second search) + memories_list = [] + if search_response.data and search_response.data.get("text_mem"): + text_mem_results = search_response.data["text_mem"] + if text_mem_results and text_mem_results[0].get("memories"): + memories_list = text_mem_results[0]["memories"] - # Filter memories by threshold - second_filtered_memories = self._filter_memories_by_threshold(memories_list) + # Filter memories by threshold + second_filtered_memories = self._filter_memories_by_threshold(memories_list) - # dedup and supplement memories - filtered_memories = self._dedup_and_supplement_memories( - filtered_memories, second_filtered_memories - ) + # dedup and supplement memories + filtered_memories = self._dedup_and_supplement_memories( + filtered_memories, second_filtered_memories + ) - # Prepare remain reference data (second search) - reference = prepare_reference_data(filtered_memories) - # get internet reference - internet_reference = self._get_internet_reference( - search_response.data.get("text_mem")[0]["memories"] - ) + # Prepare remain reference data (second search) + reference = prepare_reference_data(filtered_memories) + # get internet reference + internet_reference = self._get_internet_reference( + search_response.data.get("text_mem")[0]["memories"] + ) - yield f"data: {json.dumps({'type': 'reference', 'data': reference})}\n\n" + yield f"data: {json.dumps({'type': 'reference', 'data': reference})}\n\n" # Step 2: Build system prompt with memories system_prompt = 
self._build_enhance_system_prompt( From 1bb0bcda20efa23275640bacfd415d5bb7082352 Mon Sep 17 00:00:00 2001 From: "yuan.wang" Date: Mon, 8 Dec 2025 20:22:30 +0800 Subject: [PATCH 13/26] llm param modify --- src/memos/configs/llm.py | 2 +- .../mem_reader/read_multi_modal/system_parser.py | 11 +++++++++-- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/src/memos/configs/llm.py b/src/memos/configs/llm.py index 70217b896..2c2890eef 100644 --- a/src/memos/configs/llm.py +++ b/src/memos/configs/llm.py @@ -10,7 +10,7 @@ class BaseLLMConfig(BaseConfig): model_name_or_path: str = Field(..., description="Model name or path") temperature: float = Field(default=0.7, description="Temperature for sampling") - max_tokens: int = Field(default=8192, description="Maximum number of tokens to generate") + max_tokens: int = Field(default=4096, description="Maximum number of tokens to generate") top_p: float = Field(default=0.95, description="Top-p sampling parameter") top_k: int = Field(default=50, description="Top-k sampling parameter") remove_think_prefix: bool = Field( diff --git a/src/memos/mem_reader/read_multi_modal/system_parser.py b/src/memos/mem_reader/read_multi_modal/system_parser.py index 3f467d649..2e856365a 100644 --- a/src/memos/mem_reader/read_multi_modal/system_parser.py +++ b/src/memos/mem_reader/read_multi_modal/system_parser.py @@ -1,5 +1,6 @@ """Parser for system messages.""" +import ast import json import re import uuid @@ -137,8 +138,14 @@ def parse_fine( tool_schema = json.loads(content) assert isinstance(tool_schema, list), "Tool schema must be a list[dict]" except json.JSONDecodeError: - logger.warning(f"[SystemParser] Failed to parse tool schema: {content}") - return [] + try: + tool_schema = ast.literal_eval(content) + assert isinstance(tool_schema, list), "Tool schema must be a list[dict]" + except (ValueError, SyntaxError, AssertionError): + logger.warning( + f"[SystemParser] Failed to parse tool schema with both JSON and ast.literal_eval: 
{content}" + ) + return [] except AssertionError: logger.warning(f"[SystemParser] Tool schema must be a list[dict]: {content}") return [] From f5bc4262db1913617835782e674a0fef02e0198d Mon Sep 17 00:00:00 2001 From: "yuan.wang" Date: Mon, 8 Dec 2025 20:49:05 +0800 Subject: [PATCH 14/26] add logger in playground --- src/memos/api/handlers/chat_handler.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/memos/api/handlers/chat_handler.py b/src/memos/api/handlers/chat_handler.py index 283e95ee7..9adbdfbe6 100644 --- a/src/memos/api/handlers/chat_handler.py +++ b/src/memos/api/handlers/chat_handler.py @@ -421,7 +421,7 @@ def generate_chat_response() -> Generator[str, None, None]: query=chat_req.query, user_id=chat_req.user_id, readable_cube_ids=readable_cube_ids, - mode=chat_req.mode, + mode="fast", internet_search=False, top_k=chat_req.top_k, chat_history=chat_req.history, @@ -431,7 +431,10 @@ def generate_chat_response() -> Generator[str, None, None]: filter=chat_req.filter, playground_search_goal_parser=False, ) + start_time = time.time() search_response = self.search_handler.handle_search_memories(search_req) + end_time = time.time() + self.logger.info(f"first search time: {end_time - start_time}") yield f"data: {json.dumps({'type': 'status', 'data': '1'})}\n\n" @@ -497,7 +500,10 @@ def generate_chat_response() -> Generator[str, None, None]: search_memory_type="All", playground_search_goal_parser=False, ) + start_time = time.time() search_response = self.search_handler.handle_search_memories(search_req) + end_time = time.time() + self.logger.info(f"second search time: {end_time - start_time}") # Extract memories from search results (second search) memories_list = [] From a9fa3098b33bb42864c8da13f743787f3cd8216f Mon Sep 17 00:00:00 2001 From: "yuan.wang" Date: Tue, 9 Dec 2025 10:33:45 +0800 Subject: [PATCH 15/26] modify code --- src/memos/configs/llm.py | 2 +- src/memos/memories/textual/tree.py | 11 +++++++++++ 
src/memos/multi_mem_cube/single_cube.py | 3 +++ 3 files changed, 15 insertions(+), 1 deletion(-) diff --git a/src/memos/configs/llm.py b/src/memos/configs/llm.py index 2c2890eef..70217b896 100644 --- a/src/memos/configs/llm.py +++ b/src/memos/configs/llm.py @@ -10,7 +10,7 @@ class BaseLLMConfig(BaseConfig): model_name_or_path: str = Field(..., description="Model name or path") temperature: float = Field(default=0.7, description="Temperature for sampling") - max_tokens: int = Field(default=4096, description="Maximum number of tokens to generate") + max_tokens: int = Field(default=8192, description="Maximum number of tokens to generate") top_p: float = Field(default=0.95, description="Top-p sampling parameter") top_k: int = Field(default=50, description="Top-k sampling parameter") remove_think_prefix: bool = Field( diff --git a/src/memos/memories/textual/tree.py b/src/memos/memories/textual/tree.py index b4b1c0f23..7f022b439 100644 --- a/src/memos/memories/textual/tree.py +++ b/src/memos/memories/textual/tree.py @@ -343,6 +343,17 @@ def delete_all(self) -> None: logger.error(f"An error occurred while deleting all memories: {e}") raise + def delete_by_filter( + self, + writable_cube_ids: list[str], + file_ids: list[str] | None = None, + filter: dict | None = None, + ) -> None: + """Delete memories by filter.""" + self.graph_store.delete_node_by_prams( + writable_cube_ids=writable_cube_ids, file_ids=file_ids, filter=filter + ) + def load(self, dir: str) -> None: try: memory_file = os.path.join(dir, self.config.memory_filename) diff --git a/src/memos/multi_mem_cube/single_cube.py b/src/memos/multi_mem_cube/single_cube.py index 4ae0c207e..780de8545 100644 --- a/src/memos/multi_mem_cube/single_cube.py +++ b/src/memos/multi_mem_cube/single_cube.py @@ -30,6 +30,7 @@ SearchMode, UserContext, ) +from memos.utils import timed logger = get_logger(__name__) @@ -198,6 +199,7 @@ def _get_search_mode(self, mode: str) -> str: """ return mode + @timed def _search_text( self, 
search_req: APISearchRequest, @@ -363,6 +365,7 @@ def _fine_search( return formatted_memories + @timed def _search_pref( self, search_req: APISearchRequest, From 4c055d075e71ab2d7bfdceab35cb899b04d463bf Mon Sep 17 00:00:00 2001 From: "yuan.wang" Date: Tue, 9 Dec 2025 11:15:57 +0800 Subject: [PATCH 16/26] fix bug --- src/memos/multi_mem_cube/single_cube.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/memos/multi_mem_cube/single_cube.py b/src/memos/multi_mem_cube/single_cube.py index 780de8545..f0157952b 100644 --- a/src/memos/multi_mem_cube/single_cube.py +++ b/src/memos/multi_mem_cube/single_cube.py @@ -432,7 +432,7 @@ def _fast_search( top_k=search_req.top_k, mode=SearchMode.FAST, manual_close_internet=not search_req.internet_search, - momory_type=search_req.search_memory_type, + memory_type=search_req.search_memory_type, search_filter=search_filter, search_priority=search_priority, info={ From 27b4fc48821fd5e6c1454a3c5368182554f1feb7 Mon Sep 17 00:00:00 2001 From: "yuan.wang" Date: Tue, 9 Dec 2025 11:24:33 +0800 Subject: [PATCH 17/26] modify code --- src/memos/api/handlers/chat_handler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/memos/api/handlers/chat_handler.py b/src/memos/api/handlers/chat_handler.py index 9adbdfbe6..82771ef73 100644 --- a/src/memos/api/handlers/chat_handler.py +++ b/src/memos/api/handlers/chat_handler.py @@ -490,7 +490,7 @@ def generate_chat_response() -> Generator[str, None, None]: or chat_req.query + (f"{parsed_goal.tags}" if parsed_goal.tags else ""), user_id=chat_req.user_id, readable_cube_ids=readable_cube_ids, - mode=chat_req.mode, + mode="fast", internet_search=chat_req.internet_search, top_k=chat_req.top_k, chat_history=chat_req.history, From cefeefbf75f76e4b114e356f6202e02a31660af4 Mon Sep 17 00:00:00 2001 From: "yuan.wang" Date: Tue, 9 Dec 2025 11:30:45 +0800 Subject: [PATCH 18/26] modify code --- src/memos/api/handlers/chat_handler.py | 4 +++- 1 file changed, 3 insertions(+), 1 
deletion(-) diff --git a/src/memos/api/handlers/chat_handler.py b/src/memos/api/handlers/chat_handler.py index 82771ef73..06244796f 100644 --- a/src/memos/api/handlers/chat_handler.py +++ b/src/memos/api/handlers/chat_handler.py @@ -423,12 +423,13 @@ def generate_chat_response() -> Generator[str, None, None]: readable_cube_ids=readable_cube_ids, mode="fast", internet_search=False, - top_k=chat_req.top_k, + top_k=5, chat_history=chat_req.history, session_id=chat_req.session_id, include_preference=chat_req.include_preference, pref_top_k=chat_req.pref_top_k, filter=chat_req.filter, + search_tool_memory=False, playground_search_goal_parser=False, ) start_time = time.time() @@ -498,6 +499,7 @@ def generate_chat_response() -> Generator[str, None, None]: include_preference=False, filter=chat_req.filter, search_memory_type="All", + search_tool_memory=False, playground_search_goal_parser=False, ) start_time = time.time() From 7e05fa7dd7cbc76af45a3d78f203a70dbf5572f1 Mon Sep 17 00:00:00 2001 From: "yuan.wang" Date: Tue, 9 Dec 2025 14:05:53 +0800 Subject: [PATCH 19/26] fix bug --- src/memos/api/handlers/chat_handler.py | 23 +++++++------- .../tree_text_memory/retrieve/searcher.py | 30 +++++++++++++++---- 2 files changed, 36 insertions(+), 17 deletions(-) diff --git a/src/memos/api/handlers/chat_handler.py b/src/memos/api/handlers/chat_handler.py index 06244796f..732197658 100644 --- a/src/memos/api/handlers/chat_handler.py +++ b/src/memos/api/handlers/chat_handler.py @@ -426,7 +426,7 @@ def generate_chat_response() -> Generator[str, None, None]: top_k=5, chat_history=chat_req.history, session_id=chat_req.session_id, - include_preference=chat_req.include_preference, + include_preference=False, pref_top_k=chat_req.pref_top_k, filter=chat_req.filter, search_tool_memory=False, @@ -451,18 +451,9 @@ def generate_chat_response() -> Generator[str, None, None]: # Prepare reference data (first search) reference = prepare_reference_data(filtered_memories) - # get preference string - 
pref_string = search_response.data.get("pref_string", "") yield f"data: {json.dumps({'type': 'reference', 'data': reference})}\n\n" - # Prepare preference markdown string - if chat_req.include_preference: - pref_list = search_response.data.get("pref_mem") or [] - pref_memories = pref_list[0].get("memories", []) if pref_list else [] - pref_md_string = self._build_pref_md_string_for_playground(pref_memories) - yield f"data: {json.dumps({'type': 'pref_md_string', 'data': pref_md_string})}\n\n" - # parse goal for internet search searcher = self.dependencies.searcher parsed_goal = searcher.task_goal_parser.parse( @@ -496,7 +487,8 @@ def generate_chat_response() -> Generator[str, None, None]: top_k=chat_req.top_k, chat_history=chat_req.history, session_id=chat_req.session_id, - include_preference=False, + include_preference=chat_req.include_preference, + pref_top_k=chat_req.pref_top_k, filter=chat_req.filter, search_memory_type="All", search_tool_memory=False, @@ -524,12 +516,19 @@ def generate_chat_response() -> Generator[str, None, None]: # Prepare remain reference data (second search) reference = prepare_reference_data(filtered_memories) + # get preference string + pref_string = search_response.data.get("pref_string", "") # get internet reference internet_reference = self._get_internet_reference( search_response.data.get("text_mem")[0]["memories"] ) - yield f"data: {json.dumps({'type': 'reference', 'data': reference})}\n\n" + # Prepare preference markdown string + if chat_req.include_preference: + pref_list = search_response.data.get("pref_mem") or [] + pref_memories = pref_list[0].get("memories", []) if pref_list else [] + pref_md_string = self._build_pref_md_string_for_playground(pref_memories) + yield f"data: {json.dumps({'type': 'pref_md_string', 'data': pref_md_string})}\n\n" # Step 2: Build system prompt with memories system_prompt = self._build_enhance_system_prompt( diff --git a/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py 
b/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py index 4225ed99b..fa91bd4f8 100644 --- a/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py +++ b/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py @@ -701,15 +701,35 @@ def _sort_and_trim( """Sort results by score and trim to top_k""" final_items = [] if search_tool_memory: - tool_results = [ + tool_schema_results = [ (item, score) for item, score in results - if item.metadata.memory_type in ["ToolSchemaMemory", "ToolTrajectoryMemory"] + if item.metadata.memory_type == "ToolSchemaMemory" ] - sorted_tool_results = sorted(tool_results, key=lambda pair: pair[1], reverse=True)[ :tool_mem_top_k + sorted_tool_schema_results = sorted( + tool_schema_results, key=lambda pair: pair[1], reverse=True + )[:tool_mem_top_k] + for item, score in sorted_tool_schema_results: + if plugin and round(score, 2) == 0.00: + continue + meta_data = item.metadata.model_dump() + meta_data["relativity"] = score + final_items.append( + TextualMemoryItem( + id=item.id, + memory=item.memory, + metadata=SearchedTreeNodeTextualMemoryMetadata(**meta_data), + ) + ) + tool_trajectory_results = [ + (item, score) + for item, score in results + if item.metadata.memory_type == "ToolTrajectoryMemory" ] - for item, score in sorted_tool_results: + sorted_tool_trajectory_results = sorted( + tool_trajectory_results, key=lambda pair: pair[1], reverse=True + )[:tool_mem_top_k] + for item, score in sorted_tool_trajectory_results: if plugin and round(score, 2) == 0.00: continue meta_data = item.metadata.model_dump() From 05da172b03a886020613c998eb64384bbe8052cb Mon Sep 17 00:00:00 2001 From: "yuan.wang" Date: Tue, 9 Dec 2025 14:58:49 +0800 Subject: [PATCH 20/26] fix search bug in playground --- src/memos/api/handlers/chat_handler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/memos/api/handlers/chat_handler.py b/src/memos/api/handlers/chat_handler.py index 732197658..ed6ab04b4 100644 ---
a/src/memos/api/handlers/chat_handler.py +++ b/src/memos/api/handlers/chat_handler.py @@ -483,7 +483,7 @@ def generate_chat_response() -> Generator[str, None, None]: user_id=chat_req.user_id, readable_cube_ids=readable_cube_ids, mode="fast", - internet_search=chat_req.internet_search, + internet_search=chat_req.internet_search or parsed_goal.internet_search, top_k=chat_req.top_k, chat_history=chat_req.history, session_id=chat_req.session_id, From e410ec2579d1eb9b462109424f0e703fb29228fb Mon Sep 17 00:00:00 2001 From: "yuan.wang" Date: Tue, 9 Dec 2025 15:04:57 +0800 Subject: [PATCH 21/26] fix bug --- src/memos/api/handlers/chat_handler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/memos/api/handlers/chat_handler.py b/src/memos/api/handlers/chat_handler.py index ed6ab04b4..242547c61 100644 --- a/src/memos/api/handlers/chat_handler.py +++ b/src/memos/api/handlers/chat_handler.py @@ -476,7 +476,7 @@ def generate_chat_response() -> Generator[str, None, None]: # internet status yield f"data: {json.dumps({'type': 'status', 'data': 'start_internet_search'})}\n\n" - # ====== internet search with parse goal ====== + # ====== second deep search ====== search_req = APISearchPlaygroundRequest( query=parsed_goal.rephrased_query or chat_req.query + (f"{parsed_goal.tags}" if parsed_goal.tags else ""), From 0324588e3bd34339257000eaa0626577e3ceaac9 Mon Sep 17 00:00:00 2001 From: "yuan.wang" Date: Tue, 9 Dec 2025 15:18:32 +0800 Subject: [PATCH 22/26] move scheduler to back --- src/memos/api/handlers/chat_handler.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/src/memos/api/handlers/chat_handler.py b/src/memos/api/handlers/chat_handler.py index 242547c61..7647bb39f 100644 --- a/src/memos/api/handlers/chat_handler.py +++ b/src/memos/api/handlers/chat_handler.py @@ -405,17 +405,6 @@ def generate_chat_response() -> Generator[str, None, None]: async_mode="sync", ) - # Use first readable cube ID for scheduler
(backward compatibility) - scheduler_cube_id = ( - readable_cube_ids[0] if readable_cube_ids else chat_req.user_id - ) - self._send_message_to_scheduler( - user_id=chat_req.user_id, - mem_cube_id=scheduler_cube_id, - query=chat_req.query, - label=QUERY_TASK_LABEL, - ) - # ====== first search text mem with parse goal ====== search_req = APISearchPlaygroundRequest( query=chat_req.query, @@ -454,6 +443,17 @@ def generate_chat_response() -> Generator[str, None, None]: yield f"data: {json.dumps({'type': 'reference', 'data': reference})}\n\n" + # Use first readable cube ID for scheduler (backward compatibility) + scheduler_cube_id = ( + readable_cube_ids[0] if readable_cube_ids else chat_req.user_id + ) + self._send_message_to_scheduler( + user_id=chat_req.user_id, + mem_cube_id=scheduler_cube_id, + query=chat_req.query, + label=QUERY_TASK_LABEL, + ) + # parse goal for internet search searcher = self.dependencies.searcher parsed_goal = searcher.task_goal_parser.parse( From 40849546d0bdf15cc53dc43646a69af571f48b97 Mon Sep 17 00:00:00 2001 From: "yuan.wang" Date: Tue, 9 Dec 2025 16:34:32 +0800 Subject: [PATCH 23/26] modify pref location --- src/memos/api/handlers/chat_handler.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/src/memos/api/handlers/chat_handler.py b/src/memos/api/handlers/chat_handler.py index 7647bb39f..85a92c68c 100644 --- a/src/memos/api/handlers/chat_handler.py +++ b/src/memos/api/handlers/chat_handler.py @@ -415,7 +415,7 @@ def generate_chat_response() -> Generator[str, None, None]: top_k=5, chat_history=chat_req.history, session_id=chat_req.session_id, - include_preference=False, + include_preference=True, pref_top_k=chat_req.pref_top_k, filter=chat_req.filter, search_tool_memory=False, @@ -440,9 +440,18 @@ def generate_chat_response() -> Generator[str, None, None]: # Prepare reference data (first search) reference = prepare_reference_data(filtered_memories) + # get preference string + pref_string = 
search_response.data.get("pref_string", "") yield f"data: {json.dumps({'type': 'reference', 'data': reference})}\n\n" + # Prepare preference markdown string + if chat_req.include_preference: + pref_list = search_response.data.get("pref_mem") or [] + pref_memories = pref_list[0].get("memories", []) if pref_list else [] + pref_md_string = self._build_pref_md_string_for_playground(pref_memories) + yield f"data: {json.dumps({'type': 'pref_md_string', 'data': pref_md_string})}\n\n" + # Use first readable cube ID for scheduler (backward compatibility) scheduler_cube_id = ( readable_cube_ids[0] if readable_cube_ids else chat_req.user_id @@ -487,7 +496,7 @@ def generate_chat_response() -> Generator[str, None, None]: top_k=chat_req.top_k, chat_history=chat_req.history, session_id=chat_req.session_id, - include_preference=chat_req.include_preference, + include_preference=False, pref_top_k=chat_req.pref_top_k, filter=chat_req.filter, search_memory_type="All", @@ -516,19 +525,11 @@ def generate_chat_response() -> Generator[str, None, None]: # Prepare remain reference data (second search) reference = prepare_reference_data(filtered_memories) - # get preference string - pref_string = search_response.data.get("pref_string", "") # get internet reference internet_reference = self._get_internet_reference( search_response.data.get("text_mem")[0]["memories"] ) yield f"data: {json.dumps({'type': 'reference', 'data': reference})}\n\n" - # Prepare preference markdown string - if chat_req.include_preference: - pref_list = search_response.data.get("pref_mem") or [] - pref_memories = pref_list[0].get("memories", []) if pref_list else [] - pref_md_string = self._build_pref_md_string_for_playground(pref_memories) - yield f"data: {json.dumps({'type': 'pref_md_string', 'data': pref_md_string})}\n\n" # Step 2: Build system prompt with memories system_prompt = self._build_enhance_system_prompt( From 8b547b88c26c963ca1036eb20338cbe57b868e33 Mon Sep 17 00:00:00 2001 From: "yuan.wang" Date: Tue, 9 
Dec 2025 19:48:58 +0800 Subject: [PATCH 24/26] modify fast net search --- .../tree_text_memory/retrieve/bochasearch.py | 108 ++++++++++++------ .../tree_text_memory/retrieve/searcher.py | 2 +- .../tree_text_memory/retrieve/xinyusearch.py | 83 +++++++++++--- 3 files changed, 139 insertions(+), 54 deletions(-) diff --git a/src/memos/memories/textual/tree_text_memory/retrieve/bochasearch.py b/src/memos/memories/textual/tree_text_memory/retrieve/bochasearch.py index 042ed837e..133a85631 100644 --- a/src/memos/memories/textual/tree_text_memory/retrieve/bochasearch.py +++ b/src/memos/memories/textual/tree_text_memory/retrieve/bochasearch.py @@ -12,7 +12,11 @@ from memos.embedders.factory import OllamaEmbedder from memos.log import get_logger from memos.mem_reader.base import BaseMemReader -from memos.memories.textual.item import SourceMessage, TextualMemoryItem +from memos.memories.textual.item import ( + SearchedTreeNodeTextualMemoryMetadata, + SourceMessage, + TextualMemoryItem, +) logger = get_logger(__name__) @@ -138,7 +142,7 @@ def __init__( self.reader = reader def retrieve_from_internet( - self, query: str, top_k: int = 10, parsed_goal=None, info=None + self, query: str, top_k: int = 10, parsed_goal=None, info=None, mode="fast" ) -> list[TextualMemoryItem]: """ Default internet retrieval (Web Search). 
@@ -155,24 +159,24 @@ def retrieve_from_internet( """ search_results = self.bocha_api.search_ai(query) # ✅ default to # web-search - return self._convert_to_mem_items(search_results, query, parsed_goal, info) + return self._convert_to_mem_items(search_results, query, parsed_goal, info, mode=mode) def retrieve_from_web( - self, query: str, top_k: int = 10, parsed_goal=None, info=None + self, query: str, top_k: int = 10, parsed_goal=None, info=None, mode="fast" ) -> list[TextualMemoryItem]: """Explicitly retrieve using Bocha Web Search.""" search_results = self.bocha_api.search_web(query) - return self._convert_to_mem_items(search_results, query, parsed_goal, info) + return self._convert_to_mem_items(search_results, query, parsed_goal, info, mode=mode) def retrieve_from_ai( - self, query: str, top_k: int = 10, parsed_goal=None, info=None + self, query: str, top_k: int = 10, parsed_goal=None, info=None, mode="fast" ) -> list[TextualMemoryItem]: """Explicitly retrieve using Bocha AI Search.""" search_results = self.bocha_api.search_ai(query) - return self._convert_to_mem_items(search_results, query, parsed_goal, info) + return self._convert_to_mem_items(search_results, query, parsed_goal, info, mode=mode) def _convert_to_mem_items( - self, search_results: list[dict], query: str, parsed_goal=None, info=None + self, search_results: list[dict], query: str, parsed_goal=None, info=None, mode="fast" ): """Convert API search results into TextualMemoryItem objects.""" memory_items = [] @@ -181,7 +185,7 @@ def _convert_to_mem_items( with ContextThreadPoolExecutor(max_workers=8) as executor: futures = [ - executor.submit(self._process_result, r, query, parsed_goal, info) + executor.submit(self._process_result, r, query, parsed_goal, info, mode=mode) for r in search_results ] for future in as_completed(futures): @@ -195,7 +199,7 @@ def _convert_to_mem_items( return list(unique_memory_items.values()) def _process_result( - self, result: dict, query: str, parsed_goal: str, info: 
dict[str, Any] + self, result: dict, query: str, parsed_goal: str, info: dict[str, Any], mode="fast" ) -> list[TextualMemoryItem]: """Process one Bocha search result into TextualMemoryItem.""" title = result.get("name", "") @@ -216,27 +220,63 @@ def _process_result( else: publish_time = datetime.now().strftime("%Y-%m-%d") - # Use reader to split and process the content into chunks - read_items = self.reader.get_memory([content], type="doc", info=info) - - memory_items = [] - for read_item_i in read_items[0]: - read_item_i.memory = ( - f"[Outer internet view] Title: {title}\nNewsTime:" - f" {publish_time}\nSummary:" - f" {summary}\n" - f"Content: {read_item_i.memory}" - ) - read_item_i.metadata.source = "web" - read_item_i.metadata.memory_type = "OuterMemory" - read_item_i.metadata.sources = [SourceMessage(type="web", url=url)] if url else [] - read_item_i.metadata.visibility = "public" - read_item_i.metadata.internet_info = { - "title": title, - "url": url, - "site_name": site_name, - "site_icon": site_icon, - "summary": summary, - } - memory_items.append(read_item_i) - return memory_items + if mode == "fast": + info_ = info.copy() + user_id = info_.pop("user_id", "") + session_id = info_.pop("session_id", "") + return [ + TextualMemoryItem( + memory=( + f"[Outer internet view] Title: {title}\nNewsTime:" + f" {publish_time}\nSummary:" + f" {summary}\n" + ), + metadata=SearchedTreeNodeTextualMemoryMetadata( + user_id=user_id, + session_id=session_id, + memory_type="OuterMemory", + status="activated", + type="fact", + source="web", + sources=[SourceMessage(type="web", url=url)] if url else [], + visibility="public", + info=info_, + background="", + confidence=0.99, + usage=[], + embedding=self.embedder.embed([content])[0], + internet_info={ + "title": title, + "url": url, + "site_name": site_name, + "site_icon": site_icon, + "summary": summary, + }, + ), + ) + ] + else: + # Use reader to split and process the content into chunks + read_items = 
self.reader.get_memory([content], type="doc", info=info) + + memory_items = [] + for read_item_i in read_items[0]: + read_item_i.memory = ( + f"[Outer internet view] Title: {title}\nNewsTime:" + f" {publish_time}\nSummary:" + f" {summary}\n" + f"Content: {read_item_i.memory}" + ) + read_item_i.metadata.source = "web" + read_item_i.metadata.memory_type = "OuterMemory" + read_item_i.metadata.sources = [SourceMessage(type="web", url=url)] if url else [] + read_item_i.metadata.visibility = "public" + read_item_i.metadata.internet_info = { + "title": title, + "url": url, + "site_name": site_name, + "site_icon": site_icon, + "summary": summary, + } + memory_items.append(read_item_i) + return memory_items diff --git a/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py b/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py index fa91bd4f8..eae96ccac 100644 --- a/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py +++ b/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py @@ -536,7 +536,7 @@ def _retrieve_from_internet( return [] logger.info(f"[PATH-C] '{query}' Retrieving from internet...") items = self.internet_retriever.retrieve_from_internet( - query=query, top_k=top_k, parsed_goal=parsed_goal, info=info + query=query, top_k=top_k, parsed_goal=parsed_goal, info=info, mode=mode ) logger.info(f"[PATH-C] '{query}' Retrieved from internet {len(items)} items: {items}") return self.reranker.rerank( diff --git a/src/memos/memories/textual/tree_text_memory/retrieve/xinyusearch.py b/src/memos/memories/textual/tree_text_memory/retrieve/xinyusearch.py index e5acd00f5..ab12a0647 100644 --- a/src/memos/memories/textual/tree_text_memory/retrieve/xinyusearch.py +++ b/src/memos/memories/textual/tree_text_memory/retrieve/xinyusearch.py @@ -12,7 +12,11 @@ from memos.embedders.factory import OllamaEmbedder from memos.log import get_logger from memos.mem_reader.base import BaseMemReader -from memos.memories.textual.item import 
SourceMessage, TextualMemoryItem +from memos.memories.textual.item import ( + SearchedTreeNodeTextualMemoryMetadata, + SourceMessage, + TextualMemoryItem, +) logger = get_logger(__name__) @@ -132,7 +136,7 @@ def __init__( self.reader = reader def retrieve_from_internet( - self, query: str, top_k: int = 10, parsed_goal=None, info=None + self, query: str, top_k: int = 10, parsed_goal=None, info=None, mode="fast" ) -> list[TextualMemoryItem]: """ Retrieve information from Xinyu search and convert to TextualMemoryItem format @@ -153,7 +157,7 @@ def retrieve_from_internet( with ContextThreadPoolExecutor(max_workers=8) as executor: futures = [ - executor.submit(self._process_result, result, query, parsed_goal, info) + executor.submit(self._process_result, result, query, parsed_goal, info, mode=mode) for result in search_results ] for future in as_completed(futures): @@ -303,7 +307,7 @@ def _extract_tags(self, title: str, content: str, summary: str, parsed_goal=None return list(set(tags))[:15] # Limit to 15 tags def _process_result( - self, result: dict, query: str, parsed_goal: str, info: None + self, result: dict, query: str, parsed_goal: str, info: None, mode="fast" ) -> list[TextualMemoryItem]: if not info: info = {"user_id": "", "session_id": ""} @@ -323,18 +327,59 @@ def _process_result( else: publish_time = datetime.now().strftime("%Y-%m-%d") - read_items = self.reader.get_memory([content], type="doc", info=info) - - memory_items = [] - for read_item_i in read_items[0]: - read_item_i.memory = ( - f"Title: {title}\nNewsTime: {publish_time}\nSummary: {summary}\n" - f"Content: {read_item_i.memory}" - ) - read_item_i.metadata.source = "web" - read_item_i.metadata.memory_type = "OuterMemory" - read_item_i.metadata.sources = [SourceMessage(type="web", url=url)] if url else [] - read_item_i.metadata.visibility = "public" - - memory_items.append(read_item_i) - return memory_items + if mode == "fast": + info_ = info.copy() + user_id = info_.pop("user_id", "") + session_id = 
info_.pop("session_id", "") + return [ + TextualMemoryItem( + memory=( + f"[Outer internet view] Title: {title}\nNewsTime:" + f" {publish_time}\nSummary:" + f" {summary}\n" + ), + metadata=SearchedTreeNodeTextualMemoryMetadata( + user_id=user_id, + session_id=session_id, + memory_type="OuterMemory", + status="activated", + type="fact", + source="web", + sources=[SourceMessage(type="web", url=url)] if url else [], + visibility="public", + info=info_, + background="", + confidence=0.99, + usage=[], + embedding=self.embedder.embed([content])[0], + internet_info={ + "title": title, + "url": url, + "summary": summary, + "content": content, + }, + ), + ) + ] + else: + read_items = self.reader.get_memory([content], type="doc", info=info) + + memory_items = [] + for read_item_i in read_items[0]: + read_item_i.memory = ( + f"Title: {title}\nNewsTime: {publish_time}\nSummary: {summary}\n" + f"Content: {read_item_i.memory}" + ) + read_item_i.metadata.source = "web" + read_item_i.metadata.memory_type = "OuterMemory" + read_item_i.metadata.sources = [SourceMessage(type="web", url=url)] if url else [] + read_item_i.metadata.visibility = "public" + read_item_i.metadata.internet_info = { + "title": title, + "url": url, + "summary": summary, + "content": content, + } + + memory_items.append(read_item_i) + return memory_items From 4543332ee62de000b53d233db19a0fb51c252c47 Mon Sep 17 00:00:00 2001 From: "yuan.wang" Date: Wed, 10 Dec 2025 16:29:15 +0800 Subject: [PATCH 25/26] add tags and new package --- docker/requirements.txt | 2 + poetry.lock | 24 +++++++++-- pyproject.toml | 2 + .../tree_text_memory/retrieve/bochasearch.py | 41 +++++++++++++++++++ .../tree_text_memory/retrieve/xinyusearch.py | 1 + 5 files changed, 66 insertions(+), 4 deletions(-) diff --git a/docker/requirements.txt b/docker/requirements.txt index d3268edae..f522dd3b6 100644 --- a/docker/requirements.txt +++ b/docker/requirements.txt @@ -160,3 +160,5 @@ xlrd==2.0.2 xlsxwriter==3.2.5 prometheus-client==0.23.1 
pymilvus==2.5.12 +nltk==3.9.1 +rake-nltk==1.0.6 diff --git a/poetry.lock b/poetry.lock index bdb962f86..dc061b2f5 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.1.4 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand. [[package]] name = "absl-py" @@ -2469,7 +2469,7 @@ version = "3.9.1" description = "Natural Language Toolkit" optional = false python-versions = ">=3.8" -groups = ["eval"] +groups = ["main", "eval"] files = [ {file = "nltk-3.9.1-py3-none-any.whl", hash = "sha256:4fa26829c5b00715afe3061398a8989dc643b92ce7dd93fb4585a70930d168a1"}, {file = "nltk-3.9.1.tar.gz", hash = "sha256:87d127bd3de4bd89a4f81265e5fa59cb1b199b27440175370f7417d2bc7ae868"}, @@ -4031,6 +4031,22 @@ urllib3 = ">=1.26.14,<3" fastembed = ["fastembed (>=0.7,<0.8)"] fastembed-gpu = ["fastembed-gpu (>=0.7,<0.8)"] +[[package]] +name = "rake-nltk" +version = "1.0.6" +description = "RAKE short for Rapid Automatic Keyword Extraction algorithm, is a domain independent keyword extraction algorithm which tries to determine key phrases in a body of text by analyzing the frequency of word appearance and its co-occurance with other words in the text." 
+optional = true +python-versions = ">=3.6,<4.0" +groups = ["main"] +markers = "extra == \"all\"" +files = [ + {file = "rake-nltk-1.0.6.tar.gz", hash = "sha256:7813d680b2ce77b51cdac1757f801a87ff47682c9dbd2982aea3b66730346122"}, + {file = "rake_nltk-1.0.6-py3-none-any.whl", hash = "sha256:1c1ffdb64cae8cb99d169d53a5ffa4635f1c4abd3a02c6e22d5d083136bdc5c1"}, +] + +[package.dependencies] +nltk = ">=3.6.2,<4.0.0" + [[package]] name = "rank-bm25" version = "0.2.2" @@ -6216,7 +6232,7 @@ cffi = {version = ">=1.11", markers = "platform_python_implementation == \"PyPy\ cffi = ["cffi (>=1.11)"] [extras] -all = ["cachetools", "chonkie", "datasketch", "jieba", "langchain-text-splitters", "markitdown", "neo4j", "pika", "pymilvus", "pymysql", "qdrant-client", "rank-bm25", "redis", "schedule", "sentence-transformers", "torch", "volcengine-python-sdk"] +all = ["cachetools", "chonkie", "datasketch", "jieba", "langchain-text-splitters", "markitdown", "neo4j", "nltk", "pika", "pymilvus", "pymysql", "qdrant-client", "rake-nltk", "rank-bm25", "redis", "schedule", "sentence-transformers", "torch", "volcengine-python-sdk"] mem-reader = ["chonkie", "langchain-text-splitters", "markitdown"] mem-scheduler = ["pika", "redis"] mem-user = ["pymysql"] @@ -6226,4 +6242,4 @@ tree-mem = ["neo4j", "schedule"] [metadata] lock-version = "2.1" python-versions = ">=3.10,<4.0" -content-hash = "04c7b73bd8063f6c8ea8ed6a60b23d59a06de50b8607aff06581cc0e40192e38" +content-hash = "dab8e54c6f4c51597adbd0fa34be7a8adb3b3a9c733508f3cc2b93c0ed434ec1" diff --git a/pyproject.toml b/pyproject.toml index 74dfefc09..7358bdcbd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -121,6 +121,8 @@ all = [ "sentence-transformers (>=4.1.0,<5.0.0)", "qdrant-client (>=1.14.2,<2.0.0)", "volcengine-python-sdk (>=4.0.4,<5.0.0)", + "nltk (>=3.9.1,<4.0.0)", + "rake-nltk (>=1.0.6,<1.1.0)", # Uncategorized dependencies ] diff --git a/src/memos/memories/textual/tree_text_memory/retrieve/bochasearch.py 
b/src/memos/memories/textual/tree_text_memory/retrieve/bochasearch.py index 133a85631..a4aeca498 100644 --- a/src/memos/memories/textual/tree_text_memory/retrieve/bochasearch.py +++ b/src/memos/memories/textual/tree_text_memory/retrieve/bochasearch.py @@ -9,9 +9,11 @@ import requests from memos.context.context import ContextThreadPoolExecutor +from memos.dependency import require_python_package from memos.embedders.factory import OllamaEmbedder from memos.log import get_logger from memos.mem_reader.base import BaseMemReader +from memos.mem_reader.read_multi_modal import detect_lang from memos.memories.textual.item import ( SearchedTreeNodeTextualMemoryMetadata, SourceMessage, @@ -121,6 +123,21 @@ def _post(self, url: str, body: dict) -> list[dict]: class BochaAISearchRetriever: """BochaAI retriever that converts search results into TextualMemoryItem objects""" + @require_python_package( + import_name="rake_nltk", + install_command="pip install rake_nltk", + install_link="https://pypi.org/project/rake-nltk/", + ) + @require_python_package( + import_name="nltk", + install_command="pip install nltk", + install_link="https://www.nltk.org/install.html", + ) + @require_python_package( + import_name="jieba", + install_command="pip install jieba", + install_link="https://github.com/fxsjy/jieba", + ) def __init__( self, access_key: str, @@ -137,9 +154,25 @@ def __init__( reader: MemReader instance for processing internet content max_results: Maximum number of search results to retrieve """ + import nltk + + try: + nltk.download("averaged_perceptron_tagger_eng") + except Exception as err: + raise Exception("Failed to download nltk averaged_perceptron_tagger_eng") from err + try: + nltk.download("stopwords") + except Exception as err: + raise Exception("Failed to download nltk stopwords") from err + + from jieba.analyse import TextRank + from rake_nltk import Rake + self.bocha_api = BochaAISearchAPI(access_key, max_results=max_results) self.embedder = embedder self.reader = 
reader + self.en_fast_keywords_extractor = Rake() + self.zh_fast_keywords_extractor = TextRank() def retrieve_from_internet( self, query: str, top_k: int = 10, parsed_goal=None, info=None, mode="fast" @@ -224,6 +257,13 @@ def _process_result( info_ = info.copy() user_id = info_.pop("user_id", "") session_id = info_.pop("session_id", "") + lang = detect_lang(summary) + tags = ( + self.zh_fast_keywords_extractor.textrank(summary)[:3] + if lang == "zh" + else self.en_fast_keywords_extractor.extract_keywords_from_text(summary)[:3] + ) + return [ TextualMemoryItem( memory=( @@ -244,6 +284,7 @@ def _process_result( background="", confidence=0.99, usage=[], + tags=tags, embedding=self.embedder.embed([content])[0], internet_info={ "title": title, diff --git a/src/memos/memories/textual/tree_text_memory/retrieve/xinyusearch.py b/src/memos/memories/textual/tree_text_memory/retrieve/xinyusearch.py index ab12a0647..c8f8e4576 100644 --- a/src/memos/memories/textual/tree_text_memory/retrieve/xinyusearch.py +++ b/src/memos/memories/textual/tree_text_memory/retrieve/xinyusearch.py @@ -347,6 +347,7 @@ def _process_result( source="web", sources=[SourceMessage(type="web", url=url)] if url else [], visibility="public", + tags=self._extract_tags(title, content, summary), info=info_, background="", confidence=0.99, From 033e8bde0d928965c7da9d6782e38042c5778a5b Mon Sep 17 00:00:00 2001 From: "yuan.wang" Date: Wed, 10 Dec 2025 19:09:50 +0800 Subject: [PATCH 26/26] modify prompt fix bug --- src/memos/api/handlers/chat_handler.py | 24 +++++++++---------- .../tree_text_memory/retrieve/utils.py | 2 +- src/memos/templates/mos_prompts.py | 5 ++-- 3 files changed, 16 insertions(+), 15 deletions(-) diff --git a/src/memos/api/handlers/chat_handler.py b/src/memos/api/handlers/chat_handler.py index 85a92c68c..614046dd6 100644 --- a/src/memos/api/handlers/chat_handler.py +++ b/src/memos/api/handlers/chat_handler.py @@ -395,16 +395,6 @@ def generate_chat_response() -> Generator[str, None, None]: 
[chat_req.mem_cube_id] if chat_req.mem_cube_id else [chat_req.user_id] ) - # for playground, add the query to memory without response - self._start_add_to_memory( - user_id=chat_req.user_id, - writable_cube_ids=writable_cube_ids, - session_id=chat_req.session_id or "default_session", - query=chat_req.query, - full_response=None, - async_mode="sync", - ) - # ====== first search text mem with parse goal ====== search_req = APISearchPlaygroundRequest( query=chat_req.query, @@ -450,7 +440,7 @@ def generate_chat_response() -> Generator[str, None, None]: pref_list = search_response.data.get("pref_mem") or [] pref_memories = pref_list[0].get("memories", []) if pref_list else [] pref_md_string = self._build_pref_md_string_for_playground(pref_memories) - yield f"data: {json.dumps({'type': 'pref_md_string', 'data': pref_md_string})}\n\n" + yield f"data: {json.dumps({'type': 'pref_md_string', 'data': pref_md_string}, ensure_ascii=False)}\n\n" # Use first readable cube ID for scheduler (backward compatibility) scheduler_cube_id = ( @@ -531,6 +521,16 @@ def generate_chat_response() -> Generator[str, None, None]: ) yield f"data: {json.dumps({'type': 'reference', 'data': reference})}\n\n" + # for playground, add the query to memory without response + self._start_add_to_memory( + user_id=chat_req.user_id, + writable_cube_ids=writable_cube_ids, + session_id=chat_req.session_id or "default_session", + query=chat_req.query, + full_response=None, + async_mode="sync", + ) + # Step 2: Build system prompt with memories system_prompt = self._build_enhance_system_prompt( filtered_memories, pref_string @@ -794,7 +794,7 @@ def _build_enhance_system_prompt( sys_body + "\n\n# Memories\n## PersonalMemory (ordered)\n" + mem_block_p - + "\n## OuterMemory (ordered)\n" + + "\n## OuterMemory (from Internet Search, ordered)\n" + mem_block_o + f"\n\n{pref_string}" ) diff --git a/src/memos/memories/textual/tree_text_memory/retrieve/utils.py 
b/src/memos/memories/textual/tree_text_memory/retrieve/utils.py index 55c6243d8..8659b6112 100644 --- a/src/memos/memories/textual/tree_text_memory/retrieve/utils.py +++ b/src/memos/memories/textual/tree_text_memory/retrieve/utils.py @@ -4,7 +4,7 @@ 1. Keys: the high-level keywords directly relevant to the user’s task. 2. Tags: thematic tags to help categorize and retrieve related memories. 3. Goal Type: retrieval | qa | generation -4. Rephrased instruction: Give a rephrased task instruction based on the former conversation to make it less confusing to look alone. Make full use of information related to the query. If you think the task instruction is easy enough to understand, or there is no former conversation, set "rephrased_instruction" to an empty string. +4. Rephrased instruction: Give a rephrased task instruction based on the former conversation to make it less confusing to look alone. Make full use of information related to the query, including user's personal information. If you think the task instruction is easy enough to understand, or there is no former conversation, set "rephrased_instruction" to an empty string. 5. Need for internet search: If the user's task instruction only involves objective facts or can be completed without introducing external knowledge, set "internet_search" to False. Otherwise, set it to True. 6. Memories: Provide 2–5 short semantic expansions or rephrasings of the rephrased/original user task instruction. These are used for improved embedding search coverage. Each should be clear, concise, and meaningful for retrieval. 
diff --git a/src/memos/templates/mos_prompts.py b/src/memos/templates/mos_prompts.py index 15f1a44b3..0d8b3019b 100644 --- a/src/memos/templates/mos_prompts.py +++ b/src/memos/templates/mos_prompts.py @@ -65,7 +65,6 @@ MEMOS_PRODUCT_BASE_PROMPT = """ # System - Role: You are MemOS🧚, nickname Little M(小忆🧚) — an advanced Memory Operating System assistant by 记忆张量(MemTensor Technology Co., Ltd.), a Shanghai-based AI research company advised by an academician of the Chinese Academy of Sciences. -- Date: {date} - Mission & Values: Uphold MemTensor’s vision of "low cost, low hallucination, high generalization, exploring AI development paths aligned with China’s national context and driving the adoption of trustworthy AI technologies. MemOS’s mission is to give large language models (LLMs) and autonomous agents **human-like long-term memory**, turning memory from a black-box inside model weights into a **manageable, schedulable, and auditable** core resource. @@ -105,12 +104,14 @@ - When using facts from memories, add citations at the END of the sentence with `[i:memId]`. - `i` is the order in the "Memories" section below (starting at 1). `memId` is the given short memory ID. - Multiple citations must be concatenated directly, e.g., `[1:sed23s], [ -2:1k3sdg], [3:ghi789]`. Do NOT use commas inside brackets. +2:1k3sdg], [3:ghi789]`. Do NOT use commas inside brackets. Do not use wrong format like `[def456]`. - Cite only relevant memories; keep citations minimal but sufficient. - Do not use a connected format like [1:abc123,2:def456]. - Brackets MUST be English half-width square brackets `[]`, NEVER use Chinese full-width brackets `【】` or any other symbols. - **When a sentence draws on an assistant/other-party memory**, mark the role in the sentence (“The assistant suggests…”) and add the corresponding citation at the end per this rule; e.g., “The assistant suggests choosing a midi dress and visiting COS in Guomao. 
[1:abc123]” +# Current Date: {date} + # Style - Tone: {tone}; Verbosity: {verbosity}. - Be direct, well-structured, and conversational. Avoid fluff. Use short lists when helpful.