From 845f8b35a2f925e331b54df7c7d582cc240654a2 Mon Sep 17 00:00:00 2001
From: "yuan.wang"
Date: Tue, 25 Nov 2025 10:24:55 +0800
Subject: [PATCH 1/2] add status of reasoning in playground

---
 src/memos/api/handlers/chat_handler.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/memos/api/handlers/chat_handler.py b/src/memos/api/handlers/chat_handler.py
index 2f40f1c91..f32ebaff0 100644
--- a/src/memos/api/handlers/chat_handler.py
+++ b/src/memos/api/handlers/chat_handler.py
@@ -425,8 +425,6 @@ def generate_chat_response() -> Generator[str, None, None]:
                 f"current_system_prompt: {system_prompt}"
             )
 
-            yield f"data: {json.dumps({'type': 'status', 'data': '2'})}\n\n"
-
             # Step 3: Generate streaming response from LLM
             if (
                 chat_req.model_name_or_path
@@ -448,9 +446,11 @@
             for chunk in response_stream:
                 if chunk == "<think>":
                     in_think = True
+                    yield f"data: {json.dumps({'type': 'status', 'data': 'reasoning'})}\n\n"
                     continue
                 if chunk == "</think>":
                     in_think = False
+                    yield f"data: {json.dumps({'type': 'status', 'data': '2'})}\n\n"
                     continue
 
                 if in_think:

From 7000ee8a847d166f1eedba70276f32f6956338bd Mon Sep 17 00:00:00 2001
From: "yuan.wang"
Date: Tue, 25 Nov 2025 18:01:43 +0800
Subject: [PATCH 2/2] playground chat bug fix

---
 src/memos/api/handlers/chat_handler.py | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/src/memos/api/handlers/chat_handler.py b/src/memos/api/handlers/chat_handler.py
index f32ebaff0..f0fcbabd9 100644
--- a/src/memos/api/handlers/chat_handler.py
+++ b/src/memos/api/handlers/chat_handler.py
@@ -402,9 +402,9 @@ def generate_chat_response() -> Generator[str, None, None]:
 
             # Prepare preference markdown string
             if chat_req.include_preference:
-                pref_md_string = self._build_pref_md_string_for_playground(
-                    search_response.data["pref_mem"][0].get("memories", [])
-                )
+                pref_list = search_response.data.get("pref_mem") or []
+                pref_memories = pref_list[0].get("memories", []) if pref_list else []
+                pref_md_string = self._build_pref_md_string_for_playground(pref_memories)
                 yield f"data: {json.dumps({'type': 'pref_md_string', 'data': pref_md_string})}\n\n"
 
             # Step 2: Build system prompt with memories
@@ -564,17 +564,17 @@ def _build_pref_md_string_for_playground(self, pref_mem_list: list[any]) -> str:
         explicit = []
         implicit = []
         for pref_mem in pref_mem_list:
-            if pref_mem["metadata"]["preference_type"] == "explicit":
+            if pref_mem["metadata"]["preference_type"] == "explicit_preference":
                 explicit.append(
                     {
-                        "content": pref_mem["preference"],
+                        "content": pref_mem["metadata"]["preference"],
                         "reasoning": pref_mem["metadata"]["reasoning"],
                     }
                 )
-            elif pref_mem["metadata"]["preference_type"] == "implicit":
+            elif pref_mem["metadata"]["preference_type"] == "implicit_preference":
                 implicit.append(
                     {
-                        "content": pref_mem["preference"],
+                        "content": pref_mem["metadata"]["preference"],
                         "reasoning": pref_mem["metadata"]["reasoning"],
                     }
                 )