From 845f8b35a2f925e331b54df7c7d582cc240654a2 Mon Sep 17 00:00:00 2001 From: "yuan.wang" Date: Tue, 25 Nov 2025 10:24:55 +0800 Subject: [PATCH] add status of reasoning in playground --- src/memos/api/handlers/chat_handler.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/memos/api/handlers/chat_handler.py b/src/memos/api/handlers/chat_handler.py index 2f40f1c91..f32ebaff0 100644 --- a/src/memos/api/handlers/chat_handler.py +++ b/src/memos/api/handlers/chat_handler.py @@ -425,8 +425,6 @@ def generate_chat_response() -> Generator[str, None, None]: f"current_system_prompt: {system_prompt}" ) - yield f"data: {json.dumps({'type': 'status', 'data': '2'})}\n\n" - # Step 3: Generate streaming response from LLM if ( chat_req.model_name_or_path @@ -448,9 +446,11 @@ for chunk in response_stream: if chunk == "<think>": in_think = True + yield f"data: {json.dumps({'type': 'status', 'data': 'reasoning'})}\n\n" continue if chunk == "</think>": in_think = False + yield f"data: {json.dumps({'type': 'status', 'data': '2'})}\n\n" continue if in_think: