Merged
44 commits
70373f9 fix playground bug, internet search judge (Dec 4, 2025)
d181339 Merge branch 'dev' into feat/fix_palyground_bug (Dec 4, 2025)
11cf00a fix playground internet bug (Dec 4, 2025)
6b10ce1 merge dev (Dec 4, 2025)
c861f61 modify delete mem (Dec 4, 2025)
e638039 modify tool resp bug in multi cube (Dec 4, 2025)
dcd3d50 Merge branch 'dev' into feat/fix_palyground_bug (Dec 4, 2025)
0c0eff8 Merge branch 'dev' into feat/fix_palyground_bug (Dec 5, 2025)
8765dc4 fix bug in playground chat handle and search inter (Dec 5, 2025)
1a335db modify prompt (Dec 5, 2025)
18320ff fix bug in playground (Dec 6, 2025)
666b897 fix bug playfround (Dec 6, 2025)
275b9b6 Merge branch 'dev' into feat/fix_palyground_bug (Dec 7, 2025)
0d22512 fix bug (Dec 7, 2025)
d38f55f Merge branch 'dev' into feat/fix_palyground_bug (Dec 7, 2025)
a9eb1f6 fix code (Dec 7, 2025)
94ad709 Merge branch 'dev' into feat/fix_palyground_bug (Dec 7, 2025)
723a14f fix model bug in playground (Dec 7, 2025)
6f06a23 Merge branch 'dev' into feat/fix_palyground_bug (Dec 7, 2025)
a300670 Merge branch 'dev' into feat/fix_palyground_bug (Dec 8, 2025)
7ee13b1 Merge branch 'dev' into feat/fix_palyground_bug (Dec 8, 2025)
5ab6e92 modify plan b (Dec 8, 2025)
1bb0bcd llm param modify (Dec 8, 2025)
1b607e7 Merge branch 'dev' into feat/fix_palyground_bug (Dec 8, 2025)
f5bc426 add logger in playground (Dec 8, 2025)
a9fa309 modify code (Dec 9, 2025)
d2efa24 Merge branch 'dev' into feat/fix_palyground_bug (Dec 9, 2025)
9ebfbe1 Merge branch 'dev' into feat/fix_palyground_bug (fridayL, Dec 9, 2025)
4c055d0 fix bug (Dec 9, 2025)
27b4fc4 modify code (Dec 9, 2025)
cefeefb modify code (Dec 9, 2025)
7e05fa7 fix bug (Dec 9, 2025)
a4f66b1 Merge branch 'dev' into feat/fix_palyground_bug (Dec 9, 2025)
9b47647 Merge branch 'dev' into feat/fix_palyground_bug (Dec 9, 2025)
05da172 fix search bug in plarground (Dec 9, 2025)
e410ec2 fixx bug (Dec 9, 2025)
0324588 move schadualr to back (Dec 9, 2025)
a834028 Merge branch 'dev' into feat/fix_palyground_bug (Dec 9, 2025)
4084954 modify pref location (Dec 9, 2025)
de5e372 Merge branch 'dev' into feat/fix_palyground_bug (Dec 9, 2025)
87861ab Merge branch 'dev' into feat/fix_palyground_bug (Dec 9, 2025)
8b547b8 modify fast net search (Dec 9, 2025)
c915867 Merge branch 'dev' into feat/fix_palyground_bug (Dec 9, 2025)
2f238fd Merge branch 'dev' into feat/fix_palyground_bug (Dec 9, 2025)
src/memos/memories/textual/tree_text_memory/retrieve/bochasearch.py (108 changes: 74 additions & 34 deletions)
@@ -12,7 +12,11 @@
from memos.embedders.factory import OllamaEmbedder
from memos.log import get_logger
from memos.mem_reader.base import BaseMemReader
-from memos.memories.textual.item import SourceMessage, TextualMemoryItem
+from memos.memories.textual.item import (
+    SearchedTreeNodeTextualMemoryMetadata,
+    SourceMessage,
+    TextualMemoryItem,
+)


logger = get_logger(__name__)
@@ -138,7 +142,7 @@ def __init__(
self.reader = reader

def retrieve_from_internet(
-        self, query: str, top_k: int = 10, parsed_goal=None, info=None
+        self, query: str, top_k: int = 10, parsed_goal=None, info=None, mode="fast"
) -> list[TextualMemoryItem]:
"""
Default internet retrieval (Web Search).
@@ -155,24 +159,24 @@
"""
search_results = self.bocha_api.search_ai(query) # ✅ default to
# web-search
-        return self._convert_to_mem_items(search_results, query, parsed_goal, info)
+        return self._convert_to_mem_items(search_results, query, parsed_goal, info, mode=mode)

def retrieve_from_web(
-        self, query: str, top_k: int = 10, parsed_goal=None, info=None
+        self, query: str, top_k: int = 10, parsed_goal=None, info=None, mode="fast"
) -> list[TextualMemoryItem]:
"""Explicitly retrieve using Bocha Web Search."""
search_results = self.bocha_api.search_web(query)
-        return self._convert_to_mem_items(search_results, query, parsed_goal, info)
+        return self._convert_to_mem_items(search_results, query, parsed_goal, info, mode=mode)

def retrieve_from_ai(
-        self, query: str, top_k: int = 10, parsed_goal=None, info=None
+        self, query: str, top_k: int = 10, parsed_goal=None, info=None, mode="fast"
) -> list[TextualMemoryItem]:
"""Explicitly retrieve using Bocha AI Search."""
search_results = self.bocha_api.search_ai(query)
-        return self._convert_to_mem_items(search_results, query, parsed_goal, info)
+        return self._convert_to_mem_items(search_results, query, parsed_goal, info, mode=mode)

def _convert_to_mem_items(
-        self, search_results: list[dict], query: str, parsed_goal=None, info=None
+        self, search_results: list[dict], query: str, parsed_goal=None, info=None, mode="fast"
):
"""Convert API search results into TextualMemoryItem objects."""
memory_items = []
@@ -181,7 +185,7 @@ def _convert_to_mem_items(

with ContextThreadPoolExecutor(max_workers=8) as executor:
futures = [
-                executor.submit(self._process_result, r, query, parsed_goal, info)
+                executor.submit(self._process_result, r, query, parsed_goal, info, mode=mode)
for r in search_results
]
for future in as_completed(futures):
@@ -195,7 +199,7 @@
return list(unique_memory_items.values())

def _process_result(
-        self, result: dict, query: str, parsed_goal: str, info: dict[str, Any]
+        self, result: dict, query: str, parsed_goal: str, info: dict[str, Any], mode="fast"
) -> list[TextualMemoryItem]:
"""Process one Bocha search result into TextualMemoryItem."""
title = result.get("name", "")
@@ -216,27 +220,63 @@
else:
publish_time = datetime.now().strftime("%Y-%m-%d")

-        # Use reader to split and process the content into chunks
-        read_items = self.reader.get_memory([content], type="doc", info=info)
-
-        memory_items = []
-        for read_item_i in read_items[0]:
-            read_item_i.memory = (
-                f"[Outer internet view] Title: {title}\nNewsTime:"
-                f" {publish_time}\nSummary:"
-                f" {summary}\n"
-                f"Content: {read_item_i.memory}"
-            )
-            read_item_i.metadata.source = "web"
-            read_item_i.metadata.memory_type = "OuterMemory"
-            read_item_i.metadata.sources = [SourceMessage(type="web", url=url)] if url else []
-            read_item_i.metadata.visibility = "public"
-            read_item_i.metadata.internet_info = {
-                "title": title,
-                "url": url,
-                "site_name": site_name,
-                "site_icon": site_icon,
-                "summary": summary,
-            }
-            memory_items.append(read_item_i)
-        return memory_items
+        if mode == "fast":
+            info_ = info.copy()
+            user_id = info_.pop("user_id", "")
+            session_id = info_.pop("session_id", "")
+            return [
+                TextualMemoryItem(
+                    memory=(
+                        f"[Outer internet view] Title: {title}\nNewsTime:"
+                        f" {publish_time}\nSummary:"
+                        f" {summary}\n"
+                    ),
+                    metadata=SearchedTreeNodeTextualMemoryMetadata(
+                        user_id=user_id,
+                        session_id=session_id,
+                        memory_type="OuterMemory",
+                        status="activated",
+                        type="fact",
+                        source="web",
+                        sources=[SourceMessage(type="web", url=url)] if url else [],
+                        visibility="public",
+                        info=info_,
+                        background="",
+                        confidence=0.99,
+                        usage=[],
+                        embedding=self.embedder.embed([content])[0],
+                        internet_info={
+                            "title": title,
+                            "url": url,
+                            "site_name": site_name,
+                            "site_icon": site_icon,
+                            "summary": summary,
+                        },
+                    ),
+                )
+            ]
+        else:
+            # Use reader to split and process the content into chunks
+            read_items = self.reader.get_memory([content], type="doc", info=info)
+
+            memory_items = []
+            for read_item_i in read_items[0]:
+                read_item_i.memory = (
+                    f"[Outer internet view] Title: {title}\nNewsTime:"
+                    f" {publish_time}\nSummary:"
+                    f" {summary}\n"
+                    f"Content: {read_item_i.memory}"
+                )
+                read_item_i.metadata.source = "web"
+                read_item_i.metadata.memory_type = "OuterMemory"
+                read_item_i.metadata.sources = [SourceMessage(type="web", url=url)] if url else []
+                read_item_i.metadata.visibility = "public"
+                read_item_i.metadata.internet_info = {
+                    "title": title,
+                    "url": url,
+                    "site_name": site_name,
+                    "site_icon": site_icon,
+                    "summary": summary,
+                }
+                memory_items.append(read_item_i)
+            return memory_items
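
Taken together, this diff threads a new `mode` flag from the public retrieve_from_* methods down to `_process_result`. In "fast" mode each search hit becomes a single summary-level `TextualMemoryItem` with one embedding computed from the raw page content, skipping the reader's chunking pipeline; the old reader-based path is kept for the non-fast branch. Below is a minimal runnable sketch of that dispatch pattern; `MemItem`, `FakeEmbedder`, and `FakeReader` are stand-ins invented for this example, not the real `TextualMemoryItem`, embedder, or `BaseMemReader`:

```python
from dataclasses import dataclass, field
from typing import Any


@dataclass
class MemItem:
    memory: str
    metadata: dict[str, Any] = field(default_factory=dict)


class FakeEmbedder:
    """Stand-in: a real embedder returns dense vectors."""

    def embed(self, texts: list[str]) -> list[list[float]]:
        return [[0.0] * 4 for _ in texts]


class FakeReader:
    """Stand-in for the chunking mem-reader: one chunk per 100 characters."""

    def get_memory(self, texts: list[str], type: str, info: dict) -> list[list[MemItem]]:
        return [
            [MemItem(text[i : i + 100]) for i in range(0, len(text), 100)]
            for text in texts
        ]


def process_result(result: dict, info: dict, mode: str = "fast") -> list[MemItem]:
    title, content = result.get("name", ""), result.get("content", "")
    if mode == "fast":
        # Fast path: one summary-level item, one embedding call, no chunking.
        item = MemItem(memory=f"[Outer internet view] Title: {title}")
        item.metadata["embedding"] = FakeEmbedder().embed([content])[0]
        return [item]
    # Fine path: the reader splits the page into chunk-level memory items.
    return FakeReader().get_memory([content], type="doc", info=info)[0]


hit = {"name": "example", "content": "x" * 250}
print(len(process_result(hit, {}, mode="fast")))  # -> 1
print(len(process_result(hit, {}, mode="fine")))  # -> 3
```

The payoff in fast mode is latency: one embedder call per hit and no reader pass, at the cost of coarser, summary-level memories.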
@@ -536,7 +536,7 @@ def _retrieve_from_internet(
return []
logger.info(f"[PATH-C] '{query}' Retrieving from internet...")
items = self.internet_retriever.retrieve_from_internet(
-            query=query, top_k=top_k, parsed_goal=parsed_goal, info=info
+            query=query, top_k=top_k, parsed_goal=parsed_goal, info=info, mode=mode
)
logger.info(f"[PATH-C] '{query}' Retrieved from internet {len(items)} items: {items}")
return self.reranker.rerank(
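
The caller-side change is the mirror image: `_retrieve_from_internet` now forwards its own `mode` argument into `retrieve_from_internet`, so fast-mode searches stay fast end to end. If retriever implementations that predate the parameter ever coexist with these, one defensive pattern (a sketch assuming a duck-typed `retriever` object, not code from this PR) is to forward `mode` only when the target signature accepts it:

```python
import inspect


def forward_mode(retriever, query: str, top_k: int = 10, mode: str = "fast", **kw):
    """Pass `mode` through only if this retriever's signature accepts it."""
    fn = retriever.retrieve_from_internet
    if "mode" in inspect.signature(fn).parameters:
        kw["mode"] = mode
    return fn(query=query, top_k=top_k, **kw)
```

Here every retriever touched by this PR (Bocha web/AI and Xinyu) accepts `mode`, so the plain keyword forwarding in the diff is enough.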
src/memos/memories/textual/tree_text_memory/retrieve/xinyusearch.py (83 changes: 64 additions & 19 deletions)
@@ -12,7 +12,11 @@
from memos.embedders.factory import OllamaEmbedder
from memos.log import get_logger
from memos.mem_reader.base import BaseMemReader
-from memos.memories.textual.item import SourceMessage, TextualMemoryItem
+from memos.memories.textual.item import (
+    SearchedTreeNodeTextualMemoryMetadata,
+    SourceMessage,
+    TextualMemoryItem,
+)


logger = get_logger(__name__)
@@ -132,7 +136,7 @@ def __init__(
self.reader = reader

def retrieve_from_internet(
-        self, query: str, top_k: int = 10, parsed_goal=None, info=None
+        self, query: str, top_k: int = 10, parsed_goal=None, info=None, mode="fast"
) -> list[TextualMemoryItem]:
"""
Retrieve information from Xinyu search and convert to TextualMemoryItem format
@@ -153,7 +157,7 @@

with ContextThreadPoolExecutor(max_workers=8) as executor:
futures = [
-                executor.submit(self._process_result, result, query, parsed_goal, info)
+                executor.submit(self._process_result, result, query, parsed_goal, info, mode=mode)
for result in search_results
]
for future in as_completed(futures):
@@ -303,7 +307,7 @@ def _extract_tags(self, title: str, content: str, summary: str, parsed_goal=None
return list(set(tags))[:15] # Limit to 15 tags

def _process_result(
-        self, result: dict, query: str, parsed_goal: str, info: None
+        self, result: dict, query: str, parsed_goal: str, info: None, mode="fast"
) -> list[TextualMemoryItem]:
if not info:
info = {"user_id": "", "session_id": ""}
@@ -323,18 +327,59 @@
else:
publish_time = datetime.now().strftime("%Y-%m-%d")

-        read_items = self.reader.get_memory([content], type="doc", info=info)
-
-        memory_items = []
-        for read_item_i in read_items[0]:
-            read_item_i.memory = (
-                f"Title: {title}\nNewsTime: {publish_time}\nSummary: {summary}\n"
-                f"Content: {read_item_i.memory}"
-            )
-            read_item_i.metadata.source = "web"
-            read_item_i.metadata.memory_type = "OuterMemory"
-            read_item_i.metadata.sources = [SourceMessage(type="web", url=url)] if url else []
-            read_item_i.metadata.visibility = "public"
-
-            memory_items.append(read_item_i)
-        return memory_items
+        if mode == "fast":
+            info_ = info.copy()
+            user_id = info_.pop("user_id", "")
+            session_id = info_.pop("session_id", "")
+            return [
+                TextualMemoryItem(
+                    memory=(
+                        f"[Outer internet view] Title: {title}\nNewsTime:"
+                        f" {publish_time}\nSummary:"
+                        f" {summary}\n"
+                    ),
+                    metadata=SearchedTreeNodeTextualMemoryMetadata(
+                        user_id=user_id,
+                        session_id=session_id,
+                        memory_type="OuterMemory",
+                        status="activated",
+                        type="fact",
+                        source="web",
+                        sources=[SourceMessage(type="web", url=url)] if url else [],
+                        visibility="public",
+                        info=info_,
+                        background="",
+                        confidence=0.99,
+                        usage=[],
+                        embedding=self.embedder.embed([content])[0],
+                        internet_info={
+                            "title": title,
+                            "url": url,
+                            "summary": summary,
+                            "content": content,
+                        },
+                    ),
+                )
+            ]
+        else:
+            read_items = self.reader.get_memory([content], type="doc", info=info)
+
+            memory_items = []
+            for read_item_i in read_items[0]:
+                read_item_i.memory = (
+                    f"Title: {title}\nNewsTime: {publish_time}\nSummary: {summary}\n"
+                    f"Content: {read_item_i.memory}"
+                )
+                read_item_i.metadata.source = "web"
+                read_item_i.metadata.memory_type = "OuterMemory"
+                read_item_i.metadata.sources = [SourceMessage(type="web", url=url)] if url else []
+                read_item_i.metadata.visibility = "public"
+                read_item_i.metadata.internet_info = {
+                    "title": title,
+                    "url": url,
+                    "summary": summary,
+                    "content": content,
+                }
+
+                memory_items.append(read_item_i)
+            return memory_items
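
The Xinyu fast path is nearly identical to the Bocha one; the two differ only in the `internet_info` payload (Bocha adds `site_name`/`site_icon`, Xinyu carries the raw `content`). A possible follow-up, not part of this PR, would be a shared helper; a hedged sketch reusing the item classes this diff already imports (`build_fast_item` is a hypothetical name):

```python
from memos.memories.textual.item import (
    SearchedTreeNodeTextualMemoryMetadata,
    SourceMessage,
    TextualMemoryItem,
)


def build_fast_item(
    embedder, title, publish_time, summary, content, url, info, internet_info
) -> TextualMemoryItem:
    """Build one summary-level OuterMemory item, as both fast paths do."""
    info_ = info.copy()  # ids are popped out; the rest rides along as metadata.info
    return TextualMemoryItem(
        memory=(
            f"[Outer internet view] Title: {title}\nNewsTime:"
            f" {publish_time}\nSummary:"
            f" {summary}\n"
        ),
        metadata=SearchedTreeNodeTextualMemoryMetadata(
            user_id=info_.pop("user_id", ""),
            session_id=info_.pop("session_id", ""),
            memory_type="OuterMemory",
            status="activated",
            type="fact",
            source="web",
            sources=[SourceMessage(type="web", url=url)] if url else [],
            visibility="public",
            info=info_,
            background="",
            confidence=0.99,
            usage=[],
            embedding=embedder.embed([content])[0],
            internet_info=internet_info,
        ),
    )
```

Each retriever would then call this with only its provider-specific `internet_info` dict, keeping the shared defaults (status, type, confidence, visibility) in one place.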