diff --git a/src/memos/api/handlers/component_init.py b/src/memos/api/handlers/component_init.py
index 89e61e79d..a01d8fc2a 100644
--- a/src/memos/api/handlers/component_init.py
+++ b/src/memos/api/handlers/component_init.py
@@ -40,6 +40,7 @@
 from memos.memories.textual.simple_preference import SimplePreferenceTextMemory
 from memos.memories.textual.simple_tree import SimpleTreeTextMemory
 from memos.memories.textual.tree_text_memory.organize.manager import MemoryManager
+from memos.memories.textual.tree_text_memory.retrieve.retrieve_utils import FastTokenizer
 
 
 if TYPE_CHECKING:
@@ -142,7 +143,7 @@ def init_server() -> dict[str, Any]:
     )
     logger.debug("Memory manager initialized")
 
-
+    tokenizer = FastTokenizer()
     # Initialize text memory
     text_mem = SimpleTreeTextMemory(
         llm=llm,
@@ -153,6 +154,7 @@ def init_server() -> dict[str, Any]:
         memory_manager=memory_manager,
         config=default_cube_config.text_mem.config,
         internet_retriever=internet_retriever,
+        tokenizer=tokenizer,
     )
     logger.debug("Text memory initialized")
@@ -270,7 +272,6 @@ def init_server() -> dict[str, Any]:
 
     online_bot = get_online_bot_function() if dingding_enabled else None
     logger.info("DingDing bot is enabled")
-
     # Return all components as a dictionary for easy access and extension
     return {
         "graph_db": graph_db,
diff --git a/src/memos/api/handlers/search_handler.py b/src/memos/api/handlers/search_handler.py
index 7d7d52dc4..c8b92e225 100644
--- a/src/memos/api/handlers/search_handler.py
+++ b/src/memos/api/handlers/search_handler.py
@@ -191,7 +191,7 @@ def _fast_search(
         """
         target_session_id = search_req.session_id or "default_session"
         search_filter = {"session_id": search_req.session_id} if search_req.session_id else None
-
+        plugin = search_req.source == "plugin"
         search_results = self.naive_mem_cube.text_mem.search(
             query=search_req.query,
             user_name=user_context.mem_cube_id,
@@ -205,6 +205,7 @@ def _fast_search(
                 "session_id": target_session_id,
                 "chat_history": search_req.chat_history,
             },
+            plugin=plugin,
         )
 
         formatted_memories = [format_memory_item(data) for data in search_results]
diff --git a/src/memos/api/product_models.py b/src/memos/api/product_models.py
index f7f0304c7..c238e7d09 100644
--- a/src/memos/api/product_models.py
+++ b/src/memos/api/product_models.py
@@ -185,6 +185,7 @@ class APISearchRequest(BaseRequest):
     )
     include_preference: bool = Field(True, description="Whether to handle preference memory")
     pref_top_k: int = Field(6, description="Number of preference results to return")
+    source: str | None = Field(None, description="Source of the search request; 'plugin' selects the simplified retrieval path")
 
 
 class APIADDRequest(BaseRequest):
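The new `source` field is what flips `_fast_search` onto the simplified path. A minimal sketch of the flag derivation (the request is shown as a plain dict for illustration; the real object is an `APISearchRequest` and may carry other required fields):

```python
# Schematic only: mirrors the checks in SearchHandler._fast_search.
search_req = {"query": "hotels in Tokyo", "session_id": "sess-42", "source": "plugin"}

plugin = search_req.get("source") == "plugin"
search_filter = {"session_id": search_req["session_id"]} if search_req.get("session_id") else None
assert plugin and search_filter == {"session_id": "sess-42"}
# text_mem.search(..., plugin=plugin) then routes into Searcher._retrieve_simple.
```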
diff --git a/src/memos/graph_dbs/polardb.py b/src/memos/graph_dbs/polardb.py
index da1635296..eaa0a6881 100644
--- a/src/memos/graph_dbs/polardb.py
+++ b/src/memos/graph_dbs/polardb.py
@@ -1450,6 +1450,115 @@
     def get_context_chain(self, id: str, type: str = "FOLLOWS") -> list[str]:
         """Get the ordered context chain starting from a node."""
         raise NotImplementedError
 
+    @timed
+    def search_by_fulltext(
+        self,
+        query_words: list[str],
+        top_k: int = 10,
+        scope: str | None = None,
+        status: str | None = None,
+        threshold: float | None = None,
+        search_filter: dict | None = None,
+        user_name: str | None = None,
+        tsvector_field: str = "properties_tsvector_zh",
+        tsquery_config: str = "jiebaqry",
+        **kwargs,
+    ) -> list[dict]:
+        """
+        Full-text search using PostgreSQL's full-text search capabilities.
+
+        Args:
+            query_words: list of query words, joined with " | " into an OR tsquery
+            top_k: maximum number of results to return
+            scope: memory type filter (memory_type)
+            status: status filter, defaults to "activated"
+            threshold: minimum rank score; results below it are dropped
+            search_filter: additional property filter conditions
+            user_name: username filter
+            tsvector_field: full-text index column, defaults to properties_tsvector_zh
+            tsquery_config: text search configuration, defaults to jiebaqry (Chinese word segmentation)
+            **kwargs: other parameters (e.g. cube_name)
+
+        Returns:
+            list[dict]: result list containing id and score
+        """
+        # Build WHERE clause dynamically, same as search_by_embedding
+        where_clauses = []
+
+        if scope:
+            where_clauses.append(
+                f"ag_catalog.agtype_access_operator(properties, '\"memory_type\"'::agtype) = '\"{scope}\"'::agtype"
+            )
+        if status:
+            where_clauses.append(
+                f"ag_catalog.agtype_access_operator(properties, '\"status\"'::agtype) = '\"{status}\"'::agtype"
+            )
+        else:
+            where_clauses.append(
+                "ag_catalog.agtype_access_operator(properties, '\"status\"'::agtype) = '\"activated\"'::agtype"
+            )
+
+        # Add user_name filter
+        user_name = user_name if user_name else self.config.user_name
+        where_clauses.append(
+            f"ag_catalog.agtype_access_operator(properties, '\"user_name\"'::agtype) = '\"{user_name}\"'::agtype"
+        )
+
+        # Add search_filter conditions
+        if search_filter:
+            for key, value in search_filter.items():
+                if isinstance(value, str):
+                    where_clauses.append(
+                        f"ag_catalog.agtype_access_operator(properties, '\"{key}\"'::agtype) = '\"{value}\"'::agtype"
+                    )
+                else:
+                    where_clauses.append(
+                        f"ag_catalog.agtype_access_operator(properties, '\"{key}\"'::agtype) = {value}::agtype"
+                    )
+
+        # Add fulltext search condition.
+        # Convert query_words to an OR query: "word1 | word2 | word3"
+        tsquery_string = " | ".join(query_words)
+
+        where_clauses.append(f"{tsvector_field} @@ to_tsquery('{tsquery_config}', %s)")
+
+        where_clause = f"WHERE {' AND '.join(where_clauses)}" if where_clauses else ""
+
+        # Build fulltext search query
+        query = f"""
+        SELECT
+            ag_catalog.agtype_access_operator(properties, '"id"'::agtype) AS old_id,
+            agtype_object_field_text(properties, 'memory') as memory_text,
+            ts_rank({tsvector_field}, to_tsquery('{tsquery_config}', %s)) as rank
+        FROM "{self.db_name}_graph"."Memory"
+        {where_clause}
+        ORDER BY rank DESC
+        LIMIT {top_k};
+        """
+
+        params = [tsquery_string, tsquery_string]
+
+        conn = self._get_connection()
+        try:
+            with conn.cursor() as cursor:
+                cursor.execute(query, params)
+                results = cursor.fetchall()
+                output = []
+                for row in results:
+                    old_id = row[0]  # old_id
+                    rank = row[2]  # rank score
+
+                    id_val = str(old_id)
+                    score_val = float(rank)
+
+                    # Apply threshold filter if specified
+                    if threshold is None or score_val >= threshold:
+                        output.append({"id": id_val, "score": score_val})
+
+                return output[:top_k]
+        finally:
+            self._return_connection(conn)
+
     @timed
     def search_by_embedding(
         self,
diff --git a/src/memos/memories/textual/item.py b/src/memos/memories/textual/item.py
index e7595443d..947e73463 100644
--- a/src/memos/memories/textual/item.py
+++ b/src/memos/memories/textual/item.py
@@ -199,6 +199,7 @@ class PreferenceTextualMemoryMetadata(TextualMemoryMetadata):
     preference: str | None = Field(default=None, description="Preference.")
     created_at: str | None = Field(default=None, description="Timestamp of the dialog.")
     mem_cube_id: str | None = Field(default=None, description="ID of the MemCube.")
+    score: float | None = Field(default=None, description="Score of the retrieval result.")
 
 
 class TextualMemoryItem(BaseModel):
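How the query words become a PostgreSQL OR-tsquery in `search_by_fulltext` (field and config names come from this diff; the `jiebaqry` text-search configuration is assumed to be installed on the PolarDB side):

```python
# Words are OR-ed, so any matching word ranks a row.
query_words = ["上海", "酒店", "预订"]
tsquery_string = " | ".join(query_words)
assert tsquery_string == "上海 | 酒店 | 预订"
# The SQL then filters with: properties_tsvector_zh @@ to_tsquery('jiebaqry', %s)
# and ranks with ts_rank(...), returning [{"id": ..., "score": ...}] capped at top_k.
```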
result.") class TextualMemoryItem(BaseModel): diff --git a/src/memos/memories/textual/prefer_text_memory/extractor.py b/src/memos/memories/textual/prefer_text_memory/extractor.py index d5eab2aec..947bddf85 100644 --- a/src/memos/memories/textual/prefer_text_memory/extractor.py +++ b/src/memos/memories/textual/prefer_text_memory/extractor.py @@ -90,7 +90,8 @@ def extract_implicit_preference(self, qa_pair: MessageList | str) -> dict[str, A response = self.llm_provider.generate([{"role": "user", "content": prompt}]) response = response.strip().replace("```json", "").replace("```", "").strip() result = json.loads(response) - result["preference"] = result.pop("implicit_preference") + for d in result: + d["preference"] = d.pop("implicit_preference") return result except Exception as e: logger.error(f"Error extracting implicit preferences: {e}, return None") @@ -136,20 +137,24 @@ def _process_single_chunk_implicit( if not implicit_pref: return None - vector_info = { - "embedding": self.embedder.embed([implicit_pref["context_summary"]])[0], - } + memories = [] + for pref in implicit_pref: + vector_info = { + "embedding": self.embedder.embed([pref["context_summary"]])[0], + } - extract_info = {**basic_info, **implicit_pref, **vector_info, **info} + extract_info = {**basic_info, **pref, **vector_info, **info} - metadata = PreferenceTextualMemoryMetadata( - type=msg_type, preference_type="implicit_preference", **extract_info - ) - memory = TextualMemoryItem( - id=extract_info["dialog_id"], memory=implicit_pref["context_summary"], metadata=metadata - ) + metadata = PreferenceTextualMemoryMetadata( + type=msg_type, preference_type="implicit_preference", **extract_info + ) + memory = TextualMemoryItem( + id=str(uuid.uuid4()), memory=pref["context_summary"], metadata=metadata + ) - return memory + memories.append(memory) + + return memories def extract( self, diff --git a/src/memos/memories/textual/prefer_text_memory/retrievers.py b/src/memos/memories/textual/prefer_text_memory/retrievers.py index c3aa950e4..1fc4c19f4 100644 --- a/src/memos/memories/textual/prefer_text_memory/retrievers.py +++ b/src/memos/memories/textual/prefer_text_memory/retrievers.py @@ -1,3 +1,5 @@ +import os + from abc import ABC, abstractmethod from typing import Any @@ -34,9 +36,12 @@ def _naive_reranker( self, query: str, prefs_mem: list[TextualMemoryItem], top_k: int, **kwargs: Any ) -> list[TextualMemoryItem]: if self.reranker: - prefs_mem = self.reranker.rerank(query, prefs_mem, top_k) - return [item for item, _ in prefs_mem] - return prefs_mem + prefs_mem_reranked = [] + prefs_mem_tuple = self.reranker.rerank(query, prefs_mem, top_k) + for item, score in prefs_mem_tuple: + item.metadata.score = score + prefs_mem_reranked.append(item) + return prefs_mem_reranked def _original_text_reranker( self, @@ -52,11 +57,22 @@ def _original_text_reranker( prefs_mem_for_reranker = deepcopy(prefs_mem) for pref_mem, pref in zip(prefs_mem_for_reranker, prefs, strict=False): pref_mem.memory = pref_mem.memory + "\n" + pref.original_text - prefs_mem_for_reranker = self.reranker.rerank(query, prefs_mem_for_reranker, top_k) - prefs_mem_for_reranker = [item for item, _ in prefs_mem_for_reranker] + reranked_results = self.reranker.rerank(query, prefs_mem_for_reranker, top_k) + prefs_mem_for_reranker = [item for item, _ in reranked_results] prefs_ids = [item.id for item in prefs_mem_for_reranker] prefs_dict = {item.id: item for item in prefs_mem} - return [prefs_dict[item_id] for item_id in prefs_ids if item_id in prefs_dict] + + # Create mapping 
diff --git a/src/memos/memories/textual/prefer_text_memory/retrievers.py b/src/memos/memories/textual/prefer_text_memory/retrievers.py
index c3aa950e4..1fc4c19f4 100644
--- a/src/memos/memories/textual/prefer_text_memory/retrievers.py
+++ b/src/memos/memories/textual/prefer_text_memory/retrievers.py
@@ -1,3 +1,5 @@
+import os
+
 from abc import ABC, abstractmethod
 from typing import Any
 
@@ -34,9 +36,13 @@ def _naive_reranker(
         self, query: str, prefs_mem: list[TextualMemoryItem], top_k: int, **kwargs: Any
     ) -> list[TextualMemoryItem]:
         if self.reranker:
-            prefs_mem = self.reranker.rerank(query, prefs_mem, top_k)
-            return [item for item, _ in prefs_mem]
-        return prefs_mem
+            prefs_mem_reranked = []
+            prefs_mem_tuple = self.reranker.rerank(query, prefs_mem, top_k)
+            for item, score in prefs_mem_tuple:
+                item.metadata.score = score
+                prefs_mem_reranked.append(item)
+            return prefs_mem_reranked
+        return prefs_mem
 
     def _original_text_reranker(
         self,
@@ -52,11 +57,22 @@ def _original_text_reranker(
             prefs_mem_for_reranker = deepcopy(prefs_mem)
             for pref_mem, pref in zip(prefs_mem_for_reranker, prefs, strict=False):
                 pref_mem.memory = pref_mem.memory + "\n" + pref.original_text
-            prefs_mem_for_reranker = self.reranker.rerank(query, prefs_mem_for_reranker, top_k)
-            prefs_mem_for_reranker = [item for item, _ in prefs_mem_for_reranker]
+            reranked_results = self.reranker.rerank(query, prefs_mem_for_reranker, top_k)
+            prefs_mem_for_reranker = [item for item, _ in reranked_results]
             prefs_ids = [item.id for item in prefs_mem_for_reranker]
             prefs_dict = {item.id: item for item in prefs_mem}
-            return [prefs_dict[item_id] for item_id in prefs_ids if item_id in prefs_dict]
+
+            # Create mapping from id to score from reranked results
+            reranked_scores = {item.id: score for item, score in reranked_results}
+
+            # Assign scores to the original items
+            result_items = []
+            for item_id in prefs_ids:
+                if item_id in prefs_dict:
+                    original_item = prefs_dict[item_id]
+                    original_item.metadata.score = reranked_scores.get(item_id)
+                    result_items.append(original_item)
+            return result_items
         return prefs_mem
 
     def retrieve(
@@ -119,9 +135,6 @@ def retrieve(
             if pref.payload.get("preference", None)
         ]
 
-        # store explicit id and score, use it after reranker
-        explicit_id_scores = {item.id: item.score for item in explicit_prefs}
-
         reranker_map = {
             "naive": self._naive_reranker,
             "original_text": self._original_text_reranker,
@@ -136,7 +149,14 @@ def retrieve(
         # filter explicit mem by score bigger than threshold
         explicit_prefs_mem = [
-            item for item in explicit_prefs_mem if explicit_id_scores.get(item.id, 0) >= 0.0
+            item
+            for item in explicit_prefs_mem
+            if (item.metadata.score or 0.0) >= float(os.getenv("PREFERENCE_SEARCH_THRESHOLD", "0.0"))
+        ]
+        implicit_prefs_mem = [
+            item
+            for item in implicit_prefs_mem
+            if (item.metadata.score or 0.0) >= float(os.getenv("PREFERENCE_SEARCH_THRESHOLD", "0.0"))
         ]
 
         return explicit_prefs_mem + implicit_prefs_mem
diff --git a/src/memos/memories/textual/simple_tree.py b/src/memos/memories/textual/simple_tree.py
index 05e62e3ee..c67271f76 100644
--- a/src/memos/memories/textual/simple_tree.py
+++ b/src/memos/memories/textual/simple_tree.py
@@ -9,6 +9,7 @@
 from memos.memories.textual.tree import TreeTextMemory
 from memos.memories.textual.tree_text_memory.organize.manager import MemoryManager
 from memos.memories.textual.tree_text_memory.retrieve.bm25_util import EnhancedBM25
+from memos.memories.textual.tree_text_memory.retrieve.retrieve_utils import FastTokenizer
 from memos.reranker.base import BaseReranker
 
 
@@ -35,6 +36,7 @@ def __init__(
         config: TreeTextMemoryConfig,
         internet_retriever: None = None,
         is_reorganize: bool = False,
+        tokenizer: FastTokenizer | None = None,
     ):
         """Initialize memory with the given configuration."""
         self.config: TreeTextMemoryConfig = config
@@ -51,6 +53,7 @@ def __init__(
             if self.search_strategy and self.search_strategy.get("bm25", False)
             else None
         )
+        self.tokenizer = tokenizer
         self.reranker = reranker
         self.memory_manager: MemoryManager = memory_manager
         # Create internet retriever if configured
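The filter above gates both preference lists on the reranker score now carried on `item.metadata.score`; the `or 0.0` guard covers items that never passed through a reranker. A small sketch of the threshold semantics (env value is illustrative):

```python
import os

os.environ["PREFERENCE_SEARCH_THRESHOLD"] = "0.35"  # illustrative value
threshold = float(os.getenv("PREFERENCE_SEARCH_THRESHOLD", "0.0"))

scored = [("mem-a", 0.81), ("mem-b", 0.22), ("mem-c", None)]  # (id, metadata.score)
kept = [mid for mid, score in scored if (score or 0.0) >= threshold]
assert kept == ["mem-a"]
```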
diff --git a/src/memos/memories/textual/tree.py b/src/memos/memories/textual/tree.py
index 1b2355bc8..60cc25263 100644
--- a/src/memos/memories/textual/tree.py
+++ b/src/memos/memories/textual/tree.py
@@ -89,6 +89,7 @@ def __init__(self, config: TreeTextMemoryConfig):
             )
         else:
             logger.info("No internet retriever configured")
+        self.tokenizer = None
 
     def add(
         self,
@@ -165,6 +166,7 @@ def search(
         moscube: bool = False,
         search_filter: dict | None = None,
         user_name: str | None = None,
+        **kwargs,
     ) -> list[TextualMemoryItem]:
         """Search for memories based on a query.
         User query -> TaskGoalParser -> MemoryPathResolver ->
@@ -199,6 +201,7 @@ def search(
                 moscube=moscube,
                 search_strategy=self.search_strategy,
                 manual_close_internet=manual_close_internet,
+                tokenizer=self.tokenizer,
             )
         else:
             searcher = Searcher(
@@ -211,9 +214,17 @@ def search(
                 moscube=moscube,
                 search_strategy=self.search_strategy,
                 manual_close_internet=manual_close_internet,
+                tokenizer=self.tokenizer,
             )
         return searcher.search(
-            query, top_k, info, mode, memory_type, search_filter, user_name=user_name
+            query,
+            top_k,
+            info,
+            mode,
+            memory_type,
+            search_filter,
+            user_name=user_name,
+            plugin=kwargs.get("plugin", False),
         )
 
     def get_relevant_subgraph(
diff --git a/src/memos/memories/textual/tree_text_memory/retrieve/recall.py b/src/memos/memories/textual/tree_text_memory/retrieve/recall.py
index 375048900..c5bf6cade 100644
--- a/src/memos/memories/textual/tree_text_memory/retrieve/recall.py
+++ b/src/memos/memories/textual/tree_text_memory/retrieve/recall.py
@@ -143,6 +143,27 @@ def retrieve_from_cube(
 
         return list(combined.values())
 
+    def retrieve_from_mixed(
+        self,
+        top_k: int,
+        memory_scope: str | None = None,
+        query_embedding: list[list[float]] | None = None,
+        search_filter: dict | None = None,
+        user_name: str | None = None,
+        use_fast_graph: bool = False,
+    ) -> list[TextualMemoryItem]:
+        """Vector-only recall used by the simple (plugin) search path."""
+        vector_results = self._vector_recall(
+            query_embedding or [],
+            memory_scope,
+            top_k,
+            search_filter=search_filter,
+            user_name=user_name,
+        )
+        # Merge and deduplicate by ID
+        combined = {item.id: item for item in vector_results}
+        return list(combined.values())
+
     def _graph_recall(
         self, parsed_goal: ParsedTaskGoal, memory_scope: str, user_name: str | None = None, **kwargs
     ) -> list[TextualMemoryItem]:
@@ -270,7 +290,7 @@ def _vector_recall(
         self,
         query_embedding: list[list[float]],
         memory_scope: str,
         top_k: int = 20,
-        max_num: int = 5,
+        max_num: int = 20,
         status: str = "activated",
         cube_name: str | None = None,
         search_filter: dict | None = None,
diff --git a/src/memos/memories/textual/tree_text_memory/retrieve/retrieve_utils.py b/src/memos/memories/textual/tree_text_memory/retrieve/retrieve_utils.py
index 3f2b41a47..824f93b26 100644
--- a/src/memos/memories/textual/tree_text_memory/retrieve/retrieve_utils.py
+++ b/src/memos/memories/textual/tree_text_memory/retrieve/retrieve_utils.py
@@ -3,6 +3,8 @@
 
 from pathlib import Path
 
+import numpy as np
+
 from memos.dependency import require_python_package
 from memos.log import get_logger
 
@@ -376,3 +378,30 @@ def detect_lang(text):
             return "en"
     except Exception:
         return "en"
+
+
+def find_best_unrelated_subgroup(sentences: list, similarity_matrix: list, bar: float = 0.8):
+    """Greedily pick sentences whose pairwise similarity stays at or below `bar`."""
+    assert len(sentences) == len(similarity_matrix)
+
+    num_sentence = len(sentences)
+    selected_sentences = []
+    selected_indices = []
+    for i in range(num_sentence):
+        can_add = True
+        for j in selected_indices:
+            if similarity_matrix[i][j] > bar:
+                can_add = False
+                break
+        if can_add:
+            selected_sentences.append(sentences[i])
+            selected_indices.append(i)
+    return selected_sentences, selected_indices
+
+
+def cosine_similarity_matrix(embeddings: list[list[float]]) -> list[list[float]]:
+    x = np.asarray(embeddings, dtype=float)
+    norms = np.linalg.norm(x, axis=1, keepdims=True)
+    x_normalized = x / norms
+    similarity_matrix = np.dot(x_normalized, x_normalized.T)
+    return similarity_matrix
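`cosine_similarity_matrix` plus `find_best_unrelated_subgroup` implement a greedy diversity filter: walk the candidates in order and keep one only if its similarity to everything already kept stays at or below the bar (0.8 by default). A toy walk-through with hand-made embeddings:

```python
import numpy as np

embeddings = [[1.0, 0.0], [0.99, 0.1], [0.0, 1.0]]  # first two nearly parallel
x = np.asarray(embeddings, dtype=float)
x /= np.linalg.norm(x, axis=1, keepdims=True)
sim = x @ x.T  # cosine similarity matrix

selected: list[int] = []
for i in range(len(embeddings)):
    # greedy: keep i only if it is dissimilar to everything already kept
    if all(sim[i][j] <= 0.8 for j in selected):
        selected.append(i)
assert selected == [0, 2]  # the near-duplicate at index 1 is dropped
```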
diff --git a/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py b/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py
index 933ef5af1..4f5feb9d9 100644
--- a/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py
+++ b/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py
@@ -8,7 +8,10 @@
 from memos.memories.textual.item import SearchedTreeNodeTextualMemoryMetadata, TextualMemoryItem
 from memos.memories.textual.tree_text_memory.retrieve.bm25_util import EnhancedBM25
 from memos.memories.textual.tree_text_memory.retrieve.retrieve_utils import (
+    FastTokenizer,
+    cosine_similarity_matrix,
     detect_lang,
+    find_best_unrelated_subgroup,
     parse_json_result,
 )
 from memos.reranker.base import BaseReranker
@@ -44,6 +47,7 @@ def __init__(
         moscube: bool = False,
         search_strategy: dict | None = None,
         manual_close_internet: bool = True,
+        tokenizer: FastTokenizer | None = None,
     ):
         self.graph_store = graph_store
         self.embedder = embedder
@@ -60,6 +64,7 @@ def __init__(
         self.vec_cot = search_strategy.get("cot", False) if search_strategy else False
         self.use_fast_graph = search_strategy.get("fast_graph", False) if search_strategy else False
         self.manual_close_internet = manual_close_internet
+        self.tokenizer = tokenizer
         self._usage_executor = ContextThreadPoolExecutor(max_workers=4, thread_name_prefix="usage")
 
     @timed
@@ -90,6 +95,7 @@ def retrieve(
             memory_type,
             search_filter,
             user_name,
+            **kwargs,
         )
         return results
 
@@ -99,9 +105,10 @@ def post_retrieve(
         top_k: int,
         user_name: str | None = None,
         info=None,
+        plugin=False,
     ):
         deduped = self._deduplicate_results(retrieved_results)
-        final_results = self._sort_and_trim(deduped, top_k)
+        final_results = self._sort_and_trim(deduped, top_k, plugin)
         self._update_usage_history(final_results, info, user_name)
         return final_results
 
@@ -115,6 +122,7 @@ def search(
         memory_type="All",
         search_filter: dict | None = None,
         user_name: str | None = None,
+        **kwargs,
     ) -> list[TextualMemoryItem]:
         """
         Search for memories based on a query.
@@ -142,21 +150,28 @@ def search(
         else:
             logger.debug(f"[SEARCH] Received info dict: {info}")
 
-        retrieved_results = self.retrieve(
-            query=query,
-            top_k=top_k,
-            info=info,
-            mode=mode,
-            memory_type=memory_type,
-            search_filter=search_filter,
-            user_name=user_name,
-        )
+        if kwargs.get("plugin"):
+            logger.info(f"[SEARCH] Retrieve from plugin: {query}")
+            retrieved_results = self._retrieve_simple(
+                query=query, top_k=top_k, search_filter=search_filter, user_name=user_name
+            )
+        else:
+            retrieved_results = self.retrieve(
+                query=query,
+                top_k=top_k,
+                info=info,
+                mode=mode,
+                memory_type=memory_type,
+                search_filter=search_filter,
+                user_name=user_name,
+            )
 
         final_results = self.post_retrieve(
             retrieved_results=retrieved_results,
             top_k=top_k,
             user_name=user_name,
             info=None,
+            plugin=kwargs.get("plugin", False),
         )
 
         logger.info(f"[SEARCH] Done. Total {len(final_results)} results.")
@@ -235,6 +250,50 @@ def _parse_task(
 
         return parsed_goal, query_embedding, context, query
 
+    @timed
+    def _retrieve_simple(
+        self,
+        query: str,
+        top_k: int,
+        search_filter: dict | None = None,
+        user_name: str | None = None,
+        **kwargs,
+    ):
+        """Retrieve by keywords and embedding, skipping goal parsing."""
+        if self.tokenizer:
+            query_words = self.tokenizer.tokenize_mixed(query)
+        else:
+            query_words = query.strip().split()
+        query_words = [query, *query_words]
+        logger.info(f"[SIMPLESEARCH] Query words: {query_words}")
+        query_embeddings = self.embedder.embed(query_words)
+
+        items = self.graph_retriever.retrieve_from_mixed(
+            top_k=top_k * 2,
+            memory_scope=None,
+            query_embedding=query_embeddings,
+            search_filter=search_filter,
+            user_name=user_name,
+            use_fast_graph=self.use_fast_graph,
+        )
+        logger.info(f"[SIMPLESEARCH] Items count: {len(items)}")
+        if not items:
+            return []
+        documents = [getattr(item, "memory", "") for item in items]
+        documents_embeddings = self.embedder.embed(documents)
+        similarity_matrix = cosine_similarity_matrix(documents_embeddings)
+        _, selected_indices = find_best_unrelated_subgroup(documents, similarity_matrix)
+        selected_items = [items[i] for i in selected_indices]
+        logger.info(
+            f"[SIMPLESEARCH] after unrelated subgroup selection items count: {len(selected_items)}"
+        )
+        return self.reranker.rerank(
+            query=query,
+            query_embedding=query_embeddings[0],
+            graph_results=selected_items,
+            top_k=top_k,
+        )
+
     @timed
     def _retrieve_paths(
         self,
@@ -247,6 +305,7 @@ def _retrieve_paths(
         memory_type,
         search_filter: dict | None = None,
         user_name: str | None = None,
+        **kwargs,
     ):
         """Run A/B/C retrieval paths in parallel"""
         tasks = []
@@ -308,7 +367,6 @@ def _retrieve_paths(
                 "memos_cube01",
             )
         )
-
         results = []
         for t in tasks:
             results.extend(t.result())
@@ -487,12 +545,15 @@ def _deduplicate_results(self, results):
         return list(deduped.values())
 
     @timed
-    def _sort_and_trim(self, results, top_k):
+    def _sort_and_trim(self, results, top_k, plugin=False):
         """Sort results by score and trim to top_k"""
         sorted_results = sorted(results, key=lambda pair: pair[1], reverse=True)[:top_k]
 
         final_items = []
         for item, score in sorted_results:
+            # In plugin mode, drop hits whose rerank score rounds to zero
+            if plugin and round(score, 2) == 0.00:
+                continue
             meta_data = item.metadata.model_dump()
             meta_data["relativity"] = score
             final_items.append(
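Before the embedding round-trip, the simple path widens the query into multiple probes: the full query string first, then its tokens (via `FastTokenizer.tokenize_mixed` when available, else whitespace splitting). A sketch of the fallback expansion:

```python
query = "budget hotels near Shanghai station"
query_words = [query, *query.strip().split()]  # full query first, then its words
print(query_words)
# ['budget hotels near Shanghai station', 'budget', 'hotels', 'near', 'Shanghai', 'station']
# embedder.embed(query_words) yields one vector per probe; index 0 (the full
# query) is reused as the reranker's query embedding further down.
```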
diff --git a/src/memos/templates/prefer_complete_prompt.py b/src/memos/templates/prefer_complete_prompt.py
index 3a468b943..3315e061e 100644
--- a/src/memos/templates/prefer_complete_prompt.py
+++ b/src/memos/templates/prefer_complete_prompt.py
@@ -11,7 +11,8 @@
 Requirements:
 1. Keep only the preferences explicitly mentioned by the user. Do not infer or assume. If the user mentions reasons for their preferences, include those reasons as well.
 2. Output should be a list of entries concise natural language summaries and the corresponding context summary, context summary must contain complete information of the conversation fragment that the preference is mentioned.
-3. If multiple preferences are mentioned within the same topic or domain, you MUST combine them into a single entry, keep each entry information complete.
+3. If multiple preferences are mentioned within the same topic or domain, you MUST combine them into a single entry and keep each entry complete. Preferences on different topics must be split into separate entries.
+4. If no explicit preference can be reasonably extracted, return [].
 
 Conversation:
 {qa_pair}
@@ -23,6 +24,7 @@
     {
         "explicit_preference": "A short natural language summary of the preferences",
         "context_summary": "The corresponding context summary, which is a summary of the corresponding conversation, do not lack any scenario information",
-        "reasoning": "reasoning process to find the explicit preferences"
+        "reasoning": "reasoning process to find the explicit preferences",
+        "topic": "preference topic, which can only belong to one topic or domain, such as: sports, hotel, education, etc."
     },
 ]
 ```
@@ -42,7 +44,8 @@
 要求:
 1. 只保留用户明确提到的偏好,不要推断或假设。如果用户提到了偏好的原因,也要包含这些原因。
 2. 输出应该是一个条目列表,包含简洁的自然语言摘要和相应的上下文摘要,上下文摘要必须包含提到偏好的对话片段的完整信息。
-3. 如果在同一主题或领域内提到了多个偏好,你必须将它们合并为一个条目,保持每个条目信息完整。
+3. 如果在同一主题或领域内提到了多个偏好,你必须将它们合并为一个条目,保持每个条目信息完整。不同话题的偏好要分为多个条目。
+4. 如果没有可以合理提取的显式偏好,返回[]。
 
 对话:
 {qa_pair}
@@ -51,9 +54,10 @@
 ```json
 [
     {
-        "explicit_preference": "偏好的简短自然语言摘要",
+        "explicit_preference": "偏好的简短自然语言摘要,需要描述为“用户偏好于/不喜欢/想要/不想要/偏好什么”",
         "context_summary": "对应的上下文摘要,即对应对话的摘要,不要遗漏任何场景信息",
-        "reasoning": "寻找显式偏好的推理过程"
+        "reasoning": "寻找显式偏好的推理过程",
+        "topic": "偏好所属的主题或领域,例如:体育、酒店、教育等,topic只能属于一个主题或领域"
     },
 ]
 ```
@@ -79,18 +83,22 @@
 2. Inferred implicit preferences must not conflict with explicit preferences.
 3. For implicit_preference: only output the preference statement itself; do not include any extra explanation, reasoning, or confidence information. Put all reasoning and explanation in the reasoning field.
 4. In the reasoning field, explicitly explain the underlying logic and hidden motivations you identified.
-5. If no implicit preference can be reasonably inferred, leave the implicit_preference field empty (do not output anything else).
+5. Preferences on different topics must be split into separate entries.
+6. If no implicit preference can be reasonably inferred, return [].
 
 Conversation:
 {qa_pair}
 
 Output format:
 ```json
-{
-    "implicit_preference": "A concise natural language statement of the implicit preferences reasonably inferred from the conversation, or an empty string",
-    "context_summary": "The corresponding context summary, which is a summary of the corresponding conversation, do not lack any scenario information",
-    "reasoning": "Explain the underlying logic, hidden motivations, and behavioral patterns that led to this inference"
-}
+[
+    {
+        "implicit_preference": "A concise natural language statement of the implicit preferences reasonably inferred from the conversation, or an empty string",
+        "context_summary": "The corresponding context summary, which is a summary of the corresponding conversation, do not lack any scenario information",
+        "reasoning": "Explain the underlying logic, hidden motivations, and behavioral patterns that led to this inference",
+        "topic": "preference topic, which can only belong to one topic or domain, such as: sports, hotel, education, etc."
+    }
+]
 ```
 Don't output anything except the JSON.
 """
@@ -115,18 +123,22 @@
 2. 推断的隐式偏好不得与显式偏好冲突。
 3. 对于 implicit_preference:仅输出偏好陈述本身;不要包含任何额外的解释、推理或置信度信息。将所有推理和解释放在 reasoning 字段中。
 4. 在 reasoning 字段中,明确解释你识别出的底层逻辑和隐藏动机。
-5. 如果无法合理推断出隐式偏好,则将 implicit_preference 字段留空(不要输出其他任何内容)。
+5. 如果在同一主题或领域内提到了多个偏好,你必须将它们合并为一个条目,保持每个条目信息完整。不同话题的偏好要分为多个条目。
+6. 如果没有可以合理推断的隐式偏好,返回[]。
 
 对话:
 {qa_pair}
 
 输出格式:
 ```json
-{
-    "implicit_preference": "从对话中合理推断出的隐式偏好的简洁自然语言陈述,或空字符串",
-    "context_summary": "对应的上下文摘要,即对应对话的摘要,不要遗漏任何场景信息",
-    "reasoning": "解释推断出该偏好的底层逻辑、隐藏动机和行为模式"
-}
+[
+    {
+        "implicit_preference": "从对话中合理推断出的隐式偏好的简洁自然语言陈述,或空字符串",
+        "context_summary": "对应的上下文摘要,即对应对话的摘要,不要遗漏任何场景信息",
+        "reasoning": "解释推断出该偏好的底层逻辑、隐藏动机和行为模式",
+        "topic": "偏好所属的主题或领域,例如:体育、酒店、教育等,topic只能属于一个主题或领域"
+    }
+]
 ```
 除JSON外不要输出任何其他内容。
 """
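A quick round-trip check that the new list-shaped output survives the extractor's fence stripping and key rename (the sample response is illustrative):

```python
import json

response = '```json\n[{"implicit_preference": "Prefers quiet hotels", "context_summary": "...", "reasoning": "...", "topic": "hotel"}]\n```'
cleaned = response.strip().replace("```json", "").replace("```", "").strip()
entries = json.loads(cleaned)
for d in entries:
    d["preference"] = d.pop("implicit_preference")  # same rename as extract_implicit_preference
assert entries[0]["topic"] == "hotel" and "preference" in entries[0]
```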