@@ -374,7 +374,7 @@ async def _redis_search(
vector_field_name=self.vector_field_name,
text_scorer=text_scorer,
filter_expression=combined_filter,
alpha=alpha,
linear_alpha=alpha,
dtype=self.redis_vectorizer.dtype,
num_results=num_results,
return_fields=return_fields,
@@ -86,35 +86,38 @@ async def main() -> None:
context_providers=[history_provider],
)

# Use a fixed session ID so Redis history persists across separate program runs
session = agent.create_session(session_id="redis-demo-session")
Comment on lines +89 to +90
Should this example work without session? If you create and re-use the same session, the history should be already preserved. I think the main point of using context provider is the ability to inject the same context across multiple sessions.
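A minimal sketch of the alternative the reviewer describes, assuming the `agent`, `create_session`, and `run(..., session=...)` API used elsewhere in this diff; the helper function name and session IDs are hypothetical and not part of the PR:

```python
# Hypothetical sketch (not part of the PR): the context provider's value is
# cross-session recall, not in-session history. Assumes `agent` was built with
# a RedisContextProvider as in the sample above.
async def cross_session_recall_demo(agent) -> None:
    # Session A: the provider persists this preference to Redis.
    session_a = agent.create_session(session_id="demo-session-a")
    await agent.run("Remember that I enjoy gumbo", session=session_a)

    # Session B: a brand-new session has no chat history, but the provider
    # injects the stored memory, so the agent can still answer.
    session_b = agent.create_session(session_id="demo-session-b")
    result = await agent.run("What do I enjoy?", session=session_b)
    print("Agent (new session):", result)
```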


# Conversation
query = "Remember that I enjoy gumbo"
result = await agent.run(query)
result = await agent.run(query, session=session)
print("User: ", query)
print("Agent: ", result)

# Ask the agent to recall the stored preference; it should retrieve from memory
query = "What do I enjoy?"
result = await agent.run(query)
result = await agent.run(query, session=session)
print("User: ", query)
print("Agent: ", result)

query = "What did I say to you just now?"
result = await agent.run(query)
result = await agent.run(query, session=session)
print("User: ", query)
print("Agent: ", result)

query = "Remember that I have a meeting at 3pm tomorrow"
result = await agent.run(query)
result = await agent.run(query, session=session)
print("User: ", query)
print("Agent: ", result)

query = "Tulips are red"
result = await agent.run(query)
result = await agent.run(query, session=session)
print("User: ", query)
print("Agent: ", result)

query = "What was the first thing I said to you this conversation?"
result = await agent.run(query)
result = await agent.run(query, session=session)
print("User: ", query)
print("Agent: ", result)

30 changes: 17 additions & 13 deletions python/samples/02-agents/context_providers/redis/redis_basics.py
@@ -37,6 +37,10 @@
from redisvl.extensions.cache.embeddings import EmbeddingsCache
from redisvl.utils.vectorize import OpenAITextVectorizer

# Default Redis URL for local Redis Stack (docker run -d -p 6379:6379 redis/redis-stack:latest).
# Override via the REDIS_URL environment variable for remote or authenticated instances.
REDIS_URL = os.getenv("REDIS_URL", "redis://localhost:6379")


# NOTE: approval_mode="never_require" is for sample brevity.
# Use "always_require" in production; see samples/02-agents/tools/function_tool_with_approval.py
@@ -121,14 +125,14 @@ async def main() -> None:
vectorizer = OpenAITextVectorizer(
model="text-embedding-ada-002",
api_config={"api_key": os.getenv("OPENAI_API_KEY")},
cache=EmbeddingsCache(name="openai_embeddings_cache", redis_url="redis://localhost:6379"),
cache=EmbeddingsCache(name="openai_embeddings_cache", redis_url=REDIS_URL),
)
# The provider manages persistence and retrieval. application_id/agent_id/user_id
# scope data for multi-tenant separation; thread_id (set later) narrows to a
# specific conversation.
provider = RedisContextProvider(
source_id="redis_context",
redis_url="redis://localhost:6379",
redis_url=REDIS_URL,
index_name="redis_basics",
application_id="matrix_of_kermits",
agent_id="agent_kermit",
@@ -151,16 +155,14 @@ async def main() -> None:
from agent_framework import AgentSession, SessionContext

session = AgentSession(session_id="runA")
context = SessionContext()
context.extend_messages("input", messages)
context = SessionContext(input_messages=messages)
state = session.state

# Store messages via after_run
await provider.after_run(agent=None, session=session, context=context, state=state)

# Retrieve relevant memories via before_run
query_context = SessionContext()
query_context.extend_messages("input", [Message("system", ["B: Assistant Message"])])
query_context = SessionContext(input_messages=[Message("system", ["B: Assistant Message"])])
await provider.before_run(agent=None, session=session, context=query_context, state=state)

# Inspect retrieved memories that would be injected into instructions
@@ -179,12 +181,12 @@ async def main() -> None:
vectorizer = OpenAITextVectorizer(
model="text-embedding-ada-002",
api_config={"api_key": os.getenv("OPENAI_API_KEY")},
cache=EmbeddingsCache(name="openai_embeddings_cache", redis_url="redis://localhost:6379"),
cache=EmbeddingsCache(name="openai_embeddings_cache", redis_url=REDIS_URL),
)
# Recreate a clean index so the next scenario starts fresh
provider = RedisContextProvider(
source_id="redis_context",
redis_url="redis://localhost:6379",
redis_url=REDIS_URL,
index_name="redis_basics_2",
prefix="context_2",
application_id="matrix_of_kermits",
@@ -211,14 +213,15 @@ async def main() -> None:
)

# Teach a user preference; the agent writes this to the provider's memory
session = agent.create_session(session_id="basics-session-2")
query = "Remember that I enjoy glugenflorgle"
result = await agent.run(query)
result = await agent.run(query, session=session)
print("User: ", query)
print("Agent: ", result)

# Ask the agent to recall the stored preference; it should retrieve from memory
query = "What do I enjoy?"
result = await agent.run(query)
result = await agent.run(query, session=session)
print("User: ", query)
print("Agent: ", result)

@@ -232,7 +235,7 @@ async def main() -> None:
# Text-only provider (full-text search only). Omits vectorizer and related params.
provider = RedisContextProvider(
source_id="redis_context",
redis_url="redis://localhost:6379",
redis_url=REDIS_URL,
index_name="redis_basics_3",
prefix="context_3",
application_id="matrix_of_kermits",
@@ -253,13 +256,14 @@ async def main() -> None:
context_providers=[provider],
)
# Invoke the tool; outputs become part of memory/context
session = agent.create_session(session_id="basics-session-3")
query = "Are there any flights from new york city (jfk) to la? Give me details"
result = await agent.run(query)
result = await agent.run(query, session=session)
print("User: ", query)
print("Agent: ", result)
# Verify the agent can recall tool-derived context
query = "Which flight did I ask about?"
result = await agent.run(query)
result = await agent.run(query, session=session)
print("User: ", query)
print("Agent: ", result)

@@ -23,6 +23,10 @@
from redisvl.extensions.cache.embeddings import EmbeddingsCache
from redisvl.utils.vectorize import OpenAITextVectorizer

# Default Redis URL for local Redis Stack (docker run -d -p 6379:6379 redis/redis-stack:latest).
# Override via the REDIS_URL environment variable for remote or authenticated instances.
REDIS_URL = os.getenv("REDIS_URL", "redis://localhost:6379")


async def main() -> None:
"""Walk through provider and chat message store usage.
@@ -34,12 +38,12 @@ async def main() -> None:
vectorizer = OpenAITextVectorizer(
model="text-embedding-ada-002",
api_config={"api_key": os.getenv("OPENAI_API_KEY")},
cache=EmbeddingsCache(name="openai_embeddings_cache", redis_url="redis://localhost:6379"),
cache=EmbeddingsCache(name="openai_embeddings_cache", redis_url=REDIS_URL),
)

provider = RedisContextProvider(
source_id="redis_context",
redis_url="redis://localhost:6379",
redis_url=REDIS_URL,
index_name="redis_conversation",
prefix="redis_conversation",
application_id="matrix_of_kermits",
@@ -69,35 +73,38 @@ async def main() -> None:
context_providers=[provider],
)

# Create a session so the provider can scope storage/retrieval to this conversation
session = agent.create_session(session_id="redis-conversation-session")

# Teach a user preference; the agent writes this to the provider's memory
query = "Remember that I enjoy gumbo"
result = await agent.run(query)
result = await agent.run(query, session=session)
print("User: ", query)
print("Agent: ", result)

# Ask the agent to recall the stored preference; it should retrieve from memory
query = "What do I enjoy?"
result = await agent.run(query)
result = await agent.run(query, session=session)
print("User: ", query)
print("Agent: ", result)

query = "What did I say to you just now?"
result = await agent.run(query)
result = await agent.run(query, session=session)
print("User: ", query)
print("Agent: ", result)

query = "Remember that I have a meeting at 3pm tomorro"
result = await agent.run(query)
query = "Remember that I have a meeting at 3pm tomorrow"
result = await agent.run(query, session=session)
print("User: ", query)
print("Agent: ", result)

query = "Tulips are red"
result = await agent.run(query)
result = await agent.run(query, session=session)
print("User: ", query)
print("Agent: ", result)

query = "What was the first thing I said to you this conversation?"
result = await agent.run(query)
result = await agent.run(query, session=session)
print("User: ", query)
print("Agent: ", result)
# Drop / delete the provider index in Redis
31 changes: 18 additions & 13 deletions python/samples/02-agents/context_providers/redis/redis_sessions.py
@@ -35,6 +35,10 @@
from redisvl.extensions.cache.embeddings import EmbeddingsCache
from redisvl.utils.vectorize import OpenAITextVectorizer

# Default Redis URL for local Redis Stack (docker run -d -p 6379:6379 redis/redis-stack:latest).
# Override via the REDIS_URL environment variable for remote or authenticated instances.
REDIS_URL = os.getenv("REDIS_URL", "redis://localhost:6379")

# Please set OPENAI_API_KEY to use the OpenAI vectorizer.
# For chat responses, also set AZURE_AI_PROJECT_ENDPOINT and AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME.

@@ -57,12 +61,11 @@ async def example_global_thread_scope() -> None:

provider = RedisContextProvider(
source_id="redis_context",
redis_url="redis://localhost:6379",
redis_url=REDIS_URL,
index_name="redis_threads_global",
application_id="threads_demo_app",
agent_id="threads_demo_agent",
user_id="threads_demo_user",
scope_to_per_operation_thread_id=False, # Share memories across all sessions
)

agent = client.as_agent(
Expand All @@ -76,9 +79,10 @@ async def example_global_thread_scope() -> None:
)

# Store a preference in the global scope
session = agent.create_session(session_id="global-scope-session")
query = "Remember that I prefer technical responses with code examples when discussing programming."
print(f"User: {query}")
result = await agent.run(query)
result = await agent.run(query, session=session)
print(f"Agent: {result}\n")

# Create a new session - memories should still be accessible due to global scope
@@ -106,19 +110,18 @@ async def example_per_operation_thread_scope() -> None:
vectorizer = OpenAITextVectorizer(
model="text-embedding-ada-002",
api_config={"api_key": os.getenv("OPENAI_API_KEY")},
cache=EmbeddingsCache(name="openai_embeddings_cache", redis_url="redis://localhost:6379"),
cache=EmbeddingsCache(name="openai_embeddings_cache", redis_url=REDIS_URL),
)

provider = RedisContextProvider(
source_id="redis_context",
redis_url="redis://localhost:6379",
redis_url=REDIS_URL,
index_name="redis_threads_dynamic",
# overwrite_redis_index=True,
# drop_redis_index=True,
application_id="threads_demo_app",
agent_id="threads_demo_agent",
user_id="threads_demo_user",
scope_to_per_operation_thread_id=True, # Isolate memories per session
redis_vectorizer=vectorizer,
vector_field_name="vector",
vector_algorithm="hnsw",
@@ -172,12 +175,12 @@ async def example_multiple_agents() -> None:
vectorizer = OpenAITextVectorizer(
model="text-embedding-ada-002",
api_config={"api_key": os.getenv("OPENAI_API_KEY")},
cache=EmbeddingsCache(name="openai_embeddings_cache", redis_url="redis://localhost:6379"),
cache=EmbeddingsCache(name="openai_embeddings_cache", redis_url=REDIS_URL),
)

personal_provider = RedisContextProvider(
source_id="redis_context",
redis_url="redis://localhost:6379",
redis_url=REDIS_URL,
index_name="redis_threads_agents",
application_id="threads_demo_app",
agent_id="agent_personal",
@@ -196,7 +199,7 @@ async def example_multiple_agents() -> None:

work_provider = RedisContextProvider(
source_id="redis_context",
redis_url="redis://localhost:6379",
redis_url=REDIS_URL,
index_name="redis_threads_agents",
application_id="threads_demo_app",
agent_id="agent_work",
@@ -214,25 +217,27 @@ async def example_multiple_agents() -> None:
)

# Store personal information
personal_session = personal_agent.create_session(session_id="personal-session")
query = "Remember that I like to exercise at 6 AM and prefer outdoor activities."
print(f"User to Personal Agent: {query}")
result = await personal_agent.run(query)
result = await personal_agent.run(query, session=personal_session)
print(f"Personal Agent: {result}\n")

# Store work information
work_session = work_agent.create_session(session_id="work-session")
query = "Remember that I have team meetings every Tuesday at 2 PM."
print(f"User to Work Agent: {query}")
result = await work_agent.run(query)
result = await work_agent.run(query, session=work_session)
print(f"Work Agent: {result}\n")

# Test memory isolation
query = "What do you know about my schedule?"
print(f"User to Personal Agent: {query}")
result = await personal_agent.run(query)
result = await personal_agent.run(query, session=personal_session)
print(f"Personal Agent: {result}\n")

print(f"User to Work Agent: {query}")
result = await work_agent.run(query)
result = await work_agent.run(query, session=work_session)
print(f"Work Agent: {result}\n")

# Clean up the Redis index (shared)