diff --git a/python/packages/azurefunctions/tests/test_utils.py b/python/packages/azurefunctions/tests/test_func_utils.py similarity index 100% rename from python/packages/azurefunctions/tests/test_utils.py rename to python/packages/azurefunctions/tests/test_func_utils.py diff --git a/python/packages/core/agent_framework/openai/_responses_client.py b/python/packages/core/agent_framework/openai/_responses_client.py index bdda41a173..c9856d5105 100644 --- a/python/packages/core/agent_framework/openai/_responses_client.py +++ b/python/packages/core/agent_framework/openai/_responses_client.py @@ -788,6 +788,9 @@ async def _prepare_options( request_input = self._prepare_messages_for_openai(messages) if not request_input: raise ServiceInvalidRequestError("Messages are required for chat completions") + + conversation_id = self._get_current_conversation_id(options, **kwargs) + run_options["input"] = request_input # model id @@ -911,8 +914,11 @@ def _prepare_message_for_openai( for content in message.contents: match content.type: case "text_reasoning": - # Don't send reasoning content back to model - continue + # Reasoning items must be sent back as top-level input items + # for reasoning models that require them alongside function_calls + reasoning = self._prepare_content_for_openai(message.role, content, call_id_to_id) # type: ignore[arg-type] + if reasoning: + all_messages.append(reasoning) case "function_result": new_args: dict[str, Any] = {} new_args.update(self._prepare_content_for_openai(message.role, content, call_id_to_id)) # type: ignore[arg-type] @@ -967,6 +973,8 @@ def _prepare_content_for_openai( } props: dict[str, Any] | None = getattr(content, "additional_properties", None) if props: + if reasoning_id := props.get("reasoning_id"): + ret["id"] = reasoning_id if status := props.get("status"): ret["status"] = status if reasoning_text := props.get("reasoning_text"): @@ -1184,22 +1192,29 @@ def _parse_response_from_openai( ) ) case "reasoning": # 
ResponseOutputReasoning + reasoning_id = getattr(item, "id", None) if hasattr(item, "content") and item.content: for index, reasoning_content in enumerate(item.content): - additional_properties = None + additional_properties: dict[str, Any] = {} + if reasoning_id: + additional_properties["reasoning_id"] = reasoning_id if hasattr(item, "summary") and item.summary and index < len(item.summary): - additional_properties = {"summary": item.summary[index]} + additional_properties["summary"] = item.summary[index] contents.append( Content.from_text_reasoning( text=reasoning_content.text, raw_representation=reasoning_content, - additional_properties=additional_properties, + additional_properties=additional_properties or None, ) ) if hasattr(item, "summary") and item.summary: for summary in item.summary: contents.append( - Content.from_text_reasoning(text=summary.text, raw_representation=summary) # type: ignore[arg-type] + Content.from_text_reasoning( + text=summary.text, + raw_representation=summary, # type: ignore[arg-type] + additional_properties={"reasoning_id": reasoning_id} if reasoning_id else None, + ) ) case "code_interpreter_call": # ResponseOutputCodeInterpreterCall call_id = getattr(item, "call_id", None) or getattr(item, "id", None) @@ -1413,16 +1428,40 @@ def _parse_chunk_from_openai( contents.append(Content.from_text(text=event.delta, raw_representation=event)) metadata.update(self._get_metadata_from_response(event)) case "response.reasoning_text.delta": - contents.append(Content.from_text_reasoning(text=event.delta, raw_representation=event)) + contents.append( + Content.from_text_reasoning( + text=event.delta, + raw_representation=event, + additional_properties={"reasoning_id": event.item_id}, + ) + ) metadata.update(self._get_metadata_from_response(event)) case "response.reasoning_text.done": - contents.append(Content.from_text_reasoning(text=event.text, raw_representation=event)) + contents.append( + Content.from_text_reasoning( + text=event.text, + 
raw_representation=event, + additional_properties={"reasoning_id": event.item_id}, + ) + ) metadata.update(self._get_metadata_from_response(event)) case "response.reasoning_summary_text.delta": - contents.append(Content.from_text_reasoning(text=event.delta, raw_representation=event)) + contents.append( + Content.from_text_reasoning( + text=event.delta, + raw_representation=event, + additional_properties={"reasoning_id": event.item_id}, + ) + ) metadata.update(self._get_metadata_from_response(event)) case "response.reasoning_summary_text.done": - contents.append(Content.from_text_reasoning(text=event.text, raw_representation=event)) + contents.append( + Content.from_text_reasoning( + text=event.text, + raw_representation=event, + additional_properties={"reasoning_id": event.item_id}, + ) + ) metadata.update(self._get_metadata_from_response(event)) case "response.code_interpreter_call_code.delta": call_id = getattr(event, "call_id", None) or getattr(event, "id", None) or event.item_id @@ -1593,20 +1632,23 @@ def _parse_chunk_from_openai( ) ) case "reasoning": # ResponseOutputReasoning + reasoning_id = getattr(event_item, "id", None) if hasattr(event_item, "content") and event_item.content: for index, reasoning_content in enumerate(event_item.content): - additional_properties = None + additional_properties: dict[str, Any] = {} + if reasoning_id: + additional_properties["reasoning_id"] = reasoning_id if ( hasattr(event_item, "summary") and event_item.summary and index < len(event_item.summary) ): - additional_properties = {"summary": event_item.summary[index]} + additional_properties["summary"] = event_item.summary[index] contents.append( Content.from_text_reasoning( text=reasoning_content.text, raw_representation=reasoning_content, - additional_properties=additional_properties, + additional_properties=additional_properties or None, ) ) case _: diff --git a/python/packages/core/tests/azure/test_azure_assistants_client.py 
b/python/packages/core/tests/azure/test_azure_assistants_client.py index f2c6a7701d..c87419216b 100644 --- a/python/packages/core/tests/azure/test_azure_assistants_client.py +++ b/python/packages/core/tests/azure/test_azure_assistants_client.py @@ -129,9 +129,7 @@ def test_azure_assistants_client_init_validation_fail() -> None: def test_azure_assistants_client_init_missing_deployment_name(azure_openai_unit_test_env: dict[str, str]) -> None: """Test AzureOpenAIAssistantsClient initialization with missing deployment name.""" with pytest.raises(ServiceInitializationError): - AzureOpenAIAssistantsClient( - api_key=azure_openai_unit_test_env.get("AZURE_OPENAI_API_KEY", "test-key") - ) + AzureOpenAIAssistantsClient(api_key=azure_openai_unit_test_env.get("AZURE_OPENAI_API_KEY", "test-key")) def test_azure_assistants_client_init_with_default_headers(azure_openai_unit_test_env: dict[str, str]) -> None: diff --git a/python/packages/core/tests/azure/test_azure_chat_client.py b/python/packages/core/tests/azure/test_azure_chat_client.py index 091b875a13..30a1b9adfd 100644 --- a/python/packages/core/tests/azure/test_azure_chat_client.py +++ b/python/packages/core/tests/azure/test_azure_chat_client.py @@ -94,15 +94,13 @@ def test_init_endpoint(azure_openai_unit_test_env: dict[str, str]) -> None: @pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"]], indirect=True) def test_init_with_empty_deployment_name(azure_openai_unit_test_env: dict[str, str]) -> None: with pytest.raises(ServiceInitializationError): - AzureOpenAIChatClient( - ) + AzureOpenAIChatClient() @pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_BASE_URL"]], indirect=True) def test_init_with_empty_endpoint_and_base_url(azure_openai_unit_test_env: dict[str, str]) -> None: with pytest.raises(ServiceInitializationError): - AzureOpenAIChatClient( - ) + AzureOpenAIChatClient() @pytest.mark.parametrize("override_env_param_dict", [{"AZURE_OPENAI_ENDPOINT": 
"http://test.com"}], indirect=True) diff --git a/python/packages/core/tests/azure/test_azure_responses_client.py b/python/packages/core/tests/azure/test_azure_responses_client.py index 106aca40b1..c95deb02cc 100644 --- a/python/packages/core/tests/azure/test_azure_responses_client.py +++ b/python/packages/core/tests/azure/test_azure_responses_client.py @@ -1,6 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. import json +import logging import os from typing import Annotated, Any from unittest.mock import MagicMock @@ -30,6 +31,8 @@ else "Integration tests are disabled.", ) +logger = logging.getLogger(__name__) + class OutputStruct(BaseModel): """A structured output for testing purposes.""" @@ -111,8 +114,7 @@ def test_init_with_default_header(azure_openai_unit_test_env: dict[str, str]) -> @pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME"]], indirect=True) def test_init_with_empty_model_id(azure_openai_unit_test_env: dict[str, str]) -> None: with pytest.raises(ServiceInitializationError): - AzureOpenAIResponsesClient( - ) + AzureOpenAIResponsesClient() def test_init_with_project_client(azure_openai_unit_test_env: dict[str, str]) -> None: diff --git a/python/packages/core/tests/core/test_function_invocation_logic.py b/python/packages/core/tests/core/test_function_invocation_logic.py index 2e7601f787..3f4218813d 100644 --- a/python/packages/core/tests/core/test_function_invocation_logic.py +++ b/python/packages/core/tests/core/test_function_invocation_logic.py @@ -2700,3 +2700,74 @@ def test_func(arg1: str) -> str: assert conversation_ids_received[1] == "stream_conv_after_first", ( "streaming: conversation_id should be updated in options after receiving new conversation_id from API" ) + + +async def test_streaming_function_calling_response_includes_reasoning_and_tool_results( + chat_client_base: SupportsChatGetResponse, +): + """Test that the finalized streaming response includes reasoning, function_call, + function_result, and 
final text in its messages. + + This is critical for workflow chaining: when one agent's response is passed as + input to the next agent, the conversation must include all items (reasoning, + function_call, function_call_output) so the API can validate the history. + """ + + @tool(name="search", approval_mode="never_require") + def search_func(query: str) -> str: + return f"Found results for {query}" + + chat_client_base.streaming_responses = [ + [ + # First response: reasoning + function_call + ChatResponseUpdate( + contents=[ + Content.from_text_reasoning( + text="Let me search for that", + additional_properties={"reasoning_id": "rs_test123", "status": "completed"}, + ) + ], + role="assistant", + ), + ChatResponseUpdate( + contents=[ + Content.from_function_call( + call_id="call_1", + name="search", + arguments='{"query": "test"}', + additional_properties={"fc_id": "fc_test456"}, + ) + ], + role="assistant", + ), + ], + [ + # Second response: final text + ChatResponseUpdate( + contents=[Content.from_text(text="Here are the results")], + role="assistant", + ), + ], + ] + + stream = chat_client_base.get_response( + "search for test", options={"tool_choice": "auto", "tools": [search_func]}, stream=True + ) + + updates = [] + async for update in stream: + updates.append(update) + response = await stream.get_final_response() + + # Verify all content types are in the response messages + all_content_types = [c.type for msg in response.messages for c in msg.contents] + assert "text_reasoning" in all_content_types, "Reasoning must be preserved in response messages" + assert "function_call" in all_content_types, "Function call must be preserved in response messages" + assert "function_result" in all_content_types, "Function result must be in response messages for chaining" + assert "text" in all_content_types, "Final text must be in response messages" + + # Verify reasoning has the reasoning_id preserved + reasoning_contents = [c for msg in response.messages for c in 
msg.contents if c.type == "text_reasoning"] + assert len(reasoning_contents) >= 1 + assert reasoning_contents[0].additional_properties is not None + assert reasoning_contents[0].additional_properties.get("reasoning_id") == "rs_test123" diff --git a/python/packages/core/tests/openai/test_openai_assistants_client.py b/python/packages/core/tests/openai/test_openai_assistants_client.py index fa07d8f1a1..7a5ed6c059 100644 --- a/python/packages/core/tests/openai/test_openai_assistants_client.py +++ b/python/packages/core/tests/openai/test_openai_assistants_client.py @@ -154,9 +154,7 @@ def test_init_validation_fail() -> None: def test_init_missing_model_id(openai_unit_test_env: dict[str, str]) -> None: """Test OpenAIAssistantsClient initialization with missing model ID.""" with pytest.raises(ServiceInitializationError): - OpenAIAssistantsClient( - api_key=openai_unit_test_env.get("OPENAI_API_KEY", "test-key") - ) + OpenAIAssistantsClient(api_key=openai_unit_test_env.get("OPENAI_API_KEY", "test-key")) @pytest.mark.parametrize("exclude_list", [["OPENAI_API_KEY"]], indirect=True) diff --git a/python/packages/core/tests/openai/test_openai_chat_client.py b/python/packages/core/tests/openai/test_openai_chat_client.py index 5258a2030f..ea4d2cc27b 100644 --- a/python/packages/core/tests/openai/test_openai_chat_client.py +++ b/python/packages/core/tests/openai/test_openai_chat_client.py @@ -97,8 +97,7 @@ def test_init_base_url_from_settings_env() -> None: @pytest.mark.parametrize("exclude_list", [["OPENAI_CHAT_MODEL_ID"]], indirect=True) def test_init_with_empty_model_id(openai_unit_test_env: dict[str, str]) -> None: with pytest.raises(ServiceInitializationError): - OpenAIChatClient( - ) + OpenAIChatClient() @pytest.mark.parametrize("exclude_list", [["OPENAI_API_KEY"]], indirect=True) diff --git a/python/packages/core/tests/openai/test_openai_responses_client.py b/python/packages/core/tests/openai/test_openai_responses_client.py index 3c5beed6af..d0100f015a 100644 --- 
a/python/packages/core/tests/openai/test_openai_responses_client.py +++ b/python/packages/core/tests/openai/test_openai_responses_client.py @@ -139,8 +139,7 @@ def test_init_with_default_header(openai_unit_test_env: dict[str, str]) -> None: @pytest.mark.parametrize("exclude_list", [["OPENAI_RESPONSES_MODEL_ID"]], indirect=True) def test_init_with_empty_model_id(openai_unit_test_env: dict[str, str]) -> None: with pytest.raises(ServiceInitializationError): - OpenAIResponsesClient( - ) + OpenAIResponsesClient() @pytest.mark.parametrize("exclude_list", [["OPENAI_API_KEY"]], indirect=True) @@ -816,7 +815,101 @@ def test_prepare_message_for_openai_with_function_approval_response() -> None: assert prepared_message["approve"] is True -def test_chat_message_with_error_content() -> None: +def test_prepare_message_for_openai_includes_reasoning_with_function_call() -> None: + """Test _prepare_message_for_openai includes reasoning items alongside function_calls. + + Reasoning models require reasoning items to be present in the input when + function_call items are included. Stripping reasoning causes a 400 error: + "function_call was provided without its required reasoning item". 
+ """ + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + + reasoning = Content.from_text_reasoning( + text="Let me analyze the request", + additional_properties={"status": "completed", "reasoning_id": "rs_abc123"}, + ) + function_call = Content.from_function_call( + call_id="call_123", + name="search_hotels", + arguments='{"city": "Paris"}', + ) + + message = Message(role="assistant", contents=[reasoning, function_call]) + call_id_to_id: dict[str, str] = {} + + result = client._prepare_message_for_openai(message, call_id_to_id) + + # Both reasoning and function_call should be present as top-level items + types = [item["type"] for item in result] + assert "reasoning" in types, "Reasoning items must be included for reasoning models" + assert "function_call" in types + + reasoning_item = next(item for item in result if item["type"] == "reasoning") + assert reasoning_item["summary"]["text"] == "Let me analyze the request" + assert reasoning_item["id"] == "rs_abc123", "Reasoning id must be preserved for the API" + + +def test_prepare_messages_for_openai_full_conversation_with_reasoning() -> None: + """Test _prepare_messages_for_openai correctly serializes a full conversation + that includes reasoning + function_call + function_result + final text. + + This simulates the conversation history passed between agents in a workflow. + The API requires reasoning items alongside function_calls. 
+ """ + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + + messages = [ + Message(role="user", contents=[Content.from_text(text="search for hotels")]), + Message( + role="assistant", + contents=[ + Content.from_text_reasoning( + text="I need to search for hotels", + additional_properties={"reasoning_id": "rs_test123", "status": "completed"}, + ), + Content.from_function_call( + call_id="call_1", + name="search_hotels", + arguments='{"city": "Paris"}', + additional_properties={"fc_id": "fc_test456"}, + ), + ], + ), + Message( + role="tool", + contents=[ + Content.from_function_result( + call_id="call_1", + result="Found 3 hotels in Paris", + ), + ], + ), + Message(role="assistant", contents=[Content.from_text(text="I found hotels for you")]), + ] + + result = client._prepare_messages_for_openai(messages) + + types = [item.get("type") for item in result] + assert "message" in types, "User/assistant messages should be present" + assert "reasoning" in types, "Reasoning items must be present" + assert "function_call" in types, "Function call items must be present" + assert "function_call_output" in types, "Function call output must be present" + + # Verify reasoning has id + reasoning_items = [item for item in result if item.get("type") == "reasoning"] + assert reasoning_items[0]["id"] == "rs_test123" + + # Verify function_call has id + fc_items = [item for item in result if item.get("type") == "function_call"] + assert fc_items[0]["id"] == "fc_test456" + + # Verify correct ordering: reasoning before function_call + reasoning_idx = types.index("reasoning") + fc_idx = types.index("function_call") + assert reasoning_idx < fc_idx, "Reasoning must come before function_call" + + +def test_prepare_message_for_openai_filters_error_content() -> None: """Test that error content in messages is handled properly.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") diff --git 
a/python/packages/foundry_local/tests/test_foundry_local_client.py b/python/packages/foundry_local/tests/test_foundry_local_client.py index a9866943d9..eb26837953 100644 --- a/python/packages/foundry_local/tests/test_foundry_local_client.py +++ b/python/packages/foundry_local/tests/test_foundry_local_client.py @@ -44,9 +44,7 @@ def test_foundry_local_settings_missing_model_id(foundry_local_unit_test_env: di def test_foundry_local_settings_explicit_overrides_env(foundry_local_unit_test_env: dict[str, str]) -> None: """Test that explicit values override environment variables.""" - settings = load_settings( - FoundryLocalSettings, env_prefix="FOUNDRY_LOCAL_", model_id="override-model-id" - ) + settings = load_settings(FoundryLocalSettings, env_prefix="FOUNDRY_LOCAL_", model_id="override-model-id") assert settings["model_id"] == "override-model-id" assert settings["model_id"] != foundry_local_unit_test_env["FOUNDRY_LOCAL_MODEL_ID"] diff --git a/python/samples/05-end-to-end/evaluation/red_teaming/red_team_agent_sample.py b/python/samples/05-end-to-end/evaluation/red_teaming/red_team_agent_sample.py index 6e240d66b4..a63912c615 100644 --- a/python/samples/05-end-to-end/evaluation/red_teaming/red_team_agent_sample.py +++ b/python/samples/05-end-to-end/evaluation/red_teaming/red_team_agent_sample.py @@ -1,3 +1,13 @@ +# /// script +# requires-python = ">=3.10" +# dependencies = [ +# "azure-ai-evaluation", +# "pyrit==0.9.0" +# ] +# /// +# Run with any PEP 723 compatible runner, e.g.: +# uv run samples/05-end-to-end/evaluation/red_teaming/red_team_agent_sample.py + # Copyright (c) Microsoft. All rights reserved. 
# type: ignore import asyncio @@ -5,6 +15,7 @@ import os from typing import Any +from agent_framework import Message from agent_framework.azure import AzureOpenAIChatClient from azure.ai.evaluation.red_team import AttackStrategy, RedTeam, RiskCategory from azure.identity import AzureCliCredential @@ -20,10 +31,10 @@ Prerequisites: - Azure AI project (hub and project created) - Azure CLI authentication (run `az login`) - - Environment variables set in .env file or environment + - Environment variables set in environment Installation: - pip install agent-framework azure-ai-evaluation pyrit duckdb azure-identity + pip install agent-framework-core azure-ai-evaluation pyrit==0.9.0 duckdb Reference: Azure AI Red Teaming: https://github.com/Azure-Samples/azureai-samples/blob/main/scenarios/evaluate/AI_RedTeaming/AI_RedTeaming.ipynb @@ -60,19 +71,30 @@ async def main() -> None: ) # Create the callback - async def agent_callback(query: str) -> dict[str, list[Any]]: + async def agent_callback( + messages: list, + stream: bool | None = False, # noqa: ARG001 + session_state: str | None = None, # noqa: ARG001 + context: dict[str, Any] | None = None, # noqa: ARG001 + ) -> dict[str, list[dict[str, str]]]: """Async callback function that interfaces between RedTeam and the agent. 
Args: - query: The adversarial prompt from RedTeam + messages: The adversarial prompts from RedTeam """ + messages_list = [Message(role=message.role, text=message.content) for message in messages] try: - response = await agent.run(query) - return {"messages": [{"content": response.text, "role": "assistant"}]} - + response = agent.run(messages=messages_list, stream=stream) + result = await response.get_final_response() if stream else await response + # Format the response to follow the expected chat protocol format + formatted_response = {"content": result.text, "role": "assistant"} except Exception as e: - print(f"Error during agent run: {e}") - return {"messages": [f"I encountered an error and couldn't process your request: {e!s}"]} + print(f"Error calling Azure OpenAI: {e!s}") + formatted_response = { + "content": f"I encountered an error and couldn't process your request: {e}", + "role": "assistant", + } + return {"messages": [formatted_response]} # Create RedTeam instance red_team = RedTeam( diff --git a/python/samples/05-end-to-end/evaluation/self_reflection/README.md b/python/samples/05-end-to-end/evaluation/self_reflection/README.md index c75aa62ce8..5c26f352e7 100644 --- a/python/samples/05-end-to-end/evaluation/self_reflection/README.md +++ b/python/samples/05-end-to-end/evaluation/self_reflection/README.md @@ -7,23 +7,22 @@ This sample demonstrates the self-reflection pattern using Agent Framework and A **What it demonstrates:** - Iterative self-reflection loop that automatically improves responses based on groundedness evaluation - Batch processing of prompts from JSONL files with progress tracking -- Using `AzureOpenAIChatClient` with Azure CLI authentication +- Using `AzureOpenAIResponsesClient` with a Project Endpoint and Azure CLI authentication - Comprehensive summary statistics and detailed result tracking ## Prerequisites ### Azure Resources -- **Azure OpenAI**: Deploy models (default: gpt-4.1 for both agent and judge) +- **Azure OpenAI Responses 
in Foundry**: Deploy models (default: gpt-5.2 for both agent and judge) - **Azure CLI**: Run `az login` to authenticate ### Python Environment ```bash -pip install agent-framework-core azure-ai-projects pandas --pre +pip install agent-framework-core pandas --pre ``` ### Environment Variables ```bash -# .env file AZURE_AI_PROJECT_ENDPOINT=https://.services.ai.azure.com/api/projects// ``` @@ -67,6 +66,12 @@ The agent iteratively improves responses: ✓ Completed with score: 5/5 (best at iteration 2/3) ``` +In the Foundry UI, under `Build`/`Evaluations` you can view detailed results for each prompt, including: +- Context +- Query +- Response +- Groundedness scores and reasoning for each iteration of each prompt + ## Related Resources - [Reflexion Paper](https://arxiv.org/abs/2303.11366) diff --git a/python/samples/05-end-to-end/evaluation/self_reflection/self_reflection.py b/python/samples/05-end-to-end/evaluation/self_reflection/self_reflection.py index 6a21059a93..d554531e35 100644 --- a/python/samples/05-end-to-end/evaluation/self_reflection/self_reflection.py +++ b/python/samples/05-end-to-end/evaluation/self_reflection/self_reflection.py @@ -2,6 +2,7 @@ # requires-python = ">=3.10" # dependencies = [ # "pandas", +# "pyarrow", # ] # /// # Run with any PEP 723 compatible runner, e.g.: @@ -13,12 +14,13 @@ import asyncio import os import time +from pathlib import Path from typing import Any import openai import pandas as pd from agent_framework import Agent, Message -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from azure.ai.projects import AIProjectClient from azure.identity import AzureCliCredential from dotenv import load_dotenv @@ -50,11 +52,38 @@ --output resources/results.jsonl \\ --max-reflections 3 \\ -n 10 # Optional: process only first 10 prompts + +=============== Example output =============== + +============================================================ +SUMMARY 
+============================================================ +Total prompts processed: 31 + ✓ Successful: 30 + ✗ Failed: 1 + +Groundedness Scores: + Average best score: 4.77/5 + Perfect scores (5/5): 25/30 (83.3%) + +Improvement Analysis: + Average first score: 4.50/5 + Average final score: 4.70/5 + Average improvement: +0.20 + Responses that improved: 4/30 (13.3%) + +Iteration Statistics: + Average best iteration: 1.17 + Best on first try: 25/30 (83.3%) +============================================================ + +✓ Processing complete! + """ -DEFAULT_AGENT_MODEL = "gpt-4.1" -DEFAULT_JUDGE_MODEL = "gpt-4.1" +DEFAULT_AGENT_MODEL = "gpt-5.2" +DEFAULT_JUDGE_MODEL = "gpt-5.2" def create_openai_client(): @@ -64,6 +93,13 @@ def create_openai_client(): return project_client.get_openai_client() +def create_async_project_client(): + from azure.ai.projects.aio import AIProjectClient as AsyncAIProjectClient + from azure.identity.aio import AzureCliCredential as AsyncAzureCliCredential + + return AsyncAIProjectClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=AsyncAzureCliCredential()) + + def create_eval(client: openai.OpenAI, judge_model: str) -> openai.types.EvalCreateResponse: print("Creating Eval") data_source_config = DataSourceConfigCustom({ @@ -257,6 +293,7 @@ async def execute_query_with_self_reflection( async def run_self_reflection_batch( + project_client: AIProjectClient, input_file: str, output_file: str, agent_model: str = DEFAULT_AGENT_MODEL, @@ -284,16 +321,15 @@ async def run_self_reflection_batch( load_dotenv(override=True) # Create agent, it loads environment variables AZURE_OPENAI_API_KEY and AZURE_OPENAI_ENDPOINT automatically - agent = AzureOpenAIChatClient( - credential=AzureCliCredential(), + responses_client = AzureOpenAIResponsesClient( + project_client=project_client, deployment_name=agent_model, - ).as_agent( - instructions="You are a helpful agent.", ) # Load input data - print(f"Loading prompts from: {input_file}") - df = 
pd.read_json(input_file, lines=True) + input_path = (Path(__file__).parent / input_file).resolve() + print(f"Loading prompts from: {input_path}") + df = pd.read_json(path_or_buf=input_path, lines=True, engine="pyarrow") print(f"Loaded {len(df)} prompts") # Apply limit if specified @@ -332,7 +368,7 @@ async def run_self_reflection_batch( try: result = await execute_query_with_self_reflection( client=client, - agent=agent, + agent=responses_client.as_agent(instructions=row["system_instruction"]), eval_object=eval_object, full_user_query=row["full_prompt"], context=row["context_document"], @@ -386,8 +422,9 @@ async def run_self_reflection_batch( # Create DataFrame and save results_df = pd.DataFrame(results) - print(f"\nSaving results to: {output_file}") - results_df.to_json(output_file, orient="records", lines=True) + output_path = (Path(__file__).parent / output_file).resolve() + print(f"\nSaving results to: {output_path}") + results_df.to_json(output_path, orient="records", lines=True) # Generate detailed summary successful_runs = results_df[results_df["error"].isna()] @@ -482,6 +519,7 @@ async def main(): # Run the batch processing try: await run_self_reflection_batch( + project_client=create_async_project_client(), input_file=args.input, output_file=args.output, agent_model=args.agent_model, @@ -499,4 +537,4 @@ async def main(): if __name__ == "__main__": - exit(asyncio.run(main())) + asyncio.run(main()) diff --git a/python/samples/05-end-to-end/workflow_evaluation/.env.example b/python/samples/05-end-to-end/workflow_evaluation/.env.example index 3a13025d22..b7a06ab22a 100644 --- a/python/samples/05-end-to-end/workflow_evaluation/.env.example +++ b/python/samples/05-end-to-end/workflow_evaluation/.env.example @@ -1,2 +1,3 @@ AZURE_AI_PROJECT_ENDPOINT="" -AZURE_AI_MODEL_DEPLOYMENT_NAME="" \ No newline at end of file +AZURE_AI_MODEL_DEPLOYMENT_NAME_WORKFLOW="" +AZURE_AI_MODEL_DEPLOYMENT_NAME_EVAL="" diff --git 
a/python/samples/05-end-to-end/workflow_evaluation/create_workflow.py b/python/samples/05-end-to-end/workflow_evaluation/create_workflow.py index d1f679b778..12a4286de0 100644 --- a/python/samples/05-end-to-end/workflow_evaluation/create_workflow.py +++ b/python/samples/05-end-to-end/workflow_evaluation/create_workflow.py @@ -1,5 +1,5 @@ # Copyright (c) Microsoft. All rights reserved. - +# type: ignore """ Multi-Agent Travel Planning Workflow Evaluation with Multiple Response Tracking @@ -52,10 +52,11 @@ Message, WorkflowBuilder, WorkflowContext, + WorkflowEvent, executor, handler, ) -from agent_framework.azure import AzureAIClient +from agent_framework.azure import AzureOpenAIResponsesClient from azure.ai.projects.aio import AIProjectClient from azure.identity.aio import DefaultAzureCredential from dotenv import load_dotenv @@ -73,8 +74,8 @@ async def start_executor(input: str, ctx: WorkflowContext[list[Message]]) -> Non class ResearchLead(Executor): """Aggregates and summarizes travel planning findings from all specialized agents.""" - def __init__(self, client: AzureAIClient, id: str = "travel-planning-coordinator"): - # store=True to preserve conversation history for evaluation + def __init__(self, client: AzureOpenAIResponsesClient, id: str = "travel-planning-coordinator"): + # Use default_options to persist conversation history for evaluation. self.agent = client.as_agent( id="travel-planning-coordinator", instructions=( @@ -86,7 +87,6 @@ def __init__(self, client: AzureAIClient, id: str = "travel-planning-coordinator "Clearly indicate which information came from which agent. Do not use tools." 
), name="travel-planning-coordinator", - store=True, ) super().__init__(id=id) @@ -142,12 +142,15 @@ def _extract_agent_findings(self, responses: list[AgentExecutorResponse]) -> lis return agent_findings -async def run_workflow_with_response_tracking(query: str, client: AzureAIClient | None = None) -> dict: +async def run_workflow_with_response_tracking( + query: str, client: AzureOpenAIResponsesClient | None = None, deployment_name: str | None = None +) -> dict: """Run multi-agent workflow and track conversation IDs, response IDs, and interaction sequence. Args: query: The user query to process through the multi-agent workflow - client: Optional AzureAIClient instance + client: Optional AzureOpenAIResponsesClient instance + deployment_name: Optional model deployment name for the workflow agents Returns: Dictionary containing interaction sequence, conversation/response IDs, and conversation analysis @@ -155,17 +158,13 @@ async def run_workflow_with_response_tracking(query: str, client: AzureAIClient if client is None: try: async with DefaultAzureCredential() as credential: - # Create AIProjectClient with the correct API version for V2 prompt agents project_client = AIProjectClient( endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential, - api_version="2025-11-15-preview", ) - async with ( - project_client, - AzureAIClient(project_client=project_client, credential=credential) as client, - ): + async with project_client: + client = AzureOpenAIResponsesClient(project_client=project_client, deployment_name=deployment_name) return await _run_workflow_with_client(query, client) except Exception as e: print(f"Error during workflow execution: {e}") @@ -174,21 +173,29 @@ async def run_workflow_with_response_tracking(query: str, client: AzureAIClient return await _run_workflow_with_client(query, client) -async def _run_workflow_with_client(query: str, client: AzureAIClient) -> dict: +async def _run_workflow_with_client(query: str, client: 
AzureOpenAIResponsesClient) -> dict: """Execute workflow with given client and track all interactions.""" # Initialize tracking variables - use lists to track multiple responses per agent - conversation_ids = defaultdict(list) - response_ids = defaultdict(list) - workflow_output = None + conversation_ids: dict[str, list[str]] = defaultdict(list) + response_ids: dict[str, list[str]] = defaultdict(list) + + # Create workflow components using a single shared client + workflow, agent_map = await _create_workflow(client) + + def track_ids(event: WorkflowEvent) -> WorkflowEvent: + """Transform hook that tracks response/conversation IDs from AgentResponseUpdate events.""" + if event.type == "output" and isinstance(event.data, AgentResponseUpdate): + _track_agent_ids(event, event.executor_id, response_ids, conversation_ids) + return event - # Create workflow components and keep agent references - # Pass project_client and credential to create separate client instances per agent - workflow, agent_map = await _create_workflow(client.project_client, client.credential) + # Process workflow events using a transform hook for ID tracking + stream = workflow.run(query, stream=True).with_transform_hook(track_ids) + result = await stream.get_final_response() - # Process workflow events - events = workflow.run(query, stream=True) - workflow_output = await _process_workflow_events(events, conversation_ids, response_ids) + workflow_output = result.get_outputs()[-1] if result.get_outputs() else None + if workflow_output: + print(f"\nWorkflow Output: {workflow_output}\n") return { "conversation_ids": dict(conversation_ids), @@ -198,115 +205,80 @@ async def _run_workflow_with_client(query: str, client: AzureAIClient) -> dict: } -async def _create_workflow(project_client, credential): +async def _create_workflow(client: AzureOpenAIResponsesClient): """Create the multi-agent travel planning workflow with specialized agents. 
- IMPORTANT: Each agent needs its own client instance because the V2 client stores - agent_name and agent_version as instance variables, causing all agents to share - the same agent identity if they share a client. + Uses a single shared AzureOpenAIResponsesClient for all agents. """ - # Create separate client for Final Coordinator - final_coordinator_client = AzureAIClient( - project_client=project_client, credential=credential, agent_name="final-coordinator" - ) - final_coordinator = ResearchLead(client=final_coordinator_client, id="final-coordinator") + final_coordinator = ResearchLead(client=client, id="final-coordinator") # Agent 1: Travel Request Handler (initial coordinator) - # Create separate client with unique agent_name - travel_request_handler_client = AzureAIClient( - project_client=project_client, credential=credential, agent_name="travel-request-handler" - ) - travel_request_handler = travel_request_handler_client.as_agent( + travel_request_handler = client.as_agent( id="travel-request-handler", instructions=( "You receive user travel queries and relay them to specialized agents. Extract key information: destination, dates, budget, and preferences. Pass this information forward clearly to the next agents." ), name="travel-request-handler", - store=True, ) # Agent 2: Hotel Search Executor - hotel_search_client = AzureAIClient( - project_client=project_client, credential=credential, agent_name="hotel-search-agent" - ) - hotel_search_agent = hotel_search_client.as_agent( + hotel_search_agent = client.as_agent( id="hotel-search-agent", instructions=( "You are a hotel search specialist. Your task is ONLY to search for and provide hotel information. Use search_hotels to find options, get_hotel_details for specifics, and check_availability to verify rooms. Output format: List hotel names, prices per night, total cost for the stay, locations, ratings, amenities, and addresses. IMPORTANT: Only provide hotel information without additional commentary." 
), name="hotel-search-agent", tools=[search_hotels, get_hotel_details, check_hotel_availability], - store=True, ) # Agent 3: Flight Search Executor - flight_search_client = AzureAIClient( - project_client=project_client, credential=credential, agent_name="flight-search-agent" - ) - flight_search_agent = flight_search_client.as_agent( + flight_search_agent = client.as_agent( id="flight-search-agent", instructions=( "You are a flight search specialist. Your task is ONLY to search for and provide flight information. Use search_flights to find options, get_flight_details for specifics, and check_availability for seats. Output format: List flight numbers, airlines, departure/arrival times, prices, durations, and cabin class. IMPORTANT: Only provide flight information without additional commentary." ), name="flight-search-agent", tools=[search_flights, get_flight_details, check_flight_availability], - store=True, ) # Agent 4: Activity Search Executor - activity_search_client = AzureAIClient( - project_client=project_client, credential=credential, agent_name="activity-search-agent" - ) - activity_search_agent = activity_search_client.as_agent( + activity_search_agent = client.as_agent( id="activity-search-agent", instructions=( "You are an activities specialist. Your task is ONLY to search for and provide activity information. Use search_activities to find options for activities. Output format: List activity names, descriptions, prices, durations, ratings, and categories. IMPORTANT: Only provide activity information without additional commentary." 
), name="activity-search-agent", tools=[search_activities], - store=True, ) # Agent 5: Booking Confirmation Executor - booking_confirmation_client = AzureAIClient( - project_client=project_client, credential=credential, agent_name="booking-confirmation-agent" - ) - booking_confirmation_agent = booking_confirmation_client.as_agent( + booking_confirmation_agent = client.as_agent( id="booking-confirmation-agent", instructions=( "You confirm bookings. Use check_hotel_availability and check_flight_availability to verify slots, then confirm_booking to finalize. Provide ONLY: confirmation numbers, booking references, and confirmation status." ), name="booking-confirmation-agent", tools=[confirm_booking, check_hotel_availability, check_flight_availability], - store=True, ) # Agent 6: Booking Payment Executor - booking_payment_client = AzureAIClient( - project_client=project_client, credential=credential, agent_name="booking-payment-agent" - ) - booking_payment_agent = booking_payment_client.as_agent( + booking_payment_agent = client.as_agent( id="booking-payment-agent", instructions=( "You process payments. Use validate_payment_method to verify payment, then process_payment to complete transactions. Provide ONLY: payment confirmation status, transaction IDs, and payment amounts." ), name="booking-payment-agent", tools=[process_payment, validate_payment_method], - store=True, ) # Agent 7: Booking Information Aggregation Executor - booking_info_client = AzureAIClient( - project_client=project_client, credential=credential, agent_name="booking-info-aggregation-agent" - ) - booking_info_aggregation_agent = booking_info_client.as_agent( + booking_info_aggregation_agent = client.as_agent( id="booking-info-aggregation-agent", instructions=( "You aggregate hotel and flight search results. Receive options from search agents and organize them. Provide: top 2-3 hotel options with prices and top 2-3 flight options with prices in a structured format." 
), name="booking-info-aggregation-agent", - store=True, ) # Build workflow with logical booking flow: @@ -347,63 +319,31 @@ async def _create_workflow(project_client, credential): return workflow, agent_map -async def _process_workflow_events(events, conversation_ids, response_ids): - """Process workflow events and track interactions.""" - workflow_output = None - - async for event in events: - if event.type == "output": - workflow_output = event.data - # Handle Unicode characters that may not be displayable in Windows console - try: - print(f"\nWorkflow Output: {event.data}\n") - except UnicodeEncodeError: - output_str = str(event.data).encode("ascii", "replace").decode("ascii") - print(f"\nWorkflow Output: {output_str}\n") - - elif event.type == "output" and isinstance(event.data, AgentResponseUpdate): - _track_agent_ids(event, event.executor_id, response_ids, conversation_ids) - - return workflow_output - - def _track_agent_ids(event, agent, response_ids, conversation_ids): """Track agent response and conversation IDs - supporting multiple responses per agent.""" + update = event.data + + # response_id is directly on AgentResponseUpdate + if update.response_id and update.response_id not in response_ids[agent]: + response_ids[agent].append(update.response_id) + + # conversation_id is on the underlying ChatResponseUpdate (raw_representation) + raw = update.raw_representation if ( - isinstance(event.data, AgentResponseUpdate) - and hasattr(event.data, "raw_representation") - and event.data.raw_representation + raw + and hasattr(raw, "conversation_id") + and raw.conversation_id + and raw.conversation_id not in conversation_ids[agent] ): - # Check for conversation_id and response_id from raw_representation - # V2 API stores conversation_id directly on raw_representation (ChatResponseUpdate) - raw = event.data.raw_representation - - # Try conversation_id directly on raw representation - if ( - hasattr(raw, "conversation_id") - and raw.conversation_id # type: 
ignore[union-attr] - and raw.conversation_id not in conversation_ids[agent] # type: ignore[union-attr] - ): - # Only add if not already in the list - conversation_ids[agent].append(raw.conversation_id) # type: ignore[union-attr] - - # Extract response_id from the OpenAI event (available from first event) - if hasattr(raw, "raw_representation") and raw.raw_representation: # type: ignore[union-attr] - openai_event = raw.raw_representation # type: ignore[union-attr] - - # Check if event has response object with id - if ( - hasattr(openai_event, "response") - and hasattr(openai_event.response, "id") - and openai_event.response.id not in response_ids[agent] - ): - # Only add if not already in the list - response_ids[agent].append(openai_event.response.id) - - -async def create_and_run_workflow(): + conversation_ids[agent].append(raw.conversation_id) + + +async def create_and_run_workflow(deployment_name: str | None = None): """Run the workflow evaluation and display results. + Args: + deployment_name: Optional model deployment name for the workflow agents + Returns: Dictionary containing agents data with conversation IDs, response IDs, and query information """ @@ -416,7 +356,7 @@ async def create_and_run_workflow(): query = example_queries[0] print(f"Query: {query}\n") - result = await run_workflow_with_response_tracking(query) + result = await run_workflow_with_response_tracking(query, deployment_name=deployment_name) # Create output data structure output_data = {"agents": {}, "query": result["query"], "output": result.get("output", "")} diff --git a/python/samples/05-end-to-end/workflow_evaluation/run_evaluation.py b/python/samples/05-end-to-end/workflow_evaluation/run_evaluation.py index ed17b54258..bd064058b9 100644 --- a/python/samples/05-end-to-end/workflow_evaluation/run_evaluation.py +++ b/python/samples/05-end-to-end/workflow_evaluation/run_evaluation.py @@ -1,24 +1,43 @@ # Copyright (c) Microsoft. All rights reserved. 
+# type: ignore -""" -Script to run multi-agent travel planning workflow and evaluate agent responses. - -This script: -1. Executes the multi-agent workflow -2. Displays response data summary -3. Creates and runs evaluation with multiple evaluators -4. Monitors evaluation progress and displays results -""" +from __future__ import annotations import asyncio import os import time +from typing import TYPE_CHECKING, Any from azure.ai.projects import AIProjectClient from azure.identity import DefaultAzureCredential from create_workflow import create_and_run_workflow from dotenv import load_dotenv +if TYPE_CHECKING: + from openai import OpenAI + from openai.types import EvalCreateResponse + from openai.types.evals import RunCreateResponse + +""" +Script to run multi-agent travel planning workflow and evaluate agent responses. + +This script: +1. Runs the multi-agent travel planning workflow +2. Displays a summary of tracked agent responses +3. Fetches and previews final agent responses +4. Creates an evaluation with multiple evaluators +5. Runs the evaluation on selected agent responses +6. Monitors evaluation progress and displays results +""" + + +def create_openai_client() -> OpenAI: + project_client = AIProjectClient( + endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), + ) + return project_client.get_openai_client() + def print_section(title: str): """Print a formatted section header.""" @@ -27,26 +46,26 @@ def print_section(title: str): print(f"{'=' * 80}") -async def run_workflow(): +async def run_workflow(deployment_name: str | None = None) -> dict[str, Any]: """Execute the multi-agent travel planning workflow. 
+ Args: + deployment_name: Optional model deployment name for the workflow agents + Returns: Dictionary containing workflow data with agent response IDs """ - print_section("Step 1: Running Workflow") print("Executing multi-agent travel planning workflow...") print("This may take a few minutes...") - workflow_data = await create_and_run_workflow() + workflow_data = await create_and_run_workflow(deployment_name=deployment_name) print("Workflow execution completed") return workflow_data -def display_response_summary(workflow_data: dict): +def display_response_summary(workflow_data: dict) -> None: """Display summary of response data.""" - print_section("Step 2: Response Data Summary") - print(f"Query: {workflow_data['query']}") print(f"\nAgents tracked: {len(workflow_data['agents'])}") @@ -55,10 +74,8 @@ def display_response_summary(workflow_data: dict): print(f" {agent_name}: {response_count} response(s)") -def fetch_agent_responses(openai_client, workflow_data: dict, agent_names: list): +def fetch_agent_responses(openai_client: OpenAI, workflow_data: dict[str, Any], agent_names: list[str]) -> None: """Fetch and display final responses from specified agents.""" - print_section("Step 3: Fetching Agent Responses") - for agent_name in agent_names: if agent_name not in workflow_data["agents"]: continue @@ -80,10 +97,9 @@ def fetch_agent_responses(openai_client, workflow_data: dict, agent_names: list) print(f" Error: {e}") -def create_evaluation(openai_client, model_deployment: str): +def create_evaluation(openai_client: OpenAI, deployment_name: str | None = "gpt-5.2") -> EvalCreateResponse: """Create evaluation with multiple evaluators.""" - print_section("Step 4: Creating Evaluation") - + deployment_name = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", deployment_name) data_source_config = {"type": "azure_ai_source", "scenario": "responses"} testing_criteria = [ @@ -91,25 +107,25 @@ def create_evaluation(openai_client, model_deployment: str): "type": 
"azure_ai_evaluator", "name": "relevance", "evaluator_name": "builtin.relevance", - "initialization_parameters": {"deployment_name": model_deployment} + "initialization_parameters": {"deployment_name": deployment_name}, }, { "type": "azure_ai_evaluator", "name": "groundedness", "evaluator_name": "builtin.groundedness", - "initialization_parameters": {"deployment_name": model_deployment} + "initialization_parameters": {"deployment_name": deployment_name}, }, { "type": "azure_ai_evaluator", "name": "tool_call_accuracy", "evaluator_name": "builtin.tool_call_accuracy", - "initialization_parameters": {"deployment_name": model_deployment} + "initialization_parameters": {"deployment_name": deployment_name}, }, { "type": "azure_ai_evaluator", "name": "tool_output_utilization", "evaluator_name": "builtin.tool_output_utilization", - "initialization_parameters": {"deployment_name": model_deployment} + "initialization_parameters": {"deployment_name": deployment_name}, }, ] @@ -126,10 +142,10 @@ def create_evaluation(openai_client, model_deployment: str): return eval_object -def run_evaluation(openai_client, eval_object, workflow_data: dict, agent_names: list): +def run_evaluation( + openai_client: OpenAI, eval_object: EvalCreateResponse, workflow_data: dict[str, Any], agent_names: list[str] +) -> RunCreateResponse: """Run evaluation on selected agent responses.""" - print_section("Step 5: Running Evaluation") - selected_response_ids = [] for agent_name in agent_names: if agent_name in workflow_data["agents"]: @@ -162,10 +178,8 @@ def run_evaluation(openai_client, eval_object, workflow_data: dict, agent_names: return eval_run -def monitor_evaluation(openai_client, eval_object, eval_run): +def monitor_evaluation(openai_client: OpenAI, eval_object: EvalCreateResponse, eval_run: RunCreateResponse): """Monitor evaluation progress and display results.""" - print_section("Step 6: Monitoring Evaluation") - print("Waiting for evaluation to complete...") while eval_run.status not in 
["completed", "failed"]: @@ -187,29 +201,41 @@ def monitor_evaluation(openai_client, eval_object, eval_run): async def main(): """Main execution flow.""" load_dotenv() + openai_client = create_openai_client() + + # Model configuration + workflow_agent_model = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME_WORKFLOW", "gpt-4.1-nano") + eval_model = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME_EVAL", "gpt-5.2") + + # Focus on these agents, uncomment other ones you want to have evals run on + agents_to_evaluate = [ + "hotel-search-agent", + "flight-search-agent", + "activity-search-agent", + # "booking-payment-agent", + # "booking-info-aggregation-agent", + # "travel-request-handler", + # "booking-confirmation-agent", + ] - print("Travel Planning Workflow Evaluation") + print_section("Travel Planning Workflow Evaluation") - workflow_data = await run_workflow() + print_section("Step 1: Running Workflow") + workflow_data = await run_workflow(deployment_name=workflow_agent_model) + print_section("Step 2: Response Data Summary") display_response_summary(workflow_data) - project_client = AIProjectClient( - endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), - api_version="2025-11-15-preview" - ) - openai_client = project_client.get_openai_client() - - agents_to_evaluate = ["hotel-search-agent", "flight-search-agent", "activity-search-agent"] - + print_section("Step 3: Fetching Agent Responses") fetch_agent_responses(openai_client, workflow_data, agents_to_evaluate) - model_deployment = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "gpt-4o-mini") - eval_object = create_evaluation(openai_client, model_deployment) + print_section("Step 4: Creating Evaluation") + eval_object = create_evaluation(openai_client, deployment_name=eval_model) + print_section("Step 5: Running Evaluation") eval_run = run_evaluation(openai_client, eval_object, workflow_data, agents_to_evaluate) + print_section("Step 6: Monitoring Evaluation") 
monitor_evaluation(openai_client, eval_object, eval_run) print_section("Complete") diff --git a/python/samples/demos/chatkit-integration/uploads/atc_3df183a3 b/python/samples/demos/chatkit-integration/uploads/atc_3df183a3 deleted file mode 100644 index f760b6553f..0000000000 Binary files a/python/samples/demos/chatkit-integration/uploads/atc_3df183a3 and /dev/null differ diff --git a/python/samples/demos/chatkit-integration/uploads/atc_967c57ef b/python/samples/demos/chatkit-integration/uploads/atc_967c57ef deleted file mode 100644 index f760b6553f..0000000000 Binary files a/python/samples/demos/chatkit-integration/uploads/atc_967c57ef and /dev/null differ diff --git a/python/samples/demos/chatkit-integration/uploads/atc_f4a18d5e b/python/samples/demos/chatkit-integration/uploads/atc_f4a18d5e deleted file mode 100644 index f760b6553f..0000000000 Binary files a/python/samples/demos/chatkit-integration/uploads/atc_f4a18d5e and /dev/null differ diff --git a/python/samples/demos/chatkit-integration/uploads/atc_fa77f9c0 b/python/samples/demos/chatkit-integration/uploads/atc_fa77f9c0 deleted file mode 100644 index f760b6553f..0000000000 Binary files a/python/samples/demos/chatkit-integration/uploads/atc_fa77f9c0 and /dev/null differ diff --git a/python/samples/getting_started/sessions/README.md b/python/samples/getting_started/sessions/README.md deleted file mode 100644 index daee274c2c..0000000000 --- a/python/samples/getting_started/sessions/README.md +++ /dev/null @@ -1,103 +0,0 @@ -# Sessions & Context Provider Examples - -Sessions and context providers are the core building blocks for agent memory in the Agent Framework. Sessions hold conversation state across turns, while context providers add, retrieve, and persist context before and after each agent invocation. - -## Core Concepts - -- **`AgentSession`**: Lightweight state container holding a `session_id` and a mutable `state` dict. Pass to `agent.run()` to maintain conversation across turns. 
-- **`BaseContextProvider`**: Hook that runs `before_run` / `after_run` around each invocation. Use for injecting instructions, RAG context, or metadata. -- **`BaseHistoryProvider`**: Subclass of `BaseContextProvider` for conversation history storage. Implements `get_messages()` / `save_messages()` and handles load/store automatically. -- **`InMemoryHistoryProvider`**: Built-in provider storing messages in `session.state`. Auto-injected when no providers are configured. - -## Examples - -### Session Management - -| File | Description | -|------|-------------| -| [`suspend_resume_session.py`](suspend_resume_session.py) | Suspend and resume sessions via `to_dict()` / `from_dict()` — both service-managed (Azure AI) and in-memory (OpenAI). | -| [`custom_history_provider.py`](custom_history_provider.py) | Implement a custom `BaseHistoryProvider` with dict-based storage. Shows serialization/deserialization. | -| [`redis_history_provider.py`](redis_history_provider.py) | `RedisHistoryProvider` for persistent storage: basic usage, user sessions, persistence across restarts, serialization, and message trimming. | - -### Custom Context Providers - -| File | Description | -|------|-------------| -| [`simple_context_provider.py`](simple_context_provider.py) | Build a custom `BaseContextProvider` that extracts and stores user information using structured output, then provides dynamic instructions based on stored context. | - -### Azure AI Search - -| File | Description | -|------|-------------| -| [`azure_ai_search/azure_ai_with_search_context_agentic.py`](azure_ai_search/azure_ai_with_search_context_agentic.py) | **Agentic mode** — Knowledge Bases with query planning and multi-hop reasoning. | -| [`azure_ai_search/azure_ai_with_search_context_semantic.py`](azure_ai_search/azure_ai_with_search_context_semantic.py) | **Semantic mode** — fast hybrid search with semantic ranking. 
| - -### Mem0 - -| File | Description | -|------|-------------| -| [`mem0/mem0_basic.py`](mem0/mem0_basic.py) | Basic Mem0 integration for user preference memory. | -| [`mem0/mem0_sessions.py`](mem0/mem0_sessions.py) | Session scoping: global scope, per-operation scope, and multi-agent isolation. | -| [`mem0/mem0_oss.py`](mem0/mem0_oss.py) | Mem0 Open Source (self-hosted) integration. | - -### Redis - -| File | Description | -|------|-------------| -| [`redis/redis_basics.py`](redis/redis_basics.py) | Standalone provider usage, full-text/hybrid search, preferences, and tool output memory. | -| [`redis/redis_conversation.py`](redis/redis_conversation.py) | Conversation persistence across sessions. | -| [`redis/redis_sessions.py`](redis/redis_sessions.py) | Session scoping: global, per-operation, and multi-agent isolation. | -| [`redis/azure_redis_conversation.py`](redis/azure_redis_conversation.py) | Azure Managed Redis with Entra ID authentication. | - -## Choosing a Provider - -| Provider | Use Case | Persistence | Search | -|----------|----------|-------------|--------| -| **InMemoryHistoryProvider** | Prototyping, stateless apps | Session state only | No | -| **Custom BaseHistoryProvider** | File/DB-backed storage | Your choice | Your choice | -| **RedisHistoryProvider** | Fast persistent chat history | Yes (Redis) | No | -| **RedisContextProvider** | Searchable memory / RAG | Yes (Redis) | Full-text + Hybrid | -| **Mem0ContextProvider** | Long-term user memory | Yes (cloud/self-hosted) | Semantic | -| **AzureAISearchContextProvider** | Enterprise RAG | Yes (Azure) | Hybrid + Semantic | - -## Building Custom Providers - -### Custom Context Provider - -```python -from agent_framework import AgentSession, BaseContextProvider, SessionContext, Message -from typing import Any - -class MyContextProvider(BaseContextProvider): - def __init__(self): - super().__init__("my-context") - - async def before_run(self, *, agent: Any, session: AgentSession | None, - context: 
SessionContext, state: dict[str, Any]) -> None: - context.extend_messages(self.source_id, [Message("system", ["Extra context here"])]) - - async def after_run(self, *, agent: Any, session: AgentSession | None, - context: SessionContext, state: dict[str, Any]) -> None: - pass # Store information, update memory, etc. -``` - -### Custom History Provider - -```python -from agent_framework import BaseHistoryProvider, Message -from collections.abc import Sequence -from typing import Any - -class MyHistoryProvider(BaseHistoryProvider): - def __init__(self): - super().__init__("my-history") - - async def get_messages(self, session_id: str | None, **kwargs: Any) -> list[Message]: - ... # Load from your storage - - async def save_messages(self, session_id: str | None, - messages: Sequence[Message], **kwargs: Any) -> None: - ... # Persist to your storage -``` - -See `custom_history_provider.py` and `simple_context_provider.py` for complete examples. diff --git a/python/samples/getting_started/sessions/azure_ai_search/README.md b/python/samples/getting_started/sessions/azure_ai_search/README.md deleted file mode 100644 index 49403d106c..0000000000 --- a/python/samples/getting_started/sessions/azure_ai_search/README.md +++ /dev/null @@ -1,264 +0,0 @@ -# Azure AI Search Context Provider Examples - -Azure AI Search context provider enables Retrieval Augmented Generation (RAG) with your agents by retrieving relevant documents from Azure AI Search indexes. It supports two search modes optimized for different use cases. - -This folder contains examples demonstrating how to use the Azure AI Search context provider with the Agent Framework. - -## Examples - -| File | Description | -|------|-------------| -| [`azure_ai_with_search_context_agentic.py`](azure_ai_with_search_context_agentic.py) | **Agentic mode** (recommended for most scenarios): Uses Knowledge Bases in Azure AI Search for query planning and multi-hop reasoning. 
Provides more accurate results through intelligent retrieval with automatic query reformulation. Slightly slower with more token consumption for query planning. [Learn more](https://techcommunity.microsoft.com/blog/azure-ai-foundry-blog/foundry-iq-boost-response-relevance-by-36-with-agentic-retrieval/4470720) | -| [`azure_ai_with_search_context_semantic.py`](azure_ai_with_search_context_semantic.py) | **Semantic mode** (fast queries): Fast hybrid search combining vector and keyword search with semantic ranking. Returns raw search results as context. Best for scenarios where speed is critical and simple retrieval is sufficient. | - -## Installation - -```bash -pip install agent-framework-azure-ai-search agent-framework-azure-ai -``` - -## Prerequisites - -### Required Resources - -1. **Azure AI Search service** with a search index containing your documents - - [Create Azure AI Search service](https://learn.microsoft.com/azure/search/search-create-service-portal) - - [Create and populate a search index](https://learn.microsoft.com/azure/search/search-what-is-an-index) - -2. **Azure AI Foundry project** with a model deployment - - [Create Azure AI Foundry project](https://learn.microsoft.com/azure/ai-studio/how-to/create-projects) - - Deploy a model (e.g., GPT-4o) - -3. **For Agentic mode only**: Azure OpenAI resource for Knowledge Base model calls - - [Create Azure OpenAI resource](https://learn.microsoft.com/azure/ai-services/openai/how-to/create-resource) - - Note: This is separate from your Azure AI Foundry project endpoint - -### Authentication - -Both examples support two authentication methods: - -- **API Key**: Set `AZURE_SEARCH_API_KEY` environment variable -- **Entra ID (Managed Identity)**: Uses `DefaultAzureCredential` when API key is not provided - -Run `az login` if using Entra ID authentication. 
- -## Configuration - -### Environment Variables - -**Common (both modes):** -- `AZURE_SEARCH_ENDPOINT`: Your Azure AI Search endpoint (e.g., `https://myservice.search.windows.net`) -- `AZURE_SEARCH_INDEX_NAME`: Name of your search index -- `AZURE_AI_PROJECT_ENDPOINT`: Your Azure AI Foundry project endpoint -- `AZURE_AI_MODEL_DEPLOYMENT_NAME`: Model deployment name (e.g., `gpt-4o`, defaults to `gpt-4o`) -- `AZURE_SEARCH_API_KEY`: _(Optional)_ Your search API key - if not provided, uses DefaultAzureCredential - -**Agentic mode only:** -- `AZURE_SEARCH_KNOWLEDGE_BASE_NAME`: Name of your Knowledge Base in Azure AI Search -- `AZURE_OPENAI_RESOURCE_URL`: Your Azure OpenAI resource URL (e.g., `https://myresource.openai.azure.com`) - - **Important**: This is different from `AZURE_AI_PROJECT_ENDPOINT` - Knowledge Base needs the OpenAI endpoint for model calls - -### Example .env file - -**For Semantic Mode:** -```env -AZURE_SEARCH_ENDPOINT=https://myservice.search.windows.net -AZURE_SEARCH_INDEX_NAME=my-index -AZURE_AI_PROJECT_ENDPOINT=https://.services.ai.azure.com/api/projects/ -AZURE_AI_MODEL_DEPLOYMENT_NAME=gpt-4o -# Optional - omit to use Entra ID -AZURE_SEARCH_API_KEY=your-search-key -``` - -**For Agentic Mode (add these to semantic mode variables):** -```env -AZURE_SEARCH_KNOWLEDGE_BASE_NAME=my-knowledge-base -AZURE_OPENAI_RESOURCE_URL=https://myresource.openai.azure.com -``` - -## Search Modes Comparison - -| Feature | Semantic Mode | Agentic Mode | -|---------|--------------|--------------| -| **Speed** | Fast | Slower (query planning overhead) | -| **Token Usage** | Lower | Higher (query reformulation) | -| **Retrieval Strategy** | Hybrid search + semantic ranking | Multi-hop reasoning with Knowledge Base | -| **Query Handling** | Direct search | Automatic query reformulation | -| **Best For** | Simple queries, speed-critical apps | Complex queries, multi-document reasoning | -| **Additional Setup** | None | Requires Knowledge Base + OpenAI resource | - -### When 
to Use Semantic Mode - -- **Simple queries** where direct keyword/vector search is sufficient -- **Speed is critical** and you need low latency -- **Straightforward retrieval** from single documents -- **Lower token costs** are important - -### When to Use Agentic Mode - -- **Complex queries** requiring multi-hop reasoning -- **Cross-document analysis** where information spans multiple sources -- **Ambiguous queries** that benefit from automatic reformulation -- **Higher accuracy** is more important than speed -- You need **intelligent query planning** and document synthesis - -## How the Examples Work - -### Semantic Mode Flow - -1. User query is sent to Azure AI Search -2. Hybrid search (vector + keyword) retrieves relevant documents -3. Semantic ranking reorders results for relevance -4. Top-k documents are returned as context -5. Agent generates response using retrieved context - -### Agentic Mode Flow - -1. User query is sent to the Knowledge Base -2. Knowledge Base plans the retrieval strategy -3. Multiple search queries may be executed (multi-hop) -4. Retrieved information is synthesized -5. Enhanced context is provided to the agent -6. 
Agent generates response with comprehensive context - -## Code Example - -### Semantic Mode - -```python -from agent_framework import Agent -from agent_framework.azure import AzureAIAgentClient, AzureAISearchContextProvider -from azure.identity.aio import DefaultAzureCredential - -# Create search provider with semantic mode (default) -search_provider = AzureAISearchContextProvider( - endpoint=search_endpoint, - index_name=index_name, - api_key=search_key, # Or use credential for Entra ID - mode="semantic", # Default mode - top_k=3, # Number of documents to retrieve -) - -# Create agent with search context -async with AzureAIAgentClient(credential=DefaultAzureCredential()) as client: - async with Agent( - client=client, - model=model_deployment, - context_providers=[search_provider], - ) as agent: - response = await agent.run("What information is in the knowledge base?") -``` - -### Agentic Mode - -```python -from agent_framework.azure import AzureAISearchContextProvider - -# Create search provider with agentic mode -search_provider = AzureAISearchContextProvider( - endpoint=search_endpoint, - index_name=index_name, - api_key=search_key, - mode="agentic", # Enable agentic retrieval - knowledge_base_name=knowledge_base_name, - azure_openai_resource_url=azure_openai_resource_url, - top_k=5, -) - -# Use with agent (same as semantic mode) -async with Agent( - client=client, - model=model_deployment, - context_providers=[search_provider], -) as agent: - response = await agent.run("Analyze and compare topics across documents") -``` - -## Running the Examples - -1. **Set up environment variables** (see Configuration section above) - -2. **Ensure you have an Azure AI Search index** with documents: - ```bash - # Verify your index exists - curl -X GET "https://myservice.search.windows.net/indexes/my-index?api-version=2024-07-01" \ - -H "api-key: YOUR_API_KEY" - ``` - -3. 
**For agentic mode**: Create a Knowledge Base in Azure AI Search - - [Knowledge Base documentation](https://learn.microsoft.com/azure/search/knowledge-store-create-portal) - -4. **Run the examples**: - ```bash - # Semantic mode (fast, simple) - python azure_ai_with_search_context_semantic.py - - # Agentic mode (intelligent, complex) - python azure_ai_with_search_context_agentic.py - ``` - -## Key Parameters - -### Common Parameters - -- `endpoint`: Azure AI Search service endpoint -- `index_name`: Name of the search index -- `api_key`: API key for authentication (optional, can use credential instead) -- `credential`: Azure credential for Entra ID auth (e.g., `DefaultAzureCredential()`) -- `mode`: Search mode - `"semantic"` (default) or `"agentic"` -- `top_k`: Number of documents to retrieve (default: 3 for semantic, 5 for agentic) - -### Semantic Mode Parameters - -- `semantic_configuration`: Name of semantic configuration in your index (optional) -- `query_type`: Query type - `"semantic"` for semantic search (default) - -### Agentic Mode Parameters - -- `knowledge_base_name`: Name of your Knowledge Base (required) -- `azure_openai_resource_url`: Azure OpenAI resource URL (required) -- `max_search_queries`: Maximum number of search queries to generate (default: 3) - -## Troubleshooting - -### Common Issues - -1. **Authentication errors** - - Ensure `AZURE_SEARCH_API_KEY` is set, or run `az login` for Entra ID auth - - Verify your credentials have search permissions - -2. **Index not found** - - Verify `AZURE_SEARCH_INDEX_NAME` matches your index name exactly - - Check that the index exists and contains documents - -3. **Agentic mode errors** - - Ensure `AZURE_SEARCH_KNOWLEDGE_BASE_NAME` is correctly configured - - Verify `AZURE_OPENAI_RESOURCE_URL` points to your Azure OpenAI resource (not AI Foundry endpoint) - - Check that your OpenAI resource has the necessary model deployments - -4. 
**No results returned** - - Verify your index has documents with vector embeddings (for semantic/hybrid search) - - Check that your queries match the content in your index - - Try increasing `top_k` parameter - -5. **Slow responses in agentic mode** - - This is expected - agentic mode trades speed for accuracy - - Reduce `max_search_queries` if needed - - Consider semantic mode for speed-critical applications - -## Performance Tips - -- **Use semantic mode** as the default for most scenarios - it's fast and effective -- **Switch to agentic mode** when you need multi-hop reasoning or complex queries -- **Adjust `top_k`** based on your needs - higher values provide more context but increase token usage -- **Enable semantic configuration** in your index for better semantic ranking -- **Use Entra ID authentication** in production for better security - -## Additional Resources - -- [Azure AI Search Documentation](https://learn.microsoft.com/azure/search/) -- [Azure AI Foundry Documentation](https://learn.microsoft.com/azure/ai-studio/) -- [RAG with Azure AI Search](https://learn.microsoft.com/azure/search/retrieval-augmented-generation-overview) -- [Semantic Search in Azure AI Search](https://learn.microsoft.com/azure/search/semantic-search-overview) -- [Knowledge Bases in Azure AI Search](https://learn.microsoft.com/azure/search/knowledge-store-concept-intro) -- [Agentic Retrieval Blog Post](https://techcommunity.microsoft.com/blog/azure-ai-foundry-blog/foundry-iq-boost-response-relevance-by-36-with-agentic-retrieval/4470720) diff --git a/python/samples/getting_started/sessions/azure_ai_search/azure_ai_with_search_context_agentic.py b/python/samples/getting_started/sessions/azure_ai_search/azure_ai_with_search_context_agentic.py deleted file mode 100644 index 5a4503f920..0000000000 --- a/python/samples/getting_started/sessions/azure_ai_search/azure_ai_with_search_context_agentic.py +++ /dev/null @@ -1,141 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. 
- -import asyncio -import os - -from agent_framework import Agent -from agent_framework.azure import AzureAIAgentClient, AzureAISearchContextProvider -from azure.identity.aio import AzureCliCredential -from dotenv import load_dotenv - -# Load environment variables from .env file -load_dotenv() - -""" -This sample demonstrates how to use Azure AI Search with agentic mode for RAG -(Retrieval Augmented Generation) with Azure AI agents. - -**Agentic mode** is recommended for most scenarios: -- Uses Knowledge Bases in Azure AI Search for query planning -- Performs multi-hop reasoning across documents -- Provides more accurate results through intelligent retrieval -- Slightly slower with more token consumption for query planning -- See: https://techcommunity.microsoft.com/blog/azure-ai-foundry-blog/foundry-iq-boost-response-relevance-by-36-with-agentic-retrieval/4470720 - -For simple queries where speed is critical, use semantic mode instead (see azure_ai_with_search_context_semantic.py). - -Prerequisites: -1. An Azure AI Search service -2. An Azure AI Foundry project with a model deployment -3. 
Either an existing Knowledge Base OR a search index (to auto-create a KB) - -Environment variables: - - AZURE_SEARCH_ENDPOINT: Your Azure AI Search endpoint - - AZURE_SEARCH_API_KEY: (Optional) API key - if not provided, uses DefaultAzureCredential - - AZURE_AI_PROJECT_ENDPOINT: Your Azure AI Foundry project endpoint - - AZURE_AI_MODEL_DEPLOYMENT_NAME: Your model deployment name (e.g., "gpt-4o") - -For using an existing Knowledge Base (recommended): - - AZURE_SEARCH_KNOWLEDGE_BASE_NAME: Your Knowledge Base name - -For auto-creating a Knowledge Base from an index: - - AZURE_SEARCH_INDEX_NAME: Your search index name - - AZURE_OPENAI_RESOURCE_URL: Azure OpenAI resource URL (e.g., "https://myresource.openai.azure.com") -""" - -# Sample queries to demonstrate agentic RAG -USER_INPUTS = [ - "What information is available in the knowledge base?", - "Analyze and compare the main topics from different documents", - "What connections can you find across different sections?", -] - - -async def main() -> None: - """Main function demonstrating Azure AI Search agentic mode.""" - - # Get configuration from environment - search_endpoint = os.environ["AZURE_SEARCH_ENDPOINT"] - search_key = os.environ.get("AZURE_SEARCH_API_KEY") - project_endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] - model_deployment = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "gpt-4o") - - # Agentic mode requires exactly ONE of: knowledge_base_name OR index_name - # Option 1: Use existing Knowledge Base (recommended) - knowledge_base_name = os.environ.get("AZURE_SEARCH_KNOWLEDGE_BASE_NAME") - # Option 2: Auto-create KB from index (requires azure_openai_resource_url) - index_name = os.environ.get("AZURE_SEARCH_INDEX_NAME") - azure_openai_resource_url = os.environ.get("AZURE_OPENAI_RESOURCE_URL") - - # Create Azure AI Search context provider with agentic mode (recommended for accuracy) - print("Using AGENTIC mode (Knowledge Bases with query planning, recommended)\n") - print("This mode is slightly 
slower but provides more accurate results.\n") - - # Configure based on whether using existing KB or auto-creating from index - if knowledge_base_name: - # Use existing Knowledge Base - simplest approach - search_provider = AzureAISearchContextProvider( - endpoint=search_endpoint, - api_key=search_key, - credential=AzureCliCredential() if not search_key else None, - mode="agentic", - knowledge_base_name=knowledge_base_name, - # Optional: Configure retrieval behavior - knowledge_base_output_mode="extractive_data", # or "answer_synthesis" - retrieval_reasoning_effort="minimal", # or "medium", "low" - ) - else: - # Auto-create Knowledge Base from index - if not index_name: - raise ValueError("Set AZURE_SEARCH_KNOWLEDGE_BASE_NAME or AZURE_SEARCH_INDEX_NAME") - if not azure_openai_resource_url: - raise ValueError("AZURE_OPENAI_RESOURCE_URL required when using index_name") - search_provider = AzureAISearchContextProvider( - endpoint=search_endpoint, - index_name=index_name, - api_key=search_key, - credential=AzureCliCredential() if not search_key else None, - mode="agentic", - azure_openai_resource_url=azure_openai_resource_url, - model_deployment_name=model_deployment, - # Optional: Configure retrieval behavior - knowledge_base_output_mode="extractive_data", # or "answer_synthesis" - retrieval_reasoning_effort="minimal", # or "medium", "low" - top_k=3, - ) - - # Create agent with search context provider - async with ( - search_provider, - AzureAIAgentClient( - project_endpoint=project_endpoint, - model_deployment_name=model_deployment, - credential=AzureCliCredential(), - ) as client, - Agent( - client=client, - name="SearchAgent", - instructions=( - "You are a helpful assistant with advanced reasoning capabilities. " - "Use the provided context from the knowledge base to answer complex " - "questions that may require synthesizing information from multiple sources." 
- ), - context_providers=[search_provider], - ) as agent, - ): - print("=== Azure AI Agent with Search Context (Agentic Mode) ===\n") - - for user_input in USER_INPUTS: - print(f"User: {user_input}") - print("Agent: ", end="", flush=True) - - # Stream response - async for chunk in agent.run(user_input, stream=True): - if chunk.text: - print(chunk.text, end="", flush=True) - - print("\n") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/getting_started/sessions/azure_ai_search/azure_ai_with_search_context_semantic.py b/python/samples/getting_started/sessions/azure_ai_search/azure_ai_with_search_context_semantic.py deleted file mode 100644 index 8309d5197c..0000000000 --- a/python/samples/getting_started/sessions/azure_ai_search/azure_ai_with_search_context_semantic.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -import asyncio -import os - -from agent_framework import Agent -from agent_framework.azure import AzureAIAgentClient, AzureAISearchContextProvider -from azure.identity.aio import AzureCliCredential -from dotenv import load_dotenv - -# Load environment variables from .env file -load_dotenv() - -""" -This sample demonstrates how to use Azure AI Search with semantic mode for RAG -(Retrieval Augmented Generation) with Azure AI agents. - -**Semantic mode** is the recommended default mode: -- Fast hybrid search combining vector and keyword search -- Uses semantic ranking for improved relevance -- Returns raw search results as context -- Best for most RAG use cases - -Prerequisites: -1. An Azure AI Search service with a search index -2. An Azure AI Foundry project with a model deployment -3. 
Set the following environment variables: - - AZURE_SEARCH_ENDPOINT: Your Azure AI Search endpoint - - AZURE_SEARCH_API_KEY: (Optional) Your search API key - if not provided, uses DefaultAzureCredential for Entra ID - - AZURE_SEARCH_INDEX_NAME: Your search index name - - AZURE_AI_PROJECT_ENDPOINT: Your Azure AI Foundry project endpoint - - AZURE_AI_MODEL_DEPLOYMENT_NAME: Your model deployment name (e.g., "gpt-4o") -""" - -# Sample queries to demonstrate RAG -USER_INPUTS = [ - "What information is available in the knowledge base?", - "Summarize the main topics from the documents", - "Find specific details about the content", -] - - -async def main() -> None: - """Main function demonstrating Azure AI Search semantic mode.""" - - # Get configuration from environment - search_endpoint = os.environ["AZURE_SEARCH_ENDPOINT"] - search_key = os.environ.get("AZURE_SEARCH_API_KEY") - index_name = os.environ["AZURE_SEARCH_INDEX_NAME"] - project_endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] - model_deployment = os.environ.get("AZURE_AI_MODEL_DEPLOYMENT_NAME", "gpt-4o") - - # Create Azure AI Search context provider with semantic mode (recommended, fast) - print("Using SEMANTIC mode (hybrid search + semantic ranking, fast)\n") - search_provider = AzureAISearchContextProvider( - endpoint=search_endpoint, - index_name=index_name, - api_key=search_key, # Use api_key for API key auth, or credential for managed identity - credential=AzureCliCredential() if not search_key else None, - mode="semantic", # Default mode - top_k=3, # Retrieve top 3 most relevant documents - ) - - # Create agent with search context provider - async with ( - search_provider, - AzureAIAgentClient( - project_endpoint=project_endpoint, - model_deployment_name=model_deployment, - credential=AzureCliCredential(), - ) as client, - Agent( - client=client, - name="SearchAgent", - instructions=( - "You are a helpful assistant. Use the provided context from the " - "knowledge base to answer questions accurately." 
- ), - context_providers=[search_provider], - ) as agent, - ): - print("=== Azure AI Agent with Search Context (Semantic Mode) ===\n") - - for user_input in USER_INPUTS: - print(f"User: {user_input}") - print("Agent: ", end="", flush=True) - - # Stream response - async for chunk in agent.run(user_input, stream=True): - if chunk.text: - print(chunk.text, end="", flush=True) - - print("\n") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/getting_started/sessions/custom_history_provider.py b/python/samples/getting_started/sessions/custom_history_provider.py deleted file mode 100644 index e3ce5c5905..0000000000 --- a/python/samples/getting_started/sessions/custom_history_provider.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -import asyncio -from collections.abc import Sequence -from typing import Any - -from agent_framework import AgentSession, BaseHistoryProvider, Message -from agent_framework.openai import OpenAIChatClient - -""" -Custom History Provider Example - -This sample demonstrates how to implement and use a custom history provider -for session management, allowing you to persist conversation history in your -preferred storage solution (database, file system, etc.). -""" - - -class CustomHistoryProvider(BaseHistoryProvider): - """Implementation of custom history provider. 
- In real applications, this can be an implementation of relational database or vector store.""" - - def __init__(self) -> None: - super().__init__("custom-history") - self._storage: dict[str, list[Message]] = {} - - async def get_messages( - self, session_id: str | None, *, state: dict[str, Any] | None = None, **kwargs: Any - ) -> list[Message]: - key = session_id or "default" - return list(self._storage.get(key, [])) - - async def save_messages( - self, - session_id: str | None, - messages: Sequence[Message], - *, - state: dict[str, Any] | None = None, - **kwargs: Any, - ) -> None: - key = session_id or "default" - if key not in self._storage: - self._storage[key] = [] - self._storage[key].extend(messages) - - -async def main() -> None: - """Demonstrates how to use 3rd party or custom history provider for sessions.""" - print("=== Session with 3rd party or custom history provider ===") - - # OpenAI Chat Client is used as an example here, - # other chat clients can be used as well. - agent = OpenAIChatClient().as_agent( - name="CustomBot", - instructions="You are a helpful assistant that remembers our conversation.", - # Use custom history provider. - # If not provided, the default in-memory provider will be used. - context_providers=[CustomHistoryProvider()], - ) - - # Start a new session for the agent conversation. - session = agent.create_session() - - # Respond to user input. - query = "Hello! My name is Alice and I love pizza." - print(f"User: {query}") - print(f"Agent: {await agent.run(query, session=session)}\n") - - # Serialize the session state, so it can be stored for later use. - serialized_session = session.to_dict() - - # The session can now be saved to a database, file, or any other storage mechanism and loaded again later. - print(f"Serialized session: {serialized_session}\n") - - # Deserialize the session state after loading from storage. - resumed_session = AgentSession.from_dict(serialized_session) - - # Respond to user input. 
- query = "What do you remember about me?" - print(f"User: {query}") - print(f"Agent: {await agent.run(query, session=resumed_session)}\n") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/getting_started/sessions/mem0/README.md b/python/samples/getting_started/sessions/mem0/README.md deleted file mode 100644 index 667455a536..0000000000 --- a/python/samples/getting_started/sessions/mem0/README.md +++ /dev/null @@ -1,55 +0,0 @@ -# Mem0 Context Provider Examples - -[Mem0](https://mem0.ai/) is a self-improving memory layer for Large Language Models that enables applications to have long-term memory capabilities. The Agent Framework's Mem0 context provider integrates with Mem0's API to provide persistent memory across conversation sessions. - -This folder contains examples demonstrating how to use the Mem0 context provider with the Agent Framework for persistent memory and context management across conversations. - -## Examples - -| File | Description | -|------|-------------| -| [`mem0_basic.py`](mem0_basic.py) | Basic example of using Mem0 context provider to store and retrieve user preferences across different conversation sessions. | -| [`mem0_sessions.py`](mem0_sessions.py) | Advanced example demonstrating different session scoping strategies with Mem0. Covers global session scope (memories shared across all operations), per-operation session scope (memories isolated per session), and multiple agents with different memory configurations for personal vs. work contexts. | -| [`mem0_oss.py`](mem0_oss.py) | Example of using the Mem0 Open Source self-hosted version as the context provider. Demonstrates setup and configuration for local deployment. | - -## Prerequisites - -### Required Resources - -1. [Mem0 API Key](https://app.mem0.ai/) - Sign up for a Mem0 account and get your API key - _or_ self-host [Mem0 Open Source](https://docs.mem0.ai/open-source/overview) -2. Azure AI project endpoint (used in these examples) -3. 
Azure CLI authentication (run `az login`) - -## Configuration - -### Environment Variables - -Set the following environment variables: - -**For Mem0 Platform:** -- `MEM0_API_KEY`: Your Mem0 API key (alternatively, pass it as `api_key` parameter to `Mem0Provider`). Not required if you are self-hosting [Mem0 Open Source](https://docs.mem0.ai/open-source/overview) - -**For Mem0 Open Source:** -- `OPENAI_API_KEY`: Your OpenAI API key (used by Mem0 OSS for embedding generation and automatic memory extraction) - -**For Azure AI:** -- `AZURE_AI_PROJECT_ENDPOINT`: Your Azure AI project endpoint -- `AZURE_AI_MODEL_DEPLOYMENT_NAME`: The name of your model deployment - -## Key Concepts - -### Memory Scoping - -The Mem0 context provider supports different scoping strategies: - -- **Global Scope** (`scope_to_per_operation_thread_id=False`): Memories are shared across all conversation sessions -- **Session Scope** (`scope_to_per_operation_thread_id=True`): Memories are isolated per conversation session - -### Memory Association - -Mem0 records can be associated with different identifiers: - -- `user_id`: Associate memories with a specific user -- `agent_id`: Associate memories with a specific agent -- `thread_id`: Associate memories with a specific conversation session -- `application_id`: Associate memories with an application context diff --git a/python/samples/getting_started/sessions/mem0/mem0_basic.py b/python/samples/getting_started/sessions/mem0/mem0_basic.py deleted file mode 100644 index 7824db6a67..0000000000 --- a/python/samples/getting_started/sessions/mem0/mem0_basic.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -import asyncio -import uuid - -from agent_framework import tool -from agent_framework.azure import AzureAIAgentClient -from agent_framework.mem0 import Mem0ContextProvider -from azure.identity.aio import AzureCliCredential - - -# NOTE: approval_mode="never_require" is for sample brevity. 
Use "always_require" in production; see samples/02-agents/tools/function_tool_with_approval.py and samples/02-agents/tools/function_tool_with_approval_and_sessions.py. -@tool(approval_mode="never_require") -def retrieve_company_report(company_code: str, detailed: bool) -> str: - if company_code != "CNTS": - raise ValueError("Company code not found") - if not detailed: - return "CNTS is a company that specializes in technology." - return ( - "CNTS is a company that specializes in technology. " - "It had a revenue of $10 million in 2022. It has 100 employees." - ) - - -async def main() -> None: - """Example of memory usage with Mem0 context provider.""" - print("=== Mem0 Context Provider Example ===") - - # Each record in Mem0 should be associated with agent_id or user_id or application_id or thread_id. - # In this example, we associate Mem0 records with user_id. - user_id = str(uuid.uuid4()) - - # For Azure authentication, run `az login` command in terminal or replace AzureCliCredential with preferred - # authentication option. - # For Mem0 authentication, set Mem0 API key via "api_key" parameter or MEM0_API_KEY environment variable. - async with ( - AzureCliCredential() as credential, - AzureAIAgentClient(credential=credential).as_agent( - name="FriendlyAssistant", - instructions="You are a friendly assistant.", - tools=retrieve_company_report, - context_providers=[Mem0ContextProvider(user_id=user_id)], - ) as agent, - ): - # First ask the agent to retrieve a company report with no previous context. - # The agent will not be able to invoke the tool, since it doesn't know - # the company code or the report format, so it should ask for clarification. - query = "Please retrieve my company report" - print(f"User: {query}") - result = await agent.run(query) - print(f"Agent: {result}\n") - - # Now tell the agent the company code and the report format that you want to use - # and it should be able to invoke the tool and return the report. 
- query = "I always work with CNTS and I always want a detailed report format. Please remember and retrieve it." - print(f"User: {query}") - result = await agent.run(query) - print(f"Agent: {result}\n") - - # Mem0 processes and indexes memories asynchronously. - # Wait for memories to be indexed before querying in a new session. - # In production, consider implementing retry logic or using Mem0's - # eventual consistency handling instead of a fixed delay. - print("Waiting for memories to be processed...") - await asyncio.sleep(12) # Empirically determined delay for Mem0 indexing - - print("\nRequest within a new session:") - # Create a new session for the agent. - # The new session has no conversation history from the previous session. - session = agent.create_session() - - # Since we have the Mem0 context provider, the agent should be able to - # retrieve the company report without asking for clarification, as Mem0 - # remembers user preferences across sessions. - query = "Please retrieve my company report" - print(f"User: {query}") - result = await agent.run(query, session=session) - print(f"Agent: {result}\n") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/getting_started/sessions/mem0/mem0_oss.py b/python/samples/getting_started/sessions/mem0/mem0_oss.py deleted file mode 100644 index 1b03ac5fc1..0000000000 --- a/python/samples/getting_started/sessions/mem0/mem0_oss.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -import asyncio -import uuid - -from agent_framework import tool -from agent_framework.azure import AzureAIAgentClient -from agent_framework.mem0 import Mem0ContextProvider -from azure.identity.aio import AzureCliCredential -from mem0 import AsyncMemory - - -# NOTE: approval_mode="never_require" is for sample brevity. 
Use "always_require" in production; see samples/02-agents/tools/function_tool_with_approval.py and samples/02-agents/tools/function_tool_with_approval_and_sessions.py. -@tool(approval_mode="never_require") -def retrieve_company_report(company_code: str, detailed: bool) -> str: - if company_code != "CNTS": - raise ValueError("Company code not found") - if not detailed: - return "CNTS is a company that specializes in technology." - return ( - "CNTS is a company that specializes in technology. " - "It had a revenue of $10 million in 2022. It has 100 employees." - ) - - -async def main() -> None: - """Example of memory usage with local Mem0 OSS context provider.""" - print("=== Mem0 Context Provider Example ===") - - # Each record in Mem0 should be associated with agent_id or user_id or application_id or thread_id. - # In this example, we associate Mem0 records with user_id. - user_id = str(uuid.uuid4()) - - # For Azure authentication, run `az login` command in terminal or replace AzureCliCredential with preferred - # authentication option. - # By default, local Mem0 authenticates to your OpenAI using the OPENAI_API_KEY environment variable. - # See the Mem0 documentation for other LLM providers and authentication options. - local_mem0_client = AsyncMemory() - async with ( - AzureCliCredential() as credential, - AzureAIAgentClient(credential=credential).as_agent( - name="FriendlyAssistant", - instructions="You are a friendly assistant.", - tools=retrieve_company_report, - context_providers=[Mem0ContextProvider(user_id=user_id, mem0_client=local_mem0_client)], - ) as agent, - ): - # First ask the agent to retrieve a company report with no previous context. - # The agent will not be able to invoke the tool, since it doesn't know - # the company code or the report format, so it should ask for clarification. 
- query = "Please retrieve my company report" - print(f"User: {query}") - result = await agent.run(query) - print(f"Agent: {result}\n") - - # Now tell the agent the company code and the report format that you want to use - # and it should be able to invoke the tool and return the report. - query = "I always work with CNTS and I always want a detailed report format. Please remember and retrieve it." - print(f"User: {query}") - result = await agent.run(query) - print(f"Agent: {result}\n") - - print("\nRequest within a new session:") - - # Create a new session for the agent. - # The new session has no context of the previous conversation. - session = agent.create_session() - - # Since we have the mem0 component in the session, the agent should be able to - # retrieve the company report without asking for clarification, as it will - # be able to remember the user preferences from Mem0 component. - query = "Please retrieve my company report" - print(f"User: {query}") - result = await agent.run(query, session=session) - print(f"Agent: {result}\n") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/getting_started/sessions/mem0/mem0_sessions.py b/python/samples/getting_started/sessions/mem0/mem0_sessions.py deleted file mode 100644 index 8ad6678cb8..0000000000 --- a/python/samples/getting_started/sessions/mem0/mem0_sessions.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -import asyncio - -from agent_framework import tool -from agent_framework.azure import AzureAIAgentClient -from agent_framework.mem0 import Mem0ContextProvider -from azure.identity.aio import AzureCliCredential - - -# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/02-agents/tools/function_tool_with_approval.py and samples/02-agents/tools/function_tool_with_approval_and_sessions.py. 
-@tool(approval_mode="never_require") -def get_user_preferences(user_id: str) -> str: - """Mock function to get user preferences.""" - preferences = { - "user123": "Prefers concise responses and technical details", - "user456": "Likes detailed explanations with examples", - } - return preferences.get(user_id, "No specific preferences found") - - -async def example_cross_session_memory() -> None: - """Example 1: Cross-session memory (memories shared across all sessions for a user).""" - print("1. Cross-Session Memory Example:") - print("-" * 40) - - user_id = "user123" - - async with ( - AzureCliCredential() as credential, - AzureAIAgentClient(credential=credential).as_agent( - name="MemoryAssistant", - instructions="You are an assistant that remembers user preferences across conversations.", - tools=get_user_preferences, - context_providers=[Mem0ContextProvider(user_id=user_id)], - ) as agent, - ): - # Store some preferences - query = "Remember that I prefer technical responses with code examples when discussing programming." - print(f"User: {query}") - result = await agent.run(query) - print(f"Agent: {result}\n") - - # Mem0 processes and indexes memories asynchronously. - print("Waiting for memories to be processed...") - await asyncio.sleep(12) - - # Create a new session - memories should still be accessible - # because Mem0 scopes by user_id, not session - new_session = agent.create_session() - query = "What do you know about my preferences?" - print(f"User (new session): {query}") - result = await agent.run(query, session=new_session) - print(f"Agent: {result}\n") - - -async def example_agent_scoped_memory() -> None: - """Example 2: Agent-scoped memory (memories isolated per agent).""" - print("2. 
Agent-Scoped Memory Example:") - print("-" * 40) - - async with ( - AzureCliCredential() as credential, - AzureAIAgentClient(credential=credential).as_agent( - name="PersonalAssistant", - instructions="You are a personal assistant that helps with personal tasks.", - context_providers=[Mem0ContextProvider(agent_id="agent_personal")], - ) as personal_agent, - AzureAIAgentClient(credential=credential).as_agent( - name="WorkAssistant", - instructions="You are a work assistant that helps with professional tasks.", - context_providers=[Mem0ContextProvider(agent_id="agent_work")], - ) as work_agent, - ): - # Store personal information - query = "Remember that I like to exercise at 6 AM and prefer outdoor activities." - print(f"User to Personal Agent: {query}") - result = await personal_agent.run(query) - print(f"Personal Agent: {result}\n") - - # Store work information - query = "Remember that I have team meetings every Tuesday at 2 PM." - print(f"User to Work Agent: {query}") - result = await work_agent.run(query) - print(f"Work Agent: {result}\n") - - # Mem0 processes and indexes memories asynchronously. - print("Waiting for memories to be processed...") - await asyncio.sleep(12) - - # Test memory isolation - each agent should only recall its own memories - query = "What do you know about my schedule?" 
- print(f"User to Personal Agent: {query}") - result = await personal_agent.run(query) - print(f"Personal Agent: {result}\n") - - print(f"User to Work Agent: {query}") - result = await work_agent.run(query) - print(f"Work Agent: {result}\n") - - -async def main() -> None: - """Run all Mem0 session management examples.""" - print("=== Mem0 Session Management Example ===\n") - - await example_cross_session_memory() - await example_agent_scoped_memory() - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/getting_started/sessions/redis/README.md b/python/samples/getting_started/sessions/redis/README.md deleted file mode 100644 index 03c41295f3..0000000000 --- a/python/samples/getting_started/sessions/redis/README.md +++ /dev/null @@ -1,113 +0,0 @@ -# Redis Context Provider Examples - -The Redis context provider enables persistent, searchable memory for your agents using Redis (RediSearch). It supports full‑text search and optional hybrid search with vector embeddings, letting agents remember and retrieve user context across sessions. - -This folder contains an example demonstrating how to use the Redis context provider with the Agent Framework. - -## Examples - -| File | Description | -|------|-------------| -| [`azure_redis_conversation.py`](azure_redis_conversation.py) | Demonstrates conversation persistence with RedisHistoryProvider and Azure Redis with Azure AD (Entra ID) authentication using credential provider. | -| [`redis_basics.py`](redis_basics.py) | Shows standalone provider usage and agent integration. Demonstrates writing messages to Redis, retrieving context via full‑text or hybrid vector search, and persisting preferences across sessions. Also includes a simple tool example whose outputs are remembered. | -| [`redis_conversation.py`](redis_conversation.py) | Simple example showing conversation persistence with RedisContextProvider using traditional connection string authentication. 
| -| [`redis_sessions.py`](redis_sessions.py) | Demonstrates session scoping. Includes: (1) global session scope with a fixed `thread_id` shared across operations; (2) per‑operation session scope where `scope_to_per_operation_thread_id=True` binds memory to a single session for the provider's lifetime; and (3) multiple agents with isolated memory via different `agent_id` values. | - - -## Prerequisites - -### Required resources - -1. A running Redis with RediSearch (Redis Stack or a managed service) -2. Python environment with Agent Framework Redis extra installed -3. Optional: OpenAI API key if using vector embeddings - -### Install the package - -```bash -pip install "agent-framework-redis" -``` - -## Running Redis - -Pick one option: - -### Option A: Docker (local Redis Stack) - -```bash -docker run --name redis -p 6379:6379 -d redis:8.0.3 -``` - -### Option B: Redis Cloud - -Create a free database and get the connection URL at `https://redis.io/cloud/`. - -### Option C: Azure Managed Redis - -See quickstart: `https://learn.microsoft.com/azure/redis/quickstart-create-managed-redis` - -## Configuration - -### Environment variables - -- `OPENAI_API_KEY` (optional): Required only if you set `vectorizer_choice="openai"` to enable hybrid search. - -### Provider configuration highlights - -The provider supports both full‑text only and hybrid vector search: - -- Set `vectorizer_choice` to `"openai"` or `"hf"` to enable embeddings and hybrid search. -- When using a vectorizer, also set `vector_field_name` (e.g., `"vector"`). -- Partition fields for scoping memory: `application_id`, `agent_id`, `user_id`, `thread_id`. -- Session scoping: `scope_to_per_operation_thread_id=True` isolates memory per operation session. -- Index management: `index_name`, `overwrite_redis_index`, `drop_redis_index`. - -## What the example does - -`redis_basics.py` walks through three scenarios: - -1. Standalone provider usage: adds messages and retrieves context via `invoking`. -2. 
Agent integration: teaches the agent a preference and verifies it is remembered across turns. -3. Agent + tool: calls a sample tool (flight search) and then asks the agent to recall details remembered from the tool output. - -It uses OpenAI for both chat (via `OpenAIChatClient`) and, in some steps, optional embeddings for hybrid search. - -## How to run - -1) Start Redis (see options above). For local default, ensure it's reachable at `redis://localhost:6379`. - -2) Set your OpenAI key if using embeddings and for the chat client used in the sample: - -```bash -export OPENAI_API_KEY="" -``` - -3) Run the example: - -```bash -python redis_basics.py -``` - -You should see the agent responses and, when using embeddings, context retrieved from Redis. The example includes commented debug helpers you can print, such as index info or all stored docs. - -## Key concepts - -### Memory scoping - -- Global scope: set `application_id`, `agent_id`, `user_id`, or `thread_id` on the provider to filter memory. -- Per‑operation session scope: set `scope_to_per_operation_thread_id=True` to isolate memory to the current session created by the framework. - -### Hybrid vector search (optional) - -- Enable by setting `vectorizer_choice` to `"openai"` (requires `OPENAI_API_KEY`) or `"hf"` (offline model). -- Provide `vector_field_name` (e.g., `"vector"`); other vector settings have sensible defaults. - -### Index lifecycle controls - -- `overwrite_redis_index` and `drop_redis_index` help recreate indexes during iteration. - -## Troubleshooting - -- Ensure at least one of `application_id`, `agent_id`, `user_id`, or `thread_id` is set; the provider requires a scope. -- If using embeddings, verify `OPENAI_API_KEY` is set and reachable. -- Make sure Redis exposes RediSearch (Redis Stack image or managed service with search enabled). 
diff --git a/python/samples/getting_started/sessions/redis/azure_redis_conversation.py b/python/samples/getting_started/sessions/redis/azure_redis_conversation.py deleted file mode 100644 index ce569be8cb..0000000000 --- a/python/samples/getting_started/sessions/redis/azure_redis_conversation.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -"""Azure Managed Redis History Provider with Azure AD Authentication - -This example demonstrates how to use Azure Managed Redis with Azure AD authentication -to persist conversational details using RedisHistoryProvider. - -Requirements: - - Azure Managed Redis instance with Azure AD authentication enabled - - Azure credentials configured (az login or managed identity) - - agent-framework-redis: pip install agent-framework-redis - - azure-identity: pip install azure-identity - -Environment Variables: - - AZURE_REDIS_HOST: Your Azure Managed Redis host (e.g., myredis.redis.cache.windows.net) - - OPENAI_API_KEY: Your OpenAI API key - - OPENAI_CHAT_MODEL_ID: OpenAI model (e.g., gpt-4o-mini) - - AZURE_USER_OBJECT_ID: Your Azure AD User Object ID for authentication -""" - -import asyncio -import os - -from agent_framework.openai import OpenAIChatClient -from agent_framework.redis import RedisHistoryProvider -from azure.identity.aio import AzureCliCredential -from redis.credentials import CredentialProvider - - -class AzureCredentialProvider(CredentialProvider): - """Credential provider for Azure AD authentication with Redis Enterprise.""" - - def __init__(self, azure_credential: AzureCliCredential, user_object_id: str): - self.azure_credential = azure_credential - self.user_object_id = user_object_id - - async def get_credentials_async(self) -> tuple[str] | tuple[str, str]: - """Get Azure AD token for Redis authentication. - - Returns (username, token) where username is the Azure user's Object ID. 
- """ - token = await self.azure_credential.get_token("https://redis.azure.com/.default") - return (self.user_object_id, token.token) - - -async def main() -> None: - redis_host = os.environ.get("AZURE_REDIS_HOST") - if not redis_host: - print("ERROR: Set AZURE_REDIS_HOST environment variable") - return - - # For Azure Redis with Entra ID, username must be your Object ID - user_object_id = os.environ.get("AZURE_USER_OBJECT_ID") - if not user_object_id: - print("ERROR: Set AZURE_USER_OBJECT_ID environment variable") - print("Get your Object ID from the Azure Portal") - return - - # Create Azure CLI credential provider (uses 'az login' credentials) - azure_credential = AzureCliCredential() - credential_provider = AzureCredentialProvider(azure_credential, user_object_id) - - session_id = "azure_test_session" - - # Create Azure Redis history provider - history_provider = RedisHistoryProvider( - credential_provider=credential_provider, - host=redis_host, - port=10000, - ssl=True, - thread_id=session_id, - key_prefix="chat_messages", - max_messages=100, - ) - - # Create chat client - client = OpenAIChatClient() - - # Create agent with Azure Redis history provider - agent = client.as_agent( - name="AzureRedisAssistant", - instructions="You are a helpful assistant.", - context_providers=[history_provider], - ) - - # Conversation - query = "Remember that I enjoy gumbo" - result = await agent.run(query) - print("User: ", query) - print("Agent: ", result) - - # Ask the agent to recall the stored preference; it should retrieve from memory - query = "What do I enjoy?" - result = await agent.run(query) - print("User: ", query) - print("Agent: ", result) - - query = "What did I say to you just now?" 
- result = await agent.run(query) - print("User: ", query) - print("Agent: ", result) - - query = "Remember that I have a meeting at 3pm tomorrow" - result = await agent.run(query) - print("User: ", query) - print("Agent: ", result) - - query = "Tulips are red" - result = await agent.run(query) - print("User: ", query) - print("Agent: ", result) - - query = "What was the first thing I said to you this conversation?" - result = await agent.run(query) - print("User: ", query) - print("Agent: ", result) - - # Cleanup - await azure_credential.close() - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/getting_started/sessions/redis/redis_basics.py b/python/samples/getting_started/sessions/redis/redis_basics.py deleted file mode 100644 index f6919577aa..0000000000 --- a/python/samples/getting_started/sessions/redis/redis_basics.py +++ /dev/null @@ -1,253 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -"""Redis Context Provider: Basic usage and agent integration - -This example demonstrates how to use the Redis context provider to persist and -retrieve conversational memory for agents. It covers three progressively more -realistic scenarios: - -1) Standalone provider usage ("basic cache") - - Write messages to Redis and retrieve relevant context using full-text or - hybrid vector search. - -2) Agent + provider - - Connect the provider to an agent so the agent can store user preferences - and recall them across turns. - -3) Agent + provider + tool memory - - Expose a simple tool to the agent, then verify that details from the tool - outputs are captured and retrievable as part of the agent's memory. 
- -Requirements: - - A Redis instance with RediSearch enabled (e.g., Redis Stack) - - agent-framework with the Redis extra installed: pip install "agent-framework-redis" - - Optionally an OpenAI API key if enabling embeddings for hybrid search - -Run: - python redis_basics.py -""" - -import asyncio -import os - -from agent_framework import Message, tool -from agent_framework.openai import OpenAIChatClient -from agent_framework.redis import RedisContextProvider -from redisvl.extensions.cache.embeddings import EmbeddingsCache -from redisvl.utils.vectorize import OpenAITextVectorizer - - -# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/02-agents/tools/function_tool_with_approval.py and samples/02-agents/tools/function_tool_with_approval_and_sessions.py. -@tool(approval_mode="never_require") -def search_flights(origin_airport_code: str, destination_airport_code: str, detailed: bool = False) -> str: - """Simulated flight-search tool to demonstrate tool memory. - - The agent can call this function, and the returned details can be stored - by the Redis context provider. We later ask the agent to recall facts from - these tool results to verify memory is working as expected. 
- """ - # Minimal static catalog used to simulate a tool's structured output - flights = { - ("JFK", "LAX"): { - "airline": "SkyJet", - "duration": "6h 15m", - "price": 325, - "cabin": "Economy", - "baggage": "1 checked bag", - }, - ("SFO", "SEA"): { - "airline": "Pacific Air", - "duration": "2h 5m", - "price": 129, - "cabin": "Economy", - "baggage": "Carry-on only", - }, - ("LHR", "DXB"): { - "airline": "EuroWings", - "duration": "6h 50m", - "price": 499, - "cabin": "Business", - "baggage": "2 bags included", - }, - } - - route = (origin_airport_code.upper(), destination_airport_code.upper()) - if route not in flights: - return f"No flights found between {origin_airport_code} and {destination_airport_code}" - - flight = flights[route] - if not detailed: - return f"Flights available from {origin_airport_code} to {destination_airport_code}." - - return ( - f"{flight['airline']} operates flights from {origin_airport_code} to {destination_airport_code}. " - f"Duration: {flight['duration']}. " - f"Price: ${flight['price']}. " - f"Cabin: {flight['cabin']}. " - f"Baggage policy: {flight['baggage']}." - ) - - -async def main() -> None: - """Walk through provider-only, agent integration, and tool-memory scenarios. - - Helpful debugging (uncomment when iterating): - - print(await provider.redis_index.info()) - - print(await provider.search_all()) - """ - - print("1. Standalone provider usage:") - print("-" * 40) - # Create a provider with partition scope and OpenAI embeddings - - # Please set the OPENAI_API_KEY and OPENAI_CHAT_MODEL_ID environment variables to use the OpenAI vectorizer - # Recommend default for OPENAI_CHAT_MODEL_ID is gpt-4o-mini - - # We attach an embedding vectorizer so the provider can perform hybrid (text + vector) - # retrieval. If you prefer text-only retrieval, instantiate RedisContextProvider without the - # 'vectorizer' and vector_* parameters. 
- vectorizer = OpenAITextVectorizer( - model="text-embedding-ada-002", - api_config={"api_key": os.getenv("OPENAI_API_KEY")}, - cache=EmbeddingsCache(name="openai_embeddings_cache", redis_url="redis://localhost:6379"), - ) - # The provider manages persistence and retrieval. application_id/agent_id/user_id - # scope data for multi-tenant separation. - provider = RedisContextProvider( - redis_url="redis://localhost:6379", - index_name="redis_basics", - application_id="matrix_of_kermits", - agent_id="agent_kermit", - user_id="kermit", - redis_vectorizer=vectorizer, - vector_field_name="vector", - vector_algorithm="hnsw", - vector_distance_metric="cosine", - ) - - # Build sample chat messages to persist to Redis - messages = [ - Message("user", ["runA CONVO: User Message"]), - Message("assistant", ["runA CONVO: Assistant Message"]), - Message("system", ["runA CONVO: System Message"]), - ] - - # Use the provider's before_run/after_run API to store and retrieve messages. - # In practice, the agent handles this automatically; this shows the low-level API. - from agent_framework import AgentSession, SessionContext - - session = AgentSession(session_id="runA") - context = SessionContext(input_messages=messages) - state = session.state - - # Store messages via after_run - await provider.after_run(agent=None, session=session, context=context, state=state) - - # Retrieve relevant memories via before_run - query_context = SessionContext(input_messages=[Message("system", ["B: Assistant Message"])]) - await provider.before_run(agent=None, session=session, context=query_context, state=state) - - # Inspect retrieved memories that would be injected into instructions - # (Debug-only output so you can verify retrieval works as expected.) - print("Before Run Result:") - print(query_context) - - # Drop / delete the provider index in Redis - await provider.redis_index.delete() - - # --- Agent + provider: teach and recall a preference --- - - print("\n2. 
Agent + provider: teach and recall a preference") - print("-" * 40) - # Fresh provider for the agent demo (recreates index) - vectorizer = OpenAITextVectorizer( - model="text-embedding-ada-002", - api_config={"api_key": os.getenv("OPENAI_API_KEY")}, - cache=EmbeddingsCache(name="openai_embeddings_cache", redis_url="redis://localhost:6379"), - ) - # Recreate a clean index so the next scenario starts fresh - provider = RedisContextProvider( - redis_url="redis://localhost:6379", - index_name="redis_basics_2", - prefix="context_2", - application_id="matrix_of_kermits", - agent_id="agent_kermit", - user_id="kermit", - redis_vectorizer=vectorizer, - vector_field_name="vector", - vector_algorithm="hnsw", - vector_distance_metric="cosine", - ) - - # Create chat client for the agent - client = OpenAIChatClient(model_id=os.getenv("OPENAI_CHAT_MODEL_ID"), api_key=os.getenv("OPENAI_API_KEY")) - # Create agent wired to the Redis context provider. The provider automatically - # persists conversational details and surfaces relevant context on each turn. - agent = client.as_agent( - name="MemoryEnhancedAssistant", - instructions=( - "You are a helpful assistant. Personalize replies using provided context. " - "Before answering, always check for stored context" - ), - tools=[], - context_providers=[provider], - ) - - # Teach a user preference; the agent writes this to the provider's memory - query = "Remember that I enjoy glugenflorgle" - result = await agent.run(query) - print("User: ", query) - print("Agent: ", result) - - # Ask the agent to recall the stored preference; it should retrieve from memory - query = "What do I enjoy?" - result = await agent.run(query) - print("User: ", query) - print("Agent: ", result) - - # Drop / delete the provider index in Redis - await provider.redis_index.delete() - - # --- Agent + provider + tool: store and recall tool-derived context --- - - print("\n3. 
Agent + provider + tool: store and recall tool-derived context") - print("-" * 40) - # Text-only provider (full-text search only). Omits vectorizer and related params. - provider = RedisContextProvider( - redis_url="redis://localhost:6379", - index_name="redis_basics_3", - prefix="context_3", - application_id="matrix_of_kermits", - agent_id="agent_kermit", - user_id="kermit", - ) - - # Create agent exposing the flight search tool. Tool outputs are captured by the - # provider and become retrievable context for later turns. - client = OpenAIChatClient(model_id=os.getenv("OPENAI_CHAT_MODEL_ID"), api_key=os.getenv("OPENAI_API_KEY")) - agent = client.as_agent( - name="MemoryEnhancedAssistant", - instructions=( - "You are a helpful assistant. Personalize replies using provided context. " - "Before answering, always check for stored context" - ), - tools=search_flights, - context_providers=[provider], - ) - # Invoke the tool; outputs become part of memory/context - query = "Are there any flights from new york city (jfk) to la? Give me details" - result = await agent.run(query) - print("User: ", query) - print("Agent: ", result) - # Verify the agent can recall tool-derived context - query = "Which flight did I ask about?" - result = await agent.run(query) - print("User: ", query) - print("Agent: ", result) - - # Drop / delete the provider index in Redis - await provider.redis_index.delete() - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/getting_started/sessions/redis/redis_conversation.py b/python/samples/getting_started/sessions/redis/redis_conversation.py deleted file mode 100644 index e9e4c5714e..0000000000 --- a/python/samples/getting_started/sessions/redis/redis_conversation.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -"""Redis Context Provider: Basic usage and agent integration - -This example demonstrates how to use the Redis context provider to persist -conversational details. 
Pass it as a constructor argument to create_agent. - -Requirements: - - A Redis instance with RediSearch enabled (e.g., Redis Stack) - - agent-framework with the Redis extra installed: pip install "agent-framework-redis" - - Optionally an OpenAI API key if enabling embeddings for hybrid search - -Run: - python redis_conversation.py -""" - -import asyncio -import os - -from agent_framework.openai import OpenAIChatClient -from agent_framework.redis import RedisContextProvider -from redisvl.extensions.cache.embeddings import EmbeddingsCache -from redisvl.utils.vectorize import OpenAITextVectorizer - - -async def main() -> None: - """Walk through provider and chat message store usage. - - Helpful debugging (uncomment when iterating): - - print(await provider.redis_index.info()) - - print(await provider.search_all()) - """ - vectorizer = OpenAITextVectorizer( - model="text-embedding-ada-002", - api_config={"api_key": os.getenv("OPENAI_API_KEY")}, - cache=EmbeddingsCache(name="openai_embeddings_cache", redis_url="redis://localhost:6379"), - ) - - provider = RedisContextProvider( - redis_url="redis://localhost:6379", - index_name="redis_conversation", - prefix="redis_conversation", - application_id="matrix_of_kermits", - agent_id="agent_kermit", - user_id="kermit", - redis_vectorizer=vectorizer, - vector_field_name="vector", - vector_algorithm="hnsw", - vector_distance_metric="cosine", - ) - - # Create chat client for the agent - client = OpenAIChatClient(model_id=os.getenv("OPENAI_CHAT_MODEL_ID"), api_key=os.getenv("OPENAI_API_KEY")) - # Create agent wired to the Redis context provider. The provider automatically - # persists conversational details and surfaces relevant context on each turn. - agent = client.as_agent( - name="MemoryEnhancedAssistant", - instructions=( - "You are a helpful assistant. Personalize replies using provided context. 
" - "Before answering, always check for stored context" - ), - tools=[], - context_providers=[provider], - ) - - # Teach a user preference; the agent writes this to the provider's memory - query = "Remember that I enjoy gumbo" - result = await agent.run(query) - print("User: ", query) - print("Agent: ", result) - - # Ask the agent to recall the stored preference; it should retrieve from memory - query = "What do I enjoy?" - result = await agent.run(query) - print("User: ", query) - print("Agent: ", result) - - query = "What did I say to you just now?" - result = await agent.run(query) - print("User: ", query) - print("Agent: ", result) - - query = "Remember that I have a meeting at 3pm tomorro" - result = await agent.run(query) - print("User: ", query) - print("Agent: ", result) - - query = "Tulips are red" - result = await agent.run(query) - print("User: ", query) - print("Agent: ", result) - - query = "What was the first thing I said to you this conversation?" - result = await agent.run(query) - print("User: ", query) - print("Agent: ", result) - # Drop / delete the provider index in Redis - await provider.redis_index.delete() - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/getting_started/sessions/redis/redis_sessions.py b/python/samples/getting_started/sessions/redis/redis_sessions.py deleted file mode 100644 index 75552297f5..0000000000 --- a/python/samples/getting_started/sessions/redis/redis_sessions.py +++ /dev/null @@ -1,243 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -"""Redis Context Provider: Memory scoping examples - -This sample demonstrates how conversational memory can be scoped when using the -Redis context provider. It covers three scenarios: - -1) Cross-session memory - - Memories are shared across all sessions for a given app/agent/user. - - New sessions can still retrieve memories stored in earlier sessions. 
- -2) Session-specific memory - - Demonstrates storing and retrieving memories within a single session, - with memories also accessible from new sessions due to cross-session retrieval. - -3) Multiple agents with isolated memory - - Use different agent_id values to keep memories separated for different - agent personas, even when the user_id is the same. - -Requirements: - - A Redis instance with RediSearch enabled (e.g., Redis Stack) - - agent-framework with the Redis extra installed: pip install "agent-framework-redis" - - Optionally an OpenAI API key for the chat client in this demo - -Run: - python redis_sessions.py -""" - -import asyncio -import os - -from agent_framework.openai import OpenAIChatClient -from agent_framework.redis import RedisContextProvider -from redisvl.extensions.cache.embeddings import EmbeddingsCache -from redisvl.utils.vectorize import OpenAITextVectorizer - -# Please set the OPENAI_API_KEY and OPENAI_CHAT_MODEL_ID environment variables to use the OpenAI vectorizer -# Recommend default for OPENAI_CHAT_MODEL_ID is gpt-4o-mini - - -async def example_cross_session_memory() -> None: - """Example 1: Cross-session memory (memories shared across all sessions for a user).""" - print("1. Cross-Session Memory Example:") - print("-" * 40) - - client = OpenAIChatClient( - model_id=os.getenv("OPENAI_CHAT_MODEL_ID", "gpt-4o-mini"), - api_key=os.getenv("OPENAI_API_KEY"), - ) - - provider = RedisContextProvider( - redis_url="redis://localhost:6379", - index_name="redis_threads_global", - application_id="threads_demo_app", - agent_id="threads_demo_agent", - user_id="threads_demo_user", - ) - - agent = client.as_agent( - name="MemoryAssistant", - instructions=( - "You are a helpful assistant. Personalize replies using provided context. 
" - "Before answering, always check for stored context containing information" - ), - tools=[], - context_providers=[provider], - ) - - # Store a preference - query = "Remember that I prefer technical responses with code examples when discussing programming." - print(f"User: {query}") - result = await agent.run(query) - print(f"Agent: {result}\n") - - # Create a new session - memories should still be accessible because - # RedisContextProvider retrieves across all sessions for the same app/agent/user - new_session = agent.create_session() - query = "What technical responses do I prefer?" - print(f"User (new session): {query}") - result = await agent.run(query, session=new_session) - print(f"Agent: {result}\n") - - # Clean up the Redis index - await provider.redis_index.delete() - - -async def example_session_memory_with_vectorizer() -> None: - """Example 2: Session memory with a custom vectorizer for hybrid search. - - Demonstrates storing and retrieving memories within a session using - a custom OpenAI vectorizer for hybrid (text + vector) search. Memories - are also accessible from new sessions due to cross-session retrieval. - """ - print("2. 
Session Memory with Vectorizer Example:") - print("-" * 40) - - client = OpenAIChatClient( - model_id=os.getenv("OPENAI_CHAT_MODEL_ID", "gpt-4o-mini"), - api_key=os.getenv("OPENAI_API_KEY"), - ) - - vectorizer = OpenAITextVectorizer( - model="text-embedding-ada-002", - api_config={"api_key": os.getenv("OPENAI_API_KEY")}, - cache=EmbeddingsCache(name="openai_embeddings_cache", redis_url="redis://localhost:6379"), - ) - - provider = RedisContextProvider( - redis_url="redis://localhost:6379", - index_name="redis_threads_dynamic", - application_id="threads_demo_app", - agent_id="threads_demo_agent", - user_id="threads_demo_user", - redis_vectorizer=vectorizer, - vector_field_name="vector", - vector_algorithm="hnsw", - vector_distance_metric="cosine", - ) - - agent = client.as_agent( - name="VectorizerMemoryAssistant", - instructions="You are an assistant with hybrid search memory.", - context_providers=[provider], - ) - - # Create a specific session for this scoped provider - dedicated_session = agent.create_session() - - # Store some information in the dedicated session - query = "Remember that for this conversation, I'm working on a Python project about data analysis." - print(f"User (dedicated session): {query}") - result = await agent.run(query, session=dedicated_session) - print(f"Agent: {result}\n") - - # Test memory retrieval in the same dedicated session - query = "What project am I working on?" - print(f"User (same dedicated session): {query}") - result = await agent.run(query, session=dedicated_session) - print(f"Agent: {result}\n") - - # Store more information in the same session - query = "Also remember that I prefer using pandas and matplotlib for this project." - print(f"User (same dedicated session): {query}") - result = await agent.run(query, session=dedicated_session) - print(f"Agent: {result}\n") - - # Test comprehensive memory retrieval - query = "What do you know about my current project and preferences?" 
- print(f"User (same dedicated session): {query}") - result = await agent.run(query, session=dedicated_session) - print(f"Agent: {result}\n") - - # Clean up the Redis index - await provider.redis_index.delete() - - -async def example_multiple_agents() -> None: - """Example 3: Multiple agents with isolated memory (isolated via agent_id) but within 1 index.""" - print("3. Multiple Agents with Isolated Memory:") - print("-" * 40) - - client = OpenAIChatClient( - model_id=os.getenv("OPENAI_CHAT_MODEL_ID", "gpt-4o-mini"), - api_key=os.getenv("OPENAI_API_KEY"), - ) - - vectorizer = OpenAITextVectorizer( - model="text-embedding-ada-002", - api_config={"api_key": os.getenv("OPENAI_API_KEY")}, - cache=EmbeddingsCache(name="openai_embeddings_cache", redis_url="redis://localhost:6379"), - ) - - personal_provider = RedisContextProvider( - redis_url="redis://localhost:6379", - index_name="redis_threads_agents", - application_id="threads_demo_app", - agent_id="agent_personal", - user_id="threads_demo_user", - redis_vectorizer=vectorizer, - vector_field_name="vector", - vector_algorithm="hnsw", - vector_distance_metric="cosine", - ) - - personal_agent = client.as_agent( - name="PersonalAssistant", - instructions="You are a personal assistant that helps with personal tasks.", - context_providers=[personal_provider], - ) - - work_provider = RedisContextProvider( - redis_url="redis://localhost:6379", - index_name="redis_threads_agents", - application_id="threads_demo_app", - agent_id="agent_work", - user_id="threads_demo_user", - redis_vectorizer=vectorizer, - vector_field_name="vector", - vector_algorithm="hnsw", - vector_distance_metric="cosine", - ) - - work_agent = client.as_agent( - name="WorkAssistant", - instructions="You are a work assistant that helps with professional tasks.", - context_providers=[work_provider], - ) - - # Store personal information - query = "Remember that I like to exercise at 6 AM and prefer outdoor activities." 
- print(f"User to Personal Agent: {query}") - result = await personal_agent.run(query) - print(f"Personal Agent: {result}\n") - - # Store work information - query = "Remember that I have team meetings every Tuesday at 2 PM." - print(f"User to Work Agent: {query}") - result = await work_agent.run(query) - print(f"Work Agent: {result}\n") - - # Test memory isolation - query = "What do you know about my schedule?" - print(f"User to Personal Agent: {query}") - result = await personal_agent.run(query) - print(f"Personal Agent: {result}\n") - - print(f"User to Work Agent: {query}") - result = await work_agent.run(query) - print(f"Work Agent: {result}\n") - - # Clean up the Redis index (shared) - await work_provider.redis_index.delete() - - -async def main() -> None: - print("=== Redis Memory Scoping Examples ===\n") - await example_cross_session_memory() - await example_session_memory_with_vectorizer() - await example_multiple_agents() - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/getting_started/sessions/redis_history_provider.py b/python/samples/getting_started/sessions/redis_history_provider.py deleted file mode 100644 index f54edd8170..0000000000 --- a/python/samples/getting_started/sessions/redis_history_provider.py +++ /dev/null @@ -1,257 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -import asyncio -import os -from uuid import uuid4 - -from agent_framework import AgentSession -from agent_framework.openai import OpenAIChatClient -from agent_framework.redis import RedisHistoryProvider - -""" -Redis History Provider Session Example - -This sample demonstrates how to use Redis as a history provider for session -management, enabling persistent conversation history storage across sessions -with Redis as the backend data store. 
-""" - - -async def example_manual_memory_store() -> None: - """Basic example of using Redis history provider.""" - print("=== Basic Redis History Provider Example ===") - - # Create Redis history provider - redis_provider = RedisHistoryProvider( - source_id="redis_basic_chat", - redis_url="redis://localhost:6379", - ) - - # Create agent with Redis history provider - agent = OpenAIChatClient().as_agent( - name="RedisBot", - instructions="You are a helpful assistant that remembers our conversation using Redis.", - context_providers=[redis_provider], - ) - - # Create session - session = agent.create_session() - - # Have a conversation - print("\n--- Starting conversation ---") - query1 = "Hello! My name is Alice and I love pizza." - print(f"User: {query1}") - response1 = await agent.run(query1, session=session) - print(f"Agent: {response1.text}") - - query2 = "What do you remember about me?" - print(f"User: {query2}") - response2 = await agent.run(query2, session=session) - print(f"Agent: {response2.text}") - - print("Done\n") - - -async def example_user_session_management() -> None: - """Example of managing user sessions with Redis.""" - print("=== User Session Management Example ===") - - user_id = "alice_123" - session_id = f"session_{uuid4()}" - - # Create Redis history provider for specific user session - redis_provider = RedisHistoryProvider( - source_id=f"redis_{user_id}", - redis_url="redis://localhost:6379", - max_messages=10, # Keep only last 10 messages - ) - - # Create agent with history provider - agent = OpenAIChatClient().as_agent( - name="SessionBot", - instructions="You are a helpful assistant. 
Keep track of user preferences.", - context_providers=[redis_provider], - ) - - # Start conversation - session = agent.create_session(session_id=session_id) - - print(f"Started session for user {user_id}") - - # Simulate conversation - queries = [ - "Hi, I'm Alice and I prefer vegetarian food.", - "What restaurants would you recommend?", - "I also love Italian cuisine.", - "Can you remember my food preferences?", - ] - - for i, query in enumerate(queries, 1): - print(f"\n--- Message {i} ---") - print(f"User: {query}") - response = await agent.run(query, session=session) - print(f"Agent: {response.text}") - - print("Done\n") - - -async def example_conversation_persistence() -> None: - """Example of conversation persistence across application restarts.""" - print("=== Conversation Persistence Example ===") - - # Phase 1: Start conversation - print("--- Phase 1: Starting conversation ---") - redis_provider = RedisHistoryProvider( - source_id="redis_persistent_chat", - redis_url="redis://localhost:6379", - ) - - agent = OpenAIChatClient().as_agent( - name="PersistentBot", - instructions="You are a helpful assistant. Remember our conversation history.", - context_providers=[redis_provider], - ) - - session = agent.create_session() - - # Start conversation - query1 = "Hello! I'm working on a Python project about machine learning." - print(f"User: {query1}") - response1 = await agent.run(query1, session=session) - print(f"Agent: {response1.text}") - - query2 = "I'm specifically interested in neural networks." - print(f"User: {query2}") - response2 = await agent.run(query2, session=session) - print(f"Agent: {response2.text}") - - # Serialize session state - serialized = session.to_dict() - - # Phase 2: Resume conversation (simulating app restart) - print("\n--- Phase 2: Resuming conversation (after 'restart') ---") - restored_session = AgentSession.from_dict(serialized) - - # Continue conversation - agent should remember context - query3 = "What was I working on before?" 
- print(f"User: {query3}") - response3 = await agent.run(query3, session=restored_session) - print(f"Agent: {response3.text}") - - query4 = "Can you suggest some Python libraries for neural networks?" - print(f"User: {query4}") - response4 = await agent.run(query4, session=restored_session) - print(f"Agent: {response4.text}") - - print("Done\n") - - -async def example_session_serialization() -> None: - """Example of session state serialization and deserialization.""" - print("=== Session Serialization Example ===") - - redis_provider = RedisHistoryProvider( - source_id="redis_serialization_chat", - redis_url="redis://localhost:6379", - ) - - agent = OpenAIChatClient().as_agent( - name="SerializationBot", - instructions="You are a helpful assistant.", - context_providers=[redis_provider], - ) - - session = agent.create_session() - - # Have initial conversation - print("--- Initial conversation ---") - query1 = "Hello! I'm testing serialization." - print(f"User: {query1}") - response1 = await agent.run(query1, session=session) - print(f"Agent: {response1.text}") - - # Serialize session state - serialized = session.to_dict() - print(f"\nSerialized session state: {serialized}") - - # Deserialize session state (simulating loading from database/file) - print("\n--- Deserializing session state ---") - restored_session = AgentSession.from_dict(serialized) - - # Continue conversation with restored session - query2 = "Do you remember what I said about testing?" 
- print(f"User: {query2}") - response2 = await agent.run(query2, session=restored_session) - print(f"Agent: {response2.text}") - - print("Done\n") - - -async def example_message_limits() -> None: - """Example of automatic message trimming with limits.""" - print("=== Message Limits Example ===") - - # Create provider with small message limit - redis_provider = RedisHistoryProvider( - source_id="redis_limited_chat", - redis_url="redis://localhost:6379", - max_messages=3, # Keep only 3 most recent messages - ) - - agent = OpenAIChatClient().as_agent( - name="LimitBot", - instructions="You are a helpful assistant with limited memory.", - context_providers=[redis_provider], - ) - - session = agent.create_session() - - # Send multiple messages to test trimming - messages = [ - "Message 1: Hello!", - "Message 2: How are you?", - "Message 3: What's the weather?", - "Message 4: Tell me a joke.", - "Message 5: This should trigger trimming.", - ] - - for i, query in enumerate(messages, 1): - print(f"\n--- Sending message {i} ---") - print(f"User: {query}") - response = await agent.run(query, session=session) - print(f"Agent: {response.text}") - - print("Done\n") - - -async def main() -> None: - """Run all Redis history provider examples.""" - print("Redis History Provider Examples") - print("=" * 50) - print("Prerequisites:") - print("- Redis server running on localhost:6379") - print("- OPENAI_API_KEY environment variable set") - print("=" * 50) - - # Check prerequisites - if not os.getenv("OPENAI_API_KEY"): - print("ERROR: OPENAI_API_KEY environment variable not set") - return - - try: - # Run all examples - await example_manual_memory_store() - await example_user_session_management() - await example_conversation_persistence() - await example_session_serialization() - await example_message_limits() - - print("All examples completed successfully!") - - except Exception as e: - print(f"Error running examples: {e}") - raise - - -if __name__ == "__main__": - 
asyncio.run(main()) diff --git a/python/samples/getting_started/sessions/simple_context_provider.py b/python/samples/getting_started/sessions/simple_context_provider.py deleted file mode 100644 index 0ddcbf61a8..0000000000 --- a/python/samples/getting_started/sessions/simple_context_provider.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -import asyncio -from typing import Any - -from agent_framework import ( - Agent, - AgentSession, - BaseContextProvider, - SessionContext, - SupportsChatGetResponse, -) -from agent_framework.azure import AzureOpenAIResponsesClient -from azure.identity import AzureCliCredential -from pydantic import BaseModel - - -class UserInfo(BaseModel): - name: str | None = None - age: int | None = None - - -class UserInfoMemory(BaseContextProvider): - """Context provider that extracts and remembers user info (name, age). - - State is stored in ``session.state["user_info_memory"]`` so it survives - serialization via ``session.to_dict()`` / ``AgentSession.from_dict()``. - """ - - DEFAULT_SOURCE_ID = "user_info_memory" - - def __init__(self, client: SupportsChatGetResponse): - super().__init__(self.DEFAULT_SOURCE_ID) - self._chat_client = client - - async def before_run( - self, - *, - agent: Any, - session: AgentSession | None, - context: SessionContext, - state: dict[str, Any], - ) -> None: - """Provide user information context before each agent call.""" - user_info = state.setdefault("user_info", UserInfo()) - - instructions: list[str] = [] - - if user_info.name is None: - instructions.append( - "Ask the user for their name and politely decline to answer any questions until they provide it." - ) - else: - instructions.append(f"The user's name is {user_info.name}.") - - if user_info.age is None: - instructions.append( - "Ask the user for their age and politely decline to answer any questions until they provide it." 
- ) - else: - instructions.append(f"The user's age is {user_info.age}.") - - context.extend_instructions(self.source_id, " ".join(instructions)) - - async def after_run( - self, - *, - agent: Any, - session: AgentSession | None, - context: SessionContext, - state: dict[str, Any], - ) -> None: - """Extract user information from messages after each agent call.""" - user_info = state.setdefault("user_info", UserInfo()) - if user_info.name is not None and user_info.age is not None: - return # Already have everything - - request_messages = context.get_messages(include_input=True, include_response=True) - user_messages = [msg for msg in request_messages if hasattr(msg, "role") and msg.role == "user"] # type: ignore - if not user_messages: - return - - try: - result = await self._chat_client.get_response( - messages=request_messages, # type: ignore - instructions="Extract the user's name and age from the message if present. " - "If not present return nulls.", - options={"response_format": UserInfo}, - ) - extracted = result.value - if extracted and user_info.name is None and extracted.name: - user_info.name = extracted.name - if extracted and user_info.age is None and extracted.age: - user_info.age = extracted.age - state["user_info"] = user_info - except Exception: - pass # Failed to extract, continue without updating - - -async def main(): - client = AzureOpenAIResponsesClient(credential=AzureCliCredential()) - - async with Agent( - client=client, - instructions="You are a friendly assistant. 
Always address the user by their name.", - default_options={"store": True}, - context_providers=[UserInfoMemory(client)], - ) as agent: - session = agent.create_session() - - print(await agent.run("Hello, what is the square root of 9?", session=session)) - print(await agent.run("My name is Ruaidhrí", session=session)) - print(await agent.run("I am 20 years old", session=session)) - - # Inspect extracted user info from session state - user_info = session.state.get(UserInfoMemory.DEFAULT_SOURCE_ID, {}).get("user_info", UserInfo()) - print() - print(f"MEMORY - User Name: {user_info.name}") - print(f"MEMORY - User Age: {user_info.age}") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/getting_started/sessions/suspend_resume_session.py b/python/samples/getting_started/sessions/suspend_resume_session.py deleted file mode 100644 index dcbb00d06a..0000000000 --- a/python/samples/getting_started/sessions/suspend_resume_session.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -import asyncio - -from agent_framework import AgentSession -from agent_framework.azure import AzureAIAgentClient -from agent_framework.openai import OpenAIChatClient -from azure.identity.aio import AzureCliCredential - -""" -Session Suspend and Resume Example - -This sample demonstrates how to suspend and resume conversation sessions, comparing -service-managed sessions (Azure AI) with in-memory sessions (OpenAI) for persistent -conversation state across sessions. -""" - - -async def suspend_resume_service_managed_session() -> None: - """Demonstrates how to suspend and resume a service-managed session.""" - print("=== Suspend-Resume Service-Managed Session ===") - - # AzureAIAgentClient supports service-managed sessions. - async with ( - AzureCliCredential() as credential, - AzureAIAgentClient(credential=credential).as_agent( - name="MemoryBot", instructions="You are a helpful assistant that remembers our conversation." 
- ) as agent, - ): - # Start a new session for the agent conversation. - session = agent.create_session() - - # Respond to user input. - query = "Hello! My name is Alice and I love pizza." - print(f"User: {query}") - print(f"Agent: {await agent.run(query, session=session)}\n") - - # Serialize the session state, so it can be stored for later use. - serialized_session = session.to_dict() - - # The session can now be saved to a database, file, or any other storage mechanism and loaded again later. - print(f"Serialized session: {serialized_session}\n") - - # Deserialize the session state after loading from storage. - resumed_session = AgentSession.from_dict(serialized_session) - - # Respond to user input. - query = "What do you remember about me?" - print(f"User: {query}") - print(f"Agent: {await agent.run(query, session=resumed_session)}\n") - - -async def suspend_resume_in_memory_session() -> None: - """Demonstrates how to suspend and resume an in-memory session.""" - print("=== Suspend-Resume In-Memory Session ===") - - # OpenAI Chat Client is used as an example here, - # other chat clients can be used as well. - agent = OpenAIChatClient().as_agent( - name="MemoryBot", instructions="You are a helpful assistant that remembers our conversation." - ) - - # Start a new session for the agent conversation. - session = agent.create_session() - - # Respond to user input. - query = "Hello! My name is Alice and I love pizza." - print(f"User: {query}") - print(f"Agent: {await agent.run(query, session=session)}\n") - - # Serialize the session state, so it can be stored for later use. - serialized_session = session.to_dict() - - # The session can now be saved to a database, file, or any other storage mechanism and loaded again later. - print(f"Serialized session: {serialized_session}\n") - - # Deserialize the session state after loading from storage. - resumed_session = AgentSession.from_dict(serialized_session) - - # Respond to user input. 
- query = "What do you remember about me?" - print(f"User: {query}") - print(f"Agent: {await agent.run(query, session=resumed_session)}\n") - - -async def main() -> None: - print("=== Suspend-Resume Session Examples ===") - await suspend_resume_service_managed_session() - await suspend_resume_in_memory_session() - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/uv.lock b/python/uv.lock index 36412ed85f..2d35bd6969 100644 --- a/python/uv.lock +++ b/python/uv.lock @@ -886,7 +886,7 @@ wheels = [ [[package]] name = "anthropic" -version = "0.80.0" +version = "0.81.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -898,9 +898,9 @@ dependencies = [ { name = "sniffio", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "typing-extensions", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/7f/63/791e14ef5a8ecb485cef5b5d058c7ca3ad6c50a2f94cf4cea5231c6b7c16/anthropic-0.80.0.tar.gz", hash = "sha256:ef042586673fdcab2a6ffd381aa5f9a1bcce38ffe73c07fe70bd56d12b8124ba", size = 533291, upload-time = "2026-02-17T19:26:26.717Z" } +sdist = { url = "https://files.pythonhosted.org/packages/f3/c2/d2bb9b3c82c386abf3b2c32ae0452a8dcb89ed2809d875e1420bea22e318/anthropic-0.81.0.tar.gz", hash = "sha256:bab2d4e45c2e81a0668fdc2da2f7fd665ed8a0295ba3c86450f9dcc3a7804524", size = 532935, upload-time = "2026-02-18T04:00:54.658Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b2/4b/665f29338f51d0c2f9e04b276ea54cc1e957ae5c521a0ad868aa80abc608/anthropic-0.80.0-py3-none-any.whl", hash = "sha256:dad0e40ec371ee686e9ffb2e0cb461a0ed51447fa100927fb5d39b174c286d6f", size = 453667, upload-time = "2026-02-17T19:26:29.96Z" }, + { url = 
"https://files.pythonhosted.org/packages/86/27/a18e1613da66b3c9c7565c92457a60de15e824a6dd2ed9bce0fbfe615ded/anthropic-0.81.0-py3-none-any.whl", hash = "sha256:ac54407e9a1f9b35e6e6c86f75bf403f0e54d60944f99f15f685a38d6829f20b", size = 455627, upload-time = "2026-02-18T04:00:53.207Z" }, ] [[package]] @@ -2231,7 +2231,7 @@ wheels = [ [[package]] name = "github-copilot-sdk" -version = "0.1.24" +version = "0.1.25" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "pydantic", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -2239,12 +2239,12 @@ dependencies = [ { name = "typing-extensions", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] wheels = [ - { url = "https://files.pythonhosted.org/packages/4c/2f/a23d957f414c73bac4bcf4dfb7319d4b409599be56d9491c98be7bcacbae/github_copilot_sdk-0.1.24-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:f099b247defc55f6286c1912a9be58607359aecdfe4e90d60efd43be3af5095e", size = 58062112, upload-time = "2026-02-16T17:54:36.042Z" }, - { url = "https://files.pythonhosted.org/packages/f5/ae/e43e22eb7610aef72470287b9ea8044f645b4e46970f37d97656eddbcf62/github_copilot_sdk-0.1.24-py3-none-macosx_11_0_arm64.whl", hash = "sha256:f353a74229a840269d381b867dd4307ebed939aaddb81470f2d8a01ade54acfa", size = 54821079, upload-time = "2026-02-16T17:54:39.656Z" }, - { url = "https://files.pythonhosted.org/packages/46/64/da92d3d6c978a730989b298afd35945c937a8a55f3c0b6882c1500809239/github_copilot_sdk-0.1.24-py3-none-manylinux_2_17_aarch64.whl", hash = "sha256:786520ff2a166619bb12b1982f6e241fef7769d0ec5af718a81e9018d911604f", size = 60957893, upload-time = "2026-02-16T17:54:43.676Z" }, - { url = "https://files.pythonhosted.org/packages/a4/87/632426b384c8fe06dfc07b579d4cd3ff32ac5edb7488cd16bda13d9f7a2b/github_copilot_sdk-0.1.24-py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:980688f05387f508d7038def027f5b6cf04cec842827b0ee296feff55532b1a1", 
size = 59134563, upload-time = "2026-02-16T17:54:47.278Z" }, - { url = "https://files.pythonhosted.org/packages/d1/5a/47d37001c8e5b4caf9c3b24dbd44f1b684a779f9de695b21244b4e12ccb0/github_copilot_sdk-0.1.24-py3-none-win_amd64.whl", hash = "sha256:955bfdea5270c26b30816a8a0f11529f1e6eb545d708e84edf1d5709fbd5b082", size = 53576127, upload-time = "2026-02-16T17:54:52.781Z" }, - { url = "https://files.pythonhosted.org/packages/ab/1b/95f81bb3f1eebe2af1c45883fe4a01749ad27cacbe5cc1cfbaac0b9231b5/github_copilot_sdk-0.1.24-py3-none-win_arm64.whl", hash = "sha256:944cce58e589d5e191d1e3251f62d517cca476de2ef2f69474e69a4f7ca194ec", size = 51581234, upload-time = "2026-02-16T17:54:56.166Z" }, + { url = "https://files.pythonhosted.org/packages/87/06/1dec504b54c724d69283969d4ed004225ec8bbb1c0a5e9e0c3b6b048099a/github_copilot_sdk-0.1.25-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:d32c3fc2c393f70923a645a133607da2e562d078b87437f499100d5bb8c1902f", size = 58097936, upload-time = "2026-02-18T00:07:20.672Z" }, + { url = "https://files.pythonhosted.org/packages/9f/a3/a6ad1ca47af561069d6d8d0a4b074b000b0be1dfa9e66215b264ee31650c/github_copilot_sdk-0.1.25-py3-none-macosx_11_0_arm64.whl", hash = "sha256:7af33d3afbe09a78dfc9d65a843526e47aba15631e90926c42a21a200fab12da", size = 54867128, upload-time = "2026-02-18T00:07:25.228Z" }, + { url = "https://files.pythonhosted.org/packages/8c/08/74fd9be0ed292d524a15fa4db950f43f4afefb77514f856e36fd1203bf13/github_copilot_sdk-0.1.25-py3-none-manylinux_2_17_aarch64.whl", hash = "sha256:bc74a3d08ee45313ac02a3f7159c583ec41fc16090ec5f27f88c4b737f03139e", size = 60999905, upload-time = "2026-02-18T00:07:29.462Z" }, + { url = "https://files.pythonhosted.org/packages/ae/01/daae53c8586c0cadae9a2a146d1da9bd6dbd7e89b7dcd72643b453267345/github_copilot_sdk-0.1.25-py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:13ef99fa8c709c5f80d820672bf36ee9176bc33f0efce6a2b5cbf6d1bb2369e8", size = 59183062, upload-time = "2026-02-18T00:07:34.059Z" }, + { url = 
"https://files.pythonhosted.org/packages/81/a8/2ec7d47a18b042cca2c140cabb5fe6621697c1b43b8721637061122c51ed/github_copilot_sdk-0.1.25-py3-none-win_amd64.whl", hash = "sha256:1a90ee583309ff308fea42f9edec61203645a33ca1d3dc42953628fb8c3eda07", size = 53624148, upload-time = "2026-02-18T00:07:38.558Z" }, + { url = "https://files.pythonhosted.org/packages/6b/2e/4cffd33552ede91de7517641835a3365571abd3f436c9d76a4f50793033c/github_copilot_sdk-0.1.25-py3-none-win_arm64.whl", hash = "sha256:5249a63d1ac1e4d325c70c9902e81327b0baca53afa46010f52ac3fd3b5a111b", size = 51623455, upload-time = "2026-02-18T00:07:42.156Z" }, ] [[package]] @@ -3365,7 +3365,7 @@ wheels = [ [[package]] name = "mem0ai" -version = "1.0.3" +version = "1.0.4" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "openai", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -3376,9 +3376,9 @@ dependencies = [ { name = "qdrant-client", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "sqlalchemy", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ce/b6/9d3a747a5c1af2b4f73572a3d296bf5e99c99630a3f201b0ddbb14e811e6/mem0ai-1.0.3.tar.gz", hash = "sha256:8f7abe485a61653e3f2d3f8c222f531f8b52660b19d88820c56522103d9f31b5", size = 182698, upload-time = "2026-02-03T05:38:04.608Z" } +sdist = { url = "https://files.pythonhosted.org/packages/51/be/bb17c05e5a752ca79df2fbdcef83c7eaa249004029da9fd9488def574806/mem0ai-1.0.4.tar.gz", hash = "sha256:c6201130be46c9dc2b5cf0836e7811fd604430bb39c55c9c454045722d1ed21b", size = 182968, upload-time = "2026-02-17T22:34:46.247Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/84/3e/b300ab9fa6efd36c78f1402684eab1483f282c4ca6e983920fceb9c0f4fb/mem0ai-1.0.3-py3-none-any.whl", hash = "sha256:f500c3decc12c2663b2ad829ac4edcd0c674f2bd9bf4abf7f5c0522aef3d3cf8", 
size = 275722, upload-time = "2026-02-03T05:38:03.126Z" }, + { url = "https://files.pythonhosted.org/packages/b0/da/67f023b4269d77336bce950c7419ebd554272a5bfe1bc9c8ed79e8907eaa/mem0ai-1.0.4-py3-none-any.whl", hash = "sha256:06b31a2d98364ff6ae35abe4ee2ad2aea60fe43b20bad09c3ec6c1a9c031b753", size = 275979, upload-time = "2026-02-17T22:34:43.887Z" }, ] [[package]] @@ -4274,7 +4274,7 @@ wheels = [ [[package]] name = "pandas" -version = "3.0.0" +version = "3.0.1" source = { registry = "https://pypi.org/simple" } resolution-markers = [ "python_full_version >= '3.14' and sys_platform == 'darwin'", @@ -4295,55 +4295,55 @@ dependencies = [ { name = "python-dateutil", marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, { name = "tzdata", marker = "python_full_version >= '3.11' and sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/de/da/b1dc0481ab8d55d0f46e343cfe67d4551a0e14fcee52bd38ca1bd73258d8/pandas-3.0.0.tar.gz", hash = "sha256:0facf7e87d38f721f0af46fe70d97373a37701b1c09f7ed7aeeb292ade5c050f", size = 4633005, upload-time = "2026-01-21T15:52:04.726Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/46/1e/b184654a856e75e975a6ee95d6577b51c271cd92cb2b020c9378f53e0032/pandas-3.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d64ce01eb9cdca96a15266aa679ae50212ec52757c79204dbc7701a222401850", size = 10313247, upload-time = "2026-01-21T15:50:15.775Z" }, - { url = "https://files.pythonhosted.org/packages/dd/5e/e04a547ad0f0183bf151fd7c7a477468e3b85ff2ad231c566389e6cc9587/pandas-3.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:613e13426069793aa1ec53bdcc3b86e8d32071daea138bbcf4fa959c9cdaa2e2", size = 9913131, upload-time = "2026-01-21T15:50:18.611Z" }, - { url = 
"https://files.pythonhosted.org/packages/a2/93/bb77bfa9fc2aba9f7204db807d5d3fb69832ed2854c60ba91b4c65ba9219/pandas-3.0.0-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0192fee1f1a8e743b464a6607858ee4b071deb0b118eb143d71c2a1d170996d5", size = 10741925, upload-time = "2026-01-21T15:50:21.058Z" }, - { url = "https://files.pythonhosted.org/packages/62/fb/89319812eb1d714bfc04b7f177895caeba8ab4a37ef6712db75ed786e2e0/pandas-3.0.0-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f0b853319dec8d5e0c8b875374c078ef17f2269986a78168d9bd57e49bf650ae", size = 11245979, upload-time = "2026-01-21T15:50:23.413Z" }, - { url = "https://files.pythonhosted.org/packages/a9/63/684120486f541fc88da3862ed31165b3b3e12b6a1c7b93be4597bc84e26c/pandas-3.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:707a9a877a876c326ae2cb640fbdc4ef63b0a7b9e2ef55c6df9942dcee8e2af9", size = 11756337, upload-time = "2026-01-21T15:50:25.932Z" }, - { url = "https://files.pythonhosted.org/packages/39/92/7eb0ad232312b59aec61550c3c81ad0743898d10af5df7f80bc5e5065416/pandas-3.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:afd0aa3d0b5cda6e0b8ffc10dbcca3b09ef3cbcd3fe2b27364f85fdc04e1989d", size = 12325517, upload-time = "2026-01-21T15:50:27.952Z" }, - { url = "https://files.pythonhosted.org/packages/51/27/bf9436dd0a4fc3130acec0828951c7ef96a0631969613a9a35744baf27f6/pandas-3.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:113b4cca2614ff7e5b9fee9b6f066618fe73c5a83e99d721ffc41217b2bf57dd", size = 9881576, upload-time = "2026-01-21T15:50:30.149Z" }, - { url = "https://files.pythonhosted.org/packages/e7/2b/c618b871fce0159fd107516336e82891b404e3f340821853c2fc28c7830f/pandas-3.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:c14837eba8e99a8da1527c0280bba29b0eb842f64aa94982c5e21227966e164b", size = 9140807, upload-time = "2026-01-21T15:50:32.308Z" }, - { url = 
"https://files.pythonhosted.org/packages/0b/38/db33686f4b5fa64d7af40d96361f6a4615b8c6c8f1b3d334eee46ae6160e/pandas-3.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:9803b31f5039b3c3b10cc858c5e40054adb4b29b4d81cb2fd789f4121c8efbcd", size = 10334013, upload-time = "2026-01-21T15:50:34.771Z" }, - { url = "https://files.pythonhosted.org/packages/a5/7b/9254310594e9774906bacdd4e732415e1f86ab7dbb4b377ef9ede58cd8ec/pandas-3.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:14c2a4099cd38a1d18ff108168ea417909b2dea3bd1ebff2ccf28ddb6a74d740", size = 9874154, upload-time = "2026-01-21T15:50:36.67Z" }, - { url = "https://files.pythonhosted.org/packages/63/d4/726c5a67a13bc66643e66d2e9ff115cead482a44fc56991d0c4014f15aaf/pandas-3.0.0-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d257699b9a9960e6125686098d5714ac59d05222bef7a5e6af7a7fd87c650801", size = 10384433, upload-time = "2026-01-21T15:50:39.132Z" }, - { url = "https://files.pythonhosted.org/packages/bf/2e/9211f09bedb04f9832122942de8b051804b31a39cfbad199a819bb88d9f3/pandas-3.0.0-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:69780c98f286076dcafca38d8b8eee1676adf220199c0a39f0ecbf976b68151a", size = 10864519, upload-time = "2026-01-21T15:50:41.043Z" }, - { url = "https://files.pythonhosted.org/packages/00/8d/50858522cdc46ac88b9afdc3015e298959a70a08cd21e008a44e9520180c/pandas-3.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4a66384f017240f3858a4c8a7cf21b0591c3ac885cddb7758a589f0f71e87ebb", size = 11394124, upload-time = "2026-01-21T15:50:43.377Z" }, - { url = "https://files.pythonhosted.org/packages/86/3f/83b2577db02503cd93d8e95b0f794ad9d4be0ba7cb6c8bcdcac964a34a42/pandas-3.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:be8c515c9bc33989d97b89db66ea0cececb0f6e3c2a87fcc8b69443a6923e95f", size = 11920444, upload-time = "2026-01-21T15:50:45.932Z" }, - { url = 
"https://files.pythonhosted.org/packages/64/2d/4f8a2f192ed12c90a0aab47f5557ece0e56b0370c49de9454a09de7381b2/pandas-3.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:a453aad8c4f4e9f166436994a33884442ea62aa8b27d007311e87521b97246e1", size = 9730970, upload-time = "2026-01-21T15:50:47.962Z" }, - { url = "https://files.pythonhosted.org/packages/d4/64/ff571be435cf1e643ca98d0945d76732c0b4e9c37191a89c8550b105eed1/pandas-3.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:da768007b5a33057f6d9053563d6b74dd6d029c337d93c6d0d22a763a5c2ecc0", size = 9041950, upload-time = "2026-01-21T15:50:50.422Z" }, - { url = "https://files.pythonhosted.org/packages/6f/fa/7f0ac4ca8877c57537aaff2a842f8760e630d8e824b730eb2e859ffe96ca/pandas-3.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b78d646249b9a2bc191040988c7bb524c92fa8534fb0898a0741d7e6f2ffafa6", size = 10307129, upload-time = "2026-01-21T15:50:52.877Z" }, - { url = "https://files.pythonhosted.org/packages/6f/11/28a221815dcea4c0c9414dfc845e34a84a6a7dabc6da3194498ed5ba4361/pandas-3.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:bc9cba7b355cb4162442a88ce495e01cb605f17ac1e27d6596ac963504e0305f", size = 9850201, upload-time = "2026-01-21T15:50:54.807Z" }, - { url = "https://files.pythonhosted.org/packages/ba/da/53bbc8c5363b7e5bd10f9ae59ab250fc7a382ea6ba08e4d06d8694370354/pandas-3.0.0-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3c9a1a149aed3b6c9bf246033ff91e1b02d529546c5d6fb6b74a28fea0cf4c70", size = 10354031, upload-time = "2026-01-21T15:50:57.463Z" }, - { url = "https://files.pythonhosted.org/packages/f7/a3/51e02ebc2a14974170d51e2410dfdab58870ea9bcd37cda15bd553d24dc4/pandas-3.0.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:95683af6175d884ee89471842acfca29172a85031fccdabc35e50c0984470a0e", size = 10861165, upload-time = "2026-01-21T15:50:59.32Z" }, - { url = 
"https://files.pythonhosted.org/packages/a5/fe/05a51e3cac11d161472b8297bd41723ea98013384dd6d76d115ce3482f9b/pandas-3.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1fbbb5a7288719e36b76b4f18d46ede46e7f916b6c8d9915b756b0a6c3f792b3", size = 11359359, upload-time = "2026-01-21T15:51:02.014Z" }, - { url = "https://files.pythonhosted.org/packages/ee/56/ba620583225f9b85a4d3e69c01df3e3870659cc525f67929b60e9f21dcd1/pandas-3.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8e8b9808590fa364416b49b2a35c1f4cf2785a6c156935879e57f826df22038e", size = 11912907, upload-time = "2026-01-21T15:51:05.175Z" }, - { url = "https://files.pythonhosted.org/packages/c9/8c/c6638d9f67e45e07656b3826405c5cc5f57f6fd07c8b2572ade328c86e22/pandas-3.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:98212a38a709feb90ae658cb6227ea3657c22ba8157d4b8f913cd4c950de5e7e", size = 9732138, upload-time = "2026-01-21T15:51:07.569Z" }, - { url = "https://files.pythonhosted.org/packages/7b/bf/bd1335c3bf1770b6d8fed2799993b11c4971af93bb1b729b9ebbc02ca2ec/pandas-3.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:177d9df10b3f43b70307a149d7ec49a1229a653f907aa60a48f1877d0e6be3be", size = 9033568, upload-time = "2026-01-21T15:51:09.484Z" }, - { url = "https://files.pythonhosted.org/packages/8e/c6/f5e2171914d5e29b9171d495344097d54e3ffe41d2d85d8115baba4dc483/pandas-3.0.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:2713810ad3806767b89ad3b7b69ba153e1c6ff6d9c20f9c2140379b2a98b6c98", size = 10741936, upload-time = "2026-01-21T15:51:11.693Z" }, - { url = "https://files.pythonhosted.org/packages/51/88/9a0164f99510a1acb9f548691f022c756c2314aad0d8330a24616c14c462/pandas-3.0.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:15d59f885ee5011daf8335dff47dcb8a912a27b4ad7826dc6cbe809fd145d327", size = 10393884, upload-time = "2026-01-21T15:51:14.197Z" }, - { url = 
"https://files.pythonhosted.org/packages/e0/53/b34d78084d88d8ae2b848591229da8826d1e65aacf00b3abe34023467648/pandas-3.0.0-cp313-cp313t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:24e6547fb64d2c92665dd2adbfa4e85fa4fd70a9c070e7cfb03b629a0bbab5eb", size = 10310740, upload-time = "2026-01-21T15:51:16.093Z" }, - { url = "https://files.pythonhosted.org/packages/5b/d3/bee792e7c3d6930b74468d990604325701412e55d7aaf47460a22311d1a5/pandas-3.0.0-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:48ee04b90e2505c693d3f8e8f524dab8cb8aaf7ddcab52c92afa535e717c4812", size = 10700014, upload-time = "2026-01-21T15:51:18.818Z" }, - { url = "https://files.pythonhosted.org/packages/55/db/2570bc40fb13aaed1cbc3fbd725c3a60ee162477982123c3adc8971e7ac1/pandas-3.0.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:66f72fb172959af42a459e27a8d8d2c7e311ff4c1f7db6deb3b643dbc382ae08", size = 11323737, upload-time = "2026-01-21T15:51:20.784Z" }, - { url = "https://files.pythonhosted.org/packages/bc/2e/297ac7f21c8181b62a4cccebad0a70caf679adf3ae5e83cb676194c8acc3/pandas-3.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4a4a400ca18230976724a5066f20878af785f36c6756e498e94c2a5e5d57779c", size = 11771558, upload-time = "2026-01-21T15:51:22.977Z" }, - { url = "https://files.pythonhosted.org/packages/0a/46/e1c6876d71c14332be70239acce9ad435975a80541086e5ffba2f249bcf6/pandas-3.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:940eebffe55528074341a5a36515f3e4c5e25e958ebbc764c9502cfc35ba3faa", size = 10473771, upload-time = "2026-01-21T15:51:25.285Z" }, - { url = "https://files.pythonhosted.org/packages/c0/db/0270ad9d13c344b7a36fa77f5f8344a46501abf413803e885d22864d10bf/pandas-3.0.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:597c08fb9fef0edf1e4fa2f9828dd27f3d78f9b8c9b4a748d435ffc55732310b", size = 10312075, upload-time = "2026-01-21T15:51:28.5Z" }, - { url = 
"https://files.pythonhosted.org/packages/09/9f/c176f5e9717f7c91becfe0f55a52ae445d3f7326b4a2cf355978c51b7913/pandas-3.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:447b2d68ac5edcbf94655fe909113a6dba6ef09ad7f9f60c80477825b6c489fe", size = 9900213, upload-time = "2026-01-21T15:51:30.955Z" }, - { url = "https://files.pythonhosted.org/packages/d9/e7/63ad4cc10b257b143e0a5ebb04304ad806b4e1a61c5da25f55896d2ca0f4/pandas-3.0.0-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:debb95c77ff3ed3ba0d9aa20c3a2f19165cc7956362f9873fce1ba0a53819d70", size = 10428768, upload-time = "2026-01-21T15:51:33.018Z" }, - { url = "https://files.pythonhosted.org/packages/9e/0e/4e4c2d8210f20149fd2248ef3fff26623604922bd564d915f935a06dd63d/pandas-3.0.0-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fedabf175e7cd82b69b74c30adbaa616de301291a5231138d7242596fc296a8d", size = 10882954, upload-time = "2026-01-21T15:51:35.287Z" }, - { url = "https://files.pythonhosted.org/packages/c6/60/c9de8ac906ba1f4d2250f8a951abe5135b404227a55858a75ad26f84db47/pandas-3.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:412d1a89aab46889f3033a386912efcdfa0f1131c5705ff5b668dda88305e986", size = 11430293, upload-time = "2026-01-21T15:51:37.57Z" }, - { url = "https://files.pythonhosted.org/packages/a1/69/806e6637c70920e5787a6d6896fd707f8134c2c55cd761e7249a97b7dc5a/pandas-3.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:e979d22316f9350c516479dd3a92252be2937a9531ed3a26ec324198a99cdd49", size = 11952452, upload-time = "2026-01-21T15:51:39.618Z" }, - { url = "https://files.pythonhosted.org/packages/cb/de/918621e46af55164c400ab0ef389c9d969ab85a43d59ad1207d4ddbe30a5/pandas-3.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:083b11415b9970b6e7888800c43c82e81a06cd6b06755d84804444f0007d6bb7", size = 9851081, upload-time = "2026-01-21T15:51:41.758Z" }, - { url = 
"https://files.pythonhosted.org/packages/91/a1/3562a18dd0bd8c73344bfa26ff90c53c72f827df119d6d6b1dacc84d13e3/pandas-3.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:5db1e62cb99e739fa78a28047e861b256d17f88463c76b8dafc7c1338086dca8", size = 9174610, upload-time = "2026-01-21T15:51:44.312Z" }, - { url = "https://files.pythonhosted.org/packages/ce/26/430d91257eaf366f1737d7a1c158677caaf6267f338ec74e3a1ec444111c/pandas-3.0.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:697b8f7d346c68274b1b93a170a70974cdc7d7354429894d5927c1effdcccd73", size = 10761999, upload-time = "2026-01-21T15:51:46.899Z" }, - { url = "https://files.pythonhosted.org/packages/ec/1a/954eb47736c2b7f7fe6a9d56b0cb6987773c00faa3c6451a43db4beb3254/pandas-3.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:8cb3120f0d9467ed95e77f67a75e030b67545bcfa08964e349252d674171def2", size = 10410279, upload-time = "2026-01-21T15:51:48.89Z" }, - { url = "https://files.pythonhosted.org/packages/20/fc/b96f3a5a28b250cd1b366eb0108df2501c0f38314a00847242abab71bb3a/pandas-3.0.0-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:33fd3e6baa72899746b820c31e4b9688c8e1b7864d7aec2de7ab5035c285277a", size = 10330198, upload-time = "2026-01-21T15:51:51.015Z" }, - { url = "https://files.pythonhosted.org/packages/90/b3/d0e2952f103b4fbef1ef22d0c2e314e74fc9064b51cee30890b5e3286ee6/pandas-3.0.0-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8942e333dc67ceda1095227ad0febb05a3b36535e520154085db632c40ad084", size = 10728513, upload-time = "2026-01-21T15:51:53.387Z" }, - { url = "https://files.pythonhosted.org/packages/76/81/832894f286df828993dc5fd61c63b231b0fb73377e99f6c6c369174cf97e/pandas-3.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:783ac35c4d0fe0effdb0d67161859078618b1b6587a1af15928137525217a721", size = 11345550, upload-time = "2026-01-21T15:51:55.329Z" }, - { url = 
"https://files.pythonhosted.org/packages/34/a0/ed160a00fb4f37d806406bc0a79a8b62fe67f29d00950f8d16203ff3409b/pandas-3.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:125eb901e233f155b268bbef9abd9afb5819db74f0e677e89a61b246228c71ac", size = 11799386, upload-time = "2026-01-21T15:51:57.457Z" }, - { url = "https://files.pythonhosted.org/packages/36/c8/2ac00d7255252c5e3cf61b35ca92ca25704b0188f7454ca4aec08a33cece/pandas-3.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:b86d113b6c109df3ce0ad5abbc259fe86a1bd4adfd4a31a89da42f84f65509bb", size = 10873041, upload-time = "2026-01-21T15:52:00.034Z" }, - { url = "https://files.pythonhosted.org/packages/e6/3f/a80ac00acbc6b35166b42850e98a4f466e2c0d9c64054161ba9620f95680/pandas-3.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:1c39eab3ad38f2d7a249095f0a3d8f8c22cc0f847e98ccf5bbe732b272e2d9fa", size = 9441003, upload-time = "2026-01-21T15:52:02.281Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/2e/0c/b28ed414f080ee0ad153f848586d61d1878f91689950f037f976ce15f6c8/pandas-3.0.1.tar.gz", hash = "sha256:4186a699674af418f655dbd420ed87f50d56b4cd6603784279d9eef6627823c8", size = 4641901, upload-time = "2026-02-17T22:20:16.434Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ff/07/c7087e003ceee9b9a82539b40414ec557aa795b584a1a346e89180853d79/pandas-3.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:de09668c1bf3b925c07e5762291602f0d789eca1b3a781f99c1c78f6cac0e7ea", size = 10323380, upload-time = "2026-02-17T22:18:16.133Z" }, + { url = "https://files.pythonhosted.org/packages/c1/27/90683c7122febeefe84a56f2cde86a9f05f68d53885cebcc473298dfc33e/pandas-3.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:24ba315ba3d6e5806063ac6eb717504e499ce30bd8c236d8693a5fd3f084c796", size = 9923455, upload-time = "2026-02-17T22:18:19.13Z" }, + { url = 
"https://files.pythonhosted.org/packages/0e/f1/ed17d927f9950643bc7631aa4c99ff0cc83a37864470bc419345b656a41f/pandas-3.0.1-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:406ce835c55bac912f2a0dcfaf27c06d73c6b04a5dde45f1fd3169ce31337389", size = 10753464, upload-time = "2026-02-17T22:18:21.134Z" }, + { url = "https://files.pythonhosted.org/packages/2e/7c/870c7e7daec2a6c7ff2ac9e33b23317230d4e4e954b35112759ea4a924a7/pandas-3.0.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:830994d7e1f31dd7e790045235605ab61cff6c94defc774547e8b7fdfbff3dc7", size = 11255234, upload-time = "2026-02-17T22:18:24.175Z" }, + { url = "https://files.pythonhosted.org/packages/5c/39/3653fe59af68606282b989c23d1a543ceba6e8099cbcc5f1d506a7bae2aa/pandas-3.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a64ce8b0f2de1d2efd2ae40b0abe7f8ae6b29fbfb3812098ed5a6f8e235ad9bf", size = 11767299, upload-time = "2026-02-17T22:18:26.824Z" }, + { url = "https://files.pythonhosted.org/packages/9b/31/1daf3c0c94a849c7a8dab8a69697b36d313b229918002ba3e409265c7888/pandas-3.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9832c2c69da24b602c32e0c7b1b508a03949c18ba08d4d9f1c1033426685b447", size = 12333292, upload-time = "2026-02-17T22:18:28.996Z" }, + { url = "https://files.pythonhosted.org/packages/1f/67/af63f83cd6ca603a00fe8530c10a60f0879265b8be00b5930e8e78c5b30b/pandas-3.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:84f0904a69e7365f79a0c77d3cdfccbfb05bf87847e3a51a41e1426b0edb9c79", size = 9892176, upload-time = "2026-02-17T22:18:31.79Z" }, + { url = "https://files.pythonhosted.org/packages/79/ab/9c776b14ac4b7b4140788eca18468ea39894bc7340a408f1d1e379856a6b/pandas-3.0.1-cp311-cp311-win_arm64.whl", hash = "sha256:4a68773d5a778afb31d12e34f7dd4612ab90de8c6fb1d8ffe5d4a03b955082a1", size = 9151328, upload-time = "2026-02-17T22:18:35.721Z" }, + { url = 
"https://files.pythonhosted.org/packages/37/51/b467209c08dae2c624873d7491ea47d2b47336e5403309d433ea79c38571/pandas-3.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:476f84f8c20c9f5bc47252b66b4bb25e1a9fc2fa98cead96744d8116cb85771d", size = 10344357, upload-time = "2026-02-17T22:18:38.262Z" }, + { url = "https://files.pythonhosted.org/packages/7c/f1/e2567ffc8951ab371db2e40b2fe068e36b81d8cf3260f06ae508700e5504/pandas-3.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0ab749dfba921edf641d4036c4c21c0b3ea70fea478165cb98a998fb2a261955", size = 9884543, upload-time = "2026-02-17T22:18:41.476Z" }, + { url = "https://files.pythonhosted.org/packages/d7/39/327802e0b6d693182403c144edacbc27eb82907b57062f23ef5a4c4a5ea7/pandas-3.0.1-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b8e36891080b87823aff3640c78649b91b8ff6eea3c0d70aeabd72ea43ab069b", size = 10396030, upload-time = "2026-02-17T22:18:43.822Z" }, + { url = "https://files.pythonhosted.org/packages/3d/fe/89d77e424365280b79d99b3e1e7d606f5165af2f2ecfaf0c6d24c799d607/pandas-3.0.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:532527a701281b9dd371e2f582ed9094f4c12dd9ffb82c0c54ee28d8ac9520c4", size = 10876435, upload-time = "2026-02-17T22:18:45.954Z" }, + { url = "https://files.pythonhosted.org/packages/b5/a6/2a75320849dd154a793f69c951db759aedb8d1dd3939eeacda9bdcfa1629/pandas-3.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:356e5c055ed9b0da1580d465657bc7d00635af4fd47f30afb23025352ba764d1", size = 11405133, upload-time = "2026-02-17T22:18:48.533Z" }, + { url = "https://files.pythonhosted.org/packages/58/53/1d68fafb2e02d7881df66aa53be4cd748d25cbe311f3b3c85c93ea5d30ca/pandas-3.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9d810036895f9ad6345b8f2a338dd6998a74e8483847403582cab67745bff821", size = 11932065, upload-time = "2026-02-17T22:18:50.837Z" }, + { url = 
"https://files.pythonhosted.org/packages/75/08/67cc404b3a966b6df27b38370ddd96b3b023030b572283d035181854aac5/pandas-3.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:536232a5fe26dd989bd633e7a0c450705fdc86a207fec7254a55e9a22950fe43", size = 9741627, upload-time = "2026-02-17T22:18:53.905Z" }, + { url = "https://files.pythonhosted.org/packages/86/4f/caf9952948fb00d23795f09b893d11f1cacb384e666854d87249530f7cbe/pandas-3.0.1-cp312-cp312-win_arm64.whl", hash = "sha256:0f463ebfd8de7f326d38037c7363c6dacb857c5881ab8961fb387804d6daf2f7", size = 9052483, upload-time = "2026-02-17T22:18:57.31Z" }, + { url = "https://files.pythonhosted.org/packages/0b/48/aad6ec4f8d007534c091e9a7172b3ec1b1ee6d99a9cbb936b5eab6c6cf58/pandas-3.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5272627187b5d9c20e55d27caf5f2cd23e286aba25cadf73c8590e432e2b7262", size = 10317509, upload-time = "2026-02-17T22:18:59.498Z" }, + { url = "https://files.pythonhosted.org/packages/a8/14/5990826f779f79148ae9d3a2c39593dc04d61d5d90541e71b5749f35af95/pandas-3.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:661e0f665932af88c7877f31da0dc743fe9c8f2524bdffe23d24fdcb67ef9d56", size = 9860561, upload-time = "2026-02-17T22:19:02.265Z" }, + { url = "https://files.pythonhosted.org/packages/fa/80/f01ff54664b6d70fed71475543d108a9b7c888e923ad210795bef04ffb7d/pandas-3.0.1-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:75e6e292ff898679e47a2199172593d9f6107fd2dd3617c22c2946e97d5df46e", size = 10365506, upload-time = "2026-02-17T22:19:05.017Z" }, + { url = "https://files.pythonhosted.org/packages/f2/85/ab6d04733a7d6ff32bfc8382bf1b07078228f5d6ebec5266b91bfc5c4ff7/pandas-3.0.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1ff8cf1d2896e34343197685f432450ec99a85ba8d90cce2030c5eee2ef98791", size = 10873196, upload-time = "2026-02-17T22:19:07.204Z" }, + { url = 
"https://files.pythonhosted.org/packages/48/a9/9301c83d0b47c23ac5deab91c6b39fd98d5b5db4d93b25df8d381451828f/pandas-3.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:eca8b4510f6763f3d37359c2105df03a7a221a508f30e396a51d0713d462e68a", size = 11370859, upload-time = "2026-02-17T22:19:09.436Z" }, + { url = "https://files.pythonhosted.org/packages/59/fe/0c1fc5bd2d29c7db2ab372330063ad555fb83e08422829c785f5ec2176ca/pandas-3.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:06aff2ad6f0b94a17822cf8b83bbb563b090ed82ff4fe7712db2ce57cd50d9b8", size = 11924584, upload-time = "2026-02-17T22:19:11.562Z" }, + { url = "https://files.pythonhosted.org/packages/d6/7d/216a1588b65a7aa5f4535570418a599d943c85afb1d95b0876fc00aa1468/pandas-3.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:9fea306c783e28884c29057a1d9baa11a349bbf99538ec1da44c8476563d1b25", size = 9742769, upload-time = "2026-02-17T22:19:13.926Z" }, + { url = "https://files.pythonhosted.org/packages/c4/cb/810a22a6af9a4e97c8ab1c946b47f3489c5bca5adc483ce0ffc84c9cc768/pandas-3.0.1-cp313-cp313-win_arm64.whl", hash = "sha256:a8d37a43c52917427e897cb2e429f67a449327394396a81034a4449b99afda59", size = 9043855, upload-time = "2026-02-17T22:19:16.09Z" }, + { url = "https://files.pythonhosted.org/packages/92/fa/423c89086cca1f039cf1253c3ff5b90f157b5b3757314aa635f6bf3e30aa/pandas-3.0.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:d54855f04f8246ed7b6fc96b05d4871591143c46c0b6f4af874764ed0d2d6f06", size = 10752673, upload-time = "2026-02-17T22:19:18.304Z" }, + { url = "https://files.pythonhosted.org/packages/22/23/b5a08ec1f40020397f0faba72f1e2c11f7596a6169c7b3e800abff0e433f/pandas-3.0.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:4e1b677accee34a09e0dc2ce5624e4a58a1870ffe56fc021e9caf7f23cd7668f", size = 10404967, upload-time = "2026-02-17T22:19:20.726Z" }, + { url = 
"https://files.pythonhosted.org/packages/5c/81/94841f1bb4afdc2b52a99daa895ac2c61600bb72e26525ecc9543d453ebc/pandas-3.0.1-cp313-cp313t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a9cabbdcd03f1b6cd254d6dda8ae09b0252524be1592594c00b7895916cb1324", size = 10320575, upload-time = "2026-02-17T22:19:24.919Z" }, + { url = "https://files.pythonhosted.org/packages/0a/8b/2ae37d66a5342a83adadfd0cb0b4bf9c3c7925424dd5f40d15d6cfaa35ee/pandas-3.0.1-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5ae2ab1f166668b41e770650101e7090824fd34d17915dd9cd479f5c5e0065e9", size = 10710921, upload-time = "2026-02-17T22:19:27.181Z" }, + { url = "https://files.pythonhosted.org/packages/a2/61/772b2e2757855e232b7ccf7cb8079a5711becb3a97f291c953def15a833f/pandas-3.0.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6bf0603c2e30e2cafac32807b06435f28741135cb8697eae8b28c7d492fc7d76", size = 11334191, upload-time = "2026-02-17T22:19:29.411Z" }, + { url = "https://files.pythonhosted.org/packages/1b/08/b16c6df3ef555d8495d1d265a7963b65be166785d28f06a350913a4fac78/pandas-3.0.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6c426422973973cae1f4a23e51d4ae85974f44871b24844e4f7de752dd877098", size = 11782256, upload-time = "2026-02-17T22:19:32.34Z" }, + { url = "https://files.pythonhosted.org/packages/55/80/178af0594890dee17e239fca96d3d8670ba0f5ff59b7d0439850924a9c09/pandas-3.0.1-cp313-cp313t-win_amd64.whl", hash = "sha256:b03f91ae8c10a85c1613102c7bef5229b5379f343030a3ccefeca8a33414cf35", size = 10485047, upload-time = "2026-02-17T22:19:34.605Z" }, + { url = "https://files.pythonhosted.org/packages/bb/8b/4bb774a998b97e6c2fd62a9e6cfdaae133b636fd1c468f92afb4ae9a447a/pandas-3.0.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:99d0f92ed92d3083d140bf6b97774f9f13863924cf3f52a70711f4e7588f9d0a", size = 10322465, upload-time = "2026-02-17T22:19:36.803Z" }, + { url = 
"https://files.pythonhosted.org/packages/72/3a/5b39b51c64159f470f1ca3b1c2a87da290657ca022f7cd11442606f607d1/pandas-3.0.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:3b66857e983208654294bb6477b8a63dee26b37bdd0eb34d010556e91261784f", size = 9910632, upload-time = "2026-02-17T22:19:39.001Z" }, + { url = "https://files.pythonhosted.org/packages/4e/f7/b449ffb3f68c11da12fc06fbf6d2fa3a41c41e17d0284d23a79e1c13a7e4/pandas-3.0.1-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:56cf59638bf24dc9bdf2154c81e248b3289f9a09a6d04e63608c159022352749", size = 10440535, upload-time = "2026-02-17T22:19:41.157Z" }, + { url = "https://files.pythonhosted.org/packages/55/77/6ea82043db22cb0f2bbfe7198da3544000ddaadb12d26be36e19b03a2dc5/pandas-3.0.1-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c1a9f55e0f46951874b863d1f3906dcb57df2d9be5c5847ba4dfb55b2c815249", size = 10893940, upload-time = "2026-02-17T22:19:43.493Z" }, + { url = "https://files.pythonhosted.org/packages/03/30/f1b502a72468c89412c1b882a08f6eed8a4ee9dc033f35f65d0663df6081/pandas-3.0.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:1849f0bba9c8a2fb0f691d492b834cc8dadf617e29015c66e989448d58d011ee", size = 11442711, upload-time = "2026-02-17T22:19:46.074Z" }, + { url = "https://files.pythonhosted.org/packages/0d/f0/ebb6ddd8fc049e98cabac5c2924d14d1dda26a20adb70d41ea2e428d3ec4/pandas-3.0.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:c3d288439e11b5325b02ae6e9cc83e6805a62c40c5a6220bea9beb899c073b1c", size = 11963918, upload-time = "2026-02-17T22:19:48.838Z" }, + { url = "https://files.pythonhosted.org/packages/09/f8/8ce132104074f977f907442790eaae24e27bce3b3b454e82faa3237ff098/pandas-3.0.1-cp314-cp314-win_amd64.whl", hash = "sha256:93325b0fe372d192965f4cca88d97667f49557398bbf94abdda3bf1b591dbe66", size = 9862099, upload-time = "2026-02-17T22:19:51.081Z" }, + { url = 
"https://files.pythonhosted.org/packages/e6/b7/6af9aac41ef2456b768ef0ae60acf8abcebb450a52043d030a65b4b7c9bd/pandas-3.0.1-cp314-cp314-win_arm64.whl", hash = "sha256:97ca08674e3287c7148f4858b01136f8bdfe7202ad25ad04fec602dd1d29d132", size = 9185333, upload-time = "2026-02-17T22:19:53.266Z" }, + { url = "https://files.pythonhosted.org/packages/66/fc/848bb6710bc6061cb0c5badd65b92ff75c81302e0e31e496d00029fe4953/pandas-3.0.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:58eeb1b2e0fb322befcf2bbc9ba0af41e616abadb3d3414a6bc7167f6cbfce32", size = 10772664, upload-time = "2026-02-17T22:19:55.806Z" }, + { url = "https://files.pythonhosted.org/packages/69/5c/866a9bbd0f79263b4b0db6ec1a341be13a1473323f05c122388e0f15b21d/pandas-3.0.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:cd9af1276b5ca9e298bd79a26bda32fa9cc87ed095b2a9a60978d2ca058eaf87", size = 10421286, upload-time = "2026-02-17T22:19:58.091Z" }, + { url = "https://files.pythonhosted.org/packages/51/a4/2058fb84fb1cfbfb2d4a6d485e1940bb4ad5716e539d779852494479c580/pandas-3.0.1-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:94f87a04984d6b63788327cd9f79dda62b7f9043909d2440ceccf709249ca988", size = 10342050, upload-time = "2026-02-17T22:20:01.376Z" }, + { url = "https://files.pythonhosted.org/packages/22/1b/674e89996cc4be74db3c4eb09240c4bb549865c9c3f5d9b086ff8fcfbf00/pandas-3.0.1-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85fe4c4df62e1e20f9db6ebfb88c844b092c22cd5324bdcf94bfa2fc1b391221", size = 10740055, upload-time = "2026-02-17T22:20:04.328Z" }, + { url = "https://files.pythonhosted.org/packages/d0/f8/e954b750764298c22fa4614376531fe63c521ef517e7059a51f062b87dca/pandas-3.0.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:331ca75a2f8672c365ae25c0b29e46f5ac0c6551fdace8eec4cd65e4fac271ff", size = 11357632, upload-time = "2026-02-17T22:20:06.647Z" }, + { url = 
"https://files.pythonhosted.org/packages/6d/02/c6e04b694ffd68568297abd03588b6d30295265176a5c01b7459d3bc35a3/pandas-3.0.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:15860b1fdb1973fffade772fdb931ccf9b2f400a3f5665aef94a00445d7d8dd5", size = 11810974, upload-time = "2026-02-17T22:20:08.946Z" }, + { url = "https://files.pythonhosted.org/packages/89/41/d7dfb63d2407f12055215070c42fc6ac41b66e90a2946cdc5e759058398b/pandas-3.0.1-cp314-cp314t-win_amd64.whl", hash = "sha256:44f1364411d5670efa692b146c748f4ed013df91ee91e9bec5677fb1fd58b937", size = 10884622, upload-time = "2026-02-17T22:20:11.711Z" }, + { url = "https://files.pythonhosted.org/packages/68/b0/34937815889fa982613775e4b97fddd13250f11012d769949c5465af2150/pandas-3.0.1-cp314-cp314t-win_arm64.whl", hash = "sha256:108dd1790337a494aa80e38def654ca3f0968cf4f362c85f44c15e471667102d", size = 9452085, upload-time = "2026-02-17T22:20:14.331Z" }, ] [[package]] @@ -6012,7 +6012,7 @@ dependencies = [ { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, { name = "numpy", version = "2.4.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, { name = "pandas", version = "2.3.3", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, - { name = "pandas", version = "3.0.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or 
(python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "pandas", version = "3.0.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/86/59/a451d7420a77ab0b98f7affa3a1d78a313d2f7281a57afb1a34bae8ab412/seaborn-0.13.2.tar.gz", hash = "sha256:93e60a40988f4d65e9f4885df477e2fdaff6b73a9ded434c1ab356dd57eefff7", size = 1457696, upload-time = "2024-01-25T13:21:52.551Z" } wheels = [ @@ -6275,7 +6275,7 @@ dependencies = [ { name = "loguru", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "matplotlib", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "pandas", version = "2.3.3", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, - { name = "pandas", version = "3.0.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "pandas", version = "3.0.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, { name = "plotly", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "psutil", marker = 
"sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "pydantic-argparse", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -6606,27 +6606,27 @@ wheels = [ [[package]] name = "uv" -version = "0.10.3" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/60/ad/7e2de47ef90b2807572ca0410e7a2d5694b0afac6d31b559b6e2eff225e4/uv-0.10.3.tar.gz", hash = "sha256:56e15a90bf81a484e501f6ebc890024a47cb808800e18ecbc81f7d7668ff8b55", size = 3875449, upload-time = "2026-02-16T11:23:58.482Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/67/a7/e90f9c2c0a4f80c65102fa444ba71a6fe1233e2c4dd55fb68b9bf5e50607/uv-0.10.3-py3-none-linux_armv6l.whl", hash = "sha256:de599782dfaa8ff818767cd07e6fbdcff41a7451a42c324b8a32862da7283148", size = 22331102, upload-time = "2026-02-16T11:24:17.531Z" }, - { url = "https://files.pythonhosted.org/packages/40/db/c5729ec39ed557ac673ca993abd71e5b18d71a92f95f1845152e639000ff/uv-0.10.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:815717d3ed409ca7573d3f95125d6bcec6978d2236bddad7bc123ac12dc6cfbb", size = 21384482, upload-time = "2026-02-16T11:24:22.974Z" }, - { url = "https://files.pythonhosted.org/packages/0b/8c/cce26f6da9f69a9f0df032d642a4cb5603249f145c368e41b0154c463ca9/uv-0.10.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:52241dbefdc1be57aaa37171d27372252d64f3347a5df9e5b8ee3de49207f6ff", size = 20118525, upload-time = "2026-02-16T11:24:00.258Z" }, - { url = "https://files.pythonhosted.org/packages/e1/b2/bab536c11c24c49674820e8e2ebfa686db810d9776bdc5c47ba65beccc48/uv-0.10.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.musllinux_1_1_aarch64.whl", hash = "sha256:4c5dbc7071c7f18ddf8e445ca59e2f4e4f34872ddffcb971c0ec2d0aee959b3e", size = 21869406, upload-time = "2026-02-16T11:24:08.153Z" }, - { url = 
"https://files.pythonhosted.org/packages/ed/0e/3dc97fde6e16c974a9f937fdbdf9af713f2b96873f97cd16c78dc6295116/uv-0.10.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.musllinux_1_1_armv7l.whl", hash = "sha256:519d9449bf276395643358322c4199338ddf506d684001b12bd7777385d9f81c", size = 21953236, upload-time = "2026-02-16T11:23:56.348Z" }, - { url = "https://files.pythonhosted.org/packages/be/ea/6195af5b4d189d64f148a69991fa6335bf77d770c5eb763990787f0ebf65/uv-0.10.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c8ac385bb93bd1b1d4f74f234a3a6e89033eb50976da5d5a4d15218a84c7247f", size = 21937189, upload-time = "2026-02-16T11:24:28.848Z" }, - { url = "https://files.pythonhosted.org/packages/f6/09/2b61660fd3058b751c591b906fe1e34a8308586bb283beafe9df5487c83d/uv-0.10.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:95e2bedd5b618621dc4320f3dc863c8f00e07084d8f4ab77b39ddbe7822ddeda", size = 23285307, upload-time = "2026-02-16T11:24:05.588Z" }, - { url = "https://files.pythonhosted.org/packages/06/02/32b1f98552498c0f390192a3496066e6470544ad8c552ae39ec1aea2b794/uv-0.10.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b967705e8126ff33bf35d7531283bc5cf5096b8c2b7235772abfa55e1f174d14", size = 24021235, upload-time = "2026-02-16T11:24:14.91Z" }, - { url = "https://files.pythonhosted.org/packages/2e/77/feead72435bbd66ac12e452b98ac0798bf548135eb333f7597cefb43afe9/uv-0.10.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ddd903aec32ac4810857ebdbc526dd9624d70c549d0b0410ea06c617a89af7b6", size = 23124930, upload-time = "2026-02-16T11:23:47.418Z" }, - { url = "https://files.pythonhosted.org/packages/ce/de/0638d8aa83c2038c5f820151006b3bf372d109afbfdb2a8fc59187cc3690/uv-0.10.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8daf6530d6496da799dd53a4bfafe138b412a714ea0bff34472c1267229054c", size = 23085652, upload-time = "2026-02-16T11:24:02.865Z" }, - { url = 
"https://files.pythonhosted.org/packages/f1/cc/1e83561367d78d3d0c0aa7eca89cd635d1e67865ed07067015a055755de9/uv-0.10.3-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:d930818bfa78976d68dac888123e6dda8e6fc506f0baf5058d8b4e7f514c8e17", size = 21937422, upload-time = "2026-02-16T11:23:44.725Z" }, - { url = "https://files.pythonhosted.org/packages/fc/f7/4c248d6bc67c99828ef741f8090790f11bd522bded48bb8b6d7f18b7f924/uv-0.10.3-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:9c67056288d0e7a000c926545cf9a7e37fd6f29cae2a0b36f2e8c905165dc99c", size = 22750461, upload-time = "2026-02-16T11:23:50.364Z" }, - { url = "https://files.pythonhosted.org/packages/6e/3a/52f1009c8e035253b94f3f255272e468fb553f1f8a9233b8acf5b3a9891f/uv-0.10.3-py3-none-musllinux_1_1_i686.whl", hash = "sha256:8868ae00305a4c4d0daeb6a2b3a7eef41e11e376176fbd8746288477c75fd674", size = 22461429, upload-time = "2026-02-16T11:23:53.892Z" }, - { url = "https://files.pythonhosted.org/packages/93/a3/980c999480d3f0391f4c8d62c71404b9f096f15c49c644696e9e4d15586c/uv-0.10.3-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:037d93c77acb344ef28eac9ab554e5c237452b90ca4869830104397f2fb798a1", size = 23288496, upload-time = "2026-02-16T11:24:11.676Z" }, - { url = "https://files.pythonhosted.org/packages/bb/d6/5ccb14e919d43389d3746586d26649c45221deaafdddd714559f8f813b5f/uv-0.10.3-py3-none-win32.whl", hash = "sha256:d13307f74ce7b9f04962e12198a6c86b7032435979e1c1fd3845cce9b4b8befa", size = 21359743, upload-time = "2026-02-16T11:24:25.737Z" }, - { url = "https://files.pythonhosted.org/packages/d5/95/89b9caeb4007d950dfa5fa5923c2ea67de541de0e3582598b7b9c7eeac53/uv-0.10.3-py3-none-win_amd64.whl", hash = "sha256:4f2fa16083b6c5277f81c5b3ea4bf12e25e6d16c2a71c3c794506bd080c5afe6", size = 23837765, upload-time = "2026-02-16T11:24:31.465Z" }, - { url = "https://files.pythonhosted.org/packages/50/f6/324e84432441b8c03bf65724fabb11d8c7ccdac98e39bd717e4a72b838de/uv-0.10.3-py3-none-win_arm64.whl", hash = 
"sha256:f86596b464d6fb7951890708fdb907744cbd326464d6a5326919a6859eaefca2", size = 22178302, upload-time = "2026-02-16T11:24:20.036Z" }, +version = "0.10.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c2/bb/dfd872ab6515e5609dc899acb65ccaf8cbedddefa3e34e8da0a5b3e13070/uv-0.10.4.tar.gz", hash = "sha256:b9ecf9f9145b95ddd6627b106e2e74f4204393b41bea2488079872699c03612e", size = 3875347, upload-time = "2026-02-17T22:01:22.28Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d8/a3/565e5e45b5006c108ccd596682768c00be988421a83be92193c90bd889e4/uv-0.10.4-py3-none-linux_armv6l.whl", hash = "sha256:97cd6856145dec1d50821468bb6a10c14f3d71015eb97bb657163c837b5ffe79", size = 22352134, upload-time = "2026-02-17T22:01:30.071Z" }, + { url = "https://files.pythonhosted.org/packages/3e/c6/b86f3fdcde9f270e6dc1ff631a4fe73971bf4162c4dd169c7621110361b8/uv-0.10.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:44dd91ef224cfce2203716ecf244c3d3641269d1c99996aab852248caf2aeba4", size = 21417697, upload-time = "2026-02-17T22:01:51.162Z" }, + { url = "https://files.pythonhosted.org/packages/63/91/c4ddf7e55e05394967615050cc364a999157a44c008d0e1e9db2ed49a11c/uv-0.10.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:751959135a62f006ef51f3fcc5d02ec67986defa0424d470cce0918eede36a55", size = 20082236, upload-time = "2026-02-17T22:01:43.025Z" }, + { url = "https://files.pythonhosted.org/packages/25/92/606701b147d421ba2afe327d25f1ec5f59e519157b7e530d09cf61781d22/uv-0.10.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.musllinux_1_1_aarch64.whl", hash = "sha256:c184891b496c5fa04a7e1396d7f1953f52c97a5635636330854ab68f9e8ec212", size = 21921200, upload-time = "2026-02-17T22:01:24.131Z" }, + { url = "https://files.pythonhosted.org/packages/c3/79/942e75d0920a9e4cac76257cd3e2c238f1963d7e45423793f92e84eaa480/uv-0.10.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.musllinux_1_1_armv7l.whl", hash = 
"sha256:5b8a2170ecc700d82ed322fa056789ae2281353fef094e44f563c2f32ab8f438", size = 21974822, upload-time = "2026-02-17T22:01:45.337Z" }, + { url = "https://files.pythonhosted.org/packages/60/71/e5b1140c5c7296f935037a967717a82591522bbc93b4e67c4554dfbb4380/uv-0.10.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:decaf620106efa0d09ca27a8301dd83b8a5371e42649cd2704cfd11fe31af7d7", size = 21953309, upload-time = "2026-02-17T22:01:38.225Z" }, + { url = "https://files.pythonhosted.org/packages/70/a3/03ac1ff2058413c2c7d347f3b3396f291e192b096d2625a201c00bd962c6/uv-0.10.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f7d1035db05ac5b94387395428bdcbfce685f6c8eb2b711b66a5a1b397111913", size = 23217053, upload-time = "2026-02-17T22:01:09.278Z" }, + { url = "https://files.pythonhosted.org/packages/e3/d5/9b02140e8ff29d9b575335662288493cdcde5f123337613c04613017cf23/uv-0.10.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e754f9c8fd7532a28da7deaa6e400de5e7b459f7846bd5320db215a074fa8664", size = 24053086, upload-time = "2026-02-17T22:01:32.722Z" }, + { url = "https://files.pythonhosted.org/packages/f8/80/7023e1b0f9180226f8c3aa3e207383671cb524eb8bbd8a8eecf1c0cfe867/uv-0.10.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d419ef8d4fbd5be0af952a60c76d4f6183acb827cc729095d11c63e7dfaec24c", size = 23121689, upload-time = "2026-02-17T22:01:26.835Z" }, + { url = "https://files.pythonhosted.org/packages/f2/b3/4b9580d62e1245df52e8516cf3e404ff39cc72634d2d749d47b1dada4161/uv-0.10.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82978155e571f2ac3dd57077bd746bfe41b65fa19accc3c92d1f09632cd36c63", size = 23136767, upload-time = "2026-02-17T22:01:40.729Z" }, + { url = "https://files.pythonhosted.org/packages/bd/4e/058976e2a5513f11954e09595a1821d5db1819e96e00bafded19c6a470e9/uv-0.10.4-py3-none-manylinux_2_28_aarch64.whl", hash = 
"sha256:8437e56a7d0f8ecd7421e8b84024dd8153179b8f1371ca1bd66b79fa7fb4c2c1", size = 22003202, upload-time = "2026-02-17T22:01:12.447Z" }, + { url = "https://files.pythonhosted.org/packages/41/c5/da0fc5b732f7dd1f99116ce19e3c1cae7dfa7d04528a0c38268f20643edf/uv-0.10.4-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:ff1c6a465ec035dfe2dfd745b2e85061f47ab3c5cc626eead491994c028eacc6", size = 22720004, upload-time = "2026-02-17T22:01:53.551Z" }, + { url = "https://files.pythonhosted.org/packages/71/17/13c24dd56c135553645c2c62543eba928e88479fdd2d8356fdf35a0113bc/uv-0.10.4-py3-none-musllinux_1_1_i686.whl", hash = "sha256:525dc49a02b78fcd77431f013f2c48b2a152e31808e792c0d1aee4600495a320", size = 22401692, upload-time = "2026-02-17T22:01:35.368Z" }, + { url = "https://files.pythonhosted.org/packages/9c/b2/7a5fdbc0bfd8364e6290457794127d5e766dbc6d44bb15d1a9e318bc356b/uv-0.10.4-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:7d514b30877fda6e83874ccbd1379e0249cfa064511c5858433edcf697d0d4e3", size = 23330968, upload-time = "2026-02-17T22:01:15.237Z" }, + { url = "https://files.pythonhosted.org/packages/d1/df/004e32be4cd24338422842dd93383f2df0be4554efb6872fef37997ff3ca/uv-0.10.4-py3-none-win32.whl", hash = "sha256:4aed1237847dbd694475c06e8608f2f5f6509181ac148ee35694400d382a3784", size = 21373394, upload-time = "2026-02-17T22:01:20.362Z" }, + { url = "https://files.pythonhosted.org/packages/31/dd/1900452678d46f6a649ab8167bededb02500b0561fc9f69e1f52607895c7/uv-0.10.4-py3-none-win_amd64.whl", hash = "sha256:4a1c595cf692fa611019a7ad9bf4b0757fccd0a3f838ca05e53db82912ddaa39", size = 23813606, upload-time = "2026-02-17T22:01:17.733Z" }, + { url = "https://files.pythonhosted.org/packages/7b/e8/c6ba7ceee3ec58d21156b4968449e6a12af15eea8d26308b3b3ffeef2baf/uv-0.10.4-py3-none-win_arm64.whl", hash = "sha256:28c59a02d7a648b75a9c2ea735773d9d357a1eee773b78593c275b0bef1a4b73", size = 22180241, upload-time = "2026-02-17T22:01:56.305Z" }, ] [[package]]