diff --git a/.github/actions/azure-functions-integration-setup/action.yml b/.github/actions/azure-functions-integration-setup/action.yml index 6be5afb814..357168d92e 100644 --- a/.github/actions/azure-functions-integration-setup/action.yml +++ b/.github/actions/azure-functions-integration-setup/action.yml @@ -28,6 +28,18 @@ runs: echo "Waiting for Azurite (Azure Storage emulator) to be ready" timeout 30 bash -c 'until curl --silent http://localhost:10000/devstoreaccount1; do sleep 1; done' echo "Azurite (Azure Storage emulator) is ready" + - name: Start Redis + shell: bash + run: | + if [ "$(docker ps -aq -f name=redis)" ]; then + echo "Stopping and removing existing Redis" + docker rm -f redis + fi + echo "Starting Redis" + docker run -d --name redis -p 6379:6379 redis:latest + echo "Waiting for Redis to be ready" + timeout 30 bash -c 'until docker exec redis redis-cli ping | grep -q PONG; do sleep 1; done' + echo "Redis is ready" - name: Install Azure Functions Core Tools shell: bash run: | diff --git a/.github/workflows/python-merge-tests.yml b/.github/workflows/python-merge-tests.yml index 66b9122726..eb4f123f42 100644 --- a/.github/workflows/python-merge-tests.yml +++ b/.github/workflows/python-merge-tests.yml @@ -154,7 +154,7 @@ jobs: subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }} - name: Test with pytest timeout-minutes: 10 - run: uv run poe azure-ai-tests -n logical --dist loadfile --dist worksteal --timeout 300 --retries 3 --retry-delay 10 + run: uv run --directory packages/azure-ai poe integration-tests -n logical --dist loadfile --dist worksteal --timeout 300 --retries 3 --retry-delay 10 working-directory: ./python - name: Test Azure AI samples timeout-minutes: 10 diff --git a/agent-samples/README.md b/agent-samples/README.md index 0ee940f3a0..ea5c8b0aeb 100644 --- a/agent-samples/README.md +++ b/agent-samples/README.md @@ -1,3 +1,3 @@ # Declarative Agents -This folder contains sample agent definitions than be ran using the declarative agent 
support, for python see the [declarative agent python sample folder](../python/samples/getting_started/declarative/). +This folder contains sample agent definitions that can be run using the declarative agent support, for python see the [declarative agent python sample folder](../python/samples/getting_started/declarative/). diff --git a/dotnet/Directory.Packages.props b/dotnet/Directory.Packages.props index 4825a42921..9202b36f2a 100644 --- a/dotnet/Directory.Packages.props +++ b/dotnet/Directory.Packages.props @@ -11,13 +11,13 @@ - + - + @@ -100,7 +100,7 @@ - + @@ -112,19 +112,21 @@ - - - - + + + + - - + + + + diff --git a/dotnet/agent-framework-dotnet.slnx b/dotnet/agent-framework-dotnet.slnx index 5e08a766f9..002efdbab1 100644 --- a/dotnet/agent-framework-dotnet.slnx +++ b/dotnet/agent-framework-dotnet.slnx @@ -33,6 +33,7 @@ + diff --git a/dotnet/nuget/nuget-package.props b/dotnet/nuget/nuget-package.props index 6ae60933c6..3fc7c3974b 100644 --- a/dotnet/nuget/nuget-package.props +++ b/dotnet/nuget/nuget-package.props @@ -2,9 +2,9 @@ 1.0.0 - $(VersionPrefix)-$(VersionSuffix).251204.1 - $(VersionPrefix)-preview.251204.1 - 1.0.0-preview.251204.1 + $(VersionPrefix)-$(VersionSuffix).251219.1 + $(VersionPrefix)-preview.251219.1 + 1.0.0-preview.251219.1 Debug;Release;Publish true diff --git a/dotnet/samples/AGUIClientServer/AGUIDojoServer/AgenticUI/AgenticUIAgent.cs b/dotnet/samples/AGUIClientServer/AGUIDojoServer/AgenticUI/AgenticUIAgent.cs index 05a7d86f15..d79787d260 100644 --- a/dotnet/samples/AGUIClientServer/AGUIDojoServer/AgenticUI/AgenticUIAgent.cs +++ b/dotnet/samples/AGUIClientServer/AGUIDojoServer/AgenticUI/AgenticUIAgent.cs @@ -19,12 +19,12 @@ public AgenticUIAgent(AIAgent innerAgent, JsonSerializerOptions jsonSerializerOp this._jsonSerializerOptions = jsonSerializerOptions; } - public override Task RunAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? 
options = null, CancellationToken cancellationToken = default) + protected override Task RunCoreAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) { - return this.RunStreamingAsync(messages, thread, options, cancellationToken).ToAgentRunResponseAsync(cancellationToken); + return this.RunCoreStreamingAsync(messages, thread, options, cancellationToken).ToAgentRunResponseAsync(cancellationToken); } - public override async IAsyncEnumerable RunStreamingAsync( + protected override async IAsyncEnumerable RunCoreStreamingAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, diff --git a/dotnet/samples/AGUIClientServer/AGUIDojoServer/PredictiveStateUpdates/PredictiveStateUpdatesAgent.cs b/dotnet/samples/AGUIClientServer/AGUIDojoServer/PredictiveStateUpdates/PredictiveStateUpdatesAgent.cs index 8ac9928fbe..ab9ca2fca3 100644 --- a/dotnet/samples/AGUIClientServer/AGUIDojoServer/PredictiveStateUpdates/PredictiveStateUpdatesAgent.cs +++ b/dotnet/samples/AGUIClientServer/AGUIDojoServer/PredictiveStateUpdates/PredictiveStateUpdatesAgent.cs @@ -20,12 +20,12 @@ public PredictiveStateUpdatesAgent(AIAgent innerAgent, JsonSerializerOptions jso this._jsonSerializerOptions = jsonSerializerOptions; } - public override Task RunAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) + protected override Task RunCoreAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? 
options = null, CancellationToken cancellationToken = default) { - return this.RunStreamingAsync(messages, thread, options, cancellationToken).ToAgentRunResponseAsync(cancellationToken); + return this.RunCoreStreamingAsync(messages, thread, options, cancellationToken).ToAgentRunResponseAsync(cancellationToken); } - public override async IAsyncEnumerable RunStreamingAsync( + protected override async IAsyncEnumerable RunCoreStreamingAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, diff --git a/dotnet/samples/AGUIClientServer/AGUIDojoServer/SharedState/SharedStateAgent.cs b/dotnet/samples/AGUIClientServer/AGUIDojoServer/SharedState/SharedStateAgent.cs index c10450fcfb..1a1e58860a 100644 --- a/dotnet/samples/AGUIClientServer/AGUIDojoServer/SharedState/SharedStateAgent.cs +++ b/dotnet/samples/AGUIClientServer/AGUIDojoServer/SharedState/SharedStateAgent.cs @@ -19,12 +19,12 @@ public SharedStateAgent(AIAgent innerAgent, JsonSerializerOptions jsonSerializer this._jsonSerializerOptions = jsonSerializerOptions; } - public override Task RunAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) + protected override Task RunCoreAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) { - return this.RunStreamingAsync(messages, thread, options, cancellationToken).ToAgentRunResponseAsync(cancellationToken); + return this.RunCoreStreamingAsync(messages, thread, options, cancellationToken).ToAgentRunResponseAsync(cancellationToken); } - public override async IAsyncEnumerable RunStreamingAsync( + protected override async IAsyncEnumerable RunCoreStreamingAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? 
options = null, diff --git a/dotnet/samples/AzureFunctions/08_ReliableStreaming/08_ReliableStreaming.csproj b/dotnet/samples/AzureFunctions/08_ReliableStreaming/08_ReliableStreaming.csproj new file mode 100644 index 0000000000..df0b60a4a1 --- /dev/null +++ b/dotnet/samples/AzureFunctions/08_ReliableStreaming/08_ReliableStreaming.csproj @@ -0,0 +1,47 @@ + + + net10.0 + v4 + Exe + enable + enable + + ReliableStreaming + ReliableStreaming + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/dotnet/samples/AzureFunctions/08_ReliableStreaming/FunctionTriggers.cs b/dotnet/samples/AzureFunctions/08_ReliableStreaming/FunctionTriggers.cs new file mode 100644 index 0000000000..e642b64337 --- /dev/null +++ b/dotnet/samples/AzureFunctions/08_ReliableStreaming/FunctionTriggers.cs @@ -0,0 +1,320 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Text; +using Microsoft.Agents.AI; +using Microsoft.Agents.AI.DurableTask; +using Microsoft.Agents.AI.Hosting.AzureFunctions; +using Microsoft.AspNetCore.Http; +using Microsoft.AspNetCore.Http.Features; +using Microsoft.AspNetCore.Mvc; +using Microsoft.Azure.Functions.Worker; +using Microsoft.DurableTask.Client; +using Microsoft.Extensions.Logging; + +namespace ReliableStreaming; + +/// +/// HTTP trigger functions for reliable streaming of durable agent responses. +/// +/// +/// This class exposes two endpoints: +/// +/// +/// Create +/// Starts an agent run and streams responses. The response format depends on the +/// Accept header: text/plain returns raw text (ideal for terminals), while +/// text/event-stream or any other value returns Server-Sent Events (SSE). +/// +/// +/// Stream +/// Resumes a stream from a cursor position, enabling reliable message delivery +/// +/// +/// +public sealed class FunctionTriggers +{ + private readonly RedisStreamResponseHandler _streamHandler; + private readonly ILogger _logger; + + /// + /// Initializes a new instance of the class. 
+ /// + /// The Redis stream handler for reading/writing agent responses. + /// The logger instance. + public FunctionTriggers(RedisStreamResponseHandler streamHandler, ILogger logger) + { + this._streamHandler = streamHandler; + this._logger = logger; + } + + /// + /// Creates a new agent session, starts an agent run with the provided prompt, + /// and streams the response back to the client. + /// + /// + /// + /// The response format depends on the Accept header: + /// + /// text/plain: Returns raw text output, ideal for terminal display with curl + /// text/event-stream or other: Returns Server-Sent Events (SSE) with cursor support + /// + /// + /// + /// The response includes an x-conversation-id header containing the conversation ID. + /// For SSE responses, clients can use this conversation ID to resume the stream if disconnected + /// by calling the endpoint with the conversation ID and the last received cursor. + /// + /// + /// Each SSE event contains the following fields: + /// + /// id: The Redis stream entry ID (use as cursor for resumption) + /// event: Either "message" for content or "done" for stream completion + /// data: The text content of the response chunk + /// + /// + /// + /// The HTTP request containing the prompt in the body. + /// The Durable Task client for signaling agents. + /// The function invocation context. + /// Cancellation token. + /// A streaming response in the format specified by the Accept header. 
+ [Function(nameof(CreateAsync))] + public async Task CreateAsync( + [HttpTrigger(AuthorizationLevel.Anonymous, "post", Route = "agent/create")] HttpRequest request, + [DurableClient] DurableTaskClient durableClient, + FunctionContext context, + CancellationToken cancellationToken) + { + // Read the prompt from the request body + string prompt = await new StreamReader(request.Body).ReadToEndAsync(cancellationToken); + if (string.IsNullOrWhiteSpace(prompt)) + { + return new BadRequestObjectResult("Request body must contain a prompt."); + } + + AIAgent agentProxy = durableClient.AsDurableAgentProxy(context, "TravelPlanner"); + + // Create a new agent thread + AgentThread thread = agentProxy.GetNewThread(); + AgentThreadMetadata metadata = thread.GetService() + ?? throw new InvalidOperationException("Failed to get AgentThreadMetadata from new thread."); + + this._logger.LogInformation("Creating new agent session: {ConversationId}", metadata.ConversationId); + + // Run the agent in the background (fire-and-forget) + DurableAgentRunOptions options = new() { IsFireAndForget = true }; + await agentProxy.RunAsync(prompt, thread, options, cancellationToken); + + this._logger.LogInformation("Agent run started for session: {ConversationId}", metadata.ConversationId); + + // Check Accept header to determine response format + // text/plain = raw text output (ideal for terminals) + // text/event-stream or other = SSE format (supports resumption) + string? acceptHeader = request.Headers.Accept.FirstOrDefault(); + bool useSseFormat = acceptHeader?.Contains("text/plain", StringComparison.OrdinalIgnoreCase) != true; + + return await this.StreamToClientAsync( + conversationId: metadata.ConversationId!, cursor: null, useSseFormat, request.HttpContext, cancellationToken); + } + + /// + /// Resumes streaming from a specific cursor position for an existing session. + /// + /// + /// + /// Use this endpoint to resume a stream after disconnection. 
Pass the conversation ID + /// (from the x-conversation-id response header) and the last received cursor + /// (Redis stream entry ID) to continue from where you left off. + /// + /// + /// If no cursor is provided, streaming starts from the beginning of the stream. + /// This allows clients to replay the entire response if needed. + /// + /// + /// The response format depends on the Accept header: + /// + /// text/plain: Returns raw text output, ideal for terminal display with curl + /// text/event-stream or other: Returns Server-Sent Events (SSE) with cursor support + /// + /// + /// + /// The HTTP request. Use the cursor query parameter to specify the cursor position. + /// The conversation ID to stream from. + /// Cancellation token. + /// A streaming response in the format specified by the Accept header. + [Function(nameof(StreamAsync))] + public async Task StreamAsync( + [HttpTrigger(AuthorizationLevel.Anonymous, "get", Route = "agent/stream/{conversationId}")] HttpRequest request, + string conversationId, + CancellationToken cancellationToken) + { + if (string.IsNullOrWhiteSpace(conversationId)) + { + return new BadRequestObjectResult("Conversation ID is required."); + } + + // Get the cursor from query string (optional) + string? cursor = request.Query["cursor"].FirstOrDefault(); + + this._logger.LogInformation( + "Resuming stream for conversation {ConversationId} from cursor: {Cursor}", + conversationId, + cursor ?? "(beginning)"); + + // Check Accept header to determine response format + // text/plain = raw text output (ideal for terminals) + // text/event-stream or other = SSE format (supports cursor-based resumption) + string? 
acceptHeader = request.Headers.Accept.FirstOrDefault(); + bool useSseFormat = acceptHeader?.Contains("text/plain", StringComparison.OrdinalIgnoreCase) != true; + + return await this.StreamToClientAsync(conversationId, cursor, useSseFormat, request.HttpContext, cancellationToken); + } + + /// + /// Streams chunks from the Redis stream to the HTTP response. + /// + /// The conversation ID to stream from. + /// Optional cursor to resume from. If null, streams from the beginning. + /// True to use SSE format, false for plain text. + /// The HTTP context for writing the response. + /// Cancellation token. + /// An empty result after streaming completes. + private async Task StreamToClientAsync( + string conversationId, + string? cursor, + bool useSseFormat, + HttpContext httpContext, + CancellationToken cancellationToken) + { + // Set response headers based on format + httpContext.Response.Headers.ContentType = useSseFormat + ? "text/event-stream" + : "text/plain; charset=utf-8"; + httpContext.Response.Headers.CacheControl = "no-cache"; + httpContext.Response.Headers.Connection = "keep-alive"; + httpContext.Response.Headers["x-conversation-id"] = conversationId; + + // Disable response buffering if supported + httpContext.Features.Get()?.DisableBuffering(); + + try + { + await foreach (StreamChunk chunk in this._streamHandler.ReadStreamAsync( + conversationId, + cursor, + cancellationToken)) + { + if (chunk.Error != null) + { + this._logger.LogWarning("Stream error for conversation {ConversationId}: {Error}", conversationId, chunk.Error); + await WriteErrorAsync(httpContext.Response, chunk.Error, useSseFormat, cancellationToken); + break; + } + + if (chunk.IsDone) + { + await WriteEndOfStreamAsync(httpContext.Response, chunk.EntryId, useSseFormat, cancellationToken); + break; + } + + if (chunk.Text != null) + { + await WriteChunkAsync(httpContext.Response, chunk, useSseFormat, cancellationToken); + } + } + } + catch (OperationCanceledException) + { + 
this._logger.LogInformation("Client disconnected from stream {ConversationId}", conversationId); + } + + return new EmptyResult(); + } + + /// + /// Writes a text chunk to the response. + /// + private static async Task WriteChunkAsync( + HttpResponse response, + StreamChunk chunk, + bool useSseFormat, + CancellationToken cancellationToken) + { + if (useSseFormat) + { + await WriteSSEEventAsync(response, "message", chunk.Text!, chunk.EntryId); + } + else + { + await response.WriteAsync(chunk.Text!, cancellationToken); + } + + await response.Body.FlushAsync(cancellationToken); + } + + /// + /// Writes an end-of-stream marker to the response. + /// + private static async Task WriteEndOfStreamAsync( + HttpResponse response, + string entryId, + bool useSseFormat, + CancellationToken cancellationToken) + { + if (useSseFormat) + { + await WriteSSEEventAsync(response, "done", "[DONE]", entryId); + } + else + { + await response.WriteAsync("\n", cancellationToken); + } + + await response.Body.FlushAsync(cancellationToken); + } + + /// + /// Writes an error message to the response. + /// + private static async Task WriteErrorAsync( + HttpResponse response, + string error, + bool useSseFormat, + CancellationToken cancellationToken) + { + if (useSseFormat) + { + await WriteSSEEventAsync(response, "error", error, null); + } + else + { + await response.WriteAsync($"\n[Error: {error}]\n", cancellationToken); + } + + await response.Body.FlushAsync(cancellationToken); + } + + /// + /// Writes a Server-Sent Event to the response stream. + /// + private static async Task WriteSSEEventAsync( + HttpResponse response, + string eventType, + string data, + string? 
id) + { + StringBuilder sb = new(); + + // Include the ID if provided (used as cursor for resumption) + if (!string.IsNullOrEmpty(id)) + { + sb.AppendLine($"id: {id}"); + } + + sb.AppendLine($"event: {eventType}"); + sb.AppendLine($"data: {data}"); + sb.AppendLine(); // Empty line marks end of event + + await response.WriteAsync(sb.ToString()); + } +} diff --git a/dotnet/samples/AzureFunctions/08_ReliableStreaming/Program.cs b/dotnet/samples/AzureFunctions/08_ReliableStreaming/Program.cs new file mode 100644 index 0000000000..6c48ed4177 --- /dev/null +++ b/dotnet/samples/AzureFunctions/08_ReliableStreaming/Program.cs @@ -0,0 +1,100 @@ +// Copyright (c) Microsoft. All rights reserved. + +// This sample demonstrates how to implement reliable streaming for durable agents using Redis Streams. +// It exposes two HTTP endpoints: +// 1. Create - Starts an agent run and streams responses back via Server-Sent Events (SSE) +// 2. Stream - Resumes a stream from a specific cursor position, enabling reliable message delivery +// +// This pattern is inspired by OpenAI's background mode for the Responses API, which allows clients +// to disconnect and reconnect to ongoing agent responses without losing messages. + +using Azure; +using Azure.AI.OpenAI; +using Azure.Identity; +using Microsoft.Agents.AI.DurableTask; +using Microsoft.Agents.AI.Hosting.AzureFunctions; +using Microsoft.Azure.Functions.Worker.Builder; +using Microsoft.Extensions.AI; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Hosting; +using OpenAI.Chat; +using ReliableStreaming; +using StackExchange.Redis; + +// Get the Azure OpenAI endpoint and deployment name from environment variables. +string endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") + ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); +string deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT") + ?? 
throw new InvalidOperationException("AZURE_OPENAI_DEPLOYMENT is not set."); + +// Get Redis connection string from environment variable. +string redisConnectionString = Environment.GetEnvironmentVariable("REDIS_CONNECTION_STRING") + ?? "localhost:6379"; + +// Get the Redis stream TTL from environment variable (default: 10 minutes). +int redisStreamTtlMinutes = int.TryParse( + Environment.GetEnvironmentVariable("REDIS_STREAM_TTL_MINUTES"), + out int ttlMinutes) ? ttlMinutes : 10; + +// Use Azure Key Credential if provided, otherwise use Azure CLI Credential. +string? azureOpenAiKey = System.Environment.GetEnvironmentVariable("AZURE_OPENAI_KEY"); +AzureOpenAIClient client = !string.IsNullOrEmpty(azureOpenAiKey) + ? new AzureOpenAIClient(new Uri(endpoint), new AzureKeyCredential(azureOpenAiKey)) + : new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()); + +// Travel Planner agent instructions - designed to produce longer responses for demonstrating streaming. +const string TravelPlannerName = "TravelPlanner"; +const string TravelPlannerInstructions = + """ + You are an expert travel planner who creates detailed, personalized travel itineraries. + When asked to plan a trip, you should: + 1. Create a comprehensive day-by-day itinerary + 2. Include specific recommendations for activities, restaurants, and attractions + 3. Provide practical tips for each destination + 4. Consider weather and local events when making recommendations + 5. Include estimated times and logistics between activities + + Always use the available tools to get current weather forecasts and local events + for the destination to make your recommendations more relevant and timely. + + Format your response with clear headings for each day and include emoji icons + to make the itinerary easy to scan and visually appealing. + """; + +// Configure the function app to host the AI agent. 
+FunctionsApplicationBuilder builder = FunctionsApplication + .CreateBuilder(args) + .ConfigureFunctionsWebApplication() + .ConfigureDurableAgents(options => + { + // Define the Travel Planner agent with tools for weather and events + options.AddAIAgentFactory(TravelPlannerName, sp => + { + return client.GetChatClient(deploymentName).CreateAIAgent( + instructions: TravelPlannerInstructions, + name: TravelPlannerName, + services: sp, + tools: [ + AIFunctionFactory.Create(TravelTools.GetWeatherForecast), + AIFunctionFactory.Create(TravelTools.GetLocalEvents), + ]); + }); + }); + +// Register Redis connection as a singleton +builder.Services.AddSingleton(_ => + ConnectionMultiplexer.Connect(redisConnectionString)); + +// Register the Redis stream response handler - this captures agent responses +// and publishes them to Redis Streams for reliable delivery. +// Registered as both the concrete type (for FunctionTriggers) and the interface (for the agent framework). +builder.Services.AddSingleton(sp => + new RedisStreamResponseHandler( + sp.GetRequiredService(), + TimeSpan.FromMinutes(redisStreamTtlMinutes))); +builder.Services.AddSingleton(sp => + sp.GetRequiredService()); + +using IHost app = builder.Build(); + +app.Run(); diff --git a/dotnet/samples/AzureFunctions/08_ReliableStreaming/README.md b/dotnet/samples/AzureFunctions/08_ReliableStreaming/README.md new file mode 100644 index 0000000000..f1c68c2339 --- /dev/null +++ b/dotnet/samples/AzureFunctions/08_ReliableStreaming/README.md @@ -0,0 +1,264 @@ +# Reliable Streaming with Redis + +This sample demonstrates how to implement reliable streaming for durable agents using Redis Streams as a message broker. It enables clients to disconnect and reconnect to ongoing agent responses without losing messages, inspired by [OpenAI's background mode](https://platform.openai.com/docs/guides/background) for the Responses API. 
+ +## Key Concepts Demonstrated + +- **Reliable message delivery**: Agent responses are persisted to Redis Streams, allowing clients to resume from any point +- **Content negotiation**: Use `Accept: text/plain` for raw terminal output, or `Accept: text/event-stream` for SSE format +- **Server-Sent Events (SSE)**: Standard streaming format that works with `curl`, browsers, and most HTTP clients +- **Cursor-based resumption**: Each SSE event includes an `id` field that can be used to resume the stream +- **Fire-and-forget agent invocation**: The agent runs in the background while the client streams from Redis via an HTTP trigger function + +## Environment Setup + +See the [README.md](../README.md) file in the parent directory for more information on how to configure the environment, including how to install and run common sample dependencies. + +### Additional Requirements: Redis + +This sample requires a Redis instance. Start a local Redis instance using Docker: + +```bash +docker run -d --name redis -p 6379:6379 redis:latest +``` + +To verify Redis is running: + +```bash +docker ps | grep redis +``` + +## Running the Sample + +Start the Azure Functions host: + +```bash +func start +``` + +### 1. Test Streaming with curl + +Open a new terminal and start a travel planning request. Use the `-i` flag to see response headers (including the conversation ID) and `Accept: text/plain` for raw text output: + +**Bash (Linux/macOS/WSL):** + +```bash +curl -i -N -X POST http://localhost:7071/api/agent/create \ + -H "Content-Type: text/plain" \ + -H "Accept: text/plain" \ + -d "Plan a 7-day trip to Tokyo, Japan for next month. Include daily activities, restaurant recommendations, and tips for getting around." +``` + +**PowerShell:** + +```powershell +curl -i -N -X POST http://localhost:7071/api/agent/create ` + -H "Content-Type: text/plain" ` + -H "Accept: text/plain" ` + -d "Plan a 7-day trip to Tokyo, Japan for next month. 
Include daily activities, restaurant recommendations, and tips for getting around." +``` + +You'll first see the response headers, including: + +```text +HTTP/1.1 200 OK +Content-Type: text/plain; charset=utf-8 +x-conversation-id: @dafx-travelplanner@a1b2c3d4e5f67890abcdef1234567890 +... +``` + +Then the agent's response will stream to your terminal in chunks, similar to a ChatGPT-style experience (though not character-by-character). + +> **Note:** The `-N` flag in curl disables output buffering, which is essential for seeing the stream in real-time. The `-i` flag includes the HTTP headers in the output. + +### 2. Demonstrate Stream Interruption and Resumption + +This is the key feature of reliable streaming! Follow these steps to see it in action: + +#### Step 1: Start a stream and note the conversation ID + +Run the curl command from step 1. Watch for the `x-conversation-id` header in the response - **copy this value**, you'll need it to resume. + +```text +x-conversation-id: @dafx-travelplanner@a1b2c3d4e5f67890abcdef1234567890 +``` + +#### Step 2: Interrupt the stream + +While the agent is still generating text, press **`Ctrl+C`** to interrupt the stream. The agent continues running in the background - your messages are being saved to Redis! + +#### Step 3: Resume the stream + +Use the conversation ID you copied to resume streaming from where you left off. 
Include the `Accept: text/plain` header to get raw text output: + +**Bash (Linux/macOS/WSL):** + +```bash +# Replace with your actual conversation ID from the x-conversation-id header +CONVERSATION_ID="@dafx-travelplanner@a1b2c3d4e5f67890abcdef1234567890" + +curl -N -H "Accept: text/plain" "http://localhost:7071/api/agent/stream/${CONVERSATION_ID}" +``` + +**PowerShell:** + +```powershell +# Replace with your actual conversation ID from the x-conversation-id header +$conversationId = "@dafx-travelplanner@a1b2c3d4e5f67890abcdef1234567890" + +curl -N -H "Accept: text/plain" "http://localhost:7071/api/agent/stream/$conversationId" +``` + +You'll see the **entire response replayed from the beginning**, including the parts you already received before interrupting. + +#### Step 4 (Advanced): Resume from a specific cursor + +If you're using SSE format, each event includes an `id` field that you can use as a cursor to resume from a specific point: + +```bash +# Resume from a specific cursor position +curl -N "http://localhost:7071/api/agent/stream/${CONVERSATION_ID}?cursor=1734567890123-0" +``` + +### 3. Alternative: SSE Format for Programmatic Clients + +If you need the full Server-Sent Events format with cursors for resumable streaming, use `Accept: text/event-stream` (or omit the Accept header): + +```bash +curl -i -N -X POST http://localhost:7071/api/agent/create \ + -H "Content-Type: text/plain" \ + -H "Accept: text/event-stream" \ + -d "Plan a 7-day trip to Tokyo, Japan." +``` + +This returns SSE-formatted events with `id`, `event`, and `data` fields: + +```text +id: 1734567890123-0 +event: message +data: # 7-Day Tokyo Adventure + +id: 1734567890124-0 +event: message +data: ## Day 1: Arrival and Exploration + +id: 1734567890999-0 +event: done +data: [DONE] +``` + +The `id` field is the Redis stream entry ID - use it as the `cursor` parameter to resume from that exact point. 
+ +### Understanding the Response Headers + +| Header | Description | +|--------|-------------| +| `x-conversation-id` | The conversation ID (session key). Use this to resume the stream. | +| `Content-Type` | Either `text/plain` or `text/event-stream` depending on your `Accept` header. | +| `Cache-Control` | Set to `no-cache` to prevent caching of the stream. | + +## Architecture Overview + +```text +┌─────────────┐ POST /agent/create ┌─────────────────────┐ +│ Client │ (Accept: text/plain or SSE)│ Azure Functions │ +│ (curl) │ ──────────────────────────► │ (FunctionTriggers) │ +└─────────────┘ └──────────┬──────────┘ + ▲ │ + │ Text or SSE stream Signal Entity + │ │ + │ ▼ + │ ┌─────────────────────┐ + │ │ AgentEntity │ + │ │ (Durable Entity) │ + │ └──────────┬──────────┘ + │ │ + │ IAgentResponseHandler + │ │ + │ ▼ + │ ┌─────────────────────┐ + │ │ RedisStreamResponse │ + │ │ Handler │ + │ └──────────┬──────────┘ + │ │ + │ XADD (write) + │ │ + │ ▼ + │ ┌─────────────────────┐ + └─────────── XREAD (poll) ────────── │ Redis Streams │ + │ (Durable Log) │ + └─────────────────────┘ +``` + +### Data Flow + +1. **Client sends prompt**: The `Create` endpoint receives the prompt and generates a new agent thread. + +2. **Agent invoked**: The durable entity (`AgentEntity`) is signaled to run the travel planner agent. This is fire-and-forget from the HTTP request's perspective. + +3. **Responses captured**: As the agent generates responses, `RedisStreamResponseHandler` (implementing `IAgentResponseHandler`) extracts the text from each `AgentRunResponseUpdate` and publishes it to a Redis Stream keyed by session ID. + +4. **Client polls Redis**: The HTTP response streams events by polling the Redis Stream. For SSE format, each event includes the Redis entry ID as the `id` field. + +5. 
**Resumption**: If the client disconnects, it can call the `Stream` endpoint with the conversation ID (from the `x-conversation-id` header) and optionally the last received cursor to resume from that point. + +## Message Delivery Guarantees + +This sample provides **at-least-once delivery** with the following characteristics: + +- **Durability**: Messages are persisted to Redis Streams with configurable TTL (default: 10 minutes). +- **Ordering**: Messages are delivered in order within a session. +- **Resumption**: Clients can resume from any point using cursor-based pagination. +- **Replay**: Clients can replay the entire stream by omitting the cursor. + +### Important Considerations + +- **No exactly-once delivery**: If a client disconnects exactly when receiving a message, it may receive that message again upon resumption. Clients should handle duplicate messages idempotently. +- **TTL expiration**: Streams expire after the configured TTL. Clients cannot resume streams that have expired. +- **Redis guarantees**: Redis streams are backed by Redis persistence mechanisms (RDB/AOF). Ensure your Redis instance is configured for durability as needed. 
+ +## When to Use These Patterns + +The patterns demonstrated in this sample are ideal for: + +- **Long-running agent tasks**: When agent responses take minutes to complete (e.g., deep research, complex planning) +- **Unreliable network connections**: Mobile apps, unstable WiFi, or connections that may drop +- **Resumable experiences**: Users should be able to close and reopen an app without losing context +- **Background processing**: When you want to fire off a task and check on it later + +These patterns may be overkill for: + +- **Simple, fast responses**: If responses complete in a few seconds, standard streaming is simpler +- **Stateless interactions**: If there's no need to resume or replay conversations +- **Very high throughput**: Redis adds latency; for maximum throughput, direct streaming may be better + +## Configuration + +| Environment Variable | Description | Default | +|---------------------|-------------|---------| +| `REDIS_CONNECTION_STRING` | Redis connection string | `localhost:6379` | +| `REDIS_STREAM_TTL_MINUTES` | How long streams are retained after last write | `10` | +| `AZURE_OPENAI_ENDPOINT` | Azure OpenAI endpoint URL | (required) | +| `AZURE_OPENAI_DEPLOYMENT` | Azure OpenAI deployment name | (required) | +| `AZURE_OPENAI_KEY` | API key (optional, uses Azure CLI auth if not set) | (optional) | + +## Cleanup + +To stop and remove the Redis Docker containers: + +```bash +docker stop redis +docker rm redis +``` + +## Disclaimer + +> ⚠️ **This sample is for illustration purposes only and is not intended to be production-ready.** +> +> A production implementation should consider: +> +> - Redis cluster configuration for high availability +> - Authentication and authorization for the streaming endpoints +> - Rate limiting and abuse prevention +> - Monitoring and alerting for stream health +> - Graceful handling of Redis failures diff --git a/dotnet/samples/AzureFunctions/08_ReliableStreaming/RedisStreamResponseHandler.cs 
b/dotnet/samples/AzureFunctions/08_ReliableStreaming/RedisStreamResponseHandler.cs new file mode 100644 index 0000000000..b0a95f49f6 --- /dev/null +++ b/dotnet/samples/AzureFunctions/08_ReliableStreaming/RedisStreamResponseHandler.cs @@ -0,0 +1,213 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Runtime.CompilerServices; +using Microsoft.Agents.AI; +using Microsoft.Agents.AI.DurableTask; +using StackExchange.Redis; + +namespace ReliableStreaming; + +/// +/// Represents a chunk of data read from a Redis stream. +/// +/// The Redis stream entry ID (can be used as a cursor for resumption). +/// The text content of the chunk, or null if this is a completion/error marker. +/// True if this chunk marks the end of the stream. +/// An error message if something went wrong, or null otherwise. +public readonly record struct StreamChunk(string EntryId, string? Text, bool IsDone, string? Error); + +/// +/// An implementation of that publishes agent response updates +/// to Redis Streams for reliable delivery. This enables clients to disconnect and reconnect +/// to ongoing agent responses without losing messages. +/// +/// +/// +/// Redis Streams provide a durable, append-only log that supports consumer groups and message +/// acknowledgment. This implementation uses auto-generated IDs (which are timestamp-based) +/// as sequence numbers, allowing clients to resume from any point in the stream. +/// +/// +/// Each agent session gets its own Redis Stream, keyed by session ID. The stream entries +/// contain text chunks extracted from objects. +/// +/// +public sealed class RedisStreamResponseHandler : IAgentResponseHandler +{ + private const int MaxEmptyReads = 300; // 5 minutes at 1 second intervals + private const int PollIntervalMs = 1000; + + private readonly IConnectionMultiplexer _redis; + private readonly TimeSpan _streamTtl; + + /// + /// Initializes a new instance of the class. + /// + /// The Redis connection multiplexer. 
+ /// The time-to-live for stream entries. Streams will expire after this duration of inactivity. + public RedisStreamResponseHandler(IConnectionMultiplexer redis, TimeSpan streamTtl) + { + this._redis = redis; + this._streamTtl = streamTtl; + } + + /// + public async ValueTask OnStreamingResponseUpdateAsync( + IAsyncEnumerable messageStream, + CancellationToken cancellationToken) + { + // Get the current session ID from the DurableAgentContext + // This is set by the AgentEntity before invoking the response handler + DurableAgentContext? context = DurableAgentContext.Current; + if (context is null) + { + throw new InvalidOperationException( + "DurableAgentContext.Current is not set. This handler must be used within a durable agent context."); + } + + // Get conversation ID from the current thread context, which is only available in the context of + // a durable agent execution. + string conversationId = context.CurrentThread.GetService()?.ConversationId + ?? throw new InvalidOperationException("Unable to determine conversation ID from the current thread."); + string streamKey = GetStreamKey(conversationId); + + IDatabase db = this._redis.GetDatabase(); + int sequenceNumber = 0; + + await foreach (AgentRunResponseUpdate update in messageStream.WithCancellation(cancellationToken)) + { + // Extract just the text content - this avoids serialization round-trip issues + string text = update.Text; + + // Only publish non-empty text chunks + if (!string.IsNullOrEmpty(text)) + { + // Create the stream entry with the text and metadata + NameValueEntry[] entries = + [ + new NameValueEntry("text", text), + new NameValueEntry("sequence", sequenceNumber++), + new NameValueEntry("timestamp", DateTimeOffset.UtcNow.ToUnixTimeMilliseconds()), + ]; + + // Add to the Redis Stream with auto-generated ID (timestamp-based) + await db.StreamAddAsync(streamKey, entries); + + // Refresh the TTL on each write to keep the stream alive during active streaming + await 
db.KeyExpireAsync(streamKey, this._streamTtl); + } + } + + // Add a sentinel entry to mark the end of the stream + NameValueEntry[] endEntries = + [ + new NameValueEntry("text", ""), + new NameValueEntry("sequence", sequenceNumber), + new NameValueEntry("timestamp", DateTimeOffset.UtcNow.ToUnixTimeMilliseconds()), + new NameValueEntry("done", "true"), + ]; + await db.StreamAddAsync(streamKey, endEntries); + + // Set final TTL - the stream will be cleaned up after this duration + await db.KeyExpireAsync(streamKey, this._streamTtl); + } + + /// + public ValueTask OnAgentResponseAsync(AgentRunResponse message, CancellationToken cancellationToken) + { + // This handler is optimized for streaming responses. + // For non-streaming responses, we don't need to store in Redis since + // the response is returned directly to the caller. + return ValueTask.CompletedTask; + } + + /// + /// Reads chunks from a Redis stream for the given session, yielding them as they become available. + /// + /// The conversation ID to read from. + /// Optional cursor to resume from. If null, reads from the beginning. + /// Cancellation token. + /// An async enumerable of stream chunks. + public async IAsyncEnumerable ReadStreamAsync( + string conversationId, + string? cursor, + [EnumeratorCancellation] CancellationToken cancellationToken) + { + string streamKey = GetStreamKey(conversationId); + + IDatabase db = this._redis.GetDatabase(); + string startId = string.IsNullOrEmpty(cursor) ? "0-0" : cursor; + + int emptyReadCount = 0; + bool hasSeenData = false; + + while (!cancellationToken.IsCancellationRequested) + { + StreamEntry[]? entries = null; + string? 
errorMessage = null; + + try + { + entries = await db.StreamReadAsync(streamKey, startId, count: 100); + } + catch (Exception ex) + { + errorMessage = ex.Message; + } + + if (errorMessage != null) + { + yield return new StreamChunk(startId, null, false, errorMessage); + yield break; + } + + // entries is guaranteed to be non-null if errorMessage is null + if (entries!.Length == 0) + { + if (!hasSeenData) + { + emptyReadCount++; + if (emptyReadCount >= MaxEmptyReads) + { + yield return new StreamChunk( + startId, + null, + false, + $"Stream not found or timed out after {MaxEmptyReads * PollIntervalMs / 1000} seconds"); + yield break; + } + } + + await Task.Delay(PollIntervalMs, cancellationToken); + continue; + } + + hasSeenData = true; + + foreach (StreamEntry entry in entries) + { + startId = entry.Id.ToString(); + string? text = entry["text"]; + string? done = entry["done"]; + + if (done == "true") + { + yield return new StreamChunk(startId, null, true, null); + yield break; + } + + if (!string.IsNullOrEmpty(text)) + { + yield return new StreamChunk(startId, text, false, null); + } + } + } + } + + /// + /// Gets the Redis Stream key for a given conversation ID. + /// + /// The conversation ID. + /// The Redis Stream key. + internal static string GetStreamKey(string conversationId) => $"agent-stream:{conversationId}"; +} diff --git a/dotnet/samples/AzureFunctions/08_ReliableStreaming/Tools.cs b/dotnet/samples/AzureFunctions/08_ReliableStreaming/Tools.cs new file mode 100644 index 0000000000..fce73bc378 --- /dev/null +++ b/dotnet/samples/AzureFunctions/08_ReliableStreaming/Tools.cs @@ -0,0 +1,161 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.ComponentModel; + +namespace ReliableStreaming; + +/// +/// Mock travel tools that return hardcoded data for demonstration purposes. +/// In a real application, these would call actual weather and events APIs. 
+/// +internal static class TravelTools +{ + /// + /// Gets a weather forecast for a destination on a specific date. + /// Returns mock weather data for demonstration purposes. + /// + /// The destination city or location. + /// The date for the forecast (e.g., "2025-01-15" or "next Monday"). + /// A weather forecast summary. + [Description("Gets the weather forecast for a destination on a specific date. Use this to provide weather-aware recommendations in the itinerary.")] + public static string GetWeatherForecast(string destination, string date) + { + // Mock weather data based on destination for realistic responses + Dictionary weatherByRegion = new(StringComparer.OrdinalIgnoreCase) + { + ["Tokyo"] = ("Partly cloudy with a chance of light rain", 58, 45), + ["Paris"] = ("Overcast with occasional drizzle", 52, 41), + ["New York"] = ("Clear and cold", 42, 28), + ["London"] = ("Foggy morning, clearing in afternoon", 48, 38), + ["Sydney"] = ("Sunny and warm", 82, 68), + ["Rome"] = ("Sunny with light breeze", 62, 48), + ["Barcelona"] = ("Partly sunny", 59, 47), + ["Amsterdam"] = ("Cloudy with light rain", 46, 38), + ["Dubai"] = ("Sunny and hot", 85, 72), + ["Singapore"] = ("Tropical thunderstorms in afternoon", 88, 77), + ["Bangkok"] = ("Hot and humid, afternoon showers", 91, 78), + ["Los Angeles"] = ("Sunny and pleasant", 72, 55), + ["San Francisco"] = ("Morning fog, afternoon sun", 62, 52), + ["Seattle"] = ("Rainy with breaks", 48, 40), + ["Miami"] = ("Warm and sunny", 78, 65), + ["Honolulu"] = ("Tropical paradise weather", 82, 72), + }; + + // Find a matching destination or use a default + (string condition, int highF, int lowF) forecast = ("Partly cloudy", 65, 50); + foreach (KeyValuePair entry in weatherByRegion) + { + if (destination.Contains(entry.Key, StringComparison.OrdinalIgnoreCase)) + { + forecast = entry.Value; + break; + } + } + + return $""" + Weather forecast for {destination} on {date}: + Conditions: {forecast.condition} + High: {forecast.highF}°F 
({(forecast.highF - 32) * 5 / 9}°C) + Low: {forecast.lowF}°F ({(forecast.lowF - 32) * 5 / 9}°C) + + Recommendation: {GetWeatherRecommendation(forecast.condition)} + """; + } + + /// + /// Gets local events happening at a destination around a specific date. + /// Returns mock event data for demonstration purposes. + /// + /// The destination city or location. + /// The date to search for events (e.g., "2025-01-15" or "next week"). + /// A list of local events and activities. + [Description("Gets local events and activities happening at a destination around a specific date. Use this to suggest timely activities and experiences.")] + public static string GetLocalEvents(string destination, string date) + { + // Mock events data based on destination + Dictionary eventsByCity = new(StringComparer.OrdinalIgnoreCase) + { + ["Tokyo"] = [ + "🎭 Kabuki Theater Performance at Kabukiza Theatre - Traditional Japanese drama", + "🌸 Winter Illuminations at Yoyogi Park - Spectacular light displays", + "🍜 Ramen Festival at Tokyo Station - Sample ramen from across Japan", + "🎮 Gaming Expo at Tokyo Big Sight - Latest video games and technology", + ], + ["Paris"] = [ + "🎨 Impressionist Exhibition at Musée d'Orsay - Extended evening hours", + "🍷 Wine Tasting Tour in Le Marais - Local sommelier guided", + "🎵 Jazz Night at Le Caveau de la Huchette - Historic jazz club", + "🥐 French Pastry Workshop - Learn from master pâtissiers", + ], + ["New York"] = [ + "🎭 Broadway Show: Hamilton - Limited engagement performances", + "🏀 Knicks vs Lakers at Madison Square Garden", + "🎨 Modern Art Exhibit at MoMA - New installations", + "🍕 Pizza Walking Tour of Brooklyn - Artisan pizzerias", + ], + ["London"] = [ + "👑 Royal Collection Exhibition at Buckingham Palace", + "🎭 West End Musical: The Phantom of the Opera", + "🍺 Craft Beer Festival at Brick Lane", + "🎪 Winter Wonderland at Hyde Park - Rides and markets", + ], + ["Sydney"] = [ + "🏄 Pro Surfing Competition at Bondi Beach", + "🎵 Opera at Sydney Opera 
House - La Bohème", + "🦘 Wildlife Night Safari at Taronga Zoo", + "🍽️ Harbor Dinner Cruise with fireworks", + ], + ["Rome"] = [ + "🏛️ After-Hours Vatican Tour - Skip the crowds", + "🍝 Pasta Making Class in Trastevere", + "🎵 Classical Concert at Borghese Gallery", + "🍷 Wine Tasting in Roman Cellars", + ], + }; + + // Find events for the destination or use generic events + string[] events = [ + "🎭 Local theater performance", + "🍽️ Food and wine festival", + "🎨 Art gallery opening", + "🎵 Live music at local venues", + ]; + + foreach (KeyValuePair entry in eventsByCity) + { + if (destination.Contains(entry.Key, StringComparison.OrdinalIgnoreCase)) + { + events = entry.Value; + break; + } + } + + string eventList = string.Join("\n• ", events); + return $""" + Local events in {destination} around {date}: + + • {eventList} + + 💡 Tip: Book popular events in advance as they may sell out quickly! + """; + } + + private static string GetWeatherRecommendation(string condition) + { + // Use case-insensitive comparison instead of ToLowerInvariant() to satisfy CA1308 + return condition switch + { + string c when c.Contains("rain", StringComparison.OrdinalIgnoreCase) || c.Contains("drizzle", StringComparison.OrdinalIgnoreCase) => + "Bring an umbrella and waterproof jacket. Consider indoor activities for backup.", + string c when c.Contains("fog", StringComparison.OrdinalIgnoreCase) => + "Morning visibility may be limited. Plan outdoor sightseeing for afternoon.", + string c when c.Contains("cold", StringComparison.OrdinalIgnoreCase) => + "Layer up with warm clothing. Hot drinks and cozy cafés recommended.", + string c when c.Contains("hot", StringComparison.OrdinalIgnoreCase) || c.Contains("warm", StringComparison.OrdinalIgnoreCase) => + "Stay hydrated and use sunscreen. 
Plan strenuous activities for cooler morning hours.", + string c when c.Contains("thunder", StringComparison.OrdinalIgnoreCase) || c.Contains("storm", StringComparison.OrdinalIgnoreCase) => + "Keep an eye on weather updates. Have indoor alternatives ready.", + _ => "Pleasant conditions expected. Great day for outdoor exploration!" + }; + } +} diff --git a/dotnet/samples/AzureFunctions/08_ReliableStreaming/host.json b/dotnet/samples/AzureFunctions/08_ReliableStreaming/host.json new file mode 100644 index 0000000000..4247b37c97 --- /dev/null +++ b/dotnet/samples/AzureFunctions/08_ReliableStreaming/host.json @@ -0,0 +1,21 @@ +{ + "version": "2.0", + "logging": { + "logLevel": { + "Microsoft.Agents.AI.DurableTask": "Information", + "Microsoft.Agents.AI.Hosting.AzureFunctions": "Information", + "DurableTask": "Information", + "Microsoft.DurableTask": "Information", + "ReliableStreaming": "Information" + } + }, + "extensions": { + "durableTask": { + "hubName": "default", + "storageProvider": { + "type": "AzureManaged", + "connectionStringName": "DURABLE_TASK_SCHEDULER_CONNECTION_STRING" + } + } + } +} diff --git a/dotnet/samples/AzureFunctions/08_ReliableStreaming/local.settings.json b/dotnet/samples/AzureFunctions/08_ReliableStreaming/local.settings.json new file mode 100644 index 0000000000..5dfdb17999 --- /dev/null +++ b/dotnet/samples/AzureFunctions/08_ReliableStreaming/local.settings.json @@ -0,0 +1,12 @@ +{ + "IsEncrypted": false, + "Values": { + "FUNCTIONS_WORKER_RUNTIME": "dotnet-isolated", + "AzureWebJobsStorage": "UseDevelopmentStorage=true", + "DURABLE_TASK_SCHEDULER_CONNECTION_STRING": "Endpoint=http://localhost:8080;TaskHub=default;Authentication=None", + "AZURE_OPENAI_ENDPOINT": "", + "AZURE_OPENAI_DEPLOYMENT": "", + "REDIS_CONNECTION_STRING": "localhost:6379", + "REDIS_STREAM_TTL_MINUTES": "10" + } +} diff --git a/dotnet/samples/AzureFunctions/README.md b/dotnet/samples/AzureFunctions/README.md index e60b0f662e..2545712ea4 100644 --- 
a/dotnet/samples/AzureFunctions/README.md +++ b/dotnet/samples/AzureFunctions/README.md @@ -9,6 +9,7 @@ This directory contains samples for Azure Functions. - **[05_AgentOrchestration_HITL](05_AgentOrchestration_HITL)**: A sample that demonstrates how to implement a human-in-the-loop workflow using durable orchestration, including external event handling for human approval. - **[06_LongRunningTools](06_LongRunningTools)**: A sample that demonstrates how agents can start and interact with durable orchestrations from tool calls to enable long-running tool scenarios. - **[07_AgentAsMcpTool](07_AgentAsMcpTool)**: A sample that demonstrates how to configure durable AI agents to be accessible as Model Context Protocol (MCP) tools. +- **[08_ReliableStreaming](08_ReliableStreaming)**: A sample that demonstrates how to implement reliable streaming for durable agents using Redis Streams, enabling clients to disconnect and reconnect without losing messages. ## Running the Samples diff --git a/dotnet/samples/GettingStarted/AGUI/Step04_HumanInLoop/Client/ServerFunctionApprovalClientAgent.cs b/dotnet/samples/GettingStarted/AGUI/Step04_HumanInLoop/Client/ServerFunctionApprovalClientAgent.cs index 41538085db..9f7812cc50 100644 --- a/dotnet/samples/GettingStarted/AGUI/Step04_HumanInLoop/Client/ServerFunctionApprovalClientAgent.cs +++ b/dotnet/samples/GettingStarted/AGUI/Step04_HumanInLoop/Client/ServerFunctionApprovalClientAgent.cs @@ -22,17 +22,17 @@ public ServerFunctionApprovalClientAgent(AIAgent innerAgent, JsonSerializerOptio this._jsonSerializerOptions = jsonSerializerOptions; } - public override Task RunAsync( + protected override Task RunCoreAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? 
options = null, CancellationToken cancellationToken = default) { - return this.RunStreamingAsync(messages, thread, options, cancellationToken) + return this.RunCoreStreamingAsync(messages, thread, options, cancellationToken) .ToAgentRunResponseAsync(cancellationToken); } - public override async IAsyncEnumerable RunStreamingAsync( + protected override async IAsyncEnumerable RunCoreStreamingAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, diff --git a/dotnet/samples/GettingStarted/AGUI/Step04_HumanInLoop/Server/ServerFunctionApprovalServerAgent.cs b/dotnet/samples/GettingStarted/AGUI/Step04_HumanInLoop/Server/ServerFunctionApprovalServerAgent.cs index f515e97531..69e3db58c7 100644 --- a/dotnet/samples/GettingStarted/AGUI/Step04_HumanInLoop/Server/ServerFunctionApprovalServerAgent.cs +++ b/dotnet/samples/GettingStarted/AGUI/Step04_HumanInLoop/Server/ServerFunctionApprovalServerAgent.cs @@ -22,17 +22,17 @@ public ServerFunctionApprovalAgent(AIAgent innerAgent, JsonSerializerOptions jso this._jsonSerializerOptions = jsonSerializerOptions; } - public override Task RunAsync( + protected override Task RunCoreAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) { - return this.RunStreamingAsync(messages, thread, options, cancellationToken) + return this.RunCoreStreamingAsync(messages, thread, options, cancellationToken) .ToAgentRunResponseAsync(cancellationToken); } - public override async IAsyncEnumerable RunStreamingAsync( + protected override async IAsyncEnumerable RunCoreStreamingAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? 
options = null, diff --git a/dotnet/samples/GettingStarted/AGUI/Step05_StateManagement/Client/StatefulAgent.cs b/dotnet/samples/GettingStarted/AGUI/Step05_StateManagement/Client/StatefulAgent.cs index 8321efaa73..d5fd9f187b 100644 --- a/dotnet/samples/GettingStarted/AGUI/Step05_StateManagement/Client/StatefulAgent.cs +++ b/dotnet/samples/GettingStarted/AGUI/Step05_StateManagement/Client/StatefulAgent.cs @@ -35,18 +35,18 @@ public StatefulAgent(AIAgent innerAgent, JsonSerializerOptions jsonSerializerOpt } /// - public override Task RunAsync( + protected override Task RunCoreAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) { - return this.RunStreamingAsync(messages, thread, options, cancellationToken) + return this.RunCoreStreamingAsync(messages, thread, options, cancellationToken) .ToAgentRunResponseAsync(cancellationToken); } /// - public override async IAsyncEnumerable RunStreamingAsync( + protected override async IAsyncEnumerable RunCoreStreamingAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, diff --git a/dotnet/samples/GettingStarted/AGUI/Step05_StateManagement/Server/SharedStateAgent.cs b/dotnet/samples/GettingStarted/AGUI/Step05_StateManagement/Server/SharedStateAgent.cs index 4588c7bd60..603698b579 100644 --- a/dotnet/samples/GettingStarted/AGUI/Step05_StateManagement/Server/SharedStateAgent.cs +++ b/dotnet/samples/GettingStarted/AGUI/Step05_StateManagement/Server/SharedStateAgent.cs @@ -17,17 +17,17 @@ public SharedStateAgent(AIAgent innerAgent, JsonSerializerOptions jsonSerializer this._jsonSerializerOptions = jsonSerializerOptions; } - public override Task RunAsync( + protected override Task RunCoreAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? 
options = null, CancellationToken cancellationToken = default) { - return this.RunStreamingAsync(messages, thread, options, cancellationToken) + return this.RunCoreStreamingAsync(messages, thread, options, cancellationToken) .ToAgentRunResponseAsync(cancellationToken); } - public override async IAsyncEnumerable RunStreamingAsync( + protected override async IAsyncEnumerable RunCoreStreamingAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, diff --git a/dotnet/samples/GettingStarted/AgentProviders/Agent_With_CustomImplementation/Program.cs b/dotnet/samples/GettingStarted/AgentProviders/Agent_With_CustomImplementation/Program.cs index 8f1039251d..a4e588f347 100644 --- a/dotnet/samples/GettingStarted/AgentProviders/Agent_With_CustomImplementation/Program.cs +++ b/dotnet/samples/GettingStarted/AgentProviders/Agent_With_CustomImplementation/Program.cs @@ -34,7 +34,7 @@ public override AgentThread GetNewThread() public override AgentThread DeserializeThread(JsonElement serializedThread, JsonSerializerOptions? jsonSerializerOptions = null) => new CustomAgentThread(serializedThread, jsonSerializerOptions); - public override async Task RunAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) + protected override async Task RunCoreAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) { // Create a thread if the user didn't supply one. thread ??= this.GetNewThread(); @@ -45,7 +45,7 @@ public override async Task RunAsync(IEnumerable m } // Clone the input messages and turn them into response messages with upper case text. - List responseMessages = CloneAndToUpperCase(messages, this.DisplayName).ToList(); + List responseMessages = CloneAndToUpperCase(messages, this.Name).ToList(); // Notify the thread of the input and output messages. 
await typedThread.MessageStore.AddMessagesAsync(messages.Concat(responseMessages), cancellationToken); @@ -58,7 +58,7 @@ public override async Task RunAsync(IEnumerable m }; } - public override async IAsyncEnumerable RunStreamingAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, [EnumeratorCancellation] CancellationToken cancellationToken = default) + protected override async IAsyncEnumerable RunCoreStreamingAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, [EnumeratorCancellation] CancellationToken cancellationToken = default) { // Create a thread if the user didn't supply one. thread ??= this.GetNewThread(); @@ -69,7 +69,7 @@ public override async IAsyncEnumerable RunStreamingAsync } // Clone the input messages and turn them into response messages with upper case text. - List responseMessages = CloneAndToUpperCase(messages, this.DisplayName).ToList(); + List responseMessages = CloneAndToUpperCase(messages, this.Name).ToList(); // Notify the thread of the input and output messages. await typedThread.MessageStore.AddMessagesAsync(messages.Concat(responseMessages), cancellationToken); @@ -79,7 +79,7 @@ public override async IAsyncEnumerable RunStreamingAsync yield return new AgentRunResponseUpdate { AgentId = this.Id, - AuthorName = this.DisplayName, + AuthorName = message.AuthorName, Role = ChatRole.Assistant, Contents = message.Contents, ResponseId = Guid.NewGuid().ToString("N"), @@ -88,7 +88,7 @@ public override async IAsyncEnumerable RunStreamingAsync } } - private static IEnumerable CloneAndToUpperCase(IEnumerable messages, string agentName) => messages.Select(x => + private static IEnumerable CloneAndToUpperCase(IEnumerable messages, string? agentName) => messages.Select(x => { // Clone the message and update its author to be the agent. 
var messageClone = x.Clone(); diff --git a/dotnet/samples/GettingStarted/AgentWithOpenAI/Agent_OpenAI_Step03_CreateFromChatClient/OpenAIChatClientAgent.cs b/dotnet/samples/GettingStarted/AgentWithOpenAI/Agent_OpenAI_Step03_CreateFromChatClient/OpenAIChatClientAgent.cs index b295bfecea..a0b59d1053 100644 --- a/dotnet/samples/GettingStarted/AgentWithOpenAI/Agent_OpenAI_Step03_CreateFromChatClient/OpenAIChatClientAgent.cs +++ b/dotnet/samples/GettingStarted/AgentWithOpenAI/Agent_OpenAI_Step03_CreateFromChatClient/OpenAIChatClientAgent.cs @@ -87,10 +87,10 @@ public virtual IAsyncEnumerable RunStreamingAsync } /// - public sealed override Task RunAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) => - base.RunAsync(messages, thread, options, cancellationToken); + protected sealed override Task RunCoreAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) => + base.RunCoreAsync(messages, thread, options, cancellationToken); /// - public override IAsyncEnumerable RunStreamingAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) => - base.RunStreamingAsync(messages, thread, options, cancellationToken); + protected override IAsyncEnumerable RunCoreStreamingAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? 
options = null, CancellationToken cancellationToken = default) => + base.RunCoreStreamingAsync(messages, thread, options, cancellationToken); } diff --git a/dotnet/samples/GettingStarted/AgentWithOpenAI/Agent_OpenAI_Step04_CreateFromOpenAIResponseClient/OpenAIResponseClientAgent.cs b/dotnet/samples/GettingStarted/AgentWithOpenAI/Agent_OpenAI_Step04_CreateFromOpenAIResponseClient/OpenAIResponseClientAgent.cs index 622223307c..f894a5434c 100644 --- a/dotnet/samples/GettingStarted/AgentWithOpenAI/Agent_OpenAI_Step04_CreateFromOpenAIResponseClient/OpenAIResponseClientAgent.cs +++ b/dotnet/samples/GettingStarted/AgentWithOpenAI/Agent_OpenAI_Step04_CreateFromOpenAIResponseClient/OpenAIResponseClientAgent.cs @@ -105,10 +105,10 @@ public virtual async IAsyncEnumerable RunStreamingAsync } /// - public sealed override Task RunAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) => - base.RunAsync(messages, thread, options, cancellationToken); + protected sealed override Task RunCoreAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) => + base.RunCoreAsync(messages, thread, options, cancellationToken); /// - public sealed override IAsyncEnumerable RunStreamingAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) => - base.RunStreamingAsync(messages, thread, options, cancellationToken); + protected sealed override IAsyncEnumerable RunCoreStreamingAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? 
options = null, CancellationToken cancellationToken = default) => + base.RunCoreStreamingAsync(messages, thread, options, cancellationToken); } diff --git a/dotnet/samples/HostedAgents/AgentWithTextSearchRag/AgentWithTextSearchRag.csproj b/dotnet/samples/HostedAgents/AgentWithTextSearchRag/AgentWithTextSearchRag.csproj index e67846f54c..54791c1992 100644 --- a/dotnet/samples/HostedAgents/AgentWithTextSearchRag/AgentWithTextSearchRag.csproj +++ b/dotnet/samples/HostedAgents/AgentWithTextSearchRag/AgentWithTextSearchRag.csproj @@ -35,7 +35,7 @@ - + diff --git a/dotnet/samples/HostedAgents/AgentsInWorkflows/AgentsInWorkflows.csproj b/dotnet/samples/HostedAgents/AgentsInWorkflows/AgentsInWorkflows.csproj index a865f43be5..ad23b11b17 100644 --- a/dotnet/samples/HostedAgents/AgentsInWorkflows/AgentsInWorkflows.csproj +++ b/dotnet/samples/HostedAgents/AgentsInWorkflows/AgentsInWorkflows.csproj @@ -35,7 +35,7 @@ - + diff --git a/dotnet/samples/M365Agent/Agents/WeatherForecastAgent.cs b/dotnet/samples/M365Agent/Agents/WeatherForecastAgent.cs index 740b959a7a..ff7af20ba9 100644 --- a/dotnet/samples/M365Agent/Agents/WeatherForecastAgent.cs +++ b/dotnet/samples/M365Agent/Agents/WeatherForecastAgent.cs @@ -48,9 +48,9 @@ public WeatherForecastAgent(IChatClient chatClient) { } - public override async Task RunAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) + protected override async Task RunCoreAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) { - var response = await base.RunAsync(messages, thread, options, cancellationToken); + var response = await base.RunCoreAsync(messages, thread, options, cancellationToken); // If the agent returned a valid structured output response // we might be able to enhance the response with an adaptive card. 
diff --git a/dotnet/src/Microsoft.Agents.AI.A2A/A2AAgent.cs b/dotnet/src/Microsoft.Agents.AI.A2A/A2AAgent.cs index e804fbb389..cf88a89177 100644 --- a/dotnet/src/Microsoft.Agents.AI.A2A/A2AAgent.cs +++ b/dotnet/src/Microsoft.Agents.AI.A2A/A2AAgent.cs @@ -30,7 +30,6 @@ internal sealed class A2AAgent : AIAgent private readonly string? _id; private readonly string? _name; private readonly string? _description; - private readonly string? _displayName; private readonly ILogger _logger; /// @@ -40,9 +39,8 @@ internal sealed class A2AAgent : AIAgent /// The unique identifier for the agent. /// The the name of the agent. /// The description of the agent. - /// The display name of the agent. /// Optional logger factory to use for logging. - public A2AAgent(A2AClient a2aClient, string? id = null, string? name = null, string? description = null, string? displayName = null, ILoggerFactory? loggerFactory = null) + public A2AAgent(A2AClient a2aClient, string? id = null, string? name = null, string? description = null, ILoggerFactory? loggerFactory = null) { _ = Throw.IfNull(a2aClient); @@ -50,7 +48,6 @@ public A2AAgent(A2AClient a2aClient, string? id = null, string? name = null, str this._id = id; this._name = name; this._description = description; - this._displayName = displayName; this._logger = (loggerFactory ?? NullLoggerFactory.Instance).CreateLogger(); } @@ -71,7 +68,7 @@ public override AgentThread DeserializeThread(JsonElement serializedThread, Json => new A2AAgentThread(serializedThread, jsonSerializerOptions); /// - public override async Task RunAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) + protected override async Task RunCoreAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? 
options = null, CancellationToken cancellationToken = default) { _ = Throw.IfNull(messages); @@ -134,7 +131,7 @@ public override async Task RunAsync(IEnumerable m } /// - public override async IAsyncEnumerable RunStreamingAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, [EnumeratorCancellation] CancellationToken cancellationToken = default) + protected override async IAsyncEnumerable RunCoreStreamingAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, [EnumeratorCancellation] CancellationToken cancellationToken = default) { _ = Throw.IfNull(messages); @@ -203,9 +200,6 @@ public override async IAsyncEnumerable RunStreamingAsync /// public override string? Name => this._name ?? base.Name; - /// - public override string DisplayName => this._displayName ?? base.DisplayName; - /// public override string? Description => this._description ?? base.Description; diff --git a/dotnet/src/Microsoft.Agents.AI.A2A/Extensions/A2AClientExtensions.cs b/dotnet/src/Microsoft.Agents.AI.A2A/Extensions/A2AClientExtensions.cs index 095481c0d4..d57ed4cb42 100644 --- a/dotnet/src/Microsoft.Agents.AI.A2A/Extensions/A2AClientExtensions.cs +++ b/dotnet/src/Microsoft.Agents.AI.A2A/Extensions/A2AClientExtensions.cs @@ -33,9 +33,8 @@ public static class A2AClientExtensions /// The unique identifier for the agent. /// The the name of the agent. /// The description of the agent. - /// The display name of the agent. /// Optional logger factory for enabling logging within the agent. /// An instance backed by the A2A agent. - public static AIAgent GetAIAgent(this A2AClient client, string? id = null, string? name = null, string? description = null, string? displayName = null, ILoggerFactory? loggerFactory = null) => - new A2AAgent(client, id, name, description, displayName, loggerFactory); + public static AIAgent GetAIAgent(this A2AClient client, string? id = null, string? name = null, string? 
description = null, ILoggerFactory? loggerFactory = null) => + new A2AAgent(client, id, name, description, loggerFactory); } diff --git a/dotnet/src/Microsoft.Agents.AI.Abstractions/AIAgent.cs b/dotnet/src/Microsoft.Agents.AI.Abstractions/AIAgent.cs index 4cff385dcc..afed5d1518 100644 --- a/dotnet/src/Microsoft.Agents.AI.Abstractions/AIAgent.cs +++ b/dotnet/src/Microsoft.Agents.AI.Abstractions/AIAgent.cs @@ -60,18 +60,6 @@ public abstract class AIAgent /// public virtual string? Name { get; } - /// - /// Gets a display-friendly name for the agent. - /// - /// - /// The agent's if available, otherwise the . - /// - /// - /// This property provides a guaranteed non-null string suitable for display in user interfaces, - /// logs, or other contexts where a readable identifier is needed. - /// - public virtual string DisplayName => this.Name ?? this.Id; - /// /// Gets a description of the agent's purpose, capabilities, or behavior. /// @@ -230,6 +218,35 @@ public Task RunAsync( /// A task that represents the asynchronous operation. The task result contains an with the agent's output. /// /// + /// This method delegates to to perform the actual agent invocation. It handles collections of messages, + /// allowing for complex conversational scenarios including multi-turn interactions, function calls, and + /// context-rich conversations. + /// + /// + /// The messages are processed in the order provided and become part of the conversation history. + /// The agent's response will also be added to if one is provided. + /// + /// + public Task RunAsync( + IEnumerable messages, + AgentThread? thread = null, + AgentRunOptions? options = null, + CancellationToken cancellationToken = default) => + this.RunCoreAsync(messages, thread, options, cancellationToken); + + /// + /// Core implementation of the agent invocation logic with a collection of chat messages. + /// + /// The collection of messages to send to the agent for processing. 
+ /// + /// The conversation thread to use for this invocation. If , a new thread will be created. + /// The thread will be updated with the input messages and any response messages generated during invocation. + /// + /// Optional configuration parameters for controlling the agent's invocation behavior. + /// The to monitor for cancellation requests. The default is . + /// A task that represents the asynchronous operation. The task result contains an with the agent's output. + /// + /// /// This is the primary invocation method that implementations must override. It handles collections of messages, /// allowing for complex conversational scenarios including multi-turn interactions, function calls, and /// context-rich conversations. @@ -239,7 +256,7 @@ public Task RunAsync( /// The agent's response will also be added to if one is provided. /// /// - public abstract Task RunAsync( + protected abstract Task RunCoreAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, @@ -324,6 +341,34 @@ public IAsyncEnumerable RunStreamingAsync( /// An asynchronous enumerable of instances representing the streaming response. /// /// + /// This method delegates to to perform the actual streaming invocation. It provides real-time + /// updates as the agent processes the input and generates its response, enabling more responsive user experiences. + /// + /// + /// Each represents a portion of the complete response, allowing consumers + /// to display partial results, implement progressive loading, or provide immediate feedback to users. + /// + /// + public IAsyncEnumerable RunStreamingAsync( + IEnumerable messages, + AgentThread? thread = null, + AgentRunOptions? options = null, + CancellationToken cancellationToken = default) => + this.RunCoreStreamingAsync(messages, thread, options, cancellationToken); + + /// + /// Core implementation of the agent streaming invocation logic with a collection of chat messages. 
+ /// + /// The collection of messages to send to the agent for processing. + /// + /// The conversation thread to use for this invocation. If , a new thread will be created. + /// The thread will be updated with the input messages and any response updates generated during invocation. + /// + /// Optional configuration parameters for controlling the agent's invocation behavior. + /// The to monitor for cancellation requests. The default is . + /// An asynchronous enumerable of instances representing the streaming response. + /// + /// /// This is the primary streaming invocation method that implementations must override. It provides real-time /// updates as the agent processes the input and generates its response, enabling more responsive user experiences. /// @@ -332,7 +377,7 @@ public IAsyncEnumerable RunStreamingAsync( /// to display partial results, implement progressive loading, or provide immediate feedback to users. /// /// - public abstract IAsyncEnumerable RunStreamingAsync( + protected abstract IAsyncEnumerable RunCoreStreamingAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, diff --git a/dotnet/src/Microsoft.Agents.AI.Abstractions/DelegatingAIAgent.cs b/dotnet/src/Microsoft.Agents.AI.Abstractions/DelegatingAIAgent.cs index 4c0ff1a36d..e7bf58f39f 100644 --- a/dotnet/src/Microsoft.Agents.AI.Abstractions/DelegatingAIAgent.cs +++ b/dotnet/src/Microsoft.Agents.AI.Abstractions/DelegatingAIAgent.cs @@ -81,7 +81,7 @@ public override AgentThread DeserializeThread(JsonElement serializedThread, Json => this.InnerAgent.DeserializeThread(serializedThread, jsonSerializerOptions); /// - public override Task RunAsync( + protected override Task RunCoreAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? 
options = null, @@ -89,7 +89,7 @@ public override Task RunAsync( => this.InnerAgent.RunAsync(messages, thread, options, cancellationToken); /// - public override IAsyncEnumerable RunStreamingAsync( + protected override IAsyncEnumerable RunCoreStreamingAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, diff --git a/dotnet/src/Microsoft.Agents.AI.CopilotStudio/CopilotStudioAgent.cs b/dotnet/src/Microsoft.Agents.AI.CopilotStudio/CopilotStudioAgent.cs index 6ca2f38d3d..203bab21ed 100644 --- a/dotnet/src/Microsoft.Agents.AI.CopilotStudio/CopilotStudioAgent.cs +++ b/dotnet/src/Microsoft.Agents.AI.CopilotStudio/CopilotStudioAgent.cs @@ -58,7 +58,7 @@ public override AgentThread DeserializeThread(JsonElement serializedThread, Json => new CopilotStudioAgentThread(serializedThread, jsonSerializerOptions); /// - public override async Task RunAsync( + protected override async Task RunCoreAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, @@ -96,7 +96,7 @@ public override async Task RunAsync( } /// - public override async IAsyncEnumerable RunStreamingAsync( + protected override async IAsyncEnumerable RunCoreStreamingAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, diff --git a/dotnet/src/Microsoft.Agents.AI.DevUI/EntitiesApiExtensions.cs b/dotnet/src/Microsoft.Agents.AI.DevUI/EntitiesApiExtensions.cs index 3271b40853..8dcc46b53c 100644 --- a/dotnet/src/Microsoft.Agents.AI.DevUI/EntitiesApiExtensions.cs +++ b/dotnet/src/Microsoft.Agents.AI.DevUI/EntitiesApiExtensions.cs @@ -231,7 +231,7 @@ private static EntityInfo CreateAgentEntityInfo(AIAgent agent) return new EntityInfo( Id: entityId, Type: "agent", - Name: agent.DisplayName, + Name: agent.Name ?? 
agent.Id, Description: agent.Description, Framework: "agent_framework", Tools: tools, diff --git a/dotnet/src/Microsoft.Agents.AI.DurableTask/DurableAIAgent.cs b/dotnet/src/Microsoft.Agents.AI.DurableTask/DurableAIAgent.cs index 2035b792fd..d841a80ddd 100644 --- a/dotnet/src/Microsoft.Agents.AI.DurableTask/DurableAIAgent.cs +++ b/dotnet/src/Microsoft.Agents.AI.DurableTask/DurableAIAgent.cs @@ -63,7 +63,7 @@ public override AgentThread DeserializeThread( /// Thrown when the agent has not been registered. /// Thrown when the provided thread is not valid for a durable agent. /// Thrown when cancellation is requested (cancellation is not supported for durable agents). - public override async Task RunAsync( + protected override async Task RunCoreAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, @@ -128,7 +128,7 @@ public override async Task RunAsync( /// Optional run options. /// The cancellation token. /// A streaming response enumerable. - public override async IAsyncEnumerable RunStreamingAsync( + protected override async IAsyncEnumerable RunCoreStreamingAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, diff --git a/dotnet/src/Microsoft.Agents.AI.DurableTask/DurableAIAgentProxy.cs b/dotnet/src/Microsoft.Agents.AI.DurableTask/DurableAIAgentProxy.cs index 58f9598a7e..ecff2d5c90 100644 --- a/dotnet/src/Microsoft.Agents.AI.DurableTask/DurableAIAgentProxy.cs +++ b/dotnet/src/Microsoft.Agents.AI.DurableTask/DurableAIAgentProxy.cs @@ -23,7 +23,7 @@ public override AgentThread GetNewThread() return new DurableAgentThread(AgentSessionId.WithRandomKey(this.Name!)); } - public override async Task RunAsync( + protected override async Task RunCoreAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? 
options = null, @@ -70,7 +70,7 @@ public override async Task RunAsync( return await agentRunHandle.ReadAgentResponseAsync(cancellationToken); } - public override IAsyncEnumerable RunStreamingAsync( + protected override IAsyncEnumerable RunCoreStreamingAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, diff --git a/dotnet/src/Microsoft.Agents.AI.DurableTask/EntityAgentWrapper.cs b/dotnet/src/Microsoft.Agents.AI.DurableTask/EntityAgentWrapper.cs index 8822ebcc39..4a6074fcb6 100644 --- a/dotnet/src/Microsoft.Agents.AI.DurableTask/EntityAgentWrapper.cs +++ b/dotnet/src/Microsoft.Agents.AI.DurableTask/EntityAgentWrapper.cs @@ -21,13 +21,13 @@ internal sealed class EntityAgentWrapper( // The ID of the agent is always the entity ID. protected override string? IdCore => this._entityContext.Id.ToString(); - public override async Task RunAsync( + protected override async Task RunCoreAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) { - AgentRunResponse response = await base.RunAsync( + AgentRunResponse response = await base.RunCoreAsync( messages, thread, this.GetAgentEntityRunOptions(options), @@ -37,13 +37,13 @@ public override async Task RunAsync( return response; } - public override async IAsyncEnumerable RunStreamingAsync( + protected override async IAsyncEnumerable RunCoreStreamingAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? 
options = null, [EnumeratorCancellation] CancellationToken cancellationToken = default) { - await foreach (AgentRunResponseUpdate update in base.RunStreamingAsync( + await foreach (AgentRunResponseUpdate update in base.RunCoreStreamingAsync( messages, thread, this.GetAgentEntityRunOptions(options), diff --git a/dotnet/src/Microsoft.Agents.AI.Hosting.AzureFunctions/BuiltInFunctionExecutor.cs b/dotnet/src/Microsoft.Agents.AI.Hosting.AzureFunctions/BuiltInFunctionExecutor.cs index 10b1bc54ff..fa0b9ef287 100644 --- a/dotnet/src/Microsoft.Agents.AI.Hosting.AzureFunctions/BuiltInFunctionExecutor.cs +++ b/dotnet/src/Microsoft.Agents.AI.Hosting.AzureFunctions/BuiltInFunctionExecutor.cs @@ -32,7 +32,7 @@ public async ValueTask ExecuteAsync(FunctionContext context) } HttpRequestData? httpRequestData = null; - TaskEntityDispatcher? dispatcher = null; + string? encodedEntityRequest = null; DurableTaskClient? durableTaskClient = null; ToolInvocationContext? mcpToolInvocationContext = null; @@ -43,8 +43,8 @@ public async ValueTask ExecuteAsync(FunctionContext context) case HttpRequestData request: httpRequestData = request; break; - case TaskEntityDispatcher entityDispatcher: - dispatcher = entityDispatcher; + case string entityRequest: + encodedEntityRequest = entityRequest; break; case DurableTaskClient client: durableTaskClient = client; @@ -78,14 +78,14 @@ public async ValueTask ExecuteAsync(FunctionContext context) if (context.FunctionDefinition.EntryPoint == BuiltInFunctions.RunAgentEntityFunctionEntryPoint) { - if (dispatcher is null) + if (encodedEntityRequest is null) { throw new InvalidOperationException($"Task entity dispatcher binding is missing for the invocation {context.InvocationId}."); } - await BuiltInFunctions.InvokeAgentAsync( - dispatcher, + context.GetInvocationResult().Value = await BuiltInFunctions.InvokeAgentAsync( durableTaskClient, + encodedEntityRequest, context); return; } diff --git 
a/dotnet/src/Microsoft.Agents.AI.Hosting.AzureFunctions/BuiltInFunctions.cs b/dotnet/src/Microsoft.Agents.AI.Hosting.AzureFunctions/BuiltInFunctions.cs index ebd378ac3b..3d824994e9 100644 --- a/dotnet/src/Microsoft.Agents.AI.Hosting.AzureFunctions/BuiltInFunctions.cs +++ b/dotnet/src/Microsoft.Agents.AI.Hosting.AzureFunctions/BuiltInFunctions.cs @@ -7,6 +7,7 @@ using Microsoft.Azure.Functions.Worker.Extensions.Mcp; using Microsoft.Azure.Functions.Worker.Http; using Microsoft.DurableTask.Client; +using Microsoft.DurableTask.Worker.Grpc; using Microsoft.Extensions.AI; using Microsoft.Extensions.DependencyInjection; @@ -22,14 +23,14 @@ internal static class BuiltInFunctions internal static readonly string RunAgentMcpToolFunctionEntryPoint = $"{typeof(BuiltInFunctions).FullName!}.{nameof(RunMcpToolAsync)}"; // Exposed as an entity trigger via AgentFunctionsProvider - public static async Task InvokeAgentAsync( - [EntityTrigger] TaskEntityDispatcher dispatcher, + public static Task InvokeAgentAsync( [DurableClient] DurableTaskClient client, + string encodedEntityRequest, FunctionContext functionContext) { // This should never be null except if the function trigger is misconfigured. - ArgumentNullException.ThrowIfNull(dispatcher); ArgumentNullException.ThrowIfNull(client); + ArgumentNullException.ThrowIfNull(encodedEntityRequest); ArgumentNullException.ThrowIfNull(functionContext); // Create a combined service provider that includes both the existing services @@ -38,7 +39,8 @@ public static async Task InvokeAgentAsync( // This method is the entry point for the agent entity. // It will be invoked by the Azure Functions runtime when the entity is called. 
- await dispatcher.DispatchAsync(new AgentEntity(combinedServiceProvider, functionContext.CancellationToken)); + AgentEntity entity = new(combinedServiceProvider, functionContext.CancellationToken); + return GrpcEntityRunner.LoadAndRunAsync(encodedEntityRequest, entity, combinedServiceProvider); } public static async Task RunAgentHttpAsync( diff --git a/dotnet/src/Microsoft.Agents.AI.Hosting.AzureFunctions/CHANGELOG.md b/dotnet/src/Microsoft.Agents.AI.Hosting.AzureFunctions/CHANGELOG.md index d32f4bb0e2..a606629dc2 100644 --- a/dotnet/src/Microsoft.Agents.AI.Hosting.AzureFunctions/CHANGELOG.md +++ b/dotnet/src/Microsoft.Agents.AI.Hosting.AzureFunctions/CHANGELOG.md @@ -1,5 +1,9 @@ # Release History +## + +- Addressed incompatibility issue with `Microsoft.Azure.Functions.Worker.Extensions.DurableTask` >= 1.11.0 ([#2759](https://github.com/microsoft/agent-framework/pull/2759)) + ## v1.0.0-preview.251125.1 - Added support for .NET 10 ([#2128](https://github.com/microsoft/agent-framework/pull/2128)) diff --git a/dotnet/src/Microsoft.Agents.AI.Hosting.AzureFunctions/DurableAgentFunctionMetadataTransformer.cs b/dotnet/src/Microsoft.Agents.AI.Hosting.AzureFunctions/DurableAgentFunctionMetadataTransformer.cs index cce8fbd1b0..f626db2a90 100644 --- a/dotnet/src/Microsoft.Agents.AI.Hosting.AzureFunctions/DurableAgentFunctionMetadataTransformer.cs +++ b/dotnet/src/Microsoft.Agents.AI.Hosting.AzureFunctions/DurableAgentFunctionMetadataTransformer.cs @@ -73,7 +73,7 @@ private static DefaultFunctionMetadata CreateAgentTrigger(string name) Language = "dotnet-isolated", RawBindings = [ - """{"name":"dispatcher","type":"entityTrigger","direction":"In"}""", + """{"name":"encodedEntityRequest","type":"entityTrigger","direction":"In"}""", """{"name":"client","type":"durableClient","direction":"In"}""" ], EntryPoint = BuiltInFunctions.RunAgentEntityFunctionEntryPoint, diff --git a/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/EndpointRouteBuilderExtensions.ChatCompletions.cs 
b/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/EndpointRouteBuilderExtensions.ChatCompletions.cs index 3fcc9cad27..92c817b124 100644 --- a/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/EndpointRouteBuilderExtensions.ChatCompletions.cs +++ b/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/EndpointRouteBuilderExtensions.ChatCompletions.cs @@ -61,7 +61,7 @@ public static IEndpointConventionBuilder MapOpenAIChatCompletions( path ??= $"/{agent.Name}/v1/chat/completions"; var group = endpoints.MapGroup(path); - var endpointAgentName = agent.DisplayName; + var endpointAgentName = agent.Name ?? agent.Id; group.MapPost("/", async ([FromBody] CreateChatCompletion request, CancellationToken cancellationToken) => await AIAgentChatCompletionsProcessor.CreateChatCompletionAsync(agent, request, cancellationToken).ConfigureAwait(false)) diff --git a/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/EndpointRouteBuilderExtensions.Responses.cs b/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/EndpointRouteBuilderExtensions.Responses.cs index 9a395b9b12..ae96636f16 100644 --- a/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/EndpointRouteBuilderExtensions.Responses.cs +++ b/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/EndpointRouteBuilderExtensions.Responses.cs @@ -76,7 +76,7 @@ public static IEndpointConventionBuilder MapOpenAIResponses( var handlers = new ResponsesHttpHandler(responsesService); var group = endpoints.MapGroup(responsesPath); - var endpointAgentName = agent.DisplayName; + var endpointAgentName = agent.Name ?? 
agent.Id; // Create response endpoint group.MapPost("/", handlers.CreateResponseAsync) diff --git a/dotnet/src/Microsoft.Agents.AI.Purview/PurviewAgent.cs b/dotnet/src/Microsoft.Agents.AI.Purview/PurviewAgent.cs index fd2a1950e9..6907fe8889 100644 --- a/dotnet/src/Microsoft.Agents.AI.Purview/PurviewAgent.cs +++ b/dotnet/src/Microsoft.Agents.AI.Purview/PurviewAgent.cs @@ -42,13 +42,13 @@ public override AgentThread GetNewThread() } /// - public override Task RunAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) + protected override Task RunCoreAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) { return this._purviewWrapper.ProcessAgentContentAsync(messages, thread, options, this._innerAgent, cancellationToken); } /// - public override async IAsyncEnumerable RunStreamingAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, [EnumeratorCancellation] CancellationToken cancellationToken = default) + protected override async IAsyncEnumerable RunCoreStreamingAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, [EnumeratorCancellation] CancellationToken cancellationToken = default) { var response = await this._purviewWrapper.ProcessAgentContentAsync(messages, thread, options, this._innerAgent, cancellationToken).ConfigureAwait(false); foreach (var update in response.ToAgentRunResponseUpdates()) diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/HandoffsWorkflowBuilder.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/HandoffsWorkflowBuilder.cs index 9e5b61ac42..9a3abfe960 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/HandoffsWorkflowBuilder.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/HandoffsWorkflowBuilder.cs @@ -125,14 +125,14 @@ public HandoffsWorkflowBuilder WithHandoff(AIAgent from, AIAgent to, string? 
han { Throw.ArgumentException( nameof(to), - $"The provided target agent '{to.DisplayName}' has no description, name, or instructions, and no handoff description has been provided. " + + $"The provided target agent '{to.Name ?? to.Id}' has no description, name, or instructions, and no handoff description has been provided. " + "At least one of these is required to register a handoff so that the appropriate target agent can be chosen."); } } if (!handoffs.Add(new(to, handoffReason))) { - Throw.InvalidOperationException($"A handoff from agent '{from.DisplayName}' to agent '{to.DisplayName}' has already been registered."); + Throw.InvalidOperationException($"A handoff from agent '{from.Name ?? from.Id}' to agent '{to.Name ?? to.Id}' has already been registered."); } return this; diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/Specialized/AgentRunStreamingExecutor.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/Specialized/AgentRunStreamingExecutor.cs index ea80f646f0..ae3a932feb 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/Specialized/AgentRunStreamingExecutor.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/Specialized/AgentRunStreamingExecutor.cs @@ -20,7 +20,7 @@ internal sealed class AgentRunStreamingExecutor(AIAgent agent, bool includeInput protected override async ValueTask TakeTurnAsync(List messages, IWorkflowContext context, bool? emitEvents, CancellationToken cancellationToken = default) { - List? roleChanged = messages.ChangeAssistantToUserForOtherParticipants(agent.DisplayName); + List? roleChanged = messages.ChangeAssistantToUserForOtherParticipants(agent.Name ?? 
agent.Id); List updates = []; await foreach (var update in agent.RunStreamingAsync(messages, cancellationToken: cancellationToken).ConfigureAwait(false)) diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/Specialized/HandoffAgentExecutor.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/Specialized/HandoffAgentExecutor.cs index 59dc49f143..24e0eea3cb 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/Specialized/HandoffAgentExecutor.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/Specialized/HandoffAgentExecutor.cs @@ -67,7 +67,7 @@ protected override RouteBuilder ConfigureRoutes(RouteBuilder routeBuilder) => List updates = []; List allMessages = handoffState.Messages; - List? roleChanges = allMessages.ChangeAssistantToUserForOtherParticipants(this._agent.DisplayName); + List? roleChanges = allMessages.ChangeAssistantToUserForOtherParticipants(this._agent.Name ?? this._agent.Id); await foreach (var update in this._agent.RunStreamingAsync(allMessages, options: this._agentOptions, @@ -85,7 +85,7 @@ await AddUpdateAsync( new AgentRunResponseUpdate { AgentId = this._agent.Id, - AuthorName = this._agent.DisplayName, + AuthorName = this._agent.Name ?? this._agent.Id, Contents = [new FunctionResultContent(fcc.CallId, "Transferred.")], CreatedAt = DateTimeOffset.UtcNow, MessageId = Guid.NewGuid().ToString("N"), diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowHostAgent.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowHostAgent.cs index 70fcee15df..7c0479b85e 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowHostAgent.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowHostAgent.cs @@ -79,8 +79,8 @@ private async ValueTask UpdateThreadAsync(IEnumerable RunAsync( + protected override async + Task RunCoreAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? 
options = null, @@ -101,8 +101,8 @@ Task RunAsync( return merger.ComputeMerged(workflowThread.LastResponseId!, this.Id, this.Name); } - public override async - IAsyncEnumerable RunStreamingAsync( + protected override async + IAsyncEnumerable RunCoreStreamingAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, diff --git a/dotnet/src/Microsoft.Agents.AI/AnonymousDelegatingAIAgent.cs b/dotnet/src/Microsoft.Agents.AI/AnonymousDelegatingAIAgent.cs index 21fbfda639..542bafdbf4 100644 --- a/dotnet/src/Microsoft.Agents.AI/AnonymousDelegatingAIAgent.cs +++ b/dotnet/src/Microsoft.Agents.AI/AnonymousDelegatingAIAgent.cs @@ -17,18 +17,18 @@ namespace Microsoft.Agents.AI; /// internal sealed class AnonymousDelegatingAIAgent : DelegatingAIAgent { - /// The delegate to use as the implementation of . + /// The delegate to use as the implementation of . private readonly Func, AgentThread?, AgentRunOptions?, AIAgent, CancellationToken, Task>? _runFunc; - /// The delegate to use as the implementation of . + /// The delegate to use as the implementation of . /// - /// When non-, this delegate is used as the implementation of and + /// When non-, this delegate is used as the implementation of and /// will be invoked with the same arguments as the method itself. - /// When , will delegate directly to the inner agent. + /// When , will delegate directly to the inner agent. /// private readonly Func, AgentThread?, AgentRunOptions?, AIAgent, CancellationToken, IAsyncEnumerable>? _runStreamingFunc; - /// The delegate to use as the implementation of both and . + /// The delegate to use as the implementation of both and . private readonly Func, AgentThread?, AgentRunOptions?, Func, AgentThread?, AgentRunOptions?, CancellationToken, Task>, CancellationToken, Task>? _sharedFunc; /// @@ -36,7 +36,7 @@ internal sealed class AnonymousDelegatingAIAgent : DelegatingAIAgent /// /// The inner agent. 
/// - /// A delegate that provides the implementation for both and . + /// A delegate that provides the implementation for both and . /// In addition to the arguments for the operation, it's provided with a delegate to the inner agent that should be /// used to perform the operation on the inner agent. It will handle both the non-streaming and streaming cases. /// @@ -61,13 +61,13 @@ public AnonymousDelegatingAIAgent( /// /// The inner agent. /// - /// A delegate that provides the implementation for . When , - /// must be non-null, and the implementation of + /// A delegate that provides the implementation for . When , + /// must be non-null, and the implementation of /// will use for the implementation. /// /// - /// A delegate that provides the implementation for . When , - /// must be non-null, and the implementation of + /// A delegate that provides the implementation for . When , + /// must be non-null, and the implementation of /// will use for the implementation. /// /// is . @@ -85,7 +85,7 @@ public AnonymousDelegatingAIAgent( } /// - public override Task RunAsync( + protected override Task RunCoreAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, @@ -132,7 +132,7 @@ await this._sharedFunc( } /// - public override IAsyncEnumerable RunStreamingAsync( + protected override IAsyncEnumerable RunCoreStreamingAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, diff --git a/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgent.cs b/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgent.cs index a5a34d24a9..f4a7fcd9c2 100644 --- a/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgent.cs +++ b/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgent.cs @@ -149,7 +149,7 @@ public ChatClientAgent(IChatClient chatClient, ChatClientAgentOptions? options, internal ChatOptions? 
ChatOptions => this._agentOptions?.ChatOptions; /// - public override Task RunAsync( + protected override Task RunCoreAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, @@ -193,7 +193,7 @@ private static IChatClient ApplyRunOptionsTransformations(AgentRunOptions? optio } /// - public override async IAsyncEnumerable RunStreamingAsync( + protected override async IAsyncEnumerable RunCoreStreamingAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, diff --git a/dotnet/src/Microsoft.Agents.AI/FunctionInvocationDelegatingAgent.cs b/dotnet/src/Microsoft.Agents.AI/FunctionInvocationDelegatingAgent.cs index 7eefcebc55..2463b266c7 100644 --- a/dotnet/src/Microsoft.Agents.AI/FunctionInvocationDelegatingAgent.cs +++ b/dotnet/src/Microsoft.Agents.AI/FunctionInvocationDelegatingAgent.cs @@ -21,10 +21,10 @@ internal FunctionInvocationDelegatingAgent(AIAgent innerAgent, Func RunAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) + protected override Task RunCoreAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) => this.InnerAgent.RunAsync(messages, thread, this.AgentRunOptionsWithFunctionMiddleware(options), cancellationToken); - public override IAsyncEnumerable RunStreamingAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) + protected override IAsyncEnumerable RunCoreStreamingAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? 
options = null, CancellationToken cancellationToken = default) => this.InnerAgent.RunStreamingAsync(messages, thread, this.AgentRunOptionsWithFunctionMiddleware(options), cancellationToken); // Decorate options to add the middleware function diff --git a/dotnet/src/Microsoft.Agents.AI/LoggingAgent.cs b/dotnet/src/Microsoft.Agents.AI/LoggingAgent.cs index b986e58bae..03b85d1ef5 100644 --- a/dotnet/src/Microsoft.Agents.AI/LoggingAgent.cs +++ b/dotnet/src/Microsoft.Agents.AI/LoggingAgent.cs @@ -55,7 +55,7 @@ public JsonSerializerOptions JsonSerializerOptions } /// - public override async Task RunAsync( + protected override async Task RunCoreAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) { if (this._logger.IsEnabled(LogLevel.Debug)) @@ -72,7 +72,7 @@ public override async Task RunAsync( try { - AgentRunResponse response = await base.RunAsync(messages, thread, options, cancellationToken).ConfigureAwait(false); + AgentRunResponse response = await base.RunCoreAsync(messages, thread, options, cancellationToken).ConfigureAwait(false); if (this._logger.IsEnabled(LogLevel.Debug)) { @@ -101,7 +101,7 @@ public override async Task RunAsync( } /// - public override async IAsyncEnumerable RunStreamingAsync( + protected override async IAsyncEnumerable RunCoreStreamingAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? 
options = null, [EnumeratorCancellation] CancellationToken cancellationToken = default) { if (this._logger.IsEnabled(LogLevel.Debug)) @@ -119,7 +119,7 @@ public override async IAsyncEnumerable RunStreamingAsync IAsyncEnumerator e; try { - e = base.RunStreamingAsync(messages, thread, options, cancellationToken).GetAsyncEnumerator(cancellationToken); + e = base.RunCoreStreamingAsync(messages, thread, options, cancellationToken).GetAsyncEnumerator(cancellationToken); } catch (OperationCanceledException) { diff --git a/dotnet/src/Microsoft.Agents.AI/OpenTelemetryAgent.cs b/dotnet/src/Microsoft.Agents.AI/OpenTelemetryAgent.cs index 7cd3c27b70..35d31371c3 100644 --- a/dotnet/src/Microsoft.Agents.AI/OpenTelemetryAgent.cs +++ b/dotnet/src/Microsoft.Agents.AI/OpenTelemetryAgent.cs @@ -78,7 +78,7 @@ public bool EnableSensitiveData } /// - public override async Task RunAsync( + protected override async Task RunCoreAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) { ChatOptions co = new ForwardedOptions(options, thread, Activity.Current); @@ -89,7 +89,7 @@ public override async Task RunAsync( } /// - public override async IAsyncEnumerable RunStreamingAsync( + protected override async IAsyncEnumerable RunCoreStreamingAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, [EnumeratorCancellation] CancellationToken cancellationToken = default) { ChatOptions co = new ForwardedOptions(options, thread, Activity.Current); @@ -114,7 +114,9 @@ private void UpdateCurrentActivity(Activity? previousActivity) // Override information set by OpenTelemetryChatClient to make it specific to invoke_agent. - activity.DisplayName = $"{OpenTelemetryConsts.GenAI.InvokeAgent} {this.DisplayName}"; + activity.DisplayName = string.IsNullOrWhiteSpace(this.Name) + ? 
$"{OpenTelemetryConsts.GenAI.InvokeAgent} {this.Id}" + : $"{OpenTelemetryConsts.GenAI.InvokeAgent} {this.Name}({this.Id})"; activity.SetTag(OpenTelemetryConsts.GenAI.Operation.Name, OpenTelemetryConsts.GenAI.InvokeAgent); if (!string.IsNullOrWhiteSpace(this._providerName)) diff --git a/dotnet/tests/Microsoft.Agents.AI.A2A.UnitTests/A2AAgentTests.cs b/dotnet/tests/Microsoft.Agents.AI.A2A.UnitTests/A2AAgentTests.cs index 9869d47f6b..0b491fb303 100644 --- a/dotnet/tests/Microsoft.Agents.AI.A2A.UnitTests/A2AAgentTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.A2A.UnitTests/A2AAgentTests.cs @@ -42,16 +42,14 @@ public void Constructor_WithAllParameters_InitializesPropertiesCorrectly() const string TestId = "test-id"; const string TestName = "test-name"; const string TestDescription = "test-description"; - const string TestDisplayName = "test-display-name"; // Act - var agent = new A2AAgent(this._a2aClient, TestId, TestName, TestDescription, TestDisplayName); + var agent = new A2AAgent(this._a2aClient, TestId, TestName, TestDescription); // Assert Assert.Equal(TestId, agent.Id); Assert.Equal(TestName, agent.Name); Assert.Equal(TestDescription, agent.Description); - Assert.Equal(TestDisplayName, agent.DisplayName); } [Fact] @@ -70,7 +68,6 @@ public void Constructor_WithDefaultParameters_UsesBaseProperties() Assert.NotEmpty(agent.Id); Assert.Null(agent.Name); Assert.Null(agent.Description); - Assert.Equal(agent.Id, agent.DisplayName); } [Fact] diff --git a/dotnet/tests/Microsoft.Agents.AI.A2A.UnitTests/Extensions/A2AClientExtensionsTests.cs b/dotnet/tests/Microsoft.Agents.AI.A2A.UnitTests/Extensions/A2AClientExtensionsTests.cs index e21035003e..5b84324e8b 100644 --- a/dotnet/tests/Microsoft.Agents.AI.A2A.UnitTests/Extensions/A2AClientExtensionsTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.A2A.UnitTests/Extensions/A2AClientExtensionsTests.cs @@ -19,10 +19,9 @@ public void GetAIAgent_WithAllParameters_ReturnsA2AAgentWithSpecifiedProperties( const string TestId = 
"test-agent-id"; const string TestName = "Test Agent"; const string TestDescription = "This is a test agent description"; - const string TestDisplayName = "Test Display Name"; // Act - var agent = a2aClient.GetAIAgent(TestId, TestName, TestDescription, TestDisplayName); + var agent = a2aClient.GetAIAgent(TestId, TestName, TestDescription); // Assert Assert.NotNull(agent); @@ -30,6 +29,5 @@ public void GetAIAgent_WithAllParameters_ReturnsA2AAgentWithSpecifiedProperties( Assert.Equal(TestId, agent.Id); Assert.Equal(TestName, agent.Name); Assert.Equal(TestDescription, agent.Description); - Assert.Equal(TestDisplayName, agent.DisplayName); } } diff --git a/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/AIAgentTests.cs b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/AIAgentTests.cs index e3bda2081a..a1c8cb32bf 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/AIAgentTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/AIAgentTests.cs @@ -8,6 +8,7 @@ using System.Threading.Tasks; using Microsoft.Extensions.AI; using Moq; +using Moq.Protected; namespace Microsoft.Agents.AI.Abstractions.UnitTests; @@ -33,18 +34,20 @@ public AIAgentTests() this._agentMock = new Mock { CallBase = true }; this._agentMock - .Setup(x => x.RunAsync( - It.IsAny>(), - this._agentThreadMock.Object, - It.IsAny(), - It.IsAny())) + .Protected() + .Setup>("RunCoreAsync", + ItExpr.IsAny>(), + ItExpr.Is(t => t == this._agentThreadMock.Object), + ItExpr.IsAny(), + ItExpr.IsAny()) .ReturnsAsync(this._invokeResponse); this._agentMock - .Setup(x => x.RunStreamingAsync( - It.IsAny>(), - this._agentThreadMock.Object, - It.IsAny(), - It.IsAny())) + .Protected() + .Setup>("RunCoreStreamingAsync", + ItExpr.IsAny>(), + ItExpr.Is(t => t == this._agentThreadMock.Object), + ItExpr.IsAny(), + ItExpr.IsAny()) .Returns(ToAsyncEnumerableAsync(this._invokeStreamingResponses)); } @@ -64,13 +67,14 @@ public async Task 
InvokeWithoutMessageCallsMockedInvokeWithEmptyArrayAsync() Assert.Equal(this._invokeResponse, response); // Verify that the mocked method was called with the expected parameters - this._agentMock.Verify( - x => x.RunAsync( - It.Is>(messages => messages.Count == 0), - this._agentThreadMock.Object, - options, - cancellationToken), - Times.Once); + this._agentMock + .Protected() + .Verify>("RunCoreAsync", + Times.Once(), + ItExpr.Is>(messages => !messages.Any()), + ItExpr.Is(t => t == this._agentThreadMock.Object), + ItExpr.Is(o => o == options), + ItExpr.Is(ct => ct == cancellationToken)); } /// @@ -90,13 +94,14 @@ public async Task InvokeWithStringMessageCallsMockedInvokeWithMessageInCollectio Assert.Equal(this._invokeResponse, response); // Verify that the mocked method was called with the expected parameters - this._agentMock.Verify( - x => x.RunAsync( - It.Is>(messages => messages.Count == 1 && messages.First().Text == Message), - this._agentThreadMock.Object, - options, - cancellationToken), - Times.Once); + this._agentMock + .Protected() + .Verify>("RunCoreAsync", + Times.Once(), + ItExpr.Is>(messages => messages.Count() == 1 && messages.First().Text == Message), + ItExpr.Is(t => t == this._agentThreadMock.Object), + ItExpr.Is(o => o == options), + ItExpr.Is(ct => ct == cancellationToken)); } /// @@ -116,13 +121,14 @@ public async Task InvokeWithSingleMessageCallsMockedInvokeWithMessageInCollectio Assert.Equal(this._invokeResponse, response); // Verify that the mocked method was called with the expected parameters - this._agentMock.Verify( - x => x.RunAsync( - It.Is>(messages => messages.Count == 1 && messages.First() == message), - this._agentThreadMock.Object, - options, - cancellationToken), - Times.Once); + this._agentMock + .Protected() + .Verify>("RunCoreAsync", + Times.Once(), + ItExpr.Is>(messages => messages.Count() == 1 && messages.First() == message), + ItExpr.Is(t => t == this._agentThreadMock.Object), + ItExpr.Is(o => o == options), + ItExpr.Is(ct 
=> ct == cancellationToken)); } /// @@ -144,13 +150,14 @@ public async Task InvokeStreamingWithoutMessageCallsMockedInvokeWithEmptyArrayAs } // Verify that the mocked method was called with the expected parameters - this._agentMock.Verify( - x => x.RunStreamingAsync( - It.Is>(messages => messages.Count == 0), - this._agentThreadMock.Object, - options, - cancellationToken), - Times.Once); + this._agentMock + .Protected() + .Verify>("RunCoreStreamingAsync", + Times.Once(), + ItExpr.Is>(messages => !messages.Any()), + ItExpr.Is(t => t == this._agentThreadMock.Object), + ItExpr.Is(o => o == options), + ItExpr.Is(ct => ct == cancellationToken)); } /// @@ -173,13 +180,14 @@ public async Task InvokeStreamingWithStringMessageCallsMockedInvokeWithMessageIn } // Verify that the mocked method was called with the expected parameters - this._agentMock.Verify( - x => x.RunStreamingAsync( - It.Is>(messages => messages.Count == 1 && messages.First().Text == Message), - this._agentThreadMock.Object, - options, - cancellationToken), - Times.Once); + this._agentMock + .Protected() + .Verify>("RunCoreStreamingAsync", + Times.Once(), + ItExpr.Is>(messages => messages.Count() == 1 && messages.First().Text == Message), + ItExpr.Is(t => t == this._agentThreadMock.Object), + ItExpr.Is(o => o == options), + ItExpr.Is(ct => ct == cancellationToken)); } /// @@ -202,13 +210,14 @@ public async Task InvokeStreamingWithSingleMessageCallsMockedInvokeWithMessageIn } // Verify that the mocked method was called with the expected parameters - this._agentMock.Verify( - x => x.RunStreamingAsync( - It.Is>(messages => messages.Count == 1 && messages.First() == message), - this._agentThreadMock.Object, - options, - cancellationToken), - Times.Once); + this._agentMock + .Protected() + .Verify>("RunCoreStreamingAsync", + Times.Once(), + ItExpr.Is>(messages => messages.Count() == 1 && messages.First() == message), + ItExpr.Is(t => t == this._agentThreadMock.Object), + ItExpr.Is(o => o == options), + 
ItExpr.Is(ct => ct == cancellationToken)); } [Fact] @@ -375,14 +384,14 @@ public override AgentThread GetNewThread() public override AgentThread DeserializeThread(JsonElement serializedThread, JsonSerializerOptions? jsonSerializerOptions = null) => throw new NotImplementedException(); - public override Task RunAsync( + protected override Task RunCoreAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) => throw new NotImplementedException(); - public override IAsyncEnumerable RunStreamingAsync( + protected override IAsyncEnumerable RunCoreStreamingAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, diff --git a/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/DelegatingAIAgentTests.cs b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/DelegatingAIAgentTests.cs index 50271b7eee..2a6cc7bb81 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/DelegatingAIAgentTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/DelegatingAIAgentTests.cs @@ -38,19 +38,21 @@ public DelegatingAIAgentTests() this._innerAgentMock.Setup(x => x.GetNewThread()).Returns(this._testThread); this._innerAgentMock - .Setup(x => x.RunAsync( - It.IsAny>(), - It.IsAny(), - It.IsAny(), - It.IsAny())) + .Protected() + .Setup>("RunCoreAsync", + ItExpr.IsAny>(), + ItExpr.IsAny(), + ItExpr.IsAny(), + ItExpr.IsAny()) .ReturnsAsync(this._testResponse); this._innerAgentMock - .Setup(x => x.RunStreamingAsync( - It.IsAny>(), - It.IsAny(), - It.IsAny(), - It.IsAny())) + .Protected() + .Setup>("RunCoreStreamingAsync", + ItExpr.IsAny>(), + ItExpr.IsAny(), + ItExpr.IsAny(), + ItExpr.IsAny()) .Returns(ToAsyncEnumerableAsync(this._testStreamingResponses)); this._delegatingAgent = new TestDelegatingAIAgent(this._innerAgentMock.Object); @@ -159,7 +161,12 @@ public async Task RunAsyncDefaultsToInnerAgentAsync() var innerAgentMock = new Mock(); 
innerAgentMock - .Setup(x => x.RunAsync(expectedMessages, expectedThread, expectedOptions, expectedCancellationToken)) + .Protected() + .Setup>("RunCoreAsync", + ItExpr.Is>(m => m == expectedMessages), + ItExpr.Is(t => t == expectedThread), + ItExpr.Is(o => o == expectedOptions), + ItExpr.Is(ct => ct == expectedCancellationToken)) .Returns(expectedResult.Task); var delegatingAgent = new TestDelegatingAIAgent(innerAgentMock.Object); @@ -193,7 +200,12 @@ public async Task RunStreamingAsyncDefaultsToInnerAgentAsync() var innerAgentMock = new Mock(); innerAgentMock - .Setup(x => x.RunStreamingAsync(expectedMessages, expectedThread, expectedOptions, expectedCancellationToken)) + .Protected() + .Setup>("RunCoreStreamingAsync", + ItExpr.Is>(m => m == expectedMessages), + ItExpr.Is(t => t == expectedThread), + ItExpr.Is(o => o == expectedOptions), + ItExpr.Is(ct => ct == expectedCancellationToken)) .Returns(ToAsyncEnumerableAsync(expectedResults)); var delegatingAgent = new TestDelegatingAIAgent(innerAgentMock.Object); diff --git a/dotnet/tests/Microsoft.Agents.AI.Declarative.UnitTests/AggregatorPromptAgentFactoryTests.cs b/dotnet/tests/Microsoft.Agents.AI.Declarative.UnitTests/AggregatorPromptAgentFactoryTests.cs index d20bd9be00..09ee72504a 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Declarative.UnitTests/AggregatorPromptAgentFactoryTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Declarative.UnitTests/AggregatorPromptAgentFactoryTests.cs @@ -76,12 +76,12 @@ public override AgentThread GetNewThread() throw new NotImplementedException(); } - public override Task RunAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) + protected override Task RunCoreAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? 
options = null, CancellationToken cancellationToken = default) { throw new NotImplementedException(); } - public override IAsyncEnumerable RunStreamingAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) + protected override IAsyncEnumerable RunCoreStreamingAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) { throw new NotImplementedException(); } diff --git a/dotnet/tests/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.IntegrationTests/BasicStreamingTests.cs b/dotnet/tests/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.IntegrationTests/BasicStreamingTests.cs index 69560421cf..dfabaca64e 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.IntegrationTests/BasicStreamingTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.IntegrationTests/BasicStreamingTests.cs @@ -290,7 +290,7 @@ public override AgentThread DeserializeThread(JsonElement serializedThread, Json return new FakeInMemoryAgentThread(serializedThread, jsonSerializerOptions); } - public override async Task RunAsync( + protected override async Task RunCoreAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, @@ -305,7 +305,7 @@ public override async Task RunAsync( return updates.ToAgentRunResponse(); } - public override async IAsyncEnumerable RunStreamingAsync( + protected override async IAsyncEnumerable RunCoreStreamingAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, @@ -358,7 +358,7 @@ public override AgentThread DeserializeThread(JsonElement serializedThread, Json return new FakeInMemoryAgentThread(serializedThread, jsonSerializerOptions); } - public override async Task RunAsync( + protected override async Task RunCoreAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? 
options = null, @@ -373,7 +373,7 @@ public override async Task RunAsync( return updates.ToAgentRunResponse(); } - public override async IAsyncEnumerable RunStreamingAsync( + protected override async IAsyncEnumerable RunCoreStreamingAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, diff --git a/dotnet/tests/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.IntegrationTests/ForwardedPropertiesTests.cs b/dotnet/tests/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.IntegrationTests/ForwardedPropertiesTests.cs index df8caea214..1777ff456a 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.IntegrationTests/ForwardedPropertiesTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.IntegrationTests/ForwardedPropertiesTests.cs @@ -303,12 +303,12 @@ public FakeForwardedPropsAgent() public JsonElement ReceivedForwardedProperties { get; private set; } - public override Task RunAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) + protected override Task RunCoreAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) { - return this.RunStreamingAsync(messages, thread, options, cancellationToken).ToAgentRunResponseAsync(cancellationToken); + return this.RunCoreStreamingAsync(messages, thread, options, cancellationToken).ToAgentRunResponseAsync(cancellationToken); } - public override async IAsyncEnumerable RunStreamingAsync( + protected override async IAsyncEnumerable RunCoreStreamingAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? 
options = null, diff --git a/dotnet/tests/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.IntegrationTests/SharedStateTests.cs b/dotnet/tests/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.IntegrationTests/SharedStateTests.cs index c96f2d92d0..df51d1cbc4 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.IntegrationTests/SharedStateTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.IntegrationTests/SharedStateTests.cs @@ -342,12 +342,12 @@ internal sealed class FakeStateAgent : AIAgent { public override string? Description => "Agent for state testing"; - public override Task RunAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) + protected override Task RunCoreAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) { - return this.RunStreamingAsync(messages, thread, options, cancellationToken).ToAgentRunResponseAsync(cancellationToken); + return this.RunCoreStreamingAsync(messages, thread, options, cancellationToken).ToAgentRunResponseAsync(cancellationToken); } - public override async IAsyncEnumerable RunStreamingAsync( + protected override async IAsyncEnumerable RunCoreStreamingAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? 
options = null, diff --git a/dotnet/tests/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.UnitTests/AGUIEndpointRouteBuilderExtensionsTests.cs b/dotnet/tests/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.UnitTests/AGUIEndpointRouteBuilderExtensionsTests.cs index 3e80a58369..402451b061 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.UnitTests/AGUIEndpointRouteBuilderExtensionsTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.UnitTests/AGUIEndpointRouteBuilderExtensionsTests.cs @@ -430,12 +430,12 @@ private sealed class MultiResponseAgent : AIAgent public override AgentThread DeserializeThread(JsonElement serializedThread, JsonSerializerOptions? jsonSerializerOptions = null) => new TestInMemoryAgentThread(serializedThread, jsonSerializerOptions); - public override Task RunAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) + protected override Task RunCoreAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) { throw new NotImplementedException(); } - public override async IAsyncEnumerable RunStreamingAsync( + protected override async IAsyncEnumerable RunCoreStreamingAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, @@ -519,12 +519,12 @@ private sealed class TestAgent : AIAgent public override AgentThread DeserializeThread(JsonElement serializedThread, JsonSerializerOptions? jsonSerializerOptions = null) => new TestInMemoryAgentThread(serializedThread, jsonSerializerOptions); - public override Task RunAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) + protected override Task RunCoreAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? 
options = null, CancellationToken cancellationToken = default) { throw new NotImplementedException(); } - public override async IAsyncEnumerable RunStreamingAsync( + protected override async IAsyncEnumerable RunCoreStreamingAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, diff --git a/dotnet/tests/Microsoft.Agents.AI.Hosting.AzureFunctions.IntegrationTests/SamplesValidation.cs b/dotnet/tests/Microsoft.Agents.AI.Hosting.AzureFunctions.IntegrationTests/SamplesValidation.cs index 0ba879f024..c80cd73941 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Hosting.AzureFunctions.IntegrationTests/SamplesValidation.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Hosting.AzureFunctions.IntegrationTests/SamplesValidation.cs @@ -19,6 +19,7 @@ public sealed class SamplesValidation(ITestOutputHelper outputHelper) : IAsyncLi private const string AzureFunctionsPort = "7071"; private const string AzuritePort = "10000"; private const string DtsPort = "8080"; + private const string RedisPort = "6379"; private static readonly string s_dotnetTargetFramework = GetTargetFramework(); private static readonly HttpClient s_sharedHttpClient = new(); @@ -392,6 +393,136 @@ await this.WaitForConditionAsync( }); } + [Fact] + public async Task ReliableStreamingSampleValidationAsync() + { + string samplePath = Path.Combine(s_samplesPath, "08_ReliableStreaming"); + await this.RunSampleTestAsync(samplePath, async (logs) => + { + Uri createUri = new($"http://localhost:{AzureFunctionsPort}/api/agent/create"); + this._outputHelper.WriteLine($"Starting reliable streaming agent via POST request to {createUri}..."); + + // Test the agent endpoint with a simple prompt + const string RequestBody = "Plan a 3-day trip to Seattle. 
Include daily activities."; + using HttpContent content = new StringContent(RequestBody, Encoding.UTF8, "text/plain"); + using HttpRequestMessage request = new(HttpMethod.Post, createUri) + { + Content = content + }; + request.Headers.Add("Accept", "text/plain"); + + using HttpResponseMessage response = await s_sharedHttpClient.SendAsync( + request, + HttpCompletionOption.ResponseHeadersRead); + + // The response should be successful + Assert.True(response.IsSuccessStatusCode, $"Agent request failed with status: {response.StatusCode}"); + Assert.Equal("text/plain", response.Content.Headers.ContentType?.MediaType); + + // The response headers should include the conversation ID + string? conversationId = response.Headers.GetValues("x-conversation-id")?.FirstOrDefault(); + Assert.NotNull(conversationId); + Assert.NotEmpty(conversationId); + this._outputHelper.WriteLine($"Agent conversation ID: {conversationId}"); + + // Read the streamed response + using Stream responseStream = await response.Content.ReadAsStreamAsync(); + using StreamReader reader = new(responseStream); + StringBuilder responseText = new(); + char[] buffer = new char[1024]; + int bytesRead; + + // Read for a reasonable amount of time to get some content + using CancellationTokenSource readTimeout = new(TimeSpan.FromSeconds(30)); + try + { + while (!readTimeout.Token.IsCancellationRequested) + { + bytesRead = await reader.ReadAsync(buffer, 0, buffer.Length); + if (bytesRead == 0) + { + // Check if we've received enough content + if (responseText.Length > 50) + { + break; + } + await Task.Delay(100, readTimeout.Token); + continue; + } + + responseText.Append(buffer, 0, bytesRead); + if (responseText.Length > 200) + { + // We've received enough content to validate + break; + } + } + } + catch (OperationCanceledException) + { + // Timeout is acceptable if we got some content + } + + string responseContent = responseText.ToString(); + Assert.True(responseContent.Length > 0, "Expected to receive some 
streamed content"); + this._outputHelper.WriteLine($"Received {responseContent.Length} characters of streamed content"); + + // Test resumption by calling the stream endpoint + Uri streamUri = new($"http://localhost:{AzureFunctionsPort}/api/agent/stream/{conversationId}"); + this._outputHelper.WriteLine($"Testing stream resumption via GET request to {streamUri}..."); + + using HttpRequestMessage streamRequest = new(HttpMethod.Get, streamUri); + streamRequest.Headers.Add("Accept", "text/plain"); + + using HttpResponseMessage streamResponse = await s_sharedHttpClient.SendAsync( + streamRequest, + HttpCompletionOption.ResponseHeadersRead); + Assert.True(streamResponse.IsSuccessStatusCode, $"Stream request failed with status: {streamResponse.StatusCode}"); + Assert.Equal("text/plain", streamResponse.Content.Headers.ContentType?.MediaType); + + // Verify the conversation ID header is present + string? resumedConversationId = streamResponse.Headers.GetValues("x-conversation-id")?.FirstOrDefault(); + Assert.Equal(conversationId, resumedConversationId); + + // Read some content from the resumed stream + using Stream resumedStream = await streamResponse.Content.ReadAsStreamAsync(); + using StreamReader resumedReader = new(resumedStream); + StringBuilder resumedText = new(); + + using CancellationTokenSource resumedReadTimeout = new(TimeSpan.FromSeconds(10)); + try + { + while (!resumedReadTimeout.Token.IsCancellationRequested) + { + bytesRead = await resumedReader.ReadAsync(buffer, 0, buffer.Length); + if (bytesRead == 0) + { + if (resumedText.Length > 50) + { + break; + } + await Task.Delay(100, resumedReadTimeout.Token); + continue; + } + + resumedText.Append(buffer, 0, bytesRead); + if (resumedText.Length > 100) + { + break; + } + } + } + catch (OperationCanceledException) + { + // Timeout is acceptable if we got some content + } + + string resumedContent = resumedText.ToString(); + Assert.True(resumedContent.Length > 0, "Expected to receive some content from resumed 
stream"); + this._outputHelper.WriteLine($"Received {resumedContent.Length} characters from resumed stream"); + }); + } + private async Task InvokeMcpToolAsync(McpClient mcpClient, string toolName, string query) { this._outputHelper.WriteLine($"Invoking MCP tool '{toolName}'..."); @@ -482,6 +613,21 @@ await this.WaitForConditionAsync( message: "DTS emulator is running", timeout: TimeSpan.FromSeconds(30)); } + + // Start Redis if it's not already running + if (!await this.IsRedisRunningAsync()) + { + await this.StartDockerContainerAsync( + containerName: "redis", + image: "redis:latest", + ports: ["-p", "6379:6379"]); + + // Wait for Redis + await this.WaitForConditionAsync( + condition: this.IsRedisRunningAsync, + message: "Redis is running", + timeout: TimeSpan.FromSeconds(30)); + } } private async Task IsAzuriteRunningAsync() @@ -562,6 +708,49 @@ private async Task IsDtsEmulatorRunningAsync() } } + private async Task IsRedisRunningAsync() + { + this._outputHelper.WriteLine($"Checking if Redis is running at localhost:{RedisPort}..."); + + try + { + using CancellationTokenSource timeoutCts = new(TimeSpan.FromSeconds(30)); + ProcessStartInfo startInfo = new() + { + FileName = "docker", + Arguments = "exec redis redis-cli ping", + UseShellExecute = false, + RedirectStandardOutput = true, + RedirectStandardError = true, + CreateNoWindow = true + }; + + using Process process = new() { StartInfo = startInfo }; + if (!process.Start()) + { + this._outputHelper.WriteLine("Failed to start docker exec command"); + return false; + } + + string output = await process.StandardOutput.ReadToEndAsync(timeoutCts.Token); + await process.WaitForExitAsync(timeoutCts.Token); + + if (process.ExitCode == 0 && output.Contains("PONG", StringComparison.OrdinalIgnoreCase)) + { + this._outputHelper.WriteLine("Redis is running"); + return true; + } + + this._outputHelper.WriteLine($"Redis is not running. 
Exit code: {process.ExitCode}, Output: {output}"); + return false; + } + catch (Exception ex) + { + this._outputHelper.WriteLine($"Redis is not running: {ex.Message}"); + return false; + } + } + private async Task StartDockerContainerAsync(string containerName, string image, string[] ports) { // Stop existing container if it exists @@ -646,6 +835,7 @@ private Process StartFunctionApp(string samplePath, List logs) startInfo.EnvironmentVariables["DURABLE_TASK_SCHEDULER_CONNECTION_STRING"] = $"Endpoint=http://localhost:{DtsPort};TaskHub=default;Authentication=None"; startInfo.EnvironmentVariables["AzureWebJobsStorage"] = "UseDevelopmentStorage=true"; + startInfo.EnvironmentVariables["REDIS_CONNECTION_STRING"] = $"localhost:{RedisPort}"; Process process = new() { StartInfo = startInfo }; diff --git a/dotnet/tests/Microsoft.Agents.AI.Hosting.AzureFunctions.UnitTests/TestAgent.cs b/dotnet/tests/Microsoft.Agents.AI.Hosting.AzureFunctions.UnitTests/TestAgent.cs index b0ad7ec0fe..e6824a2dd4 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Hosting.AzureFunctions.UnitTests/TestAgent.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Hosting.AzureFunctions.UnitTests/TestAgent.cs @@ -17,13 +17,13 @@ public override AgentThread DeserializeThread( JsonElement serializedThread, JsonSerializerOptions? jsonSerializerOptions = null) => new DummyAgentThread(); - public override Task RunAsync( + protected override Task RunCoreAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) => Task.FromResult(new AgentRunResponse([.. messages])); - public override IAsyncEnumerable RunStreamingAsync( + protected override IAsyncEnumerable RunCoreStreamingAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? 
options = null, diff --git a/dotnet/tests/Microsoft.Agents.AI.OpenAI.UnitTests/Extensions/AIAgentWithOpenAIExtensionsTests.cs b/dotnet/tests/Microsoft.Agents.AI.OpenAI.UnitTests/Extensions/AIAgentWithOpenAIExtensionsTests.cs index de8c459be0..60c37c9b82 100644 --- a/dotnet/tests/Microsoft.Agents.AI.OpenAI.UnitTests/Extensions/AIAgentWithOpenAIExtensionsTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.OpenAI.UnitTests/Extensions/AIAgentWithOpenAIExtensionsTests.cs @@ -6,6 +6,7 @@ using System.Threading; using System.Threading.Tasks; using Moq; +using Moq.Protected; using ChatMessage = Microsoft.Extensions.AI.ChatMessage; using ChatRole = Microsoft.Extensions.AI.ChatRole; using OpenAIChatMessage = OpenAI.Chat.ChatMessage; @@ -76,22 +77,28 @@ public async Task RunAsync_CallsUnderlyingAgentAsync() var responseMessage = new ChatMessage(ChatRole.Assistant, [new TextContent(ResponseText)]); mockAgent - .Setup(a => a.RunAsync(It.IsAny>(), It.IsAny(), It.IsAny(), It.IsAny())) + .Protected() + .Setup>("RunCoreAsync", + ItExpr.IsAny>(), + ItExpr.IsAny(), + ItExpr.IsAny(), + ItExpr.IsAny()) .ReturnsAsync(new AgentRunResponse([responseMessage])); // Act var result = await mockAgent.Object.RunAsync(openAiMessages, mockThread.Object, options, cancellationToken); // Assert - mockAgent.Verify( - a => a.RunAsync( - It.Is>(msgs => + mockAgent.Protected() + .Verify("RunCoreAsync", + Times.Once(), + ItExpr.Is>(msgs => msgs.ToList().Count == 1 && msgs.ToList()[0].Text == TestMessageText), mockThread.Object, options, - cancellationToken), - Times.Once); + cancellationToken + ); Assert.NotNull(result); Assert.NotEmpty(result.Content); @@ -160,7 +167,12 @@ public async Task RunStreamingAsync_CallsUnderlyingAgentAsync() }; mockAgent - .Setup(a => a.RunStreamingAsync(It.IsAny>(), It.IsAny(), It.IsAny(), It.IsAny())) + .Protected() + .Setup>("RunCoreStreamingAsync", + ItExpr.IsAny>(), + ItExpr.IsAny(), + ItExpr.IsAny(), + ItExpr.IsAny()) .Returns(ToAsyncEnumerableAsync(responseUpdates)); // 
Act @@ -172,15 +184,16 @@ public async Task RunStreamingAsync_CallsUnderlyingAgentAsync() } // Assert - mockAgent.Verify( - a => a.RunStreamingAsync( - It.Is>(msgs => + mockAgent.Protected() + .Verify("RunCoreStreamingAsync", + Times.Once(), + ItExpr.Is>(msgs => msgs.ToList().Count == 1 && msgs.ToList()[0].Text == TestMessageText), mockThread.Object, options, - cancellationToken), - Times.Once); + cancellationToken + ); Assert.True(updateCount > 0, "Expected at least one streaming update"); } diff --git a/dotnet/tests/Microsoft.Agents.AI.Purview.UnitTests/PurviewWrapperTests.cs b/dotnet/tests/Microsoft.Agents.AI.Purview.UnitTests/PurviewWrapperTests.cs index 22b729dda4..eafc67f7fc 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Purview.UnitTests/PurviewWrapperTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Purview.UnitTests/PurviewWrapperTests.cs @@ -8,6 +8,7 @@ using Microsoft.Extensions.AI; using Microsoft.Extensions.Logging.Abstractions; using Moq; +using Moq.Protected; namespace Microsoft.Agents.AI.Purview.UnitTests; @@ -277,11 +278,13 @@ public async Task ProcessAgentContentAsync_WithBlockedPrompt_ReturnsBlockedMessa Assert.Single(result.Messages); Assert.Equal(ChatRole.System, result.Messages[0].Role); Assert.Equal("Prompt blocked by policy", result.Messages[0].Text); - mockAgent.Verify(x => x.RunAsync( - It.IsAny>(), - It.IsAny(), - It.IsAny(), - It.IsAny()), Times.Never); + + mockAgent.Protected().Verify("RunCoreAsync", + Times.Never(), + ItExpr.IsAny>(), + ItExpr.IsAny(), + ItExpr.IsAny(), + ItExpr.IsAny()); } [Fact] @@ -295,11 +298,12 @@ public async Task ProcessAgentContentAsync_WithAllowedPromptAndBlockedResponse_R var mockAgent = new Mock(); var innerResponse = new AgentRunResponse(new ChatMessage(ChatRole.Assistant, "Sensitive response")); - mockAgent.Setup(x => x.RunAsync( - It.IsAny>(), - It.IsAny(), - It.IsAny(), - It.IsAny())) + mockAgent.Protected() + .Setup>("RunCoreAsync", + ItExpr.IsAny>(), + ItExpr.IsAny(), + ItExpr.IsAny(), + ItExpr.IsAny()) 
.ReturnsAsync(innerResponse); this._mockProcessor.SetupSequence(x => x.ProcessMessagesAsync( @@ -333,11 +337,12 @@ public async Task ProcessAgentContentAsync_WithAllowedPromptAndResponse_ReturnsI var mockAgent = new Mock(); var innerResponse = new AgentRunResponse(new ChatMessage(ChatRole.Assistant, "Safe response")); - mockAgent.Setup(x => x.RunAsync( - It.IsAny>(), - It.IsAny(), - It.IsAny(), - It.IsAny())) + mockAgent.Protected() + .Setup>("RunCoreAsync", + ItExpr.IsAny>(), + ItExpr.IsAny(), + ItExpr.IsAny(), + ItExpr.IsAny()) .ReturnsAsync(innerResponse); this._mockProcessor.Setup(x => x.ProcessMessagesAsync( @@ -375,11 +380,12 @@ public async Task ProcessAgentContentAsync_WithIgnoreExceptions_ContinuesOnError var expectedResponse = new AgentRunResponse(new ChatMessage(ChatRole.Assistant, "Response from inner agent")); var mockAgent = new Mock(); - mockAgent.Setup(x => x.RunAsync( - It.IsAny>(), - It.IsAny(), - It.IsAny(), - It.IsAny())) + mockAgent.Protected() + .Setup>("RunCoreAsync", + ItExpr.IsAny>(), + ItExpr.IsAny(), + ItExpr.IsAny(), + ItExpr.IsAny()) .ReturnsAsync(expectedResponse); this._mockProcessor.SetupSequence(x => x.ProcessMessagesAsync( @@ -441,11 +447,12 @@ public async Task ProcessAgentContentAsync_ExtractsThreadIdFromMessageAdditional var expectedResponse = new AgentRunResponse(new ChatMessage(ChatRole.Assistant, "Response")); var mockAgent = new Mock(); - mockAgent.Setup(x => x.RunAsync( - It.IsAny>(), - It.IsAny(), - It.IsAny(), - It.IsAny())) + mockAgent.Protected() + .Setup>("RunCoreAsync", + ItExpr.IsAny>(), + ItExpr.IsAny(), + ItExpr.IsAny(), + ItExpr.IsAny()) .ReturnsAsync(expectedResponse); this._mockProcessor.Setup(x => x.ProcessMessagesAsync( @@ -482,11 +489,12 @@ public async Task ProcessAgentContentAsync_GeneratesThreadId_WhenNotProvidedAsyn var expectedResponse = new AgentRunResponse(new ChatMessage(ChatRole.Assistant, "Response")); var mockAgent = new Mock(); - mockAgent.Setup(x => x.RunAsync( - It.IsAny>(), - It.IsAny(), - 
It.IsAny(), - It.IsAny())) + mockAgent.Protected() + .Setup>("RunCoreAsync", + ItExpr.IsAny>(), + ItExpr.IsAny(), + ItExpr.IsAny(), + ItExpr.IsAny()) .ReturnsAsync(expectedResponse); string? capturedThreadId = null; @@ -521,11 +529,12 @@ public async Task ProcessAgentContentAsync_PassesResolvedUserId_ToResponseProces var mockAgent = new Mock(); var innerResponse = new AgentRunResponse(new ChatMessage(ChatRole.Assistant, "Response")); - mockAgent.Setup(x => x.RunAsync( - It.IsAny>(), - It.IsAny(), - It.IsAny(), - It.IsAny())) + mockAgent.Protected() + .Setup>("RunCoreAsync", + ItExpr.IsAny>(), + ItExpr.IsAny(), + ItExpr.IsAny(), + ItExpr.IsAny()) .ReturnsAsync(innerResponse); var callCount = 0; diff --git a/dotnet/tests/Microsoft.Agents.AI.UnitTests/AgentExtensionsTests.cs b/dotnet/tests/Microsoft.Agents.AI.UnitTests/AgentExtensionsTests.cs index f2b2bcfd6a..d039c95652 100644 --- a/dotnet/tests/Microsoft.Agents.AI.UnitTests/AgentExtensionsTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.UnitTests/AgentExtensionsTests.cs @@ -337,7 +337,7 @@ public override AgentThread DeserializeThread(JsonElement serializedThread, Json public CancellationToken LastCancellationToken { get; private set; } public int RunAsyncCallCount { get; private set; } - public override Task RunAsync( + protected override Task RunCoreAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, @@ -355,7 +355,7 @@ public override Task RunAsync( return Task.FromResult(this._responseToReturn!); } - public override async IAsyncEnumerable RunStreamingAsync( + protected override async IAsyncEnumerable RunCoreStreamingAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? 
options = null, diff --git a/dotnet/tests/Microsoft.Agents.AI.UnitTests/AnonymousDelegatingAIAgentTests.cs b/dotnet/tests/Microsoft.Agents.AI.UnitTests/AnonymousDelegatingAIAgentTests.cs index 369ab1ad4f..4e91fc1430 100644 --- a/dotnet/tests/Microsoft.Agents.AI.UnitTests/AnonymousDelegatingAIAgentTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.UnitTests/AnonymousDelegatingAIAgentTests.cs @@ -8,6 +8,7 @@ using System.Threading.Tasks; using Microsoft.Extensions.AI; using Moq; +using Moq.Protected; namespace Microsoft.Agents.AI.UnitTests; @@ -35,18 +36,22 @@ public AnonymousDelegatingAIAgentTests() new AgentRunResponseUpdate(ChatRole.Assistant, "Response 2") ]; - this._innerAgentMock.Setup(x => x.RunAsync( - It.IsAny>(), - It.IsAny(), - It.IsAny(), - It.IsAny())) + this._innerAgentMock + .Protected() + .Setup>("RunCoreAsync", + ItExpr.IsAny>(), + ItExpr.IsAny(), + ItExpr.IsAny(), + ItExpr.IsAny()) .ReturnsAsync(this._testResponse); - this._innerAgentMock.Setup(x => x.RunStreamingAsync( - It.IsAny>(), - It.IsAny(), - It.IsAny(), - It.IsAny())) + this._innerAgentMock + .Protected() + .Setup>("RunCoreStreamingAsync", + ItExpr.IsAny>(), + ItExpr.IsAny(), + ItExpr.IsAny(), + ItExpr.IsAny()) .Returns(ToAsyncEnumerableAsync(this._testStreamingResponses)); } @@ -184,11 +189,14 @@ public async Task RunAsync_WithSharedFunc_ContextPropagatedAsync() Assert.Same(this._testOptions, capturedOptions); Assert.Equal(expectedCancellationToken, capturedCancellationToken); - this._innerAgentMock.Verify(x => x.RunAsync( - this._testMessages, - this._testThread, - this._testOptions, - expectedCancellationToken), Times.Once); + this._innerAgentMock + .Protected() + .Verify>("RunCoreAsync", + Times.Once(), + ItExpr.Is>(m => m == this._testMessages), + ItExpr.Is(t => t == this._testThread), + ItExpr.Is(o => o == this._testOptions), + ItExpr.Is(ct => ct == expectedCancellationToken)); } /// @@ -458,11 +466,13 @@ public async Task AsyncLocalContext_MaintainedAcrossDelegatesAsync() capturedValue 
= asyncLocal.Value; }); - this._innerAgentMock.Setup(x => x.RunAsync( - It.IsAny>(), - It.IsAny(), - It.IsAny(), - It.IsAny())) + this._innerAgentMock + .Protected() + .Setup>("RunCoreAsync", + ItExpr.IsAny>(), + ItExpr.IsAny(), + ItExpr.IsAny(), + ItExpr.IsAny()) .Returns(() => { // Verify AsyncLocal value is available in inner agent call @@ -926,11 +936,13 @@ public async Task AIAgentBuilder_Use_CancellationTokenPropagation_WorksCorrectly var capturedTokens = new List(); // Setup mock to throw OperationCanceledException when cancelled token is used - this._innerAgentMock.Setup(x => x.RunAsync( - It.IsAny>(), - It.IsAny(), - It.IsAny(), - It.Is(ct => ct.IsCancellationRequested))) + this._innerAgentMock + .Protected() + .Setup>("RunCoreAsync", + ItExpr.IsAny>(), + ItExpr.IsAny(), + ItExpr.IsAny(), + ItExpr.Is(ct => ct.IsCancellationRequested)) .ThrowsAsync(new OperationCanceledException()); var agent = new AIAgentBuilder(this._innerAgentMock.Object) @@ -993,11 +1005,14 @@ public async Task AIAgentBuilder_Use_MiddlewareShortCircuits_InnerAgentNotCalled Assert.Equal(expectedOrder, executionOrder); // Verify inner agent was never called - this._innerAgentMock.Verify(x => x.RunAsync( - It.IsAny>(), - It.IsAny(), - It.IsAny(), - It.IsAny()), Times.Never); + this._innerAgentMock + .Protected() + .Verify>("RunCoreAsync", + Times.Never(), + ItExpr.IsAny>(), + ItExpr.IsAny(), + ItExpr.IsAny(), + ItExpr.IsAny()); } #endregion diff --git a/dotnet/tests/Microsoft.Agents.AI.UnitTests/LoggingAgentTests.cs b/dotnet/tests/Microsoft.Agents.AI.UnitTests/LoggingAgentTests.cs index 57b3051197..58e9536491 100644 --- a/dotnet/tests/Microsoft.Agents.AI.UnitTests/LoggingAgentTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.UnitTests/LoggingAgentTests.cs @@ -42,7 +42,6 @@ public void Properties_DelegateToInnerAgent() Assert.Equal("TestAgent", agent.Name); Assert.Equal("This is a test agent.", agent.Description); Assert.Equal(innerAgent.Id, agent.Id); - Assert.Equal(innerAgent.DisplayName, 
agent.DisplayName); } [Fact] diff --git a/dotnet/tests/Microsoft.Agents.AI.UnitTests/OpenTelemetryAgentTests.cs b/dotnet/tests/Microsoft.Agents.AI.UnitTests/OpenTelemetryAgentTests.cs index b9b04b7228..405832763c 100644 --- a/dotnet/tests/Microsoft.Agents.AI.UnitTests/OpenTelemetryAgentTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.UnitTests/OpenTelemetryAgentTests.cs @@ -45,7 +45,6 @@ public void Properties_DelegateToInnerAgent() Assert.Equal("TestAgent", agent.Name); Assert.Equal("This is a test agent.", agent.Description); Assert.Equal(innerAgent.Id, agent.Id); - Assert.Equal(innerAgent.DisplayName, agent.DisplayName); } [Fact] @@ -170,7 +169,7 @@ async static IAsyncEnumerable CallbackAsync( Assert.Equal("localhost", activity.GetTagItem("server.address")); Assert.Equal(12345, (int)activity.GetTagItem("server.port")!); - Assert.Equal("invoke_agent TestAgent", activity.DisplayName); + Assert.Equal($"invoke_agent {agent.Name}({agent.Id})", activity.DisplayName); Assert.Equal("invoke_agent", activity.GetTagItem("gen_ai.operation.name")); Assert.Equal("TestAgentProviderFromAIAgentMetadata", activity.GetTagItem("gen_ai.provider.name")); Assert.Equal(innerAgent.Name, activity.GetTagItem("gen_ai.agent.name")); @@ -431,7 +430,15 @@ async static IAsyncEnumerable CallbackAsync( Assert.Equal("localhost", activity.GetTagItem("server.address")); Assert.Equal(12345, (int)activity.GetTagItem("server.port")!); - Assert.Equal($"invoke_agent {innerAgent.DisplayName}", activity.DisplayName); + if (string.IsNullOrWhiteSpace(innerAgent.Name)) + { + Assert.Equal($"invoke_agent {innerAgent.Id}", activity.DisplayName); + } + else + { + Assert.Equal($"invoke_agent {innerAgent.Name}({innerAgent.Id})", activity.DisplayName); + } + Assert.Equal("invoke_agent", activity.GetTagItem("gen_ai.operation.name")); Assert.Equal("TestAgentProviderFromAIAgentMetadata", activity.GetTagItem("gen_ai.provider.name")); Assert.Equal(innerAgent.Name, activity.GetTagItem("gen_ai.agent.name")); diff --git 
a/dotnet/tests/Microsoft.Agents.AI.UnitTests/TestAIAgent.cs b/dotnet/tests/Microsoft.Agents.AI.UnitTests/TestAIAgent.cs index fb00973c78..3d2cdff868 100644 --- a/dotnet/tests/Microsoft.Agents.AI.UnitTests/TestAIAgent.cs +++ b/dotnet/tests/Microsoft.Agents.AI.UnitTests/TestAIAgent.cs @@ -30,10 +30,10 @@ public override AgentThread DeserializeThread(JsonElement serializedThread, Json public override AgentThread GetNewThread() => this.GetNewThreadFunc(); - public override Task RunAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) => + protected override Task RunCoreAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) => this.RunAsyncFunc(messages, thread, options, cancellationToken); - public override IAsyncEnumerable RunStreamingAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) => + protected override IAsyncEnumerable RunCoreStreamingAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) => this.RunStreamingAsyncFunc(messages, thread, options, cancellationToken); public override object? GetService(Type serviceType, object? serviceKey = null) => diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/AgentWorkflowBuilderTests.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/AgentWorkflowBuilderTests.cs index 0437fc7695..c45ef8726e 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/AgentWorkflowBuilderTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/AgentWorkflowBuilderTests.cs @@ -141,11 +141,11 @@ public override AgentThread GetNewThread() public override AgentThread DeserializeThread(JsonElement serializedThread, JsonSerializerOptions? 
jsonSerializerOptions = null) => new DoubleEchoAgentThread(); - public override Task RunAsync( + protected override Task RunCoreAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) => throw new NotImplementedException(); - public override async IAsyncEnumerable RunStreamingAsync( + protected override async IAsyncEnumerable RunCoreStreamingAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, [EnumeratorCancellation] CancellationToken cancellationToken = default) { await Task.Yield(); @@ -409,7 +409,7 @@ public async Task BuildGroupChat_AgentsRunInOrderAsync(int maxIterations) private sealed class DoubleEchoAgentWithBarrier(string name, StrongBox> barrier, StrongBox remaining) : DoubleEchoAgent(name) { - public override async IAsyncEnumerable RunStreamingAsync( + protected override async IAsyncEnumerable RunCoreStreamingAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? 
options = null, [EnumeratorCancellation] CancellationToken cancellationToken = default) { if (Interlocked.Decrement(ref remaining.Value) == 0) @@ -419,7 +419,7 @@ public override async IAsyncEnumerable RunStreamingAsync await barrier.Value!.Task.ConfigureAwait(false); - await foreach (var update in base.RunStreamingAsync(messages, thread, options, cancellationToken)) + await foreach (var update in base.RunCoreStreamingAsync(messages, thread, options, cancellationToken)) { await Task.Yield(); yield return update; diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/InProcessExecutionTests.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/InProcessExecutionTests.cs index e134f10aa7..b3e53da6f8 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/InProcessExecutionTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/InProcessExecutionTests.cs @@ -149,7 +149,7 @@ public SimpleTestAgent(string name) public override AgentThread DeserializeThread(System.Text.Json.JsonElement serializedThread, System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) => new SimpleTestAgentThread(); - public override Task RunAsync( + protected override Task RunCoreAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, @@ -160,7 +160,7 @@ public override Task RunAsync( return Task.FromResult(new AgentRunResponse(responseMessage)); } - public override async IAsyncEnumerable RunStreamingAsync( + protected override async IAsyncEnumerable RunCoreStreamingAsync( IEnumerable messages, AgentThread? thread = null, AgentRunOptions? 
options = null, diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/RepresentationTests.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/RepresentationTests.cs index 1878a55868..5eb8696221 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/RepresentationTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/RepresentationTests.cs @@ -30,10 +30,10 @@ public override AgentThread GetNewThread() public override AgentThread DeserializeThread(JsonElement serializedThread, JsonSerializerOptions? jsonSerializerOptions = null) => throw new NotImplementedException(); - public override Task RunAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) => + protected override Task RunCoreAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) => throw new NotImplementedException(); - public override IAsyncEnumerable RunStreamingAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) => + protected override IAsyncEnumerable RunCoreStreamingAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) => throw new NotImplementedException(); } diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/Sample/06_GroupChat_Workflow.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/Sample/06_GroupChat_Workflow.cs index 16a51876d0..a351c45b20 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/Sample/06_GroupChat_Workflow.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/Sample/06_GroupChat_Workflow.cs @@ -66,17 +66,17 @@ public override AgentThread GetNewThread() public override AgentThread DeserializeThread(JsonElement serializedThread, JsonSerializerOptions? 
jsonSerializerOptions = null) => new HelloAgentThread(); - public override async Task RunAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) + protected override async Task RunCoreAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) { IEnumerable update = [ - await this.RunStreamingAsync(messages, thread, options, cancellationToken) + await this.RunCoreStreamingAsync(messages, thread, options, cancellationToken) .SingleAsync(cancellationToken) .ConfigureAwait(false)]; return update.ToAgentRunResponse(); } - public override async IAsyncEnumerable RunStreamingAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, [EnumeratorCancellation] CancellationToken cancellationToken = default) + protected override async IAsyncEnumerable RunCoreStreamingAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, [EnumeratorCancellation] CancellationToken cancellationToken = default) { yield return new(ChatRole.Assistant, "Hello World!") { diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/Sample/12_HandOff_HostAsAgent.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/Sample/12_HandOff_HostAsAgent.cs index 824a75d5d0..c319a0ac32 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/Sample/12_HandOff_HostAsAgent.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/Sample/12_HandOff_HostAsAgent.cs @@ -30,7 +30,7 @@ protected override IEnumerable GetEpilogueMessages(AgentRunOptions? { return [new(ChatRole.Assistant, [new FunctionCallContent(Guid.NewGuid().ToString("N"), handoff.Name)]) { - AuthorName = this.DisplayName, + AuthorName = this.Name ?? 
this.Id, MessageId = Guid.NewGuid().ToString("N"), CreatedAt = DateTime.UtcNow }]; diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/SpecializedExecutorSmokeTests.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/SpecializedExecutorSmokeTests.cs index daff2c248e..ed9af701c6 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/SpecializedExecutorSmokeTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/SpecializedExecutorSmokeTests.cs @@ -62,14 +62,14 @@ public static TestAIAgent FromStrings(params string[] messages) => public List Messages { get; } = Validate(messages) ?? []; - public override Task RunAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) => + protected override Task RunCoreAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) => Task.FromResult(new AgentRunResponse(this.Messages) { AgentId = this.Id, ResponseId = Guid.NewGuid().ToString("N") }); - public override async IAsyncEnumerable RunStreamingAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, [EnumeratorCancellation] CancellationToken cancellationToken = default) + protected override async IAsyncEnumerable RunCoreStreamingAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? 
options = null, [EnumeratorCancellation] CancellationToken cancellationToken = default) { string responseId = Guid.NewGuid().ToString("N"); foreach (ChatMessage message in this.Messages) diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/TestEchoAgent.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/TestEchoAgent.cs index 9ddc94cf71..422d7a16ba 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/TestEchoAgent.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/TestEchoAgent.cs @@ -47,7 +47,7 @@ IEnumerable echoMessages select UpdateThread(new ChatMessage(ChatRole.Assistant, $"{prefix}{message.Text}") { - AuthorName = this.DisplayName, + AuthorName = this.Name ?? this.Id, CreatedAt = DateTimeOffset.Now, MessageId = Guid.NewGuid().ToString("N") }, thread as InMemoryAgentThread); @@ -60,7 +60,7 @@ protected virtual IEnumerable GetEpilogueMessages(AgentRunOptions? return []; } - public override Task RunAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) + protected override Task RunCoreAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) { AgentRunResponse result = new(this.EchoMessages(messages, thread, options).ToList()) @@ -73,7 +73,7 @@ public override Task RunAsync(IEnumerable message return Task.FromResult(result); } - public override async IAsyncEnumerable RunStreamingAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, [EnumeratorCancellation] CancellationToken cancellationToken = default) + protected override async IAsyncEnumerable RunCoreStreamingAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? 
options = null, [EnumeratorCancellation] CancellationToken cancellationToken = default) { string responseId = Guid.NewGuid().ToString("N"); diff --git a/python/.github/instructions/python.instructions.md b/python/.github/instructions/python.instructions.md index 4668f35efb..2756071a72 100644 --- a/python/.github/instructions/python.instructions.md +++ b/python/.github/instructions/python.instructions.md @@ -1,6 +1,11 @@ --- applyTo: '**/agent-framework/python/**' --- +- Use `uv run` as the main entrypoint for running Python commands with all packages available. +- Use `uv run poe ` for development tasks like formatting (`fmt`), linting (`lint`), type checking (`pyright`, `mypy`), and testing (`test`). +- Use `uv run --directory packages/ poe ` to run tasks for a specific package. +- Read [DEV_SETUP.md](../../DEV_SETUP.md) for detailed development environment setup and available poe tasks. +- Read [CODING_STANDARD.md](../../CODING_STANDARD.md) for the project's coding standards and best practices. - When verifying logic with unit tests, run only the related tests, not the entire test suite. - For new tests and samples, review existing ones to understand the coding style and reuse it. - When generating new functions, always specify the function return type and parameter types. diff --git a/python/CHANGELOG.md b/python/CHANGELOG.md index 7b012ccf23..18ba979fb8 100644 --- a/python/CHANGELOG.md +++ b/python/CHANGELOG.md @@ -7,9 +7,43 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [1.0.0b251223] - 2025-12-23 + +### Added + +- **agent-framework-bedrock**: Introducing support for Bedrock-hosted models (Anthropic, Cohere, etc.) 
([#2610](https://github.com/microsoft/agent-framework/pull/2610)) +- **agent-framework-core**: Added `response.created` and `response.in_progress` event process to `OpenAIBaseResponseClient` ([#2975](https://github.com/microsoft/agent-framework/pull/2975)) +- **agent-framework-foundry-local**: Introducing Foundry Local Chat Clients ([#2915](https://github.com/microsoft/agent-framework/pull/2915)) +- **samples**: Added GitHub MCP sample with PAT ([#2967](https://github.com/microsoft/agent-framework/pull/2967)) + +### Changed + +- **agent-framework-core**: Preserve reasoning blocks with OpenRouter ([#2950](https://github.com/microsoft/agent-framework/pull/2950)) + +## [1.0.0b251218] - 2025-12-18 + +### Added + +- **agent-framework-core**: Azure AI Agent with Bing Grounding Citations sample ([#2892](https://github.com/microsoft/agent-framework/pull/2892)) +- **agent-framework-core**: Workflow option to visualize internal executors ([#2917](https://github.com/microsoft/agent-framework/pull/2917)) +- **agent-framework-core**: Workflow cancellation sample ([#2732](https://github.com/microsoft/agent-framework/pull/2732)) +- **agent-framework-core**: Azure Managed Redis support with credential provider ([#2887](https://github.com/microsoft/agent-framework/pull/2887)) +- **agent-framework-core**: Additional arguments for Azure AI agent configuration ([#2922](https://github.com/microsoft/agent-framework/pull/2922)) + ### Changed -- **agent-framework-azurefunctions**: Durable Agents: platforms should use consistent entity method names (#2234) +- **agent-framework-ollama**: Updated Ollama package version ([#2920](https://github.com/microsoft/agent-framework/pull/2920)) +- **agent-framework-ollama**: Move Ollama samples to samples getting started directory ([#2921](https://github.com/microsoft/agent-framework/pull/2921)) +- **agent-framework-core**: Cleanup and refactoring of chat clients ([#2937](https://github.com/microsoft/agent-framework/pull/2937)) +- 
**agent-framework-core**: Align Run ID and Thread ID casing with AG-UI TypeScript SDK ([#2948](https://github.com/microsoft/agent-framework/pull/2948)) + +### Fixed + +- **agent-framework-core**: Fix Pydantic error when using Literal types for tool parameters ([#2893](https://github.com/microsoft/agent-framework/pull/2893)) +- **agent-framework-core**: Correct MCP image type conversion in `_mcp.py` ([#2901](https://github.com/microsoft/agent-framework/pull/2901)) +- **agent-framework-core**: Fix BadRequestError when using Pydantic models in response formatting ([#1843](https://github.com/microsoft/agent-framework/pull/1843)) +- **agent-framework-core**: Propagate workflow kwargs to sub-workflows via WorkflowExecutor ([#2923](https://github.com/microsoft/agent-framework/pull/2923)) +- **agent-framework-core**: Fix WorkflowAgent event handling and kwargs forwarding ([#2946](https://github.com/microsoft/agent-framework/pull/2946)) ## [1.0.0b251216] - 2025-12-16 @@ -392,7 +426,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 For more information, see the [announcement blog post](https://devblogs.microsoft.com/foundry/introducing-microsoft-agent-framework-the-open-source-engine-for-agentic-ai-apps/). 
-[Unreleased]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251216...HEAD +[Unreleased]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251223...HEAD +[1.0.0b251223]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251218...python-1.0.0b251223 +[1.0.0b251218]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251216...python-1.0.0b251218 [1.0.0b251216]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251211...python-1.0.0b251216 [1.0.0b251211]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251209...python-1.0.0b251211 [1.0.0b251209]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251204...python-1.0.0b251209 diff --git a/python/CODING_STANDARD.md b/python/CODING_STANDARD.md new file mode 100644 index 0000000000..6858f79f43 --- /dev/null +++ b/python/CODING_STANDARD.md @@ -0,0 +1,402 @@ +# Coding Standards + +This document describes the coding standards and conventions for the Agent Framework project. 
+ +## Code Style and Formatting + +We use [ruff](https://github.com/astral-sh/ruff) for both linting and formatting with the following configuration: + +- **Line length**: 120 characters +- **Target Python version**: 3.10+ +- **Google-style docstrings**: All public functions, classes, and modules should have docstrings following Google conventions + +## Function Parameter Guidelines + +To make the code easier to use and maintain: + +- **Positional parameters**: Only use for up to 3 fully expected parameters +- **Keyword parameters**: Use for all other parameters, especially when there are multiple required parameters without obvious ordering +- **Avoid additional imports**: Do not require the user to import additional modules to use the function, so provide string based overrides when applicable, for instance: +```python +def create_agent(name: str, tool_mode: ChatToolMode) -> Agent: + # Implementation here +``` +Should be: +```python +def create_agent(name: str, tool_mode: Literal['auto', 'required', 'none'] | ChatToolMode) -> Agent: + # Implementation here + if isinstance(tool_mode, str): + tool_mode = ChatToolMode(tool_mode) +``` +- **Document kwargs**: Always document how `kwargs` are used, either by referencing external documentation or explaining their purpose +- **Separate kwargs**: When combining kwargs for multiple purposes, use specific parameters like `client_kwargs: dict[str, Any]` instead of mixing everything in `**kwargs` + +## Method Naming Inside Connectors + +When naming methods inside connectors, we have a loose preference for using the following conventions: +- Use `_prepare__for_` as a prefix for methods that prepare data for sending to the external service. +- Use `_parse__from_` as a prefix for methods that process data received from the external service. + +This is not a strict rule, but a guideline to help maintain consistency across the codebase. 
+ +## Implementation Decisions + +### Asynchronous Programming + +It's important to note that most of this library is written with asynchronous in mind. The +developer should always assume everything is asynchronous. One can use the function signature +with either `async def` or `def` to understand if something is asynchronous or not. + +### Attributes vs Inheritance + +Prefer attributes over inheritance when parameters are mostly the same: + +```python +# ✅ Preferred - using attributes +from agent_framework import ChatMessage + +user_msg = ChatMessage(role="user", content="Hello, world!") +asst_msg = ChatMessage(role="assistant", content="Hello, world!") + +# ❌ Not preferred - unnecessary inheritance +from agent_framework import UserMessage, AssistantMessage + +user_msg = UserMessage(content="Hello, world!") +asst_msg = AssistantMessage(content="Hello, world!") +``` + +### Logging + +Use the centralized logging system: + +```python +from agent_framework import get_logger + +# For main package +logger = get_logger() + +# For subpackages +logger = get_logger('agent_framework.azure') +``` + +**Do not use** direct logging module imports: +```python +# ❌ Avoid this +import logging +logger = logging.getLogger(__name__) +``` + +### Import Structure + +The package follows a flat import structure: + +- **Core**: Import directly from `agent_framework` + ```python + from agent_framework import ChatAgent, ai_function + ``` + +- **Components**: Import from `agent_framework.` + ```python + from agent_framework.observability import enable_instrumentation, configure_otel_providers + ``` + +- **Connectors**: Import from `agent_framework.` + ```python + from agent_framework.openai import OpenAIChatClient + from agent_framework.azure import AzureOpenAIChatClient + ``` + +## Package Structure + +The project uses a monorepo structure with separate packages for each connector/extension: + +```plaintext +python/ +├── pyproject.toml # Root package (agent-framework) depends on 
agent-framework-core[all] +├── samples/ # Sample code and examples +├── packages/ +│ ├── core/ # agent-framework-core - Core abstractions and implementations +│ │ ├── pyproject.toml # Defines [all] extra that includes all connector packages +│ │ ├── tests/ # Tests for core package +│ │ └── agent_framework/ +│ │ ├── __init__.py # Public API exports +│ │ ├── _agents.py # Agent implementations +│ │ ├── _clients.py # Chat client protocols and base classes +│ │ ├── _tools.py # Tool definitions +│ │ ├── _types.py # Type definitions +│ │ ├── _logging.py # Logging utilities +│ │ │ +│ │ │ # Provider folders - lazy load from connector packages +│ │ ├── openai/ # OpenAI clients (built into core) +│ │ ├── azure/ # Lazy loads from azure-ai, azure-ai-search, azurefunctions +│ │ ├── anthropic/ # Lazy loads from agent-framework-anthropic +│ │ ├── ollama/ # Lazy loads from agent-framework-ollama +│ │ ├── a2a/ # Lazy loads from agent-framework-a2a +│ │ ├── ag_ui/ # Lazy loads from agent-framework-ag-ui +│ │ ├── chatkit/ # Lazy loads from agent-framework-chatkit +│ │ ├── declarative/ # Lazy loads from agent-framework-declarative +│ │ ├── devui/ # Lazy loads from agent-framework-devui +│ │ ├── mem0/ # Lazy loads from agent-framework-mem0 +│ │ └── redis/ # Lazy loads from agent-framework-redis +│ │ +│ ├── azure-ai/ # agent-framework-azure-ai +│ │ ├── pyproject.toml +│ │ ├── tests/ +│ │ └── agent_framework_azure_ai/ +│ │ ├── __init__.py # Public exports +│ │ ├── _chat_client.py # AzureAIClient implementation +│ │ ├── _client.py # AzureAIAgentClient implementation +│ │ ├── _shared.py # AzureAISettings and shared utilities +│ │ └── py.typed # PEP 561 marker +│ ├── anthropic/ # agent-framework-anthropic +│ ├── bedrock/ # agent-framework-bedrock +│ ├── ollama/ # agent-framework-ollama +│ └── ... # Other connector packages +``` + +### Lazy Loading Pattern + +Provider folders in the core package use `__getattr__` to lazy load classes from their respective connector packages. 
This allows users to import from a consistent location while only loading dependencies when needed: + +```python +# In agent_framework/azure/__init__.py +_IMPORTS: dict[str, tuple[str, str]] = { + "AzureAIAgentClient": ("agent_framework_azure_ai", "agent-framework-azure-ai"), + # ... +} + +def __getattr__(name: str) -> Any: + if name in _IMPORTS: + import_path, package_name = _IMPORTS[name] + try: + return getattr(importlib.import_module(import_path), name) + except ModuleNotFoundError as exc: + raise ModuleNotFoundError( + f"The package {package_name} is required to use `{name}`. " + f"Install it with: pip install {package_name}" + ) from exc +``` + +### Adding a New Connector Package + +**Important:** Do not create a new package unless there is an issue that has been reviewed and approved by the core team. + +#### Initial Release (Preview Phase) + +For the first release of a new connector package: + +1. Create a new directory under `packages/` (e.g., `packages/my-connector/`) +2. Add the package to `tool.uv.sources` in the root `pyproject.toml` +3. Include samples inside the package itself (e.g., `packages/my-connector/samples/`) +4. **Do NOT** add the package to the `[all]` extra in `packages/core/pyproject.toml` +5. **Do NOT** create lazy loading in core yet + +#### Promotion to Stable + +After the package has been released and gained a measure of confidence: + +1. Move samples from the package to the root `samples/` folder +2. Add the package to the `[all]` extra in `packages/core/pyproject.toml` +3. Create a provider folder in `agent_framework/` with lazy loading `__init__.py` + +### Installation Options + +Connectors are distributed as separate packages and are not imported by default in the core package. 
Users install the specific connectors they need:
+
+```bash
+# Install core only
+pip install agent-framework-core
+
+# Install core with all connectors
+pip install agent-framework-core[all]
+# or (equivalently):
+pip install agent-framework
+
+# Install specific connector
+pip install agent-framework-azure-ai
+```
+
+## Documentation
+
+Each file should have a single first line containing: # Copyright (c) Microsoft. All rights reserved.
+
+We follow the [Google Docstring](https://github.com/google/styleguide/blob/gh-pages/pyguide.md#383-functions-and-methods) style guide for functions and methods.
+They are currently not checked for private functions (functions starting with '_').
+
+They should contain:
+
+- Single line explaining what the function does, ending with a period.
+- If necessary to further explain the logic, a newline follows the first line and then the explanation is given.
+- The following three sections are optional, and if used should be separated by a single empty line.
+- Arguments are then specified after a header called `Args:`, with each argument being specified in the following format:
+  - `arg_name`: Explanation of the argument.
+  - If a longer explanation is needed for an argument, it should be placed on the next line, indented by 4 spaces.
+  - Type and default values do not have to be specified, they will be pulled from the definition.
+- Returns are specified after a header called `Returns:` or `Yields:`, with the return type and explanation of the return value.
+- Keyword arguments are specified after a header called `Keyword Args:`, with each argument being specified in the same format as `Args:`.
+- A header for exceptions can be added, called `Raises:`, but should only be used for: + - Agent Framework specific exceptions (e.g., `ServiceInitializationError`) + - Base exceptions that might be unexpected in the context + - Obvious exceptions like `ValueError` or `TypeError` do not need to be documented + - Format: `ExceptionType`: Explanation of the exception. + - If a longer explanation is needed, it should be placed on the next line, indented by 4 spaces. +- Code examples can be added using the `Examples:` header followed by `.. code-block:: python` directive. + +Putting them all together, gives you at minimum this: + +```python +def equal(arg1: str, arg2: str) -> bool: + """Compares two strings and returns True if they are the same.""" + ... +``` + +Or a complete version of this: + +```python +def equal(arg1: str, arg2: str) -> bool: + """Compares two strings and returns True if they are the same. + + Here is extra explanation of the logic involved. + + Args: + arg1: The first string to compare. + arg2: The second string to compare. + + Returns: + True if the strings are the same, False otherwise. + """ +``` + +A more complete example with keyword arguments and code samples: + +```python +def create_client( + model_id: str | None = None, + *, + timeout: float | None = None, + env_file_path: str | None = None, + **kwargs: Any, +) -> Client: + """Create a new client with the specified configuration. + + Args: + model_id: The model ID to use. If not provided, + it will be loaded from settings. + + Keyword Args: + timeout: Optional timeout for requests. + env_file_path: If provided, settings are read from this file. + kwargs: Additional keyword arguments passed to the underlying client. + + Returns: + A configured client instance. + + Raises: + ValueError: If the model_id is invalid. + + Examples: + + .. 
code-block:: python
+
+            # Create a client with default settings:
+            client = create_client(model_id="gpt-4o")
+
+            # Or load from environment:
+            client = create_client(env_file_path=".env")
+    """
+    ...
+```
+
+Use Google-style docstrings for all public APIs:
+
+```python
+def create_agent(name: str, chat_client: ChatClientProtocol) -> Agent:
+    """Create a new agent with the specified configuration.
+
+    Args:
+        name: The name of the agent.
+        chat_client: The chat client to use for communication.
+
+    Returns:
+        The newly created agent instance.
+
+    Raises:
+        ValueError: If the name is empty.
+    """
+    ...
+```
+
+If in doubt, use the link above to read about many more considerations of what to do and when, or use common sense.
+
+## Performance considerations
+
+### Cache Expensive Computations
+
+Think about caching where appropriate. Cache the results of expensive operations that are called repeatedly with the same inputs:
+
+```python
+# ✅ Preferred - cache expensive computations
+class AIFunction:
+    def __init__(self, ...):
+        self._cached_parameters: dict[str, Any] | None = None
+
+    def parameters(self) -> dict[str, Any]:
+        """Return the JSON schema for the function's parameters.
+
+        The result is cached after the first call for performance.
+ """ + if self._cached_parameters is None: + self._cached_parameters = self.input_model.model_json_schema() + return self._cached_parameters + +# ❌ Avoid - recalculating every time +def parameters(self) -> dict[str, Any]: + return self.input_model.model_json_schema() +``` + +### Prefer Attribute Access Over isinstance() + +When checking types in hot paths, prefer checking a `type` attribute (fast string comparison) over `isinstance()` (slower due to method resolution order traversal): + +```python +# ✅ Preferred - use match/case with type attribute (faster) +match content.type: + case "function_call": + # handle function call + case "usage": + # handle usage + case _: + # handle other types + +# ❌ Avoid in hot paths - isinstance() is slower +if isinstance(content, FunctionCallContent): + # handle function call +elif isinstance(content, UsageContent): + # handle usage +``` + +For inline conditionals: + +```python +# ✅ Preferred - type attribute comparison +result = value if content.type == "function_call" else other + +# ❌ Avoid - isinstance() in hot paths +result = value if isinstance(content, FunctionCallContent) else other +``` + +### Avoid Redundant Serialization + +When the same data needs to be used in multiple places, compute it once and reuse it: + +```python +# ✅ Preferred - reuse computed representation +otel_message = _to_otel_message(message) +otel_messages.append(otel_message) +logger.info(otel_message, extra={...}) + +# ❌ Avoid - computing the same thing twice +otel_messages.append(_to_otel_message(message)) # this already serializes +message_data = message.to_dict(exclude_none=True) # and this does so again! 
+logger.info(message_data, extra={...}) +``` diff --git a/python/DEV_SETUP.md b/python/DEV_SETUP.md index 2d4b9b92b1..101f96f6d0 100644 --- a/python/DEV_SETUP.md +++ b/python/DEV_SETUP.md @@ -4,6 +4,8 @@ This document describes how to setup your environment with Python and uv, if you're working on new features or a bug fix for Agent Framework, or simply want to run the tests included. +For coding standards and conventions, see [CODING_STANDARD.md](CODING_STANDARD.md). + ## System setup We are using a tool called [poethepoet](https://github.com/nat-n/poethepoet) for task management and [uv](https://github.com/astral-sh/uv) for dependency management. At the [end of this document](#available-poe-tasks), you will find the available Poe tasks. @@ -117,43 +119,6 @@ from agent_framework.openai import OpenAIChatClient chat_client = OpenAIChatClient(env_file_path="openai.env") ``` - -## Coding Standards - -### Code Style and Formatting - -We use [ruff](https://github.com/astral-sh/ruff) for both linting and formatting with the following configuration: - -- **Line length**: 120 characters -- **Target Python version**: 3.10+ -- **Google-style docstrings**: All public functions, classes, and modules should have docstrings following Google conventions - -### Function Parameter Guidelines - -To make the code easier to use and maintain: - -- **Positional parameters**: Only use for up to 3 fully expected parameters -- **Keyword parameters**: Use for all other parameters, especially when there are multiple required parameters without obvious ordering -- **Avoid additional imports**: Do not require the user to import additional modules to use the function, so provide string based overrides when applicable, for instance: -```python -def create_agent(name: str, tool_mode: ChatToolMode) -> Agent: - # Implementation here -``` -Should be: -```python -def create_agent(name: str, tool_mode: Literal['auto', 'required', 'none'] | ChatToolMode) -> Agent: - # Implementation here - if 
isinstance(tool_mode, str): - tool_mode = ChatToolMode(tool_mode) -``` -- **Document kwargs**: Always document how `kwargs` are used, either by referencing external documentation or explaining their purpose -- **Separate kwargs**: When combining kwargs for multiple purposes, use specific parameters like `client_kwargs: dict[str, Any]` instead of mixing everything in `**kwargs` - -Example: -```python -chat_completion = OpenAIChatClient(env_file_path="openai.env") -``` - ## Tests All the tests are located in the `tests` folder of each package. There are tests that are marked with a `@skip_if_..._integration_tests_disabled` decorator, these are integration tests that require an external service to be running, like OpenAI or Azure OpenAI. @@ -171,264 +136,6 @@ uv run poe --directory packages/core test These commands also output the coverage report. -## Implementation Decisions - -### Asynchronous programming - -It's important to note that most of this library is written with asynchronous in mind. The -developer should always assume everything is asynchronous. One can use the function signature -with either `async def` or `def` to understand if something is asynchronous or not. - -### Documentation - -Each file should have a single first line containing: # Copyright (c) Microsoft. All rights reserved. - -We follow the [Google Docstring](https://github.com/google/styleguide/blob/gh-pages/pyguide.md#383-functions-and-methods) style guide for functions and methods. -They are currently not checked for private functions (functions starting with '_'). - -They should contain: - -- Single line explaining what the function does, ending with a period. -- If necessary to further explain the logic a newline follows the first line and then the explanation is given. -- The following three sections are optional, and if used should be separated by a single empty line. 
-- Arguments are then specified after a header called `Args:`, with each argument being specified in the following format: - - `arg_name`: Explanation of the argument. - - if a longer explanation is needed for a argument, it should be placed on the next line, indented by 4 spaces. - - Type and default values do not have to be specified, they will be pulled from the definition. -- Returns are specified after a header called `Returns:` or `Yields:`, with the return type and explanation of the return value. -- Finally, a header for exceptions can be added, called `Raises:`, with each exception being specified in the following format: - - `ExceptionType`: Explanation of the exception. - - if a longer explanation is needed for a exception, it should be placed on the next line, indented by 4 spaces. - -Putting them all together, gives you at minimum this: - -```python -def equal(arg1: str, arg2: str) -> bool: - """Compares two strings and returns True if they are the same.""" - ... -``` - -Or a complete version of this: - -```python -def equal(arg1: str, arg2: str) -> bool: - """Compares two strings and returns True if they are the same. - - Here is extra explanation of the logic involved. - - Args: - arg1: The first string to compare. - arg2: The second string to compare. - - Returns: - True if the strings are the same, False otherwise. 
- """ -``` - -### Attributes vs Inheritance - -Prefer attributes over inheritance when parameters are mostly the same: - -```python -# ✅ Preferred - using attributes -from agent_framework import ChatMessage - -user_msg = ChatMessage(role="user", content="Hello, world!") -asst_msg = ChatMessage(role="assistant", content="Hello, world!") - -# ❌ Not preferred - unnecessary inheritance -from agent_framework import UserMessage, AssistantMessage - -user_msg = UserMessage(content="Hello, world!") -asst_msg = AssistantMessage(content="Hello, world!") -``` - -### Logging - -Use the centralized logging system: - -```python -from agent_framework import get_logger - -# For main package -logger = get_logger() - -# For subpackages -logger = get_logger('agent_framework.azure') -``` - -**Do not use** direct logging module imports: -```python -# ❌ Avoid this -import logging -logger = logging.getLogger(__name__) -``` - -### Import Structure - -The package follows a flat import structure: - -- **Core**: Import directly from `agent_framework` - ```python - from agent_framework import ChatAgent, ai_function - ``` - -- **Components**: Import from `agent_framework.` - ```python - from agent_framework.vector_data import VectorStoreModel - from agent_framework.guardrails import ContentFilter - ``` - -- **Connectors**: Import from `agent_framework.` - ```python - from agent_framework.openai import OpenAIChatClient - from agent_framework.azure import AzureOpenAIChatClient - ``` - -## Testing - -### Running Tests - -```bash -# Run all tests with coverage -uv run poe test - -# Run specific test file -uv run pytest tests/test_agents.py - -# Run with verbose output -uv run pytest -v -``` - -### Test Coverage - -- Target: Minimum 80% test coverage for all packages -- Coverage reports are generated automatically during test runs -- Tests should be in corresponding `test_*.py` files in the `tests/` directory - -## Documentation - -### Building Documentation - -```bash -# Build documentation -uv run 
poe docs-build - -# Serve documentation locally with auto-reload -uv run poe docs-serve - -# Check documentation for warnings -uv run poe docs-check -``` - -### Docstring Style - -Use Google-style docstrings for all public APIs: - -```python -def create_agent(name: str, chat_client: ChatClientProtocol) -> Agent: - """Create a new agent with the specified configuration. - - Args: - name: The name of the agent. - chat_client: The chat client to use for communication. - - Returns: - True if the strings are the same, False otherwise. - - Raises: - ValueError: If one of the strings is empty. - """ - ... -``` - -If in doubt, use the link above to read much more considerations of what to do and when, or use common sense. - -## Coding standards - -```plaintext -agent_framework/ -├── __init__.py # Tier 0: Core components -├── _agents.py # Agent implementations -├── _tools.py # Tool definitions -├── _models.py # Type definitions -├── _logging.py # Logging utilities -├── context_providers.py # Tier 1: Context providers -├── guardrails.py # Tier 1: Guardrails and filters -├── vector_data.py # Tier 1: Vector stores -├── workflows.py # Tier 1: Multi-agent orchestration -└── azure/ # Tier 2: Azure connectors (lazy loaded) - └── __init__.py # Imports from agent-framework-azure -``` - -### Pydantic and Serialization - -This section describes how one can enable serialization for their class using Pydantic. -For more info you can refer to the [Pydantic Documentation](https://docs.pydantic.dev/latest/). - -#### Upgrading existing classes to use Pydantic - -Let's take the following example: - -```python -class A: - def __init__(self, a: int, b: float, c: List[float], d: dict[str, tuple[float, str]] = {}): - self.a = a - self.b = b - self.c = c - self.d = d -``` - -You would convert this to a Pydantic class by sub-classing from the `AFBaseModel` class. 
- -```python -from pydantic import Field -from ._pydantic import AFBaseModel - -class A(AFBaseModel): - # The notation for the fields is similar to dataclasses. - a: int - b: float - c: list[float] - # Only, instead of using dataclasses.field, you would use pydantic.Field - d: dict[str, tuple[float, str]] = Field(default_factory=dict) -``` - -#### Classes with data that need to be serialized, and some of them are Generic types - -Let's take the following example: - -```python -from typing import TypeVar - -T1 = TypeVar("T1") -T2 = TypeVar("T2", bound=) - -class A: - def __init__(a: int, b: T1, c: T2): - self.a = a - self.b = b - self.c = c -``` - -You can use the `AFBaseModel` to convert these to pydantic serializable classes. - -```python -from typing import Generic, TypeVar - -from ._pydantic import AFBaseModel - -T1 = TypeVar("T1") -T2 = TypeVar("T2", bound=) - -class A(AFBaseModel, Generic[T1, T2]): - # T1 and T2 must be specified in the Generic argument otherwise, pydantic will - # NOT be able to serialize this class - a: int - b: T1 - c: T2 -``` - ## Code quality checks To run the same checks that run during a commit and the GitHub Action `Python Code Quality`, you can use this command, from the [python](../python) folder: @@ -489,7 +196,7 @@ and then you can run the following tasks: uv sync --all-extras --dev ``` -After this initial setup, you can use the following tasks to manage your development environment, it is adviced to use the following setup command since that also installs the pre-commit hooks. +After this initial setup, you can use the following tasks to manage your development environment. It is advised to use the following setup command since that also installs the pre-commit hooks. 
#### `setup` Set up the development environment with a virtual environment, install dependencies and pre-commit hooks: @@ -547,103 +254,74 @@ Run MyPy type checking: uv run poe mypy ``` -### Testing - -#### `test` -Run unit tests with coverage: -```bash -uv run poe test -``` - -### Documentation - -#### `docs-install` -Install including the documentation tools: -```bash -uv run poe docs-install -``` +### Code Validation -#### `docs-clean` -Remove the docs build directory: +#### `markdown-code-lint` +Lint markdown code blocks: ```bash -uv run poe docs-clean +uv run poe markdown-code-lint ``` -#### `docs-build` -Build the documentation: -```bash -uv run poe docs-build -``` +### Comprehensive Checks -#### `docs-full` -Build the packages, clean and build the documentation: +#### `check` +Run all quality checks (format, lint, pyright, mypy, test, markdown lint): ```bash -uv run poe docs-full +uv run poe check ``` -#### `docs-rebuild` -Clean and build the documentation: -```bash -uv run poe docs-rebuild -``` +### Testing -#### `docs-full-install` -Install the docs dependencies, build the packages, clean and build the documentation: +#### `test` +Run unit tests with coverage by invoking the `test` task in each package sequentially: ```bash -uv run poe docs-full-install +uv run poe test ``` -#### `docs-debug` -Build the documentation with debug information: +To run tests for a specific package only, use the `--directory` flag: ```bash -uv run poe docs-debug -``` +# Run tests for the core package +uv run --directory packages/core poe test -#### `docs-rebuild-debug` -Clean and build the documentation with debug information: -```bash -uv run poe docs-rebuild-debug +# Run tests for the azure-ai package +uv run --directory packages/azure-ai poe test ``` -### Code Validation - -#### `markdown-code-lint` -Lint markdown code blocks: +#### `all-tests` +Run all tests in a single pytest invocation across all packages in parallel (excluding lab and devui). 
This is faster than `test` as it uses pytest's parallel execution: ```bash -uv run poe markdown-code-lint +uv run poe all-tests ``` -#### `samples-code-check` -Run type checking on samples: +#### `all-tests-cov` +Same as `all-tests` but with coverage reporting enabled: ```bash -uv run poe samples-code-check +uv run poe all-tests-cov ``` -### Comprehensive Checks +### Building and Publishing -#### `check` -Run all quality checks (format, lint, pyright, mypy, test, markdown lint, samples check): +#### `build` +Build all packages: ```bash -uv run poe check +uv run poe build ``` -#### `pre-commit-check` -Run pre-commit specific checks (all of the above, excluding `mypy`): +#### `clean-dist` +Clean the dist directories: ```bash -uv run poe pre-commit-check +uv run poe clean-dist ``` -### Building - -#### `build` -Build the package: +#### `publish` +Publish packages to PyPI: ```bash -uv run poe build +uv run poe publish ``` ## Pre-commit Hooks -You can also run all checks using pre-commit directly: +Pre-commit hooks run automatically on commit and execute a subset of the checks on changed files only. You can also run all checks using pre-commit directly: ```bash uv run pre-commit run -a diff --git a/python/packages/a2a/agent_framework_a2a/_agent.py b/python/packages/a2a/agent_framework_a2a/_agent.py index f90d7214a0..cd85509a40 100644 --- a/python/packages/a2a/agent_framework_a2a/_agent.py +++ b/python/packages/a2a/agent_framework_a2a/_agent.py @@ -237,14 +237,14 @@ async def run_stream( An agent response item. 
""" messages = self._normalize_messages(messages) - a2a_message = self._chat_message_to_a2a_message(messages[-1]) + a2a_message = self._prepare_message_for_a2a(messages[-1]) response_stream = self.client.send_message(a2a_message) async for item in response_stream: if isinstance(item, Message): # Process A2A Message - contents = self._a2a_parts_to_contents(item.parts) + contents = self._parse_contents_from_a2a(item.parts) yield AgentRunResponseUpdate( contents=contents, role=Role.ASSISTANT if item.role == A2ARole.agent else Role.USER, @@ -255,7 +255,7 @@ async def run_stream( task, _update_event = item if isinstance(task, Task) and task.status.state in TERMINAL_TASK_STATES: # Convert Task artifacts to ChatMessages and yield as separate updates - task_messages = self._task_to_chat_messages(task) + task_messages = self._parse_messages_from_task(task) if task_messages: for message in task_messages: # Use the artifact's ID from raw_representation as message_id for unique identification @@ -280,8 +280,8 @@ async def run_stream( msg = f"Only Message and Task responses are supported from A2A agents. Received: {type(item)}" raise NotImplementedError(msg) - def _chat_message_to_a2a_message(self, message: ChatMessage) -> A2AMessage: - """Convert a ChatMessage to an A2A Message. + def _prepare_message_for_a2a(self, message: ChatMessage) -> A2AMessage: + """Prepare a ChatMessage for the A2A protocol. Transforms Agent Framework ChatMessage objects into A2A protocol Messages by: - Converting all message contents to appropriate A2A Part types @@ -361,8 +361,8 @@ def _chat_message_to_a2a_message(self, message: ChatMessage) -> A2AMessage: metadata=cast(dict[str, Any], message.additional_properties), ) - def _a2a_parts_to_contents(self, parts: Sequence[A2APart]) -> list[Contents]: - """Convert A2A Parts to Agent Framework Contents. + def _parse_contents_from_a2a(self, parts: Sequence[A2APart]) -> list[Contents]: + """Parse A2A Parts into Agent Framework Contents. 
Transforms A2A protocol Parts into framework-native Content objects, handling text, file (URI/bytes), and data parts with metadata preservation. @@ -410,17 +410,17 @@ def _a2a_parts_to_contents(self, parts: Sequence[A2APart]) -> list[Contents]: raise ValueError(f"Unknown Part kind: {inner_part.kind}") return contents - def _task_to_chat_messages(self, task: Task) -> list[ChatMessage]: - """Convert A2A Task artifacts to ChatMessages with ASSISTANT role.""" + def _parse_messages_from_task(self, task: Task) -> list[ChatMessage]: + """Parse A2A Task artifacts into ChatMessages with ASSISTANT role.""" messages: list[ChatMessage] = [] if task.artifacts is not None: for artifact in task.artifacts: - messages.append(self._artifact_to_chat_message(artifact)) + messages.append(self._parse_message_from_artifact(artifact)) elif task.history is not None and len(task.history) > 0: # Include the last history item as the agent response history_item = task.history[-1] - contents = self._a2a_parts_to_contents(history_item.parts) + contents = self._parse_contents_from_a2a(history_item.parts) messages.append( ChatMessage( role=Role.ASSISTANT if history_item.role == A2ARole.agent else Role.USER, @@ -431,9 +431,9 @@ def _task_to_chat_messages(self, task: Task) -> list[ChatMessage]: return messages - def _artifact_to_chat_message(self, artifact: Artifact) -> ChatMessage: - """Convert A2A Artifact to ChatMessage using part contents.""" - contents = self._a2a_parts_to_contents(artifact.parts) + def _parse_message_from_artifact(self, artifact: Artifact) -> ChatMessage: + """Parse A2A Artifact into ChatMessage using part contents.""" + contents = self._parse_contents_from_a2a(artifact.parts) return ChatMessage( role=Role.ASSISTANT, contents=contents, diff --git a/python/packages/a2a/pyproject.toml b/python/packages/a2a/pyproject.toml index 56d79ce7fe..c37ba371e2 100644 --- a/python/packages/a2a/pyproject.toml +++ b/python/packages/a2a/pyproject.toml @@ -4,7 +4,7 @@ description = "A2A 
integration for Microsoft Agent Framework." authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251216" +version = "1.0.0b251223" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/a2a/tests/test_a2a_agent.py b/python/packages/a2a/tests/test_a2a_agent.py index 82d3d02875..58ab18fee4 100644 --- a/python/packages/a2a/tests/test_a2a_agent.py +++ b/python/packages/a2a/tests/test_a2a_agent.py @@ -197,18 +197,18 @@ async def test_run_with_unknown_response_type_raises_error(a2a_agent: A2AAgent, await a2a_agent.run("Test message") -def test_task_to_chat_messages_empty_artifacts(a2a_agent: A2AAgent) -> None: - """Test _task_to_chat_messages with task containing no artifacts.""" +def test_parse_messages_from_task_empty_artifacts(a2a_agent: A2AAgent) -> None: + """Test _parse_messages_from_task with task containing no artifacts.""" task = MagicMock() task.artifacts = None - result = a2a_agent._task_to_chat_messages(task) + result = a2a_agent._parse_messages_from_task(task) assert len(result) == 0 -def test_task_to_chat_messages_with_artifacts(a2a_agent: A2AAgent) -> None: - """Test _task_to_chat_messages with task containing artifacts.""" +def test_parse_messages_from_task_with_artifacts(a2a_agent: A2AAgent) -> None: + """Test _parse_messages_from_task with task containing artifacts.""" task = MagicMock() # Create mock artifacts @@ -232,7 +232,7 @@ def test_task_to_chat_messages_with_artifacts(a2a_agent: A2AAgent) -> None: task.artifacts = [artifact1, artifact2] - result = a2a_agent._task_to_chat_messages(task) + result = a2a_agent._parse_messages_from_task(task) assert len(result) == 2 assert result[0].text == "Content 1" @@ -240,8 +240,8 @@ def test_task_to_chat_messages_with_artifacts(a2a_agent: A2AAgent) -> None: assert all(msg.role == Role.ASSISTANT for msg 
in result) -def test_artifact_to_chat_message(a2a_agent: A2AAgent) -> None: - """Test _artifact_to_chat_message conversion.""" +def test_parse_message_from_artifact(a2a_agent: A2AAgent) -> None: + """Test _parse_message_from_artifact conversion.""" artifact = MagicMock() artifact.artifact_id = "test-artifact" @@ -253,7 +253,7 @@ def test_artifact_to_chat_message(a2a_agent: A2AAgent) -> None: artifact.parts = [text_part] - result = a2a_agent._artifact_to_chat_message(artifact) + result = a2a_agent._parse_message_from_artifact(artifact) assert isinstance(result, ChatMessage) assert result.role == Role.ASSISTANT @@ -276,7 +276,7 @@ def test_get_uri_data_invalid_uri() -> None: _get_uri_data("not-a-valid-data-uri") -def test_a2a_parts_to_contents_conversion(a2a_agent: A2AAgent) -> None: +def test_parse_contents_from_a2a_conversion(a2a_agent: A2AAgent) -> None: """Test A2A parts to contents conversion.""" agent = A2AAgent(name="Test Agent", client=MockA2AClient(), _http_client=None) @@ -285,7 +285,7 @@ def test_a2a_parts_to_contents_conversion(a2a_agent: A2AAgent) -> None: parts = [Part(root=TextPart(text="First part")), Part(root=TextPart(text="Second part"))] # Convert to contents - contents = agent._a2a_parts_to_contents(parts) + contents = agent._parse_contents_from_a2a(parts) # Verify conversion assert len(contents) == 2 @@ -295,30 +295,30 @@ def test_a2a_parts_to_contents_conversion(a2a_agent: A2AAgent) -> None: assert contents[1].text == "Second part" -def test_chat_message_to_a2a_message_with_error_content(a2a_agent: A2AAgent) -> None: - """Test _chat_message_to_a2a_message with ErrorContent.""" +def test_prepare_message_for_a2a_with_error_content(a2a_agent: A2AAgent) -> None: + """Test _prepare_message_for_a2a with ErrorContent.""" # Create ChatMessage with ErrorContent error_content = ErrorContent(message="Test error message") message = ChatMessage(role=Role.USER, contents=[error_content]) # Convert to A2A message - a2a_message = 
a2a_agent._chat_message_to_a2a_message(message) + a2a_message = a2a_agent._prepare_message_for_a2a(message) # Verify conversion assert len(a2a_message.parts) == 1 assert a2a_message.parts[0].root.text == "Test error message" -def test_chat_message_to_a2a_message_with_uri_content(a2a_agent: A2AAgent) -> None: - """Test _chat_message_to_a2a_message with UriContent.""" +def test_prepare_message_for_a2a_with_uri_content(a2a_agent: A2AAgent) -> None: + """Test _prepare_message_for_a2a with UriContent.""" # Create ChatMessage with UriContent uri_content = UriContent(uri="http://example.com/file.pdf", media_type="application/pdf") message = ChatMessage(role=Role.USER, contents=[uri_content]) # Convert to A2A message - a2a_message = a2a_agent._chat_message_to_a2a_message(message) + a2a_message = a2a_agent._prepare_message_for_a2a(message) # Verify conversion assert len(a2a_message.parts) == 1 @@ -326,15 +326,15 @@ def test_chat_message_to_a2a_message_with_uri_content(a2a_agent: A2AAgent) -> No assert a2a_message.parts[0].root.file.mime_type == "application/pdf" -def test_chat_message_to_a2a_message_with_data_content(a2a_agent: A2AAgent) -> None: - """Test _chat_message_to_a2a_message with DataContent.""" +def test_prepare_message_for_a2a_with_data_content(a2a_agent: A2AAgent) -> None: + """Test _prepare_message_for_a2a with DataContent.""" # Create ChatMessage with DataContent (base64 data URI) data_content = DataContent(uri="data:text/plain;base64,SGVsbG8gV29ybGQ=", media_type="text/plain") message = ChatMessage(role=Role.USER, contents=[data_content]) # Convert to A2A message - a2a_message = a2a_agent._chat_message_to_a2a_message(message) + a2a_message = a2a_agent._prepare_message_for_a2a(message) # Verify conversion assert len(a2a_message.parts) == 1 @@ -342,14 +342,14 @@ def test_chat_message_to_a2a_message_with_data_content(a2a_agent: A2AAgent) -> N assert a2a_message.parts[0].root.file.mime_type == "text/plain" -def 
test_chat_message_to_a2a_message_empty_contents_raises_error(a2a_agent: A2AAgent) -> None: - """Test _chat_message_to_a2a_message with empty contents raises ValueError.""" +def test_prepare_message_for_a2a_empty_contents_raises_error(a2a_agent: A2AAgent) -> None: + """Test _prepare_message_for_a2a with empty contents raises ValueError.""" # Create ChatMessage with no contents message = ChatMessage(role=Role.USER, contents=[]) # Should raise ValueError for empty contents with raises(ValueError, match="ChatMessage.contents is empty"): - a2a_agent._chat_message_to_a2a_message(message) + a2a_agent._prepare_message_for_a2a(message) async def test_run_stream_with_message_response(a2a_agent: A2AAgent, mock_a2a_client: MockA2AClient) -> None: @@ -405,7 +405,7 @@ async def test_context_manager_no_cleanup_when_no_http_client() -> None: pass -def test_chat_message_to_a2a_message_with_multiple_contents() -> None: +def test_prepare_message_for_a2a_with_multiple_contents() -> None: """Test conversion of ChatMessage with multiple contents.""" agent = A2AAgent(client=MagicMock(), _http_client=None) @@ -421,7 +421,7 @@ def test_chat_message_to_a2a_message_with_multiple_contents() -> None: ], ) - result = agent._chat_message_to_a2a_message(message) + result = agent._prepare_message_for_a2a(message) # Should have converted all 4 contents to parts assert len(result.parts) == 4 @@ -433,7 +433,7 @@ def test_chat_message_to_a2a_message_with_multiple_contents() -> None: assert result.parts[3].root.kind == "text" # JSON text remains as text (no parsing) -def test_a2a_parts_to_contents_with_data_part() -> None: +def test_parse_contents_from_a2a_with_data_part() -> None: """Test conversion of A2A DataPart.""" agent = A2AAgent(client=MagicMock(), _http_client=None) @@ -441,7 +441,7 @@ def test_a2a_parts_to_contents_with_data_part() -> None: # Create DataPart data_part = Part(root=DataPart(data={"key": "value", "number": 42}, metadata={"source": "test"})) - contents = 
agent._a2a_parts_to_contents([data_part]) + contents = agent._parse_contents_from_a2a([data_part]) assert len(contents) == 1 @@ -450,7 +450,7 @@ def test_a2a_parts_to_contents_with_data_part() -> None: assert contents[0].additional_properties == {"source": "test"} -def test_a2a_parts_to_contents_unknown_part_kind() -> None: +def test_parse_contents_from_a2a_unknown_part_kind() -> None: """Test error handling for unknown A2A part kind.""" agent = A2AAgent(client=MagicMock(), _http_client=None) @@ -459,10 +459,10 @@ def test_a2a_parts_to_contents_unknown_part_kind() -> None: mock_part.root.kind = "unknown_kind" with raises(ValueError, match="Unknown Part kind: unknown_kind"): - agent._a2a_parts_to_contents([mock_part]) + agent._parse_contents_from_a2a([mock_part]) -def test_chat_message_to_a2a_message_with_hosted_file() -> None: +def test_prepare_message_for_a2a_with_hosted_file() -> None: """Test conversion of ChatMessage with HostedFileContent to A2A message.""" agent = A2AAgent(client=MagicMock(), _http_client=None) @@ -473,7 +473,7 @@ def test_chat_message_to_a2a_message_with_hosted_file() -> None: contents=[HostedFileContent(file_id="hosted://storage/document.pdf")], ) - result = agent._chat_message_to_a2a_message(message) # noqa: SLF001 + result = agent._prepare_message_for_a2a(message) # noqa: SLF001 # Verify the conversion assert len(result.parts) == 1 @@ -488,7 +488,7 @@ def test_chat_message_to_a2a_message_with_hosted_file() -> None: assert part.root.file.mime_type is None # HostedFileContent doesn't specify media_type -def test_a2a_parts_to_contents_with_hosted_file_uri() -> None: +def test_parse_contents_from_a2a_with_hosted_file_uri() -> None: """Test conversion of A2A FilePart with hosted file URI back to UriContent.""" agent = A2AAgent(client=MagicMock(), _http_client=None) @@ -503,7 +503,7 @@ def test_a2a_parts_to_contents_with_hosted_file_uri() -> None: ) ) - contents = agent._a2a_parts_to_contents([file_part]) # noqa: SLF001 + contents = 
agent._parse_contents_from_a2a([file_part]) # noqa: SLF001 assert len(contents) == 1 diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_orchestrators.py b/python/packages/ag-ui/agent_framework_ag_ui/_orchestrators.py index 654498e371..6bdff552b6 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_orchestrators.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_orchestrators.py @@ -86,7 +86,7 @@ def last_message(self): def run_id(self) -> str: """Get or generate run ID.""" if self._run_id is None: - self._run_id = self.input_data.get("run_id") or str(uuid.uuid4()) + self._run_id = self.input_data.get("run_id") or self.input_data.get("runId") or str(uuid.uuid4()) # This should never be None after the if block above, but satisfy type checkers if self._run_id is None: # pragma: no cover raise RuntimeError("Failed to initialize run_id") @@ -96,7 +96,7 @@ def run_id(self) -> str: def thread_id(self) -> str: """Get or generate thread ID.""" if self._thread_id is None: - self._thread_id = self.input_data.get("thread_id") or str(uuid.uuid4()) + self._thread_id = self.input_data.get("thread_id") or self.input_data.get("threadId") or str(uuid.uuid4()) # This should never be None after the if block above, but satisfy type checkers if self._thread_id is None: # pragma: no cover raise RuntimeError("Failed to initialize thread_id") diff --git a/python/packages/ag-ui/pyproject.toml b/python/packages/ag-ui/pyproject.toml index 8a4adceeee..97694a9bb2 100644 --- a/python/packages/ag-ui/pyproject.toml +++ b/python/packages/ag-ui/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "agent-framework-ag-ui" -version = "1.0.0b251216" +version = "1.0.0b251223" description = "AG-UI protocol integration for Agent Framework" readme = "README.md" license-files = ["LICENSE"] diff --git a/python/packages/ag-ui/tests/test_orchestrators.py b/python/packages/ag-ui/tests/test_orchestrators.py index 10843a259c..af90ea2e88 100644 --- a/python/packages/ag-ui/tests/test_orchestrators.py +++ 
b/python/packages/ag-ui/tests/test_orchestrators.py @@ -83,3 +83,71 @@ async def test_default_orchestrator_merges_client_tools() -> None: assert "server_tool" in tool_names assert "get_weather" in tool_names assert agent.chat_client.function_invocation_configuration.additional_tools + + +async def test_default_orchestrator_with_camel_case_ids() -> None: + """Client tool is able to extract camelCase IDs.""" + + agent = DummyAgent() + orchestrator = DefaultOrchestrator() + + input_data = { + "runId": "test-camelcase-runid", + "threadId": "test-camelcase-threadid", + "messages": [ + { + "role": "user", + "content": [{"type": "input_text", "text": "Hello"}], + } + ], + "tools": [], + } + + context = ExecutionContext( + input_data=input_data, + agent=agent, + config=AgentConfig(), + ) + + events = [] + async for event in orchestrator.run(context): + events.append(event) + + # assert the last event has the expected run_id and thread_id + last_event = events[-1] + assert last_event.run_id == "test-camelcase-runid" + assert last_event.thread_id == "test-camelcase-threadid" + + +async def test_default_orchestrator_with_snake_case_ids() -> None: + """Client tool is able to extract snake_case IDs.""" + + agent = DummyAgent() + orchestrator = DefaultOrchestrator() + + input_data = { + "run_id": "test-snakecase-runid", + "thread_id": "test-snakecase-threadid", + "messages": [ + { + "role": "user", + "content": [{"type": "input_text", "text": "Hello"}], + } + ], + "tools": [], + } + + context = ExecutionContext( + input_data=input_data, + agent=agent, + config=AgentConfig(), + ) + + events = [] + async for event in orchestrator.run(context): + events.append(event) + + # assert the last event has the expected run_id and thread_id + last_event = events[-1] + assert last_event.run_id == "test-snakecase-runid" + assert last_event.thread_id == "test-snakecase-threadid" diff --git a/python/packages/anthropic/agent_framework_anthropic/_chat_client.py 
b/python/packages/anthropic/agent_framework_anthropic/_chat_client.py index e4eca2d005..a5b169fbbf 100644 --- a/python/packages/anthropic/agent_framework_anthropic/_chat_client.py +++ b/python/packages/anthropic/agent_framework_anthropic/_chat_client.py @@ -25,7 +25,6 @@ TextContent, TextReasoningContent, TextSpanRegion, - ToolProtocol, UsageContent, UsageDetails, get_logger, @@ -214,9 +213,11 @@ async def _inner_get_response( chat_options: ChatOptions, **kwargs: Any, ) -> ChatResponse: - # Extract necessary state from messages and options - run_options = self._create_run_options(messages, chat_options, **kwargs) + # prepare + run_options = self._prepare_options(messages, chat_options, **kwargs) + # execute message = await self.anthropic_client.beta.messages.create(**run_options, stream=False) + # process return self._process_message(message) async def _inner_get_streaming_response( @@ -226,16 +227,17 @@ async def _inner_get_streaming_response( chat_options: ChatOptions, **kwargs: Any, ) -> AsyncIterable[ChatResponseUpdate]: - # Extract necessary state from messages and options - run_options = self._create_run_options(messages, chat_options, **kwargs) + # prepare + run_options = self._prepare_options(messages, chat_options, **kwargs) + # execute and process async for chunk in await self.anthropic_client.beta.messages.create(**run_options, stream=True): parsed_chunk = self._process_stream_event(chunk) if parsed_chunk: yield parsed_chunk - # region Create Run Options and Helpers + # region Prep methods - def _create_run_options( + def _prepare_options( self, messages: MutableSequence[ChatMessage], chat_options: ChatOptions, @@ -251,78 +253,91 @@ def _create_run_options( Returns: A dictionary of run options for the Anthropic client. 
""" - if chat_options.additional_properties and "additional_beta_flags" in chat_options.additional_properties: - betas = chat_options.additional_properties.pop("additional_beta_flags") - else: - betas = [] - run_options: dict[str, Any] = { - "model": chat_options.model_id or self.model_id, - "messages": self._convert_messages_to_anthropic_format(messages), - "max_tokens": chat_options.max_tokens or ANTHROPIC_DEFAULT_MAX_TOKENS, - "extra_headers": {"User-Agent": AGENT_FRAMEWORK_USER_AGENT}, - "betas": {*BETA_FLAGS, *self.additional_beta_flags, *betas}, + run_options: dict[str, Any] = chat_options.to_dict( + exclude={ + "type", + "instructions", # handled via system message + "tool_choice", # handled separately + "allow_multiple_tool_calls", # handled via tool_choice + "additional_properties", # handled separately + } + ) + + # translations between ChatOptions and Anthropic API + translations = { + "model_id": "model", + "stop": "stop_sequences", } + for old_key, new_key in translations.items(): + if old_key in run_options and old_key != new_key: + run_options[new_key] = run_options.pop(old_key) + + # model id + if not run_options.get("model"): + if not self.model_id: + raise ValueError("model_id must be a non-empty string") + run_options["model"] = self.model_id + + # max_tokens - Anthropic requires this, default if not provided + if not run_options.get("max_tokens"): + run_options["max_tokens"] = ANTHROPIC_DEFAULT_MAX_TOKENS - # Add any additional options from chat_options or kwargs - if chat_options.temperature is not None: - run_options["temperature"] = chat_options.temperature - if chat_options.top_p is not None: - run_options["top_p"] = chat_options.top_p - if chat_options.stop is not None: - run_options["stop_sequences"] = chat_options.stop + # messages + run_options["messages"] = self._prepare_messages_for_anthropic(messages) + + # system message - first system message is passed as instructions if messages and isinstance(messages[0], ChatMessage) and 
messages[0].role == Role.SYSTEM: - # first system message is passed as instructions run_options["system"] = messages[0].text - if chat_options.tool_choice is not None: - match ( - chat_options.tool_choice if isinstance(chat_options.tool_choice, str) else chat_options.tool_choice.mode - ): - case "auto": - run_options["tool_choice"] = {"type": "auto"} - if chat_options.allow_multiple_tool_calls is not None: - run_options["tool_choice"][ # type:ignore[reportArgumentType] - "disable_parallel_tool_use" - ] = not chat_options.allow_multiple_tool_calls - case "required": - if chat_options.tool_choice.required_function_name: - run_options["tool_choice"] = { - "type": "tool", - "name": chat_options.tool_choice.required_function_name, - } - if chat_options.allow_multiple_tool_calls is not None: - run_options["tool_choice"][ # type:ignore[reportArgumentType] - "disable_parallel_tool_use" - ] = not chat_options.allow_multiple_tool_calls - else: - run_options["tool_choice"] = {"type": "any"} - if chat_options.allow_multiple_tool_calls is not None: - run_options["tool_choice"][ # type:ignore[reportArgumentType] - "disable_parallel_tool_use" - ] = not chat_options.allow_multiple_tool_calls - case "none": - run_options["tool_choice"] = {"type": "none"} - case _: - logger.debug(f"Ignoring unsupported tool choice mode: {chat_options.tool_choice.mode} for now") - if tools_and_mcp := self._convert_tools_to_anthropic_format(chat_options.tools): - run_options.update(tools_and_mcp) - if chat_options.additional_properties: - run_options.update(chat_options.additional_properties) + + # betas + run_options["betas"] = self._prepare_betas(chat_options) + + # extra headers + run_options["extra_headers"] = {"User-Agent": AGENT_FRAMEWORK_USER_AGENT} + + # tools, mcp servers and tool choice + if tools_config := self._prepare_tools_for_anthropic(chat_options): + run_options.update(tools_config) + + # additional properties + additional_options = { + key: value + for key, value in 
chat_options.additional_properties.items() + if value is not None and key != "additional_beta_flags" + } + if additional_options: + run_options.update(additional_options) run_options.update(kwargs) return run_options - def _convert_messages_to_anthropic_format(self, messages: MutableSequence[ChatMessage]) -> list[dict[str, Any]]: - """Convert a list of ChatMessages to the format expected by the Anthropic client. + def _prepare_betas(self, chat_options: ChatOptions) -> set[str]: + """Prepare the beta flags for the Anthropic API request. + + Args: + chat_options: The chat options that may contain additional beta flags. + + Returns: + A set of beta flag strings to include in the request. + """ + return { + *BETA_FLAGS, + *self.additional_beta_flags, + *chat_options.additional_properties.get("additional_beta_flags", []), + } + + def _prepare_messages_for_anthropic(self, messages: MutableSequence[ChatMessage]) -> list[dict[str, Any]]: + """Prepare a list of ChatMessages for the Anthropic client. This skips the first message if it is a system message, as Anthropic expects system instructions as a separate parameter. """ # first system message is passed as instructions if messages and isinstance(messages[0], ChatMessage) and messages[0].role == Role.SYSTEM: - return [self._convert_message_to_anthropic_format(msg) for msg in messages[1:]] - return [self._convert_message_to_anthropic_format(msg) for msg in messages] + return [self._prepare_message_for_anthropic(msg) for msg in messages[1:]] + return [self._prepare_message_for_anthropic(msg) for msg in messages] - def _convert_message_to_anthropic_format(self, message: ChatMessage) -> dict[str, Any]: - """Convert a ChatMessage to the format expected by the Anthropic client. + def _prepare_message_for_anthropic(self, message: ChatMessage) -> dict[str, Any]: + """Prepare a ChatMessage for the Anthropic client. Args: message: The ChatMessage to convert. 
@@ -376,58 +391,96 @@ def _convert_message_to_anthropic_format(self, message: ChatMessage) -> dict[str "content": a_content, } - def _convert_tools_to_anthropic_format( - self, tools: list[ToolProtocol | MutableMapping[str, Any]] | None - ) -> dict[str, Any] | None: - if not tools: - return None - tool_list: list[MutableMapping[str, Any]] = [] - mcp_server_list: list[MutableMapping[str, Any]] = [] - for tool in tools: - match tool: - case MutableMapping(): - tool_list.append(tool) - case AIFunction(): - tool_list.append({ - "type": "custom", - "name": tool.name, - "description": tool.description, - "input_schema": tool.parameters(), - }) - case HostedWebSearchTool(): - search_tool: dict[str, Any] = { - "type": "web_search_20250305", - "name": "web_search", - } - if tool.additional_properties: - search_tool.update(tool.additional_properties) - tool_list.append(search_tool) - case HostedCodeInterpreterTool(): - code_tool: dict[str, Any] = { - "type": "code_execution_20250825", - "name": "code_execution", - } - tool_list.append(code_tool) - case HostedMCPTool(): - server_def: dict[str, Any] = { - "type": "url", - "name": tool.name, - "url": str(tool.url), - } - if tool.allowed_tools: - server_def["tool_configuration"] = {"allowed_tools": list(tool.allowed_tools)} - if tool.headers and (auth := tool.headers.get("authorization")): - server_def["authorization_token"] = auth - mcp_server_list.append(server_def) + def _prepare_tools_for_anthropic(self, chat_options: ChatOptions) -> dict[str, Any] | None: + """Prepare tools and tool choice configuration for the Anthropic API request. + + Args: + chat_options: The chat options containing tools and tool choice settings. + + Returns: + A dictionary with tools, mcp_servers, and tool_choice configuration, or None if empty. 
+ """ + result: dict[str, Any] = {} + + # Process tools + if chat_options.tools: + tool_list: list[MutableMapping[str, Any]] = [] + mcp_server_list: list[MutableMapping[str, Any]] = [] + for tool in chat_options.tools: + match tool: + case MutableMapping(): + tool_list.append(tool) + case AIFunction(): + tool_list.append({ + "type": "custom", + "name": tool.name, + "description": tool.description, + "input_schema": tool.parameters(), + }) + case HostedWebSearchTool(): + search_tool: dict[str, Any] = { + "type": "web_search_20250305", + "name": "web_search", + } + if tool.additional_properties: + search_tool.update(tool.additional_properties) + tool_list.append(search_tool) + case HostedCodeInterpreterTool(): + code_tool: dict[str, Any] = { + "type": "code_execution_20250825", + "name": "code_execution", + } + tool_list.append(code_tool) + case HostedMCPTool(): + server_def: dict[str, Any] = { + "type": "url", + "name": tool.name, + "url": str(tool.url), + } + if tool.allowed_tools: + server_def["tool_configuration"] = {"allowed_tools": list(tool.allowed_tools)} + if tool.headers and (auth := tool.headers.get("authorization")): + server_def["authorization_token"] = auth + mcp_server_list.append(server_def) + case _: + logger.debug(f"Ignoring unsupported tool type: {type(tool)} for now") + + if tool_list: + result["tools"] = tool_list + if mcp_server_list: + result["mcp_servers"] = mcp_server_list + + # Process tool choice + if chat_options.tool_choice is not None: + tool_choice_mode = ( + chat_options.tool_choice if isinstance(chat_options.tool_choice, str) else chat_options.tool_choice.mode + ) + match tool_choice_mode: + case "auto": + tool_choice: dict[str, Any] = {"type": "auto"} + if chat_options.allow_multiple_tool_calls is not None: + tool_choice["disable_parallel_tool_use"] = not chat_options.allow_multiple_tool_calls + result["tool_choice"] = tool_choice + case "required": + if ( + not isinstance(chat_options.tool_choice, str) + and 
chat_options.tool_choice.required_function_name + ): + tool_choice = { + "type": "tool", + "name": chat_options.tool_choice.required_function_name, + } + else: + tool_choice = {"type": "any"} + if chat_options.allow_multiple_tool_calls is not None: + tool_choice["disable_parallel_tool_use"] = not chat_options.allow_multiple_tool_calls + result["tool_choice"] = tool_choice + case "none": + result["tool_choice"] = {"type": "none"} case _: - logger.debug(f"Ignoring unsupported tool type: {type(tool)} for now") + logger.debug(f"Ignoring unsupported tool choice mode: {tool_choice_mode} for now") - all_tools: dict[str, list[MutableMapping[str, Any]]] = {} - if tool_list: - all_tools["tools"] = tool_list - if mcp_server_list: - all_tools["mcp_servers"] = mcp_server_list - return all_tools + return result or None # region Response Processing Methods @@ -445,11 +498,11 @@ def _process_message(self, message: BetaMessage) -> ChatResponse: messages=[ ChatMessage( role=Role.ASSISTANT, - contents=self._parse_message_contents(message.content), + contents=self._parse_contents_from_anthropic(message.content), raw_representation=message, ) ], - usage_details=self._parse_message_usage(message.usage), + usage_details=self._parse_usage_from_anthropic(message.usage), model_id=message.model, finish_reason=FINISH_REASON_MAP.get(message.stop_reason) if message.stop_reason else None, raw_response=message, @@ -467,12 +520,12 @@ def _process_stream_event(self, event: BetaRawMessageStreamEvent) -> ChatRespons match event.type: case "message_start": usage_details: list[UsageContent] = [] - if event.message.usage and (details := self._parse_message_usage(event.message.usage)): + if event.message.usage and (details := self._parse_usage_from_anthropic(event.message.usage)): usage_details.append(UsageContent(details=details)) return ChatResponseUpdate( response_id=event.message.id, - contents=[*self._parse_message_contents(event.message.content), *usage_details], + 
contents=[*self._parse_contents_from_anthropic(event.message.content), *usage_details], model_id=event.message.model, finish_reason=FINISH_REASON_MAP.get(event.message.stop_reason) if event.message.stop_reason @@ -480,7 +533,7 @@ def _process_stream_event(self, event: BetaRawMessageStreamEvent) -> ChatRespons raw_response=event, ) case "message_delta": - usage = self._parse_message_usage(event.usage) + usage = self._parse_usage_from_anthropic(event.usage) return ChatResponseUpdate( contents=[UsageContent(details=usage, raw_representation=event.usage)] if usage else [], raw_response=event, @@ -488,13 +541,13 @@ def _process_stream_event(self, event: BetaRawMessageStreamEvent) -> ChatRespons case "message_stop": logger.debug("Received message_stop event; no content to process.") case "content_block_start": - contents = self._parse_message_contents([event.content_block]) + contents = self._parse_contents_from_anthropic([event.content_block]) return ChatResponseUpdate( contents=contents, raw_response=event, ) case "content_block_delta": - contents = self._parse_message_contents([event.delta]) + contents = self._parse_contents_from_anthropic([event.delta]) return ChatResponseUpdate( contents=contents, raw_response=event, @@ -505,7 +558,7 @@ def _process_stream_event(self, event: BetaRawMessageStreamEvent) -> ChatRespons logger.debug(f"Ignoring unsupported event type: {event.type}") return None - def _parse_message_usage(self, usage: BetaUsage | BetaMessageDeltaUsage | None) -> UsageDetails | None: + def _parse_usage_from_anthropic(self, usage: BetaUsage | BetaMessageDeltaUsage | None) -> UsageDetails | None: """Parse usage details from the Anthropic message usage.""" if not usage: return None @@ -518,7 +571,7 @@ def _parse_message_usage(self, usage: BetaUsage | BetaMessageDeltaUsage | None) usage_details.additional_counts["anthropic.cache_read_input_tokens"] = usage.cache_read_input_tokens return usage_details - def _parse_message_contents( + def 
_parse_contents_from_anthropic( self, content: Sequence[BetaContentBlock | BetaRawContentBlockDelta | BetaTextBlock] ) -> list[Contents]: """Parse contents from the Anthropic message.""" @@ -530,7 +583,7 @@ def _parse_message_contents( TextContent( text=content_block.text, raw_representation=content_block, - annotations=self._parse_citations(content_block), + annotations=self._parse_citations_from_anthropic(content_block), ) ) case "tool_use" | "mcp_tool_use" | "server_tool_use": @@ -549,7 +602,7 @@ def _parse_message_contents( FunctionResultContent( call_id=content_block.tool_use_id, name=name if name and call_id == content_block.tool_use_id else "mcp_tool", - result=self._parse_message_contents(content_block.content) + result=self._parse_contents_from_anthropic(content_block.content) if isinstance(content_block.content, list) else content_block.content, raw_representation=content_block, @@ -608,7 +661,7 @@ def _parse_message_contents( logger.debug(f"Ignoring unsupported content type: {content_block.type} for now") return contents - def _parse_citations( + def _parse_citations_from_anthropic( self, content_block: BetaContentBlock | BetaRawContentBlockDelta | BetaTextBlock ) -> list[Annotations] | None: content_citations = getattr(content_block, "citations", None) diff --git a/python/packages/anthropic/pyproject.toml b/python/packages/anthropic/pyproject.toml index 55ef501a9f..3488c05bca 100644 --- a/python/packages/anthropic/pyproject.toml +++ b/python/packages/anthropic/pyproject.toml @@ -4,7 +4,7 @@ description = "Anthropic integration for Microsoft Agent Framework." 
authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251216" +version = "1.0.0b251223" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/anthropic/tests/test_anthropic_client.py b/python/packages/anthropic/tests/test_anthropic_client.py index fa6061a998..e8a3ac9cb0 100644 --- a/python/packages/anthropic/tests/test_anthropic_client.py +++ b/python/packages/anthropic/tests/test_anthropic_client.py @@ -151,12 +151,12 @@ def test_anthropic_client_service_url(mock_anthropic_client: MagicMock) -> None: # Message Conversion Tests -def test_convert_message_to_anthropic_format_text(mock_anthropic_client: MagicMock) -> None: +def test_prepare_message_for_anthropic_text(mock_anthropic_client: MagicMock) -> None: """Test converting text message to Anthropic format.""" chat_client = create_test_anthropic_client(mock_anthropic_client) message = ChatMessage(role=Role.USER, text="Hello, world!") - result = chat_client._convert_message_to_anthropic_format(message) + result = chat_client._prepare_message_for_anthropic(message) assert result["role"] == "user" assert len(result["content"]) == 1 @@ -164,7 +164,7 @@ def test_convert_message_to_anthropic_format_text(mock_anthropic_client: MagicMo assert result["content"][0]["text"] == "Hello, world!" 
-def test_convert_message_to_anthropic_format_function_call(mock_anthropic_client: MagicMock) -> None: +def test_prepare_message_for_anthropic_function_call(mock_anthropic_client: MagicMock) -> None: """Test converting function call message to Anthropic format.""" chat_client = create_test_anthropic_client(mock_anthropic_client) message = ChatMessage( @@ -178,7 +178,7 @@ def test_convert_message_to_anthropic_format_function_call(mock_anthropic_client ], ) - result = chat_client._convert_message_to_anthropic_format(message) + result = chat_client._prepare_message_for_anthropic(message) assert result["role"] == "assistant" assert len(result["content"]) == 1 @@ -188,7 +188,7 @@ def test_convert_message_to_anthropic_format_function_call(mock_anthropic_client assert result["content"][0]["input"] == {"location": "San Francisco"} -def test_convert_message_to_anthropic_format_function_result(mock_anthropic_client: MagicMock) -> None: +def test_prepare_message_for_anthropic_function_result(mock_anthropic_client: MagicMock) -> None: """Test converting function result message to Anthropic format.""" chat_client = create_test_anthropic_client(mock_anthropic_client) message = ChatMessage( @@ -202,7 +202,7 @@ def test_convert_message_to_anthropic_format_function_result(mock_anthropic_clie ], ) - result = chat_client._convert_message_to_anthropic_format(message) + result = chat_client._prepare_message_for_anthropic(message) assert result["role"] == "user" assert len(result["content"]) == 1 @@ -214,7 +214,7 @@ def test_convert_message_to_anthropic_format_function_result(mock_anthropic_clie assert result["content"][0]["is_error"] is False -def test_convert_message_to_anthropic_format_text_reasoning(mock_anthropic_client: MagicMock) -> None: +def test_prepare_message_for_anthropic_text_reasoning(mock_anthropic_client: MagicMock) -> None: """Test converting text reasoning message to Anthropic format.""" chat_client = create_test_anthropic_client(mock_anthropic_client) message = 
ChatMessage( @@ -222,7 +222,7 @@ def test_convert_message_to_anthropic_format_text_reasoning(mock_anthropic_clien contents=[TextReasoningContent(text="Let me think about this...")], ) - result = chat_client._convert_message_to_anthropic_format(message) + result = chat_client._prepare_message_for_anthropic(message) assert result["role"] == "assistant" assert len(result["content"]) == 1 @@ -230,7 +230,7 @@ def test_convert_message_to_anthropic_format_text_reasoning(mock_anthropic_clien assert result["content"][0]["thinking"] == "Let me think about this..." -def test_convert_messages_to_anthropic_format_with_system(mock_anthropic_client: MagicMock) -> None: +def test_prepare_messages_for_anthropic_with_system(mock_anthropic_client: MagicMock) -> None: """Test converting messages list with system message.""" chat_client = create_test_anthropic_client(mock_anthropic_client) messages = [ @@ -238,7 +238,7 @@ def test_convert_messages_to_anthropic_format_with_system(mock_anthropic_client: ChatMessage(role=Role.USER, text="Hello!"), ] - result = chat_client._convert_messages_to_anthropic_format(messages) + result = chat_client._prepare_messages_for_anthropic(messages) # System message should be skipped assert len(result) == 1 @@ -246,7 +246,7 @@ def test_convert_messages_to_anthropic_format_with_system(mock_anthropic_client: assert result[0]["content"][0]["text"] == "Hello!" 
-def test_convert_messages_to_anthropic_format_without_system(mock_anthropic_client: MagicMock) -> None: +def test_prepare_messages_for_anthropic_without_system(mock_anthropic_client: MagicMock) -> None: """Test converting messages list without system message.""" chat_client = create_test_anthropic_client(mock_anthropic_client) messages = [ @@ -254,7 +254,7 @@ def test_convert_messages_to_anthropic_format_without_system(mock_anthropic_clie ChatMessage(role=Role.ASSISTANT, text="Hi there!"), ] - result = chat_client._convert_messages_to_anthropic_format(messages) + result = chat_client._prepare_messages_for_anthropic(messages) assert len(result) == 2 assert result[0]["role"] == "user" @@ -264,7 +264,7 @@ def test_convert_messages_to_anthropic_format_without_system(mock_anthropic_clie # Tool Conversion Tests -def test_convert_tools_to_anthropic_format_ai_function(mock_anthropic_client: MagicMock) -> None: +def test_prepare_tools_for_anthropic_ai_function(mock_anthropic_client: MagicMock) -> None: """Test converting AIFunction to Anthropic format.""" chat_client = create_test_anthropic_client(mock_anthropic_client) @@ -273,9 +273,8 @@ def get_weather(location: Annotated[str, Field(description="Location to get weat """Get weather for a location.""" return f"Weather for {location}" - tools = [get_weather] - - result = chat_client._convert_tools_to_anthropic_format(tools) + chat_options = ChatOptions(tools=[get_weather]) + result = chat_client._prepare_tools_for_anthropic(chat_options) assert result is not None assert "tools" in result @@ -285,12 +284,12 @@ def get_weather(location: Annotated[str, Field(description="Location to get weat assert "Get weather for a location" in result["tools"][0]["description"] -def test_convert_tools_to_anthropic_format_web_search(mock_anthropic_client: MagicMock) -> None: +def test_prepare_tools_for_anthropic_web_search(mock_anthropic_client: MagicMock) -> None: """Test converting HostedWebSearchTool to Anthropic format.""" chat_client = 
create_test_anthropic_client(mock_anthropic_client) - tools = [HostedWebSearchTool()] + chat_options = ChatOptions(tools=[HostedWebSearchTool()]) - result = chat_client._convert_tools_to_anthropic_format(tools) + result = chat_client._prepare_tools_for_anthropic(chat_options) assert result is not None assert "tools" in result @@ -299,12 +298,12 @@ def test_convert_tools_to_anthropic_format_web_search(mock_anthropic_client: Mag assert result["tools"][0]["name"] == "web_search" -def test_convert_tools_to_anthropic_format_code_interpreter(mock_anthropic_client: MagicMock) -> None: +def test_prepare_tools_for_anthropic_code_interpreter(mock_anthropic_client: MagicMock) -> None: """Test converting HostedCodeInterpreterTool to Anthropic format.""" chat_client = create_test_anthropic_client(mock_anthropic_client) - tools = [HostedCodeInterpreterTool()] + chat_options = ChatOptions(tools=[HostedCodeInterpreterTool()]) - result = chat_client._convert_tools_to_anthropic_format(tools) + result = chat_client._prepare_tools_for_anthropic(chat_options) assert result is not None assert "tools" in result @@ -313,12 +312,12 @@ def test_convert_tools_to_anthropic_format_code_interpreter(mock_anthropic_clien assert result["tools"][0]["name"] == "code_execution" -def test_convert_tools_to_anthropic_format_mcp_tool(mock_anthropic_client: MagicMock) -> None: +def test_prepare_tools_for_anthropic_mcp_tool(mock_anthropic_client: MagicMock) -> None: """Test converting HostedMCPTool to Anthropic format.""" chat_client = create_test_anthropic_client(mock_anthropic_client) - tools = [HostedMCPTool(name="test-mcp", url="https://example.com/mcp")] + chat_options = ChatOptions(tools=[HostedMCPTool(name="test-mcp", url="https://example.com/mcp")]) - result = chat_client._convert_tools_to_anthropic_format(tools) + result = chat_client._prepare_tools_for_anthropic(chat_options) assert result is not None assert "mcp_servers" in result @@ -328,18 +327,20 @@ def 
test_convert_tools_to_anthropic_format_mcp_tool(mock_anthropic_client: Magic assert result["mcp_servers"][0]["url"] == "https://example.com/mcp" -def test_convert_tools_to_anthropic_format_mcp_with_auth(mock_anthropic_client: MagicMock) -> None: +def test_prepare_tools_for_anthropic_mcp_with_auth(mock_anthropic_client: MagicMock) -> None: """Test converting HostedMCPTool with authorization headers.""" chat_client = create_test_anthropic_client(mock_anthropic_client) - tools = [ - HostedMCPTool( - name="test-mcp", - url="https://example.com/mcp", - headers={"authorization": "Bearer token123"}, - ) - ] + chat_options = ChatOptions( + tools=[ + HostedMCPTool( + name="test-mcp", + url="https://example.com/mcp", + headers={"authorization": "Bearer token123"}, + ) + ] + ) - result = chat_client._convert_tools_to_anthropic_format(tools) + result = chat_client._prepare_tools_for_anthropic(chat_options) assert result is not None assert "mcp_servers" in result @@ -348,12 +349,12 @@ def test_convert_tools_to_anthropic_format_mcp_with_auth(mock_anthropic_client: assert result["mcp_servers"][0]["authorization_token"] == "Bearer token123" -def test_convert_tools_to_anthropic_format_dict_tool(mock_anthropic_client: MagicMock) -> None: +def test_prepare_tools_for_anthropic_dict_tool(mock_anthropic_client: MagicMock) -> None: """Test converting dict tool to Anthropic format.""" chat_client = create_test_anthropic_client(mock_anthropic_client) - tools = [{"type": "custom", "name": "custom_tool", "description": "A custom tool"}] + chat_options = ChatOptions(tools=[{"type": "custom", "name": "custom_tool", "description": "A custom tool"}]) - result = chat_client._convert_tools_to_anthropic_format(tools) + result = chat_client._prepare_tools_for_anthropic(chat_options) assert result is not None assert "tools" in result @@ -361,11 +362,12 @@ def test_convert_tools_to_anthropic_format_dict_tool(mock_anthropic_client: Magi assert result["tools"][0]["name"] == "custom_tool" -def 
test_convert_tools_to_anthropic_format_none(mock_anthropic_client: MagicMock) -> None: +def test_prepare_tools_for_anthropic_none(mock_anthropic_client: MagicMock) -> None: """Test converting None tools.""" chat_client = create_test_anthropic_client(mock_anthropic_client) + chat_options = ChatOptions() - result = chat_client._convert_tools_to_anthropic_format(None) + result = chat_client._prepare_tools_for_anthropic(chat_options) assert result is None @@ -373,14 +375,14 @@ def test_convert_tools_to_anthropic_format_none(mock_anthropic_client: MagicMock # Run Options Tests -async def test_create_run_options_basic(mock_anthropic_client: MagicMock) -> None: - """Test _create_run_options with basic ChatOptions.""" +async def test_prepare_options_basic(mock_anthropic_client: MagicMock) -> None: + """Test _prepare_options with basic ChatOptions.""" chat_client = create_test_anthropic_client(mock_anthropic_client) messages = [ChatMessage(role=Role.USER, text="Hello")] chat_options = ChatOptions(max_tokens=100, temperature=0.7) - run_options = chat_client._create_run_options(messages, chat_options) + run_options = chat_client._prepare_options(messages, chat_options) assert run_options["model"] == chat_client.model_id assert run_options["max_tokens"] == 100 @@ -388,8 +390,8 @@ async def test_create_run_options_basic(mock_anthropic_client: MagicMock) -> Non assert "messages" in run_options -async def test_create_run_options_with_system_message(mock_anthropic_client: MagicMock) -> None: - """Test _create_run_options with system message.""" +async def test_prepare_options_with_system_message(mock_anthropic_client: MagicMock) -> None: + """Test _prepare_options with system message.""" chat_client = create_test_anthropic_client(mock_anthropic_client) messages = [ @@ -398,52 +400,52 @@ async def test_create_run_options_with_system_message(mock_anthropic_client: Mag ] chat_options = ChatOptions() - run_options = chat_client._create_run_options(messages, chat_options) + run_options 
= chat_client._prepare_options(messages, chat_options) assert run_options["system"] == "You are helpful." assert len(run_options["messages"]) == 1 # System message not in messages list -async def test_create_run_options_with_tool_choice_auto(mock_anthropic_client: MagicMock) -> None: - """Test _create_run_options with auto tool choice.""" +async def test_prepare_options_with_tool_choice_auto(mock_anthropic_client: MagicMock) -> None: + """Test _prepare_options with auto tool choice.""" chat_client = create_test_anthropic_client(mock_anthropic_client) messages = [ChatMessage(role=Role.USER, text="Hello")] chat_options = ChatOptions(tool_choice="auto") - run_options = chat_client._create_run_options(messages, chat_options) + run_options = chat_client._prepare_options(messages, chat_options) assert run_options["tool_choice"]["type"] == "auto" -async def test_create_run_options_with_tool_choice_required(mock_anthropic_client: MagicMock) -> None: - """Test _create_run_options with required tool choice.""" +async def test_prepare_options_with_tool_choice_required(mock_anthropic_client: MagicMock) -> None: + """Test _prepare_options with required tool choice.""" chat_client = create_test_anthropic_client(mock_anthropic_client) messages = [ChatMessage(role=Role.USER, text="Hello")] # For required with specific function, need to pass as dict chat_options = ChatOptions(tool_choice={"mode": "required", "required_function_name": "get_weather"}) - run_options = chat_client._create_run_options(messages, chat_options) + run_options = chat_client._prepare_options(messages, chat_options) assert run_options["tool_choice"]["type"] == "tool" assert run_options["tool_choice"]["name"] == "get_weather" -async def test_create_run_options_with_tool_choice_none(mock_anthropic_client: MagicMock) -> None: - """Test _create_run_options with none tool choice.""" +async def test_prepare_options_with_tool_choice_none(mock_anthropic_client: MagicMock) -> None: + """Test _prepare_options with none 
tool choice.""" chat_client = create_test_anthropic_client(mock_anthropic_client) messages = [ChatMessage(role=Role.USER, text="Hello")] chat_options = ChatOptions(tool_choice="none") - run_options = chat_client._create_run_options(messages, chat_options) + run_options = chat_client._prepare_options(messages, chat_options) assert run_options["tool_choice"]["type"] == "none" -async def test_create_run_options_with_tools(mock_anthropic_client: MagicMock) -> None: - """Test _create_run_options with tools.""" +async def test_prepare_options_with_tools(mock_anthropic_client: MagicMock) -> None: + """Test _prepare_options with tools.""" chat_client = create_test_anthropic_client(mock_anthropic_client) @ai_function @@ -454,32 +456,32 @@ def get_weather(location: str) -> str: messages = [ChatMessage(role=Role.USER, text="Hello")] chat_options = ChatOptions(tools=[get_weather]) - run_options = chat_client._create_run_options(messages, chat_options) + run_options = chat_client._prepare_options(messages, chat_options) assert "tools" in run_options assert len(run_options["tools"]) == 1 -async def test_create_run_options_with_stop_sequences(mock_anthropic_client: MagicMock) -> None: - """Test _create_run_options with stop sequences.""" +async def test_prepare_options_with_stop_sequences(mock_anthropic_client: MagicMock) -> None: + """Test _prepare_options with stop sequences.""" chat_client = create_test_anthropic_client(mock_anthropic_client) messages = [ChatMessage(role=Role.USER, text="Hello")] chat_options = ChatOptions(stop=["STOP", "END"]) - run_options = chat_client._create_run_options(messages, chat_options) + run_options = chat_client._prepare_options(messages, chat_options) assert run_options["stop_sequences"] == ["STOP", "END"] -async def test_create_run_options_with_top_p(mock_anthropic_client: MagicMock) -> None: - """Test _create_run_options with top_p.""" +async def test_prepare_options_with_top_p(mock_anthropic_client: MagicMock) -> None: + """Test 
_prepare_options with top_p.""" chat_client = create_test_anthropic_client(mock_anthropic_client) messages = [ChatMessage(role=Role.USER, text="Hello")] chat_options = ChatOptions(top_p=0.9) - run_options = chat_client._create_run_options(messages, chat_options) + run_options = chat_client._prepare_options(messages, chat_options) assert run_options["top_p"] == 0.9 @@ -540,41 +542,41 @@ def test_process_message_with_tool_use(mock_anthropic_client: MagicMock) -> None assert response.finish_reason == FinishReason.TOOL_CALLS -def test_parse_message_usage_basic(mock_anthropic_client: MagicMock) -> None: - """Test _parse_message_usage with basic usage.""" +def test_parse_usage_from_anthropic_basic(mock_anthropic_client: MagicMock) -> None: + """Test _parse_usage_from_anthropic with basic usage.""" chat_client = create_test_anthropic_client(mock_anthropic_client) usage = BetaUsage(input_tokens=10, output_tokens=5) - result = chat_client._parse_message_usage(usage) + result = chat_client._parse_usage_from_anthropic(usage) assert result is not None assert result.input_token_count == 10 assert result.output_token_count == 5 -def test_parse_message_usage_none(mock_anthropic_client: MagicMock) -> None: - """Test _parse_message_usage with None usage.""" +def test_parse_usage_from_anthropic_none(mock_anthropic_client: MagicMock) -> None: + """Test _parse_usage_from_anthropic with None usage.""" chat_client = create_test_anthropic_client(mock_anthropic_client) - result = chat_client._parse_message_usage(None) + result = chat_client._parse_usage_from_anthropic(None) assert result is None -def test_parse_message_contents_text(mock_anthropic_client: MagicMock) -> None: - """Test _parse_message_contents with text content.""" +def test_parse_contents_from_anthropic_text(mock_anthropic_client: MagicMock) -> None: + """Test _parse_contents_from_anthropic with text content.""" chat_client = create_test_anthropic_client(mock_anthropic_client) content = [BetaTextBlock(type="text", 
text="Hello!")] - result = chat_client._parse_message_contents(content) + result = chat_client._parse_contents_from_anthropic(content) assert len(result) == 1 assert isinstance(result[0], TextContent) assert result[0].text == "Hello!" -def test_parse_message_contents_tool_use(mock_anthropic_client: MagicMock) -> None: - """Test _parse_message_contents with tool use.""" +def test_parse_contents_from_anthropic_tool_use(mock_anthropic_client: MagicMock) -> None: + """Test _parse_contents_from_anthropic with tool use.""" chat_client = create_test_anthropic_client(mock_anthropic_client) content = [ @@ -585,7 +587,7 @@ def test_parse_message_contents_tool_use(mock_anthropic_client: MagicMock) -> No input={"location": "SF"}, ) ] - result = chat_client._parse_message_contents(content) + result = chat_client._parse_contents_from_anthropic(content) assert len(result) == 1 assert isinstance(result[0], FunctionCallContent) diff --git a/python/packages/azure-ai-search/pyproject.toml b/python/packages/azure-ai-search/pyproject.toml index d4227e3dd8..7c14344ecc 100644 --- a/python/packages/azure-ai-search/pyproject.toml +++ b/python/packages/azure-ai-search/pyproject.toml @@ -4,7 +4,7 @@ description = "Azure AI Search integration for Microsoft Agent Framework." 
authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251216" +version = "1.0.0b251223" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py b/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py index 839687fbaf..50d18bbdc1 100644 --- a/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py +++ b/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py @@ -278,22 +278,13 @@ async def _inner_get_streaming_response( chat_options: ChatOptions, **kwargs: Any, ) -> AsyncIterable[ChatResponseUpdate]: - # Extract necessary state from messages and options - run_options, required_action_results = await self._create_run_options(messages, chat_options, **kwargs) - - # Get the thread ID - thread_id: str | None = ( - chat_options.conversation_id - if chat_options.conversation_id is not None - else run_options.get("conversation_id", self.thread_id) - ) - - # Determine which agent to use and create if needed + # prepare + run_options, required_action_results = await self._prepare_options(messages, chat_options, **kwargs) agent_id = await self._get_agent_id_or_create(run_options) - # Process and yield each update from the stream + # execute and process async for update in self._process_stream( - *(await self._create_agent_stream(thread_id, agent_id, run_options, required_action_results)) + *(await self._create_agent_stream(agent_id, run_options, required_action_results)) ): yield update @@ -342,7 +333,6 @@ async def _get_agent_id_or_create(self, run_options: dict[str, Any] | None = Non async def _create_agent_stream( self, - thread_id: str | None, agent_id: str, run_options: dict[str, Any], required_action_results: list[FunctionResultContent | FunctionApprovalResponseContent] | None, @@ 
-352,14 +342,14 @@ async def _create_agent_stream( Returns: tuple: (stream, final_thread_id) """ + thread_id = run_options.pop("thread_id", None) + # Get any active run for this thread thread_run = await self._get_active_thread_run(thread_id) stream: AsyncAgentRunStream[AsyncAgentEventHandler[Any]] | AsyncAgentEventHandler[Any] handler: AsyncAgentEventHandler[Any] = AsyncAgentEventHandler() - tool_run_id, tool_outputs, tool_approvals = self._convert_required_action_to_tool_output( - required_action_results - ) + tool_run_id, tool_outputs, tool_approvals = self._prepare_tool_outputs_for_azure_ai(required_action_results) if ( thread_run is not None @@ -421,19 +411,11 @@ async def _prepare_thread( # No thread ID was provided, so create a new thread. thread = await self.agents_client.threads.create( - tool_resources=run_options.get("tool_resources"), metadata=run_options.get("metadata") + tool_resources=run_options.get("tool_resources"), + metadata=run_options.get("metadata"), + messages=run_options.get("additional_messages"), ) - thread_id = thread.id - # workaround for: https://github.com/Azure/azure-sdk-for-python/issues/42805 - # this occurs when otel is enabled - # once fixed, in the function above, readd: - # `messages=run_options.pop("additional_messages")` - for msg in run_options.pop("additional_messages", []): - await self.agents_client.messages.create( - thread_id=thread_id, role=msg.role, content=msg.content, metadata=msg.metadata - ) - # and remove until here. 
- return thread_id + return thread.id def _extract_url_citations( self, message_delta_chunk: MessageDeltaChunk, azure_search_tool_calls: list[dict[str, Any]] @@ -611,7 +593,7 @@ async def _process_stream( "submit_tool_outputs", "submit_tool_approval", ]: - function_call_contents = self._create_function_call_contents( + function_call_contents = self._parse_function_calls_from_azure_ai( event_data, response_id ) if function_call_contents: @@ -753,8 +735,8 @@ def _capture_azure_search_tool_calls( except Exception as ex: logger.debug(f"Failed to capture Azure AI Search tool call: {ex}") - def _create_function_call_contents(self, event_data: ThreadRun, response_id: str | None) -> list[Contents]: - """Create function call contents from a tool action event.""" + def _parse_function_calls_from_azure_ai(self, event_data: ThreadRun, response_id: str | None) -> list[Contents]: + """Parse function call contents from an Azure AI tool action event.""" if isinstance(event_data, ThreadRun) and event_data.required_action is not None: if isinstance(event_data.required_action, SubmitToolOutputsAction): return [ @@ -815,117 +797,197 @@ def _prepare_tool_choice(self, chat_options: ChatOptions) -> None: chat_options.tool_choice = chat_tool_mode - async def _create_run_options( + async def _prepare_options( self, messages: MutableSequence[ChatMessage], - chat_options: ChatOptions | None, + chat_options: ChatOptions, **kwargs: Any, ) -> tuple[dict[str, Any], list[FunctionResultContent | FunctionApprovalResponseContent] | None]: - run_options: dict[str, Any] = {**kwargs} - agent_definition = await self._load_agent_definition_if_needed() - if chat_options is not None: - run_options["max_completion_tokens"] = chat_options.max_tokens - if chat_options.model_id is not None: - run_options["model"] = chat_options.model_id - else: - run_options["model"] = self.model_id - run_options["top_p"] = chat_options.top_p - run_options["temperature"] = chat_options.temperature - 
run_options["parallel_tool_calls"] = chat_options.allow_multiple_tool_calls - - tool_definitions: list[ToolDefinition | dict[str, Any]] = [] - - # Add tools from existing agent - if agent_definition is not None: - # Don't include function tools, since they will be passed through chat_options.tools - agent_tools = [tool for tool in agent_definition.tools if not isinstance(tool, FunctionToolDefinition)] - if agent_tools: - tool_definitions.extend(agent_tools) - if agent_definition.tool_resources: - run_options["tool_resources"] = agent_definition.tool_resources - - if chat_options.tool_choice is not None: - if chat_options.tool_choice != "none" and chat_options.tools: - # Add run tools - tool_definitions.extend(await self._prep_tools(chat_options.tools, run_options)) - - # Handle MCP tool resources for approval mode - mcp_tools = [tool for tool in chat_options.tools if isinstance(tool, HostedMCPTool)] - if mcp_tools: - mcp_resources = [] - for mcp_tool in mcp_tools: - server_label = mcp_tool.name.replace(" ", "_") - mcp_resource: dict[str, Any] = {"server_label": server_label} - - # Add headers if they exist - if mcp_tool.headers: - mcp_resource["headers"] = mcp_tool.headers - - if mcp_tool.approval_mode is not None: - match mcp_tool.approval_mode: - case str(): - # Map agent framework approval modes to Azure AI approval modes - approval_mode = ( - "always" if mcp_tool.approval_mode == "always_require" else "never" - ) - mcp_resource["require_approval"] = approval_mode - case _: - if "always_require_approval" in mcp_tool.approval_mode: - mcp_resource["require_approval"] = { - "always": mcp_tool.approval_mode["always_require_approval"] - } - elif "never_require_approval" in mcp_tool.approval_mode: - mcp_resource["require_approval"] = { - "never": mcp_tool.approval_mode["never_require_approval"] - } - - mcp_resources.append(mcp_resource) - - # Add MCP resources to tool_resources - if "tool_resources" not in run_options: - run_options["tool_resources"] = {} - 
run_options["tool_resources"]["mcp"] = mcp_resources - - if chat_options.tool_choice == "none": - run_options["tool_choice"] = AgentsToolChoiceOptionMode.NONE - elif chat_options.tool_choice == "auto": - run_options["tool_choice"] = AgentsToolChoiceOptionMode.AUTO - elif ( - isinstance(chat_options.tool_choice, ToolMode) - and chat_options.tool_choice == "required" - and chat_options.tool_choice.required_function_name is not None - ): - run_options["tool_choice"] = AgentsNamedToolChoice( - type=AgentsNamedToolChoiceType.FUNCTION, - function=FunctionName(name=chat_options.tool_choice.required_function_name), - ) + # Use to_dict with exclusions for properties handled separately + run_options: dict[str, Any] = chat_options.to_dict( + exclude={ + "type", + "instructions", # handled via messages + "tools", # handled separately + "tool_choice", # handled separately + "response_format", # handled separately + "additional_properties", # handled separately + "frequency_penalty", # not supported + "presence_penalty", # not supported + "user", # not supported + "stop", # not supported + "logit_bias", # not supported + "seed", # not supported + "store", # not supported + } + ) - if tool_definitions: - run_options["tools"] = tool_definitions + # Translation between ChatOptions and Azure AI Agents API + translations = { + "model_id": "model", + "allow_multiple_tool_calls": "parallel_tool_calls", + "max_tokens": "max_completion_tokens", + } + for old_key, new_key in translations.items(): + if old_key in run_options and old_key != new_key: + run_options[new_key] = run_options.pop(old_key) + + # model id fallback + if not run_options.get("model"): + run_options["model"] = self.model_id + + # tools and tool_choice + if tool_definitions := await self._prepare_tool_definitions_and_resources( + chat_options, agent_definition, run_options + ): + run_options["tools"] = tool_definitions - if chat_options.response_format is not None: - run_options["response_format"] = 
ResponseFormatJsonSchemaType( - json_schema=ResponseFormatJsonSchema( - name=chat_options.response_format.__name__, - schema=chat_options.response_format.model_json_schema(), - ) + if tool_choice := self._prepare_tool_choice_mode(chat_options): + run_options["tool_choice"] = tool_choice + + # response format + if chat_options.response_format is not None: + run_options["response_format"] = ResponseFormatJsonSchemaType( + json_schema=ResponseFormatJsonSchema( + name=chat_options.response_format.__name__, + schema=chat_options.response_format.model_json_schema(), ) + ) + + # messages + additional_messages, instructions, required_action_results = self._prepare_messages(messages) + if additional_messages: + run_options["additional_messages"] = additional_messages + + # Add instruction from existing agent at the beginning + if ( + agent_definition is not None + and agent_definition.instructions + and agent_definition.instructions not in instructions + ): + instructions.insert(0, agent_definition.instructions) + + if instructions: + run_options["instructions"] = "\n".join(instructions) + + # thread_id resolution (conversation_id takes precedence, then kwargs, then instance default) + run_options["thread_id"] = chat_options.conversation_id or kwargs.get("conversation_id") or self.thread_id + + return run_options, required_action_results + + def _prepare_tool_choice_mode( + self, chat_options: ChatOptions + ) -> AgentsToolChoiceOptionMode | AgentsNamedToolChoice | None: + """Prepare the tool choice mode for Azure AI Agents API.""" + if chat_options.tool_choice is None: + return None + if chat_options.tool_choice == "none": + return AgentsToolChoiceOptionMode.NONE + if chat_options.tool_choice == "auto": + return AgentsToolChoiceOptionMode.AUTO + if ( + isinstance(chat_options.tool_choice, ToolMode) + and chat_options.tool_choice == "required" + and chat_options.tool_choice.required_function_name is not None + ): + return AgentsNamedToolChoice( + 
type=AgentsNamedToolChoiceType.FUNCTION, + function=FunctionName(name=chat_options.tool_choice.required_function_name), + ) + return None + + async def _prepare_tool_definitions_and_resources( + self, + chat_options: ChatOptions, + agent_definition: Agent | None, + run_options: dict[str, Any], + ) -> list[ToolDefinition | dict[str, Any]]: + """Prepare tool definitions and resources for the run options.""" + tool_definitions: list[ToolDefinition | dict[str, Any]] = [] + + # Add tools from existing agent (exclude function tools - passed via chat_options.tools) + if agent_definition is not None: + agent_tools = [tool for tool in agent_definition.tools if not isinstance(tool, FunctionToolDefinition)] + if agent_tools: + tool_definitions.extend(agent_tools) + if agent_definition.tool_resources: + run_options["tool_resources"] = agent_definition.tool_resources + + # Add run tools if tool_choice allows + if chat_options.tool_choice is not None and chat_options.tool_choice != "none" and chat_options.tools: + tool_definitions.extend(await self._prepare_tools_for_azure_ai(chat_options.tools, run_options)) + + # Handle MCP tool resources + mcp_resources = self._prepare_mcp_resources(chat_options.tools) + if mcp_resources: + if "tool_resources" not in run_options: + run_options["tool_resources"] = {} + run_options["tool_resources"]["mcp"] = mcp_resources + return tool_definitions + + def _prepare_mcp_resources( + self, tools: Sequence["ToolProtocol | MutableMapping[str, Any]"] + ) -> list[dict[str, Any]]: + """Prepare MCP tool resources for approval mode configuration.""" + mcp_tools = [tool for tool in tools if isinstance(tool, HostedMCPTool)] + if not mcp_tools: + return [] + + mcp_resources: list[dict[str, Any]] = [] + for mcp_tool in mcp_tools: + server_label = mcp_tool.name.replace(" ", "_") + mcp_resource: dict[str, Any] = {"server_label": server_label} + + if mcp_tool.headers: + mcp_resource["headers"] = mcp_tool.headers + + if mcp_tool.approval_mode is not None: + 
match mcp_tool.approval_mode: + case str(): + # Map agent framework approval modes to Azure AI approval modes + approval_mode = "always" if mcp_tool.approval_mode == "always_require" else "never" + mcp_resource["require_approval"] = approval_mode + case _: + if "always_require_approval" in mcp_tool.approval_mode: + mcp_resource["require_approval"] = { + "always": mcp_tool.approval_mode["always_require_approval"] + } + elif "never_require_approval" in mcp_tool.approval_mode: + mcp_resource["require_approval"] = { + "never": mcp_tool.approval_mode["never_require_approval"] + } + + mcp_resources.append(mcp_resource) + + return mcp_resources + + def _prepare_messages( + self, messages: MutableSequence[ChatMessage] + ) -> tuple[ + list[ThreadMessageOptions] | None, + list[str], + list[FunctionResultContent | FunctionApprovalResponseContent] | None, + ]: + """Prepare messages for Azure AI Agents API. + + System/developer messages are turned into instructions, since there is no such message roles in Azure AI. + All other messages are added 1:1, treating assistant messages as agent messages + and everything else as user messages. + + Returns: + Tuple of (additional_messages, instructions, required_action_results) + """ instructions: list[str] = [] required_action_results: list[FunctionResultContent | FunctionApprovalResponseContent] | None = None - additional_messages: list[ThreadMessageOptions] | None = None - # System/developer messages are turned into instructions, since there is no such message roles in Azure AI. - # All other messages are added 1:1, treating assistant messages as agent messages - # and everything else as user messages. 
for chat_message in messages: if chat_message.role.value in ["system", "developer"]: for text_content in [content for content in chat_message.contents if isinstance(content, TextContent)]: instructions.append(text_content.text) - continue message_contents: list[MessageInputContentBlock] = [] @@ -942,7 +1004,7 @@ async def _create_run_options( elif isinstance(content.raw_representation, MessageInputContentBlock): message_contents.append(content.raw_representation) - if len(message_contents) > 0: + if message_contents: if additional_messages is None: additional_messages = [] additional_messages.append( @@ -952,26 +1014,12 @@ async def _create_run_options( ) ) - if additional_messages is not None: - run_options["additional_messages"] = additional_messages - - # Add instruction from existing agent at the beginning - if ( - agent_definition is not None - and agent_definition.instructions - and agent_definition.instructions not in instructions - ): - instructions.insert(0, agent_definition.instructions) - - if len(instructions) > 0: - run_options["instructions"] = "".join(instructions) - - return run_options, required_action_results + return additional_messages, instructions, required_action_results - async def _prep_tools( + async def _prepare_tools_for_azure_ai( self, tools: Sequence["ToolProtocol | MutableMapping[str, Any]"], run_options: dict[str, Any] | None = None ) -> list[ToolDefinition | dict[str, Any]]: - """Prepare tool definitions for the run options.""" + """Prepare tool definitions for the Azure AI Agents API.""" tool_definitions: list[ToolDefinition | dict[str, Any]] = [] for tool in tools: match tool: @@ -1044,10 +1092,11 @@ async def _prep_tools( raise ServiceInitializationError(f"Unsupported tool type: {type(tool)}") return tool_definitions - def _convert_required_action_to_tool_output( + def _prepare_tool_outputs_for_azure_ai( self, required_action_results: list[FunctionResultContent | FunctionApprovalResponseContent] | None, ) -> tuple[str | None, 
list[ToolOutput] | None, list[ToolApproval] | None]: + """Prepare function results and approvals for submission to the Azure AI API.""" run_id: str | None = None tool_outputs: list[ToolOutput] | None = None tool_approvals: list[ToolApproval] | None = None diff --git a/python/packages/azure-ai/agent_framework_azure_ai/_client.py b/python/packages/azure-ai/agent_framework_azure_ai/_client.py index f4d5328f03..e10fc19068 100644 --- a/python/packages/azure-ai/agent_framework_azure_ai/_client.py +++ b/python/packages/azure-ai/agent_framework_azure_ai/_client.py @@ -28,10 +28,6 @@ ) from azure.core.credentials_async import AsyncTokenCredential from azure.core.exceptions import ResourceNotFoundError -from openai.types.responses.parsed_response import ( - ParsedResponse, -) -from openai.types.responses.response import Response as OpenAIResponse from pydantic import BaseModel, ValidationError from ._shared import AzureAISettings @@ -41,6 +37,11 @@ else: from typing_extensions import Self # pragma: no cover +if sys.version_info >= (3, 12): + from typing import override # type: ignore # pragma: no cover +else: + from typing_extensions import override # type: ignore[import] # pragma: no cover + logger = get_logger("agent_framework.azure") @@ -368,53 +369,21 @@ async def _close_client_if_needed(self) -> None: if self._should_close_client: await self.project_client.close() - def _prepare_input(self, messages: MutableSequence[ChatMessage]) -> tuple[list[ChatMessage], str | None]: - """Prepare input from messages and convert system/developer messages to instructions.""" - result: list[ChatMessage] = [] - instructions_list: list[str] = [] - instructions: str | None = None - - # System/developer messages are turned into instructions, since there is no such message roles in Azure AI. 
- for message in messages: - if message.role.value in ["system", "developer"]: - for text_content in [content for content in message.contents if isinstance(content, TextContent)]: - instructions_list.append(text_content.text) - else: - result.append(message) - - if len(instructions_list) > 0: - instructions = "".join(instructions_list) - - return result, instructions - - async def prepare_options( + @override + async def _prepare_options( self, messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any, ) -> dict[str, Any]: """Take ChatOptions and create the specific options for Azure AI.""" - prepared_messages, instructions = self._prepare_input(messages) - run_options = await super().prepare_options(prepared_messages, chat_options, **kwargs) - + prepared_messages, instructions = self._prepare_messages_for_azure_ai(messages) + run_options = await super()._prepare_options(prepared_messages, chat_options, **kwargs) if not self._is_application_endpoint: # Application-scoped response APIs do not support "agent" property. 
agent_reference = await self._get_agent_reference_or_create(run_options, instructions) run_options["extra_body"] = {"agent": agent_reference} - conversation_id = chat_options.conversation_id or self.conversation_id - - # Handle different conversation ID formats - if conversation_id: - if conversation_id.startswith("resp_"): - # For response IDs, set previous_response_id and remove conversation property - run_options.pop("conversation", None) - run_options["previous_response_id"] = conversation_id - elif conversation_id.startswith("conv_"): - # For conversation IDs, set conversation and remove previous_response_id property - run_options.pop("previous_response_id", None) - run_options["conversation"] = conversation_id - # Remove properties that are not supported on request level # but were configured on agent level exclude = ["model", "tools", "response_format", "temperature", "top_p"] @@ -424,7 +393,33 @@ async def prepare_options( return run_options - async def initialize_client(self) -> None: + @override + def _get_current_conversation_id(self, chat_options: ChatOptions, **kwargs: Any) -> str | None: + """Get the current conversation ID from chat options or kwargs.""" + return chat_options.conversation_id or kwargs.get("conversation_id") or self.conversation_id + + def _prepare_messages_for_azure_ai( + self, messages: MutableSequence[ChatMessage] + ) -> tuple[list[ChatMessage], str | None]: + """Prepare input from messages and convert system/developer messages to instructions.""" + result: list[ChatMessage] = [] + instructions_list: list[str] = [] + instructions: str | None = None + + # System/developer messages are turned into instructions, since there is no such message roles in Azure AI. 
+ for message in messages: + if message.role.value in ["system", "developer"]: + for text_content in [content for content in message.contents if isinstance(content, TextContent)]: + instructions_list.append(text_content.text) + else: + result.append(message) + + if len(instructions_list) > 0: + instructions = "".join(instructions_list) + + return result, instructions + + async def _initialize_client(self) -> None: """Initialize OpenAI client.""" self.client = self.project_client.get_openai_client() # type: ignore @@ -442,7 +437,8 @@ def _update_agent_name_and_description(self, agent_name: str | None, description if description and not self.agent_description: self.agent_description = description - def get_mcp_tool(self, tool: HostedMCPTool) -> Any: + @staticmethod + def _prepare_mcp_tool(tool: HostedMCPTool) -> MCPTool: # type: ignore[override] """Get MCP tool from HostedMCPTool.""" mcp = MCPTool(server_label=tool.name.replace(" ", "_"), server_url=str(tool.url)) @@ -460,17 +456,3 @@ def get_mcp_tool(self, tool: HostedMCPTool) -> Any: mcp["require_approval"] = {"never": {"tool_names": list(never_require_approvals)}} return mcp - - def get_conversation_id( - self, response: OpenAIResponse | ParsedResponse[BaseModel], store: bool | None - ) -> str | None: - """Get the conversation ID from the response if store is True.""" - if store is False: - return None - # If conversation ID exists, it means that we operate with conversation - # so we use conversation ID as input and output. - if response.conversation and response.conversation.id: - return response.conversation.id - # If conversation ID doesn't exist, we operate with responses - # so we use response ID as input and output. 
- return response.id diff --git a/python/packages/azure-ai/pyproject.toml b/python/packages/azure-ai/pyproject.toml index 685172e2e4..67d89eb81b 100644 --- a/python/packages/azure-ai/pyproject.toml +++ b/python/packages/azure-ai/pyproject.toml @@ -4,7 +4,7 @@ description = "Azure AI Foundry integration for Microsoft Agent Framework." authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251216" +version = "1.0.0b251223" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" @@ -83,6 +83,13 @@ include = "../../shared_tasks.toml" mypy = "mypy --config-file $POE_ROOT/pyproject.toml agent_framework_azure_ai" test = "pytest --cov=agent_framework_azure_ai --cov-report=term-missing:skip-covered tests" +[tool.poe.tasks.integration-tests] +cmd = """ +pytest --import-mode=importlib +-n logical --dist loadfile --dist worksteal +tests +""" + [build-system] requires = ["flit-core >= 3.11,<4.0"] build-backend = "flit_core.buildapi" diff --git a/python/packages/azure-ai/tests/test_azure_ai_agent_client.py b/python/packages/azure-ai/tests/test_azure_ai_agent_client.py index f1b4dafb63..134a3586b0 100644 --- a/python/packages/azure-ai/tests/test_azure_ai_agent_client.py +++ b/python/packages/azure-ai/tests/test_azure_ai_agent_client.py @@ -367,33 +367,33 @@ async def test_azure_ai_chat_client_get_agent_id_or_create_missing_model( await chat_client._get_agent_id_or_create() # type: ignore -async def test_azure_ai_chat_client_create_run_options_basic(mock_agents_client: MagicMock) -> None: - """Test _create_run_options with basic ChatOptions.""" +async def test_azure_ai_chat_client_prepare_options_basic(mock_agents_client: MagicMock) -> None: + """Test _prepare_options with basic ChatOptions.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client) messages = [ChatMessage(role=Role.USER, 
text="Hello")] chat_options = ChatOptions(max_tokens=100, temperature=0.7) - run_options, tool_results = await chat_client._create_run_options(messages, chat_options) # type: ignore + run_options, tool_results = await chat_client._prepare_options(messages, chat_options) # type: ignore assert run_options is not None assert tool_results is None -async def test_azure_ai_chat_client_create_run_options_no_chat_options(mock_agents_client: MagicMock) -> None: - """Test _create_run_options with no ChatOptions.""" +async def test_azure_ai_chat_client_prepare_options_no_chat_options(mock_agents_client: MagicMock) -> None: + """Test _prepare_options with default ChatOptions.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client) messages = [ChatMessage(role=Role.USER, text="Hello")] - run_options, tool_results = await chat_client._create_run_options(messages, None) # type: ignore + run_options, tool_results = await chat_client._prepare_options(messages, ChatOptions()) # type: ignore assert run_options is not None assert tool_results is None -async def test_azure_ai_chat_client_create_run_options_with_image_content(mock_agents_client: MagicMock) -> None: - """Test _create_run_options with image content.""" +async def test_azure_ai_chat_client_prepare_options_with_image_content(mock_agents_client: MagicMock) -> None: + """Test _prepare_options with image content.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") @@ -403,7 +403,7 @@ async def test_azure_ai_chat_client_create_run_options_with_image_content(mock_a image_content = UriContent(uri="https://example.com/image.jpg", media_type="image/jpeg") messages = [ChatMessage(role=Role.USER, contents=[image_content])] - run_options, _ = await chat_client._create_run_options(messages, None) # type: ignore + run_options, _ = await chat_client._prepare_options(messages, ChatOptions()) # type: ignore assert "additional_messages" in run_options assert 
len(run_options["additional_messages"]) == 1 @@ -412,11 +412,11 @@ async def test_azure_ai_chat_client_create_run_options_with_image_content(mock_a assert len(message.content) == 1 -def test_azure_ai_chat_client_convert_function_results_to_tool_output_none(mock_agents_client: MagicMock) -> None: - """Test _convert_required_action_to_tool_output with None input.""" +def test_azure_ai_chat_client_prepare_tool_outputs_for_azure_ai_none(mock_agents_client: MagicMock) -> None: + """Test _prepare_tool_outputs_for_azure_ai with None input.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client) - run_id, tool_outputs, tool_approvals = chat_client._convert_required_action_to_tool_output(None) # type: ignore + run_id, tool_outputs, tool_approvals = chat_client._prepare_tool_outputs_for_azure_ai(None) # type: ignore assert run_id is None assert tool_outputs is None @@ -484,8 +484,8 @@ def test_azure_ai_chat_client_update_agent_name_and_description_with_none_input( assert chat_client.agent_description is None -async def test_azure_ai_chat_client_create_run_options_with_messages(mock_agents_client: MagicMock) -> None: - """Test _create_run_options with different message types.""" +async def test_azure_ai_chat_client_prepare_options_with_messages(mock_agents_client: MagicMock) -> None: + """Test _prepare_options with different message types.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client) # Test with system message (becomes instruction) @@ -494,7 +494,7 @@ async def test_azure_ai_chat_client_create_run_options_with_messages(mock_agents ChatMessage(role=Role.USER, text="Hello"), ] - run_options, _ = await chat_client._create_run_options(messages, None) # type: ignore + run_options, _ = await chat_client._prepare_options(messages, ChatOptions()) # type: ignore assert "instructions" in run_options assert "You are a helpful assistant" in run_options["instructions"] @@ -565,8 +565,8 @@ async def 
test_azure_ai_chat_client_prepare_thread_cancels_active_run(mock_agent mock_agents_client.runs.cancel.assert_called_once_with("test-thread", "run_123") -def test_azure_ai_chat_client_create_function_call_contents_basic(mock_agents_client: MagicMock) -> None: - """Test _create_function_call_contents with basic function call.""" +def test_azure_ai_chat_client_parse_function_calls_from_azure_ai_basic(mock_agents_client: MagicMock) -> None: + """Test _parse_function_calls_from_azure_ai with basic function call.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client) mock_tool_call = MagicMock(spec=RequiredFunctionToolCall) @@ -580,7 +580,7 @@ def test_azure_ai_chat_client_create_function_call_contents_basic(mock_agents_cl mock_event_data = MagicMock(spec=ThreadRun) mock_event_data.required_action = mock_submit_action - result = chat_client._create_function_call_contents(mock_event_data, "response_123") # type: ignore + result = chat_client._parse_function_calls_from_azure_ai(mock_event_data, "response_123") # type: ignore assert len(result) == 1 assert isinstance(result[0], FunctionCallContent) @@ -588,22 +588,24 @@ def test_azure_ai_chat_client_create_function_call_contents_basic(mock_agents_cl assert result[0].call_id == '["response_123", "call_123"]' -def test_azure_ai_chat_client_create_function_call_contents_no_submit_action(mock_agents_client: MagicMock) -> None: - """Test _create_function_call_contents when required_action is not SubmitToolOutputsAction.""" +def test_azure_ai_chat_client_parse_function_calls_from_azure_ai_no_submit_action( + mock_agents_client: MagicMock, +) -> None: + """Test _parse_function_calls_from_azure_ai when required_action is not SubmitToolOutputsAction.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client) mock_event_data = MagicMock(spec=ThreadRun) mock_event_data.required_action = MagicMock() - result = chat_client._create_function_call_contents(mock_event_data, "response_123") # type: ignore + result = 
chat_client._parse_function_calls_from_azure_ai(mock_event_data, "response_123") # type: ignore assert result == [] -def test_azure_ai_chat_client_create_function_call_contents_non_function_tool_call( +def test_azure_ai_chat_client_parse_function_calls_from_azure_ai_non_function_tool_call( mock_agents_client: MagicMock, ) -> None: - """Test _create_function_call_contents with non-function tool call.""" + """Test _parse_function_calls_from_azure_ai with non-function tool call.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client) mock_tool_call = MagicMock() @@ -614,37 +616,37 @@ def test_azure_ai_chat_client_create_function_call_contents_non_function_tool_ca mock_event_data = MagicMock(spec=ThreadRun) mock_event_data.required_action = mock_submit_action - result = chat_client._create_function_call_contents(mock_event_data, "response_123") # type: ignore + result = chat_client._parse_function_calls_from_azure_ai(mock_event_data, "response_123") # type: ignore assert result == [] -async def test_azure_ai_chat_client_create_run_options_with_none_tool_choice( +async def test_azure_ai_chat_client_prepare_options_with_none_tool_choice( mock_agents_client: MagicMock, ) -> None: - """Test _create_run_options with tool_choice set to 'none'.""" + """Test _prepare_options with tool_choice set to 'none'.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client) chat_options = ChatOptions() chat_options.tool_choice = "none" - run_options, _ = await chat_client._create_run_options([], chat_options) # type: ignore + run_options, _ = await chat_client._prepare_options([], chat_options) # type: ignore from azure.ai.agents.models import AgentsToolChoiceOptionMode assert run_options["tool_choice"] == AgentsToolChoiceOptionMode.NONE -async def test_azure_ai_chat_client_create_run_options_with_auto_tool_choice( +async def test_azure_ai_chat_client_prepare_options_with_auto_tool_choice( mock_agents_client: MagicMock, ) -> None: - """Test _create_run_options 
with tool_choice set to 'auto'.""" + """Test _prepare_options with tool_choice set to 'auto'.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client) chat_options = ChatOptions() chat_options.tool_choice = "auto" - run_options, _ = await chat_client._create_run_options([], chat_options) # type: ignore + run_options, _ = await chat_client._prepare_options([], chat_options) # type: ignore from azure.ai.agents.models import AgentsToolChoiceOptionMode @@ -669,10 +671,10 @@ async def test_azure_ai_chat_client_prepare_tool_choice_none_string( assert chat_options.tool_choice == ToolMode.NONE.mode -async def test_azure_ai_chat_client_create_run_options_tool_choice_required_specific_function( +async def test_azure_ai_chat_client_prepare_options_tool_choice_required_specific_function( mock_agents_client: MagicMock, ) -> None: - """Test _create_run_options with ToolMode.REQUIRED specifying a specific function name.""" + """Test _prepare_options with ToolMode.REQUIRED specifying a specific function name.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client) required_tool_mode = ToolMode.REQUIRED("specific_function_name") @@ -682,7 +684,7 @@ async def test_azure_ai_chat_client_create_run_options_tool_choice_required_spec chat_options = ChatOptions(tools=[dict_tool], tool_choice=required_tool_mode) messages = [ChatMessage(role=Role.USER, text="Hello")] - run_options, _ = await chat_client._create_run_options(messages, chat_options) # type: ignore + run_options, _ = await chat_client._prepare_options(messages, chat_options) # type: ignore # Verify tool_choice is set to the specific named function assert "tool_choice" in run_options @@ -692,10 +694,10 @@ async def test_azure_ai_chat_client_create_run_options_tool_choice_required_spec assert tool_choice.function.name == "specific_function_name" # type: ignore -async def test_azure_ai_chat_client_create_run_options_with_response_format( +async def 
test_azure_ai_chat_client_prepare_options_with_response_format( mock_agents_client: MagicMock, ) -> None: - """Test _create_run_options with response_format configured.""" + """Test _prepare_options with response_format configured.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client) class TestResponseModel(BaseModel): @@ -704,7 +706,7 @@ class TestResponseModel(BaseModel): chat_options = ChatOptions() chat_options.response_format = TestResponseModel - run_options, _ = await chat_client._create_run_options([], chat_options) # type: ignore + run_options, _ = await chat_client._prepare_options([], chat_options) # type: ignore assert "response_format" in run_options response_format = run_options["response_format"] @@ -720,8 +722,8 @@ def test_azure_ai_chat_client_service_url_method(mock_agents_client: MagicMock) assert url == "https://test-endpoint.com/" -async def test_azure_ai_chat_client_prep_tools_ai_function(mock_agents_client: MagicMock) -> None: - """Test _prep_tools with AIFunction tool.""" +async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_ai_function(mock_agents_client: MagicMock) -> None: + """Test _prepare_tools_for_azure_ai with AIFunction tool.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") @@ -729,28 +731,28 @@ async def test_azure_ai_chat_client_prep_tools_ai_function(mock_agents_client: M mock_ai_function = MagicMock(spec=AIFunction) mock_ai_function.to_json_schema_spec.return_value = {"type": "function", "function": {"name": "test_function"}} - result = await chat_client._prep_tools([mock_ai_function]) # type: ignore + result = await chat_client._prepare_tools_for_azure_ai([mock_ai_function]) # type: ignore assert len(result) == 1 assert result[0] == {"type": "function", "function": {"name": "test_function"}} mock_ai_function.to_json_schema_spec.assert_called_once() -async def test_azure_ai_chat_client_prep_tools_code_interpreter(mock_agents_client: MagicMock) -> None: - 
"""Test _prep_tools with HostedCodeInterpreterTool.""" +async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_code_interpreter(mock_agents_client: MagicMock) -> None: + """Test _prepare_tools_for_azure_ai with HostedCodeInterpreterTool.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") code_interpreter_tool = HostedCodeInterpreterTool() - result = await chat_client._prep_tools([code_interpreter_tool]) # type: ignore + result = await chat_client._prepare_tools_for_azure_ai([code_interpreter_tool]) # type: ignore assert len(result) == 1 assert isinstance(result[0], CodeInterpreterToolDefinition) -async def test_azure_ai_chat_client_prep_tools_mcp_tool(mock_agents_client: MagicMock) -> None: - """Test _prep_tools with HostedMCPTool.""" +async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_mcp_tool(mock_agents_client: MagicMock) -> None: + """Test _prepare_tools_for_azure_ai with HostedMCPTool.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") @@ -762,7 +764,7 @@ async def test_azure_ai_chat_client_prep_tools_mcp_tool(mock_agents_client: Magi mock_mcp_tool.definitions = [{"type": "mcp", "name": "test_mcp"}] mock_mcp_tool_class.return_value = mock_mcp_tool - result = await chat_client._prep_tools([mcp_tool]) # type: ignore + result = await chat_client._prepare_tools_for_azure_ai([mcp_tool]) # type: ignore assert len(result) == 1 assert result[0] == {"type": "mcp", "name": "test_mcp"} @@ -774,8 +776,8 @@ async def test_azure_ai_chat_client_prep_tools_mcp_tool(mock_agents_client: Magi assert set(call_args["allowed_tools"]) == {"tool1", "tool2"} -async def test_azure_ai_chat_client_create_run_options_mcp_never_require(mock_agents_client: MagicMock) -> None: - """Test _create_run_options with HostedMCPTool having never_require approval mode.""" +async def test_azure_ai_chat_client_prepare_options_mcp_never_require(mock_agents_client: MagicMock) -> None: + """Test 
_prepare_options with HostedMCPTool having never_require approval mode.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client) mcp_tool = HostedMCPTool(name="Test MCP Tool", url="https://example.com/mcp", approval_mode="never_require") @@ -784,12 +786,12 @@ async def test_azure_ai_chat_client_create_run_options_mcp_never_require(mock_ag chat_options = ChatOptions(tools=[mcp_tool], tool_choice="auto") with patch("agent_framework_azure_ai._chat_client.McpTool") as mock_mcp_tool_class: - # Mock _prep_tools to avoid actual tool preparation + # Mock _prepare_tools_for_azure_ai to avoid actual tool preparation mock_mcp_tool_instance = MagicMock() mock_mcp_tool_instance.definitions = [{"type": "mcp", "name": "test_mcp"}] mock_mcp_tool_class.return_value = mock_mcp_tool_instance - run_options, _ = await chat_client._create_run_options(messages, chat_options) # type: ignore + run_options, _ = await chat_client._prepare_options(messages, chat_options) # type: ignore # Verify tool_resources is created with correct MCP approval structure assert "tool_resources" in run_options, ( @@ -803,8 +805,8 @@ async def test_azure_ai_chat_client_create_run_options_mcp_never_require(mock_ag assert mcp_resource["require_approval"] == "never" -async def test_azure_ai_chat_client_create_run_options_mcp_with_headers(mock_agents_client: MagicMock) -> None: - """Test _create_run_options with HostedMCPTool having headers.""" +async def test_azure_ai_chat_client_prepare_options_mcp_with_headers(mock_agents_client: MagicMock) -> None: + """Test _prepare_options with HostedMCPTool having headers.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client) # Test with headers @@ -817,12 +819,12 @@ async def test_azure_ai_chat_client_create_run_options_mcp_with_headers(mock_age chat_options = ChatOptions(tools=[mcp_tool], tool_choice="auto") with patch("agent_framework_azure_ai._chat_client.McpTool") as mock_mcp_tool_class: - # Mock _prep_tools to avoid actual tool preparation 
+ # Mock _prepare_tools_for_azure_ai to avoid actual tool preparation mock_mcp_tool_instance = MagicMock() mock_mcp_tool_instance.definitions = [{"type": "mcp", "name": "test_mcp"}] mock_mcp_tool_class.return_value = mock_mcp_tool_instance - run_options, _ = await chat_client._create_run_options(messages, chat_options) # type: ignore + run_options, _ = await chat_client._prepare_options(messages, chat_options) # type: ignore # Verify tool_resources is created with headers assert "tool_resources" in run_options @@ -835,8 +837,10 @@ async def test_azure_ai_chat_client_create_run_options_mcp_with_headers(mock_age assert mcp_resource["headers"] == headers -async def test_azure_ai_chat_client_prep_tools_web_search_bing_grounding(mock_agents_client: MagicMock) -> None: - """Test _prep_tools with HostedWebSearchTool using Bing Grounding.""" +async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_web_search_bing_grounding( + mock_agents_client: MagicMock, +) -> None: + """Test _prepare_tools_for_azure_ai with HostedWebSearchTool using Bing Grounding.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") @@ -856,7 +860,7 @@ async def test_azure_ai_chat_client_prep_tools_web_search_bing_grounding(mock_ag mock_bing_tool.definitions = [{"type": "bing_grounding"}] mock_bing_grounding.return_value = mock_bing_tool - result = await chat_client._prep_tools([web_search_tool]) # type: ignore + result = await chat_client._prepare_tools_for_azure_ai([web_search_tool]) # type: ignore assert len(result) == 1 assert result[0] == {"type": "bing_grounding"} @@ -868,10 +872,10 @@ async def test_azure_ai_chat_client_prep_tools_web_search_bing_grounding(mock_ag assert "connection_id" in call_args -async def test_azure_ai_chat_client_prep_tools_web_search_bing_grounding_with_connection_id( +async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_web_search_bing_grounding_with_connection_id( mock_agents_client: MagicMock, ) -> None: - 
"""Test _prep_tools with HostedWebSearchTool using Bing Grounding with connection_id (no HTTP call).""" + """Test _prepare_tools_... with HostedWebSearchTool using Bing Grounding with connection_id (no HTTP call).""" chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") @@ -888,15 +892,17 @@ async def test_azure_ai_chat_client_prep_tools_web_search_bing_grounding_with_co mock_bing_tool.definitions = [{"type": "bing_grounding"}] mock_bing_grounding.return_value = mock_bing_tool - result = await chat_client._prep_tools([web_search_tool]) # type: ignore + result = await chat_client._prepare_tools_for_azure_ai([web_search_tool]) # type: ignore assert len(result) == 1 assert result[0] == {"type": "bing_grounding"} mock_bing_grounding.assert_called_once_with(connection_id="direct-connection-id", count=3) -async def test_azure_ai_chat_client_prep_tools_web_search_custom_bing(mock_agents_client: MagicMock) -> None: - """Test _prep_tools with HostedWebSearchTool using Custom Bing Search.""" +async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_web_search_custom_bing( + mock_agents_client: MagicMock, +) -> None: + """Test _prepare_tools_for_azure_ai with HostedWebSearchTool using Custom Bing Search.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") @@ -914,16 +920,16 @@ async def test_azure_ai_chat_client_prep_tools_web_search_custom_bing(mock_agent mock_custom_tool.definitions = [{"type": "bing_custom_search"}] mock_custom_bing.return_value = mock_custom_tool - result = await chat_client._prep_tools([web_search_tool]) # type: ignore + result = await chat_client._prepare_tools_for_azure_ai([web_search_tool]) # type: ignore assert len(result) == 1 assert result[0] == {"type": "bing_custom_search"} -async def test_azure_ai_chat_client_prep_tools_file_search_with_vector_stores( +async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_file_search_with_vector_stores( 
mock_agents_client: MagicMock, ) -> None: - """Test _prep_tools with HostedFileSearchTool using vector stores.""" + """Test _prepare_tools_for_azure_ai with HostedFileSearchTool using vector stores.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") @@ -938,7 +944,7 @@ async def test_azure_ai_chat_client_prep_tools_file_search_with_vector_stores( mock_file_search.return_value = mock_file_tool run_options = {} - result = await chat_client._prep_tools([file_search_tool], run_options) # type: ignore + result = await chat_client._prepare_tools_for_azure_ai([file_search_tool], run_options) # type: ignore assert len(result) == 1 assert result[0] == {"type": "file_search"} @@ -973,7 +979,7 @@ async def test_azure_ai_chat_client_create_agent_stream_submit_tool_approvals( with patch("azure.ai.agents.models.AsyncAgentEventHandler", return_value=mock_handler): stream, final_thread_id = await chat_client._create_agent_stream( # type: ignore - "test-thread", "test-agent", {}, [approval_response] + "test-agent", {"thread_id": "test-thread"}, [approval_response] ) # Verify the approvals path was taken @@ -987,26 +993,26 @@ async def test_azure_ai_chat_client_create_agent_stream_submit_tool_approvals( assert call_args["tool_approvals"][0].approve is True -async def test_azure_ai_chat_client_prep_tools_dict_tool(mock_agents_client: MagicMock) -> None: - """Test _prep_tools with dictionary tool definition.""" +async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_dict_tool(mock_agents_client: MagicMock) -> None: + """Test _prepare_tools_for_azure_ai with dictionary tool definition.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") dict_tool = {"type": "custom_tool", "config": {"param": "value"}} - result = await chat_client._prep_tools([dict_tool]) # type: ignore + result = await chat_client._prepare_tools_for_azure_ai([dict_tool]) # type: ignore assert len(result) == 1 assert result[0] == 
dict_tool -async def test_azure_ai_chat_client_prep_tools_unsupported_tool(mock_agents_client: MagicMock) -> None: - """Test _prep_tools with unsupported tool type.""" +async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_unsupported_tool(mock_agents_client: MagicMock) -> None: + """Test _prepare_tools_for_azure_ai with unsupported tool type.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") unsupported_tool = "not_a_tool" with pytest.raises(ServiceInitializationError, match="Unsupported tool type: "): - await chat_client._prep_tools([unsupported_tool]) # type: ignore + await chat_client._prepare_tools_for_azure_ai([unsupported_tool]) # type: ignore async def test_azure_ai_chat_client_get_active_thread_run_with_active_run(mock_agents_client: MagicMock) -> None: @@ -1072,16 +1078,16 @@ async def test_azure_ai_chat_client_service_url(mock_agents_client: MagicMock) - assert result == "https://test-endpoint.com/" -async def test_azure_ai_chat_client_convert_required_action_to_tool_output_function_result( +async def test_azure_ai_chat_client_prepare_tool_outputs_for_azure_ai_function_result( mock_agents_client: MagicMock, ) -> None: - """Test _convert_required_action_to_tool_output with FunctionResultContent.""" + """Test _prepare_tool_outputs_for_azure_ai with FunctionResultContent.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Test with simple result function_result = FunctionResultContent(call_id='["run_123", "call_456"]', result="Simple result") - run_id, tool_outputs, tool_approvals = chat_client._convert_required_action_to_tool_output([function_result]) # type: ignore + run_id, tool_outputs, tool_approvals = chat_client._prepare_tool_outputs_for_azure_ai([function_result]) # type: ignore assert run_id == "run_123" assert tool_approvals is None @@ -1092,7 +1098,7 @@ async def test_azure_ai_chat_client_convert_required_action_to_tool_output_funct async def 
test_azure_ai_chat_client_convert_required_action_invalid_call_id(mock_agents_client: MagicMock) -> None: - """Test _convert_required_action_to_tool_output with invalid call_id format.""" + """Test _prepare_tool_outputs_for_azure_ai with invalid call_id format.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") @@ -1100,19 +1106,19 @@ async def test_azure_ai_chat_client_convert_required_action_invalid_call_id(mock function_result = FunctionResultContent(call_id="invalid_json", result="result") with pytest.raises(json.JSONDecodeError): - chat_client._convert_required_action_to_tool_output([function_result]) # type: ignore + chat_client._prepare_tool_outputs_for_azure_ai([function_result]) # type: ignore async def test_azure_ai_chat_client_convert_required_action_invalid_structure( mock_agents_client: MagicMock, ) -> None: - """Test _convert_required_action_to_tool_output with invalid call_id structure.""" + """Test _prepare_tool_outputs_for_azure_ai with invalid call_id structure.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Valid JSON but invalid structure (missing second element) function_result = FunctionResultContent(call_id='["run_123"]', result="result") - run_id, tool_outputs, tool_approvals = chat_client._convert_required_action_to_tool_output([function_result]) # type: ignore + run_id, tool_outputs, tool_approvals = chat_client._prepare_tool_outputs_for_azure_ai([function_result]) # type: ignore # Should return None values when structure is invalid assert run_id is None @@ -1123,7 +1129,7 @@ async def test_azure_ai_chat_client_convert_required_action_invalid_structure( async def test_azure_ai_chat_client_convert_required_action_serde_model_results( mock_agents_client: MagicMock, ) -> None: - """Test _convert_required_action_to_tool_output with BaseModel results.""" + """Test _prepare_tool_outputs_for_azure_ai with BaseModel results.""" class 
MockResult(SerializationMixin): def __init__(self, name: str, value: int): @@ -1136,7 +1142,7 @@ def __init__(self, name: str, value: int): mock_result = MockResult(name="test", value=42) function_result = FunctionResultContent(call_id='["run_123", "call_456"]', result=mock_result) - run_id, tool_outputs, tool_approvals = chat_client._convert_required_action_to_tool_output([function_result]) # type: ignore + run_id, tool_outputs, tool_approvals = chat_client._prepare_tool_outputs_for_azure_ai([function_result]) # type: ignore assert run_id == "run_123" assert tool_approvals is None @@ -1151,7 +1157,7 @@ def __init__(self, name: str, value: int): async def test_azure_ai_chat_client_convert_required_action_multiple_results( mock_agents_client: MagicMock, ) -> None: - """Test _convert_required_action_to_tool_output with multiple results.""" + """Test _prepare_tool_outputs_for_azure_ai with multiple results.""" class MockResult(SerializationMixin): def __init__(self, data: str): @@ -1164,7 +1170,7 @@ def __init__(self, data: str): results_list = [mock_basemodel, {"key": "value"}, "string_result"] function_result = FunctionResultContent(call_id='["run_123", "call_456"]', result=results_list) - run_id, tool_outputs, tool_approvals = chat_client._convert_required_action_to_tool_output([function_result]) # type: ignore + run_id, tool_outputs, tool_approvals = chat_client._prepare_tool_outputs_for_azure_ai([function_result]) # type: ignore assert run_id == "run_123" assert tool_outputs is not None @@ -1184,7 +1190,7 @@ def __init__(self, data: str): async def test_azure_ai_chat_client_convert_required_action_approval_response( mock_agents_client: MagicMock, ) -> None: - """Test _convert_required_action_to_tool_output with FunctionApprovalResponseContent.""" + """Test _prepare_tool_outputs_for_azure_ai with FunctionApprovalResponseContent.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Test with approval response - need to 
provide required fields @@ -1194,7 +1200,7 @@ async def test_azure_ai_chat_client_convert_required_action_approval_response( approved=True, ) - run_id, tool_outputs, tool_approvals = chat_client._convert_required_action_to_tool_output([approval_response]) # type: ignore + run_id, tool_outputs, tool_approvals = chat_client._prepare_tool_outputs_for_azure_ai([approval_response]) # type: ignore assert run_id == "run_123" assert tool_outputs is None @@ -1204,10 +1210,10 @@ async def test_azure_ai_chat_client_convert_required_action_approval_response( assert tool_approvals[0].approve is True -async def test_azure_ai_chat_client_create_function_call_contents_approval_request( +async def test_azure_ai_chat_client_parse_function_calls_from_azure_ai_approval_request( mock_agents_client: MagicMock, ) -> None: - """Test _create_function_call_contents with approval action.""" + """Test _parse_function_calls_from_azure_ai with approval action.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Mock SubmitToolApprovalAction with RequiredMcpToolCall @@ -1222,7 +1228,7 @@ async def test_azure_ai_chat_client_create_function_call_contents_approval_reque mock_event_data = MagicMock(spec=ThreadRun) mock_event_data.required_action = mock_approval_action - result = chat_client._create_function_call_contents(mock_event_data, "response_123") # type: ignore + result = chat_client._parse_function_calls_from_azure_ai(mock_event_data, "response_123") # type: ignore assert len(result) == 1 assert isinstance(result[0], FunctionApprovalRequestContent) @@ -1312,7 +1318,7 @@ async def test_azure_ai_chat_client_create_agent_stream_submit_tool_outputs( with patch("azure.ai.agents.models.AsyncAgentEventHandler", return_value=mock_handler): stream, final_thread_id = await chat_client._create_agent_stream( # type: ignore - thread_id="test-thread", agent_id="test-agent", run_options={}, required_action_results=[function_result] + agent_id="test-agent", 
run_options={"thread_id": "test-thread"}, required_action_results=[function_result] ) # Should call submit_tool_outputs_stream since we have matching run ID diff --git a/python/packages/azure-ai/tests/test_azure_ai_client.py b/python/packages/azure-ai/tests/test_azure_ai_client.py index 079b93d8c8..028e8fbdb8 100644 --- a/python/packages/azure-ai/tests/test_azure_ai_client.py +++ b/python/packages/azure-ai/tests/test_azure_ai_client.py @@ -249,10 +249,10 @@ async def test_azure_ai_client_get_agent_reference_missing_model( await client._get_agent_reference_or_create({}, None) # type: ignore -async def test_azure_ai_client_prepare_input_with_system_messages( +async def test_azure_ai_client_prepare_messages_for_azure_ai_with_system_messages( mock_project_client: MagicMock, ) -> None: - """Test _prepare_input converts system/developer messages to instructions.""" + """Test _prepare_messages_for_azure_ai converts system/developer messages to instructions.""" client = create_test_azure_ai_client(mock_project_client) messages = [ @@ -261,7 +261,7 @@ async def test_azure_ai_client_prepare_input_with_system_messages( ChatMessage(role=Role.ASSISTANT, contents=[TextContent(text="System response")]), ] - result_messages, instructions = client._prepare_input(messages) # type: ignore + result_messages, instructions = client._prepare_messages_for_azure_ai(messages) # type: ignore assert len(result_messages) == 2 assert result_messages[0].role == Role.USER @@ -269,10 +269,10 @@ async def test_azure_ai_client_prepare_input_with_system_messages( assert instructions == "You are a helpful assistant." 
-async def test_azure_ai_client_prepare_input_no_system_messages( +async def test_azure_ai_client_prepare_messages_for_azure_ai_no_system_messages( mock_project_client: MagicMock, ) -> None: - """Test _prepare_input with no system/developer messages.""" + """Test _prepare_messages_for_azure_ai with no system/developer messages.""" client = create_test_azure_ai_client(mock_project_client) messages = [ @@ -280,7 +280,7 @@ async def test_azure_ai_client_prepare_input_no_system_messages( ChatMessage(role=Role.ASSISTANT, contents=[TextContent(text="Hi there!")]), ] - result_messages, instructions = client._prepare_input(messages) # type: ignore + result_messages, instructions = client._prepare_messages_for_azure_ai(messages) # type: ignore assert len(result_messages) == 2 assert instructions is None @@ -294,14 +294,14 @@ async def test_azure_ai_client_prepare_options_basic(mock_project_client: MagicM chat_options = ChatOptions() with ( - patch.object(client.__class__.__bases__[0], "prepare_options", return_value={"model": "test-model"}), + patch.object(client.__class__.__bases__[0], "_prepare_options", return_value={"model": "test-model"}), patch.object( client, "_get_agent_reference_or_create", return_value={"name": "test-agent", "version": "1.0", "type": "agent_reference"}, ), ): - run_options = await client.prepare_options(messages, chat_options) + run_options = await client._prepare_options(messages, chat_options) assert "extra_body" in run_options assert run_options["extra_body"]["agent"]["name"] == "test-agent" @@ -329,14 +329,14 @@ async def test_azure_ai_client_prepare_options_with_application_endpoint( chat_options = ChatOptions() with ( - patch.object(client.__class__.__bases__[0], "prepare_options", return_value={"model": "test-model"}), + patch.object(client.__class__.__bases__[0], "_prepare_options", return_value={"model": "test-model"}), patch.object( client, "_get_agent_reference_or_create", return_value={"name": "test-agent", "version": "1", "type": 
"agent_reference"}, ), ): - run_options = await client.prepare_options(messages, chat_options) + run_options = await client._prepare_options(messages, chat_options) if expects_agent: assert "extra_body" in run_options @@ -369,14 +369,14 @@ async def test_azure_ai_client_prepare_options_with_application_project_client( chat_options = ChatOptions() with ( - patch.object(client.__class__.__bases__[0], "prepare_options", return_value={"model": "test-model"}), + patch.object(client.__class__.__bases__[0], "_prepare_options", return_value={"model": "test-model"}), patch.object( client, "_get_agent_reference_or_create", return_value={"name": "test-agent", "version": "1", "type": "agent_reference"}, ), ): - run_options = await client.prepare_options(messages, chat_options) + run_options = await client._prepare_options(messages, chat_options) if expects_agent: assert "extra_body" in run_options @@ -386,13 +386,13 @@ async def test_azure_ai_client_prepare_options_with_application_project_client( async def test_azure_ai_client_initialize_client(mock_project_client: MagicMock) -> None: - """Test initialize_client method.""" + """Test _initialize_client method.""" client = create_test_azure_ai_client(mock_project_client) mock_openai_client = MagicMock() mock_project_client.get_openai_client = MagicMock(return_value=mock_openai_client) - await client.initialize_client() + await client._initialize_client() assert client.client is mock_openai_client mock_project_client.get_openai_client.assert_called_once() @@ -727,7 +727,7 @@ async def test_azure_ai_client_prepare_options_excludes_response_format( with ( patch.object( client.__class__.__bases__[0], - "prepare_options", + "_prepare_options", return_value={"model": "test-model", "response_format": ResponseFormatModel}, ), patch.object( @@ -736,7 +736,7 @@ async def test_azure_ai_client_prepare_options_excludes_response_format( return_value={"name": "test-agent", "version": "1.0", "type": "agent_reference"}, ), ): - run_options = 
await client.prepare_options(messages, chat_options) + run_options = await client._prepare_options(messages, chat_options) # response_format should be excluded from final run options assert "response_format" not in run_options @@ -745,94 +745,8 @@ async def test_azure_ai_client_prepare_options_excludes_response_format( assert run_options["extra_body"]["agent"]["name"] == "test-agent" -async def test_azure_ai_client_prepare_options_with_resp_conversation_id( - mock_project_client: MagicMock, -) -> None: - """Test prepare_options with conversation ID starting with 'resp_'.""" - client = create_test_azure_ai_client(mock_project_client, agent_name="test-agent", agent_version="1.0") - - messages = [ChatMessage(role=Role.USER, contents=[TextContent(text="Hello")])] - chat_options = ChatOptions(conversation_id="resp_12345") - - with ( - patch.object( - client.__class__.__bases__[0], - "prepare_options", - return_value={"model": "test-model", "previous_response_id": "old_value", "conversation": "old_conv"}, - ), - patch.object( - client, - "_get_agent_reference_or_create", - return_value={"name": "test-agent", "version": "1.0", "type": "agent_reference"}, - ), - ): - run_options = await client.prepare_options(messages, chat_options) - - # Should set previous_response_id and remove conversation property - assert run_options["previous_response_id"] == "resp_12345" - assert "conversation" not in run_options - - -async def test_azure_ai_client_prepare_options_with_conv_conversation_id( - mock_project_client: MagicMock, -) -> None: - """Test prepare_options with conversation ID starting with 'conv_'.""" - client = create_test_azure_ai_client(mock_project_client, agent_name="test-agent", agent_version="1.0") - - messages = [ChatMessage(role=Role.USER, contents=[TextContent(text="Hello")])] - chat_options = ChatOptions(conversation_id="conv_67890") - - with ( - patch.object( - client.__class__.__bases__[0], - "prepare_options", - return_value={"model": "test-model", 
"previous_response_id": "old_value", "conversation": "old_conv"}, - ), - patch.object( - client, - "_get_agent_reference_or_create", - return_value={"name": "test-agent", "version": "1.0", "type": "agent_reference"}, - ), - ): - run_options = await client.prepare_options(messages, chat_options) - - # Should set conversation and remove previous_response_id property - assert run_options["conversation"] == "conv_67890" - assert "previous_response_id" not in run_options - - -async def test_azure_ai_client_prepare_options_with_client_conversation_id( - mock_project_client: MagicMock, -) -> None: - """Test prepare_options using client's default conversation ID when chat options don't have one.""" - client = create_test_azure_ai_client( - mock_project_client, agent_name="test-agent", agent_version="1.0", conversation_id="resp_client_default" - ) - - messages = [ChatMessage(role=Role.USER, contents=[TextContent(text="Hello")])] - chat_options = ChatOptions() # No conversation_id specified - - with ( - patch.object( - client.__class__.__bases__[0], - "prepare_options", - return_value={"model": "test-model", "previous_response_id": "old_value", "conversation": "old_conv"}, - ), - patch.object( - client, - "_get_agent_reference_or_create", - return_value={"name": "test-agent", "version": "1.0", "type": "agent_reference"}, - ), - ): - run_options = await client.prepare_options(messages, chat_options) - - # Should use client's default conversation_id and set previous_response_id - assert run_options["previous_response_id"] == "resp_client_default" - assert "conversation" not in run_options - - def test_get_conversation_id_with_store_true_and_conversation_id() -> None: - """Test get_conversation_id returns conversation ID when store is True and conversation exists.""" + """Test _get_conversation_id returns conversation ID when store is True and conversation exists.""" client = create_test_azure_ai_client(MagicMock()) # Mock OpenAI response with conversation @@ -842,13 +756,13 @@ 
def test_get_conversation_id_with_store_true_and_conversation_id() -> None: mock_conversation.id = "conv_67890" mock_response.conversation = mock_conversation - result = client.get_conversation_id(mock_response, store=True) + result = client._get_conversation_id(mock_response, store=True) assert result == "conv_67890" def test_get_conversation_id_with_store_true_and_no_conversation() -> None: - """Test get_conversation_id returns response ID when store is True and no conversation exists.""" + """Test _get_conversation_id returns response ID when store is True and no conversation exists.""" client = create_test_azure_ai_client(MagicMock()) # Mock OpenAI response without conversation @@ -856,13 +770,13 @@ def test_get_conversation_id_with_store_true_and_no_conversation() -> None: mock_response.id = "resp_12345" mock_response.conversation = None - result = client.get_conversation_id(mock_response, store=True) + result = client._get_conversation_id(mock_response, store=True) assert result == "resp_12345" def test_get_conversation_id_with_store_true_and_empty_conversation_id() -> None: - """Test get_conversation_id returns response ID when store is True and conversation ID is empty.""" + """Test _get_conversation_id returns response ID when store is True and conversation ID is empty.""" client = create_test_azure_ai_client(MagicMock()) # Mock OpenAI response with conversation but empty ID @@ -872,13 +786,13 @@ def test_get_conversation_id_with_store_true_and_empty_conversation_id() -> None mock_conversation.id = "" mock_response.conversation = mock_conversation - result = client.get_conversation_id(mock_response, store=True) + result = client._get_conversation_id(mock_response, store=True) assert result == "resp_12345" def test_get_conversation_id_with_store_false() -> None: - """Test get_conversation_id returns None when store is False.""" + """Test _get_conversation_id returns None when store is False.""" client = create_test_azure_ai_client(MagicMock()) # Mock OpenAI 
response with conversation @@ -888,13 +802,13 @@ def test_get_conversation_id_with_store_false() -> None: mock_conversation.id = "conv_67890" mock_response.conversation = mock_conversation - result = client.get_conversation_id(mock_response, store=False) + result = client._get_conversation_id(mock_response, store=False) assert result is None def test_get_conversation_id_with_parsed_response_and_store_true() -> None: - """Test get_conversation_id works with ParsedResponse when store is True.""" + """Test _get_conversation_id works with ParsedResponse when store is True.""" client = create_test_azure_ai_client(MagicMock()) # Mock ParsedResponse with conversation @@ -904,13 +818,13 @@ def test_get_conversation_id_with_parsed_response_and_store_true() -> None: mock_conversation.id = "conv_parsed_67890" mock_response.conversation = mock_conversation - result = client.get_conversation_id(mock_response, store=True) + result = client._get_conversation_id(mock_response, store=True) assert result == "conv_parsed_67890" def test_get_conversation_id_with_parsed_response_no_conversation() -> None: - """Test get_conversation_id returns response ID with ParsedResponse when no conversation exists.""" + """Test _get_conversation_id returns response ID with ParsedResponse when no conversation exists.""" client = create_test_azure_ai_client(MagicMock()) # Mock ParsedResponse without conversation @@ -918,7 +832,7 @@ def test_get_conversation_id_with_parsed_response_no_conversation() -> None: mock_response.id = "resp_parsed_12345" mock_response.conversation = None - result = client.get_conversation_id(mock_response, store=True) + result = client._get_conversation_id(mock_response, store=True) assert result == "resp_parsed_12345" diff --git a/python/packages/azurefunctions/pyproject.toml b/python/packages/azurefunctions/pyproject.toml index ccc00ffc73..d5bd833229 100644 --- a/python/packages/azurefunctions/pyproject.toml +++ b/python/packages/azurefunctions/pyproject.toml @@ -4,7 +4,7 
@@ description = "Azure Functions integration for Microsoft Agent Framework." authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251216" +version = "1.0.0b251223" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/bedrock/LICENSE b/python/packages/bedrock/LICENSE new file mode 100644 index 0000000000..79656060de --- /dev/null +++ b/python/packages/bedrock/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE \ No newline at end of file diff --git a/python/packages/bedrock/README.md b/python/packages/bedrock/README.md new file mode 100644 index 0000000000..6bcd9ff53a --- /dev/null +++ b/python/packages/bedrock/README.md @@ -0,0 +1,19 @@ +# Get Started with Microsoft Agent Framework Bedrock + +Install the provider package: + +```bash +pip install agent-framework-bedrock --pre +``` + +## Bedrock Integration + +The Bedrock integration enables Microsoft Agent Framework applications to call Amazon Bedrock models with familiar chat abstractions, including tool/function calling when you attach tools through `ChatOptions`. + +### Basic Usage Example + +See the [Bedrock sample script](samples/bedrock_sample.py) for a runnable end-to-end script that: + +- Loads credentials from the `BEDROCK_*` environment variables +- Instantiates `BedrockChatClient` +- Sends a simple conversation turn and prints the response diff --git a/python/packages/bedrock/agent_framework_bedrock/__init__.py b/python/packages/bedrock/agent_framework_bedrock/__init__.py new file mode 100644 index 0000000000..84f3e5946c --- /dev/null +++ b/python/packages/bedrock/agent_framework_bedrock/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import importlib.metadata + +from ._chat_client import BedrockChatClient + +try: + __version__ = importlib.metadata.version(__name__) +except importlib.metadata.PackageNotFoundError: + __version__ = "0.0.0" + +__all__ = [ + "BedrockChatClient", + "__version__", +] diff --git a/python/packages/bedrock/agent_framework_bedrock/_chat_client.py b/python/packages/bedrock/agent_framework_bedrock/_chat_client.py new file mode 100644 index 0000000000..c1e404834f --- /dev/null +++ b/python/packages/bedrock/agent_framework_bedrock/_chat_client.py @@ -0,0 +1,527 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio +import json +from collections import deque +from collections.abc import AsyncIterable, MutableMapping, MutableSequence, Sequence +from typing import Any, ClassVar +from uuid import uuid4 + +from agent_framework import ( + AGENT_FRAMEWORK_USER_AGENT, + AIFunction, + BaseChatClient, + ChatMessage, + ChatOptions, + ChatResponse, + ChatResponseUpdate, + Contents, + FinishReason, + FunctionCallContent, + FunctionResultContent, + Role, + TextContent, + ToolProtocol, + UsageContent, + UsageDetails, + get_logger, + prepare_function_call_results, + use_chat_middleware, + use_function_invocation, +) +from agent_framework._pydantic import AFBaseSettings +from agent_framework.exceptions import ServiceInitializationError, ServiceInvalidResponseError +from agent_framework.observability import use_instrumentation +from boto3.session import Session as Boto3Session +from botocore.client import BaseClient +from botocore.config import Config as BotoConfig +from pydantic import SecretStr, ValidationError + +logger = get_logger("agent_framework.bedrock") + +DEFAULT_REGION = "us-east-1" +DEFAULT_MAX_TOKENS = 1024 + +ROLE_MAP: dict[Role, str] = { + Role.USER: "user", + Role.ASSISTANT: "assistant", + Role.SYSTEM: "user", + Role.TOOL: "user", +} + +FINISH_REASON_MAP: dict[str, FinishReason] = { + "end_turn": FinishReason.STOP, + "stop_sequence": FinishReason.STOP, + 
"max_tokens": FinishReason.LENGTH, + "length": FinishReason.LENGTH, + "content_filtered": FinishReason.CONTENT_FILTER, + "tool_use": FinishReason.TOOL_CALLS, +} + + +class BedrockSettings(AFBaseSettings): + """Bedrock configuration settings pulled from environment variables or .env files.""" + + env_prefix: ClassVar[str] = "BEDROCK_" + + region: str = DEFAULT_REGION + chat_model_id: str | None = None + access_key: SecretStr | None = None + secret_key: SecretStr | None = None + session_token: SecretStr | None = None + + +@use_function_invocation +@use_instrumentation +@use_chat_middleware +class BedrockChatClient(BaseChatClient): + """Async chat client for Amazon Bedrock's Converse API.""" + + OTEL_PROVIDER_NAME: ClassVar[str] = "aws.bedrock" # type: ignore[reportIncompatibleVariableOverride, misc] + + def __init__( + self, + *, + region: str | None = None, + model_id: str | None = None, + access_key: str | None = None, + secret_key: str | None = None, + session_token: str | None = None, + client: BaseClient | None = None, + boto3_session: Boto3Session | None = None, + env_file_path: str | None = None, + env_file_encoding: str | None = None, + **kwargs: Any, + ) -> None: + """Create a Bedrock chat client and load AWS credentials. + + Args: + region: Region to send Bedrock requests to; falls back to BEDROCK_REGION. + model_id: Default model identifier; falls back to BEDROCK_CHAT_MODEL_ID. + access_key: Optional AWS access key for manual credential injection. + secret_key: Optional AWS secret key paired with ``access_key``. + session_token: Optional AWS session token for temporary credentials. + client: Preconfigured Bedrock runtime client; when omitted a boto3 session is created. + boto3_session: Custom boto3 session used to build the runtime client if provided. + env_file_path: Optional .env file path used by ``BedrockSettings`` to load defaults. + env_file_encoding: Encoding for the optional .env file. + kwargs: Additional arguments forwarded to ``BaseChatClient``. 
+ """ + try: + settings = BedrockSettings( + region=region, + chat_model_id=model_id, + access_key=access_key, # type: ignore[arg-type] + secret_key=secret_key, # type: ignore[arg-type] + session_token=session_token, # type: ignore[arg-type] + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + ) + except ValidationError as ex: + raise ServiceInitializationError("Failed to initialize Bedrock settings.", ex) from ex + + if client is None: + session = boto3_session or self._create_session(settings) + client = session.client( + "bedrock-runtime", + region_name=settings.region, + config=BotoConfig(user_agent_extra=AGENT_FRAMEWORK_USER_AGENT), + ) + + super().__init__(**kwargs) + self._bedrock_client = client + self.model_id = settings.chat_model_id + self.region = settings.region + + @staticmethod + def _create_session(settings: BedrockSettings) -> Boto3Session: + session_kwargs: dict[str, Any] = {"region_name": settings.region or DEFAULT_REGION} + if settings.access_key and settings.secret_key: + session_kwargs["aws_access_key_id"] = settings.access_key.get_secret_value() + session_kwargs["aws_secret_access_key"] = settings.secret_key.get_secret_value() + if settings.session_token: + session_kwargs["aws_session_token"] = settings.session_token.get_secret_value() + return Boto3Session(**session_kwargs) + + async def _inner_get_response( + self, + *, + messages: MutableSequence[ChatMessage], + chat_options: ChatOptions, + **kwargs: Any, + ) -> ChatResponse: + request = self._build_converse_request(messages, chat_options, **kwargs) + raw_response = await asyncio.to_thread(self._bedrock_client.converse, **request) + return self._process_converse_response(raw_response) + + async def _inner_get_streaming_response( + self, + *, + messages: MutableSequence[ChatMessage], + chat_options: ChatOptions, + **kwargs: Any, + ) -> AsyncIterable[ChatResponseUpdate]: + response = await self._inner_get_response(messages=messages, chat_options=chat_options, **kwargs) + 
contents = list(response.messages[0].contents if response.messages else []) + if response.usage_details: + contents.append(UsageContent(details=response.usage_details)) + yield ChatResponseUpdate( + response_id=response.response_id, + contents=contents, + model_id=response.model_id, + finish_reason=response.finish_reason, + raw_representation=response.raw_representation, + ) + + def _build_converse_request( + self, + messages: MutableSequence[ChatMessage], + chat_options: ChatOptions, + **kwargs: Any, + ) -> dict[str, Any]: + model_id = chat_options.model_id or self.model_id + if not model_id: + raise ServiceInitializationError( + "Bedrock model_id is required. Set via chat options or BEDROCK_CHAT_MODEL_ID environment variable." + ) + + system_prompts, conversation = self._prepare_bedrock_messages(messages) + if not conversation: + raise ServiceInitializationError("At least one non-system message is required for Bedrock requests.") + + payload: dict[str, Any] = { + "modelId": model_id, + "messages": conversation, + } + if system_prompts: + payload["system"] = system_prompts + + inference_config: dict[str, Any] = {} + inference_config["maxTokens"] = ( + chat_options.max_tokens if chat_options.max_tokens is not None else DEFAULT_MAX_TOKENS + ) + if chat_options.temperature is not None: + inference_config["temperature"] = chat_options.temperature + if chat_options.top_p is not None: + inference_config["topP"] = chat_options.top_p + if chat_options.stop is not None: + inference_config["stopSequences"] = chat_options.stop + if inference_config: + payload["inferenceConfig"] = inference_config + + tool_config = self._convert_tools_to_bedrock_config(chat_options.tools) + if tool_choice := self._convert_tool_choice(chat_options.tool_choice): + if tool_config is None: + tool_config = {} + tool_config["toolChoice"] = tool_choice + if tool_config: + payload["toolConfig"] = tool_config + + if chat_options.additional_properties: + 
payload.update(chat_options.additional_properties) + if kwargs: + payload.update(kwargs) + return payload + + def _prepare_bedrock_messages( + self, messages: Sequence[ChatMessage] + ) -> tuple[list[dict[str, str]], list[dict[str, Any]]]: + prompts: list[dict[str, str]] = [] + conversation: list[dict[str, Any]] = [] + pending_tool_use_ids: deque[str] = deque() + for message in messages: + if message.role == Role.SYSTEM: + text_value = message.text + if text_value: + prompts.append({"text": text_value}) + continue + + content_blocks = self._convert_message_to_content_blocks(message) + if not content_blocks: + continue + + role = ROLE_MAP.get(message.role, "user") + if role == "assistant": + pending_tool_use_ids = deque( + block["toolUse"]["toolUseId"] + for block in content_blocks + if isinstance(block, MutableMapping) and "toolUse" in block + ) + elif message.role == Role.TOOL: + content_blocks = self._align_tool_results_with_pending(content_blocks, pending_tool_use_ids) + pending_tool_use_ids.clear() + if not content_blocks: + continue + else: + pending_tool_use_ids.clear() + + conversation.append({"role": role, "content": content_blocks}) + + return prompts, conversation + + def _align_tool_results_with_pending( + self, content_blocks: list[dict[str, Any]], pending_tool_use_ids: deque[str] + ) -> list[dict[str, Any]]: + if not content_blocks: + return content_blocks + if not pending_tool_use_ids: + # No pending tool calls; drop toolResult blocks to avoid Bedrock validation errors + return [ + block for block in content_blocks if not (isinstance(block, MutableMapping) and "toolResult" in block) + ] + + aligned_blocks: list[dict[str, Any]] = [] + pending = deque(pending_tool_use_ids) + for block in content_blocks: + if not isinstance(block, MutableMapping): + aligned_blocks.append(block) + continue + tool_result = block.get("toolResult") + if not tool_result: + aligned_blocks.append(block) + continue + if not pending: + logger.debug("Dropping extra tool result 
block due to missing pending tool uses: %s", block) + continue + tool_use_id = tool_result.get("toolUseId") + if tool_use_id: + try: + pending.remove(tool_use_id) + except ValueError: + logger.debug("Tool result references unknown toolUseId '%s'. Dropping block.", tool_use_id) + continue + else: + tool_result["toolUseId"] = pending.popleft() + aligned_blocks.append(block) + + return aligned_blocks + + def _convert_message_to_content_blocks(self, message: ChatMessage) -> list[dict[str, Any]]: + blocks: list[dict[str, Any]] = [] + for content in message.contents: + block = self._convert_content_to_bedrock_block(content) + if block is None: + logger.debug("Skipping unsupported content type for Bedrock: %s", type(content)) + continue + blocks.append(block) + return blocks + + def _convert_content_to_bedrock_block(self, content: Contents) -> dict[str, Any] | None: + if isinstance(content, TextContent): + return {"text": content.text} + if isinstance(content, FunctionCallContent): + arguments = content.parse_arguments() or {} + return { + "toolUse": { + "toolUseId": content.call_id or self._generate_tool_call_id(), + "name": content.name, + "input": arguments, + } + } + if isinstance(content, FunctionResultContent): + tool_result_block = { + "toolResult": { + "toolUseId": content.call_id, + "content": self._convert_tool_result_to_blocks(content.result), + "status": "error" if content.exception else "success", + } + } + if content.exception: + tool_result = tool_result_block["toolResult"] + existing_content = tool_result.get("content") + content_list: list[dict[str, Any]] + if isinstance(existing_content, list): + content_list = existing_content + else: + content_list = [] + tool_result["content"] = content_list + content_list.append({"text": str(content.exception)}) + return tool_result_block + return None + + def _convert_tool_result_to_blocks(self, result: Any) -> list[dict[str, Any]]: + prepared_result = prepare_function_call_results(result) + try: + parsed_result = 
json.loads(prepared_result) + except json.JSONDecodeError: + return [{"text": prepared_result}] + + return self._convert_prepared_tool_result_to_blocks(parsed_result) + + def _convert_prepared_tool_result_to_blocks(self, value: Any) -> list[dict[str, Any]]: + if isinstance(value, list): + blocks: list[dict[str, Any]] = [] + for item in value: + blocks.extend(self._convert_prepared_tool_result_to_blocks(item)) + return blocks or [{"text": ""}] + return [self._normalize_tool_result_value(value)] + + def _normalize_tool_result_value(self, value: Any) -> dict[str, Any]: + if isinstance(value, dict): + return {"json": value} + if isinstance(value, (list, tuple)): + return {"json": list(value)} + if isinstance(value, str): + return {"text": value} + if isinstance(value, (int, float, bool)) or value is None: + return {"json": value} + if isinstance(value, TextContent) and getattr(value, "text", None): + return {"text": value.text} + if hasattr(value, "to_dict"): + try: + return {"json": value.to_dict()} # type: ignore[call-arg] + except Exception: # pragma: no cover - defensive + return {"text": str(value)} + return {"text": str(value)} + + def _convert_tools_to_bedrock_config( + self, tools: list[ToolProtocol | MutableMapping[str, Any]] | None + ) -> dict[str, Any] | None: + if not tools: + return None + converted: list[dict[str, Any]] = [] + for tool in tools: + if isinstance(tool, MutableMapping): + converted.append(dict(tool)) + continue + if isinstance(tool, AIFunction): + converted.append({ + "toolSpec": { + "name": tool.name, + "description": tool.description or "", + "inputSchema": {"json": tool.parameters()}, + } + }) + continue + logger.debug("Ignoring unsupported tool type for Bedrock: %s", type(tool)) + return {"tools": converted} if converted else None + + def _convert_tool_choice(self, tool_choice: Any) -> dict[str, Any] | None: + if not tool_choice: + return None + mode = tool_choice.mode if hasattr(tool_choice, "mode") else str(tool_choice) + required_name 
= getattr(tool_choice, "required_function_name", None) + match mode: + case "auto": + return {"auto": {}} + case "none": + return {"none": {}} + case "required": + if required_name: + return {"tool": {"name": required_name}} + return {"any": {}} + case _: + logger.debug("Unsupported tool choice mode for Bedrock: %s", mode) + return None + + @staticmethod + def _generate_tool_call_id() -> str: + return f"tool-call-{uuid4().hex}" + + def _process_converse_response(self, response: dict[str, Any]) -> ChatResponse: + output = response.get("output", {}) + message = output.get("message", {}) + content_blocks = message.get("content", []) or [] + contents = self._parse_message_contents(content_blocks) + chat_message = ChatMessage(role=Role.ASSISTANT, contents=contents, raw_representation=message) + usage_details = self._parse_usage(response.get("usage") or output.get("usage")) + finish_reason = self._map_finish_reason(output.get("completionReason") or response.get("stopReason")) + response_id = response.get("responseId") or message.get("id") + model_id = response.get("modelId") or output.get("modelId") or self.model_id + return ChatResponse( + response_id=response_id, + messages=[chat_message], + usage_details=usage_details, + model_id=model_id, + finish_reason=finish_reason, + raw_representation=response, + ) + + def _parse_usage(self, usage: dict[str, Any] | None) -> UsageDetails | None: + if not usage: + return None + details = UsageDetails() + if (input_tokens := usage.get("inputTokens")) is not None: + details.input_token_count = input_tokens + if (output_tokens := usage.get("outputTokens")) is not None: + details.output_token_count = output_tokens + if (total_tokens := usage.get("totalTokens")) is not None: + details.additional_counts["bedrock.total_tokens"] = total_tokens + return details + + def _parse_message_contents(self, content_blocks: Sequence[MutableMapping[str, Any]]) -> list[Any]: + contents: list[Any] = [] + for block in content_blocks: + if text_value := 
block.get("text"): + contents.append(TextContent(text=text_value, raw_representation=block)) + continue + if (json_value := block.get("json")) is not None: + contents.append(TextContent(text=json.dumps(json_value), raw_representation=block)) + continue + tool_use = block.get("toolUse") + if isinstance(tool_use, MutableMapping): + tool_name = tool_use.get("name") + if not tool_name: + raise ServiceInvalidResponseError("Bedrock response missing required tool name in toolUse block.") + contents.append( + FunctionCallContent( + call_id=tool_use.get("toolUseId") or self._generate_tool_call_id(), + name=tool_name, + arguments=tool_use.get("input"), + raw_representation=block, + ) + ) + continue + tool_result = block.get("toolResult") + if isinstance(tool_result, MutableMapping): + status = (tool_result.get("status") or "success").lower() + exception = None + if status not in {"success", "ok"}: + exception = RuntimeError(f"Bedrock tool result status: {status}") + result_value = self._convert_bedrock_tool_result_to_value(tool_result.get("content")) + contents.append( + FunctionResultContent( + call_id=tool_result.get("toolUseId") or self._generate_tool_call_id(), + result=result_value, + exception=exception, + raw_representation=block, + ) + ) + continue + logger.debug("Ignoring unsupported Bedrock content block: %s", block) + return contents + + def _map_finish_reason(self, reason: str | None) -> FinishReason | None: + if not reason: + return None + return FINISH_REASON_MAP.get(reason.lower()) + + def service_url(self) -> str: + """Returns the service URL for the Bedrock runtime in the configured AWS region. + + Returns: + str: The Bedrock runtime service URL. 
+ """ + return f"https://bedrock-runtime.{self.region}.amazonaws.com" + + def _convert_bedrock_tool_result_to_value(self, content: Any) -> Any: + if not content: + return None + if isinstance(content, Sequence) and not isinstance(content, (str, bytes, bytearray)): + values: list[Any] = [] + for item in content: + if isinstance(item, MutableMapping): + if (text_value := item.get("text")) is not None: + values.append(text_value) + continue + if "json" in item: + values.append(item["json"]) + continue + values.append(item) + return values[0] if len(values) == 1 else values + if isinstance(content, MutableMapping): + if (text_value := content.get("text")) is not None: + return text_value + if "json" in content: + return content["json"] + return content diff --git a/python/packages/bedrock/pyproject.toml b/python/packages/bedrock/pyproject.toml new file mode 100644 index 0000000000..ea6cffda42 --- /dev/null +++ b/python/packages/bedrock/pyproject.toml @@ -0,0 +1,90 @@ +[project] +name = "agent-framework-bedrock" +description = "Amazon Bedrock integration for Microsoft Agent Framework." 
+authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] +readme = "README.md" +requires-python = ">=3.10" +version = "1.0.0b251120" +license-files = ["LICENSE"] +urls.homepage = "https://aka.ms/agent-framework" +urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" +urls.release_notes = "https://github.com/microsoft/agent-framework/releases?q=tag%3Apython-1&expanded=true" +urls.issues = "https://github.com/microsoft/agent-framework/issues" +classifiers = [ + "License :: OSI Approved :: MIT License", + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", + "Typing :: Typed", +] +dependencies = [ + "agent-framework-core", + "boto3>=1.35.0,<2.0.0", + "botocore>=1.35.0,<2.0.0", +] + + +[tool.uv] +prerelease = "if-necessary-or-explicit" +environments = [ + "sys_platform == 'darwin'", + "sys_platform == 'linux'", + "sys_platform == 'win32'" +] + +[tool.uv-dynamic-versioning] +fallback-version = "0.0.0" + +[tool.pytest.ini_options] +testpaths = 'tests' +addopts = "-ra -q -r fEX" +asyncio_mode = "auto" +asyncio_default_fixture_loop_scope = "function" +filterwarnings = [] +timeout = 120 + +[tool.ruff] +extend = "../../pyproject.toml" + +[tool.coverage.run] +omit = [ + "**/__init__.py" +] + +[tool.pyright] +extends = "../../pyproject.toml" + +[tool.mypy] +plugins = ['pydantic.mypy'] +strict = true +python_version = "3.10" +ignore_missing_imports = true +disallow_untyped_defs = true +no_implicit_optional = true +check_untyped_defs = true +warn_return_any = true +show_error_codes = true +warn_unused_ignores = false +disallow_incomplete_defs = true +disallow_untyped_decorators = true + +[tool.bandit] +targets = ["agent_framework_bedrock"] +exclude_dirs = ["tests"] + 
# Copyright (c) Microsoft. All rights reserved.
"""Sample: run a ChatAgent backed by Amazon Bedrock and invoke a weather tool."""

import asyncio
import logging
from collections.abc import Sequence

from agent_framework import (
    AgentRunResponse,
    ChatAgent,
    FunctionCallContent,
    FunctionResultContent,
    Role,
    TextContent,
    ToolMode,
    ai_function,
)

from agent_framework_bedrock import BedrockChatClient


@ai_function
def get_weather(city: str) -> dict[str, str]:
    """Return a mock forecast for the requested city."""
    # Fall back to a default city when the input is blank.
    normalized = city.strip() or "New York"
    return {"city": normalized, "forecast": "72F and sunny"}


async def main() -> None:
    """Run the Bedrock sample agent, invoke the weather tool, and log the response."""
    agent = ChatAgent(
        chat_client=BedrockChatClient(),
        instructions="You are a concise travel assistant.",
        name="BedrockWeatherAgent",
        tool_choice=ToolMode.AUTO,
        tools=[get_weather],
    )

    response = await agent.run("Use the weather tool to check the forecast for new york.")
    # BUG FIX: logging.info treats extra positional args as %-format values.
    # The original call had no placeholder, so the reply text was never
    # rendered (logging reports "not all arguments converted" instead).
    logging.info("\nAssistant reply: %s", response.text or "")
    _log_response(response)


def _log_response(response: AgentRunResponse) -> None:
    """Log every message in the agent response, one numbered entry per message."""
    logging.info("\nConversation transcript:")
    for idx, message in enumerate(response.messages, start=1):
        tag = f"{idx}. {message.role.value if isinstance(message.role, Role) else message.role}"
        _log_contents(tag, message.contents)


def _log_contents(tag: str, contents: Sequence[object]) -> None:
    """Log each content block of a message with a type-specific summary line."""
    logging.info(f"[{tag}] {len(contents)} content blocks")
    for idx, content in enumerate(contents, start=1):
        if isinstance(content, TextContent):
            logging.info(f"  {idx}. text -> {content.text}")
        elif isinstance(content, FunctionCallContent):
            logging.info(f"  {idx}. tool_call ({content.name}) -> {content.arguments}")
        elif isinstance(content, FunctionResultContent):
            logging.info(f"  {idx}. tool_result ({content.call_id}) -> {content.result}")
        else:  # pragma: no cover - defensive
            logging.info(f"  {idx}. {content.type}")


if __name__ == "__main__":
    # BUG FIX: without a configured handler the root logger suppresses INFO
    # records, so the sample printed nothing at all. Configure basic console
    # logging before running.
    logging.basicConfig(level=logging.INFO, format="%(message)s")
    asyncio.run(main())
from __future__ import annotations

import asyncio
from typing import Any

import pytest
from agent_framework import ChatMessage, ChatOptions, Role, TextContent
from agent_framework.exceptions import ServiceInitializationError

from agent_framework_bedrock import BedrockChatClient


class _StubBedrockRuntime:
    """Stand-in for the boto3 bedrock-runtime client: records calls, replies canned."""

    def __init__(self) -> None:
        self.calls: list[dict[str, Any]] = []

    def converse(self, **kwargs: Any) -> dict[str, Any]:
        # Remember the request so tests can inspect what was sent.
        self.calls.append(kwargs)
        return {
            "modelId": kwargs["modelId"],
            "responseId": "resp-123",
            "usage": {"inputTokens": 10, "outputTokens": 5, "totalTokens": 15},
            "output": {
                "completionReason": "end_turn",
                "message": {
                    "id": "msg-1",
                    "role": "assistant",
                    "content": [{"text": "Bedrock says hi"}],
                },
            },
        }


def test_get_response_invokes_bedrock_runtime() -> None:
    # Arrange: a client wired to the recording stub.
    runtime = _StubBedrockRuntime()
    client = BedrockChatClient(model_id="amazon.titan-text", region="us-west-2", client=runtime)
    conversation = [
        ChatMessage(role=Role.SYSTEM, contents=[TextContent(text="You are concise.")]),
        ChatMessage(role=Role.USER, contents=[TextContent(text="hello")]),
    ]

    # Act.
    response = asyncio.run(client.get_response(messages=conversation, chat_options=ChatOptions(max_tokens=32)))

    # Assert: the stub saw the request and the canned reply was parsed back.
    assert runtime.calls, "Expected the runtime client to be called"
    sent = runtime.calls[0]
    assert sent["modelId"] == "amazon.titan-text"
    assert sent["messages"][0]["content"][0]["text"] == "hello"
    assert response.messages[0].contents[0].text == "Bedrock says hi"
    assert response.usage_details and response.usage_details.input_token_count == 10


def test_build_request_requires_non_system_messages() -> None:
    # A conversation containing only system text cannot form a Converse request.
    client = BedrockChatClient(
        model_id="amazon.titan-text",
        region="us-west-2",
        client=_StubBedrockRuntime(),
    )
    system_only = [ChatMessage(role=Role.SYSTEM, contents=[TextContent(text="Only system text")])]

    with pytest.raises(ServiceInitializationError):
        client._build_converse_request(system_only, ChatOptions())
def test_build_request_serializes_tool_history() -> None:
    """A tool call/result exchange round-trips into toolUse/toolResult blocks."""
    client = _build_client()
    history = [
        ChatMessage(role=Role.USER, contents=[TextContent(text="how's weather?")]),
        ChatMessage(
            role=Role.ASSISTANT,
            contents=[FunctionCallContent(call_id="call-1", name="get_weather", arguments='{"location": "SEA"}')],
        ),
        ChatMessage(
            role=Role.TOOL,
            contents=[FunctionResultContent(call_id="call-1", result={"answer": "72F"})],
        ),
    ]

    request = client._build_converse_request(history, ChatOptions())

    call_block = request["messages"][1]["content"][0]["toolUse"]
    result_block = request["messages"][2]["content"][0]["toolResult"]
    assert call_block["name"] == "get_weather"
    assert call_block["input"] == {"location": "SEA"}
    assert result_block["toolUseId"] == "call-1"
    assert result_block["content"][0]["json"] == {"answer": "72F"}


def test_process_response_parses_tool_use_and_result() -> None:
    """toolUse blocks become FunctionCallContent; text blocks stay TextContent."""
    client = _build_client()
    payload = {
        "modelId": "model",
        "output": {
            "message": {
                "id": "msg-1",
                "content": [
                    {"toolUse": {"toolUseId": "call-1", "name": "get_weather", "input": {"location": "NYC"}}},
                    {"text": "Calling tool"},
                ],
            },
            "completionReason": "tool_use",
        },
    }

    chat_response = client._process_converse_response(payload)

    parsed = chat_response.messages[0].contents
    assert isinstance(parsed[0], FunctionCallContent)
    assert parsed[0].name == "get_weather"
    assert isinstance(parsed[1], TextContent)
    assert chat_response.finish_reason == client._map_finish_reason("tool_use")


def test_process_response_parses_tool_result() -> None:
    """toolResult blocks become FunctionResultContent carrying the json value."""
    client = _build_client()
    payload = {
        "modelId": "model",
        "output": {
            "message": {
                "id": "msg-2",
                "content": [
                    {
                        "toolResult": {
                            "toolUseId": "call-1",
                            "status": "success",
                            "content": [{"json": {"answer": 42}}],
                        }
                    }
                ],
            },
            "completionReason": "end_turn",
        },
    }

    chat_response = client._process_converse_response(payload)

    parsed = chat_response.messages[0].contents
    assert isinstance(parsed[0], FunctionResultContent)
    assert parsed[0].result == {"answer": 42}
authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251216" +version = "1.0.0b251223" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/core/agent_framework/_clients.py b/python/packages/core/agent_framework/_clients.py index 506a1be7cd..bfb2c3f7d4 100644 --- a/python/packages/core/agent_framework/_clients.py +++ b/python/packages/core/agent_framework/_clients.py @@ -501,7 +501,7 @@ async def get_response( stop: str | Sequence[str] | None = None, store: bool | None = None, temperature: float | None = None, - tool_choice: ToolMode | Literal["auto", "required", "none"] | dict[str, Any] | None = None, + tool_choice: ToolMode | Literal["auto", "required", "none"] | dict[str, Any] | None = "auto", tools: ToolProtocol | Callable[..., Any] | MutableMapping[str, Any] @@ -535,6 +535,7 @@ async def get_response( store: Whether to store the response. temperature: The sampling temperature to use. tool_choice: The tool choice for the request. + Default is `auto`. tools: The tools to use for the request. top_p: The nucleus sampling probability to use. user: The user to associate with the request. @@ -595,7 +596,7 @@ async def get_streaming_response( stop: str | Sequence[str] | None = None, store: bool | None = None, temperature: float | None = None, - tool_choice: ToolMode | Literal["auto", "required", "none"] | dict[str, Any] | None = None, + tool_choice: ToolMode | Literal["auto", "required", "none"] | dict[str, Any] | None = "auto", tools: ToolProtocol | Callable[..., Any] | MutableMapping[str, Any] @@ -629,6 +630,7 @@ async def get_streaming_response( store: Whether to store the response. temperature: The sampling temperature to use. tool_choice: The tool choice for the request. + Default is `auto`. tools: The tools to use for the request. 
top_p: The nucleus sampling probability to use. user: The user to associate with the request. diff --git a/python/packages/core/agent_framework/_mcp.py b/python/packages/core/agent_framework/_mcp.py index 37e0d2c54b..a25f359a59 100644 --- a/python/packages/core/agent_framework/_mcp.py +++ b/python/packages/core/agent_framework/_mcp.py @@ -63,21 +63,21 @@ ] -def _mcp_prompt_message_to_chat_message( +def _parse_message_from_mcp( mcp_type: types.PromptMessage | types.SamplingMessage, ) -> ChatMessage: - """Convert a MCP container type to a Agent Framework type.""" + """Parse an MCP container type into an Agent Framework type.""" return ChatMessage( role=Role(value=mcp_type.role), - contents=_mcp_type_to_ai_content(mcp_type.content), + contents=_parse_content_from_mcp(mcp_type.content), raw_representation=mcp_type, ) -def _mcp_call_tool_result_to_ai_contents( +def _parse_contents_from_mcp_tool_result( mcp_type: types.CallToolResult, ) -> list[Contents]: - """Convert a MCP container type to a Agent Framework type. + """Parse an MCP CallToolResult into Agent Framework content types. 
This function extracts the complete _meta field from CallToolResult objects and merges all metadata into the additional_properties field of converted @@ -111,7 +111,7 @@ def _mcp_call_tool_result_to_ai_contents( # Convert each content item and merge metadata result_contents = [] for item in mcp_type.content: - contents = _mcp_type_to_ai_content(item) + contents = _parse_content_from_mcp(item) if merged_meta_props: for content in contents: @@ -124,7 +124,7 @@ def _mcp_call_tool_result_to_ai_contents( return result_contents -def _mcp_type_to_ai_content( +def _parse_content_from_mcp( mcp_type: types.ImageContent | types.TextContent | types.AudioContent @@ -142,7 +142,7 @@ def _mcp_type_to_ai_content( | types.ToolResultContent ], ) -> list[Contents]: - """Convert a MCP type to a Agent Framework type.""" + """Parse an MCP type into an Agent Framework type.""" mcp_types = mcp_type if isinstance(mcp_type, Sequence) else [mcp_type] return_types: list[Contents] = [] for mcp_type in mcp_types: @@ -178,7 +178,7 @@ def _mcp_type_to_ai_content( return_types.append( FunctionResultContent( call_id=mcp_type.toolUseId, - result=_mcp_type_to_ai_content(mcp_type.content) + result=_parse_content_from_mcp(mcp_type.content) if mcp_type.content else mcp_type.structuredContent, exception=Exception() if mcp_type.isError else None, @@ -211,10 +211,10 @@ def _mcp_type_to_ai_content( return return_types -def _ai_content_to_mcp_types( +def _prepare_content_for_mcp( content: Contents, ) -> types.TextContent | types.ImageContent | types.AudioContent | types.EmbeddedResource | types.ResourceLink | None: - """Convert a BaseContent type to a MCP type.""" + """Prepare an Agent Framework content type for MCP.""" match content: case TextContent(): return types.TextContent(type="text", text=content.text) @@ -253,15 +253,15 @@ def _ai_content_to_mcp_types( return None -def _chat_message_to_mcp_types( +def _prepare_message_for_mcp( content: ChatMessage, ) -> list[types.TextContent | types.ImageContent | 
types.AudioContent | types.EmbeddedResource | types.ResourceLink]: - """Convert a ChatMessage to a list of MCP types.""" + """Prepare a ChatMessage for MCP format.""" messages: list[ types.TextContent | types.ImageContent | types.AudioContent | types.EmbeddedResource | types.ResourceLink ] = [] for item in content.contents: - mcp_content = _ai_content_to_mcp_types(item) + mcp_content = _prepare_content_for_mcp(item) if mcp_content: messages.append(mcp_content) return messages @@ -469,7 +469,7 @@ async def sampling_callback( logger.debug("Sampling callback called with params: %s", params) messages: list[ChatMessage] = [] for msg in params.messages: - messages.append(_mcp_prompt_message_to_chat_message(msg)) + messages.append(_parse_message_from_mcp(msg)) try: response = await self.chat_client.get_response( messages, @@ -487,7 +487,7 @@ async def sampling_callback( code=types.INTERNAL_ERROR, message="Failed to get chat message content.", ) - mcp_contents = _chat_message_to_mcp_types(response.messages[0]) + mcp_contents = _prepare_message_for_mcp(response.messages[0]) # grab the first content that is of type TextContent or ImageContent mcp_content = next( (content for content in mcp_contents if isinstance(content, (types.TextContent, types.ImageContent))), @@ -692,7 +692,7 @@ async def call_tool(self, tool_name: str, **kwargs: Any) -> list[Contents]: k: v for k, v in kwargs.items() if k not in {"chat_options", "tools", "tool_choice", "thread"} } try: - return _mcp_call_tool_result_to_ai_contents( + return _parse_contents_from_mcp_tool_result( await self.session.call_tool(tool_name, arguments=filtered_kwargs) ) except McpError as mcp_exc: @@ -724,7 +724,7 @@ async def get_prompt(self, prompt_name: str, **kwargs: Any) -> list[ChatMessage] ) try: prompt_result = await self.session.get_prompt(prompt_name, arguments=kwargs) - return [_mcp_prompt_message_to_chat_message(message) for message in prompt_result.messages] + return [_parse_message_from_mcp(message) for message in 
prompt_result.messages] except McpError as mcp_exc: raise ToolExecutionException(mcp_exc.error.message, inner_exception=mcp_exc) from mcp_exc except Exception as ex: diff --git a/python/packages/core/agent_framework/_tools.py b/python/packages/core/agent_framework/_tools.py index 2f7801c84b..07b11811f3 100644 --- a/python/packages/core/agent_framework/_tools.py +++ b/python/packages/core/agent_framework/_tools.py @@ -573,7 +573,7 @@ class WeatherArgs(BaseModel): """ INJECTABLE: ClassVar[set[str]] = {"func"} - DEFAULT_EXCLUDE: ClassVar[set[str]] = {"input_model", "_invocation_duration_histogram"} + DEFAULT_EXCLUDE: ClassVar[set[str]] = {"input_model", "_invocation_duration_histogram", "_cached_parameters"} def __init__( self, @@ -615,6 +615,7 @@ def __init__( self.func = func self._instance = None # Store the instance for bound methods self.input_model = self._resolve_input_model(input_model) + self._cached_parameters: dict[str, Any] | None = None # Cache for model_json_schema() self.approval_mode = approval_mode or "never_require" if max_invocations is not None and max_invocations < 1: raise ValueError("max_invocations must be at least 1 or None.") @@ -802,8 +803,11 @@ def parameters(self) -> dict[str, Any]: Returns: A dictionary containing the JSON schema for the function's parameters. + The result is cached after the first call for performance. """ - return self.input_model.model_json_schema() + if self._cached_parameters is None: + self._cached_parameters = self.input_model.model_json_schema() + return self._cached_parameters def to_json_schema_spec(self) -> dict[str, Any]: """Convert a AIFunction to the JSON Schema function specification format. 
@@ -825,7 +829,7 @@ def to_dict(self, *, exclude: set[str] | None = None, exclude_none: bool = True) as_dict = super().to_dict(exclude=exclude, exclude_none=exclude_none) if (exclude and "input_model" in exclude) or not self.input_model: return as_dict - as_dict["input_model"] = self.input_model.model_json_schema() + as_dict["input_model"] = self.parameters() # Use cached parameters() return as_dict @@ -1779,11 +1783,6 @@ async def function_invocation_wrapper( response: "ChatResponse | None" = None fcc_messages: "list[ChatMessage]" = [] - # If tools are provided but tool_choice is not set, default to "auto" for function invocation - tools = _extract_tools(kwargs) - if tools and kwargs.get("tool_choice") is None: - kwargs["tool_choice"] = "auto" - for attempt_idx in range(config.max_iterations if config.enabled else 0): fcc_todo = _collect_approval_responses(prepped_messages) if fcc_todo: diff --git a/python/packages/core/agent_framework/_types.py b/python/packages/core/agent_framework/_types.py index ab68382a83..f804aae052 100644 --- a/python/packages/core/agent_framework/_types.py +++ b/python/packages/core/agent_framework/_types.py @@ -101,7 +101,7 @@ def _parse_content(content_data: MutableMapping[str, Any]) -> "Contents": Raises: ContentError if parsing fails """ - content_type = str(content_data.get("type")) + content_type: str | None = content_data.get("type", None) match content_type: case "text": return TextContent.from_dict(content_data) @@ -127,6 +127,8 @@ def _parse_content(content_data: MutableMapping[str, Any]) -> "Contents": return FunctionApprovalResponseContent.from_dict(content_data) case "text_reasoning": return TextReasoningContent.from_dict(content_data) + case None: + raise ContentError("Content type is missing") case _: raise ContentError(f"Unknown content type '{content_type}'") @@ -789,8 +791,9 @@ class TextReasoningContent(BaseContent): def __init__( self, - text: str, + text: str | None, *, + protected_data: str | None = None, 
additional_properties: dict[str, Any] | None = None, raw_representation: Any | None = None, annotations: Sequence[Annotations | MutableMapping[str, Any]] | None = None, @@ -802,6 +805,16 @@ def __init__( text: The text content represented by this instance. Keyword Args: + protected_data: This property is used to store data from a provider that should be roundtripped back to the + provider but that is not intended for human consumption. It is often encrypted or otherwise redacted + information that is only intended to be sent back to the provider and not displayed to the user. It's + possible for a TextReasoningContent to contain only `protected_data` and have an empty `text` property. + This data also may be associated with the corresponding `text`, acting as a validation signature for it. + + Note that whereas `text` can be provider agnostic, `protected_data` is provider-specific, and is likely + to only be understood by the provider that created it. The data is often represented as a more complex + object, so it should be serialized to a string before storing so that the whole object is easily + serializable without loss. additional_properties: Optional additional properties associated with the content. raw_representation: Optional raw representation of the content. annotations: Optional annotations associated with the content. @@ -814,6 +827,7 @@ def __init__( **kwargs, ) self.text = text + self.protected_data = protected_data self.type: Literal["text_reasoning"] = "text_reasoning" def __add__(self, other: "TextReasoningContent") -> "TextReasoningContent": @@ -846,13 +860,18 @@ def __add__(self, other: "TextReasoningContent") -> "TextReasoningContent": else: annotations = self.annotations + other.annotations + # Replace protected data. 
+ # Discussion: https://github.com/microsoft/agent-framework/pull/2950#discussion_r2634345613 + protected_data = other.protected_data or self.protected_data + # Create new instance using from_dict for proper deserialization result_dict = { - "text": self.text + other.text, + "text": (self.text or "") + (other.text or "") if self.text is not None or other.text is not None else None, "type": "text_reasoning", "annotations": [ann.to_dict(exclude_none=False) for ann in annotations] if annotations else None, "additional_properties": {**(self.additional_properties or {}), **(other.additional_properties or {})}, "raw_representation": raw_representation, + "protected_data": protected_data, } return TextReasoningContent.from_dict(result_dict) @@ -869,7 +888,9 @@ def __iadd__(self, other: "TextReasoningContent") -> Self: raise TypeError("Incompatible type") # Concatenate text - self.text += other.text + if self.text is not None or other.text is not None: + self.text = (self.text or "") + (other.text or "") + # if both are None, should keep as None # Merge additional properties (self takes precedence) if self.additional_properties is None: @@ -888,6 +909,11 @@ def __iadd__(self, other: "TextReasoningContent") -> Self: self.raw_representation if isinstance(self.raw_representation, list) else [self.raw_representation] ) + (other.raw_representation if isinstance(other.raw_representation, list) else [other.raw_representation]) + # Replace protected data. 
+ # Discussion: https://github.com/microsoft/agent-framework/pull/2950#discussion_r2634345613 + if other.protected_data is not None: + self.protected_data = other.protected_data + # Merge annotations if other.annotations: if self.annotations is None: @@ -2224,27 +2250,30 @@ def _process_update( if update.message_id: message.message_id = update.message_id for content in update.contents: - if ( - isinstance(content, FunctionCallContent) - and len(message.contents) > 0 - and isinstance(message.contents[-1], FunctionCallContent) - ): + # Fast path: get type attribute (most content will have it) + content_type = getattr(content, "type", None) + # Slow path: only check for dict if type is None + if content_type is None and isinstance(content, (dict, MutableMapping)): try: - message.contents[-1] += content - except AdditionItemMismatch: - message.contents.append(content) - elif isinstance(content, UsageContent): - if response.usage_details is None: - response.usage_details = UsageDetails() - response.usage_details += content.details - elif isinstance(content, (dict, MutableMapping)): - try: - cont = _parse_content(content) - message.contents.append(cont) + content = _parse_content(content) + content_type = content.type except ContentError as exc: logger.warning(f"Skipping unknown content type or invalid content: {exc}") - else: - message.contents.append(content) + continue + match content_type: + # mypy doesn't narrow type based on match/case, but we know these are FunctionCallContents + case "function_call" if message.contents and message.contents[-1].type == "function_call": + try: + message.contents[-1] += content # type: ignore[operator] + except AdditionItemMismatch: + message.contents.append(content) + case "usage": + if response.usage_details is None: + response.usage_details = UsageDetails() + # mypy doesn't narrow type based on match/case, but we know this is UsageContent + response.usage_details += content.details # type: ignore[union-attr, arg-type] + case _: + 
message.contents.append(content) # Incorporate the update's properties into the response. if update.response_id: response.response_id = update.response_id diff --git a/python/packages/core/agent_framework/_workflows/_agent.py b/python/packages/core/agent_framework/_workflows/_agent.py index d4f6c1411d..3cf37c4b49 100644 --- a/python/packages/core/agent_framework/_workflows/_agent.py +++ b/python/packages/core/agent_framework/_workflows/_agent.py @@ -26,6 +26,7 @@ ) from ..exceptions import AgentExecutionException +from ._agent_executor import AgentExecutor from ._checkpoint import CheckpointStorage from ._events import ( AgentRunUpdateEvent, @@ -141,7 +142,8 @@ async def run( checkpoint_storage: Runtime checkpoint storage. When provided with checkpoint_id, used to load and restore the checkpoint. When provided without checkpoint_id, enables checkpointing for this run. - **kwargs: Additional keyword arguments. + **kwargs: Additional keyword arguments passed through to underlying workflow + and ai_function tools. Returns: The final workflow response as an AgentRunResponse. @@ -153,7 +155,7 @@ async def run( response_id = str(uuid.uuid4()) async for update in self._run_stream_impl( - input_messages, response_id, thread, checkpoint_id, checkpoint_storage + input_messages, response_id, thread, checkpoint_id, checkpoint_storage, **kwargs ): response_updates.append(update) @@ -187,7 +189,8 @@ async def run_stream( checkpoint_storage: Runtime checkpoint storage. When provided with checkpoint_id, used to load and restore the checkpoint. When provided without checkpoint_id, enables checkpointing for this run. - **kwargs: Additional keyword arguments. + **kwargs: Additional keyword arguments passed through to underlying workflow + and ai_function tools. Yields: AgentRunResponseUpdate objects representing the workflow execution progress. 
@@ -198,7 +201,7 @@ async def run_stream( response_id = str(uuid.uuid4()) async for update in self._run_stream_impl( - input_messages, response_id, thread, checkpoint_id, checkpoint_storage + input_messages, response_id, thread, checkpoint_id, checkpoint_storage, **kwargs ): response_updates.append(update) yield update @@ -216,6 +219,7 @@ async def _run_stream_impl( thread: AgentThread, checkpoint_id: str | None = None, checkpoint_storage: CheckpointStorage | None = None, + **kwargs: Any, ) -> AsyncIterable[AgentRunResponseUpdate]: """Internal implementation of streaming execution. @@ -225,6 +229,8 @@ async def _run_stream_impl( thread: The conversation thread containing message history. checkpoint_id: ID of checkpoint to restore from. checkpoint_storage: Runtime checkpoint storage. + **kwargs: Additional keyword arguments passed through to the underlying + workflow and ai_function tools. Yields: AgentRunResponseUpdate objects representing the workflow execution progress. @@ -255,6 +261,7 @@ async def _run_stream_impl( message=None, checkpoint_id=checkpoint_id, checkpoint_storage=checkpoint_storage, + **kwargs, ) else: # Execute workflow with streaming (initial run or no function responses) @@ -268,6 +275,7 @@ async def _run_stream_impl( event_stream = self.workflow.run_stream( message=conversation_messages, checkpoint_storage=checkpoint_storage, + **kwargs, ) # Process events from the stream @@ -286,10 +294,20 @@ def _convert_workflow_event_to_agent_update( AgentRunUpdateEvent, RequestInfoEvent, and WorkflowOutputEvent are processed. Other workflow events are ignored as they are workflow-internal. + + For AgentRunUpdateEvent from AgentExecutor instances, only events from executors + with output_response=True are converted to agent updates. This prevents agent + responses from executors that were not explicitly marked to surface their output. 
+ Non-AgentExecutor executors that emit AgentRunUpdateEvent directly are allowed + through since they explicitly chose to emit the event. """ match event: - case AgentRunUpdateEvent(data=update): - # Direct pass-through of update in an agent streaming event + case AgentRunUpdateEvent(data=update, executor_id=executor_id): + # For AgentExecutor instances, only pass through if output_response=True. + # Non-AgentExecutor executors that emit AgentRunUpdateEvent are allowed through. + executor = self.workflow.executors.get(executor_id) + if isinstance(executor, AgentExecutor) and not executor.output_response: + return None if update: return update return None @@ -297,11 +315,17 @@ def _convert_workflow_event_to_agent_update( case WorkflowOutputEvent(data=data, source_executor_id=source_executor_id): # Convert workflow output to an agent response update. # Handle different data types appropriately. + + # Skip AgentRunResponse from AgentExecutor with output_response=True + # since streaming events already surfaced the content. + if isinstance(data, AgentRunResponse): + executor = self.workflow.executors.get(source_executor_id) + if isinstance(executor, AgentExecutor) and executor.output_response: + return None + if isinstance(data, AgentRunResponseUpdate): - # Already an update, pass through return data if isinstance(data, ChatMessage): - # Convert ChatMessage to update return AgentRunResponseUpdate( contents=list(data.contents), role=data.role, @@ -311,15 +335,9 @@ def _convert_workflow_event_to_agent_update( created_at=datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ"), raw_representation=data, ) - # Determine contents based on data type - if isinstance(data, BaseContent): - # Already a content type (TextContent, ImageContent, etc.) 
- contents: list[Contents] = [cast(Contents, data)] - elif isinstance(data, str): - contents = [TextContent(text=data)] - else: - # Fallback: convert to string representation - contents = [TextContent(text=str(data))] + contents = self._extract_contents(data) + if not contents: + return None return AgentRunResponseUpdate( contents=contents, role=Role.ASSISTANT, @@ -405,6 +423,18 @@ def _extract_function_responses(self, input_messages: list[ChatMessage]) -> dict raise AgentExecutionException("Unexpected content type while awaiting request info responses.") return function_responses + def _extract_contents(self, data: Any) -> list[Contents]: + """Recursively extract Contents from workflow output data.""" + if isinstance(data, ChatMessage): + return list(data.contents) + if isinstance(data, list): + return [c for item in data for c in self._extract_contents(item)] + if isinstance(data, BaseContent): + return [cast(Contents, data)] + if isinstance(data, str): + return [TextContent(text=data)] + return [TextContent(text=str(data))] + class _ResponseState(TypedDict): """State for grouping response updates by message_id.""" diff --git a/python/packages/core/agent_framework/_workflows/_agent_executor.py b/python/packages/core/agent_framework/_workflows/_agent_executor.py index 26300ad473..4e0d2058ad 100644 --- a/python/packages/core/agent_framework/_workflows/_agent_executor.py +++ b/python/packages/core/agent_framework/_workflows/_agent_executor.py @@ -99,6 +99,11 @@ def __init__( self._output_response = output_response self._cache: list[ChatMessage] = [] + @property + def output_response(self) -> bool: + """Whether this executor yields AgentRunResponse as workflow output when complete.""" + return self._output_response + @property def workflow_output_types(self) -> list[type[Any]]: # Override to declare AgentRunResponse as a possible output type only if enabled. 
diff --git a/python/packages/core/agent_framework/_workflows/_handoff.py b/python/packages/core/agent_framework/_workflows/_handoff.py index 9a99657902..33c533c5e5 100644 --- a/python/packages/core/agent_framework/_workflows/_handoff.py +++ b/python/packages/core/agent_framework/_workflows/_handoff.py @@ -871,8 +871,10 @@ def create_specialist() -> AgentProtocol: HandoffBuilder(participants=[coordinator, refund, shipping]) .set_coordinator(coordinator) .with_termination_condition( - lambda conv: sum(1 for msg in conv if msg.role.value == "user") >= 5 - or any("goodbye" in msg.text.lower() for msg in conv[-2:]) + lambda conv: ( + sum(1 for msg in conv if msg.role.value == "user") >= 5 + or any("goodbye" in msg.text.lower() for msg in conv[-2:]) + ) ) .build() ) diff --git a/python/packages/core/agent_framework/_workflows/_viz.py b/python/packages/core/agent_framework/_workflows/_viz.py index 14011cb5a5..0fcf8af32d 100644 --- a/python/packages/core/agent_framework/_workflows/_viz.py +++ b/python/packages/core/agent_framework/_workflows/_viz.py @@ -7,16 +7,16 @@ from pathlib import Path from typing import Literal -from ._edge import FanInEdgeGroup +from ._edge import FanInEdgeGroup, InternalEdgeGroup from ._workflow import Workflow # Import of WorkflowExecutor is performed lazily inside methods to avoid cycles -"""Workflow visualization module using graphviz.""" +"""Workflow visualization module using graphviz and Mermaid.""" class WorkflowViz: - """A class for visualizing workflows using graphviz.""" + """A class for visualizing workflows using graphviz and Mermaid.""" def __init__(self, workflow: Workflow): """Initialize the WorkflowViz with a workflow. @@ -26,9 +26,13 @@ def __init__(self, workflow: Workflow): """ self._workflow = workflow - def to_digraph(self) -> str: + def to_digraph(self, include_internal_executors: bool = False) -> str: """Export the workflow as a DOT format digraph string. 
+ Args: + include_internal_executors (bool): Whether to include internal executors in the visualization. + Default is False. + Returns: A string representation of the workflow in DOT format. """ @@ -39,20 +43,37 @@ def to_digraph(self) -> str: lines.append("") # Emit the top-level workflow nodes/edges - self._emit_workflow_digraph(self._workflow, lines, indent=" ") + self._emit_workflow_digraph( + self._workflow, + lines, + indent=" ", + include_internal_executors=include_internal_executors, + ) # Emit sub-workflows hosted by WorkflowExecutor as nested clusters - self._emit_sub_workflows_digraph(self._workflow, lines, indent=" ") + self._emit_sub_workflows_digraph( + self._workflow, + lines, + indent=" ", + include_internal_executors=include_internal_executors, + ) lines.append("}") return "\n".join(lines) - def export(self, format: Literal["svg", "png", "pdf", "dot"] = "svg", filename: str | None = None) -> str: + def export( + self, + format: Literal["svg", "png", "pdf", "dot"] = "svg", + filename: str | None = None, + include_internal_executors: bool = False, + ) -> str: """Export the workflow visualization to a file or return the file path. Args: format: The output format. Supported formats: 'svg', 'png', 'pdf', 'dot'. filename: Optional filename to save the output. If None, creates a temporary file. + include_internal_executors (bool): Whether to include internal executors in the visualization. + Default is False. Returns: The path to the saved file. @@ -66,7 +87,7 @@ def export(self, format: Literal["svg", "png", "pdf", "dot"] = "svg", filename: raise ValueError(f"Unsupported format: {format}. 
Supported formats: svg, png, pdf, dot") if format == "dot": - content = self.to_digraph() + content = self.to_digraph(include_internal_executors=include_internal_executors) if filename: with open(filename, "w", encoding="utf-8") as f: f.write(content) @@ -87,7 +108,7 @@ def export(self, format: Literal["svg", "png", "pdf", "dot"] = "svg", filename: ) from e # Create a temporary graphviz Source object - dot_content = self.to_digraph() + dot_content = self.to_digraph(include_internal_executors=include_internal_executors) source = graphviz.Source(dot_content) try: @@ -99,7 +120,7 @@ def export(self, format: Literal["svg", "png", "pdf", "dot"] = "svg", filename: # Remove extension if present since graphviz.render() adds it base_name = str(output_path.with_suffix("")) - source.render(base_name, format=format, cleanup=True) + source.render(base_name, format=format, cleanup=True) # type: ignore # Return the actual filename with extension return f"{base_name}.{format}" @@ -108,7 +129,7 @@ def export(self, format: Literal["svg", "png", "pdf", "dot"] = "svg", filename: temp_path = Path(temp_file.name) base_name = str(temp_path.with_suffix("")) - source.render(base_name, format=format, cleanup=True) + source.render(base_name, format=format, cleanup=True) # type: ignore return f"{base_name}.{format}" except graphviz.backend.execute.ExecutableNotFound as e: raise ImportError( @@ -118,60 +139,72 @@ def export(self, format: Literal["svg", "png", "pdf", "dot"] = "svg", filename: "brew install graphviz on macOS, or download from https://graphviz.org/download/ for other platforms." ) from e - def save_svg(self, filename: str) -> str: + def save_svg(self, filename: str, include_internal_executors: bool = False) -> str: """Convenience method to save as SVG. Args: filename: The filename to save the SVG file. + include_internal_executors (bool): Whether to include internal executors in the visualization. + Default is False. Returns: The path to the saved SVG file. 
""" - return self.export(format="svg", filename=filename) + return self.export(format="svg", filename=filename, include_internal_executors=include_internal_executors) - def save_png(self, filename: str) -> str: + def save_png(self, filename: str, include_internal_executors: bool = False) -> str: """Convenience method to save as PNG. Args: filename: The filename to save the PNG file. + include_internal_executors (bool): Whether to include internal executors in the visualization. + Default is False. Returns: The path to the saved PNG file. """ - return self.export(format="png", filename=filename) + return self.export(format="png", filename=filename, include_internal_executors=include_internal_executors) - def save_pdf(self, filename: str) -> str: + def save_pdf(self, filename: str, include_internal_executors: bool = False) -> str: """Convenience method to save as PDF. Args: filename: The filename to save the PDF file. + include_internal_executors (bool): Whether to include internal executors in the visualization. + Default is False. Returns: The path to the saved PDF file. """ - return self.export(format="pdf", filename=filename) + return self.export(format="pdf", filename=filename, include_internal_executors=include_internal_executors) - def to_mermaid(self) -> str: + def to_mermaid(self, include_internal_executors: bool = False) -> str: """Export the workflow as a Mermaid flowchart string. + Args: + include_internal_executors (bool): Whether to include internal executors in the visualization. + Default is False. + Returns: A string representation of the workflow in Mermaid flowchart syntax. 
""" - - def _san(s: str) -> str: - """Sanitize an ID for Mermaid (alphanumeric and underscore, start with letter).""" - s2 = re.sub(r"[^0-9A-Za-z_]", "_", s) - if not s2 or not s2[0].isalpha(): - s2 = f"n_{s2}" - return s2 - lines: list[str] = ["flowchart TD"] # Emit top-level workflow - self._emit_workflow_mermaid(self._workflow, lines, indent=" ") + self._emit_workflow_mermaid( + self._workflow, + lines, + indent=" ", + include_internal_executors=include_internal_executors, + ) # Emit sub-workflows as Mermaid subgraphs - self._emit_sub_workflows_mermaid(self._workflow, lines, indent=" ") + self._emit_sub_workflows_mermaid( + self._workflow, + lines, + indent=" ", + include_internal_executors=include_internal_executors, + ) return "\n".join(lines) @@ -181,13 +214,13 @@ def _fan_in_digest(self, target: str, sources: list[str]) -> str: sources_sorted = sorted(sources) return hashlib.sha256((target + "|" + "|".join(sources_sorted)).encode("utf-8")).hexdigest()[:8] - def _compute_fan_in_descriptors(self, wf: Workflow | None = None) -> list[tuple[str, list[str], str]]: + def _compute_fan_in_descriptors(self, workflow: Workflow | None = None) -> list[tuple[str, list[str], str]]: """Return list of (node_id, sources, target) for fan-in groups. 
node_id is DOT-oriented: fan_in::target::digest """ result: list[tuple[str, list[str], str]] = [] - workflow = wf or self._workflow + workflow = workflow or self._workflow for group in workflow.edge_groups: if isinstance(group, FanInEdgeGroup): target = group.target_executor_ids[0] @@ -197,13 +230,19 @@ def _compute_fan_in_descriptors(self, wf: Workflow | None = None) -> list[tuple[ result.append((node_id, sorted(sources), target)) return result - def _compute_normal_edges(self, wf: Workflow | None = None) -> list[tuple[str, str, bool]]: + def _compute_normal_edges( + self, + workflow: Workflow | None = None, + include_internal_executors: bool = False, + ) -> list[tuple[str, str, bool]]: """Return list of (source_id, target_id, is_conditional) for non-fan-in groups.""" edges: list[tuple[str, str, bool]] = [] - workflow = wf or self._workflow + workflow = workflow or self._workflow for group in workflow.edge_groups: if isinstance(group, FanInEdgeGroup): continue + if isinstance(group, InternalEdgeGroup) and not include_internal_executors: + continue for edge in group.edges: is_cond = getattr(edge, "_condition", None) is not None edges.append((edge.source_id, edge.target_id, is_cond)) @@ -213,7 +252,14 @@ def _compute_normal_edges(self, wf: Workflow | None = None) -> list[tuple[str, s # region Internal emitters (DOT) - def _emit_workflow_digraph(self, wf: Workflow, lines: list[str], indent: str, ns: str | None = None) -> None: + def _emit_workflow_digraph( + self, + workflow: Workflow, + lines: list[str], + indent: str, + ns: str | None = None, + include_internal_executors: bool = False, + ) -> None: """Emit DOT nodes/edges for the given workflow. 
If ns (namespace) is provided, node ids are prefixed with f"{ns}/" for uniqueness, @@ -224,16 +270,16 @@ def map_id(x: str) -> str: return f"{ns}/{x}" if ns else x # Nodes - start_executor_id = wf.start_executor_id + start_executor_id = workflow.start_executor_id lines.append( f'{indent}"{map_id(start_executor_id)}" [fillcolor=lightgreen, label="{start_executor_id}\\n(Start)"];' ) - for executor_id in wf.executors: + for executor_id in workflow.executors: if executor_id != start_executor_id: lines.append(f'{indent}"{map_id(executor_id)}" [label="{executor_id}"];') # Fan-in nodes - fan_in_nodes = self._compute_fan_in_descriptors(wf) + fan_in_nodes = self._compute_fan_in_descriptors(workflow) if fan_in_nodes: lines.append("") for node_id, _, _ in fan_in_nodes: @@ -246,11 +292,19 @@ def map_id(x: str) -> str: lines.append(f'{indent}"{map_id(node_id)}" -> "{map_id(target)}";') # Normal edges - for src, tgt, is_cond in self._compute_normal_edges(wf): + for src, tgt, is_cond in self._compute_normal_edges( + workflow, include_internal_executors=include_internal_executors + ): edge_attr = ' [style=dashed, label="conditional"]' if is_cond else "" lines.append(f'{indent}"{map_id(src)}" -> "{map_id(tgt)}"{edge_attr};') - def _emit_sub_workflows_digraph(self, wf: Workflow, lines: list[str], indent: str) -> None: + def _emit_sub_workflows_digraph( + self, + workflow: Workflow, + lines: list[str], + indent: str, + include_internal_executors: bool = False, + ) -> None: """Emit DOT subgraphs for any WorkflowExecutor instances found in the workflow.""" # Lazy import to avoid any potential import cycles try: @@ -258,7 +312,7 @@ def _emit_sub_workflows_digraph(self, wf: Workflow, lines: list[str], indent: st except ImportError: # pragma: no cover - best-effort; if unavailable, skip subgraphs return - for exec_id, exec_obj in wf.executors.items(): + for exec_id, exec_obj in workflow.executors.items(): if isinstance(exec_obj, WorkflowExecutor) and hasattr(exec_obj, "workflow") and 
exec_obj.workflow: subgraph_id = f"cluster_{uuid.uuid5(uuid.NAMESPACE_OID, exec_id).hex[:8]}" lines.append(f"{indent}subgraph {subgraph_id} {{") @@ -267,10 +321,21 @@ def _emit_sub_workflows_digraph(self, wf: Workflow, lines: list[str], indent: st # Emit the nested workflow inside this cluster using a namespace ns = exec_id - self._emit_workflow_digraph(exec_obj.workflow, lines, indent=f"{indent} ", ns=ns) + self._emit_workflow_digraph( + exec_obj.workflow, + lines, + indent=f"{indent} ", + ns=ns, + include_internal_executors=include_internal_executors, + ) # Recurse into deeper nested sub-workflows - self._emit_sub_workflows_digraph(exec_obj.workflow, lines, indent=f"{indent} ") + self._emit_sub_workflows_digraph( + exec_obj.workflow, + lines, + indent=f"{indent} ", + include_internal_executors=include_internal_executors, + ) lines.append(f"{indent}}}") @@ -278,7 +343,14 @@ def _emit_sub_workflows_digraph(self, wf: Workflow, lines: list[str], indent: st # region Internal emitters (Mermaid) - def _emit_workflow_mermaid(self, wf: Workflow, lines: list[str], indent: str, ns: str | None = None) -> None: + def _emit_workflow_mermaid( + self, + workflow: Workflow, + lines: list[str], + indent: str, + ns: str | None = None, + include_internal_executors: bool = False, + ) -> None: def _san(s: str) -> str: s2 = re.sub(r"[^0-9A-Za-z_]", "_", s) if not s2 or not s2[0].isalpha(): @@ -291,15 +363,15 @@ def map_id(x: str) -> str: return _san(x) # Nodes - start_executor_id = wf.start_executor_id + start_executor_id = workflow.start_executor_id lines.append(f'{indent}{map_id(start_executor_id)}["{start_executor_id} (Start)"];') - for executor_id in wf.executors: + for executor_id in workflow.executors: if executor_id == start_executor_id: continue lines.append(f'{indent}{map_id(executor_id)}["{executor_id}"];') # Fan-in nodes - fan_in_nodes_dot = self._compute_fan_in_descriptors(wf) + fan_in_nodes_dot = self._compute_fan_in_descriptors(workflow) fan_in_nodes: list[tuple[str, 
list[str], str]] = [] for dot_node_id, sources, target in fan_in_nodes_dot: digest = dot_node_id.split("::")[-1] @@ -318,7 +390,9 @@ def map_id(x: str) -> str: lines.append(f"{indent}{fan_node_id} --> {map_id(target)};") # Normal edges - for src, tgt, is_cond in self._compute_normal_edges(wf): + for src, tgt, is_cond in self._compute_normal_edges( + workflow, include_internal_executors=include_internal_executors + ): s = map_id(src) t = map_id(tgt) if is_cond: @@ -326,7 +400,13 @@ def map_id(x: str) -> str: else: lines.append(f"{indent}{s} --> {t};") - def _emit_sub_workflows_mermaid(self, wf: Workflow, lines: list[str], indent: str) -> None: + def _emit_sub_workflows_mermaid( + self, + workflow: Workflow, + lines: list[str], + indent: str, + include_internal_executors: bool = False, + ) -> None: try: from ._workflow_executor import WorkflowExecutor # type: ignore except ImportError: # pragma: no cover @@ -338,14 +418,25 @@ def _san(s: str) -> str: s2 = f"n_{s2}" return s2 - for exec_id, exec_obj in wf.executors.items(): + for exec_id, exec_obj in workflow.executors.items(): if isinstance(exec_obj, WorkflowExecutor) and hasattr(exec_obj, "workflow") and exec_obj.workflow: sg_id = _san(exec_id) lines.append(f"{indent}subgraph {sg_id}") # Render nested workflow within this subgraph using namespacing - self._emit_workflow_mermaid(exec_obj.workflow, lines, indent=f"{indent} ", ns=exec_id) + self._emit_workflow_mermaid( + exec_obj.workflow, + lines, + indent=f"{indent} ", + ns=exec_id, + include_internal_executors=include_internal_executors, + ) # Recurse into deeper sub-workflows - self._emit_sub_workflows_mermaid(exec_obj.workflow, lines, indent=f"{indent} ") + self._emit_sub_workflows_mermaid( + exec_obj.workflow, + lines, + indent=f"{indent} ", + include_internal_executors=include_internal_executors, + ) lines.append(f"{indent}end") # endregion diff --git a/python/packages/core/agent_framework/_workflows/_workflow_executor.py 
b/python/packages/core/agent_framework/_workflows/_workflow_executor.py index cc028f337c..dccd76403b 100644 --- a/python/packages/core/agent_framework/_workflows/_workflow_executor.py +++ b/python/packages/core/agent_framework/_workflows/_workflow_executor.py @@ -11,6 +11,7 @@ from ._workflow import Workflow from ._checkpoint_encoding import decode_checkpoint_value, encode_checkpoint_value +from ._const import WORKFLOW_RUN_KWARGS_KEY from ._events import ( RequestInfoEvent, WorkflowErrorEvent, @@ -366,8 +367,11 @@ async def process_workflow(self, input_data: object, ctx: WorkflowContext[Any]) logger.debug(f"WorkflowExecutor {self.id} starting sub-workflow {self.workflow.id} execution {execution_id}") try: - # Run the sub-workflow and collect all events - result = await self.workflow.run(input_data) + # Get kwargs from parent workflow's SharedState to propagate to subworkflow + parent_kwargs: dict[str, Any] = await ctx.get_shared_state(WORKFLOW_RUN_KWARGS_KEY) or {} + + # Run the sub-workflow and collect all events, passing parent kwargs + result = await self.workflow.run(input_data, **parent_kwargs) logger.debug( f"WorkflowExecutor {self.id} sub-workflow {self.workflow.id} " diff --git a/python/packages/core/agent_framework/azure/_chat_client.py b/python/packages/core/agent_framework/azure/_chat_client.py index a8bfec0427..59f74259a4 100644 --- a/python/packages/core/agent_framework/azure/_chat_client.py +++ b/python/packages/core/agent_framework/azure/_chat_client.py @@ -154,7 +154,7 @@ def __init__( ) @override - def _parse_text_from_choice(self, choice: Choice | ChunkChoice) -> TextContent | None: + def _parse_text_from_openai(self, choice: Choice | ChunkChoice) -> TextContent | None: """Parse the choice into a TextContent object. Overwritten from OpenAIBaseChatClient to deal with Azure On Your Data function. 
diff --git a/python/packages/core/agent_framework/observability.py b/python/packages/core/agent_framework/observability.py index 38fca796c1..9dc6e4d4a9 100644 --- a/python/packages/core/agent_framework/observability.py +++ b/python/packages/core/agent_framework/observability.py @@ -1680,13 +1680,12 @@ def _capture_messages( prepped = prepare_messages(messages, system_instructions=system_instructions) otel_messages: list[dict[str, Any]] = [] for index, message in enumerate(prepped): - otel_messages.append(_to_otel_message(message)) - try: - message_data = message.to_dict(exclude_none=True) - except Exception: - message_data = {"role": message.role.value, "contents": message.contents} + # Reuse the otel message representation for logging instead of calling to_dict() + # to avoid expensive Pydantic serialization overhead + otel_message = _to_otel_message(message) + otel_messages.append(otel_message) logger.info( - message_data, + otel_message, extra={ OtelAttr.EVENT_NAME: OtelAttr.CHOICE if output else ROLE_EVENT_MAP.get(message.role.value), OtelAttr.PROVIDER_NAME: provider_name, diff --git a/python/packages/core/agent_framework/ollama/__init__.py b/python/packages/core/agent_framework/ollama/__init__.py new file mode 100644 index 0000000000..eae73853c2 --- /dev/null +++ b/python/packages/core/agent_framework/ollama/__init__.py @@ -0,0 +1,23 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import importlib +from typing import Any + +IMPORT_PATH = "agent_framework_ollama" +PACKAGE_NAME = "agent-framework-ollama" +_IMPORTS = ["__version__", "OllamaChatClient", "OllamaSettings"] + + +def __getattr__(name: str) -> Any: + if name in _IMPORTS: + try: + return getattr(importlib.import_module(IMPORT_PATH), name) + except ModuleNotFoundError as exc: + raise ModuleNotFoundError( + f"The '{PACKAGE_NAME}' package is not installed, please do `pip install {PACKAGE_NAME}`" + ) from exc + raise AttributeError(f"Module {IMPORT_PATH} has no attribute {name}.") + + +def __dir__() -> list[str]: + return _IMPORTS diff --git a/python/packages/core/agent_framework/ollama/__init__.pyi b/python/packages/core/agent_framework/ollama/__init__.pyi new file mode 100644 index 0000000000..3a1e7824d6 --- /dev/null +++ b/python/packages/core/agent_framework/ollama/__init__.pyi @@ -0,0 +1,13 @@ +# Copyright (c) Microsoft. All rights reserved. + +from agent_framework_ollama import ( + OllamaChatClient, + OllamaSettings, + __version__, +) + +__all__ = [ + "OllamaChatClient", + "OllamaSettings", + "__version__", +] diff --git a/python/packages/core/agent_framework/openai/_assistants_client.py b/python/packages/core/agent_framework/openai/_assistants_client.py index 319ad95231..e790a44940 100644 --- a/python/packages/core/agent_framework/openai/_assistants_client.py +++ b/python/packages/core/agent_framework/openai/_assistants_client.py @@ -164,7 +164,7 @@ async def __aexit__(self, exc_type: type[BaseException] | None, exc_val: BaseExc async def close(self) -> None: """Clean up any assistants we created.""" if self._should_delete_assistant and self.assistant_id is not None: - client = await self.ensure_client() + client = await self._ensure_client() await client.beta.assistants.delete(self.assistant_id) object.__setattr__(self, "assistant_id", None) object.__setattr__(self, "_should_delete_assistant", False) @@ -188,7 +188,7 @@ async def _inner_get_streaming_response( chat_options: 
ChatOptions, **kwargs: Any, ) -> AsyncIterable[ChatResponseUpdate]: - # Extract necessary state from messages and options + # prepare run_options, tool_results = self._prepare_options(messages, chat_options, **kwargs) # Get the thread ID @@ -204,10 +204,10 @@ async def _inner_get_streaming_response( # Determine which assistant to use and create if needed assistant_id = await self._get_assistant_id_or_create() - # Create the streaming response + # execute stream, thread_id = await self._create_assistant_stream(thread_id, assistant_id, run_options, tool_results) - # Process and yield each update from the stream + # process async for update in self._process_stream_events(stream, thread_id): yield update @@ -222,7 +222,7 @@ async def _get_assistant_id_or_create(self) -> str: if not self.model_id: raise ServiceInitializationError("Parameter 'model_id' is required for assistant creation.") - client = await self.ensure_client() + client = await self._ensure_client() created_assistant = await client.beta.assistants.create( model=self.model_id, description=self.assistant_description, @@ -245,11 +245,11 @@ async def _create_assistant_stream( Returns: tuple: (stream, final_thread_id) """ - client = await self.ensure_client() + client = await self._ensure_client() # Get any active run for this thread thread_run = await self._get_active_thread_run(thread_id) - tool_run_id, tool_outputs = self._convert_function_results_to_tool_output(tool_results) + tool_run_id, tool_outputs = self._prepare_tool_outputs_for_assistants(tool_results) if thread_run is not None and tool_run_id is not None and tool_run_id == thread_run.id and tool_outputs: # There's an active run and we have tool results to submit, so submit the results. 
@@ -270,7 +270,7 @@ async def _create_assistant_stream( async def _get_active_thread_run(self, thread_id: str | None) -> Run | None: """Get any active run for the given thread.""" - client = await self.ensure_client() + client = await self._ensure_client() if thread_id is None: return None @@ -281,7 +281,7 @@ async def _get_active_thread_run(self, thread_id: str | None) -> Run | None: async def _prepare_thread(self, thread_id: str | None, thread_run: Run | None, run_options: dict[str, Any]) -> str: """Prepare the thread for a new run, creating or cleaning up as needed.""" - client = await self.ensure_client() + client = await self._ensure_client() if thread_id is None: # No thread ID was provided, so create a new thread. thread = await client.beta.threads.create( # type: ignore[reportDeprecated] @@ -330,7 +330,7 @@ async def _process_stream_events(self, stream: Any, thread_id: str) -> AsyncIter response_id=response_id, ) elif response.event == "thread.run.requires_action" and isinstance(response.data, Run): - contents = self._create_function_call_contents(response.data, response_id) + contents = self._parse_function_calls_from_assistants(response.data, response_id) if contents: yield ChatResponseUpdate( role=Role.ASSISTANT, @@ -371,8 +371,8 @@ async def _process_stream_events(self, stream: Any, thread_id: str) -> AsyncIter role=Role.ASSISTANT, ) - def _create_function_call_contents(self, event_data: Run, response_id: str | None) -> list[Contents]: - """Create function call contents from a tool action event.""" + def _parse_function_calls_from_assistants(self, event_data: Run, response_id: str | None) -> list[Contents]: + """Parse function call contents from an assistants tool action event.""" contents: list[Contents] = [] if event_data.required_action is not None: @@ -437,7 +437,10 @@ def _prepare_options( if chat_options.response_format is not None: run_options["response_format"] = { "type": "json_schema", - "json_schema": 
chat_options.response_format.model_json_schema(), + "json_schema": { + "name": chat_options.response_format.__name__, + "schema": chat_options.response_format.model_json_schema(), + }, } instructions: list[str] = [] @@ -487,10 +490,11 @@ def _prepare_options( return run_options, tool_results - def _convert_function_results_to_tool_output( + def _prepare_tool_outputs_for_assistants( self, tool_results: list[FunctionResultContent] | None, ) -> tuple[str | None, list[ToolOutput] | None]: + """Prepare function results for submission to the assistants API.""" run_id: str | None = None tool_outputs: list[ToolOutput] | None = None diff --git a/python/packages/core/agent_framework/openai/_chat_client.py b/python/packages/core/agent_framework/openai/_chat_client.py index 7f0feb0fc7..b7cac3ba20 100644 --- a/python/packages/core/agent_framework/openai/_chat_client.py +++ b/python/packages/core/agent_framework/openai/_chat_client.py @@ -14,7 +14,7 @@ from openai.types.chat.chat_completion_chunk import ChatCompletionChunk from openai.types.chat.chat_completion_chunk import Choice as ChunkChoice from openai.types.chat.chat_completion_message_custom_tool_call import ChatCompletionMessageCustomToolCall -from pydantic import BaseModel, ValidationError +from pydantic import ValidationError from .._clients import BaseChatClient from .._logging import get_logger @@ -34,6 +34,7 @@ FunctionResultContent, Role, TextContent, + TextReasoningContent, UriContent, UsageContent, UsageDetails, @@ -69,10 +70,12 @@ async def _inner_get_response( chat_options: ChatOptions, **kwargs: Any, ) -> ChatResponse: - client = await self.ensure_client() + client = await self._ensure_client() + # prepare options_dict = self._prepare_options(messages, chat_options) try: - return self._create_chat_response( + # execute and process + return self._parse_response_from_openai( await client.chat.completions.create(stream=False, **options_dict), chat_options ) except BadRequestError as ex: @@ -98,14 +101,16 @@ async 
def _inner_get_streaming_response( chat_options: ChatOptions, **kwargs: Any, ) -> AsyncIterable[ChatResponseUpdate]: - client = await self.ensure_client() + client = await self._ensure_client() + # prepare options_dict = self._prepare_options(messages, chat_options) options_dict["stream_options"] = {"include_usage": True} try: + # execute and process async for chunk in await client.chat.completions.create(stream=True, **options_dict): if len(chunk.choices) == 0 and chunk.usage is None: continue - yield self._create_chat_response_update(chunk) + yield self._parse_response_update_from_openai(chunk) except BadRequestError as ex: if ex.code == "content_filter": raise OpenAIContentFilterException( @@ -124,7 +129,9 @@ async def _inner_get_streaming_response( # region content creation - def _chat_to_tool_spec(self, tools: Sequence[ToolProtocol | MutableMapping[str, Any]]) -> list[dict[str, Any]]: + def _prepare_tools_for_openai( + self, tools: Sequence[ToolProtocol | MutableMapping[str, Any]] + ) -> list[dict[str, Any]]: chat_tools: list[dict[str, Any]] = [] for tool in tools: if isinstance(tool, ToolProtocol): @@ -157,51 +164,65 @@ def _process_web_search_tool( return None def _prepare_options(self, messages: MutableSequence[ChatMessage], chat_options: ChatOptions) -> dict[str, Any]: - # Preprocess web search tool if it exists - options_dict = chat_options.to_dict( + run_options = chat_options.to_dict( exclude={ "type", "instructions", # included as system message - "allow_multiple_tool_calls", # handled separately + "response_format", # handled separately + "additional_properties", # handled separately } ) - if messages and "messages" not in options_dict: - options_dict["messages"] = self._prepare_chat_history_for_request(messages) - if "messages" not in options_dict: + # messages + if messages and "messages" not in run_options: + run_options["messages"] = self._prepare_messages_for_openai(messages) + if "messages" not in run_options: raise 
ServiceInvalidRequestError("Messages are required for chat completions") + + # Translation between ChatOptions and Chat Completion API + translations = { + "model_id": "model", + "allow_multiple_tool_calls": "parallel_tool_calls", + "max_tokens": "max_output_tokens", + } + for old_key, new_key in translations.items(): + if old_key in run_options and old_key != new_key: + run_options[new_key] = run_options.pop(old_key) + + # model id + if not run_options.get("model"): + if not self.model_id: + raise ValueError("model_id must be a non-empty string") + run_options["model"] = self.model_id + + # tools if chat_options.tools is not None: - web_search_options = self._process_web_search_tool(chat_options.tools) - if web_search_options: - options_dict["web_search_options"] = web_search_options - options_dict["tools"] = self._chat_to_tool_spec(chat_options.tools) - if chat_options.allow_multiple_tool_calls is not None: - options_dict["parallel_tool_calls"] = chat_options.allow_multiple_tool_calls - if not options_dict.get("tools", None): - options_dict.pop("tools", None) - options_dict.pop("parallel_tool_calls", None) - options_dict.pop("tool_choice", None) - - if "model_id" not in options_dict: - options_dict["model"] = self.model_id - else: - options_dict["model"] = options_dict.pop("model_id") - if ( - chat_options.response_format - and isinstance(chat_options.response_format, type) - and issubclass(chat_options.response_format, BaseModel) - ): - options_dict["response_format"] = type_to_response_format_param(chat_options.response_format) - if additional_properties := options_dict.pop("additional_properties", None): - for key, value in additional_properties.items(): - if value is not None: - options_dict[key] = value - if (tool_choice := options_dict.get("tool_choice")) and len(tool_choice.keys()) == 1: - options_dict["tool_choice"] = tool_choice["mode"] - return options_dict - - def _create_chat_response(self, response: ChatCompletion, chat_options: ChatOptions) -> 
"ChatResponse": - """Create a chat message content object from a choice.""" + # Preprocess web search tool if it exists + if web_search_options := self._process_web_search_tool(chat_options.tools): + run_options["web_search_options"] = web_search_options + run_options["tools"] = self._prepare_tools_for_openai(chat_options.tools) + if not run_options.get("tools", None): + run_options.pop("tools", None) + run_options.pop("parallel_tool_calls", None) + run_options.pop("tool_choice", None) + # tool choice when `tool_choice` is a dict with single key `mode`, extract the mode value + if (tool_choice := run_options.get("tool_choice")) and len(tool_choice.keys()) == 1: + run_options["tool_choice"] = tool_choice["mode"] + + # response format + if chat_options.response_format: + run_options["response_format"] = type_to_response_format_param(chat_options.response_format) + + # additional properties + additional_options = { + key: value for key, value in chat_options.additional_properties.items() if value is not None + } + if additional_options: + run_options.update(additional_options) + return run_options + + def _parse_response_from_openai(self, response: ChatCompletion, chat_options: ChatOptions) -> "ChatResponse": + """Parse a response from OpenAI into a ChatResponse.""" response_metadata = self._get_metadata_from_chat_response(response) messages: list[ChatMessage] = [] finish_reason: FinishReason | None = None @@ -210,15 +231,17 @@ def _create_chat_response(self, response: ChatCompletion, chat_options: ChatOpti if choice.finish_reason: finish_reason = FinishReason(value=choice.finish_reason) contents: list[Contents] = [] - if text_content := self._parse_text_from_choice(choice): + if text_content := self._parse_text_from_openai(choice): contents.append(text_content) - if parsed_tool_calls := [tool for tool in self._get_tool_calls_from_chat_choice(choice)]: + if parsed_tool_calls := [tool for tool in self._parse_tool_calls_from_openai(choice)]: 
contents.extend(parsed_tool_calls) + if reasoning_details := getattr(choice.message, "reasoning_details", None): + contents.append(TextReasoningContent(None, protected_data=json.dumps(reasoning_details))) messages.append(ChatMessage(role="assistant", contents=contents)) return ChatResponse( response_id=response.id, created_at=datetime.fromtimestamp(response.created, tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ"), - usage_details=self._usage_details_from_openai(response.usage) if response.usage else None, + usage_details=self._parse_usage_from_openai(response.usage) if response.usage else None, messages=messages, model_id=response.model, additional_properties=response_metadata, @@ -226,16 +249,16 @@ def _create_chat_response(self, response: ChatCompletion, chat_options: ChatOpti response_format=chat_options.response_format, ) - def _create_chat_response_update( + def _parse_response_update_from_openai( self, chunk: ChatCompletionChunk, ) -> ChatResponseUpdate: - """Create a streaming chat message content object from a choice.""" + """Parse a streaming response update from OpenAI.""" chunk_metadata = self._get_metadata_from_streaming_chat_response(chunk) if chunk.usage: return ChatResponseUpdate( role=Role.ASSISTANT, - contents=[UsageContent(details=self._usage_details_from_openai(chunk.usage), raw_representation=chunk)], + contents=[UsageContent(details=self._parse_usage_from_openai(chunk.usage), raw_representation=chunk)], model_id=chunk.model, additional_properties=chunk_metadata, response_id=chunk.id, @@ -245,12 +268,14 @@ def _create_chat_response_update( finish_reason: FinishReason | None = None for choice in chunk.choices: chunk_metadata.update(self._get_metadata_from_chat_choice(choice)) - contents.extend(self._get_tool_calls_from_chat_choice(choice)) + contents.extend(self._parse_tool_calls_from_openai(choice)) if choice.finish_reason: finish_reason = FinishReason(value=choice.finish_reason) - if text_content := self._parse_text_from_choice(choice): + if 
text_content := self._parse_text_from_openai(choice): contents.append(text_content) + if reasoning_details := getattr(choice.delta, "reasoning_details", None): + contents.append(TextReasoningContent(None, protected_data=json.dumps(reasoning_details))) return ChatResponseUpdate( created_at=datetime.fromtimestamp(chunk.created, tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ"), contents=contents, @@ -263,7 +288,7 @@ def _create_chat_response_update( message_id=chunk.id, ) - def _usage_details_from_openai(self, usage: CompletionUsage) -> UsageDetails: + def _parse_usage_from_openai(self, usage: CompletionUsage) -> UsageDetails: details = UsageDetails( input_token_count=usage.prompt_tokens, output_token_count=usage.completion_tokens, @@ -285,7 +310,7 @@ def _usage_details_from_openai(self, usage: CompletionUsage) -> UsageDetails: details["prompt/cached_tokens"] = tokens return details - def _parse_text_from_choice(self, choice: Choice | ChunkChoice) -> TextContent | None: + def _parse_text_from_openai(self, choice: Choice | ChunkChoice) -> TextContent | None: """Parse the choice into a TextContent object.""" message = choice.message if isinstance(choice, Choice) else choice.delta if message.content: @@ -312,8 +337,8 @@ def _get_metadata_from_chat_choice(self, choice: Choice | ChunkChoice) -> dict[s "logprobs": getattr(choice, "logprobs", None), } - def _get_tool_calls_from_chat_choice(self, choice: Choice | ChunkChoice) -> list[Contents]: - """Get tool calls from a chat choice.""" + def _parse_tool_calls_from_openai(self, choice: Choice | ChunkChoice) -> list[Contents]: + """Parse tool calls from an OpenAI response choice.""" resp: list[Contents] = [] content = choice.message if isinstance(choice, Choice) else choice.delta if content and content.tool_calls: @@ -331,13 +356,13 @@ def _get_tool_calls_from_chat_choice(self, choice: Choice | ChunkChoice) -> list # When you enable asynchronous content filtering in Azure OpenAI, you may receive empty deltas return resp - 
def _prepare_chat_history_for_request( + def _prepare_messages_for_openai( self, chat_messages: Sequence[ChatMessage], role_key: str = "role", content_key: str = "content", ) -> list[dict[str, Any]]: - """Prepare the chat history for a request. + """Prepare the chat history for an OpenAI request. Allowing customization of the key names for role/author, and optionally overriding the role. @@ -355,14 +380,14 @@ def _prepare_chat_history_for_request( Returns: prepared_chat_history (Any): The prepared chat history for a request. """ - list_of_list = [self._openai_chat_message_parser(message) for message in chat_messages] + list_of_list = [self._prepare_message_for_openai(message) for message in chat_messages] # Flatten the list of lists into a single list return list(chain.from_iterable(list_of_list)) # region Parsers - def _openai_chat_message_parser(self, message: ChatMessage) -> list[dict[str, Any]]: - """Parse a chat message into the openai format.""" + def _prepare_message_for_openai(self, message: ChatMessage) -> list[dict[str, Any]]: + """Prepare a chat message for OpenAI.""" all_messages: list[dict[str, Any]] = [] for content in message.contents: # Skip approval content - it's internal framework state, not for the LLM @@ -372,28 +397,36 @@ def _openai_chat_message_parser(self, message: ChatMessage) -> list[dict[str, An args: dict[str, Any] = { "role": message.role.value if isinstance(message.role, Role) else message.role, } + if message.author_name and message.role != Role.TOOL: + args["name"] = message.author_name + if "reasoning_details" in message.additional_properties and ( + details := message.additional_properties["reasoning_details"] + ): + args["reasoning_details"] = details match content: case FunctionCallContent(): if all_messages and "tool_calls" in all_messages[-1]: # If the last message already has tool calls, append to it - all_messages[-1]["tool_calls"].append(self._openai_content_parser(content)) + 
all_messages[-1]["tool_calls"].append(self._prepare_content_for_openai(content)) else: - args["tool_calls"] = [self._openai_content_parser(content)] # type: ignore + args["tool_calls"] = [self._prepare_content_for_openai(content)] # type: ignore case FunctionResultContent(): args["tool_call_id"] = content.call_id if content.result is not None: args["content"] = prepare_function_call_results(content.result) + case TextReasoningContent(protected_data=protected_data) if protected_data is not None: + all_messages[-1]["reasoning_details"] = json.loads(protected_data) case _: if "content" not in args: args["content"] = [] # this is a list to allow multi-modal content - args["content"].append(self._openai_content_parser(content)) # type: ignore + args["content"].append(self._prepare_content_for_openai(content)) # type: ignore if "content" in args or "tool_calls" in args: all_messages.append(args) return all_messages - def _openai_content_parser(self, content: Contents) -> dict[str, Any]: - """Parse contents into the openai format.""" + def _prepare_content_for_openai(self, content: Contents) -> dict[str, Any]: + """Prepare content for OpenAI.""" match content: case FunctionCallContent(): args = json.dumps(content.arguments) if isinstance(content.arguments, Mapping) else content.arguments diff --git a/python/packages/core/agent_framework/openai/_responses_client.py b/python/packages/core/agent_framework/openai/_responses_client.py index ecdd7be660..54a0f5544b 100644 --- a/python/packages/core/agent_framework/openai/_responses_client.py +++ b/python/packages/core/agent_framework/openai/_responses_client.py @@ -89,28 +89,16 @@ async def _inner_get_response( chat_options: ChatOptions, **kwargs: Any, ) -> ChatResponse: - client = await self.ensure_client() - run_options = await self.prepare_options(messages, chat_options, **kwargs) - response_format = run_options.pop("response_format", None) - text_config = run_options.pop("text", None) - text_format, text_config = 
self._prepare_text_config(response_format=response_format, text_config=text_config) - if text_config: - run_options["text"] = text_config + client = await self._ensure_client() + # prepare + run_options = await self._prepare_options(messages, chat_options, **kwargs) try: - if not text_format: - response = await client.responses.create( - stream=False, - **run_options, - ) - chat_options.conversation_id = self.get_conversation_id(response, chat_options.store) - return self._create_response_content(response, chat_options=chat_options) - parsed_response: ParsedResponse[BaseModel] = await client.responses.parse( - text_format=text_format, - stream=False, - **run_options, - ) - chat_options.conversation_id = self.get_conversation_id(parsed_response, chat_options.store) - return self._create_response_content(parsed_response, chat_options=chat_options) + # execute and process + if "text_format" in run_options: + response = await client.responses.parse(stream=False, **run_options) + else: + response = await client.responses.create(stream=False, **run_options) + return self._parse_response_from_openai(response, chat_options=chat_options) except BadRequestError as ex: if ex.code == "content_filter": raise OpenAIContentFilterException( @@ -134,35 +122,23 @@ async def _inner_get_streaming_response( chat_options: ChatOptions, **kwargs: Any, ) -> AsyncIterable[ChatResponseUpdate]: - client = await self.ensure_client() - run_options = await self.prepare_options(messages, chat_options, **kwargs) + client = await self._ensure_client() + # prepare + run_options = await self._prepare_options(messages, chat_options, **kwargs) function_call_ids: dict[int, tuple[str, str]] = {} # output_index: (call_id, name) - response_format = run_options.pop("response_format", None) - text_config = run_options.pop("text", None) - text_format, text_config = self._prepare_text_config(response_format=response_format, text_config=text_config) - if text_config: - run_options["text"] = text_config try: - 
if not text_format: - response = await client.responses.create( - stream=True, - **run_options, - ) - async for chunk in response: - update = self._create_streaming_response_content( + # execute and process + if "text_format" not in run_options: + async for chunk in await client.responses.create(stream=True, **run_options): + yield self._parse_chunk_from_openai( chunk, chat_options=chat_options, function_call_ids=function_call_ids ) - yield update return - async with client.responses.stream( - text_format=text_format, - **run_options, - ) as response: + async with client.responses.stream(**run_options) as response: async for chunk in response: - update = self._create_streaming_response_content( + yield self._parse_chunk_from_openai( chunk, chat_options=chat_options, function_call_ids=function_call_ids ) - yield update except BadRequestError as ex: if ex.code == "content_filter": raise OpenAIContentFilterException( @@ -179,33 +155,33 @@ async def _inner_get_streaming_response( inner_exception=ex, ) from ex - def _prepare_text_config( + def _prepare_response_and_text_format( self, *, response_format: Any, text_config: MutableMapping[str, Any] | None, ) -> tuple[type[BaseModel] | None, dict[str, Any] | None]: """Normalize response_format into Responses text configuration and parse target.""" - prepared_text = dict(text_config) if isinstance(text_config, MutableMapping) else None if text_config is not None and not isinstance(text_config, MutableMapping): raise ServiceInvalidRequestError("text must be a mapping when provided.") + text_config = cast(dict[str, Any], text_config) if isinstance(text_config, MutableMapping) else None if response_format is None: - return None, prepared_text + return None, text_config if isinstance(response_format, type) and issubclass(response_format, BaseModel): - if prepared_text and "format" in prepared_text: + if text_config and "format" in text_config: raise ServiceInvalidRequestError("response_format cannot be combined with explicit 
text.format.") - return response_format, prepared_text + return response_format, text_config if isinstance(response_format, Mapping): format_config = self._convert_response_format(cast("Mapping[str, Any]", response_format)) - if prepared_text is None: - prepared_text = {} - elif "format" in prepared_text and prepared_text["format"] != format_config: + if text_config is None: + text_config = {} + elif "format" in text_config and text_config["format"] != format_config: raise ServiceInvalidRequestError("Conflicting response_format definitions detected.") - prepared_text["format"] = format_config - return None, prepared_text + text_config["format"] = format_config + return None, text_config raise ServiceInvalidRequestError("response_format must be a Pydantic model or mapping.") @@ -245,23 +221,33 @@ def _convert_response_format(self, response_format: Mapping[str, Any]) -> dict[s raise ServiceInvalidRequestError("Unsupported response_format provided for Responses client.") - def get_conversation_id( + def _get_conversation_id( self, response: OpenAIResponse | ParsedResponse[BaseModel], store: bool | None ) -> str | None: """Get the conversation ID from the response if store is True.""" - return None if store is False else response.id + if store is False: + return None + # If conversation ID exists, it means that we operate with conversation + # so we use conversation ID as input and output. + if response.conversation and response.conversation.id: + return response.conversation.id + # If conversation ID doesn't exist, we operate with responses + # so we use response ID as input and output. 
+ return response.id # region Prep methods - def _tools_to_response_tools( - self, tools: Sequence[ToolProtocol | MutableMapping[str, Any]] + def _prepare_tools_for_openai( + self, tools: Sequence[ToolProtocol | MutableMapping[str, Any]] | None ) -> list[ToolParam | dict[str, Any]]: response_tools: list[ToolParam | dict[str, Any]] = [] + if not tools: + return response_tools for tool in tools: if isinstance(tool, ToolProtocol): match tool: case HostedMCPTool(): - response_tools.append(self.get_mcp_tool(tool)) + response_tools.append(self._prepare_mcp_tool(tool)) case HostedCodeInterpreterTool(): tool_args: CodeInterpreterContainerCodeInterpreterToolAuto = {"type": "auto"} if tool.inputs: @@ -363,7 +349,8 @@ def _tools_to_response_tools( response_tools.append(tool_dict) return response_tools - def get_mcp_tool(self, tool: HostedMCPTool) -> Any: + @staticmethod + def _prepare_mcp_tool(tool: HostedMCPTool) -> Mcp: """Get MCP tool from HostedMCPTool.""" mcp: Mcp = { "type": "mcp", @@ -386,18 +373,13 @@ def get_mcp_tool(self, tool: HostedMCPTool) -> Any: return mcp - async def prepare_options( + async def _prepare_options( self, messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any, ) -> dict[str, Any]: """Take ChatOptions and create the specific options for Responses API.""" - conversation_id = kwargs.pop("conversation_id", None) - - if conversation_id: - chat_options.conversation_id = conversation_id - run_options: dict[str, Any] = chat_options.to_dict( exclude={ "type", @@ -407,12 +389,24 @@ async def prepare_options( "seed", # not supported "stop", # not supported "instructions", # already added as system message + "response_format", # handled separately + "conversation_id", # handled separately + "additional_properties", # handled separately } ) + # messages + request_input = self._prepare_messages_for_openai(messages) + if not request_input: + raise ServiceInvalidRequestError("Messages are required for chat completions") + 
run_options["input"] = request_input - if chat_options.response_format: - run_options["response_format"] = chat_options.response_format + # model id + if not run_options.get("model"): + if not self.model_id: + raise ValueError("model_id must be a non-empty string") + run_options["model"] = self.model_id + # translations between ChatOptions and Responses API translations = { "model_id": "model", "allow_multiple_tool_calls": "parallel_tool_calls", @@ -423,34 +417,53 @@ async def prepare_options( if old_key in run_options and old_key != new_key: run_options[new_key] = run_options.pop(old_key) + # Handle different conversation ID formats + if conversation_id := self._get_current_conversation_id(chat_options, **kwargs): + if conversation_id.startswith("resp_"): + # For response IDs, set previous_response_id and remove conversation property + run_options["previous_response_id"] = conversation_id + elif conversation_id.startswith("conv_"): + # For conversation IDs, set conversation and remove previous_response_id property + run_options["conversation"] = conversation_id + else: + # If the format is unrecognized, default to previous_response_id + run_options["previous_response_id"] = conversation_id + # tools - if chat_options.tools is None: - run_options.pop("parallel_tool_calls", None) + if tools := self._prepare_tools_for_openai(chat_options.tools): + run_options["tools"] = tools else: - run_options["tools"] = self._tools_to_response_tools(chat_options.tools) + run_options.pop("parallel_tool_calls", None) + run_options.pop("tool_choice", None) + # tool choice when `tool_choice` is a dict with single key `mode`, extract the mode value + if (tool_choice := run_options.get("tool_choice")) and len(tool_choice.keys()) == 1: + run_options["tool_choice"] = tool_choice["mode"] - # model id - if not run_options.get("model"): - if not self.model_id: - raise ValueError("model_id must be a non-empty string") - run_options["model"] = self.model_id + # additional properties + 
additional_options = { + key: value for key, value in chat_options.additional_properties.items() if value is not None + } + if additional_options: + run_options.update(additional_options) - # messages - request_input = self._prepare_chat_messages_for_request(messages) - if not request_input: - raise ServiceInvalidRequestError("Messages are required for chat completions") - run_options["input"] = request_input + # response format and text config (after additional_properties so user can pass text via additional_properties) + response_format = chat_options.response_format + text_config = run_options.pop("text", None) + response_format, text_config = self._prepare_response_and_text_format( + response_format=response_format, text_config=text_config + ) + if text_config: + run_options["text"] = text_config + if response_format: + run_options["text_format"] = response_format - # additional provider specific settings - if additional_properties := run_options.pop("additional_properties", None): - for key, value in additional_properties.items(): - if value is not None: - run_options[key] = value - if (tool_choice := run_options.get("tool_choice")) and len(tool_choice.keys()) == 1: - run_options["tool_choice"] = tool_choice["mode"] return run_options - def _prepare_chat_messages_for_request(self, chat_messages: Sequence[ChatMessage]) -> list[dict[str, Any]]: + def _get_current_conversation_id(self, chat_options: ChatOptions, **kwargs: Any) -> str | None: + """Get the current conversation ID from chat options or kwargs.""" + return chat_options.conversation_id or kwargs.get("conversation_id") + + def _prepare_messages_for_openai(self, chat_messages: Sequence[ChatMessage]) -> list[dict[str, Any]]: """Prepare the chat messages for a request. Allowing customization of the key names for role/author, and optionally overriding the role. 
@@ -476,16 +489,16 @@ def _prepare_chat_messages_for_request(self, chat_messages: Sequence[ChatMessage and "fc_id" in content.additional_properties ): call_id_to_id[content.call_id] = content.additional_properties["fc_id"] - list_of_list = [self._openai_chat_message_parser(message, call_id_to_id) for message in chat_messages] + list_of_list = [self._prepare_message_for_openai(message, call_id_to_id) for message in chat_messages] # Flatten the list of lists into a single list return list(chain.from_iterable(list_of_list)) - def _openai_chat_message_parser( + def _prepare_message_for_openai( self, message: ChatMessage, call_id_to_id: dict[str, str], ) -> list[dict[str, Any]]: - """Parse a chat message into the openai format.""" + """Prepare a chat message for the OpenAI Responses API format.""" all_messages: list[dict[str, Any]] = [] args: dict[str, Any] = { "role": message.role.value if isinstance(message.role, Role) else message.role, @@ -497,28 +510,28 @@ def _openai_chat_message_parser( continue case FunctionResultContent(): new_args: dict[str, Any] = {} - new_args.update(self._openai_content_parser(message.role, content, call_id_to_id)) + new_args.update(self._prepare_content_for_openai(message.role, content, call_id_to_id)) all_messages.append(new_args) case FunctionCallContent(): - function_call = self._openai_content_parser(message.role, content, call_id_to_id) + function_call = self._prepare_content_for_openai(message.role, content, call_id_to_id) all_messages.append(function_call) # type: ignore case FunctionApprovalResponseContent() | FunctionApprovalRequestContent(): - all_messages.append(self._openai_content_parser(message.role, content, call_id_to_id)) # type: ignore + all_messages.append(self._prepare_content_for_openai(message.role, content, call_id_to_id)) # type: ignore case _: if "content" not in args: args["content"] = [] - args["content"].append(self._openai_content_parser(message.role, content, call_id_to_id)) # type: ignore + 
args["content"].append(self._prepare_content_for_openai(message.role, content, call_id_to_id)) # type: ignore if "content" in args or "tool_calls" in args: all_messages.append(args) return all_messages - def _openai_content_parser( + def _prepare_content_for_openai( self, role: Role, content: Contents, call_id_to_id: dict[str, str], ) -> dict[str, Any]: - """Parse contents into the openai format.""" + """Prepare content for the OpenAI Responses API format.""" match content: case TextContent(): return { @@ -625,14 +638,13 @@ def _openai_content_parser( logger.debug("Unsupported content type passed (type: %s)", type(content)) return {} - # region Response creation methods - - def _create_response_content( + # region Parse methods + def _parse_response_from_openai( self, response: OpenAIResponse | ParsedResponse[BaseModel], chat_options: ChatOptions, ) -> "ChatResponse": - """Create a chat message content object from a choice.""" + """Parse an OpenAI Responses API response into a ChatResponse.""" structured_response: BaseModel | None = response.output_parsed if isinstance(response, ParsedResponse) else None # type: ignore[reportUnknownMemberType] metadata: dict[str, Any] = response.metadata or {} @@ -826,11 +838,9 @@ def _create_response_content( "raw_representation": response, } - conversation_id = self.get_conversation_id(response, chat_options.store) # type: ignore[reportArgumentType] - - if conversation_id: + if conversation_id := self._get_conversation_id(response, chat_options.store): args["conversation_id"] = conversation_id - if response.usage and (usage_details := self._usage_details_from_openai(response.usage)): + if response.usage and (usage_details := self._parse_usage_from_openai(response.usage)): args["usage_details"] = usage_details if structured_response: args["value"] = structured_response @@ -838,16 +848,17 @@ def _create_response_content( args["response_format"] = chat_options.response_format return ChatResponse(**args) - def 
_create_streaming_response_content( + def _parse_chunk_from_openai( self, event: OpenAIResponseStreamEvent, chat_options: ChatOptions, function_call_ids: dict[int, tuple[str, str]], ) -> ChatResponseUpdate: - """Create a streaming chat message content object from a choice.""" + """Parse an OpenAI Responses API streaming event into a ChatResponseUpdate.""" metadata: dict[str, Any] = {} contents: list[Contents] = [] conversation_id: str | None = None + response_id: str | None = None model = self.model_id # TODO(peterychang): Add support for other content types match event.type: @@ -930,11 +941,18 @@ def _create_streaming_response_content( case "response.reasoning_summary_text.done": contents.append(TextReasoningContent(text=event.text, raw_representation=event)) metadata.update(self._get_metadata_from_response(event)) + case "response.created": + response_id = event.response.id + conversation_id = self._get_conversation_id(event.response, chat_options.store) + case "response.in_progress": + response_id = event.response.id + conversation_id = self._get_conversation_id(event.response, chat_options.store) case "response.completed": - conversation_id = self.get_conversation_id(event.response, chat_options.store) + response_id = event.response.id + conversation_id = self._get_conversation_id(event.response, chat_options.store) model = event.response.model if event.response.usage: - usage = self._usage_details_from_openai(event.response.usage) + usage = self._parse_usage_from_openai(event.response.usage) if usage: contents.append(UsageContent(details=usage, raw_representation=event)) case "response.output_item.added": @@ -1096,13 +1114,14 @@ def _get_ann_value(key: str) -> Any: return ChatResponseUpdate( contents=contents, conversation_id=conversation_id, + response_id=response_id, role=Role.ASSISTANT, model_id=model, additional_properties=metadata, raw_representation=event, ) - def _usage_details_from_openai(self, usage: ResponseUsage) -> UsageDetails | None: + def 
_parse_usage_from_openai(self, usage: ResponseUsage) -> UsageDetails | None: details = UsageDetails( input_token_count=usage.input_tokens, output_token_count=usage.output_tokens, diff --git a/python/packages/core/agent_framework/openai/_shared.py b/python/packages/core/agent_framework/openai/_shared.py index e0df8844e4..77189168f1 100644 --- a/python/packages/core/agent_framework/openai/_shared.py +++ b/python/packages/core/agent_framework/openai/_shared.py @@ -160,16 +160,16 @@ def __init__(self, *, model_id: str | None = None, client: AsyncOpenAI | None = for key, value in kwargs.items(): setattr(self, key, value) - async def initialize_client(self) -> None: + async def _initialize_client(self) -> None: """Initialize OpenAI client asynchronously. Override in subclasses to initialize the OpenAI client asynchronously. """ pass - async def ensure_client(self) -> AsyncOpenAI: + async def _ensure_client(self) -> AsyncOpenAI: """Ensure OpenAI client is initialized.""" - await self.initialize_client() + await self._initialize_client() if self.client is None: raise ServiceInitializationError("OpenAI client is not initialized") diff --git a/python/packages/core/pyproject.toml b/python/packages/core/pyproject.toml index ed6e4e9d1d..eb7cdcefb7 100644 --- a/python/packages/core/pyproject.toml +++ b/python/packages/core/pyproject.toml @@ -4,7 +4,7 @@ description = "Microsoft Agent Framework for building AI Agents with Python. 
Thi authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251216" +version = "1.0.0b251223" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" @@ -53,6 +53,7 @@ all = [ "agent-framework-durabletask", "agent-framework-lab", "agent-framework-mem0", + "agent-framework-ollama", "agent-framework-purview", "agent-framework-redis", ] diff --git a/python/packages/core/tests/azure/test_azure_chat_client.py b/python/packages/core/tests/azure/test_azure_chat_client.py index 1b7dbb904b..7da838529f 100644 --- a/python/packages/core/tests/azure/test_azure_chat_client.py +++ b/python/packages/core/tests/azure/test_azure_chat_client.py @@ -193,7 +193,7 @@ async def test_cmc( mock_create.assert_awaited_once_with( model=azure_openai_unit_test_env["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"], stream=False, - messages=azure_chat_client._prepare_chat_history_for_request(chat_history), # type: ignore + messages=azure_chat_client._prepare_messages_for_openai(chat_history), # type: ignore ) @@ -216,7 +216,7 @@ async def test_cmc_with_logit_bias( mock_create.assert_awaited_once_with( model=azure_openai_unit_test_env["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"], - messages=azure_chat_client._prepare_chat_history_for_request(chat_history), # type: ignore + messages=azure_chat_client._prepare_messages_for_openai(chat_history), # type: ignore stream=False, logit_bias=token_bias, ) @@ -241,7 +241,7 @@ async def test_cmc_with_stop( mock_create.assert_awaited_once_with( model=azure_openai_unit_test_env["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"], - messages=azure_chat_client._prepare_chat_history_for_request(chat_history), # type: ignore + messages=azure_chat_client._prepare_messages_for_openai(chat_history), # type: ignore stream=False, stop=stop, ) @@ -311,7 +311,7 @@ async def test_azure_on_your_data( 
mock_create.assert_awaited_once_with( model=azure_openai_unit_test_env["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"], - messages=azure_chat_client._prepare_chat_history_for_request(messages_out), # type: ignore + messages=azure_chat_client._prepare_messages_for_openai(messages_out), # type: ignore stream=False, extra_body=expected_data_settings, ) @@ -381,7 +381,7 @@ async def test_azure_on_your_data_string( mock_create.assert_awaited_once_with( model=azure_openai_unit_test_env["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"], - messages=azure_chat_client._prepare_chat_history_for_request(messages_out), # type: ignore + messages=azure_chat_client._prepare_messages_for_openai(messages_out), # type: ignore stream=False, extra_body=expected_data_settings, ) @@ -438,7 +438,7 @@ async def test_azure_on_your_data_fail( mock_create.assert_awaited_once_with( model=azure_openai_unit_test_env["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"], - messages=azure_chat_client._prepare_chat_history_for_request(messages_out), # type: ignore + messages=azure_chat_client._prepare_messages_for_openai(messages_out), # type: ignore stream=False, extra_body=expected_data_settings, ) @@ -584,7 +584,7 @@ async def test_get_streaming( mock_create.assert_awaited_once_with( model=azure_openai_unit_test_env["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"], stream=True, - messages=azure_chat_client._prepare_chat_history_for_request(chat_history), # type: ignore + messages=azure_chat_client._prepare_messages_for_openai(chat_history), # type: ignore # NOTE: The `stream_options={"include_usage": True}` is explicitly enforced in # `OpenAIChatCompletionBase._inner_get_streaming_response`. # To ensure consistency, we align the arguments here accordingly. 
diff --git a/python/packages/core/tests/core/test_mcp.py b/python/packages/core/tests/core/test_mcp.py index 93643da30f..18c90d64b3 100644 --- a/python/packages/core/tests/core/test_mcp.py +++ b/python/packages/core/tests/core/test_mcp.py @@ -24,14 +24,14 @@ ) from agent_framework._mcp import ( MCPTool, - _ai_content_to_mcp_types, - _chat_message_to_mcp_types, _get_input_model_from_mcp_prompt, _get_input_model_from_mcp_tool, - _mcp_call_tool_result_to_ai_contents, - _mcp_prompt_message_to_chat_message, - _mcp_type_to_ai_content, _normalize_mcp_name, + _parse_content_from_mcp, + _parse_contents_from_mcp_tool_result, + _parse_message_from_mcp, + _prepare_content_for_mcp, + _prepare_message_for_mcp, ) from agent_framework.exceptions import ToolException, ToolExecutionException @@ -60,7 +60,7 @@ def test_normalize_mcp_name(): def test_mcp_prompt_message_to_ai_content(): """Test conversion from MCP prompt message to AI content.""" mcp_message = types.PromptMessage(role="user", content=types.TextContent(type="text", text="Hello, world!")) - ai_content = _mcp_prompt_message_to_chat_message(mcp_message) + ai_content = _parse_message_from_mcp(mcp_message) assert isinstance(ai_content, ChatMessage) assert ai_content.role.value == "user" @@ -70,7 +70,7 @@ def test_mcp_prompt_message_to_ai_content(): assert ai_content.raw_representation == mcp_message -def test_mcp_call_tool_result_to_ai_contents(): +def test_parse_contents_from_mcp_tool_result(): """Test conversion from MCP tool result to AI contents.""" mcp_result = types.CallToolResult( content=[ @@ -79,7 +79,7 @@ def test_mcp_call_tool_result_to_ai_contents(): types.ImageContent(type="image", data=b"abc", mimeType="image/webp"), ] ) - ai_contents = _mcp_call_tool_result_to_ai_contents(mcp_result) + ai_contents = _parse_contents_from_mcp_tool_result(mcp_result) assert len(ai_contents) == 3 assert isinstance(ai_contents[0], TextContent) @@ -100,7 +100,7 @@ def test_mcp_call_tool_result_with_meta_error(): _meta={"isError": 
True, "errorCode": "TOOL_ERROR", "errorMessage": "Tool execution failed"}, ) - ai_contents = _mcp_call_tool_result_to_ai_contents(mcp_result) + ai_contents = _parse_contents_from_mcp_tool_result(mcp_result) assert len(ai_contents) == 1 assert isinstance(ai_contents[0], TextContent) @@ -131,7 +131,7 @@ def test_mcp_call_tool_result_with_meta_arbitrary_data(): }, ) - ai_contents = _mcp_call_tool_result_to_ai_contents(mcp_result) + ai_contents = _parse_contents_from_mcp_tool_result(mcp_result) assert len(ai_contents) == 1 assert isinstance(ai_contents[0], TextContent) @@ -153,7 +153,7 @@ def test_mcp_call_tool_result_with_meta_merging_existing_properties(): text_content = types.TextContent(type="text", text="Test content") mcp_result = types.CallToolResult(content=[text_content], _meta={"newField": "newValue", "isError": False}) - ai_contents = _mcp_call_tool_result_to_ai_contents(mcp_result) + ai_contents = _parse_contents_from_mcp_tool_result(mcp_result) assert len(ai_contents) == 1 content = ai_contents[0] @@ -169,7 +169,7 @@ def test_mcp_call_tool_result_with_meta_none(): mcp_result = types.CallToolResult(content=[types.TextContent(type="text", text="No meta test")]) # No _meta field set - ai_contents = _mcp_call_tool_result_to_ai_contents(mcp_result) + ai_contents = _parse_contents_from_mcp_tool_result(mcp_result) assert len(ai_contents) == 1 assert isinstance(ai_contents[0], TextContent) @@ -191,7 +191,7 @@ def test_mcp_call_tool_result_regression_successful_workflow(): ] ) - ai_contents = _mcp_call_tool_result_to_ai_contents(mcp_result) + ai_contents = _parse_contents_from_mcp_tool_result(mcp_result) # Verify basic conversion still works correctly assert len(ai_contents) == 2 @@ -213,7 +213,7 @@ def test_mcp_call_tool_result_regression_successful_workflow(): def test_mcp_content_types_to_ai_content_text(): """Test conversion of MCP text content to AI content.""" mcp_content = types.TextContent(type="text", text="Sample text") - ai_content = 
_mcp_type_to_ai_content(mcp_content)[0] + ai_content = _parse_content_from_mcp(mcp_content)[0] assert isinstance(ai_content, TextContent) assert ai_content.text == "Sample text" @@ -224,7 +224,7 @@ def test_mcp_content_types_to_ai_content_image(): """Test conversion of MCP image content to AI content.""" mcp_content = types.ImageContent(type="image", data="abc", mimeType="image/jpeg") mcp_content = types.ImageContent(type="image", data=b"abc", mimeType="image/jpeg") - ai_content = _mcp_type_to_ai_content(mcp_content)[0] + ai_content = _parse_content_from_mcp(mcp_content)[0] assert isinstance(ai_content, DataContent) assert ai_content.uri == "data:image/jpeg;base64,abc" @@ -235,7 +235,7 @@ def test_mcp_content_types_to_ai_content_image(): def test_mcp_content_types_to_ai_content_audio(): """Test conversion of MCP audio content to AI content.""" mcp_content = types.AudioContent(type="audio", data="def", mimeType="audio/wav") - ai_content = _mcp_type_to_ai_content(mcp_content)[0] + ai_content = _parse_content_from_mcp(mcp_content)[0] assert isinstance(ai_content, DataContent) assert ai_content.uri == "data:audio/wav;base64,def" @@ -251,7 +251,7 @@ def test_mcp_content_types_to_ai_content_resource_link(): name="test_resource", mimeType="application/json", ) - ai_content = _mcp_type_to_ai_content(mcp_content)[0] + ai_content = _parse_content_from_mcp(mcp_content)[0] assert isinstance(ai_content, UriContent) assert ai_content.uri == "https://example.com/resource" @@ -267,7 +267,7 @@ def test_mcp_content_types_to_ai_content_embedded_resource_text(): text="Embedded text content", ) mcp_content = types.EmbeddedResource(type="resource", resource=text_resource) - ai_content = _mcp_type_to_ai_content(mcp_content)[0] + ai_content = _parse_content_from_mcp(mcp_content)[0] assert isinstance(ai_content, TextContent) assert ai_content.text == "Embedded text content" @@ -283,7 +283,7 @@ def test_mcp_content_types_to_ai_content_embedded_resource_blob(): 
blob="data:application/octet-stream;base64,dGVzdCBkYXRh", ) mcp_content = types.EmbeddedResource(type="resource", resource=blob_resource) - ai_content = _mcp_type_to_ai_content(mcp_content)[0] + ai_content = _parse_content_from_mcp(mcp_content)[0] assert isinstance(ai_content, DataContent) assert ai_content.uri == "data:application/octet-stream;base64,dGVzdCBkYXRh" @@ -294,7 +294,7 @@ def test_mcp_content_types_to_ai_content_embedded_resource_blob(): def test_ai_content_to_mcp_content_types_text(): """Test conversion of AI text content to MCP content.""" ai_content = TextContent(text="Sample text") - mcp_content = _ai_content_to_mcp_types(ai_content) + mcp_content = _prepare_content_for_mcp(ai_content) assert isinstance(mcp_content, types.TextContent) assert mcp_content.type == "text" @@ -304,7 +304,7 @@ def test_ai_content_to_mcp_content_types_text(): def test_ai_content_to_mcp_content_types_data_image(): """Test conversion of AI data content to MCP content.""" ai_content = DataContent(uri="data:image/png;base64,xyz", media_type="image/png") - mcp_content = _ai_content_to_mcp_types(ai_content) + mcp_content = _prepare_content_for_mcp(ai_content) assert isinstance(mcp_content, types.ImageContent) assert mcp_content.type == "image" @@ -315,7 +315,7 @@ def test_ai_content_to_mcp_content_types_data_image(): def test_ai_content_to_mcp_content_types_data_audio(): """Test conversion of AI data content to MCP content.""" ai_content = DataContent(uri="data:audio/mpeg;base64,xyz", media_type="audio/mpeg") - mcp_content = _ai_content_to_mcp_types(ai_content) + mcp_content = _prepare_content_for_mcp(ai_content) assert isinstance(mcp_content, types.AudioContent) assert mcp_content.type == "audio" @@ -329,7 +329,7 @@ def test_ai_content_to_mcp_content_types_data_binary(): uri="data:application/octet-stream;base64,xyz", media_type="application/octet-stream", ) - mcp_content = _ai_content_to_mcp_types(ai_content) + mcp_content = _prepare_content_for_mcp(ai_content) assert 
isinstance(mcp_content, types.EmbeddedResource) assert mcp_content.type == "resource" @@ -340,7 +340,7 @@ def test_ai_content_to_mcp_content_types_data_binary(): def test_ai_content_to_mcp_content_types_uri(): """Test conversion of AI URI content to MCP content.""" ai_content = UriContent(uri="https://example.com/resource", media_type="application/json") - mcp_content = _ai_content_to_mcp_types(ai_content) + mcp_content = _prepare_content_for_mcp(ai_content) assert isinstance(mcp_content, types.ResourceLink) assert mcp_content.type == "resource_link" @@ -348,7 +348,7 @@ def test_ai_content_to_mcp_content_types_uri(): assert mcp_content.mimeType == "application/json" -def test_chat_message_to_mcp_types(): +def test_prepare_message_for_mcp(): message = ChatMessage( role="user", contents=[ @@ -356,7 +356,7 @@ def test_chat_message_to_mcp_types(): DataContent(uri="data:image/png;base64,xyz", media_type="image/png"), ], ) - mcp_contents = _chat_message_to_mcp_types(message) + mcp_contents = _prepare_message_for_mcp(message) assert len(mcp_contents) == 2 assert isinstance(mcp_contents[0], types.TextContent) assert isinstance(mcp_contents[1], types.ImageContent) diff --git a/python/packages/core/tests/openai/test_openai_assistants_client.py b/python/packages/core/tests/openai/test_openai_assistants_client.py index b9c32b14b5..861ccc73d1 100644 --- a/python/packages/core/tests/openai/test_openai_assistants_client.py +++ b/python/packages/core/tests/openai/test_openai_assistants_client.py @@ -463,9 +463,9 @@ async def test_openai_assistants_client_process_stream_events_requires_action(mo """Test _process_stream_events with thread.run.requires_action event.""" chat_client = create_test_openai_assistants_client(mock_async_openai) - # Mock the _create_function_call_contents method to return test content + # Mock the _parse_function_calls_from_assistants method to return test content test_function_content = FunctionCallContent(call_id="call-123", name="test_func", 
arguments={"arg": "value"}) - chat_client._create_function_call_contents = MagicMock(return_value=[test_function_content]) # type: ignore + chat_client._parse_function_calls_from_assistants = MagicMock(return_value=[test_function_content]) # type: ignore # Create a mock Run object mock_run = MagicMock(spec=Run) @@ -498,8 +498,8 @@ async def async_iterator() -> Any: assert update.contents[0] == test_function_content assert update.raw_representation == mock_run - # Verify _create_function_call_contents was called correctly - chat_client._create_function_call_contents.assert_called_once_with(mock_run, None) # type: ignore + # Verify _parse_function_calls_from_assistants was called correctly + chat_client._parse_function_calls_from_assistants.assert_called_once_with(mock_run, None) # type: ignore async def test_openai_assistants_client_process_stream_events_run_step_created(mock_async_openai: MagicMock) -> None: @@ -585,8 +585,8 @@ async def async_iterator() -> Any: assert update.raw_representation == mock_run -def test_openai_assistants_client_create_function_call_contents_basic(mock_async_openai: MagicMock) -> None: - """Test _create_function_call_contents with a simple function call.""" +def test_openai_assistants_client_parse_function_calls_from_assistants_basic(mock_async_openai: MagicMock) -> None: + """Test _parse_function_calls_from_assistants with a simple function call.""" chat_client = create_test_openai_assistants_client(mock_async_openai) @@ -605,7 +605,7 @@ def test_openai_assistants_client_create_function_call_contents_basic(mock_async # Call the method response_id = "response_456" - contents = chat_client._create_function_call_contents(mock_run, response_id) # type: ignore + contents = chat_client._parse_function_calls_from_assistants(mock_run, response_id) # type: ignore # Test that one function call content was created assert len(contents) == 1 @@ -825,24 +825,24 @@ def test_openai_assistants_client_prepare_options_with_image_content(mock_async_ 
assert message["content"][0]["image_url"]["url"] == "https://example.com/image.jpg" -def test_openai_assistants_client_convert_function_results_to_tool_output_empty(mock_async_openai: MagicMock) -> None: - """Test _convert_function_results_to_tool_output with empty list.""" +def test_openai_assistants_client_prepare_tool_outputs_for_assistants_empty(mock_async_openai: MagicMock) -> None: + """Test _prepare_tool_outputs_for_assistants with empty list.""" chat_client = create_test_openai_assistants_client(mock_async_openai) - run_id, tool_outputs = chat_client._convert_function_results_to_tool_output([]) # type: ignore + run_id, tool_outputs = chat_client._prepare_tool_outputs_for_assistants([]) # type: ignore assert run_id is None assert tool_outputs is None -def test_openai_assistants_client_convert_function_results_to_tool_output_valid(mock_async_openai: MagicMock) -> None: - """Test _convert_function_results_to_tool_output with valid function results.""" +def test_openai_assistants_client_prepare_tool_outputs_for_assistants_valid(mock_async_openai: MagicMock) -> None: + """Test _prepare_tool_outputs_for_assistants with valid function results.""" chat_client = create_test_openai_assistants_client(mock_async_openai) call_id = json.dumps(["run-123", "call-456"]) function_result = FunctionResultContent(call_id=call_id, result="Function executed successfully") - run_id, tool_outputs = chat_client._convert_function_results_to_tool_output([function_result]) # type: ignore + run_id, tool_outputs = chat_client._prepare_tool_outputs_for_assistants([function_result]) # type: ignore assert run_id == "run-123" assert tool_outputs is not None @@ -851,10 +851,10 @@ def test_openai_assistants_client_convert_function_results_to_tool_output_valid( assert tool_outputs[0].get("output") == "Function executed successfully" -def test_openai_assistants_client_convert_function_results_to_tool_output_mismatched_run_ids( +def 
test_openai_assistants_client_prepare_tool_outputs_for_assistants_mismatched_run_ids( mock_async_openai: MagicMock, ) -> None: - """Test _convert_function_results_to_tool_output with mismatched run IDs.""" + """Test _prepare_tool_outputs_for_assistants with mismatched run IDs.""" chat_client = create_test_openai_assistants_client(mock_async_openai) # Create function results with different run IDs @@ -863,7 +863,7 @@ def test_openai_assistants_client_convert_function_results_to_tool_output_mismat function_result1 = FunctionResultContent(call_id=call_id1, result="Result 1") function_result2 = FunctionResultContent(call_id=call_id2, result="Result 2") - run_id, tool_outputs = chat_client._convert_function_results_to_tool_output([function_result1, function_result2]) # type: ignore + run_id, tool_outputs = chat_client._prepare_tool_outputs_for_assistants([function_result1, function_result2]) # type: ignore # Should only process the first one since run IDs don't match assert run_id == "run-123" diff --git a/python/packages/core/tests/openai/test_openai_chat_client.py b/python/packages/core/tests/openai/test_openai_chat_client.py index 8af3ed61aa..d2ddc1fb02 100644 --- a/python/packages/core/tests/openai/test_openai_chat_client.py +++ b/python/packages/core/tests/openai/test_openai_chat_client.py @@ -182,12 +182,12 @@ def test_unsupported_tool_handling(openai_unit_test_env: dict[str, str]) -> None unsupported_tool.__class__.__name__ = "UnsupportedAITool" # This should ignore the unsupported ToolProtocol and return empty list - result = client._chat_to_tool_spec([unsupported_tool]) # type: ignore + result = client._prepare_tools_for_openai([unsupported_tool]) # type: ignore assert result == [] # Also test with a non-ToolProtocol that should be converted to dict dict_tool = {"type": "function", "name": "test"} - result = client._chat_to_tool_spec([dict_tool]) # type: ignore + result = client._prepare_tools_for_openai([dict_tool]) # type: ignore assert result == [dict_tool] 
@@ -637,7 +637,7 @@ def test_chat_response_content_order_text_before_tool_calls(openai_unit_test_env ) client = OpenAIChatClient() - response = client._create_chat_response(mock_response, ChatOptions()) + response = client._parse_response_from_openai(mock_response, ChatOptions()) # Verify we have both text and tool call content assert len(response.messages) == 1 @@ -658,7 +658,7 @@ def test_function_result_falsy_values_handling(openai_unit_test_env: dict[str, s # Test with empty list (falsy but not None) message_with_empty_list = ChatMessage(role="tool", contents=[FunctionResultContent(call_id="call-123", result=[])]) - openai_messages = client._openai_chat_message_parser(message_with_empty_list) + openai_messages = client._prepare_message_for_openai(message_with_empty_list) assert len(openai_messages) == 1 assert openai_messages[0]["content"] == "[]" # Empty list should be JSON serialized @@ -667,14 +667,14 @@ def test_function_result_falsy_values_handling(openai_unit_test_env: dict[str, s role="tool", contents=[FunctionResultContent(call_id="call-456", result="")] ) - openai_messages = client._openai_chat_message_parser(message_with_empty_string) + openai_messages = client._prepare_message_for_openai(message_with_empty_string) assert len(openai_messages) == 1 assert openai_messages[0]["content"] == "" # Empty string should be preserved # Test with False (falsy but not None) message_with_false = ChatMessage(role="tool", contents=[FunctionResultContent(call_id="call-789", result=False)]) - openai_messages = client._openai_chat_message_parser(message_with_false) + openai_messages = client._prepare_message_for_openai(message_with_false) assert len(openai_messages) == 1 assert openai_messages[0]["content"] == "false" # False should be JSON serialized @@ -695,7 +695,7 @@ def test_function_result_exception_handling(openai_unit_test_env: dict[str, str] ], ) - openai_messages = client._openai_chat_message_parser(message_with_exception) + openai_messages = 
client._prepare_message_for_openai(message_with_exception) assert len(openai_messages) == 1 assert openai_messages[0]["content"] == "Error: Function failed." assert openai_messages[0]["tool_call_id"] == "call-123" @@ -708,8 +708,8 @@ def test_prepare_function_call_results_string_passthrough(): assert isinstance(result, str) -def test_openai_content_parser_data_content_image(openai_unit_test_env: dict[str, str]) -> None: - """Test _openai_content_parser converts DataContent with image media type to OpenAI format.""" +def test_prepare_content_for_openai_data_content_image(openai_unit_test_env: dict[str, str]) -> None: + """Test _prepare_content_for_openai converts DataContent with image media type to OpenAI format.""" client = OpenAIChatClient() # Test DataContent with image media type @@ -718,7 +718,7 @@ def test_openai_content_parser_data_content_image(openai_unit_test_env: dict[str media_type="image/png", ) - result = client._openai_content_parser(image_data_content) # type: ignore + result = client._prepare_content_for_openai(image_data_content) # type: ignore # Should convert to OpenAI image_url format assert result["type"] == "image_url" @@ -727,7 +727,7 @@ def test_openai_content_parser_data_content_image(openai_unit_test_env: dict[str # Test DataContent with non-image media type should use default model_dump text_data_content = DataContent(uri="data:text/plain;base64,SGVsbG8gV29ybGQ=", media_type="text/plain") - result = client._openai_content_parser(text_data_content) # type: ignore + result = client._prepare_content_for_openai(text_data_content) # type: ignore # Should use default model_dump format assert result["type"] == "data" @@ -740,7 +740,7 @@ def test_openai_content_parser_data_content_image(openai_unit_test_env: dict[str media_type="audio/wav", ) - result = client._openai_content_parser(audio_data_content) # type: ignore + result = client._prepare_content_for_openai(audio_data_content) # type: ignore # Should convert to OpenAI input_audio format 
assert result["type"] == "input_audio" @@ -751,7 +751,7 @@ def test_openai_content_parser_data_content_image(openai_unit_test_env: dict[str # Test DataContent with MP3 audio mp3_data_content = DataContent(uri="data:audio/mp3;base64,//uQAAAAWGluZwAAAA8AAAACAAACcQ==", media_type="audio/mp3") - result = client._openai_content_parser(mp3_data_content) # type: ignore + result = client._prepare_content_for_openai(mp3_data_content) # type: ignore # Should convert to OpenAI input_audio format with mp3 assert result["type"] == "input_audio" @@ -760,8 +760,8 @@ def test_openai_content_parser_data_content_image(openai_unit_test_env: dict[str assert result["input_audio"]["format"] == "mp3" -def test_openai_content_parser_document_file_mapping(openai_unit_test_env: dict[str, str]) -> None: - """Test _openai_content_parser converts document files (PDF, DOCX, etc.) to OpenAI file format.""" +def test_prepare_content_for_openai_document_file_mapping(openai_unit_test_env: dict[str, str]) -> None: + """Test _prepare_content_for_openai converts document files (PDF, DOCX, etc.) 
to OpenAI file format.""" client = OpenAIChatClient() # Test PDF without filename - should omit filename in OpenAI payload @@ -770,7 +770,7 @@ def test_openai_content_parser_document_file_mapping(openai_unit_test_env: dict[ media_type="application/pdf", ) - result = client._openai_content_parser(pdf_data_content) # type: ignore + result = client._prepare_content_for_openai(pdf_data_content) # type: ignore # Should convert to OpenAI file format without filename assert result["type"] == "file" @@ -787,7 +787,7 @@ def test_openai_content_parser_document_file_mapping(openai_unit_test_env: dict[ additional_properties={"filename": "report.pdf"}, ) - result = client._openai_content_parser(pdf_with_filename) # type: ignore + result = client._prepare_content_for_openai(pdf_with_filename) # type: ignore # Should use custom filename assert result["type"] == "file" @@ -820,7 +820,7 @@ def test_openai_content_parser_document_file_mapping(openai_unit_test_env: dict[ media_type=case["media_type"], ) - result = client._openai_content_parser(doc_content) # type: ignore + result = client._prepare_content_for_openai(doc_content) # type: ignore # All application/* types should now be mapped to file format assert result["type"] == "file" @@ -834,7 +834,7 @@ def test_openai_content_parser_document_file_mapping(openai_unit_test_env: dict[ additional_properties={"filename": case["filename"]}, ) - result = client._openai_content_parser(doc_with_filename) # type: ignore + result = client._prepare_content_for_openai(doc_with_filename) # type: ignore # Should now use file format with filename assert result["type"] == "file" @@ -848,7 +848,7 @@ def test_openai_content_parser_document_file_mapping(openai_unit_test_env: dict[ additional_properties={}, ) - result = client._openai_content_parser(pdf_empty_props) # type: ignore + result = client._prepare_content_for_openai(pdf_empty_props) # type: ignore assert result["type"] == "file" assert "filename" not in result["file"] @@ -860,7 +860,7 @@ def 
test_openai_content_parser_document_file_mapping(openai_unit_test_env: dict[ additional_properties={"filename": None}, ) - result = client._openai_content_parser(pdf_none_filename) # type: ignore + result = client._prepare_content_for_openai(pdf_none_filename) # type: ignore assert result["type"] == "file" assert "filename" not in result["file"] # None filename should be omitted diff --git a/python/packages/core/tests/openai/test_openai_chat_client_base.py b/python/packages/core/tests/openai/test_openai_chat_client_base.py index b146bad613..3e48899509 100644 --- a/python/packages/core/tests/openai/test_openai_chat_client_base.py +++ b/python/packages/core/tests/openai/test_openai_chat_client_base.py @@ -76,7 +76,7 @@ async def test_cmc( mock_create.assert_awaited_once_with( model=openai_unit_test_env["OPENAI_CHAT_MODEL_ID"], stream=False, - messages=openai_chat_completion._prepare_chat_history_for_request(chat_history), # type: ignore + messages=openai_chat_completion._prepare_messages_for_openai(chat_history), # type: ignore ) @@ -97,7 +97,7 @@ async def test_cmc_chat_options( mock_create.assert_awaited_once_with( model=openai_unit_test_env["OPENAI_CHAT_MODEL_ID"], stream=False, - messages=openai_chat_completion._prepare_chat_history_for_request(chat_history), # type: ignore + messages=openai_chat_completion._prepare_messages_for_openai(chat_history), # type: ignore ) @@ -120,7 +120,7 @@ async def test_cmc_no_fcc_in_response( mock_create.assert_awaited_once_with( model=openai_unit_test_env["OPENAI_CHAT_MODEL_ID"], stream=False, - messages=openai_chat_completion._prepare_chat_history_for_request(orig_chat_history), # type: ignore + messages=openai_chat_completion._prepare_messages_for_openai(orig_chat_history), # type: ignore ) @@ -167,7 +167,7 @@ async def test_scmc_chat_options( model=openai_unit_test_env["OPENAI_CHAT_MODEL_ID"], stream=True, stream_options={"include_usage": True}, - messages=openai_chat_completion._prepare_chat_history_for_request(chat_history), 
# type: ignore + messages=openai_chat_completion._prepare_messages_for_openai(chat_history), # type: ignore ) @@ -203,7 +203,7 @@ async def test_cmc_additional_properties( mock_create.assert_awaited_once_with( model=openai_unit_test_env["OPENAI_CHAT_MODEL_ID"], stream=False, - messages=openai_chat_completion._prepare_chat_history_for_request(chat_history), # type: ignore + messages=openai_chat_completion._prepare_messages_for_openai(chat_history), # type: ignore reasoning_effort="low", ) @@ -246,7 +246,7 @@ async def test_get_streaming( model=openai_unit_test_env["OPENAI_CHAT_MODEL_ID"], stream=True, stream_options={"include_usage": True}, - messages=openai_chat_completion._prepare_chat_history_for_request(orig_chat_history), # type: ignore + messages=openai_chat_completion._prepare_messages_for_openai(orig_chat_history), # type: ignore ) @@ -285,7 +285,7 @@ async def test_get_streaming_singular( model=openai_unit_test_env["OPENAI_CHAT_MODEL_ID"], stream=True, stream_options={"include_usage": True}, - messages=openai_chat_completion._prepare_chat_history_for_request(orig_chat_history), # type: ignore + messages=openai_chat_completion._prepare_messages_for_openai(orig_chat_history), # type: ignore ) @@ -349,7 +349,7 @@ async def test_get_streaming_no_fcc_in_response( model=openai_unit_test_env["OPENAI_CHAT_MODEL_ID"], stream=True, stream_options={"include_usage": True}, - messages=openai_chat_completion._prepare_chat_history_for_request(orig_chat_history), # type: ignore + messages=openai_chat_completion._prepare_messages_for_openai(orig_chat_history), # type: ignore ) @@ -399,7 +399,7 @@ def test_chat_response_created_at_uses_utc(openai_unit_test_env: dict[str, str]) ) client = OpenAIChatClient() - response = client._create_chat_response(mock_response, ChatOptions()) + response = client._parse_response_from_openai(mock_response, ChatOptions()) # Verify that created_at is correctly formatted as UTC assert response.created_at is not None @@ -431,7 +431,7 @@ def 
test_chat_response_update_created_at_uses_utc(openai_unit_test_env: dict[str ) client = OpenAIChatClient() - response_update = client._create_chat_response_update(mock_chunk) + response_update = client._parse_response_update_from_openai(mock_chunk) # Verify that created_at is correctly formatted as UTC assert response_update.created_at is not None diff --git a/python/packages/core/tests/openai/test_openai_responses_client.py b/python/packages/core/tests/openai/test_openai_responses_client.py index 3863f4701a..a3c7ff5323 100644 --- a/python/packages/core/tests/openai/test_openai_responses_client.py +++ b/python/packages/core/tests/openai/test_openai_responses_client.py @@ -368,16 +368,43 @@ async def test_response_format_parse_path() -> None: mock_parsed_response.output_parsed = None mock_parsed_response.usage = None mock_parsed_response.finish_reason = None + mock_parsed_response.conversation = None # No conversation object with patch.object(client.client.responses, "parse", return_value=mock_parsed_response): response = await client.get_response( messages=[ChatMessage(role="user", text="Test message")], response_format=OutputStruct, store=True ) - + assert response.response_id == "parsed_response_123" assert response.conversation_id == "parsed_response_123" assert response.model_id == "test-model" +async def test_response_format_parse_path_with_conversation_id() -> None: + """Test get_response response_format parsing path with set conversation ID.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + + # Mock successful parse response + mock_parsed_response = MagicMock() + mock_parsed_response.id = "parsed_response_123" + mock_parsed_response.text = "Parsed response" + mock_parsed_response.model = "test-model" + mock_parsed_response.created_at = 1000000000 + mock_parsed_response.metadata = {} + mock_parsed_response.output_parsed = None + mock_parsed_response.usage = None + mock_parsed_response.finish_reason = None + 
mock_parsed_response.conversation = MagicMock() + mock_parsed_response.conversation.id = "conversation_456" + + with patch.object(client.client.responses, "parse", return_value=mock_parsed_response): + response = await client.get_response( + messages=[ChatMessage(role="user", text="Test message")], response_format=OutputStruct, store=True + ) + assert response.response_id == "parsed_response_123" + assert response.conversation_id == "conversation_456" + assert response.model_id == "test-model" + + async def test_bad_request_error_non_content_filter() -> None: """Test get_response BadRequestError without content_filter.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") @@ -454,7 +481,7 @@ async def test_get_streaming_response_with_all_parameters() -> None: def test_response_content_creation_with_annotations() -> None: - """Test _create_response_content with different annotation types.""" + """Test _parse_response_from_openai with different annotation types.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") # Create a mock response with annotated text content @@ -485,7 +512,7 @@ def test_response_content_creation_with_annotations() -> None: mock_response.output = [mock_message_item] with patch.object(client, "_get_metadata_from_response", return_value={}): - response = client._create_response_content(mock_response, chat_options=ChatOptions()) # type: ignore + response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore assert len(response.messages[0].contents) >= 1 assert isinstance(response.messages[0].contents[0], TextContent) @@ -494,7 +521,7 @@ def test_response_content_creation_with_annotations() -> None: def test_response_content_creation_with_refusal() -> None: - """Test _create_response_content with refusal content.""" + """Test _parse_response_from_openai with refusal content.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") # Create a 
mock response with refusal content @@ -516,7 +543,7 @@ def test_response_content_creation_with_refusal() -> None: mock_response.output = [mock_message_item] - response = client._create_response_content(mock_response, chat_options=ChatOptions()) # type: ignore + response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore assert len(response.messages[0].contents) == 1 assert isinstance(response.messages[0].contents[0], TextContent) @@ -524,7 +551,7 @@ def test_response_content_creation_with_refusal() -> None: def test_response_content_creation_with_reasoning() -> None: - """Test _create_response_content with reasoning content.""" + """Test _parse_response_from_openai with reasoning content.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") # Create a mock response with reasoning content @@ -546,7 +573,7 @@ def test_response_content_creation_with_reasoning() -> None: mock_response.output = [mock_reasoning_item] - response = client._create_response_content(mock_response, chat_options=ChatOptions()) # type: ignore + response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore assert len(response.messages[0].contents) == 2 assert isinstance(response.messages[0].contents[0], TextReasoningContent) @@ -554,7 +581,7 @@ def test_response_content_creation_with_reasoning() -> None: def test_response_content_creation_with_code_interpreter() -> None: - """Test _create_response_content with code interpreter outputs.""" + """Test _parse_response_from_openai with code interpreter outputs.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") @@ -582,7 +609,7 @@ def test_response_content_creation_with_code_interpreter() -> None: mock_response.output = [mock_code_interpreter_item] - response = client._create_response_content(mock_response, chat_options=ChatOptions()) # type: ignore + response = client._parse_response_from_openai(mock_response, 
chat_options=ChatOptions()) # type: ignore assert len(response.messages[0].contents) == 2 assert isinstance(response.messages[0].contents[0], TextContent) @@ -593,7 +620,7 @@ def test_response_content_creation_with_code_interpreter() -> None: def test_response_content_creation_with_function_call() -> None: - """Test _create_response_content with function call content.""" + """Test _parse_response_from_openai with function call content.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") # Create a mock response with function call @@ -614,7 +641,7 @@ def test_response_content_creation_with_function_call() -> None: mock_response.output = [mock_function_call_item] - response = client._create_response_content(mock_response, chat_options=ChatOptions()) # type: ignore + response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore assert len(response.messages[0].contents) == 1 assert isinstance(response.messages[0].contents[0], FunctionCallContent) @@ -624,7 +651,7 @@ def test_response_content_creation_with_function_call() -> None: assert function_call.arguments == '{"location": "Seattle"}' -def test_tools_to_response_tools_with_hosted_mcp() -> None: +def test_prepare_tools_for_openai_with_hosted_mcp() -> None: """Test that HostedMCPTool is converted to the correct response tool dict.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") @@ -638,7 +665,7 @@ def test_tools_to_response_tools_with_hosted_mcp() -> None: additional_properties={"custom": "value"}, ) - resp_tools = client._tools_to_response_tools([tool]) + resp_tools = client._prepare_tools_for_openai([tool]) assert isinstance(resp_tools, list) assert len(resp_tools) == 1 mcp = resp_tools[0] @@ -654,7 +681,7 @@ def test_tools_to_response_tools_with_hosted_mcp() -> None: assert "require_approval" in mcp -def test_create_response_content_with_mcp_approval_request() -> None: +def 
test_parse_response_from_openai_with_mcp_approval_request() -> None: """Test that a non-streaming mcp_approval_request is parsed into FunctionApprovalRequestContent.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") @@ -675,7 +702,7 @@ def test_create_response_content_with_mcp_approval_request() -> None: mock_response.output = [mock_item] - response = client._create_response_content(mock_response, chat_options=ChatOptions()) # type: ignore + response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore assert isinstance(response.messages[0].contents[0], FunctionApprovalRequestContent) req = response.messages[0].contents[0] @@ -716,7 +743,7 @@ def test_responses_client_created_at_uses_utc(openai_unit_test_env: dict[str, st mock_response.output = [mock_message_item] with patch.object(client, "_get_metadata_from_response", return_value={}): - response = client._create_response_content(mock_response, chat_options=ChatOptions()) # type: ignore + response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore # Verify that created_at is correctly formatted as UTC assert response.created_at is not None @@ -730,7 +757,7 @@ def test_responses_client_created_at_uses_utc(openai_unit_test_env: dict[str, st ) -def test_tools_to_response_tools_with_raw_image_generation() -> None: +def test_prepare_tools_for_openai_with_raw_image_generation() -> None: """Test that raw image_generation tool dict is handled correctly with parameter mapping.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") @@ -744,7 +771,7 @@ def test_tools_to_response_tools_with_raw_image_generation() -> None: "background": "transparent", } - resp_tools = client._tools_to_response_tools([tool]) + resp_tools = client._prepare_tools_for_openai([tool]) assert isinstance(resp_tools, list) assert len(resp_tools) == 1 @@ -759,7 +786,7 @@ def 
test_tools_to_response_tools_with_raw_image_generation() -> None: assert image_tool["output_compression"] == 75 -def test_tools_to_response_tools_with_raw_image_generation_openai_responses_params() -> None: +def test_prepare_tools_for_openai_with_raw_image_generation_openai_responses_params() -> None: """Test raw image_generation tool with OpenAI-specific parameters.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") @@ -773,7 +800,7 @@ def test_tools_to_response_tools_with_raw_image_generation_openai_responses_para "partial_images": 2, # Should be integer 0-3 } - resp_tools = client._tools_to_response_tools([tool]) + resp_tools = client._prepare_tools_for_openai([tool]) assert isinstance(resp_tools, list) assert len(resp_tools) == 1 @@ -791,14 +818,14 @@ def test_tools_to_response_tools_with_raw_image_generation_openai_responses_para assert tool_dict["partial_images"] == 2 -def test_tools_to_response_tools_with_raw_image_generation_minimal() -> None: +def test_prepare_tools_for_openai_with_raw_image_generation_minimal() -> None: """Test raw image_generation tool with minimal configuration.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") # Test with minimal parameters (just type) tool = {"type": "image_generation"} - resp_tools = client._tools_to_response_tools([tool]) + resp_tools = client._prepare_tools_for_openai([tool]) assert isinstance(resp_tools, list) assert len(resp_tools) == 1 @@ -809,7 +836,7 @@ def test_tools_to_response_tools_with_raw_image_generation_minimal() -> None: assert len(image_tool) == 1 -def test_create_streaming_response_content_with_mcp_approval_request() -> None: +def test_parse_chunk_from_openai_with_mcp_approval_request() -> None: """Test that a streaming mcp_approval_request event is parsed into FunctionApprovalRequestContent.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") chat_options = ChatOptions() @@ -825,7 +852,7 @@ def 
test_create_streaming_response_content_with_mcp_approval_request() -> None: mock_item.server_label = "My_MCP" mock_event.item = mock_item - update = client._create_streaming_response_content(mock_event, chat_options, function_call_ids) + update = client._parse_chunk_from_openai(mock_event, chat_options, function_call_ids) assert any(isinstance(c, FunctionApprovalRequestContent) for c in update.contents) fa = next(c for c in update.contents if isinstance(c, FunctionApprovalRequestContent)) assert fa.id == "approval-stream-1" @@ -901,7 +928,7 @@ async def test_end_to_end_mcp_approval_flow(span_exporter) -> None: def test_usage_details_basic() -> None: - """Test _usage_details_from_openai without cached or reasoning tokens.""" + """Test _parse_usage_from_openai without cached or reasoning tokens.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") mock_usage = MagicMock() @@ -911,7 +938,7 @@ def test_usage_details_basic() -> None: mock_usage.input_tokens_details = None mock_usage.output_tokens_details = None - details = client._usage_details_from_openai(mock_usage) # type: ignore + details = client._parse_usage_from_openai(mock_usage) # type: ignore assert details is not None assert details.input_token_count == 100 assert details.output_token_count == 50 @@ -919,7 +946,7 @@ def test_usage_details_basic() -> None: def test_usage_details_with_cached_tokens() -> None: - """Test _usage_details_from_openai with cached input tokens.""" + """Test _parse_usage_from_openai with cached input tokens.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") mock_usage = MagicMock() @@ -930,14 +957,14 @@ def test_usage_details_with_cached_tokens() -> None: mock_usage.input_tokens_details.cached_tokens = 25 mock_usage.output_tokens_details = None - details = client._usage_details_from_openai(mock_usage) # type: ignore + details = client._parse_usage_from_openai(mock_usage) # type: ignore assert details is not None assert 
details.input_token_count == 200 assert details.additional_counts["openai.cached_input_tokens"] == 25 def test_usage_details_with_reasoning_tokens() -> None: - """Test _usage_details_from_openai with reasoning tokens.""" + """Test _parse_usage_from_openai with reasoning tokens.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") mock_usage = MagicMock() @@ -948,7 +975,7 @@ def test_usage_details_with_reasoning_tokens() -> None: mock_usage.output_tokens_details = MagicMock() mock_usage.output_tokens_details.reasoning_tokens = 30 - details = client._usage_details_from_openai(mock_usage) # type: ignore + details = client._parse_usage_from_openai(mock_usage) # type: ignore assert details is not None assert details.output_token_count == 80 assert details.additional_counts["openai.reasoning_tokens"] == 30 @@ -975,7 +1002,7 @@ def test_get_metadata_from_response() -> None: def test_streaming_response_basic_structure() -> None: - """Test that _create_streaming_response_content returns proper structure.""" + """Test that _parse_chunk_from_openai returns proper structure.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") chat_options = ChatOptions(store=True) function_call_ids: dict[int, tuple[str, str]] = {} @@ -983,7 +1010,7 @@ def test_streaming_response_basic_structure() -> None: # Test with a basic mock event to ensure the method returns proper structure mock_event = MagicMock() - response = client._create_streaming_response_content(mock_event, chat_options, function_call_ids) # type: ignore + response = client._parse_chunk_from_openai(mock_event, chat_options, function_call_ids) # type: ignore # Should get a valid ChatResponseUpdate structure assert isinstance(response, ChatResponseUpdate) @@ -993,6 +1020,44 @@ def test_streaming_response_basic_structure() -> None: assert response.raw_representation is mock_event +def test_streaming_response_created_type() -> None: + """Test streaming response with created type""" + 
client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + chat_options = ChatOptions() + function_call_ids: dict[int, tuple[str, str]] = {} + + mock_event = MagicMock() + mock_event.type = "response.created" + mock_event.response = MagicMock() + mock_event.response.id = "resp_1234" + mock_event.response.conversation = MagicMock() + mock_event.response.conversation.id = "conv_5678" + + response = client._parse_chunk_from_openai(mock_event, chat_options, function_call_ids) + + assert response.response_id == "resp_1234" + assert response.conversation_id == "conv_5678" + + +def test_streaming_response_in_progress_type() -> None: + """Test streaming response with in_progress type""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + chat_options = ChatOptions() + function_call_ids: dict[int, tuple[str, str]] = {} + + mock_event = MagicMock() + mock_event.type = "response.in_progress" + mock_event.response = MagicMock() + mock_event.response.id = "resp_1234" + mock_event.response.conversation = MagicMock() + mock_event.response.conversation.id = "conv_5678" + + response = client._parse_chunk_from_openai(mock_event, chat_options, function_call_ids) + + assert response.response_id == "resp_1234" + assert response.conversation_id == "conv_5678" + + def test_streaming_annotation_added_with_file_path() -> None: """Test streaming annotation added event with file_path type extracts HostedFileContent.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") @@ -1008,7 +1073,7 @@ def test_streaming_annotation_added_with_file_path() -> None: "index": 42, } - response = client._create_streaming_response_content(mock_event, chat_options, function_call_ids) + response = client._parse_chunk_from_openai(mock_event, chat_options, function_call_ids) assert len(response.contents) == 1 content = response.contents[0] @@ -1035,7 +1100,7 @@ def test_streaming_annotation_added_with_file_citation() -> None: "index": 15, } - 
response = client._create_streaming_response_content(mock_event, chat_options, function_call_ids) + response = client._parse_chunk_from_openai(mock_event, chat_options, function_call_ids) assert len(response.contents) == 1 content = response.contents[0] @@ -1064,7 +1129,7 @@ def test_streaming_annotation_added_with_container_file_citation() -> None: "end_index": 50, } - response = client._create_streaming_response_content(mock_event, chat_options, function_call_ids) + response = client._parse_chunk_from_openai(mock_event, chat_options, function_call_ids) assert len(response.contents) == 1 content = response.contents[0] @@ -1091,7 +1156,7 @@ def test_streaming_annotation_added_with_unknown_type() -> None: "url": "https://example.com", } - response = client._create_streaming_response_content(mock_event, chat_options, function_call_ids) + response = client._parse_chunk_from_openai(mock_event, chat_options, function_call_ids) # url_citation should not produce HostedFileContent assert len(response.contents) == 0 @@ -1137,8 +1202,8 @@ async def run_streaming(): asyncio.run(run_streaming()) -def test_openai_content_parser_image_content() -> None: - """Test _openai_content_parser with image content variations.""" +def test_prepare_content_for_openai_image_content() -> None: + """Test _prepare_content_for_openai with image content variations.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") # Test image content with detail parameter and file_id @@ -1147,7 +1212,7 @@ def test_openai_content_parser_image_content() -> None: media_type="image/jpeg", additional_properties={"detail": "high", "file_id": "file_123"}, ) - result = client._openai_content_parser(Role.USER, image_content_with_detail, {}) # type: ignore + result = client._prepare_content_for_openai(Role.USER, image_content_with_detail, {}) # type: ignore assert result["type"] == "input_image" assert result["image_url"] == "https://example.com/image.jpg" assert result["detail"] == "high" @@ 
-1155,47 +1220,47 @@ def test_openai_content_parser_image_content() -> None: # Test image content without additional properties (defaults) image_content_basic = UriContent(uri="https://example.com/basic.png", media_type="image/png") - result = client._openai_content_parser(Role.USER, image_content_basic, {}) # type: ignore + result = client._prepare_content_for_openai(Role.USER, image_content_basic, {}) # type: ignore assert result["type"] == "input_image" assert result["detail"] == "auto" assert result["file_id"] is None -def test_openai_content_parser_audio_content() -> None: - """Test _openai_content_parser with audio content variations.""" +def test_prepare_content_for_openai_audio_content() -> None: + """Test _prepare_content_for_openai with audio content variations.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") # Test WAV audio content wav_content = UriContent(uri="data:audio/wav;base64,abc123", media_type="audio/wav") - result = client._openai_content_parser(Role.USER, wav_content, {}) # type: ignore + result = client._prepare_content_for_openai(Role.USER, wav_content, {}) # type: ignore assert result["type"] == "input_audio" assert result["input_audio"]["data"] == "data:audio/wav;base64,abc123" assert result["input_audio"]["format"] == "wav" # Test MP3 audio content mp3_content = UriContent(uri="data:audio/mp3;base64,def456", media_type="audio/mp3") - result = client._openai_content_parser(Role.USER, mp3_content, {}) # type: ignore + result = client._prepare_content_for_openai(Role.USER, mp3_content, {}) # type: ignore assert result["type"] == "input_audio" assert result["input_audio"]["format"] == "mp3" -def test_openai_content_parser_unsupported_content() -> None: - """Test _openai_content_parser with unsupported content types.""" +def test_prepare_content_for_openai_unsupported_content() -> None: + """Test _prepare_content_for_openai with unsupported content types.""" client = OpenAIResponsesClient(model_id="test-model", 
api_key="test-key") # Test unsupported audio format unsupported_audio = UriContent(uri="data:audio/ogg;base64,ghi789", media_type="audio/ogg") - result = client._openai_content_parser(Role.USER, unsupported_audio, {}) # type: ignore + result = client._prepare_content_for_openai(Role.USER, unsupported_audio, {}) # type: ignore assert result == {} # Test non-media content text_uri_content = UriContent(uri="https://example.com/document.txt", media_type="text/plain") - result = client._openai_content_parser(Role.USER, text_uri_content, {}) # type: ignore + result = client._prepare_content_for_openai(Role.USER, text_uri_content, {}) # type: ignore assert result == {} -def test_create_streaming_response_content_code_interpreter() -> None: - """Test _create_streaming_response_content with code_interpreter_call.""" +def test_parse_chunk_from_openai_code_interpreter() -> None: + """Test _parse_chunk_from_openai with code_interpreter_call.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") chat_options = ChatOptions() function_call_ids: dict[int, tuple[str, str]] = {} @@ -1211,15 +1276,15 @@ def test_create_streaming_response_content_code_interpreter() -> None: mock_item_image.code = None mock_event_image.item = mock_item_image - result = client._create_streaming_response_content(mock_event_image, chat_options, function_call_ids) # type: ignore + result = client._parse_chunk_from_openai(mock_event_image, chat_options, function_call_ids) # type: ignore assert len(result.contents) == 1 assert isinstance(result.contents[0], UriContent) assert result.contents[0].uri == "https://example.com/plot.png" assert result.contents[0].media_type == "image" -def test_create_streaming_response_content_reasoning() -> None: - """Test _create_streaming_response_content with reasoning content.""" +def test_parse_chunk_from_openai_reasoning() -> None: + """Test _parse_chunk_from_openai with reasoning content.""" client = OpenAIResponsesClient(model_id="test-model", 
api_key="test-key") chat_options = ChatOptions() function_call_ids: dict[int, tuple[str, str]] = {} @@ -1234,7 +1299,7 @@ def test_create_streaming_response_content_reasoning() -> None: mock_item_reasoning.summary = ["Problem analysis summary"] mock_event_reasoning.item = mock_item_reasoning - result = client._create_streaming_response_content(mock_event_reasoning, chat_options, function_call_ids) # type: ignore + result = client._parse_chunk_from_openai(mock_event_reasoning, chat_options, function_call_ids) # type: ignore assert len(result.contents) == 1 assert isinstance(result.contents[0], TextReasoningContent) assert result.contents[0].text == "Analyzing the problem step by step..." @@ -1242,8 +1307,8 @@ def test_create_streaming_response_content_reasoning() -> None: assert result.contents[0].additional_properties["summary"] == "Problem analysis summary" -def test_openai_content_parser_text_reasoning_comprehensive() -> None: - """Test _openai_content_parser with TextReasoningContent all additional properties.""" +def test_prepare_content_for_openai_text_reasoning_comprehensive() -> None: + """Test _prepare_content_for_openai with TextReasoningContent all additional properties.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") # Test TextReasoningContent with all additional properties @@ -1255,7 +1320,7 @@ def test_openai_content_parser_text_reasoning_comprehensive() -> None: "encrypted_content": "secure_data_456", }, ) - result = client._openai_content_parser(Role.ASSISTANT, comprehensive_reasoning, {}) # type: ignore + result = client._prepare_content_for_openai(Role.ASSISTANT, comprehensive_reasoning, {}) # type: ignore assert result["type"] == "reasoning" assert result["summary"]["text"] == "Comprehensive reasoning summary" assert result["status"] == "in_progress" @@ -1280,7 +1345,7 @@ def test_streaming_reasoning_text_delta_event() -> None: ) with patch.object(client, "_get_metadata_from_response", return_value={}) as 
mock_metadata: - response = client._create_streaming_response_content(event, chat_options, function_call_ids) # type: ignore + response = client._parse_chunk_from_openai(event, chat_options, function_call_ids) # type: ignore assert len(response.contents) == 1 assert isinstance(response.contents[0], TextReasoningContent) @@ -1305,7 +1370,7 @@ def test_streaming_reasoning_text_done_event() -> None: ) with patch.object(client, "_get_metadata_from_response", return_value={"test": "data"}) as mock_metadata: - response = client._create_streaming_response_content(event, chat_options, function_call_ids) # type: ignore + response = client._parse_chunk_from_openai(event, chat_options, function_call_ids) # type: ignore assert len(response.contents) == 1 assert isinstance(response.contents[0], TextReasoningContent) @@ -1331,7 +1396,7 @@ def test_streaming_reasoning_summary_text_delta_event() -> None: ) with patch.object(client, "_get_metadata_from_response", return_value={}) as mock_metadata: - response = client._create_streaming_response_content(event, chat_options, function_call_ids) # type: ignore + response = client._parse_chunk_from_openai(event, chat_options, function_call_ids) # type: ignore assert len(response.contents) == 1 assert isinstance(response.contents[0], TextReasoningContent) @@ -1356,7 +1421,7 @@ def test_streaming_reasoning_summary_text_done_event() -> None: ) with patch.object(client, "_get_metadata_from_response", return_value={"custom": "meta"}) as mock_metadata: - response = client._create_streaming_response_content(event, chat_options, function_call_ids) # type: ignore + response = client._parse_chunk_from_openai(event, chat_options, function_call_ids) # type: ignore assert len(response.contents) == 1 assert isinstance(response.contents[0], TextReasoningContent) @@ -1392,8 +1457,8 @@ def test_streaming_reasoning_events_preserve_metadata() -> None: ) with patch.object(client, "_get_metadata_from_response", return_value={"test": "metadata"}): - 
text_response = client._create_streaming_response_content(text_event, chat_options, function_call_ids) # type: ignore - reasoning_response = client._create_streaming_response_content(reasoning_event, chat_options, function_call_ids) # type: ignore + text_response = client._parse_chunk_from_openai(text_event, chat_options, function_call_ids) # type: ignore + reasoning_response = client._parse_chunk_from_openai(reasoning_event, chat_options, function_call_ids) # type: ignore # Both should preserve metadata assert text_response.additional_properties == {"test": "metadata"} @@ -1404,7 +1469,7 @@ def test_streaming_reasoning_events_preserve_metadata() -> None: assert isinstance(reasoning_response.contents[0], TextReasoningContent) -def test_create_response_content_image_generation_raw_base64(): +def test_parse_response_from_openai_image_generation_raw_base64(): """Test image generation response parsing with raw base64 string.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") @@ -1428,7 +1493,7 @@ def test_create_response_content_image_generation_raw_base64(): mock_response.output = [mock_item] with patch.object(client, "_get_metadata_from_response", return_value={}): - response = client._create_response_content(mock_response, chat_options=ChatOptions()) # type: ignore + response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore # Verify the response contains DataContent with proper URI and media_type assert len(response.messages[0].contents) == 1 @@ -1438,7 +1503,7 @@ def test_create_response_content_image_generation_raw_base64(): assert content.media_type == "image/png" -def test_create_response_content_image_generation_existing_data_uri(): +def test_parse_response_from_openai_image_generation_existing_data_uri(): """Test image generation response parsing with existing data URI.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") @@ -1461,7 +1526,7 @@ def 
test_create_response_content_image_generation_existing_data_uri(): mock_response.output = [mock_item] with patch.object(client, "_get_metadata_from_response", return_value={}): - response = client._create_response_content(mock_response, chat_options=ChatOptions()) # type: ignore + response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore # Verify the response contains DataContent with proper media_type parsed from URI assert len(response.messages[0].contents) == 1 @@ -1471,7 +1536,7 @@ def test_create_response_content_image_generation_existing_data_uri(): assert content.media_type == "image/webp" -def test_create_response_content_image_generation_format_detection(): +def test_parse_response_from_openai_image_generation_format_detection(): """Test different image format detection from base64 data.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") @@ -1493,7 +1558,7 @@ def test_create_response_content_image_generation_format_detection(): mock_response_jpeg.output = [mock_item_jpeg] with patch.object(client, "_get_metadata_from_response", return_value={}): - response_jpeg = client._create_response_content(mock_response_jpeg, chat_options=ChatOptions()) # type: ignore + response_jpeg = client._parse_response_from_openai(mock_response_jpeg, chat_options=ChatOptions()) # type: ignore content_jpeg = response_jpeg.messages[0].contents[0] assert isinstance(content_jpeg, DataContent) assert content_jpeg.media_type == "image/jpeg" @@ -1517,14 +1582,14 @@ def test_create_response_content_image_generation_format_detection(): mock_response_webp.output = [mock_item_webp] with patch.object(client, "_get_metadata_from_response", return_value={}): - response_webp = client._create_response_content(mock_response_webp, chat_options=ChatOptions()) # type: ignore + response_webp = client._parse_response_from_openai(mock_response_webp, chat_options=ChatOptions()) # type: ignore content_webp = 
response_webp.messages[0].contents[0] assert isinstance(content_webp, DataContent) assert content_webp.media_type == "image/webp" assert "data:image/webp;base64," in content_webp.uri -def test_create_response_content_image_generation_fallback(): +def test_parse_response_from_openai_image_generation_fallback(): """Test image generation with invalid base64 falls back to PNG.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") @@ -1547,7 +1612,7 @@ def test_create_response_content_image_generation_fallback(): mock_response.output = [mock_item] with patch.object(client, "_get_metadata_from_response", return_value={}): - response = client._create_response_content(mock_response, chat_options=ChatOptions()) # type: ignore + response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore # Verify it falls back to PNG format for unrecognized binary data assert len(response.messages[0].contents) == 1 @@ -1563,21 +1628,21 @@ async def test_prepare_options_store_parameter_handling() -> None: test_conversation_id = "test-conversation-123" chat_options = ChatOptions(store=True, conversation_id=test_conversation_id) - options = await client.prepare_options(messages, chat_options) + options = await client._prepare_options(messages, chat_options) # type: ignore assert options["store"] is True assert options["previous_response_id"] == test_conversation_id chat_options = ChatOptions(store=False, conversation_id="") - options = await client.prepare_options(messages, chat_options) + options = await client._prepare_options(messages, chat_options) # type: ignore assert options["store"] is False chat_options = ChatOptions(store=None, conversation_id=None) - options = await client.prepare_options(messages, chat_options) + options = await client._prepare_options(messages, chat_options) # type: ignore assert "store" not in options assert "previous_response_id" not in options chat_options = ChatOptions() - options = await 
client.prepare_options(messages, chat_options) + options = await client._prepare_options(messages, chat_options) # type: ignore assert "store" not in options assert "previous_response_id" not in options diff --git a/python/packages/core/tests/workflow/test_workflow_agent.py b/python/packages/core/tests/workflow/test_workflow_agent.py index 51b3544b22..3263eb854e 100644 --- a/python/packages/core/tests/workflow/test_workflow_agent.py +++ b/python/packages/core/tests/workflow/test_workflow_agent.py @@ -1,11 +1,13 @@ # Copyright (c) Microsoft. All rights reserved. import uuid +from collections.abc import AsyncIterable from typing import Any import pytest from agent_framework import ( + AgentProtocol, AgentRunResponse, AgentRunResponseUpdate, AgentRunUpdateEvent, @@ -422,6 +424,48 @@ async def raw_yielding_executor(messages: list[ChatMessage], ctx: WorkflowContex assert isinstance(updates[2].raw_representation, CustomData) assert updates[2].raw_representation.value == 42 + async def test_workflow_as_agent_yield_output_with_list_of_chat_messages(self) -> None: + """Test that yield_output with list[ChatMessage] extracts contents from all messages. + + Note: TextContent items are coalesced by _finalize_response, so multiple text contents + become a single merged TextContent in the final response. 
+ """ + + @executor + async def list_yielding_executor(messages: list[ChatMessage], ctx: WorkflowContext) -> None: + # Yield a list of ChatMessages (as SequentialBuilder does) + msg_list = [ + ChatMessage(role=Role.USER, contents=[TextContent(text="first message")]), + ChatMessage(role=Role.ASSISTANT, contents=[TextContent(text="second message")]), + ChatMessage( + role=Role.ASSISTANT, + contents=[TextContent(text="third"), TextContent(text="fourth")], + ), + ] + await ctx.yield_output(msg_list) + + workflow = WorkflowBuilder().set_start_executor(list_yielding_executor).build() + agent = workflow.as_agent("list-msg-agent") + + # Verify streaming returns the update with all 4 contents before coalescing + updates: list[AgentRunResponseUpdate] = [] + async for update in agent.run_stream("test"): + updates.append(update) + + assert len(updates) == 1 + assert len(updates[0].contents) == 4 + texts = [c.text for c in updates[0].contents if isinstance(c, TextContent)] + assert texts == ["first message", "second message", "third", "fourth"] + + # Verify run() coalesces text contents (expected behavior) + result = await agent.run("test") + + assert isinstance(result, AgentRunResponse) + assert len(result.messages) == 1 + # TextContent items are coalesced into one + assert len(result.messages[0].contents) == 1 + assert result.messages[0].text == "first messagesecond messagethirdfourth" + async def test_thread_conversation_history_included_in_workflow_run(self) -> None: """Test that conversation history from thread is included when running WorkflowAgent. 
@@ -521,6 +565,142 @@ async def test_checkpoint_storage_passed_to_workflow(self) -> None: checkpoints = await checkpoint_storage.list_checkpoints(workflow.id) assert len(checkpoints) > 0, "Checkpoints should have been created when checkpoint_storage is provided" + async def test_agent_executor_output_response_false_filters_streaming_events(self): + """Test that AgentExecutor with output_response=False does not surface streaming events.""" + + class MockAgent(AgentProtocol): + """Mock agent for testing.""" + + def __init__(self, name: str, response_text: str) -> None: + self._name = name + self._response_text = response_text + self._description: str | None = None + + @property + def name(self) -> str | None: + return self._name + + @property + def description(self) -> str | None: + return self._description + + def get_new_thread(self) -> AgentThread: + return AgentThread() + + async def run(self, messages: Any, *, thread: AgentThread | None = None, **kwargs: Any) -> AgentRunResponse: + return AgentRunResponse( + messages=[ChatMessage(role=Role.ASSISTANT, text=self._response_text)], + text=self._response_text, + ) + + async def run_stream( + self, messages: Any, *, thread: AgentThread | None = None, **kwargs: Any + ) -> AsyncIterable[AgentRunResponseUpdate]: + for word in self._response_text.split(): + yield AgentRunResponseUpdate( + contents=[TextContent(text=word + " ")], + role=Role.ASSISTANT, + author_name=self._name, + ) + + @executor + async def start_executor(messages: list[ChatMessage], ctx: WorkflowContext) -> None: + from agent_framework import AgentExecutorRequest + + await ctx.yield_output("Start output") + await ctx.send_message(AgentExecutorRequest(messages=messages, should_respond=True)) + + # Build workflow: start -> agent1 (no output) -> agent2 (output_response=True) + workflow = ( + WorkflowBuilder() + .register_executor(lambda: start_executor, "start") + .register_agent(lambda: MockAgent("agent1", "Agent1 output - should NOT appear"), "agent1") + 
.register_agent( + lambda: MockAgent("agent2", "Agent2 output - SHOULD appear"), "agent2", output_response=True + ) + .set_start_executor("start") + .add_edge("start", "agent1") + .add_edge("agent1", "agent2") + .build() + ) + + agent = WorkflowAgent(workflow=workflow, name="Test Agent") + result = await agent.run("Test input") + + # Collect all message texts + texts = [msg.text for msg in result.messages if msg.text] + + # Start output should appear (from yield_output) + assert any("Start output" in t for t in texts), "Start output should appear" + + # Agent1 output should NOT appear (output_response=False) + assert not any("Agent1" in t for t in texts), "Agent1 output should NOT appear" + + # Agent2 output should appear (output_response=True) + assert any("Agent2" in t for t in texts), "Agent2 output should appear" + + async def test_agent_executor_output_response_no_duplicate_from_workflow_output_event(self): + """Test that AgentExecutor with output_response=True does not duplicate content.""" + + class MockAgent(AgentProtocol): + """Mock agent for testing.""" + + def __init__(self, name: str, response_text: str) -> None: + self._name = name + self._response_text = response_text + self._description: str | None = None + + @property + def name(self) -> str | None: + return self._name + + @property + def description(self) -> str | None: + return self._description + + def get_new_thread(self) -> AgentThread: + return AgentThread() + + async def run(self, messages: Any, *, thread: AgentThread | None = None, **kwargs: Any) -> AgentRunResponse: + return AgentRunResponse( + messages=[ChatMessage(role=Role.ASSISTANT, text=self._response_text)], + text=self._response_text, + ) + + async def run_stream( + self, messages: Any, *, thread: AgentThread | None = None, **kwargs: Any + ) -> AsyncIterable[AgentRunResponseUpdate]: + yield AgentRunResponseUpdate( + contents=[TextContent(text=self._response_text)], + role=Role.ASSISTANT, + author_name=self._name, + ) + + @executor + 
async def start_executor(messages: list[ChatMessage], ctx: WorkflowContext) -> None: + from agent_framework import AgentExecutorRequest + + await ctx.send_message(AgentExecutorRequest(messages=messages, should_respond=True)) + + # Build workflow with single agent that has output_response=True + workflow = ( + WorkflowBuilder() + .register_executor(lambda: start_executor, "start") + .register_agent(lambda: MockAgent("agent", "Unique response text"), "agent", output_response=True) + .set_start_executor("start") + .add_edge("start", "agent") + .build() + ) + + agent = WorkflowAgent(workflow=workflow, name="Test Agent") + result = await agent.run("Test input") + + # Count occurrences of the unique response text + unique_text_count = sum(1 for msg in result.messages if msg.text and "Unique response text" in msg.text) + + # Should appear exactly once (not duplicated from both streaming and WorkflowOutputEvent) + assert unique_text_count == 1, f"Response should appear exactly once, but appeared {unique_text_count} times" + class TestWorkflowAgentMergeUpdates: """Test cases specifically for the WorkflowAgent.merge_updates static method.""" diff --git a/python/packages/core/tests/workflow/test_workflow_builder.py b/python/packages/core/tests/workflow/test_workflow_builder.py index 83c9d41c22..91a213e3c2 100644 --- a/python/packages/core/tests/workflow/test_workflow_builder.py +++ b/python/packages/core/tests/workflow/test_workflow_builder.py @@ -245,7 +245,8 @@ def test_register_multiple_executors(): # Build workflow with edges using registered names workflow = ( - builder.set_start_executor("ExecutorA") + builder + .set_start_executor("ExecutorA") .add_edge("ExecutorA", "ExecutorB") .add_edge("ExecutorB", "ExecutorC") .build() @@ -426,7 +427,8 @@ def test_register_with_fan_in_edges(): # Add fan-in edges using registered names # Both Source1 and Source2 need to be reachable, so connect Source1 to Source2 workflow = ( - builder.set_start_executor("Source1") + builder + 
.set_start_executor("Source1") .add_edge("Source1", "Source2") .add_fan_in_edges(["Source1", "Source2"], "Aggregator") .build() diff --git a/python/packages/core/tests/workflow/test_workflow_kwargs.py b/python/packages/core/tests/workflow/test_workflow_kwargs.py index 864258b76c..5b7637057b 100644 --- a/python/packages/core/tests/workflow/test_workflow_kwargs.py +++ b/python/packages/core/tests/workflow/test_workflow_kwargs.py @@ -490,3 +490,266 @@ async def prepare_final_answer(self, context: MagenticContext) -> ChatMessage: # endregion + + +# region WorkflowAgent (as_agent) kwargs Tests + + +async def test_workflow_as_agent_run_propagates_kwargs_to_underlying_agent() -> None: + """Test that kwargs passed to workflow_agent.run() flow through to the underlying agents.""" + agent = _KwargsCapturingAgent(name="inner_agent") + workflow = SequentialBuilder().participants([agent]).build() + workflow_agent = workflow.as_agent(name="TestWorkflowAgent") + + custom_data = {"endpoint": "https://api.example.com", "version": "v1"} + user_token = {"user_name": "alice", "access_level": "admin"} + + _ = await workflow_agent.run( + "test message", + custom_data=custom_data, + user_token=user_token, + ) + + # Verify inner agent received kwargs + assert len(agent.captured_kwargs) >= 1, "Inner agent should have been invoked at least once" + received = agent.captured_kwargs[0] + assert "custom_data" in received, "Inner agent should receive custom_data kwarg" + assert "user_token" in received, "Inner agent should receive user_token kwarg" + assert received["custom_data"] == custom_data + assert received["user_token"] == user_token + + +async def test_workflow_as_agent_run_stream_propagates_kwargs_to_underlying_agent() -> None: + """Test that kwargs passed to workflow_agent.run_stream() flow through to the underlying agents.""" + agent = _KwargsCapturingAgent(name="inner_agent") + workflow = SequentialBuilder().participants([agent]).build() + workflow_agent = 
workflow.as_agent(name="TestWorkflowAgent") + + custom_data = {"session_id": "xyz123"} + api_token = "secret-token" + + async for _ in workflow_agent.run_stream( + "test message", + custom_data=custom_data, + api_token=api_token, + ): + pass + + # Verify inner agent received kwargs + assert len(agent.captured_kwargs) >= 1, "Inner agent should have been invoked at least once" + received = agent.captured_kwargs[0] + assert "custom_data" in received, "Inner agent should receive custom_data kwarg" + assert "api_token" in received, "Inner agent should receive api_token kwarg" + assert received["custom_data"] == custom_data + assert received["api_token"] == api_token + + +async def test_workflow_as_agent_propagates_kwargs_to_multiple_agents() -> None: + """Test that kwargs flow to all agents when using workflow.as_agent().""" + agent1 = _KwargsCapturingAgent(name="agent1") + agent2 = _KwargsCapturingAgent(name="agent2") + workflow = SequentialBuilder().participants([agent1, agent2]).build() + workflow_agent = workflow.as_agent(name="MultiAgentWorkflow") + + custom_data = {"batch_id": "batch-001"} + + _ = await workflow_agent.run("test message", custom_data=custom_data) + + # Both agents should have received kwargs + assert len(agent1.captured_kwargs) >= 1, "First agent should be invoked" + assert len(agent2.captured_kwargs) >= 1, "Second agent should be invoked" + assert agent1.captured_kwargs[0].get("custom_data") == custom_data + assert agent2.captured_kwargs[0].get("custom_data") == custom_data + + +async def test_workflow_as_agent_kwargs_with_none_values() -> None: + """Test that kwargs with None values are passed through correctly via as_agent().""" + agent = _KwargsCapturingAgent(name="none_test_agent") + workflow = SequentialBuilder().participants([agent]).build() + workflow_agent = workflow.as_agent(name="NoneTestWorkflow") + + _ = await workflow_agent.run("test", optional_param=None, other_param="value") + + assert len(agent.captured_kwargs) >= 1 + received = 
agent.captured_kwargs[0] + assert "optional_param" in received + assert received["optional_param"] is None + assert received["other_param"] == "value" + + +async def test_workflow_as_agent_kwargs_with_complex_nested_data() -> None: + """Test that complex nested data structures flow through correctly via as_agent().""" + agent = _KwargsCapturingAgent(name="nested_agent") + workflow = SequentialBuilder().participants([agent]).build() + workflow_agent = workflow.as_agent(name="NestedDataWorkflow") + + complex_data = { + "level1": { + "level2": { + "level3": ["a", "b", "c"], + "number": 42, + }, + "list": [1, 2, {"nested": True}], + }, + } + + _ = await workflow_agent.run("test", complex_data=complex_data) + + assert len(agent.captured_kwargs) >= 1 + received = agent.captured_kwargs[0] + assert received.get("complex_data") == complex_data + + +# endregion + + +# region SubWorkflow (WorkflowExecutor) Tests + + +async def test_subworkflow_kwargs_propagation() -> None: + """Test that kwargs are propagated to subworkflows. + + Verifies kwargs passed to parent workflow.run_stream() flow through to agents + in subworkflows wrapped by WorkflowExecutor. 
+ """ + from agent_framework._workflows._workflow_executor import WorkflowExecutor + + # Create an agent inside the subworkflow that captures kwargs + inner_agent = _KwargsCapturingAgent(name="inner_agent") + + # Build the inner (sub) workflow with the agent + inner_workflow = SequentialBuilder().participants([inner_agent]).build() + + # Wrap the inner workflow in a WorkflowExecutor so it can be used as a subworkflow + subworkflow_executor = WorkflowExecutor(workflow=inner_workflow, id="subworkflow_executor") + + # Build the outer (parent) workflow containing the subworkflow + outer_workflow = SequentialBuilder().participants([subworkflow_executor]).build() + + # Define kwargs that should propagate to subworkflow + custom_data = {"api_key": "secret123", "endpoint": "https://api.example.com"} + user_token = {"user_name": "alice", "access_level": "admin"} + + # Run the outer workflow with kwargs + async for event in outer_workflow.run_stream( + "test message for subworkflow", + custom_data=custom_data, + user_token=user_token, + ): + if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + break + + # Verify that the inner agent was called + assert len(inner_agent.captured_kwargs) >= 1, "Inner agent in subworkflow should have been invoked" + + received_kwargs = inner_agent.captured_kwargs[0] + + # Verify kwargs were propagated from parent workflow to subworkflow agent + assert "custom_data" in received_kwargs, ( + f"Subworkflow agent should receive 'custom_data' kwarg. Received keys: {list(received_kwargs.keys())}" + ) + assert "user_token" in received_kwargs, ( + f"Subworkflow agent should receive 'user_token' kwarg. 
Received keys: {list(received_kwargs.keys())}" + ) + assert received_kwargs.get("custom_data") == custom_data, ( + f"Expected custom_data={custom_data}, got {received_kwargs.get('custom_data')}" + ) + assert received_kwargs.get("user_token") == user_token, ( + f"Expected user_token={user_token}, got {received_kwargs.get('user_token')}" + ) + + +async def test_subworkflow_kwargs_accessible_via_shared_state() -> None: + """Test that kwargs are accessible via SharedState within subworkflow. + + Verifies that WORKFLOW_RUN_KWARGS_KEY is populated in the subworkflow's SharedState + with kwargs from the parent workflow. + """ + from agent_framework import Executor, WorkflowContext, handler + from agent_framework._workflows._workflow_executor import WorkflowExecutor + + captured_kwargs_from_state: list[dict[str, Any]] = [] + + class _SharedStateReader(Executor): + """Executor that reads kwargs from SharedState for verification.""" + + @handler + async def read_kwargs(self, msgs: list[ChatMessage], ctx: WorkflowContext[list[ChatMessage]]) -> None: + kwargs_from_state = await ctx.get_shared_state(WORKFLOW_RUN_KWARGS_KEY) + captured_kwargs_from_state.append(kwargs_from_state or {}) + await ctx.send_message(msgs) + + # Build inner workflow with SharedState reader + state_reader = _SharedStateReader(id="state_reader") + inner_workflow = SequentialBuilder().participants([state_reader]).build() + + # Wrap as subworkflow + subworkflow_executor = WorkflowExecutor(workflow=inner_workflow, id="subworkflow") + + # Build outer workflow + outer_workflow = SequentialBuilder().participants([subworkflow_executor]).build() + + # Run with kwargs + async for event in outer_workflow.run_stream( + "test", + my_custom_kwarg="should_be_propagated", + another_kwarg=42, + ): + if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + break + + # Verify the state reader was invoked + assert len(captured_kwargs_from_state) >= 1, "SharedState reader should have been 
invoked" + + kwargs_in_subworkflow = captured_kwargs_from_state[0] + + assert kwargs_in_subworkflow.get("my_custom_kwarg") == "should_be_propagated", ( + f"Expected 'my_custom_kwarg' in subworkflow SharedState, got: {kwargs_in_subworkflow}" + ) + assert kwargs_in_subworkflow.get("another_kwarg") == 42, ( + f"Expected 'another_kwarg'=42 in subworkflow SharedState, got: {kwargs_in_subworkflow}" + ) + + +async def test_nested_subworkflow_kwargs_propagation() -> None: + """Test kwargs propagation through multiple levels of nested subworkflows. + + Verifies kwargs flow through 3 levels: + - Outer workflow + - Middle subworkflow (WorkflowExecutor) + - Inner subworkflow (WorkflowExecutor) with agent + """ + from agent_framework._workflows._workflow_executor import WorkflowExecutor + + # Innermost agent + inner_agent = _KwargsCapturingAgent(name="deeply_nested_agent") + + # Build inner workflow + inner_workflow = SequentialBuilder().participants([inner_agent]).build() + inner_executor = WorkflowExecutor(workflow=inner_workflow, id="inner_executor") + + # Build middle workflow containing inner + middle_workflow = SequentialBuilder().participants([inner_executor]).build() + middle_executor = WorkflowExecutor(workflow=middle_workflow, id="middle_executor") + + # Build outer workflow containing middle + outer_workflow = SequentialBuilder().participants([middle_executor]).build() + + # Run with kwargs + async for event in outer_workflow.run_stream( + "deeply nested test", + deep_kwarg="should_reach_inner", + ): + if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + break + + # Verify inner agent was called + assert len(inner_agent.captured_kwargs) >= 1, "Deeply nested agent should be invoked" + + received = inner_agent.captured_kwargs[0] + assert received.get("deep_kwarg") == "should_reach_inner", ( + f"Deeply nested agent should receive 'deep_kwarg'. 
Got: {received}" + ) + + +# endregion diff --git a/python/packages/declarative/agent_framework_declarative/_loader.py b/python/packages/declarative/agent_framework_declarative/_loader.py index b5ae1683ba..86a6b94225 100644 --- a/python/packages/declarative/agent_framework_declarative/_loader.py +++ b/python/packages/declarative/agent_framework_declarative/_loader.py @@ -37,6 +37,7 @@ RemoteConnection, Tool, WebSearchTool, + _safe_mode_context, agent_schema_dispatch, ) @@ -118,7 +119,9 @@ def __init__( client_kwargs: Mapping[str, Any] | None = None, additional_mappings: Mapping[str, ProviderTypeMapping] | None = None, default_provider: str = "AzureAIClient", - env_file: str | None = None, + safe_mode: bool = True, + env_file_path: str | None = None, + env_file_encoding: str | None = None, ) -> None: """Create the agent factory, with bindings. @@ -151,7 +154,15 @@ def __init__( that accepts the model.id value. default_provider: The default provider used when model.provider is not specified, default is "AzureAIClient". - env_file: An optional path to a .env file to load environment variables from. + safe_mode: Whether to run in safe mode, default is True. + When safe_mode is True, environment variables are not accessible in the powerfx expressions. + You can still use environment variables, but through the constructors of the classes. + Which means you must make sure you are using the standard env variable names of the classes + you are using and not custom ones and remove the powerfx statements that start with `=Env.`. + Only when you trust the source of your yaml files, you can set safe_mode to False + via the AgentFactory constructor. + env_file_path: The path to the .env file to load environment variables from. + env_file_encoding: The encoding of the .env file, defaults to 'utf-8'. 
""" self.chat_client = chat_client self.bindings = bindings @@ -159,7 +170,8 @@ def __init__( self.client_kwargs = client_kwargs or {} self.additional_mappings = additional_mappings or {} self.default_provider: str = default_provider - load_dotenv(dotenv_path=env_file) + self.safe_mode = safe_mode + load_dotenv(dotenv_path=env_file_path, encoding=env_file_encoding) def create_agent_from_yaml_path(self, yaml_path: str | Path) -> ChatAgent: """Create a ChatAgent from a YAML file path. @@ -215,6 +227,8 @@ def create_agent_from_yaml(self, yaml_str: str) -> ChatAgent: ModuleNotFoundError: If the required module for the provider type cannot be imported. AttributeError: If the required class for the provider type cannot be found in the module. """ + # Set safe_mode context before parsing YAML to control PowerFx environment variable access + _safe_mode_context.set(self.safe_mode) prompt_agent = agent_schema_dispatch(yaml.safe_load(yaml_str)) if not isinstance(prompt_agent, PromptAgent): raise DeclarativeLoaderError("Only yaml definitions for a PromptAgent are supported for agent creation.") diff --git a/python/packages/declarative/agent_framework_declarative/_models.py b/python/packages/declarative/agent_framework_declarative/_models.py index aaba468bdf..b3a235a732 100644 --- a/python/packages/declarative/agent_framework_declarative/_models.py +++ b/python/packages/declarative/agent_framework_declarative/_models.py @@ -2,6 +2,7 @@ import os import sys from collections.abc import MutableMapping +from contextvars import ContextVar from typing import Any, Literal, TypeVar, Union from agent_framework import get_logger @@ -21,6 +22,11 @@ logger = get_logger("agent_framework.declarative") +# Context variable for safe_mode setting. +# When True (default), environment variables are NOT accessible in PowerFx expressions. +# When False, environment variables CAN be accessed via Env symbol in PowerFx. 
+_safe_mode_context: ContextVar[bool] = ContextVar("safe_mode", default=True) + @overload def _try_powerfx_eval(value: None, log_value: bool = True) -> None: ... @@ -49,6 +55,9 @@ def _try_powerfx_eval(value: str | None, log_value: bool = True) -> str | None: ) return value try: + safe_mode = _safe_mode_context.get() + if safe_mode: + return engine.eval(value[1:]) return engine.eval(value[1:], symbols={"Env": dict(os.environ)}) except Exception as exc: if log_value: diff --git a/python/packages/declarative/pyproject.toml b/python/packages/declarative/pyproject.toml index 5a82f3a1e5..eeefb80ee5 100644 --- a/python/packages/declarative/pyproject.toml +++ b/python/packages/declarative/pyproject.toml @@ -4,7 +4,7 @@ description = "Declarative specification support for Microsoft Agent Framework." authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251216" +version = "1.0.0b251223" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/declarative/tests/test_declarative_loader.py b/python/packages/declarative/tests/test_declarative_loader.py index daf4ab06f8..338671b212 100644 --- a/python/packages/declarative/tests/test_declarative_loader.py +++ b/python/packages/declarative/tests/test_declarative_loader.py @@ -454,3 +454,140 @@ def test_agent_schema_dispatch_agent_samples(yaml_file: Path, agent_samples_dir: result = agent_schema_dispatch(yaml.safe_load(content)) # Result can be None for unknown kinds, but should not raise exceptions assert result is not None, f"agent_schema_dispatch returned None for {yaml_file.relative_to(agent_samples_dir)}" + + +class TestAgentFactorySafeMode: + """Tests for AgentFactory safe_mode parameter.""" + + def test_agent_factory_safe_mode_default_is_true(self): + """Test that safe_mode is True by default.""" + from 
agent_framework_declarative._loader import AgentFactory + + factory = AgentFactory() + assert factory.safe_mode is True + + def test_agent_factory_safe_mode_can_be_set_false(self): + """Test that safe_mode can be explicitly set to False.""" + from agent_framework_declarative._loader import AgentFactory + + factory = AgentFactory(safe_mode=False) + assert factory.safe_mode is False + + def test_agent_factory_safe_mode_blocks_env_in_yaml(self, monkeypatch): + """Test that safe_mode=True blocks environment variable access in YAML parsing.""" + from unittest.mock import MagicMock + + from agent_framework_declarative._loader import AgentFactory + + monkeypatch.setenv("TEST_MODEL_ID", "gpt-4-from-env") + + # Create a mock chat client to avoid needing real provider + mock_client = MagicMock() + + yaml_content = """ +kind: Prompt +name: test-agent +description: =Env.TEST_DESCRIPTION +instructions: Hello world +""" + monkeypatch.setenv("TEST_DESCRIPTION", "Description from env") + + # With safe_mode=True (default), Env access should fail and return original value + factory = AgentFactory(chat_client=mock_client, safe_mode=True) + agent = factory.create_agent_from_yaml(yaml_content) + + # The description should NOT be resolved from env (PowerFx fails, returns original) + assert agent.description == "=Env.TEST_DESCRIPTION" + + def test_agent_factory_safe_mode_false_allows_env_in_yaml(self, monkeypatch): + """Test that safe_mode=False allows environment variable access in YAML parsing.""" + from unittest.mock import MagicMock + + from agent_framework_declarative._loader import AgentFactory + + monkeypatch.setenv("TEST_DESCRIPTION", "Description from env") + + # Create a mock chat client to avoid needing real provider + mock_client = MagicMock() + + yaml_content = """ +kind: Prompt +name: test-agent +description: =Env.TEST_DESCRIPTION +instructions: Hello world +""" + + # With safe_mode=False, Env access should work + factory = AgentFactory(chat_client=mock_client, 
safe_mode=False) + agent = factory.create_agent_from_yaml(yaml_content) + + # The description should be resolved from env + assert agent.description == "Description from env" + + def test_agent_factory_safe_mode_with_api_key_connection(self, monkeypatch): + """Test safe_mode with API key connection containing env variable.""" + from agent_framework_declarative._models import _safe_mode_context + + monkeypatch.setenv("MY_API_KEY", "secret-key-123") + + yaml_content = """ +kind: Prompt +name: test-agent +description: Test agent +instructions: Hello +model: + id: gpt-4 + provider: OpenAI + apiType: Chat + connection: + kind: key + apiKey: =Env.MY_API_KEY +""" + + # Manually trigger the YAML parsing to check the context is set correctly + import yaml as yaml_module + + from agent_framework_declarative._models import agent_schema_dispatch + + token = _safe_mode_context.set(True) # Ensure we're in safe mode + try: + result = agent_schema_dispatch(yaml_module.safe_load(yaml_content)) + + # The API key should NOT be resolved (still has the PowerFx expression) + assert result.model.connection.apiKey == "=Env.MY_API_KEY" + finally: + _safe_mode_context.reset(token) + + def test_agent_factory_safe_mode_false_resolves_api_key(self, monkeypatch): + """Test safe_mode=False resolves API key from environment.""" + from agent_framework_declarative._models import _safe_mode_context + + monkeypatch.setenv("MY_API_KEY", "secret-key-123") + + yaml_content = """ +kind: Prompt +name: test-agent +description: Test agent +instructions: Hello +model: + id: gpt-4 + provider: OpenAI + apiType: Chat + connection: + kind: key + apiKey: =Env.MY_API_KEY +""" + + # With safe_mode=False, the API key should be resolved + import yaml as yaml_module + + from agent_framework_declarative._models import agent_schema_dispatch + + token = _safe_mode_context.set(False) # Disable safe mode + try: + result = agent_schema_dispatch(yaml_module.safe_load(yaml_content)) + + # The API key should be resolved from 
environment + assert result.model.connection.apiKey == "secret-key-123" + finally: + _safe_mode_context.reset(token) diff --git a/python/packages/declarative/tests/test_declarative_models.py b/python/packages/declarative/tests/test_declarative_models.py index dc13b3a642..7f7357eda1 100644 --- a/python/packages/declarative/tests/test_declarative_models.py +++ b/python/packages/declarative/tests/test_declarative_models.py @@ -41,6 +41,7 @@ Template, ToolResource, WebSearchTool, + _safe_mode_context, _try_powerfx_eval, ) @@ -874,35 +875,50 @@ def test_env_variable_access(self, monkeypatch): monkeypatch.setenv("API_KEY", "secret123") monkeypatch.setenv("PORT", "8080") - # Test basic env access - assert _try_powerfx_eval("=Env.TEST_VAR") == "test_value" - assert _try_powerfx_eval("=Env.API_KEY") == "secret123" - assert _try_powerfx_eval("=Env.PORT") == "8080" + # Set safe_mode=False to allow environment variable access + token = _safe_mode_context.set(False) + try: + # Test basic env access + assert _try_powerfx_eval("=Env.TEST_VAR") == "test_value" + assert _try_powerfx_eval("=Env.API_KEY") == "secret123" + assert _try_powerfx_eval("=Env.PORT") == "8080" + finally: + _safe_mode_context.reset(token) def test_env_variable_with_string_concatenation(self, monkeypatch): """Test env variables with string concatenation operator.""" monkeypatch.setenv("BASE_URL", "https://api.example.com") monkeypatch.setenv("API_VERSION", "v1") - # Test concatenation with & - result = _try_powerfx_eval('=Env.BASE_URL & "/" & Env.API_VERSION') - assert result == "https://api.example.com/v1" + # Set safe_mode=False to allow environment variable access + token = _safe_mode_context.set(False) + try: + # Test concatenation with & + result = _try_powerfx_eval('=Env.BASE_URL & "/" & Env.API_VERSION') + assert result == "https://api.example.com/v1" - # Test concatenation with literals - result = _try_powerfx_eval('="API Key: " & Env.API_VERSION') - assert result == "API Key: v1" + # Test 
concatenation with literals + result = _try_powerfx_eval('="API Key: " & Env.API_VERSION') + assert result == "API Key: v1" + finally: + _safe_mode_context.reset(token) def test_string_comparison_operators(self, monkeypatch): """Test PowerFx string comparison operators.""" monkeypatch.setenv("ENV_MODE", "production") - # Equal to - returns bool - assert _try_powerfx_eval('=Env.ENV_MODE = "production"') is True - assert _try_powerfx_eval('=Env.ENV_MODE = "development"') is False + # Set safe_mode=False to allow environment variable access + token = _safe_mode_context.set(False) + try: + # Equal to - returns bool + assert _try_powerfx_eval('=Env.ENV_MODE = "production"') is True + assert _try_powerfx_eval('=Env.ENV_MODE = "development"') is False - # Not equal to - returns bool - assert _try_powerfx_eval('=Env.ENV_MODE <> "development"') is True - assert _try_powerfx_eval('=Env.ENV_MODE <> "production"') is False + # Not equal to - returns bool + assert _try_powerfx_eval('=Env.ENV_MODE <> "development"') is True + assert _try_powerfx_eval('=Env.ENV_MODE <> "production"') is False + finally: + _safe_mode_context.reset(token) def test_string_in_operator(self): """Test PowerFx 'in' operator for substring testing (case-insensitive).""" @@ -958,11 +974,54 @@ def test_env_with_special_characters(self, monkeypatch): monkeypatch.setenv("URL_WITH_QUERY", "https://example.com?param=value") monkeypatch.setenv("PATH_WITH_SPACES", "C:\\Program Files\\App") - result = _try_powerfx_eval("=Env.URL_WITH_QUERY") - assert result == "https://example.com?param=value" + # Set safe_mode=False to allow environment variable access + token = _safe_mode_context.set(False) + try: + result = _try_powerfx_eval("=Env.URL_WITH_QUERY") + assert result == "https://example.com?param=value" + + result = _try_powerfx_eval("=Env.PATH_WITH_SPACES") + assert result == "C:\\Program Files\\App" + finally: + _safe_mode_context.reset(token) + + def test_safe_mode_blocks_env_access(self, monkeypatch): + """Test 
that safe_mode=True (default) blocks environment variable access.""" + monkeypatch.setenv("SECRET_VAR", "secret_value") + + # Set safe_mode=True (default) + token = _safe_mode_context.set(True) + try: + # When safe_mode=True, Env is not available and the expression fails, + # returning the original value + result = _try_powerfx_eval("=Env.SECRET_VAR") + assert result == "=Env.SECRET_VAR" + finally: + _safe_mode_context.reset(token) + + def test_safe_mode_context_isolation(self, monkeypatch): + """Test that safe_mode context variable properly isolates env access.""" + monkeypatch.setenv("TEST_VAR", "test_value") - result = _try_powerfx_eval("=Env.PATH_WITH_SPACES") - assert result == "C:\\Program Files\\App" + # First, set safe_mode=True - should NOT allow env access + token = _safe_mode_context.set(True) + try: + result_safe = _try_powerfx_eval("=Env.TEST_VAR") + assert result_safe == "=Env.TEST_VAR" + + # Then, set safe_mode=False - should allow env access + token2 = _safe_mode_context.set(False) + try: + result_unsafe = _try_powerfx_eval("=Env.TEST_VAR") + assert result_unsafe == "test_value" + finally: + _safe_mode_context.reset(token2) + + # After reset, should block again + result_safe_again = _try_powerfx_eval("=Env.TEST_VAR") + assert result_safe_again == "=Env.TEST_VAR" + finally: + _safe_mode_context.reset(token) class TestAgentManifest: diff --git a/python/packages/devui/agent_framework_devui/_executor.py b/python/packages/devui/agent_framework_devui/_executor.py index be769cba09..1f28c8772c 100644 --- a/python/packages/devui/agent_framework_devui/_executor.py +++ b/python/packages/devui/agent_framework_devui/_executor.py @@ -248,7 +248,7 @@ async def _execute_agent( # Get thread from conversation parameter (OpenAI standard!) 
thread = None - conversation_id = request.get_conversation_id() + conversation_id = request._get_conversation_id() if conversation_id: thread = self.conversation_store.get_thread(conversation_id) if thread: @@ -324,7 +324,7 @@ async def _execute_workflow( entity_id = request.get_entity_id() or "unknown" # Get or create session conversation for checkpoint storage - conversation_id = request.get_conversation_id() + conversation_id = request._get_conversation_id() if not conversation_id: # Create default session if not provided import time diff --git a/python/packages/devui/agent_framework_devui/models/_openai_custom.py b/python/packages/devui/agent_framework_devui/models/_openai_custom.py index f82ef90b72..ac0e74034a 100644 --- a/python/packages/devui/agent_framework_devui/models/_openai_custom.py +++ b/python/packages/devui/agent_framework_devui/models/_openai_custom.py @@ -324,7 +324,7 @@ def get_entity_id(self) -> str | None: return self.metadata.get("entity_id") return None - def get_conversation_id(self) -> str | None: + def _get_conversation_id(self) -> str | None: """Extract conversation_id from conversation parameter. 
Supports both string and object forms: diff --git a/python/packages/devui/pyproject.toml b/python/packages/devui/pyproject.toml index 48334dca8a..2430ceccb8 100644 --- a/python/packages/devui/pyproject.toml +++ b/python/packages/devui/pyproject.toml @@ -4,7 +4,7 @@ description = "Debug UI for Microsoft Agent Framework with OpenAI-compatible API authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251216" +version = "1.0.0b251223" license-files = ["LICENSE"] urls.homepage = "https://github.com/microsoft/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/foundry_local/LICENSE b/python/packages/foundry_local/LICENSE new file mode 100644 index 0000000000..9e841e7a26 --- /dev/null +++ b/python/packages/foundry_local/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/python/packages/foundry_local/README.md b/python/packages/foundry_local/README.md new file mode 100644 index 0000000000..c65e5a0386 --- /dev/null +++ b/python/packages/foundry_local/README.md @@ -0,0 +1,9 @@ +# Get Started with Microsoft Agent Framework Foundry Local + +Please install this package as the extra for `agent-framework`: + +```bash +pip install agent-framework-foundry-local --pre +``` + +and see the [README](https://github.com/microsoft/agent-framework/tree/main/python/README.md) for more information. diff --git a/python/packages/foundry_local/agent_framework_foundry_local/__init__.py b/python/packages/foundry_local/agent_framework_foundry_local/__init__.py new file mode 100644 index 0000000000..dbea932348 --- /dev/null +++ b/python/packages/foundry_local/agent_framework_foundry_local/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) Microsoft. All rights reserved. + +import importlib.metadata + +from ._foundry_local_client import FoundryLocalClient + +try: + __version__ = importlib.metadata.version(__name__) +except importlib.metadata.PackageNotFoundError: + __version__ = "0.0.0" # Fallback for development mode + +__all__ = [ + "FoundryLocalClient", + "__version__", +] diff --git a/python/packages/foundry_local/agent_framework_foundry_local/_foundry_local_client.py b/python/packages/foundry_local/agent_framework_foundry_local/_foundry_local_client.py new file mode 100644 index 0000000000..c2b7bd34ab --- /dev/null +++ b/python/packages/foundry_local/agent_framework_foundry_local/_foundry_local_client.py @@ -0,0 +1,160 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +from typing import Any, ClassVar + +from agent_framework import use_chat_middleware, use_function_invocation +from agent_framework._pydantic import AFBaseSettings +from agent_framework.exceptions import ServiceInitializationError +from agent_framework.observability import use_instrumentation +from agent_framework.openai._chat_client import OpenAIBaseChatClient +from foundry_local import FoundryLocalManager +from foundry_local.models import DeviceType +from openai import AsyncOpenAI + +__all__ = [ + "FoundryLocalClient", +] + + +class FoundryLocalSettings(AFBaseSettings): + """Foundry local model settings. + + The settings are first loaded from environment variables with the prefix 'FOUNDRY_LOCAL_'. + If the environment variables are not found, the settings can be loaded from a .env file + with the encoding 'utf-8'. If the settings are not found in the .env file, the settings + are ignored; however, validation will fail alerting that the settings are missing. + + Attributes: + model_id: The name of the model deployment to use. + (Env var FOUNDRY_LOCAL_MODEL_ID) + Parameters: + env_file_path: If provided, the .env settings are read from this file path location. + env_file_encoding: The encoding of the .env file, defaults to 'utf-8'. + """ + + env_prefix: ClassVar[str] = "FOUNDRY_LOCAL_" + + model_id: str + + +@use_function_invocation +@use_instrumentation +@use_chat_middleware +class FoundryLocalClient(OpenAIBaseChatClient): + """Foundry Local Chat completion class.""" + + def __init__( + self, + model_id: str | None = None, + *, + bootstrap: bool = True, + timeout: float | None = None, + prepare_model: bool = True, + device: DeviceType | None = None, + env_file_path: str | None = None, + env_file_encoding: str = "utf-8", + **kwargs: Any, + ) -> None: + """Initialize a FoundryLocalClient. + + Keyword Args: + model_id: The Foundry Local model ID or alias to use. If not provided, + it will be loaded from the FoundryLocalSettings. 
+ bootstrap: Whether to start the Foundry Local service if not already running. + Default is True. + timeout: Optional timeout for requests to Foundry Local. + This timeout is applied to any call to the Foundry Local service. + prepare_model: Whether to download the model into the cache, and load the model into + the inferencing service upon initialization. Default is True. + If false, the first call to generate a completion will load the model, + and might take a long time. + device: The device type to use for model inference. + The device is used to select the appropriate model variant. + If not provided, the default device for your system will be used. + The values are in the foundry_local.models.DeviceType enum. + env_file_path: If provided, the .env settings are read from this file path location. + env_file_encoding: The encoding of the .env file, defaults to 'utf-8'. + kwargs: Additional keyword arguments, are passed to the OpenAIBaseChatClient. + This can include middleware and additional properties. + + Examples: + + .. code-block:: python + + # Create a FoundryLocalClient with a specific model ID: + from agent_framework_foundry_local import FoundryLocalClient + + client = FoundryLocalClient(model_id="phi-4-mini") + + agent = client.create_agent( + name="LocalAgent", + instructions="You are a helpful agent.", + tools=get_weather, + ) + response = await agent.run("What's the weather like in Seattle?") + + # Or you can set the model id in the environment: + os.environ["FOUNDRY_LOCAL_MODEL_ID"] = "phi-4-mini" + client = FoundryLocalClient() + + # A FoundryLocalManager is created and if set, the service is started. + # The FoundryLocalManager is available via the `manager` property. 
+ # For instance to find out which models are available: + for model in client.manager.list_catalog_models(): + print(f"- {model.alias} for {model.task} - id={model.id}") + + # Other options include specifying the device type: + from foundry_local.models import DeviceType + + client = FoundryLocalClient( + model_id="phi-4-mini", + device=DeviceType.GPU, + ) + # and choosing if the model should be prepared on initialization: + client = FoundryLocalClient( + model_id="phi-4-mini", + prepare_model=False, + ) + # Beware, in this case the first request to generate a completion + # will take a long time as the model is loaded then. + # Alternatively, you could call the `download_model` and `load_model` methods + # on the `manager` property manually. + client.manager.download_model(alias_or_model_id="phi-4-mini", device=DeviceType.CPU) + client.manager.load_model(alias_or_model_id="phi-4-mini", device=DeviceType.CPU) + + # You can also use the CLI: + `foundry model load phi-4-mini --device Auto` + + Raises: + ServiceInitializationError: If the specified model ID or alias is not found. + Sometimes a model might be available but if you have specified a device + type that is not supported by the model, it will not be found. + + """ + settings = FoundryLocalSettings( + model_id=model_id, # type: ignore + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + ) + manager = FoundryLocalManager(bootstrap=bootstrap, timeout=timeout) + model_info = manager.get_model_info( + alias_or_model_id=settings.model_id, + device=device, + ) + if model_info is None: + message = ( + f"Model with ID or alias '{settings.model_id}:{device.value}' not found in Foundry Local." + if device + else f"Model with ID or alias '{settings.model_id}' for your current device not found in Foundry Local." 
+ ) + raise ServiceInitializationError(message) + if prepare_model: + manager.download_model(alias_or_model_id=model_info.id, device=device) + manager.load_model(alias_or_model_id=model_info.id, device=device) + + super().__init__( + model_id=model_info.id, + client=AsyncOpenAI(base_url=manager.endpoint, api_key=manager.api_key), + **kwargs, + ) + self.manager = manager diff --git a/python/packages/foundry_local/agent_framework_foundry_local/py.typed b/python/packages/foundry_local/agent_framework_foundry_local/py.typed new file mode 100644 index 0000000000..e69de29bb2 diff --git a/python/packages/foundry_local/pyproject.toml b/python/packages/foundry_local/pyproject.toml new file mode 100644 index 0000000000..6e2d2baf44 --- /dev/null +++ b/python/packages/foundry_local/pyproject.toml @@ -0,0 +1,87 @@ +[project] +name = "agent-framework-foundry-local" +description = "Foundry Local integration for Microsoft Agent Framework." +authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] +readme = "README.md" +requires-python = ">=3.10" +version = "1.0.0b251223" +license-files = ["LICENSE"] +urls.homepage = "https://aka.ms/agent-framework" +urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" +urls.release_notes = "https://github.com/microsoft/agent-framework/releases?q=tag%3Apython-1&expanded=true" +urls.issues = "https://github.com/microsoft/agent-framework/issues" +classifiers = [ + "License :: OSI Approved :: MIT License", + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", + "Typing :: Typed", +] +dependencies = [ + "agent-framework-core", + "foundry-local-sdk>=0.5.1,<1", +] + +[tool.uv] +prerelease = "if-necessary-or-explicit" +environments = [ + 
"sys_platform == 'darwin'", + "sys_platform == 'linux'", + "sys_platform == 'win32'" +] + +[tool.uv-dynamic-versioning] +fallback-version = "0.0.0" +[tool.pytest.ini_options] +testpaths = 'tests' +addopts = "-ra -q -r fEX" +asyncio_mode = "auto" +asyncio_default_fixture_loop_scope = "function" +filterwarnings = [] +timeout = 120 + +[tool.ruff] +extend = "../../pyproject.toml" + +[tool.coverage.run] +omit = [ + "**/__init__.py" +] + +[tool.pyright] +extends = "../../pyproject.toml" +exclude = ['tests'] + +[tool.mypy] +plugins = ['pydantic.mypy'] +strict = true +python_version = "3.10" +ignore_missing_imports = true +disallow_untyped_defs = true +no_implicit_optional = true +check_untyped_defs = true +warn_return_any = true +show_error_codes = true +warn_unused_ignores = false +disallow_incomplete_defs = true +disallow_untyped_decorators = true + +[tool.bandit] +targets = ["agent_framework_foundry_local"] +exclude_dirs = ["tests"] + +[tool.poe] +executor.type = "uv" +include = "../../shared_tasks.toml" +[tool.poe.tasks] +mypy = "mypy --config-file $POE_ROOT/pyproject.toml agent_framework_foundry_local" +test = "pytest --cov=agent_framework_foundry_local --cov-report=term-missing:skip-covered tests" + +[build-system] +requires = ["flit-core >= 3.11,<4.0"] +build-backend = "flit_core.buildapi" diff --git a/python/packages/foundry_local/samples/foundry_local_agent.py b/python/packages/foundry_local/samples/foundry_local_agent.py new file mode 100644 index 0000000000..e74f3de073 --- /dev/null +++ b/python/packages/foundry_local/samples/foundry_local_agent.py @@ -0,0 +1,78 @@ +# Copyright (c) Microsoft. All rights reserved. +# ruff: noqa + +import asyncio +from random import randint +from typing import TYPE_CHECKING, Annotated + +from agent_framework_foundry_local import FoundryLocalClient + +if TYPE_CHECKING: + from agent_framework import ChatAgent + +""" +This sample demonstrates basic usage of the FoundryLocalClient. 
+Shows both streaming and non-streaming responses with function tools. + +Running this sample the first time will be slow, as the model needs to be +downloaded and initialized. + +Also, not every model supports function calling, so be sure to check the +model capabilities in the Foundry catalog, or pick one from the list printed +when running this sample. +""" + + +def get_weather( + location: Annotated[str, "The location to get the weather for."], +) -> str: + """Get the weather for a given location.""" + conditions = ["sunny", "cloudy", "rainy", "stormy"] + return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C." + + +async def non_streaming_example(agent: "ChatAgent") -> None: + """Example of non-streaming response (get the complete result at once).""" + print("=== Non-streaming Response Example ===") + + query = "What's the weather like in Seattle?" + print(f"User: {query}") + result = await agent.run(query) + print(f"Agent: {result}\n") + + +async def streaming_example(agent: "ChatAgent") -> None: + """Example of streaming response (get results as they are generated).""" + print("=== Streaming Response Example ===") + + query = "What's the weather like in Amsterdam?" 
+ print(f"User: {query}") + print("Agent: ", end="", flush=True) + async for chunk in agent.run_stream(query): + if chunk.text: + print(chunk.text, end="", flush=True) + print("\n") + + +async def main() -> None: + print("=== Basic Foundry Local Client Agent Example ===") + + client = FoundryLocalClient(model_id="phi-4-mini") + print(f"Client Model ID: {client.model_id}\n") + print("Other available models (tool calling supported only):") + for model in client.manager.list_catalog_models(): + if model.supports_tool_calling: + print( + f"- {model.alias} for {model.task} - id={model.id} - {(model.file_size_mb / 1000):.2f} GB - {model.license}" + ) + agent = client.create_agent( + name="LocalAgent", + instructions="You are a helpful agent.", + tools=get_weather, + ) + await non_streaming_example(agent) + await streaming_example(agent) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/packages/foundry_local/tests/conftest.py b/python/packages/foundry_local/tests/conftest.py new file mode 100644 index 0000000000..0afc223356 --- /dev/null +++ b/python/packages/foundry_local/tests/conftest.py @@ -0,0 +1,55 @@ +# Copyright (c) Microsoft. All rights reserved. 
+from typing import Any +from unittest.mock import MagicMock + +from pytest import fixture + + +@fixture +def exclude_list(request: Any) -> list[str]: + """Fixture that returns a list of environment variables to exclude.""" + return request.param if hasattr(request, "param") else [] + + +@fixture +def override_env_param_dict(request: Any) -> dict[str, str]: + """Fixture that returns a dict of environment variables to override.""" + return request.param if hasattr(request, "param") else {} + + +@fixture() +def foundry_local_unit_test_env(monkeypatch: Any, exclude_list: list[str], override_env_param_dict: dict[str, str]): + """Fixture to set environment variables for FoundryLocalSettings.""" + if exclude_list is None: + exclude_list = [] + + if override_env_param_dict is None: + override_env_param_dict = {} + + env_vars = { + "FOUNDRY_LOCAL_MODEL_ID": "test-model-id", + } + + env_vars.update(override_env_param_dict) + + for key, value in env_vars.items(): + if key in exclude_list: + monkeypatch.delenv(key, raising=False) + continue + monkeypatch.setenv(key, value) + + return env_vars + + +@fixture +def mock_foundry_local_manager() -> MagicMock: + """Fixture that provides a mock FoundryLocalManager.""" + mock_manager = MagicMock() + mock_manager.endpoint = "http://localhost:5272/v1" + mock_manager.api_key = "test-api-key" + + mock_model_info = MagicMock() + mock_model_info.id = "test-model-id" + mock_manager.get_model_info.return_value = mock_model_info + + return mock_manager diff --git a/python/packages/foundry_local/tests/test_foundry_local_client.py b/python/packages/foundry_local/tests/test_foundry_local_client.py new file mode 100644 index 0000000000..324c94630e --- /dev/null +++ b/python/packages/foundry_local/tests/test_foundry_local_client.py @@ -0,0 +1,198 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +from unittest.mock import MagicMock, patch + +import pytest +from agent_framework import ChatClientProtocol +from agent_framework.exceptions import ServiceInitializationError +from pydantic import ValidationError + +from agent_framework_foundry_local import FoundryLocalClient +from agent_framework_foundry_local._foundry_local_client import FoundryLocalSettings + +# Settings Tests + + +def test_foundry_local_settings_init_from_env(foundry_local_unit_test_env: dict[str, str]) -> None: + """Test FoundryLocalSettings initialization from environment variables.""" + settings = FoundryLocalSettings(env_file_path="test.env") + + assert settings.model_id == foundry_local_unit_test_env["FOUNDRY_LOCAL_MODEL_ID"] + + +def test_foundry_local_settings_init_with_explicit_values() -> None: + """Test FoundryLocalSettings initialization with explicit values.""" + settings = FoundryLocalSettings(model_id="custom-model-id", env_file_path="test.env") + + assert settings.model_id == "custom-model-id" + + +@pytest.mark.parametrize("exclude_list", [["FOUNDRY_LOCAL_MODEL_ID"]], indirect=True) +def test_foundry_local_settings_missing_model_id(foundry_local_unit_test_env: dict[str, str]) -> None: + """Test FoundryLocalSettings when model_id is missing raises ValidationError.""" + with pytest.raises(ValidationError): + FoundryLocalSettings(env_file_path="test.env") + + +def test_foundry_local_settings_explicit_overrides_env(foundry_local_unit_test_env: dict[str, str]) -> None: + """Test that explicit values override environment variables.""" + settings = FoundryLocalSettings(model_id="override-model-id", env_file_path="test.env") + + assert settings.model_id == "override-model-id" + assert settings.model_id != foundry_local_unit_test_env["FOUNDRY_LOCAL_MODEL_ID"] + + +# Client Initialization Tests + + +def test_foundry_local_client_init(mock_foundry_local_manager: MagicMock) -> None: + """Test FoundryLocalClient initialization with mocked manager.""" + with patch( + 
"agent_framework_foundry_local._foundry_local_client.FoundryLocalManager", + return_value=mock_foundry_local_manager, + ): + client = FoundryLocalClient(model_id="test-model-id", env_file_path="test.env") + + assert client.model_id == "test-model-id" + assert client.manager is mock_foundry_local_manager + assert isinstance(client, ChatClientProtocol) + + +def test_foundry_local_client_init_with_bootstrap_false(mock_foundry_local_manager: MagicMock) -> None: + """Test FoundryLocalClient initialization with bootstrap=False.""" + with patch( + "agent_framework_foundry_local._foundry_local_client.FoundryLocalManager", + return_value=mock_foundry_local_manager, + ) as mock_manager_class: + FoundryLocalClient(model_id="test-model-id", bootstrap=False, env_file_path="test.env") + + mock_manager_class.assert_called_once_with( + bootstrap=False, + timeout=None, + ) + + +def test_foundry_local_client_init_with_timeout(mock_foundry_local_manager: MagicMock) -> None: + """Test FoundryLocalClient initialization with custom timeout.""" + with patch( + "agent_framework_foundry_local._foundry_local_client.FoundryLocalManager", + return_value=mock_foundry_local_manager, + ) as mock_manager_class: + FoundryLocalClient(model_id="test-model-id", timeout=60.0, env_file_path="test.env") + + mock_manager_class.assert_called_once_with( + bootstrap=True, + timeout=60.0, + ) + + +def test_foundry_local_client_init_model_not_found(mock_foundry_local_manager: MagicMock) -> None: + """Test FoundryLocalClient initialization when model is not found.""" + mock_foundry_local_manager.get_model_info.return_value = None + + with ( + patch( + "agent_framework_foundry_local._foundry_local_client.FoundryLocalManager", + return_value=mock_foundry_local_manager, + ), + pytest.raises(ServiceInitializationError, match="not found in Foundry Local"), + ): + FoundryLocalClient(model_id="unknown-model", env_file_path="test.env") + + +def test_foundry_local_client_uses_model_info_id(mock_foundry_local_manager: 
MagicMock) -> None: + """Test that client uses the model ID from model_info, not the alias.""" + mock_model_info = MagicMock() + mock_model_info.id = "resolved-model-id" + mock_foundry_local_manager.get_model_info.return_value = mock_model_info + + with patch( + "agent_framework_foundry_local._foundry_local_client.FoundryLocalManager", + return_value=mock_foundry_local_manager, + ): + client = FoundryLocalClient(model_id="model-alias", env_file_path="test.env") + + assert client.model_id == "resolved-model-id" + + +def test_foundry_local_client_init_from_env( + foundry_local_unit_test_env: dict[str, str], mock_foundry_local_manager: MagicMock +) -> None: + """Test FoundryLocalClient initialization using environment variables.""" + with patch( + "agent_framework_foundry_local._foundry_local_client.FoundryLocalManager", + return_value=mock_foundry_local_manager, + ): + client = FoundryLocalClient(env_file_path="test.env") + + assert client.model_id == foundry_local_unit_test_env["FOUNDRY_LOCAL_MODEL_ID"] + + +def test_foundry_local_client_init_with_device(mock_foundry_local_manager: MagicMock) -> None: + """Test FoundryLocalClient initialization with device parameter.""" + from foundry_local.models import DeviceType + + with patch( + "agent_framework_foundry_local._foundry_local_client.FoundryLocalManager", + return_value=mock_foundry_local_manager, + ): + FoundryLocalClient(model_id="test-model-id", device=DeviceType.CPU, env_file_path="test.env") + + mock_foundry_local_manager.get_model_info.assert_called_once_with( + alias_or_model_id="test-model-id", + device=DeviceType.CPU, + ) + mock_foundry_local_manager.download_model.assert_called_once_with( + alias_or_model_id="test-model-id", + device=DeviceType.CPU, + ) + mock_foundry_local_manager.load_model.assert_called_once_with( + alias_or_model_id="test-model-id", + device=DeviceType.CPU, + ) + + +def test_foundry_local_client_init_model_not_found_with_device(mock_foundry_local_manager: MagicMock) -> None: + """Test 
FoundryLocalClient error message includes device when model not found with device specified.""" + from foundry_local.models import DeviceType + + mock_foundry_local_manager.get_model_info.return_value = None + + with ( + patch( + "agent_framework_foundry_local._foundry_local_client.FoundryLocalManager", + return_value=mock_foundry_local_manager, + ), + pytest.raises(ServiceInitializationError, match="unknown-model:GPU.*not found"), + ): + FoundryLocalClient(model_id="unknown-model", device=DeviceType.GPU, env_file_path="test.env") + + +def test_foundry_local_client_init_with_prepare_model_false(mock_foundry_local_manager: MagicMock) -> None: + """Test FoundryLocalClient initialization with prepare_model=False skips download and load.""" + with patch( + "agent_framework_foundry_local._foundry_local_client.FoundryLocalManager", + return_value=mock_foundry_local_manager, + ): + FoundryLocalClient(model_id="test-model-id", prepare_model=False, env_file_path="test.env") + + mock_foundry_local_manager.download_model.assert_not_called() + mock_foundry_local_manager.load_model.assert_not_called() + + +def test_foundry_local_client_init_calls_download_and_load(mock_foundry_local_manager: MagicMock) -> None: + """Test FoundryLocalClient initialization calls download_model and load_model by default.""" + with patch( + "agent_framework_foundry_local._foundry_local_client.FoundryLocalManager", + return_value=mock_foundry_local_manager, + ): + FoundryLocalClient(model_id="test-model-id", env_file_path="test.env") + + mock_foundry_local_manager.download_model.assert_called_once_with( + alias_or_model_id="test-model-id", + device=None, + ) + mock_foundry_local_manager.load_model.assert_called_once_with( + alias_or_model_id="test-model-id", + device=None, + ) diff --git a/python/packages/lab/pyproject.toml b/python/packages/lab/pyproject.toml index ba40548ff1..4465b6fdca 100644 --- a/python/packages/lab/pyproject.toml +++ b/python/packages/lab/pyproject.toml @@ -4,7 +4,7 @@ 
description = "Experimental modules for Microsoft Agent Framework" authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251216" +version = "1.0.0b251223" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/mem0/pyproject.toml b/python/packages/mem0/pyproject.toml index 4eacbc4db1..361517afae 100644 --- a/python/packages/mem0/pyproject.toml +++ b/python/packages/mem0/pyproject.toml @@ -4,7 +4,7 @@ description = "Mem0 integration for Microsoft Agent Framework." authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251216" +version = "1.0.0b251223" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/ollama/agent_framework_ollama/_chat_client.py b/python/packages/ollama/agent_framework_ollama/_chat_client.py index 07df5a0d5f..f047a5d4b3 100644 --- a/python/packages/ollama/agent_framework_ollama/_chat_client.py +++ b/python/packages/ollama/agent_framework_ollama/_chat_client.py @@ -117,9 +117,11 @@ async def _inner_get_response( chat_options: ChatOptions, **kwargs: Any, ) -> ChatResponse: + # prepare options_dict = self._prepare_options(messages, chat_options) try: + # execute response: OllamaChatResponse = await self.client.chat( # type: ignore[misc] stream=False, **options_dict, @@ -128,7 +130,8 @@ async def _inner_get_response( except Exception as ex: raise ServiceResponseException(f"Ollama chat request failed : {ex}", ex) from ex - return self._ollama_response_to_agent_framework_response(response) + # process + return self._parse_response_from_ollama(response) async def _inner_get_streaming_response( self, @@ -137,9 +140,11 @@ async def 
_inner_get_streaming_response( chat_options: ChatOptions, **kwargs: Any, ) -> AsyncIterable[ChatResponseUpdate]: + # prepare options_dict = self._prepare_options(messages, chat_options) try: + # execute response_object: AsyncIterable[OllamaChatResponse] = await self.client.chat( # type: ignore[misc] stream=True, **options_dict, @@ -148,49 +153,61 @@ async def _inner_get_streaming_response( except Exception as ex: raise ServiceResponseException(f"Ollama streaming chat request failed : {ex}", ex) from ex + # process async for part in response_object: - yield self._ollama_streaming_response_to_agent_framework_response(part) + yield self._parse_streaming_response_from_ollama(part) def _prepare_options(self, messages: MutableSequence[ChatMessage], chat_options: ChatOptions) -> dict[str, Any]: - # Preprocess web search tool if it exists - options_dict = chat_options.to_dict(exclude={"instructions", "type"}) - - # Promote additional_properties to the top level of options_dict - additional_props = options_dict.pop("additional_properties", {}) - options_dict.update(additional_props) - - # Prepare Messages from Agent Framework format to Ollama format - if messages and "messages" not in options_dict: - options_dict["messages"] = self._prepare_chat_history_for_request(messages) - if "messages" not in options_dict: - raise ServiceInvalidRequestError("Messages are required for chat completions") - - # Prepare Tools from Agent Framework format to Json Schema format - if chat_options.tools: - options_dict["tools"] = self._chat_to_tool_spec(chat_options.tools) - - # Currently Ollama only supports auto tool choice + # tool choice - Currently Ollama only supports auto tool choice if chat_options.tool_choice == "required": raise ServiceInvalidRequestError("Ollama does not support required tool choice.") - # Always auto: remove tool_choice since Ollama does not expose configuration to force or disable tools. 
- if "tool_choice" in options_dict: - del options_dict["tool_choice"] - # Rename model_id to model for Ollama API, if no model is provided use the one from client initialization - if "model_id" in options_dict: - options_dict["model"] = options_dict.pop("model_id") + run_options = chat_options.to_dict( + exclude={ + "type", + "instructions", + "tool_choice", # Ollama does not support tool_choice configuration + "additional_properties", # handled separately + } + ) + + # messages + if messages and "messages" not in run_options: + run_options["messages"] = self._prepare_messages_for_ollama(messages) + if "messages" not in run_options: + raise ServiceInvalidRequestError("Messages are required for chat completions") - if "model_id" not in options_dict: - options_dict["model"] = self.model_id + # translations between ChatOptions and Ollama API + translations = {"model_id": "model"} + for old_key, new_key in translations.items(): + if old_key in run_options and old_key != new_key: + run_options[new_key] = run_options.pop(old_key) + + # model id + if not run_options.get("model"): + if not self.model_id: + raise ValueError("model_id must be a non-empty string") + run_options["model"] = self.model_id + + # tools + if chat_options.tools and (tools := self._prepare_tools_for_ollama(chat_options.tools)): + run_options["tools"] = tools + + # additional properties + additional_options = { + key: value for key, value in chat_options.additional_properties.items() if value is not None + } + if additional_options: + run_options.update(additional_options) - return options_dict + return run_options - def _prepare_chat_history_for_request(self, messages: MutableSequence[ChatMessage]) -> list[OllamaMessage]: - ollama_messages = [self._agent_framework_message_to_ollama_message(msg) for msg in messages] + def _prepare_messages_for_ollama(self, messages: MutableSequence[ChatMessage]) -> list[OllamaMessage]: + ollama_messages = [self._prepare_message_for_ollama(msg) for msg in messages] # 
Flatten the list of lists into a single list return list(chain.from_iterable(ollama_messages)) - def _agent_framework_message_to_ollama_message(self, message: ChatMessage) -> list[OllamaMessage]: + def _prepare_message_for_ollama(self, message: ChatMessage) -> list[OllamaMessage]: message_converters: dict[str, Callable[[ChatMessage], list[OllamaMessage]]] = { Role.SYSTEM.value: self._format_system_message, Role.USER.value: self._format_user_message, @@ -222,7 +239,8 @@ def _format_user_message(self, message: ChatMessage) -> list[OllamaMessage]: def _format_assistant_message(self, message: ChatMessage) -> list[OllamaMessage]: text_content = message.text - reasoning_contents = "".join(c.text for c in message.contents if isinstance(c, TextReasoningContent)) + # Ollama shouldn't have encrypted reasoning, so we just process text. + reasoning_contents = "".join((c.text or "") for c in message.contents if isinstance(c, TextReasoningContent)) assistant_message = OllamaMessage(role="assistant", content=text_content, thinking=reasoning_contents) @@ -250,21 +268,19 @@ def _format_tool_message(self, message: ChatMessage) -> list[OllamaMessage]: if isinstance(item, FunctionResultContent) ] - def _ollama_response_to_agent_framework_content(self, response: OllamaChatResponse) -> list[Contents]: + def _parse_contents_from_ollama(self, response: OllamaChatResponse) -> list[Contents]: contents: list[Contents] = [] if response.message.thinking: contents.append(TextReasoningContent(text=response.message.thinking)) if response.message.content: contents.append(TextContent(text=response.message.content)) if response.message.tool_calls: - tool_calls = self._parse_ollama_tool_calls(response.message.tool_calls) + tool_calls = self._parse_tool_calls_from_ollama(response.message.tool_calls) contents.extend(tool_calls) return contents - def _ollama_streaming_response_to_agent_framework_response( - self, response: OllamaChatResponse - ) -> ChatResponseUpdate: - contents = 
self._ollama_response_to_agent_framework_content(response) + def _parse_streaming_response_from_ollama(self, response: OllamaChatResponse) -> ChatResponseUpdate: + contents = self._parse_contents_from_ollama(response) return ChatResponseUpdate( contents=contents, role=Role.ASSISTANT, @@ -272,8 +288,8 @@ def _ollama_streaming_response_to_agent_framework_response( created_at=response.created_at, ) - def _ollama_response_to_agent_framework_response(self, response: OllamaChatResponse) -> ChatResponse: - contents = self._ollama_response_to_agent_framework_content(response) + def _parse_response_from_ollama(self, response: OllamaChatResponse) -> ChatResponse: + contents = self._parse_contents_from_ollama(response) return ChatResponse( messages=[ChatMessage(role=Role.ASSISTANT, contents=contents)], @@ -285,7 +301,7 @@ def _ollama_response_to_agent_framework_response(self, response: OllamaChatRespo ), ) - def _parse_ollama_tool_calls(self, tool_calls: Sequence[OllamaMessage.ToolCall]) -> list[Contents]: + def _parse_tool_calls_from_ollama(self, tool_calls: Sequence[OllamaMessage.ToolCall]) -> list[Contents]: resp: list[Contents] = [] for tool in tool_calls: fcc = FunctionCallContent( @@ -297,7 +313,7 @@ def _parse_ollama_tool_calls(self, tool_calls: Sequence[OllamaMessage.ToolCall]) resp.append(fcc) return resp - def _chat_to_tool_spec(self, tools: list[ToolProtocol | MutableMapping[str, Any]]) -> list[dict[str, Any]]: + def _prepare_tools_for_ollama(self, tools: list[ToolProtocol | MutableMapping[str, Any]]) -> list[dict[str, Any]]: chat_tools: list[dict[str, Any]] = [] for tool in tools: if isinstance(tool, ToolProtocol): diff --git a/python/packages/ollama/getting_started/README.md b/python/packages/ollama/getting_started/README.md deleted file mode 100644 index bf159c5475..0000000000 --- a/python/packages/ollama/getting_started/README.md +++ /dev/null @@ -1,38 +0,0 @@ -# Ollama Examples - -This folder contains examples demonstrating how to use Ollama models with the 
Agent Framework. - -## Prerequisites - -1. **Install Ollama**: Download and install Ollama from [ollama.com](https://ollama.com/) -2. **Start Ollama**: Ensure Ollama is running on your local machine -3. **Pull a model**: Run `ollama pull mistral` (or any other model you prefer) - - For function calling examples, use models that support tool calling like `mistral` or `qwen2.5` - - For reasoning examples, use models that support reasoning like `qwen2.5:8b` - - For Multimodality you can use models like `gemma3:4b` - -> **Note**: Not all models support all features. Function calling and reasoning capabilities depend on the specific model you're using. - -## Examples - -| File | Description | -|------|-------------| -| [`ollama_agent_basic.py`](ollama_agent_basic.py) | Demonstrates basic Ollama agent usage with the native Ollama Chat Client. Shows both streaming and non-streaming responses with tool calling capabilities. | -| [`ollama_agent_reasoning.py`](ollama_agent_reasoning.py) | Demonstrates Ollama agent with reasoning capabilities using the native Ollama Chat Client. Shows how to enable thinking/reasoning mode. | -| [`ollama_chat_client.py`](ollama_chat_client.py) | Ollama Chat Client with native Ollama Chat Client | -| [`ollama_chat_multimodal.py`](ollama_chat_multimodal.py) | Ollama Chat with multimodal native Ollama Chat Client | - -## Configuration - -The examples use environment variables for configuration. 
Set the appropriate variables based on which example you're running: - -### For Native Ollama Examples (`ollama_agent_basic.py`, `ollama_agent_reasoning.py`) - -Set the following environment variables: - -- `OLLAMA_HOST`: The base URL for your Ollama server (optional, defaults to `http://localhost:11434`) - - Example: `export OLLAMA_HOST="http://localhost:11434"` - -- `OLLAMA_CHAT_MODEL_ID`: The model name to use - - Example: `export OLLAMA_CHAT_MODEL_ID="qwen2.5:8b"` - - Must be a model you have pulled with Ollama \ No newline at end of file diff --git a/python/packages/ollama/pyproject.toml b/python/packages/ollama/pyproject.toml index 5325bb9d83..b983c6ac93 100644 --- a/python/packages/ollama/pyproject.toml +++ b/python/packages/ollama/pyproject.toml @@ -4,7 +4,7 @@ description = "Ollama integration for Microsoft Agent Framework." authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251216" +version = "1.0.0b251223" license-files = ["LICENSE"] urls.homepage = "https://learn.microsoft.com/en-us/agent-framework/" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/purview/pyproject.toml b/python/packages/purview/pyproject.toml index 304e5ea178..eaebae4276 100644 --- a/python/packages/purview/pyproject.toml +++ b/python/packages/purview/pyproject.toml @@ -4,7 +4,7 @@ description = "Microsoft Purview (Graph dataSecurityAndGovernance) integration f authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251216" +version = "1.0.0b251223" license-files = ["LICENSE"] urls.homepage = "https://github.com/microsoft/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/redis/agent_framework_redis/_chat_message_store.py b/python/packages/redis/agent_framework_redis/_chat_message_store.py 
index 4c83dcc86f..4b50c63571 100644 --- a/python/packages/redis/agent_framework_redis/_chat_message_store.py +++ b/python/packages/redis/agent_framework_redis/_chat_message_store.py @@ -9,6 +9,7 @@ import redis.asyncio as redis from agent_framework import ChatMessage from agent_framework._serialization import SerializationMixin +from redis.credentials import CredentialProvider class RedisStoreState(SerializationMixin): @@ -55,6 +56,11 @@ class RedisChatMessageStore: def __init__( self, redis_url: str | None = None, + credential_provider: CredentialProvider | None = None, + host: str | None = None, + port: int = 6380, + ssl: bool = True, + username: str | None = None, thread_id: str | None = None, key_prefix: str = "chat_messages", max_messages: int | None = None, @@ -63,12 +69,19 @@ def __init__( """Initialize the Redis chat message store. Creates a Redis-backed chat message store for a specific conversation thread. - The store will automatically create a Redis connection and manage message - persistence using Redis List operations. + Supports both traditional URL-based authentication and Azure Managed Redis + with credential provider. Args: redis_url: Redis connection URL (e.g., "redis://localhost:6379"). - Required for establishing Redis connection. + Used for traditional authentication. Mutually exclusive with credential_provider. + credential_provider: Redis credential provider (redis.credentials.CredentialProvider) for + Azure AD authentication. Requires host parameter. Mutually exclusive with redis_url. + host: Redis host name (e.g., "myredis.redis.cache.windows.net"). + Required when using credential_provider. + port: Redis port number. Defaults to 6380 (Azure Redis SSL port). + ssl: Enable SSL/TLS connection. Defaults to True. + username: Redis username. Defaults to None. thread_id: Unique identifier for this conversation thread. If not provided, a UUID will be auto-generated. 
This becomes part of the Redis key: {key_prefix}:{thread_id} @@ -82,23 +95,58 @@ def __init__( Useful for resuming conversations or seeding with context. Raises: - ValueError: If redis_url is None (Redis connection is required). - redis.ConnectionError: If unable to connect to Redis server. + ValueError: If neither redis_url nor credential_provider is provided. + ValueError: If both redis_url and credential_provider are provided. + ValueError: If credential_provider is used without host parameter. + + Examples: + Traditional connection: + store = RedisChatMessageStore( + redis_url="redis://localhost:6379", + thread_id="conversation_123" + ) + + Azure Managed Redis with credential provider: + from redis.credentials import CredentialProvider + from azure.identity.aio import DefaultAzureCredential + + store = RedisChatMessageStore( + credential_provider=CredentialProvider(DefaultAzureCredential()), + host="myredis.redis.cache.windows.net", + thread_id="conversation_123" + ) + """ + # Validate connection parameters + if redis_url is None and credential_provider is None: + raise ValueError("Either redis_url or credential_provider must be provided") + if redis_url is not None and credential_provider is not None: + raise ValueError("redis_url and credential_provider are mutually exclusive") - """ - # Validate required parameters - if redis_url is None: - raise ValueError("redis_url is required for Redis connection") + if credential_provider is not None and host is None: + raise ValueError("host is required when using credential_provider") # Store configuration - self.redis_url = redis_url self.thread_id = thread_id or f"thread_{uuid4()}" self.key_prefix = key_prefix self.max_messages = max_messages - # Initialize Redis client with connection pooling and async support - self._redis_client = redis.from_url(redis_url, decode_responses=True) # type: ignore[no-untyped-call] + # Initialize Redis client based on authentication method + if credential_provider is not None and host 
is not None: + # Azure AD authentication with credential provider + self.redis_url = None # Not using URL-based auth + self._redis_client = redis.Redis( + host=host, + port=port, + ssl=ssl, + username=username, + credential_provider=credential_provider, + decode_responses=True, + ) + else: + # Traditional URL-based authentication + self.redis_url = redis_url + self._redis_client = redis.from_url(redis_url, decode_responses=True) # type: ignore[no-untyped-call] # Handle initial messages (will be moved to Redis on first access) self._initial_messages = list(messages) if messages else [] diff --git a/python/packages/redis/pyproject.toml b/python/packages/redis/pyproject.toml index f2f5031368..47b9f65809 100644 --- a/python/packages/redis/pyproject.toml +++ b/python/packages/redis/pyproject.toml @@ -4,7 +4,7 @@ description = "Redis integration for Microsoft Agent Framework." authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251216" +version = "1.0.0b251223" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/redis/tests/test_redis_chat_message_store.py b/python/packages/redis/tests/test_redis_chat_message_store.py index fa403eb2fe..dc97d81872 100644 --- a/python/packages/redis/tests/test_redis_chat_message_store.py +++ b/python/packages/redis/tests/test_redis_chat_message_store.py @@ -93,11 +93,118 @@ def test_init_with_max_messages(self): assert store.max_messages == 100 def test_init_with_redis_url_required(self): - """Test that redis_url is required for initialization.""" - with pytest.raises(ValueError, match="redis_url is required for Redis connection"): - # Should raise an exception since redis_url is required + """Test that either redis_url or credential_provider is required.""" + with pytest.raises(ValueError, match="Either redis_url or 
credential_provider must be provided"): RedisChatMessageStore(thread_id="test123") + def test_init_with_credential_provider(self): + """Test initialization with credential_provider.""" + mock_credential_provider = MagicMock() + + with patch("agent_framework_redis._chat_message_store.redis.Redis") as mock_redis_class: + mock_redis_instance = MagicMock() + mock_redis_class.return_value = mock_redis_instance + + store = RedisChatMessageStore( + credential_provider=mock_credential_provider, + host="myredis.redis.cache.windows.net", + thread_id="test123", + ) + + # Verify Redis.Redis was called with correct parameters + mock_redis_class.assert_called_once_with( + host="myredis.redis.cache.windows.net", + port=6380, + ssl=True, + username=None, + credential_provider=mock_credential_provider, + decode_responses=True, + ) + # Verify store instance is properly initialized + assert store.thread_id == "test123" + assert store.redis_url is None # Should be None for credential provider auth + assert store.key_prefix == "chat_messages" + assert store.max_messages is None + + def test_init_with_credential_provider_custom_port(self): + """Test initialization with credential_provider and custom port.""" + mock_credential_provider = MagicMock() + + with patch("agent_framework_redis._chat_message_store.redis.Redis") as mock_redis_class: + mock_redis_instance = MagicMock() + mock_redis_class.return_value = mock_redis_instance + + store = RedisChatMessageStore( + credential_provider=mock_credential_provider, + host="myredis.redis.cache.windows.net", + port=6379, + ssl=False, + username="admin", + thread_id="test123", + ) + + # Verify custom parameters were passed + mock_redis_class.assert_called_once_with( + host="myredis.redis.cache.windows.net", + port=6379, + ssl=False, + username="admin", + credential_provider=mock_credential_provider, + decode_responses=True, + ) + # Verify store instance is properly initialized + assert store.thread_id == "test123" + assert store.redis_url is 
None # Should be None for credential provider auth + assert store.key_prefix == "chat_messages" + + def test_init_credential_provider_requires_host(self): + """Test that credential_provider requires host parameter.""" + mock_credential_provider = MagicMock() + + with pytest.raises(ValueError, match="host is required when using credential_provider"): + RedisChatMessageStore( + credential_provider=mock_credential_provider, + thread_id="test123", + ) + + def test_init_mutually_exclusive_params(self): + """Test that redis_url and credential_provider are mutually exclusive.""" + mock_credential_provider = MagicMock() + + with pytest.raises(ValueError, match="redis_url and credential_provider are mutually exclusive"): + RedisChatMessageStore( + redis_url="redis://localhost:6379", + credential_provider=mock_credential_provider, + host="myredis.redis.cache.windows.net", + thread_id="test123", + ) + + async def test_serialize_with_credential_provider(self): + """Test that serialization works correctly with credential provider authentication.""" + mock_credential_provider = MagicMock() + + with patch("agent_framework_redis._chat_message_store.redis.Redis") as mock_redis_class: + mock_redis_instance = MagicMock() + mock_redis_class.return_value = mock_redis_instance + + store = RedisChatMessageStore( + credential_provider=mock_credential_provider, + host="myredis.redis.cache.windows.net", + thread_id="test123", + key_prefix="custom_prefix", + max_messages=100, + ) + + # Serialize the store state + state = await store.serialize() + + # Verify serialization includes correct values + assert state["thread_id"] == "test123" + assert state["redis_url"] is None # Should be None for credential provider auth + assert state["key_prefix"] == "custom_prefix" + assert state["max_messages"] == 100 + assert state["type"] == "redis_store_state" + def test_init_with_initial_messages(self, sample_messages): """Test initialization with initial messages.""" with 
patch("agent_framework_redis._chat_message_store.redis.from_url"): diff --git a/python/pyproject.toml b/python/pyproject.toml index ded393a42f..edf0ae9661 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -4,7 +4,7 @@ description = "Microsoft Agent Framework for building AI Agents with Python. Thi authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251216" +version = "1.0.0b251223" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" @@ -23,7 +23,7 @@ classifiers = [ "Typing :: Typed", ] dependencies = [ - "agent-framework-core[all]==1.0.0b251216", + "agent-framework-core[all]==1.0.0b251223", ] [dependency-groups] @@ -90,11 +90,13 @@ agent-framework-azure-ai-search = { workspace = true } agent-framework-anthropic = { workspace = true } agent-framework-azure-ai = { workspace = true } agent-framework-azurefunctions = { workspace = true } +agent-framework-bedrock = { workspace = true } agent-framework-chatkit = { workspace = true } agent-framework-copilotstudio = { workspace = true } agent-framework-declarative = { workspace = true } agent-framework-devui = { workspace = true } agent-framework-durabletask = { workspace = true } +agent-framework-foundry-local = { workspace = true } agent-framework-lab = { workspace = true } agent-framework-mem0 = { workspace = true } agent-framework-ollama = { workspace = true } @@ -266,13 +268,6 @@ pytest --import-mode=importlib packages/**/tests """ -[tool.poe.tasks.azure-ai-tests] -cmd = """ -pytest --import-mode=importlib --n logical --dist loadfile --dist worksteal -packages/azure-ai/tests -""" - [tool.poe.tasks.venv] cmd = "uv venv --clear --python $python" args = [{ name = "python", default = "3.13", options = ['-p', '--python'] }] diff --git a/python/samples/README.md b/python/samples/README.md index 9c87ddd67b..c03390515c 100644 
--- a/python/samples/README.md +++ b/python/samples/README.md @@ -99,11 +99,15 @@ This directory contains samples demonstrating the capabilities of Microsoft Agen ### Ollama +The recommended way to use Ollama is via the native `OllamaChatClient` from the `agent-framework-ollama` package. + | File | Description | |------|-------------| -| [`getting_started/agents/ollama/ollama_with_openai_chat_client.py`](./getting_started/agents/ollama/ollama_with_openai_chat_client.py) | Ollama with OpenAI Chat Client Example | -| [`packages/ollama/getting_started/ollama_agent_basic.py`](../packages/ollama/getting_started/ollama_agent_basic.py) | (Experimental) Ollama Agent with native Ollama Chat Client | -| [`packages/ollama/getting_started/ollama_agent_reasoning.py`](../packages/ollama/getting_started/ollama_agent_reasoning.py) | (Experimental) Ollama Reasoning Agent with native Ollama Chat Client | +| [`getting_started/agents/ollama/ollama_agent_basic.py`](./getting_started/agents/ollama/ollama_agent_basic.py) | Basic Ollama Agent with native Ollama Chat Client | +| [`getting_started/agents/ollama/ollama_agent_reasoning.py`](./getting_started/agents/ollama/ollama_agent_reasoning.py) | Ollama Agent with reasoning capabilities | +| [`getting_started/agents/ollama/ollama_chat_client.py`](./getting_started/agents/ollama/ollama_chat_client.py) | Direct usage of Ollama Chat Client | +| [`getting_started/agents/ollama/ollama_chat_multimodal.py`](./getting_started/agents/ollama/ollama_chat_multimodal.py) | Ollama Chat Client with multimodal (image) input | +| [`getting_started/agents/ollama/ollama_with_openai_chat_client.py`](./getting_started/agents/ollama/ollama_with_openai_chat_client.py) | Alternative: Ollama via OpenAI Chat Client | ### OpenAI @@ -149,7 +153,7 @@ This directory contains samples demonstrating the capabilities of Microsoft Agen | [`getting_started/chat_client/openai_assistants_client.py`](./getting_started/chat_client/openai_assistants_client.py) | OpenAI 
Assistants Client Direct Usage Example | | [`getting_started/chat_client/openai_chat_client.py`](./getting_started/chat_client/openai_chat_client.py) | OpenAI Chat Client Direct Usage Example | | [`getting_started/chat_client/openai_responses_client.py`](./getting_started/chat_client/openai_responses_client.py) | OpenAI Responses Client Direct Usage Example | -| [`packages/ollama/getting_started/ollama_chat_client.py`](../packages/ollama/getting_started/ollama_chat_client.py) | (Experimental) Ollama Chat Client with native Ollama Chat Client | + ## Context Providers @@ -225,7 +229,7 @@ This directory contains samples demonstrating the capabilities of Microsoft Agen | [`getting_started/multimodal_input/azure_chat_multimodal.py`](./getting_started/multimodal_input/azure_chat_multimodal.py) | Azure OpenAI Chat with multimodal (image) input example | | [`getting_started/multimodal_input/azure_responses_multimodal.py`](./getting_started/multimodal_input/azure_responses_multimodal.py) | Azure OpenAI Responses with multimodal (image) input example | | [`getting_started/multimodal_input/openai_chat_multimodal.py`](./getting_started/multimodal_input/openai_chat_multimodal.py) | OpenAI Chat with multimodal (image) input example | -| [`packages/ollama/getting_started/ollama_chat_multimodal.py`](../packages/ollama/getting_started/ollama_chat_multimodal.py) | (Experimental) Ollama Chat with multimodal native Ollama Chat Client | + ## Azure Functions diff --git a/python/samples/amazon/bedrock_sample.py b/python/samples/amazon/bedrock_sample.py new file mode 100644 index 0000000000..42feb98ebd --- /dev/null +++ b/python/samples/amazon/bedrock_sample.py @@ -0,0 +1 @@ +"""This sample has moved to python/packages/bedrock/samples/bedrock_sample.py.""" diff --git a/python/samples/getting_started/agents/ollama/README.md b/python/samples/getting_started/agents/ollama/README.md index e6d0878c74..ac4b2cb3d0 100644 --- a/python/samples/getting_started/agents/ollama/README.md +++ 
b/python/samples/getting_started/agents/ollama/README.md @@ -8,20 +8,41 @@ This folder contains examples demonstrating how to use Ollama models with the Ag 2. **Start Ollama**: Ensure Ollama is running on your local machine 3. **Pull a model**: Run `ollama pull mistral` (or any other model you prefer) - For function calling examples, use models that support tool calling like `mistral` or `qwen2.5` - - For reasoning examples, use models that support reasoning like `qwen2.5:8b` + - For reasoning examples, use models that support reasoning like `qwen3:8b` + - For multimodal examples, use models like `gemma3:4b` -> **Note**: Not all models support all features. Function calling and reasoning capabilities depend on the specific model you're using. +> **Note**: Not all models support all features. Function calling, reasoning, and multimodal capabilities depend on the specific model you're using. + +## Recommended Approach + +The recommended way to use Ollama with Agent Framework is via the native `OllamaChatClient` from the `agent-framework-ollama` package. This provides full support for Ollama-specific features like reasoning mode. + +Alternatively, you can use the `OpenAIChatClient` configured to point to your local Ollama server, which may be useful if you're already familiar with the OpenAI client interface. ## Examples | File | Description | |------|-------------| -| [`ollama_with_openai_chat_client.py`](ollama_with_openai_chat_client.py) | Demonstrates how to configure OpenAI Chat Client to use local Ollama models. Shows both streaming and non-streaming responses with tool calling capabilities. | +| [`ollama_agent_basic.py`](ollama_agent_basic.py) | Basic Ollama agent with tool calling using native Ollama Chat Client. Shows both streaming and non-streaming responses. | +| [`ollama_agent_reasoning.py`](ollama_agent_reasoning.py) | Ollama agent with reasoning capabilities using native Ollama Chat Client. Shows how to enable thinking/reasoning mode. 
| +| [`ollama_chat_client.py`](ollama_chat_client.py) | Direct usage of the native Ollama Chat Client with tool calling. | +| [`ollama_chat_multimodal.py`](ollama_chat_multimodal.py) | Ollama Chat Client with multimodal (image) input capabilities. | +| [`ollama_with_openai_chat_client.py`](ollama_with_openai_chat_client.py) | Alternative approach using OpenAI Chat Client configured to use local Ollama models. | ## Configuration The examples use environment variables for configuration. Set the appropriate variables based on which example you're running: +### For Native Ollama Examples + +Set the following environment variables: + +- `OLLAMA_HOST`: The base URL for your Ollama server (optional, defaults to `http://localhost:11434`) + - Example: `export OLLAMA_HOST="http://localhost:11434"` + +- `OLLAMA_CHAT_MODEL_ID`: The model name to use + - Example: `export OLLAMA_CHAT_MODEL_ID="qwen2.5:8b"` + - Must be a model you have pulled with Ollama ### For OpenAI Client with Ollama (`ollama_with_openai_chat_client.py`) diff --git a/python/packages/ollama/getting_started/ollama_agent_basic.py b/python/samples/getting_started/agents/ollama/ollama_agent_basic.py similarity index 97% rename from python/packages/ollama/getting_started/ollama_agent_basic.py rename to python/samples/getting_started/agents/ollama/ollama_agent_basic.py index 3769410332..4d2a69b56b 100644 --- a/python/packages/ollama/getting_started/ollama_agent_basic.py +++ b/python/samples/getting_started/agents/ollama/ollama_agent_basic.py @@ -3,7 +3,7 @@ import asyncio from datetime import datetime -from agent_framework_ollama import OllamaChatClient +from agent_framework.ollama import OllamaChatClient """ Ollama Agent Basic Example diff --git a/python/packages/ollama/getting_started/ollama_agent_reasoning.py b/python/samples/getting_started/agents/ollama/ollama_agent_reasoning.py similarity index 88% rename from python/packages/ollama/getting_started/ollama_agent_reasoning.py rename to 
python/samples/getting_started/agents/ollama/ollama_agent_reasoning.py index a76492b42b..e0ce24bb85 100644 --- a/python/packages/ollama/getting_started/ollama_agent_reasoning.py +++ b/python/samples/getting_started/agents/ollama/ollama_agent_reasoning.py @@ -3,8 +3,7 @@ import asyncio from agent_framework import TextReasoningContent - -from agent_framework_ollama import OllamaChatClient +from agent_framework.ollama import OllamaChatClient """ Ollama Agent Reasoning Example @@ -31,7 +30,7 @@ async def reasoning_example() -> None: print(f"User: {query}") # Enable Reasoning on per request level result = await agent.run(query) - reasoning = "".join(c.text for c in result.messages[-1].contents if isinstance(c, TextReasoningContent)) + reasoning = "".join((c.text or "") for c in result.messages[-1].contents if isinstance(c, TextReasoningContent)) print(f"Reasoning: {reasoning}") print(f"Answer: {result}\n") diff --git a/python/packages/ollama/getting_started/ollama_chat_client.py b/python/samples/getting_started/agents/ollama/ollama_chat_client.py similarity index 63% rename from python/packages/ollama/getting_started/ollama_chat_client.py rename to python/samples/getting_started/agents/ollama/ollama_chat_client.py index 5cde122ecd..5d7197d8f5 100644 --- a/python/packages/ollama/getting_started/ollama_chat_client.py +++ b/python/samples/getting_started/agents/ollama/ollama_chat_client.py @@ -3,12 +3,19 @@ import asyncio from datetime import datetime -from agent_framework_ollama import OllamaChatClient +from agent_framework.ollama import OllamaChatClient -# Ensure to install Ollama and have a model running locally before running the sample -# Not all Models support function calling, to test function calling try llama3.2 -# Set the model to use via the OLLAMA_CHAT_MODEL_ID environment variable or modify the code below. -# https://ollama.com/ +""" +Ollama Chat Client Example + +This sample demonstrates using the native Ollama Chat Client directly. 
+ +Ensure to install Ollama and have a model running locally before running the sample. +Not all Models support function calling, to test function calling try llama3.2 +Set the model to use via the OLLAMA_CHAT_MODEL_ID environment variable or modify the code below. +https://ollama.com/ + +""" def get_time(): diff --git a/python/packages/ollama/getting_started/ollama_chat_multimodal.py b/python/samples/getting_started/agents/ollama/ollama_chat_multimodal.py similarity index 96% rename from python/packages/ollama/getting_started/ollama_chat_multimodal.py rename to python/samples/getting_started/agents/ollama/ollama_chat_multimodal.py index e0670d785d..724cecbe72 100644 --- a/python/packages/ollama/getting_started/ollama_chat_multimodal.py +++ b/python/samples/getting_started/agents/ollama/ollama_chat_multimodal.py @@ -3,8 +3,7 @@ import asyncio from agent_framework import ChatMessage, DataContent, Role, TextContent - -from agent_framework_ollama import OllamaChatClient +from agent_framework.ollama import OllamaChatClient """ Ollama Agent Multimodal Example diff --git a/python/samples/getting_started/context_providers/redis/README.md b/python/samples/getting_started/context_providers/redis/README.md index 94df89eff9..e0fde57bf2 100644 --- a/python/samples/getting_started/context_providers/redis/README.md +++ b/python/samples/getting_started/context_providers/redis/README.md @@ -8,8 +8,11 @@ This folder contains an example demonstrating how to use the Redis context provi | File | Description | |------|-------------| +| [`azure_redis_conversation.py`](azure_redis_conversation.py) | Demonstrates conversation persistence with RedisChatMessageStore and Azure Redis with Azure AD (Entra ID) authentication using credential provider. | | [`redis_basics.py`](redis_basics.py) | Shows standalone provider usage and agent integration. Demonstrates writing messages to Redis, retrieving context via full‑text or hybrid vector search, and persisting preferences across threads. 
Also includes a simple tool example whose outputs are remembered. | -| [`redis_threads.py`](redis_threads.py) | Demonstrates thread scoping. Includes: (1) global thread scope with a fixed `thread_id` shared across operations; (2) per‑operation thread scope where `scope_to_per_operation_thread_id=True` binds memory to a single thread for the provider’s lifetime; and (3) multiple agents with isolated memory via different `agent_id` values. | +| [`redis_conversation.py`](redis_conversation.py) | Simple example showing conversation persistence with RedisChatMessageStore using traditional connection string authentication. | +| [`redis_threads.py`](redis_threads.py) | Demonstrates thread scoping. Includes: (1) global thread scope with a fixed `thread_id` shared across operations; (2) per‑operation thread scope where `scope_to_per_operation_thread_id=True` binds memory to a single thread for the provider's lifetime; and (3) multiple agents with isolated memory via different `agent_id` values. | + ## Prerequisites diff --git a/python/samples/getting_started/context_providers/redis/azure_redis_conversation.py b/python/samples/getting_started/context_providers/redis/azure_redis_conversation.py new file mode 100644 index 0000000000..ca3e4694f5 --- /dev/null +++ b/python/samples/getting_started/context_providers/redis/azure_redis_conversation.py @@ -0,0 +1,123 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Azure Managed Redis Chat Message Store with Azure AD Authentication + +This example demonstrates how to use Azure Managed Redis with Azure AD authentication +to persist conversational details using RedisChatMessageStore. 
+ +Requirements: + - Azure Managed Redis instance with Azure AD authentication enabled + - Azure credentials configured (az login or managed identity) + - agent-framework-redis: pip install agent-framework-redis + - azure-identity: pip install azure-identity + +Environment Variables: + - AZURE_REDIS_HOST: Your Azure Managed Redis host (e.g., myredis.redis.cache.windows.net) + - OPENAI_API_KEY: Your OpenAI API key + - OPENAI_CHAT_MODEL_ID: OpenAI model (e.g., gpt-4o-mini) + - AZURE_USER_OBJECT_ID: Your Azure AD User Object ID for authentication +""" + +import asyncio +import os + +from agent_framework.openai import OpenAIChatClient +from agent_framework.redis import RedisChatMessageStore +from azure.identity.aio import AzureCliCredential +from redis.credentials import CredentialProvider + + +class AzureCredentialProvider(CredentialProvider): + """Credential provider for Azure AD authentication with Redis Enterprise.""" + + def __init__(self, azure_credential: AzureCliCredential, user_object_id: str): + self.azure_credential = azure_credential + self.user_object_id = user_object_id + + async def get_credentials_async(self) -> tuple[str] | tuple[str, str]: + """Get Azure AD token for Redis authentication. + + Returns (username, token) where username is the Azure user's Object ID. 
+ """ + token = await self.azure_credential.get_token("https://redis.azure.com/.default") + return (self.user_object_id, token.token) + + +async def main() -> None: + redis_host = os.environ.get("AZURE_REDIS_HOST") + if not redis_host: + print("ERROR: Set AZURE_REDIS_HOST environment variable") + return + + # For Azure Redis with Entra ID, username must be your Object ID + user_object_id = os.environ.get("AZURE_USER_OBJECT_ID") + if not user_object_id: + print("ERROR: Set AZURE_USER_OBJECT_ID environment variable") + print("Get your Object ID from the Azure Portal") + return + + # Create Azure CLI credential provider (uses 'az login' credentials) + azure_credential = AzureCliCredential() + credential_provider = AzureCredentialProvider(azure_credential, user_object_id) + + thread_id = "azure_test_thread" + + # Factory for creating Azure Redis chat message store + chat_message_store_factory = lambda: RedisChatMessageStore( + credential_provider=credential_provider, + host=redis_host, + port=10000, + ssl=True, + thread_id=thread_id, + key_prefix="chat_messages", + max_messages=100, + ) + + # Create chat client + client = OpenAIChatClient() + + # Create agent with Azure Redis store + agent = client.create_agent( + name="AzureRedisAssistant", + instructions="You are a helpful assistant.", + chat_message_store_factory=chat_message_store_factory, + ) + + # Conversation + query = "Remember that I enjoy gumbo" + result = await agent.run(query) + print("User: ", query) + print("Agent: ", result) + + # Ask the agent to recall the stored preference; it should retrieve from memory + query = "What do I enjoy?" + result = await agent.run(query) + print("User: ", query) + print("Agent: ", result) + + query = "What did I say to you just now?" 
+ result = await agent.run(query) + print("User: ", query) + print("Agent: ", result) + + query = "Remember that I have a meeting at 3pm tomorrow" + result = await agent.run(query) + print("User: ", query) + print("Agent: ", result) + + query = "Tulips are red" + result = await agent.run(query) + print("User: ", query) + print("Agent: ", result) + + query = "What was the first thing I said to you this conversation?" + result = await agent.run(query) + print("User: ", query) + print("Agent: ", result) + + # Cleanup + await azure_credential.close() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started/context_providers/redis/redis_conversation.py b/python/samples/getting_started/context_providers/redis/redis_conversation.py index 1ca54a4ae6..26748ae1c0 100644 --- a/python/samples/getting_started/context_providers/redis/redis_conversation.py +++ b/python/samples/getting_started/context_providers/redis/redis_conversation.py @@ -91,7 +91,7 @@ async def main() -> None: print("User: ", query) print("Agent: ", result) - query = "Remember that anyone who does not clean shrimp will be eaten by a shark" + query = "Remember that I have a meeting at 3pm tomorrow" result = await agent.run(query) print("User: ", query) print("Agent: ", result) diff --git a/python/samples/getting_started/mcp/README.md b/python/samples/getting_started/mcp/README.md index 56f6199314..1df1a449b6 100644 --- a/python/samples/getting_started/mcp/README.md +++ b/python/samples/getting_started/mcp/README.md @@ -12,8 +12,12 @@ The Model Context Protocol (MCP) is an open standard for connecting AI agents to |--------|------|-------------| | **Agent as MCP Server** | [`agent_as_mcp_server.py`](agent_as_mcp_server.py) | Shows how to expose an Agent Framework agent as an MCP server that other AI applications can connect to | | **API Key Authentication** | [`mcp_api_key_auth.py`](mcp_api_key_auth.py) | Demonstrates API key authentication with MCP servers | +| **GitHub
Integration with PAT** | [`mcp_github_pat.py`](mcp_github_pat.py) | Demonstrates connecting to GitHub's MCP server using Personal Access Token (PAT) authentication | ## Prerequisites - `OPENAI_API_KEY` environment variable - `OPENAI_RESPONSES_MODEL_ID` environment variable + +For `mcp_github_pat.py`: +- `GITHUB_PAT` - Your GitHub Personal Access Token (create at https://github.com/settings/tokens) diff --git a/python/samples/getting_started/mcp/mcp_github_pat.py b/python/samples/getting_started/mcp/mcp_github_pat.py new file mode 100644 index 0000000000..3d9d8c4916 --- /dev/null +++ b/python/samples/getting_started/mcp/mcp_github_pat.py @@ -0,0 +1,81 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio +import os + +from agent_framework import ChatAgent, HostedMCPTool +from agent_framework.openai import OpenAIResponsesClient +from dotenv import load_dotenv + +""" +MCP GitHub Integration with Personal Access Token (PAT) + +This example demonstrates how to connect to GitHub's remote MCP server using a Personal Access +Token (PAT) for authentication. The agent can use GitHub operations like searching repositories, +reading files, creating issues, and more depending on how you scope your token. + +Prerequisites: +1. A GitHub Personal Access Token with appropriate scopes + - Create one at: https://github.com/settings/tokens + - For read-only operations, you can use more restrictive scopes +2. Environment variables: + - GITHUB_PAT: Your GitHub Personal Access Token (required) + - OPENAI_API_KEY: Your OpenAI API key (required) + - OPENAI_RESPONSES_MODEL_ID: Your OpenAI model ID (required) +""" + + +async def github_mcp_example() -> None: + """Example of using GitHub MCP server with PAT authentication.""" + # 1. Load environment variables from .env file if present + load_dotenv() + + # 2. Get configuration from environment + github_pat = os.getenv("GITHUB_PAT") + if not github_pat: + raise ValueError( + "GITHUB_PAT environment variable must be set. 
Create a token at https://github.com/settings/tokens" + ) + + # 3. Create authentication headers with GitHub PAT + auth_headers = { + "Authorization": f"Bearer {github_pat}", + } + + # 4. Create MCP tool with authentication + # HostedMCPTool manages the connection to the MCP server and makes its tools available + # Set approval_mode="never_require" to allow the MCP tool to execute without approval + github_mcp_tool = HostedMCPTool( + name="GitHub", + description="Tool for interacting with GitHub.", + url="https://api.githubcopilot.com/mcp/", + headers=auth_headers, + approval_mode="never_require", + ) + + # 5. Create agent with the GitHub MCP tool + async with ChatAgent( + chat_client=OpenAIResponsesClient(), + name="GitHubAgent", + instructions=( + "You are a helpful assistant that can help users interact with GitHub. " + "You can search for repositories, read file contents, check issues, and more. " + "Always be clear about what operations you're performing." + ), + tools=github_mcp_tool, + ) as agent: + # Example 1: Get authenticated user information + query1 = "What is my GitHub username and tell me about my account?" + print(f"\nUser: {query1}") + result1 = await agent.run(query1) + print(f"Agent: {result1.text}") + + # Example 2: List my repositories + query2 = "List all the repositories I own on GitHub" + print(f"\nUser: {query2}") + result2 = await agent.run(query2) + print(f"Agent: {result2.text}") + + +if __name__ == "__main__": + asyncio.run(github_mcp_example()) diff --git a/python/samples/getting_started/workflows/README.md b/python/samples/getting_started/workflows/README.md index 805d808540..1adaab2c7c 100644 --- a/python/samples/getting_started/workflows/README.md +++ b/python/samples/getting_started/workflows/README.md @@ -45,6 +45,7 @@ Once comfortable with these, explore the rest of the samples below. 
| Workflow as Agent (Reflection Pattern) | [agents/workflow_as_agent_reflection_pattern.py](./agents/workflow_as_agent_reflection_pattern.py) | Wrap a workflow so it can behave like an agent (reflection pattern) | | Workflow as Agent + HITL | [agents/workflow_as_agent_human_in_the_loop.py](./agents/workflow_as_agent_human_in_the_loop.py) | Extend workflow-as-agent with human-in-the-loop capability | | Workflow as Agent with Thread | [agents/workflow_as_agent_with_thread.py](./agents/workflow_as_agent_with_thread.py) | Use AgentThread to maintain conversation history across workflow-as-agent invocations | +| Workflow as Agent kwargs | [agents/workflow_as_agent_kwargs.py](./agents/workflow_as_agent_kwargs.py) | Pass custom context (data, user tokens) via kwargs through workflow.as_agent() to @ai_function tools | | Handoff Workflow as Agent | [agents/handoff_workflow_as_agent.py](./agents/handoff_workflow_as_agent.py) | Use a HandoffBuilder workflow as an agent with HITL via FunctionCallContent/FunctionResultContent | ### checkpoint @@ -64,6 +65,7 @@ Once comfortable with these, explore the rest of the samples below. 
| Sub-Workflow (Basics) | [composition/sub_workflow_basics.py](./composition/sub_workflow_basics.py) | Wrap a workflow as an executor and orchestrate sub-workflows | | Sub-Workflow: Request Interception | [composition/sub_workflow_request_interception.py](./composition/sub_workflow_request_interception.py) | Intercept and forward sub-workflow requests using @handler for SubWorkflowRequestMessage | | Sub-Workflow: Parallel Requests | [composition/sub_workflow_parallel_requests.py](./composition/sub_workflow_parallel_requests.py) | Multiple specialized interceptors handling different request types from same sub-workflow | +| Sub-Workflow: kwargs Propagation | [composition/sub_workflow_kwargs.py](./composition/sub_workflow_kwargs.py) | Pass custom context (user tokens, config) from parent workflow through to sub-workflow agents | ### control-flow @@ -75,6 +77,7 @@ Once comfortable with these, explore the rest of the samples below. | Switch-Case Edge Group | [control-flow/switch_case_edge_group.py](./control-flow/switch_case_edge_group.py) | Switch-case branching using classifier outputs | | Multi-Selection Edge Group | [control-flow/multi_selection_edge_group.py](./control-flow/multi_selection_edge_group.py) | Select one or many targets dynamically (subset fan-out) | | Simple Loop | [control-flow/simple_loop.py](./control-flow/simple_loop.py) | Feedback loop where an agent judges ABOVE/BELOW/MATCHED | +| Workflow Cancellation | [control-flow/workflow_cancellation.py](./control-flow/workflow_cancellation.py) | Cancel a running workflow using asyncio tasks | ### human-in-the-loop diff --git a/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py b/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py new file mode 100644 index 0000000000..141a5a658d --- /dev/null +++ b/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py @@ -0,0 +1,140 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import asyncio +import json +from typing import Annotated, Any + +from agent_framework import SequentialBuilder, ai_function +from agent_framework.openai import OpenAIChatClient +from pydantic import Field + +""" +Sample: Workflow as Agent with kwargs Propagation to @ai_function Tools + +This sample demonstrates how to flow custom context (skill data, user tokens, etc.) +through a workflow exposed via .as_agent() to @ai_function tools using the **kwargs pattern. + +Key Concepts: +- Build a workflow using SequentialBuilder (or any builder pattern) +- Expose the workflow as a reusable agent via workflow.as_agent() +- Pass custom context as kwargs when invoking workflow_agent.run() or run_stream() +- kwargs are stored in SharedState and propagated to all agent invocations +- @ai_function tools receive kwargs via **kwargs parameter + +When to use workflow.as_agent(): +- To treat an entire workflow orchestration as a single agent +- To compose workflows into higher-level orchestrations +- To maintain a consistent agent interface for callers + +Prerequisites: +- OpenAI environment variables configured +""" + + +# Define tools that accept custom context via **kwargs +@ai_function +def get_user_data( + query: Annotated[str, Field(description="What user data to retrieve")], + **kwargs: Any, +) -> str: + """Retrieve user-specific data based on the authenticated context.""" + user_token = kwargs.get("user_token", {}) + user_name = user_token.get("user_name", "anonymous") + access_level = user_token.get("access_level", "none") + + print(f"\n[get_user_data] Received kwargs keys: {list(kwargs.keys())}") + print(f"[get_user_data] User: {user_name}") + print(f"[get_user_data] Access level: {access_level}") + + return f"Retrieved data for user {user_name} with {access_level} access: {query}" + + +@ai_function +def call_api( + endpoint_name: Annotated[str, Field(description="Name of the API endpoint to call")], + **kwargs: Any, +) -> str: + """Call an API using the configured 
endpoints from custom_data.""" + custom_data = kwargs.get("custom_data", {}) + api_config = custom_data.get("api_config", {}) + + base_url = api_config.get("base_url", "unknown") + endpoints = api_config.get("endpoints", {}) + + print(f"\n[call_api] Received kwargs keys: {list(kwargs.keys())}") + print(f"[call_api] Base URL: {base_url}") + print(f"[call_api] Available endpoints: {list(endpoints.keys())}") + + if endpoint_name in endpoints: + return f"Called {base_url}{endpoints[endpoint_name]} successfully" + return f"Endpoint '{endpoint_name}' not found in configuration" + + +async def main() -> None: + print("=" * 70) + print("Workflow as Agent kwargs Flow Demo") + print("=" * 70) + + # Create chat client + chat_client = OpenAIChatClient() + + # Create agent with tools that use kwargs + agent = chat_client.create_agent( + name="assistant", + instructions=( + "You are a helpful assistant. Use the available tools to help users. " + "When asked about user data, use get_user_data. " + "When asked to call an API, use call_api." 
+ ), + tools=[get_user_data, call_api], + ) + + # Build a sequential workflow + workflow = SequentialBuilder().participants([agent]).build() + + # Expose the workflow as an agent using .as_agent() + workflow_agent = workflow.as_agent(name="WorkflowAgent") + + # Define custom context that will flow to ai_functions via kwargs + custom_data = { + "api_config": { + "base_url": "https://api.example.com", + "endpoints": { + "users": "/v1/users", + "orders": "/v1/orders", + "products": "/v1/products", + }, + }, + } + + user_token = { + "user_name": "bob@contoso.com", + "access_level": "admin", + } + + print("\nCustom Data being passed:") + print(json.dumps(custom_data, indent=2)) + print(f"\nUser: {user_token['user_name']}") + print("\n" + "-" * 70) + print("Workflow Agent Execution (watch for [tool_name] logs showing kwargs received):") + print("-" * 70) + + # Run workflow agent with kwargs - these will flow through to ai_functions + # Note: kwargs are passed to workflow_agent.run_stream() just like workflow.run_stream() + print("\n===== Streaming Response =====") + async for update in workflow_agent.run_stream( + "Please get my user data and then call the users API endpoint.", + custom_data=custom_data, + user_token=user_token, + ): + if update.text: + print(update.text, end="", flush=True) + print() + + print("\n" + "=" * 70) + print("Sample Complete") + print("=" * 70) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py b/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py new file mode 100644 index 0000000000..1efef28bae --- /dev/null +++ b/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py @@ -0,0 +1,143 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import asyncio +import json +from typing import Annotated, Any + +from agent_framework import ( + ChatMessage, + SequentialBuilder, + WorkflowExecutor, + WorkflowOutputEvent, + ai_function, +) +from agent_framework.openai import OpenAIChatClient + +""" +Sample: Sub-Workflow kwargs Propagation + +This sample demonstrates how custom context (kwargs) flows from a parent workflow +through to agents in sub-workflows. When you pass kwargs to the parent workflow's +run_stream() or run(), they automatically propagate to nested sub-workflows. + +Key Concepts: +- kwargs passed to parent workflow.run_stream() propagate to sub-workflows +- Sub-workflow agents receive the same kwargs as the parent workflow +- Works with nested WorkflowExecutor compositions at any depth +- Useful for passing authentication tokens, configuration, or request context + +Prerequisites: +- OpenAI environment variables configured +""" + + +# Define tools that access custom context via **kwargs +@ai_function +def get_authenticated_data( + resource: Annotated[str, "The resource to fetch"], + **kwargs: Any, +) -> str: + """Fetch data using the authenticated user context from kwargs.""" + user_token = kwargs.get("user_token", {}) + user_name = user_token.get("user_name", "anonymous") + access_level = user_token.get("access_level", "none") + + print(f"\n[get_authenticated_data] kwargs keys: {list(kwargs.keys())}") + print(f"[get_authenticated_data] User: {user_name}, Access: {access_level}") + + return f"Fetched '{resource}' for user {user_name} ({access_level} access)" + + +@ai_function +def call_configured_service( + service_name: Annotated[str, "Name of the service to call"], + **kwargs: Any, +) -> str: + """Call a service using configuration from kwargs.""" + config = kwargs.get("service_config", {}) + services = config.get("services", {}) + + print(f"\n[call_configured_service] kwargs keys: {list(kwargs.keys())}") + print(f"[call_configured_service] Available services: {list(services.keys())}") + + 
if service_name in services: + endpoint = services[service_name] + return f"Called service '{service_name}' at {endpoint}" + return f"Service '{service_name}' not found in configuration" + + +async def main() -> None: + print("=" * 70) + print("Sub-Workflow kwargs Propagation Demo") + print("=" * 70) + + # Create chat client + chat_client = OpenAIChatClient() + + # Create an agent with tools that use kwargs + inner_agent = chat_client.create_agent( + name="data_agent", + instructions=( + "You are a data access agent. Use the available tools to help users. " + "When asked to fetch data, use get_authenticated_data. " + "When asked to call a service, use call_configured_service." + ), + tools=[get_authenticated_data, call_configured_service], + ) + + # Build the inner (sub) workflow with the agent + inner_workflow = SequentialBuilder().participants([inner_agent]).build() + + # Wrap the inner workflow in a WorkflowExecutor to use it as a sub-workflow + subworkflow_executor = WorkflowExecutor( + workflow=inner_workflow, + id="data_subworkflow", + ) + + # Build the outer (parent) workflow containing the sub-workflow + outer_workflow = SequentialBuilder().participants([subworkflow_executor]).build() + + # Define custom context that will flow through to the sub-workflow's agent + user_token = { + "user_name": "alice@contoso.com", + "access_level": "admin", + "session_id": "sess_12345", + } + + service_config = { + "services": { + "users": "https://api.example.com/v1/users", + "orders": "https://api.example.com/v1/orders", + "inventory": "https://api.example.com/v1/inventory", + }, + "timeout": 30, + } + + print("\nContext being passed to parent workflow:") + print(f" user_token: {json.dumps(user_token, indent=4)}") + print(f" service_config: {json.dumps(service_config, indent=4)}") + print("\n" + "-" * 70) + print("Workflow Execution (kwargs flow: parent -> sub-workflow -> agent -> tool):") + print("-" * 70) + + # Run the OUTER workflow with kwargs + # These kwargs will 
automatically propagate to the inner sub-workflow + async for event in outer_workflow.run_stream( + "Please fetch my profile data and then call the users service.", + user_token=user_token, + service_config=service_config, + ): + if isinstance(event, WorkflowOutputEvent): + output_data = event.data + if isinstance(output_data, list): + for item in output_data: # type: ignore + if isinstance(item, ChatMessage) and item.text: + print(f"\n[Final Answer]: {item.text}") + + print("\n" + "=" * 70) + print("Sample Complete - kwargs successfully flowed through sub-workflow!") + print("=" * 70) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started/workflows/control-flow/workflow_cancellation.py b/python/samples/getting_started/workflows/control-flow/workflow_cancellation.py new file mode 100644 index 0000000000..2ebd5bd128 --- /dev/null +++ b/python/samples/getting_started/workflows/control-flow/workflow_cancellation.py @@ -0,0 +1,103 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio + +from agent_framework import WorkflowBuilder, WorkflowContext, executor +from typing_extensions import Never + +""" +Sample: Workflow Cancellation + +A three-step workflow where each step takes 2 seconds. We cancel it after 3 seconds +to demonstrate mid-execution cancellation using asyncio tasks. + +Purpose: +Show how to cancel a running workflow by wrapping it in an asyncio.Task. This pattern +works with both workflow.run() and workflow.run_stream(). Useful for implementing +timeouts, graceful shutdown, or A2A executors that need cancellation support. + +Prerequisites: +- No external services required. 
+""" + + +@executor(id="step1") +async def step1(text: str, ctx: WorkflowContext[str]) -> None: + """First step - simulates 2 seconds of work.""" + print("[Step1] Starting...") + await asyncio.sleep(2) + print("[Step1] Done") + await ctx.send_message(text.upper()) + + +@executor(id="step2") +async def step2(text: str, ctx: WorkflowContext[str]) -> None: + """Second step - simulates 2 seconds of work.""" + print("[Step2] Starting...") + await asyncio.sleep(2) + print("[Step2] Done") + await ctx.send_message(text + "!") + + +@executor(id="step3") +async def step3(text: str, ctx: WorkflowContext[Never, str]) -> None: + """Final step - simulates 2 seconds of work.""" + print("[Step3] Starting...") + await asyncio.sleep(2) + print("[Step3] Done") + await ctx.yield_output(f"Result: {text}") + + +def build_workflow(): + """Build a simple 3-step sequential workflow (~6 seconds total).""" + return ( + WorkflowBuilder() + .register_executor(lambda: step1, name="step1") + .register_executor(lambda: step2, name="step2") + .register_executor(lambda: step3, name="step3") + .add_edge("step1", "step2") + .add_edge("step2", "step3") + .set_start_executor("step1") + .build() + ) + + +async def run_with_cancellation() -> None: + """Cancel the workflow after 3 seconds (mid-execution during Step2).""" + print("=== Run with cancellation ===\n") + workflow = build_workflow() + + # Wrap workflow.run() in a task to enable cancellation + task = asyncio.create_task(workflow.run("hello world")) + + # Wait 3 seconds (Step1 completes, Step2 is mid-execution), then cancel + await asyncio.sleep(3) + print("\n--- Cancelling workflow ---\n") + task.cancel() + + try: + await task + except asyncio.CancelledError: + print("Workflow was cancelled") + + +async def run_to_completion() -> None: + """Let the workflow run to completion and get the result.""" + print("=== Run to completion ===\n") + workflow = build_workflow() + + # Run without cancellation - await the result directly + result = await 
workflow.run("hello world") + + print(f"\nWorkflow completed with output: {result.get_outputs()}") + + +async def main() -> None: + """Demonstrate both cancellation and completion scenarios.""" + await run_with_cancellation() + print("\n") + await run_to_completion() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started/workflows/visualization/concurrent_with_visualization.py b/python/samples/getting_started/workflows/visualization/concurrent_with_visualization.py index 21a9ff4b08..81545b75c7 100644 --- a/python/samples/getting_started/workflows/visualization/concurrent_with_visualization.py +++ b/python/samples/getting_started/workflows/visualization/concurrent_with_visualization.py @@ -6,14 +6,12 @@ from agent_framework import ( AgentExecutorRequest, AgentExecutorResponse, - AgentRunEvent, ChatAgent, ChatMessage, Executor, Role, WorkflowBuilder, WorkflowContext, - WorkflowOutputEvent, WorkflowViz, handler, ) @@ -124,7 +122,7 @@ def create_legal_agent() -> ChatAgent: async def main() -> None: """Build and run the concurrent workflow with visualization.""" - # 1) Build a simple fan-out/fan-in workflow + # Build a simple fan-out/fan-in workflow workflow = ( WorkflowBuilder() .register_agent(create_researcher_agent, name="researcher") @@ -138,31 +136,22 @@ async def main() -> None: .build() ) - # 1.5) Generate workflow visualization + # Generate workflow visualization print("Generating workflow visualization...") viz = WorkflowViz(workflow) # Print out the mermaid string. print("Mermaid string: \n=======") print(viz.to_mermaid()) print("=======") - # Print out the DiGraph string. + # Print out the DiGraph string with internal executors. print("DiGraph string: \n=======") - print(viz.to_digraph()) + print(viz.to_digraph(include_internal_executors=True)) print("=======") # Export the DiGraph visualization as SVG. 
svg_file = viz.export(format="svg") print(f"SVG file saved to: {svg_file}") - # 2) Run with a single prompt - async for event in workflow.run_stream("We are launching a new budget-friendly electric bike for urban commuters."): - if isinstance(event, AgentRunEvent): - # Show which agent ran and what step completed. - print(event) - elif isinstance(event, WorkflowOutputEvent): - print("===== Final Aggregated Output =====") - print(event.data) - if __name__ == "__main__": asyncio.run(main()) diff --git a/python/uv.lock b/python/uv.lock index fcd106ccc7..0504bc9827 100644 --- a/python/uv.lock +++ b/python/uv.lock @@ -33,12 +33,14 @@ members = [ "agent-framework-azure-ai", "agent-framework-azure-ai-search", "agent-framework-azurefunctions", + "agent-framework-bedrock", "agent-framework-chatkit", "agent-framework-copilotstudio", "agent-framework-core", "agent-framework-declarative", "agent-framework-devui", "agent-framework-durabletask", + "agent-framework-foundry-local", "agent-framework-lab", "agent-framework-mem0", "agent-framework-ollama", @@ -91,7 +93,7 @@ wheels = [ [[package]] name = "agent-framework" -version = "1.0.0b251216" +version = "1.0.0b251223" source = { virtual = "." 
} dependencies = [ { name = "agent-framework-core", extra = ["all"], marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -158,7 +160,7 @@ docs = [ [[package]] name = "agent-framework-a2a" -version = "1.0.0b251216" +version = "1.0.0b251223" source = { editable = "packages/a2a" } dependencies = [ { name = "a2a-sdk", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -173,7 +175,7 @@ requires-dist = [ [[package]] name = "agent-framework-ag-ui" -version = "1.0.0b251216" +version = "1.0.0b251223" source = { editable = "packages/ag-ui" } dependencies = [ { name = "ag-ui-protocol", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -203,7 +205,7 @@ provides-extras = ["dev"] [[package]] name = "agent-framework-anthropic" -version = "1.0.0b251216" +version = "1.0.0b251223" source = { editable = "packages/anthropic" } dependencies = [ { name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -218,7 +220,7 @@ requires-dist = [ [[package]] name = "agent-framework-azure-ai" -version = "1.0.0b251216" +version = "1.0.0b251223" source = { editable = "packages/azure-ai" } dependencies = [ { name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -237,7 +239,7 @@ requires-dist = [ [[package]] name = "agent-framework-azure-ai-search" -version = "1.0.0b251216" +version = "1.0.0b251223" source = { editable = "packages/azure-ai-search" } dependencies = [ { name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -252,7 +254,7 @@ requires-dist = [ [[package]] name = "agent-framework-azurefunctions" -version = "1.0.0b251216" +version = "1.0.0b251223" source = { editable = "packages/azurefunctions" } dependencies = [ { name = "agent-framework-core", 
marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -277,9 +279,26 @@ requires-dist = [ [package.metadata.requires-dev] dev = [{ name = "types-python-dateutil", specifier = ">=2.9.0" }] +[[package]] +name = "agent-framework-bedrock" +version = "1.0.0b251120" +source = { editable = "packages/bedrock" } +dependencies = [ + { name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "boto3", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "botocore", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] + +[package.metadata] +requires-dist = [ + { name = "agent-framework-core", editable = "packages/core" }, + { name = "boto3", specifier = ">=1.35.0,<2.0.0" }, + { name = "botocore", specifier = ">=1.35.0,<2.0.0" }, +] + [[package]] name = "agent-framework-chatkit" -version = "1.0.0b251216" +version = "1.0.0b251223" source = { editable = "packages/chatkit" } dependencies = [ { name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -294,7 +313,7 @@ requires-dist = [ [[package]] name = "agent-framework-copilotstudio" -version = "1.0.0b251216" +version = "1.0.0b251223" source = { editable = "packages/copilotstudio" } dependencies = [ { name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -309,7 +328,7 @@ requires-dist = [ [[package]] name = "agent-framework-core" -version = "1.0.0b251216" +version = "1.0.0b251223" source = { editable = "packages/core" } dependencies = [ { name = "azure-identity", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -339,6 +358,7 @@ all = [ { name = "agent-framework-durabletask", marker = "sys_platform == 'darwin' or sys_platform == 'linux' 
or sys_platform == 'win32'" }, { name = "agent-framework-lab", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "agent-framework-mem0", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "agent-framework-ollama", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "agent-framework-purview", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "agent-framework-redis", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] @@ -358,6 +378,7 @@ requires-dist = [ { name = "agent-framework-durabletask", marker = "extra == 'all'", editable = "packages/durabletask" }, { name = "agent-framework-lab", marker = "extra == 'all'", editable = "packages/lab" }, { name = "agent-framework-mem0", marker = "extra == 'all'", editable = "packages/mem0" }, + { name = "agent-framework-ollama", marker = "extra == 'all'", editable = "packages/ollama" }, { name = "agent-framework-purview", marker = "extra == 'all'", editable = "packages/purview" }, { name = "agent-framework-redis", marker = "extra == 'all'", editable = "packages/redis" }, { name = "azure-identity", specifier = ">=1,<2" }, @@ -375,7 +396,7 @@ provides-extras = ["all"] [[package]] name = "agent-framework-declarative" -version = "1.0.0b251216" +version = "1.0.0b251223" source = { editable = "packages/declarative" } dependencies = [ { name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -400,7 +421,7 @@ dev = [{ name = "types-pyyaml" }] [[package]] name = "agent-framework-devui" -version = "1.0.0b251216" +version = "1.0.0b251223" source = { editable = "packages/devui" } dependencies = [ { name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ 
-457,9 +478,24 @@ requires-dist = [ [package.metadata.requires-dev] dev = [{ name = "types-python-dateutil", specifier = ">=2.9.0" }] +[[package]] +name = "agent-framework-foundry-local" +version = "1.0.0b251223" +source = { editable = "packages/foundry_local" } +dependencies = [ + { name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "foundry-local-sdk", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] + +[package.metadata] +requires-dist = [ + { name = "agent-framework-core", editable = "packages/core" }, + { name = "foundry-local-sdk", specifier = ">=0.5.1,<1" }, +] + [[package]] name = "agent-framework-lab" -version = "1.0.0b251216" +version = "1.0.0b251223" source = { editable = "packages/lab" } dependencies = [ { name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -483,7 +519,7 @@ math = [ tau2 = [ { name = "loguru", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, - { name = "numpy", version = "2.3.5", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "numpy", version = "2.4.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" 
}, { name = "pydantic", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "tiktoken", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] @@ -550,7 +586,7 @@ dev = [ [[package]] name = "agent-framework-mem0" -version = "1.0.0b251216" +version = "1.0.0b251223" source = { editable = "packages/mem0" } dependencies = [ { name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -565,7 +601,7 @@ requires-dist = [ [[package]] name = "agent-framework-ollama" -version = "1.0.0b251216" +version = "1.0.0b251223" source = { editable = "packages/ollama" } dependencies = [ { name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -580,7 +616,7 @@ requires-dist = [ [[package]] name = "agent-framework-purview" -version = "1.0.0b251216" +version = "1.0.0b251223" source = { editable = "packages/purview" } dependencies = [ { name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -597,12 +633,12 @@ requires-dist = [ [[package]] name = "agent-framework-redis" -version = "1.0.0b251216" +version = "1.0.0b251223" source = { editable = "packages/redis" } dependencies = [ { name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, - { name = "numpy", version = "2.3.5", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 
'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "numpy", version = "2.4.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, { name = "redis", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "redisvl", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] @@ -889,14 +925,14 @@ wheels = [ [[package]] name = "apscheduler" -version = "3.11.1" +version = "3.11.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "tzlocal", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d0/81/192db4f8471de5bc1f0d098783decffb1e6e69c4f8b4bc6711094691950b/apscheduler-3.11.1.tar.gz", hash = "sha256:0db77af6400c84d1747fe98a04b8b58f0080c77d11d338c4f507a9752880f221", size = 108044, upload-time = "2025-10-31T18:55:42.819Z" } +sdist = { url = "https://files.pythonhosted.org/packages/07/12/3e4389e5920b4c1763390c6d371162f3784f86f85cd6d6c1bfe68eef14e2/apscheduler-3.11.2.tar.gz", hash = "sha256:2a9966b052ec805f020c8c4c3ae6e6a06e24b1bf19f2e11d91d8cca0473eef41", size = 108683, upload-time = "2025-12-22T00:39:34.884Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/58/9f/d3c76f76c73fcc959d28e9def45b8b1cc3d7722660c5003b19c1022fd7f4/apscheduler-3.11.1-py3-none-any.whl", hash = "sha256:6162cb5683cb09923654fa9bdd3130c4be4bfda6ad8990971c9597ecd52965d2", size = 64278, upload-time = "2025-10-31T18:55:41.186Z" }, + { url = "https://files.pythonhosted.org/packages/9f/64/2e54428beba8d9992aa478bb8f6de9e4ecaa5f8f513bcfd567ed7fb0262d/apscheduler-3.11.2-py3-none-any.whl", hash = 
"sha256:ce005177f741409db4e4dd40a7431b76feb856b9dd69d57e0da49d6715bfd26d", size = 64439, upload-time = "2025-12-22T00:39:33.303Z" }, ] [[package]] @@ -1487,7 +1523,7 @@ resolution-markers = [ "python_full_version == '3.11.*' and sys_platform == 'win32'", ] dependencies = [ - { name = "numpy", version = "2.3.5", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "numpy", version = "2.4.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/58/01/1253e6698a07380cd31a736d248a3f2a50a7c88779a1813da27503cadc2a/contourpy-1.3.3.tar.gz", hash = "sha256:083e12155b210502d0bca491432bb04d56dc3432f95a979b429f2848c3dbe880", size = 13466174, upload-time = "2025-07-26T12:03:12.549Z" } wheels = [ @@ -1566,101 +1602,101 @@ wheels = [ [[package]] name = "coverage" -version = "7.13.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/b6/45/2c665ca77ec32ad67e25c77daf1cee28ee4558f3bc571cdbaf88a00b9f23/coverage-7.13.0.tar.gz", hash = "sha256:a394aa27f2d7ff9bc04cf703817773a59ad6dfbd577032e690f961d2460ee936", size = 820905, upload-time = "2025-12-08T13:14:38.055Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/db/08/bdd7ccca14096f7eb01412b87ac11e5d16e4cb54b6e328afc9dee8bdaec1/coverage-7.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:02d9fb9eccd48f6843c98a37bd6817462f130b86da8660461e8f5e54d4c06070", size = 217979, upload-time = "2025-12-08T13:12:14.505Z" }, - { url = 
"https://files.pythonhosted.org/packages/fa/f0/d1302e3416298a28b5663ae1117546a745d9d19fde7e28402b2c5c3e2109/coverage-7.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:367449cf07d33dc216c083f2036bb7d976c6e4903ab31be400ad74ad9f85ce98", size = 218496, upload-time = "2025-12-08T13:12:16.237Z" }, - { url = "https://files.pythonhosted.org/packages/07/26/d36c354c8b2a320819afcea6bffe72839efd004b98d1d166b90801d49d57/coverage-7.13.0-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:cdb3c9f8fef0a954c632f64328a3935988d33a6604ce4bf67ec3e39670f12ae5", size = 245237, upload-time = "2025-12-08T13:12:17.858Z" }, - { url = "https://files.pythonhosted.org/packages/91/52/be5e85631e0eec547873d8b08dd67a5f6b111ecfe89a86e40b89b0c1c61c/coverage-7.13.0-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:d10fd186aac2316f9bbb46ef91977f9d394ded67050ad6d84d94ed6ea2e8e54e", size = 247061, upload-time = "2025-12-08T13:12:19.132Z" }, - { url = "https://files.pythonhosted.org/packages/0f/45/a5e8fa0caf05fbd8fa0402470377bff09cc1f026d21c05c71e01295e55ab/coverage-7.13.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7f88ae3e69df2ab62fb0bc5219a597cb890ba5c438190ffa87490b315190bb33", size = 248928, upload-time = "2025-12-08T13:12:20.702Z" }, - { url = "https://files.pythonhosted.org/packages/f5/42/ffb5069b6fd1b95fae482e02f3fecf380d437dd5a39bae09f16d2e2e7e01/coverage-7.13.0-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c4be718e51e86f553bcf515305a158a1cd180d23b72f07ae76d6017c3cc5d791", size = 245931, upload-time = "2025-12-08T13:12:22.243Z" }, - { url = "https://files.pythonhosted.org/packages/95/6e/73e809b882c2858f13e55c0c36e94e09ce07e6165d5644588f9517efe333/coverage-7.13.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a00d3a393207ae12f7c49bb1c113190883b500f48979abb118d8b72b8c95c032", size = 246968, upload-time = 
"2025-12-08T13:12:23.52Z" }, - { url = "https://files.pythonhosted.org/packages/87/08/64ebd9e64b6adb8b4a4662133d706fbaccecab972e0b3ccc23f64e2678ad/coverage-7.13.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a7b1cd820e1b6116f92c6128f1188e7afe421c7e1b35fa9836b11444e53ebd9", size = 244972, upload-time = "2025-12-08T13:12:24.781Z" }, - { url = "https://files.pythonhosted.org/packages/12/97/f4d27c6fe0cb375a5eced4aabcaef22de74766fb80a3d5d2015139e54b22/coverage-7.13.0-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:37eee4e552a65866f15dedd917d5e5f3d59805994260720821e2c1b51ac3248f", size = 245241, upload-time = "2025-12-08T13:12:28.041Z" }, - { url = "https://files.pythonhosted.org/packages/0c/94/42f8ae7f633bf4c118bf1038d80472f9dade88961a466f290b81250f7ab7/coverage-7.13.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:62d7c4f13102148c78d7353c6052af6d899a7f6df66a32bddcc0c0eb7c5326f8", size = 245847, upload-time = "2025-12-08T13:12:29.337Z" }, - { url = "https://files.pythonhosted.org/packages/a8/2f/6369ca22b6b6d933f4f4d27765d313d8914cc4cce84f82a16436b1a233db/coverage-7.13.0-cp310-cp310-win32.whl", hash = "sha256:24e4e56304fdb56f96f80eabf840eab043b3afea9348b88be680ec5986780a0f", size = 220573, upload-time = "2025-12-08T13:12:30.905Z" }, - { url = "https://files.pythonhosted.org/packages/f1/dc/a6a741e519acceaeccc70a7f4cfe5d030efc4b222595f0677e101af6f1f3/coverage-7.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:74c136e4093627cf04b26a35dab8cbfc9b37c647f0502fc313376e11726ba303", size = 221509, upload-time = "2025-12-08T13:12:32.09Z" }, - { url = "https://files.pythonhosted.org/packages/f1/dc/888bf90d8b1c3d0b4020a40e52b9f80957d75785931ec66c7dfaccc11c7d/coverage-7.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0dfa3855031070058add1a59fdfda0192fd3e8f97e7c81de0596c145dea51820", size = 218104, upload-time = "2025-12-08T13:12:33.333Z" }, - { url = 
"https://files.pythonhosted.org/packages/8d/ea/069d51372ad9c380214e86717e40d1a743713a2af191cfba30a0911b0a4a/coverage-7.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4fdb6f54f38e334db97f72fa0c701e66d8479af0bc3f9bfb5b90f1c30f54500f", size = 218606, upload-time = "2025-12-08T13:12:34.498Z" }, - { url = "https://files.pythonhosted.org/packages/68/09/77b1c3a66c2aa91141b6c4471af98e5b1ed9b9e6d17255da5eb7992299e3/coverage-7.13.0-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:7e442c013447d1d8d195be62852270b78b6e255b79b8675bad8479641e21fd96", size = 248999, upload-time = "2025-12-08T13:12:36.02Z" }, - { url = "https://files.pythonhosted.org/packages/0a/32/2e2f96e9d5691eaf1181d9040f850b8b7ce165ea10810fd8e2afa534cef7/coverage-7.13.0-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:1ed5630d946859de835a85e9a43b721123a8a44ec26e2830b296d478c7fd4259", size = 250925, upload-time = "2025-12-08T13:12:37.221Z" }, - { url = "https://files.pythonhosted.org/packages/7b/45/b88ddac1d7978859b9a39a8a50ab323186148f1d64bc068f86fc77706321/coverage-7.13.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7f15a931a668e58087bc39d05d2b4bf4b14ff2875b49c994bbdb1c2217a8daeb", size = 253032, upload-time = "2025-12-08T13:12:38.763Z" }, - { url = "https://files.pythonhosted.org/packages/71/cb/e15513f94c69d4820a34b6bf3d2b1f9f8755fa6021be97c7065442d7d653/coverage-7.13.0-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:30a3a201a127ea57f7e14ba43c93c9c4be8b7d17a26e03bb49e6966d019eede9", size = 249134, upload-time = "2025-12-08T13:12:40.382Z" }, - { url = "https://files.pythonhosted.org/packages/09/61/d960ff7dc9e902af3310ce632a875aaa7860f36d2bc8fc8b37ee7c1b82a5/coverage-7.13.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7a485ff48fbd231efa32d58f479befce52dcb6bfb2a88bb7bf9a0b89b1bc8030", size = 250731, upload-time = 
"2025-12-08T13:12:41.992Z" }, - { url = "https://files.pythonhosted.org/packages/98/34/c7c72821794afc7c7c2da1db8f00c2c98353078aa7fb6b5ff36aac834b52/coverage-7.13.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:22486cdafba4f9e471c816a2a5745337742a617fef68e890d8baf9f3036d7833", size = 248795, upload-time = "2025-12-08T13:12:43.331Z" }, - { url = "https://files.pythonhosted.org/packages/0a/5b/e0f07107987a43b2def9aa041c614ddb38064cbf294a71ef8c67d43a0cdd/coverage-7.13.0-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:263c3dbccc78e2e331e59e90115941b5f53e85cfcc6b3b2fbff1fd4e3d2c6ea8", size = 248514, upload-time = "2025-12-08T13:12:44.546Z" }, - { url = "https://files.pythonhosted.org/packages/71/c2/c949c5d3b5e9fc6dd79e1b73cdb86a59ef14f3709b1d72bf7668ae12e000/coverage-7.13.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e5330fa0cc1f5c3c4c3bb8e101b742025933e7848989370a1d4c8c5e401ea753", size = 249424, upload-time = "2025-12-08T13:12:45.759Z" }, - { url = "https://files.pythonhosted.org/packages/11/f1/bbc009abd6537cec0dffb2cc08c17a7f03de74c970e6302db4342a6e05af/coverage-7.13.0-cp311-cp311-win32.whl", hash = "sha256:0f4872f5d6c54419c94c25dd6ae1d015deeb337d06e448cd890a1e89a8ee7f3b", size = 220597, upload-time = "2025-12-08T13:12:47.378Z" }, - { url = "https://files.pythonhosted.org/packages/c4/f6/d9977f2fb51c10fbaed0718ce3d0a8541185290b981f73b1d27276c12d91/coverage-7.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:51a202e0f80f241ccb68e3e26e19ab5b3bf0f813314f2c967642f13ebcf1ddfe", size = 221536, upload-time = "2025-12-08T13:12:48.7Z" }, - { url = "https://files.pythonhosted.org/packages/be/ad/3fcf43fd96fb43e337a3073dea63ff148dcc5c41ba7a14d4c7d34efb2216/coverage-7.13.0-cp311-cp311-win_arm64.whl", hash = "sha256:d2a9d7f1c11487b1c69367ab3ac2d81b9b3721f097aa409a3191c3e90f8f3dd7", size = 220206, upload-time = "2025-12-08T13:12:50.365Z" }, - { url = 
"https://files.pythonhosted.org/packages/9b/f1/2619559f17f31ba00fc40908efd1fbf1d0a5536eb75dc8341e7d660a08de/coverage-7.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:0b3d67d31383c4c68e19a88e28fc4c2e29517580f1b0ebec4a069d502ce1e0bf", size = 218274, upload-time = "2025-12-08T13:12:52.095Z" }, - { url = "https://files.pythonhosted.org/packages/2b/11/30d71ae5d6e949ff93b2a79a2c1b4822e00423116c5c6edfaeef37301396/coverage-7.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:581f086833d24a22c89ae0fe2142cfaa1c92c930adf637ddf122d55083fb5a0f", size = 218638, upload-time = "2025-12-08T13:12:53.418Z" }, - { url = "https://files.pythonhosted.org/packages/79/c2/fce80fc6ded8d77e53207489d6065d0fed75db8951457f9213776615e0f5/coverage-7.13.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:0a3a30f0e257df382f5f9534d4ce3d4cf06eafaf5192beb1a7bd066cb10e78fb", size = 250129, upload-time = "2025-12-08T13:12:54.744Z" }, - { url = "https://files.pythonhosted.org/packages/5b/b6/51b5d1eb6fcbb9a1d5d6984e26cbe09018475c2922d554fd724dd0f056ee/coverage-7.13.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:583221913fbc8f53b88c42e8dbb8fca1d0f2e597cb190ce45916662b8b9d9621", size = 252885, upload-time = "2025-12-08T13:12:56.401Z" }, - { url = "https://files.pythonhosted.org/packages/0d/f8/972a5affea41de798691ab15d023d3530f9f56a72e12e243f35031846ff7/coverage-7.13.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5f5d9bd30756fff3e7216491a0d6d520c448d5124d3d8e8f56446d6412499e74", size = 253974, upload-time = "2025-12-08T13:12:57.718Z" }, - { url = "https://files.pythonhosted.org/packages/8a/56/116513aee860b2c7968aa3506b0f59b22a959261d1dbf3aea7b4450a7520/coverage-7.13.0-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a23e5a1f8b982d56fa64f8e442e037f6ce29322f1f9e6c2344cd9e9f4407ee57", size = 250538, upload-time = 
"2025-12-08T13:12:59.254Z" }, - { url = "https://files.pythonhosted.org/packages/d6/75/074476d64248fbadf16dfafbf93fdcede389ec821f74ca858d7c87d2a98c/coverage-7.13.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9b01c22bc74a7fb44066aaf765224c0d933ddf1f5047d6cdfe4795504a4493f8", size = 251912, upload-time = "2025-12-08T13:13:00.604Z" }, - { url = "https://files.pythonhosted.org/packages/f2/d2/aa4f8acd1f7c06024705c12609d8698c51b27e4d635d717cd1934c9668e2/coverage-7.13.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:898cce66d0836973f48dda4e3514d863d70142bdf6dfab932b9b6a90ea5b222d", size = 250054, upload-time = "2025-12-08T13:13:01.892Z" }, - { url = "https://files.pythonhosted.org/packages/19/98/8df9e1af6a493b03694a1e8070e024e7d2cdc77adedc225a35e616d505de/coverage-7.13.0-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:3ab483ea0e251b5790c2aac03acde31bff0c736bf8a86829b89382b407cd1c3b", size = 249619, upload-time = "2025-12-08T13:13:03.236Z" }, - { url = "https://files.pythonhosted.org/packages/d8/71/f8679231f3353018ca66ef647fa6fe7b77e6bff7845be54ab84f86233363/coverage-7.13.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1d84e91521c5e4cb6602fe11ece3e1de03b2760e14ae4fcf1a4b56fa3c801fcd", size = 251496, upload-time = "2025-12-08T13:13:04.511Z" }, - { url = "https://files.pythonhosted.org/packages/04/86/9cb406388034eaf3c606c22094edbbb82eea1fa9d20c0e9efadff20d0733/coverage-7.13.0-cp312-cp312-win32.whl", hash = "sha256:193c3887285eec1dbdb3f2bd7fbc351d570ca9c02ca756c3afbc71b3c98af6ef", size = 220808, upload-time = "2025-12-08T13:13:06.422Z" }, - { url = "https://files.pythonhosted.org/packages/1c/59/af483673df6455795daf5f447c2f81a3d2fcfc893a22b8ace983791f6f34/coverage-7.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:4f3e223b2b2db5e0db0c2b97286aba0036ca000f06aca9b12112eaa9af3d92ae", size = 221616, upload-time = "2025-12-08T13:13:07.95Z" }, - { url = 
"https://files.pythonhosted.org/packages/64/b0/959d582572b30a6830398c60dd419c1965ca4b5fb38ac6b7093a0d50ca8d/coverage-7.13.0-cp312-cp312-win_arm64.whl", hash = "sha256:086cede306d96202e15a4b77ace8472e39d9f4e5f9fd92dd4fecdfb2313b2080", size = 220261, upload-time = "2025-12-08T13:13:09.581Z" }, - { url = "https://files.pythonhosted.org/packages/7c/cc/bce226595eb3bf7d13ccffe154c3c487a22222d87ff018525ab4dd2e9542/coverage-7.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:28ee1c96109974af104028a8ef57cec21447d42d0e937c0275329272e370ebcf", size = 218297, upload-time = "2025-12-08T13:13:10.977Z" }, - { url = "https://files.pythonhosted.org/packages/3b/9f/73c4d34600aae03447dff3d7ad1d0ac649856bfb87d1ca7d681cfc913f9e/coverage-7.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d1e97353dcc5587b85986cda4ff3ec98081d7e84dd95e8b2a6d59820f0545f8a", size = 218673, upload-time = "2025-12-08T13:13:12.562Z" }, - { url = "https://files.pythonhosted.org/packages/63/ab/8fa097db361a1e8586535ae5073559e6229596b3489ec3ef2f5b38df8cb2/coverage-7.13.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:99acd4dfdfeb58e1937629eb1ab6ab0899b131f183ee5f23e0b5da5cba2fec74", size = 249652, upload-time = "2025-12-08T13:13:13.909Z" }, - { url = "https://files.pythonhosted.org/packages/90/3a/9bfd4de2ff191feb37ef9465855ca56a6f2f30a3bca172e474130731ac3d/coverage-7.13.0-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:ff45e0cd8451e293b63ced93161e189780baf444119391b3e7d25315060368a6", size = 252251, upload-time = "2025-12-08T13:13:15.553Z" }, - { url = "https://files.pythonhosted.org/packages/df/61/b5d8105f016e1b5874af0d7c67542da780ccd4a5f2244a433d3e20ceb1ad/coverage-7.13.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f4f72a85316d8e13234cafe0a9f81b40418ad7a082792fa4165bd7d45d96066b", size = 253492, upload-time = "2025-12-08T13:13:16.849Z" }, - { url = 
"https://files.pythonhosted.org/packages/f3/b8/0fad449981803cc47a4694768b99823fb23632150743f9c83af329bb6090/coverage-7.13.0-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:11c21557d0e0a5a38632cbbaca5f008723b26a89d70db6315523df6df77d6232", size = 249850, upload-time = "2025-12-08T13:13:18.142Z" }, - { url = "https://files.pythonhosted.org/packages/9a/e9/8d68337c3125014d918cf4327d5257553a710a2995a6a6de2ac77e5aa429/coverage-7.13.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:76541dc8d53715fb4f7a3a06b34b0dc6846e3c69bc6204c55653a85dd6220971", size = 251633, upload-time = "2025-12-08T13:13:19.56Z" }, - { url = "https://files.pythonhosted.org/packages/55/14/d4112ab26b3a1bc4b3c1295d8452dcf399ed25be4cf649002fb3e64b2d93/coverage-7.13.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:6e9e451dee940a86789134b6b0ffbe31c454ade3b849bb8a9d2cca2541a8e91d", size = 249586, upload-time = "2025-12-08T13:13:20.883Z" }, - { url = "https://files.pythonhosted.org/packages/2c/a9/22b0000186db663b0d82f86c2f1028099ae9ac202491685051e2a11a5218/coverage-7.13.0-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:5c67dace46f361125e6b9cace8fe0b729ed8479f47e70c89b838d319375c8137", size = 249412, upload-time = "2025-12-08T13:13:22.22Z" }, - { url = "https://files.pythonhosted.org/packages/a1/2e/42d8e0d9e7527fba439acdc6ed24a2b97613b1dc85849b1dd935c2cffef0/coverage-7.13.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f59883c643cb19630500f57016f76cfdcd6845ca8c5b5ea1f6e17f74c8e5f511", size = 251191, upload-time = "2025-12-08T13:13:23.899Z" }, - { url = "https://files.pythonhosted.org/packages/a4/af/8c7af92b1377fd8860536aadd58745119252aaaa71a5213e5a8e8007a9f5/coverage-7.13.0-cp313-cp313-win32.whl", hash = "sha256:58632b187be6f0be500f553be41e277712baa278147ecb7559983c6d9faf7ae1", size = 220829, upload-time = "2025-12-08T13:13:25.182Z" }, - { url = 
"https://files.pythonhosted.org/packages/58/f9/725e8bf16f343d33cbe076c75dc8370262e194ff10072c0608b8e5cf33a3/coverage-7.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:73419b89f812f498aca53f757dd834919b48ce4799f9d5cad33ca0ae442bdb1a", size = 221640, upload-time = "2025-12-08T13:13:26.836Z" }, - { url = "https://files.pythonhosted.org/packages/8a/ff/e98311000aa6933cc79274e2b6b94a2fe0fe3434fca778eba82003675496/coverage-7.13.0-cp313-cp313-win_arm64.whl", hash = "sha256:eb76670874fdd6091eedcc856128ee48c41a9bbbb9c3f1c7c3cf169290e3ffd6", size = 220269, upload-time = "2025-12-08T13:13:28.116Z" }, - { url = "https://files.pythonhosted.org/packages/cf/cf/bbaa2e1275b300343ea865f7d424cc0a2e2a1df6925a070b2b2d5d765330/coverage-7.13.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:6e63ccc6e0ad8986386461c3c4b737540f20426e7ec932f42e030320896c311a", size = 218990, upload-time = "2025-12-08T13:13:29.463Z" }, - { url = "https://files.pythonhosted.org/packages/21/1d/82f0b3323b3d149d7672e7744c116e9c170f4957e0c42572f0366dbb4477/coverage-7.13.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:494f5459ffa1bd45e18558cd98710c36c0b8fbfa82a5eabcbe671d80ecffbfe8", size = 219340, upload-time = "2025-12-08T13:13:31.524Z" }, - { url = "https://files.pythonhosted.org/packages/fb/e3/fe3fd4702a3832a255f4d43013eacb0ef5fc155a5960ea9269d8696db28b/coverage-7.13.0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:06cac81bf10f74034e055e903f5f946e3e26fc51c09fc9f584e4a1605d977053", size = 260638, upload-time = "2025-12-08T13:13:32.965Z" }, - { url = "https://files.pythonhosted.org/packages/ad/01/63186cb000307f2b4da463f72af9b85d380236965574c78e7e27680a2593/coverage-7.13.0-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f2ffc92b46ed6e6760f1d47a71e56b5664781bc68986dbd1836b2b70c0ce2071", size = 262705, upload-time = "2025-12-08T13:13:34.378Z" }, - { url = 
"https://files.pythonhosted.org/packages/7c/a1/c0dacef0cc865f2455d59eed3548573ce47ed603205ffd0735d1d78b5906/coverage-7.13.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0602f701057c6823e5db1b74530ce85f17c3c5be5c85fc042ac939cbd909426e", size = 265125, upload-time = "2025-12-08T13:13:35.73Z" }, - { url = "https://files.pythonhosted.org/packages/ef/92/82b99223628b61300bd382c205795533bed021505eab6dd86e11fb5d7925/coverage-7.13.0-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:25dc33618d45456ccb1d37bce44bc78cf269909aa14c4db2e03d63146a8a1493", size = 259844, upload-time = "2025-12-08T13:13:37.69Z" }, - { url = "https://files.pythonhosted.org/packages/cf/2c/89b0291ae4e6cd59ef042708e1c438e2290f8c31959a20055d8768349ee2/coverage-7.13.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:71936a8b3b977ddd0b694c28c6a34f4fff2e9dd201969a4ff5d5fc7742d614b0", size = 262700, upload-time = "2025-12-08T13:13:39.525Z" }, - { url = "https://files.pythonhosted.org/packages/bf/f9/a5f992efae1996245e796bae34ceb942b05db275e4b34222a9a40b9fbd3b/coverage-7.13.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:936bc20503ce24770c71938d1369461f0c5320830800933bc3956e2a4ded930e", size = 260321, upload-time = "2025-12-08T13:13:41.172Z" }, - { url = "https://files.pythonhosted.org/packages/4c/89/a29f5d98c64fedbe32e2ac3c227fbf78edc01cc7572eee17d61024d89889/coverage-7.13.0-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:af0a583efaacc52ae2521f8d7910aff65cdb093091d76291ac5820d5e947fc1c", size = 259222, upload-time = "2025-12-08T13:13:43.282Z" }, - { url = "https://files.pythonhosted.org/packages/b3/c3/940fe447aae302a6701ee51e53af7e08b86ff6eed7631e5740c157ee22b9/coverage-7.13.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f1c23e24a7000da892a312fb17e33c5f94f8b001de44b7cf8ba2e36fbd15859e", size = 261411, upload-time = "2025-12-08T13:13:44.72Z" }, - { url = 
"https://files.pythonhosted.org/packages/eb/31/12a4aec689cb942a89129587860ed4d0fd522d5fda81237147fde554b8ae/coverage-7.13.0-cp313-cp313t-win32.whl", hash = "sha256:5f8a0297355e652001015e93be345ee54393e45dc3050af4a0475c5a2b767d46", size = 221505, upload-time = "2025-12-08T13:13:46.332Z" }, - { url = "https://files.pythonhosted.org/packages/65/8c/3b5fe3259d863572d2b0827642c50c3855d26b3aefe80bdc9eba1f0af3b0/coverage-7.13.0-cp313-cp313t-win_amd64.whl", hash = "sha256:6abb3a4c52f05e08460bd9acf04fec027f8718ecaa0d09c40ffbc3fbd70ecc39", size = 222569, upload-time = "2025-12-08T13:13:47.79Z" }, - { url = "https://files.pythonhosted.org/packages/b0/39/f71fa8316a96ac72fc3908839df651e8eccee650001a17f2c78cdb355624/coverage-7.13.0-cp313-cp313t-win_arm64.whl", hash = "sha256:3ad968d1e3aa6ce5be295ab5fe3ae1bf5bb4769d0f98a80a0252d543a2ef2e9e", size = 220841, upload-time = "2025-12-08T13:13:49.243Z" }, - { url = "https://files.pythonhosted.org/packages/f8/4b/9b54bedda55421449811dcd5263a2798a63f48896c24dfb92b0f1b0845bd/coverage-7.13.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:453b7ec753cf5e4356e14fe858064e5520c460d3bbbcb9c35e55c0d21155c256", size = 218343, upload-time = "2025-12-08T13:13:50.811Z" }, - { url = "https://files.pythonhosted.org/packages/59/df/c3a1f34d4bba2e592c8979f924da4d3d4598b0df2392fbddb7761258e3dc/coverage-7.13.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:af827b7cbb303e1befa6c4f94fd2bf72f108089cfa0f8abab8f4ca553cf5ca5a", size = 218672, upload-time = "2025-12-08T13:13:52.284Z" }, - { url = "https://files.pythonhosted.org/packages/07/62/eec0659e47857698645ff4e6ad02e30186eb8afd65214fd43f02a76537cb/coverage-7.13.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:9987a9e4f8197a1000280f7cc089e3ea2c8b3c0a64d750537809879a7b4ceaf9", size = 249715, upload-time = "2025-12-08T13:13:53.791Z" }, - { url = 
"https://files.pythonhosted.org/packages/23/2d/3c7ff8b2e0e634c1f58d095f071f52ed3c23ff25be524b0ccae8b71f99f8/coverage-7.13.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:3188936845cd0cb114fa6a51842a304cdbac2958145d03be2377ec41eb285d19", size = 252225, upload-time = "2025-12-08T13:13:55.274Z" }, - { url = "https://files.pythonhosted.org/packages/aa/ac/fb03b469d20e9c9a81093575003f959cf91a4a517b783aab090e4538764b/coverage-7.13.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a2bdb3babb74079f021696cb46b8bb5f5661165c385d3a238712b031a12355be", size = 253559, upload-time = "2025-12-08T13:13:57.161Z" }, - { url = "https://files.pythonhosted.org/packages/29/62/14afa9e792383c66cc0a3b872a06ded6e4ed1079c7d35de274f11d27064e/coverage-7.13.0-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:7464663eaca6adba4175f6c19354feea61ebbdd735563a03d1e472c7072d27bb", size = 249724, upload-time = "2025-12-08T13:13:58.692Z" }, - { url = "https://files.pythonhosted.org/packages/31/b7/333f3dab2939070613696ab3ee91738950f0467778c6e5a5052e840646b7/coverage-7.13.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:8069e831f205d2ff1f3d355e82f511eb7c5522d7d413f5db5756b772ec8697f8", size = 251582, upload-time = "2025-12-08T13:14:00.642Z" }, - { url = "https://files.pythonhosted.org/packages/81/cb/69162bda9381f39b2287265d7e29ee770f7c27c19f470164350a38318764/coverage-7.13.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:6fb2d5d272341565f08e962cce14cdf843a08ac43bd621783527adb06b089c4b", size = 249538, upload-time = "2025-12-08T13:14:02.556Z" }, - { url = "https://files.pythonhosted.org/packages/e0/76/350387b56a30f4970abe32b90b2a434f87d29f8b7d4ae40d2e8a85aacfb3/coverage-7.13.0-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:5e70f92ef89bac1ac8a99b3324923b4749f008fdbd7aa9cb35e01d7a284a04f9", size = 249349, upload-time = "2025-12-08T13:14:04.015Z" }, - { url = 
"https://files.pythonhosted.org/packages/86/0d/7f6c42b8d59f4c7e43ea3059f573c0dcfed98ba46eb43c68c69e52ae095c/coverage-7.13.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:4b5de7d4583e60d5fd246dd57fcd3a8aa23c6e118a8c72b38adf666ba8e7e927", size = 251011, upload-time = "2025-12-08T13:14:05.505Z" }, - { url = "https://files.pythonhosted.org/packages/d7/f1/4bb2dff379721bb0b5c649d5c5eaf438462cad824acf32eb1b7ca0c7078e/coverage-7.13.0-cp314-cp314-win32.whl", hash = "sha256:a6c6e16b663be828a8f0b6c5027d36471d4a9f90d28444aa4ced4d48d7d6ae8f", size = 221091, upload-time = "2025-12-08T13:14:07.127Z" }, - { url = "https://files.pythonhosted.org/packages/ba/44/c239da52f373ce379c194b0ee3bcc121020e397242b85f99e0afc8615066/coverage-7.13.0-cp314-cp314-win_amd64.whl", hash = "sha256:0900872f2fdb3ee5646b557918d02279dc3af3dfb39029ac4e945458b13f73bc", size = 221904, upload-time = "2025-12-08T13:14:08.542Z" }, - { url = "https://files.pythonhosted.org/packages/89/1f/b9f04016d2a29c2e4a0307baefefad1a4ec5724946a2b3e482690486cade/coverage-7.13.0-cp314-cp314-win_arm64.whl", hash = "sha256:3a10260e6a152e5f03f26db4a407c4c62d3830b9af9b7c0450b183615f05d43b", size = 220480, upload-time = "2025-12-08T13:14:10.958Z" }, - { url = "https://files.pythonhosted.org/packages/16/d4/364a1439766c8e8647860584171c36010ca3226e6e45b1753b1b249c5161/coverage-7.13.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:9097818b6cc1cfb5f174e3263eba4a62a17683bcfe5c4b5d07f4c97fa51fbf28", size = 219074, upload-time = "2025-12-08T13:14:13.345Z" }, - { url = "https://files.pythonhosted.org/packages/ce/f4/71ba8be63351e099911051b2089662c03d5671437a0ec2171823c8e03bec/coverage-7.13.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:0018f73dfb4301a89292c73be6ba5f58722ff79f51593352759c1790ded1cabe", size = 219342, upload-time = "2025-12-08T13:14:15.02Z" }, - { url = 
"https://files.pythonhosted.org/packages/5e/25/127d8ed03d7711a387d96f132589057213e3aef7475afdaa303412463f22/coverage-7.13.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:166ad2a22ee770f5656e1257703139d3533b4a0b6909af67c6b4a3adc1c98657", size = 260713, upload-time = "2025-12-08T13:14:16.907Z" }, - { url = "https://files.pythonhosted.org/packages/fd/db/559fbb6def07d25b2243663b46ba9eb5a3c6586c0c6f4e62980a68f0ee1c/coverage-7.13.0-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f6aaef16d65d1787280943f1c8718dc32e9cf141014e4634d64446702d26e0ff", size = 262825, upload-time = "2025-12-08T13:14:18.68Z" }, - { url = "https://files.pythonhosted.org/packages/37/99/6ee5bf7eff884766edb43bd8736b5e1c5144d0fe47498c3779326fe75a35/coverage-7.13.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e999e2dcc094002d6e2c7bbc1fb85b58ba4f465a760a8014d97619330cdbbbf3", size = 265233, upload-time = "2025-12-08T13:14:20.55Z" }, - { url = "https://files.pythonhosted.org/packages/d8/90/92f18fe0356ea69e1f98f688ed80cec39f44e9f09a1f26a1bbf017cc67f2/coverage-7.13.0-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:00c3d22cf6fb1cf3bf662aaaa4e563be8243a5ed2630339069799835a9cc7f9b", size = 259779, upload-time = "2025-12-08T13:14:22.367Z" }, - { url = "https://files.pythonhosted.org/packages/90/5d/b312a8b45b37a42ea7d27d7d3ff98ade3a6c892dd48d1d503e773503373f/coverage-7.13.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:22ccfe8d9bb0d6134892cbe1262493a8c70d736b9df930f3f3afae0fe3ac924d", size = 262700, upload-time = "2025-12-08T13:14:24.309Z" }, - { url = "https://files.pythonhosted.org/packages/63/f8/b1d0de5c39351eb71c366f872376d09386640840a2e09b0d03973d791e20/coverage-7.13.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:9372dff5ea15930fea0445eaf37bbbafbc771a49e70c0aeed8b4e2c2614cc00e", size = 260302, upload-time = 
"2025-12-08T13:14:26.068Z" }, - { url = "https://files.pythonhosted.org/packages/aa/7c/d42f4435bc40c55558b3109a39e2d456cddcec37434f62a1f1230991667a/coverage-7.13.0-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:69ac2c492918c2461bc6ace42d0479638e60719f2a4ef3f0815fa2df88e9f940", size = 259136, upload-time = "2025-12-08T13:14:27.604Z" }, - { url = "https://files.pythonhosted.org/packages/b8/d3/23413241dc04d47cfe19b9a65b32a2edd67ecd0b817400c2843ebc58c847/coverage-7.13.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:739c6c051a7540608d097b8e13c76cfa85263ced467168dc6b477bae3df7d0e2", size = 261467, upload-time = "2025-12-08T13:14:29.09Z" }, - { url = "https://files.pythonhosted.org/packages/13/e6/6e063174500eee216b96272c0d1847bf215926786f85c2bd024cf4d02d2f/coverage-7.13.0-cp314-cp314t-win32.whl", hash = "sha256:fe81055d8c6c9de76d60c94ddea73c290b416e061d40d542b24a5871bad498b7", size = 221875, upload-time = "2025-12-08T13:14:31.106Z" }, - { url = "https://files.pythonhosted.org/packages/3b/46/f4fb293e4cbe3620e3ac2a3e8fd566ed33affb5861a9b20e3dd6c1896cbc/coverage-7.13.0-cp314-cp314t-win_amd64.whl", hash = "sha256:445badb539005283825959ac9fa4a28f712c214b65af3a2c464f1adc90f5fcbc", size = 222982, upload-time = "2025-12-08T13:14:33.1Z" }, - { url = "https://files.pythonhosted.org/packages/68/62/5b3b9018215ed9733fbd1ae3b2ed75c5de62c3b55377a52cae732e1b7805/coverage-7.13.0-cp314-cp314t-win_arm64.whl", hash = "sha256:de7f6748b890708578fc4b7bb967d810aeb6fcc9bff4bb77dbca77dab2f9df6a", size = 221016, upload-time = "2025-12-08T13:14:34.601Z" }, - { url = "https://files.pythonhosted.org/packages/8d/4c/1968f32fb9a2604645827e11ff84a31e59d532e01995f904723b4f5328b3/coverage-7.13.0-py3-none-any.whl", hash = "sha256:850d2998f380b1e266459ca5b47bc9e7daf9af1d070f66317972f382d46f1904", size = 210068, upload-time = "2025-12-08T13:14:36.236Z" }, +version = "7.13.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/23/f9/e92df5e07f3fc8d4c7f9a0f146ef75446bf870351cd37b788cf5897f8079/coverage-7.13.1.tar.gz", hash = "sha256:b7593fe7eb5feaa3fbb461ac79aac9f9fc0387a5ca8080b0c6fe2ca27b091afd", size = 825862, upload-time = "2025-12-28T15:42:56.969Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2d/9a/3742e58fd04b233df95c012ee9f3dfe04708a5e1d32613bd2d47d4e1be0d/coverage-7.13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e1fa280b3ad78eea5be86f94f461c04943d942697e0dac889fa18fff8f5f9147", size = 218633, upload-time = "2025-12-28T15:40:10.165Z" }, + { url = "https://files.pythonhosted.org/packages/7e/45/7e6bdc94d89cd7c8017ce735cf50478ddfe765d4fbf0c24d71d30ea33d7a/coverage-7.13.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c3d8c679607220979434f494b139dfb00131ebf70bb406553d69c1ff01a5c33d", size = 219147, upload-time = "2025-12-28T15:40:12.069Z" }, + { url = "https://files.pythonhosted.org/packages/f7/38/0d6a258625fd7f10773fe94097dc16937a5f0e3e0cdf3adef67d3ac6baef/coverage-7.13.1-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:339dc63b3eba969067b00f41f15ad161bf2946613156fb131266d8debc8e44d0", size = 245894, upload-time = "2025-12-28T15:40:13.556Z" }, + { url = "https://files.pythonhosted.org/packages/27/58/409d15ea487986994cbd4d06376e9860e9b157cfbfd402b1236770ab8dd2/coverage-7.13.1-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:db622b999ffe49cb891f2fff3b340cdc2f9797d01a0a202a0973ba2562501d90", size = 247721, upload-time = "2025-12-28T15:40:15.37Z" }, + { url = "https://files.pythonhosted.org/packages/da/bf/6e8056a83fd7a96c93341f1ffe10df636dd89f26d5e7b9ca511ce3bcf0df/coverage-7.13.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d1443ba9acbb593fa7c1c29e011d7c9761545fe35e7652e85ce7f51a16f7e08d", size = 249585, upload-time = "2025-12-28T15:40:17.226Z" }, + { url = 
"https://files.pythonhosted.org/packages/f4/15/e1daff723f9f5959acb63cbe35b11203a9df77ee4b95b45fffd38b318390/coverage-7.13.1-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c832ec92c4499ac463186af72f9ed4d8daec15499b16f0a879b0d1c8e5cf4a3b", size = 246597, upload-time = "2025-12-28T15:40:19.028Z" }, + { url = "https://files.pythonhosted.org/packages/74/a6/1efd31c5433743a6ddbc9d37ac30c196bb07c7eab3d74fbb99b924c93174/coverage-7.13.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:562ec27dfa3f311e0db1ba243ec6e5f6ab96b1edfcfc6cf86f28038bc4961ce6", size = 247626, upload-time = "2025-12-28T15:40:20.846Z" }, + { url = "https://files.pythonhosted.org/packages/6d/9f/1609267dd3e749f57fdd66ca6752567d1c13b58a20a809dc409b263d0b5f/coverage-7.13.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:4de84e71173d4dada2897e5a0e1b7877e5eefbfe0d6a44edee6ce31d9b8ec09e", size = 245629, upload-time = "2025-12-28T15:40:22.397Z" }, + { url = "https://files.pythonhosted.org/packages/e2/f6/6815a220d5ec2466383d7cc36131b9fa6ecbe95c50ec52a631ba733f306a/coverage-7.13.1-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:a5a68357f686f8c4d527a2dc04f52e669c2fc1cbde38f6f7eb6a0e58cbd17cae", size = 245901, upload-time = "2025-12-28T15:40:23.836Z" }, + { url = "https://files.pythonhosted.org/packages/ac/58/40576554cd12e0872faf6d2c0eb3bc85f71d78427946ddd19ad65201e2c0/coverage-7.13.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:77cc258aeb29a3417062758975521eae60af6f79e930d6993555eeac6a8eac29", size = 246505, upload-time = "2025-12-28T15:40:25.421Z" }, + { url = "https://files.pythonhosted.org/packages/3b/77/9233a90253fba576b0eee81707b5781d0e21d97478e5377b226c5b096c0f/coverage-7.13.1-cp310-cp310-win32.whl", hash = "sha256:bb4f8c3c9a9f34423dba193f241f617b08ffc63e27f67159f60ae6baf2dcfe0f", size = 221257, upload-time = "2025-12-28T15:40:27.217Z" }, + { url = 
"https://files.pythonhosted.org/packages/e0/43/e842ff30c1a0a623ec80db89befb84a3a7aad7bfe44a6ea77d5a3e61fedd/coverage-7.13.1-cp310-cp310-win_amd64.whl", hash = "sha256:c8e2706ceb622bc63bac98ebb10ef5da80ed70fbd8a7999a5076de3afaef0fb1", size = 222191, upload-time = "2025-12-28T15:40:28.916Z" }, + { url = "https://files.pythonhosted.org/packages/b4/9b/77baf488516e9ced25fc215a6f75d803493fc3f6a1a1227ac35697910c2a/coverage-7.13.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1a55d509a1dc5a5b708b5dad3b5334e07a16ad4c2185e27b40e4dba796ab7f88", size = 218755, upload-time = "2025-12-28T15:40:30.812Z" }, + { url = "https://files.pythonhosted.org/packages/d7/cd/7ab01154e6eb79ee2fab76bf4d89e94c6648116557307ee4ebbb85e5c1bf/coverage-7.13.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4d010d080c4888371033baab27e47c9df7d6fb28d0b7b7adf85a4a49be9298b3", size = 219257, upload-time = "2025-12-28T15:40:32.333Z" }, + { url = "https://files.pythonhosted.org/packages/01/d5/b11ef7863ffbbdb509da0023fad1e9eda1c0eaea61a6d2ea5b17d4ac706e/coverage-7.13.1-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d938b4a840fb1523b9dfbbb454f652967f18e197569c32266d4d13f37244c3d9", size = 249657, upload-time = "2025-12-28T15:40:34.1Z" }, + { url = "https://files.pythonhosted.org/packages/f7/7c/347280982982383621d29b8c544cf497ae07ac41e44b1ca4903024131f55/coverage-7.13.1-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:bf100a3288f9bb7f919b87eb84f87101e197535b9bd0e2c2b5b3179633324fee", size = 251581, upload-time = "2025-12-28T15:40:36.131Z" }, + { url = "https://files.pythonhosted.org/packages/82/f6/ebcfed11036ade4c0d75fa4453a6282bdd225bc073862766eec184a4c643/coverage-7.13.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ef6688db9bf91ba111ae734ba6ef1a063304a881749726e0d3575f5c10a9facf", size = 253691, upload-time = "2025-12-28T15:40:37.626Z" }, + { url = 
"https://files.pythonhosted.org/packages/02/92/af8f5582787f5d1a8b130b2dcba785fa5e9a7a8e121a0bb2220a6fdbdb8a/coverage-7.13.1-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:0b609fc9cdbd1f02e51f67f51e5aee60a841ef58a68d00d5ee2c0faf357481a3", size = 249799, upload-time = "2025-12-28T15:40:39.47Z" }, + { url = "https://files.pythonhosted.org/packages/24/aa/0e39a2a3b16eebf7f193863323edbff38b6daba711abaaf807d4290cf61a/coverage-7.13.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c43257717611ff5e9a1d79dce8e47566235ebda63328718d9b65dd640bc832ef", size = 251389, upload-time = "2025-12-28T15:40:40.954Z" }, + { url = "https://files.pythonhosted.org/packages/73/46/7f0c13111154dc5b978900c0ccee2e2ca239b910890e674a77f1363d483e/coverage-7.13.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e09fbecc007f7b6afdfb3b07ce5bd9f8494b6856dd4f577d26c66c391b829851", size = 249450, upload-time = "2025-12-28T15:40:42.489Z" }, + { url = "https://files.pythonhosted.org/packages/ac/ca/e80da6769e8b669ec3695598c58eef7ad98b0e26e66333996aee6316db23/coverage-7.13.1-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:a03a4f3a19a189919c7055098790285cc5c5b0b3976f8d227aea39dbf9f8bfdb", size = 249170, upload-time = "2025-12-28T15:40:44.279Z" }, + { url = "https://files.pythonhosted.org/packages/af/18/9e29baabdec1a8644157f572541079b4658199cfd372a578f84228e860de/coverage-7.13.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3820778ea1387c2b6a818caec01c63adc5b3750211af6447e8dcfb9b6f08dbba", size = 250081, upload-time = "2025-12-28T15:40:45.748Z" }, + { url = "https://files.pythonhosted.org/packages/00/f8/c3021625a71c3b2f516464d322e41636aea381018319050a8114105872ee/coverage-7.13.1-cp311-cp311-win32.whl", hash = "sha256:ff10896fa55167371960c5908150b434b71c876dfab97b69478f22c8b445ea19", size = 221281, upload-time = "2025-12-28T15:40:47.232Z" }, + { url = 
"https://files.pythonhosted.org/packages/27/56/c216625f453df6e0559ed666d246fcbaaa93f3aa99eaa5080cea1229aa3d/coverage-7.13.1-cp311-cp311-win_amd64.whl", hash = "sha256:a998cc0aeeea4c6d5622a3754da5a493055d2d95186bad877b0a34ea6e6dbe0a", size = 222215, upload-time = "2025-12-28T15:40:49.19Z" }, + { url = "https://files.pythonhosted.org/packages/5c/9a/be342e76f6e531cae6406dc46af0d350586f24d9b67fdfa6daee02df71af/coverage-7.13.1-cp311-cp311-win_arm64.whl", hash = "sha256:fea07c1a39a22614acb762e3fbbb4011f65eedafcb2948feeef641ac78b4ee5c", size = 220886, upload-time = "2025-12-28T15:40:51.067Z" }, + { url = "https://files.pythonhosted.org/packages/ce/8a/87af46cccdfa78f53db747b09f5f9a21d5fc38d796834adac09b30a8ce74/coverage-7.13.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6f34591000f06e62085b1865c9bc5f7858df748834662a51edadfd2c3bfe0dd3", size = 218927, upload-time = "2025-12-28T15:40:52.814Z" }, + { url = "https://files.pythonhosted.org/packages/82/a8/6e22fdc67242a4a5a153f9438d05944553121c8f4ba70cb072af4c41362e/coverage-7.13.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b67e47c5595b9224599016e333f5ec25392597a89d5744658f837d204e16c63e", size = 219288, upload-time = "2025-12-28T15:40:54.262Z" }, + { url = "https://files.pythonhosted.org/packages/d0/0a/853a76e03b0f7c4375e2ca025df45c918beb367f3e20a0a8e91967f6e96c/coverage-7.13.1-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3e7b8bd70c48ffb28461ebe092c2345536fb18bbbf19d287c8913699735f505c", size = 250786, upload-time = "2025-12-28T15:40:56.059Z" }, + { url = "https://files.pythonhosted.org/packages/ea/b4/694159c15c52b9f7ec7adf49d50e5f8ee71d3e9ef38adb4445d13dd56c20/coverage-7.13.1-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:c223d078112e90dc0e5c4e35b98b9584164bea9fbbd221c0b21c5241f6d51b62", size = 253543, upload-time = "2025-12-28T15:40:57.585Z" }, + { url = 
"https://files.pythonhosted.org/packages/96/b2/7f1f0437a5c855f87e17cf5d0dc35920b6440ff2b58b1ba9788c059c26c8/coverage-7.13.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:794f7c05af0763b1bbd1b9e6eff0e52ad068be3b12cd96c87de037b01390c968", size = 254635, upload-time = "2025-12-28T15:40:59.443Z" }, + { url = "https://files.pythonhosted.org/packages/e9/d1/73c3fdb8d7d3bddd9473c9c6a2e0682f09fc3dfbcb9c3f36412a7368bcab/coverage-7.13.1-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:0642eae483cc8c2902e4af7298bf886d605e80f26382124cddc3967c2a3df09e", size = 251202, upload-time = "2025-12-28T15:41:01.328Z" }, + { url = "https://files.pythonhosted.org/packages/66/3c/f0edf75dcc152f145d5598329e864bbbe04ab78660fe3e8e395f9fff010f/coverage-7.13.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9f5e772ed5fef25b3de9f2008fe67b92d46831bd2bc5bdc5dd6bfd06b83b316f", size = 252566, upload-time = "2025-12-28T15:41:03.319Z" }, + { url = "https://files.pythonhosted.org/packages/17/b3/e64206d3c5f7dcbceafd14941345a754d3dbc78a823a6ed526e23b9cdaab/coverage-7.13.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:45980ea19277dc0a579e432aef6a504fe098ef3a9032ead15e446eb0f1191aee", size = 250711, upload-time = "2025-12-28T15:41:06.411Z" }, + { url = "https://files.pythonhosted.org/packages/dc/ad/28a3eb970a8ef5b479ee7f0c484a19c34e277479a5b70269dc652b730733/coverage-7.13.1-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:e4f18eca6028ffa62adbd185a8f1e1dd242f2e68164dba5c2b74a5204850b4cf", size = 250278, upload-time = "2025-12-28T15:41:08.285Z" }, + { url = "https://files.pythonhosted.org/packages/54/e3/c8f0f1a93133e3e1291ca76cbb63565bd4b5c5df63b141f539d747fff348/coverage-7.13.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f8dca5590fec7a89ed6826fce625595279e586ead52e9e958d3237821fbc750c", size = 252154, upload-time = "2025-12-28T15:41:09.969Z" }, + { url = 
"https://files.pythonhosted.org/packages/d0/bf/9939c5d6859c380e405b19e736321f1c7d402728792f4c752ad1adcce005/coverage-7.13.1-cp312-cp312-win32.whl", hash = "sha256:ff86d4e85188bba72cfb876df3e11fa243439882c55957184af44a35bd5880b7", size = 221487, upload-time = "2025-12-28T15:41:11.468Z" }, + { url = "https://files.pythonhosted.org/packages/fa/dc/7282856a407c621c2aad74021680a01b23010bb8ebf427cf5eacda2e876f/coverage-7.13.1-cp312-cp312-win_amd64.whl", hash = "sha256:16cc1da46c04fb0fb128b4dc430b78fa2aba8a6c0c9f8eb391fd5103409a6ac6", size = 222299, upload-time = "2025-12-28T15:41:13.386Z" }, + { url = "https://files.pythonhosted.org/packages/10/79/176a11203412c350b3e9578620013af35bcdb79b651eb976f4a4b32044fa/coverage-7.13.1-cp312-cp312-win_arm64.whl", hash = "sha256:8d9bc218650022a768f3775dd7fdac1886437325d8d295d923ebcfef4892ad5c", size = 220941, upload-time = "2025-12-28T15:41:14.975Z" }, + { url = "https://files.pythonhosted.org/packages/a3/a4/e98e689347a1ff1a7f67932ab535cef82eb5e78f32a9e4132e114bbb3a0a/coverage-7.13.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:cb237bfd0ef4d5eb6a19e29f9e528ac67ac3be932ea6b44fb6cc09b9f3ecff78", size = 218951, upload-time = "2025-12-28T15:41:16.653Z" }, + { url = "https://files.pythonhosted.org/packages/32/33/7cbfe2bdc6e2f03d6b240d23dc45fdaf3fd270aaf2d640be77b7f16989ab/coverage-7.13.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1dcb645d7e34dcbcc96cd7c132b1fc55c39263ca62eb961c064eb3928997363b", size = 219325, upload-time = "2025-12-28T15:41:18.609Z" }, + { url = "https://files.pythonhosted.org/packages/59/f6/efdabdb4929487baeb7cb2a9f7dac457d9356f6ad1b255be283d58b16316/coverage-7.13.1-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3d42df8201e00384736f0df9be2ced39324c3907607d17d50d50116c989d84cd", size = 250309, upload-time = "2025-12-28T15:41:20.629Z" }, + { url = 
"https://files.pythonhosted.org/packages/12/da/91a52516e9d5aea87d32d1523f9cdcf7a35a3b298e6be05d6509ba3cfab2/coverage-7.13.1-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:fa3edde1aa8807de1d05934982416cb3ec46d1d4d91e280bcce7cca01c507992", size = 252907, upload-time = "2025-12-28T15:41:22.257Z" }, + { url = "https://files.pythonhosted.org/packages/75/38/f1ea837e3dc1231e086db1638947e00d264e7e8c41aa8ecacf6e1e0c05f4/coverage-7.13.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9edd0e01a343766add6817bc448408858ba6b489039eaaa2018474e4001651a4", size = 254148, upload-time = "2025-12-28T15:41:23.87Z" }, + { url = "https://files.pythonhosted.org/packages/7f/43/f4f16b881aaa34954ba446318dea6b9ed5405dd725dd8daac2358eda869a/coverage-7.13.1-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:985b7836931d033570b94c94713c6dba5f9d3ff26045f72c3e5dbc5fe3361e5a", size = 250515, upload-time = "2025-12-28T15:41:25.437Z" }, + { url = "https://files.pythonhosted.org/packages/84/34/8cba7f00078bd468ea914134e0144263194ce849ec3baad187ffb6203d1c/coverage-7.13.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ffed1e4980889765c84a5d1a566159e363b71d6b6fbaf0bebc9d3c30bc016766", size = 252292, upload-time = "2025-12-28T15:41:28.459Z" }, + { url = "https://files.pythonhosted.org/packages/8c/a4/cffac66c7652d84ee4ac52d3ccb94c015687d3b513f9db04bfcac2ac800d/coverage-7.13.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:8842af7f175078456b8b17f1b73a0d16a65dcbdc653ecefeb00a56b3c8c298c4", size = 250242, upload-time = "2025-12-28T15:41:30.02Z" }, + { url = "https://files.pythonhosted.org/packages/f4/78/9a64d462263dde416f3c0067efade7b52b52796f489b1037a95b0dc389c9/coverage-7.13.1-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:ccd7a6fca48ca9c131d9b0a2972a581e28b13416fc313fb98b6d24a03ce9a398", size = 250068, upload-time = "2025-12-28T15:41:32.007Z" }, + { url = 
"https://files.pythonhosted.org/packages/69/c8/a8994f5fece06db7c4a97c8fc1973684e178599b42e66280dded0524ef00/coverage-7.13.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0403f647055de2609be776965108447deb8e384fe4a553c119e3ff6bfbab4784", size = 251846, upload-time = "2025-12-28T15:41:33.946Z" }, + { url = "https://files.pythonhosted.org/packages/cc/f7/91fa73c4b80305c86598a2d4e54ba22df6bf7d0d97500944af7ef155d9f7/coverage-7.13.1-cp313-cp313-win32.whl", hash = "sha256:549d195116a1ba1e1ae2f5ca143f9777800f6636eab917d4f02b5310d6d73461", size = 221512, upload-time = "2025-12-28T15:41:35.519Z" }, + { url = "https://files.pythonhosted.org/packages/45/0b/0768b4231d5a044da8f75e097a8714ae1041246bb765d6b5563bab456735/coverage-7.13.1-cp313-cp313-win_amd64.whl", hash = "sha256:5899d28b5276f536fcf840b18b61a9fce23cc3aec1d114c44c07fe94ebeaa500", size = 222321, upload-time = "2025-12-28T15:41:37.371Z" }, + { url = "https://files.pythonhosted.org/packages/9b/b8/bdcb7253b7e85157282450262008f1366aa04663f3e3e4c30436f596c3e2/coverage-7.13.1-cp313-cp313-win_arm64.whl", hash = "sha256:868a2fae76dfb06e87291bcbd4dcbcc778a8500510b618d50496e520bd94d9b9", size = 220949, upload-time = "2025-12-28T15:41:39.553Z" }, + { url = "https://files.pythonhosted.org/packages/70/52/f2be52cc445ff75ea8397948c96c1b4ee14f7f9086ea62fc929c5ae7b717/coverage-7.13.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:67170979de0dacac3f3097d02b0ad188d8edcea44ccc44aaa0550af49150c7dc", size = 219643, upload-time = "2025-12-28T15:41:41.567Z" }, + { url = "https://files.pythonhosted.org/packages/47/79/c85e378eaa239e2edec0c5523f71542c7793fe3340954eafb0bc3904d32d/coverage-7.13.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:f80e2bb21bfab56ed7405c2d79d34b5dc0bc96c2c1d2a067b643a09fb756c43a", size = 219997, upload-time = "2025-12-28T15:41:43.418Z" }, + { url = 
"https://files.pythonhosted.org/packages/fe/9b/b1ade8bfb653c0bbce2d6d6e90cc6c254cbb99b7248531cc76253cb4da6d/coverage-7.13.1-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:f83351e0f7dcdb14d7326c3d8d8c4e915fa685cbfdc6281f9470d97a04e9dfe4", size = 261296, upload-time = "2025-12-28T15:41:45.207Z" }, + { url = "https://files.pythonhosted.org/packages/1f/af/ebf91e3e1a2473d523e87e87fd8581e0aa08741b96265730e2d79ce78d8d/coverage-7.13.1-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:bb3f6562e89bad0110afbe64e485aac2462efdce6232cdec7862a095dc3412f6", size = 263363, upload-time = "2025-12-28T15:41:47.163Z" }, + { url = "https://files.pythonhosted.org/packages/c4/8b/fb2423526d446596624ac7fde12ea4262e66f86f5120114c3cfd0bb2befa/coverage-7.13.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:77545b5dcda13b70f872c3b5974ac64c21d05e65b1590b441c8560115dc3a0d1", size = 265783, upload-time = "2025-12-28T15:41:49.03Z" }, + { url = "https://files.pythonhosted.org/packages/9b/26/ef2adb1e22674913b89f0fe7490ecadcef4a71fa96f5ced90c60ec358789/coverage-7.13.1-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a4d240d260a1aed814790bbe1f10a5ff31ce6c21bc78f0da4a1e8268d6c80dbd", size = 260508, upload-time = "2025-12-28T15:41:51.035Z" }, + { url = "https://files.pythonhosted.org/packages/ce/7d/f0f59b3404caf662e7b5346247883887687c074ce67ba453ea08c612b1d5/coverage-7.13.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:d2287ac9360dec3837bfdad969963a5d073a09a85d898bd86bea82aa8876ef3c", size = 263357, upload-time = "2025-12-28T15:41:52.631Z" }, + { url = "https://files.pythonhosted.org/packages/1a/b1/29896492b0b1a047604d35d6fa804f12818fa30cdad660763a5f3159e158/coverage-7.13.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:0d2c11f3ea4db66b5cbded23b20185c35066892c67d80ec4be4bab257b9ad1e0", size = 260978, upload-time = 
"2025-12-28T15:41:54.589Z" }, + { url = "https://files.pythonhosted.org/packages/48/f2/971de1238a62e6f0a4128d37adadc8bb882ee96afbe03ff1570291754629/coverage-7.13.1-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:3fc6a169517ca0d7ca6846c3c5392ef2b9e38896f61d615cb75b9e7134d4ee1e", size = 259877, upload-time = "2025-12-28T15:41:56.263Z" }, + { url = "https://files.pythonhosted.org/packages/6a/fc/0474efcbb590ff8628830e9aaec5f1831594874360e3251f1fdec31d07a3/coverage-7.13.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:d10a2ed46386e850bb3de503a54f9fe8192e5917fcbb143bfef653a9355e9a53", size = 262069, upload-time = "2025-12-28T15:41:58.093Z" }, + { url = "https://files.pythonhosted.org/packages/88/4f/3c159b7953db37a7b44c0eab8a95c37d1aa4257c47b4602c04022d5cb975/coverage-7.13.1-cp313-cp313t-win32.whl", hash = "sha256:75a6f4aa904301dab8022397a22c0039edc1f51e90b83dbd4464b8a38dc87842", size = 222184, upload-time = "2025-12-28T15:41:59.763Z" }, + { url = "https://files.pythonhosted.org/packages/58/a5/6b57d28f81417f9335774f20679d9d13b9a8fb90cd6160957aa3b54a2379/coverage-7.13.1-cp313-cp313t-win_amd64.whl", hash = "sha256:309ef5706e95e62578cda256b97f5e097916a2c26247c287bbe74794e7150df2", size = 223250, upload-time = "2025-12-28T15:42:01.52Z" }, + { url = "https://files.pythonhosted.org/packages/81/7c/160796f3b035acfbb58be80e02e484548595aa67e16a6345e7910ace0a38/coverage-7.13.1-cp313-cp313t-win_arm64.whl", hash = "sha256:92f980729e79b5d16d221038dbf2e8f9a9136afa072f9d5d6ed4cb984b126a09", size = 221521, upload-time = "2025-12-28T15:42:03.275Z" }, + { url = "https://files.pythonhosted.org/packages/aa/8e/ba0e597560c6563fc0adb902fda6526df5d4aa73bb10adf0574d03bd2206/coverage-7.13.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:97ab3647280d458a1f9adb85244e81587505a43c0c7cff851f5116cd2814b894", size = 218996, upload-time = "2025-12-28T15:42:04.978Z" }, + { url = 
"https://files.pythonhosted.org/packages/6b/8e/764c6e116f4221dc7aa26c4061181ff92edb9c799adae6433d18eeba7a14/coverage-7.13.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:8f572d989142e0908e6acf57ad1b9b86989ff057c006d13b76c146ec6a20216a", size = 219326, upload-time = "2025-12-28T15:42:06.691Z" }, + { url = "https://files.pythonhosted.org/packages/4f/a6/6130dc6d8da28cdcbb0f2bf8865aeca9b157622f7c0031e48c6cf9a0e591/coverage-7.13.1-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d72140ccf8a147e94274024ff6fd8fb7811354cf7ef88b1f0a988ebaa5bc774f", size = 250374, upload-time = "2025-12-28T15:42:08.786Z" }, + { url = "https://files.pythonhosted.org/packages/82/2b/783ded568f7cd6b677762f780ad338bf4b4750205860c17c25f7c708995e/coverage-7.13.1-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:d3c9f051b028810f5a87c88e5d6e9af3c0ff32ef62763bf15d29f740453ca909", size = 252882, upload-time = "2025-12-28T15:42:10.515Z" }, + { url = "https://files.pythonhosted.org/packages/cd/b2/9808766d082e6a4d59eb0cc881a57fc1600eb2c5882813eefff8254f71b5/coverage-7.13.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f398ba4df52d30b1763f62eed9de5620dcde96e6f491f4c62686736b155aa6e4", size = 254218, upload-time = "2025-12-28T15:42:12.208Z" }, + { url = "https://files.pythonhosted.org/packages/44/ea/52a985bb447c871cb4d2e376e401116520991b597c85afdde1ea9ef54f2c/coverage-7.13.1-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:132718176cc723026d201e347f800cd1a9e4b62ccd3f82476950834dad501c75", size = 250391, upload-time = "2025-12-28T15:42:14.21Z" }, + { url = "https://files.pythonhosted.org/packages/7f/1d/125b36cc12310718873cfc8209ecfbc1008f14f4f5fa0662aa608e579353/coverage-7.13.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:9e549d642426e3579b3f4b92d0431543b012dcb6e825c91619d4e93b7363c3f9", size = 252239, upload-time = 
"2025-12-28T15:42:16.292Z" }, + { url = "https://files.pythonhosted.org/packages/6a/16/10c1c164950cade470107f9f14bbac8485f8fb8515f515fca53d337e4a7f/coverage-7.13.1-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:90480b2134999301eea795b3a9dbf606c6fbab1b489150c501da84a959442465", size = 250196, upload-time = "2025-12-28T15:42:18.54Z" }, + { url = "https://files.pythonhosted.org/packages/2a/c6/cd860fac08780c6fd659732f6ced1b40b79c35977c1356344e44d72ba6c4/coverage-7.13.1-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:e825dbb7f84dfa24663dd75835e7257f8882629fc11f03ecf77d84a75134b864", size = 250008, upload-time = "2025-12-28T15:42:20.365Z" }, + { url = "https://files.pythonhosted.org/packages/f0/3a/a8c58d3d38f82a5711e1e0a67268362af48e1a03df27c03072ac30feefcf/coverage-7.13.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:623dcc6d7a7ba450bbdbeedbaa0c42b329bdae16491af2282f12a7e809be7eb9", size = 251671, upload-time = "2025-12-28T15:42:22.114Z" }, + { url = "https://files.pythonhosted.org/packages/f0/bc/fd4c1da651d037a1e3d53e8cb3f8182f4b53271ffa9a95a2e211bacc0349/coverage-7.13.1-cp314-cp314-win32.whl", hash = "sha256:6e73ebb44dca5f708dc871fe0b90cf4cff1a13f9956f747cc87b535a840386f5", size = 221777, upload-time = "2025-12-28T15:42:23.919Z" }, + { url = "https://files.pythonhosted.org/packages/4b/50/71acabdc8948464c17e90b5ffd92358579bd0910732c2a1c9537d7536aa6/coverage-7.13.1-cp314-cp314-win_amd64.whl", hash = "sha256:be753b225d159feb397bd0bf91ae86f689bad0da09d3b301478cd39b878ab31a", size = 222592, upload-time = "2025-12-28T15:42:25.619Z" }, + { url = "https://files.pythonhosted.org/packages/f7/c8/a6fb943081bb0cc926499c7907731a6dc9efc2cbdc76d738c0ab752f1a32/coverage-7.13.1-cp314-cp314-win_arm64.whl", hash = "sha256:228b90f613b25ba0019361e4ab81520b343b622fc657daf7e501c4ed6a2366c0", size = 221169, upload-time = "2025-12-28T15:42:27.629Z" }, + { url = 
"https://files.pythonhosted.org/packages/16/61/d5b7a0a0e0e40d62e59bc8c7aa1afbd86280d82728ba97f0673b746b78e2/coverage-7.13.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:60cfb538fe9ef86e5b2ab0ca8fc8d62524777f6c611dcaf76dc16fbe9b8e698a", size = 219730, upload-time = "2025-12-28T15:42:29.306Z" }, + { url = "https://files.pythonhosted.org/packages/a3/2c/8881326445fd071bb49514d1ce97d18a46a980712b51fee84f9ab42845b4/coverage-7.13.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:57dfc8048c72ba48a8c45e188d811e5efd7e49b387effc8fb17e97936dde5bf6", size = 220001, upload-time = "2025-12-28T15:42:31.319Z" }, + { url = "https://files.pythonhosted.org/packages/b5/d7/50de63af51dfa3a7f91cc37ad8fcc1e244b734232fbc8b9ab0f3c834a5cd/coverage-7.13.1-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3f2f725aa3e909b3c5fdb8192490bdd8e1495e85906af74fe6e34a2a77ba0673", size = 261370, upload-time = "2025-12-28T15:42:32.992Z" }, + { url = "https://files.pythonhosted.org/packages/e1/2c/d31722f0ec918fd7453b2758312729f645978d212b410cd0f7c2aed88a94/coverage-7.13.1-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:9ee68b21909686eeb21dfcba2c3b81fee70dcf38b140dcd5aa70680995fa3aa5", size = 263485, upload-time = "2025-12-28T15:42:34.759Z" }, + { url = "https://files.pythonhosted.org/packages/fa/7a/2c114fa5c5fc08ba0777e4aec4c97e0b4a1afcb69c75f1f54cff78b073ab/coverage-7.13.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:724b1b270cb13ea2e6503476e34541a0b1f62280bc997eab443f87790202033d", size = 265890, upload-time = "2025-12-28T15:42:36.517Z" }, + { url = "https://files.pythonhosted.org/packages/65/d9/f0794aa1c74ceabc780fe17f6c338456bbc4e96bd950f2e969f48ac6fb20/coverage-7.13.1-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:916abf1ac5cf7eb16bc540a5bf75c71c43a676f5c52fcb9fe75a2bd75fb944e8", size = 260445, upload-time = 
"2025-12-28T15:42:38.646Z" }, + { url = "https://files.pythonhosted.org/packages/49/23/184b22a00d9bb97488863ced9454068c79e413cb23f472da6cbddc6cfc52/coverage-7.13.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:776483fd35b58d8afe3acbd9988d5de592ab6da2d2a865edfdbc9fdb43e7c486", size = 263357, upload-time = "2025-12-28T15:42:40.788Z" }, + { url = "https://files.pythonhosted.org/packages/7d/bd/58af54c0c9199ea4190284f389005779d7daf7bf3ce40dcd2d2b2f96da69/coverage-7.13.1-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:b6f3b96617e9852703f5b633ea01315ca45c77e879584f283c44127f0f1ec564", size = 260959, upload-time = "2025-12-28T15:42:42.808Z" }, + { url = "https://files.pythonhosted.org/packages/4b/2a/6839294e8f78a4891bf1df79d69c536880ba2f970d0ff09e7513d6e352e9/coverage-7.13.1-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:bd63e7b74661fed317212fab774e2a648bc4bb09b35f25474f8e3325d2945cd7", size = 259792, upload-time = "2025-12-28T15:42:44.818Z" }, + { url = "https://files.pythonhosted.org/packages/ba/c3/528674d4623283310ad676c5af7414b9850ab6d55c2300e8aa4b945ec554/coverage-7.13.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:933082f161bbb3e9f90d00990dc956120f608cdbcaeea15c4d897f56ef4fe416", size = 262123, upload-time = "2025-12-28T15:42:47.108Z" }, + { url = "https://files.pythonhosted.org/packages/06/c5/8c0515692fb4c73ac379d8dc09b18eaf0214ecb76ea6e62467ba7a1556ff/coverage-7.13.1-cp314-cp314t-win32.whl", hash = "sha256:18be793c4c87de2965e1c0f060f03d9e5aff66cfeae8e1dbe6e5b88056ec153f", size = 222562, upload-time = "2025-12-28T15:42:49.144Z" }, + { url = "https://files.pythonhosted.org/packages/05/0e/c0a0c4678cb30dac735811db529b321d7e1c9120b79bd728d4f4d6b010e9/coverage-7.13.1-cp314-cp314t-win_amd64.whl", hash = "sha256:0e42e0ec0cd3e0d851cb3c91f770c9301f48647cb2877cb78f74bdaa07639a79", size = 223670, upload-time = "2025-12-28T15:42:51.218Z" }, + { url = 
"https://files.pythonhosted.org/packages/f5/5f/b177aa0011f354abf03a8f30a85032686d290fdeed4222b27d36b4372a50/coverage-7.13.1-cp314-cp314t-win_arm64.whl", hash = "sha256:eaecf47ef10c72ece9a2a92118257da87e460e113b83cc0d2905cbbe931792b4", size = 221707, upload-time = "2025-12-28T15:42:53.034Z" }, + { url = "https://files.pythonhosted.org/packages/cc/48/d9f421cb8da5afaa1a64570d9989e00fb7955e6acddc5a12979f7666ef60/coverage-7.13.1-py3-none-any.whl", hash = "sha256:2016745cb3ba554469d02819d78958b571792bb68e31302610e898f80dd3a573", size = 210722, upload-time = "2025-12-28T15:42:54.901Z" }, ] [package.optional-dependencies] @@ -1906,7 +1942,7 @@ wheels = [ [[package]] name = "fastapi" -version = "0.125.0" +version = "0.128.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "annotated-doc", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -1914,9 +1950,9 @@ dependencies = [ { name = "starlette", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "typing-extensions", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/17/71/2df15009fb4bdd522a069d2fbca6007c6c5487fce5cb965be00fc335f1d1/fastapi-0.125.0.tar.gz", hash = "sha256:16b532691a33e2c5dee1dac32feb31dc6eb41a3dd4ff29a95f9487cb21c054c0", size = 370550, upload-time = "2025-12-17T21:41:44.15Z" } +sdist = { url = "https://files.pythonhosted.org/packages/52/08/8c8508db6c7b9aae8f7175046af41baad690771c9bcde676419965e338c7/fastapi-0.128.0.tar.gz", hash = "sha256:1cc179e1cef10a6be60ffe429f79b829dce99d8de32d7acb7e6c8dfdf7f2645a", size = 365682, upload-time = "2025-12-27T15:21:13.714Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/34/2f/ff2fcc98f500713368d8b650e1bbc4a0b3ebcdd3e050dcdaad5f5a13fd7e/fastapi-0.125.0-py3-none-any.whl", hash = 
"sha256:2570ec4f3aecf5cca8f0428aed2398b774fcdfee6c2116f86e80513f2f86a7a1", size = 112888, upload-time = "2025-12-17T21:41:41.286Z" }, + { url = "https://files.pythonhosted.org/packages/5c/05/5cbb59154b093548acd0f4c7c474a118eda06da25aa75c616b72d8fcd92a/fastapi-0.128.0-py3-none-any.whl", hash = "sha256:aebd93f9716ee3b4f4fcfe13ffb7cf308d99c9f3ab5622d8877441072561582d", size = 103094, upload-time = "2025-12-27T15:21:12.154Z" }, ] [[package]] @@ -1999,11 +2035,11 @@ wheels = [ [[package]] name = "filelock" -version = "3.20.1" +version = "3.20.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a7/23/ce7a1126827cedeb958fc043d61745754464eb56c5937c35bbf2b8e26f34/filelock-3.20.1.tar.gz", hash = "sha256:b8360948b351b80f420878d8516519a2204b07aefcdcfd24912a5d33127f188c", size = 19476, upload-time = "2025-12-15T23:54:28.027Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c1/e0/a75dbe4bca1e7d41307323dad5ea2efdd95408f74ab2de8bd7dba9b51a1a/filelock-3.20.2.tar.gz", hash = "sha256:a2241ff4ddde2a7cebddf78e39832509cb045d18ec1a09d7248d6bfc6bfbbe64", size = 19510, upload-time = "2026-01-02T15:33:32.582Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e3/7f/a1a97644e39e7316d850784c642093c99df1290a460df4ede27659056834/filelock-3.20.1-py3-none-any.whl", hash = "sha256:15d9e9a67306188a44baa72f569d2bfd803076269365fdea0934385da4dc361a", size = 16666, upload-time = "2025-12-15T23:54:26.874Z" }, + { url = "https://files.pythonhosted.org/packages/9a/30/ab407e2ec752aa541704ed8f93c11e2a5d92c168b8a755d818b74a3c5c2d/filelock-3.20.2-py3-none-any.whl", hash = "sha256:fbba7237d6ea277175a32c54bb71ef814a8546d8601269e1bfc388de333974e8", size = 16697, upload-time = "2026-01-02T15:33:31.133Z" }, ] [[package]] @@ -2105,6 +2141,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/c7/4e/ce75a57ff3aebf6fc1f4e9d508b8e5810618a33d900ad6c19eb30b290b97/fonttools-4.61.1-py3-none-any.whl", hash = 
"sha256:17d2bf5d541add43822bcf0c43d7d847b160c9bb01d15d5007d84e2217aaa371", size = 1148996, upload-time = "2025-12-12T17:31:21.03Z" }, ] +[[package]] +name = "foundry-local-sdk" +version = "0.5.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "httpx", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "pydantic", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "tqdm", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/ed/6b/76a7fe8f9f4c52cc84eaa1cd1b66acddf993496d55d6ea587bf0d0854d1c/foundry_local_sdk-0.5.1-py3-none-any.whl", hash = "sha256:f3639a3666bc3a94410004a91671338910ac2e1b8094b1587cc4db0f4a7df07e", size = 14003, upload-time = "2025-11-21T05:39:58.099Z" }, +] + [[package]] name = "frozenlist" version = "1.8.0" @@ -2686,14 +2735,14 @@ wheels = [ [[package]] name = "importlib-metadata" -version = "8.7.0" +version = "8.7.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "zipp", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/76/66/650a33bd90f786193e4de4b3ad86ea60b53c89b669a5c7be931fac31cdb0/importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000", size = 56641, upload-time = "2025-04-27T15:29:01.736Z" } +sdist = { url = "https://files.pythonhosted.org/packages/f3/49/3b30cad09e7771a4982d9975a8cbf64f00d4a1ececb53297f1d9a7be1b10/importlib_metadata-8.7.1.tar.gz", hash = "sha256:49fef1ae6440c182052f407c8d34a68f72efc36db9ca90dc0113398f2fdde8bb", size = 57107, upload-time = "2025-12-21T10:00:19.278Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/20/b0/36bd937216ec521246249be3bf9855081de4c5e06a0c9b4219dbeda50373/importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd", size = 27656, upload-time = "2025-04-27T15:29:00.214Z" }, + { url = "https://files.pythonhosted.org/packages/fa/5e/f8e9a1d23b9c20a551a8a02ea3637b4642e22c2626e3a13a9a29cdea99eb/importlib_metadata-8.7.1-py3-none-any.whl", hash = "sha256:5a1f80bf1daa489495071efbb095d75a634cf28a8bc299581244063b53176151", size = 27865, upload-time = "2025-12-21T10:00:18.329Z" }, ] [[package]] @@ -3008,7 +3057,7 @@ wheels = [ [[package]] name = "langfuse" -version = "3.11.1" +version = "3.11.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "backoff", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -3022,87 +3071,87 @@ dependencies = [ { name = "requests", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "wrapt", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/70/a4/f7c5919a1e7c26904dd0caa52dc90b75e616d94bece157429169ffce264a/langfuse-3.11.1.tar.gz", hash = "sha256:52bdb5bae2bb7c2add22777a0f88a1a5c96f90ec994935b773992153e57e94f8", size = 230854, upload-time = "2025-12-19T14:31:11.372Z" } +sdist = { url = "https://files.pythonhosted.org/packages/71/10/6b28f3b2c008b1f48478c4f45ceb956dfcc951910f5896b3fe44c20174db/langfuse-3.11.2.tar.gz", hash = "sha256:ab5f296a8056815b7288c7f25bc308a5e79f82a8634467b25daffdde99276e09", size = 230795, upload-time = "2025-12-23T20:42:57.177Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/3a/ff/256e5814227373179e6c70c05ecead72b19dcda3cd2e0004bd643f64c70e/langfuse-3.11.1-py3-none-any.whl", hash = "sha256:f489c97fb2231b14e75383100158cdd6a158b87c1e9c9f96b2cdcbc015c48319", size = 413776, 
upload-time = "2025-12-19T14:31:10.166Z" }, + { url = "https://files.pythonhosted.org/packages/e9/04/95407023b786ed2eef1e2cd220f5baf7b1dd70d88645af129cc1fd1da867/langfuse-3.11.2-py3-none-any.whl", hash = "sha256:84faea9f909694023cc7f0eb45696be190248c8790424f22af57ca4cd7a29f2d", size = 413786, upload-time = "2025-12-23T20:42:55.48Z" }, ] [[package]] name = "librt" -version = "0.7.4" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/93/e4/b59bdf1197fdf9888452ea4d2048cdad61aef85eb83e99dc52551d7fdc04/librt-0.7.4.tar.gz", hash = "sha256:3871af56c59864d5fd21d1ac001eb2fb3b140d52ba0454720f2e4a19812404ba", size = 145862, upload-time = "2025-12-15T16:52:43.862Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/06/1e/3e61dff6c07a3b400fe907d3164b92b3b3023ef86eac1ee236869dc276f7/librt-0.7.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dc300cb5a5a01947b1ee8099233156fdccd5001739e5f596ecfbc0dab07b5a3b", size = 54708, upload-time = "2025-12-15T16:51:03.752Z" }, - { url = "https://files.pythonhosted.org/packages/87/98/ab2428b0a80d0fd67decaeea84a5ec920e3dd4d95ecfd074c71f51bd7315/librt-0.7.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ee8d3323d921e0f6919918a97f9b5445a7dfe647270b2629ec1008aa676c0bc0", size = 56656, upload-time = "2025-12-15T16:51:05.038Z" }, - { url = "https://files.pythonhosted.org/packages/c1/ce/de1fad3a16e4fb5b6605bd6cbe6d0e5207cc8eca58993835749a1da0812b/librt-0.7.4-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:95cb80854a355b284c55f79674f6187cc9574df4dc362524e0cce98c89ee8331", size = 161024, upload-time = "2025-12-15T16:51:06.31Z" }, - { url = "https://files.pythonhosted.org/packages/88/00/ddfcdc1147dd7fb68321d7b064b12f0b9101d85f466a46006f86096fde8d/librt-0.7.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3ca1caedf8331d8ad6027f93b52d68ed8f8009f5c420c246a46fe9d3be06be0f", size = 
169529, upload-time = "2025-12-15T16:51:07.907Z" }, - { url = "https://files.pythonhosted.org/packages/dd/b3/915702c7077df2483b015030d1979404474f490fe9a071e9576f7b26fef6/librt-0.7.4-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c2a6f1236151e6fe1da289351b5b5bce49651c91554ecc7b70a947bced6fe212", size = 183270, upload-time = "2025-12-15T16:51:09.164Z" }, - { url = "https://files.pythonhosted.org/packages/45/19/ab2f217e8ec509fca4ea9e2e5022b9f72c1a7b7195f5a5770d299df807ea/librt-0.7.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7766b57aeebaf3f1dac14fdd4a75c9a61f2ed56d8ebeefe4189db1cb9d2a3783", size = 179038, upload-time = "2025-12-15T16:51:10.538Z" }, - { url = "https://files.pythonhosted.org/packages/10/1c/d40851d187662cf50312ebbc0b277c7478dd78dbaaf5ee94056f1d7f2f83/librt-0.7.4-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:1c4c89fb01157dd0a3bfe9e75cd6253b0a1678922befcd664eca0772a4c6c979", size = 173502, upload-time = "2025-12-15T16:51:11.888Z" }, - { url = "https://files.pythonhosted.org/packages/07/52/d5880835c772b22c38db18660420fa6901fd9e9a433b65f0ba9b0f4da764/librt-0.7.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f7fa8beef580091c02b4fd26542de046b2abfe0aaefa02e8bcf68acb7618f2b3", size = 193570, upload-time = "2025-12-15T16:51:13.168Z" }, - { url = "https://files.pythonhosted.org/packages/f1/35/22d3c424b82f86ce019c0addadf001d459dfac8036aecc07fadc5c541053/librt-0.7.4-cp310-cp310-win32.whl", hash = "sha256:543c42fa242faae0466fe72d297976f3c710a357a219b1efde3a0539a68a6997", size = 42596, upload-time = "2025-12-15T16:51:14.422Z" }, - { url = "https://files.pythonhosted.org/packages/95/b1/e7c316ac5fe60ac1fdfe515198087205220803c4cf923ee63e1cb8380b17/librt-0.7.4-cp310-cp310-win_amd64.whl", hash = "sha256:25cc40d8eb63f0a7ea4c8f49f524989b9df901969cb860a2bc0e4bad4b8cb8a8", size = 48972, upload-time = "2025-12-15T16:51:15.516Z" }, - { url = 
"https://files.pythonhosted.org/packages/84/64/44089b12d8b4714a7f0e2f33fb19285ba87702d4be0829f20b36ebeeee07/librt-0.7.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3485b9bb7dfa66167d5500ffdafdc35415b45f0da06c75eb7df131f3357b174a", size = 54709, upload-time = "2025-12-15T16:51:16.699Z" }, - { url = "https://files.pythonhosted.org/packages/26/ef/6fa39fb5f37002f7d25e0da4f24d41b457582beea9369eeb7e9e73db5508/librt-0.7.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:188b4b1a770f7f95ea035d5bbb9d7367248fc9d12321deef78a269ebf46a5729", size = 56663, upload-time = "2025-12-15T16:51:17.856Z" }, - { url = "https://files.pythonhosted.org/packages/9d/e4/cbaca170a13bee2469c90df9e47108610b4422c453aea1aec1779ac36c24/librt-0.7.4-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:1b668b1c840183e4e38ed5a99f62fac44c3a3eef16870f7f17cfdfb8b47550ed", size = 161703, upload-time = "2025-12-15T16:51:19.421Z" }, - { url = "https://files.pythonhosted.org/packages/d0/32/0b2296f9cc7e693ab0d0835e355863512e5eac90450c412777bd699c76ae/librt-0.7.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0e8f864b521f6cfedb314d171630f827efee08f5c3462bcbc2244ab8e1768cd6", size = 171027, upload-time = "2025-12-15T16:51:20.721Z" }, - { url = "https://files.pythonhosted.org/packages/d8/33/c70b6d40f7342716e5f1353c8da92d9e32708a18cbfa44897a93ec2bf879/librt-0.7.4-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4df7c9def4fc619a9c2ab402d73a0c5b53899abe090e0100323b13ccb5a3dd82", size = 184700, upload-time = "2025-12-15T16:51:22.272Z" }, - { url = "https://files.pythonhosted.org/packages/e4/c8/555c405155da210e4c4113a879d378f54f850dbc7b794e847750a8fadd43/librt-0.7.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:f79bc3595b6ed159a1bf0cdc70ed6ebec393a874565cab7088a219cca14da727", size = 180719, upload-time = "2025-12-15T16:51:23.561Z" }, - { url = 
"https://files.pythonhosted.org/packages/6b/88/34dc1f1461c5613d1b73f0ecafc5316cc50adcc1b334435985b752ed53e5/librt-0.7.4-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:77772a4b8b5f77d47d883846928c36d730b6e612a6388c74cba33ad9eb149c11", size = 174535, upload-time = "2025-12-15T16:51:25.031Z" }, - { url = "https://files.pythonhosted.org/packages/b6/5a/f3fafe80a221626bcedfa9fe5abbf5f04070989d44782f579b2d5920d6d0/librt-0.7.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:064a286e6ab0b4c900e228ab4fa9cb3811b4b83d3e0cc5cd816b2d0f548cb61c", size = 195236, upload-time = "2025-12-15T16:51:26.328Z" }, - { url = "https://files.pythonhosted.org/packages/d8/77/5c048d471ce17f4c3a6e08419be19add4d291e2f7067b877437d482622ac/librt-0.7.4-cp311-cp311-win32.whl", hash = "sha256:42da201c47c77b6cc91fc17e0e2b330154428d35d6024f3278aa2683e7e2daf2", size = 42930, upload-time = "2025-12-15T16:51:27.853Z" }, - { url = "https://files.pythonhosted.org/packages/fb/3b/514a86305a12c3d9eac03e424b07cd312c7343a9f8a52719aa079590a552/librt-0.7.4-cp311-cp311-win_amd64.whl", hash = "sha256:d31acb5886c16ae1711741f22504195af46edec8315fe69b77e477682a87a83e", size = 49240, upload-time = "2025-12-15T16:51:29.037Z" }, - { url = "https://files.pythonhosted.org/packages/ba/01/3b7b1914f565926b780a734fac6e9a4d2c7aefe41f4e89357d73697a9457/librt-0.7.4-cp311-cp311-win_arm64.whl", hash = "sha256:114722f35093da080a333b3834fff04ef43147577ed99dd4db574b03a5f7d170", size = 42613, upload-time = "2025-12-15T16:51:30.194Z" }, - { url = "https://files.pythonhosted.org/packages/f3/e7/b805d868d21f425b7e76a0ea71a2700290f2266a4f3c8357fcf73efc36aa/librt-0.7.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7dd3b5c37e0fb6666c27cf4e2c88ae43da904f2155c4cfc1e5a2fdce3b9fcf92", size = 55688, upload-time = "2025-12-15T16:51:31.571Z" }, - { url = "https://files.pythonhosted.org/packages/59/5e/69a2b02e62a14cfd5bfd9f1e9adea294d5bcfeea219c7555730e5d068ee4/librt-0.7.4-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:a9c5de1928c486201b23ed0cc4ac92e6e07be5cd7f3abc57c88a9cf4f0f32108", size = 57141, upload-time = "2025-12-15T16:51:32.714Z" }, - { url = "https://files.pythonhosted.org/packages/6e/6b/05dba608aae1272b8ea5ff8ef12c47a4a099a04d1e00e28a94687261d403/librt-0.7.4-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:078ae52ffb3f036396cc4aed558e5b61faedd504a3c1f62b8ae34bf95ae39d94", size = 165322, upload-time = "2025-12-15T16:51:33.986Z" }, - { url = "https://files.pythonhosted.org/packages/8f/bc/199533d3fc04a4cda8d7776ee0d79955ab0c64c79ca079366fbc2617e680/librt-0.7.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce58420e25097b2fc201aef9b9f6d65df1eb8438e51154e1a7feb8847e4a55ab", size = 174216, upload-time = "2025-12-15T16:51:35.384Z" }, - { url = "https://files.pythonhosted.org/packages/62/ec/09239b912a45a8ed117cb4a6616d9ff508f5d3131bd84329bf2f8d6564f1/librt-0.7.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b719c8730c02a606dc0e8413287e8e94ac2d32a51153b300baf1f62347858fba", size = 189005, upload-time = "2025-12-15T16:51:36.687Z" }, - { url = "https://files.pythonhosted.org/packages/46/2e/e188313d54c02f5b0580dd31476bb4b0177514ff8d2be9f58d4a6dc3a7ba/librt-0.7.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3749ef74c170809e6dee68addec9d2458700a8de703de081c888e92a8b015cf9", size = 183960, upload-time = "2025-12-15T16:51:37.977Z" }, - { url = "https://files.pythonhosted.org/packages/eb/84/f1d568d254518463d879161d3737b784137d236075215e56c7c9be191cee/librt-0.7.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b35c63f557653c05b5b1b6559a074dbabe0afee28ee2a05b6c9ba21ad0d16a74", size = 177609, upload-time = "2025-12-15T16:51:40.584Z" }, - { url = "https://files.pythonhosted.org/packages/5d/43/060bbc1c002f0d757c33a1afe6bf6a565f947a04841139508fc7cef6c08b/librt-0.7.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:1ef704e01cb6ad39ad7af668d51677557ca7e5d377663286f0ee1b6b27c28e5f", size = 199269, upload-time = "2025-12-15T16:51:41.879Z" }, - { url = "https://files.pythonhosted.org/packages/ff/7f/708f8f02d8012ee9f366c07ea6a92882f48bd06cc1ff16a35e13d0fbfb08/librt-0.7.4-cp312-cp312-win32.whl", hash = "sha256:c66c2b245926ec15188aead25d395091cb5c9df008d3b3207268cd65557d6286", size = 43186, upload-time = "2025-12-15T16:51:43.149Z" }, - { url = "https://files.pythonhosted.org/packages/f1/a5/4e051b061c8b2509be31b2c7ad4682090502c0a8b6406edcf8c6b4fe1ef7/librt-0.7.4-cp312-cp312-win_amd64.whl", hash = "sha256:71a56f4671f7ff723451f26a6131754d7c1809e04e22ebfbac1db8c9e6767a20", size = 49455, upload-time = "2025-12-15T16:51:44.336Z" }, - { url = "https://files.pythonhosted.org/packages/d0/d2/90d84e9f919224a3c1f393af1636d8638f54925fdc6cd5ee47f1548461e5/librt-0.7.4-cp312-cp312-win_arm64.whl", hash = "sha256:419eea245e7ec0fe664eb7e85e7ff97dcdb2513ca4f6b45a8ec4a3346904f95a", size = 42828, upload-time = "2025-12-15T16:51:45.498Z" }, - { url = "https://files.pythonhosted.org/packages/fe/4d/46a53ccfbb39fd0b493fd4496eb76f3ebc15bb3e45d8c2e695a27587edf5/librt-0.7.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d44a1b1ba44cbd2fc3cb77992bef6d6fdb1028849824e1dd5e4d746e1f7f7f0b", size = 55745, upload-time = "2025-12-15T16:51:46.636Z" }, - { url = "https://files.pythonhosted.org/packages/7f/2b/3ac7f5212b1828bf4f979cf87f547db948d3e28421d7a430d4db23346ce4/librt-0.7.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c9cab4b3de1f55e6c30a84c8cee20e4d3b2476f4d547256694a1b0163da4fe32", size = 57166, upload-time = "2025-12-15T16:51:48.219Z" }, - { url = "https://files.pythonhosted.org/packages/e8/99/6523509097cbe25f363795f0c0d1c6a3746e30c2994e25b5aefdab119b21/librt-0.7.4-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:2857c875f1edd1feef3c371fbf830a61b632fb4d1e57160bb1e6a3206e6abe67", size = 165833, upload-time = "2025-12-15T16:51:49.443Z" }, - { url = 
"https://files.pythonhosted.org/packages/fe/35/323611e59f8fe032649b4fb7e77f746f96eb7588fcbb31af26bae9630571/librt-0.7.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b370a77be0a16e1ad0270822c12c21462dc40496e891d3b0caf1617c8cc57e20", size = 174818, upload-time = "2025-12-15T16:51:51.015Z" }, - { url = "https://files.pythonhosted.org/packages/41/e6/40fb2bb21616c6e06b6a64022802228066e9a31618f493e03f6b9661548a/librt-0.7.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d05acd46b9a52087bfc50c59dfdf96a2c480a601e8898a44821c7fd676598f74", size = 189607, upload-time = "2025-12-15T16:51:52.671Z" }, - { url = "https://files.pythonhosted.org/packages/32/48/1b47c7d5d28b775941e739ed2bfe564b091c49201b9503514d69e4ed96d7/librt-0.7.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:70969229cb23d9c1a80e14225838d56e464dc71fa34c8342c954fc50e7516dee", size = 184585, upload-time = "2025-12-15T16:51:54.027Z" }, - { url = "https://files.pythonhosted.org/packages/75/a6/ee135dfb5d3b54d5d9001dbe483806229c6beac3ee2ba1092582b7efeb1b/librt-0.7.4-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4450c354b89dbb266730893862dbff06006c9ed5b06b6016d529b2bf644fc681", size = 178249, upload-time = "2025-12-15T16:51:55.248Z" }, - { url = "https://files.pythonhosted.org/packages/04/87/d5b84ec997338be26af982bcd6679be0c1db9a32faadab1cf4bb24f9e992/librt-0.7.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:adefe0d48ad35b90b6f361f6ff5a1bd95af80c17d18619c093c60a20e7a5b60c", size = 199851, upload-time = "2025-12-15T16:51:56.933Z" }, - { url = "https://files.pythonhosted.org/packages/86/63/ba1333bf48306fe398e3392a7427ce527f81b0b79d0d91618c4610ce9d15/librt-0.7.4-cp313-cp313-win32.whl", hash = "sha256:21ea710e96c1e050635700695095962a22ea420d4b3755a25e4909f2172b4ff2", size = 43249, upload-time = "2025-12-15T16:51:58.498Z" }, - { url = 
"https://files.pythonhosted.org/packages/f9/8a/de2c6df06cdfa9308c080e6b060fe192790b6a48a47320b215e860f0e98c/librt-0.7.4-cp313-cp313-win_amd64.whl", hash = "sha256:772e18696cf5a64afee908662fbcb1f907460ddc851336ee3a848ef7684c8e1e", size = 49417, upload-time = "2025-12-15T16:51:59.618Z" }, - { url = "https://files.pythonhosted.org/packages/31/66/8ee0949efc389691381ed686185e43536c20e7ad880c122dd1f31e65c658/librt-0.7.4-cp313-cp313-win_arm64.whl", hash = "sha256:52e34c6af84e12921748c8354aa6acf1912ca98ba60cdaa6920e34793f1a0788", size = 42824, upload-time = "2025-12-15T16:52:00.784Z" }, - { url = "https://files.pythonhosted.org/packages/74/81/6921e65c8708eb6636bbf383aa77e6c7dad33a598ed3b50c313306a2da9d/librt-0.7.4-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:4f1ee004942eaaed6e06c087d93ebc1c67e9a293e5f6b9b5da558df6bf23dc5d", size = 55191, upload-time = "2025-12-15T16:52:01.97Z" }, - { url = "https://files.pythonhosted.org/packages/0d/d6/3eb864af8a8de8b39cc8dd2e9ded1823979a27795d72c4eea0afa8c26c9f/librt-0.7.4-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:d854c6dc0f689bad7ed452d2a3ecff58029d80612d336a45b62c35e917f42d23", size = 56898, upload-time = "2025-12-15T16:52:03.356Z" }, - { url = "https://files.pythonhosted.org/packages/49/bc/b1d4c0711fdf79646225d576faee8747b8528a6ec1ceb6accfd89ade7102/librt-0.7.4-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:a4f7339d9e445280f23d63dea842c0c77379c4a47471c538fc8feedab9d8d063", size = 163725, upload-time = "2025-12-15T16:52:04.572Z" }, - { url = "https://files.pythonhosted.org/packages/2c/08/61c41cd8f0a6a41fc99ea78a2205b88187e45ba9800792410ed62f033584/librt-0.7.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:39003fc73f925e684f8521b2dbf34f61a5deb8a20a15dcf53e0d823190ce8848", size = 172469, upload-time = "2025-12-15T16:52:05.863Z" }, - { url = 
"https://files.pythonhosted.org/packages/8b/c7/4ee18b4d57f01444230bc18cf59103aeab8f8c0f45e84e0e540094df1df1/librt-0.7.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6bb15ee29d95875ad697d449fe6071b67f730f15a6961913a2b0205015ca0843", size = 186804, upload-time = "2025-12-15T16:52:07.192Z" }, - { url = "https://files.pythonhosted.org/packages/a1/af/009e8ba3fbf830c936842da048eda1b34b99329f402e49d88fafff6525d1/librt-0.7.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:02a69369862099e37d00765583052a99d6a68af7e19b887e1b78fee0146b755a", size = 181807, upload-time = "2025-12-15T16:52:08.554Z" }, - { url = "https://files.pythonhosted.org/packages/85/26/51ae25f813656a8b117c27a974f25e8c1e90abcd5a791ac685bf5b489a1b/librt-0.7.4-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:ec72342cc4d62f38b25a94e28b9efefce41839aecdecf5e9627473ed04b7be16", size = 175595, upload-time = "2025-12-15T16:52:10.186Z" }, - { url = "https://files.pythonhosted.org/packages/48/93/36d6c71f830305f88996b15c8e017aa8d1e03e2e947b40b55bbf1a34cf24/librt-0.7.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:776dbb9bfa0fc5ce64234b446995d8d9f04badf64f544ca036bd6cff6f0732ce", size = 196504, upload-time = "2025-12-15T16:52:11.472Z" }, - { url = "https://files.pythonhosted.org/packages/08/11/8299e70862bb9d704735bf132c6be09c17b00fbc7cda0429a9df222fdc1b/librt-0.7.4-cp314-cp314-win32.whl", hash = "sha256:0f8cac84196d0ffcadf8469d9ded4d4e3a8b1c666095c2a291e22bf58e1e8a9f", size = 39738, upload-time = "2025-12-15T16:52:12.962Z" }, - { url = "https://files.pythonhosted.org/packages/54/d5/656b0126e4e0f8e2725cd2d2a1ec40f71f37f6f03f135a26b663c0e1a737/librt-0.7.4-cp314-cp314-win_amd64.whl", hash = "sha256:037f5cb6fe5abe23f1dc058054d50e9699fcc90d0677eee4e4f74a8677636a1a", size = 45976, upload-time = "2025-12-15T16:52:14.441Z" }, - { url = 
"https://files.pythonhosted.org/packages/60/86/465ff07b75c1067da8fa7f02913c4ead096ef106cfac97a977f763783bfb/librt-0.7.4-cp314-cp314-win_arm64.whl", hash = "sha256:a5deebb53d7a4d7e2e758a96befcd8edaaca0633ae71857995a0f16033289e44", size = 39073, upload-time = "2025-12-15T16:52:15.621Z" }, - { url = "https://files.pythonhosted.org/packages/b3/a0/24941f85960774a80d4b3c2aec651d7d980466da8101cae89e8b032a3e21/librt-0.7.4-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:b4c25312c7f4e6ab35ab16211bdf819e6e4eddcba3b2ea632fb51c9a2a97e105", size = 57369, upload-time = "2025-12-15T16:52:16.782Z" }, - { url = "https://files.pythonhosted.org/packages/77/a0/ddb259cae86ab415786c1547d0fe1b40f04a7b089f564fd5c0242a3fafb2/librt-0.7.4-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:618b7459bb392bdf373f2327e477597fff8f9e6a1878fffc1b711c013d1b0da4", size = 59230, upload-time = "2025-12-15T16:52:18.259Z" }, - { url = "https://files.pythonhosted.org/packages/31/11/77823cb530ab8a0c6fac848ac65b745be446f6f301753b8990e8809080c9/librt-0.7.4-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:1437c3f72a30c7047f16fd3e972ea58b90172c3c6ca309645c1c68984f05526a", size = 183869, upload-time = "2025-12-15T16:52:19.457Z" }, - { url = "https://files.pythonhosted.org/packages/a4/ce/157db3614cf3034b3f702ae5ba4fefda4686f11eea4b7b96542324a7a0e7/librt-0.7.4-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c96cb76f055b33308f6858b9b594618f1b46e147a4d03a4d7f0c449e304b9b95", size = 194606, upload-time = "2025-12-15T16:52:20.795Z" }, - { url = "https://files.pythonhosted.org/packages/30/ef/6ec4c7e3d6490f69a4fd2803516fa5334a848a4173eac26d8ee6507bff6e/librt-0.7.4-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:28f990e6821204f516d09dc39966ef8b84556ffd648d5926c9a3f681e8de8906", size = 206776, upload-time = "2025-12-15T16:52:22.229Z" }, - { url = 
"https://files.pythonhosted.org/packages/ad/22/750b37bf549f60a4782ab80e9d1e9c44981374ab79a7ea68670159905918/librt-0.7.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:bc4aebecc79781a1b77d7d4e7d9fe080385a439e198d993b557b60f9117addaf", size = 203205, upload-time = "2025-12-15T16:52:23.603Z" }, - { url = "https://files.pythonhosted.org/packages/7a/87/2e8a0f584412a93df5faad46c5fa0a6825fdb5eba2ce482074b114877f44/librt-0.7.4-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:022cc673e69283a42621dd453e2407cf1647e77f8bd857d7ad7499901e62376f", size = 196696, upload-time = "2025-12-15T16:52:24.951Z" }, - { url = "https://files.pythonhosted.org/packages/e5/ca/7bf78fa950e43b564b7de52ceeb477fb211a11f5733227efa1591d05a307/librt-0.7.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:2b3ca211ae8ea540569e9c513da052699b7b06928dcda61247cb4f318122bdb5", size = 217191, upload-time = "2025-12-15T16:52:26.194Z" }, - { url = "https://files.pythonhosted.org/packages/d6/49/3732b0e8424ae35ad5c3166d9dd5bcdae43ce98775e0867a716ff5868064/librt-0.7.4-cp314-cp314t-win32.whl", hash = "sha256:8a461f6456981d8c8e971ff5a55f2e34f4e60871e665d2f5fde23ee74dea4eeb", size = 40276, upload-time = "2025-12-15T16:52:27.54Z" }, - { url = "https://files.pythonhosted.org/packages/35/d6/d8823e01bd069934525fddb343189c008b39828a429b473fb20d67d5cd36/librt-0.7.4-cp314-cp314t-win_amd64.whl", hash = "sha256:721a7b125a817d60bf4924e1eec2a7867bfcf64cfc333045de1df7a0629e4481", size = 46772, upload-time = "2025-12-15T16:52:28.653Z" }, - { url = "https://files.pythonhosted.org/packages/36/e9/a0aa60f5322814dd084a89614e9e31139702e342f8459ad8af1984a18168/librt-0.7.4-cp314-cp314t-win_arm64.whl", hash = "sha256:76b2ba71265c0102d11458879b4d53ccd0b32b0164d14deb8d2b598a018e502f", size = 39724, upload-time = "2025-12-15T16:52:29.836Z" }, +version = "0.7.7" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/b7/29/47f29026ca17f35cf299290292d5f8331f5077364974b7675a353179afa2/librt-0.7.7.tar.gz", hash = "sha256:81d957b069fed1890953c3b9c3895c7689960f233eea9a1d9607f71ce7f00b2c", size = 145910, upload-time = "2026-01-01T23:52:22.87Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/84/2cfb1f3b9b60bab52e16a220c931223fc8e963d0d7bb9132bef012aafc3f/librt-0.7.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e4836c5645f40fbdc275e5670819bde5ab5f2e882290d304e3c6ddab1576a6d0", size = 54709, upload-time = "2026-01-01T23:50:48.326Z" }, + { url = "https://files.pythonhosted.org/packages/19/a1/3127b277e9d3784a8040a54e8396d9ae5c64d6684dc6db4b4089b0eedcfb/librt-0.7.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6ae8aec43117a645a31e5f60e9e3a0797492e747823b9bda6972d521b436b4e8", size = 56658, upload-time = "2026-01-01T23:50:49.74Z" }, + { url = "https://files.pythonhosted.org/packages/3a/e9/b91b093a5c42eb218120445f3fef82e0b977fa2225f4d6fc133d25cdf86a/librt-0.7.7-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:aea05f701ccd2a76b34f0daf47ca5068176ff553510b614770c90d76ac88df06", size = 161026, upload-time = "2026-01-01T23:50:50.853Z" }, + { url = "https://files.pythonhosted.org/packages/c7/cb/1ded77d5976a79d7057af4a010d577ce4f473ff280984e68f4974a3281e5/librt-0.7.7-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7b16ccaeff0ed4355dfb76fe1ea7a5d6d03b5ad27f295f77ee0557bc20a72495", size = 169529, upload-time = "2026-01-01T23:50:52.24Z" }, + { url = "https://files.pythonhosted.org/packages/da/6e/6ca5bdaa701e15f05000ac1a4c5d1475c422d3484bd3d1ca9e8c2f5be167/librt-0.7.7-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c48c7e150c095d5e3cea7452347ba26094be905d6099d24f9319a8b475fcd3e0", size = 183271, upload-time = "2026-01-01T23:50:55.287Z" }, + { url = 
"https://files.pythonhosted.org/packages/e7/2d/55c0e38073997b4bbb5ddff25b6d1bbba8c2f76f50afe5bb9c844b702f34/librt-0.7.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:4dcee2f921a8632636d1c37f1bbdb8841d15666d119aa61e5399c5268e7ce02e", size = 179039, upload-time = "2026-01-01T23:50:56.807Z" }, + { url = "https://files.pythonhosted.org/packages/33/4e/3662a41ae8bb81b226f3968426293517b271d34d4e9fd4b59fc511f1ae40/librt-0.7.7-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:14ef0f4ac3728ffd85bfc58e2f2f48fb4ef4fa871876f13a73a7381d10a9f77c", size = 173505, upload-time = "2026-01-01T23:50:58.291Z" }, + { url = "https://files.pythonhosted.org/packages/f8/5d/cf768deb8bdcbac5f8c21fcb32dd483d038d88c529fd351bbe50590b945d/librt-0.7.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e4ab69fa37f8090f2d971a5d2bc606c7401170dbdae083c393d6cbf439cb45b8", size = 193570, upload-time = "2026-01-01T23:50:59.546Z" }, + { url = "https://files.pythonhosted.org/packages/a1/ea/ee70effd13f1d651976d83a2812391f6203971740705e3c0900db75d4bce/librt-0.7.7-cp310-cp310-win32.whl", hash = "sha256:4bf3cc46d553693382d2abf5f5bd493d71bb0f50a7c0beab18aa13a5545c8900", size = 42600, upload-time = "2026-01-01T23:51:00.694Z" }, + { url = "https://files.pythonhosted.org/packages/f0/eb/dc098730f281cba76c279b71783f5de2edcba3b880c1ab84a093ef826062/librt-0.7.7-cp310-cp310-win_amd64.whl", hash = "sha256:f0c8fe5aeadd8a0e5b0598f8a6ee3533135ca50fd3f20f130f9d72baf5c6ac58", size = 48977, upload-time = "2026-01-01T23:51:01.726Z" }, + { url = "https://files.pythonhosted.org/packages/f0/56/30b5c342518005546df78841cb0820ae85a17e7d07d521c10ef367306d0d/librt-0.7.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a487b71fbf8a9edb72a8c7a456dda0184642d99cd007bc819c0b7ab93676a8ee", size = 54709, upload-time = "2026-01-01T23:51:02.774Z" }, + { url = "https://files.pythonhosted.org/packages/72/78/9f120e3920b22504d4f3835e28b55acc2cc47c9586d2e1b6ba04c3c1bf01/librt-0.7.7-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:f4d4efb218264ecf0f8516196c9e2d1a0679d9fb3bb15df1155a35220062eba8", size = 56663, upload-time = "2026-01-01T23:51:03.838Z" }, + { url = "https://files.pythonhosted.org/packages/1c/ea/7d7a1ee7dfc1151836028eba25629afcf45b56bbc721293e41aa2e9b8934/librt-0.7.7-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:b8bb331aad734b059c4b450cd0a225652f16889e286b2345af5e2c3c625c3d85", size = 161705, upload-time = "2026-01-01T23:51:04.917Z" }, + { url = "https://files.pythonhosted.org/packages/45/a5/952bc840ac8917fbcefd6bc5f51ad02b89721729814f3e2bfcc1337a76d6/librt-0.7.7-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:467dbd7443bda08338fc8ad701ed38cef48194017554f4c798b0a237904b3f99", size = 171029, upload-time = "2026-01-01T23:51:06.09Z" }, + { url = "https://files.pythonhosted.org/packages/fa/bf/c017ff7da82dc9192cf40d5e802a48a25d00e7639b6465cfdcee5893a22c/librt-0.7.7-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:50d1d1ee813d2d1a3baf2873634ba506b263032418d16287c92ec1cc9c1a00cb", size = 184704, upload-time = "2026-01-01T23:51:07.549Z" }, + { url = "https://files.pythonhosted.org/packages/77/ec/72f3dd39d2cdfd6402ab10836dc9cbf854d145226062a185b419c4f1624a/librt-0.7.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c7e5070cf3ec92d98f57574da0224f8c73faf1ddd6d8afa0b8c9f6e86997bc74", size = 180719, upload-time = "2026-01-01T23:51:09.062Z" }, + { url = "https://files.pythonhosted.org/packages/78/86/06e7a1a81b246f3313bf515dd9613a1c81583e6fd7843a9f4d625c4e926d/librt-0.7.7-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:bdb9f3d865b2dafe7f9ad7f30ef563c80d0ddd2fdc8cc9b8e4f242f475e34d75", size = 174537, upload-time = "2026-01-01T23:51:10.611Z" }, + { url = "https://files.pythonhosted.org/packages/83/08/f9fb2edc9c7a76e95b2924ce81d545673f5b034e8c5dd92159d1c7dae0c6/librt-0.7.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:8185c8497d45164e256376f9da5aed2bb26ff636c798c9dabe313b90e9f25b28", size = 195238, upload-time = "2026-01-01T23:51:11.762Z" }, + { url = "https://files.pythonhosted.org/packages/ba/56/ea2d2489d3ea1f47b301120e03a099e22de7b32c93df9a211e6ff4f9bf38/librt-0.7.7-cp311-cp311-win32.whl", hash = "sha256:44d63ce643f34a903f09ff7ca355aae019a3730c7afd6a3c037d569beeb5d151", size = 42939, upload-time = "2026-01-01T23:51:13.192Z" }, + { url = "https://files.pythonhosted.org/packages/58/7b/c288f417e42ba2a037f1c0753219e277b33090ed4f72f292fb6fe175db4c/librt-0.7.7-cp311-cp311-win_amd64.whl", hash = "sha256:7d13cc340b3b82134f8038a2bfe7137093693dcad8ba5773da18f95ad6b77a8a", size = 49240, upload-time = "2026-01-01T23:51:14.264Z" }, + { url = "https://files.pythonhosted.org/packages/7c/24/738eb33a6c1516fdb2dfd2a35db6e5300f7616679b573585be0409bc6890/librt-0.7.7-cp311-cp311-win_arm64.whl", hash = "sha256:983de36b5a83fe9222f4f7dcd071f9b1ac6f3f17c0af0238dadfb8229588f890", size = 42613, upload-time = "2026-01-01T23:51:15.268Z" }, + { url = "https://files.pythonhosted.org/packages/56/72/1cd9d752070011641e8aee046c851912d5f196ecd726fffa7aed2070f3e0/librt-0.7.7-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2a85a1fc4ed11ea0eb0a632459ce004a2d14afc085a50ae3463cd3dfe1ce43fc", size = 55687, upload-time = "2026-01-01T23:51:16.291Z" }, + { url = "https://files.pythonhosted.org/packages/50/aa/d5a1d4221c4fe7e76ae1459d24d6037783cb83c7645164c07d7daf1576ec/librt-0.7.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c87654e29a35938baead1c4559858f346f4a2a7588574a14d784f300ffba0efd", size = 57136, upload-time = "2026-01-01T23:51:17.363Z" }, + { url = "https://files.pythonhosted.org/packages/23/6f/0c86b5cb5e7ef63208c8cc22534df10ecc5278efc0d47fb8815577f3ca2f/librt-0.7.7-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:c9faaebb1c6212c20afd8043cd6ed9de0a47d77f91a6b5b48f4e46ed470703fe", size = 165320, upload-time = "2026-01-01T23:51:18.455Z" }, + { url = 
"https://files.pythonhosted.org/packages/16/37/df4652690c29f645ffe405b58285a4109e9fe855c5bb56e817e3e75840b3/librt-0.7.7-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1908c3e5a5ef86b23391448b47759298f87f997c3bd153a770828f58c2bb4630", size = 174216, upload-time = "2026-01-01T23:51:19.599Z" }, + { url = "https://files.pythonhosted.org/packages/9a/d6/d3afe071910a43133ec9c0f3e4ce99ee6df0d4e44e4bddf4b9e1c6ed41cc/librt-0.7.7-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dbc4900e95a98fc0729523be9d93a8fedebb026f32ed9ffc08acd82e3e181503", size = 189005, upload-time = "2026-01-01T23:51:21.052Z" }, + { url = "https://files.pythonhosted.org/packages/d5/18/74060a870fe2d9fd9f47824eba6717ce7ce03124a0d1e85498e0e7efc1b2/librt-0.7.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a7ea4e1fbd253e5c68ea0fe63d08577f9d288a73f17d82f652ebc61fa48d878d", size = 183961, upload-time = "2026-01-01T23:51:22.493Z" }, + { url = "https://files.pythonhosted.org/packages/7c/5e/918a86c66304af66a3c1d46d54df1b2d0b8894babc42a14fb6f25511497f/librt-0.7.7-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:ef7699b7a5a244b1119f85c5bbc13f152cd38240cbb2baa19b769433bae98e50", size = 177610, upload-time = "2026-01-01T23:51:23.874Z" }, + { url = "https://files.pythonhosted.org/packages/b2/d7/b5e58dc2d570f162e99201b8c0151acf40a03a39c32ab824dd4febf12736/librt-0.7.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:955c62571de0b181d9e9e0a0303c8bc90d47670a5eff54cf71bf5da61d1899cf", size = 199272, upload-time = "2026-01-01T23:51:25.341Z" }, + { url = "https://files.pythonhosted.org/packages/18/87/8202c9bd0968bdddc188ec3811985f47f58ed161b3749299f2c0dd0f63fb/librt-0.7.7-cp312-cp312-win32.whl", hash = "sha256:1bcd79be209313b270b0e1a51c67ae1af28adad0e0c7e84c3ad4b5cb57aaa75b", size = 43189, upload-time = "2026-01-01T23:51:26.799Z" }, + { url = 
"https://files.pythonhosted.org/packages/61/8d/80244b267b585e7aa79ffdac19f66c4861effc3a24598e77909ecdd0850e/librt-0.7.7-cp312-cp312-win_amd64.whl", hash = "sha256:4353ee891a1834567e0302d4bd5e60f531912179578c36f3d0430f8c5e16b456", size = 49462, upload-time = "2026-01-01T23:51:27.813Z" }, + { url = "https://files.pythonhosted.org/packages/2d/1f/75db802d6a4992d95e8a889682601af9b49d5a13bbfa246d414eede1b56c/librt-0.7.7-cp312-cp312-win_arm64.whl", hash = "sha256:a76f1d679beccccdf8c1958e732a1dfcd6e749f8821ee59d7bec009ac308c029", size = 42828, upload-time = "2026-01-01T23:51:28.804Z" }, + { url = "https://files.pythonhosted.org/packages/8d/5e/d979ccb0a81407ec47c14ea68fb217ff4315521730033e1dd9faa4f3e2c1/librt-0.7.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8f4a0b0a3c86ba9193a8e23bb18f100d647bf192390ae195d84dfa0a10fb6244", size = 55746, upload-time = "2026-01-01T23:51:29.828Z" }, + { url = "https://files.pythonhosted.org/packages/f5/2c/3b65861fb32f802c3783d6ac66fc5589564d07452a47a8cf9980d531cad3/librt-0.7.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5335890fea9f9e6c4fdf8683061b9ccdcbe47c6dc03ab8e9b68c10acf78be78d", size = 57174, upload-time = "2026-01-01T23:51:31.226Z" }, + { url = "https://files.pythonhosted.org/packages/50/df/030b50614b29e443607220097ebaf438531ea218c7a9a3e21ea862a919cd/librt-0.7.7-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:9b4346b1225be26def3ccc6c965751c74868f0578cbcba293c8ae9168483d811", size = 165834, upload-time = "2026-01-01T23:51:32.278Z" }, + { url = "https://files.pythonhosted.org/packages/5d/e1/bd8d1eacacb24be26a47f157719553bbd1b3fe812c30dddf121c0436fd0b/librt-0.7.7-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a10b8eebdaca6e9fdbaf88b5aefc0e324b763a5f40b1266532590d5afb268a4c", size = 174819, upload-time = "2026-01-01T23:51:33.461Z" }, + { url = 
"https://files.pythonhosted.org/packages/46/7d/91d6c3372acf54a019c1ad8da4c9ecf4fc27d039708880bf95f48dbe426a/librt-0.7.7-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:067be973d90d9e319e6eb4ee2a9b9307f0ecd648b8a9002fa237289a4a07a9e7", size = 189607, upload-time = "2026-01-01T23:51:34.604Z" }, + { url = "https://files.pythonhosted.org/packages/fa/ac/44604d6d3886f791fbd1c6ae12d5a782a8f4aca927484731979f5e92c200/librt-0.7.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:23d2299ed007812cccc1ecef018db7d922733382561230de1f3954db28433977", size = 184586, upload-time = "2026-01-01T23:51:35.845Z" }, + { url = "https://files.pythonhosted.org/packages/5c/26/d8a6e4c17117b7f9b83301319d9a9de862ae56b133efb4bad8b3aa0808c9/librt-0.7.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:6b6f8ea465524aa4c7420c7cc4ca7d46fe00981de8debc67b1cc2e9957bb5b9d", size = 178251, upload-time = "2026-01-01T23:51:37.018Z" }, + { url = "https://files.pythonhosted.org/packages/99/ab/98d857e254376f8e2f668e807daccc1f445e4b4fc2f6f9c1cc08866b0227/librt-0.7.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f8df32a99cc46eb0ee90afd9ada113ae2cafe7e8d673686cf03ec53e49635439", size = 199853, upload-time = "2026-01-01T23:51:38.195Z" }, + { url = "https://files.pythonhosted.org/packages/7c/55/4523210d6ae5134a5da959900be43ad8bab2e4206687b6620befddb5b5fd/librt-0.7.7-cp313-cp313-win32.whl", hash = "sha256:86f86b3b785487c7760247bcdac0b11aa8bf13245a13ed05206286135877564b", size = 43247, upload-time = "2026-01-01T23:51:39.629Z" }, + { url = "https://files.pythonhosted.org/packages/25/40/3ec0fed5e8e9297b1cf1a3836fb589d3de55f9930e3aba988d379e8ef67c/librt-0.7.7-cp313-cp313-win_amd64.whl", hash = "sha256:4862cb2c702b1f905c0503b72d9d4daf65a7fdf5a9e84560e563471e57a56949", size = 49419, upload-time = "2026-01-01T23:51:40.674Z" }, + { url = 
"https://files.pythonhosted.org/packages/1c/7a/aab5f0fb122822e2acbc776addf8b9abfb4944a9056c00c393e46e543177/librt-0.7.7-cp313-cp313-win_arm64.whl", hash = "sha256:0996c83b1cb43c00e8c87835a284f9057bc647abd42b5871e5f941d30010c832", size = 42828, upload-time = "2026-01-01T23:51:41.731Z" }, + { url = "https://files.pythonhosted.org/packages/69/9c/228a5c1224bd23809a635490a162e9cbdc68d99f0eeb4a696f07886b8206/librt-0.7.7-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:23daa1ab0512bafdd677eb1bfc9611d8ffbe2e328895671e64cb34166bc1b8c8", size = 55188, upload-time = "2026-01-01T23:51:43.14Z" }, + { url = "https://files.pythonhosted.org/packages/ba/c2/0e7c6067e2b32a156308205e5728f4ed6478c501947e9142f525afbc6bd2/librt-0.7.7-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:558a9e5a6f3cc1e20b3168fb1dc802d0d8fa40731f6e9932dcc52bbcfbd37111", size = 56895, upload-time = "2026-01-01T23:51:44.534Z" }, + { url = "https://files.pythonhosted.org/packages/0e/77/de50ff70c80855eb79d1d74035ef06f664dd073fb7fb9d9fb4429651b8eb/librt-0.7.7-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:2567cb48dc03e5b246927ab35cbb343376e24501260a9b5e30b8e255dca0d1d2", size = 163724, upload-time = "2026-01-01T23:51:45.571Z" }, + { url = "https://files.pythonhosted.org/packages/6e/19/f8e4bf537899bdef9e0bb9f0e4b18912c2d0f858ad02091b6019864c9a6d/librt-0.7.7-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6066c638cdf85ff92fc6f932d2d73c93a0e03492cdfa8778e6d58c489a3d7259", size = 172470, upload-time = "2026-01-01T23:51:46.823Z" }, + { url = "https://files.pythonhosted.org/packages/42/4c/dcc575b69d99076768e8dd6141d9aecd4234cba7f0e09217937f52edb6ed/librt-0.7.7-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a609849aca463074c17de9cda173c276eb8fee9e441053529e7b9e249dc8b8ee", size = 186806, upload-time = "2026-01-01T23:51:48.009Z" }, + { url = 
"https://files.pythonhosted.org/packages/fe/f8/4094a2b7816c88de81239a83ede6e87f1138477d7ee956c30f136009eb29/librt-0.7.7-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:add4e0a000858fe9bb39ed55f31085506a5c38363e6eb4a1e5943a10c2bfc3d1", size = 181809, upload-time = "2026-01-01T23:51:49.35Z" }, + { url = "https://files.pythonhosted.org/packages/1b/ac/821b7c0ab1b5a6cd9aee7ace8309c91545a2607185101827f79122219a7e/librt-0.7.7-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:a3bfe73a32bd0bdb9a87d586b05a23c0a1729205d79df66dee65bb2e40d671ba", size = 175597, upload-time = "2026-01-01T23:51:50.636Z" }, + { url = "https://files.pythonhosted.org/packages/71/f9/27f6bfbcc764805864c04211c6ed636fe1d58f57a7b68d1f4ae5ed74e0e0/librt-0.7.7-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:0ecce0544d3db91a40f8b57ae26928c02130a997b540f908cefd4d279d6c5848", size = 196506, upload-time = "2026-01-01T23:51:52.535Z" }, + { url = "https://files.pythonhosted.org/packages/46/ba/c9b9c6fc931dd7ea856c573174ccaf48714905b1a7499904db2552e3bbaf/librt-0.7.7-cp314-cp314-win32.whl", hash = "sha256:8f7a74cf3a80f0c3b0ec75b0c650b2f0a894a2cec57ef75f6f72c1e82cdac61d", size = 39747, upload-time = "2026-01-01T23:51:53.683Z" }, + { url = "https://files.pythonhosted.org/packages/c5/69/cd1269337c4cde3ee70176ee611ab0058aa42fc8ce5c9dce55f48facfcd8/librt-0.7.7-cp314-cp314-win_amd64.whl", hash = "sha256:3d1fe2e8df3268dd6734dba33ededae72ad5c3a859b9577bc00b715759c5aaab", size = 45971, upload-time = "2026-01-01T23:51:54.697Z" }, + { url = "https://files.pythonhosted.org/packages/79/fd/e0844794423f5583108c5991313c15e2b400995f44f6ec6871f8aaf8243c/librt-0.7.7-cp314-cp314-win_arm64.whl", hash = "sha256:2987cf827011907d3dfd109f1be0d61e173d68b1270107bb0e89f2fca7f2ed6b", size = 39075, upload-time = "2026-01-01T23:51:55.726Z" }, + { url = "https://files.pythonhosted.org/packages/42/02/211fd8f7c381e7b2a11d0fdfcd410f409e89967be2e705983f7c6342209a/librt-0.7.7-cp314-cp314t-macosx_10_13_x86_64.whl", hash = 
"sha256:8e92c8de62b40bfce91d5e12c6e8b15434da268979b1af1a6589463549d491e6", size = 57368, upload-time = "2026-01-01T23:51:56.706Z" }, + { url = "https://files.pythonhosted.org/packages/4c/b6/aca257affae73ece26041ae76032153266d110453173f67d7603058e708c/librt-0.7.7-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f683dcd49e2494a7535e30f779aa1ad6e3732a019d80abe1309ea91ccd3230e3", size = 59238, upload-time = "2026-01-01T23:51:58.066Z" }, + { url = "https://files.pythonhosted.org/packages/96/47/7383a507d8e0c11c78ca34c9d36eab9000db5989d446a2f05dc40e76c64f/librt-0.7.7-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:9b15e5d17812d4d629ff576699954f74e2cc24a02a4fc401882dd94f81daba45", size = 183870, upload-time = "2026-01-01T23:51:59.204Z" }, + { url = "https://files.pythonhosted.org/packages/a4/b8/50f3d8eec8efdaf79443963624175c92cec0ba84827a66b7fcfa78598e51/librt-0.7.7-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c084841b879c4d9b9fa34e5d5263994f21aea7fd9c6add29194dbb41a6210536", size = 194608, upload-time = "2026-01-01T23:52:00.419Z" }, + { url = "https://files.pythonhosted.org/packages/23/d9/1b6520793aadb59d891e3b98ee057a75de7f737e4a8b4b37fdbecb10d60f/librt-0.7.7-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:10c8fb9966f84737115513fecbaf257f9553d067a7dd45a69c2c7e5339e6a8dc", size = 206776, upload-time = "2026-01-01T23:52:01.705Z" }, + { url = "https://files.pythonhosted.org/packages/ff/db/331edc3bba929d2756fa335bfcf736f36eff4efcb4f2600b545a35c2ae58/librt-0.7.7-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:9b5fb1ecb2c35362eab2dbd354fd1efa5a8440d3e73a68be11921042a0edc0ff", size = 203206, upload-time = "2026-01-01T23:52:03.315Z" }, + { url = "https://files.pythonhosted.org/packages/b2/e1/6af79ec77204e85f6f2294fc171a30a91bb0e35d78493532ed680f5d98be/librt-0.7.7-cp314-cp314t-musllinux_1_2_i686.whl", hash = 
"sha256:d1454899909d63cc9199a89fcc4f81bdd9004aef577d4ffc022e600c412d57f3", size = 196697, upload-time = "2026-01-01T23:52:04.857Z" }, + { url = "https://files.pythonhosted.org/packages/f3/46/de55ecce4b2796d6d243295c221082ca3a944dc2fb3a52dcc8660ce7727d/librt-0.7.7-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:7ef28f2e7a016b29792fe0a2dd04dec75725b32a1264e390c366103f834a9c3a", size = 217193, upload-time = "2026-01-01T23:52:06.159Z" }, + { url = "https://files.pythonhosted.org/packages/41/61/33063e271949787a2f8dd33c5260357e3d512a114fc82ca7890b65a76e2d/librt-0.7.7-cp314-cp314t-win32.whl", hash = "sha256:5e419e0db70991b6ba037b70c1d5bbe92b20ddf82f31ad01d77a347ed9781398", size = 40277, upload-time = "2026-01-01T23:52:07.625Z" }, + { url = "https://files.pythonhosted.org/packages/06/21/1abd972349f83a696ea73159ac964e63e2d14086fdd9bc7ca878c25fced4/librt-0.7.7-cp314-cp314t-win_amd64.whl", hash = "sha256:d6b7d93657332c817b8d674ef6bf1ab7796b4f7ce05e420fd45bd258a72ac804", size = 46765, upload-time = "2026-01-01T23:52:08.647Z" }, + { url = "https://files.pythonhosted.org/packages/51/0e/b756c7708143a63fca65a51ca07990fa647db2cc8fcd65177b9e96680255/librt-0.7.7-cp314-cp314t-win_arm64.whl", hash = "sha256:142c2cd91794b79fd0ce113bd658993b7ede0fe93057668c2f98a45ca00b7e91", size = 39724, upload-time = "2026-01-01T23:52:09.745Z" }, ] [[package]] name = "litellm" -version = "1.80.10" +version = "1.80.11" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohttp", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -3120,9 +3169,9 @@ dependencies = [ { name = "tiktoken", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "tokenizers", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/dd/44/0aaa7449e7c4aa05668ec03f1f68a01b1e476591071d9659a68db19371a2/litellm-1.80.10.tar.gz", hash = "sha256:4a4aff7558945c2f7e5c6523e67c1b5525a46b10b0e1ad6b8f847cb13b16779e", size = 12764777, upload-time = "2025-12-14T02:07:05.362Z" } +sdist = { url = "https://files.pythonhosted.org/packages/55/47/be6cd7b356418ca8bef3b843507940ce77b76ef2dfe515f2b4ba9b461ff0/litellm-1.80.11.tar.gz", hash = "sha256:c9fc63e7acb6360363238fe291bcff1488c59ff66020416d8376c0ee56414a19", size = 13189510, upload-time = "2025-12-22T12:47:29.181Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/8c/a9/4814b6aa58f6705df2831eaadeb5bc8240684c8c9d5964245212f85049d1/litellm-1.80.10-py3-none-any.whl", hash = "sha256:9b3e561efaba0eb1291cb1555d3dcb7283cf7f3cb65aadbcdb42e2a8765898c8", size = 11264240, upload-time = "2025-12-14T02:07:02.414Z" }, + { url = "https://files.pythonhosted.org/packages/97/0b/9e637344f24f3fe0e8039cd2337389fe05e0d31f518bc3e0a5cdbe45784a/litellm-1.80.11-py3-none-any.whl", hash = "sha256:406283d66ead77dc7ff0e0b2559c80e9e497d8e7c2257efb1cb9210a20d09d54", size = 11456346, upload-time = "2025-12-22T12:47:26.469Z" }, ] [package.optional-dependencies] @@ -3155,20 +3204,20 @@ proxy = [ [[package]] name = "litellm-enterprise" -version = "0.1.25" +version = "0.1.27" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e8/0a/2a166a3e598c581fcd295ec74f0706718051d0aeefccc636914d18c7fc86/litellm_enterprise-0.1.25.tar.gz", hash = "sha256:1c82178b8e2c85f47b31910fd103a322b46d6caea44cd7a8c80b00fdcfeacd22", size = 43420, upload-time = "2025-12-11T04:33:10.694Z" } +sdist = { url = "https://files.pythonhosted.org/packages/b6/b5/2304eed58f0142b3570c50580b451db9b7709012d5b436c2100783ae2220/litellm_enterprise-0.1.27.tar.gz", hash = "sha256:aa40c87f7c8df64beb79e75f71e1b5c0a458350efa68527e3491e6f27f2cbd57", size = 46829, upload-time = "2025-12-18T00:01:33.398Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/8b/b2/31ac8e551216a6a92e6d47b6d9e3e49d8957ee6da15abf73eeb9aaeec1f3/litellm_enterprise-0.1.25-py3-none-any.whl", hash = "sha256:80c8f1996846453ad309e74cd6d2659d9508320370df5d462d34326b06401c4d", size = 104440, upload-time = "2025-12-11T04:33:06.103Z" }, + { url = "https://files.pythonhosted.org/packages/7e/23/ec61a6aa76b6938d3de8cad206875b0500e1df234fa3535b282b1a4850b5/litellm_enterprise-0.1.27-py3-none-any.whl", hash = "sha256:41b9d41d04123f492060a742091006dc1d182b54ce3a1c0e18ee75d623c63e91", size = 108107, upload-time = "2025-12-18T00:01:31.966Z" }, ] [[package]] name = "litellm-proxy-extras" -version = "0.4.14" +version = "0.4.16" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ab/3f/8beffc57faae37a3c47a532cdfb2ac3bf6397905a8c0f4b50366bc0f0dc5/litellm_proxy_extras-0.4.14.tar.gz", hash = "sha256:518680192aac39c8c4f96ee0f3a87e2905250d3edaea8e0c4ff54ee598e775b0", size = 19525, upload-time = "2025-12-14T00:20:52.111Z" } +sdist = { url = "https://files.pythonhosted.org/packages/b7/5e/9ff050691f4f4cffd40af72a8f81750112531b8c406e623d980eef73a394/litellm_proxy_extras-0.4.16.tar.gz", hash = "sha256:ff1ee4ea119318b471bb71a99d8bc941159d4d2c09bee797dd29768e9504befb", size = 20430, upload-time = "2025-12-20T09:22:11.52Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/6e/61/67f9d39afda151e3f37d2f1426fd797675a2af0d2c1045764b598476a4a9/litellm_proxy_extras-0.4.14-py3-none-any.whl", hash = "sha256:6943e19abb696e080b5a2a01472b99b1d78603ecb9df24604eba428f54440e7d", size = 43086, upload-time = "2025-12-14T00:20:50.298Z" }, + { url = "https://files.pythonhosted.org/packages/49/54/b2ccbd4905583fef466e26f0777b9e4ff7a6e6f6996e564c24637cc7d459/litellm_proxy_extras-0.4.16-py3-none-any.whl", hash = "sha256:5651e777c7f4c0e87c6722971bca19b8f40f417b08f74001cab2d0a5b1c63a91", size = 44450, upload-time = "2025-12-20T09:22:10.11Z" }, ] [[package]] @@ -3292,7 +3341,7 @@ 
dependencies = [ { name = "fonttools", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "kiwisolver", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, - { name = "numpy", version = "2.3.5", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "numpy", version = "2.4.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, { name = "packaging", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "pillow", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "pyparsing", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -3459,7 +3508,7 @@ version = "0.5.4" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, - { name = "numpy", version = "2.3.5", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform 
== 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "numpy", version = "2.4.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/0e/4a/c27b42ed9b1c7d13d9ba8b6905dece787d6259152f2309338aed29b2447b/ml_dtypes-0.5.4.tar.gz", hash = "sha256:8ab06a50fb9bf9666dd0fe5dfb4676fa2b0ac0f31ecff72a6c3af8e22c063453", size = 692314, upload-time = "2025-11-17T22:32:31.031Z" } wheels = [ @@ -3738,11 +3787,11 @@ wheels = [ [[package]] name = "nodeenv" -version = "1.9.1" +version = "1.10.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/43/16/fc88b08840de0e0a72a2f9d8c6bae36be573e475a6326ae854bcc549fc45/nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f", size = 47437, upload-time = "2024-06-04T18:44:11.171Z" } +sdist = { url = "https://files.pythonhosted.org/packages/24/bf/d1bda4f6168e0b2e9e5958945e01910052158313224ada5ce1fb2e1113b8/nodeenv-1.10.0.tar.gz", hash = "sha256:996c191ad80897d076bdfba80a41994c2b47c68e224c542b48feba42ba00f8bb", size = 55611, upload-time = "2025-12-20T14:08:54.006Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d2/1d/1b658dbd2b9fa9c4c9f32accbfc0205d532c8c6194dc0f2a4c0428e7128a/nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9", size = 22314, upload-time = "2024-06-04T18:44:08.352Z" }, + { url = "https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl", hash = 
"sha256:5bb13e3eed2923615535339b3c620e76779af4cb4c6a90deccc9e36b274d3827", size = 23438, upload-time = "2025-12-20T14:08:52.782Z" }, ] [[package]] @@ -3814,7 +3863,7 @@ wheels = [ [[package]] name = "numpy" -version = "2.3.5" +version = "2.4.0" source = { registry = "https://pypi.org/simple" } resolution-markers = [ "python_full_version >= '3.14' and sys_platform == 'darwin'", @@ -3830,81 +3879,79 @@ resolution-markers = [ "python_full_version == '3.12.*' and sys_platform == 'win32'", "python_full_version == '3.11.*' and sys_platform == 'win32'", ] -sdist = { url = "https://files.pythonhosted.org/packages/76/65/21b3bc86aac7b8f2862db1e808f1ea22b028e30a225a34a5ede9bf8678f2/numpy-2.3.5.tar.gz", hash = "sha256:784db1dcdab56bf0517743e746dfb0f885fc68d948aba86eeec2cba234bdf1c0", size = 20584950, upload-time = "2025-11-16T22:52:42.067Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/43/77/84dd1d2e34d7e2792a236ba180b5e8fcc1e3e414e761ce0253f63d7f572e/numpy-2.3.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:de5672f4a7b200c15a4127042170a694d4df43c992948f5e1af57f0174beed10", size = 17034641, upload-time = "2025-11-16T22:49:19.336Z" }, - { url = "https://files.pythonhosted.org/packages/2a/ea/25e26fa5837106cde46ae7d0b667e20f69cbbc0efd64cba8221411ab26ae/numpy-2.3.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:acfd89508504a19ed06ef963ad544ec6664518c863436306153e13e94605c218", size = 12528324, upload-time = "2025-11-16T22:49:22.582Z" }, - { url = "https://files.pythonhosted.org/packages/4d/1a/e85f0eea4cf03d6a0228f5c0256b53f2df4bc794706e7df019fc622e47f1/numpy-2.3.5-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:ffe22d2b05504f786c867c8395de703937f934272eb67586817b46188b4ded6d", size = 5356872, upload-time = "2025-11-16T22:49:25.408Z" }, - { url = "https://files.pythonhosted.org/packages/5c/bb/35ef04afd567f4c989c2060cde39211e4ac5357155c1833bcd1166055c61/numpy-2.3.5-cp311-cp311-macosx_14_0_x86_64.whl", hash = 
"sha256:872a5cf366aec6bb1147336480fef14c9164b154aeb6542327de4970282cd2f5", size = 6893148, upload-time = "2025-11-16T22:49:27.549Z" }, - { url = "https://files.pythonhosted.org/packages/f2/2b/05bbeb06e2dff5eab512dfc678b1cc5ee94d8ac5956a0885c64b6b26252b/numpy-2.3.5-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3095bdb8dd297e5920b010e96134ed91d852d81d490e787beca7e35ae1d89cf7", size = 14557282, upload-time = "2025-11-16T22:49:30.964Z" }, - { url = "https://files.pythonhosted.org/packages/65/fb/2b23769462b34398d9326081fad5655198fcf18966fcb1f1e49db44fbf31/numpy-2.3.5-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8cba086a43d54ca804ce711b2a940b16e452807acebe7852ff327f1ecd49b0d4", size = 16897903, upload-time = "2025-11-16T22:49:34.191Z" }, - { url = "https://files.pythonhosted.org/packages/ac/14/085f4cf05fc3f1e8aa95e85404e984ffca9b2275a5dc2b1aae18a67538b8/numpy-2.3.5-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6cf9b429b21df6b99f4dee7a1218b8b7ffbbe7df8764dc0bd60ce8a0708fed1e", size = 16341672, upload-time = "2025-11-16T22:49:37.2Z" }, - { url = "https://files.pythonhosted.org/packages/6f/3b/1f73994904142b2aa290449b3bb99772477b5fd94d787093e4f24f5af763/numpy-2.3.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:396084a36abdb603546b119d96528c2f6263921c50df3c8fd7cb28873a237748", size = 18838896, upload-time = "2025-11-16T22:49:39.727Z" }, - { url = "https://files.pythonhosted.org/packages/cd/b9/cf6649b2124f288309ffc353070792caf42ad69047dcc60da85ee85fea58/numpy-2.3.5-cp311-cp311-win32.whl", hash = "sha256:b0c7088a73aef3d687c4deef8452a3ac7c1be4e29ed8bf3b366c8111128ac60c", size = 6563608, upload-time = "2025-11-16T22:49:42.079Z" }, - { url = "https://files.pythonhosted.org/packages/aa/44/9fe81ae1dcc29c531843852e2874080dc441338574ccc4306b39e2ff6e59/numpy-2.3.5-cp311-cp311-win_amd64.whl", hash = "sha256:a414504bef8945eae5f2d7cb7be2d4af77c5d1cb5e20b296c2c25b61dff2900c", size = 13078442, upload-time = 
"2025-11-16T22:49:43.99Z" }, - { url = "https://files.pythonhosted.org/packages/6d/a7/f99a41553d2da82a20a2f22e93c94f928e4490bb447c9ff3c4ff230581d3/numpy-2.3.5-cp311-cp311-win_arm64.whl", hash = "sha256:0cd00b7b36e35398fa2d16af7b907b65304ef8bb4817a550e06e5012929830fa", size = 10458555, upload-time = "2025-11-16T22:49:47.092Z" }, - { url = "https://files.pythonhosted.org/packages/44/37/e669fe6cbb2b96c62f6bbedc6a81c0f3b7362f6a59230b23caa673a85721/numpy-2.3.5-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:74ae7b798248fe62021dbf3c914245ad45d1a6b0cb4a29ecb4b31d0bfbc4cc3e", size = 16733873, upload-time = "2025-11-16T22:49:49.84Z" }, - { url = "https://files.pythonhosted.org/packages/c5/65/df0db6c097892c9380851ab9e44b52d4f7ba576b833996e0080181c0c439/numpy-2.3.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ee3888d9ff7c14604052b2ca5535a30216aa0a58e948cdd3eeb8d3415f638769", size = 12259838, upload-time = "2025-11-16T22:49:52.863Z" }, - { url = "https://files.pythonhosted.org/packages/5b/e1/1ee06e70eb2136797abe847d386e7c0e830b67ad1d43f364dd04fa50d338/numpy-2.3.5-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:612a95a17655e213502f60cfb9bf9408efdc9eb1d5f50535cc6eb365d11b42b5", size = 5088378, upload-time = "2025-11-16T22:49:55.055Z" }, - { url = "https://files.pythonhosted.org/packages/6d/9c/1ca85fb86708724275103b81ec4cf1ac1d08f465368acfc8da7ab545bdae/numpy-2.3.5-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:3101e5177d114a593d79dd79658650fe28b5a0d8abeb8ce6f437c0e6df5be1a4", size = 6628559, upload-time = "2025-11-16T22:49:57.371Z" }, - { url = "https://files.pythonhosted.org/packages/74/78/fcd41e5a0ce4f3f7b003da85825acddae6d7ecb60cf25194741b036ca7d6/numpy-2.3.5-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b973c57ff8e184109db042c842423ff4f60446239bd585a5131cc47f06f789d", size = 14250702, upload-time = "2025-11-16T22:49:59.632Z" }, - { url = 
"https://files.pythonhosted.org/packages/b6/23/2a1b231b8ff672b4c450dac27164a8b2ca7d9b7144f9c02d2396518352eb/numpy-2.3.5-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0d8163f43acde9a73c2a33605353a4f1bc4798745a8b1d73183b28e5b435ae28", size = 16606086, upload-time = "2025-11-16T22:50:02.127Z" }, - { url = "https://files.pythonhosted.org/packages/a0/c5/5ad26fbfbe2012e190cc7d5003e4d874b88bb18861d0829edc140a713021/numpy-2.3.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:51c1e14eb1e154ebd80e860722f9e6ed6ec89714ad2db2d3aa33c31d7c12179b", size = 16025985, upload-time = "2025-11-16T22:50:04.536Z" }, - { url = "https://files.pythonhosted.org/packages/d2/fa/dd48e225c46c819288148d9d060b047fd2a6fb1eb37eae25112ee4cb4453/numpy-2.3.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b46b4ec24f7293f23adcd2d146960559aaf8020213de8ad1909dba6c013bf89c", size = 18542976, upload-time = "2025-11-16T22:50:07.557Z" }, - { url = "https://files.pythonhosted.org/packages/05/79/ccbd23a75862d95af03d28b5c6901a1b7da4803181513d52f3b86ed9446e/numpy-2.3.5-cp312-cp312-win32.whl", hash = "sha256:3997b5b3c9a771e157f9aae01dd579ee35ad7109be18db0e85dbdbe1de06e952", size = 6285274, upload-time = "2025-11-16T22:50:10.746Z" }, - { url = "https://files.pythonhosted.org/packages/2d/57/8aeaf160312f7f489dea47ab61e430b5cb051f59a98ae68b7133ce8fa06a/numpy-2.3.5-cp312-cp312-win_amd64.whl", hash = "sha256:86945f2ee6d10cdfd67bcb4069c1662dd711f7e2a4343db5cecec06b87cf31aa", size = 12782922, upload-time = "2025-11-16T22:50:12.811Z" }, - { url = "https://files.pythonhosted.org/packages/78/a6/aae5cc2ca78c45e64b9ef22f089141d661516856cf7c8a54ba434576900d/numpy-2.3.5-cp312-cp312-win_arm64.whl", hash = "sha256:f28620fe26bee16243be2b7b874da327312240a7cdc38b769a697578d2100013", size = 10194667, upload-time = "2025-11-16T22:50:16.16Z" }, - { url = 
"https://files.pythonhosted.org/packages/db/69/9cde09f36da4b5a505341180a3f2e6fadc352fd4d2b7096ce9778db83f1a/numpy-2.3.5-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d0f23b44f57077c1ede8c5f26b30f706498b4862d3ff0a7298b8411dd2f043ff", size = 16728251, upload-time = "2025-11-16T22:50:19.013Z" }, - { url = "https://files.pythonhosted.org/packages/79/fb/f505c95ceddd7027347b067689db71ca80bd5ecc926f913f1a23e65cf09b/numpy-2.3.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:aa5bc7c5d59d831d9773d1170acac7893ce3a5e130540605770ade83280e7188", size = 12254652, upload-time = "2025-11-16T22:50:21.487Z" }, - { url = "https://files.pythonhosted.org/packages/78/da/8c7738060ca9c31b30e9301ee0cf6c5ffdbf889d9593285a1cead337f9a5/numpy-2.3.5-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:ccc933afd4d20aad3c00bcef049cb40049f7f196e0397f1109dba6fed63267b0", size = 5083172, upload-time = "2025-11-16T22:50:24.562Z" }, - { url = "https://files.pythonhosted.org/packages/a4/b4/ee5bb2537fb9430fd2ef30a616c3672b991a4129bb1c7dcc42aa0abbe5d7/numpy-2.3.5-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:afaffc4393205524af9dfa400fa250143a6c3bc646c08c9f5e25a9f4b4d6a903", size = 6622990, upload-time = "2025-11-16T22:50:26.47Z" }, - { url = "https://files.pythonhosted.org/packages/95/03/dc0723a013c7d7c19de5ef29e932c3081df1c14ba582b8b86b5de9db7f0f/numpy-2.3.5-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9c75442b2209b8470d6d5d8b1c25714270686f14c749028d2199c54e29f20b4d", size = 14248902, upload-time = "2025-11-16T22:50:28.861Z" }, - { url = "https://files.pythonhosted.org/packages/f5/10/ca162f45a102738958dcec8023062dad0cbc17d1ab99d68c4e4a6c45fb2b/numpy-2.3.5-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11e06aa0af8c0f05104d56450d6093ee639e15f24ecf62d417329d06e522e017", size = 16597430, upload-time = "2025-11-16T22:50:31.56Z" }, - { url = 
"https://files.pythonhosted.org/packages/2a/51/c1e29be863588db58175175f057286900b4b3327a1351e706d5e0f8dd679/numpy-2.3.5-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ed89927b86296067b4f81f108a2271d8926467a8868e554eaf370fc27fa3ccaf", size = 16024551, upload-time = "2025-11-16T22:50:34.242Z" }, - { url = "https://files.pythonhosted.org/packages/83/68/8236589d4dbb87253d28259d04d9b814ec0ecce7cb1c7fed29729f4c3a78/numpy-2.3.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:51c55fe3451421f3a6ef9a9c1439e82101c57a2c9eab9feb196a62b1a10b58ce", size = 18533275, upload-time = "2025-11-16T22:50:37.651Z" }, - { url = "https://files.pythonhosted.org/packages/40/56/2932d75b6f13465239e3b7b7e511be27f1b8161ca2510854f0b6e521c395/numpy-2.3.5-cp313-cp313-win32.whl", hash = "sha256:1978155dd49972084bd6ef388d66ab70f0c323ddee6f693d539376498720fb7e", size = 6277637, upload-time = "2025-11-16T22:50:40.11Z" }, - { url = "https://files.pythonhosted.org/packages/0c/88/e2eaa6cffb115b85ed7c7c87775cb8bcf0816816bc98ca8dbfa2ee33fe6e/numpy-2.3.5-cp313-cp313-win_amd64.whl", hash = "sha256:00dc4e846108a382c5869e77c6ed514394bdeb3403461d25a829711041217d5b", size = 12779090, upload-time = "2025-11-16T22:50:42.503Z" }, - { url = "https://files.pythonhosted.org/packages/8f/88/3f41e13a44ebd4034ee17baa384acac29ba6a4fcc2aca95f6f08ca0447d1/numpy-2.3.5-cp313-cp313-win_arm64.whl", hash = "sha256:0472f11f6ec23a74a906a00b48a4dcf3849209696dff7c189714511268d103ae", size = 10194710, upload-time = "2025-11-16T22:50:44.971Z" }, - { url = "https://files.pythonhosted.org/packages/13/cb/71744144e13389d577f867f745b7df2d8489463654a918eea2eeb166dfc9/numpy-2.3.5-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:414802f3b97f3c1eef41e530aaba3b3c1620649871d8cb38c6eaff034c2e16bd", size = 16827292, upload-time = "2025-11-16T22:50:47.715Z" }, - { url = "https://files.pythonhosted.org/packages/71/80/ba9dc6f2a4398e7f42b708a7fdc841bb638d353be255655498edbf9a15a8/numpy-2.3.5-cp313-cp313t-macosx_11_0_arm64.whl", hash = 
"sha256:5ee6609ac3604fa7780e30a03e5e241a7956f8e2fcfe547d51e3afa5247ac47f", size = 12378897, upload-time = "2025-11-16T22:50:51.327Z" }, - { url = "https://files.pythonhosted.org/packages/2e/6d/db2151b9f64264bcceccd51741aa39b50150de9b602d98ecfe7e0c4bff39/numpy-2.3.5-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:86d835afea1eaa143012a2d7a3f45a3adce2d7adc8b4961f0b362214d800846a", size = 5207391, upload-time = "2025-11-16T22:50:54.542Z" }, - { url = "https://files.pythonhosted.org/packages/80/ae/429bacace5ccad48a14c4ae5332f6aa8ab9f69524193511d60ccdfdc65fa/numpy-2.3.5-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:30bc11310e8153ca664b14c5f1b73e94bd0503681fcf136a163de856f3a50139", size = 6721275, upload-time = "2025-11-16T22:50:56.794Z" }, - { url = "https://files.pythonhosted.org/packages/74/5b/1919abf32d8722646a38cd527bc3771eb229a32724ee6ba340ead9b92249/numpy-2.3.5-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1062fde1dcf469571705945b0f221b73928f34a20c904ffb45db101907c3454e", size = 14306855, upload-time = "2025-11-16T22:50:59.208Z" }, - { url = "https://files.pythonhosted.org/packages/a5/87/6831980559434973bebc30cd9c1f21e541a0f2b0c280d43d3afd909b66d0/numpy-2.3.5-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ce581db493ea1a96c0556360ede6607496e8bf9b3a8efa66e06477267bc831e9", size = 16657359, upload-time = "2025-11-16T22:51:01.991Z" }, - { url = "https://files.pythonhosted.org/packages/dd/91/c797f544491ee99fd00495f12ebb7802c440c1915811d72ac5b4479a3356/numpy-2.3.5-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:cc8920d2ec5fa99875b670bb86ddeb21e295cb07aa331810d9e486e0b969d946", size = 16093374, upload-time = "2025-11-16T22:51:05.291Z" }, - { url = "https://files.pythonhosted.org/packages/74/a6/54da03253afcbe7a72785ec4da9c69fb7a17710141ff9ac5fcb2e32dbe64/numpy-2.3.5-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:9ee2197ef8c4f0dfe405d835f3b6a14f5fee7782b5de51ba06fb65fc9b36e9f1", size 
= 18594587, upload-time = "2025-11-16T22:51:08.585Z" }, - { url = "https://files.pythonhosted.org/packages/80/e9/aff53abbdd41b0ecca94285f325aff42357c6b5abc482a3fcb4994290b18/numpy-2.3.5-cp313-cp313t-win32.whl", hash = "sha256:70b37199913c1bd300ff6e2693316c6f869c7ee16378faf10e4f5e3275b299c3", size = 6405940, upload-time = "2025-11-16T22:51:11.541Z" }, - { url = "https://files.pythonhosted.org/packages/d5/81/50613fec9d4de5480de18d4f8ef59ad7e344d497edbef3cfd80f24f98461/numpy-2.3.5-cp313-cp313t-win_amd64.whl", hash = "sha256:b501b5fa195cc9e24fe102f21ec0a44dffc231d2af79950b451e0d99cea02234", size = 12920341, upload-time = "2025-11-16T22:51:14.312Z" }, - { url = "https://files.pythonhosted.org/packages/bb/ab/08fd63b9a74303947f34f0bd7c5903b9c5532c2d287bead5bdf4c556c486/numpy-2.3.5-cp313-cp313t-win_arm64.whl", hash = "sha256:a80afd79f45f3c4a7d341f13acbe058d1ca8ac017c165d3fa0d3de6bc1a079d7", size = 10262507, upload-time = "2025-11-16T22:51:16.846Z" }, - { url = "https://files.pythonhosted.org/packages/ba/97/1a914559c19e32d6b2e233cf9a6a114e67c856d35b1d6babca571a3e880f/numpy-2.3.5-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:bf06bc2af43fa8d32d30fae16ad965663e966b1a3202ed407b84c989c3221e82", size = 16735706, upload-time = "2025-11-16T22:51:19.558Z" }, - { url = "https://files.pythonhosted.org/packages/57/d4/51233b1c1b13ecd796311216ae417796b88b0616cfd8a33ae4536330748a/numpy-2.3.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:052e8c42e0c49d2575621c158934920524f6c5da05a1d3b9bab5d8e259e045f0", size = 12264507, upload-time = "2025-11-16T22:51:22.492Z" }, - { url = "https://files.pythonhosted.org/packages/45/98/2fe46c5c2675b8306d0b4a3ec3494273e93e1226a490f766e84298576956/numpy-2.3.5-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:1ed1ec893cff7040a02c8aa1c8611b94d395590d553f6b53629a4461dc7f7b63", size = 5093049, upload-time = "2025-11-16T22:51:25.171Z" }, - { url = 
"https://files.pythonhosted.org/packages/ce/0e/0698378989bb0ac5f1660c81c78ab1fe5476c1a521ca9ee9d0710ce54099/numpy-2.3.5-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:2dcd0808a421a482a080f89859a18beb0b3d1e905b81e617a188bd80422d62e9", size = 6626603, upload-time = "2025-11-16T22:51:27Z" }, - { url = "https://files.pythonhosted.org/packages/5e/a6/9ca0eecc489640615642a6cbc0ca9e10df70df38c4d43f5a928ff18d8827/numpy-2.3.5-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:727fd05b57df37dc0bcf1a27767a3d9a78cbbc92822445f32cc3436ba797337b", size = 14262696, upload-time = "2025-11-16T22:51:29.402Z" }, - { url = "https://files.pythonhosted.org/packages/c8/f6/07ec185b90ec9d7217a00eeeed7383b73d7e709dae2a9a021b051542a708/numpy-2.3.5-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fffe29a1ef00883599d1dc2c51aa2e5d80afe49523c261a74933df395c15c520", size = 16597350, upload-time = "2025-11-16T22:51:32.167Z" }, - { url = "https://files.pythonhosted.org/packages/75/37/164071d1dde6a1a84c9b8e5b414fa127981bad47adf3a6b7e23917e52190/numpy-2.3.5-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:8f7f0e05112916223d3f438f293abf0727e1181b5983f413dfa2fefc4098245c", size = 16040190, upload-time = "2025-11-16T22:51:35.403Z" }, - { url = "https://files.pythonhosted.org/packages/08/3c/f18b82a406b04859eb026d204e4e1773eb41c5be58410f41ffa511d114ae/numpy-2.3.5-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2e2eb32ddb9ccb817d620ac1d8dae7c3f641c1e5f55f531a33e8ab97960a75b8", size = 18536749, upload-time = "2025-11-16T22:51:39.698Z" }, - { url = "https://files.pythonhosted.org/packages/40/79/f82f572bf44cf0023a2fe8588768e23e1592585020d638999f15158609e1/numpy-2.3.5-cp314-cp314-win32.whl", hash = "sha256:66f85ce62c70b843bab1fb14a05d5737741e74e28c7b8b5a064de10142fad248", size = 6335432, upload-time = "2025-11-16T22:51:42.476Z" }, - { url = 
"https://files.pythonhosted.org/packages/a3/2e/235b4d96619931192c91660805e5e49242389742a7a82c27665021db690c/numpy-2.3.5-cp314-cp314-win_amd64.whl", hash = "sha256:e6a0bc88393d65807d751a614207b7129a310ca4fe76a74e5c7da5fa5671417e", size = 12919388, upload-time = "2025-11-16T22:51:45.275Z" }, - { url = "https://files.pythonhosted.org/packages/07/2b/29fd75ce45d22a39c61aad74f3d718e7ab67ccf839ca8b60866054eb15f8/numpy-2.3.5-cp314-cp314-win_arm64.whl", hash = "sha256:aeffcab3d4b43712bb7a60b65f6044d444e75e563ff6180af8f98dd4b905dfd2", size = 10476651, upload-time = "2025-11-16T22:51:47.749Z" }, - { url = "https://files.pythonhosted.org/packages/17/e1/f6a721234ebd4d87084cfa68d081bcba2f5cfe1974f7de4e0e8b9b2a2ba1/numpy-2.3.5-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:17531366a2e3a9e30762c000f2c43a9aaa05728712e25c11ce1dbe700c53ad41", size = 16834503, upload-time = "2025-11-16T22:51:50.443Z" }, - { url = "https://files.pythonhosted.org/packages/5c/1c/baf7ffdc3af9c356e1c135e57ab7cf8d247931b9554f55c467efe2c69eff/numpy-2.3.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:d21644de1b609825ede2f48be98dfde4656aefc713654eeee280e37cadc4e0ad", size = 12381612, upload-time = "2025-11-16T22:51:53.609Z" }, - { url = "https://files.pythonhosted.org/packages/74/91/f7f0295151407ddc9ba34e699013c32c3c91944f9b35fcf9281163dc1468/numpy-2.3.5-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:c804e3a5aba5460c73955c955bdbd5c08c354954e9270a2c1565f62e866bdc39", size = 5210042, upload-time = "2025-11-16T22:51:56.213Z" }, - { url = "https://files.pythonhosted.org/packages/2e/3b/78aebf345104ec50dd50a4d06ddeb46a9ff5261c33bcc58b1c4f12f85ec2/numpy-2.3.5-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:cc0a57f895b96ec78969c34f682c602bf8da1a0270b09bc65673df2e7638ec20", size = 6724502, upload-time = "2025-11-16T22:51:58.584Z" }, - { url = 
"https://files.pythonhosted.org/packages/02/c6/7c34b528740512e57ef1b7c8337ab0b4f0bddf34c723b8996c675bc2bc91/numpy-2.3.5-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:900218e456384ea676e24ea6a0417f030a3b07306d29d7ad843957b40a9d8d52", size = 14308962, upload-time = "2025-11-16T22:52:01.698Z" }, - { url = "https://files.pythonhosted.org/packages/80/35/09d433c5262bc32d725bafc619e095b6a6651caf94027a03da624146f655/numpy-2.3.5-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:09a1bea522b25109bf8e6f3027bd810f7c1085c64a0c7ce050c1676ad0ba010b", size = 16655054, upload-time = "2025-11-16T22:52:04.267Z" }, - { url = "https://files.pythonhosted.org/packages/7a/ab/6a7b259703c09a88804fa2430b43d6457b692378f6b74b356155283566ac/numpy-2.3.5-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:04822c00b5fd0323c8166d66c701dc31b7fbd252c100acd708c48f763968d6a3", size = 16091613, upload-time = "2025-11-16T22:52:08.651Z" }, - { url = "https://files.pythonhosted.org/packages/c2/88/330da2071e8771e60d1038166ff9d73f29da37b01ec3eb43cb1427464e10/numpy-2.3.5-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:d6889ec4ec662a1a37eb4b4fb26b6100841804dac55bd9df579e326cdc146227", size = 18591147, upload-time = "2025-11-16T22:52:11.453Z" }, - { url = "https://files.pythonhosted.org/packages/51/41/851c4b4082402d9ea860c3626db5d5df47164a712cb23b54be028b184c1c/numpy-2.3.5-cp314-cp314t-win32.whl", hash = "sha256:93eebbcf1aafdf7e2ddd44c2923e2672e1010bddc014138b229e49725b4d6be5", size = 6479806, upload-time = "2025-11-16T22:52:14.641Z" }, - { url = "https://files.pythonhosted.org/packages/90/30/d48bde1dfd93332fa557cff1972fbc039e055a52021fbef4c2c4b1eefd17/numpy-2.3.5-cp314-cp314t-win_amd64.whl", hash = "sha256:c8a9958e88b65c3b27e22ca2a076311636850b612d6bbfb76e8d156aacde2aaf", size = 13105760, upload-time = "2025-11-16T22:52:17.975Z" }, - { url = 
"https://files.pythonhosted.org/packages/2d/fd/4b5eb0b3e888d86aee4d198c23acec7d214baaf17ea93c1adec94c9518b9/numpy-2.3.5-cp314-cp314t-win_arm64.whl", hash = "sha256:6203fdf9f3dc5bdaed7319ad8698e685c7a3be10819f41d32a0723e611733b42", size = 10545459, upload-time = "2025-11-16T22:52:20.55Z" }, - { url = "https://files.pythonhosted.org/packages/c6/65/f9dea8e109371ade9c782b4e4756a82edf9d3366bca495d84d79859a0b79/numpy-2.3.5-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:f0963b55cdd70fad460fa4c1341f12f976bb26cb66021a5580329bd498988310", size = 16910689, upload-time = "2025-11-16T22:52:23.247Z" }, - { url = "https://files.pythonhosted.org/packages/00/4f/edb00032a8fb92ec0a679d3830368355da91a69cab6f3e9c21b64d0bb986/numpy-2.3.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:f4255143f5160d0de972d28c8f9665d882b5f61309d8362fdd3e103cf7bf010c", size = 12457053, upload-time = "2025-11-16T22:52:26.367Z" }, - { url = "https://files.pythonhosted.org/packages/16/a4/e8a53b5abd500a63836a29ebe145fc1ab1f2eefe1cfe59276020373ae0aa/numpy-2.3.5-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:a4b9159734b326535f4dd01d947f919c6eefd2d9827466a696c44ced82dfbc18", size = 5285635, upload-time = "2025-11-16T22:52:29.266Z" }, - { url = "https://files.pythonhosted.org/packages/a3/2f/37eeb9014d9c8b3e9c55bc599c68263ca44fdbc12a93e45a21d1d56df737/numpy-2.3.5-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = "sha256:2feae0d2c91d46e59fcd62784a3a83b3fb677fead592ce51b5a6fbb4f95965ff", size = 6801770, upload-time = "2025-11-16T22:52:31.421Z" }, - { url = "https://files.pythonhosted.org/packages/7d/e4/68d2f474df2cb671b2b6c2986a02e520671295647dad82484cde80ca427b/numpy-2.3.5-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ffac52f28a7849ad7576293c0cb7b9f08304e8f7d738a8cb8a90ec4c55a998eb", size = 14391768, upload-time = "2025-11-16T22:52:33.593Z" }, - { url = 
"https://files.pythonhosted.org/packages/b8/50/94ccd8a2b141cb50651fddd4f6a48874acb3c91c8f0842b08a6afc4b0b21/numpy-2.3.5-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:63c0e9e7eea69588479ebf4a8a270d5ac22763cc5854e9a7eae952a3908103f7", size = 16729263, upload-time = "2025-11-16T22:52:36.369Z" }, - { url = "https://files.pythonhosted.org/packages/2d/ee/346fa473e666fe14c52fcdd19ec2424157290a032d4c41f98127bfb31ac7/numpy-2.3.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:f16417ec91f12f814b10bafe79ef77e70113a2f5f7018640e7425ff979253425", size = 12967213, upload-time = "2025-11-16T22:52:39.38Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/a4/7a/6a3d14e205d292b738db449d0de649b373a59edb0d0b4493821d0a3e8718/numpy-2.4.0.tar.gz", hash = "sha256:6e504f7b16118198f138ef31ba24d985b124c2c469fe8467007cf30fd992f934", size = 20685720, upload-time = "2025-12-20T16:18:19.023Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/26/7e/7bae7cbcc2f8132271967aa03e03954fc1e48aa1f3bf32b29ca95fbef352/numpy-2.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:316b2f2584682318539f0bcaca5a496ce9ca78c88066579ebd11fd06f8e4741e", size = 16940166, upload-time = "2025-12-20T16:15:43.434Z" }, + { url = "https://files.pythonhosted.org/packages/0f/27/6c13f5b46776d6246ec884ac5817452672156a506d08a1f2abb39961930a/numpy-2.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a2718c1de8504121714234b6f8241d0019450353276c88b9453c9c3d92e101db", size = 12641781, upload-time = "2025-12-20T16:15:45.701Z" }, + { url = "https://files.pythonhosted.org/packages/14/1c/83b4998d4860d15283241d9e5215f28b40ac31f497c04b12fa7f428ff370/numpy-2.4.0-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:21555da4ec4a0c942520ead42c3b0dc9477441e085c42b0fbdd6a084869a6f6b", size = 5470247, upload-time = "2025-12-20T16:15:47.943Z" }, + { url = 
"https://files.pythonhosted.org/packages/54/08/cbce72c835d937795571b0464b52069f869c9e78b0c076d416c5269d2718/numpy-2.4.0-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:413aa561266a4be2d06cd2b9665e89d9f54c543f418773076a76adcf2af08bc7", size = 6799807, upload-time = "2025-12-20T16:15:49.795Z" }, + { url = "https://files.pythonhosted.org/packages/ff/be/2e647961cd8c980591d75cdcd9e8f647d69fbe05e2a25613dc0a2ea5fb1a/numpy-2.4.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0feafc9e03128074689183031181fac0897ff169692d8492066e949041096548", size = 14701992, upload-time = "2025-12-20T16:15:51.615Z" }, + { url = "https://files.pythonhosted.org/packages/a2/fb/e1652fb8b6fd91ce6ed429143fe2e01ce714711e03e5b762615e7b36172c/numpy-2.4.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8fdfed3deaf1928fb7667d96e0567cdf58c2b370ea2ee7e586aa383ec2cb346", size = 16646871, upload-time = "2025-12-20T16:15:54.129Z" }, + { url = "https://files.pythonhosted.org/packages/62/23/d841207e63c4322842f7cd042ae981cffe715c73376dcad8235fb31debf1/numpy-2.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e06a922a469cae9a57100864caf4f8a97a1026513793969f8ba5b63137a35d25", size = 16487190, upload-time = "2025-12-20T16:15:56.147Z" }, + { url = "https://files.pythonhosted.org/packages/bc/a0/6a842c8421ebfdec0a230e65f61e0dabda6edbef443d999d79b87c273965/numpy-2.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:927ccf5cd17c48f801f4ed43a7e5673a2724bd2171460be3e3894e6e332ef83a", size = 18580762, upload-time = "2025-12-20T16:15:58.524Z" }, + { url = "https://files.pythonhosted.org/packages/0a/d1/c79e0046641186f2134dde05e6181825b911f8bdcef31b19ddd16e232847/numpy-2.4.0-cp311-cp311-win32.whl", hash = "sha256:882567b7ae57c1b1a0250208cc21a7976d8cbcc49d5a322e607e6f09c9e0bd53", size = 6233359, upload-time = "2025-12-20T16:16:00.938Z" }, + { url = 
"https://files.pythonhosted.org/packages/fc/f0/74965001d231f28184d6305b8cdc1b6fcd4bf23033f6cb039cfe76c9fca7/numpy-2.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:8b986403023c8f3bf8f487c2e6186afda156174d31c175f747d8934dfddf3479", size = 12601132, upload-time = "2025-12-20T16:16:02.484Z" }, + { url = "https://files.pythonhosted.org/packages/65/32/55408d0f46dfebce38017f5bd931affa7256ad6beac1a92a012e1fbc67a7/numpy-2.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:3f3096405acc48887458bbf9f6814d43785ac7ba2a57ea6442b581dedbc60ce6", size = 10573977, upload-time = "2025-12-20T16:16:04.77Z" }, + { url = "https://files.pythonhosted.org/packages/8b/ff/f6400ffec95de41c74b8e73df32e3fff1830633193a7b1e409be7fb1bb8c/numpy-2.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2a8b6bb8369abefb8bd1801b054ad50e02b3275c8614dc6e5b0373c305291037", size = 16653117, upload-time = "2025-12-20T16:16:06.709Z" }, + { url = "https://files.pythonhosted.org/packages/fd/28/6c23e97450035072e8d830a3c411bf1abd1f42c611ff9d29e3d8f55c6252/numpy-2.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2e284ca13d5a8367e43734148622caf0b261b275673823593e3e3634a6490f83", size = 12369711, upload-time = "2025-12-20T16:16:08.758Z" }, + { url = "https://files.pythonhosted.org/packages/bc/af/acbef97b630ab1bb45e6a7d01d1452e4251aa88ce680ac36e56c272120ec/numpy-2.4.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:49ff32b09f5aa0cd30a20c2b39db3e669c845589f2b7fc910365210887e39344", size = 5198355, upload-time = "2025-12-20T16:16:10.902Z" }, + { url = "https://files.pythonhosted.org/packages/c1/c8/4e0d436b66b826f2e53330adaa6311f5cac9871a5b5c31ad773b27f25a74/numpy-2.4.0-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:36cbfb13c152b1c7c184ddac43765db8ad672567e7bafff2cc755a09917ed2e6", size = 6545298, upload-time = "2025-12-20T16:16:12.607Z" }, + { url = 
"https://files.pythonhosted.org/packages/ef/27/e1f5d144ab54eac34875e79037011d511ac57b21b220063310cb96c80fbc/numpy-2.4.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:35ddc8f4914466e6fc954c76527aa91aa763682a4f6d73249ef20b418fe6effb", size = 14398387, upload-time = "2025-12-20T16:16:14.257Z" }, + { url = "https://files.pythonhosted.org/packages/67/64/4cb909dd5ab09a9a5d086eff9586e69e827b88a5585517386879474f4cf7/numpy-2.4.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dc578891de1db95b2a35001b695451767b580bb45753717498213c5ff3c41d63", size = 16363091, upload-time = "2025-12-20T16:16:17.32Z" }, + { url = "https://files.pythonhosted.org/packages/9d/9c/8efe24577523ec6809261859737cf117b0eb6fdb655abdfdc81b2e468ce4/numpy-2.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:98e81648e0b36e325ab67e46b5400a7a6d4a22b8a7c8e8bbfe20e7db7906bf95", size = 16176394, upload-time = "2025-12-20T16:16:19.524Z" }, + { url = "https://files.pythonhosted.org/packages/61/f0/1687441ece7b47a62e45a1f82015352c240765c707928edd8aef875d5951/numpy-2.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d57b5046c120561ba8fa8e4030fbb8b822f3063910fa901ffadf16e2b7128ad6", size = 18287378, upload-time = "2025-12-20T16:16:22.866Z" }, + { url = "https://files.pythonhosted.org/packages/d3/6f/f868765d44e6fc466467ed810ba9d8d6db1add7d4a748abfa2a4c99a3194/numpy-2.4.0-cp312-cp312-win32.whl", hash = "sha256:92190db305a6f48734d3982f2c60fa30d6b5ee9bff10f2887b930d7b40119f4c", size = 5955432, upload-time = "2025-12-20T16:16:25.06Z" }, + { url = "https://files.pythonhosted.org/packages/d4/b5/94c1e79fcbab38d1ca15e13777477b2914dd2d559b410f96949d6637b085/numpy-2.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:680060061adb2d74ce352628cb798cfdec399068aa7f07ba9fb818b2b3305f98", size = 12306201, upload-time = "2025-12-20T16:16:26.979Z" }, + { url = 
"https://files.pythonhosted.org/packages/70/09/c39dadf0b13bb0768cd29d6a3aaff1fb7c6905ac40e9aaeca26b1c086e06/numpy-2.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:39699233bc72dd482da1415dcb06076e32f60eddc796a796c5fb6c5efce94667", size = 10308234, upload-time = "2025-12-20T16:16:29.417Z" }, + { url = "https://files.pythonhosted.org/packages/a7/0d/853fd96372eda07c824d24adf02e8bc92bb3731b43a9b2a39161c3667cc4/numpy-2.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a152d86a3ae00ba5f47b3acf3b827509fd0b6cb7d3259665e63dafbad22a75ea", size = 16649088, upload-time = "2025-12-20T16:16:31.421Z" }, + { url = "https://files.pythonhosted.org/packages/e3/37/cc636f1f2a9f585434e20a3e6e63422f70bfe4f7f6698e941db52ea1ac9a/numpy-2.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:39b19251dec4de8ff8496cd0806cbe27bf0684f765abb1f4809554de93785f2d", size = 12364065, upload-time = "2025-12-20T16:16:33.491Z" }, + { url = "https://files.pythonhosted.org/packages/ed/69/0b78f37ca3690969beee54103ce5f6021709134e8020767e93ba691a72f1/numpy-2.4.0-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:009bd0ea12d3c784b6639a8457537016ce5172109e585338e11334f6a7bb88ee", size = 5192640, upload-time = "2025-12-20T16:16:35.636Z" }, + { url = "https://files.pythonhosted.org/packages/1d/2a/08569f8252abf590294dbb09a430543ec8f8cc710383abfb3e75cc73aeda/numpy-2.4.0-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:5fe44e277225fd3dff6882d86d3d447205d43532c3627313d17e754fb3905a0e", size = 6541556, upload-time = "2025-12-20T16:16:37.276Z" }, + { url = "https://files.pythonhosted.org/packages/93/e9/a949885a4e177493d61519377952186b6cbfdf1d6002764c664ba28349b5/numpy-2.4.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f935c4493eda9069851058fa0d9e39dbf6286be690066509305e52912714dbb2", size = 14396562, upload-time = "2025-12-20T16:16:38.953Z" }, + { url = 
"https://files.pythonhosted.org/packages/99/98/9d4ad53b0e9ef901c2ef1d550d2136f5ac42d3fd2988390a6def32e23e48/numpy-2.4.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8cfa5f29a695cb7438965e6c3e8d06e0416060cf0d709c1b1c1653a939bf5c2a", size = 16351719, upload-time = "2025-12-20T16:16:41.503Z" }, + { url = "https://files.pythonhosted.org/packages/28/de/5f3711a38341d6e8dd619f6353251a0cdd07f3d6d101a8fd46f4ef87f895/numpy-2.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ba0cb30acd3ef11c94dc27fbfba68940652492bc107075e7ffe23057f9425681", size = 16176053, upload-time = "2025-12-20T16:16:44.552Z" }, + { url = "https://files.pythonhosted.org/packages/2a/5b/2a3753dc43916501b4183532e7ace862e13211042bceafa253afb5c71272/numpy-2.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:60e8c196cd82cbbd4f130b5290007e13e6de3eca79f0d4d38014769d96a7c475", size = 18277859, upload-time = "2025-12-20T16:16:47.174Z" }, + { url = "https://files.pythonhosted.org/packages/2c/c5/a18bcdd07a941db3076ef489d036ab16d2bfc2eae0cf27e5a26e29189434/numpy-2.4.0-cp313-cp313-win32.whl", hash = "sha256:5f48cb3e88fbc294dc90e215d86fbaf1c852c63dbdb6c3a3e63f45c4b57f7344", size = 5953849, upload-time = "2025-12-20T16:16:49.554Z" }, + { url = "https://files.pythonhosted.org/packages/4f/f1/719010ff8061da6e8a26e1980cf090412d4f5f8060b31f0c45d77dd67a01/numpy-2.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:a899699294f28f7be8992853c0c60741f16ff199205e2e6cdca155762cbaa59d", size = 12302840, upload-time = "2025-12-20T16:16:51.227Z" }, + { url = "https://files.pythonhosted.org/packages/f5/5a/b3d259083ed8b4d335270c76966cb6cf14a5d1b69e1a608994ac57a659e6/numpy-2.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:9198f447e1dc5647d07c9a6bbe2063cc0132728cc7175b39dbc796da5b54920d", size = 10308509, upload-time = "2025-12-20T16:16:53.313Z" }, + { url = 
"https://files.pythonhosted.org/packages/31/01/95edcffd1bb6c0633df4e808130545c4f07383ab629ac7e316fb44fff677/numpy-2.4.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:74623f2ab5cc3f7c886add4f735d1031a1d2be4a4ae63c0546cfd74e7a31ddf6", size = 12491815, upload-time = "2025-12-20T16:16:55.496Z" }, + { url = "https://files.pythonhosted.org/packages/59/ea/5644b8baa92cc1c7163b4b4458c8679852733fa74ca49c942cfa82ded4e0/numpy-2.4.0-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:0804a8e4ab070d1d35496e65ffd3cf8114c136a2b81f61dfab0de4b218aacfd5", size = 5320321, upload-time = "2025-12-20T16:16:57.468Z" }, + { url = "https://files.pythonhosted.org/packages/26/4e/e10938106d70bc21319bd6a86ae726da37edc802ce35a3a71ecdf1fdfe7f/numpy-2.4.0-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:02a2038eb27f9443a8b266a66911e926566b5a6ffd1a689b588f7f35b81e7dc3", size = 6641635, upload-time = "2025-12-20T16:16:59.379Z" }, + { url = "https://files.pythonhosted.org/packages/b3/8d/a8828e3eaf5c0b4ab116924df82f24ce3416fa38d0674d8f708ddc6c8aac/numpy-2.4.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1889b3a3f47a7b5bee16bc25a2145bd7cb91897f815ce3499db64c7458b6d91d", size = 14456053, upload-time = "2025-12-20T16:17:01.768Z" }, + { url = "https://files.pythonhosted.org/packages/68/a1/17d97609d87d4520aa5ae2dcfb32305654550ac6a35effb946d303e594ce/numpy-2.4.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85eef4cb5625c47ee6425c58a3502555e10f45ee973da878ac8248ad58c136f3", size = 16401702, upload-time = "2025-12-20T16:17:04.235Z" }, + { url = "https://files.pythonhosted.org/packages/18/32/0f13c1b2d22bea1118356b8b963195446f3af124ed7a5adfa8fdecb1b6ca/numpy-2.4.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6dc8b7e2f4eb184b37655195f421836cfae6f58197b67e3ffc501f1333d993fa", size = 16242493, upload-time = "2025-12-20T16:17:06.856Z" }, + { url = 
"https://files.pythonhosted.org/packages/ae/23/48f21e3d309fbc137c068a1475358cbd3a901b3987dcfc97a029ab3068e2/numpy-2.4.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:44aba2f0cafd287871a495fb3163408b0bd25bbce135c6f621534a07f4f7875c", size = 18324222, upload-time = "2025-12-20T16:17:09.392Z" }, + { url = "https://files.pythonhosted.org/packages/ac/52/41f3d71296a3dcaa4f456aaa3c6fc8e745b43d0552b6bde56571bb4b4a0f/numpy-2.4.0-cp313-cp313t-win32.whl", hash = "sha256:20c115517513831860c573996e395707aa9fb691eb179200125c250e895fcd93", size = 6076216, upload-time = "2025-12-20T16:17:11.437Z" }, + { url = "https://files.pythonhosted.org/packages/35/ff/46fbfe60ab0710d2a2b16995f708750307d30eccbb4c38371ea9e986866e/numpy-2.4.0-cp313-cp313t-win_amd64.whl", hash = "sha256:b48e35f4ab6f6a7597c46e301126ceba4c44cd3280e3750f85db48b082624fa4", size = 12444263, upload-time = "2025-12-20T16:17:13.182Z" }, + { url = "https://files.pythonhosted.org/packages/a3/e3/9189ab319c01d2ed556c932ccf55064c5d75bb5850d1df7a482ce0badead/numpy-2.4.0-cp313-cp313t-win_arm64.whl", hash = "sha256:4d1cfce39e511069b11e67cd0bd78ceff31443b7c9e5c04db73c7a19f572967c", size = 10378265, upload-time = "2025-12-20T16:17:15.211Z" }, + { url = "https://files.pythonhosted.org/packages/ab/ed/52eac27de39d5e5a6c9aadabe672bc06f55e24a3d9010cd1183948055d76/numpy-2.4.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:c95eb6db2884917d86cde0b4d4cf31adf485c8ec36bf8696dd66fa70de96f36b", size = 16647476, upload-time = "2025-12-20T16:17:17.671Z" }, + { url = "https://files.pythonhosted.org/packages/77/c0/990ce1b7fcd4e09aeaa574e2a0a839589e4b08b2ca68070f1acb1fea6736/numpy-2.4.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:65167da969cd1ec3a1df31cb221ca3a19a8aaa25370ecb17d428415e93c1935e", size = 12374563, upload-time = "2025-12-20T16:17:20.216Z" }, + { url = "https://files.pythonhosted.org/packages/37/7c/8c5e389c6ae8f5fd2277a988600d79e9625db3fff011a2d87ac80b881a4c/numpy-2.4.0-cp314-cp314-macosx_14_0_arm64.whl", hash = 
"sha256:3de19cfecd1465d0dcf8a5b5ea8b3155b42ed0b639dba4b71e323d74f2a3be5e", size = 5203107, upload-time = "2025-12-20T16:17:22.47Z" }, + { url = "https://files.pythonhosted.org/packages/e6/94/ca5b3bd6a8a70a5eec9a0b8dd7f980c1eff4b8a54970a9a7fef248ef564f/numpy-2.4.0-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:6c05483c3136ac4c91b4e81903cb53a8707d316f488124d0398499a4f8e8ef51", size = 6538067, upload-time = "2025-12-20T16:17:24.001Z" }, + { url = "https://files.pythonhosted.org/packages/79/43/993eb7bb5be6761dde2b3a3a594d689cec83398e3f58f4758010f3b85727/numpy-2.4.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36667db4d6c1cea79c8930ab72fadfb4060feb4bfe724141cd4bd064d2e5f8ce", size = 14411926, upload-time = "2025-12-20T16:17:25.822Z" }, + { url = "https://files.pythonhosted.org/packages/03/75/d4c43b61de473912496317a854dac54f1efec3eeb158438da6884b70bb90/numpy-2.4.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9a818668b674047fd88c4cddada7ab8f1c298812783e8328e956b78dc4807f9f", size = 16354295, upload-time = "2025-12-20T16:17:28.308Z" }, + { url = "https://files.pythonhosted.org/packages/b8/0a/b54615b47ee8736a6461a4bb6749128dd3435c5a759d5663f11f0e9af4ac/numpy-2.4.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:1ee32359fb7543b7b7bd0b2f46294db27e29e7bbdf70541e81b190836cd83ded", size = 16190242, upload-time = "2025-12-20T16:17:30.993Z" }, + { url = "https://files.pythonhosted.org/packages/98/ce/ea207769aacad6246525ec6c6bbd66a2bf56c72443dc10e2f90feed29290/numpy-2.4.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:e493962256a38f58283de033d8af176c5c91c084ea30f15834f7545451c42059", size = 18280875, upload-time = "2025-12-20T16:17:33.327Z" }, + { url = "https://files.pythonhosted.org/packages/17/ef/ec409437aa962ea372ed601c519a2b141701683ff028f894b7466f0ab42b/numpy-2.4.0-cp314-cp314-win32.whl", hash = "sha256:6bbaebf0d11567fa8926215ae731e1d58e6ec28a8a25235b8a47405d301332db", size = 6002530, 
upload-time = "2025-12-20T16:17:35.729Z" }, + { url = "https://files.pythonhosted.org/packages/5f/4a/5cb94c787a3ed1ac65e1271b968686521169a7b3ec0b6544bb3ca32960b0/numpy-2.4.0-cp314-cp314-win_amd64.whl", hash = "sha256:3d857f55e7fdf7c38ab96c4558c95b97d1c685be6b05c249f5fdafcbd6f9899e", size = 12435890, upload-time = "2025-12-20T16:17:37.599Z" }, + { url = "https://files.pythonhosted.org/packages/48/a0/04b89db963af9de1104975e2544f30de89adbf75b9e75f7dd2599be12c79/numpy-2.4.0-cp314-cp314-win_arm64.whl", hash = "sha256:bb50ce5fb202a26fd5404620e7ef820ad1ab3558b444cb0b55beb7ef66cd2d63", size = 10591892, upload-time = "2025-12-20T16:17:39.649Z" }, + { url = "https://files.pythonhosted.org/packages/53/e5/d74b5ccf6712c06c7a545025a6a71bfa03bdc7e0568b405b0d655232fd92/numpy-2.4.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:355354388cba60f2132df297e2d53053d4063f79077b67b481d21276d61fc4df", size = 12494312, upload-time = "2025-12-20T16:17:41.714Z" }, + { url = "https://files.pythonhosted.org/packages/c2/08/3ca9cc2ddf54dfee7ae9a6479c071092a228c68aef08252aa08dac2af002/numpy-2.4.0-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:1d8f9fde5f6dc1b6fc34df8162f3b3079365468703fee7f31d4e0cc8c63baed9", size = 5322862, upload-time = "2025-12-20T16:17:44.145Z" }, + { url = "https://files.pythonhosted.org/packages/87/74/0bb63a68394c0c1e52670cfff2e309afa41edbe11b3327d9af29e4383f34/numpy-2.4.0-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:e0434aa22c821f44eeb4c650b81c7fbdd8c0122c6c4b5a576a76d5a35625ecd9", size = 6644986, upload-time = "2025-12-20T16:17:46.203Z" }, + { url = "https://files.pythonhosted.org/packages/06/8f/9264d9bdbcf8236af2823623fe2f3981d740fc3461e2787e231d97c38c28/numpy-2.4.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:40483b2f2d3ba7aad426443767ff5632ec3156ef09742b96913787d13c336471", size = 14457958, upload-time = "2025-12-20T16:17:48.017Z" }, + { url = 
"https://files.pythonhosted.org/packages/8c/d9/f9a69ae564bbc7236a35aa883319364ef5fd41f72aa320cc1cbe66148fe2/numpy-2.4.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d9e6a7664ddd9746e20b7325351fe1a8408d0a2bf9c63b5e898290ddc8f09544", size = 16398394, upload-time = "2025-12-20T16:17:50.409Z" }, + { url = "https://files.pythonhosted.org/packages/34/c7/39241501408dde7f885d241a98caba5421061a2c6d2b2197ac5e3aa842d8/numpy-2.4.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:ecb0019d44f4cdb50b676c5d0cb4b1eae8e15d1ed3d3e6639f986fc92b2ec52c", size = 16241044, upload-time = "2025-12-20T16:17:52.661Z" }, + { url = "https://files.pythonhosted.org/packages/7c/95/cae7effd90e065a95e59fe710eeee05d7328ed169776dfdd9f789e032125/numpy-2.4.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:d0ffd9e2e4441c96a9c91ec1783285d80bf835b677853fc2770a89d50c1e48ac", size = 18321772, upload-time = "2025-12-20T16:17:54.947Z" }, + { url = "https://files.pythonhosted.org/packages/96/df/3c6c279accd2bfb968a76298e5b276310bd55d243df4fa8ac5816d79347d/numpy-2.4.0-cp314-cp314t-win32.whl", hash = "sha256:77f0d13fa87036d7553bf81f0e1fe3ce68d14c9976c9851744e4d3e91127e95f", size = 6148320, upload-time = "2025-12-20T16:17:57.249Z" }, + { url = "https://files.pythonhosted.org/packages/92/8d/f23033cce252e7a75cae853d17f582e86534c46404dea1c8ee094a9d6d84/numpy-2.4.0-cp314-cp314t-win_amd64.whl", hash = "sha256:b1f5b45829ac1848893f0ddf5cb326110604d6df96cdc255b0bf9edd154104d4", size = 12623460, upload-time = "2025-12-20T16:17:58.963Z" }, + { url = "https://files.pythonhosted.org/packages/a4/4f/1f8475907d1a7c4ef9020edf7f39ea2422ec896849245f00688e4b268a71/numpy-2.4.0-cp314-cp314t-win_arm64.whl", hash = "sha256:23a3e9d1a6f360267e8fbb38ba5db355a6a7e9be71d7fce7ab3125e88bb646c8", size = 10661799, upload-time = "2025-12-20T16:18:01.078Z" }, + { url = 
"https://files.pythonhosted.org/packages/4b/ef/088e7c7342f300aaf3ee5f2c821c4b9996a1bef2aaf6a49cc8ab4883758e/numpy-2.4.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:b54c83f1c0c0f1d748dca0af516062b8829d53d1f0c402be24b4257a9c48ada6", size = 16819003, upload-time = "2025-12-20T16:18:03.41Z" }, + { url = "https://files.pythonhosted.org/packages/ff/ce/a53017b5443b4b84517182d463fc7bcc2adb4faa8b20813f8e5f5aeb5faa/numpy-2.4.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:aabb081ca0ec5d39591fc33018cd4b3f96e1a2dd6756282029986d00a785fba4", size = 12567105, upload-time = "2025-12-20T16:18:05.594Z" }, + { url = "https://files.pythonhosted.org/packages/77/58/5ff91b161f2ec650c88a626c3905d938c89aaadabd0431e6d9c1330c83e2/numpy-2.4.0-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:8eafe7c36c8430b7794edeab3087dec7bf31d634d92f2af9949434b9d1964cba", size = 5395590, upload-time = "2025-12-20T16:18:08.031Z" }, + { url = "https://files.pythonhosted.org/packages/1d/4e/f1a084106df8c2df8132fc437e56987308e0524836aa7733721c8429d4fe/numpy-2.4.0-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = "sha256:2f585f52b2baf07ff3356158d9268ea095e221371f1074fadea2f42544d58b4d", size = 6709947, upload-time = "2025-12-20T16:18:09.836Z" }, + { url = "https://files.pythonhosted.org/packages/63/09/3d8aeb809c0332c3f642da812ac2e3d74fc9252b3021f8c30c82e99e3f3d/numpy-2.4.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:32ed06d0fe9cae27d8fb5f400c63ccee72370599c75e683a6358dd3a4fb50aaf", size = 14535119, upload-time = "2025-12-20T16:18:12.105Z" }, + { url = "https://files.pythonhosted.org/packages/fd/7f/68f0fc43a2cbdc6bb239160c754d87c922f60fbaa0fa3cd3d312b8a7f5ee/numpy-2.4.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:57c540ed8fb1f05cb997c6761cd56db72395b0d6985e90571ff660452ade4f98", size = 16475815, upload-time = "2025-12-20T16:18:14.433Z" }, + { url = 
"https://files.pythonhosted.org/packages/11/73/edeacba3167b1ca66d51b1a5a14697c2c40098b5ffa01811c67b1785a5ab/numpy-2.4.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:a39fb973a726e63223287adc6dafe444ce75af952d711e400f3bf2b36ef55a7b", size = 12489376, upload-time = "2025-12-20T16:18:16.524Z" }, ] [[package]] @@ -4246,7 +4293,7 @@ version = "2.3.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, - { name = "numpy", version = "2.3.5", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "numpy", version = "2.4.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, { name = "python-dateutil", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "pytz", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "tzdata", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -4322,100 +4369,100 @@ wheels = [ [[package]] name = "pillow" -version = "12.0.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/5a/b0/cace85a1b0c9775a9f8f5d5423c8261c858760e2466c79b2dd184638b056/pillow-12.0.0.tar.gz", hash = "sha256:87d4f8125c9988bfbed67af47dd7a953e2fc7b0cc1e7800ec6d2080d490bb353", 
size = 47008828, upload-time = "2025-10-15T18:24:14.008Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/5d/08/26e68b6b5da219c2a2cb7b563af008b53bb8e6b6fcb3fa40715fcdb2523a/pillow-12.0.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:3adfb466bbc544b926d50fe8f4a4e6abd8c6bffd28a26177594e6e9b2b76572b", size = 5289809, upload-time = "2025-10-15T18:21:27.791Z" }, - { url = "https://files.pythonhosted.org/packages/cb/e9/4e58fb097fb74c7b4758a680aacd558810a417d1edaa7000142976ef9d2f/pillow-12.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1ac11e8ea4f611c3c0147424eae514028b5e9077dd99ab91e1bd7bc33ff145e1", size = 4650606, upload-time = "2025-10-15T18:21:29.823Z" }, - { url = "https://files.pythonhosted.org/packages/4b/e0/1fa492aa9f77b3bc6d471c468e62bfea1823056bf7e5e4f1914d7ab2565e/pillow-12.0.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d49e2314c373f4c2b39446fb1a45ed333c850e09d0c59ac79b72eb3b95397363", size = 6221023, upload-time = "2025-10-15T18:21:31.415Z" }, - { url = "https://files.pythonhosted.org/packages/c1/09/4de7cd03e33734ccd0c876f0251401f1314e819cbfd89a0fcb6e77927cc6/pillow-12.0.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c7b2a63fd6d5246349f3d3f37b14430d73ee7e8173154461785e43036ffa96ca", size = 8024937, upload-time = "2025-10-15T18:21:33.453Z" }, - { url = "https://files.pythonhosted.org/packages/2e/69/0688e7c1390666592876d9d474f5e135abb4acb39dcb583c4dc5490f1aff/pillow-12.0.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d64317d2587c70324b79861babb9c09f71fbb780bad212018874b2c013d8600e", size = 6334139, upload-time = "2025-10-15T18:21:35.395Z" }, - { url = "https://files.pythonhosted.org/packages/ed/1c/880921e98f525b9b44ce747ad1ea8f73fd7e992bafe3ca5e5644bf433dea/pillow-12.0.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d77153e14b709fd8b8af6f66a3afbb9ed6e9fc5ccf0b6b7e1ced7b036a228782", size = 7026074, 
upload-time = "2025-10-15T18:21:37.219Z" }, - { url = "https://files.pythonhosted.org/packages/28/03/96f718331b19b355610ef4ebdbbde3557c726513030665071fd025745671/pillow-12.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:32ed80ea8a90ee3e6fa08c21e2e091bba6eda8eccc83dbc34c95169507a91f10", size = 6448852, upload-time = "2025-10-15T18:21:39.168Z" }, - { url = "https://files.pythonhosted.org/packages/3a/a0/6a193b3f0cc9437b122978d2c5cbce59510ccf9a5b48825096ed7472da2f/pillow-12.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c828a1ae702fc712978bda0320ba1b9893d99be0badf2647f693cc01cf0f04fa", size = 7117058, upload-time = "2025-10-15T18:21:40.997Z" }, - { url = "https://files.pythonhosted.org/packages/a7/c4/043192375eaa4463254e8e61f0e2ec9a846b983929a8d0a7122e0a6d6fff/pillow-12.0.0-cp310-cp310-win32.whl", hash = "sha256:bd87e140e45399c818fac4247880b9ce719e4783d767e030a883a970be632275", size = 6295431, upload-time = "2025-10-15T18:21:42.518Z" }, - { url = "https://files.pythonhosted.org/packages/92/c6/c2f2fc7e56301c21827e689bb8b0b465f1b52878b57471a070678c0c33cd/pillow-12.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:455247ac8a4cfb7b9bc45b7e432d10421aea9fc2e74d285ba4072688a74c2e9d", size = 7000412, upload-time = "2025-10-15T18:21:44.404Z" }, - { url = "https://files.pythonhosted.org/packages/b2/d2/5f675067ba82da7a1c238a73b32e3fd78d67f9d9f80fbadd33a40b9c0481/pillow-12.0.0-cp310-cp310-win_arm64.whl", hash = "sha256:6ace95230bfb7cd79ef66caa064bbe2f2a1e63d93471c3a2e1f1348d9f22d6b7", size = 2435903, upload-time = "2025-10-15T18:21:46.29Z" }, - { url = "https://files.pythonhosted.org/packages/0e/5a/a2f6773b64edb921a756eb0729068acad9fc5208a53f4a349396e9436721/pillow-12.0.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:0fd00cac9c03256c8b2ff58f162ebcd2587ad3e1f2e397eab718c47e24d231cc", size = 5289798, upload-time = "2025-10-15T18:21:47.763Z" }, - { url = 
"https://files.pythonhosted.org/packages/2e/05/069b1f8a2e4b5a37493da6c5868531c3f77b85e716ad7a590ef87d58730d/pillow-12.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a3475b96f5908b3b16c47533daaa87380c491357d197564e0ba34ae75c0f3257", size = 4650589, upload-time = "2025-10-15T18:21:49.515Z" }, - { url = "https://files.pythonhosted.org/packages/61/e3/2c820d6e9a36432503ead175ae294f96861b07600a7156154a086ba7111a/pillow-12.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:110486b79f2d112cf6add83b28b627e369219388f64ef2f960fef9ebaf54c642", size = 6230472, upload-time = "2025-10-15T18:21:51.052Z" }, - { url = "https://files.pythonhosted.org/packages/4f/89/63427f51c64209c5e23d4d52071c8d0f21024d3a8a487737caaf614a5795/pillow-12.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5269cc1caeedb67e6f7269a42014f381f45e2e7cd42d834ede3c703a1d915fe3", size = 8033887, upload-time = "2025-10-15T18:21:52.604Z" }, - { url = "https://files.pythonhosted.org/packages/f6/1b/c9711318d4901093c15840f268ad649459cd81984c9ec9887756cca049a5/pillow-12.0.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:aa5129de4e174daccbc59d0a3b6d20eaf24417d59851c07ebb37aeb02947987c", size = 6343964, upload-time = "2025-10-15T18:21:54.619Z" }, - { url = "https://files.pythonhosted.org/packages/41/1e/db9470f2d030b4995083044cd8738cdd1bf773106819f6d8ba12597d5352/pillow-12.0.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bee2a6db3a7242ea309aa7ee8e2780726fed67ff4e5b40169f2c940e7eb09227", size = 7034756, upload-time = "2025-10-15T18:21:56.151Z" }, - { url = "https://files.pythonhosted.org/packages/cc/b0/6177a8bdd5ee4ed87cba2de5a3cc1db55ffbbec6176784ce5bb75aa96798/pillow-12.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:90387104ee8400a7b4598253b4c406f8958f59fcf983a6cea2b50d59f7d63d0b", size = 6458075, upload-time = "2025-10-15T18:21:57.759Z" }, - { url = 
"https://files.pythonhosted.org/packages/bc/5e/61537aa6fa977922c6a03253a0e727e6e4a72381a80d63ad8eec350684f2/pillow-12.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bc91a56697869546d1b8f0a3ff35224557ae7f881050e99f615e0119bf934b4e", size = 7125955, upload-time = "2025-10-15T18:21:59.372Z" }, - { url = "https://files.pythonhosted.org/packages/1f/3d/d5033539344ee3cbd9a4d69e12e63ca3a44a739eb2d4c8da350a3d38edd7/pillow-12.0.0-cp311-cp311-win32.whl", hash = "sha256:27f95b12453d165099c84f8a8bfdfd46b9e4bda9e0e4b65f0635430027f55739", size = 6298440, upload-time = "2025-10-15T18:22:00.982Z" }, - { url = "https://files.pythonhosted.org/packages/4d/42/aaca386de5cc8bd8a0254516957c1f265e3521c91515b16e286c662854c4/pillow-12.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:b583dc9070312190192631373c6c8ed277254aa6e6084b74bdd0a6d3b221608e", size = 6999256, upload-time = "2025-10-15T18:22:02.617Z" }, - { url = "https://files.pythonhosted.org/packages/ba/f1/9197c9c2d5708b785f631a6dfbfa8eb3fb9672837cb92ae9af812c13b4ed/pillow-12.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:759de84a33be3b178a64c8ba28ad5c135900359e85fb662bc6e403ad4407791d", size = 2436025, upload-time = "2025-10-15T18:22:04.598Z" }, - { url = "https://files.pythonhosted.org/packages/2c/90/4fcce2c22caf044e660a198d740e7fbc14395619e3cb1abad12192c0826c/pillow-12.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:53561a4ddc36facb432fae7a9d8afbfaf94795414f5cdc5fc52f28c1dca90371", size = 5249377, upload-time = "2025-10-15T18:22:05.993Z" }, - { url = "https://files.pythonhosted.org/packages/fd/e0/ed960067543d080691d47d6938ebccbf3976a931c9567ab2fbfab983a5dd/pillow-12.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:71db6b4c1653045dacc1585c1b0d184004f0d7e694c7b34ac165ca70c0838082", size = 4650343, upload-time = "2025-10-15T18:22:07.718Z" }, - { url = 
"https://files.pythonhosted.org/packages/e7/a1/f81fdeddcb99c044bf7d6faa47e12850f13cee0849537a7d27eeab5534d4/pillow-12.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2fa5f0b6716fc88f11380b88b31fe591a06c6315e955c096c35715788b339e3f", size = 6232981, upload-time = "2025-10-15T18:22:09.287Z" }, - { url = "https://files.pythonhosted.org/packages/88/e1/9098d3ce341a8750b55b0e00c03f1630d6178f38ac191c81c97a3b047b44/pillow-12.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:82240051c6ca513c616f7f9da06e871f61bfd7805f566275841af15015b8f98d", size = 8041399, upload-time = "2025-10-15T18:22:10.872Z" }, - { url = "https://files.pythonhosted.org/packages/a7/62/a22e8d3b602ae8cc01446d0c57a54e982737f44b6f2e1e019a925143771d/pillow-12.0.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:55f818bd74fe2f11d4d7cbc65880a843c4075e0ac7226bc1a23261dbea531953", size = 6347740, upload-time = "2025-10-15T18:22:12.769Z" }, - { url = "https://files.pythonhosted.org/packages/4f/87/424511bdcd02c8d7acf9f65caa09f291a519b16bd83c3fb3374b3d4ae951/pillow-12.0.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b87843e225e74576437fd5b6a4c2205d422754f84a06942cfaf1dc32243e45a8", size = 7040201, upload-time = "2025-10-15T18:22:14.813Z" }, - { url = "https://files.pythonhosted.org/packages/dc/4d/435c8ac688c54d11755aedfdd9f29c9eeddf68d150fe42d1d3dbd2365149/pillow-12.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:c607c90ba67533e1b2355b821fef6764d1dd2cbe26b8c1005ae84f7aea25ff79", size = 6462334, upload-time = "2025-10-15T18:22:16.375Z" }, - { url = "https://files.pythonhosted.org/packages/2b/f2/ad34167a8059a59b8ad10bc5c72d4d9b35acc6b7c0877af8ac885b5f2044/pillow-12.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:21f241bdd5080a15bc86d3466a9f6074a9c2c2b314100dd896ac81ee6db2f1ba", size = 7134162, upload-time = "2025-10-15T18:22:17.996Z" }, - { url = 
"https://files.pythonhosted.org/packages/0c/b1/a7391df6adacf0a5c2cf6ac1cf1fcc1369e7d439d28f637a847f8803beb3/pillow-12.0.0-cp312-cp312-win32.whl", hash = "sha256:dd333073e0cacdc3089525c7df7d39b211bcdf31fc2824e49d01c6b6187b07d0", size = 6298769, upload-time = "2025-10-15T18:22:19.923Z" }, - { url = "https://files.pythonhosted.org/packages/a2/0b/d87733741526541c909bbf159e338dcace4f982daac6e5a8d6be225ca32d/pillow-12.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:9fe611163f6303d1619bbcb653540a4d60f9e55e622d60a3108be0d5b441017a", size = 7001107, upload-time = "2025-10-15T18:22:21.644Z" }, - { url = "https://files.pythonhosted.org/packages/bc/96/aaa61ce33cc98421fb6088af2a03be4157b1e7e0e87087c888e2370a7f45/pillow-12.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:7dfb439562f234f7d57b1ac6bc8fe7f838a4bd49c79230e0f6a1da93e82f1fad", size = 2436012, upload-time = "2025-10-15T18:22:23.621Z" }, - { url = "https://files.pythonhosted.org/packages/62/f2/de993bb2d21b33a98d031ecf6a978e4b61da207bef02f7b43093774c480d/pillow-12.0.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:0869154a2d0546545cde61d1789a6524319fc1897d9ee31218eae7a60ccc5643", size = 4045493, upload-time = "2025-10-15T18:22:25.758Z" }, - { url = "https://files.pythonhosted.org/packages/0e/b6/bc8d0c4c9f6f111a783d045310945deb769b806d7574764234ffd50bc5ea/pillow-12.0.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:a7921c5a6d31b3d756ec980f2f47c0cfdbce0fc48c22a39347a895f41f4a6ea4", size = 4120461, upload-time = "2025-10-15T18:22:27.286Z" }, - { url = "https://files.pythonhosted.org/packages/5d/57/d60d343709366a353dc56adb4ee1e7d8a2cc34e3fbc22905f4167cfec119/pillow-12.0.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:1ee80a59f6ce048ae13cda1abf7fbd2a34ab9ee7d401c46be3ca685d1999a399", size = 3576912, upload-time = "2025-10-15T18:22:28.751Z" }, - { url = 
"https://files.pythonhosted.org/packages/a4/a4/a0a31467e3f83b94d37568294b01d22b43ae3c5d85f2811769b9c66389dd/pillow-12.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c50f36a62a22d350c96e49ad02d0da41dbd17ddc2e29750dbdba4323f85eb4a5", size = 5249132, upload-time = "2025-10-15T18:22:30.641Z" }, - { url = "https://files.pythonhosted.org/packages/83/06/48eab21dd561de2914242711434c0c0eb992ed08ff3f6107a5f44527f5e9/pillow-12.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5193fde9a5f23c331ea26d0cf171fbf67e3f247585f50c08b3e205c7aeb4589b", size = 4650099, upload-time = "2025-10-15T18:22:32.73Z" }, - { url = "https://files.pythonhosted.org/packages/fc/bd/69ed99fd46a8dba7c1887156d3572fe4484e3f031405fcc5a92e31c04035/pillow-12.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bde737cff1a975b70652b62d626f7785e0480918dece11e8fef3c0cf057351c3", size = 6230808, upload-time = "2025-10-15T18:22:34.337Z" }, - { url = "https://files.pythonhosted.org/packages/ea/94/8fad659bcdbf86ed70099cb60ae40be6acca434bbc8c4c0d4ef356d7e0de/pillow-12.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a6597ff2b61d121172f5844b53f21467f7082f5fb385a9a29c01414463f93b07", size = 8037804, upload-time = "2025-10-15T18:22:36.402Z" }, - { url = "https://files.pythonhosted.org/packages/20/39/c685d05c06deecfd4e2d1950e9a908aa2ca8bc4e6c3b12d93b9cafbd7837/pillow-12.0.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0b817e7035ea7f6b942c13aa03bb554fc44fea70838ea21f8eb31c638326584e", size = 6345553, upload-time = "2025-10-15T18:22:38.066Z" }, - { url = "https://files.pythonhosted.org/packages/38/57/755dbd06530a27a5ed74f8cb0a7a44a21722ebf318edbe67ddbd7fb28f88/pillow-12.0.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f4f1231b7dec408e8670264ce63e9c71409d9583dd21d32c163e25213ee2a344", size = 7037729, upload-time = "2025-10-15T18:22:39.769Z" }, - { url = 
"https://files.pythonhosted.org/packages/ca/b6/7e94f4c41d238615674d06ed677c14883103dce1c52e4af16f000338cfd7/pillow-12.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6e51b71417049ad6ab14c49608b4a24d8fb3fe605e5dfabfe523b58064dc3d27", size = 6459789, upload-time = "2025-10-15T18:22:41.437Z" }, - { url = "https://files.pythonhosted.org/packages/9c/14/4448bb0b5e0f22dd865290536d20ec8a23b64e2d04280b89139f09a36bb6/pillow-12.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d120c38a42c234dc9a8c5de7ceaaf899cf33561956acb4941653f8bdc657aa79", size = 7130917, upload-time = "2025-10-15T18:22:43.152Z" }, - { url = "https://files.pythonhosted.org/packages/dd/ca/16c6926cc1c015845745d5c16c9358e24282f1e588237a4c36d2b30f182f/pillow-12.0.0-cp313-cp313-win32.whl", hash = "sha256:4cc6b3b2efff105c6a1656cfe59da4fdde2cda9af1c5e0b58529b24525d0a098", size = 6302391, upload-time = "2025-10-15T18:22:44.753Z" }, - { url = "https://files.pythonhosted.org/packages/6d/2a/dd43dcfd6dae9b6a49ee28a8eedb98c7d5ff2de94a5d834565164667b97b/pillow-12.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:4cf7fed4b4580601c4345ceb5d4cbf5a980d030fd5ad07c4d2ec589f95f09905", size = 7007477, upload-time = "2025-10-15T18:22:46.838Z" }, - { url = "https://files.pythonhosted.org/packages/77/f0/72ea067f4b5ae5ead653053212af05ce3705807906ba3f3e8f58ddf617e6/pillow-12.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:9f0b04c6b8584c2c193babcccc908b38ed29524b29dd464bc8801bf10d746a3a", size = 2435918, upload-time = "2025-10-15T18:22:48.399Z" }, - { url = "https://files.pythonhosted.org/packages/f5/5e/9046b423735c21f0487ea6cb5b10f89ea8f8dfbe32576fe052b5ba9d4e5b/pillow-12.0.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:7fa22993bac7b77b78cae22bad1e2a987ddf0d9015c63358032f84a53f23cdc3", size = 5251406, upload-time = "2025-10-15T18:22:49.905Z" }, - { url = 
"https://files.pythonhosted.org/packages/12/66/982ceebcdb13c97270ef7a56c3969635b4ee7cd45227fa707c94719229c5/pillow-12.0.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:f135c702ac42262573fe9714dfe99c944b4ba307af5eb507abef1667e2cbbced", size = 4653218, upload-time = "2025-10-15T18:22:51.587Z" }, - { url = "https://files.pythonhosted.org/packages/16/b3/81e625524688c31859450119bf12674619429cab3119eec0e30a7a1029cb/pillow-12.0.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c85de1136429c524e55cfa4e033b4a7940ac5c8ee4d9401cc2d1bf48154bbc7b", size = 6266564, upload-time = "2025-10-15T18:22:53.215Z" }, - { url = "https://files.pythonhosted.org/packages/98/59/dfb38f2a41240d2408096e1a76c671d0a105a4a8471b1871c6902719450c/pillow-12.0.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:38df9b4bfd3db902c9c2bd369bcacaf9d935b2fff73709429d95cc41554f7b3d", size = 8069260, upload-time = "2025-10-15T18:22:54.933Z" }, - { url = "https://files.pythonhosted.org/packages/dc/3d/378dbea5cd1874b94c312425ca77b0f47776c78e0df2df751b820c8c1d6c/pillow-12.0.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7d87ef5795da03d742bf49439f9ca4d027cde49c82c5371ba52464aee266699a", size = 6379248, upload-time = "2025-10-15T18:22:56.605Z" }, - { url = "https://files.pythonhosted.org/packages/84/b0/d525ef47d71590f1621510327acec75ae58c721dc071b17d8d652ca494d8/pillow-12.0.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:aff9e4d82d082ff9513bdd6acd4f5bd359f5b2c870907d2b0a9c5e10d40c88fe", size = 7066043, upload-time = "2025-10-15T18:22:58.53Z" }, - { url = "https://files.pythonhosted.org/packages/61/2c/aced60e9cf9d0cde341d54bf7932c9ffc33ddb4a1595798b3a5150c7ec4e/pillow-12.0.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:8d8ca2b210ada074d57fcee40c30446c9562e542fc46aedc19baf758a93532ee", size = 6490915, upload-time = "2025-10-15T18:23:00.582Z" }, - { url = 
"https://files.pythonhosted.org/packages/ef/26/69dcb9b91f4e59f8f34b2332a4a0a951b44f547c4ed39d3e4dcfcff48f89/pillow-12.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:99a7f72fb6249302aa62245680754862a44179b545ded638cf1fef59befb57ef", size = 7157998, upload-time = "2025-10-15T18:23:02.627Z" }, - { url = "https://files.pythonhosted.org/packages/61/2b/726235842220ca95fa441ddf55dd2382b52ab5b8d9c0596fe6b3f23dafe8/pillow-12.0.0-cp313-cp313t-win32.whl", hash = "sha256:4078242472387600b2ce8d93ade8899c12bf33fa89e55ec89fe126e9d6d5d9e9", size = 6306201, upload-time = "2025-10-15T18:23:04.709Z" }, - { url = "https://files.pythonhosted.org/packages/c0/3d/2afaf4e840b2df71344ababf2f8edd75a705ce500e5dc1e7227808312ae1/pillow-12.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:2c54c1a783d6d60595d3514f0efe9b37c8808746a66920315bfd34a938d7994b", size = 7013165, upload-time = "2025-10-15T18:23:06.46Z" }, - { url = "https://files.pythonhosted.org/packages/6f/75/3fa09aa5cf6ed04bee3fa575798ddf1ce0bace8edb47249c798077a81f7f/pillow-12.0.0-cp313-cp313t-win_arm64.whl", hash = "sha256:26d9f7d2b604cd23aba3e9faf795787456ac25634d82cd060556998e39c6fa47", size = 2437834, upload-time = "2025-10-15T18:23:08.194Z" }, - { url = "https://files.pythonhosted.org/packages/54/2a/9a8c6ba2c2c07b71bec92cf63e03370ca5e5f5c5b119b742bcc0cde3f9c5/pillow-12.0.0-cp314-cp314-ios_13_0_arm64_iphoneos.whl", hash = "sha256:beeae3f27f62308f1ddbcfb0690bf44b10732f2ef43758f169d5e9303165d3f9", size = 4045531, upload-time = "2025-10-15T18:23:10.121Z" }, - { url = "https://files.pythonhosted.org/packages/84/54/836fdbf1bfb3d66a59f0189ff0b9f5f666cee09c6188309300df04ad71fa/pillow-12.0.0-cp314-cp314-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:d4827615da15cd59784ce39d3388275ec093ae3ee8d7f0c089b76fa87af756c2", size = 4120554, upload-time = "2025-10-15T18:23:12.14Z" }, - { url = 
"https://files.pythonhosted.org/packages/0d/cd/16aec9f0da4793e98e6b54778a5fbce4f375c6646fe662e80600b8797379/pillow-12.0.0-cp314-cp314-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:3e42edad50b6909089750e65c91aa09aaf1e0a71310d383f11321b27c224ed8a", size = 3576812, upload-time = "2025-10-15T18:23:13.962Z" }, - { url = "https://files.pythonhosted.org/packages/f6/b7/13957fda356dc46339298b351cae0d327704986337c3c69bb54628c88155/pillow-12.0.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:e5d8efac84c9afcb40914ab49ba063d94f5dbdf5066db4482c66a992f47a3a3b", size = 5252689, upload-time = "2025-10-15T18:23:15.562Z" }, - { url = "https://files.pythonhosted.org/packages/fc/f5/eae31a306341d8f331f43edb2e9122c7661b975433de5e447939ae61c5da/pillow-12.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:266cd5f2b63ff316d5a1bba46268e603c9caf5606d44f38c2873c380950576ad", size = 4650186, upload-time = "2025-10-15T18:23:17.379Z" }, - { url = "https://files.pythonhosted.org/packages/86/62/2a88339aa40c4c77e79108facbd307d6091e2c0eb5b8d3cf4977cfca2fe6/pillow-12.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:58eea5ebe51504057dd95c5b77d21700b77615ab0243d8152793dc00eb4faf01", size = 6230308, upload-time = "2025-10-15T18:23:18.971Z" }, - { url = "https://files.pythonhosted.org/packages/c7/33/5425a8992bcb32d1cb9fa3dd39a89e613d09a22f2c8083b7bf43c455f760/pillow-12.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f13711b1a5ba512d647a0e4ba79280d3a9a045aaf7e0cc6fbe96b91d4cdf6b0c", size = 8039222, upload-time = "2025-10-15T18:23:20.909Z" }, - { url = "https://files.pythonhosted.org/packages/d8/61/3f5d3b35c5728f37953d3eec5b5f3e77111949523bd2dd7f31a851e50690/pillow-12.0.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6846bd2d116ff42cba6b646edf5bf61d37e5cbd256425fa089fee4ff5c07a99e", size = 6346657, upload-time = "2025-10-15T18:23:23.077Z" }, - { url = 
"https://files.pythonhosted.org/packages/3a/be/ee90a3d79271227e0f0a33c453531efd6ed14b2e708596ba5dd9be948da3/pillow-12.0.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c98fa880d695de164b4135a52fd2e9cd7b7c90a9d8ac5e9e443a24a95ef9248e", size = 7038482, upload-time = "2025-10-15T18:23:25.005Z" }, - { url = "https://files.pythonhosted.org/packages/44/34/a16b6a4d1ad727de390e9bd9f19f5f669e079e5826ec0f329010ddea492f/pillow-12.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:fa3ed2a29a9e9d2d488b4da81dcb54720ac3104a20bf0bd273f1e4648aff5af9", size = 6461416, upload-time = "2025-10-15T18:23:27.009Z" }, - { url = "https://files.pythonhosted.org/packages/b6/39/1aa5850d2ade7d7ba9f54e4e4c17077244ff7a2d9e25998c38a29749eb3f/pillow-12.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d034140032870024e6b9892c692fe2968493790dd57208b2c37e3fb35f6df3ab", size = 7131584, upload-time = "2025-10-15T18:23:29.752Z" }, - { url = "https://files.pythonhosted.org/packages/bf/db/4fae862f8fad0167073a7733973bfa955f47e2cac3dc3e3e6257d10fab4a/pillow-12.0.0-cp314-cp314-win32.whl", hash = "sha256:1b1b133e6e16105f524a8dec491e0586d072948ce15c9b914e41cdadd209052b", size = 6400621, upload-time = "2025-10-15T18:23:32.06Z" }, - { url = "https://files.pythonhosted.org/packages/2b/24/b350c31543fb0107ab2599464d7e28e6f856027aadda995022e695313d94/pillow-12.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:8dc232e39d409036af549c86f24aed8273a40ffa459981146829a324e0848b4b", size = 7142916, upload-time = "2025-10-15T18:23:34.71Z" }, - { url = "https://files.pythonhosted.org/packages/0f/9b/0ba5a6fd9351793996ef7487c4fdbde8d3f5f75dbedc093bb598648fddf0/pillow-12.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:d52610d51e265a51518692045e372a4c363056130d922a7351429ac9f27e70b0", size = 2523836, upload-time = "2025-10-15T18:23:36.967Z" }, - { url = 
"https://files.pythonhosted.org/packages/f5/7a/ceee0840aebc579af529b523d530840338ecf63992395842e54edc805987/pillow-12.0.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:1979f4566bb96c1e50a62d9831e2ea2d1211761e5662afc545fa766f996632f6", size = 5255092, upload-time = "2025-10-15T18:23:38.573Z" }, - { url = "https://files.pythonhosted.org/packages/44/76/20776057b4bfd1aef4eeca992ebde0f53a4dce874f3ae693d0ec90a4f79b/pillow-12.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b2e4b27a6e15b04832fe9bf292b94b5ca156016bbc1ea9c2c20098a0320d6cf6", size = 4653158, upload-time = "2025-10-15T18:23:40.238Z" }, - { url = "https://files.pythonhosted.org/packages/82/3f/d9ff92ace07be8836b4e7e87e6a4c7a8318d47c2f1463ffcf121fc57d9cb/pillow-12.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fb3096c30df99fd01c7bf8e544f392103d0795b9f98ba71a8054bcbf56b255f1", size = 6267882, upload-time = "2025-10-15T18:23:42.434Z" }, - { url = "https://files.pythonhosted.org/packages/9f/7a/4f7ff87f00d3ad33ba21af78bfcd2f032107710baf8280e3722ceec28cda/pillow-12.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7438839e9e053ef79f7112c881cef684013855016f928b168b81ed5835f3e75e", size = 8071001, upload-time = "2025-10-15T18:23:44.29Z" }, - { url = "https://files.pythonhosted.org/packages/75/87/fcea108944a52dad8cca0715ae6247e271eb80459364a98518f1e4f480c1/pillow-12.0.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d5c411a8eaa2299322b647cd932586b1427367fd3184ffbb8f7a219ea2041ca", size = 6380146, upload-time = "2025-10-15T18:23:46.065Z" }, - { url = "https://files.pythonhosted.org/packages/91/52/0d31b5e571ef5fd111d2978b84603fce26aba1b6092f28e941cb46570745/pillow-12.0.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d7e091d464ac59d2c7ad8e7e08105eaf9dafbc3883fd7265ffccc2baad6ac925", size = 7067344, upload-time = "2025-10-15T18:23:47.898Z" }, - { url = 
"https://files.pythonhosted.org/packages/7b/f4/2dd3d721f875f928d48e83bb30a434dee75a2531bca839bb996bb0aa5a91/pillow-12.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:792a2c0be4dcc18af9d4a2dfd8a11a17d5e25274a1062b0ec1c2d79c76f3e7f8", size = 6491864, upload-time = "2025-10-15T18:23:49.607Z" }, - { url = "https://files.pythonhosted.org/packages/30/4b/667dfcf3d61fc309ba5a15b141845cece5915e39b99c1ceab0f34bf1d124/pillow-12.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:afbefa430092f71a9593a99ab6a4e7538bc9eabbf7bf94f91510d3503943edc4", size = 7158911, upload-time = "2025-10-15T18:23:51.351Z" }, - { url = "https://files.pythonhosted.org/packages/a2/2f/16cabcc6426c32218ace36bf0d55955e813f2958afddbf1d391849fee9d1/pillow-12.0.0-cp314-cp314t-win32.whl", hash = "sha256:3830c769decf88f1289680a59d4f4c46c72573446352e2befec9a8512104fa52", size = 6408045, upload-time = "2025-10-15T18:23:53.177Z" }, - { url = "https://files.pythonhosted.org/packages/35/73/e29aa0c9c666cf787628d3f0dcf379f4791fba79f4936d02f8b37165bdf8/pillow-12.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:905b0365b210c73afb0ebe9101a32572152dfd1c144c7e28968a331b9217b94a", size = 7148282, upload-time = "2025-10-15T18:23:55.316Z" }, - { url = "https://files.pythonhosted.org/packages/c1/70/6b41bdcddf541b437bbb9f47f94d2db5d9ddef6c37ccab8c9107743748a4/pillow-12.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:99353a06902c2e43b43e8ff74ee65a7d90307d82370604746738a1e0661ccca7", size = 2525630, upload-time = "2025-10-15T18:23:57.149Z" }, - { url = "https://files.pythonhosted.org/packages/1d/b3/582327e6c9f86d037b63beebe981425d6811104cb443e8193824ef1a2f27/pillow-12.0.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:b22bd8c974942477156be55a768f7aa37c46904c175be4e158b6a86e3a6b7ca8", size = 5215068, upload-time = "2025-10-15T18:23:59.594Z" }, - { url = 
"https://files.pythonhosted.org/packages/fd/d6/67748211d119f3b6540baf90f92fae73ae51d5217b171b0e8b5f7e5d558f/pillow-12.0.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:805ebf596939e48dbb2e4922a1d3852cfc25c38160751ce02da93058b48d252a", size = 4614994, upload-time = "2025-10-15T18:24:01.669Z" }, - { url = "https://files.pythonhosted.org/packages/2d/e1/f8281e5d844c41872b273b9f2c34a4bf64ca08905668c8ae730eedc7c9fa/pillow-12.0.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cae81479f77420d217def5f54b5b9d279804d17e982e0f2fa19b1d1e14ab5197", size = 5246639, upload-time = "2025-10-15T18:24:03.403Z" }, - { url = "https://files.pythonhosted.org/packages/94/5a/0d8ab8ffe8a102ff5df60d0de5af309015163bf710c7bb3e8311dd3b3ad0/pillow-12.0.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:aeaefa96c768fc66818730b952a862235d68825c178f1b3ffd4efd7ad2edcb7c", size = 6986839, upload-time = "2025-10-15T18:24:05.344Z" }, - { url = "https://files.pythonhosted.org/packages/20/2e/3434380e8110b76cd9eb00a363c484b050f949b4bbe84ba770bb8508a02c/pillow-12.0.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:09f2d0abef9e4e2f349305a4f8cc784a8a6c2f58a8c4892eea13b10a943bd26e", size = 5313505, upload-time = "2025-10-15T18:24:07.137Z" }, - { url = "https://files.pythonhosted.org/packages/57/ca/5a9d38900d9d74785141d6580950fe705de68af735ff6e727cb911b64740/pillow-12.0.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bdee52571a343d721fb2eb3b090a82d959ff37fc631e3f70422e0c2e029f3e76", size = 5963654, upload-time = "2025-10-15T18:24:09.579Z" }, - { url = "https://files.pythonhosted.org/packages/95/7e/f896623c3c635a90537ac093c6a618ebe1a90d87206e42309cb5d98a1b9e/pillow-12.0.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:b290fd8aa38422444d4b50d579de197557f182ef1068b75f5aa8558638b8d0a5", size = 6997850, upload-time = "2025-10-15T18:24:11.495Z" }, +version 
= "12.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d0/02/d52c733a2452ef1ffcc123b68e6606d07276b0e358db70eabad7e40042b7/pillow-12.1.0.tar.gz", hash = "sha256:5c5ae0a06e9ea030ab786b0251b32c7e4ce10e58d983c0d5c56029455180b5b9", size = 46977283, upload-time = "2026-01-02T09:13:29.892Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fe/41/f73d92b6b883a579e79600d391f2e21cb0df767b2714ecbd2952315dfeef/pillow-12.1.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:fb125d860738a09d363a88daa0f59c4533529a90e564785e20fe875b200b6dbd", size = 5304089, upload-time = "2026-01-02T09:10:24.953Z" }, + { url = "https://files.pythonhosted.org/packages/94/55/7aca2891560188656e4a91ed9adba305e914a4496800da6b5c0a15f09edf/pillow-12.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cad302dc10fac357d3467a74a9561c90609768a6f73a1923b0fd851b6486f8b0", size = 4657815, upload-time = "2026-01-02T09:10:27.063Z" }, + { url = "https://files.pythonhosted.org/packages/e9/d2/b28221abaa7b4c40b7dba948f0f6a708bd7342c4d47ce342f0ea39643974/pillow-12.1.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:a40905599d8079e09f25027423aed94f2823adaf2868940de991e53a449e14a8", size = 6222593, upload-time = "2026-01-02T09:10:29.115Z" }, + { url = "https://files.pythonhosted.org/packages/71/b8/7a61fb234df6a9b0b479f69e66901209d89ff72a435b49933f9122f94cac/pillow-12.1.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:92a7fe4225365c5e3a8e598982269c6d6698d3e783b3b1ae979e7819f9cd55c1", size = 8027579, upload-time = "2026-01-02T09:10:31.182Z" }, + { url = "https://files.pythonhosted.org/packages/ea/51/55c751a57cc524a15a0e3db20e5cde517582359508d62305a627e77fd295/pillow-12.1.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f10c98f49227ed8383d28174ee95155a675c4ed7f85e2e573b04414f7e371bda", size = 6335760, upload-time = "2026-01-02T09:10:33.02Z" }, + { 
url = "https://files.pythonhosted.org/packages/dc/7c/60e3e6f5e5891a1a06b4c910f742ac862377a6fe842f7184df4a274ce7bf/pillow-12.1.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8637e29d13f478bc4f153d8daa9ffb16455f0a6cb287da1b432fdad2bfbd66c7", size = 7027127, upload-time = "2026-01-02T09:10:35.009Z" }, + { url = "https://files.pythonhosted.org/packages/06/37/49d47266ba50b00c27ba63a7c898f1bb41a29627ced8c09e25f19ebec0ff/pillow-12.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:21e686a21078b0f9cb8c8a961d99e6a4ddb88e0fc5ea6e130172ddddc2e5221a", size = 6449896, upload-time = "2026-01-02T09:10:36.793Z" }, + { url = "https://files.pythonhosted.org/packages/f9/e5/67fd87d2913902462cd9b79c6211c25bfe95fcf5783d06e1367d6d9a741f/pillow-12.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2415373395a831f53933c23ce051021e79c8cd7979822d8cc478547a3f4da8ef", size = 7151345, upload-time = "2026-01-02T09:10:39.064Z" }, + { url = "https://files.pythonhosted.org/packages/bd/15/f8c7abf82af68b29f50d77c227e7a1f87ce02fdc66ded9bf603bc3b41180/pillow-12.1.0-cp310-cp310-win32.whl", hash = "sha256:e75d3dba8fc1ddfec0cd752108f93b83b4f8d6ab40e524a95d35f016b9683b09", size = 6325568, upload-time = "2026-01-02T09:10:41.035Z" }, + { url = "https://files.pythonhosted.org/packages/d4/24/7d1c0e160b6b5ac2605ef7d8be537e28753c0db5363d035948073f5513d7/pillow-12.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:64efdf00c09e31efd754448a383ea241f55a994fd079866b92d2bbff598aad91", size = 7032367, upload-time = "2026-01-02T09:10:43.09Z" }, + { url = "https://files.pythonhosted.org/packages/f4/03/41c038f0d7a06099254c60f618d0ec7be11e79620fc23b8e85e5b31d9a44/pillow-12.1.0-cp310-cp310-win_arm64.whl", hash = "sha256:f188028b5af6b8fb2e9a76ac0f841a575bd1bd396e46ef0840d9b88a48fdbcea", size = 2452345, upload-time = "2026-01-02T09:10:44.795Z" }, + { url = 
"https://files.pythonhosted.org/packages/43/c4/bf8328039de6cc22182c3ef007a2abfbbdab153661c0a9aa78af8d706391/pillow-12.1.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:a83e0850cb8f5ac975291ebfc4170ba481f41a28065277f7f735c202cd8e0af3", size = 5304057, upload-time = "2026-01-02T09:10:46.627Z" }, + { url = "https://files.pythonhosted.org/packages/43/06/7264c0597e676104cc22ca73ee48f752767cd4b1fe084662620b17e10120/pillow-12.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b6e53e82ec2db0717eabb276aa56cf4e500c9a7cec2c2e189b55c24f65a3e8c0", size = 4657811, upload-time = "2026-01-02T09:10:49.548Z" }, + { url = "https://files.pythonhosted.org/packages/72/64/f9189e44474610daf83da31145fa56710b627b5c4c0b9c235e34058f6b31/pillow-12.1.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:40a8e3b9e8773876d6e30daed22f016509e3987bab61b3b7fe309d7019a87451", size = 6232243, upload-time = "2026-01-02T09:10:51.62Z" }, + { url = "https://files.pythonhosted.org/packages/ef/30/0df458009be6a4caca4ca2c52975e6275c387d4e5c95544e34138b41dc86/pillow-12.1.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:800429ac32c9b72909c671aaf17ecd13110f823ddb7db4dfef412a5587c2c24e", size = 8037872, upload-time = "2026-01-02T09:10:53.446Z" }, + { url = "https://files.pythonhosted.org/packages/e4/86/95845d4eda4f4f9557e25381d70876aa213560243ac1a6d619c46caaedd9/pillow-12.1.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0b022eaaf709541b391ee069f0022ee5b36c709df71986e3f7be312e46f42c84", size = 6345398, upload-time = "2026-01-02T09:10:55.426Z" }, + { url = "https://files.pythonhosted.org/packages/5c/1f/8e66ab9be3aaf1435bc03edd1ebdf58ffcd17f7349c1d970cafe87af27d9/pillow-12.1.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1f345e7bc9d7f368887c712aa5054558bad44d2a301ddf9248599f4161abc7c0", size = 7034667, upload-time = "2026-01-02T09:10:57.11Z" }, + { url = 
"https://files.pythonhosted.org/packages/f9/f6/683b83cb9b1db1fb52b87951b1c0b99bdcfceaa75febf11406c19f82cb5e/pillow-12.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d70347c8a5b7ccd803ec0c85c8709f036e6348f1e6a5bf048ecd9c64d3550b8b", size = 6458743, upload-time = "2026-01-02T09:10:59.331Z" }, + { url = "https://files.pythonhosted.org/packages/9a/7d/de833d63622538c1d58ce5395e7c6cb7e7dce80decdd8bde4a484e095d9f/pillow-12.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1fcc52d86ce7a34fd17cb04e87cfdb164648a3662a6f20565910a99653d66c18", size = 7159342, upload-time = "2026-01-02T09:11:01.82Z" }, + { url = "https://files.pythonhosted.org/packages/8c/40/50d86571c9e5868c42b81fe7da0c76ca26373f3b95a8dd675425f4a92ec1/pillow-12.1.0-cp311-cp311-win32.whl", hash = "sha256:3ffaa2f0659e2f740473bcf03c702c39a8d4b2b7ffc629052028764324842c64", size = 6328655, upload-time = "2026-01-02T09:11:04.556Z" }, + { url = "https://files.pythonhosted.org/packages/6c/af/b1d7e301c4cd26cd45d4af884d9ee9b6fab893b0ad2450d4746d74a6968c/pillow-12.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:806f3987ffe10e867bab0ddad45df1148a2b98221798457fa097ad85d6e8bc75", size = 7031469, upload-time = "2026-01-02T09:11:06.538Z" }, + { url = "https://files.pythonhosted.org/packages/48/36/d5716586d887fb2a810a4a61518a327a1e21c8b7134c89283af272efe84b/pillow-12.1.0-cp311-cp311-win_arm64.whl", hash = "sha256:9f5fefaca968e700ad1a4a9de98bf0869a94e397fe3524c4c9450c1445252304", size = 2452515, upload-time = "2026-01-02T09:11:08.226Z" }, + { url = "https://files.pythonhosted.org/packages/20/31/dc53fe21a2f2996e1b7d92bf671cdb157079385183ef7c1ae08b485db510/pillow-12.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a332ac4ccb84b6dde65dbace8431f3af08874bf9770719d32a635c4ef411b18b", size = 5262642, upload-time = "2026-01-02T09:11:10.138Z" }, + { url = "https://files.pythonhosted.org/packages/ab/c1/10e45ac9cc79419cedf5121b42dcca5a50ad2b601fa080f58c22fb27626e/pillow-12.1.0-cp312-cp312-macosx_11_0_arm64.whl", 
hash = "sha256:907bfa8a9cb790748a9aa4513e37c88c59660da3bcfffbd24a7d9e6abf224551", size = 4657464, upload-time = "2026-01-02T09:11:12.319Z" }, + { url = "https://files.pythonhosted.org/packages/ad/26/7b82c0ab7ef40ebede7a97c72d473bda5950f609f8e0c77b04af574a0ddb/pillow-12.1.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:efdc140e7b63b8f739d09a99033aa430accce485ff78e6d311973a67b6bf3208", size = 6234878, upload-time = "2026-01-02T09:11:14.096Z" }, + { url = "https://files.pythonhosted.org/packages/76/25/27abc9792615b5e886ca9411ba6637b675f1b77af3104710ac7353fe5605/pillow-12.1.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:bef9768cab184e7ae6e559c032e95ba8d07b3023c289f79a2bd36e8bf85605a5", size = 8044868, upload-time = "2026-01-02T09:11:15.903Z" }, + { url = "https://files.pythonhosted.org/packages/0a/ea/f200a4c36d836100e7bc738fc48cd963d3ba6372ebc8298a889e0cfc3359/pillow-12.1.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:742aea052cf5ab5034a53c3846165bc3ce88d7c38e954120db0ab867ca242661", size = 6349468, upload-time = "2026-01-02T09:11:17.631Z" }, + { url = "https://files.pythonhosted.org/packages/11/8f/48d0b77ab2200374c66d344459b8958c86693be99526450e7aee714e03e4/pillow-12.1.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a6dfc2af5b082b635af6e08e0d1f9f1c4e04d17d4e2ca0ef96131e85eda6eb17", size = 7041518, upload-time = "2026-01-02T09:11:19.389Z" }, + { url = "https://files.pythonhosted.org/packages/1d/23/c281182eb986b5d31f0a76d2a2c8cd41722d6fb8ed07521e802f9bba52de/pillow-12.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:609e89d9f90b581c8d16358c9087df76024cf058fa693dd3e1e1620823f39670", size = 6462829, upload-time = "2026-01-02T09:11:21.28Z" }, + { url = "https://files.pythonhosted.org/packages/25/ef/7018273e0faac099d7b00982abdcc39142ae6f3bd9ceb06de09779c4a9d6/pillow-12.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:43b4899cfd091a9693a1278c4982f3e50f7fb7cff5153b05174b4afc9593b616", size = 7166756, upload-time = "2026-01-02T09:11:23.559Z" }, + { url = "https://files.pythonhosted.org/packages/8f/c8/993d4b7ab2e341fe02ceef9576afcf5830cdec640be2ac5bee1820d693d4/pillow-12.1.0-cp312-cp312-win32.whl", hash = "sha256:aa0c9cc0b82b14766a99fbe6084409972266e82f459821cd26997a488a7261a7", size = 6328770, upload-time = "2026-01-02T09:11:25.661Z" }, + { url = "https://files.pythonhosted.org/packages/a7/87/90b358775a3f02765d87655237229ba64a997b87efa8ccaca7dd3e36e7a7/pillow-12.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:d70534cea9e7966169ad29a903b99fc507e932069a881d0965a1a84bb57f6c6d", size = 7033406, upload-time = "2026-01-02T09:11:27.474Z" }, + { url = "https://files.pythonhosted.org/packages/5d/cf/881b457eccacac9e5b2ddd97d5071fb6d668307c57cbf4e3b5278e06e536/pillow-12.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:65b80c1ee7e14a87d6a068dd3b0aea268ffcabfe0498d38661b00c5b4b22e74c", size = 2452612, upload-time = "2026-01-02T09:11:29.309Z" }, + { url = "https://files.pythonhosted.org/packages/dd/c7/2530a4aa28248623e9d7f27316b42e27c32ec410f695929696f2e0e4a778/pillow-12.1.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:7b5dd7cbae20285cdb597b10eb5a2c13aa9de6cde9bb64a3c1317427b1db1ae1", size = 4062543, upload-time = "2026-01-02T09:11:31.566Z" }, + { url = "https://files.pythonhosted.org/packages/8f/1f/40b8eae823dc1519b87d53c30ed9ef085506b05281d313031755c1705f73/pillow-12.1.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:29a4cef9cb672363926f0470afc516dbf7305a14d8c54f7abbb5c199cd8f8179", size = 4138373, upload-time = "2026-01-02T09:11:33.367Z" }, + { url = "https://files.pythonhosted.org/packages/d4/77/6fa60634cf06e52139fd0e89e5bbf055e8166c691c42fb162818b7fda31d/pillow-12.1.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:681088909d7e8fa9e31b9799aaa59ba5234c58e5e4f1951b4c4d1082a2e980e0", size = 3601241, upload-time = "2026-01-02T09:11:35.011Z" }, 
+ { url = "https://files.pythonhosted.org/packages/4f/bf/28ab865de622e14b747f0cd7877510848252d950e43002e224fb1c9ababf/pillow-12.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:983976c2ab753166dc66d36af6e8ec15bb511e4a25856e2227e5f7e00a160587", size = 5262410, upload-time = "2026-01-02T09:11:36.682Z" }, + { url = "https://files.pythonhosted.org/packages/1c/34/583420a1b55e715937a85bd48c5c0991598247a1fd2eb5423188e765ea02/pillow-12.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:db44d5c160a90df2d24a24760bbd37607d53da0b34fb546c4c232af7192298ac", size = 4657312, upload-time = "2026-01-02T09:11:38.535Z" }, + { url = "https://files.pythonhosted.org/packages/1d/fd/f5a0896839762885b3376ff04878f86ab2b097c2f9a9cdccf4eda8ba8dc0/pillow-12.1.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:6b7a9d1db5dad90e2991645874f708e87d9a3c370c243c2d7684d28f7e133e6b", size = 6232605, upload-time = "2026-01-02T09:11:40.602Z" }, + { url = "https://files.pythonhosted.org/packages/98/aa/938a09d127ac1e70e6ed467bd03834350b33ef646b31edb7452d5de43792/pillow-12.1.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6258f3260986990ba2fa8a874f8b6e808cf5abb51a94015ca3dc3c68aa4f30ea", size = 8041617, upload-time = "2026-01-02T09:11:42.721Z" }, + { url = "https://files.pythonhosted.org/packages/17/e8/538b24cb426ac0186e03f80f78bc8dc7246c667f58b540bdd57c71c9f79d/pillow-12.1.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e115c15e3bc727b1ca3e641a909f77f8ca72a64fff150f666fcc85e57701c26c", size = 6346509, upload-time = "2026-01-02T09:11:44.955Z" }, + { url = "https://files.pythonhosted.org/packages/01/9a/632e58ec89a32738cabfd9ec418f0e9898a2b4719afc581f07c04a05e3c9/pillow-12.1.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6741e6f3074a35e47c77b23a4e4f2d90db3ed905cb1c5e6e0d49bff2045632bc", size = 7038117, upload-time = "2026-01-02T09:11:46.736Z" }, + { url = 
"https://files.pythonhosted.org/packages/c7/a2/d40308cf86eada842ca1f3ffa45d0ca0df7e4ab33c83f81e73f5eaed136d/pillow-12.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:935b9d1aed48fcfb3f838caac506f38e29621b44ccc4f8a64d575cb1b2a88644", size = 6460151, upload-time = "2026-01-02T09:11:48.625Z" }, + { url = "https://files.pythonhosted.org/packages/f1/88/f5b058ad6453a085c5266660a1417bdad590199da1b32fb4efcff9d33b05/pillow-12.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5fee4c04aad8932da9f8f710af2c1a15a83582cfb884152a9caa79d4efcdbf9c", size = 7164534, upload-time = "2026-01-02T09:11:50.445Z" }, + { url = "https://files.pythonhosted.org/packages/19/ce/c17334caea1db789163b5d855a5735e47995b0b5dc8745e9a3605d5f24c0/pillow-12.1.0-cp313-cp313-win32.whl", hash = "sha256:a786bf667724d84aa29b5db1c61b7bfdde380202aaca12c3461afd6b71743171", size = 6332551, upload-time = "2026-01-02T09:11:52.234Z" }, + { url = "https://files.pythonhosted.org/packages/e5/07/74a9d941fa45c90a0d9465098fe1ec85de3e2afbdc15cc4766622d516056/pillow-12.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:461f9dfdafa394c59cd6d818bdfdbab4028b83b02caadaff0ffd433faf4c9a7a", size = 7040087, upload-time = "2026-01-02T09:11:54.822Z" }, + { url = "https://files.pythonhosted.org/packages/88/09/c99950c075a0e9053d8e880595926302575bc742b1b47fe1bbcc8d388d50/pillow-12.1.0-cp313-cp313-win_arm64.whl", hash = "sha256:9212d6b86917a2300669511ed094a9406888362e085f2431a7da985a6b124f45", size = 2452470, upload-time = "2026-01-02T09:11:56.522Z" }, + { url = "https://files.pythonhosted.org/packages/b5/ba/970b7d85ba01f348dee4d65412476321d40ee04dcb51cd3735b9dc94eb58/pillow-12.1.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:00162e9ca6d22b7c3ee8e61faa3c3253cd19b6a37f126cad04f2f88b306f557d", size = 5264816, upload-time = "2026-01-02T09:11:58.227Z" }, + { url = 
"https://files.pythonhosted.org/packages/10/60/650f2fb55fdba7a510d836202aa52f0baac633e50ab1cf18415d332188fb/pillow-12.1.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:7d6daa89a00b58c37cb1747ec9fb7ac3bc5ffd5949f5888657dfddde6d1312e0", size = 4660472, upload-time = "2026-01-02T09:12:00.798Z" }, + { url = "https://files.pythonhosted.org/packages/2b/c0/5273a99478956a099d533c4f46cbaa19fd69d606624f4334b85e50987a08/pillow-12.1.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e2479c7f02f9d505682dc47df8c0ea1fc5e264c4d1629a5d63fe3e2334b89554", size = 6268974, upload-time = "2026-01-02T09:12:02.572Z" }, + { url = "https://files.pythonhosted.org/packages/b4/26/0bf714bc2e73d5267887d47931d53c4ceeceea6978148ed2ab2a4e6463c4/pillow-12.1.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f188d580bd870cda1e15183790d1cc2fa78f666e76077d103edf048eed9c356e", size = 8073070, upload-time = "2026-01-02T09:12:04.75Z" }, + { url = "https://files.pythonhosted.org/packages/43/cf/1ea826200de111a9d65724c54f927f3111dc5ae297f294b370a670c17786/pillow-12.1.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0fde7ec5538ab5095cc02df38ee99b0443ff0e1c847a045554cf5f9af1f4aa82", size = 6380176, upload-time = "2026-01-02T09:12:06.626Z" }, + { url = "https://files.pythonhosted.org/packages/03/e0/7938dd2b2013373fd85d96e0f38d62b7a5a262af21ac274250c7ca7847c9/pillow-12.1.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0ed07dca4a8464bada6139ab38f5382f83e5f111698caf3191cb8dbf27d908b4", size = 7067061, upload-time = "2026-01-02T09:12:08.624Z" }, + { url = "https://files.pythonhosted.org/packages/86/ad/a2aa97d37272a929a98437a8c0ac37b3cf012f4f8721e1bd5154699b2518/pillow-12.1.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:f45bd71d1fa5e5749587613037b172e0b3b23159d1c00ef2fc920da6f470e6f0", size = 6491824, upload-time = "2026-01-02T09:12:10.488Z" }, + { url = 
"https://files.pythonhosted.org/packages/a4/44/80e46611b288d51b115826f136fb3465653c28f491068a72d3da49b54cd4/pillow-12.1.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:277518bf4fe74aa91489e1b20577473b19ee70fb97c374aa50830b279f25841b", size = 7190911, upload-time = "2026-01-02T09:12:12.772Z" }, + { url = "https://files.pythonhosted.org/packages/86/77/eacc62356b4cf81abe99ff9dbc7402750044aed02cfd6a503f7c6fc11f3e/pillow-12.1.0-cp313-cp313t-win32.whl", hash = "sha256:7315f9137087c4e0ee73a761b163fc9aa3b19f5f606a7fc08d83fd3e4379af65", size = 6336445, upload-time = "2026-01-02T09:12:14.775Z" }, + { url = "https://files.pythonhosted.org/packages/e7/3c/57d81d0b74d218706dafccb87a87ea44262c43eef98eb3b164fd000e0491/pillow-12.1.0-cp313-cp313t-win_amd64.whl", hash = "sha256:0ddedfaa8b5f0b4ffbc2fa87b556dc59f6bb4ecb14a53b33f9189713ae8053c0", size = 7045354, upload-time = "2026-01-02T09:12:16.599Z" }, + { url = "https://files.pythonhosted.org/packages/ac/82/8b9b97bba2e3576a340f93b044a3a3a09841170ab4c1eb0d5c93469fd32f/pillow-12.1.0-cp313-cp313t-win_arm64.whl", hash = "sha256:80941e6d573197a0c28f394753de529bb436b1ca990ed6e765cf42426abc39f8", size = 2454547, upload-time = "2026-01-02T09:12:18.704Z" }, + { url = "https://files.pythonhosted.org/packages/8c/87/bdf971d8bbcf80a348cc3bacfcb239f5882100fe80534b0ce67a784181d8/pillow-12.1.0-cp314-cp314-ios_13_0_arm64_iphoneos.whl", hash = "sha256:5cb7bc1966d031aec37ddb9dcf15c2da5b2e9f7cc3ca7c54473a20a927e1eb91", size = 4062533, upload-time = "2026-01-02T09:12:20.791Z" }, + { url = "https://files.pythonhosted.org/packages/ff/4f/5eb37a681c68d605eb7034c004875c81f86ec9ef51f5be4a63eadd58859a/pillow-12.1.0-cp314-cp314-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:97e9993d5ed946aba26baf9c1e8cf18adbab584b99f452ee72f7ee8acb882796", size = 4138546, upload-time = "2026-01-02T09:12:23.664Z" }, + { url = 
"https://files.pythonhosted.org/packages/11/6d/19a95acb2edbace40dcd582d077b991646b7083c41b98da4ed7555b59733/pillow-12.1.0-cp314-cp314-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:414b9a78e14ffeb98128863314e62c3f24b8a86081066625700b7985b3f529bd", size = 3601163, upload-time = "2026-01-02T09:12:26.338Z" }, + { url = "https://files.pythonhosted.org/packages/fc/36/2b8138e51cb42e4cc39c3297713455548be855a50558c3ac2beebdc251dd/pillow-12.1.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:e6bdb408f7c9dd2a5ff2b14a3b0bb6d4deb29fb9961e6eb3ae2031ae9a5cec13", size = 5266086, upload-time = "2026-01-02T09:12:28.782Z" }, + { url = "https://files.pythonhosted.org/packages/53/4b/649056e4d22e1caa90816bf99cef0884aed607ed38075bd75f091a607a38/pillow-12.1.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:3413c2ae377550f5487991d444428f1a8ae92784aac79caa8b1e3b89b175f77e", size = 4657344, upload-time = "2026-01-02T09:12:31.117Z" }, + { url = "https://files.pythonhosted.org/packages/6c/6b/c5742cea0f1ade0cd61485dc3d81f05261fc2276f537fbdc00802de56779/pillow-12.1.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e5dcbe95016e88437ecf33544ba5db21ef1b8dd6e1b434a2cb2a3d605299e643", size = 6232114, upload-time = "2026-01-02T09:12:32.936Z" }, + { url = "https://files.pythonhosted.org/packages/bf/8f/9f521268ce22d63991601aafd3d48d5ff7280a246a1ef62d626d67b44064/pillow-12.1.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d0a7735df32ccbcc98b98a1ac785cc4b19b580be1bdf0aeb5c03223220ea09d5", size = 8042708, upload-time = "2026-01-02T09:12:34.78Z" }, + { url = "https://files.pythonhosted.org/packages/1a/eb/257f38542893f021502a1bbe0c2e883c90b5cff26cc33b1584a841a06d30/pillow-12.1.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0c27407a2d1b96774cbc4a7594129cc027339fd800cd081e44497722ea1179de", size = 6347762, upload-time = "2026-01-02T09:12:36.748Z" }, + { url = 
"https://files.pythonhosted.org/packages/c4/5a/8ba375025701c09b309e8d5163c5a4ce0102fa86bbf8800eb0d7ac87bc51/pillow-12.1.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:15c794d74303828eaa957ff8070846d0efe8c630901a1c753fdc63850e19ecd9", size = 7039265, upload-time = "2026-01-02T09:12:39.082Z" }, + { url = "https://files.pythonhosted.org/packages/cf/dc/cf5e4cdb3db533f539e88a7bbf9f190c64ab8a08a9bc7a4ccf55067872e4/pillow-12.1.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c990547452ee2800d8506c4150280757f88532f3de2a58e3022e9b179107862a", size = 6462341, upload-time = "2026-01-02T09:12:40.946Z" }, + { url = "https://files.pythonhosted.org/packages/d0/47/0291a25ac9550677e22eda48510cfc4fa4b2ef0396448b7fbdc0a6946309/pillow-12.1.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b63e13dd27da389ed9475b3d28510f0f954bca0041e8e551b2a4eb1eab56a39a", size = 7165395, upload-time = "2026-01-02T09:12:42.706Z" }, + { url = "https://files.pythonhosted.org/packages/4f/4c/e005a59393ec4d9416be06e6b45820403bb946a778e39ecec62f5b2b991e/pillow-12.1.0-cp314-cp314-win32.whl", hash = "sha256:1a949604f73eb07a8adab38c4fe50791f9919344398bdc8ac6b307f755fc7030", size = 6431413, upload-time = "2026-01-02T09:12:44.944Z" }, + { url = "https://files.pythonhosted.org/packages/1c/af/f23697f587ac5f9095d67e31b81c95c0249cd461a9798a061ed6709b09b5/pillow-12.1.0-cp314-cp314-win_amd64.whl", hash = "sha256:4f9f6a650743f0ddee5593ac9e954ba1bdbc5e150bc066586d4f26127853ab94", size = 7176779, upload-time = "2026-01-02T09:12:46.727Z" }, + { url = "https://files.pythonhosted.org/packages/b3/36/6a51abf8599232f3e9afbd16d52829376a68909fe14efe29084445db4b73/pillow-12.1.0-cp314-cp314-win_arm64.whl", hash = "sha256:808b99604f7873c800c4840f55ff389936ef1948e4e87645eaf3fccbc8477ac4", size = 2543105, upload-time = "2026-01-02T09:12:49.243Z" }, + { url = 
"https://files.pythonhosted.org/packages/82/54/2e1dd20c8749ff225080d6ba465a0cab4387f5db0d1c5fb1439e2d99923f/pillow-12.1.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:bc11908616c8a283cf7d664f77411a5ed2a02009b0097ff8abbba5e79128ccf2", size = 5268571, upload-time = "2026-01-02T09:12:51.11Z" }, + { url = "https://files.pythonhosted.org/packages/57/61/571163a5ef86ec0cf30d265ac2a70ae6fc9e28413d1dc94fa37fae6bda89/pillow-12.1.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:896866d2d436563fa2a43a9d72f417874f16b5545955c54a64941e87c1376c61", size = 4660426, upload-time = "2026-01-02T09:12:52.865Z" }, + { url = "https://files.pythonhosted.org/packages/5e/e1/53ee5163f794aef1bf84243f755ee6897a92c708505350dd1923f4afec48/pillow-12.1.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8e178e3e99d3c0ea8fc64b88447f7cac8ccf058af422a6cedc690d0eadd98c51", size = 6269908, upload-time = "2026-01-02T09:12:54.884Z" }, + { url = "https://files.pythonhosted.org/packages/bc/0b/b4b4106ff0ee1afa1dc599fde6ab230417f800279745124f6c50bcffed8e/pillow-12.1.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:079af2fb0c599c2ec144ba2c02766d1b55498e373b3ac64687e43849fbbef5bc", size = 8074733, upload-time = "2026-01-02T09:12:56.802Z" }, + { url = "https://files.pythonhosted.org/packages/19/9f/80b411cbac4a732439e629a26ad3ef11907a8c7fc5377b7602f04f6fe4e7/pillow-12.1.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bdec5e43377761c5dbca620efb69a77f6855c5a379e32ac5b158f54c84212b14", size = 6381431, upload-time = "2026-01-02T09:12:58.823Z" }, + { url = "https://files.pythonhosted.org/packages/8f/b7/d65c45db463b66ecb6abc17c6ba6917a911202a07662247e1355ce1789e7/pillow-12.1.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:565c986f4b45c020f5421a4cea13ef294dde9509a8577f29b2fc5edc7587fff8", size = 7068529, upload-time = "2026-01-02T09:13:00.885Z" }, + { url = 
"https://files.pythonhosted.org/packages/50/96/dfd4cd726b4a45ae6e3c669fc9e49deb2241312605d33aba50499e9d9bd1/pillow-12.1.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:43aca0a55ce1eefc0aefa6253661cb54571857b1a7b2964bd8a1e3ef4b729924", size = 6492981, upload-time = "2026-01-02T09:13:03.314Z" }, + { url = "https://files.pythonhosted.org/packages/4d/1c/b5dc52cf713ae46033359c5ca920444f18a6359ce1020dd3e9c553ea5bc6/pillow-12.1.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:0deedf2ea233722476b3a81e8cdfbad786f7adbed5d848469fa59fe52396e4ef", size = 7191878, upload-time = "2026-01-02T09:13:05.276Z" }, + { url = "https://files.pythonhosted.org/packages/53/26/c4188248bd5edaf543864fe4834aebe9c9cb4968b6f573ce014cc42d0720/pillow-12.1.0-cp314-cp314t-win32.whl", hash = "sha256:b17fbdbe01c196e7e159aacb889e091f28e61020a8abeac07b68079b6e626988", size = 6438703, upload-time = "2026-01-02T09:13:07.491Z" }, + { url = "https://files.pythonhosted.org/packages/b8/0e/69ed296de8ea05cb03ee139cee600f424ca166e632567b2d66727f08c7ed/pillow-12.1.0-cp314-cp314t-win_amd64.whl", hash = "sha256:27b9baecb428899db6c0de572d6d305cfaf38ca1596b5c0542a5182e3e74e8c6", size = 7182927, upload-time = "2026-01-02T09:13:09.841Z" }, + { url = "https://files.pythonhosted.org/packages/fc/f5/68334c015eed9b5cff77814258717dec591ded209ab5b6fb70e2ae873d1d/pillow-12.1.0-cp314-cp314t-win_arm64.whl", hash = "sha256:f61333d817698bdcdd0f9d7793e365ac3d2a21c1f1eb02b32ad6aefb8d8ea831", size = 2545104, upload-time = "2026-01-02T09:13:12.068Z" }, + { url = "https://files.pythonhosted.org/packages/8b/bc/224b1d98cffd7164b14707c91aac83c07b047fbd8f58eba4066a3e53746a/pillow-12.1.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:ca94b6aac0d7af2a10ba08c0f888b3d5114439b6b3ef39968378723622fed377", size = 5228605, upload-time = "2026-01-02T09:13:14.084Z" }, + { url = 
"https://files.pythonhosted.org/packages/0c/ca/49ca7769c4550107de049ed85208240ba0f330b3f2e316f24534795702ce/pillow-12.1.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:351889afef0f485b84078ea40fe33727a0492b9af3904661b0abbafee0355b72", size = 4622245, upload-time = "2026-01-02T09:13:15.964Z" }, + { url = "https://files.pythonhosted.org/packages/73/48/fac807ce82e5955bcc2718642b94b1bd22a82a6d452aea31cbb678cddf12/pillow-12.1.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bb0984b30e973f7e2884362b7d23d0a348c7143ee559f38ef3eaab640144204c", size = 5247593, upload-time = "2026-01-02T09:13:17.913Z" }, + { url = "https://files.pythonhosted.org/packages/d2/95/3e0742fe358c4664aed4fd05d5f5373dcdad0b27af52aa0972568541e3f4/pillow-12.1.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:84cabc7095dd535ca934d57e9ce2a72ffd216e435a84acb06b2277b1de2689bd", size = 6989008, upload-time = "2026-01-02T09:13:20.083Z" }, + { url = "https://files.pythonhosted.org/packages/5a/74/fe2ac378e4e202e56d50540d92e1ef4ff34ed687f3c60f6a121bcf99437e/pillow-12.1.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:53d8b764726d3af1a138dd353116f774e3862ec7e3794e0c8781e30db0f35dfc", size = 5313824, upload-time = "2026-01-02T09:13:22.405Z" }, + { url = "https://files.pythonhosted.org/packages/f3/77/2a60dee1adee4e2655ac328dd05c02a955c1cd683b9f1b82ec3feb44727c/pillow-12.1.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5da841d81b1a05ef940a8567da92decaa15bc4d7dedb540a8c219ad83d91808a", size = 5963278, upload-time = "2026-01-02T09:13:24.706Z" }, + { url = "https://files.pythonhosted.org/packages/2d/71/64e9b1c7f04ae0027f788a248e6297d7fcc29571371fe7d45495a78172c0/pillow-12.1.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:75af0b4c229ac519b155028fa1be632d812a519abba9b46b20e50c6caa184f19", size = 7029809, upload-time = "2026-01-02T09:13:26.541Z" }, ] 
[[package]] @@ -4469,16 +4516,16 @@ wheels = [ [[package]] name = "poethepoet" -version = "0.38.0" +version = "0.39.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "pastel", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "pyyaml", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "tomli", marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/4d/14/d1f795f314c4bf3ad6d64216e370bdfda73093ed76e979485778b655a7ac/poethepoet-0.38.0.tar.gz", hash = "sha256:aeeb2f0a2cf0d3afa833976eff3ac7b8f5e472ae64171824900d79d3c68163c7", size = 77339, upload-time = "2025-11-23T13:51:28.246Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ec/f5/7561bc002bcd40c96633173fb2eb5daf2589dd4dfc4a2cb1b1a2cc6bb60f/poethepoet-0.39.0.tar.gz", hash = "sha256:85f279ffaf2a58ba31db2048ab897740d4db839233c3f28740cf0517b3367710", size = 79816, upload-time = "2025-12-25T15:10:57.799Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/38/89/2bf7d43ef4b0d60f446933ae9d3649f95c2c45c47b6736d121b602c28361/poethepoet-0.38.0-py3-none-any.whl", hash = "sha256:214bd9fcb348ff3dfd1466579d67e0c02242451a7044aced1a79641adef9cad0", size = 101938, upload-time = "2025-11-23T13:51:26.518Z" }, + { url = "https://files.pythonhosted.org/packages/b2/18/9f356cdf243ca90a985f2e8e9eab938b1ddac461acfa6348db434ba70525/poethepoet-0.39.0-py3-none-any.whl", hash = "sha256:2865b04230df0fb43b6fb4f739c975b19be03d6db18f0ab760b3fb5c56b50b5c", size = 104763, upload-time = "2025-12-25T15:10:56.099Z" }, ] [[package]] @@ -4521,7 +4568,7 @@ wheels = [ [[package]] name = "posthog" -version = "7.4.0" +version = "7.4.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { 
name = "backoff", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -4531,22 +4578,22 @@ dependencies = [ { name = "six", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "typing-extensions", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/14/e5/5262d1604a3eb19b23d4e896bce87b4603fd39ec366a96b27e19e3299aef/posthog-7.4.0.tar.gz", hash = "sha256:1fb97b11960e24fcf0b80f0a6450b2311478e5a3ee6ea3c6f9284ff89060a876", size = 143780, upload-time = "2025-12-16T23:42:05.829Z" } +sdist = { url = "https://files.pythonhosted.org/packages/fb/a9/90bee782b3122b69462c579c496e2431786d3d1adebd1bff66d5e69f66cf/posthog-7.4.2.tar.gz", hash = "sha256:5953f31a21c5e2485ac57eb5d600a231a70118f884f438c0e8b493c30373c409", size = 144136, upload-time = "2025-12-22T11:03:15.301Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/9f/8b/13066693d7a6f94fb5da3407417bbbc3f6aa8487051294d0ef766c1567fa/posthog-7.4.0-py3-none-any.whl", hash = "sha256:f9d4e32c1c0f2110256b1aae7046ed90af312c1dbb1eecc6a9cb427733b22970", size = 166079, upload-time = "2025-12-16T23:42:04.33Z" }, + { url = "https://files.pythonhosted.org/packages/c5/95/ed48309ec45d8856e38c07ff2f65e7f904e2e4249905d2bf903e2eb0ad32/posthog-7.4.2-py3-none-any.whl", hash = "sha256:36954f06f4adede905d97faeb24926a705a4d86f4a308506b15b41b661ef064c", size = 166516, upload-time = "2025-12-22T11:03:14.27Z" }, ] [[package]] name = "powerfx" -version = "0.0.33" +version = "0.0.34" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cffi", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "pythonnet", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/5e/41/8f95f72f4f3b7ea54357c449bf5bd94813b6321dec31db9ffcbf578e2fa3/powerfx-0.0.33.tar.gz", hash = "sha256:85e8330bef8a7a207c3e010aa232df0ae38825e94d590c73daf3a3f44115cb09", size = 3236647, upload-time = "2025-11-20T19:31:09.414Z" } +sdist = { url = "https://files.pythonhosted.org/packages/9f/fb/6c4bf87e0c74ca1c563921ce89ca1c5785b7576bca932f7255cdf81082a7/powerfx-0.0.34.tar.gz", hash = "sha256:956992e7afd272657ed16d80f4cad24ec95d9e4a79fb9dfa4a068a09e136af32", size = 3237555, upload-time = "2025-12-22T15:50:59.682Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e3/40/bcaf2edf8189f054fd1097c0968097467603d4450f365838553bcbf76e4c/powerfx-0.0.33-py3-none-any.whl", hash = "sha256:3c6073368f36c9a6f1aff3c64bedb076fdc5fceaeaa41a527ccb617c5b6f1e3b", size = 3482059, upload-time = "2025-11-20T19:31:06.76Z" }, + { url = "https://files.pythonhosted.org/packages/6f/96/0f8a1f86485b3ec0315e3e8403326884a0334b3dcd699df2482669cca4be/powerfx-0.0.34-py3-none-any.whl", hash = "sha256:f2dc1c42ba8bfa4c72a7fcff2a00755b95394547388ca0b3e36579c49ee7ed75", size = 3483089, upload-time = "2025-12-22T15:50:57.536Z" }, ] [[package]] @@ -5009,48 +5056,46 @@ crypto = [ [[package]] name = "pynacl" -version = "1.6.1" +version = "1.6.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cffi", marker = "(platform_python_implementation != 'PyPy' and sys_platform == 'darwin') or (platform_python_implementation != 'PyPy' and sys_platform == 'linux') or (platform_python_implementation != 'PyPy' and sys_platform == 'win32')" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/b2/46/aeca065d227e2265125aea590c9c47fbf5786128c9400ee0eb7c88931f06/pynacl-1.6.1.tar.gz", hash = "sha256:8d361dac0309f2b6ad33b349a56cd163c98430d409fa503b10b70b3ad66eaa1d", size = 3506616, upload-time = "2025-11-10T16:02:13.195Z" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/75/d6/4b2dca33ed512de8f54e5c6074aa06eaeb225bfbcd9b16f33a414389d6bd/pynacl-1.6.1-cp314-cp314t-macosx_10_10_universal2.whl", hash = "sha256:7d7c09749450c385301a3c20dca967a525152ae4608c0a096fe8464bfc3df93d", size = 389109, upload-time = "2025-11-10T16:01:28.79Z" }, - { url = "https://files.pythonhosted.org/packages/3c/30/e8dbb8ff4fa2559bbbb2187ba0d0d7faf728d17cb8396ecf4a898b22d3da/pynacl-1.6.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fc734c1696ffd49b40f7c1779c89ba908157c57345cf626be2e0719488a076d3", size = 808254, upload-time = "2025-11-10T16:01:37.839Z" }, - { url = "https://files.pythonhosted.org/packages/44/f9/f5449c652f31da00249638dbab065ad4969c635119094b79b17c3a4da2ab/pynacl-1.6.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3cd787ec1f5c155dc8ecf39b1333cfef41415dc96d392f1ce288b4fe970df489", size = 1407365, upload-time = "2025-11-10T16:01:40.454Z" }, - { url = "https://files.pythonhosted.org/packages/eb/2f/9aa5605f473b712065c0a193ebf4ad4725d7a245533f0cd7e5dcdbc78f35/pynacl-1.6.1-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b35d93ab2df03ecb3aa506be0d3c73609a51449ae0855c2e89c7ed44abde40b", size = 843842, upload-time = "2025-11-10T16:01:30.524Z" }, - { url = "https://files.pythonhosted.org/packages/32/8d/748f0f6956e207453da8f5f21a70885fbbb2e060d5c9d78e0a4a06781451/pynacl-1.6.1-cp314-cp314t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dece79aecbb8f4640a1adbb81e4aa3bfb0e98e99834884a80eb3f33c7c30e708", size = 1445559, upload-time = "2025-11-10T16:01:33.663Z" }, - { url = "https://files.pythonhosted.org/packages/78/d0/2387f0dcb0e9816f38373999e48db4728ed724d31accdd4e737473319d35/pynacl-1.6.1-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:c2228054f04bf32d558fb89bb99f163a8197d5a9bf4efa13069a7fa8d4b93fc3", size = 825791, upload-time = "2025-11-10T16:01:34.823Z" }, - { url = 
"https://files.pythonhosted.org/packages/18/3d/ef6fb7eb072aaf15f280bc66f26ab97e7fc9efa50fb1927683013ef47473/pynacl-1.6.1-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:2b12f1b97346f177affcdfdc78875ff42637cb40dcf79484a97dae3448083a78", size = 1410843, upload-time = "2025-11-10T16:01:36.401Z" }, - { url = "https://files.pythonhosted.org/packages/e3/fb/23824a017526850ee7d8a1cc4cd1e3e5082800522c10832edbbca8619537/pynacl-1.6.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e735c3a1bdfde3834503baf1a6d74d4a143920281cb724ba29fb84c9f49b9c48", size = 801140, upload-time = "2025-11-10T16:01:42.013Z" }, - { url = "https://files.pythonhosted.org/packages/5d/d1/ebc6b182cb98603a35635b727d62f094bc201bf610f97a3bb6357fe688d2/pynacl-1.6.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:3384a454adf5d716a9fadcb5eb2e3e72cd49302d1374a60edc531c9957a9b014", size = 1371966, upload-time = "2025-11-10T16:01:43.297Z" }, - { url = "https://files.pythonhosted.org/packages/64/f4/c9d7b6f02924b1f31db546c7bd2a83a2421c6b4a8e6a2e53425c9f2802e0/pynacl-1.6.1-cp314-cp314t-win32.whl", hash = "sha256:d8615ee34d01c8e0ab3f302dcdd7b32e2bcf698ba5f4809e7cc407c8cdea7717", size = 230482, upload-time = "2025-11-10T16:01:47.688Z" }, - { url = "https://files.pythonhosted.org/packages/c4/2c/942477957fba22da7bf99131850e5ebdff66623418ab48964e78a7a8293e/pynacl-1.6.1-cp314-cp314t-win_amd64.whl", hash = "sha256:5f5b35c1a266f8a9ad22525049280a600b19edd1f785bccd01ae838437dcf935", size = 243232, upload-time = "2025-11-10T16:01:45.208Z" }, - { url = "https://files.pythonhosted.org/packages/7a/0c/bdbc0d04a53b96a765ab03aa2cf9a76ad8653d70bf1665459b9a0dedaa1c/pynacl-1.6.1-cp314-cp314t-win_arm64.whl", hash = "sha256:d984c91fe3494793b2a1fb1e91429539c6c28e9ec8209d26d25041ec599ccf63", size = 187907, upload-time = "2025-11-10T16:01:46.328Z" }, - { url = "https://files.pythonhosted.org/packages/49/41/3cfb3b4f3519f6ff62bf71bf1722547644bcfb1b05b8fdbdc300249ba113/pynacl-1.6.1-cp38-abi3-macosx_10_10_universal2.whl", 
hash = "sha256:a6f9fd6d6639b1e81115c7f8ff16b8dedba1e8098d2756275d63d208b0e32021", size = 387591, upload-time = "2025-11-10T16:01:49.1Z" }, - { url = "https://files.pythonhosted.org/packages/18/21/b8a6563637799f617a3960f659513eccb3fcc655d5fc2be6e9dc6416826f/pynacl-1.6.1-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e49a3f3d0da9f79c1bec2aa013261ab9fa651c7da045d376bd306cf7c1792993", size = 798866, upload-time = "2025-11-10T16:01:55.688Z" }, - { url = "https://files.pythonhosted.org/packages/e8/6c/dc38033bc3ea461e05ae8f15a81e0e67ab9a01861d352ae971c99de23e7c/pynacl-1.6.1-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7713f8977b5d25f54a811ec9efa2738ac592e846dd6e8a4d3f7578346a841078", size = 1398001, upload-time = "2025-11-10T16:01:57.101Z" }, - { url = "https://files.pythonhosted.org/packages/9f/05/3ec0796a9917100a62c5073b20c4bce7bf0fea49e99b7906d1699cc7b61b/pynacl-1.6.1-cp38-abi3-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5a3becafc1ee2e5ea7f9abc642f56b82dcf5be69b961e782a96ea52b55d8a9fc", size = 834024, upload-time = "2025-11-10T16:01:50.228Z" }, - { url = "https://files.pythonhosted.org/packages/f0/b7/ae9982be0f344f58d9c64a1c25d1f0125c79201634efe3c87305ac7cb3e3/pynacl-1.6.1-cp38-abi3-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4ce50d19f1566c391fedc8dc2f2f5be265ae214112ebe55315e41d1f36a7f0a9", size = 1436766, upload-time = "2025-11-10T16:01:51.886Z" }, - { url = "https://files.pythonhosted.org/packages/b4/51/b2ccbf89cf3025a02e044dd68a365cad593ebf70f532299f2c047d2b7714/pynacl-1.6.1-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:543f869140f67d42b9b8d47f922552d7a967e6c116aad028c9bfc5f3f3b3a7b7", size = 817275, upload-time = "2025-11-10T16:01:53.351Z" }, - { url = "https://files.pythonhosted.org/packages/a8/6c/dd9ee8214edf63ac563b08a9b30f98d116942b621d39a751ac3256694536/pynacl-1.6.1-cp38-abi3-manylinux_2_34_x86_64.whl", hash = 
"sha256:a2bb472458c7ca959aeeff8401b8efef329b0fc44a89d3775cffe8fad3398ad8", size = 1401891, upload-time = "2025-11-10T16:01:54.587Z" }, - { url = "https://files.pythonhosted.org/packages/0f/c1/97d3e1c83772d78ee1db3053fd674bc6c524afbace2bfe8d419fd55d7ed1/pynacl-1.6.1-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:3206fa98737fdc66d59b8782cecc3d37d30aeec4593d1c8c145825a345bba0f0", size = 772291, upload-time = "2025-11-10T16:01:58.111Z" }, - { url = "https://files.pythonhosted.org/packages/4d/ca/691ff2fe12f3bb3e43e8e8df4b806f6384593d427f635104d337b8e00291/pynacl-1.6.1-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:53543b4f3d8acb344f75fd4d49f75e6572fce139f4bfb4815a9282296ff9f4c0", size = 1370839, upload-time = "2025-11-10T16:01:59.252Z" }, - { url = "https://files.pythonhosted.org/packages/30/27/06fe5389d30391fce006442246062cc35773c84fbcad0209fbbf5e173734/pynacl-1.6.1-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:319de653ef84c4f04e045eb250e6101d23132372b0a61a7acf91bac0fda8e58c", size = 791371, upload-time = "2025-11-10T16:02:01.075Z" }, - { url = "https://files.pythonhosted.org/packages/2c/7a/e2bde8c9d39074a5aa046c7d7953401608d1f16f71e237f4bef3fb9d7e49/pynacl-1.6.1-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:262a8de6bba4aee8a66f5edf62c214b06647461c9b6b641f8cd0cb1e3b3196fe", size = 1363031, upload-time = "2025-11-10T16:02:02.656Z" }, - { url = "https://files.pythonhosted.org/packages/dd/b6/63fd77264dae1087770a1bb414bc604470f58fbc21d83822fc9c76248076/pynacl-1.6.1-cp38-abi3-win32.whl", hash = "sha256:9fd1a4eb03caf8a2fe27b515a998d26923adb9ddb68db78e35ca2875a3830dde", size = 226585, upload-time = "2025-11-10T16:02:07.116Z" }, - { url = "https://files.pythonhosted.org/packages/12/c8/b419180f3fdb72ab4d45e1d88580761c267c7ca6eda9a20dcbcba254efe6/pynacl-1.6.1-cp38-abi3-win_amd64.whl", hash = "sha256:a569a4069a7855f963940040f35e87d8bc084cb2d6347428d5ad20550a0a1a21", size = 238923, upload-time = "2025-11-10T16:02:04.401Z" }, - { url = 
"https://files.pythonhosted.org/packages/35/76/c34426d532e4dce7ff36e4d92cb20f4cbbd94b619964b93d24e8f5b5510f/pynacl-1.6.1-cp38-abi3-win_arm64.whl", hash = "sha256:5953e8b8cfadb10889a6e7bd0f53041a745d1b3d30111386a1bb37af171e6daf", size = 183970, upload-time = "2025-11-10T16:02:05.786Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/d9/9a/4019b524b03a13438637b11538c82781a5eda427394380381af8f04f467a/pynacl-1.6.2.tar.gz", hash = "sha256:018494d6d696ae03c7e656e5e74cdfd8ea1326962cc401bcf018f1ed8436811c", size = 3511692, upload-time = "2026-01-01T17:48:10.851Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4b/79/0e3c34dc3c4671f67d251c07aa8eb100916f250ee470df230b0ab89551b4/pynacl-1.6.2-cp314-cp314t-macosx_10_10_universal2.whl", hash = "sha256:622d7b07cc5c02c666795792931b50c91f3ce3c2649762efb1ef0d5684c81594", size = 390064, upload-time = "2026-01-01T17:31:57.264Z" }, + { url = "https://files.pythonhosted.org/packages/eb/1c/23a26e931736e13b16483795c8a6b2f641bf6a3d5238c22b070a5112722c/pynacl-1.6.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d071c6a9a4c94d79eb665db4ce5cedc537faf74f2355e4d502591d850d3913c0", size = 809370, upload-time = "2026-01-01T17:31:59.198Z" }, + { url = "https://files.pythonhosted.org/packages/87/74/8d4b718f8a22aea9e8dcc8b95deb76d4aae380e2f5b570cc70b5fd0a852d/pynacl-1.6.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fe9847ca47d287af41e82be1dd5e23023d3c31a951da134121ab02e42ac218c9", size = 1408304, upload-time = "2026-01-01T17:32:01.162Z" }, + { url = "https://files.pythonhosted.org/packages/fd/73/be4fdd3a6a87fe8a4553380c2b47fbd1f7f58292eb820902f5c8ac7de7b0/pynacl-1.6.2-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:04316d1fc625d860b6c162fff704eb8426b1a8bcd3abacea11142cbd99a6b574", size = 844871, upload-time = "2026-01-01T17:32:02.824Z" }, + { url = 
"https://files.pythonhosted.org/packages/55/ad/6efc57ab75ee4422e96b5f2697d51bbcf6cdcc091e66310df91fbdc144a8/pynacl-1.6.2-cp314-cp314t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:44081faff368d6c5553ccf55322ef2819abb40e25afaec7e740f159f74813634", size = 1446356, upload-time = "2026-01-01T17:32:04.452Z" }, + { url = "https://files.pythonhosted.org/packages/78/b7/928ee9c4779caa0a915844311ab9fb5f99585621c5d6e4574538a17dca07/pynacl-1.6.2-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:a9f9932d8d2811ce1a8ffa79dcbdf3970e7355b5c8eb0c1a881a57e7f7d96e88", size = 826814, upload-time = "2026-01-01T17:32:06.078Z" }, + { url = "https://files.pythonhosted.org/packages/f7/a9/1bdba746a2be20f8809fee75c10e3159d75864ef69c6b0dd168fc60e485d/pynacl-1.6.2-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:bc4a36b28dd72fb4845e5d8f9760610588a96d5a51f01d84d8c6ff9849968c14", size = 1411742, upload-time = "2026-01-01T17:32:07.651Z" }, + { url = "https://files.pythonhosted.org/packages/f3/2f/5e7ea8d85f9f3ea5b6b87db1d8388daa3587eed181bdeb0306816fdbbe79/pynacl-1.6.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:3bffb6d0f6becacb6526f8f42adfb5efb26337056ee0831fb9a7044d1a964444", size = 801714, upload-time = "2026-01-01T17:32:09.558Z" }, + { url = "https://files.pythonhosted.org/packages/06/ea/43fe2f7eab5f200e40fb10d305bf6f87ea31b3bbc83443eac37cd34a9e1e/pynacl-1.6.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:2fef529ef3ee487ad8113d287a593fa26f48ee3620d92ecc6f1d09ea38e0709b", size = 1372257, upload-time = "2026-01-01T17:32:11.026Z" }, + { url = "https://files.pythonhosted.org/packages/4d/54/c9ea116412788629b1347e415f72195c25eb2f3809b2d3e7b25f5c79f13a/pynacl-1.6.2-cp314-cp314t-win32.whl", hash = "sha256:a84bf1c20339d06dc0c85d9aea9637a24f718f375d861b2668b2f9f96fa51145", size = 231319, upload-time = "2026-01-01T17:32:12.46Z" }, + { url = 
"https://files.pythonhosted.org/packages/ce/04/64e9d76646abac2dccf904fccba352a86e7d172647557f35b9fe2a5ee4a1/pynacl-1.6.2-cp314-cp314t-win_amd64.whl", hash = "sha256:320ef68a41c87547c91a8b58903c9caa641ab01e8512ce291085b5fe2fcb7590", size = 244044, upload-time = "2026-01-01T17:32:13.781Z" }, + { url = "https://files.pythonhosted.org/packages/33/33/7873dc161c6a06f43cda13dec67b6fe152cb2f982581151956fa5e5cdb47/pynacl-1.6.2-cp314-cp314t-win_arm64.whl", hash = "sha256:d29bfe37e20e015a7d8b23cfc8bd6aa7909c92a1b8f41ee416bbb3e79ef182b2", size = 188740, upload-time = "2026-01-01T17:32:15.083Z" }, + { url = "https://files.pythonhosted.org/packages/be/7b/4845bbf88e94586ec47a432da4e9107e3fc3ce37eb412b1398630a37f7dd/pynacl-1.6.2-cp38-abi3-macosx_10_10_universal2.whl", hash = "sha256:c949ea47e4206af7c8f604b8278093b674f7c79ed0d4719cc836902bf4517465", size = 388458, upload-time = "2026-01-01T17:32:16.829Z" }, + { url = "https://files.pythonhosted.org/packages/1e/b4/e927e0653ba63b02a4ca5b4d852a8d1d678afbf69b3dbf9c4d0785ac905c/pynacl-1.6.2-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8845c0631c0be43abdd865511c41eab235e0be69c81dc66a50911594198679b0", size = 800020, upload-time = "2026-01-01T17:32:18.34Z" }, + { url = "https://files.pythonhosted.org/packages/7f/81/d60984052df5c97b1d24365bc1e30024379b42c4edcd79d2436b1b9806f2/pynacl-1.6.2-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:22de65bb9010a725b0dac248f353bb072969c94fa8d6b1f34b87d7953cf7bbe4", size = 1399174, upload-time = "2026-01-01T17:32:20.239Z" }, + { url = "https://files.pythonhosted.org/packages/68/f7/322f2f9915c4ef27d140101dd0ed26b479f7e6f5f183590fd32dfc48c4d3/pynacl-1.6.2-cp38-abi3-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:46065496ab748469cdd999246d17e301b2c24ae2fdf739132e580a0e94c94a87", size = 835085, upload-time = "2026-01-01T17:32:22.24Z" }, + { url = 
"https://files.pythonhosted.org/packages/3e/d0/f301f83ac8dbe53442c5a43f6a39016f94f754d7a9815a875b65e218a307/pynacl-1.6.2-cp38-abi3-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8a66d6fb6ae7661c58995f9c6435bda2b1e68b54b598a6a10247bfcdadac996c", size = 1437614, upload-time = "2026-01-01T17:32:23.766Z" }, + { url = "https://files.pythonhosted.org/packages/c4/58/fc6e649762b029315325ace1a8c6be66125e42f67416d3dbd47b69563d61/pynacl-1.6.2-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:26bfcd00dcf2cf160f122186af731ae30ab120c18e8375684ec2670dccd28130", size = 818251, upload-time = "2026-01-01T17:32:25.69Z" }, + { url = "https://files.pythonhosted.org/packages/c9/a8/b917096b1accc9acd878819a49d3d84875731a41eb665f6ebc826b1af99e/pynacl-1.6.2-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:c8a231e36ec2cab018c4ad4358c386e36eede0319a0c41fed24f840b1dac59f6", size = 1402859, upload-time = "2026-01-01T17:32:27.215Z" }, + { url = "https://files.pythonhosted.org/packages/85/42/fe60b5f4473e12c72f977548e4028156f4d340b884c635ec6b063fe7e9a5/pynacl-1.6.2-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:68be3a09455743ff9505491220b64440ced8973fe930f270c8e07ccfa25b1f9e", size = 791926, upload-time = "2026-01-01T17:32:29.314Z" }, + { url = "https://files.pythonhosted.org/packages/fa/f9/e40e318c604259301cc091a2a63f237d9e7b424c4851cafaea4ea7c4834e/pynacl-1.6.2-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:8b097553b380236d51ed11356c953bf8ce36a29a3e596e934ecabe76c985a577", size = 1363101, upload-time = "2026-01-01T17:32:31.263Z" }, + { url = "https://files.pythonhosted.org/packages/48/47/e761c254f410c023a469284a9bc210933e18588ca87706ae93002c05114c/pynacl-1.6.2-cp38-abi3-win32.whl", hash = "sha256:5811c72b473b2f38f7e2a3dc4f8642e3a3e9b5e7317266e4ced1fba85cae41aa", size = 227421, upload-time = "2026-01-01T17:32:33.076Z" }, + { url = 
"https://files.pythonhosted.org/packages/41/ad/334600e8cacc7d86587fe5f565480fde569dfb487389c8e1be56ac21d8ac/pynacl-1.6.2-cp38-abi3-win_amd64.whl", hash = "sha256:62985f233210dee6548c223301b6c25440852e13d59a8b81490203c3227c5ba0", size = 239754, upload-time = "2026-01-01T17:32:34.557Z" }, + { url = "https://files.pythonhosted.org/packages/29/7d/5945b5af29534641820d3bd7b00962abbbdfee84ec7e19f0d5b3175f9a31/pynacl-1.6.2-cp38-abi3-win_arm64.whl", hash = "sha256:834a43af110f743a754448463e8fd61259cd4ab5bbedcf70f9dabad1d28a394c", size = 184801, upload-time = "2026-01-01T17:32:36.309Z" }, ] [[package]] name = "pyparsing" -version = "3.2.5" +version = "3.3.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f2/a5/181488fc2b9d093e3972d2a472855aae8a03f000592dbfce716a512b3359/pyparsing-3.2.5.tar.gz", hash = "sha256:2df8d5b7b2802ef88e8d016a2eb9c7aeaa923529cd251ed0fe4608275d4105b6", size = 1099274, upload-time = "2025-09-21T04:11:06.277Z" } +sdist = { url = "https://files.pythonhosted.org/packages/33/c1/1d9de9aeaa1b89b0186e5fe23294ff6517fce1bc69149185577cd31016b2/pyparsing-3.3.1.tar.gz", hash = "sha256:47fad0f17ac1e2cad3de3b458570fbc9b03560aa029ed5e16ee5554da9a2251c", size = 1550512, upload-time = "2025-12-23T03:14:04.391Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/10/5e/1aa9a93198c6b64513c9d7752de7422c06402de6600a8767da1524f9570b/pyparsing-3.2.5-py3-none-any.whl", hash = "sha256:e38a4f02064cf41fe6593d328d0512495ad1f3d8a91c4f73fc401b3079a59a5e", size = 113890, upload-time = "2025-09-21T04:11:04.117Z" }, + { url = "https://files.pythonhosted.org/packages/8b/40/2614036cdd416452f5bf98ec037f38a1afb17f327cb8e6b652d4729e0af8/pyparsing-3.3.1-py3-none-any.whl", hash = "sha256:023b5e7e5520ad96642e2c6db4cb683d3970bd640cdf7115049a6e9c3682df82", size = 121793, upload-time = "2025-12-23T03:14:02.103Z" }, ] [[package]] @@ -5322,7 +5367,7 @@ dependencies = [ { name = "grpcio", version = "1.76.0", source = { registry 
= "https://pypi.org/simple" }, marker = "(python_full_version >= '3.14' and sys_platform == 'darwin') or (python_full_version >= '3.14' and sys_platform == 'linux') or (python_full_version >= '3.14' and sys_platform == 'win32')" }, { name = "httpx", extra = ["http2"], marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, - { name = "numpy", version = "2.3.5", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "numpy", version = "2.4.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, { name = "portalocker", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "protobuf", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "pydantic", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -5353,7 +5398,7 @@ dependencies = [ { name = "jsonpath-ng", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "ml-dtypes", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 
'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, - { name = "numpy", version = "2.3.5", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "numpy", version = "2.4.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, { name = "pydantic", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "python-ulid", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "pyyaml", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -5769,7 +5814,7 @@ resolution-markers = [ ] dependencies = [ { name = "joblib", marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, - { name = "numpy", version = "2.3.5", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "numpy", version = "2.4.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, { name = "scipy", version = "1.16.3", source = { registry = 
"https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, { name = "threadpoolctl", marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, ] @@ -5893,7 +5938,7 @@ resolution-markers = [ "python_full_version == '3.11.*' and sys_platform == 'win32'", ] dependencies = [ - { name = "numpy", version = "2.3.5", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "numpy", version = "2.4.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/0a/ca/d8ace4f98322d01abcd52d381134344bf7b431eba7ed8b42bdea5a3c2ac9/scipy-1.16.3.tar.gz", hash = "sha256:01e87659402762f43bd2fee13370553a17ada367d42e7487800bf2916535aecb", size = 30597883, upload-time = "2025-10-28T17:38:54.068Z" } wheels = [ @@ -5966,7 +6011,7 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "matplotlib", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, - { name = "numpy", version = "2.3.5", 
source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "numpy", version = "2.4.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, { name = "pandas", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/86/59/a451d7420a77ab0b98f7affa3a1d78a313d2f7281a57afb1a34bae8ab412/seaborn-0.13.2.tar.gz", hash = "sha256:93e60a40988f4d65e9f4885df477e2fdaff6b73a9ded434c1ab356dd57eefff7", size = 1457696, upload-time = "2024-01-25T13:21:52.551Z" } @@ -6253,15 +6298,15 @@ wheels = [ [[package]] name = "sse-starlette" -version = "3.0.4" +version = "3.1.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "starlette", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/17/8b/54651ad49bce99a50fd61a7f19c2b6a79fbb072e693101fbb1194c362054/sse_starlette-3.0.4.tar.gz", hash = "sha256:5e34286862e96ead0eb70f5ddd0bd21ab1f6473a8f44419dd267f431611383dd", size = 22576, upload-time = "2025-12-14T16:22:52.493Z" } +sdist = { url = "https://files.pythonhosted.org/packages/da/34/f5df66cb383efdbf4f2db23cabb27f51b1dcb737efaf8a558f6f1d195134/sse_starlette-3.1.2.tar.gz", hash = "sha256:55eff034207a83a0eb86de9a68099bd0157838f0b8b999a1b742005c71e33618", size = 26303, upload-time = "2025-12-31T08:02:20.023Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/71/22/8ab1066358601163e1ac732837adba3672f703818f693e179b24e0d3b65c/sse_starlette-3.0.4-py3-none-any.whl", hash = "sha256:32c80ef0d04506ced4b0b6ab8fe300925edc37d26f666afb1874c754895f5dc3", size = 11764, upload-time = "2025-12-14T16:22:51.453Z" }, + { url = "https://files.pythonhosted.org/packages/b7/95/8c4b76eec9ae574474e5d2997557cebf764bcd3586458956c30631ae08f4/sse_starlette-3.1.2-py3-none-any.whl", hash = "sha256:cd800dd349f4521b317b9391d3796fa97b71748a4da9b9e00aafab32dda375c8", size = 12484, upload-time = "2025-12-31T08:02:18.894Z" }, ] [[package]] @@ -6525,15 +6570,15 @@ wheels = [ [[package]] name = "typer-slim" -version = "0.20.1" +version = "0.21.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "click", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "typing-extensions", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/3f/3d/6a4ec47010e8de34dade20c8e7bce90502b173f62a6b41619523a3fcf562/typer_slim-0.20.1.tar.gz", hash = "sha256:bb9e4f7e6dc31551c8a201383df322b81b0ce37239a5ead302598a2ebb6f7c9c", size = 106113, upload-time = "2025-12-19T16:48:54.206Z" } +sdist = { url = "https://files.pythonhosted.org/packages/f9/3b/2f60ce16f578b1db5b8816d37d6a4d9786b33b76407fc8c13b0b86312c31/typer_slim-0.21.0.tar.gz", hash = "sha256:f2dbd150cfa0fead2242e21fa9f654dfc64773763ddf07c6be9a49ad34f79557", size = 106841, upload-time = "2025-12-25T09:54:55.998Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d8/f9/a273c8b57c69ac1b90509ebda204972265fdc978fbbecc25980786f8c038/typer_slim-0.20.1-py3-none-any.whl", hash = "sha256:8e89c5dbaffe87a4f86f4c7a9e2f7059b5b68c66f558f298969d42ce34f10122", size = 47440, upload-time = "2025-12-19T16:48:52.678Z" }, + { url = 
"https://files.pythonhosted.org/packages/b4/84/e97abf10e4a699194ff07fd586ec7f4cf867d9d04bead559a65f9e7aff84/typer_slim-0.21.0-py3-none-any.whl", hash = "sha256:92aee2188ac6fc2b2924bd75bb61a340b78bd8cd51fd9735533ce5a856812c8e", size = 47174, upload-time = "2025-12-25T09:54:54.609Z" }, ] [[package]] @@ -6619,28 +6664,28 @@ wheels = [ [[package]] name = "uv" -version = "0.9.18" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e3/03/1afff9e6362dc9d3a9e03743da0a4b4c7a0809f859c79eb52bbae31ea582/uv-0.9.18.tar.gz", hash = "sha256:17b5502f7689c4dc1fdeee9d8437a9a6664dcaa8476e70046b5f4753559533f5", size = 3824466, upload-time = "2025-12-16T15:45:11.81Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/26/9c/92fad10fcee8ea170b66442d95fd2af308fe9a107909ded4b3cc384fdc69/uv-0.9.18-py3-none-linux_armv6l.whl", hash = "sha256:e9e4915bb280c1f79b9a1c16021e79f61ed7c6382856ceaa99d53258cb0b4951", size = 21345538, upload-time = "2025-12-16T15:45:13.992Z" }, - { url = "https://files.pythonhosted.org/packages/81/b1/b0e5808e05acb54aa118c625d9f7b117df614703b0cbb89d419d03d117f3/uv-0.9.18-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:d91abfd2649987996e3778729140c305ef0f6ff5909f55aac35c3c372544a24f", size = 20439572, upload-time = "2025-12-16T15:45:26.397Z" }, - { url = "https://files.pythonhosted.org/packages/b7/0b/9487d83adf5b7fd1e20ced33f78adf84cb18239c3d7e91f224cedba46c08/uv-0.9.18-py3-none-macosx_11_0_arm64.whl", hash = "sha256:cf33f4146fd97e94cdebe6afc5122208eea8c55b65ca4127f5a5643c9717c8b8", size = 18952907, upload-time = "2025-12-16T15:44:48.399Z" }, - { url = "https://files.pythonhosted.org/packages/58/92/c8f7ae8900eff8e4ce1f7826d2e1e2ad5a95a5f141abdb539865aff79930/uv-0.9.18-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.musllinux_1_1_aarch64.whl", hash = "sha256:edf965e9a5c55f74020ac82285eb0dfe7fac4f325ad0a7afc816290269ecfec1", size = 20772495, upload-time = "2025-12-16T15:45:29.614Z" }, - { url = 
"https://files.pythonhosted.org/packages/5a/28/9831500317c1dd6cde5099e3eb3b22b88ac75e47df7b502f6aef4df5750e/uv-0.9.18-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ae10a941bd7ca1ee69edbe3998c34dce0a9fc2d2406d98198343daf7d2078493", size = 20949623, upload-time = "2025-12-16T15:44:57.482Z" }, - { url = "https://files.pythonhosted.org/packages/0c/ff/1fe1ffa69c8910e54dd11f01fb0765d4fd537ceaeb0c05fa584b6b635b82/uv-0.9.18-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a1669a95b588f613b13dd10e08ced6d5bcd79169bba29a2240eee87532648790", size = 21920580, upload-time = "2025-12-16T15:44:39.009Z" }, - { url = "https://files.pythonhosted.org/packages/d6/ee/eed3ec7679ee80e16316cfc95ed28ef6851700bcc66edacfc583cbd2cc47/uv-0.9.18-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:11e1e406590d3159138288203a41ff8a8904600b8628a57462f04ff87d62c477", size = 23491234, upload-time = "2025-12-16T15:45:32.59Z" }, - { url = "https://files.pythonhosted.org/packages/78/58/64b15df743c79ad03ea7fbcbd27b146ba16a116c57f557425dd4e44d6684/uv-0.9.18-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e82078d3c622cb4c60da87f156168ffa78b9911136db7ffeb8e5b0a040bf30e", size = 23095438, upload-time = "2025-12-16T15:45:17.916Z" }, - { url = "https://files.pythonhosted.org/packages/43/6d/3d3dae71796961603c3871699e10d6b9de2e65a3c327b58d4750610a5f93/uv-0.9.18-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704abaf6e76b4d293fc1f24bef2c289021f1df0de9ed351f476cbbf67a7edae0", size = 22140992, upload-time = "2025-12-16T15:44:45.527Z" }, - { url = "https://files.pythonhosted.org/packages/31/91/1042d0966a30e937df500daed63e1f61018714406ce4023c8a6e6d2dcf7c/uv-0.9.18-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3332188fd8d96a68e5001409a52156dced910bf1bc41ec3066534cffcd46eb68", size = 22229626, upload-time = "2025-12-16T15:45:20.712Z" }, - { url = 
"https://files.pythonhosted.org/packages/5a/1f/0a4a979bb2bf6e1292cc57882955bf1d7757cad40b1862d524c59c2a2ad8/uv-0.9.18-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:b7295e6d505f1fd61c54b1219e3b18e11907396333a9fa61cefe489c08fc7995", size = 20896524, upload-time = "2025-12-16T15:45:06.799Z" }, - { url = "https://files.pythonhosted.org/packages/a5/3c/24f92e56af00cac7d9bed2888d99a580f8093c8745395ccf6213bfccf20b/uv-0.9.18-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:62ea0e518dd4ab76e6f06c0f43a25898a6342a3ecf996c12f27f08eb801ef7f1", size = 22077340, upload-time = "2025-12-16T15:44:51.271Z" }, - { url = "https://files.pythonhosted.org/packages/9c/3e/73163116f748800e676bf30cee838448e74ac4cc2f716c750e1705bc3fe4/uv-0.9.18-py3-none-musllinux_1_1_armv7l.whl", hash = "sha256:8bd073e30030211ba01206caa57b4d63714e1adee2c76a1678987dd52f72d44d", size = 20932956, upload-time = "2025-12-16T15:45:00.3Z" }, - { url = "https://files.pythonhosted.org/packages/59/1b/a26990b51a17de1ffe41fbf2e30de3a98f0e0bce40cc60829fb9d9ed1a8a/uv-0.9.18-py3-none-musllinux_1_1_i686.whl", hash = "sha256:f248e013d10e1fc7a41f94310628b4a8130886b6d683c7c85c42b5b36d1bcd02", size = 21357247, upload-time = "2025-12-16T15:45:23.575Z" }, - { url = "https://files.pythonhosted.org/packages/5f/20/b6ba14fdd671e9237b22060d7422aba4a34503e3e42d914dbf925eff19aa/uv-0.9.18-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:17bedf2b0791e87d889e1c7f125bd5de77e4b7579aec372fa06ba832e07c957e", size = 22443585, upload-time = "2025-12-16T15:44:42.213Z" }, - { url = "https://files.pythonhosted.org/packages/5e/da/1b3dd596964f90a122cfe94dcf5b6b89cf5670eb84434b8c23864382576f/uv-0.9.18-py3-none-win32.whl", hash = "sha256:de6f0bb3e9c18e484545bd1549ec3c956968a141a393d42e2efb25281cb62787", size = 20091088, upload-time = "2025-12-16T15:45:03.225Z" }, - { url = "https://files.pythonhosted.org/packages/11/0b/50e13ebc1eedb36d88524b7740f78351be33213073e3faf81ac8925d0c6e/uv-0.9.18-py3-none-win_amd64.whl", hash = 
"sha256:c82b0e2e36b33e2146fba5f0ae6906b9679b3b5fe6a712e5d624e45e441e58e9", size = 22181193, upload-time = "2025-12-16T15:44:54.394Z" }, - { url = "https://files.pythonhosted.org/packages/8c/d4/0bf338d863a3d9e5545e268d77a8e6afdd75d26bffc939603042f2e739f9/uv-0.9.18-py3-none-win_arm64.whl", hash = "sha256:4c4ce0ed080440bbda2377488575d426867f94f5922323af6d4728a1cd4d091d", size = 20564933, upload-time = "2025-12-16T15:45:09.819Z" }, +version = "0.9.21" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e2/2b/4e2090bc3a6265b445b3d31ca6fff20c6458d11145069f7e48ade3e2d75b/uv-0.9.21.tar.gz", hash = "sha256:aa4ca6ccd68e81b5ebaa3684d3c4df2b51a982ac16211eadf0707741d36e6488", size = 3834762, upload-time = "2025-12-30T16:12:51.927Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/da/26/0750c5bb1637ebefe1db0936dc76ead8ce97f17368cda950642bfd90fa3f/uv-0.9.21-py3-none-linux_armv6l.whl", hash = "sha256:0b330eaced2fd9d94e2a70f3bb6c8fd7beadc9d9bf9f1227eb14da44039c413a", size = 21266556, upload-time = "2025-12-30T16:12:47.311Z" }, + { url = "https://files.pythonhosted.org/packages/3e/ef/f019466c1e367ea68003cf35f4d44cc328694ed4a59b6004aa7dcacb2b35/uv-0.9.21-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:1d8e0940bddd37a55f4479d61adaa6b302b780d473f037fc084e48b09a1678e7", size = 20485648, upload-time = "2025-12-30T16:12:15.746Z" }, + { url = "https://files.pythonhosted.org/packages/2a/41/f735bd9a5b4848b6f4f1028e6d768f581559d68eddb6403eb0f19ca4c843/uv-0.9.21-py3-none-macosx_11_0_arm64.whl", hash = "sha256:cb420ddab7bcdd12c2352d4b551ced428d104311c0b98ce205675ab5c97072db", size = 18986976, upload-time = "2025-12-30T16:12:25.034Z" }, + { url = "https://files.pythonhosted.org/packages/9a/5f/01d537e05927594dc379ff8bc04f8cde26384d25108a9f63758eae2a7936/uv-0.9.21-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.musllinux_1_1_aarch64.whl", hash = 
"sha256:a36d164438a6310c9fceebd041d80f7cffcc63ba80a7c83ee98394fadf2b8545", size = 20819312, upload-time = "2025-12-30T16:12:41.802Z" }, + { url = "https://files.pythonhosted.org/packages/18/89/9497395f57e007a2daed8172042ecccade3ff5569fd367d093f49bd6a4a8/uv-0.9.21-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c0ad83ce874cbbf9eda569ba793a9fb70870db426e9862300db8cf2950a7fe3b", size = 20900227, upload-time = "2025-12-30T16:12:19.242Z" }, + { url = "https://files.pythonhosted.org/packages/04/61/a3f6dfc75d278cce96b370e00b6f03d73ec260e5304f622504848bad219d/uv-0.9.21-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9076191c934b813147060e4cd97e33a58999de0f9c46f8ac67f614e154dae5c8", size = 21965424, upload-time = "2025-12-30T16:12:01.589Z" }, + { url = "https://files.pythonhosted.org/packages/18/3e/344e8c1078cfea82159c6608b8694f24fdfe850ce329a4708c026cb8b0ff/uv-0.9.21-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:2ce0f6aca91f7fbf1192e43c063f4de3666fd43126aacc71ff7d5a79f831af59", size = 23540343, upload-time = "2025-12-30T16:12:13.139Z" }, + { url = "https://files.pythonhosted.org/packages/7f/20/5826659a81526687c6e5b5507f3f79f4f4b7e3022f3efae2ba36b19864c3/uv-0.9.21-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0b4817642d5ef248b74ca7be3505e5e012a06be050669b80d1f7ced5ad50d188", size = 23171564, upload-time = "2025-12-30T16:12:22.219Z" }, + { url = "https://files.pythonhosted.org/packages/a6/8d/404c54e019bb99ce474dc21e6b96c8a1351ba3c06e5e19fd8dcae0ba1899/uv-0.9.21-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4fb42237fa309d79905fb73f653f63c1fe45a51193411c614b13512cf5506df3", size = 22202400, upload-time = "2025-12-30T16:12:04.612Z" }, + { url = "https://files.pythonhosted.org/packages/1a/f0/aa3d0081a2004050564364a1ef3277ddf889c9989a7278c0a9cce8284926/uv-0.9.21-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:f1d22f0ac03635d661e811c69d7c0b292751f90699acc6a1fb1509e17c936474", size = 22206448, upload-time = "2025-12-30T16:12:30.626Z" }, + { url = "https://files.pythonhosted.org/packages/fc/a9/7a375e723a588f31f305ddf9ae2097af0b9dc7f7813641788b5b9764a237/uv-0.9.21-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:cdd805909d360ad67640201376c8eb02de08dcf1680a1a81aebd9519daed6023", size = 20940568, upload-time = "2025-12-30T16:12:27.533Z" }, + { url = "https://files.pythonhosted.org/packages/18/d5/6187ffb7e1d24df34defe2718db8c4c3c08f153d3e7da22c250134b79cd1/uv-0.9.21-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:82e438595a609cbe4e45c413a54bd5756d37c8c39108ce7b2799aff15f7d3337", size = 22085077, upload-time = "2025-12-30T16:12:10.153Z" }, + { url = "https://files.pythonhosted.org/packages/ee/fa/8e211167d0690d9f15a08da610a0383d2f43a6c838890878e14948472284/uv-0.9.21-py3-none-musllinux_1_1_armv7l.whl", hash = "sha256:fc1c06e1e5df423e1517e350ea2c9d85ecefd0919188a0a9f19bd239bbbdeeaf", size = 20862893, upload-time = "2025-12-30T16:12:49.87Z" }, + { url = "https://files.pythonhosted.org/packages/33/b2/9d24d84cb9a1a6a5ea98d03a29abf800d87e5710d25e53896dc73aeb63a5/uv-0.9.21-py3-none-musllinux_1_1_i686.whl", hash = "sha256:9ef3d2a213c7720f4dae336e5123fe88427200d7523c78091c4ab7f849c3f13f", size = 21428397, upload-time = "2025-12-30T16:12:07.483Z" }, + { url = "https://files.pythonhosted.org/packages/4f/40/1e8e4c2e1308432c708eaa66dccdb83d2ee6120ea2b7d65e04fc06f48ff8/uv-0.9.21-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:8da20914d92ba4cc35f071414d3da7365294fc0b7114da8ac2ab3a86c695096f", size = 22450537, upload-time = "2025-12-30T16:12:33.36Z" }, + { url = "https://files.pythonhosted.org/packages/18/b8/99c4731d001f512e844dfdc740db2bf2fea56d538749b639d21f5117a74a/uv-0.9.21-py3-none-win32.whl", hash = "sha256:e716e23bc0ec8cbb0811f99e653745e0cf15223e7ba5d8857d46be5b40b3045b", size = 20032654, upload-time = "2025-12-30T16:12:36.007Z" }, + { url = 
"https://files.pythonhosted.org/packages/29/6b/da441bf335f5e1c0c100b7dfb9702b6fed367ba703e543037bf1e70bf8c3/uv-0.9.21-py3-none-win_amd64.whl", hash = "sha256:64a7bb0e4e6a4c2d98c2d55f42aead7c2df0ceb17d5911d1a42b76228cab4525", size = 22206744, upload-time = "2025-12-30T16:12:38.953Z" }, + { url = "https://files.pythonhosted.org/packages/98/02/afbed8309fe07aaa9fa58a98941cebffbcd300fe70499a02a6806d93517b/uv-0.9.21-py3-none-win_arm64.whl", hash = "sha256:6c13c40966812f6bd6ecb6546e5d3e27e7fe9cefa07018f074f51d703cb29e1c", size = 20591604, upload-time = "2025-12-30T16:12:44.634Z" }, ] [[package]]