From 26a85f45d441ace87040a66e7ff4948234d9c7e5 Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Thu, 5 Feb 2026 08:04:15 +0900 Subject: [PATCH 1/4] Move orchestrations to dedicated package --- .../agent_framework/_workflows/__init__.py | 50 - .../orchestrations/__init__.py | 61 + .../orchestrations/__init__.pyi | 141 ++ python/packages/core/pyproject.toml | 1 + python/packages/orchestrations/LICENSE | 21 + python/packages/orchestrations/README.md | 88 + .../__init__.py | 91 + .../_concurrent.py | 572 +++++ .../_group_chat.py | 996 +++++++++ .../_handoff.py | 1222 ++++++++++ .../_magentic.py | 1981 +++++++++++++++++ .../_sequential.py | 310 +++ .../agent_framework_orchestrations/py.typed | 0 python/packages/orchestrations/pyproject.toml | 87 + .../orchestrations/tests/test_concurrent.py | 549 +++++ .../orchestrations/tests/test_group_chat.py | 1333 +++++++++++ .../orchestrations/tests/test_handoff.py | 708 ++++++ .../orchestrations/tests/test_magentic.py | 1298 +++++++++++ .../orchestrations/tests/test_sequential.py | 453 ++++ python/pyproject.toml | 2 + .../getting_started/orchestrations/README.md | 71 + .../concurrent_agents.py | 3 +- .../concurrent_custom_agent_executors.py | 2 +- .../concurrent_custom_aggregator.py | 3 +- .../concurrent_participant_factory.py | 2 +- .../group_chat_agent_manager.py | 2 +- .../group_chat_philosophical_debate.py | 2 +- .../group_chat_simple_selector.py | 3 +- .../handoff_autonomous.py | 7 +- .../handoff_participant_factory.py | 12 +- .../handoff_simple.py | 4 +- .../handoff_with_code_interpreter_file.py | 7 +- .../magentic.py | 4 +- .../magentic_checkpoint.py | 3 +- .../magentic_human_plan_review.py | 3 +- .../sequential_agents.py | 3 +- .../sequential_custom_executors.py | 2 +- .../sequential_participant_factory.py | 2 +- .../getting_started/workflows/README.md | 26 +- python/uv.lock | 945 ++++---- 40 files changed, 10489 insertions(+), 581 deletions(-) create mode 100644 
python/packages/core/agent_framework/orchestrations/__init__.py create mode 100644 python/packages/core/agent_framework/orchestrations/__init__.pyi create mode 100644 python/packages/orchestrations/LICENSE create mode 100644 python/packages/orchestrations/README.md create mode 100644 python/packages/orchestrations/agent_framework_orchestrations/__init__.py create mode 100644 python/packages/orchestrations/agent_framework_orchestrations/_concurrent.py create mode 100644 python/packages/orchestrations/agent_framework_orchestrations/_group_chat.py create mode 100644 python/packages/orchestrations/agent_framework_orchestrations/_handoff.py create mode 100644 python/packages/orchestrations/agent_framework_orchestrations/_magentic.py create mode 100644 python/packages/orchestrations/agent_framework_orchestrations/_sequential.py create mode 100644 python/packages/orchestrations/agent_framework_orchestrations/py.typed create mode 100644 python/packages/orchestrations/pyproject.toml create mode 100644 python/packages/orchestrations/tests/test_concurrent.py create mode 100644 python/packages/orchestrations/tests/test_group_chat.py create mode 100644 python/packages/orchestrations/tests/test_handoff.py create mode 100644 python/packages/orchestrations/tests/test_magentic.py create mode 100644 python/packages/orchestrations/tests/test_sequential.py create mode 100644 python/samples/getting_started/orchestrations/README.md rename python/samples/getting_started/{workflows/orchestration => orchestrations}/concurrent_agents.py (98%) rename python/samples/getting_started/{workflows/orchestration => orchestrations}/concurrent_custom_agent_executors.py (99%) rename python/samples/getting_started/{workflows/orchestration => orchestrations}/concurrent_custom_aggregator.py (98%) rename python/samples/getting_started/{workflows/orchestration => orchestrations}/concurrent_participant_factory.py (99%) rename python/samples/getting_started/{workflows/orchestration => 
orchestrations}/group_chat_agent_manager.py (98%) rename python/samples/getting_started/{workflows/orchestration => orchestrations}/group_chat_philosophical_debate.py (99%) rename python/samples/getting_started/{workflows/orchestration => orchestrations}/group_chat_simple_selector.py (98%) rename python/samples/getting_started/{workflows/orchestration => orchestrations}/handoff_autonomous.py (97%) rename python/samples/getting_started/{workflows/orchestration => orchestrations}/handoff_participant_factory.py (97%) rename python/samples/getting_started/{workflows/orchestration => orchestrations}/handoff_simple.py (99%) rename python/samples/getting_started/{workflows/orchestration => orchestrations}/handoff_with_code_interpreter_file.py (98%) rename python/samples/getting_started/{workflows/orchestration => orchestrations}/magentic.py (98%) rename python/samples/getting_started/{workflows/orchestration => orchestrations}/magentic_checkpoint.py (99%) rename python/samples/getting_started/{workflows/orchestration => orchestrations}/magentic_human_plan_review.py (98%) rename python/samples/getting_started/{workflows/orchestration => orchestrations}/sequential_agents.py (96%) rename python/samples/getting_started/{workflows/orchestration => orchestrations}/sequential_custom_executors.py (98%) rename python/samples/getting_started/{workflows/orchestration => orchestrations}/sequential_participant_factory.py (98%) diff --git a/python/packages/core/agent_framework/_workflows/__init__.py b/python/packages/core/agent_framework/_workflows/__init__.py index 70706ff827..1a8bb27589 100644 --- a/python/packages/core/agent_framework/_workflows/__init__.py +++ b/python/packages/core/agent_framework/_workflows/__init__.py @@ -20,7 +20,6 @@ WorkflowCheckpoint, ) from ._checkpoint_summary import WorkflowCheckpointSummary, get_checkpoint_summary -from ._concurrent import ConcurrentBuilder from ._const import ( DEFAULT_MAX_ITERATIONS, ) @@ -68,30 +67,6 @@ handler, ) from 
._function_executor import FunctionExecutor, executor -from ._group_chat import ( - AgentBasedGroupChatOrchestrator, - GroupChatBuilder, - GroupChatState, -) -from ._handoff import HandoffAgentUserRequest, HandoffBuilder, HandoffSentEvent -from ._magentic import ( - ORCH_MSG_KIND_INSTRUCTION, - ORCH_MSG_KIND_NOTICE, - ORCH_MSG_KIND_TASK_LEDGER, - ORCH_MSG_KIND_USER_TASK, - MagenticBuilder, - MagenticContext, - MagenticManagerBase, - MagenticOrchestrator, - MagenticOrchestratorEvent, - MagenticOrchestratorEventType, - MagenticPlanReviewRequest, - MagenticPlanReviewResponse, - MagenticProgressLedger, - MagenticProgressLedgerItem, - MagenticResetSignal, - StandardMagenticManager, -) from ._orchestration_request_info import AgentRequestInfoResponse from ._orchestration_state import OrchestrationState from ._request_info_mixin import response_handler @@ -101,7 +76,6 @@ Message, RunnerContext, ) -from ._sequential import SequentialBuilder from ._shared_state import SharedState from ._validation import ( EdgeDuplicationError, @@ -123,11 +97,6 @@ __all__ = [ "DEFAULT_MAX_ITERATIONS", - "ORCH_MSG_KIND_INSTRUCTION", - "ORCH_MSG_KIND_NOTICE", - "ORCH_MSG_KIND_TASK_LEDGER", - "ORCH_MSG_KIND_USER_TASK", - "AgentBasedGroupChatOrchestrator", "AgentExecutor", "AgentExecutorRequest", "AgentExecutorResponse", @@ -137,7 +106,6 @@ "BaseGroupChatOrchestrator", "Case", "CheckpointStorage", - "ConcurrentBuilder", "Default", "Edge", "EdgeCondition", @@ -152,36 +120,18 @@ "FileCheckpointStorage", "FunctionExecutor", "GraphConnectivityError", - "GroupChatBuilder", "GroupChatRequestMessage", "GroupChatRequestSentEvent", "GroupChatResponseReceivedEvent", - "GroupChatState", - "HandoffAgentUserRequest", - "HandoffBuilder", - "HandoffSentEvent", "InMemoryCheckpointStorage", "InProcRunnerContext", - "MagenticBuilder", - "MagenticContext", - "MagenticManagerBase", - "MagenticOrchestrator", - "MagenticOrchestratorEvent", - "MagenticOrchestratorEventType", - "MagenticPlanReviewRequest", - 
"MagenticPlanReviewResponse", - "MagenticProgressLedger", - "MagenticProgressLedgerItem", - "MagenticResetSignal", "Message", "OrchestrationState", "RequestInfoEvent", "Runner", "RunnerContext", - "SequentialBuilder", "SharedState", "SingleEdgeGroup", - "StandardMagenticManager", "SubWorkflowRequestMessage", "SubWorkflowResponseMessage", "SuperStepCompletedEvent", diff --git a/python/packages/core/agent_framework/orchestrations/__init__.py b/python/packages/core/agent_framework/orchestrations/__init__.py new file mode 100644 index 0000000000..ac141eed72 --- /dev/null +++ b/python/packages/core/agent_framework/orchestrations/__init__.py @@ -0,0 +1,61 @@ +# Copyright (c) Microsoft. All rights reserved. + +import importlib +from typing import Any + +IMPORT_PATH = "agent_framework_orchestrations" +PACKAGE_NAME = "agent-framework-orchestrations" +_IMPORTS = [ + "__version__", + # Sequential + "SequentialBuilder", + # Concurrent + "ConcurrentBuilder", + # Handoff + "HandoffAgentExecutor", + "HandoffAgentUserRequest", + "HandoffBuilder", + "HandoffConfiguration", + "HandoffSentEvent", + # Group Chat + "AgentBasedGroupChatOrchestrator", + "AgentOrchestrationOutput", + "GroupChatBuilder", + "GroupChatOrchestrator", + "GroupChatSelectionFunction", + "GroupChatState", + # Magentic + "MAGENTIC_MANAGER_NAME", + "ORCH_MSG_KIND_INSTRUCTION", + "ORCH_MSG_KIND_NOTICE", + "ORCH_MSG_KIND_TASK_LEDGER", + "ORCH_MSG_KIND_USER_TASK", + "MagenticAgentExecutor", + "MagenticBuilder", + "MagenticContext", + "MagenticManagerBase", + "MagenticOrchestrator", + "MagenticOrchestratorEvent", + "MagenticOrchestratorEventType", + "MagenticPlanReviewRequest", + "MagenticPlanReviewResponse", + "MagenticProgressLedger", + "MagenticProgressLedgerItem", + "MagenticResetSignal", + "StandardMagenticManager", +] + + +def __getattr__(name: str) -> Any: + if name in _IMPORTS: + try: + return getattr(importlib.import_module(IMPORT_PATH), name) + except ModuleNotFoundError as exc: + raise ModuleNotFoundError( + 
f"The '{PACKAGE_NAME}' package is not installed, please do `pip install {PACKAGE_NAME}`" + ) from exc + raise AttributeError(f"Module {IMPORT_PATH} has no attribute {name}.") + + +def __dir__() -> list[str]: + return _IMPORTS diff --git a/python/packages/core/agent_framework/orchestrations/__init__.pyi b/python/packages/core/agent_framework/orchestrations/__init__.pyi new file mode 100644 index 0000000000..2ab4a3cc6e --- /dev/null +++ b/python/packages/core/agent_framework/orchestrations/__init__.pyi @@ -0,0 +1,141 @@ +# Copyright (c) Microsoft. All rights reserved. + +# Type stubs for lazy-loaded orchestrations module +# These re-export types from agent_framework_orchestrations + +from agent_framework_orchestrations import ( + # Magentic + MAGENTIC_MANAGER_NAME as MAGENTIC_MANAGER_NAME, +) +from agent_framework_orchestrations import ( + ORCH_MSG_KIND_INSTRUCTION as ORCH_MSG_KIND_INSTRUCTION, +) +from agent_framework_orchestrations import ( + ORCH_MSG_KIND_NOTICE as ORCH_MSG_KIND_NOTICE, +) +from agent_framework_orchestrations import ( + ORCH_MSG_KIND_TASK_LEDGER as ORCH_MSG_KIND_TASK_LEDGER, +) +from agent_framework_orchestrations import ( + ORCH_MSG_KIND_USER_TASK as ORCH_MSG_KIND_USER_TASK, +) +from agent_framework_orchestrations import ( + # Group Chat + AgentBasedGroupChatOrchestrator as AgentBasedGroupChatOrchestrator, +) +from agent_framework_orchestrations import ( + AgentOrchestrationOutput as AgentOrchestrationOutput, +) +from agent_framework_orchestrations import ( + # Concurrent + ConcurrentBuilder as ConcurrentBuilder, +) +from agent_framework_orchestrations import ( + GroupChatBuilder as GroupChatBuilder, +) +from agent_framework_orchestrations import ( + GroupChatOrchestrator as GroupChatOrchestrator, +) +from agent_framework_orchestrations import ( + GroupChatSelectionFunction as GroupChatSelectionFunction, +) +from agent_framework_orchestrations import ( + GroupChatState as GroupChatState, +) +from agent_framework_orchestrations import ( + # 
Handoff + HandoffAgentExecutor as HandoffAgentExecutor, +) +from agent_framework_orchestrations import ( + HandoffAgentUserRequest as HandoffAgentUserRequest, +) +from agent_framework_orchestrations import ( + HandoffBuilder as HandoffBuilder, +) +from agent_framework_orchestrations import ( + HandoffConfiguration as HandoffConfiguration, +) +from agent_framework_orchestrations import ( + HandoffSentEvent as HandoffSentEvent, +) +from agent_framework_orchestrations import ( + MagenticAgentExecutor as MagenticAgentExecutor, +) +from agent_framework_orchestrations import ( + MagenticBuilder as MagenticBuilder, +) +from agent_framework_orchestrations import ( + MagenticContext as MagenticContext, +) +from agent_framework_orchestrations import ( + MagenticManagerBase as MagenticManagerBase, +) +from agent_framework_orchestrations import ( + MagenticOrchestrator as MagenticOrchestrator, +) +from agent_framework_orchestrations import ( + MagenticOrchestratorEvent as MagenticOrchestratorEvent, +) +from agent_framework_orchestrations import ( + MagenticOrchestratorEventType as MagenticOrchestratorEventType, +) +from agent_framework_orchestrations import ( + MagenticPlanReviewRequest as MagenticPlanReviewRequest, +) +from agent_framework_orchestrations import ( + MagenticPlanReviewResponse as MagenticPlanReviewResponse, +) +from agent_framework_orchestrations import ( + MagenticProgressLedger as MagenticProgressLedger, +) +from agent_framework_orchestrations import ( + MagenticProgressLedgerItem as MagenticProgressLedgerItem, +) +from agent_framework_orchestrations import ( + MagenticResetSignal as MagenticResetSignal, +) +from agent_framework_orchestrations import ( + # Sequential + SequentialBuilder as SequentialBuilder, +) +from agent_framework_orchestrations import ( + StandardMagenticManager as StandardMagenticManager, +) +from agent_framework_orchestrations import ( + __version__ as __version__, +) + +__all__ = [ + "MAGENTIC_MANAGER_NAME", + 
"ORCH_MSG_KIND_INSTRUCTION", + "ORCH_MSG_KIND_NOTICE", + "ORCH_MSG_KIND_TASK_LEDGER", + "ORCH_MSG_KIND_USER_TASK", + "AgentBasedGroupChatOrchestrator", + "AgentOrchestrationOutput", + "ConcurrentBuilder", + "GroupChatBuilder", + "GroupChatOrchestrator", + "GroupChatSelectionFunction", + "GroupChatState", + "HandoffAgentExecutor", + "HandoffAgentUserRequest", + "HandoffBuilder", + "HandoffConfiguration", + "HandoffSentEvent", + "MagenticAgentExecutor", + "MagenticBuilder", + "MagenticContext", + "MagenticManagerBase", + "MagenticOrchestrator", + "MagenticOrchestratorEvent", + "MagenticOrchestratorEventType", + "MagenticPlanReviewRequest", + "MagenticPlanReviewResponse", + "MagenticProgressLedger", + "MagenticProgressLedgerItem", + "MagenticResetSignal", + "SequentialBuilder", + "StandardMagenticManager", + "__version__", +] diff --git a/python/packages/core/pyproject.toml b/python/packages/core/pyproject.toml index b68e8038dd..726c1cdcb4 100644 --- a/python/packages/core/pyproject.toml +++ b/python/packages/core/pyproject.toml @@ -55,6 +55,7 @@ all = [ "agent-framework-lab", "agent-framework-mem0", "agent-framework-ollama", + "agent-framework-orchestrations", "agent-framework-purview", "agent-framework-redis", ] diff --git a/python/packages/orchestrations/LICENSE b/python/packages/orchestrations/LICENSE new file mode 100644 index 0000000000..9e841e7a26 --- /dev/null +++ b/python/packages/orchestrations/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. 
+ + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/python/packages/orchestrations/README.md b/python/packages/orchestrations/README.md new file mode 100644 index 0000000000..68ddebe267 --- /dev/null +++ b/python/packages/orchestrations/README.md @@ -0,0 +1,88 @@ +# Agent Framework Orchestrations + +Orchestration patterns for Microsoft Agent Framework. This package provides high-level builders for common multi-agent workflow patterns. 
+ +## Installation + +```bash +pip install agent-framework-orchestrations +``` + +## Orchestration Patterns + +### SequentialBuilder + +Chain agents/executors in sequence, passing conversation context along: + +```python +from agent_framework_orchestrations import SequentialBuilder + +workflow = SequentialBuilder().participants([agent1, agent2, agent3]).build() +``` + +### ConcurrentBuilder + +Fan-out to multiple agents in parallel, then aggregate results: + +```python +from agent_framework_orchestrations import ConcurrentBuilder + +workflow = ConcurrentBuilder().participants([agent1, agent2, agent3]).build() +``` + +### HandoffBuilder + +Decentralized agent routing where agents decide handoff targets: + +```python +from agent_framework_orchestrations import HandoffBuilder + +workflow = ( + HandoffBuilder() + .participants([triage, billing, support]) + .with_start_agent(triage) + .build() +) +``` + +### GroupChatBuilder + +Orchestrator-directed multi-agent conversations: + +```python +from agent_framework_orchestrations import GroupChatBuilder + +workflow = ( + GroupChatBuilder() + .with_orchestrator(selection_func=my_selector) + .participants([agent1, agent2]) + .build() +) +``` + +### MagenticBuilder + +Sophisticated multi-agent orchestration using the Magentic One pattern: + +```python +from agent_framework_orchestrations import MagenticBuilder + +workflow = ( + MagenticBuilder() + .participants([researcher, writer, reviewer]) + .with_manager(agent=manager_agent) + .build() +) +``` + +## Usage with agent_framework + +You can also import orchestrations through the main agent_framework package: + +```python +from agent_framework.orchestrations import SequentialBuilder, ConcurrentBuilder +``` + +## Documentation + +For more information, see the [Agent Framework documentation](https://aka.ms/agent-framework). 
diff --git a/python/packages/orchestrations/agent_framework_orchestrations/__init__.py b/python/packages/orchestrations/agent_framework_orchestrations/__init__.py new file mode 100644 index 0000000000..75c8c8de61 --- /dev/null +++ b/python/packages/orchestrations/agent_framework_orchestrations/__init__.py @@ -0,0 +1,91 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Orchestration patterns for Microsoft Agent Framework. + +This package provides high-level builders for common multi-agent workflow patterns: +- SequentialBuilder: Chain agents in sequence +- ConcurrentBuilder: Fan-out to multiple agents in parallel +- HandoffBuilder: Decentralized agent routing +- GroupChatBuilder: Orchestrator-directed multi-agent conversations +- MagenticBuilder: Magentic One pattern for sophisticated multi-agent orchestration +""" + +import importlib.metadata + +try: + __version__ = importlib.metadata.version(__name__) +except importlib.metadata.PackageNotFoundError: + __version__ = "0.0.0" # Fallback for development mode + +from ._concurrent import ConcurrentBuilder +from ._group_chat import ( + AgentBasedGroupChatOrchestrator, + AgentOrchestrationOutput, + GroupChatBuilder, + GroupChatOrchestrator, + GroupChatSelectionFunction, + GroupChatState, +) +from ._handoff import ( + HandoffAgentExecutor, + HandoffAgentUserRequest, + HandoffBuilder, + HandoffConfiguration, + HandoffSentEvent, +) +from ._magentic import ( + MAGENTIC_MANAGER_NAME, + ORCH_MSG_KIND_INSTRUCTION, + ORCH_MSG_KIND_NOTICE, + ORCH_MSG_KIND_TASK_LEDGER, + ORCH_MSG_KIND_USER_TASK, + MagenticAgentExecutor, + MagenticBuilder, + MagenticContext, + MagenticManagerBase, + MagenticOrchestrator, + MagenticOrchestratorEvent, + MagenticOrchestratorEventType, + MagenticPlanReviewRequest, + MagenticPlanReviewResponse, + MagenticProgressLedger, + MagenticProgressLedgerItem, + MagenticResetSignal, + StandardMagenticManager, +) +from ._sequential import SequentialBuilder + +__all__ = [ + "MAGENTIC_MANAGER_NAME", + 
"ORCH_MSG_KIND_INSTRUCTION", + "ORCH_MSG_KIND_NOTICE", + "ORCH_MSG_KIND_TASK_LEDGER", + "ORCH_MSG_KIND_USER_TASK", + "AgentBasedGroupChatOrchestrator", + "AgentOrchestrationOutput", + "ConcurrentBuilder", + "GroupChatBuilder", + "GroupChatOrchestrator", + "GroupChatSelectionFunction", + "GroupChatState", + "HandoffAgentExecutor", + "HandoffAgentUserRequest", + "HandoffBuilder", + "HandoffConfiguration", + "HandoffSentEvent", + "MagenticAgentExecutor", + "MagenticBuilder", + "MagenticContext", + "MagenticManagerBase", + "MagenticOrchestrator", + "MagenticOrchestratorEvent", + "MagenticOrchestratorEventType", + "MagenticPlanReviewRequest", + "MagenticPlanReviewResponse", + "MagenticProgressLedger", + "MagenticProgressLedgerItem", + "MagenticResetSignal", + "SequentialBuilder", + "StandardMagenticManager", + "__version__", +] diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_concurrent.py b/python/packages/orchestrations/agent_framework_orchestrations/_concurrent.py new file mode 100644 index 0000000000..45e74f2bbf --- /dev/null +++ b/python/packages/orchestrations/agent_framework_orchestrations/_concurrent.py @@ -0,0 +1,572 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import asyncio +import inspect +import logging +from collections.abc import Callable, Sequence +from typing import Any + +from agent_framework import AgentProtocol, ChatMessage +from agent_framework._workflows._agent_executor import AgentExecutor, AgentExecutorRequest, AgentExecutorResponse +from agent_framework._workflows._agent_utils import resolve_agent_id +from agent_framework._workflows._checkpoint import CheckpointStorage +from agent_framework._workflows._executor import Executor, handler +from agent_framework._workflows._message_utils import normalize_messages_input +from agent_framework._workflows._orchestration_request_info import AgentApprovalExecutor +from agent_framework._workflows._workflow import Workflow +from agent_framework._workflows._workflow_builder import WorkflowBuilder +from agent_framework._workflows._workflow_context import WorkflowContext +from typing_extensions import Never + +logger = logging.getLogger(__name__) + +"""Concurrent builder for agent-only fan-out/fan-in workflows. + +This module provides a high-level, agent-focused API to quickly assemble a +parallel workflow with: +- a default dispatcher that broadcasts the input to all agent participants +- a default aggregator that combines all agent conversations and completes the workflow + +Notes: +- Participants can be provided as AgentProtocol or Executor instances via `.participants()`, + or as factories returning AgentProtocol or Executor via `.register_participants()`. +- A custom aggregator can be provided as: + - an Executor instance (it should handle list[AgentExecutorResponse], + yield output), or + - a callback function with signature: + def cb(results: list[AgentExecutorResponse]) -> Any | None + def cb(results: list[AgentExecutorResponse], ctx: WorkflowContext) -> Any | None + The callback is wrapped in _CallbackAggregator. + If the callback returns a non-None value, _CallbackAggregator yields that as output. 
+ If it returns None, the callback may have already yielded an output via ctx, so no further action is taken. +""" + + +class _DispatchToAllParticipants(Executor): + """Broadcasts input to all downstream participants (via fan-out edges).""" + + @handler + async def from_request(self, request: AgentExecutorRequest, ctx: WorkflowContext[AgentExecutorRequest]) -> None: + # No explicit target: edge routing delivers to all connected participants. + await ctx.send_message(request) + + @handler + async def from_str(self, prompt: str, ctx: WorkflowContext[AgentExecutorRequest]) -> None: + request = AgentExecutorRequest(messages=normalize_messages_input(prompt), should_respond=True) + await ctx.send_message(request) + + @handler + async def from_message(self, message: ChatMessage, ctx: WorkflowContext[AgentExecutorRequest]) -> None: + request = AgentExecutorRequest(messages=normalize_messages_input(message), should_respond=True) + await ctx.send_message(request) + + @handler + async def from_messages( + self, + messages: list[str | ChatMessage], + ctx: WorkflowContext[AgentExecutorRequest], + ) -> None: + request = AgentExecutorRequest(messages=normalize_messages_input(messages), should_respond=True) + await ctx.send_message(request) + + +class _AggregateAgentConversations(Executor): + """Aggregates agent responses and completes with combined ChatMessages. + + Emits a list[ChatMessage] shaped as: + [ single_user_prompt?, agent1_final_assistant, agent2_final_assistant, ... ] + + - Extracts a single user prompt (first user message seen across results). + - For each result, selects the final assistant message (prefers agent_response.messages). + - Avoids duplicating the same user message per agent. 
+ """ + + @handler + async def aggregate( + self, results: list[AgentExecutorResponse], ctx: WorkflowContext[Never, list[ChatMessage]] + ) -> None: + if not results: + logger.error("Concurrent aggregator received empty results list") + raise ValueError("Aggregation failed: no results provided") + + def _is_role(msg: Any, role: str) -> bool: + r = getattr(msg, "role", None) + if r is None: + return False + # Normalize both r and role to lowercase strings for comparison + r_str = str(r).lower() if isinstance(r, str) or hasattr(r, "__str__") else r + role_str = str(role).lower() + return r_str == role_str + + prompt_message: ChatMessage | None = None + assistant_replies: list[ChatMessage] = [] + + for r in results: + resp_messages = list(getattr(r.agent_response, "messages", []) or []) + conv = r.full_conversation if r.full_conversation is not None else resp_messages + + logger.debug( + f"Aggregating executor {getattr(r, 'executor_id', '')}: " + f"{len(resp_messages)} response msgs, {len(conv)} conversation msgs" + ) + + # Capture a single user prompt (first encountered across any conversation) + if prompt_message is None: + found_user = next((m for m in conv if _is_role(m, "user")), None) + if found_user is not None: + prompt_message = found_user + + # Pick the final assistant message from the response; fallback to conversation search + final_assistant = next((m for m in reversed(resp_messages) if _is_role(m, "assistant")), None) + if final_assistant is None: + final_assistant = next((m for m in reversed(conv) if _is_role(m, "assistant")), None) + + if final_assistant is not None: + assistant_replies.append(final_assistant) + else: + logger.warning( + f"No assistant reply found for executor {getattr(r, 'executor_id', '')}; skipping" + ) + + if not assistant_replies: + logger.error(f"Aggregation failed: no assistant replies found across {len(results)} results") + raise RuntimeError("Aggregation failed: no assistant replies found") + + output: list[ChatMessage] = [] + 
if prompt_message is not None: + output.append(prompt_message) + else: + logger.warning("No user prompt found in any conversation; emitting assistants only") + output.extend(assistant_replies) + + await ctx.yield_output(output) + + +class _CallbackAggregator(Executor): + """Wraps a Python callback as an aggregator. + + Accepts either an async or sync callback with one of the signatures: + - (results: list[AgentExecutorResponse]) -> Any | None + - (results: list[AgentExecutorResponse], ctx: WorkflowContext[Any]) -> Any | None + + Notes: + - Async callbacks are awaited directly. + - Sync callbacks are executed via asyncio.to_thread to avoid blocking the event loop. + - If the callback returns a non-None value, it is yielded as an output. + """ + + def __init__(self, callback: Callable[..., Any], id: str | None = None) -> None: + derived_id = getattr(callback, "__name__", "") or "" + if not derived_id or derived_id == "": + derived_id = f"{type(self).__name__}_unnamed" + super().__init__(id or derived_id) + self._callback = callback + self._param_count = len(inspect.signature(callback).parameters) + + @handler + async def aggregate(self, results: list[AgentExecutorResponse], ctx: WorkflowContext[Never, Any]) -> None: + # Call according to provided signature, always non-blocking for sync callbacks + if self._param_count >= 2: + if inspect.iscoroutinefunction(self._callback): + ret = await self._callback(results, ctx) # type: ignore[misc] + else: + ret = await asyncio.to_thread(self._callback, results, ctx) + else: + if inspect.iscoroutinefunction(self._callback): + ret = await self._callback(results) # type: ignore[misc] + else: + ret = await asyncio.to_thread(self._callback, results) + + # If the callback returned a value, finalize the workflow with it + if ret is not None: + await ctx.yield_output(ret) + + +class ConcurrentBuilder: + r"""High-level builder for concurrent agent workflows. 
+ + - `participants([...])` accepts a list of AgentProtocol (recommended) or Executor. + - `register_participants([...])` accepts a list of factories for AgentProtocol (recommended) + or Executor factories + - `build()` wires: dispatcher -> fan-out -> participants -> fan-in -> aggregator. + - `with_aggregator(...)` overrides the default aggregator with an Executor or callback. + - `register_aggregator(...)` accepts a factory for an Executor as custom aggregator. + + Usage: + + .. code-block:: python + + from agent_framework_orchestrations import ConcurrentBuilder + + # Minimal: use default aggregator (returns list[ChatMessage]) + workflow = ConcurrentBuilder().participants([agent1, agent2, agent3]).build() + + # With agent factories + workflow = ConcurrentBuilder().register_participants([create_agent1, create_agent2, create_agent3]).build() + + + # Custom aggregator via callback (sync or async). The callback receives + # list[AgentExecutorResponse] and its return value becomes the workflow's output. 
+ def summarize(results: list[AgentExecutorResponse]) -> str: + return " | ".join(r.agent_response.messages[-1].text for r in results) + + + workflow = ConcurrentBuilder().participants([agent1, agent2, agent3]).with_aggregator(summarize).build() + + + # Custom aggregator via a factory + class MyAggregator(Executor): + @handler + async def aggregate(self, results: list[AgentExecutorResponse], ctx: WorkflowContext[Never, str]) -> None: + await ctx.yield_output(" | ".join(r.agent_response.messages[-1].text for r in results)) + + + workflow = ( + ConcurrentBuilder() + .register_participants([create_agent1, create_agent2, create_agent3]) + .register_aggregator(lambda: MyAggregator(id="my_aggregator")) + .build() + ) + + + # Enable checkpoint persistence so runs can resume + workflow = ConcurrentBuilder().participants([agent1, agent2, agent3]).with_checkpointing(storage).build() + + # Enable request info before aggregation + workflow = ConcurrentBuilder().participants([agent1, agent2]).with_request_info().build() + """ + + def __init__(self) -> None: + self._participants: list[AgentProtocol | Executor] = [] + self._participant_factories: list[Callable[[], AgentProtocol | Executor]] = [] + self._aggregator: Executor | None = None + self._aggregator_factory: Callable[[], Executor] | None = None + self._checkpoint_storage: CheckpointStorage | None = None + self._request_info_enabled: bool = False + self._request_info_filter: set[str] | None = None + + def register_participants( + self, + participant_factories: Sequence[Callable[[], AgentProtocol | Executor]], + ) -> "ConcurrentBuilder": + r"""Define the parallel participants for this concurrent workflow. + + Accepts factories (callables) that return AgentProtocol instances (e.g., created + by a chat client) or Executor instances. Each participant created by a factory + is wired as a parallel branch using fan-out edges from an internal dispatcher. 
+ + Args: + participant_factories: Sequence of callables returning AgentProtocol or Executor instances + + Raises: + ValueError: if `participant_factories` is empty or `.participants()` + or `.register_participants()` were already called + + Example: + + .. code-block:: python + + def create_researcher() -> ChatAgent: + return ... + + + def create_marketer() -> ChatAgent: + return ... + + + def create_legal() -> ChatAgent: + return ... + + + class MyCustomExecutor(Executor): ... + + + wf = ConcurrentBuilder().register_participants([create_researcher, create_marketer, create_legal]).build() + + # Mixing agent(s) and executor(s) is supported + wf2 = ConcurrentBuilder().register_participants([create_researcher, MyCustomExecutor]).build() + """ + if self._participants: + raise ValueError("Cannot mix .participants() and .register_participants() in the same builder instance.") + + if self._participant_factories: + raise ValueError("register_participants() has already been called on this builder instance.") + + if not participant_factories: + raise ValueError("participant_factories cannot be empty") + + self._participant_factories = list(participant_factories) + return self + + def participants(self, participants: Sequence[AgentProtocol | Executor]) -> "ConcurrentBuilder": + r"""Define the parallel participants for this concurrent workflow. + + Accepts AgentProtocol instances (e.g., created by a chat client) or Executor + instances. Each participant is wired as a parallel branch using fan-out edges + from an internal dispatcher. + + Args: + participants: Sequence of AgentProtocol or Executor instances + + Raises: + ValueError: if `participants` is empty, contains duplicates, or `.register_participants()` + or `.participants()` were already called + TypeError: if any entry is not AgentProtocol or Executor + + Example: + + .. 
code-block:: python + + wf = ConcurrentBuilder().participants([researcher_agent, marketer_agent, legal_agent]).build() + + # Mixing agent(s) and executor(s) is supported + wf2 = ConcurrentBuilder().participants([researcher_agent, my_custom_executor]).build() + """ + if self._participant_factories: + raise ValueError("Cannot mix .participants() and .register_participants() in the same builder instance.") + + if self._participants: + raise ValueError("participants() has already been called on this builder instance.") + + if not participants: + raise ValueError("participants cannot be empty") + + # Defensive duplicate detection + seen_agent_ids: set[int] = set() + seen_executor_ids: set[str] = set() + for p in participants: + if isinstance(p, Executor): + if p.id in seen_executor_ids: + raise ValueError(f"Duplicate executor participant detected: id '{p.id}'") + seen_executor_ids.add(p.id) + elif isinstance(p, AgentProtocol): + pid = id(p) + if pid in seen_agent_ids: + raise ValueError("Duplicate agent participant detected (same agent instance provided twice)") + seen_agent_ids.add(pid) + else: + raise TypeError(f"participants must be AgentProtocol or Executor instances; got {type(p).__name__}") + + self._participants = list(participants) + return self + + def register_aggregator(self, aggregator_factory: Callable[[], Executor]) -> "ConcurrentBuilder": + r"""Define a custom aggregator for this concurrent workflow. + + Accepts a factory (callable) that returns an Executor instance. The executor + should handle `list[AgentExecutorResponse]` and yield output using `ctx.yield_output(...)`. + + Args: + aggregator_factory: Callable that returns an Executor instance + + Example: + .. code-block:: python + + class MyCustomExecutor(Executor): ... 
+ + + wf = ( + ConcurrentBuilder() + .register_participants([create_researcher, create_marketer, create_legal]) + .register_aggregator(lambda: MyCustomExecutor(id="my_aggregator")) + .build() + ) + """ + if self._aggregator is not None: + raise ValueError( + "Cannot mix .with_aggregator(...) and .register_aggregator(...) in the same builder instance." + ) + + if self._aggregator_factory is not None: + raise ValueError("register_aggregator() has already been called on this builder instance.") + + self._aggregator_factory = aggregator_factory + return self + + def with_aggregator( + self, + aggregator: Executor + | Callable[[list[AgentExecutorResponse]], Any] + | Callable[[list[AgentExecutorResponse], WorkflowContext[Never, Any]], Any], + ) -> "ConcurrentBuilder": + r"""Override the default aggregator with an executor or a callback. + + - Executor: must handle `list[AgentExecutorResponse]` and yield output using `ctx.yield_output(...)` + - Callback: sync or async callable with one of the signatures: + `(results: list[AgentExecutorResponse]) -> Any | None` or + `(results: list[AgentExecutorResponse], ctx: WorkflowContext) -> Any | None`. + If the callback returns a non-None value, it becomes the workflow's output. + + Args: + aggregator: Executor instance, or callback function + + Example: + + .. 
code-block:: python + # Executor-based aggregator + class CustomAggregator(Executor): + @handler + async def aggregate(self, results: list[AgentExecutorResponse], ctx: WorkflowContext) -> None: + await ctx.yield_output(" | ".join(r.agent_response.messages[-1].text for r in results)) + + + wf = ConcurrentBuilder().participants([a1, a2, a3]).with_aggregator(CustomAggregator()).build() + + + # Callback-based aggregator (string result) + async def summarize(results: list[AgentExecutorResponse]) -> str: + return " | ".join(r.agent_response.messages[-1].text for r in results) + + + wf = ConcurrentBuilder().participants([a1, a2, a3]).with_aggregator(summarize).build() + + + # Callback-based aggregator (yield result) + async def summarize(results: list[AgentExecutorResponse], ctx: WorkflowContext[Never, str]) -> None: + await ctx.yield_output(" | ".join(r.agent_response.messages[-1].text for r in results)) + + + wf = ConcurrentBuilder().participants([a1, a2, a3]).with_aggregator(summarize).build() + """ + if self._aggregator_factory is not None: + raise ValueError( + "Cannot mix .with_aggregator(...) and .register_aggregator(...) in the same builder instance." + ) + + if self._aggregator is not None: + raise ValueError("with_aggregator() has already been called on this builder instance.") + + if isinstance(aggregator, Executor): + self._aggregator = aggregator + elif callable(aggregator): + self._aggregator = _CallbackAggregator(aggregator) + else: + raise TypeError("aggregator must be an Executor or a callable") + + return self + + def with_checkpointing(self, checkpoint_storage: CheckpointStorage) -> "ConcurrentBuilder": + """Enable checkpoint persistence using the provided storage backend. 
+ + Args: + checkpoint_storage: CheckpointStorage instance for persisting workflow state + """ + self._checkpoint_storage = checkpoint_storage + return self + + def with_request_info( + self, + *, + agents: Sequence[str | AgentProtocol] | None = None, + ) -> "ConcurrentBuilder": + """Enable request info after agent participant responses. + + This enables human-in-the-loop (HIL) scenarios for the concurrent orchestration. + When enabled, the workflow pauses after each agent participant runs, emitting + a RequestInfoEvent that allows the caller to review the conversation and optionally + inject guidance for the agent participant to iterate. The caller provides input via + the standard response_handler/request_info pattern. + + Simulated flow with HIL: + Input -> fan-out -> [Agent Participant <-> Request Info] (each branch, in parallel) -> fan-in -> Aggregator + + Note: This is only available for agent participants. Executor participants can incorporate + request info handling in their own implementation if desired. + + Args: + agents: Optional list of agent names or agent instances to enable request info for. + If None, enables HIL for all agent participants. + + Returns: + Self for fluent chaining + """ + from agent_framework._workflows._orchestration_request_info import resolve_request_info_filter + + self._request_info_enabled = True + self._request_info_filter = resolve_request_info_filter(list(agents) if agents else None) + + return self + + def _resolve_participants(self) -> list[Executor]: + """Resolve participant instances into Executor objects.""" + if not self._participants and not self._participant_factories: + raise ValueError("No participants provided. Call .participants() or .register_participants() first.") + # We don't need to check if both are set since that is handled in the respective methods + + participants: list[Executor | AgentProtocol] = [] + if self._participant_factories: + # Resolve the participant factories now.
This doesn't break the factory pattern + since the Concurrent builder still creates new instances per workflow build. + for factory in self._participant_factories: + p = factory() + participants.append(p) + else: + participants = self._participants + + executors: list[Executor] = [] + for p in participants: + if isinstance(p, Executor): + executors.append(p) + elif isinstance(p, AgentProtocol): + if self._request_info_enabled and ( + not self._request_info_filter or resolve_agent_id(p) in self._request_info_filter + ): + # Handle request info enabled agents + executors.append(AgentApprovalExecutor(p)) + else: + executors.append(AgentExecutor(p)) + else: + raise TypeError(f"Participants must be AgentProtocol or Executor instances. Got {type(p).__name__}.") + + return executors + + def build(self) -> Workflow: + r"""Build and validate the concurrent workflow. + + Wiring pattern: + - Dispatcher (internal) fans out the input to all `participants` + - Fan-in collects `AgentExecutorResponse` objects from all participants + - If request info is enabled, the orchestration emits a request info event with outputs from all participants + before sending the outputs to the aggregator + - Aggregator yields output and the workflow becomes idle. The output is either: + - list[ChatMessage] (default aggregator: one user + one assistant per agent) + - custom payload from the provided aggregator + + Returns: + Workflow: a ready-to-run workflow instance + + Raises: + ValueError: if no participants were defined + + Example: + + ..
code-block:: python + + workflow = ConcurrentBuilder().participants([agent1, agent2]).build() + """ + # Internal nodes + dispatcher = _DispatchToAllParticipants(id="dispatcher") + aggregator = ( + self._aggregator + if self._aggregator is not None + else ( + self._aggregator_factory() + if self._aggregator_factory is not None + else _AggregateAgentConversations(id="aggregator") + ) + ) + + # Resolve participants and participant factories to executors + participants: list[Executor] = self._resolve_participants() + + builder = WorkflowBuilder() + builder.set_start_executor(dispatcher) + # Fan-out for parallel execution + builder.add_fan_out_edges(dispatcher, participants) + # Direct fan-in to aggregator + builder.add_fan_in_edges(participants, aggregator) + + if self._checkpoint_storage is not None: + builder = builder.with_checkpointing(self._checkpoint_storage) + + return builder.build() diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_group_chat.py b/python/packages/orchestrations/agent_framework_orchestrations/_group_chat.py new file mode 100644 index 0000000000..cbd752ce5c --- /dev/null +++ b/python/packages/orchestrations/agent_framework_orchestrations/_group_chat.py @@ -0,0 +1,996 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Group chat orchestration primitives. + +This module introduces a reusable orchestration surface for orchestrator-directed +multi-agent conversations. The key components are: + +- GroupChatRequestMessage / GroupChatResponseMessage: canonical envelopes used + between the orchestrator and participants. +- GroupChatSelectionFunction: asynchronous callable for pluggable speaker selection logic. +- GroupChatOrchestrator: runtime state machine that delegates to a + selection function to select the next participant or complete the task. +- GroupChatBuilder: high-level builder that wires orchestrators and participants + into a workflow graph. 
It mirrors the ergonomics of SequentialBuilder and + ConcurrentBuilder while allowing Magentic to reuse the same infrastructure. + +The default wiring uses AgentExecutor under the hood for agent participants so +existing observability and streaming semantics continue to apply. +""" + +import inspect +import logging +import sys +from collections import OrderedDict +from collections.abc import Awaitable, Callable, Sequence +from dataclasses import dataclass +from typing import Any, ClassVar, cast, overload + +from agent_framework import AgentProtocol, ChatAgent +from agent_framework._threads import AgentThread +from agent_framework._types import ChatMessage +from agent_framework._workflows._agent_executor import AgentExecutor, AgentExecutorRequest, AgentExecutorResponse +from agent_framework._workflows._agent_utils import resolve_agent_id +from agent_framework._workflows._base_group_chat_orchestrator import ( + BaseGroupChatOrchestrator, + GroupChatParticipantMessage, + GroupChatRequestMessage, + GroupChatResponseMessage, + GroupChatWorkflowContext_T_Out, + ParticipantRegistry, + TerminationCondition, +) +from agent_framework._workflows._checkpoint import CheckpointStorage +from agent_framework._workflows._conversation_state import decode_chat_messages, encode_chat_messages +from agent_framework._workflows._executor import Executor +from agent_framework._workflows._orchestration_request_info import AgentApprovalExecutor +from agent_framework._workflows._workflow import Workflow +from agent_framework._workflows._workflow_builder import WorkflowBuilder +from agent_framework._workflows._workflow_context import WorkflowContext +from pydantic import BaseModel, Field +from typing_extensions import Never + +if sys.version_info >= (3, 12): + from typing import override # type: ignore # pragma: no cover +else: + from typing_extensions import override # type: ignore # pragma: no cover + +logger = logging.getLogger(__name__) + + +@dataclass(frozen=True) +class GroupChatState: + 
"""Immutable state of the group chat for the selection function to determine the next speaker. + + Attributes: + current_round: The current round index of the group chat, starting from 0. + participants: A mapping of participant names to their descriptions in the group chat. + conversation: The full conversation history up to this point as a list of ChatMessage. + """ + + # Round index, starting from 0 + current_round: int + # participant name to description mapping as a ordered dict + participants: OrderedDict[str, str] + # Full conversation history up to this point + conversation: list[ChatMessage] + + +# region Default orchestrator + + +# Type alias for the selection function used by the orchestrator to choose the next speaker. +GroupChatSelectionFunction = Callable[[GroupChatState], Awaitable[str] | str] + + +class GroupChatOrchestrator(BaseGroupChatOrchestrator): + """Orchestrator that manages a group chat between multiple participants. + + This group chat orchestrator operates under the direction of a selection function + provided at initialization. The selection function receives the current state of + the group chat and returns the name of the next participant to speak. + + This orchestrator drives the conversation loop as follows: + 1. Receives initial messages, saves to history, and broadcasts to all participants + 2. Invokes the selection function to determine the next speaker based on the most recent state + 3. Sends a request to the selected participant to generate a response + 4. Receives the participant's response, saves to history, and broadcasts to all participants + except the one that just spoke + 5. Repeats steps 2-4 until the termination conditions are met + + This is the most basic orchestrator, great for getting started with multi-agent + conversations. More advanced orchestrators can be built by extending BaseGroupChatOrchestrator + and implementing custom logic in the message and response handlers. 
+ """ + + def __init__( + self, + id: str, + participant_registry: ParticipantRegistry, + selection_func: GroupChatSelectionFunction, + *, + name: str | None = None, + max_rounds: int | None = None, + termination_condition: TerminationCondition | None = None, + ) -> None: + """Initialize the GroupChatOrchestrator. + + Args: + id: Unique executor ID for the orchestrator. The ID must be unique within the workflow. + participant_registry: Registry of participants in the group chat that track executor types + (agents vs. executors) and provide resolution utilities. + selection_func: Function to select the next speaker based on conversation state + name: Optional display name for the orchestrator in the messages, defaults to executor ID. + A more descriptive name that is not an ID could help models better understand the role + of the orchestrator in multi-agent conversations. If the ID is not human-friendly, + providing a name can improve context for the agents. + max_rounds: Optional limit on selection rounds to prevent infinite loops. + termination_condition: Optional callable that halts the conversation when it returns True + + Note: If neither `max_rounds` nor `termination_condition` is provided, the conversation + will continue indefinitely. It is recommended to always set one of these to ensure proper termination. + + Example: + .. 
code-block:: python + + from agent_framework_orchestrations import GroupChatOrchestrator + + + async def round_robin_selector(state: GroupChatState) -> str: + # Simple round-robin selection among participants (keys of the name -> description mapping) + return list(state.participants)[state.current_round % len(state.participants)] + + + orchestrator = GroupChatOrchestrator( + id="group_chat_orchestrator_1", + selection_func=round_robin_selector, + participant_registry=registry, + name="Coordinator", + max_rounds=10, + ) + """ + super().__init__( + id, + participant_registry, + name=name, + max_rounds=max_rounds, + termination_condition=termination_condition, + ) + self._selection_func = selection_func + + @override + async def _handle_messages( + self, + messages: list[ChatMessage], + ctx: WorkflowContext[GroupChatWorkflowContext_T_Out, list[ChatMessage]], + ) -> None: + """Initialize orchestrator state and start the conversation loop.""" + self._append_messages(messages) + # Termination condition will also be applied to the input messages + if await self._check_terminate_and_yield(cast(WorkflowContext[Never, list[ChatMessage]], ctx)): + return + + next_speaker = await self._get_next_speaker() + + # Broadcast messages to all participants for context + await self._broadcast_messages_to_participants( + messages, + cast(WorkflowContext[AgentExecutorRequest | GroupChatParticipantMessage], ctx), + ) + # Send request to selected participant + await self._send_request_to_participant( + next_speaker, + cast(WorkflowContext[AgentExecutorRequest | GroupChatRequestMessage], ctx), + ) + self._increment_round() + + @override + async def _handle_response( + self, + response: AgentExecutorResponse | GroupChatResponseMessage, + ctx: WorkflowContext[GroupChatWorkflowContext_T_Out, list[ChatMessage]], + ) -> None: + """Handle a participant response.""" + messages = self._process_participant_response(response) + self._append_messages(messages) + + if await self._check_terminate_and_yield(cast(WorkflowContext[Never,
list[ChatMessage]], ctx)): + return + if await self._check_round_limit_and_yield(cast(WorkflowContext[Never, list[ChatMessage]], ctx)): + return + + next_speaker = await self._get_next_speaker() + + # Broadcast participant messages to all participants for context, except + # the participant that just responded + participant = ctx.get_source_executor_id() + await self._broadcast_messages_to_participants( + messages, + cast(WorkflowContext[AgentExecutorRequest | GroupChatParticipantMessage], ctx), + participants=[p for p in self._participant_registry.participants if p != participant], + ) + # Send request to selected participant + await self._send_request_to_participant( + next_speaker, + cast(WorkflowContext[AgentExecutorRequest | GroupChatRequestMessage], ctx), + ) + self._increment_round() + + async def _get_next_speaker(self) -> str: + """Determine the next speaker using the selection function.""" + group_chat_state = GroupChatState( + current_round=self._round_index, + participants=self._participant_registry.participants, + conversation=self._get_conversation(), + ) + + next_speaker = self._selection_func(group_chat_state) + if inspect.isawaitable(next_speaker): + next_speaker = await next_speaker + + if next_speaker not in self._participant_registry.participants: + raise RuntimeError(f"Selection function returned unknown participant '{next_speaker}'.") + + return next_speaker + + +# endregion + +# region Agent-based orchestrator + + +class AgentOrchestrationOutput(BaseModel): + """Structured output type for the agent in AgentBasedGroupChatOrchestrator.""" + + model_config = { + "extra": "forbid", + # OpenAI strict mode requires all properties to be in required array + "json_schema_extra": {"required": ["terminate", "reason", "next_speaker", "final_message"]}, + } + + # Whether to terminate the conversation + terminate: bool + # An explanation for the decision made + reason: str + # Next speaker to select if not terminating + next_speaker: str | None = Field( + 
default=None, + description="Name of the next participant to speak (if not terminating)", + ) + # Optional final message to send if terminating + final_message: str | None = Field(default=None, description="Optional final message if terminating") + + +class AgentBasedGroupChatOrchestrator(BaseGroupChatOrchestrator): + """Orchestrator that manages a group chat between multiple participants. + + This group chat orchestrator is driven by an agent that can select the next speaker + intelligently based on the conversation context. + + This orchestrator drives the conversation loop as follows: + 1. Receives initial messages, saves to history, and broadcasts to all participants + 2. Invokes the agent to determine the next speaker based on the most recent state + 3. Sends a request to the selected participant to generate a response + 4. Receives the participant's response, saves to history, and broadcasts to all participants + except the one that just spoke + 5. Repeats steps 2-4 until the termination conditions are met + + Note: The agent will be asked to generate a structured output of type `AgentOrchestrationOutput`, + thus it must be capable of structured output. + """ + + def __init__( + self, + agent: ChatAgent, + participant_registry: ParticipantRegistry, + *, + max_rounds: int | None = None, + termination_condition: TerminationCondition | None = None, + retry_attempts: int | None = None, + thread: AgentThread | None = None, + ) -> None: + """Initialize the GroupChatOrchestrator. + + Args: + agent: Agent that selects the next speaker based on conversation state + participant_registry: Registry of participants in the group chat that track executor types + (agents vs. executors) and provide resolution utilities. + max_rounds: Optional limit on selection rounds to prevent infinite loops. + termination_condition: Optional callable that halts the conversation when it returns True + retry_attempts: Optional number of retry attempts for the agent in case of failure. 
+ thread: Optional agent thread to use for the orchestrator agent. + """ + super().__init__( + resolve_agent_id(agent), + participant_registry, + name=agent.name, + max_rounds=max_rounds, + termination_condition=termination_condition, + ) + self._agent = agent + self._retry_attempts = retry_attempts + self._thread = thread or agent.get_new_thread() + # Cache for messages since last agent invocation + # This is different from the full conversation history maintained by the base orchestrator + self._cache: list[ChatMessage] = [] + + @override + def _append_messages(self, messages: Sequence[ChatMessage]) -> None: + self._cache.extend(messages) + return super()._append_messages(messages) + + @override + async def _handle_messages( + self, + messages: list[ChatMessage], + ctx: WorkflowContext[GroupChatWorkflowContext_T_Out, list[ChatMessage]], + ) -> None: + """Initialize orchestrator state and start the conversation loop.""" + self._append_messages(messages) + # Termination condition will also be applied to the input messages + if await self._check_terminate_and_yield(cast(WorkflowContext[Never, list[ChatMessage]], ctx)): + return + + agent_orchestration_output = await self._invoke_agent() + if await self._check_agent_terminate_and_yield( + agent_orchestration_output, + cast(WorkflowContext[Never, list[ChatMessage]], ctx), + ): + return + + # Broadcast messages to all participants for context + await self._broadcast_messages_to_participants( + messages, + cast(WorkflowContext[AgentExecutorRequest | GroupChatParticipantMessage], ctx), + ) + # Send request to selected participant + await self._send_request_to_participant( + # If not terminating, next_speaker must be provided thus will not be None + agent_orchestration_output.next_speaker, # type: ignore[arg-type] + cast(WorkflowContext[AgentExecutorRequest | GroupChatRequestMessage], ctx), + ) + self._increment_round() + + @override + async def _handle_response( + self, + response: AgentExecutorResponse | 
GroupChatResponseMessage, + ctx: WorkflowContext[GroupChatWorkflowContext_T_Out, list[ChatMessage]], + ) -> None: + """Handle a participant response.""" + messages = self._process_participant_response(response) + self._append_messages(messages) + if await self._check_terminate_and_yield(cast(WorkflowContext[Never, list[ChatMessage]], ctx)): + return + if await self._check_round_limit_and_yield(cast(WorkflowContext[Never, list[ChatMessage]], ctx)): + return + + agent_orchestration_output = await self._invoke_agent() + if await self._check_agent_terminate_and_yield( + agent_orchestration_output, + cast(WorkflowContext[Never, list[ChatMessage]], ctx), + ): + return + + # Broadcast participant messages to all participants for context, except + # the participant that just responded + participant = ctx.get_source_executor_id() + await self._broadcast_messages_to_participants( + messages, + cast(WorkflowContext[AgentExecutorRequest | GroupChatParticipantMessage], ctx), + participants=[p for p in self._participant_registry.participants if p != participant], + ) + # Send request to selected participant + await self._send_request_to_participant( + # If not terminating, next_speaker must be provided thus will not be None + agent_orchestration_output.next_speaker, # type: ignore[arg-type] + cast(WorkflowContext[AgentExecutorRequest | GroupChatRequestMessage], ctx), + ) + self._increment_round() + + async def _invoke_agent(self) -> AgentOrchestrationOutput: + """Invoke the orchestrator agent to determine the next speaker and termination.""" + + async def _invoke_agent_helper(conversation: list[ChatMessage]) -> AgentOrchestrationOutput: + # Run the agent in non-streaming mode for simplicity + agent_response = await self._agent.run( + messages=conversation, + thread=self._thread, + options={"response_format": AgentOrchestrationOutput}, + ) + # Parse and validate the structured output + agent_orchestration_output = AgentOrchestrationOutput.model_validate_json(agent_response.text) 
+ + if not agent_orchestration_output.terminate and not agent_orchestration_output.next_speaker: + raise ValueError("next_speaker must be provided if not terminating the conversation.") + + return agent_orchestration_output + + # Only send messages accumulated since the last invocation; full history is maintained in the thread + current_conversation = self._cache.copy() + self._cache.clear() + instruction = ( + "Decide what to do next. Respond with a JSON object of the following format:\n" + "{\n" + ' "terminate": ,\n' + ' "reason": "",\n' + ' "next_speaker": "",\n' + ' "final_message": ""\n' + "}\n" + "If not terminating, here are the valid participant names (case-sensitive) and their descriptions:\n" + + "\n".join([ + f"{name}: {description}" for name, description in self._participant_registry.participants.items() + ]) + ) + # Append the instruction to the conversation as a user message + current_conversation.append(ChatMessage("user", [instruction])) + + retry_attempts = self._retry_attempts + while True: + try: + return await _invoke_agent_helper(current_conversation) + except Exception as ex: + logger.error(f"Agent orchestration invocation failed: {ex}") + if retry_attempts is None or retry_attempts <= 0: + raise + retry_attempts -= 1 + logger.debug(f"Retrying agent orchestration invocation, attempts left: {retry_attempts}") + # We don't need the full conversation since the thread should maintain history + current_conversation = [ + ChatMessage( + role="user", + text=f"Your input could not be parsed due to an error: {ex}. Please try again.", + ) + ] + + async def _check_agent_terminate_and_yield( + self, + agent_orchestration_output: AgentOrchestrationOutput, + ctx: WorkflowContext[Never, list[ChatMessage]], + ) -> bool: + """Check if the agent requested termination and yield completion if so.
+ + Args: + agent_orchestration_output: Output from the orchestrator agent + ctx: Workflow context for yielding output + Returns: + True if termination was requested and output was yielded, False otherwise + """ + if agent_orchestration_output.terminate: + final_message = ( + agent_orchestration_output.final_message or "The conversation has been terminated by the agent." + ) + self._append_messages([self._create_completion_message(final_message)]) + await ctx.yield_output(self._full_conversation) + return True + + return False + + @override + async def on_checkpoint_save(self) -> dict[str, Any]: + """Capture current orchestrator state for checkpointing.""" + state = await super().on_checkpoint_save() + state["cache"] = encode_chat_messages(self._cache) + serialized_thread = await self._thread.serialize() + state["thread"] = serialized_thread + + return state + + @override + async def on_checkpoint_restore(self, state: dict[str, Any]) -> None: + """Restore executor state from checkpoint.""" + await super().on_checkpoint_restore(state) + self._cache = decode_chat_messages(state.get("cache", [])) + serialized_thread = state.get("thread") + if serialized_thread: + self._thread = await self._agent.deserialize_thread(serialized_thread) + + +# endregion + +# region Builder + + +class GroupChatBuilder: + r"""High-level builder for group chat workflows. + + GroupChat coordinates multi-agent conversations using an orchestrator that can dynamically + select participants to speak at each turn based on the conversation state. + + Routing Pattern: + Agents respond in turns as directed by the orchestrator until termination conditions are met. + This provides a centralized approach to multi-agent collaboration, similar to a star topology. + + Participants can be a combination of agents and executors. 
If they are executors, they + must implement the expected handlers for receiving GroupChat messages and returning responses + (Read our official documentation for details on implementing custom participant executors). + + The orchestrator can be provided directly, or a simple selection function can be defined + to choose the next speaker based on the current state. The builder wires everything together + into a complete workflow graph that can be executed. + + Outputs: + The final conversation history as a list of ChatMessage once the group chat completes. + """ + + DEFAULT_ORCHESTRATOR_ID: ClassVar[str] = "group_chat_orchestrator" + + def __init__(self) -> None: + """Initialize the GroupChatBuilder.""" + self._participants: dict[str, AgentProtocol | Executor] = {} + self._participant_factories: list[Callable[[], AgentProtocol | Executor]] = [] + + # Orchestrator related members + self._orchestrator: BaseGroupChatOrchestrator | None = None + self._orchestrator_factory: Callable[[], ChatAgent | BaseGroupChatOrchestrator] | None = None + self._selection_func: GroupChatSelectionFunction | None = None + self._agent_orchestrator: ChatAgent | None = None + self._termination_condition: TerminationCondition | None = None + self._max_rounds: int | None = None + self._orchestrator_name: str | None = None + + # Checkpoint related members + self._checkpoint_storage: CheckpointStorage | None = None + + # Request info related members + self._request_info_enabled: bool = False + self._request_info_filter: set[str] = set() + + @overload + def with_orchestrator(self, *, agent: ChatAgent | Callable[[], ChatAgent]) -> "GroupChatBuilder": + """Set the orchestrator for this group chat workflow using a ChatAgent. + + Args: + agent: An instance of ChatAgent or a callable that produces one to manage the group chat. + + Returns: + Self for fluent chaining. + """ + ... 
+ + @overload + def with_orchestrator( + self, *, orchestrator: BaseGroupChatOrchestrator | Callable[[], BaseGroupChatOrchestrator] + ) -> "GroupChatBuilder": + """Set the orchestrator for this group chat workflow using a custom orchestrator. + + Args: + orchestrator: An instance of BaseGroupChatOrchestrator or a callable that produces one to + manage the group chat. + + Returns: + Self for fluent chaining. + + Note: + When using a custom orchestrator that implements `BaseGroupChatOrchestrator`, setting + `termination_condition` and `max_rounds` on the builder will have no effect since the + orchestrator is already fully defined. + """ + ... + + @overload + def with_orchestrator( + self, + *, + selection_func: GroupChatSelectionFunction, + orchestrator_name: str | None = None, + ) -> "GroupChatBuilder": + """Set the orchestrator for this group chat workflow using a selection function. + + Args: + selection_func: Callable that receives the current GroupChatState and returns + the name of the next participant to speak, or None to finish. + orchestrator_name: Optional display name for the orchestrator in the workflow. + If not provided, defaults to `GroupChatBuilder.DEFAULT_ORCHESTRATOR_ID`. + + Returns: + Self for fluent chaining. + """ + ... + + def with_orchestrator( + self, + *, + agent: ChatAgent | Callable[[], ChatAgent] | None = None, + orchestrator: BaseGroupChatOrchestrator | Callable[[], BaseGroupChatOrchestrator] | None = None, + selection_func: GroupChatSelectionFunction | None = None, + orchestrator_name: str | None = None, + ) -> "GroupChatBuilder": + """Set the orchestrator for this group chat workflow. + + An group chat orchestrator is responsible for managing the flow of conversation, making + sure all participants are synced and picking the next speaker according to the defined logic + until the termination conditions are met. + + There are a few ways to configure the orchestrator: + 1. 
Provide a ChatAgent instance or a factory function that produces one to use an agent-based orchestrator + 2. Provide a BaseGroupChatOrchestrator instance or a factory function that produces one to use a custom + orchestrator + 3. Provide a selection function to use that picks the next speaker based on the function logic + + You can only use one of the above methods to configure the orchestrator. + + Args: + agent: An instance of ChatAgent or a callable that produces one to manage the group chat. + orchestrator: An instance of BaseGroupChatOrchestrator or a callable that produces one to manage the group + chat. + selection_func: Callable that receives the current GroupChatState and returns + the name of the next participant to speak, or None to finish. + orchestrator_name: Optional display name for the orchestrator in the workflow if + using a selection function. If not provided, defaults to + `GroupChatBuilder.DEFAULT_ORCHESTRATOR_ID`. This parameter is + ignored if using an agent or custom orchestrator. + + Returns: + Self for fluent chaining. + + Raises: + ValueError: If an orchestrator has already been set or if none or multiple + of the parameters are provided. + + Note: + When using a custom orchestrator that implements `BaseGroupChatOrchestrator`, either + via the `orchestrator` or `orchestrator_factory` parameters, setting `termination_condition` + and `max_rounds` on the builder will have no effect since the orchestrator is already + fully defined. + + Example: + .. code-block:: python + + from agent_framework_orchestrations import GroupChatBuilder + + + orchestrator = CustomGroupChatOrchestrator(...) + workflow = GroupChatBuilder().with_orchestrator(orchestrator).participants([agent1, agent2]).build() + """ + if self._agent_orchestrator is not None: + raise ValueError( + "An agent orchestrator has already been configured. Call with_orchestrator(...) once only." 
+ ) + + if self._orchestrator is not None: + raise ValueError("An orchestrator has already been configured. Call with_orchestrator(...) once only.") + + if self._orchestrator_factory is not None: + raise ValueError("A factory has already been configured. Call with_orchestrator(...) once only.") + + if self._selection_func is not None: + raise ValueError("A selection function has already been configured. Call with_orchestrator(...) once only.") + + if sum(x is not None for x in [agent, orchestrator, selection_func]) != 1: + raise ValueError("Exactly one of agent, orchestrator, or selection_func must be provided.") + + if agent is not None and isinstance(agent, ChatAgent): + self._agent_orchestrator = agent + elif orchestrator is not None and isinstance(orchestrator, BaseGroupChatOrchestrator): + self._orchestrator = orchestrator + elif selection_func is not None: + self._selection_func = selection_func + self._orchestrator_name = orchestrator_name + else: + self._orchestrator_factory = agent or orchestrator + + return self + + def register_participants( + self, + participant_factories: Sequence[Callable[[], AgentProtocol | Executor]], + ) -> "GroupChatBuilder": + """Register participant factories for this group chat workflow. + + Args: + participant_factories: Sequence of callables that produce participant definitions + when invoked. Each callable should return either an AgentProtocol instance + (auto-wrapped as AgentExecutor) or an Executor instance. 
+ + Returns: + Self for fluent chaining + + Raises: + ValueError: If participant_factories is empty, or participants + or participant factories are already set + """ + if self._participants: + raise ValueError("Cannot mix .participants() and .register_participants() in the same builder instance.") + + if self._participant_factories: + raise ValueError("register_participants() has already been called on this builder instance.") + + if not participant_factories: + raise ValueError("participant_factories cannot be empty") + + self._participant_factories = list(participant_factories) + return self + + def participants(self, participants: Sequence[AgentProtocol | Executor]) -> "GroupChatBuilder": + """Define participants for this group chat workflow. + + Accepts AgentProtocol instances (auto-wrapped as AgentExecutor) or Executor instances. + + Args: + participants: Sequence of participant definitions + + Returns: + Self for fluent chaining + + Raises: + ValueError: If participants are empty, names are duplicated, or participants + or participant factories are already set + TypeError: If any participant is not AgentProtocol or Executor instance + + Example: + + .. code-block:: python + + from agent_framework_orchestrations import GroupChatBuilder + + workflow = ( + GroupChatBuilder() + .with_orchestrator(selection_func=my_selection_function) + .participants([agent1, agent2, custom_executor]) + .build() + ) + """ + if self._participant_factories: + raise ValueError("Cannot mix .participants() and .register_participants() in the same builder instance.") + + if self._participants: + raise ValueError("participants have already been set. 
Call participants() at most once.") + + if not participants: + raise ValueError("participants cannot be empty.") + + # Name of the executor mapped to participant instance + named: dict[str, AgentProtocol | Executor] = {} + for participant in participants: + if isinstance(participant, Executor): + identifier = participant.id + elif isinstance(participant, AgentProtocol): + if not participant.name: + raise ValueError("AgentProtocol participants must have a non-empty name.") + identifier = participant.name + else: + raise TypeError( + f"Participants must be AgentProtocol or Executor instances. Got {type(participant).__name__}." + ) + + if identifier in named: + raise ValueError(f"Duplicate participant name '{identifier}' detected") + + named[identifier] = participant + + self._participants = named + + return self + + def with_termination_condition(self, termination_condition: TerminationCondition) -> "GroupChatBuilder": + """Set a custom termination condition for the group chat workflow. + + Args: + termination_condition: Callable that receives the conversation history and returns + True to terminate the conversation, False to continue. + + Returns: + Self for fluent chaining + + Example: + + .. code-block:: python + + from agent_framework import ChatMessage + from agent_framework_orchestrations import GroupChatBuilder + + + def stop_after_two_calls(conversation: list[ChatMessage]) -> bool: + calls = sum(1 for msg in conversation if msg.role == "assistant" and msg.author_name == "specialist") + return calls >= 2 + + + specialist_agent = ... + workflow = ( + GroupChatBuilder() + .with_orchestrator(selection_func=my_selection_function) + .participants([agent1, specialist_agent]) + .with_termination_condition(stop_after_two_calls) + .build() + ) + """ + if self._orchestrator is not None or self._orchestrator_factory is not None: + logger.warning( + "Orchestrator has already been configured; setting termination condition on builder has no effect." 
+ ) + + self._termination_condition = termination_condition + return self + + def with_max_rounds(self, max_rounds: int | None) -> "GroupChatBuilder": + """Set a maximum number of orchestrator rounds to prevent infinite conversations. + + When the round limit is reached, the workflow automatically completes with + a default completion message. Setting to None allows unlimited rounds. + + Args: + max_rounds: Maximum number of orchestrator selection rounds, or None for unlimited + + Returns: + Self for fluent chaining + """ + if self._orchestrator is not None or self._orchestrator_factory is not None: + logger.warning("Orchestrator has already been configured; setting max rounds on builder has no effect.") + + self._max_rounds = max_rounds + return self + + def with_checkpointing(self, checkpoint_storage: CheckpointStorage) -> "GroupChatBuilder": + """Enable checkpointing for the built workflow using the provided storage. + + Checkpointing allows the workflow to persist state and resume from interruption + points, enabling long-running conversations and failure recovery. + + Args: + checkpoint_storage: Storage implementation for persisting workflow state + + Returns: + Self for fluent chaining + + Example: + + .. code-block:: python + + from agent_framework import MemoryCheckpointStorage + from agent_framework_orchestrations import GroupChatBuilder + + storage = MemoryCheckpointStorage() + workflow = ( + GroupChatBuilder() + .with_orchestrator(selection_func=my_selection_function) + .participants([agent1, agent2]) + .with_checkpointing(storage) + .build() + ) + """ + self._checkpoint_storage = checkpoint_storage + return self + + def with_request_info(self, *, agents: Sequence[str | AgentProtocol] | None = None) -> "GroupChatBuilder": + """Enable request info after agent participant responses. + + This enables human-in-the-loop (HIL) scenarios for the group chat orchestration. 
+ When enabled, the workflow pauses after each agent participant runs, emitting + a RequestInfoEvent that allows the caller to review the conversation and optionally + inject guidance for the agent participant to iterate. The caller provides input via + the standard response_handler/request_info pattern. + + Simulated flow with HIL: + Input -> Orchestrator -> [Participant <-> Request Info] -> Orchestrator -> [Participant <-> Request Info] -> ... + + Note: This is only available for agent participants. Executor participants can incorporate + request info handling in their own implementation if desired. + + Args: + agents: Optional list of agents names to enable request info for. + If None, enables HIL for all agent participants. + + Returns: + Self for fluent chaining + """ + from agent_framework._workflows._orchestration_request_info import resolve_request_info_filter + + self._request_info_enabled = True + self._request_info_filter = resolve_request_info_filter(list(agents) if agents else None) + + return self + + def _resolve_orchestrator(self, participants: Sequence[Executor]) -> Executor: + """Determine the orchestrator to use for the workflow. + + Args: + participants: List of resolved participant executors + """ + if all( + x is None + for x in [self._agent_orchestrator, self._selection_func, self._orchestrator, self._orchestrator_factory] + ): + raise ValueError("No orchestrator has been configured. 
Call with_orchestrator() to set one.") + # We don't need to check if multiple are set since that is handled in with_orchestrator() + + if self._agent_orchestrator: + return AgentBasedGroupChatOrchestrator( + agent=self._agent_orchestrator, + participant_registry=ParticipantRegistry(participants), + max_rounds=self._max_rounds, + termination_condition=self._termination_condition, + ) + + if self._selection_func: + return GroupChatOrchestrator( + id=self.DEFAULT_ORCHESTRATOR_ID, + participant_registry=ParticipantRegistry(participants), + selection_func=self._selection_func, + name=self._orchestrator_name, + max_rounds=self._max_rounds, + termination_condition=self._termination_condition, + ) + + if self._orchestrator: + return self._orchestrator + + if self._orchestrator_factory: + orchestrator_instance = self._orchestrator_factory() + if isinstance(orchestrator_instance, ChatAgent): + return AgentBasedGroupChatOrchestrator( + agent=orchestrator_instance, + participant_registry=ParticipantRegistry(participants), + max_rounds=self._max_rounds, + termination_condition=self._termination_condition, + ) + if isinstance(orchestrator_instance, BaseGroupChatOrchestrator): + return orchestrator_instance + raise TypeError( + f"Orchestrator factory must return ChatAgent or BaseGroupChatOrchestrator instance. " + f"Got {type(orchestrator_instance).__name__}." + ) + + # This should never be reached due to the checks above + raise RuntimeError("Orchestrator could not be resolved. Please provide one via with_orchestrator()") + + def _resolve_participants(self) -> list[Executor]: + """Resolve participant instances into Executor objects.""" + if not self._participants and not self._participant_factories: + raise ValueError("No participants provided. 
Call .participants() or .register_participants() first.") + # We don't need to check if both are set since that is handled in the respective methods + + participants: list[Executor | AgentProtocol] = [] + if self._participant_factories: + for factory in self._participant_factories: + participant = factory() + participants.append(participant) + else: + participants = list(self._participants.values()) + + executors: list[Executor] = [] + for participant in participants: + if isinstance(participant, Executor): + executors.append(participant) + elif isinstance(participant, AgentProtocol): + if self._request_info_enabled and ( + not self._request_info_filter or resolve_agent_id(participant) in self._request_info_filter + ): + # Handle request info enabled agents + executors.append(AgentApprovalExecutor(participant)) + else: + executors.append(AgentExecutor(participant)) + else: + raise TypeError( + f"Participants must be AgentProtocol or Executor instances. Got {type(participant).__name__}." + ) + + return executors + + def build(self) -> Workflow: + """Build and validate the group chat workflow. + + Assembles the orchestrator and participants into a complete workflow graph. + The workflow graph consists of bi-directional edges between the orchestrator and each participant, + allowing for message exchanges in both directions. 
+ + Returns: + Validated Workflow instance ready for execution + """ + # Resolve orchestrator and participants to executors + participants: list[Executor] = self._resolve_participants() + orchestrator: Executor = self._resolve_orchestrator(participants) + + # Build workflow graph + workflow_builder = WorkflowBuilder().set_start_executor(orchestrator) + for participant in participants: + # Orchestrator and participant bi-directional edges + workflow_builder = workflow_builder.add_edge(orchestrator, participant) + workflow_builder = workflow_builder.add_edge(participant, orchestrator) + if self._checkpoint_storage is not None: + workflow_builder = workflow_builder.with_checkpointing(self._checkpoint_storage) + + return workflow_builder.build() + + +# endregion diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py b/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py new file mode 100644 index 0000000000..6004c1c28e --- /dev/null +++ b/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py @@ -0,0 +1,1222 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""High-level builder for conversational handoff workflows. + +The handoff pattern models a group of agents that can intelligently route +control to other agents based on the conversation context. + +The flow is typically: + + user input -> Agent A -> Agent B -> Agent C -> Agent A -> ... -> output + +Depending on whether request info is enabled, the flow may include user input (except when an agent hands off): + + user input -> [Agent A -> Request info] -> [Agent B -> Request info] -> [Agent C -> Request info] -> ... -> output + +The difference between a group chat workflow and a handoff workflow is that in group chat there is +always an orchestrator that decides who to speak next, while in handoff the agents themselves decide +who to hand off to next by invoking a tool call that names the target agent.
+ +Group Chat: centralized orchestration of multiple agents +Handoff: decentralized routing by agents themselves + +Key properties: +- The entire conversation is maintained and reused on every hop +- Agents signal handoffs by invoking a tool call that names the other agents +- In human_in_loop mode (default), the workflow requests user input after each agent response + that doesn't trigger a handoff +- In autonomous mode, agents continue responding until they invoke a handoff tool or reach + a termination condition or turn limit +""" + +import inspect +import logging +import sys +from collections.abc import Awaitable, Callable, Mapping, Sequence +from dataclasses import dataclass +from typing import Any, cast + +from agent_framework import AgentProtocol, ChatAgent +from agent_framework._middleware import FunctionInvocationContext, FunctionMiddleware +from agent_framework._threads import AgentThread +from agent_framework._tools import FunctionTool, tool +from agent_framework._types import AgentResponse, ChatMessage +from agent_framework._workflows._agent_executor import AgentExecutor, AgentExecutorRequest, AgentExecutorResponse +from agent_framework._workflows._agent_utils import resolve_agent_id +from agent_framework._workflows._base_group_chat_orchestrator import TerminationCondition +from agent_framework._workflows._checkpoint import CheckpointStorage +from agent_framework._workflows._events import WorkflowEvent +from agent_framework._workflows._orchestrator_helpers import clean_conversation_for_handoff +from agent_framework._workflows._request_info_mixin import response_handler +from agent_framework._workflows._workflow import Workflow +from agent_framework._workflows._workflow_builder import WorkflowBuilder +from agent_framework._workflows._workflow_context import WorkflowContext +from typing_extensions import Never + +if sys.version_info >= (3, 12): + from typing import override # type: ignore # pragma: no cover +else: + from typing_extensions import override 
# type: ignore # pragma: no cover + + +logger = logging.getLogger(__name__) + + +# region Handoff events +class HandoffSentEvent(WorkflowEvent): + """Event emitted when one agent hands the conversation off to another agent.""" + + def __init__(self, source: str, target: str, data: Any | None = None) -> None: + """Initialize handoff sent event. + + Args: + source: Identifier of the source agent initiating the handoff + target: Identifier of the target agent receiving the handoff + data: Optional event-specific data + """ + super().__init__(data) + self.source = source + self.target = target + + +# endregion + + +@dataclass +class HandoffConfiguration: + """Configuration for handoff routing between agents. + + Attributes: + target_id: Identifier of the target agent to hand off to + description: Optional human-readable description of the handoff + """ + + target_id: str + description: str | None = None + + def __init__(self, *, target: str | AgentProtocol, description: str | None = None) -> None: + """Initialize HandoffConfiguration.
+ + Args: + target: Target agent identifier or AgentProtocol instance + description: Optional human-readable description of the handoff + """ + self.target_id = resolve_agent_id(target) if isinstance(target, AgentProtocol) else target + self.description = description + + def __eq__(self, other: Any) -> bool: + """Determine equality based on source_id and target_id.""" + if not isinstance(other, HandoffConfiguration): + return False + + return self.target_id == other.target_id + + def __hash__(self) -> int: + """Compute hash based on source_id and target_id.""" + return hash(self.target_id) + + +def get_handoff_tool_name(target_id: str) -> str: + """Get the standardized handoff tool name for a given target agent ID.""" + return f"handoff_to_{target_id}" + + +HANDOFF_FUNCTION_RESULT_KEY = "handoff_to" + + +class _AutoHandoffMiddleware(FunctionMiddleware): + """Intercept handoff tool invocations and short-circuit execution with synthetic results.""" + + def __init__(self, handoffs: Sequence[HandoffConfiguration]) -> None: + """Initialise middleware with the mapping from tool name to specialist id.""" + self._handoff_functions = {get_handoff_tool_name(handoff.target_id): handoff.target_id for handoff in handoffs} + + async def process( + self, + context: FunctionInvocationContext, + next: Callable[[FunctionInvocationContext], Awaitable[None]], + ) -> None: + """Intercept matching handoff tool calls and inject synthetic results.""" + if context.function.name not in self._handoff_functions: + await next(context) + return + + # Short-circuit execution and provide deterministic response payload for the tool call. + context.result = {HANDOFF_FUNCTION_RESULT_KEY: self._handoff_functions[context.function.name]} + context.terminate = True + + +@dataclass +class HandoffAgentUserRequest: + """Request issued to the user after an agent run in a handoff workflow. 
+ + Attributes: + agent_response: The response generated by the agent at the most recent turn + """ + + agent_response: AgentResponse + + @staticmethod + def create_response(response: str | list[str] | ChatMessage | list[ChatMessage]) -> list[ChatMessage]: + """Create a HandoffAgentUserRequest from a simple text response.""" + messages: list[ChatMessage] = [] + if isinstance(response, str): + messages.append(ChatMessage("user", [response])) + elif isinstance(response, ChatMessage): + messages.append(response) + elif isinstance(response, list): + for item in response: + if isinstance(item, ChatMessage): + messages.append(item) + elif isinstance(item, str): + messages.append(ChatMessage("user", [item])) + else: + raise TypeError("List items must be either str or ChatMessage instances") + else: + raise TypeError("Response must be str, list of str, ChatMessage, or list of ChatMessage") + + return messages + + @staticmethod + def terminate() -> list[ChatMessage]: + """Create a termination response for the handoff workflow.""" + return [] + + +# In autonomous mode, the agent continues responding until it requests a handoff +# or reaches a turn limit, after which it requests user input to continue. +_AUTONOMOUS_MODE_DEFAULT_PROMPT = "User did not respond. Continue assisting autonomously." +_DEFAULT_AUTONOMOUS_TURN_LIMIT = 50 + +# region Handoff Agent Executor + + +class HandoffAgentExecutor(AgentExecutor): + """Specialized AgentExecutor that supports handoff tool interception.""" + + def __init__( + self, + agent: AgentProtocol, + handoffs: Sequence[HandoffConfiguration], + *, + agent_thread: AgentThread | None = None, + is_start_agent: bool = False, + termination_condition: TerminationCondition | None = None, + autonomous_mode: bool = False, + autonomous_mode_prompt: str | None = None, + autonomous_mode_turn_limit: int | None = None, + ) -> None: + """Initialize the HandoffAgentExecutor. 
+ + Args: + agent: The agent to execute + handoffs: Sequence of handoff configurations defining target agents + agent_thread: Optional AgentThread that manages the agent's execution context + is_start_agent: Whether this agent is the starting agent in the handoff workflow. + There can only be one starting agent in a handoff workflow. + termination_condition: Optional callable that determines when to terminate the workflow + autonomous_mode: Whether the agent should continue operating without involving + external systems after a response that does not trigger a handoff, + until the turn limit is reached. This allows the agent to perform long-running + tasks (e.g., research, coding, analysis) without prematurely returning + control to the coordinator or user. + autonomous_mode_prompt: Prompt to provide to the agent when continuing in autonomous mode. + This will guide the agent in the absence of user input. + autonomous_mode_turn_limit: Maximum number of autonomous turns before requesting user input. + """ + cloned_agent = self._prepare_agent_with_handoffs(agent, handoffs) + super().__init__(cloned_agent, agent_thread=agent_thread) + + self._handoff_targets = {handoff.target_id for handoff in handoffs} + self._termination_condition = termination_condition + self._is_start_agent = is_start_agent + + # Autonomous mode members + self._autonomous_mode = autonomous_mode + self._autonomous_mode_prompt = autonomous_mode_prompt or _AUTONOMOUS_MODE_DEFAULT_PROMPT + self._autonomous_mode_turn_limit = autonomous_mode_turn_limit or _DEFAULT_AUTONOMOUS_TURN_LIMIT + self._autonomous_mode_turns = 0 + + def _prepare_agent_with_handoffs( + self, + agent: AgentProtocol, + handoffs: Sequence[HandoffConfiguration], + ) -> AgentProtocol: + """Prepare an agent by adding handoff tools for the specified target agents.
+ + Args: + agent: The agent to prepare + handoffs: Sequence of handoff configurations defining target agents + + Returns: + A new AgentExecutor instance with handoff tools added + """ + if not isinstance(agent, ChatAgent): + raise TypeError( + "Handoff can only be applied to ChatAgent. Please ensure the agent is a ChatAgent instance." + ) + + # Clone the agent to avoid mutating the original + cloned_agent = self._clone_chat_agent(agent) # type: ignore + # Add handoff tools to the cloned agent + self._apply_auto_tools(cloned_agent, handoffs) + # Add middleware to handle handoff tool invocations + middleware = _AutoHandoffMiddleware(handoffs) + existing_middleware = list(cloned_agent.middleware or []) + existing_middleware.append(middleware) + cloned_agent.middleware = existing_middleware + + return cloned_agent + + def _clone_chat_agent(self, agent: ChatAgent) -> ChatAgent: + """Produce a deep copy of the ChatAgent while preserving runtime configuration.""" + options = agent.default_options + middleware = list(agent.middleware or []) + + # Reconstruct the original tools list by combining regular tools with MCP tools. + # ChatAgent.__init__ separates MCP tools during initialization, + # so we need to recombine them here to pass the complete tools list to the constructor. + # This makes sure MCP tools are preserved when cloning agents for handoff workflows. + tools_from_options = options.get("tools") + all_tools = list(tools_from_options) if tools_from_options else [] + if agent.mcp_tools: + all_tools.extend(agent.mcp_tools) + + logit_bias = options.get("logit_bias") + metadata = options.get("metadata") + + # Disable parallel tool calls to prevent the agent from invoking multiple handoff tools at once. 
+ cloned_options: dict[str, Any] = { + "allow_multiple_tool_calls": False, + "frequency_penalty": options.get("frequency_penalty"), + "instructions": options.get("instructions"), + "logit_bias": dict(logit_bias) if logit_bias else None, + "max_tokens": options.get("max_tokens"), + "metadata": dict(metadata) if metadata else None, + "model_id": options.get("model_id"), + "presence_penalty": options.get("presence_penalty"), + "response_format": options.get("response_format"), + "seed": options.get("seed"), + "stop": options.get("stop"), + "store": options.get("store"), + "temperature": options.get("temperature"), + "tool_choice": options.get("tool_choice"), + "tools": all_tools if all_tools else None, + "top_p": options.get("top_p"), + "user": options.get("user"), + } + + return ChatAgent( + chat_client=agent.chat_client, + id=agent.id, + name=agent.name, + description=agent.description, + chat_message_store_factory=agent.chat_message_store_factory, + context_providers=agent.context_provider, + middleware=middleware, + default_options=cloned_options, # type: ignore[arg-type] + ) + + def _apply_auto_tools(self, agent: ChatAgent, targets: Sequence[HandoffConfiguration]) -> None: + """Attach synthetic handoff tools to a chat agent and return the target lookup table. + + Creates handoff tools for each specialist agent that this agent can route to. + + Args: + agent: The ChatAgent to add handoff tools to + targets: Sequence of handoff configurations defining target agents + """ + default_options = agent.default_options + existing_tools = list(default_options.get("tools") or []) + existing_names = {getattr(tool, "name", "") for tool in existing_tools if hasattr(tool, "name")} + + new_tools: list[FunctionTool[Any, Any]] = [] + for target in targets: + handoff_tool = self._create_handoff_tool(target.target_id, target.description) + if handoff_tool.name in existing_names: + raise ValueError( + f"Agent '{resolve_agent_id(agent)}' already has a tool named '{handoff_tool.name}'. 
" + f"Handoff tool name '{handoff_tool.name}' conflicts with existing tool." + "Please rename the existing tool or modify the target agent ID to avoid conflicts." + ) + new_tools.append(handoff_tool) + + if new_tools: + default_options["tools"] = existing_tools + new_tools # type: ignore[operator] + else: + default_options["tools"] = existing_tools + + def _create_handoff_tool(self, target_id: str, description: str | None = None) -> FunctionTool[Any, Any]: + """Construct the synthetic handoff tool that signals routing to `target_id`.""" + tool_name = get_handoff_tool_name(target_id) + doc = description or f"Handoff to the {target_id} agent." + # Note: approval_mode is set to "never_require" for handoff tools because + # they are framework-internal signals that trigger routing logic, not + # actual function executions. They are automatically intercepted by + # _AutoHandoffMiddleware which short-circuits execution and provides synthetic + # results, so the function body never actually runs in practice. + + @tool(name=tool_name, description=doc, approval_mode="never_require") + def _handoff_tool(context: str | None = None) -> str: + """Return a deterministic acknowledgement that encodes the target alias.""" + return f"Handoff to {target_id}" + + return _handoff_tool + + @override + async def _run_agent_and_emit(self, ctx: WorkflowContext[AgentExecutorResponse, AgentResponse]) -> None: + """Override to support handoff.""" + # When the full conversation is empty, it means this is the first run. + # Broadcast the initial cache to all other agents. Subsequent runs won't + # need this since responses are broadcast after each agent run and user input. 
+ if self._is_start_agent and not self._full_conversation: + await self._broadcast_messages(self._cache.copy(), cast(WorkflowContext[AgentExecutorRequest], ctx)) + + # Append the cache to the full conversation history + self._full_conversation.extend(self._cache) + + # Check termination condition before running the agent + if await self._check_terminate_and_yield(cast(WorkflowContext[Never, list[ChatMessage]], ctx)): + return + + # Run the agent + if ctx.is_streaming(): + # Streaming mode: emit incremental updates + response = await self._run_agent_streaming(cast(WorkflowContext, ctx)) + else: + # Non-streaming mode: use run() and emit single event + response = await self._run_agent(cast(WorkflowContext, ctx)) + + # Clear the cache after running the agent + self._cache.clear() + + # A function approval request is issued by the base AgentExecutor + if response is None: + # Agent did not complete (e.g., waiting for user input); do not emit response + logger.debug("AgentExecutor %s: Agent did not complete, awaiting user input", self.id) + return + + # Remove function call related content from the agent response for full conversation history + cleaned_response = clean_conversation_for_handoff(response.messages) + # Append the agent response to the full conversation history. This list removes + # function call related content such that the result stays consistent regardless + # of which agent yields the final output. + self._full_conversation.extend(cleaned_response) + # Broadcast the cleaned response to all other agents + await self._broadcast_messages(cleaned_response, cast(WorkflowContext[AgentExecutorRequest], ctx)) + + # Check if a handoff was requested + if handoff_target := self._is_handoff_requested(response): + if handoff_target not in self._handoff_targets: + raise ValueError( + f"Agent '{resolve_agent_id(self._agent)}' attempted to handoff to unknown " + f"target '{handoff_target}'. 
Valid targets are: {', '.join(self._handoff_targets)}" + ) + + await cast(WorkflowContext[AgentExecutorRequest], ctx).send_message( + AgentExecutorRequest(messages=[], should_respond=True), target_id=handoff_target + ) + await ctx.add_event(HandoffSentEvent(source=self.id, target=handoff_target)) + self._autonomous_mode_turns = 0 # Reset autonomous mode turn counter on handoff + return + + # Handle case where no handoff was requested + if self._autonomous_mode and self._autonomous_mode_turns < self._autonomous_mode_turn_limit: + # In autonomous mode, continue running the agent until a handoff is requested + # or a termination condition is met. + # This allows the agent to perform long-running tasks without returning control + # to the coordinator or user prematurely. + self._cache.extend([ChatMessage("user", [self._autonomous_mode_prompt])]) + self._autonomous_mode_turns += 1 + await self._run_agent_and_emit(ctx) + else: + # The response is handled via `handle_response` + self._autonomous_mode_turns = 0 # Reset autonomous mode turn counter on handoff + await ctx.request_info(HandoffAgentUserRequest(response), list[ChatMessage]) + + @response_handler + async def handle_response( + self, + original_request: HandoffAgentUserRequest, + response: list[ChatMessage], + ctx: WorkflowContext[AgentExecutorResponse, AgentResponse], + ) -> None: + """Handle user response for a request that is issued after agent runs. + + The request only occurs when the agent did not request a handoff and + autonomous mode is disabled. + + Note that this is different that the `handle_user_input_response` method + in the base AgentExecutor, which handles function approval responses. + + Args: + original_request: The original HandoffAgentUserRequest issued to the user + response: The user's response messages + ctx: The workflow context + + If the response is empty, it indicates termination of the handoff workflow. 
+ """ + if not response: + await cast(WorkflowContext[Never, list[ChatMessage]], ctx).yield_output(self._full_conversation) + return + + # Broadcast the user response to all other agents + await self._broadcast_messages(response, cast(WorkflowContext[AgentExecutorRequest], ctx)) + + # Append the user response messages to the cache + self._cache.extend(response) + await self._run_agent_and_emit(ctx) + + async def _broadcast_messages( + self, + messages: list[ChatMessage], + ctx: WorkflowContext[AgentExecutorRequest], + ) -> None: + """Broadcast the workflow cache to the agent before running.""" + agent_executor_request = AgentExecutorRequest( + messages=messages, + should_respond=False, # Other agents do not need to respond yet + ) + # Since all agents are connected via fan-out, we can directly send the message + await ctx.send_message(agent_executor_request) + + def _is_handoff_requested(self, response: AgentResponse) -> str | None: + """Determine if the agent response includes a handoff request. + + If a handoff tool is invoked, the middleware will short-circuit execution + and provide a synthetic result that includes the target agent ID. The message + that contains the function result will be the last message in the response. + """ + if not response.messages: + return None + + last_message = response.messages[-1] + for content in last_message.contents: + if content.type == "function_result": + # Use string comparison instead of isinstance to improve performance + if content.result and isinstance(content.result, dict): + handoff_target = content.result.get(HANDOFF_FUNCTION_RESULT_KEY) # type: ignore + if isinstance(handoff_target, str): + return handoff_target + else: + continue + + return None + + async def _check_terminate_and_yield(self, ctx: WorkflowContext[Never, list[ChatMessage]]) -> bool: + """Check termination conditions and yield completion if met. 
+ + Args: + ctx: Workflow context for yielding output + + Returns: + True if termination condition met and output yielded, False otherwise + """ + if self._termination_condition is None: + return False + + terminated = self._termination_condition(self._full_conversation) + if inspect.isawaitable(terminated): + terminated = await terminated + + if terminated: + await ctx.yield_output(self._full_conversation) + return True + + return False + + @override + async def on_checkpoint_save(self) -> dict[str, Any]: + """Serialize the executor state for checkpointing.""" + state = await super().on_checkpoint_save() + state["_autonomous_mode_turns"] = self._autonomous_mode_turns + return state + + @override + async def on_checkpoint_restore(self, state: dict[str, Any]) -> None: + """Restore the executor state from a checkpoint.""" + await super().on_checkpoint_restore(state) + if "_autonomous_mode_turns" in state: + self._autonomous_mode_turns = state["_autonomous_mode_turns"] + + +# endregion Handoff Agent Executor + +# region Handoff workflow builder + + +class HandoffBuilder: + r"""Fluent builder for conversational handoff workflows with multiple agents. + + The handoff pattern enables a group of agents to route control among themselves. + + Routing Pattern: + Agents can hand off to other agents using `.add_handoff()`. This provides a decentralized + approach to multi-agent collaboration. Handoffs can be configured using `.add_handoff`. If + none are specified, all agents can hand off to all others by default (making a mesh topology). + + Participants must be agents. Support for custom executors is not available in handoff workflows. + + Outputs: + The final conversation history as a list of ChatMessage once the group chat completes. + + Note: + Agents in handoff workflows must be ChatAgent instances and support local tool calls. 
+ """ + + def __init__( + self, + *, + name: str | None = None, + participants: Sequence[AgentProtocol] | None = None, + participant_factories: Mapping[str, Callable[[], AgentProtocol]] | None = None, + description: str | None = None, + ) -> None: + r"""Initialize a HandoffBuilder for creating conversational handoff workflows. + + The builder starts in an unconfigured state and requires you to call: + 1. `.participants([...])` - Register agents + 2. or `.participant_factories({...})` - Register agent factories + 3. `.build()` - Construct the final Workflow + + Optional configuration methods allow you to customize context management, + termination logic, and persistence. + + Args: + name: Optional workflow identifier used in logging and debugging. + If not provided, a default name will be generated. + participants: Optional list of agents that will participate in the handoff workflow. + You can also call `.participants([...])` later. Each participant must have a + unique identifier (`.name` is preferred if set, otherwise `.id` is used). + participant_factories: Optional mapping of factory names to callables that produce agents when invoked. + This allows for lazy instantiation and state isolation per workflow instance + created by this builder. + description: Optional human-readable description explaining the workflow's + purpose. Useful for documentation and observability. 
+ """ + self._name = name + self._description = description + + # Participant related members + self._participants: dict[str, AgentProtocol] = {} + self._participant_factories: dict[str, Callable[[], AgentProtocol]] = {} + self._start_id: str | None = None + if participant_factories: + self.register_participants(participant_factories) + + if participants: + self.participants(participants) + + # Handoff related members + self._handoff_config: dict[str, set[HandoffConfiguration]] = {} + + # Checkpoint related members + self._checkpoint_storage: CheckpointStorage | None = None + + # Autonomous mode related + self._autonomous_mode: bool = False + self._autonomous_mode_prompts: dict[str, str] = {} + self._autonomous_mode_turn_limits: dict[str, int] = {} + self._autonomous_mode_enabled_agents: list[str] = [] + + # Termination related members + self._termination_condition: Callable[[list[ChatMessage]], bool | Awaitable[bool]] | None = None + + def register_participants( + self, participant_factories: Mapping[str, Callable[[], AgentProtocol]] + ) -> "HandoffBuilder": + """Register factories that produce agents for the handoff workflow. + + Each factory is a callable that returns an AgentProtocol instance. + Factories are invoked when building the workflow, allowing for lazy instantiation + and state isolation per workflow instance. + + Args: + participant_factories: Mapping of factory names to callables that return AgentProtocol + instances. Each produced participant must have a unique identifier + (`.name` is preferred if set, otherwise `.id` is used). + + Returns: + Self for method chaining. + + Raises: + ValueError: If participant_factories is empty or `.participants(...)` or `.register_participants(...)` + has already been called. + + Example: + .. code-block:: python + + from agent_framework import ChatAgent + from agent_framework_orchestrations import HandoffBuilder + + + def create_triage() -> ChatAgent: + return ... 
+ + + def create_refund_agent() -> ChatAgent: + return ... + + + def create_billing_agent() -> ChatAgent: + return ... + + + factories = { + "triage": create_triage, + "refund": create_refund_agent, + "billing": create_billing_agent, + } + + # Handoff will be created automatically unless specified otherwise + # The default creates a mesh topology where all agents can handoff to all others + builder = HandoffBuilder().register_participants(factories) + builder.with_start_agent("triage") + """ + if self._participants: + raise ValueError("Cannot mix .participants() and .register_participants() in the same builder instance.") + + if self._participant_factories: + raise ValueError("register_participants() has already been called on this builder instance.") + if not participant_factories: + raise ValueError("participant_factories cannot be empty") + + self._participant_factories = dict(participant_factories) + return self + + def participants(self, participants: Sequence[AgentProtocol]) -> "HandoffBuilder": + """Register the agents that will participate in the handoff workflow. + + Args: + participants: Sequence of AgentProtocol instances. Each must have a unique identifier. + (`.name` is preferred if set, otherwise `.id` is used). + + Returns: + Self for method chaining. + + Raises: + ValueError: If participants is empty, contains duplicates, or `.participants()` or + `.register_participants()` has already been called. + TypeError: If participants are not AgentProtocol instances. + + Example: + + .. 
code-block:: python + + from agent_framework_orchestrations import HandoffBuilder + from agent_framework.openai import OpenAIChatClient + + client = OpenAIChatClient() + triage = client.as_agent(instructions="...", name="triage_agent") + refund = client.as_agent(instructions="...", name="refund_agent") + billing = client.as_agent(instructions="...", name="billing_agent") + + builder = HandoffBuilder().participants([triage, refund, billing]) + builder.with_start_agent(triage) + """ + if self._participant_factories: + raise ValueError("Cannot mix .participants() and .register_participants() in the same builder instance.") + + if self._participants: + raise ValueError("participants have already been assigned") + + if not participants: + raise ValueError("participants cannot be empty") + + named: dict[str, AgentProtocol] = {} + for participant in participants: + if isinstance(participant, AgentProtocol): + resolved_id = self._resolve_to_id(participant) + else: + raise TypeError( + f"Participants must be AgentProtocol or Executor instances. Got {type(participant).__name__}." + ) + + if resolved_id in named: + raise ValueError(f"Duplicate participant name '{resolved_id}' detected") + named[resolved_id] = participant + + self._participants = named + + return self + + def add_handoff( + self, + source: str | AgentProtocol, + targets: Sequence[str] | Sequence[AgentProtocol], + *, + description: str | None = None, + ) -> "HandoffBuilder": + """Add handoff routing from a source agent to one or more target agents. + + This method enables agent-to-agent handoffs by configuring which agents + can hand off to which others. Call this method multiple times to build a + complete routing graph. If no handoffs are specified, all agents can hand off + to all others by default (mesh topology). + + Args: + source: The agent that can initiate the handoff. 
Can be: + - Factory name (str): If using participant factories + - AgentProtocol instance: The actual agent object + - Cannot mix factory names and instances across source and targets + targets: One or more target agents that the source can hand off to. Can be: + - Factory name (str): If using participant factories + - AgentProtocol instance: The actual agent object + - Single target: ["billing_agent"] or [agent_instance] + - Multiple targets: ["billing_agent", "support_agent"] or [agent1, agent2] + - Cannot mix factory names and instances across source and targets + description: Optional custom description for the handoff. If not provided, the description + of the target agent(s) will be used. If the target agent has no description, + no description will be set for the handoff tool, which is not recommended. + If multiple targets are provided, description will be shared among all handoff + tools. To configure distinct descriptions for multiple targets, call add_handoff() + separately for each target. + + Returns: + Self for method chaining. + + Raises: + ValueError: 1) If source or targets are not in the participants list, or if + participants(...) hasn't been called yet. + 2) If source or targets are factory names (str) but participant_factories(...) + hasn't been called yet, or if they are not in the participant_factories list. + TypeError: If mixing factory names (str) and AgentProtocol/Executor instances + + Examples: + Single target (using factory name): + + .. code-block:: python + + builder.add_handoff("triage_agent", "billing_agent") + + Multiple targets (using factory names): + + .. code-block:: python + + builder.add_handoff("triage_agent", ["billing_agent", "support_agent", "escalation_agent"]) + + Multiple targets (using agent instances): + + .. code-block:: python + + builder.add_handoff(triage, [billing, support, escalation]) + + Chain multiple configurations: + + .. 
code-block:: python + + workflow = ( + HandoffBuilder(participants=[triage, replacement, delivery, billing]) + .add_handoff(triage, [replacement, delivery, billing]) + .add_handoff(replacement, [delivery, billing]) + .add_handoff(delivery, [billing]) + .build() + ) + + Note: + - Handoff tools are automatically registered for each source agent + - If a source agent is configured multiple times via add_handoff, targets are merged + """ + if isinstance(source, str) and all(isinstance(t, str) for t in targets): + # Both source and targets are factory names + if not self._participant_factories: + raise ValueError("Call participant_factories(...) before add_handoff(...)") + + if source not in self._participant_factories: + raise ValueError(f"Source factory name '{source}' is not in the participant_factories list") + + for target in targets: + if target not in self._participant_factories: + raise ValueError(f"Target factory name '{target}' is not in the participant_factories list") + + # Merge with existing handoff configuration for this source + if source in self._handoff_config: + # Add new targets to existing list, avoiding duplicates + for t in targets: + if t in self._handoff_config[source]: + logger.warning(f"Handoff from '{source}' to '{t}' is already configured; overwriting.") + self._handoff_config[source].add(HandoffConfiguration(target=t, description=description)) + else: + self._handoff_config[source] = set() + for t in targets: + self._handoff_config[source].add(HandoffConfiguration(target=t, description=description)) + return self + + if isinstance(source, (AgentProtocol)) and all(isinstance(t, AgentProtocol) for t in targets): + # Both source and targets are instances + if not self._participants: + raise ValueError("Call participants(...) 
before add_handoff(...)") + + # Resolve source agent ID + source_id = self._resolve_to_id(source) + if source_id not in self._participants: + raise ValueError(f"Source agent '{source}' is not in the participants list") + + # Resolve all target IDs + target_ids: list[str] = [] + for target in targets: + target_id = self._resolve_to_id(target) + if target_id not in self._participants: + raise ValueError(f"Target agent '{target}' is not in the participants list") + target_ids.append(target_id) + + # Merge with existing handoff configuration for this source + if source_id in self._handoff_config: + # Add new targets to existing list, avoiding duplicates + for t in target_ids: + if t in self._handoff_config[source_id]: + logger.warning(f"Handoff from '{source_id}' to '{t}' is already configured; overwriting.") + self._handoff_config[source_id].add(HandoffConfiguration(target=t, description=description)) + else: + self._handoff_config[source_id] = set() + for t in target_ids: + self._handoff_config[source_id].add(HandoffConfiguration(target=t, description=description)) + + return self + + raise TypeError( + "Cannot mix factory names (str) and AgentProtocol instances across source and targets in add_handoff()" + ) + + def with_start_agent(self, agent: str | AgentProtocol) -> "HandoffBuilder": + """Set the agent that will initiate the handoff workflow. + + If not specified, the first registered participant will be used as the starting agent. + + Args: + agent: The agent that will start the workflow. Can be: + - Factory name (str): If using participant factories + - AgentProtocol instance: The actual agent object + Returns: + Self for method chaining. + """ + if isinstance(agent, str): + if self._participant_factories: + if agent not in self._participant_factories: + raise ValueError(f"Start agent factory name '{agent}' is not in the participant_factories list") + else: + raise ValueError("Call register_participants(...) 
before with_start_agent(...)") + self._start_id = agent + elif isinstance(agent, AgentProtocol): + resolved_id = self._resolve_to_id(agent) + if self._participants: + if resolved_id not in self._participants: + raise ValueError(f"Start agent '{resolved_id}' is not in the participants list") + else: + raise ValueError("Call participants(...) before with_start_agent(...)") + self._start_id = resolved_id + else: + raise TypeError("Start agent must be a factory name (str) or an AgentProtocol instance") + + return self + + def with_autonomous_mode( + self, + *, + agents: Sequence[AgentProtocol] | Sequence[str] | None = None, + prompts: dict[str, str] | None = None, + turn_limits: dict[str, int] | None = None, + ) -> "HandoffBuilder": + """Enable autonomous mode for the handoff workflow. + + Autonomous mode allows agents to continue responding without user input. + The default behavior when autonomous mode is disabled is to return control to the user + after each agent response that does not trigger a handoff. With autonomous mode enabled, + agents can continue the conversation until they request a handoff or the turn limit is reached. + + Args: + agents: Optional list of agents to enable autonomous mode for. Can be: + - Factory names (str): If using participant factories + - AgentProtocol instances: The actual agent objects + - If not provided, all agents will operate in autonomous mode. + prompts: Optional mapping of agent identifiers/factory names to custom prompts to use when continuing + in autonomous mode. If not provided, a default prompt will be used. + turn_limits: Optional mapping of agent identifiers/factory names to maximum number of autonomous turns + before returning control to the user. If not provided, a default turn limit will be used. 
+ """ + self._autonomous_mode = True + self._autonomous_mode_prompts = prompts or {} + self._autonomous_mode_turn_limits = turn_limits or {} + self._autonomous_mode_enabled_agents = [self._resolve_to_id(agent) for agent in agents] if agents else [] + + return self + + def with_checkpointing(self, checkpoint_storage: CheckpointStorage) -> "HandoffBuilder": + """Enable workflow state persistence for resumable conversations. + + Checkpointing allows the workflow to save its state at key points, enabling you to: + - Resume conversations after application restarts + - Implement long-running support tickets that span multiple sessions + - Recover from failures without losing conversation context + - Audit and replay conversation history + + Args: + checkpoint_storage: Storage backend implementing CheckpointStorage interface. + Common implementations: InMemoryCheckpointStorage (testing), + database-backed storage (production). + + Returns: + Self for method chaining. + + Example (In-Memory): + + .. code-block:: python + + from agent_framework import InMemoryCheckpointStorage + + storage = InMemoryCheckpointStorage() + workflow = HandoffBuilder(participants=[triage, refund, billing]).with_checkpointing(storage).build() + + # Run workflow with a session ID for resumption + async for event in workflow.run_stream("Help me", session_id="user_123"): + # Process events... + pass + + # Later, resume the same conversation + async for event in workflow.run_stream("I need a refund", session_id="user_123"): + # Conversation continues from where it left off + pass + + Use Cases: + - Customer support systems with persistent ticket history + - Multi-day conversations that need to survive server restarts + - Compliance requirements for conversation auditing + - A/B testing different agent configurations on same conversation + + Note: + Checkpointing adds overhead for serialization and storage I/O. Use it when + persistence is required, not for simple stateless request-response patterns. 
+ """ + self._checkpoint_storage = checkpoint_storage + return self + + def with_termination_condition(self, termination_condition: TerminationCondition) -> "HandoffBuilder": + """Set a custom termination condition for the handoff workflow. + + The condition can be either synchronous or asynchronous. + + Args: + termination_condition: Function that receives the full conversation and returns True + (or awaitable True) if the workflow should terminate. + + Returns: + Self for chaining. + + Example: + + .. code-block:: python + + # Synchronous condition + builder.with_termination_condition( + lambda conv: len(conv) > 20 or any("goodbye" in msg.text.lower() for msg in conv[-2:]) + ) + + + # Asynchronous condition + async def check_termination(conv: list[ChatMessage]) -> bool: + # Can perform async operations + return len(conv) > 20 + + + builder.with_termination_condition(check_termination) + """ + self._termination_condition = termination_condition + return self + + def build(self) -> Workflow: + """Construct the final Workflow instance from the configured builder. + + This method validates the configuration and assembles all internal components: + - Starting agent executor + - Specialist agent executors + - Request/response handling + + Returns: + A fully configured Workflow ready to execute via `.run()` or `.run_stream()`. + + Raises: + ValueError: If participants or coordinator were not configured, or if + required configuration is invalid. 
+ """ + # Resolve agents (either from instances or factories) + # The returned map keys are either executor IDs or factory names, which is need to resolve handoff configs + resolved_agents = self._resolve_agents() + # Resolve handoff configurations to use agent display names + # The returned map keys are executor IDs + resolved_handoffs = self._resolve_handoffs(resolved_agents) + # Resolve agents into executors + executors = self._resolve_executors(resolved_agents, resolved_handoffs) + + # Build the workflow graph + if self._start_id is None: + raise ValueError("Must call with_start_agent(...) before building the workflow.") + start_executor = executors[self._resolve_to_id(resolved_agents[self._start_id])] + builder = WorkflowBuilder( + name=self._name, + description=self._description, + ).set_start_executor(start_executor) + + # Add the appropriate edges + # In handoff workflows, all executors are connected, making a fully connected graph. + # This is because for all agents to stay synchronized, the active agent must be able to + # broadcast updates to all others via edges. Handoffs are controlled internally by the + # `HandoffAgentExecutor` instances using handoff tools and middleware. + for executor in executors.values(): + targets = [e for e in executors.values() if e.id != executor.id] + # Fan-out requires at least 2 targets. Just in case there are only 2 agents total, + # we add a direct edge if there's only 1 target. + if len(targets) > 1: + builder = builder.add_fan_out_edges(executor, targets) + elif len(targets) == 1: + builder = builder.add_edge(executor, targets[0]) + + # Configure checkpointing if enabled + if self._checkpoint_storage: + builder.with_checkpointing(self._checkpoint_storage) + + return builder.build() + + # region Internal Helper Methods + + def _resolve_agents(self) -> dict[str, AgentProtocol]: + """Resolve participant factories into agent instances. 
+ + If agent instances were provided directly via participants(...), those are + returned as-is. If participant factories were provided via participant_factories(...), + those are invoked to create the agent instances. + + Returns: + Map of executor IDs or factory names to `AgentProtocol` instances + """ + if not self._participants and not self._participant_factories: + raise ValueError("No participants provided. Call .participants() or .register_participants() first.") + # We don't need to check if both are set since that is handled in the respective methods + + if self._participants: + return self._participants + + if self._participant_factories: + # Invoke each factory to create participant instances + factory_names_to_agents: dict[str, AgentProtocol] = {} + for factory_name, factory in self._participant_factories.items(): + instance = factory() + if isinstance(instance, AgentProtocol): + resolved_id = self._resolve_to_id(instance) + else: + raise TypeError(f"Participants must be AgentProtocol instances. Got {type(instance).__name__}.") + + if resolved_id in factory_names_to_agents: + raise ValueError(f"Duplicate participant name '{resolved_id}' detected") + + # Map executors by factory name (not executor.id) because handoff configs reference factory names + # This allows users to configure handoffs using the factory names they provided + factory_names_to_agents[factory_name] = instance + + return factory_names_to_agents + + raise ValueError("No executors or participant_factories have been configured") + + def _resolve_handoffs(self, agents: Mapping[str, AgentProtocol]) -> dict[str, list[HandoffConfiguration]]: + """Handoffs may be specified using factory names or instances; resolve to executor IDs. 
+ + Args: + agents: Map of agent IDs or factory names to `AgentProtocol` instances + + Returns: + Map of executor IDs to list of HandoffConfiguration instances + """ + # Updated map that used agent resolved IDs as keys + updated_handoff_configurations: dict[str, list[HandoffConfiguration]] = {} + if self._handoff_config: + # Use explicit handoff configuration from add_handoff() calls + for source_id, handoff_configurations in self._handoff_config.items(): + source_agent = agents.get(source_id) + if not source_agent: + raise ValueError( + f"Handoff source agent '{source_id}' not found. " + "Please make sure source has been added as either a participant or participant_factory." + ) + for handoff_config in handoff_configurations: + target_agent = agents.get(handoff_config.target_id) + if not target_agent: + raise ValueError( + f"Handoff target agent '{handoff_config.target_id}' not found for source '{source_id}'. " + "Please make sure target has been added as either a participant or participant_factory." + ) + + updated_handoff_configurations.setdefault(self._resolve_to_id(source_agent), []).append( + HandoffConfiguration( + target=self._resolve_to_id(target_agent), + description=handoff_config.description or target_agent.description, + ) + ) + else: + # Use default handoff configuration: all agents can hand off to all others (mesh topology) + for source_id, source_agent in agents.items(): + for target_id, target_agent in agents.items(): + if source_id == target_id: + continue # Skip self-handoff + updated_handoff_configurations.setdefault(self._resolve_to_id(source_agent), []).append( + HandoffConfiguration( + target=self._resolve_to_id(target_agent), + description=target_agent.description, + ) + ) + + return updated_handoff_configurations + + def _resolve_executors( + self, + agents: dict[str, AgentProtocol], + handoffs: dict[str, list[HandoffConfiguration]], + ) -> dict[str, HandoffAgentExecutor]: + """Resolve agents into HandoffAgentExecutors. 
+ + Args: + agents: Map of agent IDs or factory names to `AgentProtocol` instances + handoffs: Map of executor IDs to list of HandoffConfiguration instances + + Returns: + Tuple of (starting executor ID, list of HandoffAgentExecutor instances) + """ + executors: dict[str, HandoffAgentExecutor] = {} + + for id, agent in agents.items(): + # Note that here `id` may be either factory name or agent resolved ID + resolved_id = self._resolve_to_id(agent) + if resolved_id not in handoffs or not handoffs.get(resolved_id): + logger.warning( + f"No handoff configuration found for agent '{resolved_id}'. " + "This agent will not be able to hand off to any other agents and your workflow may get stuck." + ) + + # Autonomous mode is enabled only for specified agents (or all if none specified) + autonomous_mode = self._autonomous_mode and ( + not self._autonomous_mode_enabled_agents or id in self._autonomous_mode_enabled_agents + ) + + executors[resolved_id] = HandoffAgentExecutor( + agent=agent, + handoffs=handoffs.get(resolved_id, []), + is_start_agent=(id == self._start_id), + termination_condition=self._termination_condition, + autonomous_mode=autonomous_mode, + autonomous_mode_prompt=self._autonomous_mode_prompts.get(id, None), + autonomous_mode_turn_limit=self._autonomous_mode_turn_limits.get(id, None), + ) + + return executors + + def _resolve_to_id(self, candidate: str | AgentProtocol) -> str: + """Resolve a participant reference into a concrete executor identifier.""" + if isinstance(candidate, AgentProtocol): + return resolve_agent_id(candidate) + if isinstance(candidate, str): + return candidate + + raise TypeError(f"Invalid starting agent reference: {type(candidate).__name__}") + + # endregion Internal Helper Methods + + +# endregion Handoff workflow builder diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_magentic.py b/python/packages/orchestrations/agent_framework_orchestrations/_magentic.py new file mode 100644 index 0000000000..cb180d4d43 
--- /dev/null +++ b/python/packages/orchestrations/agent_framework_orchestrations/_magentic.py @@ -0,0 +1,1981 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio +import contextlib +import json +import logging +import re +import sys +from abc import ABC, abstractmethod +from collections.abc import Callable, Sequence +from dataclasses import dataclass, field +from enum import Enum +from typing import Any, ClassVar, TypeVar, cast, overload + +from agent_framework import ( + AgentProtocol, + AgentResponse, + ChatMessage, +) +from agent_framework._workflows._agent_executor import AgentExecutor, AgentExecutorRequest, AgentExecutorResponse +from agent_framework._workflows._base_group_chat_orchestrator import ( + BaseGroupChatOrchestrator, + GroupChatParticipantMessage, + GroupChatRequestMessage, + GroupChatResponseMessage, + GroupChatWorkflowContext_T_Out, + ParticipantRegistry, +) +from agent_framework._workflows._checkpoint import CheckpointStorage +from agent_framework._workflows._events import ExecutorEvent +from agent_framework._workflows._executor import Executor, handler +from agent_framework._workflows._model_utils import DictConvertible, encode_value +from agent_framework._workflows._request_info_mixin import response_handler +from agent_framework._workflows._workflow import Workflow +from agent_framework._workflows._workflow_builder import WorkflowBuilder +from agent_framework._workflows._workflow_context import WorkflowContext +from typing_extensions import Never + +if sys.version_info >= (3, 12): + from typing import override # type: ignore # pragma: no cover +else: + from typing_extensions import override # type: ignore # pragma: no cover +if sys.version_info >= (3, 11): + from typing import Self # type: ignore # pragma: no cover +else: + from typing_extensions import Self # type: ignore # pragma: no cover + + +logger = logging.getLogger(__name__) + +# Consistent author name for messages produced by the Magentic manager/orchestrator 
+MAGENTIC_MANAGER_NAME = "magentic_manager" + +# Optional kinds for generic orchestrator message callback +ORCH_MSG_KIND_USER_TASK = "user_task" +ORCH_MSG_KIND_TASK_LEDGER = "task_ledger" +# Newly surfaced kinds for unified callback consumers +ORCH_MSG_KIND_INSTRUCTION = "instruction" +ORCH_MSG_KIND_NOTICE = "notice" + + +def _message_to_payload(message: ChatMessage) -> Any: + if hasattr(message, "to_dict") and callable(getattr(message, "to_dict", None)): + with contextlib.suppress(Exception): + return message.to_dict() # type: ignore[attr-defined] + if hasattr(message, "to_json") and callable(getattr(message, "to_json", None)): + with contextlib.suppress(Exception): + json_payload = message.to_json() # type: ignore[attr-defined] + if isinstance(json_payload, str): + with contextlib.suppress(Exception): + return json.loads(json_payload) + return json_payload + if hasattr(message, "__dict__"): + return encode_value(message.__dict__) + return message + + +def _message_from_payload(payload: Any) -> ChatMessage: + if isinstance(payload, ChatMessage): + return payload + if hasattr(ChatMessage, "from_dict") and isinstance(payload, dict): + with contextlib.suppress(Exception): + return ChatMessage.from_dict(payload) # type: ignore[attr-defined,no-any-return] + if hasattr(ChatMessage, "from_json") and isinstance(payload, str): + with contextlib.suppress(Exception): + return ChatMessage.from_json(payload) # type: ignore[attr-defined,no-any-return] + if isinstance(payload, dict): + with contextlib.suppress(Exception): + return ChatMessage(**payload) # type: ignore[arg-type] + if isinstance(payload, str): + with contextlib.suppress(Exception): + decoded = json.loads(payload) + if isinstance(decoded, dict): + return _message_from_payload(decoded) + raise TypeError("Unable to reconstruct ChatMessage from payload") + + +# region Magentic One Prompts + +ORCHESTRATOR_TASK_LEDGER_FACTS_PROMPT = """Below I will present you a request. 
+ +Before we begin addressing the request, please answer the following pre-survey to the best of your ability. +Keep in mind that you are Ken Jennings-level with trivia, and Mensa-level with puzzles, so there should be +a deep well to draw from. + +Here is the request: + +{task} + +Here is the pre-survey: + + 1. Please list any specific facts or figures that are GIVEN in the request itself. It is possible that + there are none. + 2. Please list any facts that may need to be looked up, and WHERE SPECIFICALLY they might be found. + In some cases, authoritative sources are mentioned in the request itself. + 3. Please list any facts that may need to be derived (e.g., via logical deduction, simulation, or computation) + 4. Please list any facts that are recalled from memory, hunches, well-reasoned guesses, etc. + +When answering this survey, keep in mind that "facts" will typically be specific names, dates, statistics, etc. +Your answer should use headings: + + 1. GIVEN OR VERIFIED FACTS + 2. FACTS TO LOOK UP + 3. FACTS TO DERIVE + 4. EDUCATED GUESSES + +DO NOT include any other headings or sections in your response. DO NOT list next steps or plans until asked to do so. +""" + +ORCHESTRATOR_TASK_LEDGER_PLAN_PROMPT = """Fantastic. To address this request we have assembled the following team: + +{team} + +Based on the team composition, and known and unknown facts, please devise a short bullet-point plan for addressing the +original request. Remember, there is no requirement to involve all team members. A team member's particular expertise +may not be needed for this task. +""" + +# Added to render the ledger in a single assistant message, mirroring the original behavior. 
+ORCHESTRATOR_TASK_LEDGER_FULL_PROMPT = """ +We are working to address the following user request: + +{task} + + +To answer this request we have assembled the following team: + +{team} + + +Here is an initial fact sheet to consider: + +{facts} + + +Here is the plan to follow as best as possible: + +{plan} +""" + +ORCHESTRATOR_TASK_LEDGER_FACTS_UPDATE_PROMPT = """As a reminder, we are working to solve the following task: + +{task} + +It is clear we are not making as much progress as we would like, but we may have learned something new. +Please rewrite the following fact sheet, updating it to include anything new we have learned that may be helpful. + +Example edits can include (but are not limited to) adding new guesses, moving educated guesses to verified facts +if appropriate, etc. Updates may be made to any section of the fact sheet, and more than one section of the fact +sheet can be edited. This is an especially good time to update educated guesses, so please at least add or update +one educated guess or hunch, and explain your reasoning. + +Here is the old fact sheet: + +{old_facts} +""" + +ORCHESTRATOR_TASK_LEDGER_PLAN_UPDATE_PROMPT = """Please briefly explain what went wrong on this last run +(the root cause of the failure), and then come up with a new plan that takes steps and includes hints to overcome prior +challenges and especially avoids repeating the same mistakes. As before, the new plan should be concise, expressed in +bullet-point form, and consider the following team composition: + +{team} +""" + +ORCHESTRATOR_PROGRESS_LEDGER_PROMPT = """ +Recall we are working on the following request: + +{task} + +And we have assembled the following team: + +{team} + +To make progress on the request, please answer the following questions, including necessary reasoning: + + - Is the request fully satisfied? 
(True if complete, or False if the original request has yet to be + SUCCESSFULLY and FULLY addressed) + - Are we in a loop where we are repeating the same requests and or getting the same responses as before? + Loops can span multiple turns, and can include repeated actions like scrolling up or down more than a + handful of times. + - Are we making forward progress? (True if just starting, or recent messages are adding value. False if recent + messages show evidence of being stuck in a loop or if there is evidence of significant barriers to success + such as the inability to read from a required file) + - Who should speak next? (select from: {names}) + - What instruction or question would you give this team member? (Phrase as if speaking directly to them, and + include any specific information they may need) + +Please output an answer in pure JSON format according to the following schema. The JSON object must be parsable as-is. +DO NOT OUTPUT ANYTHING OTHER THAN JSON, AND DO NOT DEVIATE FROM THIS SCHEMA: + +{{ + "is_request_satisfied": {{ + + "reason": string, + "answer": boolean + }}, + "is_in_loop": {{ + "reason": string, + "answer": boolean + }}, + "is_progress_being_made": {{ + "reason": string, + "answer": boolean + }}, + "next_speaker": {{ + "reason": string, + "answer": string (select from: {names}) + }}, + "instruction_or_question": {{ + "reason": string, + "answer": string + }} +}} +""" + +ORCHESTRATOR_FINAL_ANSWER_PROMPT = """ +We are working on the following task: +{task} + +We have completed the task. + +The above messages contain the conversation that took place to complete the task. + +Based on the information gathered, provide the final answer to the original request. +The answer should be phrased as if you were speaking to the user. 
+""" + + +# region Messages and Types + + +def _new_chat_history() -> list[ChatMessage]: + """Typed default factory for chat history list to satisfy type checkers.""" + return [] + + +def _new_participant_descriptions() -> dict[str, str]: + """Typed default factory for participant descriptions dict to satisfy type checkers.""" + return {} + + +@dataclass +class _MagenticTaskLedger(DictConvertible): + """Internal: Task ledger for the Standard Magentic manager.""" + + facts: ChatMessage + plan: ChatMessage + + def to_dict(self) -> dict[str, Any]: + return {"facts": _message_to_payload(self.facts), "plan": _message_to_payload(self.plan)} + + @classmethod + def from_dict(cls, data: dict[str, Any]) -> "_MagenticTaskLedger": + return cls( + facts=_message_from_payload(data.get("facts")), + plan=_message_from_payload(data.get("plan")), + ) + + +@dataclass +class MagenticProgressLedgerItem(DictConvertible): + """Internal: A progress ledger item.""" + + reason: str + answer: str | bool + + def to_dict(self) -> dict[str, Any]: + return {"reason": self.reason, "answer": self.answer} + + @classmethod + def from_dict(cls, data: dict[str, Any]) -> "MagenticProgressLedgerItem": + answer_value = data.get("answer") + if not isinstance(answer_value, (str, bool)): + answer_value = "" # Default to empty string if not str or bool + return cls(reason=data.get("reason", ""), answer=answer_value) + + +@dataclass +class MagenticProgressLedger(DictConvertible): + """Internal: A progress ledger for tracking workflow progress.""" + + is_request_satisfied: MagenticProgressLedgerItem + is_in_loop: MagenticProgressLedgerItem + is_progress_being_made: MagenticProgressLedgerItem + next_speaker: MagenticProgressLedgerItem + instruction_or_question: MagenticProgressLedgerItem + + def to_dict(self) -> dict[str, Any]: + return { + "is_request_satisfied": self.is_request_satisfied.to_dict(), + "is_in_loop": self.is_in_loop.to_dict(), + "is_progress_being_made": self.is_progress_being_made.to_dict(), + 
"next_speaker": self.next_speaker.to_dict(), + "instruction_or_question": self.instruction_or_question.to_dict(), + } + + @classmethod + def from_dict(cls, data: dict[str, Any]) -> "MagenticProgressLedger": + return cls( + is_request_satisfied=MagenticProgressLedgerItem.from_dict(data.get("is_request_satisfied", {})), + is_in_loop=MagenticProgressLedgerItem.from_dict(data.get("is_in_loop", {})), + is_progress_being_made=MagenticProgressLedgerItem.from_dict(data.get("is_progress_being_made", {})), + next_speaker=MagenticProgressLedgerItem.from_dict(data.get("next_speaker", {})), + instruction_or_question=MagenticProgressLedgerItem.from_dict(data.get("instruction_or_question", {})), + ) + + +@dataclass +class MagenticContext(DictConvertible): + """Context for the Magentic manager.""" + + task: str + chat_history: list[ChatMessage] = field(default_factory=_new_chat_history) + participant_descriptions: dict[str, str] = field(default_factory=_new_participant_descriptions) + round_count: int = 0 + stall_count: int = 0 + reset_count: int = 0 + + def to_dict(self) -> dict[str, Any]: + return { + "task": self.task, + "chat_history": [_message_to_payload(msg) for msg in self.chat_history], + "participant_descriptions": dict(self.participant_descriptions), + "round_count": self.round_count, + "stall_count": self.stall_count, + "reset_count": self.reset_count, + } + + @classmethod + def from_dict(cls, data: dict[str, Any]) -> "MagenticContext": + # Validate required fields + # `task` is required + task = data.get("task") + if task is None or not isinstance(task, str): + raise ValueError("MagenticContext requires a 'task' string field.") + # `chat_history` is required + chat_history_payload = data.get("chat_history", []) + history: list[ChatMessage] = [] + for item in chat_history_payload: + history.append(_message_from_payload(item)) + # `participant_descriptions` is required + participant_descriptions = data.get("participant_descriptions") + if not 
isinstance(participant_descriptions, dict) or not participant_descriptions: + raise ValueError("MagenticContext requires a 'participant_descriptions' dictionary field.") + if not all(isinstance(k, str) and isinstance(v, str) for k, v in participant_descriptions.items()): # type: ignore + raise ValueError("MagenticContext 'participant_descriptions' must be a dict of str to str.") + + return cls( + task=task, + chat_history=history, + participant_descriptions=participant_descriptions, # type: ignore + round_count=data.get("round_count", 0), + stall_count=data.get("stall_count", 0), + reset_count=data.get("reset_count", 0), + ) + + def reset(self) -> None: + """Reset the context. + + This will clear the chat history and reset the stall count. + This will not reset the task, round count, or participant descriptions. + """ + self.chat_history.clear() + self.stall_count = 0 + self.reset_count += 1 + + +# endregion Messages and Types + +# region Utilities + + +def _team_block(participants: dict[str, str]) -> str: + """Render participant descriptions as a readable block.""" + return "\n".join(f"- {name}: {desc}" for name, desc in participants.items()) + + +def _extract_json(text: str) -> dict[str, Any]: + """Potentially temp helper method. + + Note: this method is required right now because the ChatClientProtocol, when calling + response.text, returns duplicate JSON payloads - need to figure out why. + + The `text` method is concatenating multiple text contents from diff msgs into a single string. 
+ """ + fence = re.search(r"```(?:json)?\s*(\{[\s\S]*?\})\s*```", text, flags=re.IGNORECASE) + if fence: + candidate = fence.group(1) + else: + # Find first balanced JSON object + start = text.find("{") + if start == -1: + raise ValueError("No JSON object found.") + depth = 0 + end = None + for i, ch in enumerate(text[start:], start=start): + if ch == "{": + depth += 1 + elif ch == "}": + depth -= 1 + if depth == 0: + end = i + 1 + break + if end is None: + raise ValueError("Unbalanced JSON braces.") + candidate = text[start:end] + + for attempt in (candidate, candidate.replace("True", "true").replace("False", "false").replace("None", "null")): + with contextlib.suppress(Exception): + val = json.loads(attempt) + if isinstance(val, dict): + return cast(dict[str, Any], val) + + with contextlib.suppress(Exception): + import ast + + obj = ast.literal_eval(candidate) + if isinstance(obj, dict): + return cast(dict[str, Any], obj) + + raise ValueError("Unable to parse JSON from model output.") + + +T = TypeVar("T") + + +def _coerce_model(model_cls: type[T], data: dict[str, Any]) -> T: + # Use type: ignore to suppress mypy errors for dynamic attribute access + # We check with hasattr() first, so this is safe + if hasattr(model_cls, "from_dict") and callable(model_cls.from_dict): # type: ignore[attr-defined] + return model_cls.from_dict(data) # type: ignore[attr-defined,return-value,no-any-return] + return model_cls(**data) # type: ignore[arg-type,call-arg] + + +# endregion Utilities + +# region Magentic Manager + + +class MagenticManagerBase(ABC): + """Base class for the Magentic One manager.""" + + def __init__( + self, + *, + max_stall_count: int = 3, + max_reset_count: int | None = None, + max_round_count: int | None = None, + ) -> None: + self.max_stall_count = max_stall_count + self.max_reset_count = max_reset_count + self.max_round_count = max_round_count + # Base prompt surface for type safety; concrete managers may override with a str field. 
+ self.task_ledger_full_prompt: str = ORCHESTRATOR_TASK_LEDGER_FULL_PROMPT + + @abstractmethod + async def plan(self, magentic_context: MagenticContext) -> ChatMessage: + """Create a plan for the task.""" + ... + + @abstractmethod + async def replan(self, magentic_context: MagenticContext) -> ChatMessage: + """Replan for the task.""" + ... + + @abstractmethod + async def create_progress_ledger(self, magentic_context: MagenticContext) -> MagenticProgressLedger: + """Create a progress ledger.""" + ... + + @abstractmethod + async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatMessage: + """Prepare the final answer.""" + ... + + def on_checkpoint_save(self) -> dict[str, Any]: + """Serialize runtime state for checkpointing.""" + return {} + + def on_checkpoint_restore(self, state: dict[str, Any]) -> None: + """Restore runtime state from checkpoint data.""" + return + + +class StandardMagenticManager(MagenticManagerBase): + """Standard Magentic manager that performs real LLM calls via a ChatAgent. 
+ + The manager constructs prompts that mirror the original Magentic One orchestration: + - Facts gathering + - Plan creation + - Progress ledger in JSON + - Facts update and plan update on reset + - Final answer synthesis + """ + + task_ledger: _MagenticTaskLedger | None + + MANAGER_NAME: ClassVar[str] = "StandardMagenticManager" + + def __init__( + self, + agent: AgentProtocol, + task_ledger: _MagenticTaskLedger | None = None, + *, + task_ledger_facts_prompt: str | None = None, + task_ledger_plan_prompt: str | None = None, + task_ledger_full_prompt: str | None = None, + task_ledger_facts_update_prompt: str | None = None, + task_ledger_plan_update_prompt: str | None = None, + progress_ledger_prompt: str | None = None, + final_answer_prompt: str | None = None, + max_stall_count: int = 3, + max_reset_count: int | None = None, + max_round_count: int | None = None, + progress_ledger_retry_count: int | None = None, + ) -> None: + """Initialize the Standard Magentic Manager. + + Args: + agent: An agent instance to use for LLM calls. The agent's configured + options (temperature, seed, instructions, etc.) will be applied. + task_ledger: Optional task ledger for managing task state. + + Keyword Args: + task_ledger_facts_prompt: Optional prompt for the task ledger facts. + task_ledger_plan_prompt: Optional prompt for the task ledger plan. + task_ledger_full_prompt: Optional prompt for the full task ledger. + task_ledger_facts_update_prompt: Optional prompt for updating task ledger facts. + task_ledger_plan_update_prompt: Optional prompt for updating task ledger plan. + progress_ledger_prompt: Optional prompt for the progress ledger. + final_answer_prompt: Optional prompt for the final answer. + max_stall_count: Maximum number of stalls allowed. + max_reset_count: Maximum number of resets allowed. + max_round_count: Maximum number of rounds allowed. + progress_ledger_retry_count: Maximum number of retries for the progress ledger. 
+ """ + super().__init__( + max_stall_count=max_stall_count, + max_reset_count=max_reset_count, + max_round_count=max_round_count, + ) + + self._agent: AgentProtocol = agent + self.task_ledger: _MagenticTaskLedger | None = task_ledger + + # Prompts may be overridden if needed + self.task_ledger_facts_prompt: str = task_ledger_facts_prompt or ORCHESTRATOR_TASK_LEDGER_FACTS_PROMPT + self.task_ledger_plan_prompt: str = task_ledger_plan_prompt or ORCHESTRATOR_TASK_LEDGER_PLAN_PROMPT + self.task_ledger_full_prompt = task_ledger_full_prompt or ORCHESTRATOR_TASK_LEDGER_FULL_PROMPT + self.task_ledger_facts_update_prompt: str = ( + task_ledger_facts_update_prompt or ORCHESTRATOR_TASK_LEDGER_FACTS_UPDATE_PROMPT + ) + self.task_ledger_plan_update_prompt: str = ( + task_ledger_plan_update_prompt or ORCHESTRATOR_TASK_LEDGER_PLAN_UPDATE_PROMPT + ) + self.progress_ledger_prompt: str = progress_ledger_prompt or ORCHESTRATOR_PROGRESS_LEDGER_PROMPT + self.final_answer_prompt: str = final_answer_prompt or ORCHESTRATOR_FINAL_ANSWER_PROMPT + + self.progress_ledger_retry_count: int = ( + progress_ledger_retry_count if progress_ledger_retry_count is not None else 3 + ) + + async def _complete( + self, + messages: list[ChatMessage], + ) -> ChatMessage: + """Call the underlying agent and return the last assistant message. + + The agent's run method is called which applies the agent's configured options + (temperature, seed, instructions, etc.). 
+ """ + response: AgentResponse = await self._agent.run(messages) + if not response.messages: + raise RuntimeError("Agent returned no messages in response.") + if len(response.messages) > 1: + logger.warning("Agent returned multiple messages; using the last one.") + + return response.messages[-1] + + async def plan(self, magentic_context: MagenticContext) -> ChatMessage: + """Create facts and plan using the model, then render a combined task ledger as a single assistant message.""" + team_text = _team_block(magentic_context.participant_descriptions) + + # Gather facts + facts_user = ChatMessage( + role="user", + text=self.task_ledger_facts_prompt.format(task=magentic_context.task), + ) + facts_msg = await self._complete([*magentic_context.chat_history, facts_user]) + + # Create plan + plan_user = ChatMessage( + role="user", + text=self.task_ledger_plan_prompt.format(team=team_text), + ) + plan_msg = await self._complete([*magentic_context.chat_history, facts_user, facts_msg, plan_user]) + + # Store ledger and render full combined view + self.task_ledger = _MagenticTaskLedger(facts=facts_msg, plan=plan_msg) + + # Also store individual messages in chat_history for better grounding + # This gives the progress ledger model access to the detailed reasoning + magentic_context.chat_history.extend([facts_user, facts_msg, plan_user, plan_msg]) + + combined = self.task_ledger_full_prompt.format( + task=magentic_context.task, + team=team_text, + facts=facts_msg.text, + plan=plan_msg.text, + ) + return ChatMessage("assistant", [combined], author_name=MAGENTIC_MANAGER_NAME) + + async def replan(self, magentic_context: MagenticContext) -> ChatMessage: + """Update facts and plan when stalling or looping has been detected.""" + if self.task_ledger is None: + raise RuntimeError("replan() called before plan(); call plan() once before requesting a replan.") + + team_text = _team_block(magentic_context.participant_descriptions) + + # Update facts + facts_update_user = ChatMessage( + 
"user", + [ + self.task_ledger_facts_update_prompt.format( + task=magentic_context.task, old_facts=self.task_ledger.facts.text + ) + ], + ) + updated_facts = await self._complete([*magentic_context.chat_history, facts_update_user]) + + # Update plan + plan_update_user = ChatMessage( + "user", + [self.task_ledger_plan_update_prompt.format(team=team_text)], + ) + updated_plan = await self._complete([ + *magentic_context.chat_history, + facts_update_user, + updated_facts, + plan_update_user, + ]) + + # Store and render + self.task_ledger = _MagenticTaskLedger(facts=updated_facts, plan=updated_plan) + + # Also store individual messages in chat_history for better grounding + # This gives the progress ledger model access to the detailed reasoning + magentic_context.chat_history.extend([facts_update_user, updated_facts, plan_update_user, updated_plan]) + + combined = self.task_ledger_full_prompt.format( + task=magentic_context.task, + team=team_text, + facts=updated_facts.text, + plan=updated_plan.text, + ) + return ChatMessage("assistant", [combined], author_name=MAGENTIC_MANAGER_NAME) + + async def create_progress_ledger(self, magentic_context: MagenticContext) -> MagenticProgressLedger: + """Use the model to produce a JSON progress ledger based on the conversation so far. + + Adds lightweight retries with backoff for transient parse issues and avoids selecting a + non-existent "unknown" agent. If there are no participants, a clear error is raised. 
+ """ + agent_names = list(magentic_context.participant_descriptions.keys()) + if not agent_names: + raise RuntimeError("No participants configured; cannot determine next speaker.") + + names_csv = ", ".join(agent_names) + team_text = _team_block(magentic_context.participant_descriptions) + + prompt = self.progress_ledger_prompt.format( + task=magentic_context.task, + team=team_text, + names=names_csv, + ) + user_message = ChatMessage("user", [prompt]) + + # Include full context to help the model decide current stage, with small retry loop + attempts = 0 + last_error: Exception | None = None + while attempts < self.progress_ledger_retry_count: + raw = await self._complete([*magentic_context.chat_history, user_message]) + try: + ledger_dict = _extract_json(raw.text) + return _coerce_model(MagenticProgressLedger, ledger_dict) + except Exception as ex: + last_error = ex + attempts += 1 + logger.warning( + f"Progress ledger JSON parse failed (attempt {attempts}/{self.progress_ledger_retry_count}): {ex}" + ) + if attempts < self.progress_ledger_retry_count: + # brief backoff before next try + await asyncio.sleep(0.25 * attempts) + + raise RuntimeError( + f"Progress ledger parse failed after {self.progress_ledger_retry_count} attempt(s): {last_error}" + ) + + async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatMessage: + """Ask the model to produce the final answer addressed to the user.""" + prompt = self.final_answer_prompt.format(task=magentic_context.task) + user_message = ChatMessage("user", [prompt]) + response = await self._complete([*magentic_context.chat_history, user_message]) + # Ensure role is assistant + return ChatMessage( + role="assistant", + text=response.text, + author_name=response.author_name or MAGENTIC_MANAGER_NAME, + ) + + @override + def on_checkpoint_save(self) -> dict[str, Any]: + state: dict[str, Any] = {} + if self.task_ledger is not None: + state["task_ledger"] = self.task_ledger.to_dict() + return state + + 
@override + def on_checkpoint_restore(self, state: dict[str, Any]) -> None: + ledger = state.get("task_ledger") + if ledger is not None: + try: + self.task_ledger = _MagenticTaskLedger.from_dict(ledger) + except Exception: # pragma: no cover - defensive + logger.warning("Failed to restore manager task ledger from checkpoint state") + + +# endregion Magentic Manager + +# region Magentic Orchestrator + + +class MagenticResetSignal: + """Signal to indicate that the Magentic workflow should reset. + + This signal can be raised within the orchestrator's inner loop to trigger + a reset of the Magentic context, clearing chat history and resetting + stall counts. + """ + + pass + + +class MagenticOrchestratorEventType(str, Enum): + """Types of Magentic orchestrator events.""" + + PLAN_CREATED = "plan_created" + REPLANNED = "replanned" + PROGRESS_LEDGER_UPDATED = "progress_ledger_updated" + + +@dataclass +class MagenticOrchestratorEvent(ExecutorEvent): + """Base class for Magentic orchestrator events.""" + + def __init__( + self, + executor_id: str, + event_type: MagenticOrchestratorEventType, + data: ChatMessage | MagenticProgressLedger, + ) -> None: + super().__init__(executor_id, data) + self.event_type = event_type + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(executor_id={self.executor_id}, event_type={self.event_type})" + + +# region Request info related types + + +@dataclass +class MagenticPlanReviewResponse: + """Response to a human plan review request. + + Attributes: + review: List of messages containing feedback and suggested revisions. If empty, + the plan is considered approved. 
+ """ + + review: list[ChatMessage] + + @staticmethod + def approve() -> "MagenticPlanReviewResponse": + """Create an approval response.""" + return MagenticPlanReviewResponse(review=[]) + + @staticmethod + def revise(feedback: str | list[str] | ChatMessage | list[ChatMessage]) -> "MagenticPlanReviewResponse": + """Create a revision response with feedback.""" + if isinstance(feedback, str): + feedback = [ChatMessage("user", [feedback])] + elif isinstance(feedback, ChatMessage): + feedback = [feedback] + elif isinstance(feedback, list): + feedback = [ChatMessage("user", [item]) if isinstance(item, str) else item for item in feedback] + + return MagenticPlanReviewResponse(review=feedback) + + +@dataclass +class MagenticPlanReviewRequest: + """Request for human review of a proposed plan. + + Attributes: + plan: The proposed plan message. + current_progress: The current progress ledger, if available. + During the initial plan review, this will be None. In subsequent + reviews after replanning (due to stalls), this will contain the + latest progress ledger that determined no progress had been made + or the workflow was in a loop. + is_stalled: Whether the workflow is currently stalled. + """ + + plan: ChatMessage + current_progress: MagenticProgressLedger | None + is_stalled: bool + + def approve(self) -> MagenticPlanReviewResponse: + """Create an approval response.""" + return MagenticPlanReviewResponse.approve() + + def revise(self, feedback: str | list[str] | ChatMessage | list[ChatMessage]) -> MagenticPlanReviewResponse: + """Create a revision response with feedback.""" + return MagenticPlanReviewResponse.revise(feedback) + + +# endregion Human Intervention Types + + +class MagenticOrchestrator(BaseGroupChatOrchestrator): + """Magentic orchestrator that defines the workflow structure. + + This orchestrator manages the overall Magentic workflow in the following structure: + + 1. 
Upon receiving the task (a list of messages), it creates the plan using the manager + then runs the inner loop. + 2. The inner loop is distributed and implementation is decentralized. In the orchestrator, + it is responsible for: + - Creating the progress ledger using the manager. + - Checking for task completion. + - Detecting stalling or looping and triggering replanning if needed. + - Sending requests to participants based on the progress ledger's next speaker. + - Issue requests for human intervention if enabled and needed. + 3. The inner loop waits for responses from the selected participant, then continues the loop. + 4. The orchestrator breaks out of the inner loop when the replanning or final answer conditions are met. + 5. The outer loop handles replanning and reenters the inner loop. + """ + + def __init__( + self, + manager: MagenticManagerBase, + participant_registry: ParticipantRegistry, + *, + require_plan_signoff: bool = False, + ) -> None: + """Initialize the Magentic orchestrator. + + Args: + manager: The Magentic manager instance to use for planning and progress tracking. + participant_registry: Registry of participants involved in the workflow. + + Keyword Args: + require_plan_signoff: If True, requires human approval of the initial plan before proceeding. 
+ """ + super().__init__("magentic_orchestrator", participant_registry) + self._manager = manager + self._require_plan_signoff = require_plan_signoff + + # Task related state + self._magentic_context: MagenticContext | None = None + self._task_ledger: ChatMessage | None = None + self._progress_ledger: MagenticProgressLedger | None = None + + # Termination related state + self._terminated: bool = False + self._max_rounds = manager.max_round_count + + @override + async def _handle_messages( + self, + messages: list[ChatMessage], + ctx: WorkflowContext[GroupChatWorkflowContext_T_Out, list[ChatMessage]], + ) -> None: + """Handle the initial task messages to start the workflow.""" + if self._terminated: + raise RuntimeError( + "This Magentic workflow has already been completed. No further messages can be processed. " + "Use the builder to create a new workflow instance to handle additional tasks." + ) + + if not messages: + raise ValueError("Magentic orchestrator requires at least one message to start the workflow.") + + if len(messages) > 1: + raise ValueError("Magentic only support a single task message to start the workflow.") + + if messages[0].text.strip() == "": + raise ValueError("Magentic task message must contain non-empty text.") + + self._magentic_context = MagenticContext( + task=messages[0].text, + participant_descriptions=self._participant_registry.participants, + chat_history=list(messages), + ) + + # Initial planning using the manager with real model calls + self._task_ledger = await self._manager.plan(self._magentic_context.clone(deep=True)) + await ctx.add_event( + MagenticOrchestratorEvent( + executor_id=self.id, + event_type=MagenticOrchestratorEventType.PLAN_CREATED, + data=self._task_ledger, + ) + ) + + # If a human must sign off, ask now and return. The response handler will resume. 
+ if self._require_plan_signoff: + await self._send_plan_review_request(cast(WorkflowContext, ctx)) + return + + # Add task ledger to conversation history + self._magentic_context.chat_history.append(self._task_ledger) + + logger.debug("Task ledger created.") + + # Start the inner loop + await self._run_inner_loop(ctx) + + @override + async def _handle_response( + self, + response: AgentExecutorResponse | GroupChatResponseMessage, + ctx: WorkflowContext[GroupChatWorkflowContext_T_Out, list[ChatMessage]], + ) -> None: + """Handle a response message from a participant.""" + if self._magentic_context is None or self._task_ledger is None: + raise RuntimeError("Context or task ledger not initialized") + + messages = self._process_participant_response(response) + + self._magentic_context.chat_history.extend(messages) + + # Broadcast participant messages to all participants for context, except + # the participant that just responded + participant = ctx.get_source_executor_id() + await self._broadcast_messages_to_participants( + messages, + cast(WorkflowContext[AgentExecutorRequest | GroupChatParticipantMessage], ctx), + participants=[p for p in self._participant_registry.participants if p != participant], + ) + + await self._run_inner_loop(ctx) + + @response_handler + async def handle_plan_review_response( + self, + original_request: MagenticPlanReviewRequest, + response: MagenticPlanReviewResponse, + ctx: WorkflowContext[GroupChatWorkflowContext_T_Out, list[ChatMessage]], + ) -> None: + """Handle the human response to the plan review request. + + Logic: + There are code paths which will trigger a plan review request to the human: + - Initial plan creation if `require_plan_signoff` is True. + - Potentially during the inner loop if stalling is detected (resetting and replanning). + + The human can either approve the plan or request revisions with comments. 
+ - If approved, proceed to run the outer loop, which simply adds the task ledger + to the conversation and enters the inner loop. + - If revision requested, append the review comments to the chat history, + trigger replanning via the manager, emit a REPLANNED event, then run the outer loop. + """ + if self._magentic_context is None or self._task_ledger is None: + raise RuntimeError("Context or task ledger not initialized") + + # Case 1: Approved + if len(response.review) == 0: + logger.debug("Magentic Orchestrator: Plan review approved by human.") + await self._run_outer_loop(ctx) + return + # Case 2: Revision requested + logger.debug("Magentic Orchestrator: Plan review revision requested by human.") + self._magentic_context.chat_history.extend(response.review) + self._task_ledger = await self._manager.replan(self._magentic_context.clone(deep=True)) + await ctx.add_event( + MagenticOrchestratorEvent( + executor_id=self.id, + event_type=MagenticOrchestratorEventType.REPLANNED, + data=self._task_ledger, + ) + ) + # Continue the review process by sending the new plan for review again until approved + # We don't need to check if `_require_plan_signoff` is True here, since we are already + # in the review process. + await self._send_plan_review_request(cast(WorkflowContext, ctx), is_stalled=original_request.is_stalled) + + async def _send_plan_review_request(self, ctx: WorkflowContext, is_stalled: bool = False) -> None: + """Send a human intervention request for plan review. + + The response will be handled in the response handler `handle_plan_review_response`. 
+ """ + if self._task_ledger is None: + raise RuntimeError("No task ledger available for plan review request.") + + await ctx.request_info( + MagenticPlanReviewRequest( + plan=self._task_ledger, + current_progress=self._progress_ledger, + is_stalled=is_stalled, + ), + MagenticPlanReviewResponse, + ) + + async def _run_inner_loop( + self, + ctx: WorkflowContext[GroupChatWorkflowContext_T_Out, list[ChatMessage]], + ) -> None: + """Run the inner orchestration loop. Coordination phase. Serialized with a lock.""" + if self._magentic_context is None or self._task_ledger is None: + raise RuntimeError("Context or task ledger not initialized") + + await self._run_inner_loop_helper(ctx) + + async def _run_inner_loop_helper( + self, + ctx: WorkflowContext[GroupChatWorkflowContext_T_Out, list[ChatMessage]], + ) -> None: + """Run inner loop with exclusive access.""" + # Narrow optional context for the remainder of this method + if self._magentic_context is None: + raise RuntimeError("Context not initialized") + # Check limits first + within_limits = await self._check_within_limits_or_complete( + cast(WorkflowContext[Never, list[ChatMessage]], ctx) + ) + if not within_limits: + return + + self._magentic_context.round_count += 1 + self._increment_round() + logger.debug(f"Magentic Orchestrator: Inner loop - round {self._round_index}") + + # Create progress ledger using the manager + try: + self._progress_ledger = await self._manager.create_progress_ledger(self._magentic_context.clone(deep=True)) + except Exception as ex: + logger.warning(f"Magentic Orchestrator: Progress ledger creation failed, triggering reset: {ex}") + await self._reset_and_replan(ctx) + return + + await ctx.add_event( + MagenticOrchestratorEvent( + executor_id=self.id, + event_type=MagenticOrchestratorEventType.PROGRESS_LEDGER_UPDATED, + data=self._progress_ledger, + ) + ) + + logger.debug( + f"Progress evaluation: satisfied={self._progress_ledger.is_request_satisfied.answer}, " + 
f"next={self._progress_ledger.next_speaker.answer}" + ) + + # Check for task completion + if self._progress_ledger.is_request_satisfied.answer: + logger.info("Magentic Orchestrator: Task completed") + await self._prepare_final_answer(cast(WorkflowContext[Never, list[ChatMessage]], ctx)) + return + + # Check for stalling or looping + if not self._progress_ledger.is_progress_being_made.answer or self._progress_ledger.is_in_loop.answer: + self._magentic_context.stall_count += 1 + else: + self._magentic_context.stall_count = max(0, self._magentic_context.stall_count - 1) + + if self._magentic_context.stall_count > self._manager.max_stall_count: + logger.debug(f"Magentic Orchestrator: Stalling detected after {self._magentic_context.stall_count} rounds") + await self._reset_and_replan(ctx) + return + + # Determine the next speaker and instruction + next_speaker = self._progress_ledger.next_speaker.answer + if not isinstance(next_speaker, str): + # Fallback to first participant if ledger returns non-string + logger.warning("Next speaker answer was not a string; selecting first participant as fallback") + next_speaker = next(iter(self._participant_registry.participants.keys())) + instruction = self._progress_ledger.instruction_or_question.answer + + if next_speaker not in self._participant_registry.participants: + logger.warning(f"Invalid next speaker: {next_speaker}") + await self._prepare_final_answer(cast(WorkflowContext[Never, list[ChatMessage]], ctx)) + return + + # Add instruction to conversation (assistant guidance) + instruction_msg = ChatMessage( + role="assistant", + text=str(instruction), + author_name=MAGENTIC_MANAGER_NAME, + ) + self._magentic_context.chat_history.append(instruction_msg) + + # Request specific agent to respond + logger.debug(f"Magentic Orchestrator: Requesting {next_speaker} to respond") + await self._send_request_to_participant( + next_speaker, + cast(WorkflowContext[AgentExecutorRequest | GroupChatRequestMessage], ctx), + 
additional_instruction=str(instruction), + ) + + async def _reset_and_replan( + self, + ctx: WorkflowContext[GroupChatWorkflowContext_T_Out, list[ChatMessage]], + ) -> None: + """Reset context and replan.""" + if self._magentic_context is None: + raise RuntimeError("Context not initialized") + + logger.debug("Magentic Orchestrator: Resetting and replanning") + + # Reset context + self._magentic_context.reset() + + # Reset all participant states + await self._reset_participants(cast(WorkflowContext[MagenticResetSignal], ctx)) + + # Replan + self._task_ledger = await self._manager.replan(self._magentic_context.clone(deep=True)) + await ctx.add_event( + MagenticOrchestratorEvent( + executor_id=self.id, + event_type=MagenticOrchestratorEventType.REPLANNED, + data=self._task_ledger, + ) + ) + # If a human must sign off, ask now and return. The response handler will resume. + if self._require_plan_signoff: + await self._send_plan_review_request(cast(WorkflowContext, ctx), is_stalled=True) + return + + self._magentic_context.chat_history.append(self._task_ledger) + + # Restart outer loop + await self._run_outer_loop(ctx) + + async def _run_outer_loop( + self, + ctx: WorkflowContext[GroupChatWorkflowContext_T_Out, list[ChatMessage]], + ) -> None: + """Run the outer orchestration loop - planning phase.""" + if self._magentic_context is None: + raise RuntimeError("Context not initialized") + + logger.debug("Magentic Orchestrator: Outer loop - entering inner loop") + + # Add task ledger to history if not already there + if self._task_ledger and ( + not self._magentic_context.chat_history or self._magentic_context.chat_history[-1] != self._task_ledger + ): + self._magentic_context.chat_history.append(self._task_ledger) + + # Start inner loop + await self._run_inner_loop(ctx) + + async def _prepare_final_answer(self, ctx: WorkflowContext[Never, list[ChatMessage]]) -> None: + """Prepare the final answer using the manager.""" + if self._magentic_context is None: + raise 
RuntimeError("Context not initialized") + + logger.info("Magentic Orchestrator: Preparing final answer") + final_answer = await self._manager.prepare_final_answer(self._magentic_context.clone(deep=True)) + + # Emit a completed event for the workflow + await ctx.yield_output([final_answer]) + + self._terminated = True + + async def _check_within_limits_or_complete(self, ctx: WorkflowContext[Never, list[ChatMessage]]) -> bool: + """Check if orchestrator is within operational limits. + + If limits are exceeded, yield a termination message and mark the workflow as terminated. + + Args: + ctx: The workflow context. + + Returns: + True if within limits, False if limits exceeded and workflow is terminated. + """ + if self._magentic_context is None: + raise RuntimeError("Context not initialized") + + hit_round_limit = self._max_rounds is not None and self._round_index >= self._max_rounds + hit_reset_limit = ( + self._manager.max_reset_count is not None + and self._magentic_context.reset_count >= self._manager.max_reset_count + ) + + if hit_round_limit or hit_reset_limit: + limit_type = "round" if hit_round_limit else "reset" + logger.error(f"Magentic Orchestrator: Max {limit_type} count reached") + + # Yield the full conversation with an indication of termination due to limits + await ctx.yield_output([ + *self._magentic_context.chat_history, + ChatMessage( + role="assistant", + text=f"Workflow terminated due to reaching maximum {limit_type} count.", + author_name=MAGENTIC_MANAGER_NAME, + ), + ]) + self._terminated = True + + return False + + return True + + async def _reset_participants(self, ctx: WorkflowContext[MagenticResetSignal]) -> None: + """Reset all participant executors.""" + # Orchestrator is connected to all participants. Sending the message without specifying + # a target will broadcast to all. 
+ await ctx.send_message(MagenticResetSignal()) + + @override + async def on_checkpoint_save(self) -> dict[str, Any]: + """Capture current orchestrator state for checkpointing.""" + state = await super().on_checkpoint_save() + state["terminated"] = self._terminated + + if self._magentic_context is not None: + state["magentic_context"] = self._magentic_context.to_dict() + if self._task_ledger is not None: + state["task_ledger"] = _message_to_payload(self._task_ledger) + if self._progress_ledger is not None: + state["progress_ledger"] = self._progress_ledger.to_dict() + + try: + state["manager_state"] = self._manager.on_checkpoint_save() + except Exception as exc: + logger.warning(f"Failed to save manager state for checkpoint: {exc}\nSkipping...") + + return state + + @override + async def on_checkpoint_restore(self, state: dict[str, Any]) -> None: + """Restore executor state from checkpoint.""" + await super().on_checkpoint_restore(state) + self._terminated = state.get("terminated", False) + + magentic_context_data = state.get("magentic_context") + if magentic_context_data is not None: + try: + self._magentic_context = MagenticContext.from_dict(magentic_context_data) + except Exception: # pragma: no cover - defensive + logger.warning("Failed to restore Magentic context from checkpoint data") + self._magentic_context = None + + task_ledger_data = state.get("task_ledger") + if task_ledger_data is not None: + try: + self._task_ledger = _message_from_payload(task_ledger_data) + except Exception: # pragma: no cover - defensive + logger.warning("Failed to restore task ledger from checkpoint data") + self._task_ledger = None + + progress_ledger_data = state.get("progress_ledger") + if progress_ledger_data is not None: + try: + self._progress_ledger = MagenticProgressLedger.from_dict(progress_ledger_data) + except Exception: # pragma: no cover - defensive + logger.warning("Failed to restore progress ledger from checkpoint data") + self._progress_ledger = None + + 
manager_state = state.get("manager_state") + if manager_state is not None: + try: + self._manager.on_checkpoint_restore(manager_state) + except Exception as exc: + logger.warning(f"Failed to restore manager state from checkpoint: {exc}\nSkipping...") + + +# endregion Magentic Orchestrator + +# region Magentic Agent Executor + + +class MagenticAgentExecutor(AgentExecutor): + """Specialized AgentExecutor for Magentic agent participants.""" + + def __init__(self, agent: AgentProtocol) -> None: + """Initialize a Magentic Agent Executor. + + This executor wraps an AgentProtocol instance to be used as a participant + in a Magentic One workflow. + + Args: + agent: The agent instance to wrap. + + Notes: Magentic pattern requires a reset operation upon replanning. This executor + extends the base AgentExecutor to handle resets appropriately. In order to handle + resets, the agent threads and other states are reset when requested by the orchestrator. + And because of this, MagenticAgentExecutor does not support custom threads. + """ + super().__init__(agent) + + @handler + async def handle_magentic_reset(self, signal: MagenticResetSignal, ctx: WorkflowContext) -> None: + """Handle reset signal from the Magentic orchestrator. + + This method resets the internal state of the agent executor, including + any threads or caches, to prepare for a fresh start after replanning. + + Args: + signal: The MagenticResetSignal instance. + ctx: The workflow context. + """ + # Message related + self._cache.clear() + self._full_conversation.clear() + # Request into related + self._pending_agent_requests.clear() + self._pending_responses_to_agent.clear() + # Reset threads + self._agent_thread = self._agent.get_new_thread() + + +# endregion Magentic Agent Executor + +# region Magentic Workflow Builder + + +class MagenticBuilder: + """Fluent builder for creating Magentic One multi-agent orchestration workflows. 
+ + Magentic One workflows use an LLM-powered manager to coordinate multiple agents through + dynamic task planning, progress tracking, and adaptive replanning. The manager creates + plans, selects agents, monitors progress, and determines when to replan or complete. + + The builder provides a fluent API for configuring participants, the manager, optional + plan review, checkpointing, and event callbacks. + + Human-in-the-loop Support: + Magentic provides specialized HITL mechanisms via: + + - `.with_plan_review()` - Review and approve/revise plans before execution + - `.with_human_input_on_stall()` - Intervene when workflow stalls + - Tool approval via `function_approval_request` - Approve individual tool calls + + These emit `MagenticHumanInterventionRequest` events that provide structured + decision options (APPROVE, REVISE, CONTINUE, REPLAN, GUIDANCE) appropriate + for Magentic's planning-based orchestration. + """ + + def __init__(self) -> None: + """Initialize the Magentic workflow builder.""" + self._participants: dict[str, AgentProtocol | Executor] = {} + self._participant_factories: list[Callable[[], AgentProtocol | Executor]] = [] + + # Manager related members + self._manager: MagenticManagerBase | None = None + self._manager_factory: Callable[[], MagenticManagerBase] | None = None + self._manager_agent_factory: Callable[[], AgentProtocol] | None = None + self._standard_manager_options: dict[str, Any] = {} + self._enable_plan_review: bool = False + + self._checkpoint_storage: CheckpointStorage | None = None + + def register_participants( + self, + participant_factories: Sequence[Callable[[], AgentProtocol | Executor]], + ) -> "MagenticBuilder": + """Register participant factories for this Magentic workflow. + + Args: + participant_factories: Sequence of callables that return AgentProtocol or Executor instances. 
+ + Returns: + Self for method chaining + + Raises: + ValueError: If participant_factories is empty, or participants + or participant factories are already set + """ + if self._participants: + raise ValueError("Cannot mix .participants() and .register_participants() in the same builder instance.") + + if self._participant_factories: + raise ValueError("register_participants() has already been called on this builder instance.") + + if not participant_factories: + raise ValueError("participant_factories cannot be empty") + + self._participant_factories = list(participant_factories) + return self + + def participants(self, participants: Sequence[AgentProtocol | Executor]) -> Self: + """Define participants for this Magentic workflow. + + Accepts AgentProtocol instances (auto-wrapped as AgentExecutor) or Executor instances. + + Args: + participants: Sequence of participant definitions + + Returns: + Self for method chaining + + Raises: + ValueError: If participants are empty, names are duplicated, or participants + or participant factories are already set + TypeError: If any participant is not AgentProtocol or Executor instance + + Example: + + .. code-block:: python + + workflow = ( + MagenticBuilder() + .participants([research_agent, writing_agent, coding_agent, review_agent]) + .with_manager(agent=manager_agent) + .build() + ) + + Notes: + - Participant names become part of the manager's context for selection + - Agent descriptions (if available) are extracted and provided to the manager + - Can be called multiple times to add participants incrementally + """ + if self._participant_factories: + raise ValueError("Cannot mix .participants() and .register_participants() in the same builder instance.") + + if self._participants: + raise ValueError("participants have already been set. Call participants(...) 
at most once.") + + if not participants: + raise ValueError("participants cannot be empty.") + + # Name of the executor mapped to participant instance + named: dict[str, AgentProtocol | Executor] = {} + for participant in participants: + if isinstance(participant, Executor): + identifier = participant.id + elif isinstance(participant, AgentProtocol): + if not participant.name: + raise ValueError("AgentProtocol participants must have a non-empty name.") + identifier = participant.name + else: + raise TypeError( + f"Participants must be AgentProtocol or Executor instances. Got {type(participant).__name__}." + ) + + if identifier in named: + raise ValueError(f"Duplicate participant name '{identifier}' detected") + + named[identifier] = participant + + self._participants = named + + return self + + def with_plan_review(self, enable: bool = True) -> "MagenticBuilder": + """Enable or disable human-in-the-loop plan review before task execution. + + When enabled, the workflow will pause after the manager generates the initial + plan and emit a MagenticHumanInterventionRequest event with kind=PLAN_REVIEW. + A human reviewer can then approve, request revisions, or reject the plan. + The workflow continues only after approval. + + This is useful for: + - High-stakes tasks requiring human oversight + - Validating the manager's understanding of requirements + - Catching hallucinations or unrealistic plans early + - Educational scenarios where learners review AI planning + + Args: + enable: Whether to require plan review (default True) + + Returns: + Self for method chaining + + Usage: + + .. 
code-block:: python + + workflow = ( + MagenticBuilder() + .participants([agent1]) + .with_manager(agent=manager_agent) + .with_plan_review(enable=True) + .build() + ) + + # During execution, handle plan review + async for event in workflow.run_stream("task"): + if isinstance(event, RequestInfoEvent): + request = event.data + if isinstance(request, MagenticHumanInterventionRequest): + if request.kind == MagenticHumanInterventionKind.PLAN_REVIEW: + # Review plan and respond + reply = MagenticHumanInterventionReply(decision=MagenticHumanInterventionDecision.APPROVE) + await workflow.send_responses({event.request_id: reply}) + + See Also: + - :class:`MagenticHumanInterventionRequest`: Event emitted for review + - :class:`MagenticHumanInterventionReply`: Response to send back + - :class:`MagenticHumanInterventionDecision`: APPROVE/REVISE options + """ + self._enable_plan_review = enable + return self + + def with_checkpointing(self, checkpoint_storage: CheckpointStorage) -> "MagenticBuilder": + """Enable workflow state persistence using the provided checkpoint storage. + + Checkpointing allows workflows to be paused, resumed across process restarts, + or recovered after failures. The entire workflow state including conversation + history, task ledgers, and progress is persisted at key points. + + Args: + checkpoint_storage: Storage backend for checkpoints (e.g., InMemoryCheckpointStorage, + FileCheckpointStorage, or custom implementations) + + Returns: + Self for method chaining + + Usage: + + ..
code-block:: python + + from agent_framework import InMemoryCheckpointStorage + + storage = InMemoryCheckpointStorage() + workflow = ( + MagenticBuilder() + .participants([agent1]) + .with_manager(agent=manager_agent) + .with_checkpointing(storage) + .build() + ) + + # First run + thread_id = "task-123" + async for msg in workflow.run("task", thread_id=thread_id): + print(msg.text) + + # Resume from checkpoint + async for msg in workflow.run("continue", thread_id=thread_id): + print(msg.text) + + Notes: + - Checkpoints are created after each significant state transition + - Thread ID must be consistent across runs to resume properly + - Storage implementations may have different persistence guarantees + """ + self._checkpoint_storage = checkpoint_storage + return self + + @overload + def with_manager(self, *, manager: MagenticManagerBase) -> Self: + """Configure the workflow with a pre-defined Magentic manager instance. + + Args: + manager: A custom manager instance (subclass of MagenticManagerBase) + + Returns: + Self for method chaining + """ + ... + + @overload + def with_manager(self, *, manager_factory: Callable[[], MagenticManagerBase]) -> Self: + """Configure the workflow with a factory for creating custom Magentic manager instances. + + Args: + manager_factory: Callable that returns a new MagenticManagerBase instance + + Returns: + Self for method chaining + """ + ... 
+ + @overload + def with_manager( + self, + *, + agent: AgentProtocol, + task_ledger: _MagenticTaskLedger | None = None, + # Prompt overrides + task_ledger_facts_prompt: str | None = None, + task_ledger_plan_prompt: str | None = None, + task_ledger_full_prompt: str | None = None, + task_ledger_facts_update_prompt: str | None = None, + task_ledger_plan_update_prompt: str | None = None, + progress_ledger_prompt: str | None = None, + final_answer_prompt: str | None = None, + # Limits + max_stall_count: int = 3, + max_reset_count: int | None = None, + max_round_count: int | None = None, + ) -> Self: + """Configure the workflow with an agent for creating a standard manager. + + This will create a StandardMagenticManager using the provided agent. + + Args: + agent: AgentProtocol instance for the standard magentic manager + (`StandardMagenticManager`) + task_ledger: Optional custom task ledger implementation for specialized + prompting or structured output requirements + task_ledger_facts_prompt: Custom prompt template for extracting facts from + task description + task_ledger_plan_prompt: Custom prompt template for generating initial plan + task_ledger_full_prompt: Custom prompt template for complete task ledger + (facts + plan combined) + task_ledger_facts_update_prompt: Custom prompt template for updating facts + based on agent progress + task_ledger_plan_update_prompt: Custom prompt template for replanning when + needed + progress_ledger_prompt: Custom prompt template for assessing progress and + determining next actions + final_answer_prompt: Custom prompt template for synthesizing final response + when task is complete + max_stall_count: Maximum consecutive rounds without progress before triggering + replan (default 3). Set to 0 to disable stall detection. + max_reset_count: Maximum number of complete resets allowed before failing. + None means unlimited resets. + max_round_count: Maximum total coordination rounds before stopping with + partial result. 
None means unlimited rounds. + + Returns: + Self for method chaining + """ + ... + + @overload + def with_manager( + self, + *, + agent_factory: Callable[[], AgentProtocol], + task_ledger: _MagenticTaskLedger | None = None, + # Prompt overrides + task_ledger_facts_prompt: str | None = None, + task_ledger_plan_prompt: str | None = None, + task_ledger_full_prompt: str | None = None, + task_ledger_facts_update_prompt: str | None = None, + task_ledger_plan_update_prompt: str | None = None, + progress_ledger_prompt: str | None = None, + final_answer_prompt: str | None = None, + # Limits + max_stall_count: int = 3, + max_reset_count: int | None = None, + max_round_count: int | None = None, + ) -> Self: + """Configure the workflow with a factory for creating the manager agent. + + This will create a StandardMagenticManager using the provided agent factory. + + Args: + agent_factory: Callable that returns a new AgentProtocol instance for the standard + magentic manager (`StandardMagenticManager`) + task_ledger: Optional custom task ledger implementation for specialized + prompting or structured output requirements + task_ledger_facts_prompt: Custom prompt template for extracting facts from + task description + task_ledger_plan_prompt: Custom prompt template for generating initial plan + task_ledger_full_prompt: Custom prompt template for complete task ledger + (facts + plan combined) + task_ledger_facts_update_prompt: Custom prompt template for updating facts + based on agent progress + task_ledger_plan_update_prompt: Custom prompt template for replanning when + needed + progress_ledger_prompt: Custom prompt template for assessing progress and + determining next actions + final_answer_prompt: Custom prompt template for synthesizing final response + when task is complete + max_stall_count: Maximum consecutive rounds without progress before triggering + replan (default 3). Set to 0 to disable stall detection. 
+ max_reset_count: Maximum number of complete resets allowed before failing. + None means unlimited resets. + max_round_count: Maximum total coordination rounds before stopping with + partial result. None means unlimited rounds. + + Returns: + Self for method chaining + """ + ... + + def with_manager( + self, + *, + manager: MagenticManagerBase | None = None, + manager_factory: Callable[[], MagenticManagerBase] | None = None, + agent_factory: Callable[[], AgentProtocol] | None = None, + # Constructor args for StandardMagenticManager when manager is not provided + agent: AgentProtocol | None = None, + task_ledger: _MagenticTaskLedger | None = None, + # Prompt overrides + task_ledger_facts_prompt: str | None = None, + task_ledger_plan_prompt: str | None = None, + task_ledger_full_prompt: str | None = None, + task_ledger_facts_update_prompt: str | None = None, + task_ledger_plan_update_prompt: str | None = None, + progress_ledger_prompt: str | None = None, + final_answer_prompt: str | None = None, + # Limits + max_stall_count: int = 3, + max_reset_count: int | None = None, + max_round_count: int | None = None, + ) -> Self: + """Configure the workflow manager for task planning and agent coordination. + + The manager is responsible for creating plans, selecting agents, tracking progress, + and deciding when to replan or complete. This method supports four usage patterns: + + 1. **Provide existing manager**: Pass a pre-configured manager instance (custom + or standard) for full control over behavior + 2. **Factory for custom manager**: Pass a callable that returns a new manager + instance for more advanced scenarios so that the builder can be reused + 3. **Factory for agent**: Pass a callable that returns a new agent instance to + automatically create a `StandardMagenticManager` + 4. 
**Auto-create with agent**: Pass an agent to automatically create a `StandardMagenticManager` + + Args: + manager: Pre-configured manager instance (`StandardMagenticManager` or custom + `MagenticManagerBase` subclass). If provided, all other arguments are ignored. + manager_factory: Callable that returns a new manager instance. + agent_factory: Callable that returns a new agent instance. + agent: Agent instance for generating plans and decisions. The agent's + configured instructions and options (temperature, seed, etc.) will be + applied. + task_ledger: Optional custom task ledger implementation for specialized + prompting or structured output requirements + task_ledger_facts_prompt: Custom prompt template for extracting facts from + task description + task_ledger_plan_prompt: Custom prompt template for generating initial plan + task_ledger_full_prompt: Custom prompt template for complete task ledger + (facts + plan combined) + task_ledger_facts_update_prompt: Custom prompt template for updating facts + based on agent progress + task_ledger_plan_update_prompt: Custom prompt template for replanning when + needed + progress_ledger_prompt: Custom prompt template for assessing progress and + determining next actions + final_answer_prompt: Custom prompt template for synthesizing final response + when task is complete + max_stall_count: Maximum consecutive rounds without progress before triggering + replan (default 3). Set to 0 to disable stall detection. + max_reset_count: Maximum number of complete resets allowed before failing. + None means unlimited resets. + max_round_count: Maximum total coordination rounds before stopping with + partial result. None means unlimited rounds. + + Returns: + Self for method chaining + + Raises: + ValueError: If manager is None and agent is not provided. + + Usage with agent (recommended): + + .. 
code-block:: python + + from agent_framework import ChatAgent, ChatOptions + from agent_framework.openai import OpenAIChatClient + + # Configure manager agent with specific options and instructions + manager_agent = ChatAgent( + name="Coordinator", + chat_client=OpenAIChatClient(model_id="gpt-4o"), + options=ChatOptions(temperature=0.3, seed=42), + instructions="Be concise and focus on accuracy", + ) + + workflow = ( + MagenticBuilder() + .participants([agent1, agent2]) + .with_manager( + agent=manager_agent, + max_round_count=20, + max_stall_count=3, + ) + .build() + ) + + Usage with custom manager: + + .. code-block:: python + + class MyManager(MagenticManagerBase): + async def plan(self, context: MagenticContext) -> ChatMessage: + # Custom planning logic + return ChatMessage("assistant", ["..."]) + + + manager = MyManager() + workflow = MagenticBuilder().participants([agent1]).with_manager(manager=manager).build() + + Usage with prompt customization: + + .. code-block:: python + + workflow = ( + MagenticBuilder() + .participants([coder_agent, reviewer_agent]) + .with_manager( + agent=manager_agent, + task_ledger_plan_prompt="Create a detailed step-by-step plan...", + progress_ledger_prompt="Assess progress and decide next action...", + max_stall_count=2, + ) + .build() + ) + + Notes: + - StandardMagenticManager uses structured LLM calls for all decisions + - Custom managers can implement alternative selection strategies + - Prompt templates support Jinja2-style variable substitution + - Stall detection helps prevent infinite loops in stuck scenarios + - The agent's instructions are used as system instructions for all manager prompts + """ + if any([self._manager, self._manager_factory, self._manager_agent_factory]): + raise ValueError("with_manager() has already been called on this builder instance.") + + if sum(x is not None for x in [manager, agent, manager_factory, agent_factory]) != 1: + raise ValueError("Exactly one of manager, agent, 
manager_factory, or agent_factory must be provided.") + + def _log_warning_if_constructor_args_provided() -> None: + if any( + arg is not None + for arg in [ + task_ledger, + task_ledger_facts_prompt, + task_ledger_plan_prompt, + task_ledger_full_prompt, + task_ledger_facts_update_prompt, + task_ledger_plan_update_prompt, + progress_ledger_prompt, + final_answer_prompt, + max_stall_count, + max_reset_count, + max_round_count, + ] + ): + logger.warning("Custom manager provided; all other with_manager() arguments will be ignored.") + + if manager is not None: + self._manager = manager + _log_warning_if_constructor_args_provided() + elif agent is not None: + self._manager = StandardMagenticManager( + agent=agent, + task_ledger=task_ledger, + task_ledger_facts_prompt=task_ledger_facts_prompt, + task_ledger_plan_prompt=task_ledger_plan_prompt, + task_ledger_full_prompt=task_ledger_full_prompt, + task_ledger_facts_update_prompt=task_ledger_facts_update_prompt, + task_ledger_plan_update_prompt=task_ledger_plan_update_prompt, + progress_ledger_prompt=progress_ledger_prompt, + final_answer_prompt=final_answer_prompt, + max_stall_count=max_stall_count, + max_reset_count=max_reset_count, + max_round_count=max_round_count, + ) + elif manager_factory is not None: + self._manager_factory = manager_factory + _log_warning_if_constructor_args_provided() + elif agent_factory is not None: + self._manager_agent_factory = agent_factory + self._standard_manager_options = { + "task_ledger": task_ledger, + "task_ledger_facts_prompt": task_ledger_facts_prompt, + "task_ledger_plan_prompt": task_ledger_plan_prompt, + "task_ledger_full_prompt": task_ledger_full_prompt, + "task_ledger_facts_update_prompt": task_ledger_facts_update_prompt, + "task_ledger_plan_update_prompt": task_ledger_plan_update_prompt, + "progress_ledger_prompt": progress_ledger_prompt, + "final_answer_prompt": final_answer_prompt, + "max_stall_count": max_stall_count, + "max_reset_count": max_reset_count, + 
"max_round_count": max_round_count, + } + + return self + + def _resolve_orchestrator(self, participants: Sequence[Executor]) -> Executor: + """Determine the orchestrator to use for the workflow. + + Args: + participants: List of resolved participant executors + """ + if all(x is None for x in [self._manager, self._manager_factory, self._manager_agent_factory]): + raise ValueError("No manager configured. Call with_manager(...) before building the orchestrator.") + # We don't need to check if multiple are set since that is handled in with_manager() + + if self._manager: + manager = self._manager + elif self._manager_factory: + manager = self._manager_factory() + elif self._manager_agent_factory: + agent_instance = self._manager_agent_factory() + manager = StandardMagenticManager( + agent=agent_instance, + **self._standard_manager_options, + ) + else: + # This should never be reached due to the checks above + raise RuntimeError("Manager could not be resolved. Please set the manager properly with with_manager().") + + return MagenticOrchestrator( + manager=manager, + participant_registry=ParticipantRegistry(participants), + require_plan_signoff=self._enable_plan_review, + ) + + def _resolve_participants(self) -> list[Executor]: + """Resolve participant instances into Executor objects.""" + if not self._participants and not self._participant_factories: + raise ValueError("No participants provided. 
Call .participants() or .register_participants() first.") + # We don't need to check if both are set since that is handled in the respective methods + + participants: list[Executor | AgentProtocol] = [] + if self._participant_factories: + for factory in self._participant_factories: + participant = factory() + participants.append(participant) + else: + participants = list(self._participants.values()) + + executors: list[Executor] = [] + for participant in participants: + if isinstance(participant, Executor): + executors.append(participant) + elif isinstance(participant, AgentProtocol): + executors.append(MagenticAgentExecutor(participant)) + else: + raise TypeError( + f"Participants must be AgentProtocol or Executor instances. Got {type(participant).__name__}." + ) + + return executors + + def build(self) -> Workflow: + """Build a Magentic workflow with the orchestrator and all agent executors.""" + logger.info(f"Building Magentic workflow with {len(self._participants)} participants") + + participants: list[Executor] = self._resolve_participants() + orchestrator: Executor = self._resolve_orchestrator(participants) + + # Build workflow graph + workflow_builder = WorkflowBuilder().set_start_executor(orchestrator) + for participant in participants: + # Orchestrator and participant bi-directional edges + workflow_builder = workflow_builder.add_edge(orchestrator, participant) + workflow_builder = workflow_builder.add_edge(participant, orchestrator) + if self._checkpoint_storage is not None: + workflow_builder = workflow_builder.with_checkpointing(self._checkpoint_storage) + + return workflow_builder.build() + + +# endregion Magentic Workflow Builder diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_sequential.py b/python/packages/orchestrations/agent_framework_orchestrations/_sequential.py new file mode 100644 index 0000000000..53473a4997 --- /dev/null +++ b/python/packages/orchestrations/agent_framework_orchestrations/_sequential.py @@ -0,0 
+1,310 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Sequential builder for agent/executor workflows with shared conversation context. + +This module provides a high-level, agent-focused API to assemble a sequential +workflow where: +- Participants can be provided as AgentProtocol or Executor instances via `.participants()`, + or as factories returning AgentProtocol or Executor via `.register_participants()` +- A shared conversation context (list[ChatMessage]) is passed along the chain +- Agents append their assistant messages to the context +- Custom executors can transform or summarize and return a refined context +- The workflow finishes with the final context produced by the last participant + +Typical wiring: + input -> _InputToConversation -> participant1 -> (agent? -> _ResponseToConversation) -> ... -> participantN -> _EndWithConversation + +Notes: +- Participants can mix AgentProtocol and Executor objects +- Agents are auto-wrapped by WorkflowBuilder as AgentExecutor (unless already wrapped) +- AgentExecutor produces AgentExecutorResponse; _ResponseToConversation converts this to list[ChatMessage] +- Non-agent executors must define a handler that consumes `list[ChatMessage]` and sends back + the updated `list[ChatMessage]` via their workflow context + +Why include the small internal adapter executors? +- Input normalization ("input-conversation"): ensures the workflow always starts with a + `list[ChatMessage]` regardless of whether callers pass a `str`, a single `ChatMessage`, + or a list. This keeps the first hop strongly typed and avoids boilerplate in participants. +- Agent response adaptation ("to-conversation:"): agents (via AgentExecutor) + emit `AgentExecutorResponse`. The adapter converts that to a `list[ChatMessage]` + using `full_conversation` so original prompts aren't lost when chaining. 
+- Result output ("end"): yields the final conversation list and the workflow becomes idle + giving a consistent terminal payload shape for both agents and custom executors. + +These adapters are first-class executors by design so they are type-checked at edges, +observable (ExecutorInvoke/Completed events), and easily testable/reusable. Their IDs are +deterministic and self-describing (for example, "to-conversation:writer") to reduce event-log +confusion and to mirror how the concurrent builder uses explicit dispatcher/aggregator nodes. +""" # noqa: E501 + +import logging +from collections.abc import Callable, Sequence +from typing import Any + +from agent_framework import AgentProtocol, ChatMessage +from agent_framework._workflows._agent_executor import ( + AgentExecutor, + AgentExecutorResponse, +) +from agent_framework._workflows._agent_utils import resolve_agent_id +from agent_framework._workflows._checkpoint import CheckpointStorage +from agent_framework._workflows._executor import ( + Executor, + handler, +) +from agent_framework._workflows._message_utils import normalize_messages_input +from agent_framework._workflows._orchestration_request_info import AgentApprovalExecutor +from agent_framework._workflows._workflow import Workflow +from agent_framework._workflows._workflow_builder import WorkflowBuilder +from agent_framework._workflows._workflow_context import WorkflowContext + +logger = logging.getLogger(__name__) + + +class _InputToConversation(Executor): + """Normalizes initial input into a list[ChatMessage] conversation.""" + + @handler + async def from_str(self, prompt: str, ctx: WorkflowContext[list[ChatMessage]]) -> None: + await ctx.send_message(normalize_messages_input(prompt)) + + @handler + async def from_message(self, message: ChatMessage, ctx: WorkflowContext[list[ChatMessage]]) -> None: + await ctx.send_message(normalize_messages_input(message)) + + @handler + async def from_messages(self, messages: list[str | ChatMessage], ctx: 
WorkflowContext[list[ChatMessage]]) -> None: + await ctx.send_message(normalize_messages_input(messages)) + + +class _EndWithConversation(Executor): + """Terminates the workflow by emitting the final conversation context.""" + + @handler + async def end_with_messages( + self, + conversation: list[ChatMessage], + ctx: WorkflowContext[Any, list[ChatMessage]], + ) -> None: + """Handler for ending with a list of ChatMessage. + + This is used when the last participant is a custom executor. + """ + await ctx.yield_output(list(conversation)) + + @handler + async def end_with_agent_executor_response( + self, + response: AgentExecutorResponse, + ctx: WorkflowContext[Any, list[ChatMessage] | None], + ) -> None: + """Handle case where last participant is an agent. + + The agent is wrapped by AgentExecutor and emits AgentExecutorResponse. + """ + await ctx.yield_output(response.full_conversation) + + +class SequentialBuilder: + r"""High-level builder for sequential agent/executor workflows with shared context. + + - `participants([...])` accepts a list of AgentProtocol (recommended) or Executor instances + - `register_participants([...])` accepts a list of factories for AgentProtocol (recommended) + or Executor factories + - Executors must define a handler that consumes list[ChatMessage] and sends out a list[ChatMessage] + - The workflow wires participants in order, passing a list[ChatMessage] down the chain + - Agents append their assistant messages to the conversation + - Custom executors can transform/summarize and return a list[ChatMessage] + - The final output is the conversation produced by the last participant + + Usage: + + .. 
code-block:: python + + from agent_framework_orchestrations import SequentialBuilder + + # With agent instances + workflow = SequentialBuilder().participants([agent1, agent2, summarizer_exec]).build() + + # With agent factories + workflow = ( + SequentialBuilder().register_participants([create_agent1, create_agent2, create_summarizer_exec]).build() + ) + + # Enable checkpoint persistence + workflow = SequentialBuilder().participants([agent1, agent2]).with_checkpointing(storage).build() + + # Enable request info for mid-workflow feedback (pauses before each agent) + workflow = SequentialBuilder().participants([agent1, agent2]).with_request_info().build() + + # Enable request info only for specific agents + workflow = ( + SequentialBuilder() + .participants([agent1, agent2, agent3]) + .with_request_info(agents=[agent2]) # Only pause before agent2 + .build() + ) + """ + + def __init__(self) -> None: + self._participants: list[AgentProtocol | Executor] = [] + self._participant_factories: list[Callable[[], AgentProtocol | Executor]] = [] + self._checkpoint_storage: CheckpointStorage | None = None + self._request_info_enabled: bool = False + self._request_info_filter: set[str] | None = None + + def register_participants( + self, + participant_factories: Sequence[Callable[[], AgentProtocol | Executor]], + ) -> "SequentialBuilder": + """Register participant factories for this sequential workflow.""" + if self._participants: + raise ValueError("Cannot mix .participants() and .register_participants() in the same builder instance.") + + if self._participant_factories: + raise ValueError("register_participants() has already been called on this builder instance.") + + if not participant_factories: + raise ValueError("participant_factories cannot be empty") + + self._participant_factories = list(participant_factories) + return self + + def participants(self, participants: Sequence[AgentProtocol | Executor]) -> "SequentialBuilder": + """Define the ordered participants for this 
sequential workflow. + + Accepts AgentProtocol instances (auto-wrapped as AgentExecutor) or Executor instances. + Raises if empty or duplicates are provided for clarity. + """ + if self._participant_factories: + raise ValueError("Cannot mix .participants() and .register_participants() in the same builder instance.") + + if self._participants: + raise ValueError("participants() has already been called on this builder instance.") + + if not participants: + raise ValueError("participants cannot be empty") + + # Defensive duplicate detection + seen_agent_ids: set[int] = set() + seen_executor_ids: set[str] = set() + for p in participants: + if isinstance(p, Executor): + if p.id in seen_executor_ids: + raise ValueError(f"Duplicate executor participant detected: id '{p.id}'") + seen_executor_ids.add(p.id) + else: + # Treat non-Executor as agent-like (AgentProtocol). Structural checks can be brittle at runtime. + pid = id(p) + if pid in seen_agent_ids: + raise ValueError("Duplicate agent participant detected (same agent instance provided twice)") + seen_agent_ids.add(pid) + + self._participants = list(participants) + return self + + def with_checkpointing(self, checkpoint_storage: CheckpointStorage) -> "SequentialBuilder": + """Enable checkpointing for the built workflow using the provided storage.""" + self._checkpoint_storage = checkpoint_storage + return self + + def with_request_info( + self, + *, + agents: Sequence[str | AgentProtocol] | None = None, + ) -> "SequentialBuilder": + """Enable request info after agent participant responses. + + This enables human-in-the-loop (HIL) scenarios for the sequential orchestration. + When enabled, the workflow pauses after each agent participant runs, emitting + a RequestInfoEvent that allows the caller to review the conversation and optionally + inject guidance for the agent participant to iterate. The caller provides input via + the standard response_handler/request_info pattern. 
+ + Simulated flow with HIL: + Input -> [Agent Participant <-> Request Info] -> [Agent Participant <-> Request Info] -> ... + + Note: This is only available for agent participants. Executor participants can incorporate + request info handling in their own implementation if desired. + + Args: + agents: Optional list of agent names or agent instances to enable request info for. + If None, enables HIL for all agent participants. + + Returns: + Self for fluent chaining + """ + from agent_framework._workflows._orchestration_request_info import resolve_request_info_filter + + self._request_info_enabled = True + self._request_info_filter = resolve_request_info_filter(list(agents) if agents else None) + + return self + + def _resolve_participants(self) -> list[Executor]: + """Resolve participant instances into Executor objects.""" + if not self._participants and not self._participant_factories: + raise ValueError("No participants provided. Call .participants() or .register_participants() first.") + # We don't need to check if both are set since that is handled in the respective methods + + participants: list[Executor | AgentProtocol] = [] + if self._participant_factories: + # Resolve the participant factories now. This doesn't break the factory pattern + # since the Sequential builder still creates new instances per workflow build. + for factory in self._participant_factories: + p = factory() + participants.append(p) + else: + participants = self._participants + + executors: list[Executor] = [] + for p in participants: + if isinstance(p, Executor): + executors.append(p) + elif isinstance(p, AgentProtocol): + if self._request_info_enabled and ( + not self._request_info_filter or resolve_agent_id(p) in self._request_info_filter + ): + # Handle request info enabled agents + executors.append(AgentApprovalExecutor(p)) + else: + executors.append(AgentExecutor(p)) + else: + raise TypeError(f"Participants must be AgentProtocol or Executor instances. 
Got {type(p).__name__}.") + + return executors + + def build(self) -> Workflow: + """Build and validate the sequential workflow. + + Wiring pattern: + - _InputToConversation normalizes the initial input into list[ChatMessage] + - For each participant in order: + - If Agent (or AgentExecutor): pass conversation to the agent, then optionally + route through a request info interceptor, then convert response to conversation + via _ResponseToConversation + - Else (custom Executor): pass conversation directly to the executor + - _EndWithConversation yields the final conversation and the workflow becomes idle + """ + # Internal nodes + input_conv = _InputToConversation(id="input-conversation") + end = _EndWithConversation(id="end") + + # Resolve participants and participant factories to executors + participants: list[Executor] = self._resolve_participants() + + builder = WorkflowBuilder() + builder.set_start_executor(input_conv) + + # Start of the chain is the input normalizer + prior: Executor | AgentProtocol = input_conv + for p in participants: + builder.add_edge(prior, p) + prior = p + # Terminate with the final conversation + builder.add_edge(prior, end) + + if self._checkpoint_storage is not None: + builder = builder.with_checkpointing(self._checkpoint_storage) + + return builder.build() diff --git a/python/packages/orchestrations/agent_framework_orchestrations/py.typed b/python/packages/orchestrations/agent_framework_orchestrations/py.typed new file mode 100644 index 0000000000..e69de29bb2 diff --git a/python/packages/orchestrations/pyproject.toml b/python/packages/orchestrations/pyproject.toml new file mode 100644 index 0000000000..60ce61be1b --- /dev/null +++ b/python/packages/orchestrations/pyproject.toml @@ -0,0 +1,87 @@ +[project] +name = "agent-framework-orchestrations" +description = "Orchestration patterns for Microsoft Agent Framework. Includes SequentialBuilder, ConcurrentBuilder, HandoffBuilder, GroupChatBuilder, and MagenticBuilder." 
+authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] +readme = "README.md" +requires-python = ">=3.10" +version = "1.0.0b260130" +license-files = ["LICENSE"] +urls.homepage = "https://aka.ms/agent-framework" +urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" +urls.release_notes = "https://github.com/microsoft/agent-framework/releases?q=tag%3Apython-1&expanded=true" +urls.issues = "https://github.com/microsoft/agent-framework/issues" +classifiers = [ + "License :: OSI Approved :: MIT License", + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", + "Typing :: Typed", +] +dependencies = [ + "agent-framework-core", +] + +[tool.uv] +prerelease = "if-necessary-or-explicit" +environments = [ + "sys_platform == 'darwin'", + "sys_platform == 'linux'", + "sys_platform == 'win32'" +] + +[tool.uv-dynamic-versioning] +fallback-version = "0.0.0" + +[tool.pytest.ini_options] +testpaths = 'tests' +addopts = "-ra -q -r fEX" +asyncio_mode = "auto" +asyncio_default_fixture_loop_scope = "function" +filterwarnings = [] +timeout = 120 + +[tool.ruff] +extend = "../../pyproject.toml" + +[tool.coverage.run] +omit = [ + "**/__init__.py" +] + +[tool.pyright] +extends = "../../pyproject.toml" +exclude = ['tests'] + +[tool.mypy] +plugins = ['pydantic.mypy'] +strict = true +python_version = "3.10" +ignore_missing_imports = true +disallow_untyped_defs = true +no_implicit_optional = true +check_untyped_defs = true +warn_return_any = true +show_error_codes = true +warn_unused_ignores = false +disallow_incomplete_defs = true +disallow_untyped_decorators = true + +[tool.bandit] +targets = ["agent_framework_orchestrations"] +exclude_dirs = ["tests"] + +[tool.poe] +executor.type = 
"uv" +include = "../../shared_tasks.toml" +[tool.poe.tasks] +mypy = "mypy --config-file $POE_ROOT/pyproject.toml agent_framework_orchestrations" +test = "pytest --cov=agent_framework_orchestrations --cov-report=term-missing:skip-covered tests" + +[build-system] +requires = ["flit-core >= 3.11,<4.0"] +build-backend = "flit_core.buildapi" diff --git a/python/packages/orchestrations/tests/test_concurrent.py b/python/packages/orchestrations/tests/test_concurrent.py new file mode 100644 index 0000000000..afee1cb21f --- /dev/null +++ b/python/packages/orchestrations/tests/test_concurrent.py @@ -0,0 +1,549 @@ +# Copyright (c) Microsoft. All rights reserved. + +from typing import Any, cast + +import pytest +from agent_framework import ( + AgentExecutorRequest, + AgentExecutorResponse, + AgentResponse, + ChatMessage, + ConcurrentBuilder, + Executor, + WorkflowContext, + WorkflowOutputEvent, + WorkflowRunState, + WorkflowStatusEvent, + handler, +) +from agent_framework._workflows._checkpoint import InMemoryCheckpointStorage +from typing_extensions import Never + + +class _FakeAgentExec(Executor): + """Test executor that mimics an agent by emitting an AgentExecutorResponse. + + It takes the incoming AgentExecutorRequest, produces a single assistant message + with the configured reply text, and sends an AgentExecutorResponse that includes + full_conversation (the original user prompt followed by the assistant message). 
+ """ + + def __init__(self, id: str, reply_text: str) -> None: + super().__init__(id) + self._reply_text = reply_text + + @handler + async def run(self, request: AgentExecutorRequest, ctx: WorkflowContext[AgentExecutorResponse]) -> None: + response = AgentResponse(messages=ChatMessage("assistant", text=self._reply_text)) + full_conversation = list(request.messages) + list(response.messages) + await ctx.send_message(AgentExecutorResponse(self.id, response, full_conversation=full_conversation)) + + +def test_concurrent_builder_rejects_empty_participants() -> None: + with pytest.raises(ValueError): + ConcurrentBuilder().participants([]) + + +def test_concurrent_builder_rejects_duplicate_executors() -> None: + a = _FakeAgentExec("dup", "A") + b = _FakeAgentExec("dup", "B") # same executor id + with pytest.raises(ValueError): + ConcurrentBuilder().participants([a, b]) + + +def test_concurrent_builder_rejects_duplicate_executors_from_factories() -> None: + """Test that duplicate executor IDs from factories are detected at build time.""" + + def create_dup1() -> Executor: + return _FakeAgentExec("dup", "A") + + def create_dup2() -> Executor: + return _FakeAgentExec("dup", "B") # same executor id + + builder = ConcurrentBuilder().register_participants([create_dup1, create_dup2]) + with pytest.raises(ValueError, match="Duplicate executor ID 'dup' detected in workflow."): + builder.build() + + +def test_concurrent_builder_rejects_mixed_participants_and_factories() -> None: + """Test that mixing .participants() and .register_participants() raises an error.""" + # Case 1: participants first, then register_participants + with pytest.raises(ValueError, match="Cannot mix .participants"): + ( + ConcurrentBuilder() + .participants([_FakeAgentExec("a", "A")]) + .register_participants([lambda: _FakeAgentExec("b", "B")]) + ) + + # Case 2: register_participants first, then participants + with pytest.raises(ValueError, match="Cannot mix .participants"): + ( + ConcurrentBuilder() + 
.register_participants([lambda: _FakeAgentExec("a", "A")]) + .participants([_FakeAgentExec("b", "B")]) + ) + + +def test_concurrent_builder_rejects_multiple_calls_to_participants() -> None: + """Test that multiple calls to .participants() raises an error.""" + with pytest.raises(ValueError, match=r"participants\(\) has already been called"): + (ConcurrentBuilder().participants([_FakeAgentExec("a", "A")]).participants([_FakeAgentExec("b", "B")])) + + +def test_concurrent_builder_rejects_multiple_calls_to_register_participants() -> None: + """Test that multiple calls to .register_participants() raises an error.""" + with pytest.raises(ValueError, match=r"register_participants\(\) has already been called"): + ( + ConcurrentBuilder() + .register_participants([lambda: _FakeAgentExec("a", "A")]) + .register_participants([lambda: _FakeAgentExec("b", "B")]) + ) + + +async def test_concurrent_default_aggregator_emits_single_user_and_assistants() -> None: + # Three synthetic agent executors + e1 = _FakeAgentExec("agentA", "Alpha") + e2 = _FakeAgentExec("agentB", "Beta") + e3 = _FakeAgentExec("agentC", "Gamma") + + wf = ConcurrentBuilder().participants([e1, e2, e3]).build() + + completed = False + output: list[ChatMessage] | None = None + async for ev in wf.run_stream("prompt: hello world"): + if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + completed = True + elif isinstance(ev, WorkflowOutputEvent): + output = cast(list[ChatMessage], ev.data) + if completed and output is not None: + break + + assert completed + assert output is not None + messages: list[ChatMessage] = output + + # Expect one user message + one assistant message per participant + assert len(messages) == 1 + 3 + assert messages[0].role == "user" + assert "hello world" in messages[0].text + + assistant_texts = {m.text for m in messages[1:]} + assert assistant_texts == {"Alpha", "Beta", "Gamma"} + assert all(m.role == "assistant" for m in messages[1:]) + + +async def 
test_concurrent_custom_aggregator_callback_is_used() -> None: + # Two synthetic agent executors for brevity + e1 = _FakeAgentExec("agentA", "One") + e2 = _FakeAgentExec("agentB", "Two") + + async def summarize(results: list[AgentExecutorResponse]) -> str: + texts: list[str] = [] + for r in results: + msgs: list[ChatMessage] = r.agent_response.messages + texts.append(msgs[-1].text if msgs else "") + return " | ".join(sorted(texts)) + + wf = ConcurrentBuilder().participants([e1, e2]).with_aggregator(summarize).build() + + completed = False + output: str | None = None + async for ev in wf.run_stream("prompt: custom"): + if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + completed = True + elif isinstance(ev, WorkflowOutputEvent): + output = cast(str, ev.data) + if completed and output is not None: + break + + assert completed + assert output is not None + # Custom aggregator returns a string payload + assert isinstance(output, str) + assert output == "One | Two" + + +async def test_concurrent_custom_aggregator_sync_callback_is_used() -> None: + e1 = _FakeAgentExec("agentA", "One") + e2 = _FakeAgentExec("agentB", "Two") + + # Sync callback with ctx parameter (should run via asyncio.to_thread) + def summarize_sync(results: list[AgentExecutorResponse], _ctx: WorkflowContext[Any]) -> str: # type: ignore[unused-argument] + texts: list[str] = [] + for r in results: + msgs: list[ChatMessage] = r.agent_response.messages + texts.append(msgs[-1].text if msgs else "") + return " | ".join(sorted(texts)) + + wf = ConcurrentBuilder().participants([e1, e2]).with_aggregator(summarize_sync).build() + + completed = False + output: str | None = None + async for ev in wf.run_stream("prompt: custom sync"): + if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + completed = True + elif isinstance(ev, WorkflowOutputEvent): + output = cast(str, ev.data) + if completed and output is not None: + break + + assert completed + assert output 
is not None + assert isinstance(output, str) + assert output == "One | Two" + + +def test_concurrent_custom_aggregator_uses_callback_name_for_id() -> None: + e1 = _FakeAgentExec("agentA", "One") + e2 = _FakeAgentExec("agentB", "Two") + + def summarize(results: list[AgentExecutorResponse]) -> str: # type: ignore[override] + return str(len(results)) + + wf = ConcurrentBuilder().participants([e1, e2]).with_aggregator(summarize).build() + + assert "summarize" in wf.executors + aggregator = wf.executors["summarize"] + assert aggregator.id == "summarize" + + +async def test_concurrent_with_aggregator_executor_instance() -> None: + """Test with_aggregator using an Executor instance (not factory).""" + + class CustomAggregator(Executor): + @handler + async def aggregate(self, results: list[AgentExecutorResponse], ctx: WorkflowContext[Never, str]) -> None: + texts: list[str] = [] + for r in results: + msgs: list[ChatMessage] = r.agent_response.messages + texts.append(msgs[-1].text if msgs else "") + await ctx.yield_output(" & ".join(sorted(texts))) + + e1 = _FakeAgentExec("agentA", "One") + e2 = _FakeAgentExec("agentB", "Two") + + aggregator_instance = CustomAggregator(id="instance_aggregator") + wf = ConcurrentBuilder().participants([e1, e2]).with_aggregator(aggregator_instance).build() + + completed = False + output: str | None = None + async for ev in wf.run_stream("prompt: instance test"): + if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + completed = True + elif isinstance(ev, WorkflowOutputEvent): + output = cast(str, ev.data) + if completed and output is not None: + break + + assert completed + assert output is not None + assert isinstance(output, str) + assert output == "One & Two" + + +async def test_concurrent_with_aggregator_executor_factory() -> None: + """Test with_aggregator using an Executor factory.""" + + class CustomAggregator(Executor): + @handler + async def aggregate(self, results: list[AgentExecutorResponse], ctx: 
WorkflowContext[Never, str]) -> None: + texts: list[str] = [] + for r in results: + msgs: list[ChatMessage] = r.agent_response.messages + texts.append(msgs[-1].text if msgs else "") + await ctx.yield_output(" | ".join(sorted(texts))) + + e1 = _FakeAgentExec("agentA", "One") + e2 = _FakeAgentExec("agentB", "Two") + + wf = ( + ConcurrentBuilder() + .participants([e1, e2]) + .register_aggregator(lambda: CustomAggregator(id="custom_aggregator")) + .build() + ) + + completed = False + output: str | None = None + async for ev in wf.run_stream("prompt: factory test"): + if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + completed = True + elif isinstance(ev, WorkflowOutputEvent): + output = cast(str, ev.data) + if completed and output is not None: + break + + assert completed + assert output is not None + assert isinstance(output, str) + assert output == "One | Two" + + +async def test_concurrent_with_aggregator_executor_factory_with_default_id() -> None: + """Test with_aggregator using an Executor class directly as factory (with default __init__ parameters).""" + + class CustomAggregator(Executor): + def __init__(self, id: str = "default_aggregator") -> None: + super().__init__(id) + + @handler + async def aggregate(self, results: list[AgentExecutorResponse], ctx: WorkflowContext[Never, str]) -> None: + texts: list[str] = [] + for r in results: + msgs: list[ChatMessage] = r.agent_response.messages + texts.append(msgs[-1].text if msgs else "") + await ctx.yield_output(" | ".join(sorted(texts))) + + e1 = _FakeAgentExec("agentA", "One") + e2 = _FakeAgentExec("agentB", "Two") + + wf = ConcurrentBuilder().participants([e1, e2]).register_aggregator(CustomAggregator).build() + + completed = False + output: str | None = None + async for ev in wf.run_stream("prompt: factory test"): + if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + completed = True + elif isinstance(ev, WorkflowOutputEvent): + output = cast(str, ev.data) 
        if completed and output is not None:
            break

    assert completed
    assert output is not None
    assert isinstance(output, str)
    assert output == "One | Two"


def test_concurrent_builder_rejects_multiple_calls_to_with_aggregator() -> None:
    """Test that multiple calls to .with_aggregator() raises an error."""

    def summarize(results: list[AgentExecutorResponse]) -> str:  # type: ignore[override]
        return str(len(results))

    with pytest.raises(ValueError, match=r"with_aggregator\(\) has already been called"):
        (ConcurrentBuilder().with_aggregator(summarize).with_aggregator(summarize))


def test_concurrent_builder_rejects_multiple_calls_to_register_aggregator() -> None:
    """Test that multiple calls to .register_aggregator() raises an error."""

    class CustomAggregator(Executor):
        pass

    with pytest.raises(ValueError, match=r"register_aggregator\(\) has already been called"):
        (
            ConcurrentBuilder()
            .register_aggregator(lambda: CustomAggregator(id="agg1"))
            .register_aggregator(lambda: CustomAggregator(id="agg2"))
        )


async def test_concurrent_checkpoint_resume_round_trip() -> None:
    """Run with checkpointing, then resume from a checkpoint and verify identical output."""
    storage = InMemoryCheckpointStorage()

    participants = (
        _FakeAgentExec("agentA", "Alpha"),
        _FakeAgentExec("agentB", "Beta"),
        _FakeAgentExec("agentC", "Gamma"),
    )

    wf = ConcurrentBuilder().participants(list(participants)).with_checkpointing(storage).build()

    baseline_output: list[ChatMessage] | None = None
    async for ev in wf.run_stream("checkpoint concurrent"):
        if isinstance(ev, WorkflowOutputEvent):
            baseline_output = ev.data  # type: ignore[assignment]
        if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE:
            break

    assert baseline_output is not None

    checkpoints = await storage.list_checkpoints()
    assert checkpoints
    checkpoints.sort(key=lambda cp: cp.timestamp)
    # Prefer a superstep checkpoint; fall back to the most recent one otherwise.
    resume_checkpoint = next(
        (cp for cp in checkpoints if (cp.metadata or {}).get("checkpoint_type") == "superstep"),
        checkpoints[-1],
    )

    # Fresh executor instances: resume must restore state from storage, not reuse objects.
    resumed_participants = (
        _FakeAgentExec("agentA", "Alpha"),
        _FakeAgentExec("agentB", "Beta"),
        _FakeAgentExec("agentC", "Gamma"),
    )
    wf_resume = ConcurrentBuilder().participants(list(resumed_participants)).with_checkpointing(storage).build()

    resumed_output: list[ChatMessage] | None = None
    async for ev in wf_resume.run_stream(checkpoint_id=resume_checkpoint.checkpoint_id):
        if isinstance(ev, WorkflowOutputEvent):
            resumed_output = ev.data  # type: ignore[assignment]
        if isinstance(ev, WorkflowStatusEvent) and ev.state in (
            WorkflowRunState.IDLE,
            WorkflowRunState.IDLE_WITH_PENDING_REQUESTS,
        ):
            break

    assert resumed_output is not None
    assert [m.role for m in resumed_output] == [m.role for m in baseline_output]
    assert [m.text for m in resumed_output] == [m.text for m in baseline_output]


async def test_concurrent_checkpoint_runtime_only() -> None:
    """Test checkpointing configured ONLY at runtime, not at build time."""
    storage = InMemoryCheckpointStorage()

    agents = [_FakeAgentExec(id="agent1", reply_text="A1"), _FakeAgentExec(id="agent2", reply_text="A2")]
    wf = ConcurrentBuilder().participants(agents).build()

    baseline_output: list[ChatMessage] | None = None
    async for ev in wf.run_stream("runtime checkpoint test", checkpoint_storage=storage):
        if isinstance(ev, WorkflowOutputEvent):
            baseline_output = ev.data  # type: ignore[assignment]
        if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE:
            break

    assert baseline_output is not None

    checkpoints = await storage.list_checkpoints()
    assert checkpoints
    checkpoints.sort(key=lambda cp: cp.timestamp)

    resume_checkpoint = next(
        (cp for cp in checkpoints if (cp.metadata or {}).get("checkpoint_type") == "superstep"),
        checkpoints[-1],
    )

    resumed_agents = [_FakeAgentExec(id="agent1", reply_text="A1"), _FakeAgentExec(id="agent2", reply_text="A2")]
    wf_resume = ConcurrentBuilder().participants(resumed_agents).build()

    resumed_output: list[ChatMessage] | None = None
    async for ev in wf_resume.run_stream(checkpoint_id=resume_checkpoint.checkpoint_id, checkpoint_storage=storage):
        if isinstance(ev, WorkflowOutputEvent):
            resumed_output = ev.data  # type: ignore[assignment]
        if isinstance(ev, WorkflowStatusEvent) and ev.state in (
            WorkflowRunState.IDLE,
            WorkflowRunState.IDLE_WITH_PENDING_REQUESTS,
        ):
            break

    assert resumed_output is not None
    assert [m.role for m in resumed_output] == [m.role for m in baseline_output]


async def test_concurrent_checkpoint_runtime_overrides_buildtime() -> None:
    """Test that runtime checkpoint storage overrides build-time configuration."""
    import tempfile

    with tempfile.TemporaryDirectory() as temp_dir1, tempfile.TemporaryDirectory() as temp_dir2:
        from agent_framework._workflows._checkpoint import FileCheckpointStorage

        buildtime_storage = FileCheckpointStorage(temp_dir1)
        runtime_storage = FileCheckpointStorage(temp_dir2)

        agents = [_FakeAgentExec(id="agent1", reply_text="A1"), _FakeAgentExec(id="agent2", reply_text="A2")]
        wf = ConcurrentBuilder().participants(agents).with_checkpointing(buildtime_storage).build()

        baseline_output: list[ChatMessage] | None = None
        async for ev in wf.run_stream("override test", checkpoint_storage=runtime_storage):
            if isinstance(ev, WorkflowOutputEvent):
                baseline_output = ev.data  # type: ignore[assignment]
            if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE:
                break

        assert baseline_output is not None

        buildtime_checkpoints = await buildtime_storage.list_checkpoints()
        runtime_checkpoints = await runtime_storage.list_checkpoints()

        assert len(runtime_checkpoints) > 0, "Runtime storage should have checkpoints"
        assert len(buildtime_checkpoints) == 0, "Build-time storage should have no checkpoints when overridden"


def test_concurrent_builder_rejects_empty_participant_factories() -> None:
    """An empty factory list is rejected immediately, before build()."""
    with pytest.raises(ValueError):
        ConcurrentBuilder().register_participants([])


async def test_concurrent_builder_reusable_after_build_with_participants() -> None:
    """Test that the builder can be reused to build multiple identical workflows with participants()."""
    e1 = _FakeAgentExec("agentA", "One")
    e2 = _FakeAgentExec("agentB", "Two")

    builder = ConcurrentBuilder().participants([e1, e2])

    builder.build()

    # build() must not consume or clear the configured participants.
    assert builder._participants[0] is e1  # type: ignore
    assert builder._participants[1] is e2  # type: ignore
    assert builder._participant_factories == []  # type: ignore


async def test_concurrent_builder_reusable_after_build_with_factories() -> None:
    """Test that the builder can be reused to build multiple workflows with register_participants()."""
    call_count = 0

    def create_agent_executor_a() -> Executor:
        nonlocal call_count
        call_count += 1
        return _FakeAgentExec("agentA", "One")

    def create_agent_executor_b() -> Executor:
        nonlocal call_count
        call_count += 1
        return _FakeAgentExec("agentB", "Two")

    builder = ConcurrentBuilder().register_participants([create_agent_executor_a, create_agent_executor_b])

    # Build the first workflow
    wf1 = builder.build()

    assert builder._participants == []  # type: ignore
    assert len(builder._participant_factories) == 2  # type: ignore
    assert call_count == 2

    # Build the second workflow
    wf2 = builder.build()
    assert call_count == 4

    # Verify that the two workflows have different executor instances
    assert wf1.executors["agentA"] is not wf2.executors["agentA"]
    assert wf1.executors["agentB"] is not wf2.executors["agentB"]


async def test_concurrent_with_register_participants() -> None:
    """Test workflow creation using register_participants with factories."""

    def create_agent1() -> Executor:
        return _FakeAgentExec("agentA", "Alpha")

    def create_agent2() -> Executor:
        return _FakeAgentExec("agentB", "Beta")

    def create_agent3() -> Executor:
        return _FakeAgentExec("agentC", "Gamma")

    wf = ConcurrentBuilder().register_participants([create_agent1, create_agent2, create_agent3]).build()

    completed = False
    output: list[ChatMessage] | None = None
    async for ev in wf.run_stream("test prompt"):
        if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE:
            completed = True
        elif isinstance(ev, WorkflowOutputEvent):
            output = cast(list[ChatMessage], ev.data)
        if completed and output is not None:
            break

    assert completed
    assert output is not None
    messages: list[ChatMessage] = output

    # Expect one user message + one assistant message per participant
    assert len(messages) == 1 + 3
    assert messages[0].role == "user"
    assert "test prompt" in messages[0].text

    assistant_texts = {m.text for m in messages[1:]}
    assert assistant_texts == {"Alpha", "Beta", "Gamma"}
    assert all(m.role == "assistant" for m in messages[1:])
diff --git a/python/packages/orchestrations/tests/test_group_chat.py b/python/packages/orchestrations/tests/test_group_chat.py
new file mode 100644
index 0000000000..57e7ac279c
--- /dev/null
+++ b/python/packages/orchestrations/tests/test_group_chat.py
@@ -0,0 +1,1333 @@
+# Copyright (c) Microsoft. All rights reserved.

from collections.abc import AsyncIterable, Callable, Sequence
from typing import Any, cast

import pytest
from agent_framework import (
    AgentExecutorResponse,
    AgentRequestInfoResponse,
    AgentResponse,
    AgentResponseUpdate,
    AgentThread,
    BaseAgent,
    BaseGroupChatOrchestrator,
    ChatAgent,
    ChatMessage,
    ChatResponse,
    ChatResponseUpdate,
    Content,
    GroupChatBuilder,
    GroupChatState,
    MagenticContext,
    MagenticManagerBase,
    MagenticProgressLedger,
    MagenticProgressLedgerItem,
    RequestInfoEvent,
    WorkflowOutputEvent,
    WorkflowRunState,
    WorkflowStatusEvent,
)
from agent_framework._workflows._checkpoint import InMemoryCheckpointStorage


class StubAgent(BaseAgent):
    """Minimal agent that always replies with a fixed text, used to drive group-chat tests."""

    def __init__(self, agent_name: str, reply_text: str, **kwargs: Any) -> None:
        super().__init__(name=agent_name, description=f"Stub agent {agent_name}", **kwargs)
        self._reply_text = reply_text

    async def run(  # type: ignore[override]
        self,
        messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None,
        *,
        thread: AgentThread | None = None,
        **kwargs: Any,
    ) -> AgentResponse:
        response = ChatMessage("assistant", [self._reply_text], author_name=self.name)
        return AgentResponse(messages=[response])

    def run_stream(  # type: ignore[override]
        self,
        messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None,
        *,
        thread: AgentThread | None = None,
        **kwargs: Any,
    ) -> AsyncIterable[AgentResponseUpdate]:
        async def _stream() -> AsyncIterable[AgentResponseUpdate]:
            yield AgentResponseUpdate(
                contents=[Content.from_text(text=self._reply_text)], role="assistant", author_name=self.name
            )

        return _stream()


class MockChatClient:
    """Mock chat client that raises NotImplementedError for all methods."""

    additional_properties: dict[str, Any]

    async def get_response(self, messages: Any, **kwargs: Any) -> ChatResponse:
        raise NotImplementedError

    def get_streaming_response(self, messages: Any, **kwargs: Any) -> AsyncIterable[ChatResponseUpdate]:
        raise NotImplementedError


class StubManagerAgent(ChatAgent):
    """Scripted orchestrator agent: first call selects "agent", second call terminates.

    The JSON payloads mirror the AgentOrchestrationOutput wire format expected by the
    group-chat orchestrator; they must stay byte-for-byte intact.
    """

    def __init__(self) -> None:
        super().__init__(chat_client=MockChatClient(), name="manager_agent", description="Stub manager")
        self._call_count = 0

    async def run(
        self,
        messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None,
        *,
        thread: AgentThread | None = None,
        **kwargs: Any,
    ) -> AgentResponse:
        if self._call_count == 0:
            self._call_count += 1
            # First call: select the agent (using AgentOrchestrationOutput format)
            payload = {"terminate": False, "reason": "Selecting agent", "next_speaker": "agent", "final_message": None}
            return AgentResponse(
                messages=[
                    ChatMessage(
                        role="assistant",
                        text=(
                            '{"terminate": false, "reason": "Selecting agent", '
                            '"next_speaker": "agent", "final_message": null}'
                        ),
                        author_name=self.name,
                    )
                ],
                value=payload,
            )

        # Second call: terminate
        payload = {
            "terminate": True,
            "reason": "Task complete",
            "next_speaker": None,
            "final_message": "agent manager final",
        }
        return AgentResponse(
            messages=[
                ChatMessage(
                    role="assistant",
                    text=(
                        '{"terminate": true, "reason": "Task complete", '
                        '"next_speaker": null, "final_message": "agent manager final"}'
                    ),
                    author_name=self.name,
                )
            ],
            value=payload,
        )

    def run_stream(
        self,
        messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None,
        *,
        thread: AgentThread | None = None,
        **kwargs: Any,
    ) -> AsyncIterable[AgentResponseUpdate]:
        if self._call_count == 0:
            self._call_count += 1

            async def _stream_initial() -> AsyncIterable[AgentResponseUpdate]:
                yield AgentResponseUpdate(
                    contents=[
                        Content.from_text(
                            text=(
                                '{"terminate": false, "reason": "Selecting agent", '
                                '"next_speaker": "agent", "final_message": null}'
                            )
                        )
                    ],
                    role="assistant",
                    author_name=self.name,
                )

            return _stream_initial()

        async def _stream_final() -> AsyncIterable[AgentResponseUpdate]:
            yield AgentResponseUpdate(
                contents=[
                    Content.from_text(
                        text=(
                            '{"terminate": true, "reason": "Task complete", '
                            '"next_speaker": null, "final_message": "agent manager final"}'
                        )
                    )
                ],
                role="assistant",
                author_name=self.name,
            )

        return _stream_final()


def make_sequence_selector() -> Callable[[GroupChatState], str]:
    """Build a selector that alternates: first participant, second, then first again."""
    state_counter = {"value": 0}

    def _selector(state: GroupChatState) -> str:
        participants = list(state.participants.keys())
        step = state_counter["value"]
        state_counter["value"] = step + 1
        if step == 0:
            return participants[0]
        if step == 1 and len(participants) > 1:
            return participants[1]
        # Return first participant to continue (will be limited by max_rounds in tests)
        return participants[0]

    return _selector


class StubMagenticManager(MagenticManagerBase):
    """Deterministic Magentic manager: one working round, then declares the request satisfied."""

    def __init__(self) -> None:
        super().__init__(max_stall_count=3, max_round_count=5)
        self._round = 0

    async def plan(self, magentic_context: MagenticContext) -> ChatMessage:
        return ChatMessage("assistant", ["plan"], author_name="magentic_manager")

    async def replan(self, magentic_context: MagenticContext) -> ChatMessage:
        return await self.plan(magentic_context)

    async def create_progress_ledger(self, magentic_context: MagenticContext) -> MagenticProgressLedger:
        participants = list(magentic_context.participant_descriptions.keys())
        target = participants[0] if participants else "agent"
        if self._round == 0:
            self._round += 1
            return MagenticProgressLedger(
                is_request_satisfied=MagenticProgressLedgerItem(reason="", answer=False),
                is_in_loop=MagenticProgressLedgerItem(reason="", answer=False),
                is_progress_being_made=MagenticProgressLedgerItem(reason="", answer=True),
                next_speaker=MagenticProgressLedgerItem(reason="", answer=target),
                instruction_or_question=MagenticProgressLedgerItem(reason="", answer="respond"),
            )
        return MagenticProgressLedger(
            is_request_satisfied=MagenticProgressLedgerItem(reason="", answer=True),
            is_in_loop=MagenticProgressLedgerItem(reason="", answer=False),
            is_progress_being_made=MagenticProgressLedgerItem(reason="", answer=True),
            next_speaker=MagenticProgressLedgerItem(reason="", answer=target),
            instruction_or_question=MagenticProgressLedgerItem(reason="", answer=""),
        )

    async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatMessage:
        return ChatMessage("assistant", ["final"], author_name="magentic_manager")


async def test_group_chat_builder_basic_flow() -> None:
    """Both participants speak when driven by the alternating selector."""
    selector = make_sequence_selector()
    alpha = StubAgent("alpha", "ack from alpha")
    beta = StubAgent("beta", "ack from beta")

    workflow = (
        GroupChatBuilder()
        .with_orchestrator(selection_func=selector, orchestrator_name="manager")
        .participants([alpha, beta])
        .with_max_rounds(2)  # Limit rounds to prevent infinite loop
        .build()
    )

    outputs: list[list[ChatMessage]] = []
    async for event in workflow.run_stream("coordinate task"):
        if isinstance(event, WorkflowOutputEvent):
            data = event.data
            if isinstance(data, list):
                outputs.append(cast(list[ChatMessage], data))

    assert len(outputs) == 1
    assert len(outputs[0]) >= 1
    # Check that both agents contributed
    authors = {msg.author_name for msg in outputs[0] if msg.author_name in ["alpha", "beta"]}
    assert len(authors) == 2


async def test_group_chat_as_agent_accepts_conversation() -> None:
    """A group-chat workflow wrapped via as_agent() accepts a pre-existing conversation."""
    selector = make_sequence_selector()
    alpha = StubAgent("alpha", "ack from alpha")
    beta = StubAgent("beta", "ack from beta")

    workflow = (
        GroupChatBuilder()
        .with_orchestrator(selection_func=selector, orchestrator_name="manager")
        .participants([alpha, beta])
        .with_max_rounds(2)  # Limit rounds to prevent infinite loop
        .build()
    )

    agent = workflow.as_agent(name="group-chat-agent")
    conversation = [
        ChatMessage("user", ["kickoff"], author_name="user"),
        ChatMessage("assistant", ["noted"], author_name="alpha"),
    ]
    response = await agent.run(conversation)

    assert response.messages, "Expected agent conversation output"


# Comprehensive tests for group chat functionality


class TestGroupChatBuilder:
    """Tests for GroupChatBuilder validation and configuration."""

    def test_build_without_manager_raises_error(self) -> None:
        """Test that building without a manager raises ValueError."""
        agent = StubAgent("test", "response")

        builder = GroupChatBuilder().participants([agent])

        with pytest.raises(
            ValueError, match=r"No orchestrator has been configured\. Call with_orchestrator\(\) to set one\."
        ):
            builder.build()

    def test_build_without_participants_raises_error(self) -> None:
        """Test that building without participants raises ValueError."""

        def selector(state: GroupChatState) -> str:
            return "agent"

        builder = GroupChatBuilder().with_orchestrator(selection_func=selector)

        with pytest.raises(
            ValueError,
            match=r"No participants provided\. Call \.participants\(\) or \.register_participants\(\) first\.",
        ):
            builder.build()

    def test_duplicate_manager_configuration_raises_error(self) -> None:
        """Test that configuring multiple managers raises ValueError."""

        def selector(state: GroupChatState) -> str:
            return "agent"

        builder = GroupChatBuilder().with_orchestrator(selection_func=selector)

        with pytest.raises(
            ValueError,
            match=r"A selection function has already been configured\. Call with_orchestrator\(\.\.\.\) once only\.",
        ):
            builder.with_orchestrator(selection_func=selector)

    def test_empty_participants_raises_error(self) -> None:
        """Test that empty participants list raises ValueError."""

        def selector(state: GroupChatState) -> str:
            return "agent"

        builder = GroupChatBuilder().with_orchestrator(selection_func=selector)

        with pytest.raises(ValueError, match="participants cannot be empty"):
            builder.participants([])

    def test_duplicate_participant_names_raises_error(self) -> None:
        """Test that duplicate participant names raise ValueError."""
        agent1 = StubAgent("test", "response1")
        agent2 = StubAgent("test", "response2")

        def selector(state: GroupChatState) -> str:
            return "agent"

        builder = GroupChatBuilder().with_orchestrator(selection_func=selector)

        with pytest.raises(ValueError, match="Duplicate participant name 'test'"):
            builder.participants([agent1, agent2])

    def test_agent_without_name_raises_error(self) -> None:
        """Test that agent without name attribute raises ValueError."""

        class AgentWithoutName(BaseAgent):
            def __init__(self) -> None:
                super().__init__(name="", description="test")

            async def run(self, messages: Any = None, *, thread: Any = None, **kwargs: Any) -> AgentResponse:
                return AgentResponse(messages=[])

            def run_stream(
                self, messages: Any = None, *, thread: Any = None, **kwargs: Any
            ) -> AsyncIterable[AgentResponseUpdate]:
                async def _stream() -> AsyncIterable[AgentResponseUpdate]:
                    yield AgentResponseUpdate(contents=[])

                return _stream()

        agent = AgentWithoutName()

        def selector(state: GroupChatState) -> str:
            return "agent"

        builder = GroupChatBuilder().with_orchestrator(selection_func=selector)

        with pytest.raises(ValueError, match="AgentProtocol participants must have a non-empty name"):
            builder.participants([agent])

    def test_empty_participant_name_raises_error(self) -> None:
        """Test that empty participant name raises ValueError."""
        agent = StubAgent("", "response")  # Agent with empty name

        def selector(state: GroupChatState) -> str:
            return "agent"

        builder = GroupChatBuilder().with_orchestrator(selection_func=selector)

        with pytest.raises(ValueError, match="AgentProtocol participants must have a non-empty name"):
            builder.participants([agent])


class TestGroupChatWorkflow:
    """Tests for GroupChat workflow functionality."""

    async def test_max_rounds_enforcement(self) -> None:
        """Test that max_rounds properly limits conversation rounds."""
        call_count = {"value": 0}

        def selector(state: GroupChatState) -> str:
            call_count["value"] += 1
            # Always return the agent name to try to continue indefinitely
            return "agent"

        agent = StubAgent("agent", "response")

        workflow = (
            GroupChatBuilder()
            .with_orchestrator(selection_func=selector)
            .participants([agent])
            .with_max_rounds(2)  # Limit to 2 rounds
            .build()
        )

        outputs: list[list[ChatMessage]] = []
        async for event in workflow.run_stream("test task"):
            if isinstance(event, WorkflowOutputEvent):
                data = event.data
                if isinstance(data, list):
                    outputs.append(cast(list[ChatMessage], data))

        # Should have terminated due to max_rounds, expect at least one output
        assert len(outputs) >= 1
        # The final message in the conversation should be about round limit
        conversation = outputs[-1]
        assert len(conversation) >= 1
        final_output = conversation[-1]
        assert "maximum number of rounds" in final_output.text.lower()

    async def test_termination_condition_halts_conversation(self) -> None:
        """Test that a custom termination condition stops the workflow."""

        def selector(state: GroupChatState) -> str:
            return "agent"

        def termination_condition(conversation: list[ChatMessage]) -> bool:
            replies = [msg for msg in conversation if msg.role == "assistant" and msg.author_name == "agent"]
            return len(replies) >= 2

        agent = StubAgent("agent", "response")

        workflow = (
            GroupChatBuilder()
            .with_orchestrator(selection_func=selector)
            .participants([agent])
            .with_termination_condition(termination_condition)
            .build()
        )

        outputs: list[list[ChatMessage]] = []
        async for event in workflow.run_stream("test task"):
            if isinstance(event, WorkflowOutputEvent):
                data = event.data
                if isinstance(data, list):
                    outputs.append(cast(list[ChatMessage], data))

        assert outputs, "Expected termination to yield output"
        conversation = outputs[-1]
        agent_replies = [msg for msg in conversation if msg.author_name == "agent" and msg.role == "assistant"]
        assert len(agent_replies) == 2
        final_output = conversation[-1]
        # The orchestrator uses its ID as author_name by default
        assert "termination condition" in final_output.text.lower()

    async def test_termination_condition_agent_manager_finalizes(self) -> None:
        """Test that termination condition with agent orchestrator produces default termination message."""
        manager = StubManagerAgent()
        worker = StubAgent("agent", "response")

        workflow = (
            GroupChatBuilder()
            .with_orchestrator(agent=manager)
            .participants([worker])
            .with_termination_condition(lambda conv: any(msg.author_name == "agent" for msg in conv))
            .build()
        )

        outputs: list[list[ChatMessage]] = []
        async for event in workflow.run_stream("test task"):
            if isinstance(event, WorkflowOutputEvent):
                data = event.data
                if isinstance(data, list):
                    outputs.append(cast(list[ChatMessage], data))

        assert outputs, "Expected termination to yield output"
        conversation = outputs[-1]
        assert conversation[-1].text == BaseGroupChatOrchestrator.TERMINATION_CONDITION_MET_MESSAGE
        assert conversation[-1].author_name == manager.name

    async def test_unknown_participant_error(self) -> None:
        """Test that unknown participant selection raises error."""

        def selector(state: GroupChatState) -> str:
            return "unknown_agent"  # Return non-existent participant

        agent = StubAgent("agent", "response")
        workflow = GroupChatBuilder().with_orchestrator(selection_func=selector).participants([agent]).build()

        with pytest.raises(RuntimeError, match="Selection function returned unknown participant 'unknown_agent'"):
            async for _ in workflow.run_stream("test task"):
                pass


class TestCheckpointing:
    """Tests for checkpointing functionality."""

    async def test_workflow_with_checkpointing(self) -> None:
        """Test that workflow works with checkpointing enabled."""

        def selector(state: GroupChatState) -> str:
            return "agent"

        agent = StubAgent("agent", "response")
        storage = InMemoryCheckpointStorage()

        workflow = (
            GroupChatBuilder()
            .with_orchestrator(selection_func=selector)
            .participants([agent])
            .with_max_rounds(1)
            .with_checkpointing(storage)
            .build()
        )

        outputs: list[list[ChatMessage]] = []
        async for event in workflow.run_stream("test task"):
            if isinstance(event, WorkflowOutputEvent):
                data = event.data
                if isinstance(data, list):
                    outputs.append(cast(list[ChatMessage], data))

        assert len(outputs) == 1  # Should complete normally


class TestConversationHandling:
    """Tests for different conversation input types."""

    async def test_handle_empty_conversation_raises_error(self) -> None:
        """Test that empty conversation list raises ValueError."""

        def selector(state: GroupChatState) -> str:
            return "agent"

        agent = StubAgent("agent", "response")

        workflow = (
            GroupChatBuilder()
            .with_orchestrator(selection_func=selector)
            .participants([agent])
            .with_max_rounds(1)
            .build()
        )

        with pytest.raises(ValueError, match="At least one ChatMessage is required to start the group chat workflow."):
            async for _ in workflow.run_stream([]):
                pass

    async def test_handle_string_input(self) -> None:
        """Test handling string input creates proper ChatMessage."""

        def selector(state: GroupChatState) -> str:
            # Verify the conversation has the user message
            assert len(state.conversation) > 0
            assert state.conversation[0].role == "user"
            assert state.conversation[0].text == "test string"
            return "agent"

        agent = StubAgent("agent", "response")

        workflow = (
            GroupChatBuilder()
            .with_orchestrator(selection_func=selector)
            .participants([agent])
            .with_max_rounds(1)
            .build()
        )

        outputs: list[list[ChatMessage]] = []
        async for event in workflow.run_stream("test string"):
            if isinstance(event, WorkflowOutputEvent):
                data = event.data
                if isinstance(data, list):
                    outputs.append(cast(list[ChatMessage], data))

        assert len(outputs) == 1

    async def test_handle_chat_message_input(self) -> None:
        """Test handling ChatMessage input directly."""
        task_message = ChatMessage("user", ["test message"])

        def selector(state: GroupChatState) -> str:
            # Verify the task message was preserved in conversation
            assert len(state.conversation) > 0
            assert state.conversation[0] == task_message
            return "agent"

        agent = StubAgent("agent", "response")

        workflow = (
            GroupChatBuilder()
            .with_orchestrator(selection_func=selector)
            .participants([agent])
            .with_max_rounds(1)
            .build()
        )

        outputs: list[list[ChatMessage]] = []
        async for event in workflow.run_stream(task_message):
            if isinstance(event, WorkflowOutputEvent):
                data = event.data
                if isinstance(data, list):
                    outputs.append(cast(list[ChatMessage], data))

        assert len(outputs) == 1

    async def test_handle_conversation_list_input(self) -> None:
        """Test handling conversation list preserves context."""
        conversation = [
            ChatMessage("system", ["system message"]),
            ChatMessage("user", ["user message"]),
        ]

        def selector(state: GroupChatState) -> str:
            # Verify conversation context is preserved
            assert len(state.conversation) >= 2
            assert state.conversation[-1].text == "user message"
            return "agent"

        agent = StubAgent("agent", "response")

        workflow = (
            GroupChatBuilder()
            .with_orchestrator(selection_func=selector)
            .participants([agent])
            .with_max_rounds(1)
            .build()
        )

        outputs: list[list[ChatMessage]] = []
        async for event in workflow.run_stream(conversation):
            if isinstance(event, WorkflowOutputEvent):
                data = event.data
                if isinstance(data, list):
                    outputs.append(cast(list[ChatMessage], data))

        assert len(outputs) == 1


class TestRoundLimitEnforcement:
    """Tests for round limit checking functionality."""

    async def test_round_limit_in_apply_directive(self) -> None:
        """Test round limit enforcement."""
        rounds_called = {"count": 0}

        def selector(state: GroupChatState) -> str:
            rounds_called["count"] += 1
            # Keep trying to select agent to test limit enforcement
            return "agent"

        agent = StubAgent("agent", "response")

        workflow = (
            GroupChatBuilder()
            .with_orchestrator(selection_func=selector)
            .participants([agent])
            .with_max_rounds(1)  # Very low limit
            .build()
        )

        outputs: list[list[ChatMessage]] = []
        async for event in workflow.run_stream("test"):
            if isinstance(event, WorkflowOutputEvent):
                data = event.data
                if isinstance(data, list):
                    outputs.append(cast(list[ChatMessage], data))

        # Should have at least one output (the round limit message)
        assert len(outputs) >= 1
        # The last message in the conversation should be about round limit
        conversation = outputs[-1]
        assert len(conversation) >= 1
        final_output = conversation[-1]
        assert "maximum number of rounds" in final_output.text.lower()

    async def test_round_limit_in_ingest_participant_message(self) -> None:
        """Test round limit enforcement after participant response."""
        responses_received = {"count": 0}

        def selector(state: GroupChatState) -> str:
            responses_received["count"] += 1
            if responses_received["count"] == 1:
                return "agent"  # First call selects agent
            return "agent"  # Try to continue, but should hit limit

        agent = StubAgent("agent", "response from agent")

        workflow = (
            GroupChatBuilder()
            .with_orchestrator(selection_func=selector)
            .participants([agent])
            .with_max_rounds(1)  # Hit limit after first response
            .build()
        )

        outputs: list[list[ChatMessage]] = []
        async for event in workflow.run_stream("test"):
            if isinstance(event, WorkflowOutputEvent):
                data = event.data
                if isinstance(data, list):
                    outputs.append(cast(list[ChatMessage], data))

        # Should have at least one output (the round limit message)
        assert len(outputs) >= 1
        # The last message in the conversation should be about round limit
        conversation = outputs[-1]
        assert len(conversation) >= 1
        final_output = conversation[-1]
        assert "maximum number of rounds" in final_output.text.lower()


async def test_group_chat_checkpoint_runtime_only() -> None:
    """Test checkpointing configured ONLY at runtime, not at build time."""
    storage = InMemoryCheckpointStorage()

    agent_a = StubAgent("agentA", "Reply from A")
    agent_b = StubAgent("agentB", "Reply from B")
    selector = make_sequence_selector()

    wf = (
        GroupChatBuilder()
        .participants([agent_a, agent_b])
        .with_orchestrator(selection_func=selector)
        .with_max_rounds(2)
        .build()
    )

    baseline_output: list[ChatMessage] | None = None
    async for ev in wf.run_stream("runtime checkpoint test", checkpoint_storage=storage):
        if isinstance(ev, WorkflowOutputEvent):
            baseline_output = cast(list[ChatMessage], ev.data) if isinstance(ev.data, list) else None  # type: ignore
        if isinstance(ev, WorkflowStatusEvent) and ev.state in (
            WorkflowRunState.IDLE,
            WorkflowRunState.IDLE_WITH_PENDING_REQUESTS,
        ):
            break

    assert baseline_output is not None

    checkpoints = await storage.list_checkpoints()
    assert len(checkpoints) > 0, "Runtime-only checkpointing should have created checkpoints"


async def test_group_chat_checkpoint_runtime_overrides_buildtime() -> None:
    """Test that runtime checkpoint storage overrides build-time configuration."""
    import tempfile

    with tempfile.TemporaryDirectory() as temp_dir1, tempfile.TemporaryDirectory() as temp_dir2:
        from agent_framework._workflows._checkpoint import FileCheckpointStorage

        buildtime_storage = FileCheckpointStorage(temp_dir1)
        runtime_storage = FileCheckpointStorage(temp_dir2)

        agent_a = StubAgent("agentA", "Reply from A")
        agent_b = StubAgent("agentB", "Reply from B")
        selector = make_sequence_selector()

        wf = (
            GroupChatBuilder()
            .participants([agent_a, agent_b])
            .with_orchestrator(selection_func=selector)
            .with_max_rounds(2)
            .with_checkpointing(buildtime_storage)
            .build()
        )
        baseline_output: list[ChatMessage] | None = None
        async for ev in wf.run_stream("override test", checkpoint_storage=runtime_storage):
            if isinstance(ev, WorkflowOutputEvent):
                baseline_output = cast(list[ChatMessage], ev.data) if isinstance(ev.data, list) else None  # type: ignore
            if isinstance(ev, WorkflowStatusEvent) and ev.state in (
                WorkflowRunState.IDLE,
                WorkflowRunState.IDLE_WITH_PENDING_REQUESTS,
            ):
                break

        assert baseline_output is not None

        buildtime_checkpoints = await buildtime_storage.list_checkpoints()
        runtime_checkpoints = await runtime_storage.list_checkpoints()

        assert len(runtime_checkpoints) > 0, "Runtime storage should have checkpoints"
        assert len(buildtime_checkpoints) == 0, "Build-time storage should have no checkpoints when overridden"


async def test_group_chat_with_request_info_filtering():
    """Test that with_request_info(agents=[...]) only pauses before specified agents run."""
    # Create agents - we want to verify only beta triggers pause
    alpha = StubAgent("alpha", "response from alpha")
    beta = StubAgent("beta", "response from beta")

    # Manager that selects alpha first, then beta, then finishes
    call_count = 0

    async def selector(state: GroupChatState) -> str:
        nonlocal call_count
        call_count += 1
        if call_count == 1:
            return "alpha"
        if call_count == 2:
            return "beta"
        # Return to alpha to continue
        return "alpha"

    workflow = (
        GroupChatBuilder()
        .with_orchestrator(selection_func=selector, orchestrator_name="manager")
        .participants([alpha, beta])
        .with_max_rounds(2)
        .with_request_info(agents=["beta"])  # Only pause before beta runs
        .build()
    )

    # Run until we get a request info event (should be before beta, not alpha)
    request_events: list[RequestInfoEvent] = []
    async for event in workflow.run_stream("test task"):
        if isinstance(event, RequestInfoEvent) and isinstance(event.data, AgentExecutorResponse):
            request_events.append(event)
        # Don't break - let stream complete naturally when paused

    # Should have exactly one request event before beta
    assert len(request_events) == 1
    request_event = request_events[0]

    # The target agent should be beta's executor ID
    assert isinstance(request_event.data, AgentExecutorResponse)
    assert request_event.source_executor_id == "beta"

    # Continue the workflow with a response
    outputs: list[WorkflowOutputEvent] = []
    async for event in workflow.send_responses_streaming({
        request_event.request_id: AgentRequestInfoResponse.approve()
    }):
        if isinstance(event, WorkflowOutputEvent):
            outputs.append(event)

    # Workflow should complete
    assert len(outputs) == 1


async def test_group_chat_with_request_info_no_filter_pauses_all():
    """Test that with_request_info() without agents pauses before all participants."""
    # Create agents
    alpha = StubAgent("alpha", "response from alpha")

    # Manager selects alpha then finishes
    call_count = 0

    async def selector(state: GroupChatState) -> str:
        nonlocal call_count
        call_count += 1
        if call_count == 1:
            return "alpha"
        # Keep returning alpha to continue
        return "alpha"

    workflow = (
        GroupChatBuilder()
        .with_orchestrator(selection_func=selector, orchestrator_name="manager")
        .participants([alpha])
        .with_max_rounds(1)
        .with_request_info()  # No filter - pause for all
        .build()
    )

    # Run until we get a request info event
    request_events: list[RequestInfoEvent] = []
    async for event in workflow.run_stream("test task"):
        if isinstance(event, RequestInfoEvent) and isinstance(event.data, AgentExecutorResponse):
            request_events.append(event)
            break

    # Should pause before alpha
    assert len(request_events) == 1
    assert request_events[0].source_executor_id == "alpha"


def test_group_chat_builder_with_request_info_returns_self():
    """Test that with_request_info() returns self for method chaining."""
    builder = GroupChatBuilder()
    result = builder.with_request_info()
    assert result is builder

    # Also test with agents parameter
    builder2 = GroupChatBuilder()
    result2 = builder2.with_request_info(agents=["test"])
    assert result2 is builder2


# region Participant Factory Tests


def test_group_chat_builder_rejects_empty_participant_factories():
    """Test that GroupChatBuilder rejects empty participant_factories list."""

    def selector(state: GroupChatState) -> str:
        return list(state.participants.keys())[0]

    with pytest.raises(ValueError, match=r"participant_factories cannot be empty"):
        GroupChatBuilder().register_participants([])

    # The failed register call must not leave partial state: build() still sees no participants.
    with pytest.raises(
        ValueError,
        match=r"No participants provided\. Call \.participants\(\) or \.register_participants\(\) first\.",
    ):
        GroupChatBuilder().with_orchestrator(selection_func=selector).build()


def test_group_chat_builder_rejects_mixing_participants_and_factories():
    """Test that mixing .participants() and .register_participants() raises an error."""
    alpha = StubAgent("alpha", "reply from alpha")

    # Case 1: participants first, then register_participants
    with pytest.raises(ValueError, match="Cannot mix .participants"):
        GroupChatBuilder().participants([alpha]).register_participants([lambda: StubAgent("beta", "reply from beta")])

    # Case 2: register_participants first, then participants
    with pytest.raises(ValueError, match="Cannot mix .participants"):
        GroupChatBuilder().register_participants([lambda: alpha]).participants([StubAgent("beta", "reply from beta")])


def test_group_chat_builder_rejects_multiple_calls_to_register_participants():
    """Test that multiple calls to .register_participants() raises an error."""
    with pytest.raises(
        ValueError, match=r"register_participants\(\) has already been called on this builder instance."
+ ): + ( + GroupChatBuilder() + .register_participants([lambda: StubAgent("alpha", "reply from alpha")]) + .register_participants([lambda: StubAgent("beta", "reply from beta")]) + ) + + +def test_group_chat_builder_rejects_multiple_calls_to_participants(): + """Test that multiple calls to .participants() raises an error.""" + with pytest.raises(ValueError, match="participants have already been set"): + ( + GroupChatBuilder() + .participants([StubAgent("alpha", "reply from alpha")]) + .participants([StubAgent("beta", "reply from beta")]) + ) + + +async def test_group_chat_with_participant_factories(): + """Test workflow creation using participant_factories.""" + call_count = 0 + + def create_alpha() -> StubAgent: + nonlocal call_count + call_count += 1 + return StubAgent("alpha", "reply from alpha") + + def create_beta() -> StubAgent: + nonlocal call_count + call_count += 1 + return StubAgent("beta", "reply from beta") + + selector = make_sequence_selector() + + workflow = ( + GroupChatBuilder() + .register_participants([create_alpha, create_beta]) + .with_orchestrator(selection_func=selector) + .with_max_rounds(2) + .build() + ) + + # Factories should be called during build + assert call_count == 2 + + outputs: list[WorkflowOutputEvent] = [] + async for event in workflow.run_stream("coordinate task"): + if isinstance(event, WorkflowOutputEvent): + outputs.append(event) + + assert len(outputs) == 1 + + +async def test_group_chat_participant_factories_reusable_builder(): + """Test that the builder can be reused to build multiple workflows with factories.""" + call_count = 0 + + def create_alpha() -> StubAgent: + nonlocal call_count + call_count += 1 + return StubAgent("alpha", "reply from alpha") + + def create_beta() -> StubAgent: + nonlocal call_count + call_count += 1 + return StubAgent("beta", "reply from beta") + + selector = make_sequence_selector() + + builder = ( + GroupChatBuilder() + .register_participants([create_alpha, create_beta]) + 
.with_orchestrator(selection_func=selector) + .with_max_rounds(2) + ) + + # Build first workflow + wf1 = builder.build() + assert call_count == 2 + + # Build second workflow + wf2 = builder.build() + assert call_count == 4 + + # Verify that the two workflows have different agent instances + assert wf1.executors["alpha"] is not wf2.executors["alpha"] + assert wf1.executors["beta"] is not wf2.executors["beta"] + + +async def test_group_chat_participant_factories_with_checkpointing(): + """Test checkpointing with participant_factories.""" + storage = InMemoryCheckpointStorage() + + def create_alpha() -> StubAgent: + return StubAgent("alpha", "reply from alpha") + + def create_beta() -> StubAgent: + return StubAgent("beta", "reply from beta") + + selector = make_sequence_selector() + + workflow = ( + GroupChatBuilder() + .register_participants([create_alpha, create_beta]) + .with_orchestrator(selection_func=selector) + .with_checkpointing(storage) + .with_max_rounds(2) + .build() + ) + + outputs: list[WorkflowOutputEvent] = [] + async for event in workflow.run_stream("checkpoint test"): + if isinstance(event, WorkflowOutputEvent): + outputs.append(event) + + assert outputs, "Should have workflow output" + + checkpoints = await storage.list_checkpoints() + assert checkpoints, "Checkpoints should be created during workflow execution" + + +# endregion + +# region Orchestrator Factory Tests + + +def test_group_chat_builder_rejects_multiple_orchestrator_configurations(): + """Test that configuring multiple orchestrators raises ValueError.""" + + def selector(state: GroupChatState) -> str: + return list(state.participants.keys())[0] + + def agent_factory() -> ChatAgent: + return cast(ChatAgent, StubManagerAgent()) + + builder = GroupChatBuilder().with_orchestrator(selection_func=selector) + + # Already has a selection_func, should fail on second call + with pytest.raises(ValueError, match=r"A selection function has already been configured"): + 
builder.with_orchestrator(selection_func=selector) + + # Test with agent_factory + builder2 = GroupChatBuilder().with_orchestrator(agent=agent_factory) + with pytest.raises(ValueError, match=r"A factory has already been configured"): + builder2.with_orchestrator(agent=agent_factory) + + +def test_group_chat_builder_requires_exactly_one_orchestrator_option(): + """Test that exactly one orchestrator option must be provided.""" + + def selector(state: GroupChatState) -> str: + return list(state.participants.keys())[0] + + def agent_factory() -> ChatAgent: + return cast(ChatAgent, StubManagerAgent()) + + # No options provided + with pytest.raises(ValueError, match="Exactly one of"): + GroupChatBuilder().with_orchestrator() # type: ignore + + # Multiple options provided + with pytest.raises(ValueError, match="Exactly one of"): + GroupChatBuilder().with_orchestrator(selection_func=selector, agent=agent_factory) # type: ignore + + +async def test_group_chat_with_orchestrator_factory_returning_chat_agent(): + """Test workflow creation using orchestrator_factory that returns ChatAgent.""" + factory_call_count = 0 + + class DynamicManagerAgent(ChatAgent): + """Manager agent that dynamically selects from available participants.""" + + def __init__(self) -> None: + super().__init__(chat_client=MockChatClient(), name="dynamic_manager", description="Dynamic manager") + self._call_count = 0 + + async def run( + self, + messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + *, + thread: AgentThread | None = None, + **kwargs: Any, + ) -> AgentResponse: + if self._call_count == 0: + self._call_count += 1 + payload = { + "terminate": False, + "reason": "Selecting alpha", + "next_speaker": "alpha", + "final_message": None, + } + return AgentResponse( + messages=[ + ChatMessage( + role="assistant", + text=( + '{"terminate": false, "reason": "Selecting alpha", ' + '"next_speaker": "alpha", "final_message": null}' + ), + author_name=self.name, + ) + ], + 
value=payload, + ) + + payload = { + "terminate": True, + "reason": "Task complete", + "next_speaker": None, + "final_message": "dynamic manager final", + } + return AgentResponse( + messages=[ + ChatMessage( + role="assistant", + text=( + '{"terminate": true, "reason": "Task complete", ' + '"next_speaker": null, "final_message": "dynamic manager final"}' + ), + author_name=self.name, + ) + ], + value=payload, + ) + + def agent_factory() -> ChatAgent: + nonlocal factory_call_count + factory_call_count += 1 + return cast(ChatAgent, DynamicManagerAgent()) + + alpha = StubAgent("alpha", "reply from alpha") + beta = StubAgent("beta", "reply from beta") + + workflow = GroupChatBuilder().participants([alpha, beta]).with_orchestrator(agent=agent_factory).build() + + # Factory should be called during build + assert factory_call_count == 1 + + outputs: list[WorkflowOutputEvent] = [] + async for event in workflow.run_stream("coordinate task"): + if isinstance(event, WorkflowOutputEvent): + outputs.append(event) + + assert len(outputs) == 1 + # The DynamicManagerAgent terminates after second call with final_message + final_messages = outputs[0].data + assert isinstance(final_messages, list) + assert any( + msg.text == "dynamic manager final" + for msg in cast(list[ChatMessage], final_messages) + if msg.author_name == "dynamic_manager" + ) + + +def test_group_chat_with_orchestrator_factory_returning_base_orchestrator(): + """Test that orchestrator_factory returning BaseGroupChatOrchestrator is used as-is.""" + factory_call_count = 0 + selector = make_sequence_selector() + + def orchestrator_factory() -> BaseGroupChatOrchestrator: + nonlocal factory_call_count + factory_call_count += 1 + from agent_framework._workflows._base_group_chat_orchestrator import ParticipantRegistry + from agent_framework._workflows._group_chat import GroupChatOrchestrator + + # Create a custom orchestrator; when returning BaseGroupChatOrchestrator, + # the builder uses it as-is without modifying its 
participant registry + return GroupChatOrchestrator( + id="custom_orchestrator", + participant_registry=ParticipantRegistry([]), + selection_func=selector, + max_rounds=2, + ) + + alpha = StubAgent("alpha", "reply from alpha") + + workflow = GroupChatBuilder().participants([alpha]).with_orchestrator(orchestrator=orchestrator_factory).build() + + # Factory should be called during build + assert factory_call_count == 1 + # Verify the custom orchestrator is in the workflow + assert "custom_orchestrator" in workflow.executors + + +async def test_group_chat_orchestrator_factory_reusable_builder(): + """Test that the builder can be reused to build multiple workflows with orchestrator factory.""" + factory_call_count = 0 + + def agent_factory() -> ChatAgent: + nonlocal factory_call_count + factory_call_count += 1 + return cast(ChatAgent, StubManagerAgent()) + + alpha = StubAgent("alpha", "reply from alpha") + beta = StubAgent("beta", "reply from beta") + + builder = GroupChatBuilder().participants([alpha, beta]).with_orchestrator(agent=agent_factory) + + # Build first workflow + wf1 = builder.build() + assert factory_call_count == 1 + + # Build second workflow + wf2 = builder.build() + assert factory_call_count == 2 + + # Verify that the two workflows have different orchestrator instances + assert wf1.executors["manager_agent"] is not wf2.executors["manager_agent"] + + +def test_group_chat_orchestrator_factory_invalid_return_type(): + """Test that orchestrator_factory raising error for invalid return type.""" + + def invalid_factory() -> Any: + return "invalid type" + + alpha = StubAgent("alpha", "reply from alpha") + + with pytest.raises( + TypeError, + match=r"Orchestrator factory must return ChatAgent or BaseGroupChatOrchestrator instance", + ): + (GroupChatBuilder().participants([alpha]).with_orchestrator(orchestrator=invalid_factory).build()) + + with pytest.raises( + TypeError, + match=r"Orchestrator factory must return ChatAgent or BaseGroupChatOrchestrator 
instance", + ): + (GroupChatBuilder().participants([alpha]).with_orchestrator(agent=invalid_factory).build()) + + +def test_group_chat_with_both_participant_and_orchestrator_factories(): + """Test workflow creation using both participant_factories and orchestrator_factory.""" + participant_factory_call_count = 0 + agent_factory_call_count = 0 + + def create_alpha() -> StubAgent: + nonlocal participant_factory_call_count + participant_factory_call_count += 1 + return StubAgent("alpha", "reply from alpha") + + def create_beta() -> StubAgent: + nonlocal participant_factory_call_count + participant_factory_call_count += 1 + return StubAgent("beta", "reply from beta") + + def agent_factory() -> ChatAgent: + nonlocal agent_factory_call_count + agent_factory_call_count += 1 + return cast(ChatAgent, StubManagerAgent()) + + workflow = ( + GroupChatBuilder() + .register_participants([create_alpha, create_beta]) + .with_orchestrator(agent=agent_factory) + .build() + ) + + # All factories should be called during build + assert participant_factory_call_count == 2 + assert agent_factory_call_count == 1 + + # Verify all executors are present in the workflow + assert "alpha" in workflow.executors + assert "beta" in workflow.executors + assert "manager_agent" in workflow.executors + + +async def test_group_chat_factories_reusable_for_multiple_workflows(): + """Test that both factories are reused correctly for multiple workflow builds.""" + participant_factory_call_count = 0 + agent_factory_call_count = 0 + + def create_alpha() -> StubAgent: + nonlocal participant_factory_call_count + participant_factory_call_count += 1 + return StubAgent("alpha", "reply from alpha") + + def create_beta() -> StubAgent: + nonlocal participant_factory_call_count + participant_factory_call_count += 1 + return StubAgent("beta", "reply from beta") + + def agent_factory() -> ChatAgent: + nonlocal agent_factory_call_count + agent_factory_call_count += 1 + return cast(ChatAgent, StubManagerAgent()) + + 
builder = ( + GroupChatBuilder().register_participants([create_alpha, create_beta]).with_orchestrator(agent=agent_factory) + ) + + # Build first workflow + wf1 = builder.build() + assert participant_factory_call_count == 2 + assert agent_factory_call_count == 1 + + # Build second workflow + wf2 = builder.build() + assert participant_factory_call_count == 4 + assert agent_factory_call_count == 2 + + # Verify that the workflows have different agent and orchestrator instances + assert wf1.executors["alpha"] is not wf2.executors["alpha"] + assert wf1.executors["beta"] is not wf2.executors["beta"] + assert wf1.executors["manager_agent"] is not wf2.executors["manager_agent"] + + +# endregion diff --git a/python/packages/orchestrations/tests/test_handoff.py b/python/packages/orchestrations/tests/test_handoff.py new file mode 100644 index 0000000000..93a373a872 --- /dev/null +++ b/python/packages/orchestrations/tests/test_handoff.py @@ -0,0 +1,708 @@ +# Copyright (c) Microsoft. All rights reserved. + +from collections.abc import AsyncIterable +from typing import Any, cast +from unittest.mock import AsyncMock, MagicMock + +import pytest +from agent_framework import ( + ChatAgent, + ChatMessage, + ChatResponse, + ChatResponseUpdate, + Content, + HandoffAgentUserRequest, + HandoffBuilder, + RequestInfoEvent, + WorkflowEvent, + WorkflowOutputEvent, + resolve_agent_id, + use_function_invocation, +) + + +@use_function_invocation +class MockChatClient: + """Mock chat client for testing handoff workflows.""" + + additional_properties: dict[str, Any] + + def __init__( + self, + name: str, + *, + handoff_to: str | None = None, + ) -> None: + """Initialize the mock chat client. + + Args: + name: The name of the agent using this chat client. + handoff_to: The name of the agent to hand off to, or None for no handoff. + This is hardcoded for testing purposes so that the agent always attempts to hand off. 
+ """ + self._name = name + self._handoff_to = handoff_to + self._call_index = 0 + + async def get_response(self, messages: Any, **kwargs: Any) -> ChatResponse: + contents = _build_reply_contents(self._name, self._handoff_to, self._next_call_id()) + reply = ChatMessage( + role="assistant", + contents=contents, + ) + return ChatResponse(messages=reply, response_id="mock_response") + + def get_streaming_response(self, messages: Any, **kwargs: Any) -> AsyncIterable[ChatResponseUpdate]: + async def _stream() -> AsyncIterable[ChatResponseUpdate]: + contents = _build_reply_contents(self._name, self._handoff_to, self._next_call_id()) + yield ChatResponseUpdate(contents=contents, role="assistant") + + return _stream() + + def _next_call_id(self) -> str | None: + if not self._handoff_to: + return None + call_id = f"{self._name}-handoff-{self._call_index}" + self._call_index += 1 + return call_id + + +def _build_reply_contents( + agent_name: str, + handoff_to: str | None, + call_id: str | None, +) -> list[Content]: + contents: list[Content] = [] + if handoff_to and call_id: + contents.append( + Content.from_function_call( + call_id=call_id, name=f"handoff_to_{handoff_to}", arguments={"handoff_to": handoff_to} + ) + ) + text = f"{agent_name} reply" + contents.append(Content.from_text(text=text)) + return contents + + +class MockHandoffAgent(ChatAgent): + """Mock agent that can hand off to another agent.""" + + def __init__( + self, + *, + name: str, + handoff_to: str | None = None, + ) -> None: + """Initialize the mock handoff agent. + + Args: + name: The name of the agent. + handoff_to: The name of the agent to hand off to, or None for no handoff. + This is hardcoded for testing purposes so that the agent always attempts to hand off. 
+ """ + super().__init__(chat_client=MockChatClient(name, handoff_to=handoff_to), name=name, id=name) + + +async def _drain(stream: AsyncIterable[WorkflowEvent]) -> list[WorkflowEvent]: + return [event async for event in stream] + + +async def test_handoff(): + """Test that agents can hand off to each other.""" + + # `triage` hands off to `specialist`, who then hands off to `escalation`. + # `escalation` has no handoff, so the workflow should request user input to continue. + triage = MockHandoffAgent(name="triage", handoff_to="specialist") + specialist = MockHandoffAgent(name="specialist", handoff_to="escalation") + escalation = MockHandoffAgent(name="escalation") + + # Without explicitly defining handoffs, the builder will create connections + # between all agents. + workflow = ( + HandoffBuilder(participants=[triage, specialist, escalation]) + .with_start_agent(triage) + .with_termination_condition(lambda conv: sum(1 for m in conv if m.role == "user") >= 2) + .build() + ) + + # Start conversation - triage hands off to specialist then escalation + # escalation won't trigger a handoff, so the response from it will become + # a request for user input because autonomous mode is not enabled by default. 
+ events = await _drain(workflow.run_stream("Need technical support")) + requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] + + assert requests + assert len(requests) == 1 + + request = requests[0] + assert isinstance(request.data, HandoffAgentUserRequest) + assert request.source_executor_id == escalation.name + + +async def test_autonomous_mode_yields_output_without_user_request(): + """Ensure autonomous interaction mode yields output without requesting user input.""" + triage = MockHandoffAgent(name="triage", handoff_to="specialist") + specialist = MockHandoffAgent(name="specialist") + + workflow = ( + HandoffBuilder(participants=[triage, specialist]) + .with_start_agent(triage) + # Since specialist has no handoff, the specialist will be generating normal responses. + # With autonomous mode, this should continue until the termination condition is met. + .with_autonomous_mode( + agents=[specialist], + turn_limits={resolve_agent_id(specialist): 1}, + ) + # This termination condition ensures the workflow runs through both agents. + # First message is the user message to triage, second is triage's response, which + # is a handoff to specialist, third is specialist's response that should not request + # user input due to autonomous mode. Fourth message will come from the specialist + # again and will trigger termination. 
+ .with_termination_condition(lambda conv: len(conv) >= 4) + .build() + ) + + events = await _drain(workflow.run_stream("Package arrived broken")) + requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] + assert not requests, "Autonomous mode should not request additional user input" + + outputs = [ev for ev in events if isinstance(ev, WorkflowOutputEvent)] + assert outputs, "Autonomous mode should yield a workflow output" + + final_conversation = outputs[-1].data + assert isinstance(final_conversation, list) + conversation_list = cast(list[ChatMessage], final_conversation) + assert any(msg.role == "assistant" and (msg.text or "").startswith("specialist reply") for msg in conversation_list) + + +async def test_autonomous_mode_resumes_user_input_on_turn_limit(): + """Autonomous mode should resume user input request when turn limit is reached.""" + triage = MockHandoffAgent(name="triage", handoff_to="worker") + worker = MockHandoffAgent(name="worker") + + workflow = ( + HandoffBuilder(participants=[triage, worker]) + .with_start_agent(triage) + .with_autonomous_mode(agents=[worker], turn_limits={resolve_agent_id(worker): 2}) + .with_termination_condition(lambda conv: False) + .build() + ) + + events = await _drain(workflow.run_stream("Start")) + requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] + assert requests and len(requests) == 1, "Turn limit should force a user input request" + assert requests[0].source_executor_id == worker.name + + +def test_build_fails_without_start_agent(): + """Verify that build() raises ValueError when with_start_agent() was not called.""" + triage = MockHandoffAgent(name="triage") + specialist = MockHandoffAgent(name="specialist") + + with pytest.raises(ValueError, match=r"Must call with_start_agent\(...\) before building the workflow."): + HandoffBuilder(participants=[triage, specialist]).build() + + +def test_build_fails_without_participants(): + """Verify that build() raises ValueError when no 
participants are provided.""" + with pytest.raises( + ValueError, match=r"No participants provided\. Call \.participants\(\) or \.register_participants\(\) first." + ): + HandoffBuilder().build() + + +async def test_handoff_async_termination_condition() -> None: + """Test that async termination conditions work correctly.""" + termination_call_count = 0 + + async def async_termination(conv: list[ChatMessage]) -> bool: + nonlocal termination_call_count + termination_call_count += 1 + user_count = sum(1 for msg in conv if msg.role == "user") + return user_count >= 2 + + coordinator = MockHandoffAgent(name="coordinator", handoff_to="worker") + worker = MockHandoffAgent(name="worker") + + workflow = ( + HandoffBuilder(participants=[coordinator, worker]) + .with_start_agent(coordinator) + .with_termination_condition(async_termination) + .build() + ) + + events = await _drain(workflow.run_stream("First user message")) + requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] + assert requests + + events = await _drain( + workflow.send_responses_streaming({requests[-1].request_id: [ChatMessage("user", ["Second user message"])]}) + ) + outputs = [ev for ev in events if isinstance(ev, WorkflowOutputEvent)] + assert len(outputs) == 1 + + final_conversation = outputs[0].data + assert isinstance(final_conversation, list) + final_conv_list = cast(list[ChatMessage], final_conversation) + user_messages = [msg for msg in final_conv_list if msg.role == "user"] + assert len(user_messages) == 2 + assert termination_call_count > 0 + + +async def test_tool_choice_preserved_from_agent_config(): + """Verify that agent-level tool_choice configuration is preserved and not overridden.""" + # Create a mock chat client that records the tool_choice used + recorded_tool_choices: list[Any] = [] + + async def mock_get_response(messages: Any, options: dict[str, Any] | None = None, **kwargs: Any) -> ChatResponse: + if options: + recorded_tool_choices.append(options.get("tool_choice")) + 
return ChatResponse( + messages=[ChatMessage("assistant", ["Response"])], + response_id="test_response", + ) + + mock_client = MagicMock() + mock_client.get_response = AsyncMock(side_effect=mock_get_response) + + # Create agent with specific tool_choice configuration via default_options + agent = ChatAgent( + chat_client=mock_client, + name="test_agent", + default_options={"tool_choice": {"mode": "required"}}, # type: ignore + ) + + # Run the agent + await agent.run("Test message") + + # Verify tool_choice was preserved + assert len(recorded_tool_choices) > 0, "No tool_choice recorded" + last_tool_choice = recorded_tool_choices[-1] + assert last_tool_choice is not None, "tool_choice should not be None" + assert last_tool_choice == {"mode": "required"}, f"Expected 'required', got {last_tool_choice}" + + +# region Participant Factory Tests + + +def test_handoff_builder_rejects_empty_participant_factories(): + """Test that HandoffBuilder rejects empty participant_factories dictionary.""" + # Empty factories are rejected immediately when calling participant_factories() + with pytest.raises(ValueError, match=r"participant_factories cannot be empty"): + HandoffBuilder().register_participants({}) + + with pytest.raises( + ValueError, match=r"No participants provided\. Call \.participants\(\) or \.register_participants\(\) first\." 
+ ): + HandoffBuilder(participant_factories={}).build() + + +def test_handoff_builder_rejects_mixing_participants_and_factories(): + """Test that mixing participants and participant_factories in __init__ raises an error.""" + triage = MockHandoffAgent(name="triage") + with pytest.raises(ValueError, match="Cannot mix .participants"): + HandoffBuilder(participants=[triage], participant_factories={"triage": lambda: triage}) + + +def test_handoff_builder_rejects_mixing_participants_and_participant_factories_methods(): + """Test that mixing .participants() and .participant_factories() raises an error.""" + triage = MockHandoffAgent(name="triage") + + # Case 1: participants first, then participant_factories + with pytest.raises(ValueError, match="Cannot mix .participants"): + HandoffBuilder(participants=[triage]).register_participants({ + "specialist": lambda: MockHandoffAgent(name="specialist") + }) + + # Case 2: participant_factories first, then participants + with pytest.raises(ValueError, match="Cannot mix .participants"): + HandoffBuilder(participant_factories={"triage": lambda: triage}).participants([ + MockHandoffAgent(name="specialist") + ]) + + # Case 3: participants(), then participant_factories() + with pytest.raises(ValueError, match="Cannot mix .participants"): + HandoffBuilder().participants([triage]).register_participants({ + "specialist": lambda: MockHandoffAgent(name="specialist") + }) + + # Case 4: participant_factories(), then participants() + with pytest.raises(ValueError, match="Cannot mix .participants"): + HandoffBuilder().register_participants({"triage": lambda: triage}).participants([ + MockHandoffAgent(name="specialist") + ]) + + # Case 5: mix during initialization + with pytest.raises(ValueError, match="Cannot mix .participants"): + HandoffBuilder( + participants=[triage], participant_factories={"specialist": lambda: MockHandoffAgent(name="specialist")} + ) + + +def test_handoff_builder_rejects_multiple_calls_to_participant_factories(): + 
"""Test that multiple calls to .participant_factories() raises an error.""" + with pytest.raises( + ValueError, match=r"register_participants\(\) has already been called on this builder instance." + ): + ( + HandoffBuilder() + .register_participants({"agent1": lambda: MockHandoffAgent(name="agent1")}) + .register_participants({"agent2": lambda: MockHandoffAgent(name="agent2")}) + ) + + +def test_handoff_builder_rejects_multiple_calls_to_participants(): + """Test that multiple calls to .participants() raises an error.""" + with pytest.raises(ValueError, match="participants have already been assigned"): + ( + HandoffBuilder() + .participants([MockHandoffAgent(name="agent1")]) + .participants([MockHandoffAgent(name="agent2")]) + ) + + +def test_handoff_builder_rejects_instance_coordinator_with_factories(): + """Test that using an agent instance for set_coordinator when using factories raises an error.""" + + def create_triage() -> MockHandoffAgent: + return MockHandoffAgent(name="triage") + + def create_specialist() -> MockHandoffAgent: + return MockHandoffAgent(name="specialist") + + # Create an agent instance + coordinator_instance = MockHandoffAgent(name="coordinator") + + with pytest.raises(ValueError, match=r"Call participants\(\.\.\.\) before with_start_agent\(\.\.\.\)"): + ( + HandoffBuilder( + participant_factories={"triage": create_triage, "specialist": create_specialist} + ).with_start_agent(coordinator_instance) # Instance, not factory name + ) + + +def test_handoff_builder_rejects_factory_name_coordinator_with_instances(): + """Test that using a factory name for set_coordinator when using instances raises an error.""" + triage = MockHandoffAgent(name="triage") + specialist = MockHandoffAgent(name="specialist") + + with pytest.raises(ValueError, match=r"Call register_participants\(...\) before with_start_agent\(...\)"): + ( + HandoffBuilder(participants=[triage, specialist]).with_start_agent( + "triage" + ) # String factory name, not instance + ) + + +def 
test_handoff_builder_rejects_mixed_types_in_add_handoff_source(): + """Test that add_handoff rejects factory name source with instance-based participants.""" + triage = MockHandoffAgent(name="triage") + specialist = MockHandoffAgent(name="specialist") + + with pytest.raises(TypeError, match="Cannot mix factory names \\(str\\) and AgentProtocol.*instances"): + ( + HandoffBuilder(participants=[triage, specialist]) + .with_start_agent(triage) + .add_handoff("triage", [specialist]) # String source with instance participants + ) + + +def test_handoff_builder_accepts_all_factory_names_in_add_handoff(): + """Test that add_handoff accepts all factory names when using participant_factories.""" + + def create_triage() -> MockHandoffAgent: + return MockHandoffAgent(name="triage") + + def create_specialist_a() -> MockHandoffAgent: + return MockHandoffAgent(name="specialist_a") + + def create_specialist_b() -> MockHandoffAgent: + return MockHandoffAgent(name="specialist_b") + + # This should work - all strings with participant_factories + builder = ( + HandoffBuilder( + participant_factories={ + "triage": create_triage, + "specialist_a": create_specialist_a, + "specialist_b": create_specialist_b, + } + ) + .with_start_agent("triage") + .add_handoff("triage", ["specialist_a", "specialist_b"]) + ) + + workflow = builder.build() + assert "triage" in workflow.executors + assert "specialist_a" in workflow.executors + assert "specialist_b" in workflow.executors + + +def test_handoff_builder_accepts_all_instances_in_add_handoff(): + """Test that add_handoff accepts all instances when using participants.""" + triage = MockHandoffAgent(name="triage", handoff_to="specialist_a") + specialist_a = MockHandoffAgent(name="specialist_a") + specialist_b = MockHandoffAgent(name="specialist_b") + + # This should work - all instances with participants + builder = ( + HandoffBuilder(participants=[triage, specialist_a, specialist_b]) + .with_start_agent(triage) + .add_handoff(triage, [specialist_a, 
specialist_b]) + ) + + workflow = builder.build() + assert "triage" in workflow.executors + assert "specialist_a" in workflow.executors + assert "specialist_b" in workflow.executors + + +async def test_handoff_with_participant_factories(): + """Test workflow creation using participant_factories.""" + call_count = 0 + + def create_triage() -> MockHandoffAgent: + nonlocal call_count + call_count += 1 + return MockHandoffAgent(name="triage", handoff_to="specialist") + + def create_specialist() -> MockHandoffAgent: + nonlocal call_count + call_count += 1 + return MockHandoffAgent(name="specialist") + + workflow = ( + HandoffBuilder(participant_factories={"triage": create_triage, "specialist": create_specialist}) + .with_start_agent("triage") + .with_termination_condition(lambda conv: sum(1 for m in conv if m.role == "user") >= 2) + .build() + ) + + # Factories should be called during build + assert call_count == 2 + + events = await _drain(workflow.run_stream("Need help")) + requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] + assert requests + + # Follow-up message + events = await _drain( + workflow.send_responses_streaming({requests[-1].request_id: [ChatMessage("user", ["More details"])]}) + ) + outputs = [ev for ev in events if isinstance(ev, WorkflowOutputEvent)] + assert outputs + + +async def test_handoff_participant_factories_reusable_builder(): + """Test that the builder can be reused to build multiple workflows with factories.""" + call_count = 0 + + def create_triage() -> MockHandoffAgent: + nonlocal call_count + call_count += 1 + return MockHandoffAgent(name="triage", handoff_to="specialist") + + def create_specialist() -> MockHandoffAgent: + nonlocal call_count + call_count += 1 + return MockHandoffAgent(name="specialist") + + builder = HandoffBuilder( + participant_factories={"triage": create_triage, "specialist": create_specialist} + ).with_start_agent("triage") + + # Build first workflow + wf1 = builder.build() + assert call_count == 2 
async def test_handoff_with_participant_factories_and_add_handoff():
    """Test that .add_handoff() works correctly with participant_factories."""

    def create_triage() -> MockHandoffAgent:
        return MockHandoffAgent(name="triage", handoff_to="specialist_a")

    def create_specialist_a() -> MockHandoffAgent:
        return MockHandoffAgent(name="specialist_a", handoff_to="specialist_b")

    def create_specialist_b() -> MockHandoffAgent:
        return MockHandoffAgent(name="specialist_b")

    factories = {
        "triage": create_triage,
        "specialist_a": create_specialist_a,
        "specialist_b": create_specialist_b,
    }
    workflow = (
        HandoffBuilder(participant_factories=factories)
        .with_start_agent("triage")
        .add_handoff("triage", ["specialist_a", "specialist_b"])
        .add_handoff("specialist_a", ["specialist_b"])
        .with_termination_condition(lambda conv: sum(1 for m in conv if m.role == "user") >= 3)
        .build()
    )

    def info_requests(events):
        # Narrow a raw event stream down to the human-input requests.
        return [ev for ev in events if isinstance(ev, RequestInfoEvent)]

    # Start conversation - triage hands off to specialist_a
    requests = info_requests(await _drain(workflow.run_stream("Initial request")))
    assert requests

    # Verify specialist_a executor exists and was called
    assert "specialist_a" in workflow.executors

    # Second user message - specialist_a hands off to specialist_b
    requests = info_requests(
        await _drain(
            workflow.send_responses_streaming({requests[-1].request_id: [ChatMessage("user", ["Need escalation"])]})
        )
    )
    assert requests

    # Verify specialist_b executor exists
    assert "specialist_b" in workflow.executors
def test_handoff_set_coordinator_with_factory_name():
    """Test that set_coordinator accepts factory name as string."""

    def create_triage() -> MockHandoffAgent:
        return MockHandoffAgent(name="triage")

    def create_specialist() -> MockHandoffAgent:
        return MockHandoffAgent(name="specialist")

    factories = {"triage": create_triage, "specialist": create_specialist}
    workflow = HandoffBuilder(participant_factories=factories).with_start_agent("triage").build()

    assert "triage" in workflow.executors
async def test_handoff_participant_factories_autonomous_mode():
    """Test autonomous mode with participant_factories."""

    def create_triage() -> MockHandoffAgent:
        return MockHandoffAgent(name="triage", handoff_to="specialist")

    def create_specialist() -> MockHandoffAgent:
        return MockHandoffAgent(name="specialist")

    factories = {"triage": create_triage, "specialist": create_specialist}
    workflow = (
        HandoffBuilder(participant_factories=factories)
        .with_start_agent("triage")
        .with_autonomous_mode(agents=["specialist"], turn_limits={"specialist": 1})
        .build()
    )

    stream_events = await _drain(workflow.run_stream("Issue"))
    info_requests = [ev for ev in stream_events if isinstance(ev, RequestInfoEvent)]
    # Exactly one request, surfaced by the autonomous specialist.
    assert len(info_requests) == 1
    assert info_requests[0].source_executor_id == "specialist"
def test_handoff_participant_factories_invalid_handoff_target():
    """Test that add_handoff raises error for non-existent target factory name."""

    def create_triage() -> MockHandoffAgent:
        return MockHandoffAgent(name="triage")

    def create_specialist() -> MockHandoffAgent:
        return MockHandoffAgent(name="specialist")

    factories = {"triage": create_triage, "specialist": create_specialist}
    # The whole chain stays inside pytest.raises: the error may surface at
    # add_handoff() or at build().
    with pytest.raises(ValueError, match="Target factory name 'nonexistent' is not in the participant_factories list"):
        HandoffBuilder(participant_factories=factories).with_start_agent("triage").add_handoff(
            "triage", ["nonexistent"]
        ).build()
def test_magentic_context_reset_behavior():
    """reset() clears chat history and stall count while incrementing reset_count."""
    context = MagenticContext(task="task", participant_descriptions={"Alice": "Researcher"})

    # seed context state
    context.chat_history.append(ChatMessage("assistant", ["draft"]))
    context.stall_count = 2
    resets_before = context.reset_count

    context.reset()

    assert not context.chat_history
    assert context.stall_count == 0
    assert context.reset_count == resets_before + 1
= _SimpleLedger(facts=facts, plan=plan) + except Exception: # pragma: no cover - defensive + pass + + async def plan(self, magentic_context: MagenticContext) -> ChatMessage: + facts = ChatMessage("assistant", ["GIVEN OR VERIFIED FACTS\n- A\n"]) + plan = ChatMessage("assistant", ["- Do X\n- Do Y\n"]) + self.task_ledger = _SimpleLedger(facts=facts, plan=plan) + combined = f"Task: {magentic_context.task}\n\nFacts:\n{facts.text}\n\nPlan:\n{plan.text}" + return ChatMessage("assistant", [combined], author_name=self.name) + + async def replan(self, magentic_context: MagenticContext) -> ChatMessage: + facts = ChatMessage("assistant", ["GIVEN OR VERIFIED FACTS\n- A2\n"]) + plan = ChatMessage("assistant", ["- Do Z\n"]) + self.task_ledger = _SimpleLedger(facts=facts, plan=plan) + combined = f"Task: {magentic_context.task}\n\nFacts:\n{facts.text}\n\nPlan:\n{plan.text}" + return ChatMessage("assistant", [combined], author_name=self.name) + + async def create_progress_ledger(self, magentic_context: MagenticContext) -> MagenticProgressLedger: + # At least two messages in chat history means request is satisfied for testing + is_satisfied = len(magentic_context.chat_history) > 1 + return MagenticProgressLedger( + is_request_satisfied=MagenticProgressLedgerItem(reason="test", answer=is_satisfied), + is_in_loop=MagenticProgressLedgerItem(reason="test", answer=False), + is_progress_being_made=MagenticProgressLedgerItem(reason="test", answer=True), + next_speaker=MagenticProgressLedgerItem(reason="test", answer=self.next_speaker_name), + instruction_or_question=MagenticProgressLedgerItem(reason="test", answer=self.instruction_text), + ) + + async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatMessage: + return ChatMessage("assistant", [self.FINAL_ANSWER], author_name=self.name) + + +class StubAgent(BaseAgent): + def __init__(self, agent_name: str, reply_text: str, **kwargs: Any) -> None: + super().__init__(name=agent_name, description=f"Stub agent 
{agent_name}", **kwargs) + self._reply_text = reply_text + + async def run( # type: ignore[override] + self, + messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + *, + thread: AgentThread | None = None, + **kwargs: Any, + ) -> AgentResponse: + response = ChatMessage("assistant", [self._reply_text], author_name=self.name) + return AgentResponse(messages=[response]) + + def run_stream( # type: ignore[override] + self, + messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + *, + thread: AgentThread | None = None, + **kwargs: Any, + ) -> AsyncIterable[AgentResponseUpdate]: + async def _stream() -> AsyncIterable[AgentResponseUpdate]: + yield AgentResponseUpdate( + contents=[Content.from_text(text=self._reply_text)], role="assistant", author_name=self.name + ) + + return _stream() + + +class DummyExec(Executor): + def __init__(self, name: str) -> None: + super().__init__(name) + + @handler + async def _noop( + self, message: GroupChatRequestMessage, ctx: WorkflowContext[ChatMessage] + ) -> None: # pragma: no cover - not called + pass + + +async def test_magentic_builder_returns_workflow_and_runs() -> None: + manager = FakeManager() + agent = StubAgent(manager.next_speaker_name, "first draft") + + workflow = MagenticBuilder().participants([agent]).with_manager(manager=manager).build() + + assert isinstance(workflow, Workflow) + + outputs: list[ChatMessage] = [] + orchestrator_event_count = 0 + async for event in workflow.run_stream("compose summary"): + if isinstance(event, WorkflowOutputEvent): + msg = event.data + if isinstance(msg, list): + outputs.extend(cast(list[ChatMessage], msg)) + elif isinstance(event, MagenticOrchestratorEvent): + orchestrator_event_count += 1 + + assert outputs, "Expected a final output message" + assert len(outputs) >= 1 + final = outputs[-1] + assert final.text == manager.FINAL_ANSWER + assert final.author_name == manager.name + assert orchestrator_event_count > 0, "Expected orchestrator events to 
be emitted" + + +async def test_magentic_as_agent_does_not_accept_conversation() -> None: + manager = FakeManager() + writer = StubAgent(manager.next_speaker_name, "summary response") + + workflow = MagenticBuilder().participants([writer]).with_manager(manager=manager).build() + + agent = workflow.as_agent(name="magentic-agent") + conversation = [ + ChatMessage("system", ["Guidelines"], author_name="system"), + ChatMessage("user", ["Summarize the findings"], author_name="requester"), + ] + with pytest.raises(ValueError, match="Magentic only support a single task message to start the workflow."): + await agent.run(conversation) + + +async def test_standard_manager_plan_and_replan_combined_ledger(): + manager = FakeManager() + ctx = MagenticContext( + task="demo task", + participant_descriptions={"agentA": "Agent A"}, + ) + + first = await manager.plan(ctx.clone()) + assert first.role == "assistant" and "Facts:" in first.text and "Plan:" in first.text + assert manager.task_ledger is not None + + replanned = await manager.replan(ctx.clone()) + assert "A2" in replanned.text or "Do Z" in replanned.text + + +async def test_magentic_workflow_plan_review_approval_to_completion(): + manager = FakeManager() + wf = MagenticBuilder().participants([DummyExec("agentA")]).with_manager(manager=manager).with_plan_review().build() + + req_event: RequestInfoEvent | None = None + async for ev in wf.run_stream("do work"): + if isinstance(ev, RequestInfoEvent) and ev.request_type is MagenticPlanReviewRequest: + req_event = ev + assert req_event is not None + assert isinstance(req_event.data, MagenticPlanReviewRequest) + + completed = False + output: list[ChatMessage] | None = None + async for ev in wf.send_responses_streaming(responses={req_event.request_id: req_event.data.approve()}): + if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + completed = True + elif isinstance(ev, WorkflowOutputEvent): + output = ev.data # type: ignore[assignment] + if completed 
and output is not None: + break + + assert completed + assert output is not None + assert isinstance(output, list) + assert all(isinstance(msg, ChatMessage) for msg in output) + + +async def test_magentic_plan_review_with_revise(): + class CountingManager(FakeManager): + # Declare as a model field so assignment is allowed under Pydantic + replan_count: int = 0 + + def __init__(self, *args, **kwargs) -> None: # type: ignore[no-untyped-def] + super().__init__(*args, **kwargs) + + async def replan(self, magentic_context: MagenticContext) -> ChatMessage: # type: ignore[override] + self.replan_count += 1 + return await super().replan(magentic_context) + + manager = CountingManager() + wf = ( + MagenticBuilder() + .participants([DummyExec(name=manager.next_speaker_name)]) + .with_manager(manager=manager) + .with_plan_review() + .build() + ) + + # Wait for the initial plan review request + req_event: RequestInfoEvent | None = None + async for ev in wf.run_stream("do work"): + if isinstance(ev, RequestInfoEvent) and ev.request_type is MagenticPlanReviewRequest: + req_event = ev + assert req_event is not None + assert isinstance(req_event.data, MagenticPlanReviewRequest) + + # Send a revise response + saw_second_review = False + completed = False + async for ev in wf.send_responses_streaming( + responses={req_event.request_id: req_event.data.revise("Looks good; consider Z")} + ): + if isinstance(ev, RequestInfoEvent) and ev.request_type is MagenticPlanReviewRequest: + saw_second_review = True + req_event = ev + + # Approve the second review + async for ev in wf.send_responses_streaming( + responses={req_event.request_id: req_event.data.approve()} # type: ignore[union-attr] + ): + if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + completed = True + break + + assert completed + assert manager.replan_count >= 1 + assert saw_second_review is True + # Replan from FakeManager updates facts/plan to include A2 / Do Z + assert manager.task_ledger is not 
None + combined_text = (manager.task_ledger.facts.text or "") + (manager.task_ledger.plan.text or "") + assert ("A2" in combined_text) or ("Do Z" in combined_text) + + +async def test_magentic_orchestrator_round_limit_produces_partial_result(): + manager = FakeManager(max_round_count=1) + wf = ( + MagenticBuilder() + .participants([DummyExec(name=manager.next_speaker_name)]) + .with_manager(manager=manager) + .build() + ) + + events: list[WorkflowEvent] = [] + async for ev in wf.run_stream("round limit test"): + events.append(ev) + + idle_status = next( + (e for e in events if isinstance(e, WorkflowStatusEvent) and e.state == WorkflowRunState.IDLE), + None, + ) + assert idle_status is not None + # Check that we got workflow output via WorkflowOutputEvent + output_event = next((e for e in events if isinstance(e, WorkflowOutputEvent)), None) + assert output_event is not None + data = output_event.data + assert isinstance(data, list) + assert len(data) > 0 # type: ignore + assert data[-1].role == "assistant" # type: ignore + assert all(isinstance(msg, ChatMessage) for msg in data) # type: ignore + + +async def test_magentic_checkpoint_resume_round_trip(): + storage = InMemoryCheckpointStorage() + + manager1 = FakeManager() + wf = ( + MagenticBuilder() + .participants([DummyExec(name=manager1.next_speaker_name)]) + .with_manager(manager=manager1) + .with_plan_review() + .with_checkpointing(storage) + .build() + ) + + task_text = "checkpoint task" + req_event: RequestInfoEvent | None = None + async for ev in wf.run_stream(task_text): + if isinstance(ev, RequestInfoEvent) and ev.request_type is MagenticPlanReviewRequest: + req_event = ev + assert req_event is not None + assert isinstance(req_event.data, MagenticPlanReviewRequest) + + checkpoints = await storage.list_checkpoints() + assert checkpoints + checkpoints.sort(key=lambda cp: cp.timestamp) + resume_checkpoint = checkpoints[-1] + + manager2 = FakeManager() + wf_resume = ( + MagenticBuilder() + 
.participants([DummyExec(name=manager2.next_speaker_name)]) + .with_manager(manager=manager2) + .with_plan_review() + .with_checkpointing(storage) + .build() + ) + + completed: WorkflowOutputEvent | None = None + req_event = None + async for event in wf_resume.run_stream( + resume_checkpoint.checkpoint_id, + ): + if isinstance(event, RequestInfoEvent) and event.request_type is MagenticPlanReviewRequest: + req_event = event + assert req_event is not None + assert isinstance(req_event.data, MagenticPlanReviewRequest) + + responses = {req_event.request_id: req_event.data.approve()} + async for event in wf_resume.send_responses_streaming(responses=responses): + if isinstance(event, WorkflowOutputEvent): + completed = event + assert completed is not None + + orchestrator = next(exec for exec in wf_resume.executors.values() if isinstance(exec, MagenticOrchestrator)) + assert orchestrator._magentic_context is not None # type: ignore[reportPrivateUsage] + assert orchestrator._magentic_context.chat_history # type: ignore[reportPrivateUsage] + assert orchestrator._task_ledger is not None # type: ignore[reportPrivateUsage] + assert manager2.task_ledger is not None + # Latest entry in chat history should be the task ledger plan + assert orchestrator._magentic_context.chat_history[-1].text == orchestrator._task_ledger.text # type: ignore[reportPrivateUsage] + + +class StubManagerAgent(BaseAgent): + """Stub agent for testing StandardMagenticManager.""" + + async def run( + self, + messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + *, + thread: Any = None, + **kwargs: Any, + ) -> AgentResponse: + return AgentResponse(messages=[ChatMessage("assistant", ["ok"])]) + + def run_stream( + self, + messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + *, + thread: Any = None, + **kwargs: Any, + ) -> AsyncIterable[AgentResponseUpdate]: + async def _gen() -> AsyncIterable[AgentResponseUpdate]: + yield 
async def test_standard_manager_plan_and_replan_via_complete_monkeypatch():
    """Drive StandardMagenticManager.plan/replan by patching its _complete hook."""
    mgr = StandardMagenticManager(StubManagerAgent())

    async def fake_complete_plan(messages: list[ChatMessage], **kwargs: Any) -> ChatMessage:
        # The facts prompt is issued first; once FACTS appear in the transcript
        # the manager is asking for the plan steps.
        asked_with_facts = any("FACTS" in (m.text or "") for m in messages)
        if asked_with_facts:
            return ChatMessage("assistant", ["- step A\n- step B"])
        return ChatMessage("assistant", ["GIVEN OR VERIFIED FACTS\n- fact1"])

    # First, patch to produce facts then plan
    mgr._complete = fake_complete_plan  # type: ignore[attr-defined]

    ctx = MagenticContext(task="T", participant_descriptions={"A": "desc"})
    combined = await mgr.plan(ctx.clone())
    # Assert structural headings and that steps appear in the combined ledger output.
    assert "We are working to address the following user request:" in combined.text
    assert "Here is the plan to follow as best as possible:" in combined.text
    assert any(step in combined.text for step in ("- step A", "- step B", "- step"))

    # Now replan with new outputs
    async def fake_complete_replan(messages: list[ChatMessage], **kwargs: Any) -> ChatMessage:
        if any("Please briefly explain" in (m.text or "") for m in messages):
            return ChatMessage("assistant", ["- new step"])
        return ChatMessage("assistant", ["GIVEN OR VERIFIED FACTS\n- updated"])

    mgr._complete = fake_complete_replan  # type: ignore[attr-defined]
    combined2 = await mgr.replan(ctx.clone())
    assert ("updated" in combined2.text) or ("new step" in combined2.text)
{"reason": "r", "answer": false}, ' + '"is_in_loop": {"reason": "r", "answer": false}, ' + '"is_progress_being_made": {"reason": "r", "answer": true}, ' + '"next_speaker": {"reason": "r", "answer": "alice"}, ' + '"instruction_or_question": {"reason": "r", "answer": "do"}}' + ) + return ChatMessage("assistant", [json_text]) + + mgr._complete = fake_complete_ok # type: ignore[attr-defined] + ledger = await mgr.create_progress_ledger(ctx.clone()) + assert ledger.next_speaker.answer == "alice" + + # Error path: invalid JSON now raises to avoid emitting planner-oriented instructions to agents + async def fake_complete_bad(messages: list[ChatMessage], **kwargs: Any) -> ChatMessage: + return ChatMessage("assistant", ["not-json"]) + + mgr._complete = fake_complete_bad # type: ignore[attr-defined] + with pytest.raises(RuntimeError): + await mgr.create_progress_ledger(ctx.clone()) + + +class InvokeOnceManager(MagenticManagerBase): + def __init__(self) -> None: + super().__init__(max_round_count=5, max_stall_count=3, max_reset_count=2) + self._invoked = False + + async def plan(self, magentic_context: MagenticContext) -> ChatMessage: + return ChatMessage("assistant", ["ledger"]) + + async def replan(self, magentic_context: MagenticContext) -> ChatMessage: + return ChatMessage("assistant", ["re-ledger"]) + + async def create_progress_ledger(self, magentic_context: MagenticContext) -> MagenticProgressLedger: + if not self._invoked: + # First round: ask agentA to respond + self._invoked = True + return MagenticProgressLedger( + is_request_satisfied=MagenticProgressLedgerItem(reason="r", answer=False), + is_in_loop=MagenticProgressLedgerItem(reason="r", answer=False), + is_progress_being_made=MagenticProgressLedgerItem(reason="r", answer=True), + next_speaker=MagenticProgressLedgerItem(reason="r", answer="agentA"), + instruction_or_question=MagenticProgressLedgerItem(reason="r", answer="say hi"), + ) + # Next round: mark satisfied so run can conclude + return 
MagenticProgressLedger( + is_request_satisfied=MagenticProgressLedgerItem(reason="r", answer=True), + is_in_loop=MagenticProgressLedgerItem(reason="r", answer=False), + is_progress_being_made=MagenticProgressLedgerItem(reason="r", answer=True), + next_speaker=MagenticProgressLedgerItem(reason="r", answer="agentA"), + instruction_or_question=MagenticProgressLedgerItem(reason="r", answer="done"), + ) + + async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatMessage: + return ChatMessage("assistant", ["final"]) + + +class StubThreadAgent(BaseAgent): + def __init__(self, name: str | None = None) -> None: + super().__init__(name=name or "agentA") + + async def run_stream(self, messages=None, *, thread=None, **kwargs): # type: ignore[override] + yield AgentResponseUpdate( + contents=[Content.from_text(text="thread-ok")], + author_name=self.name, + role="assistant", + ) + + async def run(self, messages=None, *, thread=None, **kwargs): # type: ignore[override] + return AgentResponse(messages=[ChatMessage("assistant", ["thread-ok"], author_name=self.name)]) + + +class StubAssistantsClient: + pass # class name used for branch detection + + +class StubAssistantsAgent(BaseAgent): + chat_client: object | None = None # allow assignment via Pydantic field + + def __init__(self) -> None: + super().__init__(name="agentA") + self.chat_client = StubAssistantsClient() # type name contains 'AssistantsClient' + + async def run_stream(self, messages=None, *, thread=None, **kwargs): # type: ignore[override] + yield AgentResponseUpdate( + contents=[Content.from_text(text="assistants-ok")], + author_name=self.name, + role="assistant", + ) + + async def run(self, messages=None, *, thread=None, **kwargs): # type: ignore[override] + return AgentResponse(messages=[ChatMessage("assistant", ["assistants-ok"], author_name=self.name)]) + + +async def _collect_agent_responses_setup(participant: AgentProtocol) -> list[ChatMessage]: + captured: list[ChatMessage] = [] + + wf = 
async def _collect_checkpoints(
    storage: InMemoryCheckpointStorage,
) -> list[WorkflowCheckpoint]:
    """Return all checkpoints from *storage*, oldest first; fail if none exist."""
    saved = await storage.list_checkpoints()
    assert saved
    return sorted(saved, key=lambda cp: cp.timestamp)
.participants([StubThreadAgent()]) + .with_manager(manager=InvokeOnceManager()) + .with_checkpointing(storage) + .build() + ) + + completed: WorkflowOutputEvent | None = None + async for event in resumed.run_stream(checkpoint_id=inner_loop_checkpoint.checkpoint_id): # type: ignore[reportUnknownMemberType] + if isinstance(event, WorkflowOutputEvent): + completed = event + + assert completed is not None + + +async def test_magentic_checkpoint_resume_from_saved_state(): + """Test that we can resume workflow execution from a saved checkpoint.""" + storage = InMemoryCheckpointStorage() + + # Use the working InvokeOnceManager first to get a completed workflow + manager = InvokeOnceManager() + + workflow = ( + MagenticBuilder() + .participants([StubThreadAgent()]) + .with_manager(manager=manager) + .with_checkpointing(storage) + .build() + ) + + async for event in workflow.run_stream("checkpoint resume task"): + if isinstance(event, WorkflowOutputEvent): + break + + checkpoints = await _collect_checkpoints(storage) + + # Verify we can resume from the last saved checkpoint + resumed_state = checkpoints[-1] # Use the last checkpoint + + resumed_workflow = ( + MagenticBuilder() + .participants([StubThreadAgent()]) + .with_manager(manager=InvokeOnceManager()) + .with_checkpointing(storage) + .build() + ) + + completed: WorkflowOutputEvent | None = None + async for event in resumed_workflow.run_stream(checkpoint_id=resumed_state.checkpoint_id): + if isinstance(event, WorkflowOutputEvent): + completed = event + + assert completed is not None + + +async def test_magentic_checkpoint_resume_rejects_participant_renames(): + storage = InMemoryCheckpointStorage() + + manager = InvokeOnceManager() + + workflow = ( + MagenticBuilder() + .participants([StubThreadAgent()]) + .with_manager(manager=manager) + .with_plan_review() + .with_checkpointing(storage) + .build() + ) + + req_event: RequestInfoEvent | None = None + async for event in workflow.run_stream("task"): + if 
class NotProgressingManager(MagenticManagerBase):
    """A manager that never marks progress being made, to test stall/reset limits."""

    async def plan(self, magentic_context: MagenticContext) -> ChatMessage:
        # Minimal placeholder ledger; the content is irrelevant for limit tests.
        return ChatMessage("assistant", ["ledger"])

    async def replan(self, magentic_context: MagenticContext) -> ChatMessage:
        return ChatMessage("assistant", ["re-ledger"])

    async def create_progress_ledger(self, magentic_context: MagenticContext) -> MagenticProgressLedger:
        def item(answer):
            return MagenticProgressLedgerItem(reason="r", answer=answer)

        # Always report a stalled loop with no progress so the orchestrator
        # exhausts its stall/reset limits.
        return MagenticProgressLedger(
            is_request_satisfied=item(False),
            is_in_loop=item(True),
            is_progress_being_made=item(False),
            next_speaker=item("agentA"),
            instruction_or_question=item("done"),
        )

    async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatMessage:
        return ChatMessage("assistant", ["final"])
MagenticBuilder().participants([DummyExec("agentA")]).with_manager(manager=manager).build() + + events: list[WorkflowEvent] = [] + async for ev in wf.run_stream("test limits"): + events.append(ev) + + idle_status = next( + (e for e in events if isinstance(e, WorkflowStatusEvent) and e.state == WorkflowRunState.IDLE), + None, + ) + assert idle_status is not None + output_event = next((e for e in events if isinstance(e, WorkflowOutputEvent)), None) + assert output_event is not None + assert isinstance(output_event.data, list) + assert all(isinstance(msg, ChatMessage) for msg in output_event.data) # type: ignore + assert len(output_event.data) > 0 # type: ignore + assert output_event.data[-1].text is not None # type: ignore + assert output_event.data[-1].text == "Workflow terminated due to reaching maximum reset count." # type: ignore + + +async def test_magentic_checkpoint_runtime_only() -> None: + """Test checkpointing configured ONLY at runtime, not at build time.""" + storage = InMemoryCheckpointStorage() + + manager = FakeManager(max_round_count=10) + wf = MagenticBuilder().participants([DummyExec("agentA")]).with_manager(manager=manager).build() + + baseline_output: ChatMessage | None = None + async for ev in wf.run_stream("runtime checkpoint test", checkpoint_storage=storage): + if isinstance(ev, WorkflowOutputEvent): + baseline_output = ev.data # type: ignore[assignment] + if isinstance(ev, WorkflowStatusEvent) and ev.state in ( + WorkflowRunState.IDLE, + WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, + ): + break + + assert baseline_output is not None + + checkpoints = await storage.list_checkpoints() + assert len(checkpoints) > 0, "Runtime-only checkpointing should have created checkpoints" + + +async def test_magentic_checkpoint_runtime_overrides_buildtime() -> None: + """Test that runtime checkpoint storage overrides build-time configuration.""" + import tempfile + + with ( + tempfile.TemporaryDirectory() as temp_dir1, + tempfile.TemporaryDirectory() as 
temp_dir2, + ): + from agent_framework._workflows._checkpoint import FileCheckpointStorage + + buildtime_storage = FileCheckpointStorage(temp_dir1) + runtime_storage = FileCheckpointStorage(temp_dir2) + + manager = FakeManager(max_round_count=10) + wf = ( + MagenticBuilder() + .participants([DummyExec("agentA")]) + .with_manager(manager=manager) + .with_checkpointing(buildtime_storage) + .build() + ) + + baseline_output: ChatMessage | None = None + async for ev in wf.run_stream("override test", checkpoint_storage=runtime_storage): + if isinstance(ev, WorkflowOutputEvent): + baseline_output = ev.data # type: ignore[assignment] + if isinstance(ev, WorkflowStatusEvent) and ev.state in ( + WorkflowRunState.IDLE, + WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, + ): + break + + assert baseline_output is not None + + buildtime_checkpoints = await buildtime_storage.list_checkpoints() + runtime_checkpoints = await runtime_storage.list_checkpoints() + + assert len(runtime_checkpoints) > 0, "Runtime storage should have checkpoints" + assert len(buildtime_checkpoints) == 0, "Build-time storage should have no checkpoints when overridden" + + +# region Message Deduplication Tests + + +async def test_magentic_context_no_duplicate_on_reset(): + """Test that MagenticContext.reset() clears chat_history without leaving duplicates.""" + ctx = MagenticContext(task="task", participant_descriptions={"Alice": "Researcher"}) + + # Add some history + ctx.chat_history.append(ChatMessage("assistant", ["response1"])) + ctx.chat_history.append(ChatMessage("assistant", ["response2"])) + assert len(ctx.chat_history) == 2 + + # Reset + ctx.reset() + + # Verify clean slate + assert len(ctx.chat_history) == 0, "chat_history should be empty after reset" + + # Add new history + ctx.chat_history.append(ChatMessage("assistant", ["new_response"])) + assert len(ctx.chat_history) == 1, "Should have exactly 1 message after adding to reset context" + + +async def 
test_magentic_checkpoint_restore_no_duplicate_history(): + """Test that checkpoint restore does not create duplicate messages in chat_history.""" + manager = FakeManager(max_round_count=10) + storage = InMemoryCheckpointStorage() + + wf = ( + MagenticBuilder() + .participants([DummyExec("agentA")]) + .with_manager(manager=manager) + .with_checkpointing(storage) + .build() + ) + + # Run with conversation history to create initial checkpoint + conversation: list[ChatMessage] = [ + ChatMessage("user", ["task_msg"]), + ] + + async for event in wf.run_stream(conversation): + if isinstance(event, WorkflowStatusEvent) and event.state in ( + WorkflowRunState.IDLE, + WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, + ): + break + + # Get checkpoint + checkpoints = await storage.list_checkpoints() + assert len(checkpoints) > 0, "Should have created checkpoints" + + latest_checkpoint = checkpoints[-1] + + # Load checkpoint and verify no duplicates in shared state + checkpoint_data = await storage.load_checkpoint(latest_checkpoint.checkpoint_id) + assert checkpoint_data is not None + + # Check the magentic_context in the checkpoint + for _, executor_state in checkpoint_data.metadata.items(): + if isinstance(executor_state, dict) and "magentic_context" in executor_state: + ctx_data: dict[str, Any] = executor_state["magentic_context"] # type: ignore + chat_history = ctx_data.get("chat_history", []) # type: ignore + + # Count unique messages by text + texts = [ # type: ignore + msg.get("text") or (msg.get("contents", [{}])[0].get("text") if msg.get("contents") else None) # type: ignore + for msg in chat_history # type: ignore + ] + text_counts: dict[str, int] = {} + for text in texts: # type: ignore + if text: + text_counts[text] = text_counts.get(text, 0) + 1 # type: ignore + + # Input messages should not be duplicated + assert text_counts.get("history_msg", 0) <= 1, ( + f"'history_msg' appears {text_counts.get('history_msg', 0)} times in checkpoint - expected <= 1" + ) + assert 
text_counts.get("task_msg", 0) <= 1, ( + f"'task_msg' appears {text_counts.get('task_msg', 0)} times in checkpoint - expected <= 1" + ) + + +# endregion + +# region Participant Factory Tests + + +def test_magentic_builder_rejects_empty_participant_factories(): + """Test that MagenticBuilder rejects empty participant_factories list.""" + with pytest.raises(ValueError, match=r"participant_factories cannot be empty"): + MagenticBuilder().register_participants([]) + + with pytest.raises( + ValueError, + match=r"No participants provided\. Call \.participants\(\) or \.register_participants\(\) first\.", + ): + MagenticBuilder().with_manager(manager=FakeManager()).build() + + +def test_magentic_builder_rejects_mixing_participants_and_factories(): + """Test that mixing .participants() and .register_participants() raises an error.""" + agent = StubAgent("agentA", "reply from agentA") + + # Case 1: participants first, then register_participants + with pytest.raises(ValueError, match="Cannot mix .participants"): + MagenticBuilder().participants([agent]).register_participants([lambda: StubAgent("agentB", "reply")]) + + # Case 2: register_participants first, then participants + with pytest.raises(ValueError, match="Cannot mix .participants"): + MagenticBuilder().register_participants([lambda: agent]).participants([StubAgent("agentB", "reply")]) + + +def test_magentic_builder_rejects_multiple_calls_to_register_participants(): + """Test that multiple calls to .register_participants() raises an error.""" + with pytest.raises( + ValueError, match=r"register_participants\(\) has already been called on this builder instance." 
+ ): + ( + MagenticBuilder() + .register_participants([lambda: StubAgent("agentA", "reply from agentA")]) + .register_participants([lambda: StubAgent("agentB", "reply from agentB")]) + ) + + +def test_magentic_builder_rejects_multiple_calls_to_participants(): + """Test that multiple calls to .participants() raises an error.""" + with pytest.raises(ValueError, match="participants have already been set"): + ( + MagenticBuilder() + .participants([StubAgent("agentA", "reply from agentA")]) + .participants([StubAgent("agentB", "reply from agentB")]) + ) + + +async def test_magentic_with_participant_factories(): + """Test workflow creation using participant_factories.""" + call_count = 0 + + def create_agent() -> StubAgent: + nonlocal call_count + call_count += 1 + return StubAgent("agentA", "reply from agentA") + + manager = FakeManager() + workflow = MagenticBuilder().register_participants([create_agent]).with_manager(manager=manager).build() + + # Factory should be called during build + assert call_count == 1 + + outputs: list[WorkflowOutputEvent] = [] + async for event in workflow.run_stream("test task"): + if isinstance(event, WorkflowOutputEvent): + outputs.append(event) + + assert len(outputs) == 1 + + +async def test_magentic_participant_factories_reusable_builder(): + """Test that the builder can be reused to build multiple workflows with factories.""" + call_count = 0 + + def create_agent() -> StubAgent: + nonlocal call_count + call_count += 1 + return StubAgent("agentA", "reply from agentA") + + builder = MagenticBuilder().register_participants([create_agent]).with_manager(manager=FakeManager()) + + # Build first workflow + wf1 = builder.build() + assert call_count == 1 + + # Build second workflow + wf2 = builder.build() + assert call_count == 2 + + # Verify that the two workflows have different agent instances + assert wf1.executors["agentA"] is not wf2.executors["agentA"] + + +async def test_magentic_participant_factories_with_checkpointing(): + """Test 
checkpointing with participant_factories.""" + storage = InMemoryCheckpointStorage() + + def create_agent() -> StubAgent: + return StubAgent("agentA", "reply from agentA") + + manager = FakeManager() + workflow = ( + MagenticBuilder() + .register_participants([create_agent]) + .with_manager(manager=manager) + .with_checkpointing(storage) + .build() + ) + + outputs: list[WorkflowOutputEvent] = [] + async for event in workflow.run_stream("checkpoint test"): + if isinstance(event, WorkflowOutputEvent): + outputs.append(event) + + assert outputs, "Should have workflow output" + + checkpoints = await storage.list_checkpoints() + assert checkpoints, "Checkpoints should be created during workflow execution" + + +# endregion + +# region Manager Factory Tests + + +def test_magentic_builder_rejects_multiple_manager_configurations(): + """Test that configuring multiple managers raises ValueError.""" + manager = FakeManager() + + builder = MagenticBuilder().with_manager(manager=manager) + + with pytest.raises(ValueError, match=r"with_manager\(\) has already been called"): + builder.with_manager(manager=manager) + + +def test_magentic_builder_requires_exactly_one_manager_option(): + """Test that exactly one manager option must be provided.""" + manager = FakeManager() + + def manager_factory() -> MagenticManagerBase: + return FakeManager() + + # No options provided + with pytest.raises(ValueError, match="Exactly one of"): + MagenticBuilder().with_manager() # type: ignore + + # Multiple options provided + with pytest.raises(ValueError, match="Exactly one of"): + MagenticBuilder().with_manager(manager=manager, manager_factory=manager_factory) # type: ignore + + +async def test_magentic_with_manager_factory(): + """Test workflow creation using manager_factory.""" + factory_call_count = 0 + + def manager_factory() -> MagenticManagerBase: + nonlocal factory_call_count + factory_call_count += 1 + return FakeManager() + + agent = StubAgent("agentA", "reply from agentA") + workflow = 
MagenticBuilder().participants([agent]).with_manager(manager_factory=manager_factory).build() + + # Factory should be called during build + assert factory_call_count == 1 + + outputs: list[WorkflowOutputEvent] = [] + async for event in workflow.run_stream("test task"): + if isinstance(event, WorkflowOutputEvent): + outputs.append(event) + + assert len(outputs) == 1 + + +async def test_magentic_with_agent_factory(): + """Test workflow creation using agent_factory for StandardMagenticManager.""" + factory_call_count = 0 + + def agent_factory() -> AgentProtocol: + nonlocal factory_call_count + factory_call_count += 1 + return cast(AgentProtocol, StubManagerAgent()) + + participant = StubAgent("agentA", "reply from agentA") + workflow = ( + MagenticBuilder() + .participants([participant]) + .with_manager(agent_factory=agent_factory, max_round_count=1) + .build() + ) + + # Factory should be called during build + assert factory_call_count == 1 + + # Verify workflow can be started (may not complete successfully due to stub behavior) + event_count = 0 + async for _ in workflow.run_stream("test task"): + event_count += 1 + if event_count > 10: + break + + assert event_count > 0 + + +async def test_magentic_manager_factory_reusable_builder(): + """Test that the builder can be reused to build multiple workflows with manager factory.""" + factory_call_count = 0 + + def manager_factory() -> MagenticManagerBase: + nonlocal factory_call_count + factory_call_count += 1 + return FakeManager() + + agent = StubAgent("agentA", "reply from agentA") + builder = MagenticBuilder().participants([agent]).with_manager(manager_factory=manager_factory) + + # Build first workflow + wf1 = builder.build() + assert factory_call_count == 1 + + # Build second workflow + wf2 = builder.build() + assert factory_call_count == 2 + + # Verify that the two workflows have different orchestrator instances + orchestrator1 = next(e for e in wf1.executors.values() if isinstance(e, MagenticOrchestrator)) + 
orchestrator2 = next(e for e in wf2.executors.values() if isinstance(e, MagenticOrchestrator)) + assert orchestrator1 is not orchestrator2 + + +def test_magentic_with_both_participant_and_manager_factories(): + """Test workflow creation using both participant_factories and manager_factory.""" + participant_factory_call_count = 0 + manager_factory_call_count = 0 + + def create_agent() -> StubAgent: + nonlocal participant_factory_call_count + participant_factory_call_count += 1 + return StubAgent("agentA", "reply from agentA") + + def manager_factory() -> MagenticManagerBase: + nonlocal manager_factory_call_count + manager_factory_call_count += 1 + return FakeManager() + + workflow = ( + MagenticBuilder().register_participants([create_agent]).with_manager(manager_factory=manager_factory).build() + ) + + # All factories should be called during build + assert participant_factory_call_count == 1 + assert manager_factory_call_count == 1 + + # Verify executor is present in the workflow + assert "agentA" in workflow.executors + + +async def test_magentic_factories_reusable_for_multiple_workflows(): + """Test that both factories are reused correctly for multiple workflow builds.""" + participant_factory_call_count = 0 + manager_factory_call_count = 0 + + def create_agent() -> StubAgent: + nonlocal participant_factory_call_count + participant_factory_call_count += 1 + return StubAgent("agentA", "reply from agentA") + + def manager_factory() -> MagenticManagerBase: + nonlocal manager_factory_call_count + manager_factory_call_count += 1 + return FakeManager() + + builder = MagenticBuilder().register_participants([create_agent]).with_manager(manager_factory=manager_factory) + + # Build first workflow + wf1 = builder.build() + assert participant_factory_call_count == 1 + assert manager_factory_call_count == 1 + + # Build second workflow + wf2 = builder.build() + assert participant_factory_call_count == 2 + assert manager_factory_call_count == 2 + + # Verify that the workflows 
have different agent and orchestrator instances + assert wf1.executors["agentA"] is not wf2.executors["agentA"] + + orchestrator1 = next(e for e in wf1.executors.values() if isinstance(e, MagenticOrchestrator)) + orchestrator2 = next(e for e in wf2.executors.values() if isinstance(e, MagenticOrchestrator)) + assert orchestrator1 is not orchestrator2 + + +def test_magentic_agent_factory_with_standard_manager_options(): + """Test that agent_factory properly passes through standard manager options.""" + factory_call_count = 0 + + def agent_factory() -> AgentProtocol: + nonlocal factory_call_count + factory_call_count += 1 + return cast(AgentProtocol, StubManagerAgent()) + + # Custom options to verify they are passed through + custom_max_stall_count = 5 + custom_max_reset_count = 2 + custom_max_round_count = 10 + custom_facts_prompt = "Custom facts prompt: {task}" + custom_plan_prompt = "Custom plan prompt: {team}" + custom_full_prompt = "Custom full prompt: {task} {team} {facts} {plan}" + custom_facts_update_prompt = "Custom facts update: {task} {old_facts}" + custom_plan_update_prompt = "Custom plan update: {team}" + custom_progress_prompt = "Custom progress: {task} {team} {names}" + custom_final_prompt = "Custom final: {task}" + + # Create a custom task ledger + from agent_framework_orchestrations._magentic import _MagenticTaskLedger # type: ignore + + custom_task_ledger = _MagenticTaskLedger( + facts=ChatMessage("assistant", ["Custom facts"]), + plan=ChatMessage("assistant", ["Custom plan"]), + ) + + participant = StubAgent("agentA", "reply from agentA") + workflow = ( + MagenticBuilder() + .participants([participant]) + .with_manager( + agent_factory=agent_factory, + task_ledger=custom_task_ledger, + max_stall_count=custom_max_stall_count, + max_reset_count=custom_max_reset_count, + max_round_count=custom_max_round_count, + task_ledger_facts_prompt=custom_facts_prompt, + task_ledger_plan_prompt=custom_plan_prompt, + task_ledger_full_prompt=custom_full_prompt, + 
task_ledger_facts_update_prompt=custom_facts_update_prompt, + task_ledger_plan_update_prompt=custom_plan_update_prompt, + progress_ledger_prompt=custom_progress_prompt, + final_answer_prompt=custom_final_prompt, + ) + .build() + ) + + # Factory should be called during build + assert factory_call_count == 1 + + # Get the orchestrator and verify the manager has the custom options + orchestrator = next(e for e in workflow.executors.values() if isinstance(e, MagenticOrchestrator)) + manager = orchestrator._manager # type: ignore[reportPrivateUsage] + + # Verify the manager is a StandardMagenticManager with the expected options + from agent_framework import StandardMagenticManager + + assert isinstance(manager, StandardMagenticManager) + assert manager.task_ledger is custom_task_ledger + assert manager.max_stall_count == custom_max_stall_count + assert manager.max_reset_count == custom_max_reset_count + assert manager.max_round_count == custom_max_round_count + assert manager.task_ledger_facts_prompt == custom_facts_prompt + assert manager.task_ledger_plan_prompt == custom_plan_prompt + assert manager.task_ledger_full_prompt == custom_full_prompt + assert manager.task_ledger_facts_update_prompt == custom_facts_update_prompt + assert manager.task_ledger_plan_update_prompt == custom_plan_update_prompt + assert manager.progress_ledger_prompt == custom_progress_prompt + assert manager.final_answer_prompt == custom_final_prompt + + +# endregion diff --git a/python/packages/orchestrations/tests/test_sequential.py b/python/packages/orchestrations/tests/test_sequential.py new file mode 100644 index 0000000000..389a33c7c0 --- /dev/null +++ b/python/packages/orchestrations/tests/test_sequential.py @@ -0,0 +1,453 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +from collections.abc import AsyncIterable +from typing import Any + +import pytest +from agent_framework import ( + AgentExecutorResponse, + AgentResponse, + AgentResponseUpdate, + AgentThread, + BaseAgent, + ChatMessage, + Content, + Executor, + SequentialBuilder, + TypeCompatibilityError, + WorkflowContext, + WorkflowOutputEvent, + WorkflowRunState, + WorkflowStatusEvent, + handler, +) +from agent_framework._workflows._checkpoint import InMemoryCheckpointStorage + + +class _EchoAgent(BaseAgent): + """Simple agent that appends a single assistant message with its name.""" + + async def run( # type: ignore[override] + self, + messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + *, + thread: AgentThread | None = None, + **kwargs: Any, + ) -> AgentResponse: + return AgentResponse(messages=[ChatMessage("assistant", [f"{self.name} reply"])]) + + async def run_stream( # type: ignore[override] + self, + messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + *, + thread: AgentThread | None = None, + **kwargs: Any, + ) -> AsyncIterable[AgentResponseUpdate]: + # Minimal async generator with one assistant update + yield AgentResponseUpdate(contents=[Content.from_text(text=f"{self.name} reply")]) + + +class _SummarizerExec(Executor): + """Custom executor that summarizes by appending a short assistant message.""" + + @handler + async def summarize(self, agent_response: AgentExecutorResponse, ctx: WorkflowContext[list[ChatMessage]]) -> None: + conversation = agent_response.full_conversation or [] + user_texts = [m.text for m in conversation if m.role == "user"] + agents = [m.author_name or m.role for m in conversation if m.role == "assistant"] + summary = ChatMessage("assistant", [f"Summary of users:{len(user_texts)} agents:{len(agents)}"]) + await ctx.send_message(list(conversation) + [summary]) + + +class _InvalidExecutor(Executor): + """Invalid executor that does not have a handler that accepts a list of chat messages""" + + 
@handler + async def summarize(self, conversation: list[str], ctx: WorkflowContext[list[ChatMessage]]) -> None: + pass + + +def test_sequential_builder_rejects_empty_participants() -> None: + with pytest.raises(ValueError): + SequentialBuilder().participants([]) + + +def test_sequential_builder_rejects_empty_participant_factories() -> None: + with pytest.raises(ValueError): + SequentialBuilder().register_participants([]) + + +def test_sequential_builder_rejects_mixing_participants_and_factories() -> None: + """Test that mixing .participants() and .register_participants() raises an error.""" + a1 = _EchoAgent(id="agent1", name="A1") + + # Try .participants() then .register_participants() + with pytest.raises(ValueError, match="Cannot mix"): + SequentialBuilder().participants([a1]).register_participants([lambda: _EchoAgent(id="agent2", name="A2")]) + + # Try .register_participants() then .participants() + with pytest.raises(ValueError, match="Cannot mix"): + SequentialBuilder().register_participants([lambda: _EchoAgent(id="agent1", name="A1")]).participants([a1]) + + +def test_sequential_builder_validation_rejects_invalid_executor() -> None: + """Test that adding an invalid executor to the builder raises an error.""" + with pytest.raises(TypeCompatibilityError): + SequentialBuilder().participants([_EchoAgent(id="agent1", name="A1"), _InvalidExecutor(id="invalid")]).build() + + +async def test_sequential_agents_append_to_context() -> None: + a1 = _EchoAgent(id="agent1", name="A1") + a2 = _EchoAgent(id="agent2", name="A2") + + wf = SequentialBuilder().participants([a1, a2]).build() + + completed = False + output: list[ChatMessage] | None = None + async for ev in wf.run_stream("hello sequential"): + if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + completed = True + elif isinstance(ev, WorkflowOutputEvent): + output = ev.data # type: ignore[assignment] + if completed and output is not None: + break + + assert completed + assert output is 
not None + assert isinstance(output, list) + msgs: list[ChatMessage] = output + assert len(msgs) == 3 + assert msgs[0].role == "user" and "hello sequential" in msgs[0].text + assert msgs[1].role == "assistant" and (msgs[1].author_name == "A1" or True) + assert msgs[2].role == "assistant" and (msgs[2].author_name == "A2" or True) + assert "A1 reply" in msgs[1].text + assert "A2 reply" in msgs[2].text + + +async def test_sequential_register_participants_with_agent_factories() -> None: + """Test that register_participants works with agent factories.""" + + def create_agent1() -> _EchoAgent: + return _EchoAgent(id="agent1", name="A1") + + def create_agent2() -> _EchoAgent: + return _EchoAgent(id="agent2", name="A2") + + wf = SequentialBuilder().register_participants([create_agent1, create_agent2]).build() + + completed = False + output: list[ChatMessage] | None = None + async for ev in wf.run_stream("hello factories"): + if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + completed = True + elif isinstance(ev, WorkflowOutputEvent): + output = ev.data + if completed and output is not None: + break + + assert completed + assert output is not None + assert isinstance(output, list) + msgs: list[ChatMessage] = output + assert len(msgs) == 3 + assert msgs[0].role == "user" and "hello factories" in msgs[0].text + assert msgs[1].role == "assistant" and "A1 reply" in msgs[1].text + assert msgs[2].role == "assistant" and "A2 reply" in msgs[2].text + + +async def test_sequential_with_custom_executor_summary() -> None: + a1 = _EchoAgent(id="agent1", name="A1") + summarizer = _SummarizerExec(id="summarizer") + + wf = SequentialBuilder().participants([a1, summarizer]).build() + + completed = False + output: list[ChatMessage] | None = None + async for ev in wf.run_stream("topic X"): + if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + completed = True + elif isinstance(ev, WorkflowOutputEvent): + output = ev.data + if completed 
and output is not None: + break + + assert completed + assert output is not None + msgs: list[ChatMessage] = output + # Expect: [user, A1 reply, summary] + assert len(msgs) == 3 + assert msgs[0].role == "user" + assert msgs[1].role == "assistant" and "A1 reply" in msgs[1].text + assert msgs[2].role == "assistant" and msgs[2].text.startswith("Summary of users:") + + +async def test_sequential_register_participants_mixed_agents_and_executors() -> None: + """Test register_participants with both agent and executor factories.""" + + def create_agent() -> _EchoAgent: + return _EchoAgent(id="agent1", name="A1") + + def create_summarizer() -> _SummarizerExec: + return _SummarizerExec(id="summarizer") + + wf = SequentialBuilder().register_participants([create_agent, create_summarizer]).build() + + completed = False + output: list[ChatMessage] | None = None + async for ev in wf.run_stream("topic Y"): + if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + completed = True + elif isinstance(ev, WorkflowOutputEvent): + output = ev.data + if completed and output is not None: + break + + assert completed + assert output is not None + msgs: list[ChatMessage] = output + # Expect: [user, A1 reply, summary] + assert len(msgs) == 3 + assert msgs[0].role == "user" and "topic Y" in msgs[0].text + assert msgs[1].role == "assistant" and "A1 reply" in msgs[1].text + assert msgs[2].role == "assistant" and msgs[2].text.startswith("Summary of users:") + + +async def test_sequential_checkpoint_resume_round_trip() -> None: + storage = InMemoryCheckpointStorage() + + initial_agents = (_EchoAgent(id="agent1", name="A1"), _EchoAgent(id="agent2", name="A2")) + wf = SequentialBuilder().participants(list(initial_agents)).with_checkpointing(storage).build() + + baseline_output: list[ChatMessage] | None = None + async for ev in wf.run_stream("checkpoint sequential"): + if isinstance(ev, WorkflowOutputEvent): + baseline_output = ev.data # type: ignore[assignment] + if 
isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + break + + assert baseline_output is not None + + checkpoints = await storage.list_checkpoints() + assert checkpoints + checkpoints.sort(key=lambda cp: cp.timestamp) + + resume_checkpoint = next( + (cp for cp in checkpoints if (cp.metadata or {}).get("checkpoint_type") == "superstep"), + checkpoints[-1], + ) + + resumed_agents = (_EchoAgent(id="agent1", name="A1"), _EchoAgent(id="agent2", name="A2")) + wf_resume = SequentialBuilder().participants(list(resumed_agents)).with_checkpointing(storage).build() + + resumed_output: list[ChatMessage] | None = None + async for ev in wf_resume.run_stream(checkpoint_id=resume_checkpoint.checkpoint_id): + if isinstance(ev, WorkflowOutputEvent): + resumed_output = ev.data # type: ignore[assignment] + if isinstance(ev, WorkflowStatusEvent) and ev.state in ( + WorkflowRunState.IDLE, + WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, + ): + break + + assert resumed_output is not None + assert [m.role for m in resumed_output] == [m.role for m in baseline_output] + assert [m.text for m in resumed_output] == [m.text for m in baseline_output] + + +async def test_sequential_checkpoint_runtime_only() -> None: + """Test checkpointing configured ONLY at runtime, not at build time.""" + storage = InMemoryCheckpointStorage() + + agents = (_EchoAgent(id="agent1", name="A1"), _EchoAgent(id="agent2", name="A2")) + wf = SequentialBuilder().participants(list(agents)).build() + + baseline_output: list[ChatMessage] | None = None + async for ev in wf.run_stream("runtime checkpoint test", checkpoint_storage=storage): + if isinstance(ev, WorkflowOutputEvent): + baseline_output = ev.data # type: ignore[assignment] + if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + break + + assert baseline_output is not None + + checkpoints = await storage.list_checkpoints() + assert checkpoints + checkpoints.sort(key=lambda cp: cp.timestamp) + + resume_checkpoint = 
next( + (cp for cp in checkpoints if (cp.metadata or {}).get("checkpoint_type") == "superstep"), + checkpoints[-1], + ) + + resumed_agents = (_EchoAgent(id="agent1", name="A1"), _EchoAgent(id="agent2", name="A2")) + wf_resume = SequentialBuilder().participants(list(resumed_agents)).build() + + resumed_output: list[ChatMessage] | None = None + async for ev in wf_resume.run_stream(checkpoint_id=resume_checkpoint.checkpoint_id, checkpoint_storage=storage): + if isinstance(ev, WorkflowOutputEvent): + resumed_output = ev.data # type: ignore[assignment] + if isinstance(ev, WorkflowStatusEvent) and ev.state in ( + WorkflowRunState.IDLE, + WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, + ): + break + + assert resumed_output is not None + assert [m.role for m in resumed_output] == [m.role for m in baseline_output] + assert [m.text for m in resumed_output] == [m.text for m in baseline_output] + + +async def test_sequential_checkpoint_runtime_overrides_buildtime() -> None: + """Test that runtime checkpoint storage overrides build-time configuration.""" + import tempfile + + with tempfile.TemporaryDirectory() as temp_dir1, tempfile.TemporaryDirectory() as temp_dir2: + from agent_framework._workflows._checkpoint import FileCheckpointStorage + + buildtime_storage = FileCheckpointStorage(temp_dir1) + runtime_storage = FileCheckpointStorage(temp_dir2) + + agents = (_EchoAgent(id="agent1", name="A1"), _EchoAgent(id="agent2", name="A2")) + wf = SequentialBuilder().participants(list(agents)).with_checkpointing(buildtime_storage).build() + + baseline_output: list[ChatMessage] | None = None + async for ev in wf.run_stream("override test", checkpoint_storage=runtime_storage): + if isinstance(ev, WorkflowOutputEvent): + baseline_output = ev.data # type: ignore[assignment] + if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + break + + assert baseline_output is not None + + buildtime_checkpoints = await buildtime_storage.list_checkpoints() + runtime_checkpoints = 
await runtime_storage.list_checkpoints() + + assert len(runtime_checkpoints) > 0, "Runtime storage should have checkpoints" + assert len(buildtime_checkpoints) == 0, "Build-time storage should have no checkpoints when overridden" + + +async def test_sequential_register_participants_with_checkpointing() -> None: + """Test that checkpointing works with register_participants.""" + storage = InMemoryCheckpointStorage() + + def create_agent1() -> _EchoAgent: + return _EchoAgent(id="agent1", name="A1") + + def create_agent2() -> _EchoAgent: + return _EchoAgent(id="agent2", name="A2") + + wf = SequentialBuilder().register_participants([create_agent1, create_agent2]).with_checkpointing(storage).build() + + baseline_output: list[ChatMessage] | None = None + async for ev in wf.run_stream("checkpoint with factories"): + if isinstance(ev, WorkflowOutputEvent): + baseline_output = ev.data + if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + break + + assert baseline_output is not None + + checkpoints = await storage.list_checkpoints() + assert checkpoints + checkpoints.sort(key=lambda cp: cp.timestamp) + + resume_checkpoint = next( + (cp for cp in checkpoints if (cp.metadata or {}).get("checkpoint_type") == "superstep"), + checkpoints[-1], + ) + + wf_resume = ( + SequentialBuilder().register_participants([create_agent1, create_agent2]).with_checkpointing(storage).build() + ) + + resumed_output: list[ChatMessage] | None = None + async for ev in wf_resume.run_stream(checkpoint_id=resume_checkpoint.checkpoint_id): + if isinstance(ev, WorkflowOutputEvent): + resumed_output = ev.data + if isinstance(ev, WorkflowStatusEvent) and ev.state in ( + WorkflowRunState.IDLE, + WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, + ): + break + + assert resumed_output is not None + assert [m.role for m in resumed_output] == [m.role for m in baseline_output] + assert [m.text for m in resumed_output] == [m.text for m in baseline_output] + + +async def 
test_sequential_register_participants_factories_called_on_build() -> None: + """Test that factories are called during build(), not during register_participants().""" + call_count = 0 + + def create_agent() -> _EchoAgent: + nonlocal call_count + call_count += 1 + return _EchoAgent(id=f"agent{call_count}", name=f"A{call_count}") + + builder = SequentialBuilder().register_participants([create_agent, create_agent]) + + # Factories should not be called yet + assert call_count == 0 + + wf = builder.build() + + # Now factories should have been called + assert call_count == 2 + + # Run the workflow to ensure it works + completed = False + output: list[ChatMessage] | None = None + async for ev in wf.run_stream("test factories timing"): + if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + completed = True + elif isinstance(ev, WorkflowOutputEvent): + output = ev.data # type: ignore[assignment] + if completed and output is not None: + break + + assert completed + assert output is not None + msgs: list[ChatMessage] = output + # Should have user message + 2 agent replies + assert len(msgs) == 3 + + +async def test_sequential_builder_reusable_after_build_with_participants() -> None: + """Test that the builder can be reused to build multiple identical workflows with participants().""" + a1 = _EchoAgent(id="agent1", name="A1") + a2 = _EchoAgent(id="agent2", name="A2") + + builder = SequentialBuilder().participants([a1, a2]) + + # Build first workflow + builder.build() + + assert builder._participants[0] is a1 # type: ignore + assert builder._participants[1] is a2 # type: ignore + assert builder._participant_factories == [] # type: ignore + + +async def test_sequential_builder_reusable_after_build_with_factories() -> None: + """Test that the builder can be reused to build multiple workflows with register_participants().""" + call_count = 0 + + def create_agent1() -> _EchoAgent: + nonlocal call_count + call_count += 1 + return _EchoAgent(id="agent1", 
name="A1") + + def create_agent2() -> _EchoAgent: + nonlocal call_count + call_count += 1 + return _EchoAgent(id="agent2", name="A2") + + builder = SequentialBuilder().register_participants([create_agent1, create_agent2]) + + # Build first workflow - factories should be called + builder.build() + + assert call_count == 2 + assert builder._participants == [] # type: ignore + assert len(builder._participant_factories) == 2 # type: ignore + assert builder._participant_factories[0] is create_agent1 # type: ignore + assert builder._participant_factories[1] is create_agent2 # type: ignore diff --git a/python/pyproject.toml b/python/pyproject.toml index a14354cbe4..ebbc83ac4b 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -104,6 +104,7 @@ agent-framework-purview = { workspace = true } agent-framework-redis = { workspace = true } agent-framework-github-copilot = { workspace = true } agent-framework-claude = { workspace = true } +agent-framework-orchestrations = { workspace = true } [tool.ruff] line-length = 120 @@ -253,6 +254,7 @@ pytest --import-mode=importlib --cov=agent_framework_mem0 --cov=agent_framework_purview --cov=agent_framework_redis +--cov=agent_framework_orchestrations --cov-config=pyproject.toml --cov-report=term-missing:skip-covered --ignore-glob=packages/lab/** diff --git a/python/samples/getting_started/orchestrations/README.md b/python/samples/getting_started/orchestrations/README.md new file mode 100644 index 0000000000..886be7775b --- /dev/null +++ b/python/samples/getting_started/orchestrations/README.md @@ -0,0 +1,71 @@ +# Orchestration Getting Started Samples + +## Installation + +The orchestrations package is included when you install `agent-framework` (which pulls in all optional packages): + +```bash +pip install agent-framework +``` + +Or install the orchestrations package directly: + +```bash +pip install agent-framework-orchestrations +``` + +Orchestration builders are available via the `agent_framework.orchestrations` 
submodule: + +```python +from agent_framework.orchestrations import ( + SequentialBuilder, + ConcurrentBuilder, + HandoffBuilder, + GroupChatBuilder, + MagenticBuilder, +) +``` + +## Samples Overview + +| Sample | File | Concepts | +| ------------------------------------------------- | ------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------- | +| Concurrent Orchestration (Default Aggregator) | [concurrent_agents.py](./concurrent_agents.py) | Fan-out to multiple agents; fan-in with default aggregator returning combined ChatMessages | +| Concurrent Orchestration (Custom Aggregator) | [concurrent_custom_aggregator.py](./concurrent_custom_aggregator.py) | Override aggregator via callback; summarize results with an LLM | +| Concurrent Orchestration (Custom Agent Executors) | [concurrent_custom_agent_executors.py](./concurrent_custom_agent_executors.py) | Child executors own ChatAgents; concurrent fan-out/fan-in via ConcurrentBuilder | +| Concurrent Orchestration (Participant Factory) | [concurrent_participant_factory.py](./concurrent_participant_factory.py) | Use participant factories for state isolation between workflow instances | +| Group Chat with Agent Manager | [group_chat_agent_manager.py](./group_chat_agent_manager.py) | Agent-based manager using `with_orchestrator(agent=)` to select next speaker | +| Group Chat Philosophical Debate | [group_chat_philosophical_debate.py](./group_chat_philosophical_debate.py) | Agent manager moderates long-form, multi-round debate across diverse participants | +| Group Chat with Simple Function Selector | [group_chat_simple_selector.py](./group_chat_simple_selector.py) | Group chat with a simple function selector for next speaker | +| Handoff (Simple) | [handoff_simple.py](./handoff_simple.py) | Single-tier routing: triage agent routes to specialists, control returns to user after each 
specialist response | +| Handoff (Autonomous) | [handoff_autonomous.py](./handoff_autonomous.py) | Autonomous mode: specialists iterate independently until invoking a handoff tool using `.with_autonomous_mode()` | +| Handoff (Participant Factory) | [handoff_participant_factory.py](./handoff_participant_factory.py) | Use participant factories for state isolation between workflow instances | +| Handoff with Code Interpreter | [handoff_with_code_interpreter_file.py](./handoff_with_code_interpreter_file.py) | Retrieve file IDs from code interpreter output in handoff workflow | +| Handoff with Azure AI Agent | [handoff_participant_factory_azure_ai_agent.py](./handoff_participant_factory_azure_ai_agent.py) | Handoff workflow with tool approvals and checkpoint resume using AzureAIProjectAgentProvider | +| Magentic Workflow (Multi-Agent) | [magentic.py](./magentic.py) | Orchestrate multiple agents with Magentic manager and streaming | +| Magentic + Human Plan Review | [magentic_human_plan_review.py](./magentic_human_plan_review.py) | Human reviews/updates the plan before execution | +| Magentic + Checkpoint Resume | [magentic_checkpoint.py](./magentic_checkpoint.py) | Resume Magentic orchestration from saved checkpoints | +| Sequential Orchestration (Agents) | [sequential_agents.py](./sequential_agents.py) | Chain agents sequentially with shared conversation context | +| Sequential Orchestration (Custom Executor) | [sequential_custom_executors.py](./sequential_custom_executors.py) | Mix agents with a summarizer that appends a compact summary | +| Sequential Orchestration (Participant Factories) | [sequential_participant_factory.py](./sequential_participant_factory.py) | Use participant factories for state isolation between workflow instances | + +## Tips + +**Magentic checkpointing tip**: Treat `MagenticBuilder.participants` keys as stable identifiers. 
When resuming from a checkpoint, the rebuilt workflow must reuse the same participant names; otherwise the checkpoint cannot be applied and the run will fail fast. + +**Handoff workflow tip**: Handoff workflows maintain the full conversation history including any `ChatMessage.additional_properties` emitted by your agents. This ensures routing metadata remains intact across all agent transitions. For specialist-to-specialist handoffs, use `.add_handoff(source, targets)` to configure which agents can route to which others with a fluent, type-safe API. + +**Sequential orchestration note**: Sequential orchestration uses a few small adapter nodes for plumbing: +- `input-conversation` normalizes input to `list[ChatMessage]` +- `to-conversation:` converts agent responses into the shared conversation +- `complete` publishes the final `WorkflowOutputEvent` + +These may appear in event streams (ExecutorInvoke/Completed). They're analogous to concurrent's dispatcher and aggregator and can be ignored if you only care about agent activity. + +## Environment Variables + +- **AzureOpenAIChatClient**: Set Azure OpenAI environment variables as documented [here](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/chat_client/README.md#environment-variables). 
+ +- **OpenAI** (used in some orchestration samples): + - [OpenAIChatClient env vars](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/agents/openai_chat_client/README.md) + - [OpenAIResponsesClient env vars](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/agents/openai_responses_client/README.md) diff --git a/python/samples/getting_started/workflows/orchestration/concurrent_agents.py b/python/samples/getting_started/orchestrations/concurrent_agents.py similarity index 98% rename from python/samples/getting_started/workflows/orchestration/concurrent_agents.py rename to python/samples/getting_started/orchestrations/concurrent_agents.py index 2be0f29f9c..60c3d6eb01 100644 --- a/python/samples/getting_started/workflows/orchestration/concurrent_agents.py +++ b/python/samples/getting_started/orchestrations/concurrent_agents.py @@ -3,8 +3,9 @@ import asyncio from typing import Any -from agent_framework import ChatMessage, ConcurrentBuilder +from agent_framework import ChatMessage from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.orchestrations import ConcurrentBuilder from azure.identity import AzureCliCredential """ diff --git a/python/samples/getting_started/workflows/orchestration/concurrent_custom_agent_executors.py b/python/samples/getting_started/orchestrations/concurrent_custom_agent_executors.py similarity index 99% rename from python/samples/getting_started/workflows/orchestration/concurrent_custom_agent_executors.py rename to python/samples/getting_started/orchestrations/concurrent_custom_agent_executors.py index 76203dba63..55512ecc6e 100644 --- a/python/samples/getting_started/workflows/orchestration/concurrent_custom_agent_executors.py +++ b/python/samples/getting_started/orchestrations/concurrent_custom_agent_executors.py @@ -8,12 +8,12 @@ AgentExecutorResponse, ChatAgent, ChatMessage, - ConcurrentBuilder, Executor, WorkflowContext, handler, ) from 
agent_framework.azure import AzureOpenAIChatClient +from agent_framework.orchestrations import ConcurrentBuilder from azure.identity import AzureCliCredential """ diff --git a/python/samples/getting_started/workflows/orchestration/concurrent_custom_aggregator.py b/python/samples/getting_started/orchestrations/concurrent_custom_aggregator.py similarity index 98% rename from python/samples/getting_started/workflows/orchestration/concurrent_custom_aggregator.py rename to python/samples/getting_started/orchestrations/concurrent_custom_aggregator.py index 1690c2baad..994107acc3 100644 --- a/python/samples/getting_started/workflows/orchestration/concurrent_custom_aggregator.py +++ b/python/samples/getting_started/orchestrations/concurrent_custom_aggregator.py @@ -3,8 +3,9 @@ import asyncio from typing import Any -from agent_framework import ChatMessage, ConcurrentBuilder +from agent_framework import ChatMessage from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.orchestrations import ConcurrentBuilder from azure.identity import AzureCliCredential """ diff --git a/python/samples/getting_started/workflows/orchestration/concurrent_participant_factory.py b/python/samples/getting_started/orchestrations/concurrent_participant_factory.py similarity index 99% rename from python/samples/getting_started/workflows/orchestration/concurrent_participant_factory.py rename to python/samples/getting_started/orchestrations/concurrent_participant_factory.py index 941456a823..fd21378a37 100644 --- a/python/samples/getting_started/workflows/orchestration/concurrent_participant_factory.py +++ b/python/samples/getting_started/orchestrations/concurrent_participant_factory.py @@ -6,13 +6,13 @@ from agent_framework import ( ChatAgent, ChatMessage, - ConcurrentBuilder, Executor, Workflow, WorkflowContext, handler, ) from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.orchestrations import ConcurrentBuilder from azure.identity import 
AzureCliCredential """ diff --git a/python/samples/getting_started/workflows/orchestration/group_chat_agent_manager.py b/python/samples/getting_started/orchestrations/group_chat_agent_manager.py similarity index 98% rename from python/samples/getting_started/workflows/orchestration/group_chat_agent_manager.py rename to python/samples/getting_started/orchestrations/group_chat_agent_manager.py index cdc03a5ea5..9ce3371141 100644 --- a/python/samples/getting_started/workflows/orchestration/group_chat_agent_manager.py +++ b/python/samples/getting_started/orchestrations/group_chat_agent_manager.py @@ -6,10 +6,10 @@ AgentRunUpdateEvent, ChatAgent, ChatMessage, - GroupChatBuilder, WorkflowOutputEvent, ) from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.orchestrations import GroupChatBuilder from azure.identity import AzureCliCredential """ diff --git a/python/samples/getting_started/workflows/orchestration/group_chat_philosophical_debate.py b/python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py similarity index 99% rename from python/samples/getting_started/workflows/orchestration/group_chat_philosophical_debate.py rename to python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py index de613dea2e..34f19b2ad2 100644 --- a/python/samples/getting_started/workflows/orchestration/group_chat_philosophical_debate.py +++ b/python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py @@ -8,10 +8,10 @@ AgentRunUpdateEvent, ChatAgent, ChatMessage, - GroupChatBuilder, WorkflowOutputEvent, ) from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.orchestrations import GroupChatBuilder from azure.identity import AzureCliCredential logging.basicConfig(level=logging.WARNING) diff --git a/python/samples/getting_started/workflows/orchestration/group_chat_simple_selector.py b/python/samples/getting_started/orchestrations/group_chat_simple_selector.py similarity index 
98% rename from python/samples/getting_started/workflows/orchestration/group_chat_simple_selector.py rename to python/samples/getting_started/orchestrations/group_chat_simple_selector.py index 1047cd6f22..3215150edf 100644 --- a/python/samples/getting_started/workflows/orchestration/group_chat_simple_selector.py +++ b/python/samples/getting_started/orchestrations/group_chat_simple_selector.py @@ -6,11 +6,10 @@ AgentRunUpdateEvent, ChatAgent, ChatMessage, - GroupChatBuilder, - GroupChatState, WorkflowOutputEvent, ) from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.orchestrations import GroupChatBuilder, GroupChatState from azure.identity import AzureCliCredential """ diff --git a/python/samples/getting_started/workflows/orchestration/handoff_autonomous.py b/python/samples/getting_started/orchestrations/handoff_autonomous.py similarity index 97% rename from python/samples/getting_started/workflows/orchestration/handoff_autonomous.py rename to python/samples/getting_started/orchestrations/handoff_autonomous.py index e33b230ce7..c9e97e4589 100644 --- a/python/samples/getting_started/workflows/orchestration/handoff_autonomous.py +++ b/python/samples/getting_started/orchestrations/handoff_autonomous.py @@ -9,13 +9,12 @@ AgentRunUpdateEvent, ChatAgent, ChatMessage, - HandoffBuilder, - HostedWebSearchTool, WorkflowEvent, WorkflowOutputEvent, resolve_agent_id, ) from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.orchestrations import HandoffBuilder from azure.identity import AzureCliCredential logging.basicConfig(level=logging.ERROR) @@ -62,7 +61,6 @@ def create_agents( "coordinator. Keep each individual response focused on one aspect." 
), name="research_agent", - tools=[HostedWebSearchTool()], ) summary_agent = chat_client.as_agent( @@ -130,8 +128,7 @@ async def main() -> None: ) .with_termination_condition( # Terminate after coordinator provides 5 assistant responses - lambda conv: sum(1 for msg in conv if msg.author_name == "coordinator" and msg.role == "assistant") - >= 5 + lambda conv: sum(1 for msg in conv if msg.author_name == "coordinator" and msg.role == "assistant") >= 5 ) .build() ) diff --git a/python/samples/getting_started/workflows/orchestration/handoff_participant_factory.py b/python/samples/getting_started/orchestrations/handoff_participant_factory.py similarity index 97% rename from python/samples/getting_started/workflows/orchestration/handoff_participant_factory.py rename to python/samples/getting_started/orchestrations/handoff_participant_factory.py index 9107e217c6..3c370f4d25 100644 --- a/python/samples/getting_started/workflows/orchestration/handoff_participant_factory.py +++ b/python/samples/getting_started/orchestrations/handoff_participant_factory.py @@ -9,9 +9,6 @@ AgentRunEvent, ChatAgent, ChatMessage, - HandoffAgentUserRequest, - HandoffBuilder, - HandoffSentEvent, RequestInfoEvent, Workflow, WorkflowEvent, @@ -21,6 +18,7 @@ tool, ) from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.orchestrations import HandoffAgentUserRequest, HandoffBuilder, HandoffSentEvent from azure.identity import AzureCliCredential logging.basicConfig(level=logging.ERROR) @@ -237,9 +235,11 @@ async def main() -> None: # Custom termination: Check if the triage agent has provided a closing message. # This looks for the last message being from triage_agent and containing "welcome", # which indicates the conversation has concluded naturally. 
- lambda conversation: len(conversation) > 0 - and conversation[-1].author_name == "triage_agent" - and "welcome" in conversation[-1].text.lower() + lambda conversation: ( + len(conversation) > 0 + and conversation[-1].author_name == "triage_agent" + and "welcome" in conversation[-1].text.lower() + ) ) ) diff --git a/python/samples/getting_started/workflows/orchestration/handoff_simple.py b/python/samples/getting_started/orchestrations/handoff_simple.py similarity index 99% rename from python/samples/getting_started/workflows/orchestration/handoff_simple.py rename to python/samples/getting_started/orchestrations/handoff_simple.py index 2e7f53a82d..8d635bafb5 100644 --- a/python/samples/getting_started/workflows/orchestration/handoff_simple.py +++ b/python/samples/getting_started/orchestrations/handoff_simple.py @@ -8,9 +8,6 @@ AgentRunEvent, ChatAgent, ChatMessage, - HandoffAgentUserRequest, - HandoffBuilder, - HandoffSentEvent, RequestInfoEvent, WorkflowEvent, WorkflowOutputEvent, @@ -19,6 +16,7 @@ tool, ) from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.orchestrations import HandoffAgentUserRequest, HandoffBuilder, HandoffSentEvent from azure.identity import AzureCliCredential """Sample: Simple handoff workflow. 
diff --git a/python/samples/getting_started/workflows/orchestration/handoff_with_code_interpreter_file.py b/python/samples/getting_started/orchestrations/handoff_with_code_interpreter_file.py similarity index 98% rename from python/samples/getting_started/workflows/orchestration/handoff_with_code_interpreter_file.py rename to python/samples/getting_started/orchestrations/handoff_with_code_interpreter_file.py index 0c0616850b..820fee70ca 100644 --- a/python/samples/getting_started/workflows/orchestration/handoff_with_code_interpreter_file.py +++ b/python/samples/getting_started/orchestrations/handoff_with_code_interpreter_file.py @@ -32,16 +32,13 @@ from agent_framework import ( AgentRunUpdateEvent, ChatAgent, - Content, - HandoffAgentUserRequest, - HandoffBuilder, HostedCodeInterpreterTool, - HostedFileContent, RequestInfoEvent, WorkflowEvent, WorkflowRunState, WorkflowStatusEvent, ) +from agent_framework.orchestrations import HandoffAgentUserRequest, HandoffBuilder from azure.identity.aio import AzureCliCredential # Toggle between V1 (AzureAIAgentClient) and V2 (AzureAIClient) @@ -72,7 +69,7 @@ def _handle_events(events: list[WorkflowEvent]) -> tuple[list[RequestInfoEvent], elif isinstance(event, AgentRunUpdateEvent): for content in event.data.contents: - if isinstance(content, HostedFileContent): + if content.type == "hosted_file" and content.file_id: file_ids.append(content.file_id) print(f"[Found HostedFileContent: file_id={content.file_id}]") elif content.type == "text" and content.annotations: diff --git a/python/samples/getting_started/workflows/orchestration/magentic.py b/python/samples/getting_started/orchestrations/magentic.py similarity index 98% rename from python/samples/getting_started/workflows/orchestration/magentic.py rename to python/samples/getting_started/orchestrations/magentic.py index 60746bc113..cf3750b9b9 100644 --- a/python/samples/getting_started/workflows/orchestration/magentic.py +++ 
b/python/samples/getting_started/orchestrations/magentic.py @@ -11,12 +11,10 @@ ChatMessage, GroupChatRequestSentEvent, HostedCodeInterpreterTool, - MagenticBuilder, - MagenticOrchestratorEvent, - MagenticProgressLedger, WorkflowOutputEvent, ) from agent_framework.openai import OpenAIChatClient, OpenAIResponsesClient +from agent_framework.orchestrations import MagenticBuilder, MagenticOrchestratorEvent, MagenticProgressLedger logging.basicConfig(level=logging.WARNING) logger = logging.getLogger(__name__) diff --git a/python/samples/getting_started/workflows/orchestration/magentic_checkpoint.py b/python/samples/getting_started/orchestrations/magentic_checkpoint.py similarity index 99% rename from python/samples/getting_started/workflows/orchestration/magentic_checkpoint.py rename to python/samples/getting_started/orchestrations/magentic_checkpoint.py index 2dd6a1a170..48f9dce5be 100644 --- a/python/samples/getting_started/workflows/orchestration/magentic_checkpoint.py +++ b/python/samples/getting_started/orchestrations/magentic_checkpoint.py @@ -9,8 +9,6 @@ ChatAgent, ChatMessage, FileCheckpointStorage, - MagenticBuilder, - MagenticPlanReviewRequest, RequestInfoEvent, WorkflowCheckpoint, WorkflowOutputEvent, @@ -18,6 +16,7 @@ WorkflowStatusEvent, ) from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.orchestrations import MagenticBuilder, MagenticPlanReviewRequest from azure.identity._credentials import AzureCliCredential """ diff --git a/python/samples/getting_started/workflows/orchestration/magentic_human_plan_review.py b/python/samples/getting_started/orchestrations/magentic_human_plan_review.py similarity index 98% rename from python/samples/getting_started/workflows/orchestration/magentic_human_plan_review.py rename to python/samples/getting_started/orchestrations/magentic_human_plan_review.py index 1050463d01..28dd009502 100644 --- a/python/samples/getting_started/workflows/orchestration/magentic_human_plan_review.py +++ 
b/python/samples/getting_started/orchestrations/magentic_human_plan_review.py @@ -8,12 +8,11 @@ AgentRunUpdateEvent, ChatAgent, ChatMessage, - MagenticBuilder, - MagenticPlanReviewRequest, RequestInfoEvent, WorkflowOutputEvent, ) from agent_framework.openai import OpenAIChatClient +from agent_framework.orchestrations import MagenticBuilder, MagenticPlanReviewRequest """ Sample: Magentic Orchestration with Human Plan Review diff --git a/python/samples/getting_started/workflows/orchestration/sequential_agents.py b/python/samples/getting_started/orchestrations/sequential_agents.py similarity index 96% rename from python/samples/getting_started/workflows/orchestration/sequential_agents.py rename to python/samples/getting_started/orchestrations/sequential_agents.py index 59a9cb5bdd..681a810846 100644 --- a/python/samples/getting_started/workflows/orchestration/sequential_agents.py +++ b/python/samples/getting_started/orchestrations/sequential_agents.py @@ -3,8 +3,9 @@ import asyncio from typing import cast -from agent_framework import ChatMessage, SequentialBuilder, WorkflowOutputEvent +from agent_framework import ChatMessage, WorkflowOutputEvent from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.orchestrations import SequentialBuilder from azure.identity import AzureCliCredential """ diff --git a/python/samples/getting_started/workflows/orchestration/sequential_custom_executors.py b/python/samples/getting_started/orchestrations/sequential_custom_executors.py similarity index 98% rename from python/samples/getting_started/workflows/orchestration/sequential_custom_executors.py rename to python/samples/getting_started/orchestrations/sequential_custom_executors.py index 09454f8b12..8b1cc8d8eb 100644 --- a/python/samples/getting_started/workflows/orchestration/sequential_custom_executors.py +++ b/python/samples/getting_started/orchestrations/sequential_custom_executors.py @@ -7,11 +7,11 @@ AgentExecutorResponse, ChatMessage, Executor, - 
SequentialBuilder, WorkflowContext, handler, ) from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.orchestrations import SequentialBuilder from azure.identity import AzureCliCredential """ diff --git a/python/samples/getting_started/workflows/orchestration/sequential_participant_factory.py b/python/samples/getting_started/orchestrations/sequential_participant_factory.py similarity index 98% rename from python/samples/getting_started/workflows/orchestration/sequential_participant_factory.py rename to python/samples/getting_started/orchestrations/sequential_participant_factory.py index 8b78a38926..243c4b145a 100644 --- a/python/samples/getting_started/workflows/orchestration/sequential_participant_factory.py +++ b/python/samples/getting_started/orchestrations/sequential_participant_factory.py @@ -6,12 +6,12 @@ ChatAgent, ChatMessage, Executor, - SequentialBuilder, Workflow, WorkflowContext, handler, ) from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.orchestrations import SequentialBuilder from azure.identity import AzureCliCredential """ diff --git a/python/samples/getting_started/workflows/README.md b/python/samples/getting_started/workflows/README.md index 524f93fd61..a98e462d98 100644 --- a/python/samples/getting_started/workflows/README.md +++ b/python/samples/getting_started/workflows/README.md @@ -110,31 +110,7 @@ For additional observability samples in Agent Framework, see the [observability ### orchestration -| Sample | File | Concepts | -| ------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | -| Concurrent Orchestration (Default Aggregator) | [orchestration/concurrent_agents.py](./orchestration/concurrent_agents.py) | Fan-out to multiple agents; fan-in with default aggregator returning 
combined ChatMessages | -| Concurrent Orchestration (Custom Aggregator) | [orchestration/concurrent_custom_aggregator.py](./orchestration/concurrent_custom_aggregator.py) | Override aggregator via callback; summarize results with an LLM | -| Concurrent Orchestration (Custom Agent Executors) | [orchestration/concurrent_custom_agent_executors.py](./orchestration/concurrent_custom_agent_executors.py) | Child executors own ChatAgents; concurrent fan-out/fan-in via ConcurrentBuilder | -| Concurrent Orchestration (Participant Factory) | [orchestration/concurrent_participant_factory.py](./orchestration/concurrent_participant_factory.py) | Use participant factories for state isolation between workflow instances | -| Group Chat with Agent Manager | [orchestration/group_chat_agent_manager.py](./orchestration/group_chat_agent_manager.py) | Agent-based manager using `with_orchestrator(agent=)` to select next speaker | -| Group Chat Philosophical Debate | [orchestration/group_chat_philosophical_debate.py](./orchestration/group_chat_philosophical_debate.py) | Agent manager moderates long-form, multi-round debate across diverse participants | -| Group Chat with Simple Function Selector | [orchestration/group_chat_simple_selector.py](./orchestration/group_chat_simple_selector.py) | Group chat with a simple function selector for next speaker | -| Handoff (Simple) | [orchestration/handoff_simple.py](./orchestration/handoff_simple.py) | Single-tier routing: triage agent routes to specialists, control returns to user after each specialist response | -| Handoff (Autonomous) | [orchestration/handoff_autonomous.py](./orchestration/handoff_autonomous.py) | Autonomous mode: specialists iterate independently until invoking a handoff tool using `.with_autonomous_mode()` | -| Handoff (Participant Factory) | [orchestration/handoff_participant_factory.py](./orchestration/handoff_participant_factory.py) | Use participant factories for state isolation between workflow instances | -| Magentic 
Workflow (Multi-Agent) | [orchestration/magentic.py](./orchestration/magentic.py) | Orchestrate multiple agents with Magentic manager and streaming | -| Magentic + Human Plan Review | [orchestration/magentic_human_plan_review.py](./orchestration/magentic_human_plan_review.py) | Human reviews/updates the plan before execution | -| Magentic + Checkpoint Resume | [orchestration/magentic_checkpoint.py](./orchestration/magentic_checkpoint.py) | Resume Magentic orchestration from saved checkpoints | -| Sequential Orchestration (Agents) | [orchestration/sequential_agents.py](./orchestration/sequential_agents.py) | Chain agents sequentially with shared conversation context | -| Sequential Orchestration (Custom Executor) | [orchestration/sequential_custom_executors.py](./orchestration/sequential_custom_executors.py) | Mix agents with a summarizer that appends a compact summary | -| Sequential Orchestration (Participant Factories) | [orchestration/sequential_participant_factory.py](./orchestration/sequential_participant_factory.py) | Use participant factories for state isolation between workflow instances | - -**Magentic checkpointing tip**: Treat `MagenticBuilder.participants` keys as stable identifiers. When resuming from a checkpoint, the rebuilt workflow must reuse the same participant names; otherwise the checkpoint cannot be applied and the run will fail fast. - -**Handoff workflow tip**: Handoff workflows maintain the full conversation history including any -`ChatMessage.additional_properties` emitted by your agents. This ensures routing metadata remains -intact across all agent transitions. For specialist-to-specialist handoffs, use `.add_handoff(source, targets)` -to configure which agents can route to which others with a fluent, type-safe API. +Orchestration samples (Sequential, Concurrent, Handoff, GroupChat, Magentic) have moved to the dedicated [orchestrations samples directory](../orchestrations/README.md). 
### parallelism diff --git a/python/uv.lock b/python/uv.lock index 63e2cd11b8..1eba1ebdc7 100644 --- a/python/uv.lock +++ b/python/uv.lock @@ -46,6 +46,7 @@ members = [ "agent-framework-lab", "agent-framework-mem0", "agent-framework-ollama", + "agent-framework-orchestrations", "agent-framework-purview", "agent-framework-redis", ] @@ -372,6 +373,7 @@ all = [ { name = "agent-framework-lab", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "agent-framework-mem0", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "agent-framework-ollama", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "agent-framework-orchestrations", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "agent-framework-purview", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "agent-framework-redis", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] @@ -393,6 +395,7 @@ requires-dist = [ { name = "agent-framework-lab", marker = "extra == 'all'", editable = "packages/lab" }, { name = "agent-framework-mem0", marker = "extra == 'all'", editable = "packages/mem0" }, { name = "agent-framework-ollama", marker = "extra == 'all'", editable = "packages/ollama" }, + { name = "agent-framework-orchestrations", marker = "extra == 'all'", editable = "packages/orchestrations" }, { name = "agent-framework-purview", marker = "extra == 'all'", editable = "packages/purview" }, { name = "agent-framework-redis", marker = "extra == 'all'", editable = "packages/redis" }, { name = "azure-identity", specifier = ">=1,<2" }, @@ -550,7 +553,7 @@ math = [ tau2 = [ { name = "loguru", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "numpy", version = "2.2.6", source = { 
registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, - { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "numpy", version = "2.4.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, { name = "pydantic", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "tiktoken", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] @@ -645,6 +648,17 @@ requires-dist = [ { name = "ollama", specifier = ">=0.5.3" }, ] +[[package]] +name = "agent-framework-orchestrations" +version = "1.0.0b260130" +source = { editable = "packages/orchestrations" } +dependencies = [ + { name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] + +[package.metadata] +requires-dist = [{ name = "agent-framework-core", editable = "packages/core" }] + [[package]] name = "agent-framework-purview" version = "1.0.0b260130" @@ -669,7 +683,7 @@ source = { editable = "packages/redis" } dependencies = [ { name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and 
sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, - { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "numpy", version = "2.4.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, { name = "redis", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "redisvl", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] @@ -914,7 +928,7 @@ wheels = [ [[package]] name = "anthropic" -version = "0.77.0" +version = "0.77.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -926,9 +940,9 @@ dependencies = [ { name = "sniffio", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "typing-extensions", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/eb/85/6cb5da3cf91de2eeea89726316e8c5c8c31e2d61ee7cb1233d7e95512c31/anthropic-0.77.0.tar.gz", hash = "sha256:ce36efeb80cb1e25430a88440dc0f9aa5c87f10d080ab70a1bdfd5c2c5fbedb4", size = 504575, upload-time = "2026-01-29T18:20:41.507Z" } +sdist = { url = "https://files.pythonhosted.org/packages/88/61/50aef0587acd9dd8bf1b8b7fd7fbb25ba4c6ec5387a6ffc195a697951fcc/anthropic-0.77.1.tar.gz", hash = "sha256:a19d78ff6fff9e05d211e3a936051cd5b9462f0eac043d2d45b2372f455d11cd", size = 504691, 
upload-time = "2026-02-03T17:44:22.667Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ac/27/9df785d3f94df9ac72f43ee9e14b8120b37d992b18f4952774ed46145022/anthropic-0.77.0-py3-none-any.whl", hash = "sha256:65cc83a3c82ce622d5c677d0d7706c77d29dc83958c6b10286e12fda6ffb2651", size = 397867, upload-time = "2026-01-29T18:20:39.481Z" }, + { url = "https://files.pythonhosted.org/packages/2b/54/e83babf9833547c5548b4e25230ef3d62492e45925b0d104a43e501918a0/anthropic-0.77.1-py3-none-any.whl", hash = "sha256:76fd6f2ab36033a5294d58182a5f712dab9573c3a54413a275ecdf29e727c1e0", size = 397856, upload-time = "2026-02-03T17:44:20.962Z" }, ] [[package]] @@ -1107,7 +1121,7 @@ wheels = [ [[package]] name = "azure-functions-durable" -version = "1.4.0" +version = "1.5.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohttp", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -1118,9 +1132,9 @@ dependencies = [ { name = "python-dateutil", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "requests", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/51/3a/f168b434fa69eaaf5d14b54d88239b851eceb7e10f666b55289dd0933ccb/azure-functions-durable-1.4.0.tar.gz", hash = "sha256:945488ef28917dae4295a4dd6e6f6601ffabe32e3fbb94ceb261c9b65b6e6c0f", size = 176584, upload-time = "2025-09-24T23:57:46.673Z" } +sdist = { url = "https://files.pythonhosted.org/packages/d0/7c/3654377e7000c4bd6b6edbb959efc4ad867005353843a4d810dfa8fbb72b/azure_functions_durable-1.5.0.tar.gz", hash = "sha256:131fbdf08fa1140d94dc3948fcf9000d8da58aaa5a0ffc4db0ea3be97d5551e2", size = 183733, upload-time = "2026-02-04T20:33:45.788Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/74/01/7f03229fa5c05a5cc7e41172aef80c5242d28aeea0825f592f93141a4b91/azure_functions_durable-1.4.0-py3-none-any.whl", hash = "sha256:0efe919cdda96924791feabe192a37c7d872414b4c6ce348417a02ee53d8cc31", size = 143159, upload-time = "2025-09-24T23:57:45.294Z" }, + { url = "https://files.pythonhosted.org/packages/b8/25/fb054d81c1fda64b229b04b4051657fedd4a72f53c51c59fcaca3a454d2f/azure_functions_durable-1.5.0-py3-none-any.whl", hash = "sha256:aea683193328924ae56eebb8f80647e186baf93e26c061f09ce532702c279ddc", size = 146619, upload-time = "2026-02-04T20:33:16.838Z" }, ] [[package]] @@ -1171,11 +1185,11 @@ wheels = [ [[package]] name = "babel" -version = "2.17.0" +version = "2.18.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/7d/6b/d52e42361e1aa00709585ecc30b3f9684b3ab62530771402248b1b1d6240/babel-2.17.0.tar.gz", hash = "sha256:0c54cffb19f690cdcc52a3b50bcbf71e07a808d1c80d549f2459b9d2cf0afb9d", size = 9951852, upload-time = "2025-02-01T15:17:41.026Z" } +sdist = { url = "https://files.pythonhosted.org/packages/7d/b2/51899539b6ceeeb420d40ed3cd4b7a40519404f9baf3d4ac99dc413a834b/babel-2.18.0.tar.gz", hash = "sha256:b80b99a14bd085fcacfa15c9165f651fbb3406e66cc603abf11c5750937c992d", size = 9959554, upload-time = "2026-02-01T12:30:56.078Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b7/b8/3fe70c75fe32afc4bb507f75563d39bc5642255d1d94f1f23604725780bf/babel-2.17.0-py3-none-any.whl", hash = "sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2", size = 10182537, upload-time = "2025-02-01T15:17:37.39Z" }, + { url = "https://files.pythonhosted.org/packages/77/f5/21d2de20e8b8b0408f0681956ca2c69f1320a3848ac50e6e7f39c6159675/babel-2.18.0-py3-none-any.whl", hash = "sha256:e2b422b277c2b9a9630c1d7903c2a00d0830c409c59ac8cae9081c92f1aeba35", size = 10196845, upload-time = "2026-02-01T12:30:53.445Z" }, ] [[package]] @@ -1424,19 +1438,19 @@ wheels = [ 
[[package]] name = "claude-agent-sdk" -version = "0.1.25" +version = "0.1.29" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "mcp", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "typing-extensions", marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/c5/ce/d8dd6eb56e981d1b981bf6766e1849878c54fbd160b6862e7c8e11b282d3/claude_agent_sdk-0.1.25.tar.gz", hash = "sha256:e2284fa2ece778d04b225f0f34118ea2623ae1f9fe315bc3bf921792658b6645", size = 57113, upload-time = "2026-01-29T01:20:17.353Z" } +sdist = { url = "https://files.pythonhosted.org/packages/f2/a7/e1449285606b98119729249394ad0e93e75ea6d25fa3006d734b21f73044/claude_agent_sdk-0.1.29.tar.gz", hash = "sha256:ece32436a81fc015ca325d4121edeb5627ae9af15b5079f7b42d5eda9dcdb7a3", size = 59801, upload-time = "2026-02-04T00:53:54.099Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/23/09/e25dad92af3305ded5490d4493f782b1cb8c530145a7107bceea26ec811e/claude_agent_sdk-0.1.25-py3-none-macosx_11_0_arm64.whl", hash = "sha256:6adeffacbb75fe5c91529512331587a7af0e5e6dcbce4bd6b3a6ef8a51bdabeb", size = 54672313, upload-time = "2026-01-29T01:20:03.651Z" }, - { url = "https://files.pythonhosted.org/packages/28/0f/7b39ce9dd7d8f995e2c9d2049e1ce79f9010144a6793e8dd6ea9df23f53e/claude_agent_sdk-0.1.25-py3-none-manylinux_2_17_aarch64.whl", hash = "sha256:f210a05b2b471568c7f4019875b0ab451c783397f21edc32d7bd9a7144d9aad1", size = 68848229, upload-time = "2026-01-29T01:20:07.311Z" }, - { url = 
"https://files.pythonhosted.org/packages/40/6f/0b22cd9a68c39c0a8f5bd024072c15ca89bfa2dbfad3a94a35f6a1a90ecd/claude_agent_sdk-0.1.25-py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:3399c3c748eb42deac308c6230cb0bb6b975c51b0495b42fe06896fa741d336f", size = 70562885, upload-time = "2026-01-29T01:20:11.033Z" }, - { url = "https://files.pythonhosted.org/packages/5c/b6/2aaf28eeaa994e5491ad9589a9b006d5112b167aab8ced0823a6ffd86e4f/claude_agent_sdk-0.1.25-py3-none-win_amd64.whl", hash = "sha256:c5e8fe666b88049080ae4ac2a02dbd2d5c00ab1c495683d3c2f7dfab8ff1fec9", size = 72746667, upload-time = "2026-01-29T01:20:14.271Z" }, + { url = "https://files.pythonhosted.org/packages/41/98/8915e3bb6acccf2b62b101545b286f30fd63e5421e9a3483b88a0c88f49b/claude_agent_sdk-0.1.29-py3-none-macosx_11_0_arm64.whl", hash = "sha256:811de31c92bd90250ebbfd79758c538766c672abde244ae0f7dec2d02ed5a1f7", size = 54225884, upload-time = "2026-02-04T00:53:38.169Z" }, + { url = "https://files.pythonhosted.org/packages/91/a7/9a8801ae25e453877bc71b5dc4f4818171bc9c04319e0681d3950fbe0232/claude_agent_sdk-0.1.29-py3-none-manylinux_2_17_aarch64.whl", hash = "sha256:6279360d251ce8b8e9d922b03e3492c88736648e7f5e7c9f301fde0eef37928f", size = 68426447, upload-time = "2026-02-04T00:53:42.539Z" }, + { url = "https://files.pythonhosted.org/packages/87/9c/aab63fe82c7cba80ee5234b0a928a032340cdaba0e48d23544e592b6f9ca/claude_agent_sdk-0.1.29-py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:4d1f01fe5f7252126f35808e2887a40125b784ac0dbf73b9509a4065a4766149", size = 70124488, upload-time = "2026-02-04T00:53:46.675Z" }, + { url = "https://files.pythonhosted.org/packages/63/30/135575231e53c10d4a99f1fa7b0b548f2ae89b907e41d0b2d158bde1896e/claude_agent_sdk-0.1.29-py3-none-win_amd64.whl", hash = "sha256:67fb58a72f0dd54d079c538078130cc8c888bc60652d3d396768ffaee6716467", size = 72305314, upload-time = "2026-02-04T00:53:51.045Z" }, ] [[package]] @@ -1456,7 +1470,7 @@ name = "clr-loader" version = "0.2.10" source = { registry = 
"https://pypi.org/simple" } dependencies = [ - { name = "cffi", marker = "(python_full_version < '3.14' and sys_platform == 'darwin') or (python_full_version < '3.14' and sys_platform == 'linux') or (python_full_version < '3.14' and sys_platform == 'win32')" }, + { name = "cffi", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/18/24/c12faf3f61614b3131b5c98d3bf0d376b49c7feaa73edca559aeb2aee080/clr_loader-0.2.10.tar.gz", hash = "sha256:81f114afbc5005bafc5efe5af1341d400e22137e275b042a8979f3feb9fc9446", size = 83605, upload-time = "2026-01-03T23:13:06.984Z" } wheels = [ @@ -1563,7 +1577,7 @@ resolution-markers = [ "python_full_version == '3.11.*' and sys_platform == 'win32'", ] dependencies = [ - { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "numpy", version = "2.4.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/58/01/1253e6698a07380cd31a736d248a3f2a50a7c88779a1813da27503cadc2a/contourpy-1.3.3.tar.gz", hash = "sha256:083e12155b210502d0bca491432bb04d56dc3432f95a979b429f2848c3dbe880", size = 13466174, upload-time = "2025-07-26T12:03:12.549Z" } wheels = [ @@ -1642,101 +1656,101 @@ wheels = [ [[package]] name = "coverage" -version = "7.13.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ad/49/349848445b0e53660e258acbcc9b0d014895b6739237920886672240f84b/coverage-7.13.2.tar.gz", hash = 
"sha256:044c6951ec37146b72a50cc81ef02217d27d4c3640efd2640311393cbbf143d3", size = 826523, upload-time = "2026-01-25T13:00:04.889Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a4/2d/63e37369c8e81a643afe54f76073b020f7b97ddbe698c5c944b51b0a2bc5/coverage-7.13.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f4af3b01763909f477ea17c962e2cca8f39b350a4e46e3a30838b2c12e31b81b", size = 218842, upload-time = "2026-01-25T12:57:15.3Z" }, - { url = "https://files.pythonhosted.org/packages/57/06/86ce882a8d58cbcb3030e298788988e618da35420d16a8c66dac34f138d0/coverage-7.13.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:36393bd2841fa0b59498f75466ee9bdec4f770d3254f031f23e8fd8e140ffdd2", size = 219360, upload-time = "2026-01-25T12:57:17.572Z" }, - { url = "https://files.pythonhosted.org/packages/cd/84/70b0eb1ee19ca4ef559c559054c59e5b2ae4ec9af61398670189e5d276e9/coverage-7.13.2-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:9cc7573518b7e2186bd229b1a0fe24a807273798832c27032c4510f47ffdb896", size = 246123, upload-time = "2026-01-25T12:57:19.087Z" }, - { url = "https://files.pythonhosted.org/packages/35/fb/05b9830c2e8275ebc031e0019387cda99113e62bb500ab328bb72578183b/coverage-7.13.2-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:ca9566769b69a5e216a4e176d54b9df88f29d750c5b78dbb899e379b4e14b30c", size = 247930, upload-time = "2026-01-25T12:57:20.929Z" }, - { url = "https://files.pythonhosted.org/packages/81/aa/3f37858ca2eed4f09b10ca3c6ddc9041be0a475626cd7fd2712f4a2d526f/coverage-7.13.2-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9c9bdea644e94fd66d75a6f7e9a97bb822371e1fe7eadae2cacd50fcbc28e4dc", size = 249804, upload-time = "2026-01-25T12:57:22.904Z" }, - { url = 
"https://files.pythonhosted.org/packages/b6/b3/c904f40c56e60a2d9678a5ee8df3d906d297d15fb8bec5756c3b0a67e2df/coverage-7.13.2-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:5bd447332ec4f45838c1ad42268ce21ca87c40deb86eabd59888859b66be22a5", size = 246815, upload-time = "2026-01-25T12:57:24.314Z" }, - { url = "https://files.pythonhosted.org/packages/41/91/ddc1c5394ca7fd086342486440bfdd6b9e9bda512bf774599c7c7a0081e0/coverage-7.13.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7c79ad5c28a16a1277e1187cf83ea8dafdcc689a784228a7d390f19776db7c31", size = 247843, upload-time = "2026-01-25T12:57:26.544Z" }, - { url = "https://files.pythonhosted.org/packages/87/d2/cdff8f4cd33697883c224ea8e003e9c77c0f1a837dc41d95a94dd26aad67/coverage-7.13.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:76e06ccacd1fb6ada5d076ed98a8c6f66e2e6acd3df02819e2ee29fd637b76ad", size = 245850, upload-time = "2026-01-25T12:57:28.507Z" }, - { url = "https://files.pythonhosted.org/packages/f5/42/e837febb7866bf2553ab53dd62ed52f9bb36d60c7e017c55376ad21fbb05/coverage-7.13.2-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:49d49e9a5e9f4dc3d3dac95278a020afa6d6bdd41f63608a76fa05a719d5b66f", size = 246116, upload-time = "2026-01-25T12:57:30.16Z" }, - { url = "https://files.pythonhosted.org/packages/09/b1/4a3f935d7df154df02ff4f71af8d61298d713a7ba305d050ae475bfbdde2/coverage-7.13.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ed2bce0e7bfa53f7b0b01c722da289ef6ad4c18ebd52b1f93704c21f116360c8", size = 246720, upload-time = "2026-01-25T12:57:32.165Z" }, - { url = "https://files.pythonhosted.org/packages/e1/fe/538a6fd44c515f1c5197a3f078094cbaf2ce9f945df5b44e29d95c864bff/coverage-7.13.2-cp310-cp310-win32.whl", hash = "sha256:1574983178b35b9af4db4a9f7328a18a14a0a0ce76ffaa1c1bacb4cc82089a7c", size = 221465, upload-time = "2026-01-25T12:57:33.511Z" }, - { url = 
"https://files.pythonhosted.org/packages/5e/09/4b63a024295f326ec1a40ec8def27799300ce8775b1cbf0d33b1790605c4/coverage-7.13.2-cp310-cp310-win_amd64.whl", hash = "sha256:a360a8baeb038928ceb996f5623a4cd508728f8f13e08d4e96ce161702f3dd99", size = 222397, upload-time = "2026-01-25T12:57:34.927Z" }, - { url = "https://files.pythonhosted.org/packages/6c/01/abca50583a8975bb6e1c59eff67ed8e48bb127c07dad5c28d9e96ccc09ec/coverage-7.13.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:060ebf6f2c51aff5ba38e1f43a2095e087389b1c69d559fde6049a4b0001320e", size = 218971, upload-time = "2026-01-25T12:57:36.953Z" }, - { url = "https://files.pythonhosted.org/packages/eb/0e/b6489f344d99cd1e5b4d5e1be52dfd3f8a3dc5112aa6c33948da8cabad4e/coverage-7.13.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c1ea8ca9db5e7469cd364552985e15911548ea5b69c48a17291f0cac70484b2e", size = 219473, upload-time = "2026-01-25T12:57:38.934Z" }, - { url = "https://files.pythonhosted.org/packages/17/11/db2f414915a8e4ec53f60b17956c27f21fb68fcf20f8a455ce7c2ccec638/coverage-7.13.2-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:b780090d15fd58f07cf2011943e25a5f0c1c894384b13a216b6c86c8a8a7c508", size = 249896, upload-time = "2026-01-25T12:57:40.365Z" }, - { url = "https://files.pythonhosted.org/packages/80/06/0823fe93913663c017e508e8810c998c8ebd3ec2a5a85d2c3754297bdede/coverage-7.13.2-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:88a800258d83acb803c38175b4495d293656d5fac48659c953c18e5f539a274b", size = 251810, upload-time = "2026-01-25T12:57:42.045Z" }, - { url = "https://files.pythonhosted.org/packages/61/dc/b151c3cc41b28cdf7f0166c5fa1271cbc305a8ec0124cce4b04f74791a18/coverage-7.13.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6326e18e9a553e674d948536a04a80d850a5eeefe2aae2e6d7cf05d54046c01b", size = 253920, upload-time = "2026-01-25T12:57:44.026Z" }, - { url = 
"https://files.pythonhosted.org/packages/2d/35/e83de0556e54a4729a2b94ea816f74ce08732e81945024adee46851c2264/coverage-7.13.2-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:59562de3f797979e1ff07c587e2ac36ba60ca59d16c211eceaa579c266c5022f", size = 250025, upload-time = "2026-01-25T12:57:45.624Z" }, - { url = "https://files.pythonhosted.org/packages/39/67/af2eb9c3926ce3ea0d58a0d2516fcbdacf7a9fc9559fe63076beaf3f2596/coverage-7.13.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:27ba1ed6f66b0e2d61bfa78874dffd4f8c3a12f8e2b5410e515ab345ba7bc9c3", size = 251612, upload-time = "2026-01-25T12:57:47.713Z" }, - { url = "https://files.pythonhosted.org/packages/26/62/5be2e25f3d6c711d23b71296f8b44c978d4c8b4e5b26871abfc164297502/coverage-7.13.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8be48da4d47cc68754ce643ea50b3234557cbefe47c2f120495e7bd0a2756f2b", size = 249670, upload-time = "2026-01-25T12:57:49.378Z" }, - { url = "https://files.pythonhosted.org/packages/b3/51/400d1b09a8344199f9b6a6fc1868005d766b7ea95e7882e494fa862ca69c/coverage-7.13.2-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:2a47a4223d3361b91176aedd9d4e05844ca67d7188456227b6bf5e436630c9a1", size = 249395, upload-time = "2026-01-25T12:57:50.86Z" }, - { url = "https://files.pythonhosted.org/packages/e0/36/f02234bc6e5230e2f0a63fd125d0a2093c73ef20fdf681c7af62a140e4e7/coverage-7.13.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c6f141b468740197d6bd38f2b26ade124363228cc3f9858bd9924ab059e00059", size = 250298, upload-time = "2026-01-25T12:57:52.287Z" }, - { url = "https://files.pythonhosted.org/packages/b0/06/713110d3dd3151b93611c9cbfc65c15b4156b44f927fced49ac0b20b32a4/coverage-7.13.2-cp311-cp311-win32.whl", hash = "sha256:89567798404af067604246e01a49ef907d112edf2b75ef814b1364d5ce267031", size = 221485, upload-time = "2026-01-25T12:57:53.876Z" }, - { url = 
"https://files.pythonhosted.org/packages/16/0c/3ae6255fa1ebcb7dec19c9a59e85ef5f34566d1265c70af5b2fc981da834/coverage-7.13.2-cp311-cp311-win_amd64.whl", hash = "sha256:21dd57941804ae2ac7e921771a5e21bbf9aabec317a041d164853ad0a96ce31e", size = 222421, upload-time = "2026-01-25T12:57:55.433Z" }, - { url = "https://files.pythonhosted.org/packages/b5/37/fabc3179af4d61d89ea47bd04333fec735cd5e8b59baad44fed9fc4170d7/coverage-7.13.2-cp311-cp311-win_arm64.whl", hash = "sha256:10758e0586c134a0bafa28f2d37dd2cdb5e4a90de25c0fc0c77dabbad46eca28", size = 221088, upload-time = "2026-01-25T12:57:57.41Z" }, - { url = "https://files.pythonhosted.org/packages/46/39/e92a35f7800222d3f7b2cbb7bbc3b65672ae8d501cb31801b2d2bd7acdf1/coverage-7.13.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f106b2af193f965d0d3234f3f83fc35278c7fb935dfbde56ae2da3dd2c03b84d", size = 219142, upload-time = "2026-01-25T12:58:00.448Z" }, - { url = "https://files.pythonhosted.org/packages/45/7a/8bf9e9309c4c996e65c52a7c5a112707ecdd9fbaf49e10b5a705a402bbb4/coverage-7.13.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:78f45d21dc4d5d6bd29323f0320089ef7eae16e4bef712dff79d184fa7330af3", size = 219503, upload-time = "2026-01-25T12:58:02.451Z" }, - { url = "https://files.pythonhosted.org/packages/87/93/17661e06b7b37580923f3f12406ac91d78aeed293fb6da0b69cc7957582f/coverage-7.13.2-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:fae91dfecd816444c74531a9c3d6ded17a504767e97aa674d44f638107265b99", size = 251006, upload-time = "2026-01-25T12:58:04.059Z" }, - { url = "https://files.pythonhosted.org/packages/12/f0/f9e59fb8c310171497f379e25db060abef9fa605e09d63157eebec102676/coverage-7.13.2-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:264657171406c114787b441484de620e03d8f7202f113d62fcd3d9688baa3e6f", size = 253750, upload-time = "2026-01-25T12:58:05.574Z" }, - { url = 
"https://files.pythonhosted.org/packages/e5/b1/1935e31add2232663cf7edd8269548b122a7d100047ff93475dbaaae673e/coverage-7.13.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ae47d8dcd3ded0155afbb59c62bd8ab07ea0fd4902e1c40567439e6db9dcaf2f", size = 254862, upload-time = "2026-01-25T12:58:07.647Z" }, - { url = "https://files.pythonhosted.org/packages/af/59/b5e97071ec13df5f45da2b3391b6cdbec78ba20757bc92580a5b3d5fa53c/coverage-7.13.2-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:8a0b33e9fd838220b007ce8f299114d406c1e8edb21336af4c97a26ecfd185aa", size = 251420, upload-time = "2026-01-25T12:58:09.309Z" }, - { url = "https://files.pythonhosted.org/packages/3f/75/9495932f87469d013dc515fb0ce1aac5fa97766f38f6b1a1deb1ee7b7f3a/coverage-7.13.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b3becbea7f3ce9a2d4d430f223ec15888e4deb31395840a79e916368d6004cce", size = 252786, upload-time = "2026-01-25T12:58:10.909Z" }, - { url = "https://files.pythonhosted.org/packages/6a/59/af550721f0eb62f46f7b8cb7e6f1860592189267b1c411a4e3a057caacee/coverage-7.13.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:f819c727a6e6eeb8711e4ce63d78c620f69630a2e9d53bc95ca5379f57b6ba94", size = 250928, upload-time = "2026-01-25T12:58:12.449Z" }, - { url = "https://files.pythonhosted.org/packages/9b/b1/21b4445709aae500be4ab43bbcfb4e53dc0811c3396dcb11bf9f23fd0226/coverage-7.13.2-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:4f7b71757a3ab19f7ba286e04c181004c1d61be921795ee8ba6970fd0ec91da5", size = 250496, upload-time = "2026-01-25T12:58:14.047Z" }, - { url = "https://files.pythonhosted.org/packages/ba/b1/0f5d89dfe0392990e4f3980adbde3eb34885bc1effb2dc369e0bf385e389/coverage-7.13.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b7fc50d2afd2e6b4f6f2f403b70103d280a8e0cb35320cbbe6debcda02a1030b", size = 252373, upload-time = "2026-01-25T12:58:15.976Z" }, - { url = 
"https://files.pythonhosted.org/packages/01/c9/0cf1a6a57a9968cc049a6b896693faa523c638a5314b1fc374eb2b2ac904/coverage-7.13.2-cp312-cp312-win32.whl", hash = "sha256:292250282cf9bcf206b543d7608bda17ca6fc151f4cbae949fc7e115112fbd41", size = 221696, upload-time = "2026-01-25T12:58:17.517Z" }, - { url = "https://files.pythonhosted.org/packages/4d/05/d7540bf983f09d32803911afed135524570f8c47bb394bf6206c1dc3a786/coverage-7.13.2-cp312-cp312-win_amd64.whl", hash = "sha256:eeea10169fac01549a7921d27a3e517194ae254b542102267bef7a93ed38c40e", size = 222504, upload-time = "2026-01-25T12:58:19.115Z" }, - { url = "https://files.pythonhosted.org/packages/15/8b/1a9f037a736ced0a12aacf6330cdaad5008081142a7070bc58b0f7930cbc/coverage-7.13.2-cp312-cp312-win_arm64.whl", hash = "sha256:2a5b567f0b635b592c917f96b9a9cb3dbd4c320d03f4bf94e9084e494f2e8894", size = 221120, upload-time = "2026-01-25T12:58:21.334Z" }, - { url = "https://files.pythonhosted.org/packages/a7/f0/3d3eac7568ab6096ff23791a526b0048a1ff3f49d0e236b2af6fb6558e88/coverage-7.13.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ed75de7d1217cf3b99365d110975f83af0528c849ef5180a12fd91b5064df9d6", size = 219168, upload-time = "2026-01-25T12:58:23.376Z" }, - { url = "https://files.pythonhosted.org/packages/a3/a6/f8b5cfeddbab95fdef4dcd682d82e5dcff7a112ced57a959f89537ee9995/coverage-7.13.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:97e596de8fa9bada4d88fde64a3f4d37f1b6131e4faa32bad7808abc79887ddc", size = 219537, upload-time = "2026-01-25T12:58:24.932Z" }, - { url = "https://files.pythonhosted.org/packages/7b/e6/8d8e6e0c516c838229d1e41cadcec91745f4b1031d4db17ce0043a0423b4/coverage-7.13.2-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:68c86173562ed4413345410c9480a8d64864ac5e54a5cda236748031e094229f", size = 250528, upload-time = "2026-01-25T12:58:26.567Z" }, - { url = 
"https://files.pythonhosted.org/packages/8e/78/befa6640f74092b86961f957f26504c8fba3d7da57cc2ab7407391870495/coverage-7.13.2-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:7be4d613638d678b2b3773b8f687537b284d7074695a43fe2fbbfc0e31ceaed1", size = 253132, upload-time = "2026-01-25T12:58:28.251Z" }, - { url = "https://files.pythonhosted.org/packages/9d/10/1630db1edd8ce675124a2ee0f7becc603d2bb7b345c2387b4b95c6907094/coverage-7.13.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d7f63ce526a96acd0e16c4af8b50b64334239550402fb1607ce6a584a6d62ce9", size = 254374, upload-time = "2026-01-25T12:58:30.294Z" }, - { url = "https://files.pythonhosted.org/packages/ed/1d/0d9381647b1e8e6d310ac4140be9c428a0277330991e0c35bdd751e338a4/coverage-7.13.2-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:406821f37f864f968e29ac14c3fccae0fec9fdeba48327f0341decf4daf92d7c", size = 250762, upload-time = "2026-01-25T12:58:32.036Z" }, - { url = "https://files.pythonhosted.org/packages/43/e4/5636dfc9a7c871ee8776af83ee33b4c26bc508ad6cee1e89b6419a366582/coverage-7.13.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ee68e5a4e3e5443623406b905db447dceddffee0dceb39f4e0cd9ec2a35004b5", size = 252502, upload-time = "2026-01-25T12:58:33.961Z" }, - { url = "https://files.pythonhosted.org/packages/02/2a/7ff2884d79d420cbb2d12fed6fff727b6d0ef27253140d3cdbbd03187ee0/coverage-7.13.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2ee0e58cca0c17dd9c6c1cdde02bb705c7b3fbfa5f3b0b5afeda20d4ebff8ef4", size = 250463, upload-time = "2026-01-25T12:58:35.529Z" }, - { url = "https://files.pythonhosted.org/packages/91/c0/ba51087db645b6c7261570400fc62c89a16278763f36ba618dc8657a187b/coverage-7.13.2-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:6e5bbb5018bf76a56aabdb64246b5288d5ae1b7d0dd4d0534fe86df2c2992d1c", size = 250288, upload-time = "2026-01-25T12:58:37.226Z" }, - { url = 
"https://files.pythonhosted.org/packages/03/07/44e6f428551c4d9faf63ebcefe49b30e5c89d1be96f6a3abd86a52da9d15/coverage-7.13.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a55516c68ef3e08e134e818d5e308ffa6b1337cc8b092b69b24287bf07d38e31", size = 252063, upload-time = "2026-01-25T12:58:38.821Z" }, - { url = "https://files.pythonhosted.org/packages/c2/67/35b730ad7e1859dd57e834d1bc06080d22d2f87457d53f692fce3f24a5a9/coverage-7.13.2-cp313-cp313-win32.whl", hash = "sha256:5b20211c47a8abf4abc3319d8ce2464864fa9f30c5fcaf958a3eed92f4f1fef8", size = 221716, upload-time = "2026-01-25T12:58:40.484Z" }, - { url = "https://files.pythonhosted.org/packages/0d/82/e5fcf5a97c72f45fc14829237a6550bf49d0ab882ac90e04b12a69db76b4/coverage-7.13.2-cp313-cp313-win_amd64.whl", hash = "sha256:14f500232e521201cf031549fb1ebdfc0a40f401cf519157f76c397e586c3beb", size = 222522, upload-time = "2026-01-25T12:58:43.247Z" }, - { url = "https://files.pythonhosted.org/packages/b1/f1/25d7b2f946d239dd2d6644ca2cc060d24f97551e2af13b6c24c722ae5f97/coverage-7.13.2-cp313-cp313-win_arm64.whl", hash = "sha256:9779310cb5a9778a60c899f075a8514c89fa6d10131445c2207fc893e0b14557", size = 221145, upload-time = "2026-01-25T12:58:45Z" }, - { url = "https://files.pythonhosted.org/packages/9e/f7/080376c029c8f76fadfe43911d0daffa0cbdc9f9418a0eead70c56fb7f4b/coverage-7.13.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:e64fa5a1e41ce5df6b547cbc3d3699381c9e2c2c369c67837e716ed0f549d48e", size = 219861, upload-time = "2026-01-25T12:58:46.586Z" }, - { url = "https://files.pythonhosted.org/packages/42/11/0b5e315af5ab35f4c4a70e64d3314e4eec25eefc6dec13be3a7d5ffe8ac5/coverage-7.13.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b01899e82a04085b6561eb233fd688474f57455e8ad35cd82286463ba06332b7", size = 220207, upload-time = "2026-01-25T12:58:48.277Z" }, - { url = 
"https://files.pythonhosted.org/packages/b2/0c/0874d0318fb1062117acbef06a09cf8b63f3060c22265adaad24b36306b7/coverage-7.13.2-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:838943bea48be0e2768b0cf7819544cdedc1bbb2f28427eabb6eb8c9eb2285d3", size = 261504, upload-time = "2026-01-25T12:58:49.904Z" }, - { url = "https://files.pythonhosted.org/packages/83/5e/1cd72c22ecb30751e43a72f40ba50fcef1b7e93e3ea823bd9feda8e51f9a/coverage-7.13.2-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:93d1d25ec2b27e90bcfef7012992d1f5121b51161b8bffcda756a816cf13c2c3", size = 263582, upload-time = "2026-01-25T12:58:51.582Z" }, - { url = "https://files.pythonhosted.org/packages/9b/da/8acf356707c7a42df4d0657020308e23e5a07397e81492640c186268497c/coverage-7.13.2-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:93b57142f9621b0d12349c43fc7741fe578e4bc914c1e5a54142856cfc0bf421", size = 266008, upload-time = "2026-01-25T12:58:53.234Z" }, - { url = "https://files.pythonhosted.org/packages/41/41/ea1730af99960309423c6ea8d6a4f1fa5564b2d97bd1d29dda4b42611f04/coverage-7.13.2-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f06799ae1bdfff7ccb8665d75f8291c69110ba9585253de254688aa8a1ccc6c5", size = 260762, upload-time = "2026-01-25T12:58:55.372Z" }, - { url = "https://files.pythonhosted.org/packages/22/fa/02884d2080ba71db64fdc127b311db60e01fe6ba797d9c8363725e39f4d5/coverage-7.13.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:7f9405ab4f81d490811b1d91c7a20361135a2df4c170e7f0b747a794da5b7f23", size = 263571, upload-time = "2026-01-25T12:58:57.52Z" }, - { url = "https://files.pythonhosted.org/packages/d2/6b/4083aaaeba9b3112f55ac57c2ce7001dc4d8fa3fcc228a39f09cc84ede27/coverage-7.13.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:f9ab1d5b86f8fbc97a5b3cd6280a3fd85fef3b028689d8a2c00918f0d82c728c", size = 261200, upload-time = 
"2026-01-25T12:58:59.255Z" }, - { url = "https://files.pythonhosted.org/packages/e9/d2/aea92fa36d61955e8c416ede9cf9bf142aa196f3aea214bb67f85235a050/coverage-7.13.2-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:f674f59712d67e841525b99e5e2b595250e39b529c3bda14764e4f625a3fa01f", size = 260095, upload-time = "2026-01-25T12:59:01.066Z" }, - { url = "https://files.pythonhosted.org/packages/0d/ae/04ffe96a80f107ea21b22b2367175c621da920063260a1c22f9452fd7866/coverage-7.13.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:c6cadac7b8ace1ba9144feb1ae3cb787a6065ba6d23ffc59a934b16406c26573", size = 262284, upload-time = "2026-01-25T12:59:02.802Z" }, - { url = "https://files.pythonhosted.org/packages/1c/7a/6f354dcd7dfc41297791d6fb4e0d618acb55810bde2c1fd14b3939e05c2b/coverage-7.13.2-cp313-cp313t-win32.whl", hash = "sha256:14ae4146465f8e6e6253eba0cccd57423e598a4cb925958b240c805300918343", size = 222389, upload-time = "2026-01-25T12:59:04.563Z" }, - { url = "https://files.pythonhosted.org/packages/8d/d5/080ad292a4a3d3daf411574be0a1f56d6dee2c4fdf6b005342be9fac807f/coverage-7.13.2-cp313-cp313t-win_amd64.whl", hash = "sha256:9074896edd705a05769e3de0eac0a8388484b503b68863dd06d5e473f874fd47", size = 223450, upload-time = "2026-01-25T12:59:06.677Z" }, - { url = "https://files.pythonhosted.org/packages/88/96/df576fbacc522e9fb8d1c4b7a7fc62eb734be56e2cba1d88d2eabe08ea3f/coverage-7.13.2-cp313-cp313t-win_arm64.whl", hash = "sha256:69e526e14f3f854eda573d3cf40cffd29a1a91c684743d904c33dbdcd0e0f3e7", size = 221707, upload-time = "2026-01-25T12:59:08.363Z" }, - { url = "https://files.pythonhosted.org/packages/55/53/1da9e51a0775634b04fcc11eb25c002fc58ee4f92ce2e8512f94ac5fc5bf/coverage-7.13.2-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:387a825f43d680e7310e6f325b2167dd093bc8ffd933b83e9aa0983cf6e0a2ef", size = 219213, upload-time = "2026-01-25T12:59:11.909Z" }, - { url = 
"https://files.pythonhosted.org/packages/46/35/b3caac3ebbd10230fea5a33012b27d19e999a17c9285c4228b4b2e35b7da/coverage-7.13.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:f0d7fea9d8e5d778cd5a9e8fc38308ad688f02040e883cdc13311ef2748cb40f", size = 219549, upload-time = "2026-01-25T12:59:13.638Z" }, - { url = "https://files.pythonhosted.org/packages/76/9c/e1cf7def1bdc72c1907e60703983a588f9558434a2ff94615747bd73c192/coverage-7.13.2-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:e080afb413be106c95c4ee96b4fffdc9e2fa56a8bbf90b5c0918e5c4449412f5", size = 250586, upload-time = "2026-01-25T12:59:15.808Z" }, - { url = "https://files.pythonhosted.org/packages/ba/49/f54ec02ed12be66c8d8897270505759e057b0c68564a65c429ccdd1f139e/coverage-7.13.2-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:a7fc042ba3c7ce25b8a9f097eb0f32a5ce1ccdb639d9eec114e26def98e1f8a4", size = 253093, upload-time = "2026-01-25T12:59:17.491Z" }, - { url = "https://files.pythonhosted.org/packages/fb/5e/aaf86be3e181d907e23c0f61fccaeb38de8e6f6b47aed92bf57d8fc9c034/coverage-7.13.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d0ba505e021557f7f8173ee8cd6b926373d8653e5ff7581ae2efce1b11ef4c27", size = 254446, upload-time = "2026-01-25T12:59:19.752Z" }, - { url = "https://files.pythonhosted.org/packages/28/c8/a5fa01460e2d75b0c853b392080d6829d3ca8b5ab31e158fa0501bc7c708/coverage-7.13.2-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:7de326f80e3451bd5cc7239ab46c73ddb658fe0b7649476bc7413572d36cd548", size = 250615, upload-time = "2026-01-25T12:59:21.928Z" }, - { url = "https://files.pythonhosted.org/packages/86/0b/6d56315a55f7062bb66410732c24879ccb2ec527ab6630246de5fe45a1df/coverage-7.13.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:abaea04f1e7e34841d4a7b343904a3f59481f62f9df39e2cd399d69a187a9660", size = 252452, upload-time = 
"2026-01-25T12:59:23.592Z" }, - { url = "https://files.pythonhosted.org/packages/30/19/9bc550363ebc6b0ea121977ee44d05ecd1e8bf79018b8444f1028701c563/coverage-7.13.2-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:9f93959ee0c604bccd8e0697be21de0887b1f73efcc3aa73a3ec0fd13feace92", size = 250418, upload-time = "2026-01-25T12:59:25.392Z" }, - { url = "https://files.pythonhosted.org/packages/1f/53/580530a31ca2f0cc6f07a8f2ab5460785b02bb11bdf815d4c4d37a4c5169/coverage-7.13.2-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:13fe81ead04e34e105bf1b3c9f9cdf32ce31736ee5d90a8d2de02b9d3e1bcb82", size = 250231, upload-time = "2026-01-25T12:59:27.888Z" }, - { url = "https://files.pythonhosted.org/packages/e2/42/dd9093f919dc3088cb472893651884bd675e3df3d38a43f9053656dca9a2/coverage-7.13.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d6d16b0f71120e365741bca2cb473ca6fe38930bc5431c5e850ba949f708f892", size = 251888, upload-time = "2026-01-25T12:59:29.636Z" }, - { url = "https://files.pythonhosted.org/packages/fa/a6/0af4053e6e819774626e133c3d6f70fae4d44884bfc4b126cb647baee8d3/coverage-7.13.2-cp314-cp314-win32.whl", hash = "sha256:9b2f4714bb7d99ba3790ee095b3b4ac94767e1347fe424278a0b10acb3ff04fe", size = 221968, upload-time = "2026-01-25T12:59:31.424Z" }, - { url = "https://files.pythonhosted.org/packages/c4/cc/5aff1e1f80d55862442855517bb8ad8ad3a68639441ff6287dde6a58558b/coverage-7.13.2-cp314-cp314-win_amd64.whl", hash = "sha256:e4121a90823a063d717a96e0a0529c727fb31ea889369a0ee3ec00ed99bf6859", size = 222783, upload-time = "2026-01-25T12:59:33.118Z" }, - { url = "https://files.pythonhosted.org/packages/de/20/09abafb24f84b3292cc658728803416c15b79f9ee5e68d25238a895b07d9/coverage-7.13.2-cp314-cp314-win_arm64.whl", hash = "sha256:6873f0271b4a15a33e7590f338d823f6f66f91ed147a03938d7ce26efd04eee6", size = 221348, upload-time = "2026-01-25T12:59:34.939Z" }, - { url = 
"https://files.pythonhosted.org/packages/b6/60/a3820c7232db63be060e4019017cd3426751c2699dab3c62819cdbcea387/coverage-7.13.2-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:f61d349f5b7cd95c34017f1927ee379bfbe9884300d74e07cf630ccf7a610c1b", size = 219950, upload-time = "2026-01-25T12:59:36.624Z" }, - { url = "https://files.pythonhosted.org/packages/fd/37/e4ef5975fdeb86b1e56db9a82f41b032e3d93a840ebaf4064f39e770d5c5/coverage-7.13.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a43d34ce714f4ca674c0d90beb760eb05aad906f2c47580ccee9da8fe8bfb417", size = 220209, upload-time = "2026-01-25T12:59:38.339Z" }, - { url = "https://files.pythonhosted.org/packages/54/df/d40e091d00c51adca1e251d3b60a8b464112efa3004949e96a74d7c19a64/coverage-7.13.2-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:bff1b04cb9d4900ce5c56c4942f047dc7efe57e2608cb7c3c8936e9970ccdbee", size = 261576, upload-time = "2026-01-25T12:59:40.446Z" }, - { url = "https://files.pythonhosted.org/packages/c5/44/5259c4bed54e3392e5c176121af9f71919d96dde853386e7730e705f3520/coverage-7.13.2-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:6ae99e4560963ad8e163e819e5d77d413d331fd00566c1e0856aa252303552c1", size = 263704, upload-time = "2026-01-25T12:59:42.346Z" }, - { url = "https://files.pythonhosted.org/packages/16/bd/ae9f005827abcbe2c70157459ae86053971c9fa14617b63903abbdce26d9/coverage-7.13.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e79a8c7d461820257d9aa43716c4efc55366d7b292e46b5b37165be1d377405d", size = 266109, upload-time = "2026-01-25T12:59:44.073Z" }, - { url = "https://files.pythonhosted.org/packages/a2/c0/8e279c1c0f5b1eaa3ad9b0fb7a5637fc0379ea7d85a781c0fe0bb3cfc2ab/coverage-7.13.2-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:060ee84f6a769d40c492711911a76811b4befb6fba50abb450371abb720f5bd6", size = 260686, upload-time = 
"2026-01-25T12:59:45.804Z" }, - { url = "https://files.pythonhosted.org/packages/b2/47/3a8112627e9d863e7cddd72894171c929e94491a597811725befdcd76bce/coverage-7.13.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:3bca209d001fd03ea2d978f8a4985093240a355c93078aee3f799852c23f561a", size = 263568, upload-time = "2026-01-25T12:59:47.929Z" }, - { url = "https://files.pythonhosted.org/packages/92/bc/7ea367d84afa3120afc3ce6de294fd2dcd33b51e2e7fbe4bbfd200f2cb8c/coverage-7.13.2-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:6b8092aa38d72f091db61ef83cb66076f18f02da3e1a75039a4f218629600e04", size = 261174, upload-time = "2026-01-25T12:59:49.717Z" }, - { url = "https://files.pythonhosted.org/packages/33/b7/f1092dcecb6637e31cc2db099581ee5c61a17647849bae6b8261a2b78430/coverage-7.13.2-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:4a3158dc2dcce5200d91ec28cd315c999eebff355437d2765840555d765a6e5f", size = 260017, upload-time = "2026-01-25T12:59:51.463Z" }, - { url = "https://files.pythonhosted.org/packages/2b/cd/f3d07d4b95fbe1a2ef0958c15da614f7e4f557720132de34d2dc3aa7e911/coverage-7.13.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:3973f353b2d70bd9796cc12f532a05945232ccae966456c8ed7034cb96bbfd6f", size = 262337, upload-time = "2026-01-25T12:59:53.407Z" }, - { url = "https://files.pythonhosted.org/packages/e0/db/b0d5b2873a07cb1e06a55d998697c0a5a540dcefbf353774c99eb3874513/coverage-7.13.2-cp314-cp314t-win32.whl", hash = "sha256:79f6506a678a59d4ded048dc72f1859ebede8ec2b9a2d509ebe161f01c2879d3", size = 222749, upload-time = "2026-01-25T12:59:56.316Z" }, - { url = "https://files.pythonhosted.org/packages/e5/2f/838a5394c082ac57d85f57f6aba53093b30d9089781df72412126505716f/coverage-7.13.2-cp314-cp314t-win_amd64.whl", hash = "sha256:196bfeabdccc5a020a57d5a368c681e3a6ceb0447d153aeccc1ab4d70a5032ba", size = 223857, upload-time = "2026-01-25T12:59:58.201Z" }, - { url = 
"https://files.pythonhosted.org/packages/44/d4/b608243e76ead3a4298824b50922b89ef793e50069ce30316a65c1b4d7ef/coverage-7.13.2-cp314-cp314t-win_arm64.whl", hash = "sha256:69269ab58783e090bfbf5b916ab3d188126e22d6070bbfc93098fdd474ef937c", size = 221881, upload-time = "2026-01-25T13:00:00.449Z" }, - { url = "https://files.pythonhosted.org/packages/d2/db/d291e30fdf7ea617a335531e72294e0c723356d7fdde8fba00610a76bda9/coverage-7.13.2-py3-none-any.whl", hash = "sha256:40ce1ea1e25125556d8e76bd0b61500839a07944cc287ac21d5626f3e620cad5", size = 210943, upload-time = "2026-01-25T13:00:02.388Z" }, +version = "7.13.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/11/43/3e4ac666cc35f231fa70c94e9f38459299de1a152813f9d2f60fc5f3ecaf/coverage-7.13.3.tar.gz", hash = "sha256:f7f6182d3dfb8802c1747eacbfe611b669455b69b7c037484bb1efbbb56711ac", size = 826832, upload-time = "2026-02-03T14:02:30.944Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ab/07/1c8099563a8a6c389a31c2d0aa1497cee86d6248bb4b9ba5e779215db9f9/coverage-7.13.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0b4f345f7265cdbdb5ec2521ffff15fa49de6d6c39abf89fc7ad68aa9e3a55f0", size = 219143, upload-time = "2026-02-03T13:59:40.459Z" }, + { url = "https://files.pythonhosted.org/packages/69/39/a892d44af7aa092cab70e0cc5cdbba18eeccfe1d6930695dab1742eef9e9/coverage-7.13.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:96c3be8bae9d0333e403cc1a8eb078a7f928b5650bae94a18fb4820cc993fb9b", size = 219663, upload-time = "2026-02-03T13:59:41.951Z" }, + { url = "https://files.pythonhosted.org/packages/9a/25/9669dcf4c2bb4c3861469e6db20e52e8c11908cf53c14ec9b12e9fd4d602/coverage-7.13.3-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d6f4a21328ea49d38565b55599e1c02834e76583a6953e5586d65cb1efebd8f8", size = 246424, upload-time = "2026-02-03T13:59:43.418Z" }, + { url = 
"https://files.pythonhosted.org/packages/f3/68/d9766c4e298aca62ea5d9543e1dd1e4e1439d7284815244d8b7db1840bfb/coverage-7.13.3-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:fc970575799a9d17d5c3fafd83a0f6ccf5d5117cdc9ad6fbd791e9ead82418b0", size = 248228, upload-time = "2026-02-03T13:59:44.816Z" }, + { url = "https://files.pythonhosted.org/packages/f0/e2/eea6cb4a4bd443741adf008d4cccec83a1f75401df59b6559aca2bdd9710/coverage-7.13.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:87ff33b652b3556b05e204ae20793d1f872161b0fa5ec8a9ac76f8430e152ed6", size = 250103, upload-time = "2026-02-03T13:59:46.271Z" }, + { url = "https://files.pythonhosted.org/packages/db/77/664280ecd666c2191610842177e2fab9e5dbdeef97178e2078fed46a3d2c/coverage-7.13.3-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:7df8759ee57b9f3f7b66799b7660c282f4375bef620ade1686d6a7b03699e75f", size = 247107, upload-time = "2026-02-03T13:59:48.53Z" }, + { url = "https://files.pythonhosted.org/packages/2b/df/2a672eab99e0d0eba52d8a63e47dc92245eee26954d1b2d3c8f7d372151f/coverage-7.13.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f45c9bcb16bee25a798ccba8a2f6a1251b19de6a0d617bb365d7d2f386c4e20e", size = 248143, upload-time = "2026-02-03T13:59:50.027Z" }, + { url = "https://files.pythonhosted.org/packages/a5/dc/a104e7a87c13e57a358b8b9199a8955676e1703bb372d79722b54978ae45/coverage-7.13.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:318b2e4753cbf611061e01b6cc81477e1cdfeb69c36c4a14e6595e674caadb56", size = 246148, upload-time = "2026-02-03T13:59:52.025Z" }, + { url = "https://files.pythonhosted.org/packages/2b/89/e113d3a58dc20b03b7e59aed1e53ebc9ca6167f961876443e002b10e3ae9/coverage-7.13.3-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:24db3959de8ee394eeeca89ccb8ba25305c2da9a668dd44173394cbd5aa0777f", size = 246414, upload-time = "2026-02-03T13:59:53.859Z" }, + { url = 
"https://files.pythonhosted.org/packages/3f/60/a3fd0a6e8d89b488396019a2268b6a1f25ab56d6d18f3be50f35d77b47dc/coverage-7.13.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:be14d0622125edef21b3a4d8cd2d138c4872bf6e38adc90fd92385e3312f406a", size = 247023, upload-time = "2026-02-03T13:59:55.454Z" }, + { url = "https://files.pythonhosted.org/packages/19/fa/de4840bb939dbb22ba0648a6d8069fa91c9cf3b3fca8b0d1df461e885b3d/coverage-7.13.3-cp310-cp310-win32.whl", hash = "sha256:53be4aab8ddef18beb6188f3a3fdbf4d1af2277d098d4e618be3a8e6c88e74be", size = 221751, upload-time = "2026-02-03T13:59:57.383Z" }, + { url = "https://files.pythonhosted.org/packages/de/87/233ff8b7ef62fb63f58c78623b50bef69681111e0c4d43504f422d88cda4/coverage-7.13.3-cp310-cp310-win_amd64.whl", hash = "sha256:bfeee64ad8b4aae3233abb77eb6b52b51b05fa89da9645518671b9939a78732b", size = 222686, upload-time = "2026-02-03T13:59:58.825Z" }, + { url = "https://files.pythonhosted.org/packages/ec/09/1ac74e37cf45f17eb41e11a21854f7f92a4c2d6c6098ef4a1becb0c6d8d3/coverage-7.13.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5907605ee20e126eeee2abe14aae137043c2c8af2fa9b38d2ab3b7a6b8137f73", size = 219276, upload-time = "2026-02-03T14:00:00.296Z" }, + { url = "https://files.pythonhosted.org/packages/2e/cb/71908b08b21beb2c437d0d5870c4ec129c570ca1b386a8427fcdb11cf89c/coverage-7.13.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a88705500988c8acad8b8fd86c2a933d3aa96bec1ddc4bc5cb256360db7bbd00", size = 219776, upload-time = "2026-02-03T14:00:02.414Z" }, + { url = "https://files.pythonhosted.org/packages/09/85/c4f3dd69232887666a2c0394d4be21c60ea934d404db068e6c96aa59cd87/coverage-7.13.3-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:7bbb5aa9016c4c29e3432e087aa29ebee3f8fda089cfbfb4e6d64bd292dcd1c2", size = 250196, upload-time = "2026-02-03T14:00:04.197Z" }, + { url = 
"https://files.pythonhosted.org/packages/9c/cc/560ad6f12010344d0778e268df5ba9aa990aacccc310d478bf82bf3d302c/coverage-7.13.3-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:0c2be202a83dde768937a61cdc5d06bf9fb204048ca199d93479488e6247656c", size = 252111, upload-time = "2026-02-03T14:00:05.639Z" }, + { url = "https://files.pythonhosted.org/packages/f0/66/3193985fb2c58e91f94cfbe9e21a6fdf941e9301fe2be9e92c072e9c8f8c/coverage-7.13.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0f45e32ef383ce56e0ca099b2e02fcdf7950be4b1b56afaab27b4ad790befe5b", size = 254217, upload-time = "2026-02-03T14:00:07.738Z" }, + { url = "https://files.pythonhosted.org/packages/c5/78/f0f91556bf1faa416792e537c523c5ef9db9b1d32a50572c102b3d7c45b3/coverage-7.13.3-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:6ed2e787249b922a93cd95c671cc9f4c9797a106e81b455c83a9ddb9d34590c0", size = 250318, upload-time = "2026-02-03T14:00:09.224Z" }, + { url = "https://files.pythonhosted.org/packages/6f/aa/fc654e45e837d137b2c1f3a2cc09b4aea1e8b015acd2f774fa0f3d2ddeba/coverage-7.13.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:05dd25b21afffe545e808265897c35f32d3e4437663923e0d256d9ab5031fb14", size = 251909, upload-time = "2026-02-03T14:00:10.712Z" }, + { url = "https://files.pythonhosted.org/packages/73/4d/ab53063992add8a9ca0463c9d92cce5994a29e17affd1c2daa091b922a93/coverage-7.13.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:46d29926349b5c4f1ea4fca95e8c892835515f3600995a383fa9a923b5739ea4", size = 249971, upload-time = "2026-02-03T14:00:12.402Z" }, + { url = "https://files.pythonhosted.org/packages/29/25/83694b81e46fcff9899694a1b6f57573429cdd82b57932f09a698f03eea5/coverage-7.13.3-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:fae6a21537519c2af00245e834e5bf2884699cc7c1055738fd0f9dc37a3644ad", size = 249692, upload-time = "2026-02-03T14:00:13.868Z" }, + { url = 
"https://files.pythonhosted.org/packages/d4/ef/d68fc304301f4cb4bf6aefa0045310520789ca38dabdfba9dbecd3f37919/coverage-7.13.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c672d4e2f0575a4ca2bf2aa0c5ced5188220ab806c1bb6d7179f70a11a017222", size = 250597, upload-time = "2026-02-03T14:00:15.461Z" }, + { url = "https://files.pythonhosted.org/packages/8d/85/240ad396f914df361d0f71e912ddcedb48130c71b88dc4193fe3c0306f00/coverage-7.13.3-cp311-cp311-win32.whl", hash = "sha256:fcda51c918c7a13ad93b5f89a58d56e3a072c9e0ba5c231b0ed81404bf2648fb", size = 221773, upload-time = "2026-02-03T14:00:17.462Z" }, + { url = "https://files.pythonhosted.org/packages/2f/71/165b3a6d3d052704a9ab52d11ea64ef3426745de517dda44d872716213a7/coverage-7.13.3-cp311-cp311-win_amd64.whl", hash = "sha256:d1a049b5c51b3b679928dd35e47c4a2235e0b6128b479a7596d0ef5b42fa6301", size = 222711, upload-time = "2026-02-03T14:00:19.449Z" }, + { url = "https://files.pythonhosted.org/packages/51/d0/0ddc9c5934cdd52639c5df1f1eb0fdab51bb52348f3a8d1c7db9c600d93a/coverage-7.13.3-cp311-cp311-win_arm64.whl", hash = "sha256:79f2670c7e772f4917895c3d89aad59e01f3dbe68a4ed2d0373b431fad1dcfba", size = 221377, upload-time = "2026-02-03T14:00:20.968Z" }, + { url = "https://files.pythonhosted.org/packages/94/44/330f8e83b143f6668778ed61d17ece9dc48459e9e74669177de02f45fec5/coverage-7.13.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ed48b4170caa2c4420e0cd27dc977caaffc7eecc317355751df8373dddcef595", size = 219441, upload-time = "2026-02-03T14:00:22.585Z" }, + { url = "https://files.pythonhosted.org/packages/08/e7/29db05693562c2e65bdf6910c0af2fd6f9325b8f43caf7a258413f369e30/coverage-7.13.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8f2adf4bcffbbec41f366f2e6dffb9d24e8172d16e91da5799c9b7ed6b5716e6", size = 219801, upload-time = "2026-02-03T14:00:24.186Z" }, + { url = 
"https://files.pythonhosted.org/packages/90/ae/7f8a78249b02b0818db46220795f8ac8312ea4abd1d37d79ea81db5cae81/coverage-7.13.3-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:01119735c690786b6966a1e9f098da4cd7ca9174c4cfe076d04e653105488395", size = 251306, upload-time = "2026-02-03T14:00:25.798Z" }, + { url = "https://files.pythonhosted.org/packages/62/71/a18a53d1808e09b2e9ebd6b47dad5e92daf4c38b0686b4c4d1b2f3e42b7f/coverage-7.13.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:8bb09e83c603f152d855f666d70a71765ca8e67332e5829e62cb9466c176af23", size = 254051, upload-time = "2026-02-03T14:00:27.474Z" }, + { url = "https://files.pythonhosted.org/packages/4a/0a/eb30f6455d04c5a3396d0696cad2df0269ae7444bb322f86ffe3376f7bf9/coverage-7.13.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b607a40cba795cfac6d130220d25962931ce101f2f478a29822b19755377fb34", size = 255160, upload-time = "2026-02-03T14:00:29.024Z" }, + { url = "https://files.pythonhosted.org/packages/7b/7e/a45baac86274ce3ed842dbb84f14560c673ad30535f397d89164ec56c5df/coverage-7.13.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:44f14a62f5da2e9aedf9080e01d2cda61df39197d48e323538ec037336d68da8", size = 251709, upload-time = "2026-02-03T14:00:30.641Z" }, + { url = "https://files.pythonhosted.org/packages/c0/df/dd0dc12f30da11349993f3e218901fdf82f45ee44773596050c8f5a1fb25/coverage-7.13.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:debf29e0b157769843dff0981cc76f79e0ed04e36bb773c6cac5f6029054bd8a", size = 253083, upload-time = "2026-02-03T14:00:32.14Z" }, + { url = "https://files.pythonhosted.org/packages/ab/32/fc764c8389a8ce95cb90eb97af4c32f392ab0ac23ec57cadeefb887188d3/coverage-7.13.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:824bb95cd71604031ae9a48edb91fd6effde669522f960375668ed21b36e3ec4", size = 251227, upload-time = 
"2026-02-03T14:00:34.721Z" }, + { url = "https://files.pythonhosted.org/packages/dd/ca/d025e9da8f06f24c34d2da9873957cfc5f7e0d67802c3e34d0caa8452130/coverage-7.13.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:8f1010029a5b52dc427c8e2a8dbddb2303ddd180b806687d1acd1bb1d06649e7", size = 250794, upload-time = "2026-02-03T14:00:36.278Z" }, + { url = "https://files.pythonhosted.org/packages/45/c7/76bf35d5d488ec8f68682eb8e7671acc50a6d2d1c1182de1d2b6d4ffad3b/coverage-7.13.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:cd5dee4fd7659d8306ffa79eeaaafd91fa30a302dac3af723b9b469e549247e0", size = 252671, upload-time = "2026-02-03T14:00:38.368Z" }, + { url = "https://files.pythonhosted.org/packages/bf/10/1921f1a03a7c209e1cb374f81a6b9b68b03cdb3ecc3433c189bc90e2a3d5/coverage-7.13.3-cp312-cp312-win32.whl", hash = "sha256:f7f153d0184d45f3873b3ad3ad22694fd73aadcb8cdbc4337ab4b41ea6b4dff1", size = 221986, upload-time = "2026-02-03T14:00:40.442Z" }, + { url = "https://files.pythonhosted.org/packages/3c/7c/f5d93297f8e125a80c15545edc754d93e0ed8ba255b65e609b185296af01/coverage-7.13.3-cp312-cp312-win_amd64.whl", hash = "sha256:03a6e5e1e50819d6d7436f5bc40c92ded7e484e400716886ac921e35c133149d", size = 222793, upload-time = "2026-02-03T14:00:42.106Z" }, + { url = "https://files.pythonhosted.org/packages/43/59/c86b84170015b4555ebabca8649bdf9f4a1f737a73168088385ed0f947c4/coverage-7.13.3-cp312-cp312-win_arm64.whl", hash = "sha256:51c4c42c0e7d09a822b08b6cf79b3c4db8333fffde7450da946719ba0d45730f", size = 221410, upload-time = "2026-02-03T14:00:43.726Z" }, + { url = "https://files.pythonhosted.org/packages/81/f3/4c333da7b373e8c8bfb62517e8174a01dcc373d7a9083698e3b39d50d59c/coverage-7.13.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:853c3d3c79ff0db65797aad79dee6be020efd218ac4510f15a205f1e8d13ce25", size = 219468, upload-time = "2026-02-03T14:00:45.829Z" }, + { url = 
"https://files.pythonhosted.org/packages/d6/31/0714337b7d23630c8de2f4d56acf43c65f8728a45ed529b34410683f7217/coverage-7.13.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f75695e157c83d374f88dcc646a60cb94173304a9258b2e74ba5a66b7614a51a", size = 219839, upload-time = "2026-02-03T14:00:47.407Z" }, + { url = "https://files.pythonhosted.org/packages/12/99/bd6f2a2738144c98945666f90cae446ed870cecf0421c767475fcf42cdbe/coverage-7.13.3-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:2d098709621d0819039f3f1e471ee554f55a0b2ac0d816883c765b14129b5627", size = 250828, upload-time = "2026-02-03T14:00:49.029Z" }, + { url = "https://files.pythonhosted.org/packages/6f/99/97b600225fbf631e6f5bfd3ad5bcaf87fbb9e34ff87492e5a572ff01bbe2/coverage-7.13.3-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:16d23d6579cf80a474ad160ca14d8b319abaa6db62759d6eef53b2fc979b58c8", size = 253432, upload-time = "2026-02-03T14:00:50.655Z" }, + { url = "https://files.pythonhosted.org/packages/5f/5c/abe2b3490bda26bd4f5e3e799be0bdf00bd81edebedc2c9da8d3ef288fa8/coverage-7.13.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:00d34b29a59d2076e6f318b30a00a69bf63687e30cd882984ed444e753990cc1", size = 254672, upload-time = "2026-02-03T14:00:52.757Z" }, + { url = "https://files.pythonhosted.org/packages/31/ba/5d1957c76b40daff53971fe0adb84d9c2162b614280031d1d0653dd010c1/coverage-7.13.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ab6d72bffac9deb6e6cb0f61042e748de3f9f8e98afb0375a8e64b0b6e11746b", size = 251050, upload-time = "2026-02-03T14:00:54.332Z" }, + { url = "https://files.pythonhosted.org/packages/69/dc/dffdf3bfe9d32090f047d3c3085378558cb4eb6778cda7de414ad74581ed/coverage-7.13.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e129328ad1258e49cae0123a3b5fcb93d6c2fa90d540f0b4c7cdcdc019aaa3dc", size = 252801, upload-time = 
"2026-02-03T14:00:56.121Z" }, + { url = "https://files.pythonhosted.org/packages/87/51/cdf6198b0f2746e04511a30dc9185d7b8cdd895276c07bdb538e37f1cd50/coverage-7.13.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2213a8d88ed35459bda71597599d4eec7c2ebad201c88f0bfc2c26fd9b0dd2ea", size = 250763, upload-time = "2026-02-03T14:00:58.719Z" }, + { url = "https://files.pythonhosted.org/packages/d7/1a/596b7d62218c1d69f2475b69cc6b211e33c83c902f38ee6ae9766dd422da/coverage-7.13.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:00dd3f02de6d5f5c9c3d95e3e036c3c2e2a669f8bf2d3ceb92505c4ce7838f67", size = 250587, upload-time = "2026-02-03T14:01:01.197Z" }, + { url = "https://files.pythonhosted.org/packages/f7/46/52330d5841ff660f22c130b75f5e1dd3e352c8e7baef5e5fef6b14e3e991/coverage-7.13.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f9bada7bc660d20b23d7d312ebe29e927b655cf414dadcdb6335a2075695bd86", size = 252358, upload-time = "2026-02-03T14:01:02.824Z" }, + { url = "https://files.pythonhosted.org/packages/36/8a/e69a5be51923097ba7d5cff9724466e74fe486e9232020ba97c809a8b42b/coverage-7.13.3-cp313-cp313-win32.whl", hash = "sha256:75b3c0300f3fa15809bd62d9ca8b170eb21fcf0100eb4b4154d6dc8b3a5bbd43", size = 222007, upload-time = "2026-02-03T14:01:04.876Z" }, + { url = "https://files.pythonhosted.org/packages/0a/09/a5a069bcee0d613bdd48ee7637fa73bc09e7ed4342b26890f2df97cc9682/coverage-7.13.3-cp313-cp313-win_amd64.whl", hash = "sha256:a2f7589c6132c44c53f6e705e1a6677e2b7821378c22f7703b2cf5388d0d4587", size = 222812, upload-time = "2026-02-03T14:01:07.296Z" }, + { url = "https://files.pythonhosted.org/packages/3d/4f/d62ad7dfe32f9e3d4a10c178bb6f98b10b083d6e0530ca202b399371f6c1/coverage-7.13.3-cp313-cp313-win_arm64.whl", hash = "sha256:123ceaf2b9d8c614f01110f908a341e05b1b305d6b2ada98763b9a5a59756051", size = 221433, upload-time = "2026-02-03T14:01:09.156Z" }, + { url = 
"https://files.pythonhosted.org/packages/04/b2/4876c46d723d80b9c5b695f1a11bf5f7c3dabf540ec00d6edc076ff025e6/coverage-7.13.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:cc7fd0f726795420f3678ac82ff882c7fc33770bd0074463b5aef7293285ace9", size = 220162, upload-time = "2026-02-03T14:01:11.409Z" }, + { url = "https://files.pythonhosted.org/packages/fc/04/9942b64a0e0bdda2c109f56bda42b2a59d9d3df4c94b85a323c1cae9fc77/coverage-7.13.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:d358dc408edc28730aed5477a69338e444e62fba0b7e9e4a131c505fadad691e", size = 220510, upload-time = "2026-02-03T14:01:13.038Z" }, + { url = "https://files.pythonhosted.org/packages/5a/82/5cfe1e81eae525b74669f9795f37eb3edd4679b873d79d1e6c1c14ee6c1c/coverage-7.13.3-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:5d67b9ed6f7b5527b209b24b3df9f2e5bf0198c1bbf99c6971b0e2dcb7e2a107", size = 261801, upload-time = "2026-02-03T14:01:14.674Z" }, + { url = "https://files.pythonhosted.org/packages/0b/ec/a553d7f742fd2cd12e36a16a7b4b3582d5934b496ef2b5ea8abeb10903d4/coverage-7.13.3-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:59224bfb2e9b37c1335ae35d00daa3a5b4e0b1a20f530be208fff1ecfa436f43", size = 263882, upload-time = "2026-02-03T14:01:16.343Z" }, + { url = "https://files.pythonhosted.org/packages/e1/58/8f54a2a93e3d675635bc406de1c9ac8d551312142ff52c9d71b5e533ad45/coverage-7.13.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ae9306b5299e31e31e0d3b908c66bcb6e7e3ddca143dea0266e9ce6c667346d3", size = 266306, upload-time = "2026-02-03T14:01:18.02Z" }, + { url = "https://files.pythonhosted.org/packages/1a/be/e593399fd6ea1f00aee79ebd7cc401021f218d34e96682a92e1bae092ff6/coverage-7.13.3-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:343aaeb5f8bb7bcd38620fd7bc56e6ee8207847d8c6103a1e7b72322d381ba4a", size = 261051, upload-time = 
"2026-02-03T14:01:19.757Z" }, + { url = "https://files.pythonhosted.org/packages/5c/e5/e9e0f6138b21bcdebccac36fbfde9cf15eb1bbcea9f5b1f35cd1f465fb91/coverage-7.13.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b2182129f4c101272ff5f2f18038d7b698db1bf8e7aa9e615cb48440899ad32e", size = 263868, upload-time = "2026-02-03T14:01:21.487Z" }, + { url = "https://files.pythonhosted.org/packages/9a/bf/de72cfebb69756f2d4a2dde35efcc33c47d85cd3ebdf844b3914aac2ef28/coverage-7.13.3-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:94d2ac94bd0cc57c5626f52f8c2fffed1444b5ae8c9fc68320306cc2b255e155", size = 261498, upload-time = "2026-02-03T14:01:23.097Z" }, + { url = "https://files.pythonhosted.org/packages/f2/91/4a2d313a70fc2e98ca53afd1c8ce67a89b1944cd996589a5b1fe7fbb3e5c/coverage-7.13.3-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:65436cde5ecabe26fb2f0bf598962f0a054d3f23ad529361326ac002c61a2a1e", size = 260394, upload-time = "2026-02-03T14:01:24.949Z" }, + { url = "https://files.pythonhosted.org/packages/40/83/25113af7cf6941e779eb7ed8de2a677865b859a07ccee9146d4cc06a03e3/coverage-7.13.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:db83b77f97129813dbd463a67e5335adc6a6a91db652cc085d60c2d512746f96", size = 262579, upload-time = "2026-02-03T14:01:26.703Z" }, + { url = "https://files.pythonhosted.org/packages/1e/19/a5f2b96262977e82fb9aabbe19b4d83561f5d063f18dde3e72f34ffc3b2f/coverage-7.13.3-cp313-cp313t-win32.whl", hash = "sha256:dfb428e41377e6b9ba1b0a32df6db5409cb089a0ed1d0a672dc4953ec110d84f", size = 222679, upload-time = "2026-02-03T14:01:28.553Z" }, + { url = "https://files.pythonhosted.org/packages/81/82/ef1747b88c87a5c7d7edc3704799ebd650189a9158e680a063308b6125ef/coverage-7.13.3-cp313-cp313t-win_amd64.whl", hash = "sha256:5badd7e596e6b0c89aa8ec6d37f4473e4357f982ce57f9a2942b0221cd9cf60c", size = 223740, upload-time = "2026-02-03T14:01:30.776Z" }, + { url = 
"https://files.pythonhosted.org/packages/1c/4c/a67c7bb5b560241c22736a9cb2f14c5034149ffae18630323fde787339e4/coverage-7.13.3-cp313-cp313t-win_arm64.whl", hash = "sha256:989aa158c0eb19d83c76c26f4ba00dbb272485c56e452010a3450bdbc9daafd9", size = 221996, upload-time = "2026-02-03T14:01:32.495Z" }, + { url = "https://files.pythonhosted.org/packages/5e/b3/677bb43427fed9298905106f39c6520ac75f746f81b8f01104526a8026e4/coverage-7.13.3-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:c6f6169bbdbdb85aab8ac0392d776948907267fcc91deeacf6f9d55f7a83ae3b", size = 219513, upload-time = "2026-02-03T14:01:34.29Z" }, + { url = "https://files.pythonhosted.org/packages/42/53/290046e3bbf8986cdb7366a42dab3440b9983711eaff044a51b11006c67b/coverage-7.13.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:2f5e731627a3d5ef11a2a35aa0c6f7c435867c7ccbc391268eb4f2ca5dbdcc10", size = 219850, upload-time = "2026-02-03T14:01:35.984Z" }, + { url = "https://files.pythonhosted.org/packages/ea/2b/ab41f10345ba2e49d5e299be8663be2b7db33e77ac1b85cd0af985ea6406/coverage-7.13.3-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:9db3a3285d91c0b70fab9f39f0a4aa37d375873677efe4e71e58d8321e8c5d39", size = 250886, upload-time = "2026-02-03T14:01:38.287Z" }, + { url = "https://files.pythonhosted.org/packages/72/2d/b3f6913ee5a1d5cdd04106f257e5fac5d048992ffc2d9995d07b0f17739f/coverage-7.13.3-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:06e49c5897cb12e3f7ecdc111d44e97c4f6d0557b81a7a0204ed70a8b038f86f", size = 253393, upload-time = "2026-02-03T14:01:40.118Z" }, + { url = "https://files.pythonhosted.org/packages/f0/f6/b1f48810ffc6accf49a35b9943636560768f0812330f7456aa87dc39aff5/coverage-7.13.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fb25061a66802df9fc13a9ba1967d25faa4dae0418db469264fd9860a921dde4", size = 254740, upload-time = "2026-02-03T14:01:42.413Z" }, + { url = 
"https://files.pythonhosted.org/packages/57/d0/e59c54f9be0b61808f6bc4c8c4346bd79f02dd6bbc3f476ef26124661f20/coverage-7.13.3-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:99fee45adbb1caeb914da16f70e557fb7ff6ddc9e4b14de665bd41af631367ef", size = 250905, upload-time = "2026-02-03T14:01:44.163Z" }, + { url = "https://files.pythonhosted.org/packages/d5/f7/5291bcdf498bafbee3796bb32ef6966e9915aebd4d0954123c8eae921c32/coverage-7.13.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:318002f1fd819bdc1651c619268aa5bc853c35fa5cc6d1e8c96bd9cd6c828b75", size = 252753, upload-time = "2026-02-03T14:01:45.974Z" }, + { url = "https://files.pythonhosted.org/packages/a0/a9/1dcafa918c281554dae6e10ece88c1add82db685be123e1b05c2056ff3fb/coverage-7.13.3-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:71295f2d1d170b9977dc386d46a7a1b7cbb30e5405492529b4c930113a33f895", size = 250716, upload-time = "2026-02-03T14:01:48.844Z" }, + { url = "https://files.pythonhosted.org/packages/44/bb/4ea4eabcce8c4f6235df6e059fbc5db49107b24c4bdffc44aee81aeca5a8/coverage-7.13.3-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:5b1ad2e0dc672625c44bc4fe34514602a9fd8b10d52ddc414dc585f74453516c", size = 250530, upload-time = "2026-02-03T14:01:50.793Z" }, + { url = "https://files.pythonhosted.org/packages/6d/31/4a6c9e6a71367e6f923b27b528448c37f4e959b7e4029330523014691007/coverage-7.13.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b2beb64c145593a50d90db5c7178f55daeae129123b0d265bdb3cbec83e5194a", size = 252186, upload-time = "2026-02-03T14:01:52.607Z" }, + { url = "https://files.pythonhosted.org/packages/27/92/e1451ef6390a4f655dc42da35d9971212f7abbbcad0bdb7af4407897eb76/coverage-7.13.3-cp314-cp314-win32.whl", hash = "sha256:3d1aed4f4e837a832df2f3b4f68a690eede0de4560a2dbc214ea0bc55aabcdb4", size = 222253, upload-time = "2026-02-03T14:01:55.071Z" }, + { url = 
"https://files.pythonhosted.org/packages/8a/98/78885a861a88de020c32a2693487c37d15a9873372953f0c3c159d575a43/coverage-7.13.3-cp314-cp314-win_amd64.whl", hash = "sha256:9f9efbbaf79f935d5fbe3ad814825cbce4f6cdb3054384cb49f0c0f496125fa0", size = 223069, upload-time = "2026-02-03T14:01:56.95Z" }, + { url = "https://files.pythonhosted.org/packages/eb/fb/3784753a48da58a5337972abf7ca58b1fb0f1bda21bc7b4fae992fd28e47/coverage-7.13.3-cp314-cp314-win_arm64.whl", hash = "sha256:31b6e889c53d4e6687ca63706148049494aace140cffece1c4dc6acadb70a7b3", size = 221633, upload-time = "2026-02-03T14:01:58.758Z" }, + { url = "https://files.pythonhosted.org/packages/40/f9/75b732d9674d32cdbffe801ed5f770786dd1c97eecedef2125b0d25102dc/coverage-7.13.3-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:c5e9787cec750793a19a28df7edd85ac4e49d3fb91721afcdc3b86f6c08d9aa8", size = 220243, upload-time = "2026-02-03T14:02:01.109Z" }, + { url = "https://files.pythonhosted.org/packages/cf/7e/2868ec95de5a65703e6f0c87407ea822d1feb3619600fbc3c1c4fa986090/coverage-7.13.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:e5b86db331c682fd0e4be7098e6acee5e8a293f824d41487c667a93705d415ca", size = 220515, upload-time = "2026-02-03T14:02:02.862Z" }, + { url = "https://files.pythonhosted.org/packages/7d/eb/9f0d349652fced20bcaea0f67fc5777bd097c92369f267975732f3dc5f45/coverage-7.13.3-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:edc7754932682d52cf6e7a71806e529ecd5ce660e630e8bd1d37109a2e5f63ba", size = 261874, upload-time = "2026-02-03T14:02:04.727Z" }, + { url = "https://files.pythonhosted.org/packages/ee/a5/6619bc4a6c7b139b16818149a3e74ab2e21599ff9a7b6811b6afde99f8ec/coverage-7.13.3-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:d3a16d6398666510a6886f67f43d9537bfd0e13aca299688a19daa84f543122f", size = 264004, upload-time = "2026-02-03T14:02:06.634Z" }, + { url = 
"https://files.pythonhosted.org/packages/29/b7/90aa3fc645a50c6f07881fca4fd0ba21e3bfb6ce3a7078424ea3a35c74c9/coverage-7.13.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:303d38b19626c1981e1bb067a9928236d88eb0e4479b18a74812f05a82071508", size = 266408, upload-time = "2026-02-03T14:02:09.037Z" }, + { url = "https://files.pythonhosted.org/packages/62/55/08bb2a1e4dcbae384e638f0effef486ba5987b06700e481691891427d879/coverage-7.13.3-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:284e06eadfe15ddfee2f4ee56631f164ef897a7d7d5a15bca5f0bb88889fc5ba", size = 260977, upload-time = "2026-02-03T14:02:11.755Z" }, + { url = "https://files.pythonhosted.org/packages/9b/76/8bd4ae055a42d8fb5dd2230e5cf36ff2e05f85f2427e91b11a27fea52ed7/coverage-7.13.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:d401f0864a1d3198422816878e4e84ca89ec1c1bf166ecc0ae01380a39b888cd", size = 263868, upload-time = "2026-02-03T14:02:13.565Z" }, + { url = "https://files.pythonhosted.org/packages/e3/f9/ba000560f11e9e32ec03df5aa8477242c2d95b379c99ac9a7b2e7fbacb1a/coverage-7.13.3-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:3f379b02c18a64de78c4ccdddf1c81c2c5ae1956c72dacb9133d7dd7809794ab", size = 261474, upload-time = "2026-02-03T14:02:16.069Z" }, + { url = "https://files.pythonhosted.org/packages/90/4b/4de4de8f9ca7af4733bfcf4baa440121b7dbb3856daf8428ce91481ff63b/coverage-7.13.3-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:7a482f2da9086971efb12daca1d6547007ede3674ea06e16d7663414445c683e", size = 260317, upload-time = "2026-02-03T14:02:17.996Z" }, + { url = "https://files.pythonhosted.org/packages/05/71/5cd8436e2c21410ff70be81f738c0dddea91bcc3189b1517d26e0102ccb3/coverage-7.13.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:562136b0d401992118d9b49fbee5454e16f95f85b120a4226a04d816e33fe024", size = 262635, upload-time = "2026-02-03T14:02:20.405Z" }, + { url = 
"https://files.pythonhosted.org/packages/e7/f8/2834bb45bdd70b55a33ec354b8b5f6062fc90e5bb787e14385903a979503/coverage-7.13.3-cp314-cp314t-win32.whl", hash = "sha256:ca46e5c3be3b195098dd88711890b8011a9fa4feca942292bb84714ce5eab5d3", size = 223035, upload-time = "2026-02-03T14:02:22.323Z" }, + { url = "https://files.pythonhosted.org/packages/26/75/f8290f0073c00d9ae14056d2b84ab92dff21d5370e464cb6cb06f52bf580/coverage-7.13.3-cp314-cp314t-win_amd64.whl", hash = "sha256:06d316dbb3d9fd44cca05b2dbcfbef22948493d63a1f28e828d43e6cc505fed8", size = 224142, upload-time = "2026-02-03T14:02:24.143Z" }, + { url = "https://files.pythonhosted.org/packages/03/01/43ac78dfea8946c4a9161bbc034b5549115cb2b56781a4b574927f0d141a/coverage-7.13.3-cp314-cp314t-win_arm64.whl", hash = "sha256:299d66e9218193f9dc6e4880629ed7c4cd23486005166247c283fb98531656c3", size = 222166, upload-time = "2026-02-03T14:02:26.005Z" }, + { url = "https://files.pythonhosted.org/packages/7d/fb/70af542d2d938c778c9373ce253aa4116dbe7c0a5672f78b2b2ae0e1b94b/coverage-7.13.3-py3-none-any.whl", hash = "sha256:90a8af9dba6429b2573199622d72e0ebf024d6276f16abce394ad4d181bb0910", size = 211237, upload-time = "2026-02-03T14:02:27.986Z" }, ] [package.optional-dependencies] @@ -1959,7 +1973,7 @@ name = "exceptiongroup" version = "1.3.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "typing-extensions", marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, + { name = "typing-extensions", marker = "(python_full_version < '3.13' and sys_platform == 'darwin') or (python_full_version < '3.13' and sys_platform == 'linux') or (python_full_version < '3.13' and sys_platform == 'win32')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/50/79/66800aadf48771f6b62f7eb014e352e5d06856655206165d775e675a02c9/exceptiongroup-1.3.1.tar.gz", hash = 
"sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219", size = 30371, upload-time = "2025-11-21T23:01:54.787Z" } wheels = [ @@ -1977,7 +1991,7 @@ wheels = [ [[package]] name = "fastapi" -version = "0.128.0" +version = "0.128.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "annotated-doc", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -1985,9 +1999,9 @@ dependencies = [ { name = "starlette", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "typing-extensions", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/52/08/8c8508db6c7b9aae8f7175046af41baad690771c9bcde676419965e338c7/fastapi-0.128.0.tar.gz", hash = "sha256:1cc179e1cef10a6be60ffe429f79b829dce99d8de32d7acb7e6c8dfdf7f2645a", size = 365682, upload-time = "2025-12-27T15:21:13.714Z" } +sdist = { url = "https://files.pythonhosted.org/packages/f6/59/28bde150415783ff084334e3de106eb7461a57864cf69f343950ad5a5ddd/fastapi-0.128.1.tar.gz", hash = "sha256:ce5be4fa26d4ce6f54debcc873d1fb8e0e248f5c48d7502ba6c61457ab2dc766", size = 374260, upload-time = "2026-02-04T17:35:10.542Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/5c/05/5cbb59154b093548acd0f4c7c474a118eda06da25aa75c616b72d8fcd92a/fastapi-0.128.0-py3-none-any.whl", hash = "sha256:aebd93f9716ee3b4f4fcfe13ffb7cf308d99c9f3ab5622d8877441072561582d", size = 103094, upload-time = "2025-12-27T15:21:12.154Z" }, + { url = "https://files.pythonhosted.org/packages/1a/08/3953db1979ea131c68279b997c6465080118b407f0800445b843f8e164b3/fastapi-0.128.1-py3-none-any.whl", hash = "sha256:ee82146bbf91ea5bbf2bb8629e4c6e056c4fbd997ea6068501b11b15260b50fb", size = 103810, upload-time = "2026-02-04T17:35:08.02Z" }, ] [[package]] @@ -2348,16 +2362,16 @@ wheels = [ [[package]] name = "github-copilot-sdk" -version = "0.1.20" 
+version = "0.1.21" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "pydantic", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "python-dateutil", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "typing-extensions", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/02/7d/afde0ec85815a558612130dc5ff79536299f411e672410c3edc0c1edeb2a/github_copilot_sdk-0.1.20.tar.gz", hash = "sha256:9e89cd46577fd18dd808d7113b7e20e021c4f944121a0a4891945460fb26c53c", size = 92207, upload-time = "2026-01-30T00:25:20.509Z" } +sdist = { url = "https://files.pythonhosted.org/packages/95/d0/f1b55044e1a3e3f368c867cbf91e68e36282efa9f53eb03532cf761a84e8/github_copilot_sdk-0.1.21.tar.gz", hash = "sha256:1c8572d1155fcedb1c3c4f02b4d4fe0aec97ccba63ab0c1b87f8f871da4922ea", size = 96353, upload-time = "2026-02-03T23:15:26.627Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/55/91/f8cfa809184988a273af58824b312d31a532ee3ee70875100b5061540178/github_copilot_sdk-0.1.20-py3-none-any.whl", hash = "sha256:e7fa1bb843e2494930126551b80f3a035f36c47a05f9173ad0cdfb4151ad9346", size = 40306, upload-time = "2026-01-30T00:25:19.184Z" }, + { url = "https://files.pythonhosted.org/packages/ba/39/b8107ca00e42c44bd964e187aa81a60ae2e09fcbae9f255f7e50d7c0cead/github_copilot_sdk-0.1.21-py3-none-any.whl", hash = "sha256:c09d4004d14171474680c6d9279c0f10d6b4636c370f574828da6181aafb6b34", size = 43732, upload-time = "2026-02-03T23:15:25.377Z" }, ] [[package]] @@ -2420,7 +2434,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/fe/65/5b235b40581ad75ab97dcd8b4218022ae8e3ab77c13c919f1a1dfe9171fd/greenlet-3.3.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:04bee4775f40ecefcdaa9d115ab44736cd4b9c5fba733575bfe9379419582e13", size = 273723, upload-time = 
"2026-01-23T15:30:37.521Z" }, { url = "https://files.pythonhosted.org/packages/ce/ad/eb4729b85cba2d29499e0a04ca6fbdd8f540afd7be142fd571eea43d712f/greenlet-3.3.1-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:50e1457f4fed12a50e427988a07f0f9df53cf0ee8da23fab16e6732c2ec909d4", size = 574874, upload-time = "2026-01-23T16:00:54.551Z" }, { url = "https://files.pythonhosted.org/packages/87/32/57cad7fe4c8b82fdaa098c89498ef85ad92dfbb09d5eb713adedfc2ae1f5/greenlet-3.3.1-cp310-cp310-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:070472cd156f0656f86f92e954591644e158fd65aa415ffbe2d44ca77656a8f5", size = 586309, upload-time = "2026-01-23T16:05:25.18Z" }, - { url = "https://files.pythonhosted.org/packages/66/66/f041005cb87055e62b0d68680e88ec1a57f4688523d5e2fb305841bc8307/greenlet-3.3.1-cp310-cp310-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:1108b61b06b5224656121c3c8ee8876161c491cbe74e5c519e0634c837cf93d5", size = 597461, upload-time = "2026-01-23T16:15:51.943Z" }, { url = "https://files.pythonhosted.org/packages/87/eb/8a1ec2da4d55824f160594a75a9d8354a5fe0a300fb1c48e7944265217e1/greenlet-3.3.1-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3a300354f27dd86bae5fbf7002e6dd2b3255cd372e9242c933faf5e859b703fe", size = 586985, upload-time = "2026-01-23T15:32:47.968Z" }, { url = "https://files.pythonhosted.org/packages/15/1c/0621dd4321dd8c351372ee8f9308136acb628600658a49be1b7504208738/greenlet-3.3.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e84b51cbebf9ae573b5fbd15df88887815e3253fc000a7d0ff95170e8f7e9729", size = 1547271, upload-time = "2026-01-23T16:04:18.977Z" }, { url = "https://files.pythonhosted.org/packages/9d/53/24047f8924c83bea7a59c8678d9571209c6bfe5f4c17c94a78c06024e9f2/greenlet-3.3.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e0093bd1a06d899892427217f0ff2a3c8f306182b8c754336d32e2d587c131b4", size = 1613427, upload-time = "2026-01-23T15:33:44.428Z" }, @@ 
-2428,7 +2441,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ec/e8/2e1462c8fdbe0f210feb5ac7ad2d9029af8be3bf45bd9fa39765f821642f/greenlet-3.3.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:5fd23b9bc6d37b563211c6abbb1b3cab27db385a4449af5c32e932f93017080c", size = 274974, upload-time = "2026-01-23T15:31:02.891Z" }, { url = "https://files.pythonhosted.org/packages/7e/a8/530a401419a6b302af59f67aaf0b9ba1015855ea7e56c036b5928793c5bd/greenlet-3.3.1-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:09f51496a0bfbaa9d74d36a52d2580d1ef5ed4fdfcff0a73730abfbbbe1403dd", size = 577175, upload-time = "2026-01-23T16:00:56.213Z" }, { url = "https://files.pythonhosted.org/packages/8e/89/7e812bb9c05e1aaef9b597ac1d0962b9021d2c6269354966451e885c4e6b/greenlet-3.3.1-cp311-cp311-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:cb0feb07fe6e6a74615ee62a880007d976cf739b6669cce95daa7373d4fc69c5", size = 590401, upload-time = "2026-01-23T16:05:26.365Z" }, - { url = "https://files.pythonhosted.org/packages/70/ae/e2d5f0e59b94a2269b68a629173263fa40b63da32f5c231307c349315871/greenlet-3.3.1-cp311-cp311-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:67ea3fc73c8cd92f42467a72b75e8f05ed51a0e9b1d15398c913416f2dafd49f", size = 601161, upload-time = "2026-01-23T16:15:53.456Z" }, { url = "https://files.pythonhosted.org/packages/5c/ae/8d472e1f5ac5efe55c563f3eabb38c98a44b832602e12910750a7c025802/greenlet-3.3.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:39eda9ba259cc9801da05351eaa8576e9aa83eb9411e8f0c299e05d712a210f2", size = 590272, upload-time = "2026-01-23T15:32:49.411Z" }, { url = "https://files.pythonhosted.org/packages/a8/51/0fde34bebfcadc833550717eade64e35ec8738e6b097d5d248274a01258b/greenlet-3.3.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e2e7e882f83149f0a71ac822ebf156d902e7a5d22c9045e3e0d1daf59cee2cc9", size = 1550729, upload-time = "2026-01-23T16:04:20.867Z" }, { 
url = "https://files.pythonhosted.org/packages/16/c9/2fb47bee83b25b119d5a35d580807bb8b92480a54b68fef009a02945629f/greenlet-3.3.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:80aa4d79eb5564f2e0a6144fcc744b5a37c56c4a92d60920720e99210d88db0f", size = 1615552, upload-time = "2026-01-23T15:33:45.743Z" }, @@ -2437,7 +2449,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/f9/c8/9d76a66421d1ae24340dfae7e79c313957f6e3195c144d2c73333b5bfe34/greenlet-3.3.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:7e806ca53acf6d15a888405880766ec84721aa4181261cd11a457dfe9a7a4975", size = 276443, upload-time = "2026-01-23T15:30:10.066Z" }, { url = "https://files.pythonhosted.org/packages/81/99/401ff34bb3c032d1f10477d199724f5e5f6fbfb59816ad1455c79c1eb8e7/greenlet-3.3.1-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d842c94b9155f1c9b3058036c24ffb8ff78b428414a19792b2380be9cecf4f36", size = 597359, upload-time = "2026-01-23T16:00:57.394Z" }, { url = "https://files.pythonhosted.org/packages/2b/bc/4dcc0871ed557792d304f50be0f7487a14e017952ec689effe2180a6ff35/greenlet-3.3.1-cp312-cp312-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:20fedaadd422fa02695f82093f9a98bad3dab5fcda793c658b945fcde2ab27ba", size = 607805, upload-time = "2026-01-23T16:05:28.068Z" }, - { url = "https://files.pythonhosted.org/packages/3b/cd/7a7ca57588dac3389e97f7c9521cb6641fd8b6602faf1eaa4188384757df/greenlet-3.3.1-cp312-cp312-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:c620051669fd04ac6b60ebc70478210119c56e2d5d5df848baec4312e260e4ca", size = 622363, upload-time = "2026-01-23T16:15:54.754Z" }, { url = "https://files.pythonhosted.org/packages/cf/05/821587cf19e2ce1f2b24945d890b164401e5085f9d09cbd969b0c193cd20/greenlet-3.3.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:14194f5f4305800ff329cbf02c5fcc88f01886cadd29941b807668a45f0d2336", size = 609947, upload-time = "2026-01-23T15:32:51.004Z" }, { 
url = "https://files.pythonhosted.org/packages/a4/52/ee8c46ed9f8babaa93a19e577f26e3d28a519feac6350ed6f25f1afee7e9/greenlet-3.3.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7b2fe4150a0cf59f847a67db8c155ac36aed89080a6a639e9f16df5d6c6096f1", size = 1567487, upload-time = "2026-01-23T16:04:22.125Z" }, { url = "https://files.pythonhosted.org/packages/8f/7c/456a74f07029597626f3a6db71b273a3632aecb9afafeeca452cfa633197/greenlet-3.3.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:49f4ad195d45f4a66a0eb9c1ba4832bb380570d361912fa3554746830d332149", size = 1636087, upload-time = "2026-01-23T15:33:47.486Z" }, @@ -2446,7 +2457,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ec/ab/d26750f2b7242c2b90ea2ad71de70cfcd73a948a49513188a0fc0d6fc15a/greenlet-3.3.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:7ab327905cabb0622adca5971e488064e35115430cec2c35a50fd36e72a315b3", size = 275205, upload-time = "2026-01-23T15:30:24.556Z" }, { url = "https://files.pythonhosted.org/packages/10/d3/be7d19e8fad7c5a78eeefb2d896a08cd4643e1e90c605c4be3b46264998f/greenlet-3.3.1-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:65be2f026ca6a176f88fb935ee23c18333ccea97048076aef4db1ef5bc0713ac", size = 599284, upload-time = "2026-01-23T16:00:58.584Z" }, { url = "https://files.pythonhosted.org/packages/ae/21/fe703aaa056fdb0f17e5afd4b5c80195bbdab701208918938bd15b00d39b/greenlet-3.3.1-cp313-cp313-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7a3ae05b3d225b4155bda56b072ceb09d05e974bc74be6c3fc15463cf69f33fd", size = 610274, upload-time = "2026-01-23T16:05:29.312Z" }, - { url = "https://files.pythonhosted.org/packages/06/00/95df0b6a935103c0452dad2203f5be8377e551b8466a29650c4c5a5af6cc/greenlet-3.3.1-cp313-cp313-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:12184c61e5d64268a160226fb4818af4df02cfead8379d7f8b99a56c3a54ff3e", size = 624375, upload-time = "2026-01-23T16:15:55.915Z" }, { url = 
"https://files.pythonhosted.org/packages/cb/86/5c6ab23bb3c28c21ed6bebad006515cfe08b04613eb105ca0041fecca852/greenlet-3.3.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6423481193bbbe871313de5fd06a082f2649e7ce6e08015d2a76c1e9186ca5b3", size = 612904, upload-time = "2026-01-23T15:32:52.317Z" }, { url = "https://files.pythonhosted.org/packages/c2/f3/7949994264e22639e40718c2daf6f6df5169bf48fb038c008a489ec53a50/greenlet-3.3.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:33a956fe78bbbda82bfc95e128d61129b32d66bcf0a20a1f0c08aa4839ffa951", size = 1567316, upload-time = "2026-01-23T16:04:23.316Z" }, { url = "https://files.pythonhosted.org/packages/8d/6e/d73c94d13b6465e9f7cd6231c68abde838bb22408596c05d9059830b7872/greenlet-3.3.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b065d3284be43728dd280f6f9a13990b56470b81be20375a207cdc814a983f2", size = 1636549, upload-time = "2026-01-23T15:33:48.643Z" }, @@ -2455,7 +2465,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ae/fb/011c7c717213182caf78084a9bea51c8590b0afda98001f69d9f853a495b/greenlet-3.3.1-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:bd59acd8529b372775cd0fcbc5f420ae20681c5b045ce25bd453ed8455ab99b5", size = 275737, upload-time = "2026-01-23T15:32:16.889Z" }, { url = "https://files.pythonhosted.org/packages/41/2e/a3a417d620363fdbb08a48b1dd582956a46a61bf8fd27ee8164f9dfe87c2/greenlet-3.3.1-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b31c05dd84ef6871dd47120386aed35323c944d86c3d91a17c4b8d23df62f15b", size = 646422, upload-time = "2026-01-23T16:01:00.354Z" }, { url = "https://files.pythonhosted.org/packages/b4/09/c6c4a0db47defafd2d6bab8ddfe47ad19963b4e30f5bed84d75328059f8c/greenlet-3.3.1-cp314-cp314-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:02925a0bfffc41e542c70aa14c7eda3593e4d7e274bfcccca1827e6c0875902e", size = 658219, upload-time = "2026-01-23T16:05:30.956Z" }, - { url = 
"https://files.pythonhosted.org/packages/e2/89/b95f2ddcc5f3c2bc09c8ee8d77be312df7f9e7175703ab780f2014a0e781/greenlet-3.3.1-cp314-cp314-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3e0f3878ca3a3ff63ab4ea478585942b53df66ddde327b59ecb191b19dbbd62d", size = 671455, upload-time = "2026-01-23T16:15:57.232Z" }, { url = "https://files.pythonhosted.org/packages/80/38/9d42d60dffb04b45f03dbab9430898352dba277758640751dc5cc316c521/greenlet-3.3.1-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:34a729e2e4e4ffe9ae2408d5ecaf12f944853f40ad724929b7585bca808a9d6f", size = 660237, upload-time = "2026-01-23T15:32:53.967Z" }, { url = "https://files.pythonhosted.org/packages/96/61/373c30b7197f9e756e4c81ae90a8d55dc3598c17673f91f4d31c3c689c3f/greenlet-3.3.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:aec9ab04e82918e623415947921dea15851b152b822661cce3f8e4393c3df683", size = 1615261, upload-time = "2026-01-23T16:04:25.066Z" }, { url = "https://files.pythonhosted.org/packages/fd/d3/ca534310343f5945316f9451e953dcd89b36fe7a19de652a1dc5a0eeef3f/greenlet-3.3.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:71c767cf281a80d02b6c1bdc41c9468e1f5a494fb11bc8688c360524e273d7b1", size = 1683719, upload-time = "2026-01-23T15:33:50.61Z" }, @@ -2464,7 +2473,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/28/24/cbbec49bacdcc9ec652a81d3efef7b59f326697e7edf6ed775a5e08e54c2/greenlet-3.3.1-cp314-cp314t-macosx_11_0_universal2.whl", hash = "sha256:3e63252943c921b90abb035ebe9de832c436401d9c45f262d80e2d06cc659242", size = 282706, upload-time = "2026-01-23T15:33:05.525Z" }, { url = "https://files.pythonhosted.org/packages/86/2e/4f2b9323c144c4fe8842a4e0d92121465485c3c2c5b9e9b30a52e80f523f/greenlet-3.3.1-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:76e39058e68eb125de10c92524573924e827927df5d3891fbc97bd55764a8774", size = 651209, upload-time = "2026-01-23T16:01:01.517Z" }, { url = 
"https://files.pythonhosted.org/packages/d9/87/50ca60e515f5bb55a2fbc5f0c9b5b156de7d2fc51a0a69abc9d23914a237/greenlet-3.3.1-cp314-cp314t-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c9f9d5e7a9310b7a2f416dd13d2e3fd8b42d803968ea580b7c0f322ccb389b97", size = 654300, upload-time = "2026-01-23T16:05:32.199Z" }, - { url = "https://files.pythonhosted.org/packages/7c/25/c51a63f3f463171e09cb586eb64db0861eb06667ab01a7968371a24c4f3b/greenlet-3.3.1-cp314-cp314t-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4b9721549a95db96689458a1e0ae32412ca18776ed004463df3a9299c1b257ab", size = 662574, upload-time = "2026-01-23T16:15:58.364Z" }, { url = "https://files.pythonhosted.org/packages/1d/94/74310866dfa2b73dd08659a3d18762f83985ad3281901ba0ee9a815194fb/greenlet-3.3.1-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:92497c78adf3ac703b57f1e3813c2d874f27f71a178f9ea5887855da413cd6d2", size = 653842, upload-time = "2026-01-23T15:32:55.671Z" }, { url = "https://files.pythonhosted.org/packages/97/43/8bf0ffa3d498eeee4c58c212a3905dd6146c01c8dc0b0a046481ca29b18c/greenlet-3.3.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:ed6b402bc74d6557a705e197d47f9063733091ed6357b3de33619d8a8d93ac53", size = 1614917, upload-time = "2026-01-23T16:04:26.276Z" }, { url = "https://files.pythonhosted.org/packages/89/90/a3be7a5f378fc6e84abe4dcfb2ba32b07786861172e502388b4c90000d1b/greenlet-3.3.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:59913f1e5ada20fde795ba906916aea25d442abcc0593fba7e26c92b7ad76249", size = 1676092, upload-time = "2026-01-23T15:33:52.176Z" }, @@ -2732,7 +2740,7 @@ wheels = [ [[package]] name = "huggingface-hub" -version = "1.3.5" +version = "1.4.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "filelock", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -2746,9 +2754,9 @@ dependencies = [ { name = "typer-slim", marker = "sys_platform 
== 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "typing-extensions", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/67/e9/2658cb9bc4c72a67b7f87650e827266139befaf499095883d30dabc4d49f/huggingface_hub-1.3.5.tar.gz", hash = "sha256:8045aca8ddab35d937138f3c386c6d43a275f53437c5c64cdc9aa8408653b4ed", size = 627456, upload-time = "2026-01-29T10:34:19.687Z" } +sdist = { url = "https://files.pythonhosted.org/packages/d9/0e/e73927175162b8a4702b9f59268860f441fbe037c3960b1b6791eeb1deb7/huggingface_hub-1.4.0.tar.gz", hash = "sha256:dd8ca29409be10f544b624265f7ffe13a1a5c3f049f493b5dc9816ef3c6bd57b", size = 641608, upload-time = "2026-02-04T13:48:55.341Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f9/84/a579b95c46fe8e319f89dc700c087596f665141575f4dcf136aaa97d856f/huggingface_hub-1.3.5-py3-none-any.whl", hash = "sha256:fe332d7f86a8af874768452295c22cd3f37730fb2463cf6cc3295e26036f8ef9", size = 536675, upload-time = "2026-01-29T10:34:17.713Z" }, + { url = "https://files.pythonhosted.org/packages/3f/74/f0fb3a54fbca7c0aeff85f41d93b90ca3f6a36d918459401a3890763c54b/huggingface_hub-1.4.0-py3-none-any.whl", hash = "sha256:49d380ffddb31d9d4b6acc0792691f8fa077e1ed51980ed42c7abca62ec1b3b6", size = 553202, upload-time = "2026-02-04T13:48:53.545Z" }, ] [[package]] @@ -2840,99 +2848,99 @@ wheels = [ [[package]] name = "jiter" -version = "0.12.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/45/9d/e0660989c1370e25848bb4c52d061c71837239738ad937e83edca174c273/jiter-0.12.0.tar.gz", hash = "sha256:64dfcd7d5c168b38d3f9f8bba7fc639edb3418abcc74f22fdbe6b8938293f30b", size = 168294, upload-time = "2025-11-09T20:49:23.302Z" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/3b/91/13cb9505f7be74a933f37da3af22e029f6ba64f5669416cb8b2774bc9682/jiter-0.12.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:e7acbaba9703d5de82a2c98ae6a0f59ab9770ab5af5fa35e43a303aee962cf65", size = 316652, upload-time = "2025-11-09T20:46:41.021Z" }, - { url = "https://files.pythonhosted.org/packages/4e/76/4e9185e5d9bb4e482cf6dec6410d5f78dfeb374cfcecbbe9888d07c52daa/jiter-0.12.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:364f1a7294c91281260364222f535bc427f56d4de1d8ffd718162d21fbbd602e", size = 319829, upload-time = "2025-11-09T20:46:43.281Z" }, - { url = "https://files.pythonhosted.org/packages/86/af/727de50995d3a153138139f259baae2379d8cb0522c0c00419957bc478a6/jiter-0.12.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85ee4d25805d4fb23f0a5167a962ef8e002dbfb29c0989378488e32cf2744b62", size = 350568, upload-time = "2025-11-09T20:46:45.075Z" }, - { url = "https://files.pythonhosted.org/packages/6a/c1/d6e9f4b7a3d5ac63bcbdfddeb50b2dcfbdc512c86cffc008584fdc350233/jiter-0.12.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:796f466b7942107eb889c08433b6e31b9a7ed31daceaecf8af1be26fb26c0ca8", size = 369052, upload-time = "2025-11-09T20:46:46.818Z" }, - { url = "https://files.pythonhosted.org/packages/eb/be/00824cd530f30ed73fa8a4f9f3890a705519e31ccb9e929f1e22062e7c76/jiter-0.12.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:35506cb71f47dba416694e67af996bbdefb8e3608f1f78799c2e1f9058b01ceb", size = 481585, upload-time = "2025-11-09T20:46:48.319Z" }, - { url = "https://files.pythonhosted.org/packages/74/b6/2ad7990dff9504d4b5052eef64aa9574bd03d722dc7edced97aad0d47be7/jiter-0.12.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:726c764a90c9218ec9e4f99a33d6bf5ec169163f2ca0fc21b654e88c2abc0abc", size = 380541, upload-time = "2025-11-09T20:46:49.643Z" }, - { url = 
"https://files.pythonhosted.org/packages/b5/c7/f3c26ecbc1adbf1db0d6bba99192143d8fe8504729d9594542ecc4445784/jiter-0.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa47810c5565274810b726b0dc86d18dce5fd17b190ebdc3890851d7b2a0e74", size = 364423, upload-time = "2025-11-09T20:46:51.731Z" }, - { url = "https://files.pythonhosted.org/packages/18/51/eac547bf3a2d7f7e556927278e14c56a0604b8cddae75815d5739f65f81d/jiter-0.12.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f8ec0259d3f26c62aed4d73b198c53e316ae11f0f69c8fbe6682c6dcfa0fcce2", size = 389958, upload-time = "2025-11-09T20:46:53.432Z" }, - { url = "https://files.pythonhosted.org/packages/2c/1f/9ca592e67175f2db156cff035e0d817d6004e293ee0c1d73692d38fcb596/jiter-0.12.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:79307d74ea83465b0152fa23e5e297149506435535282f979f18b9033c0bb025", size = 522084, upload-time = "2025-11-09T20:46:54.848Z" }, - { url = "https://files.pythonhosted.org/packages/83/ff/597d9cdc3028f28224f53e1a9d063628e28b7a5601433e3196edda578cdd/jiter-0.12.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cf6e6dd18927121fec86739f1a8906944703941d000f0639f3eb6281cc601dca", size = 513054, upload-time = "2025-11-09T20:46:56.487Z" }, - { url = "https://files.pythonhosted.org/packages/24/6d/1970bce1351bd02e3afcc5f49e4f7ef3dabd7fb688f42be7e8091a5b809a/jiter-0.12.0-cp310-cp310-win32.whl", hash = "sha256:b6ae2aec8217327d872cbfb2c1694489057b9433afce447955763e6ab015b4c4", size = 206368, upload-time = "2025-11-09T20:46:58.638Z" }, - { url = "https://files.pythonhosted.org/packages/e3/6b/eb1eb505b2d86709b59ec06681a2b14a94d0941db091f044b9f0e16badc0/jiter-0.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:c7f49ce90a71e44f7e1aa9e7ec415b9686bbc6a5961e57eab511015e6759bc11", size = 204847, upload-time = "2025-11-09T20:47:00.295Z" }, - { url = 
"https://files.pythonhosted.org/packages/32/f9/eaca4633486b527ebe7e681c431f529b63fe2709e7c5242fc0f43f77ce63/jiter-0.12.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d8f8a7e317190b2c2d60eb2e8aa835270b008139562d70fe732e1c0020ec53c9", size = 316435, upload-time = "2025-11-09T20:47:02.087Z" }, - { url = "https://files.pythonhosted.org/packages/10/c1/40c9f7c22f5e6ff715f28113ebaba27ab85f9af2660ad6e1dd6425d14c19/jiter-0.12.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2218228a077e784c6c8f1a8e5d6b8cb1dea62ce25811c356364848554b2056cd", size = 320548, upload-time = "2025-11-09T20:47:03.409Z" }, - { url = "https://files.pythonhosted.org/packages/6b/1b/efbb68fe87e7711b00d2cfd1f26bb4bfc25a10539aefeaa7727329ffb9cb/jiter-0.12.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9354ccaa2982bf2188fd5f57f79f800ef622ec67beb8329903abf6b10da7d423", size = 351915, upload-time = "2025-11-09T20:47:05.171Z" }, - { url = "https://files.pythonhosted.org/packages/15/2d/c06e659888c128ad1e838123d0638f0efad90cc30860cb5f74dd3f2fc0b3/jiter-0.12.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8f2607185ea89b4af9a604d4c7ec40e45d3ad03ee66998b031134bc510232bb7", size = 368966, upload-time = "2025-11-09T20:47:06.508Z" }, - { url = "https://files.pythonhosted.org/packages/6b/20/058db4ae5fb07cf6a4ab2e9b9294416f606d8e467fb74c2184b2a1eeacba/jiter-0.12.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3a585a5e42d25f2e71db5f10b171f5e5ea641d3aa44f7df745aa965606111cc2", size = 482047, upload-time = "2025-11-09T20:47:08.382Z" }, - { url = "https://files.pythonhosted.org/packages/49/bb/dc2b1c122275e1de2eb12905015d61e8316b2f888bdaac34221c301495d6/jiter-0.12.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd9e21d34edff5a663c631f850edcb786719c960ce887a5661e9c828a53a95d9", size = 380835, upload-time = "2025-11-09T20:47:09.81Z" }, - { url = 
"https://files.pythonhosted.org/packages/23/7d/38f9cd337575349de16da575ee57ddb2d5a64d425c9367f5ef9e4612e32e/jiter-0.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a612534770470686cd5431478dc5a1b660eceb410abade6b1b74e320ca98de6", size = 364587, upload-time = "2025-11-09T20:47:11.529Z" }, - { url = "https://files.pythonhosted.org/packages/f0/a3/b13e8e61e70f0bb06085099c4e2462647f53cc2ca97614f7fedcaa2bb9f3/jiter-0.12.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3985aea37d40a908f887b34d05111e0aae822943796ebf8338877fee2ab67725", size = 390492, upload-time = "2025-11-09T20:47:12.993Z" }, - { url = "https://files.pythonhosted.org/packages/07/71/e0d11422ed027e21422f7bc1883c61deba2d9752b720538430c1deadfbca/jiter-0.12.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b1207af186495f48f72529f8d86671903c8c10127cac6381b11dddc4aaa52df6", size = 522046, upload-time = "2025-11-09T20:47:14.6Z" }, - { url = "https://files.pythonhosted.org/packages/9f/59/b968a9aa7102a8375dbbdfbd2aeebe563c7e5dddf0f47c9ef1588a97e224/jiter-0.12.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ef2fb241de583934c9915a33120ecc06d94aa3381a134570f59eed784e87001e", size = 513392, upload-time = "2025-11-09T20:47:16.011Z" }, - { url = "https://files.pythonhosted.org/packages/ca/e4/7df62002499080dbd61b505c5cb351aa09e9959d176cac2aa8da6f93b13b/jiter-0.12.0-cp311-cp311-win32.whl", hash = "sha256:453b6035672fecce8007465896a25b28a6b59cfe8fbc974b2563a92f5a92a67c", size = 206096, upload-time = "2025-11-09T20:47:17.344Z" }, - { url = "https://files.pythonhosted.org/packages/bb/60/1032b30ae0572196b0de0e87dce3b6c26a1eff71aad5fe43dee3082d32e0/jiter-0.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:ca264b9603973c2ad9435c71a8ec8b49f8f715ab5ba421c85a51cde9887e421f", size = 204899, upload-time = "2025-11-09T20:47:19.365Z" }, - { url = 
"https://files.pythonhosted.org/packages/49/d5/c145e526fccdb834063fb45c071df78b0cc426bbaf6de38b0781f45d956f/jiter-0.12.0-cp311-cp311-win_arm64.whl", hash = "sha256:cb00ef392e7d684f2754598c02c409f376ddcef857aae796d559e6cacc2d78a5", size = 188070, upload-time = "2025-11-09T20:47:20.75Z" }, - { url = "https://files.pythonhosted.org/packages/92/c9/5b9f7b4983f1b542c64e84165075335e8a236fa9e2ea03a0c79780062be8/jiter-0.12.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:305e061fa82f4680607a775b2e8e0bcb071cd2205ac38e6ef48c8dd5ebe1cf37", size = 314449, upload-time = "2025-11-09T20:47:22.999Z" }, - { url = "https://files.pythonhosted.org/packages/98/6e/e8efa0e78de00db0aee82c0cf9e8b3f2027efd7f8a71f859d8f4be8e98ef/jiter-0.12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5c1860627048e302a528333c9307c818c547f214d8659b0705d2195e1a94b274", size = 319855, upload-time = "2025-11-09T20:47:24.779Z" }, - { url = "https://files.pythonhosted.org/packages/20/26/894cd88e60b5d58af53bec5c6759d1292bd0b37a8b5f60f07abf7a63ae5f/jiter-0.12.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df37577a4f8408f7e0ec3205d2a8f87672af8f17008358063a4d6425b6081ce3", size = 350171, upload-time = "2025-11-09T20:47:26.469Z" }, - { url = "https://files.pythonhosted.org/packages/f5/27/a7b818b9979ac31b3763d25f3653ec3a954044d5e9f5d87f2f247d679fd1/jiter-0.12.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:75fdd787356c1c13a4f40b43c2156276ef7a71eb487d98472476476d803fb2cf", size = 365590, upload-time = "2025-11-09T20:47:27.918Z" }, - { url = "https://files.pythonhosted.org/packages/ba/7e/e46195801a97673a83746170b17984aa8ac4a455746354516d02ca5541b4/jiter-0.12.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1eb5db8d9c65b112aacf14fcd0faae9913d07a8afea5ed06ccdd12b724e966a1", size = 479462, upload-time = "2025-11-09T20:47:29.654Z" }, - { url = 
"https://files.pythonhosted.org/packages/ca/75/f833bfb009ab4bd11b1c9406d333e3b4357709ed0570bb48c7c06d78c7dd/jiter-0.12.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:73c568cc27c473f82480abc15d1301adf333a7ea4f2e813d6a2c7d8b6ba8d0df", size = 378983, upload-time = "2025-11-09T20:47:31.026Z" }, - { url = "https://files.pythonhosted.org/packages/71/b3/7a69d77943cc837d30165643db753471aff5df39692d598da880a6e51c24/jiter-0.12.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4321e8a3d868919bcb1abb1db550d41f2b5b326f72df29e53b2df8b006eb9403", size = 361328, upload-time = "2025-11-09T20:47:33.286Z" }, - { url = "https://files.pythonhosted.org/packages/b0/ac/a78f90caf48d65ba70d8c6efc6f23150bc39dc3389d65bbec2a95c7bc628/jiter-0.12.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0a51bad79f8cc9cac2b4b705039f814049142e0050f30d91695a2d9a6611f126", size = 386740, upload-time = "2025-11-09T20:47:34.703Z" }, - { url = "https://files.pythonhosted.org/packages/39/b6/5d31c2cc8e1b6a6bcf3c5721e4ca0a3633d1ab4754b09bc7084f6c4f5327/jiter-0.12.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:2a67b678f6a5f1dd6c36d642d7db83e456bc8b104788262aaefc11a22339f5a9", size = 520875, upload-time = "2025-11-09T20:47:36.058Z" }, - { url = "https://files.pythonhosted.org/packages/30/b5/4df540fae4e9f68c54b8dab004bd8c943a752f0b00efd6e7d64aa3850339/jiter-0.12.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efe1a211fe1fd14762adea941e3cfd6c611a136e28da6c39272dbb7a1bbe6a86", size = 511457, upload-time = "2025-11-09T20:47:37.932Z" }, - { url = "https://files.pythonhosted.org/packages/07/65/86b74010e450a1a77b2c1aabb91d4a91dd3cd5afce99f34d75fd1ac64b19/jiter-0.12.0-cp312-cp312-win32.whl", hash = "sha256:d779d97c834b4278276ec703dc3fc1735fca50af63eb7262f05bdb4e62203d44", size = 204546, upload-time = "2025-11-09T20:47:40.47Z" }, - { url = 
"https://files.pythonhosted.org/packages/1c/c7/6659f537f9562d963488e3e55573498a442503ced01f7e169e96a6110383/jiter-0.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:e8269062060212b373316fe69236096aaf4c49022d267c6736eebd66bbbc60bb", size = 205196, upload-time = "2025-11-09T20:47:41.794Z" }, - { url = "https://files.pythonhosted.org/packages/21/f4/935304f5169edadfec7f9c01eacbce4c90bb9a82035ac1de1f3bd2d40be6/jiter-0.12.0-cp312-cp312-win_arm64.whl", hash = "sha256:06cb970936c65de926d648af0ed3d21857f026b1cf5525cb2947aa5e01e05789", size = 186100, upload-time = "2025-11-09T20:47:43.007Z" }, - { url = "https://files.pythonhosted.org/packages/3d/a6/97209693b177716e22576ee1161674d1d58029eb178e01866a0422b69224/jiter-0.12.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:6cc49d5130a14b732e0612bc76ae8db3b49898732223ef8b7599aa8d9810683e", size = 313658, upload-time = "2025-11-09T20:47:44.424Z" }, - { url = "https://files.pythonhosted.org/packages/06/4d/125c5c1537c7d8ee73ad3d530a442d6c619714b95027143f1b61c0b4dfe0/jiter-0.12.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:37f27a32ce36364d2fa4f7fdc507279db604d27d239ea2e044c8f148410defe1", size = 318605, upload-time = "2025-11-09T20:47:45.973Z" }, - { url = "https://files.pythonhosted.org/packages/99/bf/a840b89847885064c41a5f52de6e312e91fa84a520848ee56c97e4fa0205/jiter-0.12.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbc0944aa3d4b4773e348cda635252824a78f4ba44328e042ef1ff3f6080d1cf", size = 349803, upload-time = "2025-11-09T20:47:47.535Z" }, - { url = "https://files.pythonhosted.org/packages/8a/88/e63441c28e0db50e305ae23e19c1d8fae012d78ed55365da392c1f34b09c/jiter-0.12.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:da25c62d4ee1ffbacb97fac6dfe4dcd6759ebdc9015991e92a6eae5816287f44", size = 365120, upload-time = "2025-11-09T20:47:49.284Z" }, - { url = 
"https://files.pythonhosted.org/packages/0a/7c/49b02714af4343970eb8aca63396bc1c82fa01197dbb1e9b0d274b550d4e/jiter-0.12.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:048485c654b838140b007390b8182ba9774621103bd4d77c9c3f6f117474ba45", size = 479918, upload-time = "2025-11-09T20:47:50.807Z" }, - { url = "https://files.pythonhosted.org/packages/69/ba/0a809817fdd5a1db80490b9150645f3aae16afad166960bcd562be194f3b/jiter-0.12.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:635e737fbb7315bef0037c19b88b799143d2d7d3507e61a76751025226b3ac87", size = 379008, upload-time = "2025-11-09T20:47:52.211Z" }, - { url = "https://files.pythonhosted.org/packages/5f/c3/c9fc0232e736c8877d9e6d83d6eeb0ba4e90c6c073835cc2e8f73fdeef51/jiter-0.12.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e017c417b1ebda911bd13b1e40612704b1f5420e30695112efdbed8a4b389ed", size = 361785, upload-time = "2025-11-09T20:47:53.512Z" }, - { url = "https://files.pythonhosted.org/packages/96/61/61f69b7e442e97ca6cd53086ddc1cf59fb830549bc72c0a293713a60c525/jiter-0.12.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:89b0bfb8b2bf2351fba36bb211ef8bfceba73ef58e7f0c68fb67b5a2795ca2f9", size = 386108, upload-time = "2025-11-09T20:47:54.893Z" }, - { url = "https://files.pythonhosted.org/packages/e9/2e/76bb3332f28550c8f1eba3bf6e5efe211efda0ddbbaf24976bc7078d42a5/jiter-0.12.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:f5aa5427a629a824a543672778c9ce0c5e556550d1569bb6ea28a85015287626", size = 519937, upload-time = "2025-11-09T20:47:56.253Z" }, - { url = "https://files.pythonhosted.org/packages/84/d6/fa96efa87dc8bff2094fb947f51f66368fa56d8d4fc9e77b25d7fbb23375/jiter-0.12.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed53b3d6acbcb0fd0b90f20c7cb3b24c357fe82a3518934d4edfa8c6898e498c", size = 510853, upload-time = "2025-11-09T20:47:58.32Z" }, - { url = 
"https://files.pythonhosted.org/packages/8a/28/93f67fdb4d5904a708119a6ab58a8f1ec226ff10a94a282e0215402a8462/jiter-0.12.0-cp313-cp313-win32.whl", hash = "sha256:4747de73d6b8c78f2e253a2787930f4fffc68da7fa319739f57437f95963c4de", size = 204699, upload-time = "2025-11-09T20:47:59.686Z" }, - { url = "https://files.pythonhosted.org/packages/c4/1f/30b0eb087045a0abe2a5c9c0c0c8da110875a1d3be83afd4a9a4e548be3c/jiter-0.12.0-cp313-cp313-win_amd64.whl", hash = "sha256:e25012eb0c456fcc13354255d0338cd5397cce26c77b2832b3c4e2e255ea5d9a", size = 204258, upload-time = "2025-11-09T20:48:01.01Z" }, - { url = "https://files.pythonhosted.org/packages/2c/f4/2b4daf99b96bce6fc47971890b14b2a36aef88d7beb9f057fafa032c6141/jiter-0.12.0-cp313-cp313-win_arm64.whl", hash = "sha256:c97b92c54fe6110138c872add030a1f99aea2401ddcdaa21edf74705a646dd60", size = 185503, upload-time = "2025-11-09T20:48:02.35Z" }, - { url = "https://files.pythonhosted.org/packages/39/ca/67bb15a7061d6fe20b9b2a2fd783e296a1e0f93468252c093481a2f00efa/jiter-0.12.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:53839b35a38f56b8be26a7851a48b89bc47e5d88e900929df10ed93b95fea3d6", size = 317965, upload-time = "2025-11-09T20:48:03.783Z" }, - { url = "https://files.pythonhosted.org/packages/18/af/1788031cd22e29c3b14bc6ca80b16a39a0b10e611367ffd480c06a259831/jiter-0.12.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94f669548e55c91ab47fef8bddd9c954dab1938644e715ea49d7e117015110a4", size = 345831, upload-time = "2025-11-09T20:48:05.55Z" }, - { url = "https://files.pythonhosted.org/packages/05/17/710bf8472d1dff0d3caf4ced6031060091c1320f84ee7d5dcbed1f352417/jiter-0.12.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:351d54f2b09a41600ffea43d081522d792e81dcfb915f6d2d242744c1cc48beb", size = 361272, upload-time = "2025-11-09T20:48:06.951Z" }, - { url = 
"https://files.pythonhosted.org/packages/fb/f1/1dcc4618b59761fef92d10bcbb0b038b5160be653b003651566a185f1a5c/jiter-0.12.0-cp313-cp313t-win_amd64.whl", hash = "sha256:2a5e90604620f94bf62264e7c2c038704d38217b7465b863896c6d7c902b06c7", size = 204604, upload-time = "2025-11-09T20:48:08.328Z" }, - { url = "https://files.pythonhosted.org/packages/d9/32/63cb1d9f1c5c6632a783c0052cde9ef7ba82688f7065e2f0d5f10a7e3edb/jiter-0.12.0-cp313-cp313t-win_arm64.whl", hash = "sha256:88ef757017e78d2860f96250f9393b7b577b06a956ad102c29c8237554380db3", size = 185628, upload-time = "2025-11-09T20:48:09.572Z" }, - { url = "https://files.pythonhosted.org/packages/a8/99/45c9f0dbe4a1416b2b9a8a6d1236459540f43d7fb8883cff769a8db0612d/jiter-0.12.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:c46d927acd09c67a9fb1416df45c5a04c27e83aae969267e98fba35b74e99525", size = 312478, upload-time = "2025-11-09T20:48:10.898Z" }, - { url = "https://files.pythonhosted.org/packages/4c/a7/54ae75613ba9e0f55fcb0bc5d1f807823b5167cc944e9333ff322e9f07dd/jiter-0.12.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:774ff60b27a84a85b27b88cd5583899c59940bcc126caca97eb2a9df6aa00c49", size = 318706, upload-time = "2025-11-09T20:48:12.266Z" }, - { url = "https://files.pythonhosted.org/packages/59/31/2aa241ad2c10774baf6c37f8b8e1f39c07db358f1329f4eb40eba179c2a2/jiter-0.12.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5433fab222fb072237df3f637d01b81f040a07dcac1cb4a5c75c7aa9ed0bef1", size = 351894, upload-time = "2025-11-09T20:48:13.673Z" }, - { url = "https://files.pythonhosted.org/packages/54/4f/0f2759522719133a9042781b18cc94e335b6d290f5e2d3e6899d6af933e3/jiter-0.12.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f8c593c6e71c07866ec6bfb790e202a833eeec885022296aff6b9e0b92d6a70e", size = 365714, upload-time = "2025-11-09T20:48:15.083Z" }, - { url = 
"https://files.pythonhosted.org/packages/dc/6f/806b895f476582c62a2f52c453151edd8a0fde5411b0497baaa41018e878/jiter-0.12.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:90d32894d4c6877a87ae00c6b915b609406819dce8bc0d4e962e4de2784e567e", size = 478989, upload-time = "2025-11-09T20:48:16.706Z" }, - { url = "https://files.pythonhosted.org/packages/86/6c/012d894dc6e1033acd8db2b8346add33e413ec1c7c002598915278a37f79/jiter-0.12.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:798e46eed9eb10c3adbbacbd3bdb5ecd4cf7064e453d00dbef08802dae6937ff", size = 378615, upload-time = "2025-11-09T20:48:18.614Z" }, - { url = "https://files.pythonhosted.org/packages/87/30/d718d599f6700163e28e2c71c0bbaf6dace692e7df2592fd793ac9276717/jiter-0.12.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3f1368f0a6719ea80013a4eb90ba72e75d7ea67cfc7846db2ca504f3df0169a", size = 364745, upload-time = "2025-11-09T20:48:20.117Z" }, - { url = "https://files.pythonhosted.org/packages/8f/85/315b45ce4b6ddc7d7fceca24068543b02bdc8782942f4ee49d652e2cc89f/jiter-0.12.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:65f04a9d0b4406f7e51279710b27484af411896246200e461d80d3ba0caa901a", size = 386502, upload-time = "2025-11-09T20:48:21.543Z" }, - { url = "https://files.pythonhosted.org/packages/74/0b/ce0434fb40c5b24b368fe81b17074d2840748b4952256bab451b72290a49/jiter-0.12.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:fd990541982a24281d12b67a335e44f117e4c6cbad3c3b75c7dea68bf4ce3a67", size = 519845, upload-time = "2025-11-09T20:48:22.964Z" }, - { url = "https://files.pythonhosted.org/packages/e8/a3/7a7a4488ba052767846b9c916d208b3ed114e3eb670ee984e4c565b9cf0d/jiter-0.12.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:b111b0e9152fa7df870ecaebb0bd30240d9f7fff1f2003bcb4ed0f519941820b", size = 510701, upload-time = "2025-11-09T20:48:24.483Z" }, - { url = 
"https://files.pythonhosted.org/packages/c3/16/052ffbf9d0467b70af24e30f91e0579e13ded0c17bb4a8eb2aed3cb60131/jiter-0.12.0-cp314-cp314-win32.whl", hash = "sha256:a78befb9cc0a45b5a5a0d537b06f8544c2ebb60d19d02c41ff15da28a9e22d42", size = 205029, upload-time = "2025-11-09T20:48:25.749Z" }, - { url = "https://files.pythonhosted.org/packages/e4/18/3cf1f3f0ccc789f76b9a754bdb7a6977e5d1d671ee97a9e14f7eb728d80e/jiter-0.12.0-cp314-cp314-win_amd64.whl", hash = "sha256:e1fe01c082f6aafbe5c8faf0ff074f38dfb911d53f07ec333ca03f8f6226debf", size = 204960, upload-time = "2025-11-09T20:48:27.415Z" }, - { url = "https://files.pythonhosted.org/packages/02/68/736821e52ecfdeeb0f024b8ab01b5a229f6b9293bbdb444c27efade50b0f/jiter-0.12.0-cp314-cp314-win_arm64.whl", hash = "sha256:d72f3b5a432a4c546ea4bedc84cce0c3404874f1d1676260b9c7f048a9855451", size = 185529, upload-time = "2025-11-09T20:48:29.125Z" }, - { url = "https://files.pythonhosted.org/packages/30/61/12ed8ee7a643cce29ac97c2281f9ce3956eb76b037e88d290f4ed0d41480/jiter-0.12.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:e6ded41aeba3603f9728ed2b6196e4df875348ab97b28fc8afff115ed42ba7a7", size = 318974, upload-time = "2025-11-09T20:48:30.87Z" }, - { url = "https://files.pythonhosted.org/packages/2d/c6/f3041ede6d0ed5e0e79ff0de4c8f14f401bbf196f2ef3971cdbe5fd08d1d/jiter-0.12.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a947920902420a6ada6ad51892082521978e9dd44a802663b001436e4b771684", size = 345932, upload-time = "2025-11-09T20:48:32.658Z" }, - { url = "https://files.pythonhosted.org/packages/d5/5d/4d94835889edd01ad0e2dbfc05f7bdfaed46292e7b504a6ac7839aa00edb/jiter-0.12.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:add5e227e0554d3a52cf390a7635edaffdf4f8fce4fdbcef3cc2055bb396a30c", size = 367243, upload-time = "2025-11-09T20:48:34.093Z" }, - { url = 
"https://files.pythonhosted.org/packages/fd/76/0051b0ac2816253a99d27baf3dda198663aff882fa6ea7deeb94046da24e/jiter-0.12.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f9b1cda8fcb736250d7e8711d4580ebf004a46771432be0ae4796944b5dfa5d", size = 479315, upload-time = "2025-11-09T20:48:35.507Z" }, - { url = "https://files.pythonhosted.org/packages/70/ae/83f793acd68e5cb24e483f44f482a1a15601848b9b6f199dacb970098f77/jiter-0.12.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:deeb12a2223fe0135c7ff1356a143d57f95bbf1f4a66584f1fc74df21d86b993", size = 380714, upload-time = "2025-11-09T20:48:40.014Z" }, - { url = "https://files.pythonhosted.org/packages/b1/5e/4808a88338ad2c228b1126b93fcd8ba145e919e886fe910d578230dabe3b/jiter-0.12.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c596cc0f4cb574877550ce4ecd51f8037469146addd676d7c1a30ebe6391923f", size = 365168, upload-time = "2025-11-09T20:48:41.462Z" }, - { url = "https://files.pythonhosted.org/packages/0c/d4/04619a9e8095b42aef436b5aeb4c0282b4ff1b27d1db1508df9f5dc82750/jiter-0.12.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ab4c823b216a4aeab3fdbf579c5843165756bd9ad87cc6b1c65919c4715f783", size = 387893, upload-time = "2025-11-09T20:48:42.921Z" }, - { url = "https://files.pythonhosted.org/packages/17/ea/d3c7e62e4546fdc39197fa4a4315a563a89b95b6d54c0d25373842a59cbe/jiter-0.12.0-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:e427eee51149edf962203ff8db75a7514ab89be5cb623fb9cea1f20b54f1107b", size = 520828, upload-time = "2025-11-09T20:48:44.278Z" }, - { url = "https://files.pythonhosted.org/packages/cc/0b/c6d3562a03fd767e31cb119d9041ea7958c3c80cb3d753eafb19b3b18349/jiter-0.12.0-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:edb868841f84c111255ba5e80339d386d937ec1fdce419518ce1bd9370fac5b6", size = 511009, upload-time = "2025-11-09T20:48:45.726Z" }, - { url = 
"https://files.pythonhosted.org/packages/aa/51/2cb4468b3448a8385ebcd15059d325c9ce67df4e2758d133ab9442b19834/jiter-0.12.0-cp314-cp314t-win32.whl", hash = "sha256:8bbcfe2791dfdb7c5e48baf646d37a6a3dcb5a97a032017741dea9f817dca183", size = 205110, upload-time = "2025-11-09T20:48:47.033Z" }, - { url = "https://files.pythonhosted.org/packages/b2/c5/ae5ec83dec9c2d1af805fd5fe8f74ebded9c8670c5210ec7820ce0dbeb1e/jiter-0.12.0-cp314-cp314t-win_amd64.whl", hash = "sha256:2fa940963bf02e1d8226027ef461e36af472dea85d36054ff835aeed944dd873", size = 205223, upload-time = "2025-11-09T20:48:49.076Z" }, - { url = "https://files.pythonhosted.org/packages/97/9a/3c5391907277f0e55195550cf3fa8e293ae9ee0c00fb402fec1e38c0c82f/jiter-0.12.0-cp314-cp314t-win_arm64.whl", hash = "sha256:506c9708dd29b27288f9f8f1140c3cb0e3d8ddb045956d7757b1fa0e0f39a473", size = 185564, upload-time = "2025-11-09T20:48:50.376Z" }, - { url = "https://files.pythonhosted.org/packages/fe/54/5339ef1ecaa881c6948669956567a64d2670941925f245c434f494ffb0e5/jiter-0.12.0-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:4739a4657179ebf08f85914ce50332495811004cc1747852e8b2041ed2aab9b8", size = 311144, upload-time = "2025-11-09T20:49:10.503Z" }, - { url = "https://files.pythonhosted.org/packages/27/74/3446c652bffbd5e81ab354e388b1b5fc1d20daac34ee0ed11ff096b1b01a/jiter-0.12.0-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:41da8def934bf7bec16cb24bd33c0ca62126d2d45d81d17b864bd5ad721393c3", size = 305877, upload-time = "2025-11-09T20:49:12.269Z" }, - { url = "https://files.pythonhosted.org/packages/a1/f4/ed76ef9043450f57aac2d4fbeb27175aa0eb9c38f833be6ef6379b3b9a86/jiter-0.12.0-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c44ee814f499c082e69872d426b624987dbc5943ab06e9bbaa4f81989fdb79e", size = 340419, upload-time = "2025-11-09T20:49:13.803Z" }, - { url = 
"https://files.pythonhosted.org/packages/21/01/857d4608f5edb0664aa791a3d45702e1a5bcfff9934da74035e7b9803846/jiter-0.12.0-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd2097de91cf03eaa27b3cbdb969addf83f0179c6afc41bbc4513705e013c65d", size = 347212, upload-time = "2025-11-09T20:49:15.643Z" }, - { url = "https://files.pythonhosted.org/packages/cb/f5/12efb8ada5f5c9edc1d4555fe383c1fb2eac05ac5859258a72d61981d999/jiter-0.12.0-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:e8547883d7b96ef2e5fe22b88f8a4c8725a56e7f4abafff20fd5272d634c7ecb", size = 309974, upload-time = "2025-11-09T20:49:17.187Z" }, - { url = "https://files.pythonhosted.org/packages/85/15/d6eb3b770f6a0d332675141ab3962fd4a7c270ede3515d9f3583e1d28276/jiter-0.12.0-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:89163163c0934854a668ed783a2546a0617f71706a2551a4a0666d91ab365d6b", size = 304233, upload-time = "2025-11-09T20:49:18.734Z" }, - { url = "https://files.pythonhosted.org/packages/8c/3e/e7e06743294eea2cf02ced6aa0ff2ad237367394e37a0e2b4a1108c67a36/jiter-0.12.0-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d96b264ab7d34bbb2312dedc47ce07cd53f06835eacbc16dde3761f47c3a9e7f", size = 338537, upload-time = "2025-11-09T20:49:20.317Z" }, - { url = "https://files.pythonhosted.org/packages/2f/9c/6753e6522b8d0ef07d3a3d239426669e984fb0eba15a315cdbc1253904e4/jiter-0.12.0-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c24e864cb30ab82311c6425655b0cdab0a98c5d973b065c66a3f020740c2324c", size = 346110, upload-time = "2025-11-09T20:49:21.817Z" }, +version = "0.13.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0d/5e/4ec91646aee381d01cdb9974e30882c9cd3b8c5d1079d6b5ff4af522439a/jiter-0.13.0.tar.gz", hash = 
"sha256:f2839f9c2c7e2dffc1bc5929a510e14ce0a946be9365fd1219e7ef342dae14f4", size = 164847, upload-time = "2026-02-02T12:37:56.441Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d0/5a/41da76c5ea07bec1b0472b6b2fdb1b651074d504b19374d7e130e0cdfb25/jiter-0.13.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2ffc63785fd6c7977defe49b9824ae6ce2b2e2b77ce539bdaf006c26da06342e", size = 311164, upload-time = "2026-02-02T12:35:17.688Z" }, + { url = "https://files.pythonhosted.org/packages/40/cb/4a1bf994a3e869f0d39d10e11efb471b76d0ad70ecbfb591427a46c880c2/jiter-0.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4a638816427006c1e3f0013eb66d391d7a3acda99a7b0cf091eff4497ccea33a", size = 320296, upload-time = "2026-02-02T12:35:19.828Z" }, + { url = "https://files.pythonhosted.org/packages/09/82/acd71ca9b50ecebadc3979c541cd717cce2fe2bc86236f4fa597565d8f1a/jiter-0.13.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19928b5d1ce0ff8c1ee1b9bdef3b5bfc19e8304f1b904e436caf30bc15dc6cf5", size = 352742, upload-time = "2026-02-02T12:35:21.258Z" }, + { url = "https://files.pythonhosted.org/packages/71/03/d1fc996f3aecfd42eb70922edecfb6dd26421c874503e241153ad41df94f/jiter-0.13.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:309549b778b949d731a2f0e1594a3f805716be704a73bf3ad9a807eed5eb5721", size = 363145, upload-time = "2026-02-02T12:35:24.653Z" }, + { url = "https://files.pythonhosted.org/packages/f1/61/a30492366378cc7a93088858f8991acd7d959759fe6138c12a4644e58e81/jiter-0.13.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bcdabaea26cb04e25df3103ce47f97466627999260290349a88c8136ecae0060", size = 487683, upload-time = "2026-02-02T12:35:26.162Z" }, + { url = "https://files.pythonhosted.org/packages/20/4e/4223cffa9dbbbc96ed821c5aeb6bca510848c72c02086d1ed3f1da3d58a7/jiter-0.13.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:a3a377af27b236abbf665a69b2bdd680e3b5a0bd2af825cd3b81245279a7606c", size = 373579, upload-time = "2026-02-02T12:35:27.582Z" }, + { url = "https://files.pythonhosted.org/packages/fe/c9/b0489a01329ab07a83812d9ebcffe7820a38163c6d9e7da644f926ff877c/jiter-0.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe49d3ff6db74321f144dff9addd4a5874d3105ac5ba7c5b77fac099cfae31ae", size = 362904, upload-time = "2026-02-02T12:35:28.925Z" }, + { url = "https://files.pythonhosted.org/packages/05/af/53e561352a44afcba9a9bc67ee1d320b05a370aed8df54eafe714c4e454d/jiter-0.13.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2113c17c9a67071b0f820733c0893ed1d467b5fcf4414068169e5c2cabddb1e2", size = 392380, upload-time = "2026-02-02T12:35:30.385Z" }, + { url = "https://files.pythonhosted.org/packages/76/2a/dd805c3afb8ed5b326c5ae49e725d1b1255b9754b1b77dbecdc621b20773/jiter-0.13.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ab1185ca5c8b9491b55ebf6c1e8866b8f68258612899693e24a92c5fdb9455d5", size = 517939, upload-time = "2026-02-02T12:35:31.865Z" }, + { url = "https://files.pythonhosted.org/packages/20/2a/7b67d76f55b8fe14c937e7640389612f05f9a4145fc28ae128aaa5e62257/jiter-0.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9621ca242547edc16400981ca3231e0c91c0c4c1ab8573a596cd9bb3575d5c2b", size = 551696, upload-time = "2026-02-02T12:35:33.306Z" }, + { url = "https://files.pythonhosted.org/packages/85/9c/57cdd64dac8f4c6ab8f994fe0eb04dc9fd1db102856a4458fcf8a99dfa62/jiter-0.13.0-cp310-cp310-win32.whl", hash = "sha256:a7637d92b1c9d7a771e8c56f445c7f84396d48f2e756e5978840ecba2fac0894", size = 204592, upload-time = "2026-02-02T12:35:34.58Z" }, + { url = "https://files.pythonhosted.org/packages/a7/38/f4f3ea5788b8a5bae7510a678cdc747eda0c45ffe534f9878ff37e7cf3b3/jiter-0.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:c1b609e5cbd2f52bb74fb721515745b407df26d7b800458bd97cb3b972c29e7d", size = 206016, upload-time = 
"2026-02-02T12:35:36.435Z" }, + { url = "https://files.pythonhosted.org/packages/71/29/499f8c9eaa8a16751b1c0e45e6f5f1761d180da873d417996cc7bddc8eef/jiter-0.13.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ea026e70a9a28ebbdddcbcf0f1323128a8db66898a06eaad3a4e62d2f554d096", size = 311157, upload-time = "2026-02-02T12:35:37.758Z" }, + { url = "https://files.pythonhosted.org/packages/50/f6/566364c777d2ab450b92100bea11333c64c38d32caf8dc378b48e5b20c46/jiter-0.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:66aa3e663840152d18cc8ff1e4faad3dd181373491b9cfdc6004b92198d67911", size = 319729, upload-time = "2026-02-02T12:35:39.246Z" }, + { url = "https://files.pythonhosted.org/packages/73/dd/560f13ec5e4f116d8ad2658781646cca91b617ae3b8758d4a5076b278f70/jiter-0.13.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3524798e70655ff19aec58c7d05adb1f074fecff62da857ea9be2b908b6d701", size = 354766, upload-time = "2026-02-02T12:35:40.662Z" }, + { url = "https://files.pythonhosted.org/packages/7c/0d/061faffcfe94608cbc28a0d42a77a74222bdf5055ccdbe5fd2292b94f510/jiter-0.13.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ec7e287d7fbd02cb6e22f9a00dd9c9cd504c40a61f2c61e7e1f9690a82726b4c", size = 362587, upload-time = "2026-02-02T12:35:42.025Z" }, + { url = "https://files.pythonhosted.org/packages/92/c9/c66a7864982fd38a9773ec6e932e0398d1262677b8c60faecd02ffb67bf3/jiter-0.13.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:47455245307e4debf2ce6c6e65a717550a0244231240dcf3b8f7d64e4c2f22f4", size = 487537, upload-time = "2026-02-02T12:35:43.459Z" }, + { url = "https://files.pythonhosted.org/packages/6c/86/84eb4352cd3668f16d1a88929b5888a3fe0418ea8c1dfc2ad4e7bf6e069a/jiter-0.13.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ee9da221dca6e0429c2704c1b3655fe7b025204a71d4d9b73390c759d776d165", size = 373717, upload-time = "2026-02-02T12:35:44.928Z" }, + { url = 
"https://files.pythonhosted.org/packages/6e/09/9fe4c159358176f82d4390407a03f506a8659ed13ca3ac93a843402acecf/jiter-0.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24ab43126d5e05f3d53a36a8e11eb2f23304c6c1117844aaaf9a0aa5e40b5018", size = 362683, upload-time = "2026-02-02T12:35:46.636Z" }, + { url = "https://files.pythonhosted.org/packages/c9/5e/85f3ab9caca0c1d0897937d378b4a515cae9e119730563572361ea0c48ae/jiter-0.13.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9da38b4fedde4fb528c740c2564628fbab737166a0e73d6d46cb4bb5463ff411", size = 392345, upload-time = "2026-02-02T12:35:48.088Z" }, + { url = "https://files.pythonhosted.org/packages/12/4c/05b8629ad546191939e6f0c2f17e29f542a398f4a52fb987bc70b6d1eb8b/jiter-0.13.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0b34c519e17658ed88d5047999a93547f8889f3c1824120c26ad6be5f27b6cf5", size = 517775, upload-time = "2026-02-02T12:35:49.482Z" }, + { url = "https://files.pythonhosted.org/packages/4d/88/367ea2eb6bc582c7052e4baf5ddf57ebe5ab924a88e0e09830dfb585c02d/jiter-0.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d2a6394e6af690d462310a86b53c47ad75ac8c21dc79f120714ea449979cb1d3", size = 551325, upload-time = "2026-02-02T12:35:51.104Z" }, + { url = "https://files.pythonhosted.org/packages/f3/12/fa377ffb94a2f28c41afaed093e0d70cfe512035d5ecb0cad0ae4792d35e/jiter-0.13.0-cp311-cp311-win32.whl", hash = "sha256:0f0c065695f616a27c920a56ad0d4fc46415ef8b806bf8fc1cacf25002bd24e1", size = 204709, upload-time = "2026-02-02T12:35:52.467Z" }, + { url = "https://files.pythonhosted.org/packages/cb/16/8e8203ce92f844dfcd3d9d6a5a7322c77077248dbb12da52d23193a839cd/jiter-0.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:0733312953b909688ae3c2d58d043aa040f9f1a6a75693defed7bc2cc4bf2654", size = 204560, upload-time = "2026-02-02T12:35:53.925Z" }, + { url = 
"https://files.pythonhosted.org/packages/44/26/97cc40663deb17b9e13c3a5cf29251788c271b18ee4d262c8f94798b8336/jiter-0.13.0-cp311-cp311-win_arm64.whl", hash = "sha256:5d9b34ad56761b3bf0fbe8f7e55468704107608512350962d3317ffd7a4382d5", size = 189608, upload-time = "2026-02-02T12:35:55.304Z" }, + { url = "https://files.pythonhosted.org/packages/2e/30/7687e4f87086829955013ca12a9233523349767f69653ebc27036313def9/jiter-0.13.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:0a2bd69fc1d902e89925fc34d1da51b2128019423d7b339a45d9e99c894e0663", size = 307958, upload-time = "2026-02-02T12:35:57.165Z" }, + { url = "https://files.pythonhosted.org/packages/c3/27/e57f9a783246ed95481e6749cc5002a8a767a73177a83c63ea71f0528b90/jiter-0.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f917a04240ef31898182f76a332f508f2cc4b57d2b4d7ad2dbfebbfe167eb505", size = 318597, upload-time = "2026-02-02T12:35:58.591Z" }, + { url = "https://files.pythonhosted.org/packages/cf/52/e5719a60ac5d4d7c5995461a94ad5ef962a37c8bf5b088390e6fad59b2ff/jiter-0.13.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1e2b199f446d3e82246b4fd9236d7cb502dc2222b18698ba0d986d2fecc6152", size = 348821, upload-time = "2026-02-02T12:36:00.093Z" }, + { url = "https://files.pythonhosted.org/packages/61/db/c1efc32b8ba4c740ab3fc2d037d8753f67685f475e26b9d6536a4322bcdd/jiter-0.13.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04670992b576fa65bd056dbac0c39fe8bd67681c380cb2b48efa885711d9d726", size = 364163, upload-time = "2026-02-02T12:36:01.937Z" }, + { url = "https://files.pythonhosted.org/packages/55/8a/fb75556236047c8806995671a18e4a0ad646ed255276f51a20f32dceaeec/jiter-0.13.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5a1aff1fbdb803a376d4d22a8f63f8e7ccbce0b4890c26cc7af9e501ab339ef0", size = 483709, upload-time = "2026-02-02T12:36:03.41Z" }, + { url = 
"https://files.pythonhosted.org/packages/7e/16/43512e6ee863875693a8e6f6d532e19d650779d6ba9a81593ae40a9088ff/jiter-0.13.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b3fb8c2053acaef8580809ac1d1f7481a0a0bdc012fd7f5d8b18fb696a5a089", size = 370480, upload-time = "2026-02-02T12:36:04.791Z" }, + { url = "https://files.pythonhosted.org/packages/f8/4c/09b93e30e984a187bc8aaa3510e1ec8dcbdcd71ca05d2f56aac0492453aa/jiter-0.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bdaba7d87e66f26a2c45d8cbadcbfc4bf7884182317907baf39cfe9775bb4d93", size = 360735, upload-time = "2026-02-02T12:36:06.994Z" }, + { url = "https://files.pythonhosted.org/packages/1a/1b/46c5e349019874ec5dfa508c14c37e29864ea108d376ae26d90bee238cd7/jiter-0.13.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7b88d649135aca526da172e48083da915ec086b54e8e73a425ba50999468cc08", size = 391814, upload-time = "2026-02-02T12:36:08.368Z" }, + { url = "https://files.pythonhosted.org/packages/15/9e/26184760e85baee7162ad37b7912797d2077718476bf91517641c92b3639/jiter-0.13.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e404ea551d35438013c64b4f357b0474c7abf9f781c06d44fcaf7a14c69ff9e2", size = 513990, upload-time = "2026-02-02T12:36:09.993Z" }, + { url = "https://files.pythonhosted.org/packages/e9/34/2c9355247d6debad57a0a15e76ab1566ab799388042743656e566b3b7de1/jiter-0.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1f4748aad1b4a93c8bdd70f604d0f748cdc0e8744c5547798acfa52f10e79228", size = 548021, upload-time = "2026-02-02T12:36:11.376Z" }, + { url = "https://files.pythonhosted.org/packages/ac/4a/9f2c23255d04a834398b9c2e0e665382116911dc4d06b795710503cdad25/jiter-0.13.0-cp312-cp312-win32.whl", hash = "sha256:0bf670e3b1445fc4d31612199f1744f67f889ee1bbae703c4b54dc097e5dd394", size = 203024, upload-time = "2026-02-02T12:36:12.682Z" }, + { url = 
"https://files.pythonhosted.org/packages/09/ee/f0ae675a957ae5a8f160be3e87acea6b11dc7b89f6b7ab057e77b2d2b13a/jiter-0.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:15db60e121e11fe186c0b15236bd5d18381b9ddacdcf4e659feb96fc6c969c92", size = 205424, upload-time = "2026-02-02T12:36:13.93Z" }, + { url = "https://files.pythonhosted.org/packages/1b/02/ae611edf913d3cbf02c97cdb90374af2082c48d7190d74c1111dde08bcdd/jiter-0.13.0-cp312-cp312-win_arm64.whl", hash = "sha256:41f92313d17989102f3cb5dd533a02787cdb99454d494344b0361355da52fcb9", size = 186818, upload-time = "2026-02-02T12:36:15.308Z" }, + { url = "https://files.pythonhosted.org/packages/91/9c/7ee5a6ff4b9991e1a45263bfc46731634c4a2bde27dfda6c8251df2d958c/jiter-0.13.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1f8a55b848cbabf97d861495cd65f1e5c590246fabca8b48e1747c4dfc8f85bf", size = 306897, upload-time = "2026-02-02T12:36:16.748Z" }, + { url = "https://files.pythonhosted.org/packages/7c/02/be5b870d1d2be5dd6a91bdfb90f248fbb7dcbd21338f092c6b89817c3dbf/jiter-0.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f556aa591c00f2c45eb1b89f68f52441a016034d18b65da60e2d2875bbbf344a", size = 317507, upload-time = "2026-02-02T12:36:18.351Z" }, + { url = "https://files.pythonhosted.org/packages/da/92/b25d2ec333615f5f284f3a4024f7ce68cfa0604c322c6808b2344c7f5d2b/jiter-0.13.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7e1d61da332ec412350463891923f960c3073cf1aae93b538f0bb4c8cd46efb", size = 350560, upload-time = "2026-02-02T12:36:19.746Z" }, + { url = "https://files.pythonhosted.org/packages/be/ec/74dcb99fef0aca9fbe56b303bf79f6bd839010cb18ad41000bf6cc71eec0/jiter-0.13.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3097d665a27bc96fd9bbf7f86178037db139f319f785e4757ce7ccbf390db6c2", size = 363232, upload-time = "2026-02-02T12:36:21.243Z" }, + { url = 
"https://files.pythonhosted.org/packages/1b/37/f17375e0bb2f6a812d4dd92d7616e41917f740f3e71343627da9db2824ce/jiter-0.13.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9d01ecc3a8cbdb6f25a37bd500510550b64ddf9f7d64a107d92f3ccb25035d0f", size = 483727, upload-time = "2026-02-02T12:36:22.688Z" }, + { url = "https://files.pythonhosted.org/packages/77/d2/a71160a5ae1a1e66c1395b37ef77da67513b0adba73b993a27fbe47eb048/jiter-0.13.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ed9bbc30f5d60a3bdf63ae76beb3f9db280d7f195dfcfa61af792d6ce912d159", size = 370799, upload-time = "2026-02-02T12:36:24.106Z" }, + { url = "https://files.pythonhosted.org/packages/01/99/ed5e478ff0eb4e8aa5fd998f9d69603c9fd3f32de3bd16c2b1194f68361c/jiter-0.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98fbafb6e88256f4454de33c1f40203d09fc33ed19162a68b3b257b29ca7f663", size = 359120, upload-time = "2026-02-02T12:36:25.519Z" }, + { url = "https://files.pythonhosted.org/packages/16/be/7ffd08203277a813f732ba897352797fa9493faf8dc7995b31f3d9cb9488/jiter-0.13.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5467696f6b827f1116556cb0db620440380434591e93ecee7fd14d1a491b6daa", size = 390664, upload-time = "2026-02-02T12:36:26.866Z" }, + { url = "https://files.pythonhosted.org/packages/d1/84/e0787856196d6d346264d6dcccb01f741e5f0bd014c1d9a2ebe149caf4f3/jiter-0.13.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:2d08c9475d48b92892583df9da592a0e2ac49bcd41fae1fec4f39ba6cf107820", size = 513543, upload-time = "2026-02-02T12:36:28.217Z" }, + { url = "https://files.pythonhosted.org/packages/65/50/ecbd258181c4313cf79bca6c88fb63207d04d5bf5e4f65174114d072aa55/jiter-0.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:aed40e099404721d7fcaf5b89bd3b4568a4666358bcac7b6b15c09fb6252ab68", size = 547262, upload-time = "2026-02-02T12:36:29.678Z" }, + { url = 
"https://files.pythonhosted.org/packages/27/da/68f38d12e7111d2016cd198161b36e1f042bd115c169255bcb7ec823a3bf/jiter-0.13.0-cp313-cp313-win32.whl", hash = "sha256:36ebfbcffafb146d0e6ffb3e74d51e03d9c35ce7c625c8066cdbfc7b953bdc72", size = 200630, upload-time = "2026-02-02T12:36:31.808Z" }, + { url = "https://files.pythonhosted.org/packages/25/65/3bd1a972c9a08ecd22eb3b08a95d1941ebe6938aea620c246cf426ae09c2/jiter-0.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:8d76029f077379374cf0dbc78dbe45b38dec4a2eb78b08b5194ce836b2517afc", size = 202602, upload-time = "2026-02-02T12:36:33.679Z" }, + { url = "https://files.pythonhosted.org/packages/15/fe/13bd3678a311aa67686bb303654792c48206a112068f8b0b21426eb6851e/jiter-0.13.0-cp313-cp313-win_arm64.whl", hash = "sha256:bb7613e1a427cfcb6ea4544f9ac566b93d5bf67e0d48c787eca673ff9c9dff2b", size = 185939, upload-time = "2026-02-02T12:36:35.065Z" }, + { url = "https://files.pythonhosted.org/packages/49/19/a929ec002ad3228bc97ca01dbb14f7632fffdc84a95ec92ceaf4145688ae/jiter-0.13.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:fa476ab5dd49f3bf3a168e05f89358c75a17608dbabb080ef65f96b27c19ab10", size = 316616, upload-time = "2026-02-02T12:36:36.579Z" }, + { url = "https://files.pythonhosted.org/packages/52/56/d19a9a194afa37c1728831e5fb81b7722c3de18a3109e8f282bfc23e587a/jiter-0.13.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ade8cb6ff5632a62b7dbd4757d8c5573f7a2e9ae285d6b5b841707d8363205ef", size = 346850, upload-time = "2026-02-02T12:36:38.058Z" }, + { url = "https://files.pythonhosted.org/packages/36/4a/94e831c6bf287754a8a019cb966ed39ff8be6ab78cadecf08df3bb02d505/jiter-0.13.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9950290340acc1adaded363edd94baebcee7dabdfa8bee4790794cd5cfad2af6", size = 358551, upload-time = "2026-02-02T12:36:39.417Z" }, + { url = 
"https://files.pythonhosted.org/packages/a2/ec/a4c72c822695fa80e55d2b4142b73f0012035d9fcf90eccc56bc060db37c/jiter-0.13.0-cp313-cp313t-win_amd64.whl", hash = "sha256:2b4972c6df33731aac0742b64fd0d18e0a69bc7d6e03108ce7d40c85fd9e3e6d", size = 201950, upload-time = "2026-02-02T12:36:40.791Z" }, + { url = "https://files.pythonhosted.org/packages/b6/00/393553ec27b824fbc29047e9c7cd4a3951d7fbe4a76743f17e44034fa4e4/jiter-0.13.0-cp313-cp313t-win_arm64.whl", hash = "sha256:701a1e77d1e593c1b435315ff625fd071f0998c5f02792038a5ca98899261b7d", size = 185852, upload-time = "2026-02-02T12:36:42.077Z" }, + { url = "https://files.pythonhosted.org/packages/6e/f5/f1997e987211f6f9bd71b8083047b316208b4aca0b529bb5f8c96c89ef3e/jiter-0.13.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:cc5223ab19fe25e2f0bf2643204ad7318896fe3729bf12fde41b77bfc4fafff0", size = 308804, upload-time = "2026-02-02T12:36:43.496Z" }, + { url = "https://files.pythonhosted.org/packages/cd/8f/5482a7677731fd44881f0204981ce2d7175db271f82cba2085dd2212e095/jiter-0.13.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:9776ebe51713acf438fd9b4405fcd86893ae5d03487546dae7f34993217f8a91", size = 318787, upload-time = "2026-02-02T12:36:45.071Z" }, + { url = "https://files.pythonhosted.org/packages/f3/b9/7257ac59778f1cd025b26a23c5520a36a424f7f1b068f2442a5b499b7464/jiter-0.13.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:879e768938e7b49b5e90b7e3fecc0dbec01b8cb89595861fb39a8967c5220d09", size = 353880, upload-time = "2026-02-02T12:36:47.365Z" }, + { url = "https://files.pythonhosted.org/packages/c3/87/719eec4a3f0841dad99e3d3604ee4cba36af4419a76f3cb0b8e2e691ad67/jiter-0.13.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:682161a67adea11e3aae9038c06c8b4a9a71023228767477d683f69903ebc607", size = 366702, upload-time = "2026-02-02T12:36:48.871Z" }, + { url = 
"https://files.pythonhosted.org/packages/d2/65/415f0a75cf6921e43365a1bc227c565cb949caca8b7532776e430cbaa530/jiter-0.13.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a13b68cd1cd8cc9de8f244ebae18ccb3e4067ad205220ef324c39181e23bbf66", size = 486319, upload-time = "2026-02-02T12:36:53.006Z" }, + { url = "https://files.pythonhosted.org/packages/54/a2/9e12b48e82c6bbc6081fd81abf915e1443add1b13d8fc586e1d90bb02bb8/jiter-0.13.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87ce0f14c6c08892b610686ae8be350bf368467b6acd5085a5b65441e2bf36d2", size = 372289, upload-time = "2026-02-02T12:36:54.593Z" }, + { url = "https://files.pythonhosted.org/packages/4e/c1/e4693f107a1789a239c759a432e9afc592366f04e901470c2af89cfd28e1/jiter-0.13.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c365005b05505a90d1c47856420980d0237adf82f70c4aff7aebd3c1cc143ad", size = 360165, upload-time = "2026-02-02T12:36:56.112Z" }, + { url = "https://files.pythonhosted.org/packages/17/08/91b9ea976c1c758240614bd88442681a87672eebc3d9a6dde476874e706b/jiter-0.13.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1317fdffd16f5873e46ce27d0e0f7f4f90f0cdf1d86bf6abeaea9f63ca2c401d", size = 389634, upload-time = "2026-02-02T12:36:57.495Z" }, + { url = "https://files.pythonhosted.org/packages/18/23/58325ef99390d6d40427ed6005bf1ad54f2577866594bcf13ce55675f87d/jiter-0.13.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:c05b450d37ba0c9e21c77fef1f205f56bcee2330bddca68d344baebfc55ae0df", size = 514933, upload-time = "2026-02-02T12:36:58.909Z" }, + { url = "https://files.pythonhosted.org/packages/5b/25/69f1120c7c395fd276c3996bb8adefa9c6b84c12bb7111e5c6ccdcd8526d/jiter-0.13.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:775e10de3849d0631a97c603f996f518159272db00fdda0a780f81752255ee9d", size = 548842, upload-time = "2026-02-02T12:37:00.433Z" }, + { url = 
"https://files.pythonhosted.org/packages/18/05/981c9669d86850c5fbb0d9e62bba144787f9fba84546ba43d624ee27ef29/jiter-0.13.0-cp314-cp314-win32.whl", hash = "sha256:632bf7c1d28421c00dd8bbb8a3bac5663e1f57d5cd5ed962bce3c73bf62608e6", size = 202108, upload-time = "2026-02-02T12:37:01.718Z" }, + { url = "https://files.pythonhosted.org/packages/8d/96/cdcf54dd0b0341db7d25413229888a346c7130bd20820530905fdb65727b/jiter-0.13.0-cp314-cp314-win_amd64.whl", hash = "sha256:f22ef501c3f87ede88f23f9b11e608581c14f04db59b6a801f354397ae13739f", size = 204027, upload-time = "2026-02-02T12:37:03.075Z" }, + { url = "https://files.pythonhosted.org/packages/fb/f9/724bcaaab7a3cd727031fe4f6995cb86c4bd344909177c186699c8dec51a/jiter-0.13.0-cp314-cp314-win_arm64.whl", hash = "sha256:07b75fe09a4ee8e0c606200622e571e44943f47254f95e2436c8bdcaceb36d7d", size = 187199, upload-time = "2026-02-02T12:37:04.414Z" }, + { url = "https://files.pythonhosted.org/packages/62/92/1661d8b9fd6a3d7a2d89831db26fe3c1509a287d83ad7838831c7b7a5c7e/jiter-0.13.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:964538479359059a35fb400e769295d4b315ae61e4105396d355a12f7fef09f0", size = 318423, upload-time = "2026-02-02T12:37:05.806Z" }, + { url = "https://files.pythonhosted.org/packages/4f/3b/f77d342a54d4ebcd128e520fc58ec2f5b30a423b0fd26acdfc0c6fef8e26/jiter-0.13.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e104da1db1c0991b3eaed391ccd650ae8d947eab1480c733e5a3fb28d4313e40", size = 351438, upload-time = "2026-02-02T12:37:07.189Z" }, + { url = "https://files.pythonhosted.org/packages/76/b3/ba9a69f0e4209bd3331470c723c2f5509e6f0482e416b612431a5061ed71/jiter-0.13.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0e3a5f0cde8ff433b8e88e41aa40131455420fb3649a3c7abdda6145f8cb7202", size = 364774, upload-time = "2026-02-02T12:37:08.579Z" }, + { url = 
"https://files.pythonhosted.org/packages/b3/16/6cdb31fa342932602458dbb631bfbd47f601e03d2e4950740e0b2100b570/jiter-0.13.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:57aab48f40be1db920a582b30b116fe2435d184f77f0e4226f546794cedd9cf0", size = 487238, upload-time = "2026-02-02T12:37:10.066Z" }, + { url = "https://files.pythonhosted.org/packages/ed/b1/956cc7abaca8d95c13aa8d6c9b3f3797241c246cd6e792934cc4c8b250d2/jiter-0.13.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7772115877c53f62beeb8fd853cab692dbc04374ef623b30f997959a4c0e7e95", size = 372892, upload-time = "2026-02-02T12:37:11.656Z" }, + { url = "https://files.pythonhosted.org/packages/26/c4/97ecde8b1e74f67b8598c57c6fccf6df86ea7861ed29da84629cdbba76c4/jiter-0.13.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1211427574b17b633cfceba5040de8081e5abf114f7a7602f73d2e16f9fdaa59", size = 360309, upload-time = "2026-02-02T12:37:13.244Z" }, + { url = "https://files.pythonhosted.org/packages/4b/d7/eabe3cf46715854ccc80be2cd78dd4c36aedeb30751dbf85a1d08c14373c/jiter-0.13.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7beae3a3d3b5212d3a55d2961db3c292e02e302feb43fce6a3f7a31b90ea6dfe", size = 389607, upload-time = "2026-02-02T12:37:14.881Z" }, + { url = "https://files.pythonhosted.org/packages/df/2d/03963fc0804e6109b82decfb9974eb92df3797fe7222428cae12f8ccaa0c/jiter-0.13.0-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:e5562a0f0e90a6223b704163ea28e831bd3a9faa3512a711f031611e6b06c939", size = 514986, upload-time = "2026-02-02T12:37:16.326Z" }, + { url = "https://files.pythonhosted.org/packages/f6/6c/8c83b45eb3eb1c1e18d841fe30b4b5bc5619d781267ca9bc03e005d8fd0a/jiter-0.13.0-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:6c26a424569a59140fb51160a56df13f438a2b0967365e987889186d5fc2f6f9", size = 548756, upload-time = "2026-02-02T12:37:17.736Z" }, + { url = 
"https://files.pythonhosted.org/packages/47/66/eea81dfff765ed66c68fd2ed8c96245109e13c896c2a5015c7839c92367e/jiter-0.13.0-cp314-cp314t-win32.whl", hash = "sha256:24dc96eca9f84da4131cdf87a95e6ce36765c3b156fc9ae33280873b1c32d5f6", size = 201196, upload-time = "2026-02-02T12:37:19.101Z" }, + { url = "https://files.pythonhosted.org/packages/ff/32/4ac9c7a76402f8f00d00842a7f6b83b284d0cf7c1e9d4227bc95aa6d17fa/jiter-0.13.0-cp314-cp314t-win_amd64.whl", hash = "sha256:0a8d76c7524087272c8ae913f5d9d608bd839154b62c4322ef65723d2e5bb0b8", size = 204215, upload-time = "2026-02-02T12:37:20.495Z" }, + { url = "https://files.pythonhosted.org/packages/f9/8e/7def204fea9f9be8b3c21a6f2dd6c020cf56c7d5ff753e0e23ed7f9ea57e/jiter-0.13.0-cp314-cp314t-win_arm64.whl", hash = "sha256:2c26cf47e2cad140fa23b6d58d435a7c0161f5c514284802f25e87fddfe11024", size = 187152, upload-time = "2026-02-02T12:37:22.124Z" }, + { url = "https://files.pythonhosted.org/packages/79/b3/3c29819a27178d0e461a8571fb63c6ae38be6dc36b78b3ec2876bbd6a910/jiter-0.13.0-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:b1cbfa133241d0e6bdab48dcdc2604e8ba81512f6bbd68ec3e8e1357dd3c316c", size = 307016, upload-time = "2026-02-02T12:37:42.755Z" }, + { url = "https://files.pythonhosted.org/packages/eb/ae/60993e4b07b1ac5ebe46da7aa99fdbb802eb986c38d26e3883ac0125c4e0/jiter-0.13.0-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:db367d8be9fad6e8ebbac4a7578b7af562e506211036cba2c06c3b998603c3d2", size = 305024, upload-time = "2026-02-02T12:37:44.774Z" }, + { url = "https://files.pythonhosted.org/packages/77/fa/2227e590e9cf98803db2811f172b2d6460a21539ab73006f251c66f44b14/jiter-0.13.0-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45f6f8efb2f3b0603092401dc2df79fa89ccbc027aaba4174d2d4133ed661434", size = 339337, upload-time = "2026-02-02T12:37:46.668Z" }, + { url = 
"https://files.pythonhosted.org/packages/2d/92/015173281f7eb96c0ef580c997da8ef50870d4f7f4c9e03c845a1d62ae04/jiter-0.13.0-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:597245258e6ad085d064780abfb23a284d418d3e61c57362d9449c6c7317ee2d", size = 346395, upload-time = "2026-02-02T12:37:48.09Z" }, + { url = "https://files.pythonhosted.org/packages/80/60/e50fa45dd7e2eae049f0ce964663849e897300433921198aef94b6ffa23a/jiter-0.13.0-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:3d744a6061afba08dd7ae375dcde870cffb14429b7477e10f67e9e6d68772a0a", size = 305169, upload-time = "2026-02-02T12:37:50.376Z" }, + { url = "https://files.pythonhosted.org/packages/d2/73/a009f41c5eed71c49bec53036c4b33555afcdee70682a18c6f66e396c039/jiter-0.13.0-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:ff732bd0a0e778f43d5009840f20b935e79087b4dc65bd36f1cd0f9b04b8ff7f", size = 303808, upload-time = "2026-02-02T12:37:52.092Z" }, + { url = "https://files.pythonhosted.org/packages/c4/10/528b439290763bff3d939268085d03382471b442f212dca4ff5f12802d43/jiter-0.13.0-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ab44b178f7981fcaea7e0a5df20e773c663d06ffda0198f1a524e91b2fde7e59", size = 337384, upload-time = "2026-02-02T12:37:53.582Z" }, + { url = "https://files.pythonhosted.org/packages/67/8a/a342b2f0251f3dac4ca17618265d93bf244a2a4d089126e81e4c1056ac50/jiter-0.13.0-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bb00b6d26db67a05fe3e12c76edc75f32077fb51deed13822dc648fa373bc19", size = 343768, upload-time = "2026-02-02T12:37:55.055Z" }, ] [[package]] @@ -3205,7 +3213,7 @@ wheels = [ [[package]] name = "litellm" -version = "1.81.5" +version = "1.81.7" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohttp", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 
'win32'" }, @@ -3221,9 +3229,9 @@ dependencies = [ { name = "tiktoken", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "tokenizers", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/38/f4/c109bc5504520baa7b96a910b619d1b1b5af6cb5c28053e53adfed83e3ab/litellm-1.81.5.tar.gz", hash = "sha256:599994651cbb64b8ee7cd3b4979275139afc6e426bdd4aa840a61121bb3b04c9", size = 13615436, upload-time = "2026-01-29T01:37:54.817Z" } +sdist = { url = "https://files.pythonhosted.org/packages/77/69/cfa8a1d68cd10223a9d9741c411e131aece85c60c29c1102d762738b3e5c/litellm-1.81.7.tar.gz", hash = "sha256:442ff38708383ebee21357b3d936e58938172bae892f03bc5be4019ed4ff4a17", size = 14039864, upload-time = "2026-02-03T19:43:10.633Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/74/0f/5312b944208efeec5dcbf8e0ed956f8f7c430b0c6458301d206380c90b56/litellm-1.81.5-py3-none-any.whl", hash = "sha256:206505c5a0c6503e465154b9c979772be3ede3f5bf746d15b37dca5ae54d239f", size = 11950016, upload-time = "2026-01-29T01:37:52.6Z" }, + { url = "https://files.pythonhosted.org/packages/60/95/8cecc7e6377171e4ac96f23d65236af8706d99c1b7b71a94c72206672810/litellm-1.81.7-py3-none-any.whl", hash = "sha256:58466c88c3289c6a3830d88768cf8f307581d9e6c87861de874d1128bb2de90d", size = 12254178, upload-time = "2026-02-03T19:43:08.035Z" }, ] [package.optional-dependencies] @@ -3265,11 +3273,11 @@ wheels = [ [[package]] name = "litellm-proxy-extras" -version = "0.4.27" +version = "0.4.29" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/01/af/9fdc22e7e3dcaa44c0f206a3f12065286c32d7e453f87e14dac1e69cf49a/litellm_proxy_extras-0.4.27.tar.gz", hash = "sha256:81059120016cfc03c82aa9664424912bdcffad103f66a5f925fef6b26f2cc151", size = 23269, upload-time = "2026-01-24T22:03:26.97Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/42/c5/9c4325452b3b3fc144e942f0f0e6582374d588f3159a0706594e3422943c/litellm_proxy_extras-0.4.29.tar.gz", hash = "sha256:1a8266911e0546f1e17e6714ca20b72e9fef47c1683f9c16399cf2d1786437a0", size = 23561, upload-time = "2026-01-31T23:13:58.707Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/50/c8/508b5a277e5d56e71ef51c5fe8111c7ec045ffd98f126089af803171ccc6/litellm_proxy_extras-0.4.27-py3-none-any.whl", hash = "sha256:752c1faabc86ce3d2b1fa451495d34de82323798e37b9cb5c0fea93deae1c5c8", size = 50073, upload-time = "2026-01-24T22:03:25.757Z" }, + { url = "https://files.pythonhosted.org/packages/b0/d6/7393367fdf4b65d80ba0c32d517743a7aa8975a36b32cc70a0352b9514aa/litellm_proxy_extras-0.4.29-py3-none-any.whl", hash = "sha256:c36c1b69675c61acccc6b61dd610eb37daeb72c6fd819461cefb5b0cc7e0550f", size = 50734, upload-time = "2026-01-31T23:13:56.986Z" }, ] [[package]] @@ -3393,7 +3401,7 @@ dependencies = [ { name = "fonttools", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "kiwisolver", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, - { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "numpy", version = "2.4.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or 
(python_full_version >= '3.11' and sys_platform == 'win32')" }, { name = "packaging", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "pillow", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "pyparsing", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -3498,7 +3506,7 @@ wheels = [ [[package]] name = "mem0ai" -version = "1.0.2" +version = "1.0.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "openai", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -3509,9 +3517,9 @@ dependencies = [ { name = "qdrant-client", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "sqlalchemy", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/4c/b3/57edb1253e7dc24d41e102722a585d6e08a96c6191a6a04e43112c01dc5d/mem0ai-1.0.2.tar.gz", hash = "sha256:533c370e8a4e817d47a583cb7fa4df55db59de8dd67be39f2b927e2ad19607d1", size = 182395, upload-time = "2026-01-13T07:40:00.666Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ce/b6/9d3a747a5c1af2b4f73572a3d296bf5e99c99630a3f201b0ddbb14e811e6/mem0ai-1.0.3.tar.gz", hash = "sha256:8f7abe485a61653e3f2d3f8c222f531f8b52660b19d88820c56522103d9f31b5", size = 182698, upload-time = "2026-02-03T05:38:04.608Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d7/82/59309070bd2d2ddccebd89d8ebb7a2155ce12531f0c36123d0a39eada544/mem0ai-1.0.2-py3-none-any.whl", hash = "sha256:3528523653bc57efa477d55e703dcedf8decc23868d4dbcc6d43a97f2315834a", size = 275428, upload-time = "2026-01-13T07:39:58.339Z" }, + { url = "https://files.pythonhosted.org/packages/84/3e/b300ab9fa6efd36c78f1402684eab1483f282c4ca6e983920fceb9c0f4fb/mem0ai-1.0.3-py3-none-any.whl", hash = 
"sha256:f500c3decc12c2663b2ad829ac4edcd0c674f2bd9bf4abf7f5c0522aef3d3cf8", size = 275722, upload-time = "2026-02-03T05:38:03.126Z" }, ] [[package]] @@ -3560,7 +3568,7 @@ version = "0.5.4" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, - { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "numpy", version = "2.4.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/0e/4a/c27b42ed9b1c7d13d9ba8b6905dece787d6259152f2309338aed29b2447b/ml_dtypes-0.5.4.tar.gz", hash = "sha256:8ab06a50fb9bf9666dd0fe5dfb4676fa2b0ac0f31ecff72a6c3af8e22c063453", size = 692314, upload-time = "2025-11-17T22:32:31.031Z" } wheels = [ @@ -3830,11 +3838,11 @@ wheels = [ [[package]] name = "narwhals" -version = "2.15.0" +version = "2.16.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/47/6d/b57c64e5038a8cf071bce391bb11551657a74558877ac961e7fa905ece27/narwhals-2.15.0.tar.gz", hash = "sha256:a9585975b99d95084268445a1fdd881311fa26ef1caa18020d959d5b2ff9a965", size = 603479, upload-time = "2026-01-06T08:10:13.27Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/fc/6f/713be67779028d482c6e0f2dde5bc430021b2578a4808c1c9f6d7ad48257/narwhals-2.16.0.tar.gz", hash = "sha256:155bb45132b370941ba0396d123cf9ed192bf25f39c4cea726f2da422ca4e145", size = 618268, upload-time = "2026-02-02T10:31:00.545Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/3d/2e/cf2ffeb386ac3763526151163ad7da9f1b586aac96d2b4f7de1eaebf0c61/narwhals-2.15.0-py3-none-any.whl", hash = "sha256:cbfe21ca19d260d9fd67f995ec75c44592d1f106933b03ddd375df7ac841f9d6", size = 432856, upload-time = "2026-01-06T08:10:11.511Z" }, + { url = "https://files.pythonhosted.org/packages/03/cc/7cb74758e6df95e0c4e1253f203b6dd7f348bf2f29cf89e9210a2416d535/narwhals-2.16.0-py3-none-any.whl", hash = "sha256:846f1fd7093ac69d63526e50732033e86c30ea0026a44d9b23991010c7d1485d", size = 443951, upload-time = "2026-02-02T10:30:58.635Z" }, ] [[package]] @@ -3915,7 +3923,7 @@ wheels = [ [[package]] name = "numpy" -version = "2.4.1" +version = "2.4.2" source = { registry = "https://pypi.org/simple" } resolution-markers = [ "python_full_version >= '3.14' and sys_platform == 'darwin'", @@ -3931,79 +3939,79 @@ resolution-markers = [ "python_full_version == '3.12.*' and sys_platform == 'win32'", "python_full_version == '3.11.*' and sys_platform == 'win32'", ] -sdist = { url = "https://files.pythonhosted.org/packages/24/62/ae72ff66c0f1fd959925b4c11f8c2dea61f47f6acaea75a08512cdfe3fed/numpy-2.4.1.tar.gz", hash = "sha256:a1ceafc5042451a858231588a104093474c6a5c57dcc724841f5c888d237d690", size = 20721320, upload-time = "2026-01-10T06:44:59.619Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a5/34/2b1bc18424f3ad9af577f6ce23600319968a70575bd7db31ce66731bbef9/numpy-2.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0cce2a669e3c8ba02ee563c7835f92c153cf02edff1ae05e1823f1dde21b16a5", size = 16944563, upload-time = "2026-01-10T06:42:14.615Z" }, - { url = 
"https://files.pythonhosted.org/packages/2c/57/26e5f97d075aef3794045a6ca9eada6a4ed70eb9a40e7a4a93f9ac80d704/numpy-2.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:899d2c18024984814ac7e83f8f49d8e8180e2fbe1b2e252f2e7f1d06bea92425", size = 12645658, upload-time = "2026-01-10T06:42:17.298Z" }, - { url = "https://files.pythonhosted.org/packages/8e/ba/80fc0b1e3cb2fd5c6143f00f42eb67762aa043eaa05ca924ecc3222a7849/numpy-2.4.1-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:09aa8a87e45b55a1c2c205d42e2808849ece5c484b2aab11fecabec3841cafba", size = 5474132, upload-time = "2026-01-10T06:42:19.637Z" }, - { url = "https://files.pythonhosted.org/packages/40/ae/0a5b9a397f0e865ec171187c78d9b57e5588afc439a04ba9cab1ebb2c945/numpy-2.4.1-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:edee228f76ee2dab4579fad6f51f6a305de09d444280109e0f75df247ff21501", size = 6804159, upload-time = "2026-01-10T06:42:21.44Z" }, - { url = "https://files.pythonhosted.org/packages/86/9c/841c15e691c7085caa6fd162f063eff494099c8327aeccd509d1ab1e36ab/numpy-2.4.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a92f227dbcdc9e4c3e193add1a189a9909947d4f8504c576f4a732fd0b54240a", size = 14708058, upload-time = "2026-01-10T06:42:23.546Z" }, - { url = "https://files.pythonhosted.org/packages/5d/9d/7862db06743f489e6a502a3b93136d73aea27d97b2cf91504f70a27501d6/numpy-2.4.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:538bf4ec353709c765ff75ae616c34d3c3dca1a68312727e8f2676ea644f8509", size = 16651501, upload-time = "2026-01-10T06:42:25.909Z" }, - { url = "https://files.pythonhosted.org/packages/a6/9c/6fc34ebcbd4015c6e5f0c0ce38264010ce8a546cb6beacb457b84a75dfc8/numpy-2.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ac08c63cb7779b85e9d5318e6c3518b424bc1f364ac4cb2c6136f12e5ff2dccc", size = 16492627, upload-time = "2026-01-10T06:42:28.938Z" }, - { url = 
"https://files.pythonhosted.org/packages/aa/63/2494a8597502dacda439f61b3c0db4da59928150e62be0e99395c3ad23c5/numpy-2.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4f9c360ecef085e5841c539a9a12b883dff005fbd7ce46722f5e9cef52634d82", size = 18585052, upload-time = "2026-01-10T06:42:31.312Z" }, - { url = "https://files.pythonhosted.org/packages/6a/93/098e1162ae7522fc9b618d6272b77404c4656c72432ecee3abc029aa3de0/numpy-2.4.1-cp311-cp311-win32.whl", hash = "sha256:0f118ce6b972080ba0758c6087c3617b5ba243d806268623dc34216d69099ba0", size = 6236575, upload-time = "2026-01-10T06:42:33.872Z" }, - { url = "https://files.pythonhosted.org/packages/8c/de/f5e79650d23d9e12f38a7bc6b03ea0835b9575494f8ec94c11c6e773b1b1/numpy-2.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:18e14c4d09d55eef39a6ab5b08406e84bc6869c1e34eef45564804f90b7e0574", size = 12604479, upload-time = "2026-01-10T06:42:35.778Z" }, - { url = "https://files.pythonhosted.org/packages/dd/65/e1097a7047cff12ce3369bd003811516b20ba1078dbdec135e1cd7c16c56/numpy-2.4.1-cp311-cp311-win_arm64.whl", hash = "sha256:6461de5113088b399d655d45c3897fa188766415d0f568f175ab071c8873bd73", size = 10578325, upload-time = "2026-01-10T06:42:38.518Z" }, - { url = "https://files.pythonhosted.org/packages/78/7f/ec53e32bf10c813604edf07a3682616bd931d026fcde7b6d13195dfb684a/numpy-2.4.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d3703409aac693fa82c0aee023a1ae06a6e9d065dba10f5e8e80f642f1e9d0a2", size = 16656888, upload-time = "2026-01-10T06:42:40.913Z" }, - { url = "https://files.pythonhosted.org/packages/b8/e0/1f9585d7dae8f14864e948fd7fa86c6cb72dee2676ca2748e63b1c5acfe0/numpy-2.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7211b95ca365519d3596a1d8688a95874cc94219d417504d9ecb2df99fa7bfa8", size = 12373956, upload-time = "2026-01-10T06:42:43.091Z" }, - { url = "https://files.pythonhosted.org/packages/8e/43/9762e88909ff2326f5e7536fa8cb3c49fb03a7d92705f23e6e7f553d9cb3/numpy-2.4.1-cp312-cp312-macosx_14_0_arm64.whl", hash = 
"sha256:5adf01965456a664fc727ed69cc71848f28d063217c63e1a0e200a118d5eec9a", size = 5202567, upload-time = "2026-01-10T06:42:45.107Z" }, - { url = "https://files.pythonhosted.org/packages/4b/ee/34b7930eb61e79feb4478800a4b95b46566969d837546aa7c034c742ef98/numpy-2.4.1-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:26f0bcd9c79a00e339565b303badc74d3ea2bd6d52191eeca5f95936cad107d0", size = 6549459, upload-time = "2026-01-10T06:42:48.152Z" }, - { url = "https://files.pythonhosted.org/packages/79/e3/5f115fae982565771be994867c89bcd8d7208dbfe9469185497d70de5ddf/numpy-2.4.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0093e85df2960d7e4049664b26afc58b03236e967fb942354deef3208857a04c", size = 14404859, upload-time = "2026-01-10T06:42:49.947Z" }, - { url = "https://files.pythonhosted.org/packages/d9/7d/9c8a781c88933725445a859cac5d01b5871588a15969ee6aeb618ba99eee/numpy-2.4.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7ad270f438cbdd402c364980317fb6b117d9ec5e226fff5b4148dd9aa9fc6e02", size = 16371419, upload-time = "2026-01-10T06:42:52.409Z" }, - { url = "https://files.pythonhosted.org/packages/a6/d2/8aa084818554543f17cf4162c42f162acbd3bb42688aefdba6628a859f77/numpy-2.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:297c72b1b98100c2e8f873d5d35fb551fce7040ade83d67dd51d38c8d42a2162", size = 16182131, upload-time = "2026-01-10T06:42:54.694Z" }, - { url = "https://files.pythonhosted.org/packages/60/db/0425216684297c58a8df35f3284ef56ec4a043e6d283f8a59c53562caf1b/numpy-2.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:cf6470d91d34bf669f61d515499859fa7a4c2f7c36434afb70e82df7217933f9", size = 18295342, upload-time = "2026-01-10T06:42:56.991Z" }, - { url = "https://files.pythonhosted.org/packages/31/4c/14cb9d86240bd8c386c881bafbe43f001284b7cce3bc01623ac9475da163/numpy-2.4.1-cp312-cp312-win32.whl", hash = "sha256:b6bcf39112e956594b3331316d90c90c90fb961e39696bda97b89462f5f3943f", size = 5959015, 
upload-time = "2026-01-10T06:42:59.631Z" }, - { url = "https://files.pythonhosted.org/packages/51/cf/52a703dbeb0c65807540d29699fef5fda073434ff61846a564d5c296420f/numpy-2.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:e1a27bb1b2dee45a2a53f5ca6ff2d1a7f135287883a1689e930d44d1ff296c87", size = 12310730, upload-time = "2026-01-10T06:43:01.627Z" }, - { url = "https://files.pythonhosted.org/packages/69/80/a828b2d0ade5e74a9fe0f4e0a17c30fdc26232ad2bc8c9f8b3197cf7cf18/numpy-2.4.1-cp312-cp312-win_arm64.whl", hash = "sha256:0e6e8f9d9ecf95399982019c01223dc130542960a12edfa8edd1122dfa66a8a8", size = 10312166, upload-time = "2026-01-10T06:43:03.673Z" }, - { url = "https://files.pythonhosted.org/packages/04/68/732d4b7811c00775f3bd522a21e8dd5a23f77eb11acdeb663e4a4ebf0ef4/numpy-2.4.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d797454e37570cfd61143b73b8debd623c3c0952959adb817dd310a483d58a1b", size = 16652495, upload-time = "2026-01-10T06:43:06.283Z" }, - { url = "https://files.pythonhosted.org/packages/20/ca/857722353421a27f1465652b2c66813eeeccea9d76d5f7b74b99f298e60e/numpy-2.4.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82c55962006156aeef1629b953fd359064aa47e4d82cfc8e67f0918f7da3344f", size = 12368657, upload-time = "2026-01-10T06:43:09.094Z" }, - { url = "https://files.pythonhosted.org/packages/81/0d/2377c917513449cc6240031a79d30eb9a163d32a91e79e0da47c43f2c0c8/numpy-2.4.1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:71abbea030f2cfc3092a0ff9f8c8fdefdc5e0bf7d9d9c99663538bb0ecdac0b9", size = 5197256, upload-time = "2026-01-10T06:43:13.634Z" }, - { url = "https://files.pythonhosted.org/packages/17/39/569452228de3f5de9064ac75137082c6214be1f5c532016549a7923ab4b5/numpy-2.4.1-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:5b55aa56165b17aaf15520beb9cbd33c9039810e0d9643dd4379e44294c7303e", size = 6545212, upload-time = "2026-01-10T06:43:15.661Z" }, - { url = 
"https://files.pythonhosted.org/packages/8c/a4/77333f4d1e4dac4395385482557aeecf4826e6ff517e32ca48e1dafbe42a/numpy-2.4.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c0faba4a331195bfa96f93dd9dfaa10b2c7aa8cda3a02b7fd635e588fe821bf5", size = 14402871, upload-time = "2026-01-10T06:43:17.324Z" }, - { url = "https://files.pythonhosted.org/packages/ba/87/d341e519956273b39d8d47969dd1eaa1af740615394fe67d06f1efa68773/numpy-2.4.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d3e3087f53e2b4428766b54932644d148613c5a595150533ae7f00dab2f319a8", size = 16359305, upload-time = "2026-01-10T06:43:19.376Z" }, - { url = "https://files.pythonhosted.org/packages/32/91/789132c6666288eaa20ae8066bb99eba1939362e8f1a534949a215246e97/numpy-2.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:49e792ec351315e16da54b543db06ca8a86985ab682602d90c60ef4ff4db2a9c", size = 16181909, upload-time = "2026-01-10T06:43:21.808Z" }, - { url = "https://files.pythonhosted.org/packages/cf/b8/090b8bd27b82a844bb22ff8fdf7935cb1980b48d6e439ae116f53cdc2143/numpy-2.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:79e9e06c4c2379db47f3f6fc7a8652e7498251789bf8ff5bd43bf478ef314ca2", size = 18284380, upload-time = "2026-01-10T06:43:23.957Z" }, - { url = "https://files.pythonhosted.org/packages/67/78/722b62bd31842ff029412271556a1a27a98f45359dea78b1548a3a9996aa/numpy-2.4.1-cp313-cp313-win32.whl", hash = "sha256:3d1a100e48cb266090a031397863ff8a30050ceefd798f686ff92c67a486753d", size = 5957089, upload-time = "2026-01-10T06:43:27.535Z" }, - { url = "https://files.pythonhosted.org/packages/da/a6/cf32198b0b6e18d4fbfa9a21a992a7fca535b9bb2b0cdd217d4a3445b5ca/numpy-2.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:92a0e65272fd60bfa0d9278e0484c2f52fe03b97aedc02b357f33fe752c52ffb", size = 12307230, upload-time = "2026-01-10T06:43:29.298Z" }, - { url = 
"https://files.pythonhosted.org/packages/44/6c/534d692bfb7d0afe30611320c5fb713659dcb5104d7cc182aff2aea092f5/numpy-2.4.1-cp313-cp313-win_arm64.whl", hash = "sha256:20d4649c773f66cc2fc36f663e091f57c3b7655f936a4c681b4250855d1da8f5", size = 10313125, upload-time = "2026-01-10T06:43:31.782Z" }, - { url = "https://files.pythonhosted.org/packages/da/a1/354583ac5c4caa566de6ddfbc42744409b515039e085fab6e0ff942e0df5/numpy-2.4.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:f93bc6892fe7b0663e5ffa83b61aab510aacffd58c16e012bb9352d489d90cb7", size = 12496156, upload-time = "2026-01-10T06:43:34.237Z" }, - { url = "https://files.pythonhosted.org/packages/51/b0/42807c6e8cce58c00127b1dc24d365305189991f2a7917aa694a109c8d7d/numpy-2.4.1-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:178de8f87948163d98a4c9ab5bee4ce6519ca918926ec8df195af582de28544d", size = 5324663, upload-time = "2026-01-10T06:43:36.211Z" }, - { url = "https://files.pythonhosted.org/packages/fe/55/7a621694010d92375ed82f312b2f28017694ed784775269115323e37f5e2/numpy-2.4.1-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:98b35775e03ab7f868908b524fc0a84d38932d8daf7b7e1c3c3a1b6c7a2c9f15", size = 6645224, upload-time = "2026-01-10T06:43:37.884Z" }, - { url = "https://files.pythonhosted.org/packages/50/96/9fa8635ed9d7c847d87e30c834f7109fac5e88549d79ef3324ab5c20919f/numpy-2.4.1-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:941c2a93313d030f219f3a71fd3d91a728b82979a5e8034eb2e60d394a2b83f9", size = 14462352, upload-time = "2026-01-10T06:43:39.479Z" }, - { url = "https://files.pythonhosted.org/packages/03/d1/8cf62d8bb2062da4fb82dd5d49e47c923f9c0738032f054e0a75342faba7/numpy-2.4.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:529050522e983e00a6c1c6b67411083630de8b57f65e853d7b03d9281b8694d2", size = 16407279, upload-time = "2026-01-10T06:43:41.93Z" }, - { url = 
"https://files.pythonhosted.org/packages/86/1c/95c86e17c6b0b31ce6ef219da00f71113b220bcb14938c8d9a05cee0ff53/numpy-2.4.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:2302dc0224c1cbc49bb94f7064f3f923a971bfae45c33870dcbff63a2a550505", size = 16248316, upload-time = "2026-01-10T06:43:44.121Z" }, - { url = "https://files.pythonhosted.org/packages/30/b4/e7f5ff8697274c9d0fa82398b6a372a27e5cef069b37df6355ccb1f1db1a/numpy-2.4.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:9171a42fcad32dcf3fa86f0a4faa5e9f8facefdb276f54b8b390d90447cff4e2", size = 18329884, upload-time = "2026-01-10T06:43:46.613Z" }, - { url = "https://files.pythonhosted.org/packages/37/a4/b073f3e9d77f9aec8debe8ca7f9f6a09e888ad1ba7488f0c3b36a94c03ac/numpy-2.4.1-cp313-cp313t-win32.whl", hash = "sha256:382ad67d99ef49024f11d1ce5dcb5ad8432446e4246a4b014418ba3a1175a1f4", size = 6081138, upload-time = "2026-01-10T06:43:48.854Z" }, - { url = "https://files.pythonhosted.org/packages/16/16/af42337b53844e67752a092481ab869c0523bc95c4e5c98e4dac4e9581ac/numpy-2.4.1-cp313-cp313t-win_amd64.whl", hash = "sha256:62fea415f83ad8fdb6c20840578e5fbaf5ddd65e0ec6c3c47eda0f69da172510", size = 12447478, upload-time = "2026-01-10T06:43:50.476Z" }, - { url = "https://files.pythonhosted.org/packages/6c/f8/fa85b2eac68ec631d0b631abc448552cb17d39afd17ec53dcbcc3537681a/numpy-2.4.1-cp313-cp313t-win_arm64.whl", hash = "sha256:a7870e8c5fc11aef57d6fea4b4085e537a3a60ad2cdd14322ed531fdca68d261", size = 10382981, upload-time = "2026-01-10T06:43:52.575Z" }, - { url = "https://files.pythonhosted.org/packages/1b/a7/ef08d25698e0e4b4efbad8d55251d20fe2a15f6d9aa7c9b30cd03c165e6f/numpy-2.4.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:3869ea1ee1a1edc16c29bbe3a2f2a4e515cc3a44d43903ad41e0cacdbaf733dc", size = 16652046, upload-time = "2026-01-10T06:43:54.797Z" }, - { url = "https://files.pythonhosted.org/packages/8f/39/e378b3e3ca13477e5ac70293ec027c438d1927f18637e396fe90b1addd72/numpy-2.4.1-cp314-cp314-macosx_11_0_arm64.whl", hash 
= "sha256:e867df947d427cdd7a60e3e271729090b0f0df80f5f10ab7dd436f40811699c3", size = 12378858, upload-time = "2026-01-10T06:43:57.099Z" }, - { url = "https://files.pythonhosted.org/packages/c3/74/7ec6154f0006910ed1fdbb7591cf4432307033102b8a22041599935f8969/numpy-2.4.1-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:e3bd2cb07841166420d2fa7146c96ce00cb3410664cbc1a6be028e456c4ee220", size = 5207417, upload-time = "2026-01-10T06:43:59.037Z" }, - { url = "https://files.pythonhosted.org/packages/f7/b7/053ac11820d84e42f8feea5cb81cc4fcd1091499b45b1ed8c7415b1bf831/numpy-2.4.1-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:f0a90aba7d521e6954670550e561a4cb925713bd944445dbe9e729b71f6cabee", size = 6542643, upload-time = "2026-01-10T06:44:01.852Z" }, - { url = "https://files.pythonhosted.org/packages/c0/c4/2e7908915c0e32ca636b92e4e4a3bdec4cb1e7eb0f8aedf1ed3c68a0d8cd/numpy-2.4.1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d558123217a83b2d1ba316b986e9248a1ed1971ad495963d555ccd75dcb1556", size = 14418963, upload-time = "2026-01-10T06:44:04.047Z" }, - { url = "https://files.pythonhosted.org/packages/eb/c0/3ed5083d94e7ffd7c404e54619c088e11f2e1939a9544f5397f4adb1b8ba/numpy-2.4.1-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2f44de05659b67d20499cbc96d49f2650769afcb398b79b324bb6e297bfe3844", size = 16363811, upload-time = "2026-01-10T06:44:06.207Z" }, - { url = "https://files.pythonhosted.org/packages/0e/68/42b66f1852bf525050a67315a4fb94586ab7e9eaa541b1bef530fab0c5dd/numpy-2.4.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:69e7419c9012c4aaf695109564e3387f1259f001b4326dfa55907b098af082d3", size = 16197643, upload-time = "2026-01-10T06:44:08.33Z" }, - { url = "https://files.pythonhosted.org/packages/d2/40/e8714fc933d85f82c6bfc7b998a0649ad9769a32f3494ba86598aaf18a48/numpy-2.4.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2ffd257026eb1b34352e749d7cc1678b5eeec3e329ad8c9965a797e08ccba205", size = 
18289601, upload-time = "2026-01-10T06:44:10.841Z" }, - { url = "https://files.pythonhosted.org/packages/80/9a/0d44b468cad50315127e884802351723daca7cf1c98d102929468c81d439/numpy-2.4.1-cp314-cp314-win32.whl", hash = "sha256:727c6c3275ddefa0dc078524a85e064c057b4f4e71ca5ca29a19163c607be745", size = 6005722, upload-time = "2026-01-10T06:44:13.332Z" }, - { url = "https://files.pythonhosted.org/packages/7e/bb/c6513edcce5a831810e2dddc0d3452ce84d208af92405a0c2e58fd8e7881/numpy-2.4.1-cp314-cp314-win_amd64.whl", hash = "sha256:7d5d7999df434a038d75a748275cd6c0094b0ecdb0837342b332a82defc4dc4d", size = 12438590, upload-time = "2026-01-10T06:44:15.006Z" }, - { url = "https://files.pythonhosted.org/packages/e9/da/a598d5cb260780cf4d255102deba35c1d072dc028c4547832f45dd3323a8/numpy-2.4.1-cp314-cp314-win_arm64.whl", hash = "sha256:ce9ce141a505053b3c7bce3216071f3bf5c182b8b28930f14cd24d43932cd2df", size = 10596180, upload-time = "2026-01-10T06:44:17.386Z" }, - { url = "https://files.pythonhosted.org/packages/de/bc/ea3f2c96fcb382311827231f911723aeff596364eb6e1b6d1d91128aa29b/numpy-2.4.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:4e53170557d37ae404bf8d542ca5b7c629d6efa1117dac6a83e394142ea0a43f", size = 12498774, upload-time = "2026-01-10T06:44:19.467Z" }, - { url = "https://files.pythonhosted.org/packages/aa/ab/ef9d939fe4a812648c7a712610b2ca6140b0853c5efea361301006c02ae5/numpy-2.4.1-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:a73044b752f5d34d4232f25f18160a1cc418ea4507f5f11e299d8ac36875f8a0", size = 5327274, upload-time = "2026-01-10T06:44:23.189Z" }, - { url = "https://files.pythonhosted.org/packages/bd/31/d381368e2a95c3b08b8cf7faac6004849e960f4a042d920337f71cef0cae/numpy-2.4.1-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:fb1461c99de4d040666ca0444057b06541e5642f800b71c56e6ea92d6a853a0c", size = 6648306, upload-time = "2026-01-10T06:44:25.012Z" }, - { url = 
"https://files.pythonhosted.org/packages/c8/e5/0989b44ade47430be6323d05c23207636d67d7362a1796ccbccac6773dd2/numpy-2.4.1-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:423797bdab2eeefbe608d7c1ec7b2b4fd3c58d51460f1ee26c7500a1d9c9ee93", size = 14464653, upload-time = "2026-01-10T06:44:26.706Z" }, - { url = "https://files.pythonhosted.org/packages/10/a7/cfbe475c35371cae1358e61f20c5f075badc18c4797ab4354140e1d283cf/numpy-2.4.1-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:52b5f61bdb323b566b528899cc7db2ba5d1015bda7ea811a8bcf3c89c331fa42", size = 16405144, upload-time = "2026-01-10T06:44:29.378Z" }, - { url = "https://files.pythonhosted.org/packages/f8/a3/0c63fe66b534888fa5177cc7cef061541064dbe2b4b60dcc60ffaf0d2157/numpy-2.4.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:42d7dd5fa36d16d52a84f821eb96031836fd405ee6955dd732f2023724d0aa01", size = 16247425, upload-time = "2026-01-10T06:44:31.721Z" }, - { url = "https://files.pythonhosted.org/packages/6b/2b/55d980cfa2c93bd40ff4c290bf824d792bd41d2fe3487b07707559071760/numpy-2.4.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:e7b6b5e28bbd47b7532698e5db2fe1db693d84b58c254e4389d99a27bb9b8f6b", size = 18330053, upload-time = "2026-01-10T06:44:34.617Z" }, - { url = "https://files.pythonhosted.org/packages/23/12/8b5fc6b9c487a09a7957188e0943c9ff08432c65e34567cabc1623b03a51/numpy-2.4.1-cp314-cp314t-win32.whl", hash = "sha256:5de60946f14ebe15e713a6f22850c2372fa72f4ff9a432ab44aa90edcadaa65a", size = 6152482, upload-time = "2026-01-10T06:44:36.798Z" }, - { url = "https://files.pythonhosted.org/packages/00/a5/9f8ca5856b8940492fc24fbe13c1bc34d65ddf4079097cf9e53164d094e1/numpy-2.4.1-cp314-cp314t-win_amd64.whl", hash = "sha256:8f085da926c0d491ffff3096f91078cc97ea67e7e6b65e490bc8dcda65663be2", size = 12627117, upload-time = "2026-01-10T06:44:38.828Z" }, - { url = 
"https://files.pythonhosted.org/packages/ad/0d/eca3d962f9eef265f01a8e0d20085c6dd1f443cbffc11b6dede81fd82356/numpy-2.4.1-cp314-cp314t-win_arm64.whl", hash = "sha256:6436cffb4f2bf26c974344439439c95e152c9a527013f26b3577be6c2ca64295", size = 10667121, upload-time = "2026-01-10T06:44:41.644Z" }, - { url = "https://files.pythonhosted.org/packages/1e/48/d86f97919e79314a1cdee4c832178763e6e98e623e123d0bada19e92c15a/numpy-2.4.1-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:8ad35f20be147a204e28b6a0575fbf3540c5e5f802634d4258d55b1ff5facce1", size = 16822202, upload-time = "2026-01-10T06:44:43.738Z" }, - { url = "https://files.pythonhosted.org/packages/51/e9/1e62a7f77e0f37dcfb0ad6a9744e65df00242b6ea37dfafb55debcbf5b55/numpy-2.4.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:8097529164c0f3e32bb89412a0905d9100bf434d9692d9fc275e18dcf53c9344", size = 12569985, upload-time = "2026-01-10T06:44:45.945Z" }, - { url = "https://files.pythonhosted.org/packages/c7/7e/914d54f0c801342306fdcdce3e994a56476f1b818c46c47fc21ae968088c/numpy-2.4.1-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:ea66d2b41ca4a1630aae5507ee0a71647d3124d1741980138aa8f28f44dac36e", size = 5398484, upload-time = "2026-01-10T06:44:48.012Z" }, - { url = "https://files.pythonhosted.org/packages/1c/d8/9570b68584e293a33474e7b5a77ca404f1dcc655e40050a600dee81d27fb/numpy-2.4.1-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = "sha256:d3f8f0df9f4b8be57b3bf74a1d087fec68f927a2fab68231fdb442bf2c12e426", size = 6713216, upload-time = "2026-01-10T06:44:49.725Z" }, - { url = "https://files.pythonhosted.org/packages/33/9b/9dd6e2db8d49eb24f86acaaa5258e5f4c8ed38209a4ee9de2d1a0ca25045/numpy-2.4.1-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2023ef86243690c2791fd6353e5b4848eedaa88ca8a2d129f462049f6d484696", size = 14538937, upload-time = "2026-01-10T06:44:51.498Z" }, - { url = 
"https://files.pythonhosted.org/packages/53/87/d5bd995b0f798a37105b876350d346eea5838bd8f77ea3d7a48392f3812b/numpy-2.4.1-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8361ea4220d763e54cff2fbe7d8c93526b744f7cd9ddab47afeff7e14e8503be", size = 16479830, upload-time = "2026-01-10T06:44:53.931Z" }, - { url = "https://files.pythonhosted.org/packages/5b/c7/b801bf98514b6ae6475e941ac05c58e6411dd863ea92916bfd6d510b08c1/numpy-2.4.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:4f1b68ff47680c2925f8063402a693ede215f0257f02596b1318ecdfb1d79e33", size = 12492579, upload-time = "2026-01-10T06:44:57.094Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/57/fd/0005efbd0af48e55eb3c7208af93f2862d4b1a56cd78e84309a2d959208d/numpy-2.4.2.tar.gz", hash = "sha256:659a6107e31a83c4e33f763942275fd278b21d095094044eb35569e86a21ddae", size = 20723651, upload-time = "2026-01-31T23:13:10.135Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d3/44/71852273146957899753e69986246d6a176061ea183407e95418c2aa4d9a/numpy-2.4.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e7e88598032542bd49af7c4747541422884219056c268823ef6e5e89851c8825", size = 16955478, upload-time = "2026-01-31T23:10:25.623Z" }, + { url = "https://files.pythonhosted.org/packages/74/41/5d17d4058bd0cd96bcbd4d9ff0fb2e21f52702aab9a72e4a594efa18692f/numpy-2.4.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7edc794af8b36ca37ef5fcb5e0d128c7e0595c7b96a2318d1badb6fcd8ee86b1", size = 14965467, upload-time = "2026-01-31T23:10:28.186Z" }, + { url = "https://files.pythonhosted.org/packages/49/48/fb1ce8136c19452ed15f033f8aee91d5defe515094e330ce368a0647846f/numpy-2.4.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:6e9f61981ace1360e42737e2bae58b27bf28a1b27e781721047d84bd754d32e7", size = 5475172, upload-time = "2026-01-31T23:10:30.848Z" }, + { url = 
"https://files.pythonhosted.org/packages/40/a9/3feb49f17bbd1300dd2570432961f5c8a4ffeff1db6f02c7273bd020a4c9/numpy-2.4.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:cb7bbb88aa74908950d979eeaa24dbdf1a865e3c7e45ff0121d8f70387b55f73", size = 6805145, upload-time = "2026-01-31T23:10:32.352Z" }, + { url = "https://files.pythonhosted.org/packages/3f/39/fdf35cbd6d6e2fcad42fcf85ac04a85a0d0fbfbf34b30721c98d602fd70a/numpy-2.4.2-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4f069069931240b3fc703f1e23df63443dbd6390614c8c44a87d96cd0ec81eb1", size = 15966084, upload-time = "2026-01-31T23:10:34.502Z" }, + { url = "https://files.pythonhosted.org/packages/1b/46/6fa4ea94f1ddf969b2ee941290cca6f1bfac92b53c76ae5f44afe17ceb69/numpy-2.4.2-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c02ef4401a506fb60b411467ad501e1429a3487abca4664871d9ae0b46c8ba32", size = 16899477, upload-time = "2026-01-31T23:10:37.075Z" }, + { url = "https://files.pythonhosted.org/packages/09/a1/2a424e162b1a14a5bd860a464ab4e07513916a64ab1683fae262f735ccd2/numpy-2.4.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2653de5c24910e49c2b106499803124dde62a5a1fe0eedeaecf4309a5f639390", size = 17323429, upload-time = "2026-01-31T23:10:39.704Z" }, + { url = "https://files.pythonhosted.org/packages/ce/a2/73014149ff250628df72c58204822ac01d768697913881aacf839ff78680/numpy-2.4.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1ae241bbfc6ae276f94a170b14785e561cb5e7f626b6688cf076af4110887413", size = 18635109, upload-time = "2026-01-31T23:10:41.924Z" }, + { url = "https://files.pythonhosted.org/packages/6c/0c/73e8be2f1accd56df74abc1c5e18527822067dced5ec0861b5bb882c2ce0/numpy-2.4.2-cp311-cp311-win32.whl", hash = "sha256:df1b10187212b198dd45fa943d8985a3c8cf854aed4923796e0e019e113a1bda", size = 6237915, upload-time = "2026-01-31T23:10:45.26Z" }, + { url = 
"https://files.pythonhosted.org/packages/76/ae/e0265e0163cf127c24c3969d29f1c4c64551a1e375d95a13d32eab25d364/numpy-2.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:b9c618d56a29c9cb1c4da979e9899be7578d2e0b3c24d52079c166324c9e8695", size = 12607972, upload-time = "2026-01-31T23:10:47.021Z" }, + { url = "https://files.pythonhosted.org/packages/29/a5/c43029af9b8014d6ea157f192652c50042e8911f4300f8f6ed3336bf437f/numpy-2.4.2-cp311-cp311-win_arm64.whl", hash = "sha256:47c5a6ed21d9452b10227e5e8a0e1c22979811cad7dcc19d8e3e2fb8fa03f1a3", size = 10485763, upload-time = "2026-01-31T23:10:50.087Z" }, + { url = "https://files.pythonhosted.org/packages/51/6e/6f394c9c77668153e14d4da83bcc247beb5952f6ead7699a1a2992613bea/numpy-2.4.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:21982668592194c609de53ba4933a7471880ccbaadcc52352694a59ecc860b3a", size = 16667963, upload-time = "2026-01-31T23:10:52.147Z" }, + { url = "https://files.pythonhosted.org/packages/1f/f8/55483431f2b2fd015ae6ed4fe62288823ce908437ed49db5a03d15151678/numpy-2.4.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40397bda92382fcec844066efb11f13e1c9a3e2a8e8f318fb72ed8b6db9f60f1", size = 14693571, upload-time = "2026-01-31T23:10:54.789Z" }, + { url = "https://files.pythonhosted.org/packages/2f/20/18026832b1845cdc82248208dd929ca14c9d8f2bac391f67440707fff27c/numpy-2.4.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:b3a24467af63c67829bfaa61eecf18d5432d4f11992688537be59ecd6ad32f5e", size = 5203469, upload-time = "2026-01-31T23:10:57.343Z" }, + { url = "https://files.pythonhosted.org/packages/7d/33/2eb97c8a77daaba34eaa3fa7241a14ac5f51c46a6bd5911361b644c4a1e2/numpy-2.4.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:805cc8de9fd6e7a22da5aed858e0ab16be5a4db6c873dde1d7451c541553aa27", size = 6550820, upload-time = "2026-01-31T23:10:59.429Z" }, + { url = 
"https://files.pythonhosted.org/packages/b1/91/b97fdfd12dc75b02c44e26c6638241cc004d4079a0321a69c62f51470c4c/numpy-2.4.2-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6d82351358ffbcdcd7b686b90742a9b86632d6c1c051016484fa0b326a0a1548", size = 15663067, upload-time = "2026-01-31T23:11:01.291Z" }, + { url = "https://files.pythonhosted.org/packages/f5/c6/a18e59f3f0b8071cc85cbc8d80cd02d68aa9710170b2553a117203d46936/numpy-2.4.2-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e35d3e0144137d9fdae62912e869136164534d64a169f86438bc9561b6ad49f", size = 16619782, upload-time = "2026-01-31T23:11:03.669Z" }, + { url = "https://files.pythonhosted.org/packages/b7/83/9751502164601a79e18847309f5ceec0b1446d7b6aa12305759b72cf98b2/numpy-2.4.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:adb6ed2ad29b9e15321d167d152ee909ec73395901b70936f029c3bc6d7f4460", size = 17013128, upload-time = "2026-01-31T23:11:05.913Z" }, + { url = "https://files.pythonhosted.org/packages/61/c4/c4066322256ec740acc1c8923a10047818691d2f8aec254798f3dd90f5f2/numpy-2.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8906e71fd8afcb76580404e2a950caef2685df3d2a57fe82a86ac8d33cc007ba", size = 18345324, upload-time = "2026-01-31T23:11:08.248Z" }, + { url = "https://files.pythonhosted.org/packages/ab/af/6157aa6da728fa4525a755bfad486ae7e3f76d4c1864138003eb84328497/numpy-2.4.2-cp312-cp312-win32.whl", hash = "sha256:ec055f6dae239a6299cace477b479cca2fc125c5675482daf1dd886933a1076f", size = 5960282, upload-time = "2026-01-31T23:11:10.497Z" }, + { url = "https://files.pythonhosted.org/packages/92/0f/7ceaaeaacb40567071e94dbf2c9480c0ae453d5bb4f52bea3892c39dc83c/numpy-2.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:209fae046e62d0ce6435fcfe3b1a10537e858249b3d9b05829e2a05218296a85", size = 12314210, upload-time = "2026-01-31T23:11:12.176Z" }, + { url = 
"https://files.pythonhosted.org/packages/2f/a3/56c5c604fae6dd40fa2ed3040d005fca97e91bd320d232ac9931d77ba13c/numpy-2.4.2-cp312-cp312-win_arm64.whl", hash = "sha256:fbde1b0c6e81d56f5dccd95dd4a711d9b95df1ae4009a60887e56b27e8d903fa", size = 10220171, upload-time = "2026-01-31T23:11:14.684Z" }, + { url = "https://files.pythonhosted.org/packages/a1/22/815b9fe25d1d7ae7d492152adbc7226d3eff731dffc38fe970589fcaaa38/numpy-2.4.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:25f2059807faea4b077a2b6837391b5d830864b3543627f381821c646f31a63c", size = 16663696, upload-time = "2026-01-31T23:11:17.516Z" }, + { url = "https://files.pythonhosted.org/packages/09/f0/817d03a03f93ba9c6c8993de509277d84e69f9453601915e4a69554102a1/numpy-2.4.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:bd3a7a9f5847d2fb8c2c6d1c862fa109c31a9abeca1a3c2bd5a64572955b2979", size = 14688322, upload-time = "2026-01-31T23:11:19.883Z" }, + { url = "https://files.pythonhosted.org/packages/da/b4/f805ab79293c728b9a99438775ce51885fd4f31b76178767cfc718701a39/numpy-2.4.2-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:8e4549f8a3c6d13d55041925e912bfd834285ef1dd64d6bc7d542583355e2e98", size = 5198157, upload-time = "2026-01-31T23:11:22.375Z" }, + { url = "https://files.pythonhosted.org/packages/74/09/826e4289844eccdcd64aac27d13b0fd3f32039915dd5b9ba01baae1f436c/numpy-2.4.2-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:aea4f66ff44dfddf8c2cffd66ba6538c5ec67d389285292fe428cb2c738c8aef", size = 6546330, upload-time = "2026-01-31T23:11:23.958Z" }, + { url = "https://files.pythonhosted.org/packages/19/fb/cbfdbfa3057a10aea5422c558ac57538e6acc87ec1669e666d32ac198da7/numpy-2.4.2-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c3cd545784805de05aafe1dde61752ea49a359ccba9760c1e5d1c88a93bbf2b7", size = 15660968, upload-time = "2026-01-31T23:11:25.713Z" }, + { url = 
"https://files.pythonhosted.org/packages/04/dc/46066ce18d01645541f0186877377b9371b8fa8017fa8262002b4ef22612/numpy-2.4.2-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d0d9b7c93578baafcbc5f0b83eaf17b79d345c6f36917ba0c67f45226911d499", size = 16607311, upload-time = "2026-01-31T23:11:28.117Z" }, + { url = "https://files.pythonhosted.org/packages/14/d9/4b5adfc39a43fa6bf918c6d544bc60c05236cc2f6339847fc5b35e6cb5b0/numpy-2.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f74f0f7779cc7ae07d1810aab8ac6b1464c3eafb9e283a40da7309d5e6e48fbb", size = 17012850, upload-time = "2026-01-31T23:11:30.888Z" }, + { url = "https://files.pythonhosted.org/packages/b7/20/adb6e6adde6d0130046e6fdfb7675cc62bc2f6b7b02239a09eb58435753d/numpy-2.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c7ac672d699bf36275c035e16b65539931347d68b70667d28984c9fb34e07fa7", size = 18334210, upload-time = "2026-01-31T23:11:33.214Z" }, + { url = "https://files.pythonhosted.org/packages/78/0e/0a73b3dff26803a8c02baa76398015ea2a5434d9b8265a7898a6028c1591/numpy-2.4.2-cp313-cp313-win32.whl", hash = "sha256:8e9afaeb0beff068b4d9cd20d322ba0ee1cecfb0b08db145e4ab4dd44a6b5110", size = 5958199, upload-time = "2026-01-31T23:11:35.385Z" }, + { url = "https://files.pythonhosted.org/packages/43/bc/6352f343522fcb2c04dbaf94cb30cca6fd32c1a750c06ad6231b4293708c/numpy-2.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:7df2de1e4fba69a51c06c28f5a3de36731eb9639feb8e1cf7e4a7b0daf4cf622", size = 12310848, upload-time = "2026-01-31T23:11:38.001Z" }, + { url = "https://files.pythonhosted.org/packages/6e/8d/6da186483e308da5da1cc6918ce913dcfe14ffde98e710bfeff2a6158d4e/numpy-2.4.2-cp313-cp313-win_arm64.whl", hash = "sha256:0fece1d1f0a89c16b03442eae5c56dc0be0c7883b5d388e0c03f53019a4bfd71", size = 10221082, upload-time = "2026-01-31T23:11:40.392Z" }, + { url = 
"https://files.pythonhosted.org/packages/25/a1/9510aa43555b44781968935c7548a8926274f815de42ad3997e9e83680dd/numpy-2.4.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5633c0da313330fd20c484c78cdd3f9b175b55e1a766c4a174230c6b70ad8262", size = 14815866, upload-time = "2026-01-31T23:11:42.495Z" }, + { url = "https://files.pythonhosted.org/packages/36/30/6bbb5e76631a5ae46e7923dd16ca9d3f1c93cfa8d4ed79a129814a9d8db3/numpy-2.4.2-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:d9f64d786b3b1dd742c946c42d15b07497ed14af1a1f3ce840cce27daa0ce913", size = 5325631, upload-time = "2026-01-31T23:11:44.7Z" }, + { url = "https://files.pythonhosted.org/packages/46/00/3a490938800c1923b567b3a15cd17896e68052e2145d8662aaf3e1ffc58f/numpy-2.4.2-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:b21041e8cb6a1eb5312dd1d2f80a94d91efffb7a06b70597d44f1bd2dfc315ab", size = 6646254, upload-time = "2026-01-31T23:11:46.341Z" }, + { url = "https://files.pythonhosted.org/packages/d3/e9/fac0890149898a9b609caa5af7455a948b544746e4b8fe7c212c8edd71f8/numpy-2.4.2-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:00ab83c56211a1d7c07c25e3217ea6695e50a3e2f255053686b081dc0b091a82", size = 15720138, upload-time = "2026-01-31T23:11:48.082Z" }, + { url = "https://files.pythonhosted.org/packages/ea/5c/08887c54e68e1e28df53709f1893ce92932cc6f01f7c3d4dc952f61ffd4e/numpy-2.4.2-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2fb882da679409066b4603579619341c6d6898fc83a8995199d5249f986e8e8f", size = 16655398, upload-time = "2026-01-31T23:11:50.293Z" }, + { url = "https://files.pythonhosted.org/packages/4d/89/253db0fa0e66e9129c745e4ef25631dc37d5f1314dad2b53e907b8538e6d/numpy-2.4.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:66cb9422236317f9d44b67b4d18f44efe6e9c7f8794ac0462978513359461554", size = 17079064, upload-time = "2026-01-31T23:11:52.927Z" }, + { url = 
"https://files.pythonhosted.org/packages/2a/d5/cbade46ce97c59c6c3da525e8d95b7abe8a42974a1dc5c1d489c10433e88/numpy-2.4.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:0f01dcf33e73d80bd8dc0f20a71303abbafa26a19e23f6b68d1aa9990af90257", size = 18379680, upload-time = "2026-01-31T23:11:55.22Z" }, + { url = "https://files.pythonhosted.org/packages/40/62/48f99ae172a4b63d981babe683685030e8a3df4f246c893ea5c6ef99f018/numpy-2.4.2-cp313-cp313t-win32.whl", hash = "sha256:52b913ec40ff7ae845687b0b34d8d93b60cb66dcee06996dd5c99f2fc9328657", size = 6082433, upload-time = "2026-01-31T23:11:58.096Z" }, + { url = "https://files.pythonhosted.org/packages/07/38/e054a61cfe48ad9f1ed0d188e78b7e26859d0b60ef21cd9de4897cdb5326/numpy-2.4.2-cp313-cp313t-win_amd64.whl", hash = "sha256:5eea80d908b2c1f91486eb95b3fb6fab187e569ec9752ab7d9333d2e66bf2d6b", size = 12451181, upload-time = "2026-01-31T23:11:59.782Z" }, + { url = "https://files.pythonhosted.org/packages/6e/a4/a05c3a6418575e185dd84d0b9680b6bb2e2dc3e4202f036b7b4e22d6e9dc/numpy-2.4.2-cp313-cp313t-win_arm64.whl", hash = "sha256:fd49860271d52127d61197bb50b64f58454e9f578cb4b2c001a6de8b1f50b0b1", size = 10290756, upload-time = "2026-01-31T23:12:02.438Z" }, + { url = "https://files.pythonhosted.org/packages/18/88/b7df6050bf18fdcfb7046286c6535cabbdd2064a3440fca3f069d319c16e/numpy-2.4.2-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:444be170853f1f9d528428eceb55f12918e4fda5d8805480f36a002f1415e09b", size = 16663092, upload-time = "2026-01-31T23:12:04.521Z" }, + { url = "https://files.pythonhosted.org/packages/25/7a/1fee4329abc705a469a4afe6e69b1ef7e915117747886327104a8493a955/numpy-2.4.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:d1240d50adff70c2a88217698ca844723068533f3f5c5fa6ee2e3220e3bdb000", size = 14698770, upload-time = "2026-01-31T23:12:06.96Z" }, + { url = "https://files.pythonhosted.org/packages/fb/0b/f9e49ba6c923678ad5bc38181c08ac5e53b7a5754dbca8e581aa1a56b1ff/numpy-2.4.2-cp314-cp314-macosx_14_0_arm64.whl", hash = 
"sha256:7cdde6de52fb6664b00b056341265441192d1291c130e99183ec0d4b110ff8b1", size = 5208562, upload-time = "2026-01-31T23:12:09.632Z" }, + { url = "https://files.pythonhosted.org/packages/7d/12/d7de8f6f53f9bb76997e5e4c069eda2051e3fe134e9181671c4391677bb2/numpy-2.4.2-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:cda077c2e5b780200b6b3e09d0b42205a3d1c68f30c6dceb90401c13bff8fe74", size = 6543710, upload-time = "2026-01-31T23:12:11.969Z" }, + { url = "https://files.pythonhosted.org/packages/09/63/c66418c2e0268a31a4cf8a8b512685748200f8e8e8ec6c507ce14e773529/numpy-2.4.2-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d30291931c915b2ab5717c2974bb95ee891a1cf22ebc16a8006bd59cd210d40a", size = 15677205, upload-time = "2026-01-31T23:12:14.33Z" }, + { url = "https://files.pythonhosted.org/packages/5d/6c/7f237821c9642fb2a04d2f1e88b4295677144ca93285fd76eff3bcba858d/numpy-2.4.2-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bba37bc29d4d85761deed3954a1bc62be7cf462b9510b51d367b769a8c8df325", size = 16611738, upload-time = "2026-01-31T23:12:16.525Z" }, + { url = "https://files.pythonhosted.org/packages/c2/a7/39c4cdda9f019b609b5c473899d87abff092fc908cfe4d1ecb2fcff453b0/numpy-2.4.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b2f0073ed0868db1dcd86e052d37279eef185b9c8db5bf61f30f46adac63c909", size = 17028888, upload-time = "2026-01-31T23:12:19.306Z" }, + { url = "https://files.pythonhosted.org/packages/da/b3/e84bb64bdfea967cc10950d71090ec2d84b49bc691df0025dddb7c26e8e3/numpy-2.4.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:7f54844851cdb630ceb623dcec4db3240d1ac13d4990532446761baede94996a", size = 18339556, upload-time = "2026-01-31T23:12:21.816Z" }, + { url = "https://files.pythonhosted.org/packages/88/f5/954a291bc1192a27081706862ac62bb5920fbecfbaa302f64682aa90beed/numpy-2.4.2-cp314-cp314-win32.whl", hash = "sha256:12e26134a0331d8dbd9351620f037ec470b7c75929cb8a1537f6bfe411152a1a", size = 6006899, 
upload-time = "2026-01-31T23:12:24.14Z" }, + { url = "https://files.pythonhosted.org/packages/05/cb/eff72a91b2efdd1bc98b3b8759f6a1654aa87612fc86e3d87d6fe4f948c4/numpy-2.4.2-cp314-cp314-win_amd64.whl", hash = "sha256:068cdb2d0d644cdb45670810894f6a0600797a69c05f1ac478e8d31670b8ee75", size = 12443072, upload-time = "2026-01-31T23:12:26.33Z" }, + { url = "https://files.pythonhosted.org/packages/37/75/62726948db36a56428fce4ba80a115716dc4fad6a3a4352487f8bb950966/numpy-2.4.2-cp314-cp314-win_arm64.whl", hash = "sha256:6ed0be1ee58eef41231a5c943d7d1375f093142702d5723ca2eb07db9b934b05", size = 10494886, upload-time = "2026-01-31T23:12:28.488Z" }, + { url = "https://files.pythonhosted.org/packages/36/2f/ee93744f1e0661dc267e4b21940870cabfae187c092e1433b77b09b50ac4/numpy-2.4.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:98f16a80e917003a12c0580f97b5f875853ebc33e2eaa4bccfc8201ac6869308", size = 14818567, upload-time = "2026-01-31T23:12:30.709Z" }, + { url = "https://files.pythonhosted.org/packages/a7/24/6535212add7d76ff938d8bdc654f53f88d35cddedf807a599e180dcb8e66/numpy-2.4.2-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:20abd069b9cda45874498b245c8015b18ace6de8546bf50dfa8cea1696ed06ef", size = 5328372, upload-time = "2026-01-31T23:12:32.962Z" }, + { url = "https://files.pythonhosted.org/packages/5e/9d/c48f0a035725f925634bf6b8994253b43f2047f6778a54147d7e213bc5a7/numpy-2.4.2-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:e98c97502435b53741540a5717a6749ac2ada901056c7db951d33e11c885cc7d", size = 6649306, upload-time = "2026-01-31T23:12:34.797Z" }, + { url = "https://files.pythonhosted.org/packages/81/05/7c73a9574cd4a53a25907bad38b59ac83919c0ddc8234ec157f344d57d9a/numpy-2.4.2-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:da6cad4e82cb893db4b69105c604d805e0c3ce11501a55b5e9f9083b47d2ffe8", size = 15722394, upload-time = "2026-01-31T23:12:36.565Z" }, + { url = 
"https://files.pythonhosted.org/packages/35/fa/4de10089f21fc7d18442c4a767ab156b25c2a6eaf187c0db6d9ecdaeb43f/numpy-2.4.2-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e4424677ce4b47fe73c8b5556d876571f7c6945d264201180db2dc34f676ab5", size = 16653343, upload-time = "2026-01-31T23:12:39.188Z" }, + { url = "https://files.pythonhosted.org/packages/b8/f9/d33e4ffc857f3763a57aa85650f2e82486832d7492280ac21ba9efda80da/numpy-2.4.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:2b8f157c8a6f20eb657e240f8985cc135598b2b46985c5bccbde7616dc9c6b1e", size = 17078045, upload-time = "2026-01-31T23:12:42.041Z" }, + { url = "https://files.pythonhosted.org/packages/c8/b8/54bdb43b6225badbea6389fa038c4ef868c44f5890f95dd530a218706da3/numpy-2.4.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5daf6f3914a733336dab21a05cdec343144600e964d2fcdabaac0c0269874b2a", size = 18380024, upload-time = "2026-01-31T23:12:44.331Z" }, + { url = "https://files.pythonhosted.org/packages/a5/55/6e1a61ded7af8df04016d81b5b02daa59f2ea9252ee0397cb9f631efe9e5/numpy-2.4.2-cp314-cp314t-win32.whl", hash = "sha256:8c50dd1fc8826f5b26a5ee4d77ca55d88a895f4e4819c7ecc2a9f5905047a443", size = 6153937, upload-time = "2026-01-31T23:12:47.229Z" }, + { url = "https://files.pythonhosted.org/packages/45/aa/fa6118d1ed6d776b0983f3ceac9b1a5558e80df9365b1c3aa6d42bf9eee4/numpy-2.4.2-cp314-cp314t-win_amd64.whl", hash = "sha256:fcf92bee92742edd401ba41135185866f7026c502617f422eb432cfeca4fe236", size = 12631844, upload-time = "2026-01-31T23:12:48.997Z" }, + { url = "https://files.pythonhosted.org/packages/32/0a/2ec5deea6dcd158f254a7b372fb09cfba5719419c8d66343bab35237b3fb/numpy-2.4.2-cp314-cp314t-win_arm64.whl", hash = "sha256:1f92f53998a17265194018d1cc321b2e96e900ca52d54c7c77837b71b9465181", size = 10565379, upload-time = "2026-01-31T23:12:51.345Z" }, + { url = 
"https://files.pythonhosted.org/packages/f4/f8/50e14d36d915ef64d8f8bc4a087fc8264d82c785eda6711f80ab7e620335/numpy-2.4.2-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:89f7268c009bc492f506abd6f5265defa7cb3f7487dc21d357c3d290add45082", size = 16833179, upload-time = "2026-01-31T23:12:53.5Z" }, + { url = "https://files.pythonhosted.org/packages/17/17/809b5cad63812058a8189e91a1e2d55a5a18fd04611dbad244e8aeae465c/numpy-2.4.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:e6dee3bb76aa4009d5a912180bf5b2de012532998d094acee25d9cb8dee3e44a", size = 14889755, upload-time = "2026-01-31T23:12:55.933Z" }, + { url = "https://files.pythonhosted.org/packages/3e/ea/181b9bcf7627fc8371720316c24db888dcb9829b1c0270abf3d288b2e29b/numpy-2.4.2-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:cd2bd2bbed13e213d6b55dc1d035a4f91748a7d3edc9480c13898b0353708920", size = 5399500, upload-time = "2026-01-31T23:12:58.671Z" }, + { url = "https://files.pythonhosted.org/packages/33/9f/413adf3fc955541ff5536b78fcf0754680b3c6d95103230252a2c9408d23/numpy-2.4.2-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = "sha256:cf28c0c1d4c4bf00f509fa7eb02c58d7caf221b50b467bcb0d9bbf1584d5c821", size = 6714252, upload-time = "2026-01-31T23:13:00.518Z" }, + { url = "https://files.pythonhosted.org/packages/91/da/643aad274e29ccbdf42ecd94dafe524b81c87bcb56b83872d54827f10543/numpy-2.4.2-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e04ae107ac591763a47398bb45b568fc38f02dbc4aa44c063f67a131f99346cb", size = 15797142, upload-time = "2026-01-31T23:13:02.219Z" }, + { url = "https://files.pythonhosted.org/packages/66/27/965b8525e9cb5dc16481b30a1b3c21e50c7ebf6e9dbd48d0c4d0d5089c7e/numpy-2.4.2-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:602f65afdef699cda27ec0b9224ae5dc43e328f4c24c689deaf77133dbee74d0", size = 16727979, upload-time = "2026-01-31T23:13:04.62Z" }, + { url = 
"https://files.pythonhosted.org/packages/de/e5/b7d20451657664b07986c2f6e3be564433f5dcaf3482d68eaecd79afaf03/numpy-2.4.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:be71bf1edb48ebbbf7f6337b5bfd2f895d1902f6335a5830b20141fc126ffba0", size = 12502577, upload-time = "2026-01-31T23:13:07.08Z" }, ] [[package]] @@ -4251,83 +4259,83 @@ wheels = [ [[package]] name = "orjson" -version = "3.11.6" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/70/a3/4e09c61a5f0c521cba0bb433639610ae037437669f1a4cbc93799e731d78/orjson-3.11.6.tar.gz", hash = "sha256:0a54c72259f35299fd033042367df781c2f66d10252955ca1efb7db309b954cb", size = 6175856, upload-time = "2026-01-29T15:13:07.942Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/30/3c/098ed0e49c565fdf1ccc6a75b190115d1ca74148bf5b6ab036554a550650/orjson-3.11.6-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a613fc37e007143d5b6286dccb1394cd114b07832417006a02b620ddd8279e37", size = 250411, upload-time = "2026-01-29T15:11:17.941Z" }, - { url = "https://files.pythonhosted.org/packages/15/7c/cb11a360fd228ceebade03b1e8e9e138dd4b1b3b11602b72dbdad915aded/orjson-3.11.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46ebee78f709d3ba7a65384cfe285bb0763157c6d2f836e7bde2f12d33a867a2", size = 138147, upload-time = "2026-01-29T15:11:19.659Z" }, - { url = "https://files.pythonhosted.org/packages/4e/4b/e57b5c45ffe69fbef7cbd56e9f40e2dc0d5de920caafefcc6981d1a7efc5/orjson-3.11.6-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a726fa86d2368cd57990f2bd95ef5495a6e613b08fc9585dfe121ec758fb08d1", size = 135110, upload-time = "2026-01-29T15:11:21.231Z" }, - { url = "https://files.pythonhosted.org/packages/b0/6e/4f21c6256f8cee3c0c69926cf7ac821cfc36f218512eedea2e2dc4a490c8/orjson-3.11.6-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:150f12e59d6864197770c78126e1a6e07a3da73d1728731bf3bc1e8b96ffdbe6", size = 140995, upload-time = "2026-01-29T15:11:22.902Z" }, - { url = "https://files.pythonhosted.org/packages/d0/78/92c36205ba2f6094ba1eea60c8e646885072abe64f155196833988c14b74/orjson-3.11.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a2d9746a5b5ce20c0908ada451eb56da4ffa01552a50789a0354d8636a02953", size = 144435, upload-time = "2026-01-29T15:11:24.124Z" }, - { url = "https://files.pythonhosted.org/packages/4d/52/1b518d164005811eb3fea92650e76e7d9deadb0b41e92c483373b1e82863/orjson-3.11.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:afd177f5dd91666d31e9019f1b06d2fcdf8a409a1637ddcb5915085dede85680", size = 142734, upload-time = "2026-01-29T15:11:25.708Z" }, - { url = "https://files.pythonhosted.org/packages/4b/11/60ea7885a2b7c1bf60ed8b5982356078a73785bd3bab392041a5bcf8de7c/orjson-3.11.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d777ec41a327bd3b7de97ba7bce12cc1007815ca398e4e4de9ec56c022c090b", size = 145802, upload-time = "2026-01-29T15:11:26.917Z" }, - { url = "https://files.pythonhosted.org/packages/41/7f/15a927e7958fd4f7560fb6dbb9346bee44a168e40168093c46020d866098/orjson-3.11.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f3a135f83185c87c13ff231fcb7dbb2fa4332a376444bd65135b50ff4cc5265c", size = 147504, upload-time = "2026-01-29T15:11:28.07Z" }, - { url = "https://files.pythonhosted.org/packages/66/1f/cabb9132a533f4f913e29294d0a1ca818b1a9a52e990526fe3f7ddd75f1c/orjson-3.11.6-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:2a8eeed7d4544cf391a142b0dd06029dac588e96cc692d9ab1c3f05b1e57c7f6", size = 421408, upload-time = "2026-01-29T15:11:29.314Z" }, - { url = "https://files.pythonhosted.org/packages/4c/b9/09bda9257a982e300313e4a9fc9b9c3aaff424d07bcf765bf045e4e3ed03/orjson-3.11.6-cp310-cp310-musllinux_1_2_i686.whl", hash = 
"sha256:9d576865a21e5cc6695be8fb78afc812079fd361ce6a027a7d41561b61b33a90", size = 155801, upload-time = "2026-01-29T15:11:30.575Z" }, - { url = "https://files.pythonhosted.org/packages/98/19/4e40ea3e5f4c6a8d51f31fd2382351ee7b396fecca915b17cd1af588175b/orjson-3.11.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:925e2df51f60aa50f8797830f2adfc05330425803f4105875bb511ced98b7f89", size = 147647, upload-time = "2026-01-29T15:11:31.856Z" }, - { url = "https://files.pythonhosted.org/packages/5a/73/ef4bd7dd15042cf33a402d16b87b9e969e71edb452b63b6e2b05025d1f7d/orjson-3.11.6-cp310-cp310-win32.whl", hash = "sha256:09dded2de64e77ac0b312ad59f35023548fb87393a57447e1bb36a26c181a90f", size = 139770, upload-time = "2026-01-29T15:11:33.031Z" }, - { url = "https://files.pythonhosted.org/packages/b4/ac/daab6e10467f7fffd7081ba587b492505b49313130ff5446a6fe28bf076e/orjson-3.11.6-cp310-cp310-win_amd64.whl", hash = "sha256:3a63b5e7841ca8635214c6be7c0bf0246aa8c5cd4ef0c419b14362d0b2fb13de", size = 136783, upload-time = "2026-01-29T15:11:34.686Z" }, - { url = "https://files.pythonhosted.org/packages/f3/fd/d6b0a36854179b93ed77839f107c4089d91cccc9f9ba1b752b6e3bac5f34/orjson-3.11.6-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:e259e85a81d76d9665f03d6129e09e4435531870de5961ddcd0bf6e3a7fde7d7", size = 250029, upload-time = "2026-01-29T15:11:35.942Z" }, - { url = "https://files.pythonhosted.org/packages/a3/bb/22902619826641cf3b627c24aab62e2ad6b571bdd1d34733abb0dd57f67a/orjson-3.11.6-cp311-cp311-macosx_15_0_arm64.whl", hash = "sha256:52263949f41b4a4822c6b1353bcc5ee2f7109d53a3b493501d3369d6d0e7937a", size = 134518, upload-time = "2026-01-29T15:11:37.347Z" }, - { url = "https://files.pythonhosted.org/packages/72/90/7a818da4bba1de711a9653c420749c0ac95ef8f8651cbc1dca551f462fe0/orjson-3.11.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6439e742fa7834a24698d358a27346bb203bff356ae0402e7f5df8f749c621a8", size = 137917, 
upload-time = "2026-01-29T15:11:38.511Z" }, - { url = "https://files.pythonhosted.org/packages/59/0f/02846c1cac8e205cb3822dd8aa8f9114acda216f41fd1999ace6b543418d/orjson-3.11.6-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b81ffd68f084b4e993e3867acb554a049fa7787cc8710bbcc1e26965580d99be", size = 134923, upload-time = "2026-01-29T15:11:39.711Z" }, - { url = "https://files.pythonhosted.org/packages/94/cf/aeaf683001b474bb3c3c757073a4231dfdfe8467fceaefa5bfd40902c99f/orjson-3.11.6-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a5a5468e5e60f7ef6d7f9044b06c8f94a3c56ba528c6e4f7f06ae95164b595ec", size = 140752, upload-time = "2026-01-29T15:11:41.347Z" }, - { url = "https://files.pythonhosted.org/packages/fc/fe/dad52d8315a65f084044a0819d74c4c9daf9ebe0681d30f525b0d29a31f0/orjson-3.11.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:72c5005eb45bd2535632d4f3bec7ad392832cfc46b62a3021da3b48a67734b45", size = 144201, upload-time = "2026-01-29T15:11:42.537Z" }, - { url = "https://files.pythonhosted.org/packages/36/bc/ab070dd421565b831801077f1e390c4d4af8bfcecafc110336680a33866b/orjson-3.11.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0b14dd49f3462b014455a28a4d810d3549bf990567653eb43765cd847df09145", size = 142380, upload-time = "2026-01-29T15:11:44.309Z" }, - { url = "https://files.pythonhosted.org/packages/e6/d8/4b581c725c3a308717f28bf45a9fdac210bca08b67e8430143699413ff06/orjson-3.11.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e0bb2c1ea30ef302f0f89f9bf3e7f9ab5e2af29dc9f80eb87aa99788e4e2d65", size = 145582, upload-time = "2026-01-29T15:11:45.506Z" }, - { url = "https://files.pythonhosted.org/packages/5b/a2/09aab99b39f9a7f175ea8fa29adb9933a3d01e7d5d603cdee7f1c40c8da2/orjson-3.11.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:825e0a85d189533c6bff7e2fc417a28f6fcea53d27125c4551979aecd6c9a197", size = 147270, upload-time = 
"2026-01-29T15:11:46.782Z" }, - { url = "https://files.pythonhosted.org/packages/b8/2f/5ef8eaf7829dc50da3bf497c7775b21ee88437bc8c41f959aa3504ca6631/orjson-3.11.6-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:b04575417a26530637f6ab4b1f7b4f666eb0433491091da4de38611f97f2fcf3", size = 421222, upload-time = "2026-01-29T15:11:48.106Z" }, - { url = "https://files.pythonhosted.org/packages/3b/b0/dd6b941294c2b5b13da5fdc7e749e58d0c55a5114ab37497155e83050e95/orjson-3.11.6-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:b83eb2e40e8c4da6d6b340ee6b1d6125f5195eb1b0ebb7eac23c6d9d4f92d224", size = 155562, upload-time = "2026-01-29T15:11:49.408Z" }, - { url = "https://files.pythonhosted.org/packages/8e/09/43924331a847476ae2f9a16bd6d3c9dab301265006212ba0d3d7fd58763a/orjson-3.11.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1f42da604ee65a6b87eef858c913ce3e5777872b19321d11e6fc6d21de89b64f", size = 147432, upload-time = "2026-01-29T15:11:50.635Z" }, - { url = "https://files.pythonhosted.org/packages/5d/e9/d9865961081816909f6b49d880749dbbd88425afd7c5bbce0549e2290d77/orjson-3.11.6-cp311-cp311-win32.whl", hash = "sha256:5ae45df804f2d344cffb36c43fdf03c82fb6cd247f5faa41e21891b40dfbf733", size = 139623, upload-time = "2026-01-29T15:11:51.82Z" }, - { url = "https://files.pythonhosted.org/packages/b4/f9/6836edb92f76eec1082919101eb1145d2f9c33c8f2c5e6fa399b82a2aaa8/orjson-3.11.6-cp311-cp311-win_amd64.whl", hash = "sha256:f4295948d65ace0a2d8f2c4ccc429668b7eb8af547578ec882e16bf79b0050b2", size = 136647, upload-time = "2026-01-29T15:11:53.454Z" }, - { url = "https://files.pythonhosted.org/packages/b3/0c/4954082eea948c9ae52ee0bcbaa2f99da3216a71bcc314ab129bde22e565/orjson-3.11.6-cp311-cp311-win_arm64.whl", hash = "sha256:314e9c45e0b81b547e3a1cfa3df3e07a815821b3dac9fe8cb75014071d0c16a4", size = 135327, upload-time = "2026-01-29T15:11:56.616Z" }, - { url = 
"https://files.pythonhosted.org/packages/14/ba/759f2879f41910b7e5e0cdbd9cf82a4f017c527fb0e972e9869ca7fe4c8e/orjson-3.11.6-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:6f03f30cd8953f75f2a439070c743c7336d10ee940da918d71c6f3556af3ddcf", size = 249988, upload-time = "2026-01-29T15:11:58.294Z" }, - { url = "https://files.pythonhosted.org/packages/f0/70/54cecb929e6c8b10104fcf580b0cc7dc551aa193e83787dd6f3daba28bb5/orjson-3.11.6-cp312-cp312-macosx_15_0_arm64.whl", hash = "sha256:af44baae65ef386ad971469a8557a0673bb042b0b9fd4397becd9c2dfaa02588", size = 134445, upload-time = "2026-01-29T15:11:59.819Z" }, - { url = "https://files.pythonhosted.org/packages/f2/6f/ec0309154457b9ba1ad05f11faa4441f76037152f75e1ac577db3ce7ca96/orjson-3.11.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c310a48542094e4f7dbb6ac076880994986dda8ca9186a58c3cb70a3514d3231", size = 137708, upload-time = "2026-01-29T15:12:01.488Z" }, - { url = "https://files.pythonhosted.org/packages/20/52/3c71b80840f8bab9cb26417302707b7716b7d25f863f3a541bcfa232fe6e/orjson-3.11.6-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d8dfa7a5d387f15ecad94cb6b2d2d5f4aeea64efd8d526bfc03c9812d01e1cc0", size = 134798, upload-time = "2026-01-29T15:12:02.705Z" }, - { url = "https://files.pythonhosted.org/packages/30/51/b490a43b22ff736282360bd02e6bded455cf31dfc3224e01cd39f919bbd2/orjson-3.11.6-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba8daee3e999411b50f8b50dbb0a3071dd1845f3f9a1a0a6fa6de86d1689d84d", size = 140839, upload-time = "2026-01-29T15:12:03.956Z" }, - { url = "https://files.pythonhosted.org/packages/95/bc/4bcfe4280c1bc63c5291bb96f98298845b6355da2226d3400e17e7b51e53/orjson-3.11.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f89d104c974eafd7436d7a5fdbc57f7a1e776789959a2f4f1b2eab5c62a339f4", size = 144080, upload-time = "2026-01-29T15:12:05.151Z" }, - { url = 
"https://files.pythonhosted.org/packages/01/74/22970f9ead9ab1f1b5f8c227a6c3aa8d71cd2c5acd005868a1d44f2362fa/orjson-3.11.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2e2e2456788ca5ea75616c40da06fc885a7dc0389780e8a41bf7c5389ba257b", size = 142435, upload-time = "2026-01-29T15:12:06.641Z" }, - { url = "https://files.pythonhosted.org/packages/29/34/d564aff85847ab92c82ee43a7a203683566c2fca0723a5f50aebbe759603/orjson-3.11.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a42efebc45afabb1448001e90458c4020d5c64fbac8a8dc4045b777db76cb5a", size = 145631, upload-time = "2026-01-29T15:12:08.351Z" }, - { url = "https://files.pythonhosted.org/packages/e7/ef/016957a3890752c4aa2368326ea69fa53cdc1fdae0a94a542b6410dbdf52/orjson-3.11.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:71b7cbef8471324966c3738c90ba38775563ef01b512feb5ad4805682188d1b9", size = 147058, upload-time = "2026-01-29T15:12:10.023Z" }, - { url = "https://files.pythonhosted.org/packages/56/cc/9a899c3972085645b3225569f91a30e221f441e5dc8126e6d060b971c252/orjson-3.11.6-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:f8515e5910f454fe9a8e13c2bb9dc4bae4c1836313e967e72eb8a4ad874f0248", size = 421161, upload-time = "2026-01-29T15:12:11.308Z" }, - { url = "https://files.pythonhosted.org/packages/21/a8/767d3fbd6d9b8fdee76974db40619399355fd49bf91a6dd2c4b6909ccf05/orjson-3.11.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:300360edf27c8c9bf7047345a94fddf3a8b8922df0ff69d71d854a170cb375cf", size = 155757, upload-time = "2026-01-29T15:12:12.776Z" }, - { url = "https://files.pythonhosted.org/packages/ad/0b/205cd69ac87e2272e13ef3f5f03a3d4657e317e38c1b08aaa2ef97060bbc/orjson-3.11.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:caaed4dad39e271adfadc106fab634d173b2bb23d9cf7e67bd645f879175ebfc", size = 147446, upload-time = "2026-01-29T15:12:14.166Z" }, - { url = 
"https://files.pythonhosted.org/packages/de/c5/dd9f22aa9f27c54c7d05cc32f4580c9ac9b6f13811eeb81d6c4c3f50d6b1/orjson-3.11.6-cp312-cp312-win32.whl", hash = "sha256:955368c11808c89793e847830e1b1007503a5923ddadc108547d3b77df761044", size = 139717, upload-time = "2026-01-29T15:12:15.7Z" }, - { url = "https://files.pythonhosted.org/packages/23/a1/e62fc50d904486970315a1654b8cfb5832eb46abb18cd5405118e7e1fc79/orjson-3.11.6-cp312-cp312-win_amd64.whl", hash = "sha256:2c68de30131481150073d90a5d227a4a421982f42c025ecdfb66157f9579e06f", size = 136711, upload-time = "2026-01-29T15:12:17.055Z" }, - { url = "https://files.pythonhosted.org/packages/04/3d/b4fefad8bdf91e0fe212eb04975aeb36ea92997269d68857efcc7eb1dda3/orjson-3.11.6-cp312-cp312-win_arm64.whl", hash = "sha256:65dfa096f4e3a5e02834b681f539a87fbe85adc82001383c0db907557f666bfc", size = 135212, upload-time = "2026-01-29T15:12:18.3Z" }, - { url = "https://files.pythonhosted.org/packages/ae/45/d9c71c8c321277bc1ceebf599bc55ba826ae538b7c61f287e9a7e71bd589/orjson-3.11.6-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:e4ae1670caabb598a88d385798692ce2a1b2f078971b3329cfb85253c6097f5b", size = 249828, upload-time = "2026-01-29T15:12:20.14Z" }, - { url = "https://files.pythonhosted.org/packages/ac/7e/4afcf4cfa9c2f93846d70eee9c53c3c0123286edcbeb530b7e9bd2aea1b2/orjson-3.11.6-cp313-cp313-macosx_15_0_arm64.whl", hash = "sha256:2c6b81f47b13dac2caa5d20fbc953c75eb802543abf48403a4703ed3bff225f0", size = 134339, upload-time = "2026-01-29T15:12:22.01Z" }, - { url = "https://files.pythonhosted.org/packages/40/10/6d2b8a064c8d2411d3d0ea6ab43125fae70152aef6bea77bb50fa54d4097/orjson-3.11.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:647d6d034e463764e86670644bdcaf8e68b076e6e74783383b01085ae9ab334f", size = 137662, upload-time = "2026-01-29T15:12:23.307Z" }, - { url = 
"https://files.pythonhosted.org/packages/5a/50/5804ea7d586baf83ee88969eefda97a24f9a5bdba0727f73e16305175b26/orjson-3.11.6-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8523b9cc4ef174ae52414f7699e95ee657c16aa18b3c3c285d48d7966cce9081", size = 134626, upload-time = "2026-01-29T15:12:25.099Z" }, - { url = "https://files.pythonhosted.org/packages/9e/2e/f0492ed43e376722bb4afd648e06cc1e627fc7ec8ff55f6ee739277813ea/orjson-3.11.6-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:313dfd7184cde50c733fc0d5c8c0e2f09017b573afd11dc36bd7476b30b4cb17", size = 140873, upload-time = "2026-01-29T15:12:26.369Z" }, - { url = "https://files.pythonhosted.org/packages/10/15/6f874857463421794a303a39ac5494786ad46a4ab46d92bda6705d78c5aa/orjson-3.11.6-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:905ee036064ff1e1fd1fb800055ac477cdcb547a78c22c1bc2bbf8d5d1a6fb42", size = 144044, upload-time = "2026-01-29T15:12:28.082Z" }, - { url = "https://files.pythonhosted.org/packages/d2/c7/b7223a3a70f1d0cc2d86953825de45f33877ee1b124a91ca1f79aa6e643f/orjson-3.11.6-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ce374cb98411356ba906914441fc993f271a7a666d838d8de0e0900dd4a4bc12", size = 142396, upload-time = "2026-01-29T15:12:30.529Z" }, - { url = "https://files.pythonhosted.org/packages/87/e3/aa1b6d3ad3cd80f10394134f73ae92a1d11fdbe974c34aa199cc18bb5fcf/orjson-3.11.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cded072b9f65fcfd188aead45efa5bd528ba552add619b3ad2a81f67400ec450", size = 145600, upload-time = "2026-01-29T15:12:31.848Z" }, - { url = "https://files.pythonhosted.org/packages/f6/cf/e4aac5a46cbd39d7e769ef8650efa851dfce22df1ba97ae2b33efe893b12/orjson-3.11.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7ab85bdbc138e1f73a234db6bb2e4cc1f0fcec8f4bd2bd2430e957a01aadf746", size = 146967, upload-time = "2026-01-29T15:12:33.203Z" }, - { url = 
"https://files.pythonhosted.org/packages/0b/04/975b86a4bcf6cfeda47aad15956d52fbeda280811206e9967380fa9355c8/orjson-3.11.6-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:351b96b614e3c37a27b8ab048239ebc1e0be76cc17481a430d70a77fb95d3844", size = 421003, upload-time = "2026-01-29T15:12:35.097Z" }, - { url = "https://files.pythonhosted.org/packages/28/d1/0369d0baf40eea5ff2300cebfe209883b2473ab4aa4c4974c8bd5ee42bb2/orjson-3.11.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:f9959c85576beae5cdcaaf39510b15105f1ee8b70d5dacd90152617f57be8c83", size = 155695, upload-time = "2026-01-29T15:12:36.589Z" }, - { url = "https://files.pythonhosted.org/packages/ab/1f/d10c6d6ae26ff1d7c3eea6fd048280ef2e796d4fb260c5424fd021f68ecf/orjson-3.11.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:75682d62b1b16b61a30716d7a2ec1f4c36195de4a1c61f6665aedd947b93a5d5", size = 147392, upload-time = "2026-01-29T15:12:37.876Z" }, - { url = "https://files.pythonhosted.org/packages/8d/43/7479921c174441a0aa5277c313732e20713c0969ac303be9f03d88d3db5d/orjson-3.11.6-cp313-cp313-win32.whl", hash = "sha256:40dc277999c2ef227dcc13072be879b4cfd325502daeb5c35ed768f706f2bf30", size = 139718, upload-time = "2026-01-29T15:12:39.274Z" }, - { url = "https://files.pythonhosted.org/packages/88/bc/9ffe7dfbf8454bc4e75bb8bf3a405ed9e0598df1d3535bb4adcd46be07d0/orjson-3.11.6-cp313-cp313-win_amd64.whl", hash = "sha256:f0f6e9f8ff7905660bc3c8a54cd4a675aa98f7f175cf00a59815e2ff42c0d916", size = 136635, upload-time = "2026-01-29T15:12:40.593Z" }, - { url = "https://files.pythonhosted.org/packages/6f/7e/51fa90b451470447ea5023b20d83331ec741ae28d1e6d8ed547c24e7de14/orjson-3.11.6-cp313-cp313-win_arm64.whl", hash = "sha256:1608999478664de848e5900ce41f25c4ecdfc4beacbc632b6fd55e1a586e5d38", size = 135175, upload-time = "2026-01-29T15:12:41.997Z" }, - { url = 
"https://files.pythonhosted.org/packages/31/9f/46ca908abaeeec7560638ff20276ab327b980d73b3cc2f5b205b4a1c60b3/orjson-3.11.6-cp314-cp314-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:6026db2692041d2a23fe2545606df591687787825ad5821971ef0974f2c47630", size = 249823, upload-time = "2026-01-29T15:12:43.332Z" }, - { url = "https://files.pythonhosted.org/packages/ff/78/ca478089818d18c9cd04f79c43f74ddd031b63c70fa2a946eb5e85414623/orjson-3.11.6-cp314-cp314-macosx_15_0_arm64.whl", hash = "sha256:132b0ab2e20c73afa85cf142e547511feb3d2f5b7943468984658f3952b467d4", size = 134328, upload-time = "2026-01-29T15:12:45.171Z" }, - { url = "https://files.pythonhosted.org/packages/39/5e/cbb9d830ed4e47f4375ad8eef8e4fff1bf1328437732c3809054fc4e80be/orjson-3.11.6-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b376fb05f20a96ec117d47987dd3b39265c635725bda40661b4c5b73b77b5fde", size = 137651, upload-time = "2026-01-29T15:12:46.602Z" }, - { url = "https://files.pythonhosted.org/packages/7c/3a/35df6558c5bc3a65ce0961aefee7f8364e59af78749fc796ea255bfa0cf5/orjson-3.11.6-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:954dae4e080574672a1dfcf2a840eddef0f27bd89b0e94903dd0824e9c1db060", size = 134596, upload-time = "2026-01-29T15:12:47.95Z" }, - { url = "https://files.pythonhosted.org/packages/cd/8e/3d32dd7b7f26a19cc4512d6ed0ae3429567c71feef720fe699ff43c5bc9e/orjson-3.11.6-cp314-cp314-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe515bb89d59e1e4b48637a964f480b35c0a2676de24e65e55310f6016cca7ce", size = 140923, upload-time = "2026-01-29T15:12:49.333Z" }, - { url = "https://files.pythonhosted.org/packages/6c/9c/1efbf5c99b3304f25d6f0d493a8d1492ee98693637c10ce65d57be839d7b/orjson-3.11.6-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:380f9709c275917af28feb086813923251e11ee10687257cd7f1ea188bcd4485", size = 144068, upload-time = "2026-01-29T15:12:50.927Z" }, - { url = 
"https://files.pythonhosted.org/packages/82/83/0d19eeb5be797de217303bbb55dde58dba26f996ed905d301d98fd2d4637/orjson-3.11.6-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8173e0d3f6081e7034c51cf984036d02f6bab2a2126de5a759d79f8e5a140e7", size = 142493, upload-time = "2026-01-29T15:12:52.432Z" }, - { url = "https://files.pythonhosted.org/packages/32/a7/573fec3df4dc8fc259b7770dc6c0656f91adce6e19330c78d23f87945d1e/orjson-3.11.6-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6dddf9ba706294906c56ef5150a958317b09aa3a8a48df1c52ccf22ec1907eac", size = 145616, upload-time = "2026-01-29T15:12:53.903Z" }, - { url = "https://files.pythonhosted.org/packages/c2/0e/23551b16f21690f7fd5122e3cf40fdca5d77052a434d0071990f97f5fe2f/orjson-3.11.6-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:cbae5c34588dc79938dffb0b6fbe8c531f4dc8a6ad7f39759a9eb5d2da405ef2", size = 146951, upload-time = "2026-01-29T15:12:55.698Z" }, - { url = "https://files.pythonhosted.org/packages/b8/63/5e6c8f39805c39123a18e412434ea364349ee0012548d08aa586e2bd6aa9/orjson-3.11.6-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:f75c318640acbddc419733b57f8a07515e587a939d8f54363654041fd1f4e465", size = 421024, upload-time = "2026-01-29T15:12:57.434Z" }, - { url = "https://files.pythonhosted.org/packages/1d/4d/724975cf0087f6550bd01fd62203418afc0ea33fd099aed318c5bcc52df8/orjson-3.11.6-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:e0ab8d13aa2a3e98b4a43487c9205b2c92c38c054b4237777484d503357c8437", size = 155774, upload-time = "2026-01-29T15:12:59.397Z" }, - { url = "https://files.pythonhosted.org/packages/a8/a3/f4c4e3f46b55db29e0a5f20493b924fc791092d9a03ff2068c9fe6c1002f/orjson-3.11.6-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:f884c7fb1020d44612bd7ac0db0babba0e2f78b68d9a650c7959bf99c783773f", size = 147393, upload-time = "2026-01-29T15:13:00.769Z" }, - { url = 
"https://files.pythonhosted.org/packages/ee/86/6f5529dd27230966171ee126cecb237ed08e9f05f6102bfaf63e5b32277d/orjson-3.11.6-cp314-cp314-win32.whl", hash = "sha256:8d1035d1b25732ec9f971e833a3e299d2b1a330236f75e6fd945ad982c76aaf3", size = 139760, upload-time = "2026-01-29T15:13:02.173Z" }, - { url = "https://files.pythonhosted.org/packages/d3/b5/91ae7037b2894a6b5002fb33f4fbccec98424a928469835c3837fbb22a9b/orjson-3.11.6-cp314-cp314-win_amd64.whl", hash = "sha256:931607a8865d21682bb72de54231655c86df1870502d2962dbfd12c82890d077", size = 136633, upload-time = "2026-01-29T15:13:04.267Z" }, - { url = "https://files.pythonhosted.org/packages/55/74/f473a3ec7a0a7ebc825ca8e3c86763f7d039f379860c81ba12dcdd456547/orjson-3.11.6-cp314-cp314-win_arm64.whl", hash = "sha256:fe71f6b283f4f1832204ab8235ce07adad145052614f77c876fcf0dac97bc06f", size = 135168, upload-time = "2026-01-29T15:13:05.932Z" }, +version = "3.11.7" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/53/45/b268004f745ede84e5798b48ee12b05129d19235d0e15267aa57dcdb400b/orjson-3.11.7.tar.gz", hash = "sha256:9b1a67243945819ce55d24a30b59d6a168e86220452d2c96f4d1f093e71c0c49", size = 6144992, upload-time = "2026-02-02T15:38:49.29Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/de/1a/a373746fa6d0e116dd9e54371a7b54622c44d12296d5d0f3ad5e3ff33490/orjson-3.11.7-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a02c833f38f36546ba65a452127633afce4cf0dd7296b753d3bb54e55e5c0174", size = 229140, upload-time = "2026-02-02T15:37:06.082Z" }, + { url = "https://files.pythonhosted.org/packages/52/a2/fa129e749d500f9b183e8a3446a193818a25f60261e9ce143ad61e975208/orjson-3.11.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b63c6e6738d7c3470ad01601e23376aa511e50e1f3931395b9f9c722406d1a67", size = 128670, upload-time = "2026-02-02T15:37:08.002Z" }, + { url = 
"https://files.pythonhosted.org/packages/08/93/1e82011cd1e0bd051ef9d35bed1aa7fb4ea1f0a055dc2c841b46b43a9ebd/orjson-3.11.7-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:043d3006b7d32c7e233b8cfb1f01c651013ea079e08dcef7189a29abd8befe11", size = 123832, upload-time = "2026-02-02T15:37:09.191Z" }, + { url = "https://files.pythonhosted.org/packages/fe/d8/a26b431ef962c7d55736674dddade876822f3e33223c1f47a36879350d04/orjson-3.11.7-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57036b27ac8a25d81112eb0cc9835cd4833c5b16e1467816adc0015f59e870dc", size = 129171, upload-time = "2026-02-02T15:37:11.112Z" }, + { url = "https://files.pythonhosted.org/packages/a7/19/f47819b84a580f490da260c3ee9ade214cf4cf78ac9ce8c1c758f80fdfc9/orjson-3.11.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:733ae23ada68b804b222c44affed76b39e30806d38660bf1eb200520d259cc16", size = 141967, upload-time = "2026-02-02T15:37:12.282Z" }, + { url = "https://files.pythonhosted.org/packages/5b/cd/37ece39a0777ba077fdcdbe4cccae3be8ed00290c14bf8afdc548befc260/orjson-3.11.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5fdfad2093bdd08245f2e204d977facd5f871c88c4a71230d5bcbd0e43bf6222", size = 130991, upload-time = "2026-02-02T15:37:13.465Z" }, + { url = "https://files.pythonhosted.org/packages/8f/ed/f2b5d66aa9b6b5c02ff5f120efc7b38c7c4962b21e6be0f00fd99a5c348e/orjson-3.11.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cededd6738e1c153530793998e31c05086582b08315db48ab66649768f326baa", size = 133674, upload-time = "2026-02-02T15:37:14.694Z" }, + { url = "https://files.pythonhosted.org/packages/c4/6e/baa83e68d1aa09fa8c3e5b2c087d01d0a0bd45256de719ed7bc22c07052d/orjson-3.11.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:14f440c7268c8f8633d1b3d443a434bd70cb15686117ea6beff8fdc8f5917a1e", size = 138722, upload-time = "2026-02-02T15:37:16.501Z" }, + { url = 
"https://files.pythonhosted.org/packages/0c/47/7f8ef4963b772cd56999b535e553f7eb5cd27e9dd6c049baee6f18bfa05d/orjson-3.11.7-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:3a2479753bbb95b0ebcf7969f562cdb9668e6d12416a35b0dda79febf89cdea2", size = 409056, upload-time = "2026-02-02T15:37:17.895Z" }, + { url = "https://files.pythonhosted.org/packages/38/eb/2df104dd2244b3618f25325a656f85cc3277f74bbd91224752410a78f3c7/orjson-3.11.7-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:71924496986275a737f38e3f22b4e0878882b3f7a310d2ff4dc96e812789120c", size = 144196, upload-time = "2026-02-02T15:37:19.349Z" }, + { url = "https://files.pythonhosted.org/packages/b6/2a/ee41de0aa3a6686598661eae2b4ebdff1340c65bfb17fcff8b87138aab21/orjson-3.11.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b4a9eefdc70bf8bf9857f0290f973dec534ac84c35cd6a7f4083be43e7170a8f", size = 134979, upload-time = "2026-02-02T15:37:20.906Z" }, + { url = "https://files.pythonhosted.org/packages/4c/fa/92fc5d3d402b87a8b28277a9ed35386218a6a5287c7fe5ee9b9f02c53fb2/orjson-3.11.7-cp310-cp310-win32.whl", hash = "sha256:ae9e0b37a834cef7ce8f99de6498f8fad4a2c0bf6bfc3d02abd8ed56aa15b2de", size = 127968, upload-time = "2026-02-02T15:37:23.178Z" }, + { url = "https://files.pythonhosted.org/packages/07/29/a576bf36d73d60df06904d3844a9df08e25d59eba64363aaf8ec2f9bff41/orjson-3.11.7-cp310-cp310-win_amd64.whl", hash = "sha256:d772afdb22555f0c58cfc741bdae44180122b3616faa1ecadb595cd526e4c993", size = 125128, upload-time = "2026-02-02T15:37:24.329Z" }, + { url = "https://files.pythonhosted.org/packages/37/02/da6cb01fc6087048d7f61522c327edf4250f1683a58a839fdcc435746dd5/orjson-3.11.7-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:9487abc2c2086e7c8eb9a211d2ce8855bae0e92586279d0d27b341d5ad76c85c", size = 228664, upload-time = "2026-02-02T15:37:25.542Z" }, + { url = 
"https://files.pythonhosted.org/packages/c1/c2/5885e7a5881dba9a9af51bc564e8967225a642b3e03d089289a35054e749/orjson-3.11.7-cp311-cp311-macosx_15_0_arm64.whl", hash = "sha256:79cacb0b52f6004caf92405a7e1f11e6e2de8bdf9019e4f76b44ba045125cd6b", size = 125344, upload-time = "2026-02-02T15:37:26.92Z" }, + { url = "https://files.pythonhosted.org/packages/a4/1d/4e7688de0a92d1caf600dfd5fb70b4c5bfff51dfa61ac555072ef2d0d32a/orjson-3.11.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c2e85fe4698b6a56d5e2ebf7ae87544d668eb6bde1ad1226c13f44663f20ec9e", size = 128404, upload-time = "2026-02-02T15:37:28.108Z" }, + { url = "https://files.pythonhosted.org/packages/2f/b2/ec04b74ae03a125db7bd69cffd014b227b7f341e3261bf75b5eb88a1aa92/orjson-3.11.7-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b8d14b71c0b12963fe8a62aac87119f1afdf4cb88a400f61ca5ae581449efcb5", size = 123677, upload-time = "2026-02-02T15:37:30.287Z" }, + { url = "https://files.pythonhosted.org/packages/4c/69/f95bdf960605f08f827f6e3291fe243d8aa9c5c9ff017a8d7232209184c3/orjson-3.11.7-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:91c81ef070c8f3220054115e1ef468b1c9ce8497b4e526cb9f68ab4dc0a7ac62", size = 128950, upload-time = "2026-02-02T15:37:31.595Z" }, + { url = "https://files.pythonhosted.org/packages/a4/1b/de59c57bae1d148ef298852abd31909ac3089cff370dfd4cd84cc99cbc42/orjson-3.11.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:411ebaf34d735e25e358a6d9e7978954a9c9d58cfb47bc6683cdc3964cd2f910", size = 141756, upload-time = "2026-02-02T15:37:32.985Z" }, + { url = "https://files.pythonhosted.org/packages/ee/9e/9decc59f4499f695f65c650f6cfa6cd4c37a3fbe8fa235a0a3614cb54386/orjson-3.11.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a16bcd08ab0bcdfc7e8801d9c4a9cc17e58418e4d48ddc6ded4e9e4b1a94062b", size = 130812, upload-time = "2026-02-02T15:37:34.204Z" }, + { url = 
"https://files.pythonhosted.org/packages/28/e6/59f932bcabd1eac44e334fe8e3281a92eacfcb450586e1f4bde0423728d8/orjson-3.11.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c0b51672e466fd7e56230ffbae7f1639e18d0ce023351fb75da21b71bc2c960", size = 133444, upload-time = "2026-02-02T15:37:35.446Z" }, + { url = "https://files.pythonhosted.org/packages/f1/36/b0f05c0eaa7ca30bc965e37e6a2956b0d67adb87a9872942d3568da846ae/orjson-3.11.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:136dcd6a2e796dfd9ffca9fc027d778567b0b7c9968d092842d3c323cef88aa8", size = 138609, upload-time = "2026-02-02T15:37:36.657Z" }, + { url = "https://files.pythonhosted.org/packages/b8/03/58ec7d302b8d86944c60c7b4b82975d5161fcce4c9bc8c6cb1d6741b6115/orjson-3.11.7-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:7ba61079379b0ae29e117db13bda5f28d939766e410d321ec1624afc6a0b0504", size = 408918, upload-time = "2026-02-02T15:37:38.076Z" }, + { url = "https://files.pythonhosted.org/packages/06/3a/868d65ef9a8b99be723bd510de491349618abd9f62c826cf206d962db295/orjson-3.11.7-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:0527a4510c300e3b406591b0ba69b5dc50031895b0a93743526a3fc45f59d26e", size = 143998, upload-time = "2026-02-02T15:37:39.706Z" }, + { url = "https://files.pythonhosted.org/packages/5b/c7/1e18e1c83afe3349f4f6dc9e14910f0ae5f82eac756d1412ea4018938535/orjson-3.11.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a709e881723c9b18acddcfb8ba357322491ad553e277cf467e1e7e20e2d90561", size = 134802, upload-time = "2026-02-02T15:37:41.002Z" }, + { url = "https://files.pythonhosted.org/packages/d4/0b/ccb7ee1a65b37e8eeb8b267dc953561d72370e85185e459616d4345bab34/orjson-3.11.7-cp311-cp311-win32.whl", hash = "sha256:c43b8b5bab288b6b90dac410cca7e986a4fa747a2e8f94615aea407da706980d", size = 127828, upload-time = "2026-02-02T15:37:42.241Z" }, + { url = 
"https://files.pythonhosted.org/packages/af/9e/55c776dffda3f381e0f07d010a4f5f3902bf48eaba1bb7684d301acd4924/orjson-3.11.7-cp311-cp311-win_amd64.whl", hash = "sha256:6543001328aa857187f905308a028935864aefe9968af3848401b6fe80dbb471", size = 124941, upload-time = "2026-02-02T15:37:43.444Z" }, + { url = "https://files.pythonhosted.org/packages/aa/8e/424a620fa7d263b880162505fb107ef5e0afaa765b5b06a88312ac291560/orjson-3.11.7-cp311-cp311-win_arm64.whl", hash = "sha256:1ee5cc7160a821dfe14f130bc8e63e7611051f964b463d9e2a3a573204446a4d", size = 126245, upload-time = "2026-02-02T15:37:45.18Z" }, + { url = "https://files.pythonhosted.org/packages/80/bf/76f4f1665f6983385938f0e2a5d7efa12a58171b8456c252f3bae8a4cf75/orjson-3.11.7-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:bd03ea7606833655048dab1a00734a2875e3e86c276e1d772b2a02556f0d895f", size = 228545, upload-time = "2026-02-02T15:37:46.376Z" }, + { url = "https://files.pythonhosted.org/packages/79/53/6c72c002cb13b5a978a068add59b25a8bdf2800ac1c9c8ecdb26d6d97064/orjson-3.11.7-cp312-cp312-macosx_15_0_arm64.whl", hash = "sha256:89e440ebc74ce8ab5c7bc4ce6757b4a6b1041becb127df818f6997b5c71aa60b", size = 125224, upload-time = "2026-02-02T15:37:47.697Z" }, + { url = "https://files.pythonhosted.org/packages/2c/83/10e48852865e5dd151bdfe652c06f7da484578ed02c5fca938e3632cb0b8/orjson-3.11.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ede977b5fe5ac91b1dffc0a517ca4542d2ec8a6a4ff7b2652d94f640796342a", size = 128154, upload-time = "2026-02-02T15:37:48.954Z" }, + { url = "https://files.pythonhosted.org/packages/6e/52/a66e22a2b9abaa374b4a081d410edab6d1e30024707b87eab7c734afe28d/orjson-3.11.7-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b7b1dae39230a393df353827c855a5f176271c23434cfd2db74e0e424e693e10", size = 123548, upload-time = "2026-02-02T15:37:50.187Z" }, + { url = 
"https://files.pythonhosted.org/packages/de/38/605d371417021359f4910c496f764c48ceb8997605f8c25bf1dfe58c0ebe/orjson-3.11.7-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ed46f17096e28fb28d2975834836a639af7278aa87c84f68ab08fbe5b8bd75fa", size = 129000, upload-time = "2026-02-02T15:37:51.426Z" }, + { url = "https://files.pythonhosted.org/packages/44/98/af32e842b0ffd2335c89714d48ca4e3917b42f5d6ee5537832e069a4b3ac/orjson-3.11.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3726be79e36e526e3d9c1aceaadbfb4a04ee80a72ab47b3f3c17fefb9812e7b8", size = 141686, upload-time = "2026-02-02T15:37:52.607Z" }, + { url = "https://files.pythonhosted.org/packages/96/0b/fc793858dfa54be6feee940c1463370ece34b3c39c1ca0aa3845f5ba9892/orjson-3.11.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0724e265bc548af1dedebd9cb3d24b4e1c1e685a343be43e87ba922a5c5fff2f", size = 130812, upload-time = "2026-02-02T15:37:53.944Z" }, + { url = "https://files.pythonhosted.org/packages/dc/91/98a52415059db3f374757d0b7f0f16e3b5cd5976c90d1c2b56acaea039e6/orjson-3.11.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7745312efa9e11c17fbd3cb3097262d079da26930ae9ae7ba28fb738367cbad", size = 133440, upload-time = "2026-02-02T15:37:55.615Z" }, + { url = "https://files.pythonhosted.org/packages/dc/b6/cb540117bda61791f46381f8c26c8f93e802892830a6055748d3bb1925ab/orjson-3.11.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f904c24bdeabd4298f7a977ef14ca2a022ca921ed670b92ecd16ab6f3d01f867", size = 138386, upload-time = "2026-02-02T15:37:56.814Z" }, + { url = "https://files.pythonhosted.org/packages/63/1a/50a3201c334a7f17c231eee5f841342190723794e3b06293f26e7cf87d31/orjson-3.11.7-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:b9fc4d0f81f394689e0814617aadc4f2ea0e8025f38c226cbf22d3b5ddbf025d", size = 408853, upload-time = "2026-02-02T15:37:58.291Z" }, + { url = 
"https://files.pythonhosted.org/packages/87/cd/8de1c67d0be44fdc22701e5989c0d015a2adf391498ad42c4dc589cd3013/orjson-3.11.7-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:849e38203e5be40b776ed2718e587faf204d184fc9a008ae441f9442320c0cab", size = 144130, upload-time = "2026-02-02T15:38:00.163Z" }, + { url = "https://files.pythonhosted.org/packages/0f/fe/d605d700c35dd55f51710d159fc54516a280923cd1b7e47508982fbb387d/orjson-3.11.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4682d1db3bcebd2b64757e0ddf9e87ae5f00d29d16c5cdf3a62f561d08cc3dd2", size = 134818, upload-time = "2026-02-02T15:38:01.507Z" }, + { url = "https://files.pythonhosted.org/packages/e4/e4/15ecc67edb3ddb3e2f46ae04475f2d294e8b60c1825fbe28a428b93b3fbd/orjson-3.11.7-cp312-cp312-win32.whl", hash = "sha256:f4f7c956b5215d949a1f65334cf9d7612dde38f20a95f2315deef167def91a6f", size = 127923, upload-time = "2026-02-02T15:38:02.75Z" }, + { url = "https://files.pythonhosted.org/packages/34/70/2e0855361f76198a3965273048c8e50a9695d88cd75811a5b46444895845/orjson-3.11.7-cp312-cp312-win_amd64.whl", hash = "sha256:bf742e149121dc5648ba0a08ea0871e87b660467ef168a3a5e53bc1fbd64bb74", size = 125007, upload-time = "2026-02-02T15:38:04.032Z" }, + { url = "https://files.pythonhosted.org/packages/68/40/c2051bd19fc467610fed469dc29e43ac65891571138f476834ca192bc290/orjson-3.11.7-cp312-cp312-win_arm64.whl", hash = "sha256:26c3b9132f783b7d7903bf1efb095fed8d4a3a85ec0d334ee8beff3d7a4749d5", size = 126089, upload-time = "2026-02-02T15:38:05.297Z" }, + { url = "https://files.pythonhosted.org/packages/89/25/6e0e52cac5aab51d7b6dcd257e855e1dec1c2060f6b28566c509b4665f62/orjson-3.11.7-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:1d98b30cc1313d52d4af17d9c3d307b08389752ec5f2e5febdfada70b0f8c733", size = 228390, upload-time = "2026-02-02T15:38:06.8Z" }, + { url = 
"https://files.pythonhosted.org/packages/a5/29/a77f48d2fc8a05bbc529e5ff481fb43d914f9e383ea2469d4f3d51df3d00/orjson-3.11.7-cp313-cp313-macosx_15_0_arm64.whl", hash = "sha256:d897e81f8d0cbd2abb82226d1860ad2e1ab3ff16d7b08c96ca00df9d45409ef4", size = 125189, upload-time = "2026-02-02T15:38:08.181Z" }, + { url = "https://files.pythonhosted.org/packages/89/25/0a16e0729a0e6a1504f9d1a13cdd365f030068aab64cec6958396b9969d7/orjson-3.11.7-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:814be4b49b228cfc0b3c565acf642dd7d13538f966e3ccde61f4f55be3e20785", size = 128106, upload-time = "2026-02-02T15:38:09.41Z" }, + { url = "https://files.pythonhosted.org/packages/66/da/a2e505469d60666a05ab373f1a6322eb671cb2ba3a0ccfc7d4bc97196787/orjson-3.11.7-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d06e5c5fed5caedd2e540d62e5b1c25e8c82431b9e577c33537e5fa4aa909539", size = 123363, upload-time = "2026-02-02T15:38:10.73Z" }, + { url = "https://files.pythonhosted.org/packages/23/bf/ed73f88396ea35c71b38961734ea4a4746f7ca0768bf28fd551d37e48dd0/orjson-3.11.7-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:31c80ce534ac4ea3739c5ee751270646cbc46e45aea7576a38ffec040b4029a1", size = 129007, upload-time = "2026-02-02T15:38:12.138Z" }, + { url = "https://files.pythonhosted.org/packages/73/3c/b05d80716f0225fc9008fbf8ab22841dcc268a626aa550561743714ce3bf/orjson-3.11.7-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f50979824bde13d32b4320eedd513431c921102796d86be3eee0b58e58a3ecd1", size = 141667, upload-time = "2026-02-02T15:38:13.398Z" }, + { url = "https://files.pythonhosted.org/packages/61/e8/0be9b0addd9bf86abfc938e97441dcd0375d494594b1c8ad10fe57479617/orjson-3.11.7-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9e54f3808e2b6b945078c41aa8d9b5834b28c50843846e97807e5adb75fa9705", size = 130832, upload-time = "2026-02-02T15:38:14.698Z" }, + { url = 
"https://files.pythonhosted.org/packages/c9/ec/c68e3b9021a31d9ec15a94931db1410136af862955854ed5dd7e7e4f5bff/orjson-3.11.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a12b80df61aab7b98b490fe9e4879925ba666fccdfcd175252ce4d9035865ace", size = 133373, upload-time = "2026-02-02T15:38:16.109Z" }, + { url = "https://files.pythonhosted.org/packages/d2/45/f3466739aaafa570cc8e77c6dbb853c48bf56e3b43738020e2661e08b0ac/orjson-3.11.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:996b65230271f1a97026fd0e6a753f51fbc0c335d2ad0c6201f711b0da32693b", size = 138307, upload-time = "2026-02-02T15:38:17.453Z" }, + { url = "https://files.pythonhosted.org/packages/e1/84/9f7f02288da1ffb31405c1be07657afd1eecbcb4b64ee2817b6fe0f785fa/orjson-3.11.7-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:ab49d4b2a6a1d415ddb9f37a21e02e0d5dbfe10b7870b21bf779fc21e9156157", size = 408695, upload-time = "2026-02-02T15:38:18.831Z" }, + { url = "https://files.pythonhosted.org/packages/18/07/9dd2f0c0104f1a0295ffbe912bc8d63307a539b900dd9e2c48ef7810d971/orjson-3.11.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:390a1dce0c055ddf8adb6aa94a73b45a4a7d7177b5c584b8d1c1947f2ba60fb3", size = 144099, upload-time = "2026-02-02T15:38:20.28Z" }, + { url = "https://files.pythonhosted.org/packages/a5/66/857a8e4a3292e1f7b1b202883bcdeb43a91566cf59a93f97c53b44bd6801/orjson-3.11.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:1eb80451a9c351a71dfaf5b7ccc13ad065405217726b59fdbeadbcc544f9d223", size = 134806, upload-time = "2026-02-02T15:38:22.186Z" }, + { url = "https://files.pythonhosted.org/packages/0a/5b/6ebcf3defc1aab3a338ca777214966851e92efb1f30dc7fc8285216e6d1b/orjson-3.11.7-cp313-cp313-win32.whl", hash = "sha256:7477aa6a6ec6139c5cb1cc7b214643592169a5494d200397c7fc95d740d5fcf3", size = 127914, upload-time = "2026-02-02T15:38:23.511Z" }, + { url = 
"https://files.pythonhosted.org/packages/00/04/c6f72daca5092e3117840a1b1e88dfc809cc1470cf0734890d0366b684a1/orjson-3.11.7-cp313-cp313-win_amd64.whl", hash = "sha256:b9f95dcdea9d4f805daa9ddf02617a89e484c6985fa03055459f90e87d7a0757", size = 124986, upload-time = "2026-02-02T15:38:24.836Z" }, + { url = "https://files.pythonhosted.org/packages/03/ba/077a0f6f1085d6b806937246860fafbd5b17f3919c70ee3f3d8d9c713f38/orjson-3.11.7-cp313-cp313-win_arm64.whl", hash = "sha256:800988273a014a0541483dc81021247d7eacb0c845a9d1a34a422bc718f41539", size = 126045, upload-time = "2026-02-02T15:38:26.216Z" }, + { url = "https://files.pythonhosted.org/packages/e9/1e/745565dca749813db9a093c5ebc4bac1a9475c64d54b95654336ac3ed961/orjson-3.11.7-cp314-cp314-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:de0a37f21d0d364954ad5de1970491d7fbd0fb1ef7417d4d56a36dc01ba0c0a0", size = 228391, upload-time = "2026-02-02T15:38:27.757Z" }, + { url = "https://files.pythonhosted.org/packages/46/19/e40f6225da4d3aa0c8dc6e5219c5e87c2063a560fe0d72a88deb59776794/orjson-3.11.7-cp314-cp314-macosx_15_0_arm64.whl", hash = "sha256:c2428d358d85e8da9d37cba18b8c4047c55222007a84f97156a5b22028dfbfc0", size = 125188, upload-time = "2026-02-02T15:38:29.241Z" }, + { url = "https://files.pythonhosted.org/packages/9d/7e/c4de2babef2c0817fd1f048fd176aa48c37bec8aef53d2fa932983032cce/orjson-3.11.7-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c4bc6c6ac52cdaa267552544c73e486fecbd710b7ac09bc024d5a78555a22f6", size = 128097, upload-time = "2026-02-02T15:38:30.618Z" }, + { url = "https://files.pythonhosted.org/packages/eb/74/233d360632bafd2197f217eee7fb9c9d0229eac0c18128aee5b35b0014fe/orjson-3.11.7-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bd0d68edd7dfca1b2eca9361a44ac9f24b078de3481003159929a0573f21a6bf", size = 123364, upload-time = "2026-02-02T15:38:32.363Z" }, + { url = 
"https://files.pythonhosted.org/packages/79/51/af79504981dd31efe20a9e360eb49c15f06df2b40e7f25a0a52d9ae888e8/orjson-3.11.7-cp314-cp314-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:623ad1b9548ef63886319c16fa317848e465a21513b31a6ad7b57443c3e0dcf5", size = 129076, upload-time = "2026-02-02T15:38:33.68Z" }, + { url = "https://files.pythonhosted.org/packages/67/e2/da898eb68b72304f8de05ca6715870d09d603ee98d30a27e8a9629abc64b/orjson-3.11.7-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6e776b998ac37c0396093d10290e60283f59cfe0fc3fccbd0ccc4bd04dd19892", size = 141705, upload-time = "2026-02-02T15:38:34.989Z" }, + { url = "https://files.pythonhosted.org/packages/c5/89/15364d92acb3d903b029e28d834edb8780c2b97404cbf7929aa6b9abdb24/orjson-3.11.7-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:652c6c3af76716f4a9c290371ba2e390ede06f6603edb277b481daf37f6f464e", size = 130855, upload-time = "2026-02-02T15:38:36.379Z" }, + { url = "https://files.pythonhosted.org/packages/c2/8b/ecdad52d0b38d4b8f514be603e69ccd5eacf4e7241f972e37e79792212ec/orjson-3.11.7-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a56df3239294ea5964adf074c54bcc4f0ccd21636049a2cf3ca9cf03b5d03cf1", size = 133386, upload-time = "2026-02-02T15:38:37.704Z" }, + { url = "https://files.pythonhosted.org/packages/b9/0e/45e1dcf10e17d0924b7c9162f87ec7b4ca79e28a0548acf6a71788d3e108/orjson-3.11.7-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:bda117c4148e81f746655d5a3239ae9bd00cb7bc3ca178b5fc5a5997e9744183", size = 138295, upload-time = "2026-02-02T15:38:39.096Z" }, + { url = "https://files.pythonhosted.org/packages/63/d7/4d2e8b03561257af0450f2845b91fbd111d7e526ccdf737267108075e0ba/orjson-3.11.7-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:23d6c20517a97a9daf1d48b580fcdc6f0516c6f4b5038823426033690b4d2650", size = 408720, upload-time = "2026-02-02T15:38:40.634Z" }, + { url = 
"https://files.pythonhosted.org/packages/78/cf/d45343518282108b29c12a65892445fc51f9319dc3c552ceb51bb5905ed2/orjson-3.11.7-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:8ff206156006da5b847c9304b6308a01e8cdbc8cce824e2779a5ba71c3def141", size = 144152, upload-time = "2026-02-02T15:38:42.262Z" }, + { url = "https://files.pythonhosted.org/packages/a9/3a/d6001f51a7275aacd342e77b735c71fa04125a3f93c36fee4526bc8c654e/orjson-3.11.7-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:962d046ee1765f74a1da723f4b33e3b228fe3a48bd307acce5021dfefe0e29b2", size = 134814, upload-time = "2026-02-02T15:38:43.627Z" }, + { url = "https://files.pythonhosted.org/packages/1d/d3/f19b47ce16820cc2c480f7f1723e17f6d411b3a295c60c8ad3aa9ff1c96a/orjson-3.11.7-cp314-cp314-win32.whl", hash = "sha256:89e13dd3f89f1c38a9c9eba5fbf7cdc2d1feca82f5f290864b4b7a6aac704576", size = 127997, upload-time = "2026-02-02T15:38:45.06Z" }, + { url = "https://files.pythonhosted.org/packages/12/df/172771902943af54bf661a8d102bdf2e7f932127968080632bda6054b62c/orjson-3.11.7-cp314-cp314-win_amd64.whl", hash = "sha256:845c3e0d8ded9c9271cd79596b9b552448b885b97110f628fb687aee2eed11c1", size = 124985, upload-time = "2026-02-02T15:38:46.388Z" }, + { url = "https://files.pythonhosted.org/packages/6f/1c/f2a8d8a1b17514660a614ce5f7aac74b934e69f5abc2700cc7ced882a009/orjson-3.11.7-cp314-cp314-win_arm64.whl", hash = "sha256:4a2e9c5be347b937a2e0203866f12bba36082e89b402ddb9e927d5822e43088d", size = 126038, upload-time = "2026-02-02T15:38:47.703Z" }, ] [[package]] @@ -4424,7 +4432,7 @@ resolution-markers = [ "python_full_version == '3.11.*' and sys_platform == 'win32'", ] dependencies = [ - { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "numpy", version = "2.4.2", source = { 
registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, { name = "python-dateutil", marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, { name = "tzdata", marker = "python_full_version >= '3.11' and sys_platform == 'win32'" }, ] @@ -4597,11 +4605,11 @@ wheels = [ [[package]] name = "pip" -version = "25.3" +version = "26.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/fe/6e/74a3f0179a4a73a53d66ce57fdb4de0080a8baa1de0063de206d6167acc2/pip-25.3.tar.gz", hash = "sha256:8d0538dbbd7babbd207f261ed969c65de439f6bc9e5dbd3b3b9a77f25d95f343", size = 1803014, upload-time = "2025-10-25T00:55:41.394Z" } +sdist = { url = "https://files.pythonhosted.org/packages/44/c2/65686a7783a7c27a329706207147e82f23c41221ee9ae33128fc331670a0/pip-26.0.tar.gz", hash = "sha256:3ce220a0a17915972fbf1ab451baae1521c4539e778b28127efa79b974aff0fa", size = 1812654, upload-time = "2026-01-31T01:40:54.361Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/44/3c/d717024885424591d5376220b5e836c2d5293ce2011523c9de23ff7bf068/pip-25.3-py3-none-any.whl", hash = "sha256:9655943313a94722b7774661c21049070f6bbb0a1516bf02f7c8d5d9201514cd", size = 1778622, upload-time = "2025-10-25T00:55:39.247Z" }, + { url = "https://files.pythonhosted.org/packages/69/00/5ac7aa77688ec4d34148b423d34dc0c9bc4febe0d872a9a1ad9860b2f6f1/pip-26.0-py3-none-any.whl", hash = "sha256:98436feffb9e31bc9339cf369fd55d3331b1580b6a6f1173bacacddcf9c34754", size = 1787564, upload-time = "2026-01-31T01:40:52.252Z" }, ] [[package]] @@ -4660,30 +4668,30 @@ wheels = [ [[package]] name = "polars" -version = "1.37.1" +version = "1.38.0" source = { 
registry = "https://pypi.org/simple" } dependencies = [ { name = "polars-runtime-32", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/84/ae/dfebf31b9988c20998140b54d5b521f64ce08879f2c13d9b4d44d7c87e32/polars-1.37.1.tar.gz", hash = "sha256:0309e2a4633e712513401964b4d95452f124ceabf7aec6db50affb9ced4a274e", size = 715572, upload-time = "2026-01-12T23:27:03.267Z" } +sdist = { url = "https://files.pythonhosted.org/packages/e1/56/bce1c1244431b0ebc4e5d413fdbcf7f85ec30fc98595fcfb7328a869d794/polars-1.38.0.tar.gz", hash = "sha256:4dee569944c613d8c621eb709e452354e1570bd3d47ccb2d3d36681fb1bd2cf6", size = 717801, upload-time = "2026-02-04T12:00:34.246Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/08/75/ec73e38812bca7c2240aff481b9ddff20d1ad2f10dee4b3353f5eeaacdab/polars-1.37.1-py3-none-any.whl", hash = "sha256:377fed8939a2f1223c1563cfabdc7b4a3d6ff846efa1f2ddeb8644fafd9b1aff", size = 805749, upload-time = "2026-01-12T23:25:48.595Z" }, + { url = "https://files.pythonhosted.org/packages/c3/47/61e7a47f77e321aa1cbf4141cc60df9d6e63b9f469c5525226535552a04c/polars-1.38.0-py3-none-any.whl", hash = "sha256:d7a31b47da8c9522aa38908c46ac72eab8eaf0c992e024f9c95fedba4cbe7759", size = 810116, upload-time = "2026-02-04T11:59:21.425Z" }, ] [[package]] name = "polars-runtime-32" -version = "1.37.1" +version = "1.38.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/40/0b/addabe5e8d28a5a4c9887a08907be7ddc3fce892dc38f37d14b055438a57/polars_runtime_32-1.37.1.tar.gz", hash = "sha256:68779d4a691da20a5eb767d74165a8f80a2bdfbde4b54acf59af43f7fa028d8f", size = 2818945, upload-time = "2026-01-12T23:27:04.653Z" } +sdist = { url = "https://files.pythonhosted.org/packages/8c/8d/8f5764d722ad16ddb1b6db997aca7a41110dad446000ee2e3f8f48503f0e/polars_runtime_32-1.38.0.tar.gz", hash = 
"sha256:69ba986bff34f70d7eab931005e5d81dd4dc6c5c12e3532a4bd0fc7022671692", size = 2812354, upload-time = "2026-02-04T12:00:36.041Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/2a/a2/e828ea9f845796de02d923edb790e408ca0b560cd68dbd74bb99a1b3c461/polars_runtime_32-1.37.1-cp310-abi3-macosx_10_12_x86_64.whl", hash = "sha256:0b8d4d73ea9977d3731927740e59d814647c5198bdbe359bcf6a8bfce2e79771", size = 43499912, upload-time = "2026-01-12T23:25:51.182Z" }, - { url = "https://files.pythonhosted.org/packages/7e/46/81b71b7aa9e3703ee6e4ef1f69a87e40f58ea7c99212bf49a95071e99c8c/polars_runtime_32-1.37.1-cp310-abi3-macosx_11_0_arm64.whl", hash = "sha256:c682bf83f5f352e5e02f5c16c652c48ca40442f07b236f30662b22217320ce76", size = 39695707, upload-time = "2026-01-12T23:25:54.289Z" }, - { url = "https://files.pythonhosted.org/packages/81/2e/20009d1fde7ee919e24040f5c87cb9d0e4f8e3f109b74ba06bc10c02459c/polars_runtime_32-1.37.1-cp310-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc82b5bbe70ca1a4b764eed1419f6336752d6ba9fc1245388d7f8b12438afa2c", size = 41467034, upload-time = "2026-01-12T23:25:56.925Z" }, - { url = "https://files.pythonhosted.org/packages/eb/21/9b55bea940524324625b1e8fd96233290303eb1bf2c23b54573487bbbc25/polars_runtime_32-1.37.1-cp310-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8362d11ac5193b994c7e9048ffe22ccfb976699cfbf6e128ce0302e06728894", size = 45142711, upload-time = "2026-01-12T23:26:00.817Z" }, - { url = "https://files.pythonhosted.org/packages/8c/25/c5f64461aeccdac6834a89f826d051ccd3b4ce204075e562c87a06ed2619/polars_runtime_32-1.37.1-cp310-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:04f5d5a2f013dca7391b7d8e7672fa6d37573a87f1d45d3dd5f0d9b5565a4b0f", size = 41638564, upload-time = "2026-01-12T23:26:04.186Z" }, - { url = "https://files.pythonhosted.org/packages/35/af/509d3cf6c45e764ccf856beaae26fc34352f16f10f94a7839b1042920a73/polars_runtime_32-1.37.1-cp310-abi3-musllinux_1_2_x86_64.whl", hash = 
"sha256:fbfde7c0ca8209eeaed546e4a32cca1319189aa61c5f0f9a2b4494262bd0c689", size = 44721136, upload-time = "2026-01-12T23:26:07.088Z" }, - { url = "https://files.pythonhosted.org/packages/af/d1/5c0a83a625f72beef59394bebc57d12637997632a4f9d3ab2ffc2cc62bbf/polars_runtime_32-1.37.1-cp310-abi3-win_amd64.whl", hash = "sha256:da3d3642ae944e18dd17109d2a3036cb94ce50e5495c5023c77b1599d4c861bc", size = 44948288, upload-time = "2026-01-12T23:26:10.214Z" }, - { url = "https://files.pythonhosted.org/packages/10/f3/061bb702465904b6502f7c9081daee34b09ccbaa4f8c94cf43a2a3b6dd6f/polars_runtime_32-1.37.1-cp310-abi3-win_arm64.whl", hash = "sha256:55f2c4847a8d2e267612f564de7b753a4bde3902eaabe7b436a0a4abf75949a0", size = 41001914, upload-time = "2026-01-12T23:26:12.997Z" }, + { url = "https://files.pythonhosted.org/packages/51/eb/a8981ec070dd9bea9569292f38b0268159e39f63f5376ffae27a0c7d2ee7/polars_runtime_32-1.38.0-cp310-abi3-macosx_10_12_x86_64.whl", hash = "sha256:03f43c10a419837b89a493e946090cdaee08ce50a8d1933f2e8ac3a6874d7db4", size = 44106460, upload-time = "2026-02-04T11:59:23.546Z" }, + { url = "https://files.pythonhosted.org/packages/64/de/c2a2037b2d658b91067647b99be43bc91af3a7b4868e32efcc118f383add/polars_runtime_32-1.38.0-cp310-abi3-macosx_11_0_arm64.whl", hash = "sha256:d664e53cba734e9fbed87d1c33078a13b5fc39b3e8790318fc65fa78954ea2d0", size = 40228076, upload-time = "2026-02-04T11:59:26.497Z" }, + { url = "https://files.pythonhosted.org/packages/4a/0f/9204210e7d05b3953813bb09627585c161221f512f2672b31065a02f4727/polars_runtime_32-1.38.0-cp310-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c073c7b7e6e559769e10cdadbafce86d32b0709d5790de920081c6129acae507", size = 41988273, upload-time = "2026-02-04T11:59:29.01Z" }, + { url = "https://files.pythonhosted.org/packages/89/64/4c5dbb1c2d2c025f8e7c7e433bd343c4fc955ceadd087a7ad456de8668f8/polars_runtime_32-1.38.0-cp310-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:8806ddb684b17ae8b0bcb91d8d5ba361b04b0a31d77ce7f861d16b47734b3012", size = 45749469, upload-time = "2026-02-04T11:59:32.292Z" }, + { url = "https://files.pythonhosted.org/packages/d7/f8/da2d324d686b1fc438dfb721677fb44f7f5aab6ae0d1fa5b281e986fde82/polars_runtime_32-1.38.0-cp310-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:c7b41163189bd3305fe2307e66fe478b35c4faa467777d74c32b70b52292039b", size = 42159740, upload-time = "2026-02-04T11:59:35.608Z" }, + { url = "https://files.pythonhosted.org/packages/37/88/fe02e4450e9b582ea6f1a7490921208a9c3a0a1efdf976aadbaa4cae73bb/polars_runtime_32-1.38.0-cp310-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:e944f924a99750909299fa701edb07a63a5988e5ee58d673993f3d9147a22276", size = 45327635, upload-time = "2026-02-04T11:59:38.28Z" }, + { url = "https://files.pythonhosted.org/packages/68/db/9bb8007a4bea76b476537740ed18c8bccd809faa390ca1443134e98f8b60/polars_runtime_32-1.38.0-cp310-abi3-win_amd64.whl", hash = "sha256:46fbfb4ee6f8e1914dc0babfb6a138ead552db05a2d9e531c1fb19411b1a6744", size = 45670197, upload-time = "2026-02-04T11:59:41.297Z" }, + { url = "https://files.pythonhosted.org/packages/58/78/28f793ec2e1cff72c0ced1bc9186c9b4dbfe44ca8316df11b2aa8039764c/polars_runtime_32-1.38.0-cp310-abi3-win_arm64.whl", hash = "sha256:ed0e6d7a546de9179e5715bffe9d3b94ba658d5655bbbf44943e138e061dcc90", size = 41637784, upload-time = "2026-02-04T11:59:44.396Z" }, ] [[package]] @@ -4700,7 +4708,7 @@ wheels = [ [[package]] name = "posthog" -version = "7.7.0" +version = "7.8.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "backoff", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -4710,9 +4718,9 @@ dependencies = [ { name = "six", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "typing-extensions", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/23/dd/ca6d5a79614af27ededc0dca85e77f42f7704e29f8314819d7ce92b9a7f3/posthog-7.7.0.tar.gz", hash = "sha256:b4f2b1a616e099961f6ab61a5a2f88de62082c26801699e556927d21c00737ef", size = 160766, upload-time = "2026-01-27T21:15:41.63Z" } +sdist = { url = "https://files.pythonhosted.org/packages/9c/5c/35edae017d92b2f7625a2b3be45dc36c8e6e14acbe5dbeeaa5a20a932ccf/posthog-7.8.2.tar.gz", hash = "sha256:d36472763750d8da60ebc3cbf6349a91222ba6a43dfdbdcdb6a9f03796514239", size = 166995, upload-time = "2026-02-04T15:10:31.251Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/41/3f/41b426ed9ab161d630edec84bacb6664ae62b6e63af1165919c7e11c17d1/posthog-7.7.0-py3-none-any.whl", hash = "sha256:955f42097bf147459653b9102e5f7f9a22e4b6fc9f15003447bd1137fafbc505", size = 185353, upload-time = "2026-01-27T21:15:40.051Z" }, + { url = "https://files.pythonhosted.org/packages/53/d9/8f2374c559a6e50d2e92601b42540aae296f6e0a2066e913fed8bd603f23/posthog-7.8.2-py3-none-any.whl", hash = "sha256:d3fa69f7e15830a8e19cd4de4e7b40982838efa5d0f448133be3115bd556feef", size = 192440, upload-time = "2026-02-04T15:10:29.767Z" }, ] [[package]] @@ -4720,8 +4728,8 @@ name = "powerfx" version = "0.0.34" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "cffi", marker = "(python_full_version < '3.14' and sys_platform == 'darwin') or (python_full_version < '3.14' and sys_platform == 'linux') or (python_full_version < '3.14' and sys_platform == 'win32')" }, - { name = "pythonnet", marker = "(python_full_version < '3.14' and sys_platform == 'darwin') or (python_full_version < '3.14' and sys_platform == 'linux') or (python_full_version < '3.14' and sys_platform == 'win32')" }, + { name = "cffi", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "pythonnet", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] sdist = { url = 
"https://files.pythonhosted.org/packages/9f/fb/6c4bf87e0c74ca1c563921ce89ca1c5785b7576bca932f7255cdf81082a7/powerfx-0.0.34.tar.gz", hash = "sha256:956992e7afd272657ed16d80f4cad24ec95d9e4a79fb9dfa4a068a09e136af32", size = 3237555, upload-time = "2025-12-22T15:50:59.682Z" } wheels = [ @@ -4860,14 +4868,14 @@ wheels = [ [[package]] name = "proto-plus" -version = "1.27.0" +version = "1.27.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "protobuf", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/01/89/9cbe2f4bba860e149108b683bc2efec21f14d5f7ed6e25562ad86acbc373/proto_plus-1.27.0.tar.gz", hash = "sha256:873af56dd0d7e91836aee871e5799e1c6f1bda86ac9a983e0bb9f0c266a568c4", size = 56158, upload-time = "2025-12-16T13:46:25.729Z" } +sdist = { url = "https://files.pythonhosted.org/packages/3a/02/8832cde80e7380c600fbf55090b6ab7b62bd6825dbedde6d6657c15a1f8e/proto_plus-1.27.1.tar.gz", hash = "sha256:912a7460446625b792f6448bade9e55cd4e41e6ac10e27009ef71a7f317fa147", size = 56929, upload-time = "2026-02-02T17:34:49.035Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/cd/24/3b7a0818484df9c28172857af32c2397b6d8fcd99d9468bd4684f98ebf0a/proto_plus-1.27.0-py3-none-any.whl", hash = "sha256:1baa7f81cf0f8acb8bc1f6d085008ba4171eaf669629d1b6d1673b21ed1c0a82", size = 50205, upload-time = "2025-12-16T13:46:24.76Z" }, + { url = "https://files.pythonhosted.org/packages/5d/79/ac273cbbf744691821a9cca88957257f41afe271637794975ca090b9588b/proto_plus-1.27.1-py3-none-any.whl", hash = "sha256:e4643061f3a4d0de092d62aa4ad09fa4756b2cbb89d4627f3985018216f9fefc", size = 50480, upload-time = "2026-02-02T17:34:47.339Z" }, ] [[package]] @@ -5174,11 +5182,11 @@ wheels = [ [[package]] name = "pyjwt" -version = "2.10.1" +version = "2.11.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/e7/46/bd74733ff231675599650d3e47f361794b22ef3e3770998dda30d3b63726/pyjwt-2.10.1.tar.gz", hash = "sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953", size = 87785, upload-time = "2024-11-28T03:43:29.933Z" } +sdist = { url = "https://files.pythonhosted.org/packages/5c/5a/b46fa56bf322901eee5b0454a34343cdbdae202cd421775a8ee4e42fd519/pyjwt-2.11.0.tar.gz", hash = "sha256:35f95c1f0fbe5d5ba6e43f00271c275f7a1a4db1dab27bf708073b75318ea623", size = 98019, upload-time = "2026-01-30T19:59:55.694Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/61/ad/689f02752eeec26aed679477e80e632ef1b682313be70793d798c1d5fc8f/PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb", size = 22997, upload-time = "2024-11-28T03:43:27.893Z" }, + { url = "https://files.pythonhosted.org/packages/6f/01/c26ce75ba460d5cd503da9e13b21a33804d38c2165dec7b716d06b13010c/pyjwt-2.11.0-py3-none-any.whl", hash = "sha256:94a6bde30eb5c8e04fee991062b534071fd1439ef58d2adc9ccb823e7bcd0469", size = 28224, upload-time = "2026-01-30T19:59:54.539Z" }, ] [package.optional-dependencies] @@ -5388,7 +5396,7 @@ name = "pythonnet" version = "3.0.5" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "clr-loader", marker = "(python_full_version < '3.14' and sys_platform == 'darwin') or (python_full_version < '3.14' and sys_platform == 'linux') or (python_full_version < '3.14' and sys_platform == 'win32')" }, + { name = "clr-loader", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/9a/d6/1afd75edd932306ae9bd2c2d961d603dc2b52fcec51b04afea464f1f6646/pythonnet-3.0.5.tar.gz", hash = "sha256:48e43ca463941b3608b32b4e236db92d8d40db4c58a75ace902985f76dac21cf", size = 239212, upload-time = "2024-12-13T08:30:44.393Z" } wheels = [ @@ -5499,7 +5507,7 @@ dependencies = [ { name = 
"grpcio", version = "1.76.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.14' and sys_platform == 'darwin') or (python_full_version >= '3.14' and sys_platform == 'linux') or (python_full_version >= '3.14' and sys_platform == 'win32')" }, { name = "httpx", extra = ["http2"], marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, - { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "numpy", version = "2.4.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, { name = "portalocker", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "protobuf", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "pydantic", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -5530,7 +5538,7 @@ dependencies = [ { name = "jsonpath-ng", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "ml-dtypes", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = 
"(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, - { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "numpy", version = "2.4.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, { name = "pydantic", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "python-ulid", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "pyyaml", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -5855,28 +5863,27 @@ wheels = [ [[package]] name = "ruff" -version = "0.14.14" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/2e/06/f71e3a86b2df0dfa2d2f72195941cd09b44f87711cb7fa5193732cb9a5fc/ruff-0.14.14.tar.gz", hash = "sha256:2d0f819c9a90205f3a867dbbd0be083bee9912e170fd7d9704cc8ae45824896b", size = 4515732, upload-time = "2026-01-22T22:30:17.527Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d2/89/20a12e97bc6b9f9f68343952da08a8099c57237aef953a56b82711d55edd/ruff-0.14.14-py3-none-linux_armv6l.whl", hash = "sha256:7cfe36b56e8489dee8fbc777c61959f60ec0f1f11817e8f2415f429552846aed", size = 10467650, upload-time = "2026-01-22T22:30:08.578Z" }, - { url = 
"https://files.pythonhosted.org/packages/a3/b1/c5de3fd2d5a831fcae21beda5e3589c0ba67eec8202e992388e4b17a6040/ruff-0.14.14-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6006a0082336e7920b9573ef8a7f52eec837add1265cc74e04ea8a4368cd704c", size = 10883245, upload-time = "2026-01-22T22:30:04.155Z" }, - { url = "https://files.pythonhosted.org/packages/b8/7c/3c1db59a10e7490f8f6f8559d1db8636cbb13dccebf18686f4e3c9d7c772/ruff-0.14.14-py3-none-macosx_11_0_arm64.whl", hash = "sha256:026c1d25996818f0bf498636686199d9bd0d9d6341c9c2c3b62e2a0198b758de", size = 10231273, upload-time = "2026-01-22T22:30:34.642Z" }, - { url = "https://files.pythonhosted.org/packages/a1/6e/5e0e0d9674be0f8581d1f5e0f0a04761203affce3232c1a1189d0e3b4dad/ruff-0.14.14-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f666445819d31210b71e0a6d1c01e24447a20b85458eea25a25fe8142210ae0e", size = 10585753, upload-time = "2026-01-22T22:30:31.781Z" }, - { url = "https://files.pythonhosted.org/packages/23/09/754ab09f46ff1884d422dc26d59ba18b4e5d355be147721bb2518aa2a014/ruff-0.14.14-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3c0f18b922c6d2ff9a5e6c3ee16259adc513ca775bcf82c67ebab7cbd9da5bc8", size = 10286052, upload-time = "2026-01-22T22:30:24.827Z" }, - { url = "https://files.pythonhosted.org/packages/c8/cc/e71f88dd2a12afb5f50733851729d6b571a7c3a35bfdb16c3035132675a0/ruff-0.14.14-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1629e67489c2dea43e8658c3dba659edbfd87361624b4040d1df04c9740ae906", size = 11043637, upload-time = "2026-01-22T22:30:13.239Z" }, - { url = "https://files.pythonhosted.org/packages/67/b2/397245026352494497dac935d7f00f1468c03a23a0c5db6ad8fc49ca3fb2/ruff-0.14.14-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:27493a2131ea0f899057d49d303e4292b2cae2bb57253c1ed1f256fbcd1da480", size = 12194761, upload-time = "2026-01-22T22:30:22.542Z" }, - { url = 
"https://files.pythonhosted.org/packages/5b/06/06ef271459f778323112c51b7587ce85230785cd64e91772034ddb88f200/ruff-0.14.14-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:01ff589aab3f5b539e35db38425da31a57521efd1e4ad1ae08fc34dbe30bd7df", size = 12005701, upload-time = "2026-01-22T22:30:20.499Z" }, - { url = "https://files.pythonhosted.org/packages/41/d6/99364514541cf811ccc5ac44362f88df66373e9fec1b9d1c4cc830593fe7/ruff-0.14.14-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1cc12d74eef0f29f51775f5b755913eb523546b88e2d733e1d701fe65144e89b", size = 11282455, upload-time = "2026-01-22T22:29:59.679Z" }, - { url = "https://files.pythonhosted.org/packages/ca/71/37daa46f89475f8582b7762ecd2722492df26421714a33e72ccc9a84d7a5/ruff-0.14.14-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb8481604b7a9e75eff53772496201690ce2687067e038b3cc31aaf16aa0b974", size = 11215882, upload-time = "2026-01-22T22:29:57.032Z" }, - { url = "https://files.pythonhosted.org/packages/2c/10/a31f86169ec91c0705e618443ee74ede0bdd94da0a57b28e72db68b2dbac/ruff-0.14.14-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:14649acb1cf7b5d2d283ebd2f58d56b75836ed8c6f329664fa91cdea19e76e66", size = 11180549, upload-time = "2026-01-22T22:30:27.175Z" }, - { url = "https://files.pythonhosted.org/packages/fd/1e/c723f20536b5163adf79bdd10c5f093414293cdf567eed9bdb7b83940f3f/ruff-0.14.14-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:e8058d2145566510790eab4e2fad186002e288dec5e0d343a92fe7b0bc1b3e13", size = 10543416, upload-time = "2026-01-22T22:30:01.964Z" }, - { url = "https://files.pythonhosted.org/packages/3e/34/8a84cea7e42c2d94ba5bde1d7a4fae164d6318f13f933d92da6d7c2041ff/ruff-0.14.14-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:e651e977a79e4c758eb807f0481d673a67ffe53cfa92209781dfa3a996cf8412", size = 10285491, upload-time = "2026-01-22T22:30:29.51Z" }, - { url = 
"https://files.pythonhosted.org/packages/55/ef/b7c5ea0be82518906c978e365e56a77f8de7678c8bb6651ccfbdc178c29f/ruff-0.14.14-py3-none-musllinux_1_2_i686.whl", hash = "sha256:cc8b22da8d9d6fdd844a68ae937e2a0adf9b16514e9a97cc60355e2d4b219fc3", size = 10733525, upload-time = "2026-01-22T22:30:06.499Z" }, - { url = "https://files.pythonhosted.org/packages/6a/5b/aaf1dfbcc53a2811f6cc0a1759de24e4b03e02ba8762daabd9b6bd8c59e3/ruff-0.14.14-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:16bc890fb4cc9781bb05beb5ab4cd51be9e7cb376bf1dd3580512b24eb3fda2b", size = 11315626, upload-time = "2026-01-22T22:30:36.848Z" }, - { url = "https://files.pythonhosted.org/packages/2c/aa/9f89c719c467dfaf8ad799b9bae0df494513fb21d31a6059cb5870e57e74/ruff-0.14.14-py3-none-win32.whl", hash = "sha256:b530c191970b143375b6a68e6f743800b2b786bbcf03a7965b06c4bf04568167", size = 10502442, upload-time = "2026-01-22T22:30:38.93Z" }, - { url = "https://files.pythonhosted.org/packages/87/44/90fa543014c45560cae1fffc63ea059fb3575ee6e1cb654562197e5d16fb/ruff-0.14.14-py3-none-win_amd64.whl", hash = "sha256:3dde1435e6b6fe5b66506c1dff67a421d0b7f6488d466f651c07f4cab3bf20fd", size = 11630486, upload-time = "2026-01-22T22:30:10.852Z" }, - { url = "https://files.pythonhosted.org/packages/9e/6a/40fee331a52339926a92e17ae748827270b288a35ef4a15c9c8f2ec54715/ruff-0.14.14-py3-none-win_arm64.whl", hash = "sha256:56e6981a98b13a32236a72a8da421d7839221fa308b223b9283312312e5ac76c", size = 10920448, upload-time = "2026-01-22T22:30:15.417Z" }, +version = "0.15.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c8/39/5cee96809fbca590abea6b46c6d1c586b49663d1d2830a751cc8fc42c666/ruff-0.15.0.tar.gz", hash = "sha256:6bdea47cdbea30d40f8f8d7d69c0854ba7c15420ec75a26f463290949d7f7e9a", size = 4524893, upload-time = "2026-02-03T17:53:35.357Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/bc/88/3fd1b0aa4b6330d6aaa63a285bc96c9f71970351579152d231ed90914586/ruff-0.15.0-py3-none-linux_armv6l.whl", hash = "sha256:aac4ebaa612a82b23d45964586f24ae9bc23ca101919f5590bdb368d74ad5455", size = 10354332, upload-time = "2026-02-03T17:52:54.892Z" }, + { url = "https://files.pythonhosted.org/packages/72/f6/62e173fbb7eb75cc29fe2576a1e20f0a46f671a2587b5f604bfb0eaf5f6f/ruff-0.15.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:dcd4be7cc75cfbbca24a98d04d0b9b36a270d0833241f776b788d59f4142b14d", size = 10767189, upload-time = "2026-02-03T17:53:19.778Z" }, + { url = "https://files.pythonhosted.org/packages/99/e4/968ae17b676d1d2ff101d56dc69cf333e3a4c985e1ec23803df84fc7bf9e/ruff-0.15.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d747e3319b2bce179c7c1eaad3d884dc0a199b5f4d5187620530adf9105268ce", size = 10075384, upload-time = "2026-02-03T17:53:29.241Z" }, + { url = "https://files.pythonhosted.org/packages/a2/bf/9843c6044ab9e20af879c751487e61333ca79a2c8c3058b15722386b8cae/ruff-0.15.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:650bd9c56ae03102c51a5e4b554d74d825ff3abe4db22b90fd32d816c2e90621", size = 10481363, upload-time = "2026-02-03T17:52:43.332Z" }, + { url = "https://files.pythonhosted.org/packages/55/d9/4ada5ccf4cd1f532db1c8d44b6f664f2208d3d93acbeec18f82315e15193/ruff-0.15.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a6664b7eac559e3048223a2da77769c2f92b43a6dfd4720cef42654299a599c9", size = 10187736, upload-time = "2026-02-03T17:53:00.522Z" }, + { url = "https://files.pythonhosted.org/packages/86/e2/f25eaecd446af7bb132af0a1d5b135a62971a41f5366ff41d06d25e77a91/ruff-0.15.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f811f97b0f092b35320d1556f3353bf238763420ade5d9e62ebd2b73f2ff179", size = 10968415, upload-time = "2026-02-03T17:53:15.705Z" }, + { url = 
"https://files.pythonhosted.org/packages/e7/dc/f06a8558d06333bf79b497d29a50c3a673d9251214e0d7ec78f90b30aa79/ruff-0.15.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:761ec0a66680fab6454236635a39abaf14198818c8cdf691e036f4bc0f406b2d", size = 11809643, upload-time = "2026-02-03T17:53:23.031Z" }, + { url = "https://files.pythonhosted.org/packages/dd/45/0ece8db2c474ad7df13af3a6d50f76e22a09d078af63078f005057ca59eb/ruff-0.15.0-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:940f11c2604d317e797b289f4f9f3fa5555ffe4fb574b55ed006c3d9b6f0eb78", size = 11234787, upload-time = "2026-02-03T17:52:46.432Z" }, + { url = "https://files.pythonhosted.org/packages/8a/d9/0e3a81467a120fd265658d127db648e4d3acfe3e4f6f5d4ea79fac47e587/ruff-0.15.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bcbca3d40558789126da91d7ef9a7c87772ee107033db7191edefa34e2c7f1b4", size = 11112797, upload-time = "2026-02-03T17:52:49.274Z" }, + { url = "https://files.pythonhosted.org/packages/b2/cb/8c0b3b0c692683f8ff31351dfb6241047fa873a4481a76df4335a8bff716/ruff-0.15.0-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:9a121a96db1d75fa3eb39c4539e607f628920dd72ff1f7c5ee4f1b768ac62d6e", size = 11033133, upload-time = "2026-02-03T17:53:33.105Z" }, + { url = "https://files.pythonhosted.org/packages/f8/5e/23b87370cf0f9081a8c89a753e69a4e8778805b8802ccfe175cc410e50b9/ruff-0.15.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:5298d518e493061f2eabd4abd067c7e4fb89e2f63291c94332e35631c07c3662", size = 10442646, upload-time = "2026-02-03T17:53:06.278Z" }, + { url = "https://files.pythonhosted.org/packages/e1/9a/3c94de5ce642830167e6d00b5c75aacd73e6347b4c7fc6828699b150a5ee/ruff-0.15.0-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:afb6e603d6375ff0d6b0cee563fa21ab570fd15e65c852cb24922cef25050cf1", size = 10195750, upload-time = "2026-02-03T17:53:26.084Z" }, + { url = 
"https://files.pythonhosted.org/packages/30/15/e396325080d600b436acc970848d69df9c13977942fb62bb8722d729bee8/ruff-0.15.0-py3-none-musllinux_1_2_i686.whl", hash = "sha256:77e515f6b15f828b94dc17d2b4ace334c9ddb7d9468c54b2f9ed2b9c1593ef16", size = 10676120, upload-time = "2026-02-03T17:53:09.363Z" }, + { url = "https://files.pythonhosted.org/packages/8d/c9/229a23d52a2983de1ad0fb0ee37d36e0257e6f28bfd6b498ee2c76361874/ruff-0.15.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:6f6e80850a01eb13b3e42ee0ebdf6e4497151b48c35051aab51c101266d187a3", size = 11201636, upload-time = "2026-02-03T17:52:57.281Z" }, + { url = "https://files.pythonhosted.org/packages/6f/b0/69adf22f4e24f3677208adb715c578266842e6e6a3cc77483f48dd999ede/ruff-0.15.0-py3-none-win32.whl", hash = "sha256:238a717ef803e501b6d51e0bdd0d2c6e8513fe9eec14002445134d3907cd46c3", size = 10465945, upload-time = "2026-02-03T17:53:12.591Z" }, + { url = "https://files.pythonhosted.org/packages/51/ad/f813b6e2c97e9b4598be25e94a9147b9af7e60523b0cb5d94d307c15229d/ruff-0.15.0-py3-none-win_amd64.whl", hash = "sha256:dd5e4d3301dc01de614da3cdffc33d4b1b96fb89e45721f1598e5532ccf78b18", size = 11564657, upload-time = "2026-02-03T17:52:51.893Z" }, + { url = "https://files.pythonhosted.org/packages/f6/b0/2d823f6e77ebe560f4e397d078487e8d52c1516b331e3521bc75db4272ca/ruff-0.15.0-py3-none-win_arm64.whl", hash = "sha256:c480d632cc0ca3f0727acac8b7d053542d9e114a462a145d0b00e7cd658c515a", size = 10865753, upload-time = "2026-02-03T17:53:03.014Z" }, ] [[package]] @@ -5960,7 +5967,7 @@ resolution-markers = [ ] dependencies = [ { name = "joblib", marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, - { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and 
sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "numpy", version = "2.4.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, { name = "scipy", version = "1.17.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, { name = "threadpoolctl", marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, ] @@ -6084,7 +6091,7 @@ resolution-markers = [ "python_full_version == '3.11.*' and sys_platform == 'win32'", ] dependencies = [ - { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "numpy", version = "2.4.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/56/3e/9cca699f3486ce6bc12ff46dc2031f1ec8eb9ccc9a320fdaf925f1417426/scipy-1.17.0.tar.gz", hash = "sha256:2591060c8e648d8b96439e111ac41fd8342fdeff1876be2e19dea3fe8930454e", size = 30396830, upload-time = "2026-01-10T21:34:23.009Z" } wheels = [ @@ -6157,7 +6164,7 @@ source = { registry = "https://pypi.org/simple" } dependencies = 
[ { name = "matplotlib", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, - { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, + { name = "numpy", version = "2.4.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, { name = "pandas", version = "2.3.3", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, { name = "pandas", version = "3.0.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and sys_platform == 'darwin') or (python_full_version >= '3.11' and sys_platform == 'linux') or (python_full_version >= '3.11' and sys_platform == 'win32')" }, ] @@ -6723,14 +6730,14 @@ wheels = [ [[package]] name = "tqdm" -version = "4.67.1" +version = "4.67.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = 
"sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737, upload-time = "2024-11-24T20:12:22.481Z" } +sdist = { url = "https://files.pythonhosted.org/packages/09/a9/6ba95a270c6f1fbcd8dac228323f2777d886cb206987444e4bce66338dd4/tqdm-4.67.3.tar.gz", hash = "sha256:7d825f03f89244ef73f1d4ce193cb1774a8179fd96f31d7e1dcde62092b960bb", size = 169598, upload-time = "2026-02-03T17:35:53.048Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540, upload-time = "2024-11-24T20:12:19.698Z" }, + { url = "https://files.pythonhosted.org/packages/16/e1/3079a9ff9b8e11b846c6ac5c8b5bfb7ff225eee721825310c91b3b50304f/tqdm-4.67.3-py3-none-any.whl", hash = "sha256:ee1e4c0e59148062281c49d80b25b67771a127c85fc9676d3be5f243206826bf", size = 78374, upload-time = "2026-02-03T17:35:50.982Z" }, ] [[package]] @@ -6829,28 +6836,28 @@ wheels = [ [[package]] name = "uv" -version = "0.9.28" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/c2/7d/005ab1cab03ca928cef75b424284d14d62c5f18775cf8114a63f210a0c9c/uv-0.9.28.tar.gz", hash = "sha256:253c04b26fb40f74c56ead12ce83db3c018bdefde1fcd1a542bcb88fdca4189c", size = 3834456, upload-time = "2026-01-29T20:15:49.794Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/77/dc/e70698756f1bb74c88bf1eaea63a114a580a38f296ea1567a01db9007490/uv-0.9.28-py3-none-linux_armv6l.whl", hash = "sha256:aede961243bb2c0ca09d0e04ea0bf580d7128dd3b14661b79d133be9a5b69894", size = 22040477, upload-time = "2026-01-29T20:16:11.24Z" }, - { url = "https://files.pythonhosted.org/packages/f0/ed/77294752bf722e1d6b666bd6592b6ac975dabcf1fde49e98a75cac23d45c/uv-0.9.28-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:3fe9aa2822d24f6ecec035a06dfdd1fbed570ed40b83a864e71714bad37ddfd3", size = 
21025194, upload-time = "2026-01-29T20:15:36.504Z" }, - { url = "https://files.pythonhosted.org/packages/b1/a9/78f2da6217c1bbae3371d68515fe747e1160bab049d6898a03e517802573/uv-0.9.28-py3-none-macosx_11_0_arm64.whl", hash = "sha256:58a36bf623c6d36b3d60d3c76eeb7275199d607938786e927d40ce213980059d", size = 19783994, upload-time = "2026-01-29T20:16:19.451Z" }, - { url = "https://files.pythonhosted.org/packages/14/79/55639c444e91b96c81c326d39a0a06551d2e611be0cc917b89010ba9ba88/uv-0.9.28-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.musllinux_1_1_aarch64.whl", hash = "sha256:4d479a1d387b1464ad2c1f960b0b26a9ac1dfba67ea2c6789e9643fe6d1e7b9a", size = 21568230, upload-time = "2026-01-29T20:15:39.35Z" }, - { url = "https://files.pythonhosted.org/packages/14/2e/95d7992c0a39981cfbcf56ff8f069c09e0567feb0e70cb8b52bc8a2947a0/uv-0.9.28-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.musllinux_1_1_armv7l.whl", hash = "sha256:29eefd4642f55954a2b9a40619cde3d02856300f59b8cf63ed1a161ca0ca9b77", size = 21633679, upload-time = "2026-01-29T20:15:52.363Z" }, - { url = "https://files.pythonhosted.org/packages/da/ee/b6778e03714b1f9da095c8bf0f8e5007f4867d9196c1ae8053504ddf2877/uv-0.9.28-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4155496f624deb753f5ddd80fbe3797587c8480d1250e83c9fd816b4b02e3a41", size = 21632238, upload-time = "2026-01-29T20:15:55.003Z" }, - { url = "https://files.pythonhosted.org/packages/3b/f8/0db6ea9fd8f2752a8723a637e3ed881eb212516665ccb2e8066bbea62a52/uv-0.9.28-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2dc98e2d6db0dc9a2f65ce4cda6a34283fa80f3fbfff129befdf40ad7a3d1615", size = 22779474, upload-time = "2026-01-29T20:15:33.513Z" }, - { url = "https://files.pythonhosted.org/packages/54/88/ef70e04113393f4e19e67281cae9f83c82030d14eb4eb811bda83fcd8f44/uv-0.9.28-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:d267280b3878aa6ef8e00bff1f11bf61580d0a8bbb69fa95b5d3526d00f77485", size = 24124596, 
upload-time = "2026-01-29T20:16:05.062Z" }, - { url = "https://files.pythonhosted.org/packages/81/07/9fda9149bc57e79bde5f00cabcef323a68817c1cca9d44e2aa08d18c6b52/uv-0.9.28-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba2a320ff77996468789f4b2c573fd766f9330717c440335af8790043b2b3703", size = 23655701, upload-time = "2026-01-29T20:16:07.735Z" }, - { url = "https://files.pythonhosted.org/packages/18/b5/1f1e910ca1a0aca0d0ede3ba0eaca867fd3c575f44b2fe103a5c9511f071/uv-0.9.28-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2c8fd93c5bee89ed88908215f81a3baa0d2a98e35caf995b97e9c226c1c29340", size = 22856456, upload-time = "2026-01-29T20:16:16.582Z" }, - { url = "https://files.pythonhosted.org/packages/9a/fd/82561751105ed232f1781747bc336b20e8d57ee07b4d2ed3fa6cf2718d71/uv-0.9.28-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b8460a2b624d8ab27cb293a2c9f2393f9efc4e36e0fb886a6c2360e23fb48be", size = 22685296, upload-time = "2026-01-29T20:16:13.857Z" }, - { url = "https://files.pythonhosted.org/packages/a9/e4/b905daff0bfde347c49b9c9ba31d09d504c4b84f2749a07db77a9da16dba/uv-0.9.28-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:3798c486ec627bbd7ca41fa219e997ad403b1f803371edf5c8e75893e46161ba", size = 21669854, upload-time = "2026-01-29T20:15:30.277Z" }, - { url = "https://files.pythonhosted.org/packages/9a/01/9a90574fe7290c775332e54f163cba58c767445b655e97646708f9c66050/uv-0.9.28-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:e479cc5cbfd72ebdbea3c909d0ab997162e0dfa1ee622b50e2f9dc8d07d4eee3", size = 22388944, upload-time = "2026-01-29T20:15:47.697Z" }, - { url = "https://files.pythonhosted.org/packages/ac/31/cc35014bab3c17b4fe8f6bae84e640ce64d9bb4c8a24694a935e0c0af538/uv-0.9.28-py3-none-musllinux_1_1_i686.whl", hash = "sha256:97d61cdf2436e83a0f188d55d1974e46679d9a787c3a54cb0a40de717c6bf435", size = 22073327, upload-time = "2026-01-29T20:15:58.119Z" }, - { url = 
"https://files.pythonhosted.org/packages/26/cd/e848570be5c5be4e139b90237cc64f68d5d51e8e92c40a5ac7cf0c34ad4a/uv-0.9.28-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:cbfa56c833caa37b1f14166327fcaf8aa87290451406921eb07296ffef17fef1", size = 22915580, upload-time = "2026-01-29T20:15:42.468Z" }, - { url = "https://files.pythonhosted.org/packages/a1/2a/6c3d839ea289bf8509da32f703a47accd63ab409b33627728aebcd2a1b65/uv-0.9.28-py3-none-win32.whl", hash = "sha256:d5cb780d5b821f837f63e7fd14e2bf75f01824b4575a1e89639888771bfd9efd", size = 20856809, upload-time = "2026-01-29T20:15:45.141Z" }, - { url = "https://files.pythonhosted.org/packages/06/a8/d72229dd90d1e5a3c8368d51a70219018d579380945e67c8dcffbe8e53c0/uv-0.9.28-py3-none-win_amd64.whl", hash = "sha256:203ab59710c0c1b3c5ecc684f9cfc9264340a69c8706aaa8aea75415779f0d74", size = 23447461, upload-time = "2026-01-29T20:16:22.563Z" }, - { url = "https://files.pythonhosted.org/packages/23/df/5852eb0c59e5224f4cb0323906efae348f782f8a7f1069197e7cf6ec9b74/uv-0.9.28-py3-none-win_arm64.whl", hash = "sha256:c29406e1dc6b1b312c478c76b42b9f94b684855a4c001901b5488bab6ccf4ec7", size = 21860859, upload-time = "2026-01-29T20:16:00.764Z" }, +version = "0.9.30" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4e/a0/63cea38fe839fb89592728b91928ee6d15705f1376a7940fee5bbc77fea0/uv-0.9.30.tar.gz", hash = "sha256:03ebd4b22769e0a8d825fa09d038e31cbab5d3d48edf755971cb0cec7920ab95", size = 3846526, upload-time = "2026-02-04T21:45:37.58Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a3/3c/71be72f125f0035348b415468559cc3b335ec219376d17a3d242d2bd9b23/uv-0.9.30-py3-none-linux_armv6l.whl", hash = "sha256:a5467dddae1cd5f4e093f433c0f0d9a0df679b92696273485ec91bbb5a8620e6", size = 21927585, upload-time = "2026-02-04T21:46:14.935Z" }, + { url = 
"https://files.pythonhosted.org/packages/0f/fd/8070b5423a77d4058d14e48a970aa075762bbff4c812dda3bb3171543e44/uv-0.9.30-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6ec38ae29aa83a37c6e50331707eac8ecc90cf2b356d60ea6382a94de14973be", size = 21050392, upload-time = "2026-02-04T21:45:55.649Z" }, + { url = "https://files.pythonhosted.org/packages/42/5f/3ccc9415ef62969ed01829572338ea7bdf4c5cf1ffb9edc1f8cb91b571f3/uv-0.9.30-py3-none-macosx_11_0_arm64.whl", hash = "sha256:777ecd117cf1d8d6bb07de8c9b7f6c5f3e802415b926cf059d3423699732eb8c", size = 19817085, upload-time = "2026-02-04T21:45:40.881Z" }, + { url = "https://files.pythonhosted.org/packages/8b/3f/76b44e2a224f4c4a8816fc92686ef6d4c2656bc5fc9d4f673816162c994d/uv-0.9.30-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.musllinux_1_1_aarch64.whl", hash = "sha256:93049ba3c41fa2cc38b467cb78ef61b2ddedca34b6be924a5481d7750c8111c6", size = 21620537, upload-time = "2026-02-04T21:45:47.846Z" }, + { url = "https://files.pythonhosted.org/packages/60/2a/50f7e8c6d532af8dd327f77bdc75ce4652322ac34f5e29f79a8e04ea3cc8/uv-0.9.30-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.musllinux_1_1_armv7l.whl", hash = "sha256:f295604fee71224ebe2685a0f1f4ff7a45c77211a60bd57133a4a02056d7c775", size = 21550855, upload-time = "2026-02-04T21:46:26.269Z" }, + { url = "https://files.pythonhosted.org/packages/0e/10/f823d4af1125fae559194b356757dc7d4a8ac79d10d11db32c2d4c9e2f63/uv-0.9.30-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2faf84e1f3b6fc347a34c07f1291d11acf000b0dd537a61d541020f22b17ccd9", size = 21516576, upload-time = "2026-02-04T21:46:03.494Z" }, + { url = "https://files.pythonhosted.org/packages/91/f3/64b02db11f38226ed34458c7fbdb6f16b6d4fd951de24c3e51acf02b30f8/uv-0.9.30-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0b3b3700ecf64a09a07fd04d10ec35f0973ec15595d38bbafaa0318252f7e31f", size = 22718097, upload-time = "2026-02-04T21:45:51.875Z" }, + { url = 
"https://files.pythonhosted.org/packages/28/21/a48d1872260f04a68bb5177b0f62ddef62ab892d544ed1922f2d19fd2b00/uv-0.9.30-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:b176fc2937937dd81820445cb7e7e2e3cd1009a003c512f55fa0ae10064c8a38", size = 24107844, upload-time = "2026-02-04T21:46:19.032Z" }, + { url = "https://files.pythonhosted.org/packages/1c/c6/d7e5559bfe1ab7a215a7ad49c58c8a5701728f2473f7f436ef00b4664e88/uv-0.9.30-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:180e8070b8c438b9a3fb3fde8a37b365f85c3c06e17090f555dc68fdebd73333", size = 23685378, upload-time = "2026-02-04T21:46:07.166Z" }, + { url = "https://files.pythonhosted.org/packages/a8/bf/b937bbd50d14c6286e353fd4c7bdc09b75f6b3a26bd4e2f3357e99891f28/uv-0.9.30-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4125a9aa2a751e1589728f6365cfe204d1be41499148ead44b6180b7df576f27", size = 22848471, upload-time = "2026-02-04T21:45:18.728Z" }, + { url = "https://files.pythonhosted.org/packages/6a/57/12a67c569e69b71508ad669adad266221f0b1d374be88eaf60109f551354/uv-0.9.30-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4366dd740ac9ad3ec50a58868a955b032493bb7d7e6ed368289e6ced8bbc70f3", size = 22774258, upload-time = "2026-02-04T21:46:10.798Z" }, + { url = "https://files.pythonhosted.org/packages/3d/b8/a26cc64685dddb9fb13f14c3dc1b12009f800083405f854f84eb8c86b494/uv-0.9.30-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:33e50f208e01a0c20b3c5f87d453356a5cbcfd68f19e47a28b274cd45618881c", size = 21699573, upload-time = "2026-02-04T21:45:44.365Z" }, + { url = "https://files.pythonhosted.org/packages/c8/59/995af0c5f0740f8acb30468e720269e720352df1d204e82c2d52d9a8c586/uv-0.9.30-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:5e7a6fa7a3549ce893cf91fe4b06629e3e594fc1dca0a6050aba2ea08722e964", size = 22460799, upload-time = "2026-02-04T21:45:26.658Z" }, + { url = 
"https://files.pythonhosted.org/packages/bb/0b/6affe815ecbaebf38b35d6230fbed2f44708c67d5dd5720f81f2ec8f96ff/uv-0.9.30-py3-none-musllinux_1_1_i686.whl", hash = "sha256:62d7e408d41e392b55ffa4cf9b07f7bbd8b04e0929258a42e19716c221ac0590", size = 22001777, upload-time = "2026-02-04T21:45:34.656Z" }, + { url = "https://files.pythonhosted.org/packages/f3/b6/47a515171c891b0d29f8e90c8a1c0e233e4813c95a011799605cfe04c74c/uv-0.9.30-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:6dc65c24f5b9cdc78300fa6631368d3106e260bbffa66fb1e831a318374da2df", size = 22968416, upload-time = "2026-02-04T21:45:22.863Z" }, + { url = "https://files.pythonhosted.org/packages/3d/3a/c1df8615385138bb7c43342586431ca32b77466c5fb086ac0ed14ab6ca28/uv-0.9.30-py3-none-win32.whl", hash = "sha256:74e94c65d578657db94a753d41763d0364e5468ec0d368fb9ac8ddab0fb6e21f", size = 20889232, upload-time = "2026-02-04T21:46:22.617Z" }, + { url = "https://files.pythonhosted.org/packages/f2/a8/e8761c8414a880d70223723946576069e042765475f73b4436d78b865dba/uv-0.9.30-py3-none-win_amd64.whl", hash = "sha256:88a2190810684830a1ba4bb1cf8fb06b0308988a1589559404259d295260891c", size = 23432208, upload-time = "2026-02-04T21:45:30.85Z" }, + { url = "https://files.pythonhosted.org/packages/49/e8/6f2ebab941ec559f97110bbbae1279cd0333d6bc352b55f6fa3fefb020d9/uv-0.9.30-py3-none-win_arm64.whl", hash = "sha256:7fde83a5b5ea027315223c33c30a1ab2f2186910b933d091a1b7652da879e230", size = 21887273, upload-time = "2026-02-04T21:45:59.787Z" }, ] [[package]] From 560a3999bbdeb3388944f2ea87bf74b359e02b21 Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Thu, 5 Feb 2026 08:23:34 +0900 Subject: [PATCH 2/4] Merge main --- .../tests/workflow/test_agent_executor.py | 2 +- .../core/tests/workflow/test_concurrent.py | 550 ------- .../tests/workflow/test_full_conversation.py | 2 +- .../core/tests/workflow/test_group_chat.py | 1334 ----------------- .../core/tests/workflow/test_handoff.py | 709 --------- .../core/tests/workflow/test_magentic.py | 1299 
---------------- .../core/tests/workflow/test_sequential.py | 454 ------ .../tests/workflow/test_workflow_kwargs.py | 735 --------- .../orchestrations/tests/test_concurrent.py | 2 +- .../orchestrations/tests/test_group_chat.py | 14 +- .../orchestrations/tests/test_handoff.py | 3 +- .../orchestrations/tests/test_magentic.py | 24 +- .../orchestrations/tests/test_sequential.py | 2 +- 13 files changed, 26 insertions(+), 5104 deletions(-) delete mode 100644 python/packages/core/tests/workflow/test_concurrent.py delete mode 100644 python/packages/core/tests/workflow/test_group_chat.py delete mode 100644 python/packages/core/tests/workflow/test_handoff.py delete mode 100644 python/packages/core/tests/workflow/test_magentic.py delete mode 100644 python/packages/core/tests/workflow/test_sequential.py delete mode 100644 python/packages/core/tests/workflow/test_workflow_kwargs.py diff --git a/python/packages/core/tests/workflow/test_agent_executor.py b/python/packages/core/tests/workflow/test_agent_executor.py index 0d4912bae1..e1df10d586 100644 --- a/python/packages/core/tests/workflow/test_agent_executor.py +++ b/python/packages/core/tests/workflow/test_agent_executor.py @@ -12,13 +12,13 @@ ChatMessage, ChatMessageStore, Content, - SequentialBuilder, WorkflowOutputEvent, WorkflowRunState, WorkflowStatusEvent, ) from agent_framework._workflows._agent_executor import AgentExecutorResponse from agent_framework._workflows._checkpoint import InMemoryCheckpointStorage +from agent_framework.orchestrations import SequentialBuilder class _CountingAgent(BaseAgent): diff --git a/python/packages/core/tests/workflow/test_concurrent.py b/python/packages/core/tests/workflow/test_concurrent.py deleted file mode 100644 index d1fee3684e..0000000000 --- a/python/packages/core/tests/workflow/test_concurrent.py +++ /dev/null @@ -1,550 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. 
- -from typing import Any, cast - -import pytest -from typing_extensions import Never - -from agent_framework import ( - AgentExecutorRequest, - AgentExecutorResponse, - AgentResponse, - ChatMessage, - ConcurrentBuilder, - Executor, - WorkflowContext, - WorkflowOutputEvent, - WorkflowRunState, - WorkflowStatusEvent, - handler, -) -from agent_framework._workflows._checkpoint import InMemoryCheckpointStorage - - -class _FakeAgentExec(Executor): - """Test executor that mimics an agent by emitting an AgentExecutorResponse. - - It takes the incoming AgentExecutorRequest, produces a single assistant message - with the configured reply text, and sends an AgentExecutorResponse that includes - full_conversation (the original user prompt followed by the assistant message). - """ - - def __init__(self, id: str, reply_text: str) -> None: - super().__init__(id) - self._reply_text = reply_text - - @handler - async def run(self, request: AgentExecutorRequest, ctx: WorkflowContext[AgentExecutorResponse]) -> None: - response = AgentResponse(messages=ChatMessage("assistant", text=self._reply_text)) - full_conversation = list(request.messages) + list(response.messages) - await ctx.send_message(AgentExecutorResponse(self.id, response, full_conversation=full_conversation)) - - -def test_concurrent_builder_rejects_empty_participants() -> None: - with pytest.raises(ValueError): - ConcurrentBuilder().participants([]) - - -def test_concurrent_builder_rejects_duplicate_executors() -> None: - a = _FakeAgentExec("dup", "A") - b = _FakeAgentExec("dup", "B") # same executor id - with pytest.raises(ValueError): - ConcurrentBuilder().participants([a, b]) - - -def test_concurrent_builder_rejects_duplicate_executors_from_factories() -> None: - """Test that duplicate executor IDs from factories are detected at build time.""" - - def create_dup1() -> Executor: - return _FakeAgentExec("dup", "A") - - def create_dup2() -> Executor: - return _FakeAgentExec("dup", "B") # same executor id - - builder = 
ConcurrentBuilder().register_participants([create_dup1, create_dup2]) - with pytest.raises(ValueError, match="Duplicate executor ID 'dup' detected in workflow."): - builder.build() - - -def test_concurrent_builder_rejects_mixed_participants_and_factories() -> None: - """Test that mixing .participants() and .register_participants() raises an error.""" - # Case 1: participants first, then register_participants - with pytest.raises(ValueError, match="Cannot mix .participants"): - ( - ConcurrentBuilder() - .participants([_FakeAgentExec("a", "A")]) - .register_participants([lambda: _FakeAgentExec("b", "B")]) - ) - - # Case 2: register_participants first, then participants - with pytest.raises(ValueError, match="Cannot mix .participants"): - ( - ConcurrentBuilder() - .register_participants([lambda: _FakeAgentExec("a", "A")]) - .participants([_FakeAgentExec("b", "B")]) - ) - - -def test_concurrent_builder_rejects_multiple_calls_to_participants() -> None: - """Test that multiple calls to .participants() raises an error.""" - with pytest.raises(ValueError, match=r"participants\(\) has already been called"): - (ConcurrentBuilder().participants([_FakeAgentExec("a", "A")]).participants([_FakeAgentExec("b", "B")])) - - -def test_concurrent_builder_rejects_multiple_calls_to_register_participants() -> None: - """Test that multiple calls to .register_participants() raises an error.""" - with pytest.raises(ValueError, match=r"register_participants\(\) has already been called"): - ( - ConcurrentBuilder() - .register_participants([lambda: _FakeAgentExec("a", "A")]) - .register_participants([lambda: _FakeAgentExec("b", "B")]) - ) - - -async def test_concurrent_default_aggregator_emits_single_user_and_assistants() -> None: - # Three synthetic agent executors - e1 = _FakeAgentExec("agentA", "Alpha") - e2 = _FakeAgentExec("agentB", "Beta") - e3 = _FakeAgentExec("agentC", "Gamma") - - wf = ConcurrentBuilder().participants([e1, e2, e3]).build() - - completed = False - output: 
list[ChatMessage] | None = None - async for ev in wf.run_stream("prompt: hello world"): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: - completed = True - elif isinstance(ev, WorkflowOutputEvent): - output = cast(list[ChatMessage], ev.data) - if completed and output is not None: - break - - assert completed - assert output is not None - messages: list[ChatMessage] = output - - # Expect one user message + one assistant message per participant - assert len(messages) == 1 + 3 - assert messages[0].role == "user" - assert "hello world" in messages[0].text - - assistant_texts = {m.text for m in messages[1:]} - assert assistant_texts == {"Alpha", "Beta", "Gamma"} - assert all(m.role == "assistant" for m in messages[1:]) - - -async def test_concurrent_custom_aggregator_callback_is_used() -> None: - # Two synthetic agent executors for brevity - e1 = _FakeAgentExec("agentA", "One") - e2 = _FakeAgentExec("agentB", "Two") - - async def summarize(results: list[AgentExecutorResponse]) -> str: - texts: list[str] = [] - for r in results: - msgs: list[ChatMessage] = r.agent_response.messages - texts.append(msgs[-1].text if msgs else "") - return " | ".join(sorted(texts)) - - wf = ConcurrentBuilder().participants([e1, e2]).with_aggregator(summarize).build() - - completed = False - output: str | None = None - async for ev in wf.run_stream("prompt: custom"): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: - completed = True - elif isinstance(ev, WorkflowOutputEvent): - output = cast(str, ev.data) - if completed and output is not None: - break - - assert completed - assert output is not None - # Custom aggregator returns a string payload - assert isinstance(output, str) - assert output == "One | Two" - - -async def test_concurrent_custom_aggregator_sync_callback_is_used() -> None: - e1 = _FakeAgentExec("agentA", "One") - e2 = _FakeAgentExec("agentB", "Two") - - # Sync callback with ctx parameter (should run via 
asyncio.to_thread) - def summarize_sync(results: list[AgentExecutorResponse], _ctx: WorkflowContext[Any]) -> str: # type: ignore[unused-argument] - texts: list[str] = [] - for r in results: - msgs: list[ChatMessage] = r.agent_response.messages - texts.append(msgs[-1].text if msgs else "") - return " | ".join(sorted(texts)) - - wf = ConcurrentBuilder().participants([e1, e2]).with_aggregator(summarize_sync).build() - - completed = False - output: str | None = None - async for ev in wf.run_stream("prompt: custom sync"): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: - completed = True - elif isinstance(ev, WorkflowOutputEvent): - output = cast(str, ev.data) - if completed and output is not None: - break - - assert completed - assert output is not None - assert isinstance(output, str) - assert output == "One | Two" - - -def test_concurrent_custom_aggregator_uses_callback_name_for_id() -> None: - e1 = _FakeAgentExec("agentA", "One") - e2 = _FakeAgentExec("agentB", "Two") - - def summarize(results: list[AgentExecutorResponse]) -> str: # type: ignore[override] - return str(len(results)) - - wf = ConcurrentBuilder().participants([e1, e2]).with_aggregator(summarize).build() - - assert "summarize" in wf.executors - aggregator = wf.executors["summarize"] - assert aggregator.id == "summarize" - - -async def test_concurrent_with_aggregator_executor_instance() -> None: - """Test with_aggregator using an Executor instance (not factory).""" - - class CustomAggregator(Executor): - @handler - async def aggregate(self, results: list[AgentExecutorResponse], ctx: WorkflowContext[Never, str]) -> None: - texts: list[str] = [] - for r in results: - msgs: list[ChatMessage] = r.agent_response.messages - texts.append(msgs[-1].text if msgs else "") - await ctx.yield_output(" & ".join(sorted(texts))) - - e1 = _FakeAgentExec("agentA", "One") - e2 = _FakeAgentExec("agentB", "Two") - - aggregator_instance = CustomAggregator(id="instance_aggregator") - wf = 
ConcurrentBuilder().participants([e1, e2]).with_aggregator(aggregator_instance).build() - - completed = False - output: str | None = None - async for ev in wf.run_stream("prompt: instance test"): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: - completed = True - elif isinstance(ev, WorkflowOutputEvent): - output = cast(str, ev.data) - if completed and output is not None: - break - - assert completed - assert output is not None - assert isinstance(output, str) - assert output == "One & Two" - - -async def test_concurrent_with_aggregator_executor_factory() -> None: - """Test with_aggregator using an Executor factory.""" - - class CustomAggregator(Executor): - @handler - async def aggregate(self, results: list[AgentExecutorResponse], ctx: WorkflowContext[Never, str]) -> None: - texts: list[str] = [] - for r in results: - msgs: list[ChatMessage] = r.agent_response.messages - texts.append(msgs[-1].text if msgs else "") - await ctx.yield_output(" | ".join(sorted(texts))) - - e1 = _FakeAgentExec("agentA", "One") - e2 = _FakeAgentExec("agentB", "Two") - - wf = ( - ConcurrentBuilder() - .participants([e1, e2]) - .register_aggregator(lambda: CustomAggregator(id="custom_aggregator")) - .build() - ) - - completed = False - output: str | None = None - async for ev in wf.run_stream("prompt: factory test"): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: - completed = True - elif isinstance(ev, WorkflowOutputEvent): - output = cast(str, ev.data) - if completed and output is not None: - break - - assert completed - assert output is not None - assert isinstance(output, str) - assert output == "One | Two" - - -async def test_concurrent_with_aggregator_executor_factory_with_default_id() -> None: - """Test with_aggregator using an Executor class directly as factory (with default __init__ parameters).""" - - class CustomAggregator(Executor): - def __init__(self, id: str = "default_aggregator") -> None: - 
super().__init__(id) - - @handler - async def aggregate(self, results: list[AgentExecutorResponse], ctx: WorkflowContext[Never, str]) -> None: - texts: list[str] = [] - for r in results: - msgs: list[ChatMessage] = r.agent_response.messages - texts.append(msgs[-1].text if msgs else "") - await ctx.yield_output(" | ".join(sorted(texts))) - - e1 = _FakeAgentExec("agentA", "One") - e2 = _FakeAgentExec("agentB", "Two") - - wf = ConcurrentBuilder().participants([e1, e2]).register_aggregator(CustomAggregator).build() - - completed = False - output: str | None = None - async for ev in wf.run_stream("prompt: factory test"): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: - completed = True - elif isinstance(ev, WorkflowOutputEvent): - output = cast(str, ev.data) - if completed and output is not None: - break - - assert completed - assert output is not None - assert isinstance(output, str) - assert output == "One | Two" - - -def test_concurrent_builder_rejects_multiple_calls_to_with_aggregator() -> None: - """Test that multiple calls to .with_aggregator() raises an error.""" - - def summarize(results: list[AgentExecutorResponse]) -> str: # type: ignore[override] - return str(len(results)) - - with pytest.raises(ValueError, match=r"with_aggregator\(\) has already been called"): - (ConcurrentBuilder().with_aggregator(summarize).with_aggregator(summarize)) - - -def test_concurrent_builder_rejects_multiple_calls_to_register_aggregator() -> None: - """Test that multiple calls to .register_aggregator() raises an error.""" - - class CustomAggregator(Executor): - pass - - with pytest.raises(ValueError, match=r"register_aggregator\(\) has already been called"): - ( - ConcurrentBuilder() - .register_aggregator(lambda: CustomAggregator(id="agg1")) - .register_aggregator(lambda: CustomAggregator(id="agg2")) - ) - - -async def test_concurrent_checkpoint_resume_round_trip() -> None: - storage = InMemoryCheckpointStorage() - - participants = ( - 
_FakeAgentExec("agentA", "Alpha"), - _FakeAgentExec("agentB", "Beta"), - _FakeAgentExec("agentC", "Gamma"), - ) - - wf = ConcurrentBuilder().participants(list(participants)).with_checkpointing(storage).build() - - baseline_output: list[ChatMessage] | None = None - async for ev in wf.run_stream("checkpoint concurrent"): - if isinstance(ev, WorkflowOutputEvent): - baseline_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: - break - - assert baseline_output is not None - - checkpoints = await storage.list_checkpoints() - assert checkpoints - checkpoints.sort(key=lambda cp: cp.timestamp) - resume_checkpoint = next( - (cp for cp in checkpoints if (cp.metadata or {}).get("checkpoint_type") == "superstep"), - checkpoints[-1], - ) - - resumed_participants = ( - _FakeAgentExec("agentA", "Alpha"), - _FakeAgentExec("agentB", "Beta"), - _FakeAgentExec("agentC", "Gamma"), - ) - wf_resume = ConcurrentBuilder().participants(list(resumed_participants)).with_checkpointing(storage).build() - - resumed_output: list[ChatMessage] | None = None - async for ev in wf_resume.run_stream(checkpoint_id=resume_checkpoint.checkpoint_id): - if isinstance(ev, WorkflowOutputEvent): - resumed_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state in ( - WorkflowRunState.IDLE, - WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, - ): - break - - assert resumed_output is not None - assert [m.role for m in resumed_output] == [m.role for m in baseline_output] - assert [m.text for m in resumed_output] == [m.text for m in baseline_output] - - -async def test_concurrent_checkpoint_runtime_only() -> None: - """Test checkpointing configured ONLY at runtime, not at build time.""" - storage = InMemoryCheckpointStorage() - - agents = [_FakeAgentExec(id="agent1", reply_text="A1"), _FakeAgentExec(id="agent2", reply_text="A2")] - wf = ConcurrentBuilder().participants(agents).build() - - baseline_output: 
list[ChatMessage] | None = None - async for ev in wf.run_stream("runtime checkpoint test", checkpoint_storage=storage): - if isinstance(ev, WorkflowOutputEvent): - baseline_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: - break - - assert baseline_output is not None - - checkpoints = await storage.list_checkpoints() - assert checkpoints - checkpoints.sort(key=lambda cp: cp.timestamp) - - resume_checkpoint = next( - (cp for cp in checkpoints if (cp.metadata or {}).get("checkpoint_type") == "superstep"), - checkpoints[-1], - ) - - resumed_agents = [_FakeAgentExec(id="agent1", reply_text="A1"), _FakeAgentExec(id="agent2", reply_text="A2")] - wf_resume = ConcurrentBuilder().participants(resumed_agents).build() - - resumed_output: list[ChatMessage] | None = None - async for ev in wf_resume.run_stream(checkpoint_id=resume_checkpoint.checkpoint_id, checkpoint_storage=storage): - if isinstance(ev, WorkflowOutputEvent): - resumed_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state in ( - WorkflowRunState.IDLE, - WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, - ): - break - - assert resumed_output is not None - assert [m.role for m in resumed_output] == [m.role for m in baseline_output] - - -async def test_concurrent_checkpoint_runtime_overrides_buildtime() -> None: - """Test that runtime checkpoint storage overrides build-time configuration.""" - import tempfile - - with tempfile.TemporaryDirectory() as temp_dir1, tempfile.TemporaryDirectory() as temp_dir2: - from agent_framework._workflows._checkpoint import FileCheckpointStorage - - buildtime_storage = FileCheckpointStorage(temp_dir1) - runtime_storage = FileCheckpointStorage(temp_dir2) - - agents = [_FakeAgentExec(id="agent1", reply_text="A1"), _FakeAgentExec(id="agent2", reply_text="A2")] - wf = ConcurrentBuilder().participants(agents).with_checkpointing(buildtime_storage).build() - - baseline_output: 
list[ChatMessage] | None = None - async for ev in wf.run_stream("override test", checkpoint_storage=runtime_storage): - if isinstance(ev, WorkflowOutputEvent): - baseline_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: - break - - assert baseline_output is not None - - buildtime_checkpoints = await buildtime_storage.list_checkpoints() - runtime_checkpoints = await runtime_storage.list_checkpoints() - - assert len(runtime_checkpoints) > 0, "Runtime storage should have checkpoints" - assert len(buildtime_checkpoints) == 0, "Build-time storage should have no checkpoints when overridden" - - -def test_concurrent_builder_rejects_empty_participant_factories() -> None: - with pytest.raises(ValueError): - ConcurrentBuilder().register_participants([]) - - -async def test_concurrent_builder_reusable_after_build_with_participants() -> None: - """Test that the builder can be reused to build multiple identical workflows with participants().""" - e1 = _FakeAgentExec("agentA", "One") - e2 = _FakeAgentExec("agentB", "Two") - - builder = ConcurrentBuilder().participants([e1, e2]) - - builder.build() - - assert builder._participants[0] is e1 # type: ignore - assert builder._participants[1] is e2 # type: ignore - assert builder._participant_factories == [] # type: ignore - - -async def test_concurrent_builder_reusable_after_build_with_factories() -> None: - """Test that the builder can be reused to build multiple workflows with register_participants().""" - call_count = 0 - - def create_agent_executor_a() -> Executor: - nonlocal call_count - call_count += 1 - return _FakeAgentExec("agentA", "One") - - def create_agent_executor_b() -> Executor: - nonlocal call_count - call_count += 1 - return _FakeAgentExec("agentB", "Two") - - builder = ConcurrentBuilder().register_participants([create_agent_executor_a, create_agent_executor_b]) - - # Build the first workflow - wf1 = builder.build() - - assert 
builder._participants == [] # type: ignore - assert len(builder._participant_factories) == 2 # type: ignore - assert call_count == 2 - - # Build the second workflow - wf2 = builder.build() - assert call_count == 4 - - # Verify that the two workflows have different executor instances - assert wf1.executors["agentA"] is not wf2.executors["agentA"] - assert wf1.executors["agentB"] is not wf2.executors["agentB"] - - -async def test_concurrent_with_register_participants() -> None: - """Test workflow creation using register_participants with factories.""" - - def create_agent1() -> Executor: - return _FakeAgentExec("agentA", "Alpha") - - def create_agent2() -> Executor: - return _FakeAgentExec("agentB", "Beta") - - def create_agent3() -> Executor: - return _FakeAgentExec("agentC", "Gamma") - - wf = ConcurrentBuilder().register_participants([create_agent1, create_agent2, create_agent3]).build() - - completed = False - output: list[ChatMessage] | None = None - async for ev in wf.run_stream("test prompt"): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: - completed = True - elif isinstance(ev, WorkflowOutputEvent): - output = cast(list[ChatMessage], ev.data) - if completed and output is not None: - break - - assert completed - assert output is not None - messages: list[ChatMessage] = output - - # Expect one user message + one assistant message per participant - assert len(messages) == 1 + 3 - assert messages[0].role == "user" - assert "test prompt" in messages[0].text - - assistant_texts = {m.text for m in messages[1:]} - assert assistant_texts == {"Alpha", "Beta", "Gamma"} - assert all(m.role == "assistant" for m in messages[1:]) diff --git a/python/packages/core/tests/workflow/test_full_conversation.py b/python/packages/core/tests/workflow/test_full_conversation.py index 1c84e04494..cab7729fdd 100644 --- a/python/packages/core/tests/workflow/test_full_conversation.py +++ b/python/packages/core/tests/workflow/test_full_conversation.py @@ 
-16,13 +16,13 @@ ChatMessage, Content, Executor, - SequentialBuilder, WorkflowBuilder, WorkflowContext, WorkflowRunState, WorkflowStatusEvent, handler, ) +from agent_framework.orchestrations import SequentialBuilder class _SimpleAgent(BaseAgent): diff --git a/python/packages/core/tests/workflow/test_group_chat.py b/python/packages/core/tests/workflow/test_group_chat.py deleted file mode 100644 index 21f1e567d3..0000000000 --- a/python/packages/core/tests/workflow/test_group_chat.py +++ /dev/null @@ -1,1334 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -from collections.abc import AsyncIterable, Callable, Sequence -from typing import Any, cast - -import pytest - -from agent_framework import ( - AgentExecutorResponse, - AgentRequestInfoResponse, - AgentResponse, - AgentResponseUpdate, - AgentThread, - BaseAgent, - BaseGroupChatOrchestrator, - ChatAgent, - ChatMessage, - ChatResponse, - ChatResponseUpdate, - Content, - GroupChatBuilder, - GroupChatState, - MagenticContext, - MagenticManagerBase, - MagenticProgressLedger, - MagenticProgressLedgerItem, - RequestInfoEvent, - WorkflowOutputEvent, - WorkflowRunState, - WorkflowStatusEvent, -) -from agent_framework._workflows._checkpoint import InMemoryCheckpointStorage - - -class StubAgent(BaseAgent): - def __init__(self, agent_name: str, reply_text: str, **kwargs: Any) -> None: - super().__init__(name=agent_name, description=f"Stub agent {agent_name}", **kwargs) - self._reply_text = reply_text - - async def run( # type: ignore[override] - self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, - *, - thread: AgentThread | None = None, - **kwargs: Any, - ) -> AgentResponse: - response = ChatMessage("assistant", [self._reply_text], author_name=self.name) - return AgentResponse(messages=[response]) - - def run_stream( # type: ignore[override] - self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, - *, - thread: AgentThread | None = None, - **kwargs: Any, - ) 
-> AsyncIterable[AgentResponseUpdate]: - async def _stream() -> AsyncIterable[AgentResponseUpdate]: - yield AgentResponseUpdate( - contents=[Content.from_text(text=self._reply_text)], role="assistant", author_name=self.name - ) - - return _stream() - - -class MockChatClient: - """Mock chat client that raises NotImplementedError for all methods.""" - - additional_properties: dict[str, Any] - - async def get_response(self, messages: Any, **kwargs: Any) -> ChatResponse: - raise NotImplementedError - - def get_streaming_response(self, messages: Any, **kwargs: Any) -> AsyncIterable[ChatResponseUpdate]: - raise NotImplementedError - - -class StubManagerAgent(ChatAgent): - def __init__(self) -> None: - super().__init__(chat_client=MockChatClient(), name="manager_agent", description="Stub manager") - self._call_count = 0 - - async def run( - self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, - *, - thread: AgentThread | None = None, - **kwargs: Any, - ) -> AgentResponse: - if self._call_count == 0: - self._call_count += 1 - # First call: select the agent (using AgentOrchestrationOutput format) - payload = {"terminate": False, "reason": "Selecting agent", "next_speaker": "agent", "final_message": None} - return AgentResponse( - messages=[ - ChatMessage( - role="assistant", - text=( - '{"terminate": false, "reason": "Selecting agent", ' - '"next_speaker": "agent", "final_message": null}' - ), - author_name=self.name, - ) - ], - value=payload, - ) - - # Second call: terminate - payload = { - "terminate": True, - "reason": "Task complete", - "next_speaker": None, - "final_message": "agent manager final", - } - return AgentResponse( - messages=[ - ChatMessage( - role="assistant", - text=( - '{"terminate": true, "reason": "Task complete", ' - '"next_speaker": null, "final_message": "agent manager final"}' - ), - author_name=self.name, - ) - ], - value=payload, - ) - - def run_stream( - self, - messages: str | ChatMessage | Sequence[str | 
ChatMessage] | None = None, - *, - thread: AgentThread | None = None, - **kwargs: Any, - ) -> AsyncIterable[AgentResponseUpdate]: - if self._call_count == 0: - self._call_count += 1 - - async def _stream_initial() -> AsyncIterable[AgentResponseUpdate]: - yield AgentResponseUpdate( - contents=[ - Content.from_text( - text=( - '{"terminate": false, "reason": "Selecting agent", ' - '"next_speaker": "agent", "final_message": null}' - ) - ) - ], - role="assistant", - author_name=self.name, - ) - - return _stream_initial() - - async def _stream_final() -> AsyncIterable[AgentResponseUpdate]: - yield AgentResponseUpdate( - contents=[ - Content.from_text( - text=( - '{"terminate": true, "reason": "Task complete", ' - '"next_speaker": null, "final_message": "agent manager final"}' - ) - ) - ], - role="assistant", - author_name=self.name, - ) - - return _stream_final() - - -def make_sequence_selector() -> Callable[[GroupChatState], str]: - state_counter = {"value": 0} - - def _selector(state: GroupChatState) -> str: - participants = list(state.participants.keys()) - step = state_counter["value"] - state_counter["value"] = step + 1 - if step == 0: - return participants[0] - if step == 1 and len(participants) > 1: - return participants[1] - # Return first participant to continue (will be limited by max_rounds in tests) - return participants[0] - - return _selector - - -class StubMagenticManager(MagenticManagerBase): - def __init__(self) -> None: - super().__init__(max_stall_count=3, max_round_count=5) - self._round = 0 - - async def plan(self, magentic_context: MagenticContext) -> ChatMessage: - return ChatMessage("assistant", ["plan"], author_name="magentic_manager") - - async def replan(self, magentic_context: MagenticContext) -> ChatMessage: - return await self.plan(magentic_context) - - async def create_progress_ledger(self, magentic_context: MagenticContext) -> MagenticProgressLedger: - participants = list(magentic_context.participant_descriptions.keys()) - target = 
participants[0] if participants else "agent" - if self._round == 0: - self._round += 1 - return MagenticProgressLedger( - is_request_satisfied=MagenticProgressLedgerItem(reason="", answer=False), - is_in_loop=MagenticProgressLedgerItem(reason="", answer=False), - is_progress_being_made=MagenticProgressLedgerItem(reason="", answer=True), - next_speaker=MagenticProgressLedgerItem(reason="", answer=target), - instruction_or_question=MagenticProgressLedgerItem(reason="", answer="respond"), - ) - return MagenticProgressLedger( - is_request_satisfied=MagenticProgressLedgerItem(reason="", answer=True), - is_in_loop=MagenticProgressLedgerItem(reason="", answer=False), - is_progress_being_made=MagenticProgressLedgerItem(reason="", answer=True), - next_speaker=MagenticProgressLedgerItem(reason="", answer=target), - instruction_or_question=MagenticProgressLedgerItem(reason="", answer=""), - ) - - async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatMessage: - return ChatMessage("assistant", ["final"], author_name="magentic_manager") - - -async def test_group_chat_builder_basic_flow() -> None: - selector = make_sequence_selector() - alpha = StubAgent("alpha", "ack from alpha") - beta = StubAgent("beta", "ack from beta") - - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector, orchestrator_name="manager") - .participants([alpha, beta]) - .with_max_rounds(2) # Limit rounds to prevent infinite loop - .build() - ) - - outputs: list[list[ChatMessage]] = [] - async for event in workflow.run_stream("coordinate task"): - if isinstance(event, WorkflowOutputEvent): - data = event.data - if isinstance(data, list): - outputs.append(cast(list[ChatMessage], data)) - - assert len(outputs) == 1 - assert len(outputs[0]) >= 1 - # Check that both agents contributed - authors = {msg.author_name for msg in outputs[0] if msg.author_name in ["alpha", "beta"]} - assert len(authors) == 2 - - -async def test_group_chat_as_agent_accepts_conversation() 
-> None: - selector = make_sequence_selector() - alpha = StubAgent("alpha", "ack from alpha") - beta = StubAgent("beta", "ack from beta") - - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector, orchestrator_name="manager") - .participants([alpha, beta]) - .with_max_rounds(2) # Limit rounds to prevent infinite loop - .build() - ) - - agent = workflow.as_agent(name="group-chat-agent") - conversation = [ - ChatMessage("user", ["kickoff"], author_name="user"), - ChatMessage("assistant", ["noted"], author_name="alpha"), - ] - response = await agent.run(conversation) - - assert response.messages, "Expected agent conversation output" - - -# Comprehensive tests for group chat functionality - - -class TestGroupChatBuilder: - """Tests for GroupChatBuilder validation and configuration.""" - - def test_build_without_manager_raises_error(self) -> None: - """Test that building without a manager raises ValueError.""" - agent = StubAgent("test", "response") - - builder = GroupChatBuilder().participants([agent]) - - with pytest.raises( - ValueError, match=r"No orchestrator has been configured\. Call with_orchestrator\(\) to set one\." - ): - builder.build() - - def test_build_without_participants_raises_error(self) -> None: - """Test that building without participants raises ValueError.""" - - def selector(state: GroupChatState) -> str: - return "agent" - - builder = GroupChatBuilder().with_orchestrator(selection_func=selector) - - with pytest.raises( - ValueError, - match=r"No participants provided\. 
Call \.participants\(\) or \.register_participants\(\) first\.", - ): - builder.build() - - def test_duplicate_manager_configuration_raises_error(self) -> None: - """Test that configuring multiple managers raises ValueError.""" - - def selector(state: GroupChatState) -> str: - return "agent" - - builder = GroupChatBuilder().with_orchestrator(selection_func=selector) - - with pytest.raises( - ValueError, - match=r"A selection function has already been configured\. Call with_orchestrator\(\.\.\.\) once only\.", - ): - builder.with_orchestrator(selection_func=selector) - - def test_empty_participants_raises_error(self) -> None: - """Test that empty participants list raises ValueError.""" - - def selector(state: GroupChatState) -> str: - return "agent" - - builder = GroupChatBuilder().with_orchestrator(selection_func=selector) - - with pytest.raises(ValueError, match="participants cannot be empty"): - builder.participants([]) - - def test_duplicate_participant_names_raises_error(self) -> None: - """Test that duplicate participant names raise ValueError.""" - agent1 = StubAgent("test", "response1") - agent2 = StubAgent("test", "response2") - - def selector(state: GroupChatState) -> str: - return "agent" - - builder = GroupChatBuilder().with_orchestrator(selection_func=selector) - - with pytest.raises(ValueError, match="Duplicate participant name 'test'"): - builder.participants([agent1, agent2]) - - def test_agent_without_name_raises_error(self) -> None: - """Test that agent without name attribute raises ValueError.""" - - class AgentWithoutName(BaseAgent): - def __init__(self) -> None: - super().__init__(name="", description="test") - - async def run(self, messages: Any = None, *, thread: Any = None, **kwargs: Any) -> AgentResponse: - return AgentResponse(messages=[]) - - def run_stream( - self, messages: Any = None, *, thread: Any = None, **kwargs: Any - ) -> AsyncIterable[AgentResponseUpdate]: - async def _stream() -> AsyncIterable[AgentResponseUpdate]: - yield 
AgentResponseUpdate(contents=[]) - - return _stream() - - agent = AgentWithoutName() - - def selector(state: GroupChatState) -> str: - return "agent" - - builder = GroupChatBuilder().with_orchestrator(selection_func=selector) - - with pytest.raises(ValueError, match="AgentProtocol participants must have a non-empty name"): - builder.participants([agent]) - - def test_empty_participant_name_raises_error(self) -> None: - """Test that empty participant name raises ValueError.""" - agent = StubAgent("", "response") # Agent with empty name - - def selector(state: GroupChatState) -> str: - return "agent" - - builder = GroupChatBuilder().with_orchestrator(selection_func=selector) - - with pytest.raises(ValueError, match="AgentProtocol participants must have a non-empty name"): - builder.participants([agent]) - - -class TestGroupChatWorkflow: - """Tests for GroupChat workflow functionality.""" - - async def test_max_rounds_enforcement(self) -> None: - """Test that max_rounds properly limits conversation rounds.""" - call_count = {"value": 0} - - def selector(state: GroupChatState) -> str: - call_count["value"] += 1 - # Always return the agent name to try to continue indefinitely - return "agent" - - agent = StubAgent("agent", "response") - - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector) - .participants([agent]) - .with_max_rounds(2) # Limit to 2 rounds - .build() - ) - - outputs: list[list[ChatMessage]] = [] - async for event in workflow.run_stream("test task"): - if isinstance(event, WorkflowOutputEvent): - data = event.data - if isinstance(data, list): - outputs.append(cast(list[ChatMessage], data)) - - # Should have terminated due to max_rounds, expect at least one output - assert len(outputs) >= 1 - # The final message in the conversation should be about round limit - conversation = outputs[-1] - assert len(conversation) >= 1 - final_output = conversation[-1] - assert "maximum number of rounds" in final_output.text.lower() - - async 
def test_termination_condition_halts_conversation(self) -> None: - """Test that a custom termination condition stops the workflow.""" - - def selector(state: GroupChatState) -> str: - return "agent" - - def termination_condition(conversation: list[ChatMessage]) -> bool: - replies = [msg for msg in conversation if msg.role == "assistant" and msg.author_name == "agent"] - return len(replies) >= 2 - - agent = StubAgent("agent", "response") - - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector) - .participants([agent]) - .with_termination_condition(termination_condition) - .build() - ) - - outputs: list[list[ChatMessage]] = [] - async for event in workflow.run_stream("test task"): - if isinstance(event, WorkflowOutputEvent): - data = event.data - if isinstance(data, list): - outputs.append(cast(list[ChatMessage], data)) - - assert outputs, "Expected termination to yield output" - conversation = outputs[-1] - agent_replies = [msg for msg in conversation if msg.author_name == "agent" and msg.role == "assistant"] - assert len(agent_replies) == 2 - final_output = conversation[-1] - # The orchestrator uses its ID as author_name by default - assert "termination condition" in final_output.text.lower() - - async def test_termination_condition_agent_manager_finalizes(self) -> None: - """Test that termination condition with agent orchestrator produces default termination message.""" - manager = StubManagerAgent() - worker = StubAgent("agent", "response") - - workflow = ( - GroupChatBuilder() - .with_orchestrator(agent=manager) - .participants([worker]) - .with_termination_condition(lambda conv: any(msg.author_name == "agent" for msg in conv)) - .build() - ) - - outputs: list[list[ChatMessage]] = [] - async for event in workflow.run_stream("test task"): - if isinstance(event, WorkflowOutputEvent): - data = event.data - if isinstance(data, list): - outputs.append(cast(list[ChatMessage], data)) - - assert outputs, "Expected termination to yield output" 
- conversation = outputs[-1] - assert conversation[-1].text == BaseGroupChatOrchestrator.TERMINATION_CONDITION_MET_MESSAGE - assert conversation[-1].author_name == manager.name - - async def test_unknown_participant_error(self) -> None: - """Test that unknown participant selection raises error.""" - - def selector(state: GroupChatState) -> str: - return "unknown_agent" # Return non-existent participant - - agent = StubAgent("agent", "response") - - workflow = GroupChatBuilder().with_orchestrator(selection_func=selector).participants([agent]).build() - - with pytest.raises(RuntimeError, match="Selection function returned unknown participant 'unknown_agent'"): - async for _ in workflow.run_stream("test task"): - pass - - -class TestCheckpointing: - """Tests for checkpointing functionality.""" - - async def test_workflow_with_checkpointing(self) -> None: - """Test that workflow works with checkpointing enabled.""" - - def selector(state: GroupChatState) -> str: - return "agent" - - agent = StubAgent("agent", "response") - storage = InMemoryCheckpointStorage() - - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector) - .participants([agent]) - .with_max_rounds(1) - .with_checkpointing(storage) - .build() - ) - - outputs: list[list[ChatMessage]] = [] - async for event in workflow.run_stream("test task"): - if isinstance(event, WorkflowOutputEvent): - data = event.data - if isinstance(data, list): - outputs.append(cast(list[ChatMessage], data)) - - assert len(outputs) == 1 # Should complete normally - - -class TestConversationHandling: - """Tests for different conversation input types.""" - - async def test_handle_empty_conversation_raises_error(self) -> None: - """Test that empty conversation list raises ValueError.""" - - def selector(state: GroupChatState) -> str: - return "agent" - - agent = StubAgent("agent", "response") - - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector) - .participants([agent]) - 
.with_max_rounds(1) - .build() - ) - - with pytest.raises(ValueError, match="At least one ChatMessage is required to start the group chat workflow."): - async for _ in workflow.run_stream([]): - pass - - async def test_handle_string_input(self) -> None: - """Test handling string input creates proper ChatMessage.""" - - def selector(state: GroupChatState) -> str: - # Verify the conversation has the user message - assert len(state.conversation) > 0 - assert state.conversation[0].role == "user" - assert state.conversation[0].text == "test string" - return "agent" - - agent = StubAgent("agent", "response") - - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector) - .participants([agent]) - .with_max_rounds(1) - .build() - ) - - outputs: list[list[ChatMessage]] = [] - async for event in workflow.run_stream("test string"): - if isinstance(event, WorkflowOutputEvent): - data = event.data - if isinstance(data, list): - outputs.append(cast(list[ChatMessage], data)) - - assert len(outputs) == 1 - - async def test_handle_chat_message_input(self) -> None: - """Test handling ChatMessage input directly.""" - task_message = ChatMessage("user", ["test message"]) - - def selector(state: GroupChatState) -> str: - # Verify the task message was preserved in conversation - assert len(state.conversation) > 0 - assert state.conversation[0] == task_message - return "agent" - - agent = StubAgent("agent", "response") - - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector) - .participants([agent]) - .with_max_rounds(1) - .build() - ) - - outputs: list[list[ChatMessage]] = [] - async for event in workflow.run_stream(task_message): - if isinstance(event, WorkflowOutputEvent): - data = event.data - if isinstance(data, list): - outputs.append(cast(list[ChatMessage], data)) - - assert len(outputs) == 1 - - async def test_handle_conversation_list_input(self) -> None: - """Test handling conversation list preserves context.""" - conversation = [ 
- ChatMessage("system", ["system message"]), - ChatMessage("user", ["user message"]), - ] - - def selector(state: GroupChatState) -> str: - # Verify conversation context is preserved - assert len(state.conversation) >= 2 - assert state.conversation[-1].text == "user message" - return "agent" - - agent = StubAgent("agent", "response") - - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector) - .participants([agent]) - .with_max_rounds(1) - .build() - ) - - outputs: list[list[ChatMessage]] = [] - async for event in workflow.run_stream(conversation): - if isinstance(event, WorkflowOutputEvent): - data = event.data - if isinstance(data, list): - outputs.append(cast(list[ChatMessage], data)) - - assert len(outputs) == 1 - - -class TestRoundLimitEnforcement: - """Tests for round limit checking functionality.""" - - async def test_round_limit_in_apply_directive(self) -> None: - """Test round limit enforcement.""" - rounds_called = {"count": 0} - - def selector(state: GroupChatState) -> str: - rounds_called["count"] += 1 - # Keep trying to select agent to test limit enforcement - return "agent" - - agent = StubAgent("agent", "response") - - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector) - .participants([agent]) - .with_max_rounds(1) # Very low limit - .build() - ) - - outputs: list[list[ChatMessage]] = [] - async for event in workflow.run_stream("test"): - if isinstance(event, WorkflowOutputEvent): - data = event.data - if isinstance(data, list): - outputs.append(cast(list[ChatMessage], data)) - - # Should have at least one output (the round limit message) - assert len(outputs) >= 1 - # The last message in the conversation should be about round limit - conversation = outputs[-1] - assert len(conversation) >= 1 - final_output = conversation[-1] - assert "maximum number of rounds" in final_output.text.lower() - - async def test_round_limit_in_ingest_participant_message(self) -> None: - """Test round limit enforcement 
after participant response.""" - responses_received = {"count": 0} - - def selector(state: GroupChatState) -> str: - responses_received["count"] += 1 - if responses_received["count"] == 1: - return "agent" # First call selects agent - return "agent" # Try to continue, but should hit limit - - agent = StubAgent("agent", "response from agent") - - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector) - .participants([agent]) - .with_max_rounds(1) # Hit limit after first response - .build() - ) - - outputs: list[list[ChatMessage]] = [] - async for event in workflow.run_stream("test"): - if isinstance(event, WorkflowOutputEvent): - data = event.data - if isinstance(data, list): - outputs.append(cast(list[ChatMessage], data)) - - # Should have at least one output (the round limit message) - assert len(outputs) >= 1 - # The last message in the conversation should be about round limit - conversation = outputs[-1] - assert len(conversation) >= 1 - final_output = conversation[-1] - assert "maximum number of rounds" in final_output.text.lower() - - -async def test_group_chat_checkpoint_runtime_only() -> None: - """Test checkpointing configured ONLY at runtime, not at build time.""" - storage = InMemoryCheckpointStorage() - - agent_a = StubAgent("agentA", "Reply from A") - agent_b = StubAgent("agentB", "Reply from B") - selector = make_sequence_selector() - - wf = ( - GroupChatBuilder() - .participants([agent_a, agent_b]) - .with_orchestrator(selection_func=selector) - .with_max_rounds(2) - .build() - ) - - baseline_output: list[ChatMessage] | None = None - async for ev in wf.run_stream("runtime checkpoint test", checkpoint_storage=storage): - if isinstance(ev, WorkflowOutputEvent): - baseline_output = cast(list[ChatMessage], ev.data) if isinstance(ev.data, list) else None # type: ignore - if isinstance(ev, WorkflowStatusEvent) and ev.state in ( - WorkflowRunState.IDLE, - WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, - ): - break - - assert 
baseline_output is not None - - checkpoints = await storage.list_checkpoints() - assert len(checkpoints) > 0, "Runtime-only checkpointing should have created checkpoints" - - -async def test_group_chat_checkpoint_runtime_overrides_buildtime() -> None: - """Test that runtime checkpoint storage overrides build-time configuration.""" - import tempfile - - with tempfile.TemporaryDirectory() as temp_dir1, tempfile.TemporaryDirectory() as temp_dir2: - from agent_framework._workflows._checkpoint import FileCheckpointStorage - - buildtime_storage = FileCheckpointStorage(temp_dir1) - runtime_storage = FileCheckpointStorage(temp_dir2) - - agent_a = StubAgent("agentA", "Reply from A") - agent_b = StubAgent("agentB", "Reply from B") - selector = make_sequence_selector() - - wf = ( - GroupChatBuilder() - .participants([agent_a, agent_b]) - .with_orchestrator(selection_func=selector) - .with_max_rounds(2) - .with_checkpointing(buildtime_storage) - .build() - ) - baseline_output: list[ChatMessage] | None = None - async for ev in wf.run_stream("override test", checkpoint_storage=runtime_storage): - if isinstance(ev, WorkflowOutputEvent): - baseline_output = cast(list[ChatMessage], ev.data) if isinstance(ev.data, list) else None # type: ignore - if isinstance(ev, WorkflowStatusEvent) and ev.state in ( - WorkflowRunState.IDLE, - WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, - ): - break - - assert baseline_output is not None - - buildtime_checkpoints = await buildtime_storage.list_checkpoints() - runtime_checkpoints = await runtime_storage.list_checkpoints() - - assert len(runtime_checkpoints) > 0, "Runtime storage should have checkpoints" - assert len(buildtime_checkpoints) == 0, "Build-time storage should have no checkpoints when overridden" - - -async def test_group_chat_with_request_info_filtering(): - """Test that with_request_info(agents=[...]) only pauses before specified agents run.""" - # Create agents - we want to verify only beta triggers pause - alpha = StubAgent("alpha", 
"response from alpha") - beta = StubAgent("beta", "response from beta") - - # Manager that selects alpha first, then beta, then finishes - call_count = 0 - - async def selector(state: GroupChatState) -> str: - nonlocal call_count - call_count += 1 - if call_count == 1: - return "alpha" - if call_count == 2: - return "beta" - # Return to alpha to continue - return "alpha" - - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector, orchestrator_name="manager") - .participants([alpha, beta]) - .with_max_rounds(2) - .with_request_info(agents=["beta"]) # Only pause before beta runs - .build() - ) - - # Run until we get a request info event (should be before beta, not alpha) - request_events: list[RequestInfoEvent] = [] - async for event in workflow.run_stream("test task"): - if isinstance(event, RequestInfoEvent) and isinstance(event.data, AgentExecutorResponse): - request_events.append(event) - # Don't break - let stream complete naturally when paused - - # Should have exactly one request event before beta - assert len(request_events) == 1 - request_event = request_events[0] - - # The target agent should be beta's executor ID - assert isinstance(request_event.data, AgentExecutorResponse) - assert request_event.source_executor_id == "beta" - - # Continue the workflow with a response - outputs: list[WorkflowOutputEvent] = [] - async for event in workflow.send_responses_streaming({ - request_event.request_id: AgentRequestInfoResponse.approve() - }): - if isinstance(event, WorkflowOutputEvent): - outputs.append(event) - - # Workflow should complete - assert len(outputs) == 1 - - -async def test_group_chat_with_request_info_no_filter_pauses_all(): - """Test that with_request_info() without agents pauses before all participants.""" - # Create agents - alpha = StubAgent("alpha", "response from alpha") - - # Manager selects alpha then finishes - call_count = 0 - - async def selector(state: GroupChatState) -> str: - nonlocal call_count - call_count += 1 
- if call_count == 1: - return "alpha" - # Keep returning alpha to continue - return "alpha" - - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector, orchestrator_name="manager") - .participants([alpha]) - .with_max_rounds(1) - .with_request_info() # No filter - pause for all - .build() - ) - - # Run until we get a request info event - request_events: list[RequestInfoEvent] = [] - async for event in workflow.run_stream("test task"): - if isinstance(event, RequestInfoEvent) and isinstance(event.data, AgentExecutorResponse): - request_events.append(event) - break - - # Should pause before alpha - assert len(request_events) == 1 - assert request_events[0].source_executor_id == "alpha" - - -def test_group_chat_builder_with_request_info_returns_self(): - """Test that with_request_info() returns self for method chaining.""" - builder = GroupChatBuilder() - result = builder.with_request_info() - assert result is builder - - # Also test with agents parameter - builder2 = GroupChatBuilder() - result2 = builder2.with_request_info(agents=["test"]) - assert result2 is builder2 - - -# region Participant Factory Tests - - -def test_group_chat_builder_rejects_empty_participant_factories(): - """Test that GroupChatBuilder rejects empty participant_factories list.""" - - def selector(state: GroupChatState) -> str: - return list(state.participants.keys())[0] - - with pytest.raises(ValueError, match=r"participant_factories cannot be empty"): - GroupChatBuilder().register_participants([]) - - with pytest.raises( - ValueError, - match=r"No participants provided\. 
Call \.participants\(\) or \.register_participants\(\) first\.", - ): - GroupChatBuilder().with_orchestrator(selection_func=selector).build() - - -def test_group_chat_builder_rejects_mixing_participants_and_factories(): - """Test that mixing .participants() and .register_participants() raises an error.""" - alpha = StubAgent("alpha", "reply from alpha") - - # Case 1: participants first, then register_participants - with pytest.raises(ValueError, match="Cannot mix .participants"): - GroupChatBuilder().participants([alpha]).register_participants([lambda: StubAgent("beta", "reply from beta")]) - - # Case 2: register_participants first, then participants - with pytest.raises(ValueError, match="Cannot mix .participants"): - GroupChatBuilder().register_participants([lambda: alpha]).participants([StubAgent("beta", "reply from beta")]) - - -def test_group_chat_builder_rejects_multiple_calls_to_register_participants(): - """Test that multiple calls to .register_participants() raises an error.""" - with pytest.raises( - ValueError, match=r"register_participants\(\) has already been called on this builder instance." 
- ): - ( - GroupChatBuilder() - .register_participants([lambda: StubAgent("alpha", "reply from alpha")]) - .register_participants([lambda: StubAgent("beta", "reply from beta")]) - ) - - -def test_group_chat_builder_rejects_multiple_calls_to_participants(): - """Test that multiple calls to .participants() raises an error.""" - with pytest.raises(ValueError, match="participants have already been set"): - ( - GroupChatBuilder() - .participants([StubAgent("alpha", "reply from alpha")]) - .participants([StubAgent("beta", "reply from beta")]) - ) - - -async def test_group_chat_with_participant_factories(): - """Test workflow creation using participant_factories.""" - call_count = 0 - - def create_alpha() -> StubAgent: - nonlocal call_count - call_count += 1 - return StubAgent("alpha", "reply from alpha") - - def create_beta() -> StubAgent: - nonlocal call_count - call_count += 1 - return StubAgent("beta", "reply from beta") - - selector = make_sequence_selector() - - workflow = ( - GroupChatBuilder() - .register_participants([create_alpha, create_beta]) - .with_orchestrator(selection_func=selector) - .with_max_rounds(2) - .build() - ) - - # Factories should be called during build - assert call_count == 2 - - outputs: list[WorkflowOutputEvent] = [] - async for event in workflow.run_stream("coordinate task"): - if isinstance(event, WorkflowOutputEvent): - outputs.append(event) - - assert len(outputs) == 1 - - -async def test_group_chat_participant_factories_reusable_builder(): - """Test that the builder can be reused to build multiple workflows with factories.""" - call_count = 0 - - def create_alpha() -> StubAgent: - nonlocal call_count - call_count += 1 - return StubAgent("alpha", "reply from alpha") - - def create_beta() -> StubAgent: - nonlocal call_count - call_count += 1 - return StubAgent("beta", "reply from beta") - - selector = make_sequence_selector() - - builder = ( - GroupChatBuilder() - .register_participants([create_alpha, create_beta]) - 
.with_orchestrator(selection_func=selector) - .with_max_rounds(2) - ) - - # Build first workflow - wf1 = builder.build() - assert call_count == 2 - - # Build second workflow - wf2 = builder.build() - assert call_count == 4 - - # Verify that the two workflows have different agent instances - assert wf1.executors["alpha"] is not wf2.executors["alpha"] - assert wf1.executors["beta"] is not wf2.executors["beta"] - - -async def test_group_chat_participant_factories_with_checkpointing(): - """Test checkpointing with participant_factories.""" - storage = InMemoryCheckpointStorage() - - def create_alpha() -> StubAgent: - return StubAgent("alpha", "reply from alpha") - - def create_beta() -> StubAgent: - return StubAgent("beta", "reply from beta") - - selector = make_sequence_selector() - - workflow = ( - GroupChatBuilder() - .register_participants([create_alpha, create_beta]) - .with_orchestrator(selection_func=selector) - .with_checkpointing(storage) - .with_max_rounds(2) - .build() - ) - - outputs: list[WorkflowOutputEvent] = [] - async for event in workflow.run_stream("checkpoint test"): - if isinstance(event, WorkflowOutputEvent): - outputs.append(event) - - assert outputs, "Should have workflow output" - - checkpoints = await storage.list_checkpoints() - assert checkpoints, "Checkpoints should be created during workflow execution" - - -# endregion - -# region Orchestrator Factory Tests - - -def test_group_chat_builder_rejects_multiple_orchestrator_configurations(): - """Test that configuring multiple orchestrators raises ValueError.""" - - def selector(state: GroupChatState) -> str: - return list(state.participants.keys())[0] - - def agent_factory() -> ChatAgent: - return cast(ChatAgent, StubManagerAgent()) - - builder = GroupChatBuilder().with_orchestrator(selection_func=selector) - - # Already has a selection_func, should fail on second call - with pytest.raises(ValueError, match=r"A selection function has already been configured"): - 
builder.with_orchestrator(selection_func=selector) - - # Test with agent_factory - builder2 = GroupChatBuilder().with_orchestrator(agent=agent_factory) - with pytest.raises(ValueError, match=r"A factory has already been configured"): - builder2.with_orchestrator(agent=agent_factory) - - -def test_group_chat_builder_requires_exactly_one_orchestrator_option(): - """Test that exactly one orchestrator option must be provided.""" - - def selector(state: GroupChatState) -> str: - return list(state.participants.keys())[0] - - def agent_factory() -> ChatAgent: - return cast(ChatAgent, StubManagerAgent()) - - # No options provided - with pytest.raises(ValueError, match="Exactly one of"): - GroupChatBuilder().with_orchestrator() # type: ignore - - # Multiple options provided - with pytest.raises(ValueError, match="Exactly one of"): - GroupChatBuilder().with_orchestrator(selection_func=selector, agent=agent_factory) # type: ignore - - -async def test_group_chat_with_orchestrator_factory_returning_chat_agent(): - """Test workflow creation using orchestrator_factory that returns ChatAgent.""" - factory_call_count = 0 - - class DynamicManagerAgent(ChatAgent): - """Manager agent that dynamically selects from available participants.""" - - def __init__(self) -> None: - super().__init__(chat_client=MockChatClient(), name="dynamic_manager", description="Dynamic manager") - self._call_count = 0 - - async def run( - self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, - *, - thread: AgentThread | None = None, - **kwargs: Any, - ) -> AgentResponse: - if self._call_count == 0: - self._call_count += 1 - payload = { - "terminate": False, - "reason": "Selecting alpha", - "next_speaker": "alpha", - "final_message": None, - } - return AgentResponse( - messages=[ - ChatMessage( - role="assistant", - text=( - '{"terminate": false, "reason": "Selecting alpha", ' - '"next_speaker": "alpha", "final_message": null}' - ), - author_name=self.name, - ) - ], - 
value=payload, - ) - - payload = { - "terminate": True, - "reason": "Task complete", - "next_speaker": None, - "final_message": "dynamic manager final", - } - return AgentResponse( - messages=[ - ChatMessage( - role="assistant", - text=( - '{"terminate": true, "reason": "Task complete", ' - '"next_speaker": null, "final_message": "dynamic manager final"}' - ), - author_name=self.name, - ) - ], - value=payload, - ) - - def agent_factory() -> ChatAgent: - nonlocal factory_call_count - factory_call_count += 1 - return cast(ChatAgent, DynamicManagerAgent()) - - alpha = StubAgent("alpha", "reply from alpha") - beta = StubAgent("beta", "reply from beta") - - workflow = GroupChatBuilder().participants([alpha, beta]).with_orchestrator(agent=agent_factory).build() - - # Factory should be called during build - assert factory_call_count == 1 - - outputs: list[WorkflowOutputEvent] = [] - async for event in workflow.run_stream("coordinate task"): - if isinstance(event, WorkflowOutputEvent): - outputs.append(event) - - assert len(outputs) == 1 - # The DynamicManagerAgent terminates after second call with final_message - final_messages = outputs[0].data - assert isinstance(final_messages, list) - assert any( - msg.text == "dynamic manager final" - for msg in cast(list[ChatMessage], final_messages) - if msg.author_name == "dynamic_manager" - ) - - -def test_group_chat_with_orchestrator_factory_returning_base_orchestrator(): - """Test that orchestrator_factory returning BaseGroupChatOrchestrator is used as-is.""" - factory_call_count = 0 - selector = make_sequence_selector() - - def orchestrator_factory() -> BaseGroupChatOrchestrator: - nonlocal factory_call_count - factory_call_count += 1 - from agent_framework._workflows._base_group_chat_orchestrator import ParticipantRegistry - from agent_framework._workflows._group_chat import GroupChatOrchestrator - - # Create a custom orchestrator; when returning BaseGroupChatOrchestrator, - # the builder uses it as-is without modifying its 
participant registry - return GroupChatOrchestrator( - id="custom_orchestrator", - participant_registry=ParticipantRegistry([]), - selection_func=selector, - max_rounds=2, - ) - - alpha = StubAgent("alpha", "reply from alpha") - - workflow = GroupChatBuilder().participants([alpha]).with_orchestrator(orchestrator=orchestrator_factory).build() - - # Factory should be called during build - assert factory_call_count == 1 - # Verify the custom orchestrator is in the workflow - assert "custom_orchestrator" in workflow.executors - - -async def test_group_chat_orchestrator_factory_reusable_builder(): - """Test that the builder can be reused to build multiple workflows with orchestrator factory.""" - factory_call_count = 0 - - def agent_factory() -> ChatAgent: - nonlocal factory_call_count - factory_call_count += 1 - return cast(ChatAgent, StubManagerAgent()) - - alpha = StubAgent("alpha", "reply from alpha") - beta = StubAgent("beta", "reply from beta") - - builder = GroupChatBuilder().participants([alpha, beta]).with_orchestrator(agent=agent_factory) - - # Build first workflow - wf1 = builder.build() - assert factory_call_count == 1 - - # Build second workflow - wf2 = builder.build() - assert factory_call_count == 2 - - # Verify that the two workflows have different orchestrator instances - assert wf1.executors["manager_agent"] is not wf2.executors["manager_agent"] - - -def test_group_chat_orchestrator_factory_invalid_return_type(): - """Test that orchestrator_factory raising error for invalid return type.""" - - def invalid_factory() -> Any: - return "invalid type" - - alpha = StubAgent("alpha", "reply from alpha") - - with pytest.raises( - TypeError, - match=r"Orchestrator factory must return ChatAgent or BaseGroupChatOrchestrator instance", - ): - (GroupChatBuilder().participants([alpha]).with_orchestrator(orchestrator=invalid_factory).build()) - - with pytest.raises( - TypeError, - match=r"Orchestrator factory must return ChatAgent or BaseGroupChatOrchestrator 
instance", - ): - (GroupChatBuilder().participants([alpha]).with_orchestrator(agent=invalid_factory).build()) - - -def test_group_chat_with_both_participant_and_orchestrator_factories(): - """Test workflow creation using both participant_factories and orchestrator_factory.""" - participant_factory_call_count = 0 - agent_factory_call_count = 0 - - def create_alpha() -> StubAgent: - nonlocal participant_factory_call_count - participant_factory_call_count += 1 - return StubAgent("alpha", "reply from alpha") - - def create_beta() -> StubAgent: - nonlocal participant_factory_call_count - participant_factory_call_count += 1 - return StubAgent("beta", "reply from beta") - - def agent_factory() -> ChatAgent: - nonlocal agent_factory_call_count - agent_factory_call_count += 1 - return cast(ChatAgent, StubManagerAgent()) - - workflow = ( - GroupChatBuilder() - .register_participants([create_alpha, create_beta]) - .with_orchestrator(agent=agent_factory) - .build() - ) - - # All factories should be called during build - assert participant_factory_call_count == 2 - assert agent_factory_call_count == 1 - - # Verify all executors are present in the workflow - assert "alpha" in workflow.executors - assert "beta" in workflow.executors - assert "manager_agent" in workflow.executors - - -async def test_group_chat_factories_reusable_for_multiple_workflows(): - """Test that both factories are reused correctly for multiple workflow builds.""" - participant_factory_call_count = 0 - agent_factory_call_count = 0 - - def create_alpha() -> StubAgent: - nonlocal participant_factory_call_count - participant_factory_call_count += 1 - return StubAgent("alpha", "reply from alpha") - - def create_beta() -> StubAgent: - nonlocal participant_factory_call_count - participant_factory_call_count += 1 - return StubAgent("beta", "reply from beta") - - def agent_factory() -> ChatAgent: - nonlocal agent_factory_call_count - agent_factory_call_count += 1 - return cast(ChatAgent, StubManagerAgent()) - - 
builder = ( - GroupChatBuilder().register_participants([create_alpha, create_beta]).with_orchestrator(agent=agent_factory) - ) - - # Build first workflow - wf1 = builder.build() - assert participant_factory_call_count == 2 - assert agent_factory_call_count == 1 - - # Build second workflow - wf2 = builder.build() - assert participant_factory_call_count == 4 - assert agent_factory_call_count == 2 - - # Verify that the workflows have different agent and orchestrator instances - assert wf1.executors["alpha"] is not wf2.executors["alpha"] - assert wf1.executors["beta"] is not wf2.executors["beta"] - assert wf1.executors["manager_agent"] is not wf2.executors["manager_agent"] - - -# endregion diff --git a/python/packages/core/tests/workflow/test_handoff.py b/python/packages/core/tests/workflow/test_handoff.py deleted file mode 100644 index 962ab88f16..0000000000 --- a/python/packages/core/tests/workflow/test_handoff.py +++ /dev/null @@ -1,709 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -from collections.abc import AsyncIterable -from typing import Any, cast -from unittest.mock import AsyncMock, MagicMock - -import pytest - -from agent_framework import ( - ChatAgent, - ChatMessage, - ChatResponse, - ChatResponseUpdate, - Content, - HandoffAgentUserRequest, - HandoffBuilder, - RequestInfoEvent, - WorkflowEvent, - WorkflowOutputEvent, - resolve_agent_id, - use_function_invocation, -) - - -@use_function_invocation -class MockChatClient: - """Mock chat client for testing handoff workflows.""" - - additional_properties: dict[str, Any] - - def __init__( - self, - name: str, - *, - handoff_to: str | None = None, - ) -> None: - """Initialize the mock chat client. - - Args: - name: The name of the agent using this chat client. - handoff_to: The name of the agent to hand off to, or None for no handoff. - This is hardcoded for testing purposes so that the agent always attempts to hand off. 
- """ - self._name = name - self._handoff_to = handoff_to - self._call_index = 0 - - async def get_response(self, messages: Any, **kwargs: Any) -> ChatResponse: - contents = _build_reply_contents(self._name, self._handoff_to, self._next_call_id()) - reply = ChatMessage( - role="assistant", - contents=contents, - ) - return ChatResponse(messages=reply, response_id="mock_response") - - def get_streaming_response(self, messages: Any, **kwargs: Any) -> AsyncIterable[ChatResponseUpdate]: - async def _stream() -> AsyncIterable[ChatResponseUpdate]: - contents = _build_reply_contents(self._name, self._handoff_to, self._next_call_id()) - yield ChatResponseUpdate(contents=contents, role="assistant") - - return _stream() - - def _next_call_id(self) -> str | None: - if not self._handoff_to: - return None - call_id = f"{self._name}-handoff-{self._call_index}" - self._call_index += 1 - return call_id - - -def _build_reply_contents( - agent_name: str, - handoff_to: str | None, - call_id: str | None, -) -> list[Content]: - contents: list[Content] = [] - if handoff_to and call_id: - contents.append( - Content.from_function_call( - call_id=call_id, name=f"handoff_to_{handoff_to}", arguments={"handoff_to": handoff_to} - ) - ) - text = f"{agent_name} reply" - contents.append(Content.from_text(text=text)) - return contents - - -class MockHandoffAgent(ChatAgent): - """Mock agent that can hand off to another agent.""" - - def __init__( - self, - *, - name: str, - handoff_to: str | None = None, - ) -> None: - """Initialize the mock handoff agent. - - Args: - name: The name of the agent. - handoff_to: The name of the agent to hand off to, or None for no handoff. - This is hardcoded for testing purposes so that the agent always attempts to hand off. 
- """ - super().__init__(chat_client=MockChatClient(name, handoff_to=handoff_to), name=name, id=name) - - -async def _drain(stream: AsyncIterable[WorkflowEvent]) -> list[WorkflowEvent]: - return [event async for event in stream] - - -async def test_handoff(): - """Test that agents can hand off to each other.""" - - # `triage` hands off to `specialist`, who then hands off to `escalation`. - # `escalation` has no handoff, so the workflow should request user input to continue. - triage = MockHandoffAgent(name="triage", handoff_to="specialist") - specialist = MockHandoffAgent(name="specialist", handoff_to="escalation") - escalation = MockHandoffAgent(name="escalation") - - # Without explicitly defining handoffs, the builder will create connections - # between all agents. - workflow = ( - HandoffBuilder(participants=[triage, specialist, escalation]) - .with_start_agent(triage) - .with_termination_condition(lambda conv: sum(1 for m in conv if m.role == "user") >= 2) - .build() - ) - - # Start conversation - triage hands off to specialist then escalation - # escalation won't trigger a handoff, so the response from it will become - # a request for user input because autonomous mode is not enabled by default. 
- events = await _drain(workflow.run_stream("Need technical support")) - requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] - - assert requests - assert len(requests) == 1 - - request = requests[0] - assert isinstance(request.data, HandoffAgentUserRequest) - assert request.source_executor_id == escalation.name - - -async def test_autonomous_mode_yields_output_without_user_request(): - """Ensure autonomous interaction mode yields output without requesting user input.""" - triage = MockHandoffAgent(name="triage", handoff_to="specialist") - specialist = MockHandoffAgent(name="specialist") - - workflow = ( - HandoffBuilder(participants=[triage, specialist]) - .with_start_agent(triage) - # Since specialist has no handoff, the specialist will be generating normal responses. - # With autonomous mode, this should continue until the termination condition is met. - .with_autonomous_mode( - agents=[specialist], - turn_limits={resolve_agent_id(specialist): 1}, - ) - # This termination condition ensures the workflow runs through both agents. - # First message is the user message to triage, second is triage's response, which - # is a handoff to specialist, third is specialist's response that should not request - # user input due to autonomous mode. Fourth message will come from the specialist - # again and will trigger termination. 
- .with_termination_condition(lambda conv: len(conv) >= 4) - .build() - ) - - events = await _drain(workflow.run_stream("Package arrived broken")) - requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] - assert not requests, "Autonomous mode should not request additional user input" - - outputs = [ev for ev in events if isinstance(ev, WorkflowOutputEvent)] - assert outputs, "Autonomous mode should yield a workflow output" - - final_conversation = outputs[-1].data - assert isinstance(final_conversation, list) - conversation_list = cast(list[ChatMessage], final_conversation) - assert any(msg.role == "assistant" and (msg.text or "").startswith("specialist reply") for msg in conversation_list) - - -async def test_autonomous_mode_resumes_user_input_on_turn_limit(): - """Autonomous mode should resume user input request when turn limit is reached.""" - triage = MockHandoffAgent(name="triage", handoff_to="worker") - worker = MockHandoffAgent(name="worker") - - workflow = ( - HandoffBuilder(participants=[triage, worker]) - .with_start_agent(triage) - .with_autonomous_mode(agents=[worker], turn_limits={resolve_agent_id(worker): 2}) - .with_termination_condition(lambda conv: False) - .build() - ) - - events = await _drain(workflow.run_stream("Start")) - requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] - assert requests and len(requests) == 1, "Turn limit should force a user input request" - assert requests[0].source_executor_id == worker.name - - -def test_build_fails_without_start_agent(): - """Verify that build() raises ValueError when with_start_agent() was not called.""" - triage = MockHandoffAgent(name="triage") - specialist = MockHandoffAgent(name="specialist") - - with pytest.raises(ValueError, match=r"Must call with_start_agent\(...\) before building the workflow."): - HandoffBuilder(participants=[triage, specialist]).build() - - -def test_build_fails_without_participants(): - """Verify that build() raises ValueError when no 
participants are provided.""" - with pytest.raises( - ValueError, match=r"No participants provided\. Call \.participants\(\) or \.register_participants\(\) first." - ): - HandoffBuilder().build() - - -async def test_handoff_async_termination_condition() -> None: - """Test that async termination conditions work correctly.""" - termination_call_count = 0 - - async def async_termination(conv: list[ChatMessage]) -> bool: - nonlocal termination_call_count - termination_call_count += 1 - user_count = sum(1 for msg in conv if msg.role == "user") - return user_count >= 2 - - coordinator = MockHandoffAgent(name="coordinator", handoff_to="worker") - worker = MockHandoffAgent(name="worker") - - workflow = ( - HandoffBuilder(participants=[coordinator, worker]) - .with_start_agent(coordinator) - .with_termination_condition(async_termination) - .build() - ) - - events = await _drain(workflow.run_stream("First user message")) - requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] - assert requests - - events = await _drain( - workflow.send_responses_streaming({requests[-1].request_id: [ChatMessage("user", ["Second user message"])]}) - ) - outputs = [ev for ev in events if isinstance(ev, WorkflowOutputEvent)] - assert len(outputs) == 1 - - final_conversation = outputs[0].data - assert isinstance(final_conversation, list) - final_conv_list = cast(list[ChatMessage], final_conversation) - user_messages = [msg for msg in final_conv_list if msg.role == "user"] - assert len(user_messages) == 2 - assert termination_call_count > 0 - - -async def test_tool_choice_preserved_from_agent_config(): - """Verify that agent-level tool_choice configuration is preserved and not overridden.""" - # Create a mock chat client that records the tool_choice used - recorded_tool_choices: list[Any] = [] - - async def mock_get_response(messages: Any, options: dict[str, Any] | None = None, **kwargs: Any) -> ChatResponse: - if options: - recorded_tool_choices.append(options.get("tool_choice")) - 
return ChatResponse( - messages=[ChatMessage("assistant", ["Response"])], - response_id="test_response", - ) - - mock_client = MagicMock() - mock_client.get_response = AsyncMock(side_effect=mock_get_response) - - # Create agent with specific tool_choice configuration via default_options - agent = ChatAgent( - chat_client=mock_client, - name="test_agent", - default_options={"tool_choice": {"mode": "required"}}, # type: ignore - ) - - # Run the agent - await agent.run("Test message") - - # Verify tool_choice was preserved - assert len(recorded_tool_choices) > 0, "No tool_choice recorded" - last_tool_choice = recorded_tool_choices[-1] - assert last_tool_choice is not None, "tool_choice should not be None" - assert last_tool_choice == {"mode": "required"}, f"Expected 'required', got {last_tool_choice}" - - -# region Participant Factory Tests - - -def test_handoff_builder_rejects_empty_participant_factories(): - """Test that HandoffBuilder rejects empty participant_factories dictionary.""" - # Empty factories are rejected immediately when calling participant_factories() - with pytest.raises(ValueError, match=r"participant_factories cannot be empty"): - HandoffBuilder().register_participants({}) - - with pytest.raises( - ValueError, match=r"No participants provided\. Call \.participants\(\) or \.register_participants\(\) first\." 
- ): - HandoffBuilder(participant_factories={}).build() - - -def test_handoff_builder_rejects_mixing_participants_and_factories(): - """Test that mixing participants and participant_factories in __init__ raises an error.""" - triage = MockHandoffAgent(name="triage") - with pytest.raises(ValueError, match="Cannot mix .participants"): - HandoffBuilder(participants=[triage], participant_factories={"triage": lambda: triage}) - - -def test_handoff_builder_rejects_mixing_participants_and_participant_factories_methods(): - """Test that mixing .participants() and .participant_factories() raises an error.""" - triage = MockHandoffAgent(name="triage") - - # Case 1: participants first, then participant_factories - with pytest.raises(ValueError, match="Cannot mix .participants"): - HandoffBuilder(participants=[triage]).register_participants({ - "specialist": lambda: MockHandoffAgent(name="specialist") - }) - - # Case 2: participant_factories first, then participants - with pytest.raises(ValueError, match="Cannot mix .participants"): - HandoffBuilder(participant_factories={"triage": lambda: triage}).participants([ - MockHandoffAgent(name="specialist") - ]) - - # Case 3: participants(), then participant_factories() - with pytest.raises(ValueError, match="Cannot mix .participants"): - HandoffBuilder().participants([triage]).register_participants({ - "specialist": lambda: MockHandoffAgent(name="specialist") - }) - - # Case 4: participant_factories(), then participants() - with pytest.raises(ValueError, match="Cannot mix .participants"): - HandoffBuilder().register_participants({"triage": lambda: triage}).participants([ - MockHandoffAgent(name="specialist") - ]) - - # Case 5: mix during initialization - with pytest.raises(ValueError, match="Cannot mix .participants"): - HandoffBuilder( - participants=[triage], participant_factories={"specialist": lambda: MockHandoffAgent(name="specialist")} - ) - - -def test_handoff_builder_rejects_multiple_calls_to_participant_factories(): - 
"""Test that multiple calls to .participant_factories() raises an error.""" - with pytest.raises( - ValueError, match=r"register_participants\(\) has already been called on this builder instance." - ): - ( - HandoffBuilder() - .register_participants({"agent1": lambda: MockHandoffAgent(name="agent1")}) - .register_participants({"agent2": lambda: MockHandoffAgent(name="agent2")}) - ) - - -def test_handoff_builder_rejects_multiple_calls_to_participants(): - """Test that multiple calls to .participants() raises an error.""" - with pytest.raises(ValueError, match="participants have already been assigned"): - ( - HandoffBuilder() - .participants([MockHandoffAgent(name="agent1")]) - .participants([MockHandoffAgent(name="agent2")]) - ) - - -def test_handoff_builder_rejects_instance_coordinator_with_factories(): - """Test that using an agent instance for set_coordinator when using factories raises an error.""" - - def create_triage() -> MockHandoffAgent: - return MockHandoffAgent(name="triage") - - def create_specialist() -> MockHandoffAgent: - return MockHandoffAgent(name="specialist") - - # Create an agent instance - coordinator_instance = MockHandoffAgent(name="coordinator") - - with pytest.raises(ValueError, match=r"Call participants\(\.\.\.\) before with_start_agent\(\.\.\.\)"): - ( - HandoffBuilder( - participant_factories={"triage": create_triage, "specialist": create_specialist} - ).with_start_agent(coordinator_instance) # Instance, not factory name - ) - - -def test_handoff_builder_rejects_factory_name_coordinator_with_instances(): - """Test that using a factory name for set_coordinator when using instances raises an error.""" - triage = MockHandoffAgent(name="triage") - specialist = MockHandoffAgent(name="specialist") - - with pytest.raises(ValueError, match=r"Call register_participants\(...\) before with_start_agent\(...\)"): - ( - HandoffBuilder(participants=[triage, specialist]).with_start_agent( - "triage" - ) # String factory name, not instance - ) - - -def 
test_handoff_builder_rejects_mixed_types_in_add_handoff_source(): - """Test that add_handoff rejects factory name source with instance-based participants.""" - triage = MockHandoffAgent(name="triage") - specialist = MockHandoffAgent(name="specialist") - - with pytest.raises(TypeError, match="Cannot mix factory names \\(str\\) and AgentProtocol.*instances"): - ( - HandoffBuilder(participants=[triage, specialist]) - .with_start_agent(triage) - .add_handoff("triage", [specialist]) # String source with instance participants - ) - - -def test_handoff_builder_accepts_all_factory_names_in_add_handoff(): - """Test that add_handoff accepts all factory names when using participant_factories.""" - - def create_triage() -> MockHandoffAgent: - return MockHandoffAgent(name="triage") - - def create_specialist_a() -> MockHandoffAgent: - return MockHandoffAgent(name="specialist_a") - - def create_specialist_b() -> MockHandoffAgent: - return MockHandoffAgent(name="specialist_b") - - # This should work - all strings with participant_factories - builder = ( - HandoffBuilder( - participant_factories={ - "triage": create_triage, - "specialist_a": create_specialist_a, - "specialist_b": create_specialist_b, - } - ) - .with_start_agent("triage") - .add_handoff("triage", ["specialist_a", "specialist_b"]) - ) - - workflow = builder.build() - assert "triage" in workflow.executors - assert "specialist_a" in workflow.executors - assert "specialist_b" in workflow.executors - - -def test_handoff_builder_accepts_all_instances_in_add_handoff(): - """Test that add_handoff accepts all instances when using participants.""" - triage = MockHandoffAgent(name="triage", handoff_to="specialist_a") - specialist_a = MockHandoffAgent(name="specialist_a") - specialist_b = MockHandoffAgent(name="specialist_b") - - # This should work - all instances with participants - builder = ( - HandoffBuilder(participants=[triage, specialist_a, specialist_b]) - .with_start_agent(triage) - .add_handoff(triage, [specialist_a, 
specialist_b]) - ) - - workflow = builder.build() - assert "triage" in workflow.executors - assert "specialist_a" in workflow.executors - assert "specialist_b" in workflow.executors - - -async def test_handoff_with_participant_factories(): - """Test workflow creation using participant_factories.""" - call_count = 0 - - def create_triage() -> MockHandoffAgent: - nonlocal call_count - call_count += 1 - return MockHandoffAgent(name="triage", handoff_to="specialist") - - def create_specialist() -> MockHandoffAgent: - nonlocal call_count - call_count += 1 - return MockHandoffAgent(name="specialist") - - workflow = ( - HandoffBuilder(participant_factories={"triage": create_triage, "specialist": create_specialist}) - .with_start_agent("triage") - .with_termination_condition(lambda conv: sum(1 for m in conv if m.role == "user") >= 2) - .build() - ) - - # Factories should be called during build - assert call_count == 2 - - events = await _drain(workflow.run_stream("Need help")) - requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] - assert requests - - # Follow-up message - events = await _drain( - workflow.send_responses_streaming({requests[-1].request_id: [ChatMessage("user", ["More details"])]}) - ) - outputs = [ev for ev in events if isinstance(ev, WorkflowOutputEvent)] - assert outputs - - -async def test_handoff_participant_factories_reusable_builder(): - """Test that the builder can be reused to build multiple workflows with factories.""" - call_count = 0 - - def create_triage() -> MockHandoffAgent: - nonlocal call_count - call_count += 1 - return MockHandoffAgent(name="triage", handoff_to="specialist") - - def create_specialist() -> MockHandoffAgent: - nonlocal call_count - call_count += 1 - return MockHandoffAgent(name="specialist") - - builder = HandoffBuilder( - participant_factories={"triage": create_triage, "specialist": create_specialist} - ).with_start_agent("triage") - - # Build first workflow - wf1 = builder.build() - assert call_count == 2 
- - # Build second workflow - wf2 = builder.build() - assert call_count == 4 - - # Verify that the two workflows have different agent instances - assert wf1.executors["triage"] is not wf2.executors["triage"] - assert wf1.executors["specialist"] is not wf2.executors["specialist"] - - -async def test_handoff_with_participant_factories_and_add_handoff(): - """Test that .add_handoff() works correctly with participant_factories.""" - - def create_triage() -> MockHandoffAgent: - return MockHandoffAgent(name="triage", handoff_to="specialist_a") - - def create_specialist_a() -> MockHandoffAgent: - return MockHandoffAgent(name="specialist_a", handoff_to="specialist_b") - - def create_specialist_b() -> MockHandoffAgent: - return MockHandoffAgent(name="specialist_b") - - workflow = ( - HandoffBuilder( - participant_factories={ - "triage": create_triage, - "specialist_a": create_specialist_a, - "specialist_b": create_specialist_b, - } - ) - .with_start_agent("triage") - .add_handoff("triage", ["specialist_a", "specialist_b"]) - .add_handoff("specialist_a", ["specialist_b"]) - .with_termination_condition(lambda conv: sum(1 for m in conv if m.role == "user") >= 3) - .build() - ) - - # Start conversation - triage hands off to specialist_a - events = await _drain(workflow.run_stream("Initial request")) - requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] - assert requests - - # Verify specialist_a executor exists and was called - assert "specialist_a" in workflow.executors - - # Second user message - specialist_a hands off to specialist_b - events = await _drain( - workflow.send_responses_streaming({requests[-1].request_id: [ChatMessage("user", ["Need escalation"])]}) - ) - requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] - assert requests - - # Verify specialist_b executor exists - assert "specialist_b" in workflow.executors - - -async def test_handoff_participant_factories_with_checkpointing(): - """Test checkpointing with 
participant_factories.""" - from agent_framework._workflows._checkpoint import InMemoryCheckpointStorage - - storage = InMemoryCheckpointStorage() - - def create_triage() -> MockHandoffAgent: - return MockHandoffAgent(name="triage", handoff_to="specialist") - - def create_specialist() -> MockHandoffAgent: - return MockHandoffAgent(name="specialist") - - workflow = ( - HandoffBuilder(participant_factories={"triage": create_triage, "specialist": create_specialist}) - .with_start_agent("triage") - .with_checkpointing(storage) - .with_termination_condition(lambda conv: sum(1 for m in conv if m.role == "user") >= 2) - .build() - ) - - # Run workflow and capture output - events = await _drain(workflow.run_stream("checkpoint test")) - requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] - assert requests - - events = await _drain( - workflow.send_responses_streaming({requests[-1].request_id: [ChatMessage("user", ["follow up"])]}) - ) - outputs = [ev for ev in events if isinstance(ev, WorkflowOutputEvent)] - assert outputs, "Should have workflow output after termination condition is met" - - # List checkpoints - just verify they were created - checkpoints = await storage.list_checkpoints() - assert checkpoints, "Checkpoints should be created during workflow execution" - - -def test_handoff_set_coordinator_with_factory_name(): - """Test that set_coordinator accepts factory name as string.""" - - def create_triage() -> MockHandoffAgent: - return MockHandoffAgent(name="triage") - - def create_specialist() -> MockHandoffAgent: - return MockHandoffAgent(name="specialist") - - builder = HandoffBuilder( - participant_factories={"triage": create_triage, "specialist": create_specialist} - ).with_start_agent("triage") - - workflow = builder.build() - assert "triage" in workflow.executors - - -def test_handoff_add_handoff_with_factory_names(): - """Test that add_handoff accepts factory names as strings.""" - - def create_triage() -> MockHandoffAgent: - return 
MockHandoffAgent(name="triage", handoff_to="specialist_a") - - def create_specialist_a() -> MockHandoffAgent: - return MockHandoffAgent(name="specialist_a") - - def create_specialist_b() -> MockHandoffAgent: - return MockHandoffAgent(name="specialist_b") - - builder = ( - HandoffBuilder( - participant_factories={ - "triage": create_triage, - "specialist_a": create_specialist_a, - "specialist_b": create_specialist_b, - } - ) - .with_start_agent("triage") - .add_handoff("triage", ["specialist_a", "specialist_b"]) - ) - - workflow = builder.build() - assert "triage" in workflow.executors - assert "specialist_a" in workflow.executors - assert "specialist_b" in workflow.executors - - -async def test_handoff_participant_factories_autonomous_mode(): - """Test autonomous mode with participant_factories.""" - - def create_triage() -> MockHandoffAgent: - return MockHandoffAgent(name="triage", handoff_to="specialist") - - def create_specialist() -> MockHandoffAgent: - return MockHandoffAgent(name="specialist") - - workflow = ( - HandoffBuilder(participant_factories={"triage": create_triage, "specialist": create_specialist}) - .with_start_agent("triage") - .with_autonomous_mode(agents=["specialist"], turn_limits={"specialist": 1}) - .build() - ) - - events = await _drain(workflow.run_stream("Issue")) - requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] - assert requests and len(requests) == 1 - assert requests[0].source_executor_id == "specialist" - - -def test_handoff_participant_factories_invalid_coordinator_name(): - """Test that set_coordinator raises error for non-existent factory name.""" - - def create_triage() -> MockHandoffAgent: - return MockHandoffAgent(name="triage") - - with pytest.raises( - ValueError, match="Start agent factory name 'nonexistent' is not in the participant_factories list" - ): - (HandoffBuilder(participant_factories={"triage": create_triage}).with_start_agent("nonexistent").build()) - - -def 
test_handoff_participant_factories_invalid_handoff_target(): - """Test that add_handoff raises error for non-existent target factory name.""" - - def create_triage() -> MockHandoffAgent: - return MockHandoffAgent(name="triage") - - def create_specialist() -> MockHandoffAgent: - return MockHandoffAgent(name="specialist") - - with pytest.raises(ValueError, match="Target factory name 'nonexistent' is not in the participant_factories list"): - ( - HandoffBuilder(participant_factories={"triage": create_triage, "specialist": create_specialist}) - .with_start_agent("triage") - .add_handoff("triage", ["nonexistent"]) - .build() - ) - - -# endregion Participant Factory Tests diff --git a/python/packages/core/tests/workflow/test_magentic.py b/python/packages/core/tests/workflow/test_magentic.py deleted file mode 100644 index 8f116aa1ad..0000000000 --- a/python/packages/core/tests/workflow/test_magentic.py +++ /dev/null @@ -1,1299 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -import sys -from collections.abc import AsyncIterable, Sequence -from dataclasses import dataclass -from typing import Any, ClassVar, cast - -import pytest - -from agent_framework import ( - AgentProtocol, - AgentResponse, - AgentResponseUpdate, - AgentRunUpdateEvent, - AgentThread, - BaseAgent, - ChatMessage, - Content, - Executor, - GroupChatRequestMessage, - MagenticBuilder, - MagenticContext, - MagenticManagerBase, - MagenticOrchestrator, - MagenticOrchestratorEvent, - MagenticPlanReviewRequest, - MagenticProgressLedger, - MagenticProgressLedgerItem, - RequestInfoEvent, - StandardMagenticManager, - Workflow, - WorkflowCheckpoint, - WorkflowCheckpointException, - WorkflowContext, - WorkflowEvent, - WorkflowOutputEvent, - WorkflowRunState, - WorkflowStatusEvent, - handler, -) -from agent_framework._workflows._checkpoint import InMemoryCheckpointStorage - -if sys.version_info >= (3, 12): - from typing import override # type: ignore # pragma: no cover -else: - from typing_extensions import 
override # type: ignore # pragma: no cover - - -def test_magentic_context_reset_behavior(): - ctx = MagenticContext( - task="task", - participant_descriptions={"Alice": "Researcher"}, - ) - # seed context state - ctx.chat_history.append(ChatMessage("assistant", ["draft"])) - ctx.stall_count = 2 - prev_reset = ctx.reset_count - - ctx.reset() - - assert ctx.chat_history == [] - assert ctx.stall_count == 0 - assert ctx.reset_count == prev_reset + 1 - - -@dataclass -class _SimpleLedger: - facts: ChatMessage - plan: ChatMessage - - -class FakeManager(MagenticManagerBase): - """Deterministic manager for tests that avoids real LLM calls.""" - - FINAL_ANSWER: ClassVar[str] = "FINAL" - - def __init__( - self, - *, - max_stall_count: int = 3, - max_reset_count: int | None = None, - max_round_count: int | None = None, - ) -> None: - super().__init__( - max_stall_count=max_stall_count, - max_reset_count=max_reset_count, - max_round_count=max_round_count, - ) - self.name = "magentic_manager" - self.task_ledger: _SimpleLedger | None = None - self.next_speaker_name: str = "agentA" - self.instruction_text: str = "Proceed with step 1" - - @override - def on_checkpoint_save(self) -> dict[str, Any]: - state = super().on_checkpoint_save() - if self.task_ledger is not None: - state = dict(state) - state["task_ledger"] = { - "facts": self.task_ledger.facts.to_dict(), - "plan": self.task_ledger.plan.to_dict(), - } - return state - - @override - def on_checkpoint_restore(self, state: dict[str, Any]) -> None: - super().on_checkpoint_restore(state) - ledger_state = state.get("task_ledger") - if isinstance(ledger_state, dict): - ledger_dict = cast(dict[str, Any], ledger_state) - facts_payload = cast(dict[str, Any] | None, ledger_dict.get("facts")) - plan_payload = cast(dict[str, Any] | None, ledger_dict.get("plan")) - if facts_payload is not None and plan_payload is not None: - try: - facts = ChatMessage.from_dict(facts_payload) - plan = ChatMessage.from_dict(plan_payload) - self.task_ledger 
= _SimpleLedger(facts=facts, plan=plan) - except Exception: # pragma: no cover - defensive - pass - - async def plan(self, magentic_context: MagenticContext) -> ChatMessage: - facts = ChatMessage("assistant", ["GIVEN OR VERIFIED FACTS\n- A\n"]) - plan = ChatMessage("assistant", ["- Do X\n- Do Y\n"]) - self.task_ledger = _SimpleLedger(facts=facts, plan=plan) - combined = f"Task: {magentic_context.task}\n\nFacts:\n{facts.text}\n\nPlan:\n{plan.text}" - return ChatMessage("assistant", [combined], author_name=self.name) - - async def replan(self, magentic_context: MagenticContext) -> ChatMessage: - facts = ChatMessage("assistant", ["GIVEN OR VERIFIED FACTS\n- A2\n"]) - plan = ChatMessage("assistant", ["- Do Z\n"]) - self.task_ledger = _SimpleLedger(facts=facts, plan=plan) - combined = f"Task: {magentic_context.task}\n\nFacts:\n{facts.text}\n\nPlan:\n{plan.text}" - return ChatMessage("assistant", [combined], author_name=self.name) - - async def create_progress_ledger(self, magentic_context: MagenticContext) -> MagenticProgressLedger: - # At least two messages in chat history means request is satisfied for testing - is_satisfied = len(magentic_context.chat_history) > 1 - return MagenticProgressLedger( - is_request_satisfied=MagenticProgressLedgerItem(reason="test", answer=is_satisfied), - is_in_loop=MagenticProgressLedgerItem(reason="test", answer=False), - is_progress_being_made=MagenticProgressLedgerItem(reason="test", answer=True), - next_speaker=MagenticProgressLedgerItem(reason="test", answer=self.next_speaker_name), - instruction_or_question=MagenticProgressLedgerItem(reason="test", answer=self.instruction_text), - ) - - async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatMessage: - return ChatMessage("assistant", [self.FINAL_ANSWER], author_name=self.name) - - -class StubAgent(BaseAgent): - def __init__(self, agent_name: str, reply_text: str, **kwargs: Any) -> None: - super().__init__(name=agent_name, description=f"Stub agent 
{agent_name}", **kwargs) - self._reply_text = reply_text - - async def run( # type: ignore[override] - self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, - *, - thread: AgentThread | None = None, - **kwargs: Any, - ) -> AgentResponse: - response = ChatMessage("assistant", [self._reply_text], author_name=self.name) - return AgentResponse(messages=[response]) - - def run_stream( # type: ignore[override] - self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, - *, - thread: AgentThread | None = None, - **kwargs: Any, - ) -> AsyncIterable[AgentResponseUpdate]: - async def _stream() -> AsyncIterable[AgentResponseUpdate]: - yield AgentResponseUpdate( - contents=[Content.from_text(text=self._reply_text)], role="assistant", author_name=self.name - ) - - return _stream() - - -class DummyExec(Executor): - def __init__(self, name: str) -> None: - super().__init__(name) - - @handler - async def _noop( - self, message: GroupChatRequestMessage, ctx: WorkflowContext[ChatMessage] - ) -> None: # pragma: no cover - not called - pass - - -async def test_magentic_builder_returns_workflow_and_runs() -> None: - manager = FakeManager() - agent = StubAgent(manager.next_speaker_name, "first draft") - - workflow = MagenticBuilder().participants([agent]).with_manager(manager=manager).build() - - assert isinstance(workflow, Workflow) - - outputs: list[ChatMessage] = [] - orchestrator_event_count = 0 - async for event in workflow.run_stream("compose summary"): - if isinstance(event, WorkflowOutputEvent): - msg = event.data - if isinstance(msg, list): - outputs.extend(cast(list[ChatMessage], msg)) - elif isinstance(event, MagenticOrchestratorEvent): - orchestrator_event_count += 1 - - assert outputs, "Expected a final output message" - assert len(outputs) >= 1 - final = outputs[-1] - assert final.text == manager.FINAL_ANSWER - assert final.author_name == manager.name - assert orchestrator_event_count > 0, "Expected orchestrator events to 
be emitted" - - -async def test_magentic_as_agent_does_not_accept_conversation() -> None: - manager = FakeManager() - writer = StubAgent(manager.next_speaker_name, "summary response") - - workflow = MagenticBuilder().participants([writer]).with_manager(manager=manager).build() - - agent = workflow.as_agent(name="magentic-agent") - conversation = [ - ChatMessage("system", ["Guidelines"], author_name="system"), - ChatMessage("user", ["Summarize the findings"], author_name="requester"), - ] - with pytest.raises(ValueError, match="Magentic only support a single task message to start the workflow."): - await agent.run(conversation) - - -async def test_standard_manager_plan_and_replan_combined_ledger(): - manager = FakeManager() - ctx = MagenticContext( - task="demo task", - participant_descriptions={"agentA": "Agent A"}, - ) - - first = await manager.plan(ctx.clone()) - assert first.role == "assistant" and "Facts:" in first.text and "Plan:" in first.text - assert manager.task_ledger is not None - - replanned = await manager.replan(ctx.clone()) - assert "A2" in replanned.text or "Do Z" in replanned.text - - -async def test_magentic_workflow_plan_review_approval_to_completion(): - manager = FakeManager() - wf = MagenticBuilder().participants([DummyExec("agentA")]).with_manager(manager=manager).with_plan_review().build() - - req_event: RequestInfoEvent | None = None - async for ev in wf.run_stream("do work"): - if isinstance(ev, RequestInfoEvent) and ev.request_type is MagenticPlanReviewRequest: - req_event = ev - assert req_event is not None - assert isinstance(req_event.data, MagenticPlanReviewRequest) - - completed = False - output: list[ChatMessage] | None = None - async for ev in wf.send_responses_streaming(responses={req_event.request_id: req_event.data.approve()}): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: - completed = True - elif isinstance(ev, WorkflowOutputEvent): - output = ev.data # type: ignore[assignment] - if completed 
and output is not None: - break - - assert completed - assert output is not None - assert isinstance(output, list) - assert all(isinstance(msg, ChatMessage) for msg in output) - - -async def test_magentic_plan_review_with_revise(): - class CountingManager(FakeManager): - # Declare as a model field so assignment is allowed under Pydantic - replan_count: int = 0 - - def __init__(self, *args, **kwargs) -> None: # type: ignore[no-untyped-def] - super().__init__(*args, **kwargs) - - async def replan(self, magentic_context: MagenticContext) -> ChatMessage: # type: ignore[override] - self.replan_count += 1 - return await super().replan(magentic_context) - - manager = CountingManager() - wf = ( - MagenticBuilder() - .participants([DummyExec(name=manager.next_speaker_name)]) - .with_manager(manager=manager) - .with_plan_review() - .build() - ) - - # Wait for the initial plan review request - req_event: RequestInfoEvent | None = None - async for ev in wf.run_stream("do work"): - if isinstance(ev, RequestInfoEvent) and ev.request_type is MagenticPlanReviewRequest: - req_event = ev - assert req_event is not None - assert isinstance(req_event.data, MagenticPlanReviewRequest) - - # Send a revise response - saw_second_review = False - completed = False - async for ev in wf.send_responses_streaming( - responses={req_event.request_id: req_event.data.revise("Looks good; consider Z")} - ): - if isinstance(ev, RequestInfoEvent) and ev.request_type is MagenticPlanReviewRequest: - saw_second_review = True - req_event = ev - - # Approve the second review - async for ev in wf.send_responses_streaming( - responses={req_event.request_id: req_event.data.approve()} # type: ignore[union-attr] - ): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: - completed = True - break - - assert completed - assert manager.replan_count >= 1 - assert saw_second_review is True - # Replan from FakeManager updates facts/plan to include A2 / Do Z - assert manager.task_ledger is not 
None - combined_text = (manager.task_ledger.facts.text or "") + (manager.task_ledger.plan.text or "") - assert ("A2" in combined_text) or ("Do Z" in combined_text) - - -async def test_magentic_orchestrator_round_limit_produces_partial_result(): - manager = FakeManager(max_round_count=1) - wf = ( - MagenticBuilder() - .participants([DummyExec(name=manager.next_speaker_name)]) - .with_manager(manager=manager) - .build() - ) - - events: list[WorkflowEvent] = [] - async for ev in wf.run_stream("round limit test"): - events.append(ev) - - idle_status = next( - (e for e in events if isinstance(e, WorkflowStatusEvent) and e.state == WorkflowRunState.IDLE), - None, - ) - assert idle_status is not None - # Check that we got workflow output via WorkflowOutputEvent - output_event = next((e for e in events if isinstance(e, WorkflowOutputEvent)), None) - assert output_event is not None - data = output_event.data - assert isinstance(data, list) - assert len(data) > 0 # type: ignore - assert data[-1].role == "assistant" # type: ignore - assert all(isinstance(msg, ChatMessage) for msg in data) # type: ignore - - -async def test_magentic_checkpoint_resume_round_trip(): - storage = InMemoryCheckpointStorage() - - manager1 = FakeManager() - wf = ( - MagenticBuilder() - .participants([DummyExec(name=manager1.next_speaker_name)]) - .with_manager(manager=manager1) - .with_plan_review() - .with_checkpointing(storage) - .build() - ) - - task_text = "checkpoint task" - req_event: RequestInfoEvent | None = None - async for ev in wf.run_stream(task_text): - if isinstance(ev, RequestInfoEvent) and ev.request_type is MagenticPlanReviewRequest: - req_event = ev - assert req_event is not None - assert isinstance(req_event.data, MagenticPlanReviewRequest) - - checkpoints = await storage.list_checkpoints() - assert checkpoints - checkpoints.sort(key=lambda cp: cp.timestamp) - resume_checkpoint = checkpoints[-1] - - manager2 = FakeManager() - wf_resume = ( - MagenticBuilder() - 
.participants([DummyExec(name=manager2.next_speaker_name)]) - .with_manager(manager=manager2) - .with_plan_review() - .with_checkpointing(storage) - .build() - ) - - completed: WorkflowOutputEvent | None = None - req_event = None - async for event in wf_resume.run_stream( - resume_checkpoint.checkpoint_id, - ): - if isinstance(event, RequestInfoEvent) and event.request_type is MagenticPlanReviewRequest: - req_event = event - assert req_event is not None - assert isinstance(req_event.data, MagenticPlanReviewRequest) - - responses = {req_event.request_id: req_event.data.approve()} - async for event in wf_resume.send_responses_streaming(responses=responses): - if isinstance(event, WorkflowOutputEvent): - completed = event - assert completed is not None - - orchestrator = next(exec for exec in wf_resume.executors.values() if isinstance(exec, MagenticOrchestrator)) - assert orchestrator._magentic_context is not None # type: ignore[reportPrivateUsage] - assert orchestrator._magentic_context.chat_history # type: ignore[reportPrivateUsage] - assert orchestrator._task_ledger is not None # type: ignore[reportPrivateUsage] - assert manager2.task_ledger is not None - # Latest entry in chat history should be the task ledger plan - assert orchestrator._magentic_context.chat_history[-1].text == orchestrator._task_ledger.text # type: ignore[reportPrivateUsage] - - -class StubManagerAgent(BaseAgent): - """Stub agent for testing StandardMagenticManager.""" - - async def run( - self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, - *, - thread: Any = None, - **kwargs: Any, - ) -> AgentResponse: - return AgentResponse(messages=[ChatMessage("assistant", ["ok"])]) - - def run_stream( - self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, - *, - thread: Any = None, - **kwargs: Any, - ) -> AsyncIterable[AgentResponseUpdate]: - async def _gen() -> AsyncIterable[AgentResponseUpdate]: - yield 
AgentResponseUpdate(message_deltas=[ChatMessage("assistant", ["ok"])]) - - return _gen() - - -async def test_standard_manager_plan_and_replan_via_complete_monkeypatch(): - mgr = StandardMagenticManager(StubManagerAgent()) - - async def fake_complete_plan(messages: list[ChatMessage], **kwargs: Any) -> ChatMessage: - # Return a different response depending on call order length - if any("FACTS" in (m.text or "") for m in messages): - return ChatMessage("assistant", ["- step A\n- step B"]) - return ChatMessage("assistant", ["GIVEN OR VERIFIED FACTS\n- fact1"]) - - # First, patch to produce facts then plan - mgr._complete = fake_complete_plan # type: ignore[attr-defined] - - ctx = MagenticContext(task="T", participant_descriptions={"A": "desc"}) - combined = await mgr.plan(ctx.clone()) - # Assert structural headings and that steps appear in the combined ledger output. - assert "We are working to address the following user request:" in combined.text - assert "Here is the plan to follow as best as possible:" in combined.text - assert any(t in combined.text for t in ("- step A", "- step B", "- step")) - - # Now replan with new outputs - async def fake_complete_replan(messages: list[ChatMessage], **kwargs: Any) -> ChatMessage: - if any("Please briefly explain" in (m.text or "") for m in messages): - return ChatMessage("assistant", ["- new step"]) - return ChatMessage("assistant", ["GIVEN OR VERIFIED FACTS\n- updated"]) - - mgr._complete = fake_complete_replan # type: ignore[attr-defined] - combined2 = await mgr.replan(ctx.clone()) - assert "updated" in combined2.text or "new step" in combined2.text - - -async def test_standard_manager_progress_ledger_success_and_error(): - mgr = StandardMagenticManager(agent=StubManagerAgent()) - ctx = MagenticContext(task="task", participant_descriptions={"alice": "desc"}) - - # Success path: valid JSON - async def fake_complete_ok(messages: list[ChatMessage], **kwargs: Any) -> ChatMessage: - json_text = ( - '{"is_request_satisfied": 
{"reason": "r", "answer": false}, ' - '"is_in_loop": {"reason": "r", "answer": false}, ' - '"is_progress_being_made": {"reason": "r", "answer": true}, ' - '"next_speaker": {"reason": "r", "answer": "alice"}, ' - '"instruction_or_question": {"reason": "r", "answer": "do"}}' - ) - return ChatMessage("assistant", [json_text]) - - mgr._complete = fake_complete_ok # type: ignore[attr-defined] - ledger = await mgr.create_progress_ledger(ctx.clone()) - assert ledger.next_speaker.answer == "alice" - - # Error path: invalid JSON now raises to avoid emitting planner-oriented instructions to agents - async def fake_complete_bad(messages: list[ChatMessage], **kwargs: Any) -> ChatMessage: - return ChatMessage("assistant", ["not-json"]) - - mgr._complete = fake_complete_bad # type: ignore[attr-defined] - with pytest.raises(RuntimeError): - await mgr.create_progress_ledger(ctx.clone()) - - -class InvokeOnceManager(MagenticManagerBase): - def __init__(self) -> None: - super().__init__(max_round_count=5, max_stall_count=3, max_reset_count=2) - self._invoked = False - - async def plan(self, magentic_context: MagenticContext) -> ChatMessage: - return ChatMessage("assistant", ["ledger"]) - - async def replan(self, magentic_context: MagenticContext) -> ChatMessage: - return ChatMessage("assistant", ["re-ledger"]) - - async def create_progress_ledger(self, magentic_context: MagenticContext) -> MagenticProgressLedger: - if not self._invoked: - # First round: ask agentA to respond - self._invoked = True - return MagenticProgressLedger( - is_request_satisfied=MagenticProgressLedgerItem(reason="r", answer=False), - is_in_loop=MagenticProgressLedgerItem(reason="r", answer=False), - is_progress_being_made=MagenticProgressLedgerItem(reason="r", answer=True), - next_speaker=MagenticProgressLedgerItem(reason="r", answer="agentA"), - instruction_or_question=MagenticProgressLedgerItem(reason="r", answer="say hi"), - ) - # Next round: mark satisfied so run can conclude - return 
MagenticProgressLedger( - is_request_satisfied=MagenticProgressLedgerItem(reason="r", answer=True), - is_in_loop=MagenticProgressLedgerItem(reason="r", answer=False), - is_progress_being_made=MagenticProgressLedgerItem(reason="r", answer=True), - next_speaker=MagenticProgressLedgerItem(reason="r", answer="agentA"), - instruction_or_question=MagenticProgressLedgerItem(reason="r", answer="done"), - ) - - async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatMessage: - return ChatMessage("assistant", ["final"]) - - -class StubThreadAgent(BaseAgent): - def __init__(self, name: str | None = None) -> None: - super().__init__(name=name or "agentA") - - async def run_stream(self, messages=None, *, thread=None, **kwargs): # type: ignore[override] - yield AgentResponseUpdate( - contents=[Content.from_text(text="thread-ok")], - author_name=self.name, - role="assistant", - ) - - async def run(self, messages=None, *, thread=None, **kwargs): # type: ignore[override] - return AgentResponse(messages=[ChatMessage("assistant", ["thread-ok"], author_name=self.name)]) - - -class StubAssistantsClient: - pass # class name used for branch detection - - -class StubAssistantsAgent(BaseAgent): - chat_client: object | None = None # allow assignment via Pydantic field - - def __init__(self) -> None: - super().__init__(name="agentA") - self.chat_client = StubAssistantsClient() # type name contains 'AssistantsClient' - - async def run_stream(self, messages=None, *, thread=None, **kwargs): # type: ignore[override] - yield AgentResponseUpdate( - contents=[Content.from_text(text="assistants-ok")], - author_name=self.name, - role="assistant", - ) - - async def run(self, messages=None, *, thread=None, **kwargs): # type: ignore[override] - return AgentResponse(messages=[ChatMessage("assistant", ["assistants-ok"], author_name=self.name)]) - - -async def _collect_agent_responses_setup(participant: AgentProtocol) -> list[ChatMessage]: - captured: list[ChatMessage] = [] - - wf = 
MagenticBuilder().participants([participant]).with_manager(manager=InvokeOnceManager()).build() - - # Run a bounded stream to allow one invoke and then completion - events: list[WorkflowEvent] = [] - async for ev in wf.run_stream("task"): # plan review disabled - events.append(ev) - if isinstance(ev, WorkflowOutputEvent): - break - if isinstance(ev, AgentRunUpdateEvent): - captured.append( - ChatMessage( - role=ev.data.role or "assistant", - text=ev.data.text or "", - author_name=ev.data.author_name, - ) - ) - - return captured - - -async def test_agent_executor_invoke_with_thread_chat_client(): - agent = StubThreadAgent() - captured = await _collect_agent_responses_setup(agent) - # Should have at least one response from agentA via _MagenticAgentExecutor path - assert any((m.author_name == agent.name and "ok" in (m.text or "")) for m in captured) - - -async def test_agent_executor_invoke_with_assistants_client_messages(): - agent = StubAssistantsAgent() - captured = await _collect_agent_responses_setup(agent) - assert any((m.author_name == agent.name and "ok" in (m.text or "")) for m in captured) - - -async def _collect_checkpoints( - storage: InMemoryCheckpointStorage, -) -> list[WorkflowCheckpoint]: - checkpoints = await storage.list_checkpoints() - assert checkpoints - checkpoints.sort(key=lambda cp: cp.timestamp) - return checkpoints - - -async def test_magentic_checkpoint_resume_inner_loop_superstep(): - storage = InMemoryCheckpointStorage() - - workflow = ( - MagenticBuilder() - .participants([StubThreadAgent()]) - .with_manager(manager=InvokeOnceManager()) - .with_checkpointing(storage) - .build() - ) - - async for event in workflow.run_stream("inner-loop task"): - if isinstance(event, WorkflowOutputEvent): - break - - checkpoints = await _collect_checkpoints(storage) - inner_loop_checkpoint = next(cp for cp in checkpoints if cp.metadata.get("superstep") == 1) # type: ignore[reportUnknownMemberType] - - resumed = ( - MagenticBuilder() - 
.participants([StubThreadAgent()]) - .with_manager(manager=InvokeOnceManager()) - .with_checkpointing(storage) - .build() - ) - - completed: WorkflowOutputEvent | None = None - async for event in resumed.run_stream(checkpoint_id=inner_loop_checkpoint.checkpoint_id): # type: ignore[reportUnknownMemberType] - if isinstance(event, WorkflowOutputEvent): - completed = event - - assert completed is not None - - -async def test_magentic_checkpoint_resume_from_saved_state(): - """Test that we can resume workflow execution from a saved checkpoint.""" - storage = InMemoryCheckpointStorage() - - # Use the working InvokeOnceManager first to get a completed workflow - manager = InvokeOnceManager() - - workflow = ( - MagenticBuilder() - .participants([StubThreadAgent()]) - .with_manager(manager=manager) - .with_checkpointing(storage) - .build() - ) - - async for event in workflow.run_stream("checkpoint resume task"): - if isinstance(event, WorkflowOutputEvent): - break - - checkpoints = await _collect_checkpoints(storage) - - # Verify we can resume from the last saved checkpoint - resumed_state = checkpoints[-1] # Use the last checkpoint - - resumed_workflow = ( - MagenticBuilder() - .participants([StubThreadAgent()]) - .with_manager(manager=InvokeOnceManager()) - .with_checkpointing(storage) - .build() - ) - - completed: WorkflowOutputEvent | None = None - async for event in resumed_workflow.run_stream(checkpoint_id=resumed_state.checkpoint_id): - if isinstance(event, WorkflowOutputEvent): - completed = event - - assert completed is not None - - -async def test_magentic_checkpoint_resume_rejects_participant_renames(): - storage = InMemoryCheckpointStorage() - - manager = InvokeOnceManager() - - workflow = ( - MagenticBuilder() - .participants([StubThreadAgent()]) - .with_manager(manager=manager) - .with_plan_review() - .with_checkpointing(storage) - .build() - ) - - req_event: RequestInfoEvent | None = None - async for event in workflow.run_stream("task"): - if 
isinstance(event, RequestInfoEvent) and event.request_type is MagenticPlanReviewRequest: - req_event = event - - assert req_event is not None - assert isinstance(req_event.data, MagenticPlanReviewRequest) - - checkpoints = await _collect_checkpoints(storage) - target_checkpoint = checkpoints[-1] - - renamed_workflow = ( - MagenticBuilder() - .participants([StubThreadAgent(name="renamedAgent")]) - .with_manager(manager=InvokeOnceManager()) - .with_plan_review() - .with_checkpointing(storage) - .build() - ) - - with pytest.raises(WorkflowCheckpointException, match="Workflow graph has changed"): - async for _ in renamed_workflow.run_stream( - checkpoint_id=target_checkpoint.checkpoint_id, # type: ignore[reportUnknownMemberType] - ): - pass - - -class NotProgressingManager(MagenticManagerBase): - """ - A manager that never marks progress being made, to test stall/reset limits. - """ - - async def plan(self, magentic_context: MagenticContext) -> ChatMessage: - return ChatMessage("assistant", ["ledger"]) - - async def replan(self, magentic_context: MagenticContext) -> ChatMessage: - return ChatMessage("assistant", ["re-ledger"]) - - async def create_progress_ledger(self, magentic_context: MagenticContext) -> MagenticProgressLedger: - return MagenticProgressLedger( - is_request_satisfied=MagenticProgressLedgerItem(reason="r", answer=False), - is_in_loop=MagenticProgressLedgerItem(reason="r", answer=True), - is_progress_being_made=MagenticProgressLedgerItem(reason="r", answer=False), - next_speaker=MagenticProgressLedgerItem(reason="r", answer="agentA"), - instruction_or_question=MagenticProgressLedgerItem(reason="r", answer="done"), - ) - - async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatMessage: - return ChatMessage("assistant", ["final"]) - - -async def test_magentic_stall_and_reset_reach_limits(): - manager = NotProgressingManager(max_round_count=10, max_stall_count=0, max_reset_count=1) - - wf = 
MagenticBuilder().participants([DummyExec("agentA")]).with_manager(manager=manager).build() - - events: list[WorkflowEvent] = [] - async for ev in wf.run_stream("test limits"): - events.append(ev) - - idle_status = next( - (e for e in events if isinstance(e, WorkflowStatusEvent) and e.state == WorkflowRunState.IDLE), - None, - ) - assert idle_status is not None - output_event = next((e for e in events if isinstance(e, WorkflowOutputEvent)), None) - assert output_event is not None - assert isinstance(output_event.data, list) - assert all(isinstance(msg, ChatMessage) for msg in output_event.data) # type: ignore - assert len(output_event.data) > 0 # type: ignore - assert output_event.data[-1].text is not None # type: ignore - assert output_event.data[-1].text == "Workflow terminated due to reaching maximum reset count." # type: ignore - - -async def test_magentic_checkpoint_runtime_only() -> None: - """Test checkpointing configured ONLY at runtime, not at build time.""" - storage = InMemoryCheckpointStorage() - - manager = FakeManager(max_round_count=10) - wf = MagenticBuilder().participants([DummyExec("agentA")]).with_manager(manager=manager).build() - - baseline_output: ChatMessage | None = None - async for ev in wf.run_stream("runtime checkpoint test", checkpoint_storage=storage): - if isinstance(ev, WorkflowOutputEvent): - baseline_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state in ( - WorkflowRunState.IDLE, - WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, - ): - break - - assert baseline_output is not None - - checkpoints = await storage.list_checkpoints() - assert len(checkpoints) > 0, "Runtime-only checkpointing should have created checkpoints" - - -async def test_magentic_checkpoint_runtime_overrides_buildtime() -> None: - """Test that runtime checkpoint storage overrides build-time configuration.""" - import tempfile - - with ( - tempfile.TemporaryDirectory() as temp_dir1, - tempfile.TemporaryDirectory() as 
temp_dir2, - ): - from agent_framework._workflows._checkpoint import FileCheckpointStorage - - buildtime_storage = FileCheckpointStorage(temp_dir1) - runtime_storage = FileCheckpointStorage(temp_dir2) - - manager = FakeManager(max_round_count=10) - wf = ( - MagenticBuilder() - .participants([DummyExec("agentA")]) - .with_manager(manager=manager) - .with_checkpointing(buildtime_storage) - .build() - ) - - baseline_output: ChatMessage | None = None - async for ev in wf.run_stream("override test", checkpoint_storage=runtime_storage): - if isinstance(ev, WorkflowOutputEvent): - baseline_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state in ( - WorkflowRunState.IDLE, - WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, - ): - break - - assert baseline_output is not None - - buildtime_checkpoints = await buildtime_storage.list_checkpoints() - runtime_checkpoints = await runtime_storage.list_checkpoints() - - assert len(runtime_checkpoints) > 0, "Runtime storage should have checkpoints" - assert len(buildtime_checkpoints) == 0, "Build-time storage should have no checkpoints when overridden" - - -# region Message Deduplication Tests - - -async def test_magentic_context_no_duplicate_on_reset(): - """Test that MagenticContext.reset() clears chat_history without leaving duplicates.""" - ctx = MagenticContext(task="task", participant_descriptions={"Alice": "Researcher"}) - - # Add some history - ctx.chat_history.append(ChatMessage("assistant", ["response1"])) - ctx.chat_history.append(ChatMessage("assistant", ["response2"])) - assert len(ctx.chat_history) == 2 - - # Reset - ctx.reset() - - # Verify clean slate - assert len(ctx.chat_history) == 0, "chat_history should be empty after reset" - - # Add new history - ctx.chat_history.append(ChatMessage("assistant", ["new_response"])) - assert len(ctx.chat_history) == 1, "Should have exactly 1 message after adding to reset context" - - -async def 
test_magentic_checkpoint_restore_no_duplicate_history(): - """Test that checkpoint restore does not create duplicate messages in chat_history.""" - manager = FakeManager(max_round_count=10) - storage = InMemoryCheckpointStorage() - - wf = ( - MagenticBuilder() - .participants([DummyExec("agentA")]) - .with_manager(manager=manager) - .with_checkpointing(storage) - .build() - ) - - # Run with conversation history to create initial checkpoint - conversation: list[ChatMessage] = [ - ChatMessage("user", ["task_msg"]), - ] - - async for event in wf.run_stream(conversation): - if isinstance(event, WorkflowStatusEvent) and event.state in ( - WorkflowRunState.IDLE, - WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, - ): - break - - # Get checkpoint - checkpoints = await storage.list_checkpoints() - assert len(checkpoints) > 0, "Should have created checkpoints" - - latest_checkpoint = checkpoints[-1] - - # Load checkpoint and verify no duplicates in shared state - checkpoint_data = await storage.load_checkpoint(latest_checkpoint.checkpoint_id) - assert checkpoint_data is not None - - # Check the magentic_context in the checkpoint - for _, executor_state in checkpoint_data.metadata.items(): - if isinstance(executor_state, dict) and "magentic_context" in executor_state: - ctx_data: dict[str, Any] = executor_state["magentic_context"] # type: ignore - chat_history = ctx_data.get("chat_history", []) # type: ignore - - # Count unique messages by text - texts = [ # type: ignore - msg.get("text") or (msg.get("contents", [{}])[0].get("text") if msg.get("contents") else None) # type: ignore - for msg in chat_history # type: ignore - ] - text_counts: dict[str, int] = {} - for text in texts: # type: ignore - if text: - text_counts[text] = text_counts.get(text, 0) + 1 # type: ignore - - # Input messages should not be duplicated - assert text_counts.get("history_msg", 0) <= 1, ( - f"'history_msg' appears {text_counts.get('history_msg', 0)} times in checkpoint - expected <= 1" - ) - assert 
text_counts.get("task_msg", 0) <= 1, ( - f"'task_msg' appears {text_counts.get('task_msg', 0)} times in checkpoint - expected <= 1" - ) - - -# endregion - -# region Participant Factory Tests - - -def test_magentic_builder_rejects_empty_participant_factories(): - """Test that MagenticBuilder rejects empty participant_factories list.""" - with pytest.raises(ValueError, match=r"participant_factories cannot be empty"): - MagenticBuilder().register_participants([]) - - with pytest.raises( - ValueError, - match=r"No participants provided\. Call \.participants\(\) or \.register_participants\(\) first\.", - ): - MagenticBuilder().with_manager(manager=FakeManager()).build() - - -def test_magentic_builder_rejects_mixing_participants_and_factories(): - """Test that mixing .participants() and .register_participants() raises an error.""" - agent = StubAgent("agentA", "reply from agentA") - - # Case 1: participants first, then register_participants - with pytest.raises(ValueError, match="Cannot mix .participants"): - MagenticBuilder().participants([agent]).register_participants([lambda: StubAgent("agentB", "reply")]) - - # Case 2: register_participants first, then participants - with pytest.raises(ValueError, match="Cannot mix .participants"): - MagenticBuilder().register_participants([lambda: agent]).participants([StubAgent("agentB", "reply")]) - - -def test_magentic_builder_rejects_multiple_calls_to_register_participants(): - """Test that multiple calls to .register_participants() raises an error.""" - with pytest.raises( - ValueError, match=r"register_participants\(\) has already been called on this builder instance." 
- ): - ( - MagenticBuilder() - .register_participants([lambda: StubAgent("agentA", "reply from agentA")]) - .register_participants([lambda: StubAgent("agentB", "reply from agentB")]) - ) - - -def test_magentic_builder_rejects_multiple_calls_to_participants(): - """Test that multiple calls to .participants() raises an error.""" - with pytest.raises(ValueError, match="participants have already been set"): - ( - MagenticBuilder() - .participants([StubAgent("agentA", "reply from agentA")]) - .participants([StubAgent("agentB", "reply from agentB")]) - ) - - -async def test_magentic_with_participant_factories(): - """Test workflow creation using participant_factories.""" - call_count = 0 - - def create_agent() -> StubAgent: - nonlocal call_count - call_count += 1 - return StubAgent("agentA", "reply from agentA") - - manager = FakeManager() - workflow = MagenticBuilder().register_participants([create_agent]).with_manager(manager=manager).build() - - # Factory should be called during build - assert call_count == 1 - - outputs: list[WorkflowOutputEvent] = [] - async for event in workflow.run_stream("test task"): - if isinstance(event, WorkflowOutputEvent): - outputs.append(event) - - assert len(outputs) == 1 - - -async def test_magentic_participant_factories_reusable_builder(): - """Test that the builder can be reused to build multiple workflows with factories.""" - call_count = 0 - - def create_agent() -> StubAgent: - nonlocal call_count - call_count += 1 - return StubAgent("agentA", "reply from agentA") - - builder = MagenticBuilder().register_participants([create_agent]).with_manager(manager=FakeManager()) - - # Build first workflow - wf1 = builder.build() - assert call_count == 1 - - # Build second workflow - wf2 = builder.build() - assert call_count == 2 - - # Verify that the two workflows have different agent instances - assert wf1.executors["agentA"] is not wf2.executors["agentA"] - - -async def test_magentic_participant_factories_with_checkpointing(): - """Test 
checkpointing with participant_factories.""" - storage = InMemoryCheckpointStorage() - - def create_agent() -> StubAgent: - return StubAgent("agentA", "reply from agentA") - - manager = FakeManager() - workflow = ( - MagenticBuilder() - .register_participants([create_agent]) - .with_manager(manager=manager) - .with_checkpointing(storage) - .build() - ) - - outputs: list[WorkflowOutputEvent] = [] - async for event in workflow.run_stream("checkpoint test"): - if isinstance(event, WorkflowOutputEvent): - outputs.append(event) - - assert outputs, "Should have workflow output" - - checkpoints = await storage.list_checkpoints() - assert checkpoints, "Checkpoints should be created during workflow execution" - - -# endregion - -# region Manager Factory Tests - - -def test_magentic_builder_rejects_multiple_manager_configurations(): - """Test that configuring multiple managers raises ValueError.""" - manager = FakeManager() - - builder = MagenticBuilder().with_manager(manager=manager) - - with pytest.raises(ValueError, match=r"with_manager\(\) has already been called"): - builder.with_manager(manager=manager) - - -def test_magentic_builder_requires_exactly_one_manager_option(): - """Test that exactly one manager option must be provided.""" - manager = FakeManager() - - def manager_factory() -> MagenticManagerBase: - return FakeManager() - - # No options provided - with pytest.raises(ValueError, match="Exactly one of"): - MagenticBuilder().with_manager() # type: ignore - - # Multiple options provided - with pytest.raises(ValueError, match="Exactly one of"): - MagenticBuilder().with_manager(manager=manager, manager_factory=manager_factory) # type: ignore - - -async def test_magentic_with_manager_factory(): - """Test workflow creation using manager_factory.""" - factory_call_count = 0 - - def manager_factory() -> MagenticManagerBase: - nonlocal factory_call_count - factory_call_count += 1 - return FakeManager() - - agent = StubAgent("agentA", "reply from agentA") - workflow = 
MagenticBuilder().participants([agent]).with_manager(manager_factory=manager_factory).build() - - # Factory should be called during build - assert factory_call_count == 1 - - outputs: list[WorkflowOutputEvent] = [] - async for event in workflow.run_stream("test task"): - if isinstance(event, WorkflowOutputEvent): - outputs.append(event) - - assert len(outputs) == 1 - - -async def test_magentic_with_agent_factory(): - """Test workflow creation using agent_factory for StandardMagenticManager.""" - factory_call_count = 0 - - def agent_factory() -> AgentProtocol: - nonlocal factory_call_count - factory_call_count += 1 - return cast(AgentProtocol, StubManagerAgent()) - - participant = StubAgent("agentA", "reply from agentA") - workflow = ( - MagenticBuilder() - .participants([participant]) - .with_manager(agent_factory=agent_factory, max_round_count=1) - .build() - ) - - # Factory should be called during build - assert factory_call_count == 1 - - # Verify workflow can be started (may not complete successfully due to stub behavior) - event_count = 0 - async for _ in workflow.run_stream("test task"): - event_count += 1 - if event_count > 10: - break - - assert event_count > 0 - - -async def test_magentic_manager_factory_reusable_builder(): - """Test that the builder can be reused to build multiple workflows with manager factory.""" - factory_call_count = 0 - - def manager_factory() -> MagenticManagerBase: - nonlocal factory_call_count - factory_call_count += 1 - return FakeManager() - - agent = StubAgent("agentA", "reply from agentA") - builder = MagenticBuilder().participants([agent]).with_manager(manager_factory=manager_factory) - - # Build first workflow - wf1 = builder.build() - assert factory_call_count == 1 - - # Build second workflow - wf2 = builder.build() - assert factory_call_count == 2 - - # Verify that the two workflows have different orchestrator instances - orchestrator1 = next(e for e in wf1.executors.values() if isinstance(e, MagenticOrchestrator)) - 
orchestrator2 = next(e for e in wf2.executors.values() if isinstance(e, MagenticOrchestrator)) - assert orchestrator1 is not orchestrator2 - - -def test_magentic_with_both_participant_and_manager_factories(): - """Test workflow creation using both participant_factories and manager_factory.""" - participant_factory_call_count = 0 - manager_factory_call_count = 0 - - def create_agent() -> StubAgent: - nonlocal participant_factory_call_count - participant_factory_call_count += 1 - return StubAgent("agentA", "reply from agentA") - - def manager_factory() -> MagenticManagerBase: - nonlocal manager_factory_call_count - manager_factory_call_count += 1 - return FakeManager() - - workflow = ( - MagenticBuilder().register_participants([create_agent]).with_manager(manager_factory=manager_factory).build() - ) - - # All factories should be called during build - assert participant_factory_call_count == 1 - assert manager_factory_call_count == 1 - - # Verify executor is present in the workflow - assert "agentA" in workflow.executors - - -async def test_magentic_factories_reusable_for_multiple_workflows(): - """Test that both factories are reused correctly for multiple workflow builds.""" - participant_factory_call_count = 0 - manager_factory_call_count = 0 - - def create_agent() -> StubAgent: - nonlocal participant_factory_call_count - participant_factory_call_count += 1 - return StubAgent("agentA", "reply from agentA") - - def manager_factory() -> MagenticManagerBase: - nonlocal manager_factory_call_count - manager_factory_call_count += 1 - return FakeManager() - - builder = MagenticBuilder().register_participants([create_agent]).with_manager(manager_factory=manager_factory) - - # Build first workflow - wf1 = builder.build() - assert participant_factory_call_count == 1 - assert manager_factory_call_count == 1 - - # Build second workflow - wf2 = builder.build() - assert participant_factory_call_count == 2 - assert manager_factory_call_count == 2 - - # Verify that the workflows 
have different agent and orchestrator instances - assert wf1.executors["agentA"] is not wf2.executors["agentA"] - - orchestrator1 = next(e for e in wf1.executors.values() if isinstance(e, MagenticOrchestrator)) - orchestrator2 = next(e for e in wf2.executors.values() if isinstance(e, MagenticOrchestrator)) - assert orchestrator1 is not orchestrator2 - - -def test_magentic_agent_factory_with_standard_manager_options(): - """Test that agent_factory properly passes through standard manager options.""" - factory_call_count = 0 - - def agent_factory() -> AgentProtocol: - nonlocal factory_call_count - factory_call_count += 1 - return cast(AgentProtocol, StubManagerAgent()) - - # Custom options to verify they are passed through - custom_max_stall_count = 5 - custom_max_reset_count = 2 - custom_max_round_count = 10 - custom_facts_prompt = "Custom facts prompt: {task}" - custom_plan_prompt = "Custom plan prompt: {team}" - custom_full_prompt = "Custom full prompt: {task} {team} {facts} {plan}" - custom_facts_update_prompt = "Custom facts update: {task} {old_facts}" - custom_plan_update_prompt = "Custom plan update: {team}" - custom_progress_prompt = "Custom progress: {task} {team} {names}" - custom_final_prompt = "Custom final: {task}" - - # Create a custom task ledger - from agent_framework._workflows._magentic import _MagenticTaskLedger # type: ignore - - custom_task_ledger = _MagenticTaskLedger( - facts=ChatMessage("assistant", ["Custom facts"]), - plan=ChatMessage("assistant", ["Custom plan"]), - ) - - participant = StubAgent("agentA", "reply from agentA") - workflow = ( - MagenticBuilder() - .participants([participant]) - .with_manager( - agent_factory=agent_factory, - task_ledger=custom_task_ledger, - max_stall_count=custom_max_stall_count, - max_reset_count=custom_max_reset_count, - max_round_count=custom_max_round_count, - task_ledger_facts_prompt=custom_facts_prompt, - task_ledger_plan_prompt=custom_plan_prompt, - task_ledger_full_prompt=custom_full_prompt, - 
task_ledger_facts_update_prompt=custom_facts_update_prompt, - task_ledger_plan_update_prompt=custom_plan_update_prompt, - progress_ledger_prompt=custom_progress_prompt, - final_answer_prompt=custom_final_prompt, - ) - .build() - ) - - # Factory should be called during build - assert factory_call_count == 1 - - # Get the orchestrator and verify the manager has the custom options - orchestrator = next(e for e in workflow.executors.values() if isinstance(e, MagenticOrchestrator)) - manager = orchestrator._manager # type: ignore[reportPrivateUsage] - - # Verify the manager is a StandardMagenticManager with the expected options - from agent_framework import StandardMagenticManager - - assert isinstance(manager, StandardMagenticManager) - assert manager.task_ledger is custom_task_ledger - assert manager.max_stall_count == custom_max_stall_count - assert manager.max_reset_count == custom_max_reset_count - assert manager.max_round_count == custom_max_round_count - assert manager.task_ledger_facts_prompt == custom_facts_prompt - assert manager.task_ledger_plan_prompt == custom_plan_prompt - assert manager.task_ledger_full_prompt == custom_full_prompt - assert manager.task_ledger_facts_update_prompt == custom_facts_update_prompt - assert manager.task_ledger_plan_update_prompt == custom_plan_update_prompt - assert manager.progress_ledger_prompt == custom_progress_prompt - assert manager.final_answer_prompt == custom_final_prompt - - -# endregion diff --git a/python/packages/core/tests/workflow/test_sequential.py b/python/packages/core/tests/workflow/test_sequential.py deleted file mode 100644 index e5b55ae081..0000000000 --- a/python/packages/core/tests/workflow/test_sequential.py +++ /dev/null @@ -1,454 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. 
- -from collections.abc import AsyncIterable -from typing import Any - -import pytest - -from agent_framework import ( - AgentExecutorResponse, - AgentResponse, - AgentResponseUpdate, - AgentThread, - BaseAgent, - ChatMessage, - Content, - Executor, - SequentialBuilder, - TypeCompatibilityError, - WorkflowContext, - WorkflowOutputEvent, - WorkflowRunState, - WorkflowStatusEvent, - handler, -) -from agent_framework._workflows._checkpoint import InMemoryCheckpointStorage - - -class _EchoAgent(BaseAgent): - """Simple agent that appends a single assistant message with its name.""" - - async def run( # type: ignore[override] - self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, - *, - thread: AgentThread | None = None, - **kwargs: Any, - ) -> AgentResponse: - return AgentResponse(messages=[ChatMessage("assistant", [f"{self.name} reply"])]) - - async def run_stream( # type: ignore[override] - self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, - *, - thread: AgentThread | None = None, - **kwargs: Any, - ) -> AsyncIterable[AgentResponseUpdate]: - # Minimal async generator with one assistant update - yield AgentResponseUpdate(contents=[Content.from_text(text=f"{self.name} reply")]) - - -class _SummarizerExec(Executor): - """Custom executor that summarizes by appending a short assistant message.""" - - @handler - async def summarize(self, agent_response: AgentExecutorResponse, ctx: WorkflowContext[list[ChatMessage]]) -> None: - conversation = agent_response.full_conversation or [] - user_texts = [m.text for m in conversation if m.role == "user"] - agents = [m.author_name or m.role for m in conversation if m.role == "assistant"] - summary = ChatMessage("assistant", [f"Summary of users:{len(user_texts)} agents:{len(agents)}"]) - await ctx.send_message(list(conversation) + [summary]) - - -class _InvalidExecutor(Executor): - """Invalid executor that does not have a handler that accepts a list of chat messages""" - - 
@handler - async def summarize(self, conversation: list[str], ctx: WorkflowContext[list[ChatMessage]]) -> None: - pass - - -def test_sequential_builder_rejects_empty_participants() -> None: - with pytest.raises(ValueError): - SequentialBuilder().participants([]) - - -def test_sequential_builder_rejects_empty_participant_factories() -> None: - with pytest.raises(ValueError): - SequentialBuilder().register_participants([]) - - -def test_sequential_builder_rejects_mixing_participants_and_factories() -> None: - """Test that mixing .participants() and .register_participants() raises an error.""" - a1 = _EchoAgent(id="agent1", name="A1") - - # Try .participants() then .register_participants() - with pytest.raises(ValueError, match="Cannot mix"): - SequentialBuilder().participants([a1]).register_participants([lambda: _EchoAgent(id="agent2", name="A2")]) - - # Try .register_participants() then .participants() - with pytest.raises(ValueError, match="Cannot mix"): - SequentialBuilder().register_participants([lambda: _EchoAgent(id="agent1", name="A1")]).participants([a1]) - - -def test_sequential_builder_validation_rejects_invalid_executor() -> None: - """Test that adding an invalid executor to the builder raises an error.""" - with pytest.raises(TypeCompatibilityError): - SequentialBuilder().participants([_EchoAgent(id="agent1", name="A1"), _InvalidExecutor(id="invalid")]).build() - - -async def test_sequential_agents_append_to_context() -> None: - a1 = _EchoAgent(id="agent1", name="A1") - a2 = _EchoAgent(id="agent2", name="A2") - - wf = SequentialBuilder().participants([a1, a2]).build() - - completed = False - output: list[ChatMessage] | None = None - async for ev in wf.run_stream("hello sequential"): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: - completed = True - elif isinstance(ev, WorkflowOutputEvent): - output = ev.data # type: ignore[assignment] - if completed and output is not None: - break - - assert completed - assert output is 
not None - assert isinstance(output, list) - msgs: list[ChatMessage] = output - assert len(msgs) == 3 - assert msgs[0].role == "user" and "hello sequential" in msgs[0].text - assert msgs[1].role == "assistant" and (msgs[1].author_name == "A1" or True) - assert msgs[2].role == "assistant" and (msgs[2].author_name == "A2" or True) - assert "A1 reply" in msgs[1].text - assert "A2 reply" in msgs[2].text - - -async def test_sequential_register_participants_with_agent_factories() -> None: - """Test that register_participants works with agent factories.""" - - def create_agent1() -> _EchoAgent: - return _EchoAgent(id="agent1", name="A1") - - def create_agent2() -> _EchoAgent: - return _EchoAgent(id="agent2", name="A2") - - wf = SequentialBuilder().register_participants([create_agent1, create_agent2]).build() - - completed = False - output: list[ChatMessage] | None = None - async for ev in wf.run_stream("hello factories"): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: - completed = True - elif isinstance(ev, WorkflowOutputEvent): - output = ev.data - if completed and output is not None: - break - - assert completed - assert output is not None - assert isinstance(output, list) - msgs: list[ChatMessage] = output - assert len(msgs) == 3 - assert msgs[0].role == "user" and "hello factories" in msgs[0].text - assert msgs[1].role == "assistant" and "A1 reply" in msgs[1].text - assert msgs[2].role == "assistant" and "A2 reply" in msgs[2].text - - -async def test_sequential_with_custom_executor_summary() -> None: - a1 = _EchoAgent(id="agent1", name="A1") - summarizer = _SummarizerExec(id="summarizer") - - wf = SequentialBuilder().participants([a1, summarizer]).build() - - completed = False - output: list[ChatMessage] | None = None - async for ev in wf.run_stream("topic X"): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: - completed = True - elif isinstance(ev, WorkflowOutputEvent): - output = ev.data - if completed 
and output is not None: - break - - assert completed - assert output is not None - msgs: list[ChatMessage] = output - # Expect: [user, A1 reply, summary] - assert len(msgs) == 3 - assert msgs[0].role == "user" - assert msgs[1].role == "assistant" and "A1 reply" in msgs[1].text - assert msgs[2].role == "assistant" and msgs[2].text.startswith("Summary of users:") - - -async def test_sequential_register_participants_mixed_agents_and_executors() -> None: - """Test register_participants with both agent and executor factories.""" - - def create_agent() -> _EchoAgent: - return _EchoAgent(id="agent1", name="A1") - - def create_summarizer() -> _SummarizerExec: - return _SummarizerExec(id="summarizer") - - wf = SequentialBuilder().register_participants([create_agent, create_summarizer]).build() - - completed = False - output: list[ChatMessage] | None = None - async for ev in wf.run_stream("topic Y"): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: - completed = True - elif isinstance(ev, WorkflowOutputEvent): - output = ev.data - if completed and output is not None: - break - - assert completed - assert output is not None - msgs: list[ChatMessage] = output - # Expect: [user, A1 reply, summary] - assert len(msgs) == 3 - assert msgs[0].role == "user" and "topic Y" in msgs[0].text - assert msgs[1].role == "assistant" and "A1 reply" in msgs[1].text - assert msgs[2].role == "assistant" and msgs[2].text.startswith("Summary of users:") - - -async def test_sequential_checkpoint_resume_round_trip() -> None: - storage = InMemoryCheckpointStorage() - - initial_agents = (_EchoAgent(id="agent1", name="A1"), _EchoAgent(id="agent2", name="A2")) - wf = SequentialBuilder().participants(list(initial_agents)).with_checkpointing(storage).build() - - baseline_output: list[ChatMessage] | None = None - async for ev in wf.run_stream("checkpoint sequential"): - if isinstance(ev, WorkflowOutputEvent): - baseline_output = ev.data # type: ignore[assignment] - if 
isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: - break - - assert baseline_output is not None - - checkpoints = await storage.list_checkpoints() - assert checkpoints - checkpoints.sort(key=lambda cp: cp.timestamp) - - resume_checkpoint = next( - (cp for cp in checkpoints if (cp.metadata or {}).get("checkpoint_type") == "superstep"), - checkpoints[-1], - ) - - resumed_agents = (_EchoAgent(id="agent1", name="A1"), _EchoAgent(id="agent2", name="A2")) - wf_resume = SequentialBuilder().participants(list(resumed_agents)).with_checkpointing(storage).build() - - resumed_output: list[ChatMessage] | None = None - async for ev in wf_resume.run_stream(checkpoint_id=resume_checkpoint.checkpoint_id): - if isinstance(ev, WorkflowOutputEvent): - resumed_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state in ( - WorkflowRunState.IDLE, - WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, - ): - break - - assert resumed_output is not None - assert [m.role for m in resumed_output] == [m.role for m in baseline_output] - assert [m.text for m in resumed_output] == [m.text for m in baseline_output] - - -async def test_sequential_checkpoint_runtime_only() -> None: - """Test checkpointing configured ONLY at runtime, not at build time.""" - storage = InMemoryCheckpointStorage() - - agents = (_EchoAgent(id="agent1", name="A1"), _EchoAgent(id="agent2", name="A2")) - wf = SequentialBuilder().participants(list(agents)).build() - - baseline_output: list[ChatMessage] | None = None - async for ev in wf.run_stream("runtime checkpoint test", checkpoint_storage=storage): - if isinstance(ev, WorkflowOutputEvent): - baseline_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: - break - - assert baseline_output is not None - - checkpoints = await storage.list_checkpoints() - assert checkpoints - checkpoints.sort(key=lambda cp: cp.timestamp) - - resume_checkpoint = 
next( - (cp for cp in checkpoints if (cp.metadata or {}).get("checkpoint_type") == "superstep"), - checkpoints[-1], - ) - - resumed_agents = (_EchoAgent(id="agent1", name="A1"), _EchoAgent(id="agent2", name="A2")) - wf_resume = SequentialBuilder().participants(list(resumed_agents)).build() - - resumed_output: list[ChatMessage] | None = None - async for ev in wf_resume.run_stream(checkpoint_id=resume_checkpoint.checkpoint_id, checkpoint_storage=storage): - if isinstance(ev, WorkflowOutputEvent): - resumed_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state in ( - WorkflowRunState.IDLE, - WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, - ): - break - - assert resumed_output is not None - assert [m.role for m in resumed_output] == [m.role for m in baseline_output] - assert [m.text for m in resumed_output] == [m.text for m in baseline_output] - - -async def test_sequential_checkpoint_runtime_overrides_buildtime() -> None: - """Test that runtime checkpoint storage overrides build-time configuration.""" - import tempfile - - with tempfile.TemporaryDirectory() as temp_dir1, tempfile.TemporaryDirectory() as temp_dir2: - from agent_framework._workflows._checkpoint import FileCheckpointStorage - - buildtime_storage = FileCheckpointStorage(temp_dir1) - runtime_storage = FileCheckpointStorage(temp_dir2) - - agents = (_EchoAgent(id="agent1", name="A1"), _EchoAgent(id="agent2", name="A2")) - wf = SequentialBuilder().participants(list(agents)).with_checkpointing(buildtime_storage).build() - - baseline_output: list[ChatMessage] | None = None - async for ev in wf.run_stream("override test", checkpoint_storage=runtime_storage): - if isinstance(ev, WorkflowOutputEvent): - baseline_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: - break - - assert baseline_output is not None - - buildtime_checkpoints = await buildtime_storage.list_checkpoints() - runtime_checkpoints = 
await runtime_storage.list_checkpoints() - - assert len(runtime_checkpoints) > 0, "Runtime storage should have checkpoints" - assert len(buildtime_checkpoints) == 0, "Build-time storage should have no checkpoints when overridden" - - -async def test_sequential_register_participants_with_checkpointing() -> None: - """Test that checkpointing works with register_participants.""" - storage = InMemoryCheckpointStorage() - - def create_agent1() -> _EchoAgent: - return _EchoAgent(id="agent1", name="A1") - - def create_agent2() -> _EchoAgent: - return _EchoAgent(id="agent2", name="A2") - - wf = SequentialBuilder().register_participants([create_agent1, create_agent2]).with_checkpointing(storage).build() - - baseline_output: list[ChatMessage] | None = None - async for ev in wf.run_stream("checkpoint with factories"): - if isinstance(ev, WorkflowOutputEvent): - baseline_output = ev.data - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: - break - - assert baseline_output is not None - - checkpoints = await storage.list_checkpoints() - assert checkpoints - checkpoints.sort(key=lambda cp: cp.timestamp) - - resume_checkpoint = next( - (cp for cp in checkpoints if (cp.metadata or {}).get("checkpoint_type") == "superstep"), - checkpoints[-1], - ) - - wf_resume = ( - SequentialBuilder().register_participants([create_agent1, create_agent2]).with_checkpointing(storage).build() - ) - - resumed_output: list[ChatMessage] | None = None - async for ev in wf_resume.run_stream(checkpoint_id=resume_checkpoint.checkpoint_id): - if isinstance(ev, WorkflowOutputEvent): - resumed_output = ev.data - if isinstance(ev, WorkflowStatusEvent) and ev.state in ( - WorkflowRunState.IDLE, - WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, - ): - break - - assert resumed_output is not None - assert [m.role for m in resumed_output] == [m.role for m in baseline_output] - assert [m.text for m in resumed_output] == [m.text for m in baseline_output] - - -async def 
test_sequential_register_participants_factories_called_on_build() -> None: - """Test that factories are called during build(), not during register_participants().""" - call_count = 0 - - def create_agent() -> _EchoAgent: - nonlocal call_count - call_count += 1 - return _EchoAgent(id=f"agent{call_count}", name=f"A{call_count}") - - builder = SequentialBuilder().register_participants([create_agent, create_agent]) - - # Factories should not be called yet - assert call_count == 0 - - wf = builder.build() - - # Now factories should have been called - assert call_count == 2 - - # Run the workflow to ensure it works - completed = False - output: list[ChatMessage] | None = None - async for ev in wf.run_stream("test factories timing"): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: - completed = True - elif isinstance(ev, WorkflowOutputEvent): - output = ev.data # type: ignore[assignment] - if completed and output is not None: - break - - assert completed - assert output is not None - msgs: list[ChatMessage] = output - # Should have user message + 2 agent replies - assert len(msgs) == 3 - - -async def test_sequential_builder_reusable_after_build_with_participants() -> None: - """Test that the builder can be reused to build multiple identical workflows with participants().""" - a1 = _EchoAgent(id="agent1", name="A1") - a2 = _EchoAgent(id="agent2", name="A2") - - builder = SequentialBuilder().participants([a1, a2]) - - # Build first workflow - builder.build() - - assert builder._participants[0] is a1 # type: ignore - assert builder._participants[1] is a2 # type: ignore - assert builder._participant_factories == [] # type: ignore - - -async def test_sequential_builder_reusable_after_build_with_factories() -> None: - """Test that the builder can be reused to build multiple workflows with register_participants().""" - call_count = 0 - - def create_agent1() -> _EchoAgent: - nonlocal call_count - call_count += 1 - return _EchoAgent(id="agent1", 
name="A1") - - def create_agent2() -> _EchoAgent: - nonlocal call_count - call_count += 1 - return _EchoAgent(id="agent2", name="A2") - - builder = SequentialBuilder().register_participants([create_agent1, create_agent2]) - - # Build first workflow - factories should be called - builder.build() - - assert call_count == 2 - assert builder._participants == [] # type: ignore - assert len(builder._participant_factories) == 2 # type: ignore - assert builder._participant_factories[0] is create_agent1 # type: ignore - assert builder._participant_factories[1] is create_agent2 # type: ignore diff --git a/python/packages/core/tests/workflow/test_workflow_kwargs.py b/python/packages/core/tests/workflow/test_workflow_kwargs.py deleted file mode 100644 index 763a911351..0000000000 --- a/python/packages/core/tests/workflow/test_workflow_kwargs.py +++ /dev/null @@ -1,735 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -from collections.abc import AsyncIterable, Sequence -from typing import Annotated, Any - -import pytest - -from agent_framework import ( - AgentResponse, - AgentResponseUpdate, - AgentThread, - BaseAgent, - ChatMessage, - ConcurrentBuilder, - Content, - GroupChatBuilder, - GroupChatState, - HandoffBuilder, - SequentialBuilder, - WorkflowRunState, - WorkflowStatusEvent, - tool, -) -from agent_framework._workflows._const import WORKFLOW_RUN_KWARGS_KEY - -# Track kwargs received by tools during test execution -_received_kwargs: list[dict[str, Any]] = [] - - -@tool(approval_mode="never_require") -def tool_with_kwargs( - action: Annotated[str, "The action to perform"], - **kwargs: Any, -) -> str: - """A test tool that captures kwargs for verification.""" - _received_kwargs.append(dict(kwargs)) - custom_data = kwargs.get("custom_data", {}) - user_token = kwargs.get("user_token", {}) - return f"Executed {action} with custom_data={custom_data}, user={user_token.get('user_name', 'unknown')}" - - -class _KwargsCapturingAgent(BaseAgent): - """Test agent that 
captures kwargs passed to run/run_stream.""" - - captured_kwargs: list[dict[str, Any]] - - def __init__(self, name: str = "test_agent") -> None: - super().__init__(name=name, description="Test agent for kwargs capture") - self.captured_kwargs = [] - - async def run( - self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, - *, - thread: AgentThread | None = None, - **kwargs: Any, - ) -> AgentResponse: - self.captured_kwargs.append(dict(kwargs)) - return AgentResponse(messages=[ChatMessage("assistant", [f"{self.name} response"])]) - - async def run_stream( - self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, - *, - thread: AgentThread | None = None, - **kwargs: Any, - ) -> AsyncIterable[AgentResponseUpdate]: - self.captured_kwargs.append(dict(kwargs)) - yield AgentResponseUpdate(contents=[Content.from_text(text=f"{self.name} response")]) - - -# region Sequential Builder Tests - - -async def test_sequential_kwargs_flow_to_agent() -> None: - """Test that kwargs passed to SequentialBuilder workflow flow through to agent.""" - agent = _KwargsCapturingAgent(name="seq_agent") - workflow = SequentialBuilder().participants([agent]).build() - - custom_data = {"endpoint": "https://api.example.com", "version": "v1"} - user_token = {"user_name": "alice", "access_level": "admin"} - - async for event in workflow.run_stream( - "test message", - custom_data=custom_data, - user_token=user_token, - ): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: - break - - # Verify agent received kwargs - assert len(agent.captured_kwargs) >= 1, "Agent should have been invoked at least once" - received = agent.captured_kwargs[0] - assert "custom_data" in received, "Agent should receive custom_data kwarg" - assert "user_token" in received, "Agent should receive user_token kwarg" - assert received["custom_data"] == custom_data - assert received["user_token"] == user_token - - -async def 
test_sequential_kwargs_flow_to_multiple_agents() -> None: - """Test that kwargs flow to all agents in a sequential workflow.""" - agent1 = _KwargsCapturingAgent(name="agent1") - agent2 = _KwargsCapturingAgent(name="agent2") - workflow = SequentialBuilder().participants([agent1, agent2]).build() - - custom_data = {"key": "value"} - - async for event in workflow.run_stream("test", custom_data=custom_data): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: - break - - # Both agents should have received kwargs - assert len(agent1.captured_kwargs) >= 1, "First agent should be invoked" - assert len(agent2.captured_kwargs) >= 1, "Second agent should be invoked" - assert agent1.captured_kwargs[0].get("custom_data") == custom_data - assert agent2.captured_kwargs[0].get("custom_data") == custom_data - - -async def test_sequential_run_kwargs_flow() -> None: - """Test that kwargs flow through workflow.run() (non-streaming).""" - agent = _KwargsCapturingAgent(name="run_agent") - workflow = SequentialBuilder().participants([agent]).build() - - _ = await workflow.run("test message", custom_data={"test": True}) - - assert len(agent.captured_kwargs) >= 1 - assert agent.captured_kwargs[0].get("custom_data") == {"test": True} - - -# endregion - - -# region Concurrent Builder Tests - - -async def test_concurrent_kwargs_flow_to_agents() -> None: - """Test that kwargs flow to all agents in a concurrent workflow.""" - agent1 = _KwargsCapturingAgent(name="concurrent1") - agent2 = _KwargsCapturingAgent(name="concurrent2") - workflow = ConcurrentBuilder().participants([agent1, agent2]).build() - - custom_data = {"batch_id": "123"} - user_token = {"user_name": "bob"} - - async for event in workflow.run_stream( - "concurrent test", - custom_data=custom_data, - user_token=user_token, - ): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: - break - - # Both agents should have received kwargs - assert 
len(agent1.captured_kwargs) >= 1, "First concurrent agent should be invoked" - assert len(agent2.captured_kwargs) >= 1, "Second concurrent agent should be invoked" - - for agent in [agent1, agent2]: - received = agent.captured_kwargs[0] - assert received.get("custom_data") == custom_data - assert received.get("user_token") == user_token - - -# endregion - - -# region GroupChat Builder Tests - - -async def test_groupchat_kwargs_flow_to_agents() -> None: - """Test that kwargs flow to agents in a group chat workflow.""" - agent1 = _KwargsCapturingAgent(name="chat1") - agent2 = _KwargsCapturingAgent(name="chat2") - - # Simple selector that takes GroupChatStateSnapshot - turn_count = 0 - - def simple_selector(state: GroupChatState) -> str: - nonlocal turn_count - turn_count += 1 - if turn_count > 2: # Loop after two turns for test - turn_count = 0 - # state is a Mapping - access via dict syntax - names = list(state.participants.keys()) - return names[(turn_count - 1) % len(names)] - - workflow = ( - GroupChatBuilder() - .participants([agent1, agent2]) - .with_orchestrator(selection_func=simple_selector) - .with_max_rounds(2) # Limit rounds to prevent infinite loop - .build() - ) - - custom_data = {"session_id": "group123"} - - async for event in workflow.run_stream("group chat test", custom_data=custom_data): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: - break - - # At least one agent should have received kwargs - all_kwargs = agent1.captured_kwargs + agent2.captured_kwargs - assert len(all_kwargs) >= 1, "At least one agent should be invoked in group chat" - - for received in all_kwargs: - assert received.get("custom_data") == custom_data - - -# endregion - - -# region SharedState Verification Tests - - -async def test_kwargs_stored_in_shared_state() -> None: - """Test that kwargs are stored in SharedState with the correct key.""" - from agent_framework import Executor, WorkflowContext, handler - - stored_kwargs: dict[str, Any] 
| None = None - - class _SharedStateInspector(Executor): - @handler - async def inspect(self, msgs: list[ChatMessage], ctx: WorkflowContext[list[ChatMessage]]) -> None: - nonlocal stored_kwargs - stored_kwargs = await ctx.get_shared_state(WORKFLOW_RUN_KWARGS_KEY) - await ctx.send_message(msgs) - - inspector = _SharedStateInspector(id="inspector") - workflow = SequentialBuilder().participants([inspector]).build() - - async for event in workflow.run_stream("test", my_kwarg="my_value", another=123): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: - break - - assert stored_kwargs is not None, "kwargs should be stored in SharedState" - assert stored_kwargs.get("my_kwarg") == "my_value" - assert stored_kwargs.get("another") == 123 - - -async def test_empty_kwargs_stored_as_empty_dict() -> None: - """Test that empty kwargs are stored as empty dict in SharedState.""" - from agent_framework import Executor, WorkflowContext, handler - - stored_kwargs: Any = "NOT_CHECKED" - - class _SharedStateChecker(Executor): - @handler - async def check(self, msgs: list[ChatMessage], ctx: WorkflowContext[list[ChatMessage]]) -> None: - nonlocal stored_kwargs - stored_kwargs = await ctx.get_shared_state(WORKFLOW_RUN_KWARGS_KEY) - await ctx.send_message(msgs) - - checker = _SharedStateChecker(id="checker") - workflow = SequentialBuilder().participants([checker]).build() - - # Run without any kwargs - async for event in workflow.run_stream("test"): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: - break - - # SharedState should have empty dict when no kwargs provided - assert stored_kwargs == {}, f"Expected empty dict, got: {stored_kwargs}" - - -# endregion - - -# region Edge Cases - - -async def test_kwargs_with_none_values() -> None: - """Test that kwargs with None values are passed through correctly.""" - agent = _KwargsCapturingAgent(name="none_test") - workflow = SequentialBuilder().participants([agent]).build() 
- - async for event in workflow.run_stream("test", optional_param=None, other_param="value"): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: - break - - assert len(agent.captured_kwargs) >= 1 - received = agent.captured_kwargs[0] - assert "optional_param" in received - assert received["optional_param"] is None - assert received["other_param"] == "value" - - -async def test_kwargs_with_complex_nested_data() -> None: - """Test that complex nested data structures flow through correctly.""" - agent = _KwargsCapturingAgent(name="nested_test") - workflow = SequentialBuilder().participants([agent]).build() - - complex_data = { - "level1": { - "level2": { - "level3": ["a", "b", "c"], - "number": 42, - }, - "list": [1, 2, {"nested": True}], - }, - "tuple_like": [1, 2, 3], - } - - async for event in workflow.run_stream("test", complex_data=complex_data): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: - break - - assert len(agent.captured_kwargs) >= 1 - received = agent.captured_kwargs[0] - assert received.get("complex_data") == complex_data - - -async def test_kwargs_preserved_across_workflow_reruns() -> None: - """Test that kwargs are correctly isolated between workflow runs.""" - agent = _KwargsCapturingAgent(name="rerun_test") - - # Build separate workflows for each run to avoid "already running" error - workflow1 = SequentialBuilder().participants([agent]).build() - workflow2 = SequentialBuilder().participants([agent]).build() - - # First run - async for event in workflow1.run_stream("run1", run_id="first"): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: - break - - # Second run with different kwargs (using fresh workflow) - async for event in workflow2.run_stream("run2", run_id="second"): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: - break - - assert len(agent.captured_kwargs) >= 2 - assert 
agent.captured_kwargs[0].get("run_id") == "first" - assert agent.captured_kwargs[1].get("run_id") == "second" - - -# endregion - - -# region Handoff Builder Tests - - -@pytest.mark.xfail(reason="Handoff workflow does not yet propagate kwargs to agents") -async def test_handoff_kwargs_flow_to_agents() -> None: - """Test that kwargs flow to agents in a handoff workflow.""" - agent1 = _KwargsCapturingAgent(name="coordinator") - agent2 = _KwargsCapturingAgent(name="specialist") - - workflow = ( - HandoffBuilder() - .participants([agent1, agent2]) - .with_start_agent(agent1) - .with_autonomous_mode() - .with_termination_condition(lambda conv: len(conv) >= 4) - .build() - ) - - custom_data = {"session_id": "handoff123"} - - async for event in workflow.run_stream("handoff test", custom_data=custom_data): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: - break - - # Coordinator agent should have received kwargs - assert len(agent1.captured_kwargs) >= 1, "Coordinator should be invoked in handoff" - assert agent1.captured_kwargs[0].get("custom_data") == custom_data - - -# endregion - - -# region Magentic Builder Tests - - -async def test_magentic_kwargs_flow_to_agents() -> None: - """Test that kwargs flow to agents in a magentic workflow via MagenticAgentExecutor.""" - from agent_framework import MagenticBuilder - from agent_framework._workflows._magentic import ( - MagenticContext, - MagenticManagerBase, - MagenticProgressLedger, - MagenticProgressLedgerItem, - ) - - # Create a mock manager that completes after one round - class _MockManager(MagenticManagerBase): - def __init__(self) -> None: - super().__init__(max_stall_count=3, max_reset_count=None, max_round_count=2) - self.task_ledger = None - - async def plan(self, magentic_context: MagenticContext) -> ChatMessage: - return ChatMessage("assistant", ["Plan: Test task"], author_name="manager") - - async def replan(self, magentic_context: MagenticContext) -> ChatMessage: - return 
ChatMessage("assistant", ["Replan: Test task"], author_name="manager") - - async def create_progress_ledger(self, magentic_context: MagenticContext) -> MagenticProgressLedger: - # Return completed on first call - return MagenticProgressLedger( - is_request_satisfied=MagenticProgressLedgerItem(answer=True, reason="Done"), - is_progress_being_made=MagenticProgressLedgerItem(answer=True, reason="Progress"), - is_in_loop=MagenticProgressLedgerItem(answer=False, reason="Not looping"), - instruction_or_question=MagenticProgressLedgerItem(answer="Complete", reason="Done"), - next_speaker=MagenticProgressLedgerItem(answer="agent1", reason="First"), - ) - - async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatMessage: - return ChatMessage("assistant", ["Final answer"], author_name="manager") - - agent = _KwargsCapturingAgent(name="agent1") - manager = _MockManager() - - workflow = MagenticBuilder().participants([agent]).with_manager(manager=manager).build() - - custom_data = {"session_id": "magentic123"} - - async for event in workflow.run_stream("magentic test", custom_data=custom_data): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: - break - - # The workflow completes immediately via prepare_final_answer without invoking agents - # because is_request_satisfied=True. This test verifies the kwargs storage path works. - # A more comprehensive integration test would require the manager to select an agent. 
- - -async def test_magentic_kwargs_stored_in_shared_state() -> None: - """Test that kwargs are stored in SharedState when using MagenticWorkflow.run_stream().""" - from agent_framework import MagenticBuilder - from agent_framework._workflows._magentic import ( - MagenticContext, - MagenticManagerBase, - MagenticProgressLedger, - MagenticProgressLedgerItem, - ) - - class _MockManager(MagenticManagerBase): - def __init__(self) -> None: - super().__init__(max_stall_count=3, max_reset_count=None, max_round_count=1) - self.task_ledger = None - - async def plan(self, magentic_context: MagenticContext) -> ChatMessage: - return ChatMessage("assistant", ["Plan"], author_name="manager") - - async def replan(self, magentic_context: MagenticContext) -> ChatMessage: - return ChatMessage("assistant", ["Replan"], author_name="manager") - - async def create_progress_ledger(self, magentic_context: MagenticContext) -> MagenticProgressLedger: - return MagenticProgressLedger( - is_request_satisfied=MagenticProgressLedgerItem(answer=True, reason="Done"), - is_progress_being_made=MagenticProgressLedgerItem(answer=True, reason="Progress"), - is_in_loop=MagenticProgressLedgerItem(answer=False, reason="Not looping"), - instruction_or_question=MagenticProgressLedgerItem(answer="Done", reason="Done"), - next_speaker=MagenticProgressLedgerItem(answer="agent1", reason="First"), - ) - - async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatMessage: - return ChatMessage("assistant", ["Final"], author_name="manager") - - agent = _KwargsCapturingAgent(name="agent1") - manager = _MockManager() - - magentic_workflow = MagenticBuilder().participants([agent]).with_manager(manager=manager).build() - - # Use MagenticWorkflow.run_stream() which goes through the kwargs attachment path - custom_data = {"magentic_key": "magentic_value"} - - async for event in magentic_workflow.run_stream("test task", custom_data=custom_data): - if isinstance(event, WorkflowStatusEvent) and 
event.state == WorkflowRunState.IDLE: - break - - # Verify the workflow completed (kwargs were stored, even if agent wasn't invoked) - # The test validates the code path through MagenticWorkflow.run_stream -> _MagenticStartMessage - - -# endregion - - -# region WorkflowAgent (as_agent) kwargs Tests - - -async def test_workflow_as_agent_run_propagates_kwargs_to_underlying_agent() -> None: - """Test that kwargs passed to workflow_agent.run() flow through to the underlying agents.""" - agent = _KwargsCapturingAgent(name="inner_agent") - workflow = SequentialBuilder().participants([agent]).build() - workflow_agent = workflow.as_agent(name="TestWorkflowAgent") - - custom_data = {"endpoint": "https://api.example.com", "version": "v1"} - user_token = {"user_name": "alice", "access_level": "admin"} - - _ = await workflow_agent.run( - "test message", - custom_data=custom_data, - user_token=user_token, - ) - - # Verify inner agent received kwargs - assert len(agent.captured_kwargs) >= 1, "Inner agent should have been invoked at least once" - received = agent.captured_kwargs[0] - assert "custom_data" in received, "Inner agent should receive custom_data kwarg" - assert "user_token" in received, "Inner agent should receive user_token kwarg" - assert received["custom_data"] == custom_data - assert received["user_token"] == user_token - - -async def test_workflow_as_agent_run_stream_propagates_kwargs_to_underlying_agent() -> None: - """Test that kwargs passed to workflow_agent.run_stream() flow through to the underlying agents.""" - agent = _KwargsCapturingAgent(name="inner_agent") - workflow = SequentialBuilder().participants([agent]).build() - workflow_agent = workflow.as_agent(name="TestWorkflowAgent") - - custom_data = {"session_id": "xyz123"} - api_token = "secret-token" - - async for _ in workflow_agent.run_stream( - "test message", - custom_data=custom_data, - api_token=api_token, - ): - pass - - # Verify inner agent received kwargs - assert len(agent.captured_kwargs) >= 
1, "Inner agent should have been invoked at least once" - received = agent.captured_kwargs[0] - assert "custom_data" in received, "Inner agent should receive custom_data kwarg" - assert "api_token" in received, "Inner agent should receive api_token kwarg" - assert received["custom_data"] == custom_data - assert received["api_token"] == api_token - - -async def test_workflow_as_agent_propagates_kwargs_to_multiple_agents() -> None: - """Test that kwargs flow to all agents when using workflow.as_agent().""" - agent1 = _KwargsCapturingAgent(name="agent1") - agent2 = _KwargsCapturingAgent(name="agent2") - workflow = SequentialBuilder().participants([agent1, agent2]).build() - workflow_agent = workflow.as_agent(name="MultiAgentWorkflow") - - custom_data = {"batch_id": "batch-001"} - - _ = await workflow_agent.run("test message", custom_data=custom_data) - - # Both agents should have received kwargs - assert len(agent1.captured_kwargs) >= 1, "First agent should be invoked" - assert len(agent2.captured_kwargs) >= 1, "Second agent should be invoked" - assert agent1.captured_kwargs[0].get("custom_data") == custom_data - assert agent2.captured_kwargs[0].get("custom_data") == custom_data - - -async def test_workflow_as_agent_kwargs_with_none_values() -> None: - """Test that kwargs with None values are passed through correctly via as_agent().""" - agent = _KwargsCapturingAgent(name="none_test_agent") - workflow = SequentialBuilder().participants([agent]).build() - workflow_agent = workflow.as_agent(name="NoneTestWorkflow") - - _ = await workflow_agent.run("test", optional_param=None, other_param="value") - - assert len(agent.captured_kwargs) >= 1 - received = agent.captured_kwargs[0] - assert "optional_param" in received - assert received["optional_param"] is None - assert received["other_param"] == "value" - - -async def test_workflow_as_agent_kwargs_with_complex_nested_data() -> None: - """Test that complex nested data structures flow through correctly via as_agent().""" - 
agent = _KwargsCapturingAgent(name="nested_agent") - workflow = SequentialBuilder().participants([agent]).build() - workflow_agent = workflow.as_agent(name="NestedDataWorkflow") - - complex_data = { - "level1": { - "level2": { - "level3": ["a", "b", "c"], - "number": 42, - }, - "list": [1, 2, {"nested": True}], - }, - } - - _ = await workflow_agent.run("test", complex_data=complex_data) - - assert len(agent.captured_kwargs) >= 1 - received = agent.captured_kwargs[0] - assert received.get("complex_data") == complex_data - - -# endregion - - -# region SubWorkflow (WorkflowExecutor) Tests - - -async def test_subworkflow_kwargs_propagation() -> None: - """Test that kwargs are propagated to subworkflows. - - Verifies kwargs passed to parent workflow.run_stream() flow through to agents - in subworkflows wrapped by WorkflowExecutor. - """ - from agent_framework._workflows._workflow_executor import WorkflowExecutor - - # Create an agent inside the subworkflow that captures kwargs - inner_agent = _KwargsCapturingAgent(name="inner_agent") - - # Build the inner (sub) workflow with the agent - inner_workflow = SequentialBuilder().participants([inner_agent]).build() - - # Wrap the inner workflow in a WorkflowExecutor so it can be used as a subworkflow - subworkflow_executor = WorkflowExecutor(workflow=inner_workflow, id="subworkflow_executor") - - # Build the outer (parent) workflow containing the subworkflow - outer_workflow = SequentialBuilder().participants([subworkflow_executor]).build() - - # Define kwargs that should propagate to subworkflow - custom_data = {"api_key": "secret123", "endpoint": "https://api.example.com"} - user_token = {"user_name": "alice", "access_level": "admin"} - - # Run the outer workflow with kwargs - async for event in outer_workflow.run_stream( - "test message for subworkflow", - custom_data=custom_data, - user_token=user_token, - ): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: - break - - # Verify that the 
inner agent was called - assert len(inner_agent.captured_kwargs) >= 1, "Inner agent in subworkflow should have been invoked" - - received_kwargs = inner_agent.captured_kwargs[0] - - # Verify kwargs were propagated from parent workflow to subworkflow agent - assert "custom_data" in received_kwargs, ( - f"Subworkflow agent should receive 'custom_data' kwarg. Received keys: {list(received_kwargs.keys())}" - ) - assert "user_token" in received_kwargs, ( - f"Subworkflow agent should receive 'user_token' kwarg. Received keys: {list(received_kwargs.keys())}" - ) - assert received_kwargs.get("custom_data") == custom_data, ( - f"Expected custom_data={custom_data}, got {received_kwargs.get('custom_data')}" - ) - assert received_kwargs.get("user_token") == user_token, ( - f"Expected user_token={user_token}, got {received_kwargs.get('user_token')}" - ) - - -async def test_subworkflow_kwargs_accessible_via_shared_state() -> None: - """Test that kwargs are accessible via SharedState within subworkflow. - - Verifies that WORKFLOW_RUN_KWARGS_KEY is populated in the subworkflow's SharedState - with kwargs from the parent workflow. 
- """ - from agent_framework import Executor, WorkflowContext, handler - from agent_framework._workflows._workflow_executor import WorkflowExecutor - - captured_kwargs_from_state: list[dict[str, Any]] = [] - - class _SharedStateReader(Executor): - """Executor that reads kwargs from SharedState for verification.""" - - @handler - async def read_kwargs(self, msgs: list[ChatMessage], ctx: WorkflowContext[list[ChatMessage]]) -> None: - kwargs_from_state = await ctx.get_shared_state(WORKFLOW_RUN_KWARGS_KEY) - captured_kwargs_from_state.append(kwargs_from_state or {}) - await ctx.send_message(msgs) - - # Build inner workflow with SharedState reader - state_reader = _SharedStateReader(id="state_reader") - inner_workflow = SequentialBuilder().participants([state_reader]).build() - - # Wrap as subworkflow - subworkflow_executor = WorkflowExecutor(workflow=inner_workflow, id="subworkflow") - - # Build outer workflow - outer_workflow = SequentialBuilder().participants([subworkflow_executor]).build() - - # Run with kwargs - async for event in outer_workflow.run_stream( - "test", - my_custom_kwarg="should_be_propagated", - another_kwarg=42, - ): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: - break - - # Verify the state reader was invoked - assert len(captured_kwargs_from_state) >= 1, "SharedState reader should have been invoked" - - kwargs_in_subworkflow = captured_kwargs_from_state[0] - - assert kwargs_in_subworkflow.get("my_custom_kwarg") == "should_be_propagated", ( - f"Expected 'my_custom_kwarg' in subworkflow SharedState, got: {kwargs_in_subworkflow}" - ) - assert kwargs_in_subworkflow.get("another_kwarg") == 42, ( - f"Expected 'another_kwarg'=42 in subworkflow SharedState, got: {kwargs_in_subworkflow}" - ) - - -async def test_nested_subworkflow_kwargs_propagation() -> None: - """Test kwargs propagation through multiple levels of nested subworkflows. 
- - Verifies kwargs flow through 3 levels: - - Outer workflow - - Middle subworkflow (WorkflowExecutor) - - Inner subworkflow (WorkflowExecutor) with agent - """ - from agent_framework._workflows._workflow_executor import WorkflowExecutor - - # Innermost agent - inner_agent = _KwargsCapturingAgent(name="deeply_nested_agent") - - # Build inner workflow - inner_workflow = SequentialBuilder().participants([inner_agent]).build() - inner_executor = WorkflowExecutor(workflow=inner_workflow, id="inner_executor") - - # Build middle workflow containing inner - middle_workflow = SequentialBuilder().participants([inner_executor]).build() - middle_executor = WorkflowExecutor(workflow=middle_workflow, id="middle_executor") - - # Build outer workflow containing middle - outer_workflow = SequentialBuilder().participants([middle_executor]).build() - - # Run with kwargs - async for event in outer_workflow.run_stream( - "deeply nested test", - deep_kwarg="should_reach_inner", - ): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: - break - - # Verify inner agent was called - assert len(inner_agent.captured_kwargs) >= 1, "Deeply nested agent should be invoked" - - received = inner_agent.captured_kwargs[0] - assert received.get("deep_kwarg") == "should_reach_inner", ( - f"Deeply nested agent should receive 'deep_kwarg'. 
Got: {received}" - ) - - -# endregion diff --git a/python/packages/orchestrations/tests/test_concurrent.py b/python/packages/orchestrations/tests/test_concurrent.py index afee1cb21f..edc937a75e 100644 --- a/python/packages/orchestrations/tests/test_concurrent.py +++ b/python/packages/orchestrations/tests/test_concurrent.py @@ -8,7 +8,6 @@ AgentExecutorResponse, AgentResponse, ChatMessage, - ConcurrentBuilder, Executor, WorkflowContext, WorkflowOutputEvent, @@ -17,6 +16,7 @@ handler, ) from agent_framework._workflows._checkpoint import InMemoryCheckpointStorage +from agent_framework.orchestrations import ConcurrentBuilder from typing_extensions import Never diff --git a/python/packages/orchestrations/tests/test_group_chat.py b/python/packages/orchestrations/tests/test_group_chat.py index 57e7ac279c..2e6e2f0ce9 100644 --- a/python/packages/orchestrations/tests/test_group_chat.py +++ b/python/packages/orchestrations/tests/test_group_chat.py @@ -17,18 +17,20 @@ ChatResponse, ChatResponseUpdate, Content, + RequestInfoEvent, + WorkflowOutputEvent, + WorkflowRunState, + WorkflowStatusEvent, +) +from agent_framework._workflows._checkpoint import InMemoryCheckpointStorage +from agent_framework.orchestrations import ( GroupChatBuilder, GroupChatState, MagenticContext, MagenticManagerBase, MagenticProgressLedger, MagenticProgressLedgerItem, - RequestInfoEvent, - WorkflowOutputEvent, - WorkflowRunState, - WorkflowStatusEvent, ) -from agent_framework._workflows._checkpoint import InMemoryCheckpointStorage class StubAgent(BaseAgent): @@ -1185,7 +1187,7 @@ def orchestrator_factory() -> BaseGroupChatOrchestrator: nonlocal factory_call_count factory_call_count += 1 from agent_framework._workflows._base_group_chat_orchestrator import ParticipantRegistry - from agent_framework._workflows._group_chat import GroupChatOrchestrator + from agent_framework.orchestrations import GroupChatOrchestrator # Create a custom orchestrator; when returning BaseGroupChatOrchestrator, # the builder 
uses it as-is without modifying its participant registry diff --git a/python/packages/orchestrations/tests/test_handoff.py b/python/packages/orchestrations/tests/test_handoff.py index 93a373a872..d1fe70eff6 100644 --- a/python/packages/orchestrations/tests/test_handoff.py +++ b/python/packages/orchestrations/tests/test_handoff.py @@ -11,14 +11,13 @@ ChatResponse, ChatResponseUpdate, Content, - HandoffAgentUserRequest, - HandoffBuilder, RequestInfoEvent, WorkflowEvent, WorkflowOutputEvent, resolve_agent_id, use_function_invocation, ) +from agent_framework.orchestrations import HandoffAgentUserRequest, HandoffBuilder @use_function_invocation diff --git a/python/packages/orchestrations/tests/test_magentic.py b/python/packages/orchestrations/tests/test_magentic.py index 5f9be8ee4f..0d42660d6c 100644 --- a/python/packages/orchestrations/tests/test_magentic.py +++ b/python/packages/orchestrations/tests/test_magentic.py @@ -17,16 +17,7 @@ Content, Executor, GroupChatRequestMessage, - MagenticBuilder, - MagenticContext, - MagenticManagerBase, - MagenticOrchestrator, - MagenticOrchestratorEvent, - MagenticPlanReviewRequest, - MagenticProgressLedger, - MagenticProgressLedgerItem, RequestInfoEvent, - StandardMagenticManager, Workflow, WorkflowCheckpoint, WorkflowCheckpointException, @@ -38,6 +29,17 @@ handler, ) from agent_framework._workflows._checkpoint import InMemoryCheckpointStorage +from agent_framework.orchestrations import ( + MagenticBuilder, + MagenticContext, + MagenticManagerBase, + MagenticOrchestrator, + MagenticOrchestratorEvent, + MagenticPlanReviewRequest, + MagenticProgressLedger, + MagenticProgressLedgerItem, + StandardMagenticManager, +) if sys.version_info >= (3, 12): from typing import override # type: ignore # pragma: no cover @@ -1243,7 +1245,7 @@ def agent_factory() -> AgentProtocol: custom_final_prompt = "Custom final: {task}" # Create a custom task ledger - from agent_framework._workflows._magentic import _MagenticTaskLedger # type: ignore + from 
agent_framework_orchestrations._magentic import _MagenticTaskLedger # type: ignore custom_task_ledger = _MagenticTaskLedger( facts=ChatMessage("assistant", ["Custom facts"]), @@ -1279,7 +1281,7 @@ def agent_factory() -> AgentProtocol: manager = orchestrator._manager # type: ignore[reportPrivateUsage] # Verify the manager is a StandardMagenticManager with the expected options - from agent_framework import StandardMagenticManager + from agent_framework.orchestrations import StandardMagenticManager assert isinstance(manager, StandardMagenticManager) assert manager.task_ledger is custom_task_ledger diff --git a/python/packages/orchestrations/tests/test_sequential.py b/python/packages/orchestrations/tests/test_sequential.py index 389a33c7c0..b6441ff592 100644 --- a/python/packages/orchestrations/tests/test_sequential.py +++ b/python/packages/orchestrations/tests/test_sequential.py @@ -13,7 +13,6 @@ ChatMessage, Content, Executor, - SequentialBuilder, TypeCompatibilityError, WorkflowContext, WorkflowOutputEvent, @@ -22,6 +21,7 @@ handler, ) from agent_framework._workflows._checkpoint import InMemoryCheckpointStorage +from agent_framework.orchestrations import SequentialBuilder class _EchoAgent(BaseAgent): From 219826048f364dc3eca631b58b6a347965d210f9 Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Thu, 5 Feb 2026 08:34:44 +0900 Subject: [PATCH 3/4] Fix markdown links --- python/packages/core/README.md | 5 +---- python/samples/getting_started/orchestrations/README.md | 1 - 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/python/packages/core/README.md b/python/packages/core/README.md index 30ff1b7aa4..637b1e3655 100644 --- a/python/packages/core/README.md +++ b/python/packages/core/README.md @@ -213,10 +213,7 @@ if __name__ == "__main__": asyncio.run(main()) ``` -**Note**: GroupChat, Sequential, and Concurrent orchestrations are available today. 
See examples in: -- [python/samples/getting_started/workflows/orchestration/](https://github.com/microsoft/agent-framework/tree/main/python/samples/getting_started/workflows/orchestration) -- [group_chat_simple_selector.py](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/workflows/orchestration/group_chat_simple_selector.py) -- [group_chat_prompt_based_manager.py](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/workflows/orchestration/group_chat_prompt_based_manager.py) +**Note**: Sequential, Concurrent, Group Chat, Handoff, and Magentic orchestrations are available. See examples in [python/samples/getting_started/orchestrations/](https://github.com/microsoft/agent-framework/tree/main/python/samples/getting_started/orchestrations). ## More Examples & Samples diff --git a/python/samples/getting_started/orchestrations/README.md b/python/samples/getting_started/orchestrations/README.md index 886be7775b..d1fb0e0ef0 100644 --- a/python/samples/getting_started/orchestrations/README.md +++ b/python/samples/getting_started/orchestrations/README.md @@ -41,7 +41,6 @@ from agent_framework.orchestrations import ( | Handoff (Autonomous) | [handoff_autonomous.py](./handoff_autonomous.py) | Autonomous mode: specialists iterate independently until invoking a handoff tool using `.with_autonomous_mode()` | | Handoff (Participant Factory) | [handoff_participant_factory.py](./handoff_participant_factory.py) | Use participant factories for state isolation between workflow instances | | Handoff with Code Interpreter | [handoff_with_code_interpreter_file.py](./handoff_with_code_interpreter_file.py) | Retrieve file IDs from code interpreter output in handoff workflow | -| Handoff with Azure AI Agent | [handoff_participant_factory_azure_ai_agent.py](./handoff_participant_factory_azure_ai_agent.py) | Handoff workflow with tool approvals and checkpoint resume using AzureAIProjectAgentProvider | | Magentic Workflow 
(Multi-Agent) | [magentic.py](./magentic.py) | Orchestrate multiple agents with Magentic manager and streaming | | Magentic + Human Plan Review | [magentic_human_plan_review.py](./magentic_human_plan_review.py) | Human reviews/updates the plan before execution | | Magentic + Checkpoint Resume | [magentic_checkpoint.py](./magentic_checkpoint.py) | Resume Magentic orchestration from saved checkpoints | From 853c1f92f7727f607d4ad413b8967bce89a30ecb Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Thu, 5 Feb 2026 08:45:30 +0900 Subject: [PATCH 4/4] Fix links --- python/README.md | 2 +- python/packages/core/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/python/README.md b/python/README.md index 74d7052c12..80cb85e4f4 100644 --- a/python/README.md +++ b/python/README.md @@ -233,7 +233,7 @@ if __name__ == "__main__": asyncio.run(main()) ``` -For more advanced orchestration patterns including Sequential, GroupChat, Concurrent, Magentic, and Handoff orchestrations, see the [orchestration samples](samples/getting_started/workflows/orchestration). +For more advanced orchestration patterns including Sequential, Concurrent, Group Chat, Handoff, and Magentic orchestrations, see the [orchestration samples](samples/getting_started/orchestrations). ## More Examples & Samples diff --git a/python/packages/core/README.md b/python/packages/core/README.md index 637b1e3655..a56badd777 100644 --- a/python/packages/core/README.md +++ b/python/packages/core/README.md @@ -213,7 +213,7 @@ if __name__ == "__main__": asyncio.run(main()) ``` -**Note**: Sequential, Concurrent, Group Chat, Handoff, and Magentic orchestrations are available. See examples in [python/samples/getting_started/orchestrations/](https://github.com/microsoft/agent-framework/tree/main/python/samples/getting_started/orchestrations). +**Note**: Sequential, Concurrent, Group Chat, Handoff, and Magentic orchestrations are available. 
See examples in [orchestration samples](../../samples/getting_started/orchestrations). ## More Examples & Samples