From 4fbf2cef1a3cd1db30dcf7a71347db06b6433dca Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Thu, 5 Feb 2026 09:06:05 +0900 Subject: [PATCH 01/12] Refactor events --- .../agent_framework/_workflows/__init__.py | 26 +- .../core/agent_framework/_workflows/_agent.py | 111 ++-- .../_workflows/_agent_executor.py | 9 +- .../_workflows/_checkpoint_summary.py | 6 +- .../agent_framework/_workflows/_events.py | 526 +++++++++--------- .../agent_framework/_workflows/_executor.py | 10 +- .../agent_framework/_workflows/_magentic.py | 13 +- .../agent_framework/_workflows/_runner.py | 6 +- .../_workflows/_runner_context.py | 36 +- .../agent_framework/_workflows/_workflow.py | 59 +- .../_workflows/_workflow_context.py | 31 +- .../_workflows/_workflow_executor.py | 30 +- .../test_agent_executor_tool_calls.py | 14 +- .../workflow/test_agent_run_event_typing.py | 31 +- .../workflow/test_checkpoint_validation.py | 2 +- .../core/tests/workflow/test_executor.py | 41 +- .../core/tests/workflow/test_group_chat.py | 32 +- .../core/tests/workflow/test_magentic.py | 36 +- .../test_request_info_and_response.py | 32 +- .../test_request_info_event_rehydrate.py | 17 +- .../core/tests/workflow/test_runner.py | 4 +- .../core/tests/workflow/test_typing_utils.py | 13 +- .../core/tests/workflow/test_workflow.py | 58 +- .../tests/workflow/test_workflow_agent.py | 16 +- .../tests/workflow/test_workflow_kwargs.py | 32 +- .../tests/workflow/test_workflow_states.py | 43 +- .../devui/agent_framework_devui/_executor.py | 7 +- .../devui/agent_framework_devui/_mapper.py | 213 ++++--- .../packages/devui/tests/test_checkpoints.py | 4 +- python/packages/devui/tests/test_helpers.py | 26 +- python/packages/devui/tests/test_mapper.py | 134 +++-- .../lab/lightning/tests/test_lightning.py | 8 +- .../01_round_robin_group_chat.py | 13 +- .../orchestrations/02_selector_group_chat.py | 4 +- .../orchestrations/03_swarm.py | 21 +- .../orchestrations/04_magentic_one.py | 11 +- .../workflow_evaluation/create_workflow.py | 
8 +- .../observability/workflow_observability.py | 6 +- .../_start-here/step2_agents_in_a_workflow.py | 4 +- .../workflows/_start-here/step3_streaming.py | 20 +- .../_start-here/step4_using_factories.py | 5 +- .../agents/azure_ai_agents_streaming.py | 6 +- .../azure_chat_agents_function_bridge.py | 9 +- .../agents/azure_chat_agents_streaming.py | 6 +- ...re_chat_agents_tool_calls_with_feedback.py | 15 +- .../workflow_as_agent_reflection_pattern.py | 9 +- .../checkpoint_with_human_in_the_loop.py | 15 +- .../checkpoint/checkpoint_with_resume.py | 5 +- ...ff_with_tool_approval_checkpoint_resume.py | 35 +- .../checkpoint/sub_workflow_checkpoint.py | 19 +- .../composition/sub_workflow_kwargs.py | 5 +- .../sub_workflow_request_interception.py | 5 +- .../multi_selection_edge_group.py | 4 +- .../control-flow/sequential_executors.py | 5 +- .../control-flow/sequential_streaming.py | 4 +- .../declarative/customer_support/main.py | 6 +- .../declarative/deep_research/main.py | 4 +- .../declarative/function_tools/main.py | 7 +- .../declarative/human_in_loop/main.py | 6 +- .../workflows/declarative/marketing/main.py | 4 +- .../declarative/student_teacher/main.py | 4 +- .../concurrent_request_info.py | 13 +- .../group_chat_request_info.py | 15 +- .../guessing_game_with_human_input.py | 21 +- .../sequential_request_info.py | 15 +- .../observability/executor_io_observation.py | 5 +- .../orchestration/concurrent_agents.py | 2 +- .../orchestration/group_chat_agent_manager.py | 11 +- .../group_chat_philosophical_debate.py | 9 +- .../group_chat_simple_selector.py | 11 +- .../orchestration/handoff_autonomous.py | 7 +- .../handoff_participant_factory.py | 30 +- .../workflows/orchestration/handoff_simple.py | 30 +- .../handoff_with_code_interpreter_file.py | 21 +- .../workflows/orchestration/magentic.py | 11 +- .../orchestration/magentic_checkpoint.py | 25 +- .../magentic_human_plan_review.py | 18 +- .../orchestration/sequential_agents.py | 4 +- .../aggregate_results_of_different_types.py 
| 4 +- .../parallelism/fan_out_fan_in_edges.py | 7 +- .../map_reduce_and_visualization.py | 5 +- .../state-management/workflow_kwargs.py | 4 +- .../concurrent_builder_tool_approval.py | 21 +- .../group_chat_builder_tool_approval.py | 12 +- .../sequential_builder_tool_approval.py | 15 +- .../orchestrations/concurrent_basic.py | 4 +- .../orchestrations/group_chat.py | 4 +- .../orchestrations/handoff.py | 12 +- .../orchestrations/magentic.py | 4 +- .../orchestrations/sequential.py | 4 +- .../processes/fan_out_fan_in_process.py | 4 +- .../processes/nested_process.py | 4 +- 92 files changed, 1098 insertions(+), 1145 deletions(-) diff --git a/python/packages/core/agent_framework/_workflows/__init__.py b/python/packages/core/agent_framework/_workflows/__init__.py index 70706ff827..bec27a1ca9 100644 --- a/python/packages/core/agent_framework/_workflows/__init__.py +++ b/python/packages/core/agent_framework/_workflows/__init__.py @@ -38,24 +38,13 @@ ) from ._edge_runner import create_edge_runner from ._events import ( - AgentRunEvent, - AgentRunUpdateEvent, - ExecutorCompletedEvent, ExecutorEvent, - ExecutorFailedEvent, - ExecutorInvokedEvent, - RequestInfoEvent, - SuperStepCompletedEvent, - SuperStepStartedEvent, WorkflowErrorDetails, WorkflowEvent, WorkflowEventSource, - WorkflowFailedEvent, + WorkflowEventType, WorkflowLifecycleEvent, - WorkflowOutputEvent, WorkflowRunState, - WorkflowStartedEvent, - WorkflowStatusEvent, ) from ._exceptions import ( WorkflowCheckpointException, @@ -132,8 +121,6 @@ "AgentExecutorRequest", "AgentExecutorResponse", "AgentRequestInfoResponse", - "AgentRunEvent", - "AgentRunUpdateEvent", "BaseGroupChatOrchestrator", "Case", "CheckpointStorage", @@ -143,10 +130,7 @@ "EdgeCondition", "EdgeDuplicationError", "Executor", - "ExecutorCompletedEvent", "ExecutorEvent", - "ExecutorFailedEvent", - "ExecutorInvokedEvent", "FanInEdgeGroup", "FanOutEdgeGroup", "FileCheckpointStorage", @@ -175,7 +159,6 @@ "MagenticResetSignal", "Message", 
"OrchestrationState", - "RequestInfoEvent", "Runner", "RunnerContext", "SequentialBuilder", @@ -184,8 +167,6 @@ "StandardMagenticManager", "SubWorkflowRequestMessage", "SubWorkflowResponseMessage", - "SuperStepCompletedEvent", - "SuperStepStartedEvent", "SwitchCaseEdgeGroup", "SwitchCaseEdgeGroupCase", "SwitchCaseEdgeGroupDefault", @@ -202,16 +183,13 @@ "WorkflowErrorDetails", "WorkflowEvent", "WorkflowEventSource", + "WorkflowEventType", "WorkflowException", "WorkflowExecutor", - "WorkflowFailedEvent", "WorkflowLifecycleEvent", - "WorkflowOutputEvent", "WorkflowRunResult", "WorkflowRunState", "WorkflowRunnerException", - "WorkflowStartedEvent", - "WorkflowStatusEvent", "WorkflowValidationError", "WorkflowViz", "create_edge_runner", diff --git a/python/packages/core/agent_framework/_workflows/_agent.py b/python/packages/core/agent_framework/_workflows/_agent.py index 1543ed7db6..63d5f1cf7b 100644 --- a/python/packages/core/agent_framework/_workflows/_agent.py +++ b/python/packages/core/agent_framework/_workflows/_agent.py @@ -25,10 +25,7 @@ from ._agent_executor import AgentExecutor from ._checkpoint import CheckpointStorage from ._events import ( - AgentRunUpdateEvent, - RequestInfoEvent, WorkflowEvent, - WorkflowOutputEvent, ) from ._message_utils import normalize_messages_input from ._typing_utils import is_type_compatible @@ -109,14 +106,14 @@ def __init__( super().__init__(id=id, name=name, description=description, **kwargs) self._workflow: "Workflow" = workflow - self._pending_requests: dict[str, RequestInfoEvent] = {} + self._pending_requests: dict[str, WorkflowEvent[Any]] = {} @property def workflow(self) -> "Workflow": return self._workflow @property - def pending_requests(self) -> dict[str, RequestInfoEvent]: + def pending_requests(self) -> "dict[str, WorkflowEvent[Any]]": return self._pending_requests async def run( @@ -289,71 +286,76 @@ async def _run_stream_impl( def _convert_workflow_event_to_agent_update( self, response_id: str, - event: 
WorkflowEvent, + event: "WorkflowEvent[Any]", ) -> AgentResponseUpdate | None: """Convert a workflow event to an AgentResponseUpdate. - AgentRunUpdateEvent, RequestInfoEvent, and WorkflowOutputEvent are processed. + Events with type='data', type='request_info', and type='output' are processed. Other workflow events are ignored as they are workflow-internal. - For AgentRunUpdateEvent from AgentExecutor instances, only events from executors + For 'data' events (AgentResponseUpdate) from AgentExecutor instances, only events from executors with output_response=True are converted to agent updates. This prevents agent responses from executors that were not explicitly marked to surface their output. - Non-AgentExecutor executors that emit AgentRunUpdateEvent directly are allowed + Non-AgentExecutor executors that emit 'data' events directly are allowed through since they explicitly chose to emit the event. """ - match event: - case AgentRunUpdateEvent(data=update, executor_id=executor_id): - # For AgentExecutor instances, only pass through if output_response=True. - # Non-AgentExecutor executors that emit AgentRunUpdateEvent are allowed through. - executor = self.workflow.executors.get(executor_id) - if isinstance(executor, AgentExecutor) and not executor.output_response: - return None - if update: - # Enrich with executor identity if author_name is not already set - if not update.author_name: - update.author_name = executor_id - return update + if event.type == "data" and isinstance(event.data, AgentResponseUpdate): + # For AgentExecutor instances, only pass through if output_response=True. + # Non-AgentExecutor executors that emit 'data' events are allowed through. 
+ executor_id = event.executor_id + executor = self.workflow.executors.get(executor_id) if executor_id else None + if isinstance(executor, AgentExecutor) and not executor.output_response: return None - - case WorkflowOutputEvent(data=data, executor_id=executor_id): - # Convert workflow output to an agent response update. - # Handle different data types appropriately. - - # Skip AgentResponse from AgentExecutor with output_response=True - # since streaming events already surfaced the content. - if isinstance(data, AgentResponse): - executor = self.workflow.executors.get(executor_id) - if isinstance(executor, AgentExecutor) and executor.output_response: - return None - - if isinstance(data, AgentResponseUpdate): - return data - if isinstance(data, ChatMessage): - return AgentResponseUpdate( - contents=list(data.contents), - role=data.role, - author_name=data.author_name or executor_id, - response_id=response_id, - message_id=str(uuid.uuid4()), - created_at=datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ"), - raw_representation=data, - ) - contents = self._extract_contents(data) - if not contents: + update = event.data + if update: + # Enrich with executor identity if author_name is not already set + if not update.author_name: + update.author_name = executor_id + return update + return None + + if event.type == "output": + # Convert workflow output to an agent response update. + # Handle different data types appropriately. + data = event.data + executor_id = event.executor_id + + # Skip AgentResponse from AgentExecutor with output_response=True + # since streaming events already surfaced the content. 
+ if isinstance(data, AgentResponse): + executor = self.workflow.executors.get(executor_id) if executor_id else None + if isinstance(executor, AgentExecutor) and executor.output_response: return None + + if isinstance(data, AgentResponseUpdate): + return data + if isinstance(data, ChatMessage): return AgentResponseUpdate( - contents=contents, - role=Role.ASSISTANT, - author_name=executor_id, + contents=list(data.contents), + role=data.role, + author_name=data.author_name or executor_id, response_id=response_id, message_id=str(uuid.uuid4()), created_at=datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ"), raw_representation=data, ) + contents = self._extract_contents(data) + if not contents: + return None + return AgentResponseUpdate( + contents=contents, + role=Role.ASSISTANT, + author_name=executor_id, + response_id=response_id, + message_id=str(uuid.uuid4()), + created_at=datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ"), + raw_representation=data, + ) - case RequestInfoEvent(request_id=request_id): - # Store the pending request for later correlation + if event.type == "request_info": + # Store the pending request for later correlation + request_id = event.request_id + if request_id: self.pending_requests[request_id] = event args = self.RequestInfoFunctionArgs(request_id=request_id, data=event.data).to_dict() @@ -376,9 +378,8 @@ def _convert_workflow_event_to_agent_update( message_id=str(uuid.uuid4()), created_at=datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ"), ) - case _: - # Ignore workflow-internal events - pass + + # Ignore workflow-internal events return None def _extract_function_responses(self, input_messages: list[ChatMessage]) -> dict[str, Any]: diff --git a/python/packages/core/agent_framework/_workflows/_agent_executor.py b/python/packages/core/agent_framework/_workflows/_agent_executor.py index 80bd4aba43..a801b619b5 100644 --- a/python/packages/core/agent_framework/_workflows/_agent_executor.py +++ 
b/python/packages/core/agent_framework/_workflows/_agent_executor.py @@ -15,10 +15,7 @@ from ._checkpoint_encoding import decode_checkpoint_value, encode_checkpoint_value from ._const import WORKFLOW_RUN_KWARGS_KEY from ._conversation_state import encode_chat_messages -from ._events import ( - AgentRunEvent, - AgentRunUpdateEvent, -) +from ._events import WorkflowEvent from ._executor import Executor, handler from ._message_utils import normalize_messages_input from ._request_info_mixin import response_handler @@ -340,7 +337,7 @@ async def _run_agent(self, ctx: WorkflowContext) -> AgentResponse | None: thread=self._agent_thread, **run_kwargs, ) - await ctx.add_event(AgentRunEvent(self.id, response)) + await ctx.add_event(WorkflowEvent.emit(self.id, response)) # Handle any user input requests if response.user_input_requests: @@ -370,7 +367,7 @@ async def _run_agent_streaming(self, ctx: WorkflowContext) -> AgentResponse | No **run_kwargs, ): updates.append(update) - await ctx.add_event(AgentRunUpdateEvent(self.id, update)) + await ctx.add_event(WorkflowEvent.emit(self.id, update)) if update.user_input_requests: user_input_requests.extend(update.user_input_requests) diff --git a/python/packages/core/agent_framework/_workflows/_checkpoint_summary.py b/python/packages/core/agent_framework/_workflows/_checkpoint_summary.py index ebcf2ff83b..78e2fa54cf 100644 --- a/python/packages/core/agent_framework/_workflows/_checkpoint_summary.py +++ b/python/packages/core/agent_framework/_workflows/_checkpoint_summary.py @@ -5,7 +5,7 @@ from ._checkpoint import WorkflowCheckpoint from ._const import EXECUTOR_STATE_KEY -from ._events import RequestInfoEvent +from ._events import WorkflowEvent logger = logging.getLogger(__name__) @@ -20,14 +20,14 @@ class WorkflowCheckpointSummary: targets: list[str] executor_ids: list[str] status: str - pending_request_info_events: list[RequestInfoEvent] + pending_request_info_events: list[WorkflowEvent] def get_checkpoint_summary(checkpoint: 
WorkflowCheckpoint) -> WorkflowCheckpointSummary: targets = sorted(checkpoint.messages.keys()) executor_ids = sorted(checkpoint.shared_state.get(EXECUTOR_STATE_KEY, {}).keys()) pending_request_info_events = [ - RequestInfoEvent.from_dict(request) for request in checkpoint.pending_request_info_events.values() + WorkflowEvent.from_dict(request) for request in checkpoint.pending_request_info_events.values() ] status = "idle" diff --git a/python/packages/core/agent_framework/_workflows/_events.py b/python/packages/core/agent_framework/_workflows/_events.py index dcd6ab5866..4f5155a2ae 100644 --- a/python/packages/core/agent_framework/_workflows/_events.py +++ b/python/packages/core/agent_framework/_workflows/_events.py @@ -1,18 +1,25 @@ # Copyright (c) Microsoft. All rights reserved. +import sys import traceback as _traceback from collections.abc import Iterator from contextlib import contextmanager from contextvars import ContextVar from dataclasses import dataclass from enum import Enum -from typing import Any, TypeAlias +from typing import Any, Generic, Literal, TypeAlias -from agent_framework import AgentResponse, AgentResponseUpdate +if sys.version_info >= (3, 13): + from typing import TypeVar # type: ignore # pragma: no cover +else: + from typing_extensions import TypeVar # type: ignore[import] # pragma: no cover from ._checkpoint_encoding import decode_checkpoint_value, encode_checkpoint_value from ._typing_utils import deserialize_type, serialize_type +# TypeVar with default parameter (Python 3.13+ feature, backported via typing_extensions) +DataT = TypeVar("DataT", default=Any) + class WorkflowEventSource(str, Enum): """Identifies whether a workflow event came from the framework or an executor. 
@@ -46,114 +53,16 @@ def _framework_event_origin() -> Iterator[None]: # pyright: ignore[reportUnused _event_origin_context.reset(token) -class WorkflowEvent: - """Base class for workflow events.""" - - def __init__(self, data: Any | None = None): - """Initialize the workflow event with optional data.""" - self.data = data - self.origin = _current_event_origin() - - def __repr__(self) -> str: - """Return a string representation of the workflow event.""" - data_repr = self.data if self.data is not None else "None" - return f"{self.__class__.__name__}(origin={self.origin}, data={data_repr})" - - -class WorkflowStartedEvent(WorkflowEvent): - """Built-in lifecycle event emitted when a workflow run begins.""" - - ... - - -class WorkflowWarningEvent(WorkflowEvent): - """Executor-origin event signaling a warning surfaced by user code.""" - - def __init__(self, data: str): - """Initialize the workflow warning event with optional data and warning message.""" - super().__init__(data) - - def __repr__(self) -> str: - """Return a string representation of the workflow warning event.""" - return f"{self.__class__.__name__}(message={self.data}, origin={self.origin})" - - -class WorkflowErrorEvent(WorkflowEvent): - """Executor-origin event signaling an error surfaced by user code.""" - - def __init__(self, data: Exception): - """Initialize the workflow error event with optional data and error message.""" - super().__init__(data) - - def __repr__(self) -> str: - """Return a string representation of the workflow error event.""" - return f"{self.__class__.__name__}(exception={self.data}, origin={self.origin})" - - class WorkflowRunState(str, Enum): - """Run-level state of a workflow execution. - - Semantics: - - STARTED: Run has been initiated and the workflow context has been created. - This is an initial state before any meaningful work is performed. 
In this - codebase we emit a dedicated `WorkflowStartedEvent` for telemetry, and - typically advance the status directly to `IN_PROGRESS`. Consumers may - still rely on `STARTED` for state machines that need an explicit pre-work - phase. - - - IN_PROGRESS: The workflow is actively executing (e.g., the initial - message has been delivered to the start executor or a superstep is - running). This status is emitted at the beginning of a run and can be - followed by other statuses as the run progresses. - - - IN_PROGRESS_PENDING_REQUESTS: Active execution while one or more - request-for-information operations are outstanding. New work may still - be scheduled while requests are in flight. - - - IDLE: The workflow is quiescent with no outstanding requests and no more - work to do. This is the normal terminal state for workflows that have - finished executing, potentially having produced outputs along the way. - - - IDLE_WITH_PENDING_REQUESTS: The workflow is paused awaiting external - input (e.g., emitted a `RequestInfoEvent`). This is a non-terminal - state; the workflow can resume when responses are supplied. - - - FAILED: Terminal state indicating an error surfaced. Accompanied by a - `WorkflowFailedEvent` with structured error details. - - - CANCELLED: Terminal state indicating the run was cancelled by a caller - or orchestrator. Not currently emitted by default runner paths but - included for integrators/orchestrators that support cancellation. 
- """ - - STARTED = "STARTED" # Explicit pre-work phase (rarely emitted as status; see note above) - IN_PROGRESS = "IN_PROGRESS" # Active execution is underway - IN_PROGRESS_PENDING_REQUESTS = "IN_PROGRESS_PENDING_REQUESTS" # Active execution with outstanding requests - IDLE = "IDLE" # No active work and no outstanding requests - IDLE_WITH_PENDING_REQUESTS = "IDLE_WITH_PENDING_REQUESTS" # Paused awaiting external responses - FAILED = "FAILED" # Finished with an error - CANCELLED = "CANCELLED" # Finished due to cancellation - - -class WorkflowStatusEvent(WorkflowEvent): - """Built-in lifecycle event emitted for workflow run state transitions.""" - - def __init__( - self, - state: WorkflowRunState, - data: Any | None = None, - ): - """Initialize the workflow status event with a new state and optional data. - - Args: - state: The new state of the workflow run. - data: Optional additional data associated with the state change. - """ - super().__init__(data) - self.state = state + """Run-level state of a workflow execution.""" - def __repr__(self) -> str: # pragma: no cover - representation only - return f"{self.__class__.__name__}(state={self.state}, data={self.data!r}, origin={self.origin})" + STARTED = "STARTED" + IN_PROGRESS = "IN_PROGRESS" + IN_PROGRESS_PENDING_REQUESTS = "IN_PROGRESS_PENDING_REQUESTS" + IDLE = "IDLE" + IDLE_WITH_PENDING_REQUESTS = "IDLE_WITH_PENDING_REQUESTS" + FAILED = "FAILED" + CANCELLED = "CANCELLED" @dataclass @@ -188,208 +97,269 @@ def from_exception( ) -class WorkflowFailedEvent(WorkflowEvent): - """Built-in lifecycle event emitted when a workflow run terminates with an error.""" - - def __init__( - self, - details: WorkflowErrorDetails, - data: Any | None = None, - ): - super().__init__(data) - self.details = details - - def __repr__(self) -> str: # pragma: no cover - representation only - return f"{self.__class__.__name__}(details={self.details}, data={self.data!r}, origin={self.origin})" - +# Type discriminator for workflow events (like 
ContentType in _types.py) +WorkflowEventType = Literal[ + # Lifecycle events (workflow-level) + "started", # Workflow run began + "status", # Workflow state changed (use .state) + "failed", # Workflow terminated with error (use .details) + # Data events + "output", # Executor yielded final output (use .executor_id, .data) + "data", # Executor emitted data during execution (use .executor_id, .data) + # Request events (human-in-the-loop) + "request_info", # Executor requests external info (use .request_id, .source_executor_id) + # Diagnostic events (warnings/errors from user code) + "warning", # Warning from user code (use .data as str) + "error", # Error from user code, non-fatal (use .data as Exception) + # Iteration events (supersteps) + "superstep_started", # Superstep began (use .iteration) + "superstep_completed", # Superstep ended (use .iteration) + # Executor lifecycle events + "executor_invoked", # Executor handler was called (use .executor_id, .data) + "executor_completed", # Executor handler completed (use .executor_id, .data) + "executor_failed", # Executor handler raised error (use .executor_id, .details) + # Extension point + "custom", # User-defined event (for subclassing) +] + + +class WorkflowEvent(Generic[DataT]): + """Unified event for all workflow emissions. + + This single generic class handles all workflow events through a `type` discriminator, + following the same pattern as the `Content` class. 
+ + Use factory methods for convenient construction: + + - `WorkflowEvent.started()` - workflow run began + - `WorkflowEvent.status(state)` - workflow state changed + - `WorkflowEvent.failed(details)` - workflow terminated with error + - `WorkflowEvent.warning(message)` - warning from user code + - `WorkflowEvent.error(exception)` - error from user code + - `WorkflowEvent.output(executor_id, data)` - executor yielded final output + - `WorkflowEvent.data(executor_id, data)` - executor emitted data (e.g., AgentResponse) + - `WorkflowEvent.request_info(...)` - executor requests external info + - `WorkflowEvent.superstep_started(iteration)` - superstep began + - `WorkflowEvent.superstep_completed(iteration)` - superstep ended + - `WorkflowEvent.executor_invoked(executor_id)` - executor handler called + - `WorkflowEvent.executor_completed(executor_id)` - executor handler completed + - `WorkflowEvent.executor_failed(executor_id, details)` - executor handler failed + + The generic parameter DataT represents the type of the event's data payload: + - Lifecycle events: `WorkflowEvent[None]` (data is None) + - Data events: `WorkflowEvent[DataT]` where DataT is the payload type (e.g., AgentResponse) + + Examples: + .. 
code-block:: python + + # Create events via factory methods + started = WorkflowEvent.started() + status = WorkflowEvent.status(WorkflowRunState.IN_PROGRESS) + output = WorkflowEvent.output("agent1", result_data) + + # Emit typed data from executor + event: WorkflowEvent[AgentResponse] = WorkflowEvent.data("agent1", response) + data: AgentResponse = event.data # Type-safe access + + # Check event type + if event.type == "status": + print(f"State: {event.state}") + elif event.type == "output": + print(f"Output from {event.executor_id}: {event.data}") + elif event.type == "data": + if isinstance(event.data, AgentResponse): + print(f"Agent response: {event.data.text}") + """ -class RequestInfoEvent(WorkflowEvent): - """Event triggered when a workflow executor requests external information.""" + data: DataT def __init__( self, - request_id: str, - source_executor_id: str, - request_data: Any, - response_type: type[Any], - ): - """Initialize the request info event. - - Args: - request_id: Unique identifier for the request. - source_executor_id: ID of the executor that made the request. - request_data: The data associated with the request. - response_type: Expected type of the response. + type: WorkflowEventType, + data: DataT | None = None, + *, + # Event context fields + origin: WorkflowEventSource | None = None, + # STATUS event fields + state: WorkflowRunState | None = None, + # FAILED event fields + details: WorkflowErrorDetails | None = None, + # OUTPUT/DATA event fields + executor_id: str | None = None, + # REQUEST_INFO event fields + request_id: str | None = None, + source_executor_id: str | None = None, + request_type: type[Any] | None = None, + response_type: type[Any] | None = None, + # SUPERSTEP event fields + iteration: int | None = None, + ) -> None: + """Initialize the workflow event. + + Prefer using factory methods like `WorkflowEvent.started()` instead of __init__ directly. 
""" - super().__init__(request_data) + self.type = type + self.data = data # type: ignore[assignment] + self.origin = origin if origin is not None else _current_event_origin() + + # Event-specific fields + self.state = state + self.details = details + self.executor_id = executor_id self.request_id = request_id self.source_executor_id = source_executor_id - self.request_type: type[Any] = type(request_data) + self.request_type = request_type self.response_type = response_type - - def __repr__(self) -> str: - """Return a string representation of the request info event.""" - return ( - f"{self.__class__.__name__}(" - f"request_id={self.request_id}, " - f"source_executor_id={self.source_executor_id}, " - f"request_type={self.request_type.__name__}, " - f"data={self.data}, " - f"response_type={self.response_type.__name__})" - ) - - def to_dict(self) -> dict[str, Any]: - """Convert the request info event to a dictionary for serialization.""" - return { - "data": encode_checkpoint_value(self.data), - "request_id": self.request_id, - "source_executor_id": self.source_executor_id, - "request_type": serialize_type(self.request_type), - "response_type": serialize_type(self.response_type), - } - - @staticmethod - def from_dict(data: dict[str, Any]) -> "RequestInfoEvent": - """Create a RequestInfoEvent from a dictionary.""" - # Validation - for property in ["data", "request_id", "source_executor_id", "request_type", "response_type"]: - if property not in data: - raise KeyError(f"Missing '{property}' field in RequestInfoEvent dictionary.") - - request_info_event = RequestInfoEvent( - request_id=data["request_id"], - source_executor_id=data["source_executor_id"], - request_data=decode_checkpoint_value(data["data"]), - response_type=deserialize_type(data["response_type"]), - ) - - # Verify that the deserialized request_data matches the declared request_type - if deserialize_type(data["request_type"]) is not type(request_info_event.data): - raise TypeError( - "Mismatch between 
deserialized request_data type and request_type field in RequestInfoEvent dictionary." - ) - - return request_info_event - - -class WorkflowOutputEvent(WorkflowEvent): - """Event triggered when a workflow executor yields output.""" - - def __init__( - self, - data: Any, - executor_id: str, - ): - """Initialize the workflow output event. - - Args: - data: The output yielded by the executor. - executor_id: ID of the executor that yielded the output. - """ - super().__init__(data) - self.executor_id = executor_id - - def __repr__(self) -> str: - """Return a string representation of the workflow output event.""" - return f"{self.__class__.__name__}(data={self.data}, executor_id={self.executor_id})" - - -class SuperStepEvent(WorkflowEvent): - """Event triggered when a superstep starts or ends.""" - - def __init__(self, iteration: int, data: Any | None = None): - """Initialize the superstep event. - - Args: - iteration: The number of the superstep (1-based index). - data: Optional data associated with the superstep event. - """ - super().__init__(data) self.iteration = iteration def __repr__(self) -> str: - """Return a string representation of the superstep event.""" - return f"{self.__class__.__name__}(iteration={self.iteration}, data={self.data})" - - -class SuperStepStartedEvent(SuperStepEvent): - """Event triggered when a superstep starts.""" - - ... - - -class SuperStepCompletedEvent(SuperStepEvent): - """Event triggered when a superstep ends.""" - - ... 
- - -class ExecutorEvent(WorkflowEvent): - """Base class for executor events.""" + """Return a string representation of the workflow event.""" + parts = [f"type={self.type!r}"] + if self.state is not None: + parts.append(f"state={self.state.value}") + if self.executor_id is not None: + parts.append(f"executor_id={self.executor_id!r}") + if self.iteration is not None: + parts.append(f"iteration={self.iteration}") + if self.request_id is not None: + parts.append(f"request_id={self.request_id!r}") + if self.data is not None: + parts.append(f"data={self.data!r}") + return f"WorkflowEvent({', '.join(parts)})" + + # ========================================================================== + # Factory methods + # ========================================================================== - def __init__(self, executor_id: str, data: Any | None = None): - """Initialize the executor event with an executor ID and optional data.""" - super().__init__(data) - self.executor_id = executor_id + @classmethod + def started(cls, data: DataT | None = None) -> "WorkflowEvent[DataT]": + """Create a 'started' event when a workflow run begins.""" + return cls("started", data=data) - def __repr__(self) -> str: - """Return a string representation of the executor event.""" - return f"{self.__class__.__name__}(executor_id={self.executor_id}, data={self.data})" + @classmethod + def status(cls, state: WorkflowRunState, data: DataT | None = None) -> "WorkflowEvent[DataT]": + """Create a 'status' event for workflow state transitions.""" + return cls("status", data=data, state=state) + @classmethod + def failed(cls, details: WorkflowErrorDetails, data: DataT | None = None) -> "WorkflowEvent[DataT]": + """Create a 'failed' event when a workflow terminates with error.""" + return cls("failed", data=data, details=details) -class ExecutorInvokedEvent(ExecutorEvent): - """Event triggered when an executor handler is invoked.""" + @classmethod + def warning(cls, message: str) -> "WorkflowEvent[str]": + 
"""Create a 'warning' event from user code.""" + return WorkflowEvent("warning", data=message) - ... + @classmethod + def error(cls, exception: Exception) -> "WorkflowEvent[Exception]": + """Create an 'error' event from user code.""" + return WorkflowEvent("error", data=exception) + @classmethod + def output(cls, executor_id: str, data: DataT) -> "WorkflowEvent[DataT]": + """Create an 'output' event when an executor yields final output.""" + return cls("output", executor_id=executor_id, data=data) -class ExecutorCompletedEvent(ExecutorEvent): - """Event triggered when an executor handler is completed.""" + @classmethod + def emit(cls, executor_id: str, data: DataT) -> "WorkflowEvent[DataT]": + """Create a 'data' event when an executor emits data during execution. - ... + This is the primary method for executors to emit typed data + (e.g., AgentResponse, AgentResponseUpdate, custom data). + """ + return cls("data", executor_id=executor_id, data=data) + @classmethod + def request_info( + cls, + request_id: str, + source_executor_id: str, + request_data: DataT, + response_type: type[Any], + ) -> "WorkflowEvent[DataT]": + """Create a 'request_info' event when an executor requests external information.""" + return cls( + "request_info", + data=request_data, + request_id=request_id, + source_executor_id=source_executor_id, + request_type=type(request_data), + response_type=response_type, + ) -class ExecutorFailedEvent(ExecutorEvent): - """Event triggered when an executor handler raises an error.""" + @classmethod + def superstep_started(cls, iteration: int, data: DataT | None = None) -> "WorkflowEvent[DataT]": + """Create a 'superstep_started' event when a superstep begins.""" + return cls("superstep_started", iteration=iteration, data=data) - def __init__( - self, - executor_id: str, - details: WorkflowErrorDetails, - ): - super().__init__(executor_id, details) - self.details = details + @classmethod + def superstep_completed(cls, iteration: int, data: DataT | None = 
None) -> "WorkflowEvent[DataT]": + """Create a 'superstep_completed' event when a superstep ends.""" + return cls("superstep_completed", iteration=iteration, data=data) - def __repr__(self) -> str: # pragma: no cover - representation only - return f"{self.__class__.__name__}(executor_id={self.executor_id}, details={self.details})" + @classmethod + def executor_invoked(cls, executor_id: str, data: DataT | None = None) -> "WorkflowEvent[DataT]": + """Create an 'executor_invoked' event when an executor handler is called.""" + return cls("executor_invoked", executor_id=executor_id, data=data) + @classmethod + def executor_completed(cls, executor_id: str, data: DataT | None = None) -> "WorkflowEvent[DataT]": + """Create an 'executor_completed' event when an executor handler completes.""" + return cls("executor_completed", executor_id=executor_id, data=data) -class AgentRunUpdateEvent(ExecutorEvent): - """Event triggered when an agent is streaming messages.""" + @classmethod + def executor_failed( + cls, executor_id: str, details: WorkflowErrorDetails + ) -> "WorkflowEvent[WorkflowErrorDetails]": + """Create an 'executor_failed' event when an executor handler raises an error.""" + return WorkflowEvent("executor_failed", executor_id=executor_id, data=details, details=details) - data: AgentResponseUpdate + # ========================================================================== + # Serialization methods (primarily for REQUEST_INFO events) + # ========================================================================== - def __init__(self, executor_id: str, data: AgentResponseUpdate): - """Initialize the agent streaming event.""" - super().__init__(executor_id, data) + def to_dict(self) -> dict[str, Any]: + """Convert to dictionary for serialization. 
- def __repr__(self) -> str: - """Return a string representation of the agent streaming event.""" - return f"{self.__class__.__name__}(executor_id={self.executor_id}, messages={self.data})" + Currently only implemented for 'request_info' events for checkpoint storage. + """ + if self.type != "request_info": + raise ValueError(f"to_dict() only supported for 'request_info' events, got '{self.type}'") + return { + "type": self.type, + "data": encode_checkpoint_value(self.data), + "request_id": self.request_id, + "source_executor_id": self.source_executor_id, + "request_type": serialize_type(self.request_type) if self.request_type else None, + "response_type": serialize_type(self.response_type) if self.response_type else None, + } + @classmethod + def from_dict(cls, data: dict[str, Any]) -> "WorkflowEvent[Any]": + """Create a REQUEST_INFO event from a dictionary.""" + for prop in ["data", "request_id", "source_executor_id", "request_type", "response_type"]: + if prop not in data: + raise KeyError(f"Missing '{prop}' field in WorkflowEvent dictionary.") -class AgentRunEvent(ExecutorEvent): - """Event triggered when an agent run is completed.""" + request_data = decode_checkpoint_value(data["data"]) + request_type = deserialize_type(data["request_type"]) - data: AgentResponse + if request_type is not type(request_data): + raise TypeError( + "Mismatch between deserialized request_data type and request_type field in WorkflowEvent dictionary." 
+ ) - def __init__(self, executor_id: str, data: AgentResponse): - """Initialize the agent run event.""" - super().__init__(executor_id, data) + return cls.request_info( + request_id=data["request_id"], + source_executor_id=data["source_executor_id"], + request_data=request_data, + response_type=deserialize_type(data["response_type"]), + ) - def __repr__(self) -> str: - """Return a string representation of the agent run event.""" - return f"{self.__class__.__name__}(executor_id={self.executor_id}, data={self.data})" +# Type alias for backwards compatibility +WorkflowLifecycleEvent: TypeAlias = WorkflowEvent[Any] -WorkflowLifecycleEvent: TypeAlias = WorkflowStartedEvent | WorkflowStatusEvent | WorkflowFailedEvent +# Backwards compatibility alias - ExecutorEvent is now just WorkflowEvent +ExecutorEvent: TypeAlias = WorkflowEvent[DataT] # type: ignore[type-arg] diff --git a/python/packages/core/agent_framework/_workflows/_executor.py b/python/packages/core/agent_framework/_workflows/_executor.py index 18adc4b904..be01d20682 100644 --- a/python/packages/core/agent_framework/_workflows/_executor.py +++ b/python/packages/core/agent_framework/_workflows/_executor.py @@ -11,10 +11,8 @@ from ..observability import create_processing_span from ._events import ( - ExecutorCompletedEvent, - ExecutorFailedEvent, - ExecutorInvokedEvent, WorkflowErrorDetails, + WorkflowEvent, _framework_event_origin, # type: ignore[reportPrivateUsage] ) from ._model_utils import DictConvertible @@ -274,14 +272,14 @@ async def execute( # Invoke the handler with the message and context # Use deepcopy to capture original input state before handler can mutate it with _framework_event_origin(): - invoke_event = ExecutorInvokedEvent(self.id, copy.deepcopy(message)) + invoke_event = WorkflowEvent.executor_invoked(self.id, copy.deepcopy(message)) await context.add_event(invoke_event) try: await handler(message, context) except Exception as exc: # Surface structured executor failure before propagating with 
_framework_event_origin(): - failure_event = ExecutorFailedEvent(self.id, WorkflowErrorDetails.from_exception(exc)) + failure_event = WorkflowEvent.executor_failed(self.id, WorkflowErrorDetails.from_exception(exc)) await context.add_event(failure_event) raise with _framework_event_origin(): @@ -289,7 +287,7 @@ async def execute( sent_messages = context.get_sent_messages() yielded_outputs = context.get_yielded_outputs() completion_data = sent_messages + yielded_outputs - completed_event = ExecutorCompletedEvent(self.id, completion_data if completion_data else None) + completed_event = WorkflowEvent.executor_completed(self.id, completion_data if completion_data else None) await context.add_event(completed_event) def _create_context_for_handler( diff --git a/python/packages/core/agent_framework/_workflows/_magentic.py b/python/packages/core/agent_framework/_workflows/_magentic.py index eff87fd5f0..8207ee9c46 100644 --- a/python/packages/core/agent_framework/_workflows/_magentic.py +++ b/python/packages/core/agent_framework/_workflows/_magentic.py @@ -31,7 +31,7 @@ ParticipantRegistry, ) from ._checkpoint import CheckpointStorage -from ._events import ExecutorEvent +from ._events import WorkflowEvent from ._executor import Executor, handler from ._model_utils import DictConvertible, encode_value from ._request_info_mixin import response_handler @@ -773,8 +773,11 @@ class MagenticOrchestratorEventType(str, Enum): @dataclass -class MagenticOrchestratorEvent(ExecutorEvent): - """Base class for Magentic orchestrator events.""" +class MagenticOrchestratorEvent(WorkflowEvent): + """Base class for Magentic orchestrator events. + + Uses the 'custom' event type for extensibility. 
+ """ def __init__( self, @@ -782,7 +785,7 @@ def __init__( event_type: MagenticOrchestratorEventType, data: ChatMessage | MagenticProgressLedger, ) -> None: - super().__init__(executor_id, data) + super().__init__("custom", data=data, executor_id=executor_id) self.event_type = event_type def __repr__(self) -> str: @@ -1514,7 +1517,7 @@ def with_plan_review(self, enable: bool = True) -> "MagenticBuilder": # During execution, handle plan review async for event in workflow.run_stream("task"): - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": request = event.data if isinstance(request, MagenticHumanInterventionRequest): if request.kind == MagenticHumanInterventionKind.PLAN_REVIEW: diff --git a/python/packages/core/agent_framework/_workflows/_runner.py b/python/packages/core/agent_framework/_workflows/_runner.py index 227f0f7fe7..428d78b5fb 100644 --- a/python/packages/core/agent_framework/_workflows/_runner.py +++ b/python/packages/core/agent_framework/_workflows/_runner.py @@ -15,7 +15,7 @@ from ._const import EXECUTOR_STATE_KEY from ._edge import EdgeGroup from ._edge_runner import EdgeRunner, create_edge_runner -from ._events import SuperStepCompletedEvent, SuperStepStartedEvent, WorkflowEvent +from ._events import WorkflowEvent from ._exceptions import ( WorkflowCheckpointException, WorkflowConvergenceException, @@ -101,7 +101,7 @@ async def run_until_convergence(self) -> AsyncGenerator[WorkflowEvent, None]: while self._iteration < self._max_iterations: logger.info(f"Starting superstep {self._iteration + 1}") - yield SuperStepStartedEvent(iteration=self._iteration + 1) + yield WorkflowEvent.superstep_started(iteration=self._iteration + 1) # Run iteration concurrently with live event streaming: we poll # for new events while the iteration coroutine progresses. 
@@ -136,7 +136,7 @@ async def run_until_convergence(self) -> AsyncGenerator[WorkflowEvent, None]: # Create checkpoint after each superstep iteration await self._create_checkpoint_if_enabled(f"superstep_{self._iteration}") - yield SuperStepCompletedEvent(iteration=self._iteration) + yield WorkflowEvent.superstep_completed(iteration=self._iteration) # Check for convergence: no more messages to process if not await self._ctx.has_messages(): diff --git a/python/packages/core/agent_framework/_workflows/_runner_context.py b/python/packages/core/agent_framework/_workflows/_runner_context.py index ce9fff6617..1bff2b486f 100644 --- a/python/packages/core/agent_framework/_workflows/_runner_context.py +++ b/python/packages/core/agent_framework/_workflows/_runner_context.py @@ -7,12 +7,12 @@ from copy import copy from dataclasses import dataclass from enum import Enum -from typing import Any, Protocol, TypeVar, runtime_checkable +from typing import TYPE_CHECKING, Any, Protocol, TypeVar, runtime_checkable from ._checkpoint import CheckpointStorage, WorkflowCheckpoint from ._checkpoint_encoding import decode_checkpoint_value, encode_checkpoint_value from ._const import INTERNAL_SOURCE_ID -from ._events import RequestInfoEvent, WorkflowEvent +from ._events import WorkflowEvent from ._shared_state import SharedState from ._typing_utils import is_instance_of @@ -51,7 +51,7 @@ class Message: source_span_ids: list[str] | None = None # Publishing span IDs for linking from multiple sources # For response messages, the original request data - original_request_info_event: RequestInfoEvent | None = None + original_request_info_event: "WorkflowEvent[Any] | None" = None # Backward compatibility properties @property @@ -254,11 +254,11 @@ async def apply_checkpoint(self, checkpoint: WorkflowCheckpoint) -> None: """ ... - async def add_request_info_event(self, event: RequestInfoEvent) -> None: - """Add a RequestInfoEvent to the context and track it for correlation. 
+ async def add_request_info_event(self, event: "WorkflowEvent[Any]") -> None: + """Add a request_info event to the context and track it for correlation. Args: - event: The RequestInfoEvent to be added. + event: The WorkflowEvent with type='request_info' to be added. """ ... @@ -271,11 +271,11 @@ async def send_request_info_response(self, request_id: str, response: Any) -> No """ ... - async def get_pending_request_info_events(self) -> dict[str, RequestInfoEvent]: - """Get the mapping of request IDs to their corresponding RequestInfoEvent. + async def get_pending_request_info_events(self) -> "dict[str, WorkflowEvent[Any]]": + """Get the mapping of request IDs to their corresponding request_info events. Returns: - A dictionary mapping request IDs to their corresponding RequestInfoEvent. + A dictionary mapping request IDs to their corresponding WorkflowEvent (type='request_info'). """ ... @@ -294,7 +294,7 @@ def __init__(self, checkpoint_storage: CheckpointStorage | None = None): self._event_queue: asyncio.Queue[WorkflowEvent] = asyncio.Queue() # An additional storage for pending request info events - self._pending_request_info_events: dict[str, RequestInfoEvent] = {} + self._pending_request_info_events: dict[str, WorkflowEvent[Any]] = {} # Checkpointing configuration/state self._checkpoint_storage = checkpoint_storage @@ -426,7 +426,7 @@ async def apply_checkpoint(self, checkpoint: WorkflowCheckpoint) -> None: self._pending_request_info_events.clear() pending_requests_data = checkpoint.pending_request_info_events for request_id, request_data in pending_requests_data.items(): - request_info_event = RequestInfoEvent.from_dict(request_data) + request_info_event = WorkflowEvent.from_dict(request_data) self._pending_request_info_events[request_id] = request_info_event await self.add_event(request_info_event) @@ -470,12 +470,14 @@ async def _get_serialized_workflow_state(self, shared_state: SharedState, iterat "pending_request_info_events": 
serialized_pending_request_info_events, } - async def add_request_info_event(self, event: RequestInfoEvent) -> None: - """Add a RequestInfoEvent to the context and track it for correlation. + async def add_request_info_event(self, event: "WorkflowEvent[Any]") -> None: + """Add a request_info event to the context and track it for correlation. Args: - event: The RequestInfoEvent to be added. + event: The WorkflowEvent with type='request_info' to be added. """ + if event.request_id is None: + raise ValueError("request_info event must have a request_id") self._pending_request_info_events[event.request_id] = event await self.add_event(event) @@ -508,10 +510,10 @@ async def send_request_info_response(self, request_id: str, response: Any) -> No await self.send_message(response_msg) - async def get_pending_request_info_events(self) -> dict[str, RequestInfoEvent]: - """Get the mapping of request IDs to their corresponding RequestInfoEvent. + async def get_pending_request_info_events(self) -> "dict[str, WorkflowEvent[Any]]": + """Get the mapping of request IDs to their corresponding request_info events. Returns: - A dictionary mapping request IDs to their corresponding RequestInfoEvent. + A dictionary mapping request IDs to their corresponding WorkflowEvent (type='request_info'). 
""" return dict(self._pending_request_info_events) diff --git a/python/packages/core/agent_framework/_workflows/_workflow.py b/python/packages/core/agent_framework/_workflows/_workflow.py index bd14dc6bcc..a9aaf4eeda 100644 --- a/python/packages/core/agent_framework/_workflows/_workflow.py +++ b/python/packages/core/agent_framework/_workflows/_workflow.py @@ -19,14 +19,9 @@ FanOutEdgeGroup, ) from ._events import ( - RequestInfoEvent, WorkflowErrorDetails, WorkflowEvent, - WorkflowFailedEvent, - WorkflowOutputEvent, WorkflowRunState, - WorkflowStartedEvent, - WorkflowStatusEvent, _framework_event_origin, # type: ignore ) from ._executor import Executor @@ -59,9 +54,11 @@ class WorkflowRunResult(list[WorkflowEvent]): - status_timeline(): Access the complete status event history """ - def __init__(self, events: list[WorkflowEvent], status_events: list[WorkflowStatusEvent] | None = None) -> None: + def __init__( + self, events: "list[WorkflowEvent[Any]]", status_events: "list[WorkflowEvent[Any]] | None" = None + ) -> None: super().__init__(events) - self._status_events: list[WorkflowStatusEvent] = status_events or [] + self._status_events: list[WorkflowEvent[Any]] = status_events or [] def get_outputs(self) -> list[Any]: """Get all outputs from the workflow run result. @@ -69,30 +66,30 @@ def get_outputs(self) -> list[Any]: Returns: A list of outputs produced by the workflow during its execution. """ - return [event.data for event in self if isinstance(event, WorkflowOutputEvent)] + return [event.data for event in self if event.type == "output"] - def get_request_info_events(self) -> list[RequestInfoEvent]: + def get_request_info_events(self) -> "list[WorkflowEvent[Any]]": """Get all request info events from the workflow run result. Returns: - A list of RequestInfoEvent instances found in the workflow run result. + A list of WorkflowEvent instances with type='request_info' found in the workflow run result. 
""" - return [event for event in self if isinstance(event, RequestInfoEvent)] + return [event for event in self if event.type == "request_info"] def get_final_state(self) -> WorkflowRunState: """Return the final run state based on explicit status events. - Returns the last WorkflowStatusEvent.state observed. Raises if none were emitted. + Returns the last status event's state observed. Raises if none were emitted. """ if self._status_events: return self._status_events[-1].state # type: ignore[return-value] raise RuntimeError( - "Final state is unknown because no WorkflowStatusEvent was emitted. " + "Final state is unknown because no status event was emitted. " "Ensure your workflow entry points are used (which emit status events) " "or handle the absence of status explicitly." ) - def status_timeline(self) -> list[WorkflowStatusEvent]: + def status_timeline(self) -> "list[WorkflowEvent[Any]]": """Return the list of status events emitted during the run (control-plane).""" return list(self._status_events) @@ -320,10 +317,10 @@ async def _run_workflow_with_tracing( span.add_event(OtelAttr.WORKFLOW_STARTED) # Emit explicit start/status events to the stream with _framework_event_origin(): - started = WorkflowStartedEvent() + started = WorkflowEvent.started() yield started with _framework_event_origin(): - in_progress = WorkflowStatusEvent(WorkflowRunState.IN_PROGRESS) + in_progress = WorkflowEvent.status(WorkflowRunState.IN_PROGRESS) yield in_progress # Reset context for a new run if supported @@ -346,39 +343,39 @@ async def _run_workflow_with_tracing( # All executor executions happen within workflow span async for event in self._runner.run_until_convergence(): # Track request events for final status determination - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": saw_request = True yield event - if isinstance(event, RequestInfoEvent) and not emitted_in_progress_pending: + if event.type == "request_info" and not emitted_in_progress_pending: 
emitted_in_progress_pending = True with _framework_event_origin(): - pending_status = WorkflowStatusEvent(WorkflowRunState.IN_PROGRESS_PENDING_REQUESTS) + pending_status = WorkflowEvent.status(WorkflowRunState.IN_PROGRESS_PENDING_REQUESTS) yield pending_status # Workflow runs until idle - emit final status based on whether requests are pending if saw_request: with _framework_event_origin(): - terminal_status = WorkflowStatusEvent(WorkflowRunState.IDLE_WITH_PENDING_REQUESTS) + terminal_status = WorkflowEvent.status(WorkflowRunState.IDLE_WITH_PENDING_REQUESTS) yield terminal_status else: with _framework_event_origin(): - terminal_status = WorkflowStatusEvent(WorkflowRunState.IDLE) + terminal_status = WorkflowEvent.status(WorkflowRunState.IDLE) yield terminal_status span.add_event(OtelAttr.WORKFLOW_COMPLETED) except Exception as exc: - # Drain any pending events (for example, ExecutorFailedEvent) before yielding WorkflowFailedEvent + # Drain any pending events (for example, executor_failed) before yielding failed event for event in await self._runner.context.drain_events(): yield event # Surface structured failure details before propagating exception details = WorkflowErrorDetails.from_exception(exc) with _framework_event_origin(): - failed_event = WorkflowFailedEvent(details) + failed_event = WorkflowEvent.failed(details) yield failed_event with _framework_event_origin(): - failed_status = WorkflowStatusEvent(WorkflowRunState.FAILED) + failed_status = WorkflowEvent.status(WorkflowRunState.FAILED) yield failed_status span.add_event( name=OtelAttr.WORKFLOW_ERROR, @@ -674,15 +671,15 @@ async def run( self._reset_running_flag() # Filter events for non-streaming mode - filtered: list[WorkflowEvent] = [] - status_events: list[WorkflowStatusEvent] = [] + filtered: list[WorkflowEvent[Any]] = [] + status_events: list[WorkflowEvent[Any]] = [] for ev in raw_events: - # Omit WorkflowStartedEvent from non-streaming (telemetry-only) - if isinstance(ev, WorkflowStartedEvent): + # 
Omit started events from non-streaming (telemetry-only) + if ev.type == "started": continue # Track status; include inline only if explicitly requested - if isinstance(ev, WorkflowStatusEvent): + if ev.type == "status": status_events.append(ev) if include_status_events: filtered.append(ev) @@ -709,8 +706,8 @@ async def send_responses(self, responses: dict[str, Any]) -> WorkflowRunResult: reset_context=False, # Don't reset context when sending responses ) ] - status_events = [e for e in events if isinstance(e, WorkflowStatusEvent)] - filtered_events = [e for e in events if not isinstance(e, (WorkflowStatusEvent, WorkflowStartedEvent))] + status_events = [e for e in events if e.type == "status"] + filtered_events = [e for e in events if e.type not in ("status", "started")] return WorkflowRunResult(filtered_events, status_events) finally: self._reset_running_flag() diff --git a/python/packages/core/agent_framework/_workflows/_workflow_context.py b/python/packages/core/agent_framework/_workflows/_workflow_context.py index 708cdf3c51..671145c974 100644 --- a/python/packages/core/agent_framework/_workflows/_workflow_context.py +++ b/python/packages/core/agent_framework/_workflows/_workflow_context.py @@ -14,15 +14,8 @@ from ..observability import OtelAttr, create_workflow_span from ._const import EXECUTOR_STATE_KEY from ._events import ( - RequestInfoEvent, WorkflowEvent, WorkflowEventSource, - WorkflowFailedEvent, - WorkflowLifecycleEvent, - WorkflowOutputEvent, - WorkflowStartedEvent, - WorkflowStatusEvent, - WorkflowWarningEvent, _framework_event_origin, # type: ignore ) from ._runner_context import Message, RunnerContext @@ -205,15 +198,8 @@ def _is_type_like(x: Any) -> bool: return infer_output_types_from_ctx_annotation(annotation) -_FRAMEWORK_LIFECYCLE_EVENT_TYPES: tuple[type[WorkflowEvent], ...] 
= cast( - tuple[type[WorkflowEvent], ...], - tuple(get_args(WorkflowLifecycleEvent)) - or ( - WorkflowStartedEvent, - WorkflowStatusEvent, - WorkflowFailedEvent, - ), -) +# Event types reserved for framework lifecycle (not allowed from user code) +_FRAMEWORK_LIFECYCLE_EVENT_TYPES: frozenset[str] = frozenset({"started", "status", "failed"}) class WorkflowContext(Generic[T_Out, T_W_Out]): @@ -360,20 +346,19 @@ async def yield_output(self, output: T_W_Out) -> None: self._yielded_outputs.append(copy.deepcopy(output)) with _framework_event_origin(): - event = WorkflowOutputEvent(data=output, executor_id=self._executor_id) + event = WorkflowEvent.output(self._executor_id, output) await self._runner_context.add_event(event) - async def add_event(self, event: WorkflowEvent) -> None: + async def add_event(self, event: "WorkflowEvent[Any]") -> None: """Add an event to the workflow context.""" - if event.origin == WorkflowEventSource.EXECUTOR and isinstance(event, _FRAMEWORK_LIFECYCLE_EVENT_TYPES): - event_name = event.__class__.__name__ + if event.origin == WorkflowEventSource.EXECUTOR and event.type in _FRAMEWORK_LIFECYCLE_EVENT_TYPES: warning_msg = ( - f"Executor '{self._executor_id}' attempted to emit {event_name}, " + f"Executor '{self._executor_id}' attempted to emit a '{event.type}' event, " "which is reserved for framework lifecycle notifications. The " "event was ignored." ) logger.warning(warning_msg) - await self._runner_context.add_event(WorkflowWarningEvent(warning_msg)) + await self._runner_context.add_event(WorkflowEvent.warning(warning_msg)) return await self._runner_context.add_event(event) @@ -402,7 +387,7 @@ async def request_info(self, request_data: object, response_type: type, *, reque "not be processed. Please define a response handler using the @response_handler decorator." 
) - request_info_event = RequestInfoEvent( + request_info_event = WorkflowEvent.request_info( request_id=request_id or str(uuid.uuid4()), source_executor_id=self._executor_id, request_data=request_data, diff --git a/python/packages/core/agent_framework/_workflows/_workflow_executor.py b/python/packages/core/agent_framework/_workflows/_workflow_executor.py index 2453620cfd..e262414f37 100644 --- a/python/packages/core/agent_framework/_workflows/_workflow_executor.py +++ b/python/packages/core/agent_framework/_workflows/_workflow_executor.py @@ -14,9 +14,7 @@ from ._checkpoint_encoding import decode_checkpoint_value, encode_checkpoint_value from ._const import WORKFLOW_RUN_KWARGS_KEY from ._events import ( - RequestInfoEvent, - WorkflowErrorEvent, - WorkflowFailedEvent, + WorkflowEvent, WorkflowRunState, ) from ._executor import Executor, handler @@ -52,38 +50,38 @@ class ExecutionContext: # Pending requests to be fulfilled. This will get updated as the # WorkflowExecutor receives responses. - pending_requests: dict[str, RequestInfoEvent] # request_id -> request_info_event + pending_requests: dict[str, WorkflowEvent] # request_id -> request_info_event @dataclass class SubWorkflowResponseMessage: """Message sent from a parent workflow to a sub-workflow via WorkflowExecutor to provide requested information. - This message wraps the response data along with the original RequestInfoEvent emitted by the sub-workflow executor. + This message wraps the response data along with the original WorkflowEvent emitted by the sub-workflow executor. Attributes: data: The response data to the original request. - source_event: The original RequestInfoEvent emitted by the sub-workflow executor. + source_event: The original WorkflowEvent emitted by the sub-workflow executor. 
""" data: Any - source_event: RequestInfoEvent + source_event: WorkflowEvent @dataclass class SubWorkflowRequestMessage: """Message sent from a sub-workflow to an executor in the parent workflow to request information. - This message wraps a RequestInfoEvent emitted by the executor in the sub-workflow. + This message wraps a WorkflowEvent emitted by the executor in the sub-workflow. Attributes: - source_event: The original RequestInfoEvent emitted by the sub-workflow executor. + source_event: The original WorkflowEvent emitted by the sub-workflow executor. executor_id: The ID of the WorkflowExecutor in the parent workflow that is responsible for this sub-workflow. This can be used to ensure that the response is sent back to the correct sub-workflow instance. """ - source_event: RequestInfoEvent + source_event: WorkflowEvent executor_id: str def create_response(self, data: Any) -> SubWorkflowResponseMessage: @@ -153,7 +151,7 @@ class WorkflowExecutor(Executor): # An executor in the sub-workflow makes request request = MyDataRequest(query="user info") - # WorkflowExecutor captures RequestInfoEvent and wraps it in a SubWorkflowRequestMessage + # WorkflowExecutor captures WorkflowEvent and wraps it in a SubWorkflowRequestMessage # then send it to the receiving executor in parent workflow. The executor in parent workflow # can handle the request locally or forward it to an external source. # The WorkflowExecutor tracks the pending request, and implements a response handler. @@ -285,7 +283,7 @@ def __init__( workflow's event stream. propagate_request: Whether to propagate requests from the sub-workflow to the parent workflow. If set to true, requests from the sub-workflow - will be propagated as the original RequestInfoEvent to the parent + will be propagated as the original WorkflowEvent to the parent workflow. Otherwise, they will be wrapped in a SubWorkflowRequestMessage, which should be handled by an executor in the parent workflow. 
@@ -437,7 +435,7 @@ async def handle_propagated_request_response( """Handle response for a request that was propagated to the parent workflow. Args: - original_request: The original RequestInfoEvent. + original_request: The original WorkflowEvent. response: The response data. ctx: The workflow context. """ @@ -570,7 +568,7 @@ async def _process_workflow_result( # Handle final state if workflow_run_state == WorkflowRunState.FAILED: # Find the WorkflowFailedEvent. - failed_events = [e for e in result if isinstance(e, WorkflowFailedEvent)] + failed_events = [e for e in result if isinstance(e, WorkflowEvent) and e.type == "failed"] if failed_events: failed_event = failed_events[0] error_type = failed_event.details.error_type @@ -578,9 +576,7 @@ async def _process_workflow_result( exception = Exception( f"Sub-workflow {self.workflow.id} failed with error: {error_type} - {error_message}" ) - error_event = WorkflowErrorEvent( - data=exception, - ) + error_event = WorkflowEvent.error(exception) await ctx.add_event(error_event) elif workflow_run_state == WorkflowRunState.IDLE: # Sub-workflow is idle - nothing more to do now diff --git a/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py b/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py index 874f73fa5b..de81eb21bb 100644 --- a/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py +++ b/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py @@ -12,7 +12,6 @@ AgentExecutorResponse, AgentResponse, AgentResponseUpdate, - AgentRunUpdateEvent, AgentThread, BaseAgent, ChatAgent, @@ -20,6 +19,7 @@ ChatResponse, ChatResponseUpdate, Content, + ExecutorEvent, RequestInfoEvent, Role, WorkflowBuilder, @@ -100,9 +100,9 @@ async def test_agent_executor_emits_tool_calls_in_streaming_mode() -> None: workflow = WorkflowBuilder().set_start_executor(agent_exec).build() # Act: run in streaming mode - events: list[AgentRunUpdateEvent] = [] + events: 
list[ExecutorEvent[AgentResponseUpdate]] = [] async for event in workflow.run_stream("What's the weather?"): - if isinstance(event, AgentRunUpdateEvent): + if event.type == "data" and isinstance(event.data, AgentResponseUpdate): events.append(event) # Assert: we should receive 4 events (text, function call, function result, text) @@ -269,7 +269,7 @@ async def test_agent_executor_tool_call_with_approval_streaming() -> None: # Act request_info_events: list[RequestInfoEvent] = [] async for event in workflow.run_stream("Invoke tool requiring approval"): - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": request_info_events.append(event) # Assert @@ -284,7 +284,7 @@ async def test_agent_executor_tool_call_with_approval_streaming() -> None: async for event in workflow.send_responses_streaming({ approval_request.request_id: approval_request.data.to_function_approval_response(True) }): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": output = event.data # Assert @@ -340,7 +340,7 @@ async def test_agent_executor_parallel_tool_call_with_approval_streaming() -> No # Act request_info_events: list[RequestInfoEvent] = [] async for event in workflow.run_stream("Invoke tool requiring approval"): - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": request_info_events.append(event) # Assert @@ -358,7 +358,7 @@ async def test_agent_executor_parallel_tool_call_with_approval_streaming() -> No output: str | None = None async for event in workflow.send_responses_streaming(responses): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": output = event.data # Assert diff --git a/python/packages/core/tests/workflow/test_agent_run_event_typing.py b/python/packages/core/tests/workflow/test_agent_run_event_typing.py index e5071a7c96..cedf7371aa 100644 --- a/python/packages/core/tests/workflow/test_agent_run_event_typing.py +++ b/python/packages/core/tests/workflow/test_agent_run_event_typing.py @@ 
-1,27 +1,38 @@ # Copyright (c) Microsoft. All rights reserved. -"""Tests for AgentRunEvent and AgentRunUpdateEvent type annotations.""" +"""Tests for WorkflowEvent[T] generic type annotations.""" from agent_framework import AgentResponse, AgentResponseUpdate, ChatMessage, Role -from agent_framework._workflows._events import AgentRunEvent, AgentRunUpdateEvent +from agent_framework._workflows._events import WorkflowEvent -def test_agent_run_event_data_type() -> None: - """Verify AgentRunEvent.data is typed as AgentResponse | None.""" +def test_workflow_event_with_agent_response_data_type() -> None: + """Verify WorkflowEvent[AgentResponse].data is typed as AgentResponse.""" response = AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="Hello")]) - event = AgentRunEvent(executor_id="test", data=response) + event: WorkflowEvent[AgentResponse] = WorkflowEvent.emit(executor_id="test", data=response) # This assignment should pass type checking without a cast - data: AgentResponse | None = event.data + data: AgentResponse = event.data assert data is not None assert data.text == "Hello" -def test_agent_run_update_event_data_type() -> None: - """Verify AgentRunUpdateEvent.data is typed as AgentResponseUpdate | None.""" +def test_workflow_event_with_agent_response_update_data_type() -> None: + """Verify WorkflowEvent[AgentResponseUpdate].data is typed as AgentResponseUpdate.""" update = AgentResponseUpdate() - event = AgentRunUpdateEvent(executor_id="test", data=update) + event: WorkflowEvent[AgentResponseUpdate] = WorkflowEvent.emit(executor_id="test", data=update) # This assignment should pass type checking without a cast - data: AgentResponseUpdate | None = event.data + data: AgentResponseUpdate = event.data assert data is not None + + +def test_workflow_event_repr() -> None: + """Verify WorkflowEvent.__repr__ uses consistent format.""" + response = AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="Hello")]) + event: WorkflowEvent[AgentResponse] = 
WorkflowEvent.emit(executor_id="test", data=response) + + repr_str = repr(event) + assert "WorkflowEvent" in repr_str + assert "executor_id='test'" in repr_str + assert "data=" in repr_str diff --git a/python/packages/core/tests/workflow/test_checkpoint_validation.py b/python/packages/core/tests/workflow/test_checkpoint_validation.py index f90f74db57..746baea2a2 100644 --- a/python/packages/core/tests/workflow/test_checkpoint_validation.py +++ b/python/packages/core/tests/workflow/test_checkpoint_validation.py @@ -78,4 +78,4 @@ async def test_resume_succeeds_when_graph_matches() -> None: ) ] - assert any(isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE for event in events) + assert any(event.type == "status" and event.state == WorkflowRunState.IDLE for event in events) diff --git a/python/packages/core/tests/workflow/test_executor.py b/python/packages/core/tests/workflow/test_executor.py index e7c2a31aec..5b89dcf7f9 100644 --- a/python/packages/core/tests/workflow/test_executor.py +++ b/python/packages/core/tests/workflow/test_executor.py @@ -8,11 +8,10 @@ from agent_framework import ( ChatMessage, Executor, - ExecutorCompletedEvent, - ExecutorInvokedEvent, Message, WorkflowBuilder, WorkflowContext, + WorkflowEvent, executor, handler, response_handler, @@ -139,7 +138,7 @@ async def handle_integer(self, number: int, ctx: WorkflowContext[int]) -> None: async def test_executor_invoked_event_contains_input_data(): - """Test that ExecutorInvokedEvent contains the input message data.""" + """Test that ExecutorEvent (kind=INVOKED) contains the input message data.""" class UpperCaseExecutor(Executor): @handler @@ -157,7 +156,9 @@ async def handle(self, text: str, ctx: WorkflowContext) -> None: workflow = WorkflowBuilder().add_edge(upper, collector).set_start_executor(upper).build() events = await workflow.run("hello world") - invoked_events = [e for e in events if isinstance(e, ExecutorInvokedEvent)] + invoked_events = [ + e for e in events 
if isinstance(e, WorkflowEvent) and e.type == "executor_invoked" + ] assert len(invoked_events) == 2 @@ -171,7 +172,7 @@ async def handle(self, text: str, ctx: WorkflowContext) -> None: async def test_executor_completed_event_contains_sent_messages(): - """Test that ExecutorCompletedEvent contains the messages sent via ctx.send_message().""" + """Test that ExecutorEvent (kind=COMPLETED) contains the messages sent via ctx.send_message().""" class MultiSenderExecutor(Executor): @handler @@ -194,7 +195,9 @@ async def handle(self, text: str, ctx: WorkflowContext) -> None: workflow = WorkflowBuilder().add_edge(sender, collector).set_start_executor(sender).build() events = await workflow.run("hello") - completed_events = [e for e in events if isinstance(e, ExecutorCompletedEvent)] + completed_events = [ + e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_completed" + ] # Sender should have completed with the sent messages sender_completed = next(e for e in completed_events if e.executor_id == "sender") @@ -210,9 +213,7 @@ async def handle(self, text: str, ctx: WorkflowContext) -> None: async def test_executor_completed_event_includes_yielded_outputs(): - """Test that ExecutorCompletedEvent.data includes yielded outputs.""" - - from agent_framework import WorkflowOutputEvent + """Test that WorkflowEvent(type='executor_completed').data includes yielded outputs.""" class YieldOnlyExecutor(Executor): @handler @@ -223,15 +224,17 @@ async def handle(self, text: str, ctx: WorkflowContext[Never, str]) -> None: workflow = WorkflowBuilder().set_start_executor(executor).build() events = await workflow.run("test") - completed_events = [e for e in events if isinstance(e, ExecutorCompletedEvent)] + completed_events = [ + e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_completed" + ] assert len(completed_events) == 1 assert completed_events[0].executor_id == "yielder" - # Yielded outputs are now included in ExecutorCompletedEvent.data + 
# Yielded outputs are now included in ExecutorEvent (kind=COMPLETED).data assert completed_events[0].data == ["TEST"] # Verify the output was also yielded as WorkflowOutputEvent - output_events = [e for e in events if isinstance(e, WorkflowOutputEvent)] + output_events = [e for e in events if e.type == "output"] assert len(output_events) == 1 assert output_events[0].data == "TEST" @@ -268,8 +271,12 @@ async def handle(self, response: Response, ctx: WorkflowContext) -> None: input_request = Request(query="hello", limit=3) events = await workflow.run(input_request) - invoked_events = [e for e in events if isinstance(e, ExecutorInvokedEvent)] - completed_events = [e for e in events if isinstance(e, ExecutorCompletedEvent)] + invoked_events = [ + e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_invoked" + ] + completed_events = [ + e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_completed" + ] # Check processor invoked event has the Request object processor_invoked = next(e for e in invoked_events if e.executor_id == "processor") @@ -531,7 +538,7 @@ async def handle_response( async def test_executor_invoked_event_data_not_mutated_by_handler(): - """Test that ExecutorInvokedEvent.data captures original input, not mutated input.""" + """Test that ExecutorEvent (kind=INVOKED).data captures original input, not mutated input.""" @executor(id="Mutator") async def mutator(messages: list[ChatMessage], ctx: WorkflowContext[list[ChatMessage]]) -> None: @@ -549,7 +556,9 @@ async def mutator(messages: list[ChatMessage], ctx: WorkflowContext[list[ChatMes events = await workflow.run(input_messages) # Find the invoked event for the Mutator executor - invoked_events = [e for e in events if isinstance(e, ExecutorInvokedEvent)] + invoked_events = [ + e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_invoked" + ] assert len(invoked_events) == 1 mutator_invoked = invoked_events[0] diff --git 
a/python/packages/core/tests/workflow/test_group_chat.py b/python/packages/core/tests/workflow/test_group_chat.py index e75bdfd638..812cd4062b 100644 --- a/python/packages/core/tests/workflow/test_group_chat.py +++ b/python/packages/core/tests/workflow/test_group_chat.py @@ -236,7 +236,7 @@ async def test_group_chat_builder_basic_flow() -> None: outputs: list[list[ChatMessage]] = [] async for event in workflow.run_stream("coordinate task"): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": data = event.data if isinstance(data, list): outputs.append(cast(list[ChatMessage], data)) @@ -405,7 +405,7 @@ def selector(state: GroupChatState) -> str: outputs: list[list[ChatMessage]] = [] async for event in workflow.run_stream("test task"): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": data = event.data if isinstance(data, list): outputs.append(cast(list[ChatMessage], data)) @@ -440,7 +440,7 @@ def termination_condition(conversation: list[ChatMessage]) -> bool: outputs: list[list[ChatMessage]] = [] async for event in workflow.run_stream("test task"): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": data = event.data if isinstance(data, list): outputs.append(cast(list[ChatMessage], data)) @@ -468,7 +468,7 @@ async def test_termination_condition_agent_manager_finalizes(self) -> None: outputs: list[list[ChatMessage]] = [] async for event in workflow.run_stream("test task"): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": data = event.data if isinstance(data, list): outputs.append(cast(list[ChatMessage], data)) @@ -516,7 +516,7 @@ def selector(state: GroupChatState) -> str: outputs: list[list[ChatMessage]] = [] async for event in workflow.run_stream("test task"): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": data = event.data if isinstance(data, list): outputs.append(cast(list[ChatMessage], data)) @@ -569,7 +569,7 @@ def selector(state: 
GroupChatState) -> str: outputs: list[list[ChatMessage]] = [] async for event in workflow.run_stream("test string"): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": data = event.data if isinstance(data, list): outputs.append(cast(list[ChatMessage], data)) @@ -598,7 +598,7 @@ def selector(state: GroupChatState) -> str: outputs: list[list[ChatMessage]] = [] async for event in workflow.run_stream(task_message): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": data = event.data if isinstance(data, list): outputs.append(cast(list[ChatMessage], data)) @@ -630,7 +630,7 @@ def selector(state: GroupChatState) -> str: outputs: list[list[ChatMessage]] = [] async for event in workflow.run_stream(conversation): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": data = event.data if isinstance(data, list): outputs.append(cast(list[ChatMessage], data)) @@ -662,7 +662,7 @@ def selector(state: GroupChatState) -> str: outputs: list[list[ChatMessage]] = [] async for event in workflow.run_stream("test"): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": data = event.data if isinstance(data, list): outputs.append(cast(list[ChatMessage], data)) @@ -697,7 +697,7 @@ def selector(state: GroupChatState) -> str: outputs: list[list[ChatMessage]] = [] async for event in workflow.run_stream("test"): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": data = event.data if isinstance(data, list): outputs.append(cast(list[ChatMessage], data)) @@ -815,7 +815,7 @@ async def selector(state: GroupChatState) -> str: # Run until we get a request info event (should be before beta, not alpha) request_events: list[RequestInfoEvent] = [] async for event in workflow.run_stream("test task"): - if isinstance(event, RequestInfoEvent) and isinstance(event.data, AgentExecutorResponse): + if event.type == "request_info" and isinstance(event.data, AgentExecutorResponse): 
request_events.append(event) # Don't break - let stream complete naturally when paused @@ -832,7 +832,7 @@ async def selector(state: GroupChatState) -> str: async for event in workflow.send_responses_streaming({ request_event.request_id: AgentRequestInfoResponse.approve() }): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": outputs.append(event) # Workflow should complete @@ -867,7 +867,7 @@ async def selector(state: GroupChatState) -> str: # Run until we get a request info event request_events: list[RequestInfoEvent] = [] async for event in workflow.run_stream("test task"): - if isinstance(event, RequestInfoEvent) and isinstance(event.data, AgentExecutorResponse): + if event.type == "request_info" and isinstance(event.data, AgentExecutorResponse): request_events.append(event) break @@ -971,7 +971,7 @@ def create_beta() -> StubAgent: outputs: list[WorkflowOutputEvent] = [] async for event in workflow.run_stream("coordinate task"): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": outputs.append(event) assert len(outputs) == 1 @@ -1036,7 +1036,7 @@ def create_beta() -> StubAgent: outputs: list[WorkflowOutputEvent] = [] async for event in workflow.run_stream("checkpoint test"): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": outputs.append(event) assert outputs, "Should have workflow output" @@ -1164,7 +1164,7 @@ def agent_factory() -> ChatAgent: outputs: list[WorkflowOutputEvent] = [] async for event in workflow.run_stream("coordinate task"): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": outputs.append(event) assert len(outputs) == 1 diff --git a/python/packages/core/tests/workflow/test_magentic.py b/python/packages/core/tests/workflow/test_magentic.py index 9c6a2521b1..f9f4a9922d 100644 --- a/python/packages/core/tests/workflow/test_magentic.py +++ b/python/packages/core/tests/workflow/test_magentic.py @@ -11,12 +11,12 @@ AgentProtocol, AgentResponse, 
AgentResponseUpdate, - AgentRunUpdateEvent, AgentThread, BaseAgent, ChatMessage, Content, Executor, + ExecutorEvent, GroupChatRequestMessage, MagenticBuilder, MagenticContext, @@ -200,7 +200,7 @@ async def test_magentic_builder_returns_workflow_and_runs() -> None: outputs: list[ChatMessage] = [] orchestrator_event_count = 0 async for event in workflow.run_stream("compose summary"): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": msg = event.data if isinstance(msg, list): outputs.extend(cast(list[ChatMessage], msg)) @@ -342,12 +342,12 @@ async def test_magentic_orchestrator_round_limit_produces_partial_result(): events.append(ev) idle_status = next( - (e for e in events if isinstance(e, WorkflowStatusEvent) and e.state == WorkflowRunState.IDLE), + (e for e in events if e.type == "status" and e.state == WorkflowRunState.IDLE), None, ) assert idle_status is not None # Check that we got workflow output via WorkflowOutputEvent - output_event = next((e for e in events if isinstance(e, WorkflowOutputEvent)), None) + output_event = next((e for e in events if e.type == "output"), None) assert output_event is not None data = output_event.data assert isinstance(data, list) @@ -397,14 +397,14 @@ async def test_magentic_checkpoint_resume_round_trip(): async for event in wf_resume.run_stream( resume_checkpoint.checkpoint_id, ): - if isinstance(event, RequestInfoEvent) and event.request_type is MagenticPlanReviewRequest: + if event.type == "request_info" and event.request_type is MagenticPlanReviewRequest: req_event = event assert req_event is not None assert isinstance(req_event.data, MagenticPlanReviewRequest) responses = {req_event.request_id: req_event.data.approve()} async for event in wf_resume.send_responses_streaming(responses=responses): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": completed = event assert completed is not None @@ -583,7 +583,7 @@ async def _collect_agent_responses_setup(participant: AgentProtocol) 
-> list[Cha events.append(ev) if isinstance(ev, WorkflowOutputEvent): break - if isinstance(ev, AgentRunUpdateEvent): + if isinstance(ev, ExecutorEvent) and isinstance(ev.data, AgentResponseUpdate): captured.append( ChatMessage( role=ev.data.role or Role.ASSISTANT, @@ -629,7 +629,7 @@ async def test_magentic_checkpoint_resume_inner_loop_superstep(): ) async for event in workflow.run_stream("inner-loop task"): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": break checkpoints = await _collect_checkpoints(storage) @@ -645,7 +645,7 @@ async def test_magentic_checkpoint_resume_inner_loop_superstep(): completed: WorkflowOutputEvent | None = None async for event in resumed.run_stream(checkpoint_id=inner_loop_checkpoint.checkpoint_id): # type: ignore[reportUnknownMemberType] - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": completed = event assert completed is not None @@ -667,7 +667,7 @@ async def test_magentic_checkpoint_resume_from_saved_state(): ) async for event in workflow.run_stream("checkpoint resume task"): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": break checkpoints = await _collect_checkpoints(storage) @@ -685,7 +685,7 @@ async def test_magentic_checkpoint_resume_from_saved_state(): completed: WorkflowOutputEvent | None = None async for event in resumed_workflow.run_stream(checkpoint_id=resumed_state.checkpoint_id): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": completed = event assert completed is not None @@ -707,7 +707,7 @@ async def test_magentic_checkpoint_resume_rejects_participant_renames(): req_event: RequestInfoEvent | None = None async for event in workflow.run_stream("task"): - if isinstance(event, RequestInfoEvent) and event.request_type is MagenticPlanReviewRequest: + if event.type == "request_info" and event.request_type is MagenticPlanReviewRequest: req_event = event assert req_event is not None @@ -766,11 +766,11 @@ async def 
test_magentic_stall_and_reset_reach_limits(): events.append(ev) idle_status = next( - (e for e in events if isinstance(e, WorkflowStatusEvent) and e.state == WorkflowRunState.IDLE), + (e for e in events if e.type == "status" and e.state == WorkflowRunState.IDLE), None, ) assert idle_status is not None - output_event = next((e for e in events if isinstance(e, WorkflowOutputEvent)), None) + output_event = next((e for e in events if e.type == "output"), None) assert output_event is not None assert isinstance(output_event.data, list) assert all(isinstance(msg, ChatMessage) for msg in output_event.data) # type: ignore @@ -885,7 +885,7 @@ async def test_magentic_checkpoint_restore_no_duplicate_history(): ] async for event in wf.run_stream(conversation): - if isinstance(event, WorkflowStatusEvent) and event.state in ( + if event.type == "status" and event.state in ( WorkflowRunState.IDLE, WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, ): @@ -995,7 +995,7 @@ def create_agent() -> StubAgent: outputs: list[WorkflowOutputEvent] = [] async for event in workflow.run_stream("test task"): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": outputs.append(event) assert len(outputs) == 1 @@ -1042,7 +1042,7 @@ def create_agent() -> StubAgent: outputs: list[WorkflowOutputEvent] = [] async for event in workflow.run_stream("checkpoint test"): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": outputs.append(event) assert outputs, "Should have workflow output" @@ -1099,7 +1099,7 @@ def manager_factory() -> MagenticManagerBase: outputs: list[WorkflowOutputEvent] = [] async for event in workflow.run_stream("test task"): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": outputs.append(event) assert len(outputs) == 1 diff --git a/python/packages/core/tests/workflow/test_request_info_and_response.py b/python/packages/core/tests/workflow/test_request_info_and_response.py index 537d9b05c5..1f93a085b0 100644 --- 
a/python/packages/core/tests/workflow/test_request_info_and_response.py +++ b/python/packages/core/tests/workflow/test_request_info_and_response.py @@ -184,7 +184,7 @@ async def test_approval_workflow(self): # First run the workflow until it emits a request request_info_event: RequestInfoEvent | None = None async for event in workflow.run_stream("test operation"): - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": request_info_event = event assert request_info_event is not None @@ -194,7 +194,7 @@ async def test_approval_workflow(self): # Send response and continue workflow completed = False async for event in workflow.send_responses_streaming({request_info_event.request_id: True}): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: completed = True assert completed @@ -209,7 +209,7 @@ async def test_calculation_workflow(self): # First run the workflow until it emits a calculation request request_info_event: RequestInfoEvent | None = None async for event in workflow.run_stream("multiply 15.5 2.0"): - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": request_info_event = event assert request_info_event is not None @@ -221,7 +221,7 @@ async def test_calculation_workflow(self): calculated_result = 31.0 completed = False async for event in workflow.send_responses_streaming({request_info_event.request_id: calculated_result}): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: completed = True assert completed @@ -236,7 +236,7 @@ async def test_multiple_requests_workflow(self): # Collect all request events by running the full stream request_events: list[RequestInfoEvent] = [] async for event in workflow.run_stream("start batch"): - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": 
request_events.append(event) assert len(request_events) == 2 @@ -256,7 +256,7 @@ async def test_multiple_requests_workflow(self): responses = {approval_event.request_id: True, calc_event.request_id: 50.0} completed = False async for event in workflow.send_responses_streaming(responses): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: completed = True assert completed @@ -270,7 +270,7 @@ async def test_denied_approval_workflow(self): # First run the workflow until it emits a request request_info_event: RequestInfoEvent | None = None async for event in workflow.run_stream("sensitive operation"): - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": request_info_event = event assert request_info_event is not None @@ -278,7 +278,7 @@ async def test_denied_approval_workflow(self): # Deny the request completed = False async for event in workflow.send_responses_streaming({request_info_event.request_id: False}): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: completed = True assert completed @@ -294,9 +294,9 @@ async def test_workflow_state_with_pending_requests(self): request_info_event: RequestInfoEvent | None = None idle_with_pending = False async for event in workflow.run_stream("test operation"): - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": request_info_event = event - elif isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE_WITH_PENDING_REQUESTS: + elif event.type == "status" and event.state == WorkflowRunState.IDLE_WITH_PENDING_REQUESTS: idle_with_pending = True assert request_info_event is not None @@ -305,7 +305,7 @@ async def test_workflow_state_with_pending_requests(self): # Continue with response completed = False async for event in 
workflow.send_responses_streaming({request_info_event.request_id: True}): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: completed = True assert completed @@ -318,7 +318,7 @@ async def test_invalid_calculation_input(self): # Send invalid input (no numbers) completed = False async for event in workflow.run_stream("invalid input"): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: completed = True assert completed @@ -340,7 +340,7 @@ async def test_checkpoint_with_pending_request_info_events(self): # Step 1: Run workflow to completion to ensure checkpoints are created request_info_event: RequestInfoEvent | None = None async for event in workflow.run_stream("checkpoint test operation"): - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": request_info_event = event # Verify request was emitted @@ -380,10 +380,10 @@ async def test_checkpoint_with_pending_request_info_events(self): restored_request_event: RequestInfoEvent | None = None async for event in restored_workflow.run_stream(checkpoint_id=checkpoint_with_request.checkpoint_id): # Should re-emit the pending request info event - if isinstance(event, RequestInfoEvent) and event.request_id == request_info_event.request_id: + if event.type == "request_info" and event.request_id == request_info_event.request_id: restored_request_event = event elif ( - isinstance(event, WorkflowStatusEvent) + event.type == "status" and event.state == WorkflowRunState.IDLE_WITH_PENDING_REQUESTS ): completed = True @@ -402,7 +402,7 @@ async def test_checkpoint_with_pending_request_info_events(self): async for event in restored_workflow.send_responses_streaming({ request_info_event.request_id: True # Approve the request }): - if isinstance(event, WorkflowStatusEvent) and event.state == 
WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: final_completed = True assert final_completed, "Workflow should complete after providing response to restored request" diff --git a/python/packages/core/tests/workflow/test_request_info_event_rehydrate.py b/python/packages/core/tests/workflow/test_request_info_event_rehydrate.py index c0fd8e198f..02a0e52079 100644 --- a/python/packages/core/tests/workflow/test_request_info_event_rehydrate.py +++ b/python/packages/core/tests/workflow/test_request_info_event_rehydrate.py @@ -9,7 +9,7 @@ from agent_framework import InMemoryCheckpointStorage, InProcRunnerContext from agent_framework._workflows._checkpoint_encoding import DATACLASS_MARKER, encode_checkpoint_value from agent_framework._workflows._checkpoint_summary import get_checkpoint_summary -from agent_framework._workflows._events import RequestInfoEvent +from agent_framework._workflows._events import WorkflowEvent from agent_framework._workflows._shared_state import SharedState @@ -36,7 +36,7 @@ class TimedApproval: async def test_rehydrate_request_info_event() -> None: """Rehydration should succeed for valid request info events.""" - request_info_event = RequestInfoEvent( + request_info_event = WorkflowEvent.request_info( request_id="request-123", source_executor_id="review_gateway", request_data=MockRequest(), @@ -69,7 +69,7 @@ async def test_rehydrate_request_info_event() -> None: async def test_rehydrate_fails_when_request_type_missing() -> None: """Rehydration should fail is the request type is missing or fails to import.""" - request_info_event = RequestInfoEvent( + request_info_event = WorkflowEvent.request_info( request_id="request-123", source_executor_id="review_gateway", request_data=MockRequest(), @@ -97,7 +97,7 @@ async def test_rehydrate_fails_when_request_type_missing() -> None: async def test_rehydrate_fails_when_request_type_mismatch() -> None: """Rehydration should fail if the request type is mismatched.""" 
- request_info_event = RequestInfoEvent( + request_info_event = WorkflowEvent.request_info( request_id="request-123", source_executor_id="review_gateway", request_data=MockRequest(), @@ -127,7 +127,7 @@ async def test_rehydrate_fails_when_request_type_mismatch() -> None: async def test_pending_requests_in_summary() -> None: """Test that pending requests are correctly summarized in the checkpoint summary.""" - request_info_event = RequestInfoEvent( + request_info_event = WorkflowEvent.request_info( request_id="request-123", source_executor_id="review_gateway", request_data=MockRequest(), @@ -148,7 +148,8 @@ async def test_pending_requests_in_summary() -> None: assert len(summary.pending_request_info_events) == 1 pending_event = summary.pending_request_info_events[0] - assert isinstance(pending_event, RequestInfoEvent) + assert isinstance(pending_event, WorkflowEvent) + assert pending_event.type == "request_info" assert pending_event.request_id == "request-123" assert pending_event.source_executor_id == "review_gateway" @@ -158,13 +159,13 @@ async def test_pending_requests_in_summary() -> None: async def test_request_info_event_serializes_non_json_payloads() -> None: - req_1 = RequestInfoEvent( + req_1 = WorkflowEvent.request_info( request_id="req-1", source_executor_id="source", request_data=TimedApproval(issued_at=datetime(2024, 5, 4, 12, 30, 45)), response_type=bool, ) - req_2 = RequestInfoEvent( + req_2 = WorkflowEvent.request_info( request_id="req-2", source_executor_id="source", request_data=SlottedApproval(note="slot-based"), diff --git a/python/packages/core/tests/workflow/test_runner.py b/python/packages/core/tests/workflow/test_runner.py index f6a031e5a3..7cac3a3198 100644 --- a/python/packages/core/tests/workflow/test_runner.py +++ b/python/packages/core/tests/workflow/test_runner.py @@ -97,7 +97,7 @@ async def test_runner_run_until_convergence(): ) async for event in runner.run_until_convergence(): assert isinstance(event, WorkflowEvent) - if 
isinstance(event, WorkflowOutputEvent): + if event.type == "output": result = event.data assert result is not None and result == 10 @@ -137,7 +137,7 @@ async def test_runner_run_until_convergence_not_completed(): match="Runner did not converge after 5 iterations.", ): async for event in runner.run_until_convergence(): - assert not isinstance(event, WorkflowStatusEvent) or event.state != WorkflowRunState.IDLE + assert not event.type == "status" or event.state != WorkflowRunState.IDLE async def test_runner_already_running(): diff --git a/python/packages/core/tests/workflow/test_typing_utils.py b/python/packages/core/tests/workflow/test_typing_utils.py index 3e8d1051e7..19973276f5 100644 --- a/python/packages/core/tests/workflow/test_typing_utils.py +++ b/python/packages/core/tests/workflow/test_typing_utils.py @@ -5,7 +5,7 @@ import pytest -from agent_framework import RequestInfoEvent +from agent_framework import WorkflowEvent from agent_framework._workflows._typing_utils import ( deserialize_type, is_instance_of, @@ -308,18 +308,19 @@ def test_serialize_deserialize_roundtrip() -> None: # Test agent framework type roundtrip - serialized = serialize_type(RequestInfoEvent) + serialized = serialize_type(WorkflowEvent) deserialized = deserialize_type(serialized) - assert deserialized is RequestInfoEvent + assert deserialized is WorkflowEvent - # Verify we can instantiate the deserialized type - instance = deserialized( + # Verify we can instantiate the deserialized type via factory method + instance = WorkflowEvent.request_info( request_id="request-123", source_executor_id="executor_1", request_data="test", response_type=str, ) - assert isinstance(instance, RequestInfoEvent) + assert isinstance(instance, WorkflowEvent) + assert instance.type == "request_info" def test_deserialize_type_error_handling() -> None: diff --git a/python/packages/core/tests/workflow/test_workflow.py b/python/packages/core/tests/workflow/test_workflow.py index 6b08b7b22a..7a30baa4ba 100644 --- 
a/python/packages/core/tests/workflow/test_workflow.py +++ b/python/packages/core/tests/workflow/test_workflow.py @@ -13,13 +13,12 @@ AgentExecutor, AgentResponse, AgentResponseUpdate, - AgentRunEvent, - AgentRunUpdateEvent, AgentThread, BaseAgent, ChatMessage, Content, Executor, + ExecutorEvent, FileCheckpointStorage, Message, RequestInfoEvent, @@ -125,7 +124,7 @@ async def test_workflow_run_streaming() -> None: result: int | None = None async for event in workflow.run_stream(NumberMessage(data=0)): assert isinstance(event, WorkflowEvent) - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": result = event.data assert result is not None and result == 10 @@ -199,7 +198,7 @@ async def test_fan_out(): events = await workflow.run(NumberMessage(data=0)) - # Each executor will emit two events: ExecutorInvokedEvent and ExecutorCompletedEvent + # Each executor will emit two events: ExecutorEvent (kind=INVOKED) and ExecutorEvent (kind=COMPLETED) # executor_b will also emit a WorkflowOutputEvent (no WorkflowCompletedEvent anymore) # Each superstep will emit also emit a WorkflowStartedEvent and WorkflowCompletedEvent # This workflow will converge in 2 supersteps because executor_c will send one more message @@ -223,7 +222,7 @@ async def test_fan_out_multiple_completed_events(): events = await workflow.run(NumberMessage(data=0)) - # Each executor will emit two events: ExecutorInvokedEvent and ExecutorCompletedEvent + # Each executor will emit two events: ExecutorEvent (kind=INVOKED) and ExecutorEvent (kind=COMPLETED) # executor_b and executor_c will also emit a WorkflowOutputEvent (no WorkflowCompletedEvent anymore) # Each superstep will emit also emit a WorkflowStartedEvent and WorkflowCompletedEvent # This workflow will converge in 1 superstep because executor_a and executor_b will not send further messages @@ -251,7 +250,7 @@ async def test_fan_in(): events = await workflow.run(NumberMessage(data=0)) - # Each executor will emit two events: 
ExecutorInvokedEvent and ExecutorCompletedEvent + # Each executor will emit two events: ExecutorEvent (kind=INVOKED) and ExecutorEvent (kind=COMPLETED) # aggregator will also emit a WorkflowOutputEvent (no WorkflowCompletedEvent anymore) # Each superstep will emit also emit a WorkflowStartedEvent and WorkflowCompletedEvent assert len(events) == 13 @@ -468,7 +467,7 @@ async def test_workflow_run_stream_from_checkpoint_with_responses( # Verify that the pending request event was emitted assert next( - event for event in events if isinstance(event, RequestInfoEvent) and event.request_id == "request_123" + event for event in events if event.type == "request_info" and event.request_id == "request_123" ) assert len(events) > 0 # Just ensure we processed some events @@ -735,10 +734,15 @@ async def test_workflow_with_simple_cycle_and_exit_condition(): assert outputs[0] is not None and outputs[0] >= 6 # Should complete when executor_a reaches its limit # Verify cycling occurred (should have events from both executors) - # Check for ExecutorInvokedEvent and ExecutorCompletedEvent types that have executor_id - from agent_framework import ExecutorCompletedEvent, ExecutorInvokedEvent - - executor_events = [e for e in events if isinstance(e, (ExecutorInvokedEvent, ExecutorCompletedEvent))] + # Check for ExecutorEvent types that have executor_id + from agent_framework import ExecutorEvent, WorkflowEventKind + + executor_events = [ + e + for e in events + if isinstance(e, ExecutorEvent) + and e.kind in (WorkflowEventKind.EXECUTOR_INVOKED, WorkflowEventKind.EXECUTOR_COMPLETED) + ] executor_ids = {e.executor_id for e in executor_events} assert "exec_a" in executor_ids, "Should have events from executor A" assert "exec_b" in executor_ids, "Should have events from executor B" @@ -885,7 +889,7 @@ async def run_stream( async def test_agent_streaming_vs_non_streaming() -> None: - """Test that run() emits AgentRunEvent while run_stream() emits AgentRunUpdateEvent.""" + """Test run() emits 
ExecutorEvent[AgentResponse] while run_stream() emits ExecutorEvent[AgentResponseUpdate].""" agent = _StreamingTestAgent(id="test_agent", name="TestAgent", reply_text="Hello World") agent_exec = AgentExecutor(agent, id="agent_exec") @@ -895,12 +899,14 @@ async def test_agent_streaming_vs_non_streaming() -> None: result = await workflow.run("test message") # Filter for agent events (result is a list of events) - agent_run_events = [e for e in result if isinstance(e, AgentRunEvent)] - agent_update_events = [e for e in result if isinstance(e, AgentRunUpdateEvent)] - - # In non-streaming mode, should have AgentRunEvent, no AgentRunUpdateEvent - assert len(agent_run_events) == 1, "Expected exactly one AgentRunEvent in non-streaming mode" - assert len(agent_update_events) == 0, "Expected no AgentRunUpdateEvent in non-streaming mode" + agent_run_events = [e for e in result if isinstance(e, ExecutorEvent) and isinstance(e.data, AgentResponse)] + agent_update_events = [ + e for e in result if isinstance(e, ExecutorEvent) and isinstance(e.data, AgentResponseUpdate) + ] + + # In non-streaming mode, should have ExecutorEvent[AgentResponse], no ExecutorEvent[AgentResponseUpdate] + assert len(agent_run_events) == 1, "Expected exactly one ExecutorEvent[AgentResponse] in non-streaming mode" + assert len(agent_update_events) == 0, "Expected no ExecutorEvent[AgentResponseUpdate] in non-streaming mode" assert agent_run_events[0].executor_id == "agent_exec" assert agent_run_events[0].data is not None assert agent_run_events[0].data.messages[0].text == "Hello World" @@ -911,12 +917,16 @@ async def test_agent_streaming_vs_non_streaming() -> None: stream_events.append(event) # Filter for agent events - stream_agent_run_events = [e for e in stream_events if isinstance(e, AgentRunEvent)] - stream_agent_update_events = [e for e in stream_events if isinstance(e, AgentRunUpdateEvent)] + stream_agent_run_events = [ + e for e in stream_events if isinstance(e, ExecutorEvent) and 
isinstance(e.data, AgentResponse) + ] + stream_agent_update_events = [ + e for e in stream_events if isinstance(e, ExecutorEvent) and isinstance(e.data, AgentResponseUpdate) + ] - # In streaming mode, should have AgentRunUpdateEvent, no AgentRunEvent - assert len(stream_agent_run_events) == 0, "Expected no AgentRunEvent in streaming mode" - assert len(stream_agent_update_events) > 0, "Expected AgentRunUpdateEvent events in streaming mode" + # In streaming mode, should have ExecutorEvent[AgentResponseUpdate], no ExecutorEvent[AgentResponse] + assert len(stream_agent_run_events) == 0, "Expected no ExecutorEvent[AgentResponse] in streaming mode" + assert len(stream_agent_update_events) > 0, "Expected ExecutorEvent[AgentResponseUpdate] events in streaming mode" # Verify we got incremental updates (one per character in "Hello World") assert len(stream_agent_update_events) == len("Hello World"), "Expected one update per character" @@ -971,7 +981,7 @@ async def test_workflow_run_stream_parameter_validation( events: list[WorkflowEvent] = [] async for event in workflow.run_stream(test_message): events.append(event) - assert any(isinstance(e, WorkflowStatusEvent) and e.state == WorkflowRunState.IDLE for e in events) + assert any(e.type == "status" and e.state == WorkflowRunState.IDLE for e in events) # Invalid combinations already tested in test_workflow_run_parameter_validation # This test ensures streaming works correctly for valid parameters diff --git a/python/packages/core/tests/workflow/test_workflow_agent.py b/python/packages/core/tests/workflow/test_workflow_agent.py index 9514efdf74..a743d06e31 100644 --- a/python/packages/core/tests/workflow/test_workflow_agent.py +++ b/python/packages/core/tests/workflow/test_workflow_agent.py @@ -10,12 +10,12 @@ AgentProtocol, AgentResponse, AgentResponseUpdate, - AgentRunUpdateEvent, AgentThread, ChatMessage, ChatMessageStore, Content, Executor, + ExecutorEvent, Role, UsageDetails, WorkflowAgent, @@ -47,7 +47,7 @@ async def 
handle_message(self, message: list[ChatMessage], ctx: WorkflowContext[ streaming_update = AgentResponseUpdate( contents=[Content.from_text(text=response_text)], role=Role.ASSISTANT, message_id=str(uuid.uuid4()) ) - await ctx.add_event(AgentRunUpdateEvent(executor_id=self.id, data=streaming_update)) + await ctx.add_event(ExecutorEvent(executor_id=self.id, data=streaming_update)) # Pass message to next executor if any (for both streaming and non-streaming) await ctx.send_message([response_message]) @@ -71,7 +71,7 @@ async def handle_request_response( role=Role.ASSISTANT, message_id=str(uuid.uuid4()), ) - await ctx.add_event(AgentRunUpdateEvent(executor_id=self.id, data=update)) + await ctx.add_event(ExecutorEvent(executor_id=self.id, data=update)) class ConversationHistoryCapturingExecutor(Executor): @@ -95,7 +95,7 @@ async def handle_message(self, messages: list[ChatMessage], ctx: WorkflowContext streaming_update = AgentResponseUpdate( contents=[Content.from_text(text=response_text)], role=Role.ASSISTANT, message_id=str(uuid.uuid4()) ) - await ctx.add_event(AgentRunUpdateEvent(executor_id=self.id, data=streaming_update)) + await ctx.add_event(ExecutorEvent(executor_id=self.id, data=streaming_update)) await ctx.send_message([response_message]) @@ -695,13 +695,13 @@ async def start_executor(messages: list[ChatMessage], ctx: WorkflowContext) -> N class TestWorkflowAgentAuthorName: """Test cases for author_name enrichment in WorkflowAgent (GitHub issue #1331).""" - async def test_agent_run_update_event_gets_executor_id_as_author_name(self): - """Test that AgentRunUpdateEvent gets executor_id as author_name when not already set. + async def test_executor_event_gets_executor_id_as_author_name(self): + """Test that ExecutorEvent[AgentResponseUpdate] gets executor_id as author_name when not already set. This validates the fix for GitHub issue #1331: agent responses should include identification of which agent produced them in multi-agent workflows. 
""" - # Create workflow with executor that emits AgentRunUpdateEvent without author_name + # Create workflow with executor that emits ExecutorEvent[AgentResponseUpdate] without author_name executor1 = SimpleExecutor(id="my_executor_id", response_text="Response", emit_streaming=False) workflow = WorkflowBuilder().set_start_executor(executor1).build() agent = WorkflowAgent(workflow=workflow, name="Test Agent") @@ -732,7 +732,7 @@ async def handle_message(self, message: list[ChatMessage], ctx: WorkflowContext[ author_name="custom_author_name", # Explicitly set message_id=str(uuid.uuid4()), ) - await ctx.add_event(AgentRunUpdateEvent(executor_id=self.id, data=update)) + await ctx.add_event(ExecutorEvent(executor_id=self.id, data=update)) executor = AuthorNameExecutor(id="executor_id") workflow = WorkflowBuilder().set_start_executor(executor).build() diff --git a/python/packages/core/tests/workflow/test_workflow_kwargs.py b/python/packages/core/tests/workflow/test_workflow_kwargs.py index 79aa009f57..d080eeffa2 100644 --- a/python/packages/core/tests/workflow/test_workflow_kwargs.py +++ b/python/packages/core/tests/workflow/test_workflow_kwargs.py @@ -86,7 +86,7 @@ async def test_sequential_kwargs_flow_to_agent() -> None: custom_data=custom_data, user_token=user_token, ): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: break # Verify agent received kwargs @@ -107,7 +107,7 @@ async def test_sequential_kwargs_flow_to_multiple_agents() -> None: custom_data = {"key": "value"} async for event in workflow.run_stream("test", custom_data=custom_data): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: break # Both agents should have received kwargs @@ -148,7 +148,7 @@ async def test_concurrent_kwargs_flow_to_agents() -> None: custom_data=custom_data, 
user_token=user_token, ): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: break # Both agents should have received kwargs @@ -195,7 +195,7 @@ def simple_selector(state: GroupChatState) -> str: custom_data = {"session_id": "group123"} async for event in workflow.run_stream("group chat test", custom_data=custom_data): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: break # At least one agent should have received kwargs @@ -229,7 +229,7 @@ async def inspect(self, msgs: list[ChatMessage], ctx: WorkflowContext[list[ChatM workflow = SequentialBuilder().participants([inspector]).build() async for event in workflow.run_stream("test", my_kwarg="my_value", another=123): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: break assert stored_kwargs is not None, "kwargs should be stored in SharedState" @@ -255,7 +255,7 @@ async def check(self, msgs: list[ChatMessage], ctx: WorkflowContext[list[ChatMes # Run without any kwargs async for event in workflow.run_stream("test"): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: break # SharedState should have empty dict when no kwargs provided @@ -274,7 +274,7 @@ async def test_kwargs_with_none_values() -> None: workflow = SequentialBuilder().participants([agent]).build() async for event in workflow.run_stream("test", optional_param=None, other_param="value"): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: break assert len(agent.captured_kwargs) >= 1 @@ -301,7 +301,7 @@ async def 
test_kwargs_with_complex_nested_data() -> None: } async for event in workflow.run_stream("test", complex_data=complex_data): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: break assert len(agent.captured_kwargs) >= 1 @@ -319,12 +319,12 @@ async def test_kwargs_preserved_across_workflow_reruns() -> None: # First run async for event in workflow1.run_stream("run1", run_id="first"): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: break # Second run with different kwargs (using fresh workflow) async for event in workflow2.run_stream("run2", run_id="second"): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: break assert len(agent.captured_kwargs) >= 2 @@ -356,7 +356,7 @@ async def test_handoff_kwargs_flow_to_agents() -> None: custom_data = {"session_id": "handoff123"} async for event in workflow.run_stream("handoff test", custom_data=custom_data): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: break # Coordinator agent should have received kwargs @@ -413,7 +413,7 @@ async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatM custom_data = {"session_id": "magentic123"} async for event in workflow.run_stream("magentic test", custom_data=custom_data): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: break # The workflow completes immediately via prepare_final_answer without invoking agents @@ -463,7 +463,7 @@ async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatM custom_data = {"magentic_key": 
"magentic_value"} async for event in magentic_workflow.run_stream("test task", custom_data=custom_data): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: break # Verify the workflow completed (kwargs were stored, even if agent wasn't invoked) @@ -617,7 +617,7 @@ async def test_subworkflow_kwargs_propagation() -> None: custom_data=custom_data, user_token=user_token, ): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: break # Verify that the inner agent was called @@ -676,7 +676,7 @@ async def read_kwargs(self, msgs: list[ChatMessage], ctx: WorkflowContext[list[C my_custom_kwarg="should_be_propagated", another_kwarg=42, ): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: break # Verify the state reader was invoked @@ -721,7 +721,7 @@ async def test_nested_subworkflow_kwargs_propagation() -> None: "deeply nested test", deep_kwarg="should_reach_inner", ): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: break # Verify inner agent was called diff --git a/python/packages/core/tests/workflow/test_workflow_states.py b/python/packages/core/tests/workflow/test_workflow_states.py index 4aec349d15..41e3cb99fe 100644 --- a/python/packages/core/tests/workflow/test_workflow_states.py +++ b/python/packages/core/tests/workflow/test_workflow_states.py @@ -5,12 +5,13 @@ from agent_framework import ( Executor, - ExecutorFailedEvent, + ExecutorEvent, InProcRunnerContext, RequestInfoEvent, Workflow, WorkflowBuilder, WorkflowContext, + WorkflowEventKind, WorkflowEventSource, WorkflowFailedEvent, WorkflowRunResult, @@ -39,9 +40,11 @@ async def 
test_executor_failed_and_workflow_failed_events_streaming(): async for ev in wf.run_stream(0): events.append(ev) - # ExecutorFailedEvent should be emitted before WorkflowFailedEvent - executor_failed_events = [e for e in events if isinstance(e, ExecutorFailedEvent)] - assert executor_failed_events, "ExecutorFailedEvent should be emitted when start executor fails" + # ExecutorEvent (kind=FAILED) should be emitted before WorkflowFailedEvent + executor_failed_events = [ + e for e in events if isinstance(e, ExecutorEvent) and e.kind == WorkflowEventKind.EXECUTOR_FAILED + ] + assert executor_failed_events, "ExecutorEvent (kind=FAILED) should be emitted when start executor fails" assert executor_failed_events[0].executor_id == "f" assert executor_failed_events[0].origin is WorkflowEventSource.FRAMEWORK @@ -49,14 +52,16 @@ async def test_executor_failed_and_workflow_failed_events_streaming(): failed_events = [e for e in events if isinstance(e, WorkflowFailedEvent)] assert failed_events assert all(e.origin is WorkflowEventSource.FRAMEWORK for e in failed_events) - status = [e for e in events if isinstance(e, WorkflowStatusEvent)] + status = [e for e in events if e.type == "status"] assert status and status[-1].state == WorkflowRunState.FAILED assert all(e.origin is WorkflowEventSource.FRAMEWORK for e in status) - # Verify ExecutorFailedEvent comes before WorkflowFailedEvent + # Verify ExecutorEvent (kind=FAILED) comes before WorkflowFailedEvent executor_failed_idx = events.index(executor_failed_events[0]) workflow_failed_idx = events.index(failed_events[0]) - assert executor_failed_idx < workflow_failed_idx, "ExecutorFailedEvent should be emitted before WorkflowFailedEvent" + assert executor_failed_idx < workflow_failed_idx, ( + "ExecutorEvent (kind=FAILED) should be emitted before WorkflowFailedEvent" + ) async def test_executor_failed_event_emitted_on_direct_execute(): @@ -71,7 +76,7 @@ async def test_executor_failed_event_emitted_on_direct_execute(): ctx, ) drained = 
await ctx.drain_events() - failed = [e for e in drained if isinstance(e, ExecutorFailedEvent)] + failed = [e for e in drained if isinstance(e, ExecutorEvent) and e.kind == WorkflowEventKind.EXECUTOR_FAILED] assert failed assert all(e.origin is WorkflowEventSource.FRAMEWORK for e in failed) @@ -85,7 +90,7 @@ async def passthrough(self, msg: int, ctx: WorkflowContext[int]) -> None: async def test_executor_failed_event_from_second_executor_in_chain(): - """Test that ExecutorFailedEvent is emitted when a non-start executor fails.""" + """Test that ExecutorEvent (kind=FAILED) is emitted when a non-start executor fails.""" passthrough = PassthroughExecutor(id="passthrough") failing = FailingExecutor(id="failing") wf: Workflow = WorkflowBuilder().set_start_executor(passthrough).add_edge(passthrough, failing).build() @@ -95,9 +100,11 @@ async def test_executor_failed_event_from_second_executor_in_chain(): async for ev in wf.run_stream(0): events.append(ev) - # ExecutorFailedEvent should be emitted for the failing executor - executor_failed_events = [e for e in events if isinstance(e, ExecutorFailedEvent)] - assert executor_failed_events, "ExecutorFailedEvent should be emitted when second executor fails" + # ExecutorEvent (kind=FAILED) should be emitted for the failing executor + executor_failed_events = [ + e for e in events if isinstance(e, ExecutorEvent) and e.kind == WorkflowEventKind.EXECUTOR_FAILED + ] + assert executor_failed_events, "ExecutorEvent (kind=FAILED) should be emitted when second executor fails" assert executor_failed_events[0].executor_id == "failing" assert executor_failed_events[0].origin is WorkflowEventSource.FRAMEWORK @@ -106,10 +113,12 @@ async def test_executor_failed_event_from_second_executor_in_chain(): assert failed_events assert all(e.origin is WorkflowEventSource.FRAMEWORK for e in failed_events) - # Verify ExecutorFailedEvent comes before WorkflowFailedEvent + # Verify ExecutorEvent (kind=FAILED) comes before WorkflowFailedEvent 
executor_failed_idx = events.index(executor_failed_events[0]) workflow_failed_idx = events.index(failed_events[0]) - assert executor_failed_idx < workflow_failed_idx, "ExecutorFailedEvent should be emitted before WorkflowFailedEvent" + assert executor_failed_idx < workflow_failed_idx, ( + "ExecutorEvent (kind=FAILED) should be emitted before WorkflowFailedEvent" + ) class SimpleExecutor(Executor): @@ -137,7 +146,7 @@ async def test_idle_with_pending_requests_status_streaming(): # Ensure a request was emitted assert any(isinstance(e, RequestInfoEvent) for e in events) - status_events = [e for e in events if isinstance(e, WorkflowStatusEvent)] + status_events = [e for e in events if e.type == "status"] assert len(status_events) >= 3 assert status_events[-2].state == WorkflowRunState.IN_PROGRESS_PENDING_REQUESTS assert status_events[-1].state == WorkflowRunState.IDLE_WITH_PENDING_REQUESTS @@ -156,7 +165,7 @@ async def test_completed_status_streaming(): wf = WorkflowBuilder().set_start_executor(c).build() events = [ev async for ev in wf.run_stream("ok")] # no raise # Last status should be IDLE - status = [e for e in events if isinstance(e, WorkflowStatusEvent)] + status = [e for e in events if e.type == "status"] assert status and status[-1].state == WorkflowRunState.IDLE assert all(e.origin is WorkflowEventSource.FRAMEWORK for e in status) @@ -171,7 +180,7 @@ async def test_started_and_completed_event_origins(): # Check for IDLE status indicating completion idle_status = next( - (e for e in events if isinstance(e, WorkflowStatusEvent) and e.state == WorkflowRunState.IDLE), None + (e for e in events if e.type == "status" and e.state == WorkflowRunState.IDLE), None ) assert idle_status is not None assert idle_status.origin is WorkflowEventSource.FRAMEWORK diff --git a/python/packages/devui/agent_framework_devui/_executor.py b/python/packages/devui/agent_framework_devui/_executor.py index cf4fa0066f..1d9bb6fe0c 100644 --- 
a/python/packages/devui/agent_framework_devui/_executor.py +++ b/python/packages/devui/agent_framework_devui/_executor.py @@ -8,7 +8,6 @@ from typing import Any from agent_framework import AgentProtocol, Content -from agent_framework._workflows._events import RequestInfoEvent from ._conversations import ConversationStore, InMemoryConversationStore from ._discovery import EntityDiscovery @@ -528,7 +527,7 @@ async def _execute_workflow( async for event in workflow.send_responses_streaming(hil_responses): # Enrich new RequestInfoEvents that may come from subsequent HIL requests - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": self._enrich_request_info_event_with_response_schema(event, workflow) for trace_event in trace_collector.get_pending_events(): @@ -548,7 +547,7 @@ async def _execute_workflow( async for event in workflow.run_stream( checkpoint_id=checkpoint_id, checkpoint_storage=checkpoint_storage ): - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": self._enrich_request_info_event_with_response_schema(event, workflow) for trace_event in trace_collector.get_pending_events(): @@ -572,7 +571,7 @@ async def _execute_workflow( parsed_input = await self._parse_workflow_input(workflow, request.input) async for event in workflow.run_stream(parsed_input, checkpoint_storage=checkpoint_storage): - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": self._enrich_request_info_event_with_response_schema(event, workflow) for trace_event in trace_collector.get_pending_events(): diff --git a/python/packages/devui/agent_framework_devui/_mapper.py b/python/packages/devui/agent_framework_devui/_mapper.py index f11a6811ce..fd7f21d170 100644 --- a/python/packages/devui/agent_framework_devui/_mapper.py +++ b/python/packages/devui/agent_framework_devui/_mapper.py @@ -179,18 +179,14 @@ async def convert_event(self, raw_event: Any, request: AgentFrameworkRequest) -> # Import Agent Framework types for proper 
isinstance checks try: from agent_framework import AgentResponse, AgentResponseUpdate, WorkflowEvent - from agent_framework._workflows._events import AgentRunUpdateEvent - # Handle AgentRunUpdateEvent - workflow event wrapping AgentResponseUpdate + # Handle WorkflowEvent with type='data' wrapping AgentResponseUpdate # This must be checked BEFORE generic WorkflowEvent check - if isinstance(raw_event, AgentRunUpdateEvent): - # Extract the AgentResponseUpdate from the event's data attribute - if raw_event.data and isinstance(raw_event.data, AgentResponseUpdate): + if isinstance(raw_event, WorkflowEvent) and raw_event.type == "data": + if isinstance(raw_event.data, AgentResponseUpdate): # Preserve executor_id in context for proper output routing context["current_executor_id"] = raw_event.executor_id return await self._convert_agent_update(raw_event.data, context) - # If no data, treat as generic workflow event - return await self._convert_workflow_event(raw_event, context) # Handle complete agent response (AgentResponse) - for non-streaming agent execution if isinstance(raw_event, AgentResponse): @@ -825,10 +821,12 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> List of OpenAI response stream events """ try: - event_class = event.__class__.__name__ + # Use event.type for discriminated union pattern (similar to Content class) + event_type = getattr(event, "type", None) + event_class = event.__class__.__name__ # Fallback for non-workflow events # Response-level events - construct proper OpenAI objects - if event_class == "WorkflowStartedEvent": + if event_type == "started": workflow_id = getattr(event, "workflow_id", str(uuid4())) context["workflow_id"] = workflow_id @@ -872,8 +870,8 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> return events - # Handle WorkflowOutputEvent separately to preserve output data - if event_class == "WorkflowOutputEvent": + # Handle output events separately to preserve 
output data + if event_type == "output": output_data = getattr(event, "data", None) executor_id = getattr(event, "executor_id", "unknown") @@ -947,12 +945,12 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> ) ] - # Handle WorkflowCompletedEvent - Don't emit response.completed here + # Handle completed event - Don't emit response.completed here # The server will emit a proper one with usage data after aggregating all events - if event_class == "WorkflowCompletedEvent": + if event_type == "completed": return [] - if event_class == "WorkflowFailedEvent": + if event_type == "failed": workflow_id = context.get("workflow_id", str(uuid4())) # WorkflowFailedEvent uses 'details' field (WorkflowErrorDetails), not 'error' # This matches ExecutorFailedEvent which also uses 'details' @@ -1001,101 +999,103 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> ] # Executor-level events (output items) - if event_class == "ExecutorInvokedEvent": - executor_id = getattr(event, "executor_id", "unknown") - item_id = f"exec_{executor_id}_{uuid4().hex[:8]}" - context[f"exec_item_{executor_id}"] = item_id - context["output_index"] = context.get("output_index", -1) + 1 + # Check for executor lifecycle events via event.type + if event_type == "executor_invoked": + executor_id = getattr(event, "executor_id", "unknown") + item_id = f"exec_{executor_id}_{uuid4().hex[:8]}" + context[f"exec_item_{executor_id}"] = item_id + context["output_index"] = context.get("output_index", -1) + 1 - # Track current executor for routing Magentic agent events - # This allows MagenticAgentDeltaEvent to route to the executor's item - context["current_executor_id"] = executor_id + # Track current executor for routing Magentic agent events + # This allows MagenticAgentDeltaEvent to route to the executor's item + context["current_executor_id"] = executor_id - # Create ExecutorActionItem with proper type - executor_item = ExecutorActionItem( - 
type="executor_action", - id=item_id, - executor_id=executor_id, - status="in_progress", - metadata=getattr(event, "metadata", {}), - ) - - # Use our custom event type that accepts ExecutorActionItem - return [ - CustomResponseOutputItemAddedEvent( - type="response.output_item.added", - output_index=context["output_index"], - sequence_number=self._next_sequence(context), - item=executor_item, + # Create ExecutorActionItem with proper type + executor_item = ExecutorActionItem( + type="executor_action", + id=item_id, + executor_id=executor_id, + status="in_progress", + metadata=getattr(event, "metadata", {}), ) - ] - if event_class == "ExecutorCompletedEvent": - executor_id = getattr(event, "executor_id", "unknown") - item_id = context.get(f"exec_item_{executor_id}", f"exec_{executor_id}_unknown") - - # Clear current executor tracking when executor completes - if context.get("current_executor_id") == executor_id: - context.pop("current_executor_id", None) - - # Create ExecutorActionItem with completed status - # ExecutorCompletedEvent uses 'data' field, not 'result' - # Serialize the result data to ensure it's JSON-serializable - # (AgentExecutorResponse contains AgentResponse/ChatMessage which are SerializationMixin) - raw_result = getattr(event, "data", None) - serialized_result = self._serialize_value(raw_result) if raw_result is not None else None - executor_item = ExecutorActionItem( - type="executor_action", - id=item_id, - executor_id=executor_id, - status="completed", - result=serialized_result, - ) + # Use our custom event type that accepts ExecutorActionItem + return [ + CustomResponseOutputItemAddedEvent( + type="response.output_item.added", + output_index=context["output_index"], + sequence_number=self._next_sequence(context), + item=executor_item, + ) + ] - # Use our custom event type - return [ - CustomResponseOutputItemDoneEvent( - type="response.output_item.done", - output_index=context.get("output_index", 0), - 
sequence_number=self._next_sequence(context), - item=executor_item, + if event_type == "executor_completed": + executor_id = getattr(event, "executor_id", "unknown") + item_id = context.get(f"exec_item_{executor_id}", f"exec_{executor_id}_unknown") + + # Clear current executor tracking when executor completes + if context.get("current_executor_id") == executor_id: + context.pop("current_executor_id", None) + + # Create ExecutorActionItem with completed status + # ExecutorEvent (kind=EXECUTOR_COMPLETED) uses 'data' field, not 'result' + # Serialize the result data to ensure it's JSON-serializable + # (AgentExecutorResponse contains AgentResponse/ChatMessage which are SerializationMixin) + raw_result = getattr(event, "data", None) + serialized_result = self._serialize_value(raw_result) if raw_result is not None else None + executor_item = ExecutorActionItem( + type="executor_action", + id=item_id, + executor_id=executor_id, + status="completed", + result=serialized_result, ) - ] - if event_class == "ExecutorFailedEvent": - executor_id = getattr(event, "executor_id", "unknown") - item_id = context.get(f"exec_item_{executor_id}", f"exec_{executor_id}_unknown") - # ExecutorFailedEvent uses 'details' field (WorkflowErrorDetails), not 'error' - details = getattr(event, "details", None) - if details: - err_msg = getattr(details, "message", None) or str(details) - extra = getattr(details, "extra", None) - if extra: - err_msg = f"{err_msg} (extra: {extra})" - else: - err_msg = None - - # Create ExecutorActionItem with failed status - executor_item = ExecutorActionItem( - type="executor_action", - id=item_id, - executor_id=executor_id, - status="failed", - error={"message": err_msg} if err_msg else None, - ) + # Use our custom event type + return [ + CustomResponseOutputItemDoneEvent( + type="response.output_item.done", + output_index=context.get("output_index", 0), + sequence_number=self._next_sequence(context), + item=executor_item, + ) + ] - # Use our custom event type - 
return [ - CustomResponseOutputItemDoneEvent( - type="response.output_item.done", - output_index=context.get("output_index", 0), - sequence_number=self._next_sequence(context), - item=executor_item, + if event_type == "executor_failed": + executor_id = getattr(event, "executor_id", "unknown") + item_id = context.get(f"exec_item_{executor_id}", f"exec_{executor_id}_unknown") + # ExecutorEvent (kind=EXECUTOR_FAILED) uses 'details' property (WorkflowErrorDetails), not 'error' + # This matches ExecutorEvent.details which returns self.data for EXECUTOR_FAILED kind + details = getattr(event, "details", None) + if details: + err_msg = getattr(details, "message", None) or str(details) + extra = getattr(details, "extra", None) + if extra: + err_msg = f"{err_msg} (extra: {extra})" + else: + err_msg = None + + # Create ExecutorActionItem with failed status + executor_item = ExecutorActionItem( + type="executor_action", + id=item_id, + executor_id=executor_id, + status="failed", + error={"message": err_msg} if err_msg else None, ) - ] - # Handle RequestInfoEvent specially - emit as HIL event with schema - if event_class == "RequestInfoEvent": + # Use our custom event type + return [ + CustomResponseOutputItemDoneEvent( + type="response.output_item.done", + output_index=context.get("output_index", 0), + sequence_number=self._next_sequence(context), + item=executor_item, + ) + ] + + # Handle request_info events specially - emit as HIL event with schema + if event_type == "request_info": from .models._openai_custom import ResponseRequestInfoEvent request_id = getattr(event, "request_id", "") @@ -1164,26 +1164,25 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> return [hil_event] # Handle other informational workflow events (status, warnings, errors) - if event_class in ["WorkflowStatusEvent", "WorkflowWarningEvent", "WorkflowErrorEvent"]: + if event_type in ["status", "warning", "error"]: # These are informational events that don't map to OpenAI 
lifecycle events # Convert them to trace events for debugging visibility event_data: dict[str, Any] = {} # Extract relevant data based on event type - if event_class == "WorkflowStatusEvent": + if event_type == "status": event_data["state"] = str(getattr(event, "state", "unknown")) - elif event_class == "WorkflowWarningEvent": - event_data["message"] = str(getattr(event, "message", "")) - elif event_class == "WorkflowErrorEvent": - event_data["message"] = str(getattr(event, "message", "")) - event_data["error"] = str(getattr(event, "error", "")) + elif event_type == "warning": + event_data["message"] = str(getattr(event, "data", "")) + elif event_type == "error": + event_data["message"] = str(getattr(event, "data", "")) # Create a trace event for debugging trace_event = ResponseTraceEventComplete( type="response.trace.completed", data={ "trace_type": "workflow_info", - "event_type": event_class, + "event_type": event_type, "data": event_data, "timestamp": datetime.now().isoformat(), }, diff --git a/python/packages/devui/tests/test_checkpoints.py b/python/packages/devui/tests/test_checkpoints.py index fbaf8734cd..5501f5a73d 100644 --- a/python/packages/devui/tests/test_checkpoints.py +++ b/python/packages/devui/tests/test_checkpoints.py @@ -428,10 +428,10 @@ async def test_workflow_auto_saves_checkpoints_to_injected_storage(self, checkpo # Run workflow until it reaches IDLE_WITH_PENDING_REQUESTS (after checkpoint is created) saw_request_event = False async for event in test_workflow.run_stream(WorkflowTestData(value="test")): - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": saw_request_event = True # Wait for IDLE_WITH_PENDING_REQUESTS status (comes after checkpoint creation) - if isinstance(event, WorkflowStatusEvent) and "IDLE_WITH_PENDING_REQUESTS" in str(event.state): + if event.type == "status" and "IDLE_WITH_PENDING_REQUESTS" in str(event.state): break assert saw_request_event, "Test workflow should have emitted RequestInfoEvent" 
diff --git a/python/packages/devui/tests/test_helpers.py b/python/packages/devui/tests/test_helpers.py index 130ab475d9..6d190c9519 100644 --- a/python/packages/devui/tests/test_helpers.py +++ b/python/packages/devui/tests/test_helpers.py @@ -43,10 +43,8 @@ # Import real workflow event classes - NOT mocks! from agent_framework._workflows._events import ( - ExecutorCompletedEvent, - ExecutorFailedEvent, - ExecutorInvokedEvent, WorkflowErrorDetails, + WorkflowEvent, ) from agent_framework_devui._discovery import EntityDiscovery @@ -302,7 +300,7 @@ def create_agent_executor_response( executor_id: str = "test_executor", response_text: str = "Executor response", ) -> AgentExecutorResponse: - """Create an AgentExecutorResponse - the type that's nested in ExecutorCompletedEvent.data.""" + """Create an AgentExecutorResponse - the type that's nested in ExecutorEvent (kind=COMPLETED).data.""" agent_response = create_agent_run_response(response_text) return AgentExecutorResponse( executor_id=executor_id, @@ -317,29 +315,29 @@ def create_agent_executor_response( def create_executor_completed_event( executor_id: str = "test_executor", with_agent_response: bool = True, -) -> ExecutorCompletedEvent: - """Create an ExecutorCompletedEvent with realistic nested data. +) -> WorkflowEvent[Any]: + """Create a WorkflowEvent(type='executor_completed') with realistic nested data. This creates the exact data structure that caused the serialization bug: - ExecutorCompletedEvent.data contains AgentExecutorResponse which contains + WorkflowEvent.data contains AgentExecutorResponse which contains AgentResponse and ChatMessage objects (SerializationMixin, not Pydantic). 
""" data = create_agent_executor_response(executor_id) if with_agent_response else {"simple": "dict"} - return ExecutorCompletedEvent(executor_id=executor_id, data=data) + return WorkflowEvent.executor_completed(executor_id=executor_id, data=data) -def create_executor_invoked_event(executor_id: str = "test_executor") -> ExecutorInvokedEvent: - """Create an ExecutorInvokedEvent.""" - return ExecutorInvokedEvent(executor_id=executor_id) +def create_executor_invoked_event(executor_id: str = "test_executor") -> WorkflowEvent[Any]: + """Create a WorkflowEvent(type='executor_invoked').""" + return WorkflowEvent.executor_invoked(executor_id=executor_id) def create_executor_failed_event( executor_id: str = "test_executor", error_message: str = "Test error", -) -> ExecutorFailedEvent: - """Create an ExecutorFailedEvent.""" +) -> WorkflowEvent[WorkflowErrorDetails]: + """Create a WorkflowEvent(type='executor_failed').""" details = WorkflowErrorDetails(error_type="TestError", message=error_message) - return ExecutorFailedEvent(executor_id=executor_id, details=details) + return WorkflowEvent.executor_failed(executor_id=executor_id, details=details) # ============================================================================= diff --git a/python/packages/devui/tests/test_mapper.py b/python/packages/devui/tests/test_mapper.py index 4ea3ba9333..e8287fe940 100644 --- a/python/packages/devui/tests/test_mapper.py +++ b/python/packages/devui/tests/test_mapper.py @@ -20,9 +20,8 @@ # Import real workflow event classes - NOT mocks! 
from agent_framework._workflows._events import ( - ExecutorCompletedEvent, - WorkflowStartedEvent, - WorkflowStatusEvent, + WorkflowEvent, + WorkflowRunState, ) # Import test utilities @@ -278,7 +277,7 @@ async def test_agent_run_response_mapping(mapper: MessageMapper, test_request: A async def test_executor_invoked_event(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test ExecutorInvokedEvent using the REAL class from agent_framework.""" + """Test WorkflowEvent(type='executor_invoked') using the REAL class from agent_framework.""" # Use real class, not mock! event = create_executor_invoked_event(executor_id="exec_123") @@ -294,9 +293,9 @@ async def test_executor_invoked_event(mapper: MessageMapper, test_request: Agent async def test_executor_completed_event_simple_data(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test ExecutorCompletedEvent with simple dict data.""" + """Test WorkflowEvent(type='executor_completed') with simple dict data.""" # Create event with simple data - event = ExecutorCompletedEvent(executor_id="exec_123", data={"simple": "result"}) + event = WorkflowEvent.executor_completed(executor_id="exec_123", data={"simple": "result"}) # First need to invoke the executor to set up context invoke_event = create_executor_invoked_event(executor_id="exec_123") @@ -318,10 +317,10 @@ async def test_executor_completed_event_simple_data(mapper: MessageMapper, test_ async def test_executor_completed_event_with_agent_response( mapper: MessageMapper, test_request: AgentFrameworkRequest ) -> None: - """Test ExecutorCompletedEvent with nested AgentExecutorResponse. + """Test WorkflowEvent(type='executor_completed') with nested AgentExecutorResponse. 
This is a REGRESSION TEST for the serialization bug where - ExecutorCompletedEvent.data contained AgentExecutorResponse with nested + WorkflowEvent.data contained AgentExecutorResponse with nested AgentResponse and ChatMessage objects (SerializationMixin) that Pydantic couldn't serialize. """ @@ -391,7 +390,7 @@ async def test_executor_completed_event_serialization_to_json( async def test_executor_failed_event(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test ExecutorFailedEvent using the REAL class.""" + """Test WorkflowEvent(type='executor_failed') using the REAL class.""" # First invoke the executor invoke_event = create_executor_invoked_event(executor_id="exec_fail") await mapper.convert_event(invoke_event, test_request) @@ -415,22 +414,21 @@ async def test_executor_failed_event(mapper: MessageMapper, test_request: AgentF async def test_workflow_started_event(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test WorkflowStartedEvent using the REAL class.""" + """Test WorkflowEvent(type='started') using the REAL class.""" - event = WorkflowStartedEvent(data=None) + event = WorkflowEvent.started() events = await mapper.convert_event(event, test_request) - # WorkflowStartedEvent should emit response.created and response.in_progress + # WorkflowEvent(type='started') should emit response.created and response.in_progress assert len(events) == 2 assert events[0].type == "response.created" assert events[1].type == "response.in_progress" async def test_workflow_status_event(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test WorkflowStatusEvent using the REAL class.""" - from agent_framework._workflows._events import WorkflowRunState + """Test WorkflowEvent(type='status') using the REAL class.""" - event = WorkflowStatusEvent(state=WorkflowRunState.IN_PROGRESS) + event = WorkflowEvent.status(state=WorkflowRunState.IN_PROGRESS) events = await mapper.convert_event(event, test_request) # 
Should emit some status-related event @@ -438,20 +436,20 @@ async def test_workflow_status_event(mapper: MessageMapper, test_request: AgentF # ============================================================================= -# Magentic Event Tests - Testing REAL AgentRunUpdateEvent with additional_properties +# Magentic Event Tests - Testing REAL WorkflowEvent[AgentResponseUpdate] with additional_properties # ============================================================================= -async def test_magentic_agent_run_update_event_with_agent_delta_metadata( +async def test_magentic_executor_event_with_agent_delta_metadata( mapper: MessageMapper, test_request: AgentFrameworkRequest ) -> None: - """Test that AgentRunUpdateEvent with magentic_event_type='agent_delta' is handled correctly. + """Test that WorkflowEvent[AgentResponseUpdate] with magentic_event_type='agent_delta' is handled correctly. This tests the ACTUAL event format Magentic emits - not a fake MagenticAgentDeltaEvent class. - Magentic uses AgentRunUpdateEvent with additional_properties containing magentic_event_type. + Magentic uses WorkflowEvent.emit() with additional_properties containing magentic_event_type. 
""" from agent_framework._types import AgentResponseUpdate, Role - from agent_framework._workflows._events import AgentRunUpdateEvent + from agent_framework._workflows._events import WorkflowEvent # Create the REAL event format that Magentic emits update = AgentResponseUpdate( @@ -463,11 +461,11 @@ async def test_magentic_agent_run_update_event_with_agent_delta_metadata( "agent_id": "writer_agent", }, ) - event = AgentRunUpdateEvent(executor_id="magentic_executor", data=update) + event = WorkflowEvent.emit(executor_id="magentic_executor", data=update) events = await mapper.convert_event(event, test_request) - # Should be treated as a regular AgentRunUpdateEvent with text content + # Should be treated as a regular WorkflowEvent[AgentResponseUpdate] with text content # The mapper should emit text delta events assert len(events) >= 1 text_events = [e for e in events if getattr(e, "type", "") == "response.output_text.delta"] @@ -476,13 +474,13 @@ async def test_magentic_agent_run_update_event_with_agent_delta_metadata( async def test_magentic_orchestrator_message_event(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test that AgentRunUpdateEvent with magentic_event_type='orchestrator_message' is handled. + """Test that WorkflowEvent[AgentResponseUpdate] with magentic_event_type='orchestrator_message' is handled. - Magentic emits orchestrator planning/instruction messages using AgentRunUpdateEvent + Magentic emits orchestrator planning/instruction messages using WorkflowEvent.emit() with additional_properties containing magentic_event_type='orchestrator_message'. 
""" from agent_framework._types import AgentResponseUpdate, Role - from agent_framework._workflows._events import AgentRunUpdateEvent + from agent_framework._workflows._events import WorkflowEvent # Create orchestrator message event (REAL format from Magentic) update = AgentResponseUpdate( @@ -495,11 +493,11 @@ async def test_magentic_orchestrator_message_event(mapper: MessageMapper, test_r "orchestrator_id": "magentic_orchestrator", }, ) - event = AgentRunUpdateEvent(executor_id="magentic_orchestrator", data=update) + event = WorkflowEvent.emit(executor_id="magentic_orchestrator", data=update) events = await mapper.convert_event(event, test_request) - # Currently, mapper treats this as regular AgentRunUpdateEvent (no special handling) + # Currently, mapper treats this as regular WorkflowEvent[AgentResponseUpdate] (no special handling) # This test documents the current behavior assert len(events) >= 1 text_events = [e for e in events if getattr(e, "type", "") == "response.output_text.delta"] @@ -510,15 +508,15 @@ async def test_magentic_orchestrator_message_event(mapper: MessageMapper, test_r async def test_magentic_events_use_same_event_class_as_other_workflows( mapper: MessageMapper, test_request: AgentFrameworkRequest ) -> None: - """Verify Magentic uses the same AgentRunUpdateEvent class as other workflows. + """Verify Magentic uses the same WorkflowEvent class as other workflows. This test documents that Magentic does NOT define separate event classes like - MagenticAgentDeltaEvent - it reuses AgentRunUpdateEvent with metadata in + MagenticAgentDeltaEvent - it reuses WorkflowEvent with metadata in additional_properties. Any mapper code checking for 'MagenticAgentDeltaEvent' class names is dead code. """ from agent_framework._types import AgentResponseUpdate, Role - from agent_framework._workflows._events import AgentRunUpdateEvent + from agent_framework._workflows._events import WorkflowEvent # Create events the way different workflows do it # 1. 
Regular workflow (no additional_properties) @@ -526,7 +524,7 @@ class names is dead code. contents=[Content.from_text(text="Regular workflow response")], role=Role.ASSISTANT, ) - regular_event = AgentRunUpdateEvent(executor_id="regular_executor", data=regular_update) + regular_event = WorkflowEvent.emit(executor_id="regular_executor", data=regular_update) # 2. Magentic workflow (with additional_properties) magentic_update = AgentResponseUpdate( @@ -534,12 +532,12 @@ class names is dead code. role=Role.ASSISTANT, additional_properties={"magentic_event_type": "agent_delta"}, ) - magentic_event = AgentRunUpdateEvent(executor_id="magentic_executor", data=magentic_update) + magentic_event = WorkflowEvent.emit(executor_id="magentic_executor", data=magentic_update) # Both should be the SAME class assert type(regular_event) is type(magentic_event) - assert isinstance(regular_event, AgentRunUpdateEvent) - assert isinstance(magentic_event, AgentRunUpdateEvent) + assert isinstance(regular_event, WorkflowEvent) + assert isinstance(magentic_event, WorkflowEvent) # Both should be handled by the same isinstance check in mapper regular_events = await mapper.convert_event(regular_event, test_request) @@ -581,10 +579,10 @@ def __init__(self): async def test_workflow_output_event(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test WorkflowOutputEvent is converted to output_item.added.""" - from agent_framework._workflows._events import WorkflowOutputEvent + """Test WorkflowEvent(type='output') is converted to output_item.added.""" + from agent_framework._workflows._events import WorkflowEvent - event = WorkflowOutputEvent(data="Final workflow output", executor_id="final_executor") + event = WorkflowEvent.output(executor_id="final_executor", data="Final workflow output") events = await mapper.convert_event(event, test_request) # WorkflowOutputEvent should emit output_item.added @@ -597,16 +595,16 @@ async def test_workflow_output_event(mapper: MessageMapper, 
test_request: AgentF async def test_workflow_output_event_with_list_data(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test WorkflowOutputEvent with list data (common for sequential/concurrent workflows).""" + """Test WorkflowEvent(type='output') with list data (common for sequential/concurrent workflows).""" from agent_framework import ChatMessage, Role - from agent_framework._workflows._events import WorkflowOutputEvent + from agent_framework._workflows._events import WorkflowEvent # Sequential/Concurrent workflows often output list[ChatMessage] messages = [ ChatMessage(role=Role.USER, contents=[Content.from_text(text="Hello")]), ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text(text="World")]), ] - event = WorkflowOutputEvent(data=messages, executor_id="complete") + event = WorkflowEvent.output(executor_id="complete", data=messages) events = await mapper.convert_event(event, test_request) assert len(events) == 1 @@ -619,15 +617,15 @@ async def test_workflow_output_event_with_list_data(mapper: MessageMapper, test_ async def test_workflow_failed_event(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test WorkflowFailedEvent is converted to response.failed.""" - from agent_framework._workflows._events import WorkflowErrorDetails, WorkflowFailedEvent + """Test WorkflowEvent(type='failed') is converted to response.failed.""" + from agent_framework._workflows._events import WorkflowErrorDetails, WorkflowEvent details = WorkflowErrorDetails( error_type="TestError", message="Workflow failed due to test error", executor_id="failing_executor", ) - event = WorkflowFailedEvent(details=details) + event = WorkflowEvent.failed(details=details) events = await mapper.convert_event(event, test_request) # WorkflowFailedEvent should emit response.failed @@ -645,8 +643,8 @@ async def test_workflow_failed_event(mapper: MessageMapper, test_request: AgentF async def test_workflow_failed_event_with_extra(mapper: 
MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test WorkflowFailedEvent includes extra context when available.""" - from agent_framework._workflows._events import WorkflowErrorDetails, WorkflowFailedEvent + """Test WorkflowEvent(type='failed') includes extra context when available.""" + from agent_framework._workflows._events import WorkflowErrorDetails, WorkflowEvent details = WorkflowErrorDetails( error_type="ValidationError", @@ -654,7 +652,7 @@ async def test_workflow_failed_event_with_extra(mapper: MessageMapper, test_requ executor_id="validation_executor", extra={"field": "email", "reason": "invalid format"}, ) - event = WorkflowFailedEvent(details=details) + event = WorkflowEvent.failed(details=details) events = await mapper.convert_event(event, test_request) assert len(events) == 1 @@ -667,8 +665,8 @@ async def test_workflow_failed_event_with_extra(mapper: MessageMapper, test_requ async def test_workflow_failed_event_with_traceback(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test WorkflowFailedEvent includes traceback when available.""" - from agent_framework._workflows._events import WorkflowErrorDetails, WorkflowFailedEvent + """Test WorkflowEvent(type='failed') includes traceback when available.""" + from agent_framework._workflows._events import WorkflowErrorDetails, WorkflowEvent details = WorkflowErrorDetails( error_type="ValueError", @@ -676,7 +674,7 @@ async def test_workflow_failed_event_with_traceback(mapper: MessageMapper, test_ traceback="Traceback (most recent call last):\n File ...\nValueError: Invalid input", executor_id="validation_executor", ) - event = WorkflowFailedEvent(details=details) + event = WorkflowEvent.failed(details=details) events = await mapper.convert_event(event, test_request) assert len(events) == 1 @@ -689,29 +687,29 @@ async def test_workflow_failed_event_with_traceback(mapper: MessageMapper, test_ async def test_workflow_warning_event(mapper: MessageMapper, test_request: 
AgentFrameworkRequest) -> None: - """Test WorkflowWarningEvent is converted to trace event.""" - from agent_framework._workflows._events import WorkflowWarningEvent + """Test WorkflowEvent(type='warning') is converted to trace event.""" + from agent_framework._workflows._events import WorkflowEvent - event = WorkflowWarningEvent(data="This is a warning message") + event = WorkflowEvent.warning("This is a warning message") events = await mapper.convert_event(event, test_request) - # WorkflowWarningEvent should emit a trace event + # WorkflowEvent(type='warning') should emit a trace event assert len(events) == 1 assert events[0].type == "response.trace.completed" - assert events[0].data["event_type"] == "WorkflowWarningEvent" + assert events[0].data["event_type"] == "warning" async def test_workflow_error_event(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test WorkflowErrorEvent is converted to trace event.""" - from agent_framework._workflows._events import WorkflowErrorEvent + """Test WorkflowEvent(type='error') is converted to trace event.""" + from agent_framework._workflows._events import WorkflowEvent - event = WorkflowErrorEvent(data=ValueError("Something went wrong")) + event = WorkflowEvent.error(ValueError("Something went wrong")) events = await mapper.convert_event(event, test_request) - # WorkflowErrorEvent should emit a trace event + # WorkflowEvent(type='error') should emit a trace event assert len(events) == 1 assert events[0].type == "response.trace.completed" - assert events[0].data["event_type"] == "WorkflowErrorEvent" + assert events[0].data["event_type"] == "error" # ============================================================================= @@ -720,10 +718,10 @@ async def test_workflow_error_event(mapper: MessageMapper, test_request: AgentFr async def test_request_info_event(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test RequestInfoEvent is converted to HIL request event.""" - from 
agent_framework._workflows._events import RequestInfoEvent + """Test WorkflowEvent(type='request_info') is converted to HIL request event.""" + from agent_framework._workflows._events import WorkflowEvent - event = RequestInfoEvent( + event = WorkflowEvent.request_info( request_id="req_123", source_executor_id="approval_executor", request_data={"action": "approve", "details": "Please approve this action"}, @@ -749,10 +747,10 @@ async def test_request_info_event(mapper: MessageMapper, test_request: AgentFram async def test_superstep_started_event(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test SuperStepStartedEvent is handled gracefully.""" - from agent_framework._workflows._events import SuperStepStartedEvent + """Test WorkflowEvent(type='superstep_started') is handled gracefully.""" + from agent_framework._workflows._events import WorkflowEvent - event = SuperStepStartedEvent(iteration=1) + event = WorkflowEvent.superstep_started(iteration=1) events = await mapper.convert_event(event, test_request) # SuperStepStartedEvent may not emit events (internal workflow signal) @@ -761,10 +759,10 @@ async def test_superstep_started_event(mapper: MessageMapper, test_request: Agen async def test_superstep_completed_event(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test SuperStepCompletedEvent is handled gracefully.""" - from agent_framework._workflows._events import SuperStepCompletedEvent + """Test WorkflowEvent(type='superstep_completed') is handled gracefully.""" + from agent_framework._workflows._events import WorkflowEvent - event = SuperStepCompletedEvent(iteration=1) + event = WorkflowEvent.superstep_completed(iteration=1) events = await mapper.convert_event(event, test_request) # SuperStepCompletedEvent may not emit events (internal workflow signal) diff --git a/python/packages/lab/lightning/tests/test_lightning.py b/python/packages/lab/lightning/tests/test_lightning.py index c56adf2b20..d867afe676 100644 
--- a/python/packages/lab/lightning/tests/test_lightning.py +++ b/python/packages/lab/lightning/tests/test_lightning.py @@ -9,7 +9,7 @@ agentlightning = pytest.importorskip("agentlightning") -from agent_framework import AgentExecutor, AgentRunEvent, ChatAgent, WorkflowBuilder, Workflow +from agent_framework import AgentExecutor, AgentResponse, ChatAgent, ExecutorEvent, WorkflowBuilder, Workflow from agent_framework_lab_lightning import AgentFrameworkTracer from agent_framework.openai import OpenAIChatClient from agentlightning import TracerTraceToTriplet @@ -109,8 +109,10 @@ def workflow_two_agents(): async def test_openai_workflow_two_agents(workflow_two_agents: Workflow): events = await workflow_two_agents.run("Please analyze the quarterly sales data") - # Get all AgentRunEvent data - agent_outputs = [event.data for event in events if isinstance(event, AgentRunEvent)] + # Get all ExecutorEvent[AgentResponse] data + agent_outputs = [ + event.data for event in events if event.type == "data" and isinstance(event.data, AgentResponse) + ] # Check that we have outputs from both agents assert len(agent_outputs) == 2 diff --git a/python/samples/autogen-migration/orchestrations/01_round_robin_group_chat.py b/python/samples/autogen-migration/orchestrations/01_round_robin_group_chat.py index 39d360b1e1..e46872527d 100644 --- a/python/samples/autogen-migration/orchestrations/01_round_robin_group_chat.py +++ b/python/samples/autogen-migration/orchestrations/01_round_robin_group_chat.py @@ -53,7 +53,7 @@ async def run_autogen() -> None: async def run_agent_framework() -> None: """Agent Framework's SequentialBuilder for sequential agent orchestration.""" - from agent_framework import AgentRunUpdateEvent, SequentialBuilder + from agent_framework import AgentResponseUpdate, SequentialBuilder from agent_framework.openai import OpenAIChatClient client = OpenAIChatClient(model_id="gpt-4.1-mini") @@ -81,7 +81,7 @@ async def run_agent_framework() -> None: print("[Agent Framework] 
Sequential conversation:") current_executor = None async for event in workflow.run_stream("Create a brief summary about electric vehicles"): - if isinstance(event, AgentRunUpdateEvent): + if event.type == "data" and isinstance(event.data, AgentResponseUpdate): # Print executor name header when switching to a new agent if current_executor != event.executor_id: if current_executor is not None: @@ -98,10 +98,11 @@ async def run_agent_framework_with_cycle() -> None: from agent_framework import ( AgentExecutorRequest, AgentExecutorResponse, - AgentRunUpdateEvent, + AgentResponseUpdate, + WorkflowBuilder, WorkflowContext, - WorkflowOutputEvent, + executor, tool, ) @@ -154,10 +155,10 @@ async def check_approval( print("[Agent Framework with Cycle] Cyclic conversation:") current_executor = None async for event in workflow.run_stream("Create a brief summary about electric vehicles"): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": print("\n---------- Workflow Output ----------") print(event.data) - elif isinstance(event, AgentRunUpdateEvent): + elif event.type == "data" and isinstance(event.data, AgentResponseUpdate): # Print executor name header when switching to a new agent if current_executor != event.executor_id: if current_executor is not None: diff --git a/python/samples/autogen-migration/orchestrations/02_selector_group_chat.py b/python/samples/autogen-migration/orchestrations/02_selector_group_chat.py index f8c170cbef..90b04874b8 100644 --- a/python/samples/autogen-migration/orchestrations/02_selector_group_chat.py +++ b/python/samples/autogen-migration/orchestrations/02_selector_group_chat.py @@ -59,7 +59,7 @@ async def run_autogen() -> None: async def run_agent_framework() -> None: """Agent Framework's GroupChatBuilder with LLM-based speaker selection.""" - from agent_framework import AgentRunUpdateEvent, GroupChatBuilder + from agent_framework import AgentResponseUpdate, GroupChatBuilder from agent_framework.openai import 
OpenAIChatClient client = OpenAIChatClient(model_id="gpt-4.1-mini") @@ -100,7 +100,7 @@ async def run_agent_framework() -> None: print("[Agent Framework] Group chat conversation:") current_executor = None async for event in workflow.run_stream("How do I connect to a PostgreSQL database using Python?"): - if isinstance(event, AgentRunUpdateEvent): + if event.type == "data" and isinstance(event.data, AgentResponseUpdate): # Print executor name header when switching to a new agent if current_executor != event.executor_id: if current_executor is not None: diff --git a/python/samples/autogen-migration/orchestrations/03_swarm.py b/python/samples/autogen-migration/orchestrations/03_swarm.py index 3fa9f7a04d..fd703c0e43 100644 --- a/python/samples/autogen-migration/orchestrations/03_swarm.py +++ b/python/samples/autogen-migration/orchestrations/03_swarm.py @@ -96,12 +96,13 @@ async def run_autogen() -> None: async def run_agent_framework() -> None: """Agent Framework's HandoffBuilder for agent coordination.""" from agent_framework import ( - AgentRunUpdateEvent, + AgentResponseUpdate, + HandoffBuilder, HandoffUserInputRequest, - RequestInfoEvent, + WorkflowRunState, - WorkflowStatusEvent, + tool, ) from agent_framework.openai import OpenAIChatClient @@ -160,10 +161,10 @@ async def run_agent_framework() -> None: current_executor = None stream_line_open = False - pending_requests: list[RequestInfoEvent] = [] + pending_requests: list[WorkflowEvent] = [] async for event in workflow.run_stream(scripted_responses[0]): - if isinstance(event, AgentRunUpdateEvent): + if event.type == "data" and isinstance(event.data, AgentResponseUpdate): # Print executor name header when switching to a new agent if current_executor != event.executor_id: if stream_line_open: @@ -174,10 +175,10 @@ async def run_agent_framework() -> None: stream_line_open = True if event.data: print(event.data.text, end="", flush=True) - elif isinstance(event, RequestInfoEvent): + elif event.type == "request_info": 
if isinstance(event.data, HandoffUserInputRequest): pending_requests.append(event) - elif isinstance(event, WorkflowStatusEvent): + elif event.type == "status": if event.state in {WorkflowRunState.IDLE_WITH_PENDING_REQUESTS} and stream_line_open: print() stream_line_open = False @@ -195,7 +196,7 @@ async def run_agent_framework() -> None: stream_line_open = False async for event in workflow.send_responses_streaming(responses): - if isinstance(event, AgentRunUpdateEvent): + if event.type == "data" and isinstance(event.data, AgentResponseUpdate): # Print executor name header when switching to a new agent if current_executor != event.executor_id: if stream_line_open: @@ -206,10 +207,10 @@ async def run_agent_framework() -> None: stream_line_open = True if event.data: print(event.data.text, end="", flush=True) - elif isinstance(event, RequestInfoEvent): + elif event.type == "request_info": if isinstance(event.data, HandoffUserInputRequest): pending_requests.append(event) - elif isinstance(event, WorkflowStatusEvent): + elif event.type == "status": if ( event.state in {WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, WorkflowRunState.IDLE} and stream_line_open diff --git a/python/samples/autogen-migration/orchestrations/04_magentic_one.py b/python/samples/autogen-migration/orchestrations/04_magentic_one.py index 30ccd0aa01..0613a4eaff 100644 --- a/python/samples/autogen-migration/orchestrations/04_magentic_one.py +++ b/python/samples/autogen-migration/orchestrations/04_magentic_one.py @@ -10,11 +10,12 @@ from typing import cast from agent_framework import ( - AgentRunUpdateEvent, + AgentResponseUpdate, ChatMessage, + MagenticOrchestratorEvent, MagenticProgressLedger, - WorkflowOutputEvent, + ) @@ -110,10 +111,10 @@ async def run_agent_framework() -> None: # Run complex task last_message_id: str | None = None - output_event: WorkflowOutputEvent | None = None + output_event: WorkflowEvent | None = None print("[Agent Framework] Magentic conversation:") async for event in 
workflow.run_stream("Research Python async patterns and write a simple example"): - if isinstance(event, AgentRunUpdateEvent): + if event.type == "data" and isinstance(event.data, AgentResponseUpdate): message_id = event.data.message_id if message_id != last_message_id: if last_message_id is not None: @@ -136,7 +137,7 @@ async def run_agent_framework() -> None: # Please refer to `with_plan_review` for proper human interaction during planning phases. await asyncio.get_event_loop().run_in_executor(None, input, "Press Enter to continue...") - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": output_event = event if not output_event: diff --git a/python/samples/demos/workflow_evaluation/create_workflow.py b/python/samples/demos/workflow_evaluation/create_workflow.py index dc1e920b69..4c8e1d6871 100644 --- a/python/samples/demos/workflow_evaluation/create_workflow.py +++ b/python/samples/demos/workflow_evaluation/create_workflow.py @@ -48,13 +48,13 @@ from agent_framework import ( AgentExecutorResponse, AgentResponseUpdate, - AgentRunUpdateEvent, ChatMessage, Executor, + Role, WorkflowBuilder, WorkflowContext, - WorkflowOutputEvent, + executor, handler, tool, @@ -357,7 +357,7 @@ async def _process_workflow_events(events, conversation_ids, response_ids): workflow_output = None async for event in events: - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": workflow_output = event.data # Handle Unicode characters that may not be displayable in Windows console try: @@ -366,7 +366,7 @@ async def _process_workflow_events(events, conversation_ids, response_ids): output_str = str(event.data).encode("ascii", "replace").decode("ascii") print(f"\nWorkflow Output: {output_str}\n") - elif isinstance(event, AgentRunUpdateEvent): + elif event.type == "data" and isinstance(event.data, AgentResponseUpdate): _track_agent_ids(event, event.executor_id, response_ids, conversation_ids) return workflow_output diff --git 
a/python/samples/getting_started/observability/workflow_observability.py b/python/samples/getting_started/observability/workflow_observability.py index 57e636fd68..265754ad5d 100644 --- a/python/samples/getting_started/observability/workflow_observability.py +++ b/python/samples/getting_started/observability/workflow_observability.py @@ -6,7 +6,7 @@ Executor, WorkflowBuilder, WorkflowContext, - WorkflowOutputEvent, + handler, tool, ) @@ -94,8 +94,8 @@ async def run_sequential_workflow() -> None: output_event = None async for event in workflow.run_stream("Hello world"): - if isinstance(event, WorkflowOutputEvent): - # The WorkflowOutputEvent contains the final result. + if event.type == "output": + # The output event contains the final result. output_event = event if output_event: diff --git a/python/samples/getting_started/workflows/_start-here/step2_agents_in_a_workflow.py b/python/samples/getting_started/workflows/_start-here/step2_agents_in_a_workflow.py index 4fb3340c5b..33a051c5f8 100644 --- a/python/samples/getting_started/workflows/_start-here/step2_agents_in_a_workflow.py +++ b/python/samples/getting_started/workflows/_start-here/step2_agents_in_a_workflow.py @@ -2,7 +2,7 @@ import asyncio -from agent_framework import AgentRunEvent, WorkflowBuilder +from agent_framework import AgentResponse, WorkflowBuilder, WorkflowEvent from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential @@ -53,7 +53,7 @@ async def main(): events = await workflow.run("Create a slogan for a new electric SUV that is affordable and fun to drive.") # Print agent run events and final outputs for event in events: - if isinstance(event, AgentRunEvent): + if event.type == "data" and isinstance(event.data, AgentResponse): print(f"{event.executor_id}: {event.data}") print(f"{'=' * 60}\nWorkflow Outputs: {events.get_outputs()}") diff --git a/python/samples/getting_started/workflows/_start-here/step3_streaming.py
b/python/samples/getting_started/workflows/_start-here/step3_streaming.py index ffd3e9323d..5a17e0b72b 100644 --- a/python/samples/getting_started/workflows/_start-here/step3_streaming.py +++ b/python/samples/getting_started/workflows/_start-here/step3_streaming.py @@ -6,16 +6,13 @@ ChatAgent, ChatMessage, Executor, - ExecutorFailedEvent, WorkflowBuilder, WorkflowContext, - WorkflowFailedEvent, + WorkflowErrorDetails, + WorkflowEvent, WorkflowRunState, - WorkflowStatusEvent, handler, - tool, ) -from agent_framework._workflows._events import WorkflowOutputEvent from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential from typing_extensions import Never @@ -125,7 +122,7 @@ async def main(): async for event in workflow.run_stream( ChatMessage(role="user", text="Create a slogan for a new electric SUV that is affordable and fun to drive.") ): - if isinstance(event, WorkflowStatusEvent): + if event.type == "status": prefix = f"State ({event.origin.value}): " if event.state == WorkflowRunState.IN_PROGRESS: print(prefix + "IN_PROGRESS") @@ -137,18 +134,17 @@ async def main(): print(prefix + "IDLE_WITH_PENDING_REQUESTS (prompt user or UI now)") else: print(prefix + str(event.state)) - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": print(f"Workflow output ({event.origin.value}): {event.data}") - elif isinstance(event, ExecutorFailedEvent): + elif event.type == "executor_failed" and isinstance(event.details, WorkflowErrorDetails): print( f"Executor failed ({event.origin.value}): " f"{event.executor_id} {event.details.error_type}: {event.details.message}" ) - elif isinstance(event, WorkflowFailedEvent): - details = event.details - print(f"Workflow failed ({event.origin.value}): {details.error_type}: {details.message}") + elif event.type == "failed" and isinstance(event.details, WorkflowErrorDetails): + print(f"Workflow failed ({event.origin.value}): {event.details.error_type}: {event.details.message}") 
else: - print(f"{event.__class__.__name__} ({event.origin.value}): {event}") + print(f"{event.type} ({event.origin.value}): {event}") """ Sample Output: diff --git a/python/samples/getting_started/workflows/_start-here/step4_using_factories.py b/python/samples/getting_started/workflows/_start-here/step4_using_factories.py index f9d4f2b971..fafaf27ccd 100644 --- a/python/samples/getting_started/workflows/_start-here/step4_using_factories.py +++ b/python/samples/getting_started/workflows/_start-here/step4_using_factories.py @@ -8,10 +8,9 @@ Executor, WorkflowBuilder, WorkflowContext, - WorkflowOutputEvent, + WorkflowEvent, executor, handler, - tool, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential @@ -86,7 +85,7 @@ async def main(): output: AgentResponse | None = None async for event in workflow.run_stream("hello world"): - if isinstance(event, WorkflowOutputEvent) and isinstance(event.data, AgentResponse): + if event.type == "output" and isinstance(event.data, AgentResponse): output = event.data if output: diff --git a/python/samples/getting_started/workflows/agents/azure_ai_agents_streaming.py b/python/samples/getting_started/workflows/agents/azure_ai_agents_streaming.py index 42f7dc3d23..ea503f9914 100644 --- a/python/samples/getting_started/workflows/agents/azure_ai_agents_streaming.py +++ b/python/samples/getting_started/workflows/agents/azure_ai_agents_streaming.py @@ -2,7 +2,7 @@ import asyncio -from agent_framework import AgentRunUpdateEvent, ChatAgent, WorkflowBuilder, WorkflowOutputEvent +from agent_framework import AgentResponseUpdate, ChatAgent, WorkflowBuilder, WorkflowEvent from agent_framework.azure import AzureAIAgentClient from azure.identity.aio import AzureCliCredential @@ -63,7 +63,7 @@ async def main() -> None: events = workflow.run_stream("Create a slogan for a new electric SUV that is affordable and fun to drive.") async for event in events: - if isinstance(event, AgentRunUpdateEvent): + if 
event.type == "data" and isinstance(event.data, AgentResponseUpdate): eid = event.executor_id if eid != last_executor_id: if last_executor_id is not None: @@ -71,7 +71,7 @@ async def main() -> None: print(f"{eid}:", end=" ", flush=True) last_executor_id = eid print(event.data, end="", flush=True) - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": print("\n===== Final output =====") print(event.data) diff --git a/python/samples/getting_started/workflows/agents/azure_chat_agents_function_bridge.py b/python/samples/getting_started/workflows/agents/azure_chat_agents_function_bridge.py index 11bac9f2c9..d7530781b2 100644 --- a/python/samples/getting_started/workflows/agents/azure_chat_agents_function_bridge.py +++ b/python/samples/getting_started/workflows/agents/azure_chat_agents_function_bridge.py @@ -7,14 +7,13 @@ AgentExecutorRequest, AgentExecutorResponse, AgentResponse, - AgentRunUpdateEvent, + AgentResponseUpdate, ChatMessage, Role, WorkflowBuilder, WorkflowContext, - WorkflowOutputEvent, + WorkflowEvent, executor, - tool, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential @@ -125,14 +124,14 @@ async def main() -> None: last_executor: str | None = None async for event in events: - if isinstance(event, AgentRunUpdateEvent): + if event.type == "data" and isinstance(event.data, AgentResponseUpdate): if event.executor_id != last_executor: if last_executor is not None: print() print(f"{event.executor_id}:", end=" ", flush=True) last_executor = event.executor_id print(event.data, end="", flush=True) - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": print("\n\n===== Final Output =====") response = event.data if isinstance(response, AgentResponse): diff --git a/python/samples/getting_started/workflows/agents/azure_chat_agents_streaming.py b/python/samples/getting_started/workflows/agents/azure_chat_agents_streaming.py index d8a8021a75..bb87d7e529 100644 --- 
a/python/samples/getting_started/workflows/agents/azure_chat_agents_streaming.py +++ b/python/samples/getting_started/workflows/agents/azure_chat_agents_streaming.py @@ -2,7 +2,7 @@ import asyncio -from agent_framework import AgentRunUpdateEvent, WorkflowBuilder, WorkflowOutputEvent +from agent_framework import AgentResponseUpdate, WorkflowBuilder, WorkflowEvent from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential @@ -65,7 +65,7 @@ async def main(): events = workflow.run_stream("Create a slogan for a new electric SUV that is affordable and fun to drive.") async for event in events: - if isinstance(event, AgentRunUpdateEvent): + if event.type == "data" and isinstance(event.data, AgentResponseUpdate): # AgentRunUpdateEvent contains incremental text deltas from the underlying agent. # Print a prefix when the executor changes, then append updates on the same line. eid = event.executor_id @@ -75,7 +75,7 @@ async def main(): print(f"{eid}:", end=" ", flush=True) last_executor_id = eid print(event.data, end="", flush=True) - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": print("\n===== Final output =====") print(event.data) diff --git a/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py b/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py index 1b97677374..61b23b5712 100644 --- a/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py +++ b/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py @@ -9,17 +9,16 @@ AgentExecutorRequest, AgentExecutorResponse, AgentResponse, - AgentRunUpdateEvent, + AgentResponseUpdate, ChatAgent, ChatMessage, Executor, FunctionCallContent, FunctionResultContent, - RequestInfoEvent, Role, WorkflowBuilder, WorkflowContext, - WorkflowOutputEvent, + WorkflowEvent, handler, response_handler, tool, @@ -195,7 
+194,7 @@ def create_final_editor_agent() -> ChatAgent: ) -def display_agent_run_update(event: AgentRunUpdateEvent, last_executor: str | None) -> None: +def display_agent_run_update(event: WorkflowEvent[AgentResponseUpdate], last_executor: str | None) -> None: """Display an AgentRunUpdateEvent in a readable format.""" printed_tool_calls: set[str] = set() printed_tool_results: set[str] = set() @@ -291,13 +290,13 @@ async def main() -> None: requests: list[tuple[str, DraftFeedbackRequest]] = [] async for event in stream: - if isinstance(event, AgentRunUpdateEvent) and display_agent_run_update_switch: - display_agent_run_update(event, last_executor) - if isinstance(event, RequestInfoEvent) and isinstance(event.data, DraftFeedbackRequest): + if event.type == "data" and isinstance(event.data, AgentResponseUpdate) and display_agent_run_update_switch: + display_agent_run_update(event, last_executor) # type: ignore[arg-type] + if event.type == "request_info" and isinstance(event.data, DraftFeedbackRequest): # Stash the request so we can prompt the human after the stream completes. 
requests.append((event.request_id, event.data)) last_executor = None - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": last_executor = None response = event.data print("\n===== Final output =====") diff --git a/python/samples/getting_started/workflows/agents/workflow_as_agent_reflection_pattern.py b/python/samples/getting_started/workflows/agents/workflow_as_agent_reflection_pattern.py index 9aa98f7b96..5bddfb0ab5 100644 --- a/python/samples/getting_started/workflows/agents/workflow_as_agent_reflection_pattern.py +++ b/python/samples/getting_started/workflows/agents/workflow_as_agent_reflection_pattern.py @@ -6,7 +6,6 @@ from agent_framework import ( AgentResponseUpdate, - AgentRunUpdateEvent, ChatClientProtocol, ChatMessage, Content, @@ -14,8 +13,8 @@ Role, WorkflowBuilder, WorkflowContext, + WorkflowEvent, handler, - tool, ) from agent_framework.openai import OpenAIChatClient from pydantic import BaseModel @@ -33,7 +32,7 @@ Key Concepts Demonstrated: - WorkflowAgent: Wraps a workflow to behave like a regular agent. - Cyclic workflow design (Worker ↔ Reviewer) for iterative improvement. -- AgentRunUpdateEvent: Mechanism for emitting approved responses externally. +- ExecutorEvent: Mechanism for emitting approved responses externally. - Structured output parsing for review feedback using Pydantic. - State management for pending requests and retry logic. @@ -160,9 +159,9 @@ async def handle_review_response(self, review: ReviewResponse, ctx: WorkflowCont for message in request.agent_messages: contents.extend(message.contents) - # Emit approved result to external consumer via AgentRunUpdateEvent. + # Emit approved result to external consumer via WorkflowEvent. 
await ctx.add_event( - AgentRunUpdateEvent(self.id, data=AgentResponseUpdate(contents=contents, role=Role.ASSISTANT)) + WorkflowEvent.emit(self.id, data=AgentResponseUpdate(contents=contents, role=Role.ASSISTANT)) ) return diff --git a/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py b/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py index a2628592ea..0b13122d79 100644 --- a/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py +++ b/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py @@ -10,19 +10,20 @@ # `agent_framework.builtin` chat client or mock the writer executor. We keep the # concrete import here so readers can see an end-to-end configuration. from agent_framework import ( + WorkflowEvent, AgentExecutorRequest, AgentExecutorResponse, ChatMessage, Executor, FileCheckpointStorage, - RequestInfoEvent, + Role, Workflow, WorkflowBuilder, WorkflowCheckpoint, WorkflowContext, - WorkflowOutputEvent, - WorkflowStatusEvent, + + get_checkpoint_summary, handler, response_handler, @@ -55,7 +56,7 @@ 3. Later, restart the script, select that checkpoint, and provide the stored human decision when prompted to pre-supply responses. Doing so applies the answer immediately on resume, so the system does **not** - re-emit the same `RequestInfoEvent`. + re-emit the same `request_info` event. """ # Directory used for the sample's temporary checkpoint files.
We isolate the @@ -261,11 +262,11 @@ async def run_interactive_session( raise ValueError("Either initial_message or checkpoint_id must be provided") async for event in event_stream: - if isinstance(event, WorkflowStatusEvent): + if event.type == "status": print(event) - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": completed_output = event.data - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": if isinstance(event.data, HumanApprovalRequest): requests[event.request_id] = event.data else: diff --git a/python/samples/getting_started/workflows/checkpoint/checkpoint_with_resume.py b/python/samples/getting_started/workflows/checkpoint/checkpoint_with_resume.py index bfa2484d63..b264188188 100644 --- a/python/samples/getting_started/workflows/checkpoint/checkpoint_with_resume.py +++ b/python/samples/getting_started/workflows/checkpoint/checkpoint_with_resume.py @@ -29,13 +29,14 @@ from typing import Any, override from agent_framework import ( + WorkflowEvent, Executor, InMemoryCheckpointStorage, SuperStepCompletedEvent, WorkflowBuilder, WorkflowCheckpoint, WorkflowContext, - WorkflowOutputEvent, + handler, tool, ) @@ -127,7 +128,7 @@ async def main(): output: str | None = None async for event in event_stream: - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": output = event.data break if isinstance(event, SuperStepCompletedEvent) and random() < 0.5: diff --git a/python/samples/getting_started/workflows/checkpoint/handoff_with_tool_approval_checkpoint_resume.py b/python/samples/getting_started/workflows/checkpoint/handoff_with_tool_approval_checkpoint_resume.py index 145504bdce..f8c368b1c9 100644 --- a/python/samples/getting_started/workflows/checkpoint/handoff_with_tool_approval_checkpoint_resume.py +++ b/python/samples/getting_started/workflows/checkpoint/handoff_with_tool_approval_checkpoint_resume.py @@ -7,16 +7,17 @@ from typing import cast from agent_framework import ( + WorkflowEvent, 
ChatAgent, ChatMessage, Content, FileCheckpointStorage, HandoffAgentUserRequest, HandoffBuilder, - RequestInfoEvent, + Workflow, - WorkflowOutputEvent, - WorkflowStatusEvent, + + tool, ) from agent_framework.azure import AzureOpenAIChatClient @@ -151,7 +152,7 @@ def _print_function_approval_request(request: Content, request_id: str) -> None: def _build_responses_for_requests( - pending_requests: list[RequestInfoEvent], + pending_requests: list[WorkflowEvent], *, user_response: str | None, approve_tools: bool | None, @@ -176,14 +177,14 @@ async def run_until_user_input_needed( workflow: Workflow, initial_message: str | None = None, checkpoint_id: str | None = None, -) -> tuple[list[RequestInfoEvent], str | None]: +) -> tuple[list[WorkflowEvent], str | None]: """ Run the workflow until it needs user input or approval, or completes. Returns: Tuple of (pending_requests, checkpoint_id_to_use_for_resume) """ - pending_requests: list[RequestInfoEvent] = [] + pending_requests: list[WorkflowEvent] = [] latest_checkpoint_id: str | None = checkpoint_id if initial_message: @@ -196,17 +197,17 @@ async def run_until_user_input_needed( raise ValueError("Must provide either initial_message or checkpoint_id") async for event in event_stream: - if isinstance(event, WorkflowStatusEvent): + if event.type == "status": print(f"[Status] {event.state}") - elif isinstance(event, RequestInfoEvent): + elif event.type == "request_info": pending_requests.append(event) if isinstance(event.data, HandoffAgentUserRequest): _print_handoff_request(event.data, event.request_id) elif isinstance(event.data, Content) and event.data.type == "function_approval_request": _print_function_approval_request(event.data, event.request_id) - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": print("\n[Workflow Completed]") if event.data: print(f"Final conversation length: {len(event.data)} messages") @@ -223,7 +224,7 @@ async def resume_with_responses( checkpoint_storage: 
FileCheckpointStorage, user_response: str | None = None, approve_tools: bool | None = None, -) -> tuple[list[RequestInfoEvent], str | None]: +) -> tuple[list[WorkflowEvent], str | None]: """ Two-step resume pattern (answers customer questions and tool approvals): @@ -253,10 +254,10 @@ async def resume_with_responses( print(f"Step 1: Restoring checkpoint {latest_checkpoint.checkpoint_id}") # Step 1: Restore the checkpoint to load pending requests into memory - # The checkpoint restoration re-emits pending RequestInfoEvents - restored_requests: list[RequestInfoEvent] = [] + # The checkpoint restoration re-emits pending request_info events + restored_requests: list[WorkflowEvent] = [] async for event in workflow.run_stream(checkpoint_id=latest_checkpoint.checkpoint_id): # type: ignore[attr-defined] - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": restored_requests.append(event) if isinstance(event.data, HandoffAgentUserRequest): _print_handoff_request(event.data, event.request_id) @@ -273,13 +274,13 @@ async def resume_with_responses( ) print(f"Step 2: Sending responses for {len(responses)} request(s)") - new_pending_requests: list[RequestInfoEvent] = [] + new_pending_requests: list[WorkflowEvent] = [] async for event in workflow.send_responses_streaming(responses): - if isinstance(event, WorkflowStatusEvent): + if event.type == "status": print(f"[Status] {event.state}") - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": print("\n[Workflow Output Event - Conversation Update]") if event.data and isinstance(event.data, list) and all(isinstance(msg, ChatMessage) for msg in event.data): # Now safe to cast event.data to list[ChatMessage] @@ -289,7 +290,7 @@ async def resume_with_responses( text = msg.text[:100] + "..."
if len(msg.text) > 100 else msg.text print(f" {author}: {text}") - elif isinstance(event, RequestInfoEvent): + elif event.type == "request_info": new_pending_requests.append(event) if isinstance(event.data, HandoffAgentUserRequest): _print_handoff_request(event.data, event.request_id) diff --git a/python/samples/getting_started/workflows/checkpoint/sub_workflow_checkpoint.py b/python/samples/getting_started/workflows/checkpoint/sub_workflow_checkpoint.py index d35fd5e41f..0ca4bfe207 100644 --- a/python/samples/getting_started/workflows/checkpoint/sub_workflow_checkpoint.py +++ b/python/samples/getting_started/workflows/checkpoint/sub_workflow_checkpoint.py @@ -10,18 +10,19 @@ from typing import Any, override from agent_framework import ( + WorkflowEvent, Executor, FileCheckpointStorage, - RequestInfoEvent, + SubWorkflowRequestMessage, SubWorkflowResponseMessage, Workflow, WorkflowBuilder, WorkflowContext, WorkflowExecutor, - WorkflowOutputEvent, + WorkflowRunState, - WorkflowStatusEvent, + handler, response_handler, tool, @@ -336,10 +337,10 @@ async def main() -> None: request_id: str | None = None async for event in workflow.run_stream("Contoso Gadget Launch"): - if isinstance(event, RequestInfoEvent) and request_id is None: + if event.type == "request_info" and request_id is None: request_id = event.request_id print(f"Captured review request id: {request_id}") - if isinstance(event, WorkflowStatusEvent) and event.state is WorkflowRunState.IDLE_WITH_PENDING_REQUESTS: + if event.type == "status" and event.state is WorkflowRunState.IDLE_WITH_PENDING_REQUESTS: break if request_id is None: @@ -365,9 +366,9 @@ async def main() -> None: # Rebuild fresh instances to mimic a separate process resuming workflow2 = build_parent_workflow(storage) - request_info_event: RequestInfoEvent | None = None + request_info_event: WorkflowEvent | None = None async for event in workflow2.run_stream(checkpoint_id=resume_checkpoint.checkpoint_id): - if isinstance(event, RequestInfoEvent): 
+ if event.type == "request_info": request_info_event = event if request_info_event is None: @@ -376,9 +377,9 @@ async def main() -> None: print("\n=== Stage 3: approve draft ==") approval_response = "approve" - output_event: WorkflowOutputEvent | None = None + output_event: WorkflowEvent | None = None async for event in workflow2.send_responses_streaming({request_info_event.request_id: approval_response}): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": output_event = event if output_event is None: diff --git a/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py b/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py index 07e0f67d9d..21e8c52d65 100644 --- a/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py +++ b/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py @@ -5,10 +5,11 @@ from typing import Annotated, Any from agent_framework import ( + WorkflowEvent, ChatMessage, SequentialBuilder, WorkflowExecutor, - WorkflowOutputEvent, + tool, ) from agent_framework.openai import OpenAIChatClient @@ -128,7 +129,7 @@ async def main() -> None: user_token=user_token, service_config=service_config, ): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": output_data = event.data if isinstance(output_data, list): for item in output_data: # type: ignore diff --git a/python/samples/getting_started/workflows/composition/sub_workflow_request_interception.py b/python/samples/getting_started/workflows/composition/sub_workflow_request_interception.py index e21c74039a..f112ff30c6 100644 --- a/python/samples/getting_started/workflows/composition/sub_workflow_request_interception.py +++ b/python/samples/getting_started/workflows/composition/sub_workflow_request_interception.py @@ -4,6 +4,7 @@ from dataclasses import dataclass from agent_framework import ( + WorkflowEvent, Executor, SubWorkflowRequestMessage, SubWorkflowResponseMessage, @@ 
-11,7 +12,7 @@ WorkflowBuilder, WorkflowContext, WorkflowExecutor, - WorkflowOutputEvent, + handler, response_handler, tool, @@ -304,7 +305,7 @@ async def main() -> None: for email in test_emails: print(f"\n🚀 Processing email to '{email.recipient}'") async for event in workflow.run_stream(email): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": print(f"🎉 Final result for '{email.recipient}': {'Delivered' if event.data else 'Blocked'}") diff --git a/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py b/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py index 44385bffca..9cb3fc231a 100644 --- a/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py +++ b/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py @@ -17,7 +17,7 @@ WorkflowBuilder, WorkflowContext, WorkflowEvent, - WorkflowOutputEvent, + executor, tool, ) @@ -281,7 +281,7 @@ def select_targets(analysis: AnalysisResult, target_ids: list[str]) -> list[str] async for event in workflow.run_stream(email): if isinstance(event, DatabaseEvent): print(f"{event}") - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": print(f"Workflow output: {event.data}") """ diff --git a/python/samples/getting_started/workflows/control-flow/sequential_executors.py b/python/samples/getting_started/workflows/control-flow/sequential_executors.py index 0fedfcf1cd..7a3318c808 100644 --- a/python/samples/getting_started/workflows/control-flow/sequential_executors.py +++ b/python/samples/getting_started/workflows/control-flow/sequential_executors.py @@ -4,10 +4,11 @@ from typing import cast from agent_framework import ( + WorkflowEvent, Executor, WorkflowBuilder, WorkflowContext, - WorkflowOutputEvent, + handler, tool, ) @@ -78,7 +79,7 @@ async def main() -> None: outputs: list[str] = [] async for event in workflow.run_stream("hello world"): print(f"Event: {event}") - 
if isinstance(event, WorkflowOutputEvent): + if event.type == "output": outputs.append(cast(str, event.data)) if outputs: diff --git a/python/samples/getting_started/workflows/control-flow/sequential_streaming.py b/python/samples/getting_started/workflows/control-flow/sequential_streaming.py index ce7bc92758..cf2943c32a 100644 --- a/python/samples/getting_started/workflows/control-flow/sequential_streaming.py +++ b/python/samples/getting_started/workflows/control-flow/sequential_streaming.py @@ -2,7 +2,7 @@ import asyncio -from agent_framework import WorkflowBuilder, WorkflowContext, WorkflowOutputEvent, executor +from agent_framework import WorkflowBuilder, WorkflowContext, WorkflowEvent, executor from typing_extensions import Never """ @@ -67,7 +67,7 @@ async def main(): async for event in workflow.run_stream("hello world"): # You will see executor invoke and completion events as the workflow progresses. print(f"Event: {event}") - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": print(f"Workflow completed with result: {event.data}") """ diff --git a/python/samples/getting_started/workflows/declarative/customer_support/main.py b/python/samples/getting_started/workflows/declarative/customer_support/main.py index 84e36b771d..5b54f6a20b 100644 --- a/python/samples/getting_started/workflows/declarative/customer_support/main.py +++ b/python/samples/getting_started/workflows/declarative/customer_support/main.py @@ -26,7 +26,7 @@ import uuid from pathlib import Path -from agent_framework import RequestInfoEvent, WorkflowOutputEvent +from agent_framework import WorkflowEvent from agent_framework.azure import AzureOpenAIChatClient from agent_framework.declarative import ( AgentExternalInputRequest, @@ -259,7 +259,7 @@ async def main() -> None: stream = workflow.run_stream(user_input) async for event in stream: - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": data = event.data source_id = getattr(event, "source_executor_id", 
"") @@ -286,7 +286,7 @@ async def main() -> None: else: accumulated_response += str(data) - elif isinstance(event, RequestInfoEvent) and isinstance(event.data, AgentExternalInputRequest): + elif event.type == "request_info" and isinstance(event.data, AgentExternalInputRequest): request = event.data # The agent_response from the request contains the structured response diff --git a/python/samples/getting_started/workflows/declarative/deep_research/main.py b/python/samples/getting_started/workflows/declarative/deep_research/main.py index b5efef8101..f4d5f149bf 100644 --- a/python/samples/getting_started/workflows/declarative/deep_research/main.py +++ b/python/samples/getting_started/workflows/declarative/deep_research/main.py @@ -24,7 +24,7 @@ import asyncio from pathlib import Path -from agent_framework import WorkflowOutputEvent +from agent_framework import WorkflowEvent from agent_framework.azure import AzureOpenAIChatClient from agent_framework.declarative import WorkflowFactory from azure.identity import AzureCliCredential @@ -193,7 +193,7 @@ async def main() -> None: task = "What is the weather like in Seattle and how does it compare to the average for this time of year?" 
async for event in workflow.run_stream(task): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": print(f"{event.data}", end="", flush=True) print("\n" + "=" * 60) diff --git a/python/samples/getting_started/workflows/declarative/function_tools/main.py b/python/samples/getting_started/workflows/declarative/function_tools/main.py index ea647e7f21..dde6e1ef58 100644 --- a/python/samples/getting_started/workflows/declarative/function_tools/main.py +++ b/python/samples/getting_started/workflows/declarative/function_tools/main.py @@ -10,8 +10,7 @@ from pathlib import Path from typing import Annotated, Any -from agent_framework import FileCheckpointStorage, RequestInfoEvent, WorkflowOutputEvent -from agent_framework import tool +from agent_framework import FileCheckpointStorage, WorkflowEvent, tool from agent_framework.azure import AzureOpenAIChatClient from agent_framework_declarative import ExternalInputRequest, ExternalInputResponse, WorkflowFactory from azure.identity import AzureCliCredential @@ -96,12 +95,12 @@ async def main(): first_response = True async for event in stream: - if isinstance(event, WorkflowOutputEvent) and isinstance(event.data, str): + if event.type == "output" and isinstance(event.data, str): if first_response: print("MenuAgent: ", end="") first_response = False print(event.data, end="", flush=True) - elif isinstance(event, RequestInfoEvent) and isinstance(event.data, ExternalInputRequest): + elif event.type == "request_info" and isinstance(event.data, ExternalInputRequest): pending_request_id = event.request_id print() diff --git a/python/samples/getting_started/workflows/declarative/human_in_loop/main.py b/python/samples/getting_started/workflows/declarative/human_in_loop/main.py index e9c0f90f83..4a3c64295e 100644 --- a/python/samples/getting_started/workflows/declarative/human_in_loop/main.py +++ b/python/samples/getting_started/workflows/declarative/human_in_loop/main.py @@ -15,7 +15,7 @@ import asyncio from pathlib 
import Path -from agent_framework import Workflow, WorkflowOutputEvent +from agent_framework import Workflow, WorkflowEvent from agent_framework.declarative import ExternalInputRequest, WorkflowFactory from agent_framework_declarative._workflows._handlers import TextOutputEvent @@ -26,8 +26,8 @@ async def run_with_streaming(workflow: Workflow) -> None: print("-" * 40) async for event in workflow.run_stream({}): - # WorkflowOutputEvent wraps the actual output data - if isinstance(event, WorkflowOutputEvent): + # The output event wraps the actual output data + if event.type == "output": data = event.data if isinstance(data, TextOutputEvent): print(f"[Bot]: {data.text}") diff --git a/python/samples/getting_started/workflows/declarative/marketing/main.py b/python/samples/getting_started/workflows/declarative/marketing/main.py index e48d262076..b8de3ff069 100644 --- a/python/samples/getting_started/workflows/declarative/marketing/main.py +++ b/python/samples/getting_started/workflows/declarative/marketing/main.py @@ -15,7 +15,7 @@ import asyncio from pathlib import Path -from agent_framework import WorkflowOutputEvent +from agent_framework import WorkflowEvent from agent_framework.azure import AzureOpenAIChatClient from agent_framework.declarative import WorkflowFactory from azure.identity import AzureCliCredential @@ -85,7 +85,7 @@ async def main() -> None: product = "An eco-friendly stainless steel water bottle that keeps drinks cold for 24 hours." 
async for event in workflow.run_stream(product): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": print(f"{event.data}", end="", flush=True) print("\n" + "=" * 60) diff --git a/python/samples/getting_started/workflows/declarative/student_teacher/main.py b/python/samples/getting_started/workflows/declarative/student_teacher/main.py index 746acaf009..efae717a16 100644 --- a/python/samples/getting_started/workflows/declarative/student_teacher/main.py +++ b/python/samples/getting_started/workflows/declarative/student_teacher/main.py @@ -22,7 +22,7 @@ import asyncio from pathlib import Path -from agent_framework import WorkflowOutputEvent +from agent_framework import WorkflowEvent from agent_framework.azure import AzureOpenAIChatClient from agent_framework.declarative import WorkflowFactory from azure.identity import AzureCliCredential @@ -82,7 +82,7 @@ async def main() -> None: print("=" * 50) async for event in workflow.run_stream("How would you compute the value of PI?"): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": print(f"{event.data}", flush=True, end="") print("\n" + "=" * 50) diff --git a/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py b/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py index 5aca9f8848..986949f36c 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py @@ -25,14 +25,15 @@ from typing import Any from agent_framework import ( + WorkflowEvent, AgentRequestInfoResponse, ChatMessage, ConcurrentBuilder, - RequestInfoEvent, + Role, - WorkflowOutputEvent, + WorkflowRunState, - WorkflowStatusEvent, + tool, ) from agent_framework._workflows._agent_executor import AgentExecutorResponse @@ -156,7 +157,7 @@ async def main() -> None: # Process events async for event in stream: - if isinstance(event, 
RequestInfoEvent): + if event.type == "request_info": if isinstance(event.data, AgentExecutorResponse): # Display agent output for review and potential modification print("\n" + "-" * 40) @@ -189,7 +190,7 @@ async def main() -> None: pending_responses = {event.request_id: user_input} print("(Resuming workflow...)") - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": print("\n" + "=" * 60) print("WORKFLOW COMPLETE") print("=" * 60) @@ -199,7 +200,7 @@ async def main() -> None: print(event.data) workflow_complete = True - elif isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + elif event.type == "status" and event.state == WorkflowRunState.IDLE: workflow_complete = True diff --git a/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py b/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py index fcc1d1460c..a9bd5908c3 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py @@ -28,14 +28,11 @@ AgentExecutorResponse, AgentRequestInfoResponse, AgentResponse, - AgentRunUpdateEvent, + AgentResponseUpdate, ChatMessage, GroupChatBuilder, - RequestInfoEvent, - WorkflowOutputEvent, + WorkflowEvent, WorkflowRunState, - WorkflowStatusEvent, - tool, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential @@ -120,7 +117,7 @@ async def main() -> None: # Process events async for event in stream: - if isinstance(event, AgentRunUpdateEvent): + if event.type == "data" and isinstance(event.data, AgentResponseUpdate): # Show all agent responses as they stream if event.data and event.data.text: agent_name = event.data.author_name or "unknown" @@ -130,7 +127,7 @@ async def main() -> None: print(f"\n[{agent_name}]: ", end="", flush=True) print(event.data.text, end="", flush=True) - elif 
isinstance(event, RequestInfoEvent): + elif event.type == "request_info": current_agent = None # Reset for next agent if isinstance(event.data, AgentExecutorResponse): # Display pre-agent context for human input @@ -156,7 +153,7 @@ async def main() -> None: pending_responses = {event.request_id: AgentRequestInfoResponse.from_strings([user_input])} print("(Resuming discussion...)") - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": print("\n" + "=" * 60) print("DISCUSSION COMPLETE") print("=" * 60) @@ -170,7 +167,7 @@ async def main() -> None: print(f"[{role}][{name}]: {text}...") workflow_complete = True - elif isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + elif event.type == "status" and event.state == WorkflowRunState.IDLE: workflow_complete = True diff --git a/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py b/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py index 52a9d72901..599955bc9c 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py @@ -4,18 +4,19 @@ from dataclasses import dataclass from agent_framework import ( + WorkflowEvent, AgentExecutorRequest, # Message bundle sent to an AgentExecutor AgentExecutorResponse, ChatAgent, # Result returned by an AgentExecutor ChatMessage, # Chat message structure Executor, # Base class for workflow executors - RequestInfoEvent, # Event emitted when human input is requested + # Event emitted when human input is requested Role, # Enum of chat roles (user, assistant, system) WorkflowBuilder, # Fluent builder for assembling the graph WorkflowContext, # Per run context and event bus - WorkflowOutputEvent, # Event emitted when workflow yields output + # Event emitted when workflow yields output WorkflowRunState, # Enum of 
workflow run states - WorkflowStatusEvent, # Event emitted on run state changes + # Event emitted on run state changes handler, response_handler, # Decorator to expose an Executor method as a step tool, @@ -48,7 +49,7 @@ # How human-in-the-loop is achieved via `request_info` and `send_responses_streaming`: # - An executor (TurnManager) calls `ctx.request_info` with a payload (HumanFeedbackRequest). -# - The workflow run pauses and emits a RequestInfoEvent with the payload and the request_id. +# - The workflow run pauses and emits a request_info event with the payload and the request_id. # - The application captures the event, prompts the user, and collects replies. # - The application calls `send_responses_streaming` with a map of request_ids to replies. # - The workflow resumes, and the response is delivered to the executor method decorated with @response_handler. @@ -191,7 +192,7 @@ async def main() -> None: stream = ( workflow.send_responses_streaming(pending_responses) if pending_responses else workflow.run_stream("start") ) - # Collect events for this turn. Among these you may see WorkflowStatusEvent + # Collect events for this turn. Among these you may see a status event # with state IDLE_WITH_PENDING_REQUESTS when the workflow pauses for # human input, preceded by IN_PROGRESS_PENDING_REQUESTS as requests are # emitted. @@ -201,20 +202,20 @@ async def main() -> None: # Collect human requests, workflow outputs, and check for completion. requests: list[tuple[str, str]] = [] # (request_id, prompt) for event in events: - if isinstance(event, RequestInfoEvent) and isinstance(event.data, HumanFeedbackRequest): - # RequestInfoEvent for our HumanFeedbackRequest. + if event.type == "request_info" and isinstance(event.data, HumanFeedbackRequest): + # Request info event for our HumanFeedbackRequest. 
requests.append((event.request_id, event.data.prompt)) - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": # Capture workflow output as they're yielded workflow_output = str(event.data) # Detect run state transitions for a better developer experience. pending_status = any( - isinstance(e, WorkflowStatusEvent) and e.state == WorkflowRunState.IN_PROGRESS_PENDING_REQUESTS + e.type == "status" and e.state == WorkflowRunState.IN_PROGRESS_PENDING_REQUESTS for e in events ) idle_with_requests = any( - isinstance(e, WorkflowStatusEvent) and e.state == WorkflowRunState.IDLE_WITH_PENDING_REQUESTS + e.type == "status" and e.state == WorkflowRunState.IDLE_WITH_PENDING_REQUESTS for e in events ) if pending_status: diff --git a/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py b/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py index 401c24b5dd..197e21f491 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py @@ -13,7 +13,7 @@ Demonstrate: - Configuring request info with `.with_request_info()` -- Handling RequestInfoEvent with AgentInputRequest data +- Handling request_info events with AgentInputRequest data - Injecting responses back into the workflow via send_responses_streaming Prerequisites: @@ -24,14 +24,15 @@ import asyncio from agent_framework import ( + WorkflowEvent, AgentExecutorResponse, AgentRequestInfoResponse, ChatMessage, - RequestInfoEvent, + SequentialBuilder, - WorkflowOutputEvent, + WorkflowRunState, - WorkflowStatusEvent, + tool, ) from agent_framework.azure import AzureOpenAIChatClient @@ -91,7 +92,7 @@ async def main() -> None: # Process events async for event in stream: - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": if isinstance(event.data, AgentExecutorResponse): # Display agent response and conversation context for 
review print("\n" + "-" * 40) @@ -124,7 +125,7 @@ async def main() -> None: pending_responses = {event.request_id: user_input} print("(Resuming workflow...)") - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": print("\n" + "=" * 60) print("WORKFLOW COMPLETE") print("=" * 60) @@ -136,7 +137,7 @@ async def main() -> None: print(f"[{role}]: {msg.text}") workflow_complete = True - elif isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + elif event.type == "status" and event.state == WorkflowRunState.IDLE: workflow_complete = True diff --git a/python/samples/getting_started/workflows/observability/executor_io_observation.py b/python/samples/getting_started/workflows/observability/executor_io_observation.py index 54645f237d..4103ee7ef3 100644 --- a/python/samples/getting_started/workflows/observability/executor_io_observation.py +++ b/python/samples/getting_started/workflows/observability/executor_io_observation.py @@ -4,12 +4,13 @@ from typing import Any, cast from agent_framework import ( + WorkflowEvent, Executor, ExecutorCompletedEvent, ExecutorInvokedEvent, WorkflowBuilder, WorkflowContext, - WorkflowOutputEvent, + handler, tool, ) @@ -104,7 +105,7 @@ async def main() -> None: if event.data: print(f" Output: {format_io_data(event.data)}") - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": print(f"[WORKFLOW OUTPUT] {format_io_data(event.data)}") """ diff --git a/python/samples/getting_started/workflows/orchestration/concurrent_agents.py b/python/samples/getting_started/workflows/orchestration/concurrent_agents.py index 2be0f29f9c..ea2ede1c48 100644 --- a/python/samples/getting_started/workflows/orchestration/concurrent_agents.py +++ b/python/samples/getting_started/workflows/orchestration/concurrent_agents.py @@ -22,7 +22,7 @@ Prerequisites: - Azure OpenAI access configured for AzureOpenAIChatClient (use az login + env vars) -- Familiarity with Workflow events (AgentRunEvent, 
WorkflowOutputEvent) +- Familiarity with Workflow events (ExecutorEvent, WorkflowOutputEvent) """ diff --git a/python/samples/getting_started/workflows/orchestration/group_chat_agent_manager.py b/python/samples/getting_started/workflows/orchestration/group_chat_agent_manager.py index 926c787aaa..d5032f449a 100644 --- a/python/samples/getting_started/workflows/orchestration/group_chat_agent_manager.py +++ b/python/samples/getting_started/workflows/orchestration/group_chat_agent_manager.py @@ -3,13 +3,12 @@ import asyncio from agent_framework import ( - AgentRunUpdateEvent, + AgentResponseUpdate, ChatAgent, ChatMessage, GroupChatBuilder, Role, - WorkflowOutputEvent, - tool, + WorkflowEvent, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential @@ -85,9 +84,9 @@ async def main() -> None: # Keep track of the last executor to format output nicely in streaming mode last_executor_id: str | None = None - output_event: WorkflowOutputEvent | None = None + output_event: WorkflowEvent | None = None async for event in workflow.run_stream(task): - if isinstance(event, AgentRunUpdateEvent): + if event.type == "data" and isinstance(event.data, AgentResponseUpdate): eid = event.executor_id if eid != last_executor_id: if last_executor_id is not None: @@ -95,7 +94,7 @@ async def main() -> None: print(f"{eid}:", end=" ", flush=True) last_executor_id = eid print(event.data, end="", flush=True) - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": output_event = event # The output of the workflow is the full list of messages exchanged diff --git a/python/samples/getting_started/workflows/orchestration/group_chat_philosophical_debate.py b/python/samples/getting_started/workflows/orchestration/group_chat_philosophical_debate.py index 9be9192a57..6e35eae0bb 100644 --- a/python/samples/getting_started/workflows/orchestration/group_chat_philosophical_debate.py +++ 
b/python/samples/getting_started/workflows/orchestration/group_chat_philosophical_debate.py @@ -5,13 +5,12 @@ from typing import cast from agent_framework import ( - AgentRunUpdateEvent, + AgentResponseUpdate, ChatAgent, ChatMessage, GroupChatBuilder, Role, - WorkflowOutputEvent, - tool, + WorkflowEvent, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential @@ -241,7 +240,7 @@ async def main() -> None: current_speaker: str | None = None async for event in workflow.run_stream(f"Please begin the discussion on: {topic}"): - if isinstance(event, AgentRunUpdateEvent): + if event.type == "data" and isinstance(event.data, AgentResponseUpdate): if event.executor_id != current_speaker: if current_speaker is not None: print("\n") @@ -250,7 +249,7 @@ async def main() -> None: print(event.data, end="", flush=True) - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": final_conversation = cast(list[ChatMessage], event.data) print("\n\n" + "=" * 80) diff --git a/python/samples/getting_started/workflows/orchestration/group_chat_simple_selector.py b/python/samples/getting_started/workflows/orchestration/group_chat_simple_selector.py index cf64ef0aca..eaae5e0622 100644 --- a/python/samples/getting_started/workflows/orchestration/group_chat_simple_selector.py +++ b/python/samples/getting_started/workflows/orchestration/group_chat_simple_selector.py @@ -3,13 +3,12 @@ import asyncio from agent_framework import ( - AgentRunUpdateEvent, + AgentResponseUpdate, ChatAgent, ChatMessage, GroupChatBuilder, GroupChatState, - WorkflowOutputEvent, - tool, + WorkflowEvent, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential @@ -103,9 +102,9 @@ async def main() -> None: # Keep track of the last executor to format output nicely in streaming mode last_executor_id: str | None = None - output_event: WorkflowOutputEvent | None = None + output_event: WorkflowEvent | None = None 
async for event in workflow.run_stream(task): - if isinstance(event, AgentRunUpdateEvent): + if event.type == "data" and isinstance(event.data, AgentResponseUpdate): eid = event.executor_id if eid != last_executor_id: if last_executor_id is not None: @@ -113,7 +112,7 @@ async def main() -> None: print(f"{eid}:", end=" ", flush=True) last_executor_id = eid print(event.data, end="", flush=True) - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": output_event = event # The output of the workflow is the full list of messages exchanged diff --git a/python/samples/getting_started/workflows/orchestration/handoff_autonomous.py b/python/samples/getting_started/workflows/orchestration/handoff_autonomous.py index edab013700..45b6ef61e3 100644 --- a/python/samples/getting_started/workflows/orchestration/handoff_autonomous.py +++ b/python/samples/getting_started/workflows/orchestration/handoff_autonomous.py @@ -6,15 +6,12 @@ from agent_framework import ( AgentResponseUpdate, - AgentRunUpdateEvent, ChatAgent, ChatMessage, HandoffBuilder, HostedWebSearchTool, WorkflowEvent, - WorkflowOutputEvent, resolve_agent_id, - tool, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential @@ -82,7 +79,7 @@ def create_agents( def _display_event(event: WorkflowEvent) -> None: """Print the final conversation snapshot from workflow output events.""" - if isinstance(event, AgentRunUpdateEvent) and event.data: + if event.type == "data" and isinstance(event.data, AgentResponseUpdate): update: AgentResponseUpdate = event.data if not update.text: return @@ -91,7 +88,7 @@ def _display_event(event: WorkflowEvent) -> None: last_response_id = update.response_id print(f"\n- {update.author_name}: ", flush=True, end="") print(event.data, flush=True, end="") - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": conversation = cast(list[ChatMessage], event.data) print("\n=== Final Conversation (Autonomous with 
Iteration) ===") for message in conversation: diff --git a/python/samples/getting_started/workflows/orchestration/handoff_participant_factory.py b/python/samples/getting_started/workflows/orchestration/handoff_participant_factory.py index dd4e4054c8..d9d3aa4dd0 100644 --- a/python/samples/getting_started/workflows/orchestration/handoff_participant_factory.py +++ b/python/samples/getting_started/workflows/orchestration/handoff_participant_factory.py @@ -6,18 +6,14 @@ from agent_framework import ( AgentResponse, - AgentRunEvent, ChatAgent, ChatMessage, HandoffAgentUserRequest, HandoffBuilder, HandoffSentEvent, - RequestInfoEvent, Workflow, WorkflowEvent, - WorkflowOutputEvent, WorkflowRunState, - WorkflowStatusEvent, tool, ) from agent_framework.azure import AzureOpenAIChatClient @@ -107,26 +103,26 @@ def create_return_agent() -> ChatAgent: ) -def _handle_events(events: list[WorkflowEvent]) -> list[RequestInfoEvent]: +def _handle_events(events: list[WorkflowEvent]) -> list[WorkflowEvent[HandoffAgentUserRequest]]: """Process workflow events and extract any pending user input requests. This function inspects each event type and: - Prints workflow status changes (IDLE, IDLE_WITH_PENDING_REQUESTS, etc.) 
- Displays final conversation snapshots when workflow completes - Prints user input request prompts - - Collects all RequestInfoEvent instances for response handling + - Collects all request_info events for response handling Args: events: List of WorkflowEvent to process Returns: - List of RequestInfoEvent representing pending user input requests + List of WorkflowEvent[HandoffAgentUserRequest] representing pending user input requests """ - requests: list[RequestInfoEvent] = [] + requests: list[WorkflowEvent[HandoffAgentUserRequest]] = [] for event in events: - # AgentRunEvent: Contains messages generated by agents during their turn - if isinstance(event, AgentRunEvent): + # Data event with AgentResponse: Contains messages generated by agents during their turn + if event.type == "data" and isinstance(event.data, AgentResponse): for message in event.data.messages: if not message.text: # Skip messages without text (e.g., tool calls) @@ -138,15 +134,15 @@ def _handle_events(events: list[WorkflowEvent]) -> list[RequestInfoEvent]: if isinstance(event, HandoffSentEvent): print(f"\n[Handoff from {event.source} to {event.target} initiated.]") - # WorkflowStatusEvent: Indicates workflow state changes - if isinstance(event, WorkflowStatusEvent) and event.state in { + # Status event: Indicates workflow state changes + if event.type == "status" and event.state in { WorkflowRunState.IDLE, WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, }: print(f"\n[Workflow Status] {event.state.name}") - # WorkflowOutputEvent: Contains the final conversation when workflow terminates - elif isinstance(event, WorkflowOutputEvent): + # Output event: Contains the final conversation when workflow terminates + elif event.type == "output": conversation = cast(list[ChatMessage], event.data) if isinstance(conversation, list): print("\n=== Final Conversation Snapshot ===") @@ -155,11 +151,11 @@ def _handle_events(events: list[WorkflowEvent]) -> list[RequestInfoEvent]: print(f"- {speaker}: {message.text or 
[content.type for content in message.contents]}") print("===================================") - # RequestInfoEvent: Workflow is requesting user input - elif isinstance(event, RequestInfoEvent): + # Request info event: Workflow is requesting user input + elif event.type == "request_info": if isinstance(event.data, HandoffAgentUserRequest): _print_handoff_agent_user_request(event.data.agent_response) - requests.append(event) + requests.append(cast(WorkflowEvent[HandoffAgentUserRequest], event)) return requests diff --git a/python/samples/getting_started/workflows/orchestration/handoff_simple.py b/python/samples/getting_started/workflows/orchestration/handoff_simple.py index 72ea035a4f..38af3a2f45 100644 --- a/python/samples/getting_started/workflows/orchestration/handoff_simple.py +++ b/python/samples/getting_started/workflows/orchestration/handoff_simple.py @@ -5,17 +5,13 @@ from agent_framework import ( AgentResponse, - AgentRunEvent, ChatAgent, ChatMessage, HandoffAgentUserRequest, HandoffBuilder, HandoffSentEvent, - RequestInfoEvent, WorkflowEvent, - WorkflowOutputEvent, WorkflowRunState, - WorkflowStatusEvent, tool, ) from agent_framework.azure import AzureOpenAIChatClient @@ -102,26 +98,26 @@ def create_agents(chat_client: AzureOpenAIChatClient) -> tuple[ChatAgent, ChatAg return triage_agent, refund_agent, order_agent, return_agent -def _handle_events(events: list[WorkflowEvent]) -> list[RequestInfoEvent]: +def _handle_events(events: list[WorkflowEvent]) -> list[WorkflowEvent[HandoffAgentUserRequest]]: """Process workflow events and extract any pending user input requests. This function inspects each event type and: - Prints workflow status changes (IDLE, IDLE_WITH_PENDING_REQUESTS, etc.) 
- Displays final conversation snapshots when workflow completes - Prints user input request prompts - - Collects all RequestInfoEvent instances for response handling + - Collects all request_info events for response handling Args: events: List of WorkflowEvent to process Returns: - List of RequestInfoEvent representing pending user input requests + List of WorkflowEvent[HandoffAgentUserRequest] representing pending user input requests """ - requests: list[RequestInfoEvent] = [] + requests: list[WorkflowEvent[HandoffAgentUserRequest]] = [] for event in events: - # AgentRunEvent: Contains messages generated by agents during their turn - if isinstance(event, AgentRunEvent): + # Data event with AgentResponse: Contains messages generated by agents during their turn + if event.type == "data" and isinstance(event.data, AgentResponse): for message in event.data.messages: if not message.text: # Skip messages without text (e.g., tool calls) @@ -133,15 +129,15 @@ def _handle_events(events: list[WorkflowEvent]) -> list[RequestInfoEvent]: if isinstance(event, HandoffSentEvent): print(f"\n[Handoff from {event.source} to {event.target} initiated.]") - # WorkflowStatusEvent: Indicates workflow state changes - if isinstance(event, WorkflowStatusEvent) and event.state in { + # Status event: Indicates workflow state changes + if event.type == "status" and event.state in { WorkflowRunState.IDLE, WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, }: print(f"\n[Workflow Status] {event.state.name}") - # WorkflowOutputEvent: Contains the final conversation when workflow terminates - elif isinstance(event, WorkflowOutputEvent): + # Output event: Contains the final conversation when workflow terminates + elif event.type == "output": conversation = cast(list[ChatMessage], event.data) if isinstance(conversation, list): print("\n=== Final Conversation Snapshot ===") @@ -150,11 +146,11 @@ def _handle_events(events: list[WorkflowEvent]) -> list[RequestInfoEvent]: print(f"- {speaker}: {message.text or 
[content.type for content in message.contents]}") print("===================================") - # RequestInfoEvent: Workflow is requesting user input - elif isinstance(event, RequestInfoEvent): + # Request info event: Workflow is requesting user input + elif event.type == "request_info": if isinstance(event.data, HandoffAgentUserRequest): _print_handoff_agent_user_request(event.data.agent_response) - requests.append(event) + requests.append(cast(WorkflowEvent[HandoffAgentUserRequest], event)) return requests diff --git a/python/samples/getting_started/workflows/orchestration/handoff_with_code_interpreter_file.py b/python/samples/getting_started/workflows/orchestration/handoff_with_code_interpreter_file.py index 54f7f4504c..23d90d3bdb 100644 --- a/python/samples/getting_started/workflows/orchestration/handoff_with_code_interpreter_file.py +++ b/python/samples/getting_started/workflows/orchestration/handoff_with_code_interpreter_file.py @@ -6,7 +6,7 @@ This sample demonstrates retrieving file IDs from code interpreter output in a handoff workflow context. A triage agent routes to a code specialist that generates a text file, and we verify the file_id is captured correctly -from the streaming AgentRunUpdateEvent events. +from the streaming ExecutorEvent events. Verifies GitHub issue #2718: files generated by code interpreter in HandoffBuilder workflows can be properly retrieved. 
@@ -30,18 +30,15 @@ from contextlib import asynccontextmanager from agent_framework import ( - AgentRunUpdateEvent, + AgentResponseUpdate, ChatAgent, Content, HandoffAgentUserRequest, HandoffBuilder, HostedCodeInterpreterTool, HostedFileContent, - RequestInfoEvent, WorkflowEvent, WorkflowRunState, - WorkflowStatusEvent, - tool, ) from azure.identity.aio import AzureCliCredential @@ -54,24 +51,26 @@ async def _drain(stream: AsyncIterable[WorkflowEvent]) -> list[WorkflowEvent]: return [event async for event in stream] -def _handle_events(events: list[WorkflowEvent]) -> tuple[list[RequestInfoEvent], list[str]]: +def _handle_events(events: list[WorkflowEvent]) -> tuple[list[WorkflowEvent[HandoffAgentUserRequest]], list[str]]: """Process workflow events and extract file IDs and pending requests. Returns: Tuple of (pending_requests, file_ids_found) """ - requests: list[RequestInfoEvent] = [] + from typing import cast + + requests: list[WorkflowEvent[HandoffAgentUserRequest]] = [] file_ids: list[str] = [] for event in events: - if isinstance(event, WorkflowStatusEvent): + if event.type == "status": if event.state in {WorkflowRunState.IDLE, WorkflowRunState.IDLE_WITH_PENDING_REQUESTS}: print(f"[status] {event.state.name}") - elif isinstance(event, RequestInfoEvent): - requests.append(event) + elif event.type == "request_info" and isinstance(event.data, HandoffAgentUserRequest): + requests.append(cast(WorkflowEvent[HandoffAgentUserRequest], event)) - elif isinstance(event, AgentRunUpdateEvent): + elif event.type == "data" and isinstance(event.data, AgentResponseUpdate): for content in event.data.contents: if isinstance(content, HostedFileContent): file_ids.append(content.file_id) diff --git a/python/samples/getting_started/workflows/orchestration/magentic.py b/python/samples/getting_started/workflows/orchestration/magentic.py index d153d41d9c..3a7021650d 100644 --- a/python/samples/getting_started/workflows/orchestration/magentic.py +++ 
b/python/samples/getting_started/workflows/orchestration/magentic.py @@ -6,7 +6,7 @@ from typing import cast from agent_framework import ( - AgentRunUpdateEvent, + AgentResponseUpdate, ChatAgent, ChatMessage, GroupChatRequestSentEvent, @@ -14,8 +14,7 @@ MagenticBuilder, MagenticOrchestratorEvent, MagenticProgressLedger, - WorkflowOutputEvent, - tool, + WorkflowEvent, ) from agent_framework.openai import OpenAIChatClient, OpenAIResponsesClient @@ -104,9 +103,9 @@ async def main() -> None: # Keep track of the last executor to format output nicely in streaming mode last_message_id: str | None = None - output_event: WorkflowOutputEvent | None = None + output_event: WorkflowEvent | None = None async for event in workflow.run_stream(task): - if isinstance(event, AgentRunUpdateEvent): + if event.type == "data" and isinstance(event.data, AgentResponseUpdate): message_id = event.data.message_id if message_id != last_message_id: if last_message_id is not None: @@ -132,7 +131,7 @@ async def main() -> None: elif isinstance(event, GroupChatRequestSentEvent): print(f"\n[REQUEST SENT ({event.round_index})] to agent: {event.participant_name}") - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": output_event = event if not output_event: diff --git a/python/samples/getting_started/workflows/orchestration/magentic_checkpoint.py b/python/samples/getting_started/workflows/orchestration/magentic_checkpoint.py index 3c68931a18..a945830737 100644 --- a/python/samples/getting_started/workflows/orchestration/magentic_checkpoint.py +++ b/python/samples/getting_started/workflows/orchestration/magentic_checkpoint.py @@ -6,16 +6,17 @@ from typing import cast from agent_framework import ( + WorkflowEvent, ChatAgent, ChatMessage, FileCheckpointStorage, MagenticBuilder, MagenticPlanReviewRequest, - RequestInfoEvent, + WorkflowCheckpoint, - WorkflowOutputEvent, + WorkflowRunState, - WorkflowStatusEvent, + tool, ) from agent_framework.azure import AzureOpenAIChatClient @@ 
-107,16 +108,16 @@ async def main() -> None: print("\n=== Stage 1: run until plan review request (checkpointing active) ===") workflow = build_workflow(checkpoint_storage) - # Run the workflow until the first RequestInfoEvent is surfaced. The event carries the + # Run the workflow until the first request_info event is surfaced. The event carries the # request_id we must reuse on resume. In a real system this is where the UI would present # the plan for human review. plan_review_request: MagenticPlanReviewRequest | None = None async for event in workflow.run_stream(TASK): - if isinstance(event, RequestInfoEvent) and event.request_type is MagenticPlanReviewRequest: + if event.type == "request_info" and event.request_type is MagenticPlanReviewRequest: plan_review_request = event.data print(f"Captured plan review request: {event.request_id}") - if isinstance(event, WorkflowStatusEvent) and event.state is WorkflowRunState.IDLE_WITH_PENDING_REQUESTS: + if event.type == "status" and event.state is WorkflowRunState.IDLE_WITH_PENDING_REQUESTS: break if plan_review_request is None: @@ -149,9 +150,9 @@ async def main() -> None: approval = plan_review_request.approve() # Resume execution and capture the re-emitted plan review request. - request_info_event: RequestInfoEvent | None = None + request_info_event: WorkflowEvent | None = None async for event in resumed_workflow.run_stream(checkpoint_id=resume_checkpoint.checkpoint_id): - if isinstance(event, RequestInfoEvent) and isinstance(event.data, MagenticPlanReviewRequest): + if event.type == "request_info" and isinstance(event.data, MagenticPlanReviewRequest): request_info_event = event if request_info_event is None: @@ -160,9 +161,9 @@ async def main() -> None: print(f"Resumed plan review request: {request_info_event.request_id}") # Supply the approval and continue to run to completion.
- final_event: WorkflowOutputEvent | None = None + final_event: WorkflowEvent | None = None async for event in resumed_workflow.send_responses_streaming({request_info_event.request_id: approval}): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": final_event = event if final_event is None: @@ -220,12 +221,12 @@ def _pending_message_count(cp: WorkflowCheckpoint) -> int: if pending_messages == 0: print("Checkpoint has no pending messages; no additional work expected on resume.") - final_event_post: WorkflowOutputEvent | None = None + final_event_post: WorkflowEvent | None = None post_emitted_events = False post_plan_workflow = build_workflow(checkpoint_storage) async for event in post_plan_workflow.run_stream(checkpoint_id=post_plan_checkpoint.checkpoint_id): post_emitted_events = True - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": final_event_post = event if final_event_post is None: diff --git a/python/samples/getting_started/workflows/orchestration/magentic_human_plan_review.py b/python/samples/getting_started/workflows/orchestration/magentic_human_plan_review.py index bba6913a3b..024a481f37 100644 --- a/python/samples/getting_started/workflows/orchestration/magentic_human_plan_review.py +++ b/python/samples/getting_started/workflows/orchestration/magentic_human_plan_review.py @@ -5,14 +5,12 @@ from typing import cast from agent_framework import ( - AgentRunUpdateEvent, + AgentResponseUpdate, ChatAgent, ChatMessage, MagenticBuilder, MagenticPlanReviewRequest, - RequestInfoEvent, - WorkflowOutputEvent, - tool, + WorkflowEvent, ) from agent_framework.openai import OpenAIChatClient @@ -80,9 +78,9 @@ async def main() -> None: print("\nStarting workflow execution...") print("=" * 60) - pending_request: RequestInfoEvent | None = None + pending_request: WorkflowEvent[MagenticPlanReviewRequest] | None = None pending_responses: dict[str, object] | None = None - output_event: WorkflowOutputEvent | None = None + 
output_event: WorkflowEvent | None = None while not output_event: if pending_responses is not None: @@ -92,7 +90,7 @@ async def main() -> None: last_message_id: str | None = None async for event in stream: - if isinstance(event, AgentRunUpdateEvent): + if event.type == "data" and isinstance(event.data, AgentResponseUpdate): message_id = event.data.message_id if message_id != last_message_id: if last_message_id is not None: @@ -101,10 +99,10 @@ async def main() -> None: last_message_id = message_id print(event.data, end="", flush=True) - elif isinstance(event, RequestInfoEvent) and event.request_type is MagenticPlanReviewRequest: - pending_request = event + elif event.type == "request_info" and event.request_type is MagenticPlanReviewRequest: + pending_request = cast(WorkflowEvent[MagenticPlanReviewRequest], event) - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": output_event = event pending_responses = None diff --git a/python/samples/getting_started/workflows/orchestration/sequential_agents.py b/python/samples/getting_started/workflows/orchestration/sequential_agents.py index 64ccbc6150..6f2c3cf277 100644 --- a/python/samples/getting_started/workflows/orchestration/sequential_agents.py +++ b/python/samples/getting_started/workflows/orchestration/sequential_agents.py @@ -3,7 +3,7 @@ import asyncio from typing import cast -from agent_framework import ChatMessage, Role, SequentialBuilder, WorkflowOutputEvent +from agent_framework import ChatMessage, Role, SequentialBuilder, WorkflowEvent from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential @@ -47,7 +47,7 @@ async def main() -> None: # 3) Run and collect outputs outputs: list[list[ChatMessage]] = [] async for event in workflow.run_stream("Write a tagline for a budget-friendly eBike."): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": outputs.append(cast(list[ChatMessage], event.data)) if outputs: diff --git 
a/python/samples/getting_started/workflows/parallelism/aggregate_results_of_different_types.py b/python/samples/getting_started/workflows/parallelism/aggregate_results_of_different_types.py index f59b1ea0c8..1ec4864037 100644 --- a/python/samples/getting_started/workflows/parallelism/aggregate_results_of_different_types.py +++ b/python/samples/getting_started/workflows/parallelism/aggregate_results_of_different_types.py @@ -3,7 +3,7 @@ import asyncio import random -from agent_framework import Executor, WorkflowBuilder, WorkflowContext, WorkflowOutputEvent, handler +from agent_framework import Executor, WorkflowBuilder, WorkflowContext, handler from typing_extensions import Never """ @@ -88,7 +88,7 @@ async def main() -> None: # 2) Run the workflow output: list[int | float] | None = None async for event in workflow.run_stream([random.randint(1, 100) for _ in range(10)]): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": output = event.data if output is not None: diff --git a/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py b/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py index 36c2ca24f6..62a4e9bf62 100644 --- a/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py +++ b/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py @@ -3,7 +3,8 @@ import asyncio from dataclasses import dataclass -from agent_framework import ( # Core chat primitives to build LLM requests +from agent_framework import ( + WorkflowEvent, # Core chat primitives to build LLM requests AgentExecutorRequest, # The message bundle sent to an AgentExecutor AgentExecutorResponse, # The structured result returned by an AgentExecutor ChatAgent, # Tracing event for agent execution steps @@ -14,7 +15,7 @@ Role, # Enum of chat roles (user, assistant, system) WorkflowBuilder, # Fluent builder for wiring the workflow graph WorkflowContext, # Per run context and event bus - 
WorkflowOutputEvent, # Event emitted when workflow yields output + # Event emitted when workflow yields output handler, # Decorator to mark an Executor method as invokable tool, ) @@ -148,7 +149,7 @@ async def main() -> None: print(f"{event.executor_id} invoked") elif isinstance(event, ExecutorCompletedEvent): print(f"{event.executor_id} completed") - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": print("===== Final Aggregated Output =====") print(event.data) diff --git a/python/samples/getting_started/workflows/parallelism/map_reduce_and_visualization.py b/python/samples/getting_started/workflows/parallelism/map_reduce_and_visualization.py index d98c6cb78b..78c563c9ac 100644 --- a/python/samples/getting_started/workflows/parallelism/map_reduce_and_visualization.py +++ b/python/samples/getting_started/workflows/parallelism/map_reduce_and_visualization.py @@ -8,10 +8,11 @@ import aiofiles from agent_framework import ( + WorkflowEvent, Executor, # Base class for custom workflow steps WorkflowBuilder, # Fluent builder for executors and edges WorkflowContext, # Per run context with shared state and messaging - WorkflowOutputEvent, # Event emitted when workflow yields output + # Event emitted when workflow yields output WorkflowViz, # Utility to visualize a workflow graph handler, # Decorator to expose an Executor method as a step tool, @@ -332,7 +333,7 @@ async def main(): # Step 4: Run the workflow with the raw text as input. 
async for event in workflow.run_stream(raw_text): print(f"Event: {event}") - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": print(f"Final Output: {event.data}") diff --git a/python/samples/getting_started/workflows/state-management/workflow_kwargs.py b/python/samples/getting_started/workflows/state-management/workflow_kwargs.py index bf7320f834..293b0579a1 100644 --- a/python/samples/getting_started/workflows/state-management/workflow_kwargs.py +++ b/python/samples/getting_started/workflows/state-management/workflow_kwargs.py @@ -4,7 +4,7 @@ import json from typing import Annotated, Any -from agent_framework import ChatMessage, SequentialBuilder, WorkflowOutputEvent, tool +from agent_framework import ChatMessage, SequentialBuilder, tool from agent_framework.openai import OpenAIChatClient from pydantic import Field @@ -117,7 +117,7 @@ async def main() -> None: custom_data=custom_data, user_token=user_token, ): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": output_data = event.data if isinstance(output_data, list): for item in output_data: diff --git a/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py index b43e01916f..49dd648e83 100644 --- a/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py +++ b/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py @@ -4,11 +4,12 @@ from typing import Annotated from agent_framework import ( + WorkflowEvent, ChatMessage, ConcurrentBuilder, Content, - RequestInfoEvent, - WorkflowOutputEvent, + + tool, ) from agent_framework.openai import OpenAIChatClient @@ -34,7 +35,7 @@ Demonstrate: - Handling multiple approval requests from different agents in concurrent workflows. -- Handling RequestInfoEvent during concurrent agent execution. 
+- Handling request_info events during concurrent agent execution. - Understanding that approval pauses only the agent that triggered it, not all agents. Prerequisites: @@ -84,12 +85,12 @@ def get_portfolio_balance() -> str: return "Portfolio: $50,000 invested, $10,000 cash available. Holdings: AAPL, GOOGL, MSFT." -def _print_output(event: WorkflowOutputEvent) -> None: +def _print_output(event: WorkflowEvent) -> None: if not event.data: - raise ValueError("WorkflowOutputEvent has no data") + raise ValueError("WorkflowEvent has no data") if not isinstance(event.data, list) and not all(isinstance(msg, ChatMessage) for msg in event.data): - raise ValueError("WorkflowOutputEvent data is not a list of ChatMessage") + raise ValueError("WorkflowEvent data is not a list of ChatMessage") messages: list[ChatMessage] = event.data # type: ignore @@ -131,17 +132,17 @@ async def main() -> None: print("-" * 60) # Phase 1: Run workflow and collect request info events - request_info_events: list[RequestInfoEvent] = [] + request_info_events: list[WorkflowEvent] = [] async for event in workflow.run_stream( "Manage my portfolio. Use a max of 5000 dollars to adjust my position using " "your best judgment based on market sentiment. No need to confirm trades with me." ): - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": request_info_events.append(event) if isinstance(event.data, Content) and event.data.type == "function_approval_request": print(f"\nApproval requested for tool: {event.data.function_call.name}") print(f" Arguments: {event.data.function_call.arguments}") - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": _print_output(event) # 6.
Handle approval requests (if any) @@ -156,7 +157,7 @@ async def main() -> None: if responses: # Phase 2: Send all approvals and continue workflow async for event in workflow.send_responses_streaming(responses): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": _print_output(event) else: print("\nWorkflow completed without requiring approvals.") diff --git a/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py index b4bc773eba..7b4e3c4cf6 100644 --- a/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py +++ b/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py @@ -4,12 +4,12 @@ from typing import Annotated from agent_framework import ( - AgentRunUpdateEvent, + AgentResponseUpdate, Content, GroupChatBuilder, GroupChatRequestSentEvent, GroupChatState, - RequestInfoEvent, + WorkflowEvent, tool, ) from agent_framework.openai import OpenAIChatClient @@ -136,19 +136,19 @@ async def main() -> None: print("-" * 60) # Phase 1: Run workflow and collect all events (stream ends at IDLE or IDLE_WITH_PENDING_REQUESTS) - request_info_events: list[RequestInfoEvent] = [] + request_info_events: list[WorkflowEvent] = [] # Keep track of the last response to format output nicely in streaming mode last_response_id: str | None = None async for event in workflow.run_stream( "We need to deploy version 2.4.0 to production. Please coordinate the deployment." 
): - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": request_info_events.append(event) if isinstance(event.data, Content) and event.data.type == "function_approval_request": print("\n[APPROVAL REQUIRED] From agent:", event.source_executor_id) print(f" Tool: {event.data.function_call.name}") print(f" Arguments: {event.data.function_call.arguments}") - elif isinstance(event, AgentRunUpdateEvent): + elif event.type == "data" and isinstance(event.data, AgentResponseUpdate): if not event.data.text: continue # Skip empty updates response_id = event.data.response_id @@ -178,7 +178,7 @@ async def main() -> None: # Keep track of the response to format output nicely in streaming mode last_response_id: str | None = None async for event in workflow.send_responses_streaming({request_event.request_id: approval_response}): - if isinstance(event, AgentRunUpdateEvent): + if event.type == "data" and isinstance(event.data, AgentResponseUpdate): if not event.data.text: continue # Skip empty updates response_id = event.data.response_id diff --git a/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py index 7712873943..1b66c154ef 100644 --- a/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py +++ b/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py @@ -4,11 +4,12 @@ from typing import Annotated from agent_framework import ( + WorkflowEvent, ChatMessage, Content, - RequestInfoEvent, + SequentialBuilder, - WorkflowOutputEvent, + tool, ) from agent_framework.openai import OpenAIChatClient @@ -24,7 +25,7 @@ 1. A SequentialBuilder workflow is created with a single agent that has tools requiring approval. 2. The agent receives a user task and determines it needs to call a sensitive tool. 3. 
The tool call triggers a function_approval_request Content, pausing the workflow. -4. The sample simulates human approval by responding to the RequestInfoEvent. +4. The sample simulates human approval by responding to the request_info event. 5. Once approved, the tool executes and the agent completes its response. 6. The workflow outputs the final conversation with all messages. @@ -34,7 +35,7 @@ Demonstrate: - Using @tool(approval_mode="always_require") for sensitive operations. -- Handling RequestInfoEvent with function_approval_request Content in sequential workflows. +- Handling request_info events with function_approval_request Content in sequential workflows. - Resuming workflow execution after approval via send_responses_streaming. Prerequisites: @@ -86,11 +87,11 @@ async def main() -> None: print("-" * 60) # Phase 1: Run workflow and collect all events (stream ends at IDLE or IDLE_WITH_PENDING_REQUESTS) - request_info_events: list[RequestInfoEvent] = [] + request_info_events: list[WorkflowEvent] = [] async for event in workflow.run_stream( "Check the schema and then update all orders with status 'pending' to 'processing'" ): - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": request_info_events.append(event) if isinstance(event.data, Content) and event.data.type == "function_approval_request": print(f"\nApproval requested for tool: {event.data.function_call.name}") @@ -109,7 +110,7 @@ async def main() -> None: # Phase 2: Send approval and continue workflow output: list[ChatMessage] | None = None async for event in workflow.send_responses_streaming({request_event.request_id: approval_response}): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": output = event.data if output: diff --git a/python/samples/semantic-kernel-migration/orchestrations/concurrent_basic.py b/python/samples/semantic-kernel-migration/orchestrations/concurrent_basic.py index b07a3393a8..e0a6070c14 100644 ---
a/python/samples/semantic-kernel-migration/orchestrations/concurrent_basic.py +++ b/python/samples/semantic-kernel-migration/orchestrations/concurrent_basic.py @@ -6,7 +6,7 @@ from collections.abc import Sequence from typing import cast -from agent_framework import ChatMessage, ConcurrentBuilder, WorkflowOutputEvent +from agent_framework import ChatMessage, ConcurrentBuilder, WorkflowEvent from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential from semantic_kernel.agents import Agent, ChatCompletionAgent, ConcurrentOrchestration @@ -91,7 +91,7 @@ async def run_agent_framework_example(prompt: str) -> Sequence[list[ChatMessage] outputs: list[list[ChatMessage]] = [] async for event in workflow.run_stream(prompt): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": outputs.append(cast(list[ChatMessage], event.data)) return outputs diff --git a/python/samples/semantic-kernel-migration/orchestrations/group_chat.py b/python/samples/semantic-kernel-migration/orchestrations/group_chat.py index 4ce31f3a04..14047bab84 100644 --- a/python/samples/semantic-kernel-migration/orchestrations/group_chat.py +++ b/python/samples/semantic-kernel-migration/orchestrations/group_chat.py @@ -7,7 +7,7 @@ from collections.abc import Sequence from typing import Any, cast -from agent_framework import ChatAgent, ChatMessage, GroupChatBuilder, WorkflowOutputEvent +from agent_framework import ChatAgent, ChatMessage, GroupChatBuilder, WorkflowEvent from agent_framework.azure import AzureOpenAIChatClient, AzureOpenAIResponsesClient from azure.identity import AzureCliCredential from semantic_kernel.agents import Agent, ChatCompletionAgent, GroupChatOrchestration @@ -240,7 +240,7 @@ async def run_agent_framework_example(task: str) -> str: final_response = "" async for event in workflow.run_stream(task): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": data = event.data if isinstance(data, list) and len(data) > 
0: # Get the final message from the conversation diff --git a/python/samples/semantic-kernel-migration/orchestrations/handoff.py b/python/samples/semantic-kernel-migration/orchestrations/handoff.py index bd4cfccec4..4dd94c7d5f 100644 --- a/python/samples/semantic-kernel-migration/orchestrations/handoff.py +++ b/python/samples/semantic-kernel-migration/orchestrations/handoff.py @@ -10,9 +10,9 @@ ChatMessage, HandoffBuilder, HandoffUserInputRequest, - RequestInfoEvent, + WorkflowEvent, - WorkflowOutputEvent, + tool, ) from agent_framework.azure import AzureOpenAIChatClient @@ -215,17 +215,17 @@ async def _drain_events(stream: AsyncIterable[WorkflowEvent]) -> list[WorkflowEv return [event async for event in stream] -def _collect_handoff_requests(events: list[WorkflowEvent]) -> list[RequestInfoEvent]: - requests: list[RequestInfoEvent] = [] +def _collect_handoff_requests(events: list[WorkflowEvent]) -> list[WorkflowEvent]: + requests: list[WorkflowEvent] = [] for event in events: - if isinstance(event, RequestInfoEvent) and isinstance(event.data, HandoffUserInputRequest): + if event.type == "request_info" and isinstance(event.data, HandoffUserInputRequest): requests.append(event) return requests def _extract_final_conversation(events: list[WorkflowEvent]) -> list[ChatMessage]: for event in events: - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": data = cast(list[ChatMessage], event.data) return data return [] diff --git a/python/samples/semantic-kernel-migration/orchestrations/magentic.py b/python/samples/semantic-kernel-migration/orchestrations/magentic.py index 3d9aa67ea8..17259cdd12 100644 --- a/python/samples/semantic-kernel-migration/orchestrations/magentic.py +++ b/python/samples/semantic-kernel-migration/orchestrations/magentic.py @@ -6,7 +6,7 @@ from collections.abc import Sequence from typing import cast -from agent_framework import ChatAgent, HostedCodeInterpreterTool, MagenticBuilder, WorkflowOutputEvent +from agent_framework import 
ChatAgent, HostedCodeInterpreterTool, MagenticBuilder, WorkflowEvent from agent_framework.openai import OpenAIChatClient, OpenAIResponsesClient from semantic_kernel.agents import ( Agent, @@ -148,7 +148,7 @@ async def run_agent_framework_example(prompt: str) -> str | None: final_text: str | None = None async for event in workflow.run_stream(prompt): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": final_text = cast(str, event.data) return final_text diff --git a/python/samples/semantic-kernel-migration/orchestrations/sequential.py b/python/samples/semantic-kernel-migration/orchestrations/sequential.py index 0a2bafb3bb..4136fa963a 100644 --- a/python/samples/semantic-kernel-migration/orchestrations/sequential.py +++ b/python/samples/semantic-kernel-migration/orchestrations/sequential.py @@ -6,7 +6,7 @@ from collections.abc import Sequence from typing import cast -from agent_framework import ChatMessage, Role, SequentialBuilder, WorkflowOutputEvent +from agent_framework import ChatMessage, Role, SequentialBuilder, WorkflowEvent from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential from semantic_kernel.agents import Agent, ChatCompletionAgent, SequentialOrchestration @@ -77,7 +77,7 @@ async def run_agent_framework_example(prompt: str) -> list[ChatMessage]: conversation_outputs: list[list[ChatMessage]] = [] async for event in workflow.run_stream(prompt): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": conversation_outputs.append(cast(list[ChatMessage], event.data)) return conversation_outputs[-1] if conversation_outputs else [] diff --git a/python/samples/semantic-kernel-migration/processes/fan_out_fan_in_process.py b/python/samples/semantic-kernel-migration/processes/fan_out_fan_in_process.py index 626421ddc9..c29a19a55e 100644 --- a/python/samples/semantic-kernel-migration/processes/fan_out_fan_in_process.py +++ 
b/python/samples/semantic-kernel-migration/processes/fan_out_fan_in_process.py @@ -11,7 +11,7 @@ ###################################################################### # region Agent Framework imports ###################################################################### -from agent_framework import Executor, WorkflowBuilder, WorkflowContext, WorkflowOutputEvent, handler +from agent_framework import Executor, WorkflowBuilder, WorkflowContext, handler from pydantic import BaseModel, Field ###################################################################### @@ -232,7 +232,7 @@ async def run_agent_framework_workflow_example() -> str | None: final_text: str | None = None async for event in workflow.run_stream(CommonEvents.START_PROCESS): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": final_text = cast(str, event.data) return final_text diff --git a/python/samples/semantic-kernel-migration/processes/nested_process.py b/python/samples/semantic-kernel-migration/processes/nested_process.py index e649103703..36b9396fcb 100644 --- a/python/samples/semantic-kernel-migration/processes/nested_process.py +++ b/python/samples/semantic-kernel-migration/processes/nested_process.py @@ -17,7 +17,7 @@ WorkflowBuilder, WorkflowContext, WorkflowExecutor, - WorkflowOutputEvent, + handler, tool, ) @@ -258,7 +258,7 @@ async def run_agent_framework_nested_workflow(initial_message: str) -> Sequence[ results: list[str] = [] async for event in outer_workflow.run_stream(initial_message): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": results.append(cast(str, event.data)) return results From 26618d0aeab3fbdcf8cef5fe375b9b87741864fd Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Thu, 5 Feb 2026 16:16:20 +0900 Subject: [PATCH 02/12] Merge main --- .../agent_framework/_workflows/__init__.py | 4 - .../core/agent_framework/_workflows/_agent.py | 201 +++++++++++------- .../_workflows/_agent_executor.py | 13 +- 
.../_base_group_chat_orchestrator.py | 2 +- .../agent_framework/_workflows/_events.py | 22 +- .../_workflows/_runner_context.py | 8 +- .../agent_framework/_workflows/_workflow.py | 6 +- .../_workflows/_workflow_context.py | 11 +- .../_workflows/_workflow_executor.py | 36 ++-- .../tests/workflow/test_agent_executor.py | 11 +- .../test_agent_executor_tool_calls.py | 6 +- .../workflow/test_agent_run_event_typing.py | 6 +- .../workflow/test_checkpoint_validation.py | 2 +- .../core/tests/workflow/test_executor.py | 10 +- .../tests/workflow/test_full_conversation.py | 4 +- .../test_request_info_and_response.py | 21 +- .../core/tests/workflow/test_runner.py | 2 - .../core/tests/workflow/test_workflow.py | 60 +++--- .../tests/workflow/test_workflow_agent.py | 8 +- .../tests/workflow/test_workflow_context.py | 12 +- .../tests/workflow/test_workflow_kwargs.py | 1 - .../tests/workflow/test_workflow_states.py | 47 ++-- .../tests/test_workflow_factory.py | 2 +- python/packages/devui/README.md | 20 +- .../devui/agent_framework_devui/_executor.py | 18 +- .../devui/agent_framework_devui/_mapper.py | 14 +- .../packages/devui/tests/test_checkpoints.py | 2 +- python/packages/devui/tests/test_execution.py | 6 +- python/packages/devui/tests/test_helpers.py | 2 +- python/packages/devui/tests/test_mapper.py | 32 +-- .../_concurrent.py | 4 +- .../_group_chat.py | 2 +- .../_handoff.py | 2 +- .../_sequential.py | 2 +- .../orchestrations/tests/test_concurrent.py | 51 +++-- .../orchestrations/tests/test_group_chat.py | 24 +-- .../orchestrations/tests/test_handoff.py | 28 ++- .../orchestrations/tests/test_magentic.py | 52 ++--- .../orchestrations/tests/test_sequential.py | 51 +++-- .../getting_started/orchestrations/README.md | 4 +- .../group_chat_agent_manager.py | 2 +- .../group_chat_philosophical_debate.py | 2 +- .../group_chat_simple_selector.py | 2 +- .../magentic_human_plan_review.py | 8 +- .../getting_started/workflows/README.md | 6 +- ...re_chat_agents_tool_calls_with_feedback.py | 2 
+- .../agents/concurrent_workflow_as_agent.py | 2 +- .../agents/group_chat_workflow_as_agent.py | 2 +- .../agents/magentic_workflow_as_agent.py | 2 +- .../checkpoint/checkpoint_with_resume.py | 15 +- .../sub_workflow_parallel_requests.py | 12 +- .../control-flow/sequential_streaming.py | 12 +- .../workflows/control-flow/simple_loop.py | 4 +- .../human-in-the-loop/agents_with_HITL.py | 24 +-- .../agents_with_approval_requests.py | 5 +- .../concurrent_request_info.py | 11 +- .../group_chat_request_info.py | 8 +- .../observability/executor_io_observation.py | 13 +- .../parallelism/fan_out_fan_in_edges.py | 9 +- .../group_chat_builder_tool_approval.py | 2 +- python/uv.lock | 40 ++-- 61 files changed, 518 insertions(+), 474 deletions(-) diff --git a/python/packages/core/agent_framework/_workflows/__init__.py b/python/packages/core/agent_framework/_workflows/__init__.py index 7dccda6682..7d2f8263b5 100644 --- a/python/packages/core/agent_framework/_workflows/__init__.py +++ b/python/packages/core/agent_framework/_workflows/__init__.py @@ -37,12 +37,10 @@ ) from ._edge_runner import create_edge_runner from ._events import ( - ExecutorEvent, WorkflowErrorDetails, WorkflowEvent, WorkflowEventSource, WorkflowEventType, - WorkflowLifecycleEvent, WorkflowRunState, ) from ._exceptions import ( @@ -97,7 +95,6 @@ "EdgeCondition", "EdgeDuplicationError", "Executor", - "ExecutorEvent", "FanInEdgeGroup", "FanOutEdgeGroup", "FileCheckpointStorage", @@ -134,7 +131,6 @@ "WorkflowEventType", "WorkflowException", "WorkflowExecutor", - "WorkflowLifecycleEvent", "WorkflowRunResult", "WorkflowRunState", "WorkflowRunnerException", diff --git a/python/packages/core/agent_framework/_workflows/_agent.py b/python/packages/core/agent_framework/_workflows/_agent.py index 1c7ee41f35..dd0b7646ac 100644 --- a/python/packages/core/agent_framework/_workflows/_agent.py +++ b/python/packages/core/agent_framework/_workflows/_agent.py @@ -91,10 +91,10 @@ def __init__( **kwargs: Additional keyword arguments 
passed to BaseAgent. Note: - Only WorkflowOutputEvents and RequestInfoEvents from the workflow are considered and - converted to agent responses of the WorkflowAgent. Other workflow events are ignored. - Use `with_output_from` in WorkflowBuilder to control which executors' outputs are surfaced - as agent responses. + Only output events (type='output') and request_info events (type='request_info') from + the workflow are considered and converted to agent responses of the WorkflowAgent. + Other workflow events are ignored. Use `with_output_from` in WorkflowBuilder to control + which executors' outputs are surfaced as agent responses. """ if id is None: id = f"WorkflowAgent_{uuid.uuid4().hex[:8]}" @@ -152,9 +152,9 @@ async def run( Returns: An AgentResponse representing the workflow execution results. The response includes all output events and requests emitted during the workflow run. - WorkflowOutputEvents will be converted to ChatMessages in the response. - RequestInfoEvents will be converted to function call and approval request contents - in the response. + Output events (type='output') will be converted to ChatMessages in the response. + Request info events (type='request_info') will be converted to function call and + approval request contents in the response. """ input_messages = normalize_messages_input(messages) thread = thread or self.get_new_thread() @@ -197,9 +197,9 @@ async def run_stream( Yields: AgentResponseUpdate objects representing the workflow execution progress. Updates include output events and requests emitted during the workflow run. - WorkflowOutputEvents will be converted to AgentResponseUpdate objects. - RequestInfoEvents will be converted to function call and approval request contents - in the updates. + Output events (type='output') will be converted to AgentResponseUpdate objects. + Request info events (type='request_info') will be converted to function call and + approval request contents in the updates. 
""" input_messages = normalize_messages_input(messages) thread = thread or self.get_new_thread() @@ -241,11 +241,11 @@ async def _run_impl( Returns: An AgentResponse representing the workflow execution results. """ - output_events: list[WorkflowOutputEvent | RequestInfoEvent] = [] + output_events: list[WorkflowEvent[Any]] = [] async for event in self._run_core( input_messages, thread, checkpoint_id, checkpoint_storage, streaming=False, **kwargs ): - if isinstance(event, WorkflowOutputEvent | RequestInfoEvent): + if event.type == "output" or event.type == "request_info": output_events.append(event) return self._convert_workflow_events_to_agent_response(response_id, output_events) @@ -276,7 +276,7 @@ async def _run_stream_impl( async for event in self._run_core( input_messages, thread, checkpoint_id, checkpoint_storage, streaming=True, **kwargs ): - updates = self._convert_workflow_event_to_agent_response_update(response_id, event) + updates = self._convert_workflow_event_to_agent_response_updates(response_id, event) for update in updates: yield update @@ -410,7 +410,7 @@ def _process_pending_requests(self, input_messages: list[ChatMessage]) -> dict[s def _convert_workflow_events_to_agent_response( self, response_id: str, - output_events: list[WorkflowOutputEvent | RequestInfoEvent], + output_events: list[WorkflowEvent[Any]], ) -> AgentResponse: """Convert a list of workflow output events to an AgentResponse.""" messages: list[ChatMessage] = [] @@ -419,7 +419,7 @@ def _convert_workflow_events_to_agent_response( latest_created_at: str | None = None for output_event in output_events: - if isinstance(output_event, RequestInfoEvent): + if output_event.type == "request_info": function_call, approval_request = self._process_request_info_event(output_event) messages.append( ChatMessage( @@ -438,7 +438,7 @@ def _convert_workflow_events_to_agent_response( # sequence cannot be guaranteed when there are streaming updates in between non-streaming # responses. 
raise AgentExecutionException( - "WorkflowOutputEvent with AgentResponseUpdate data cannot be emitted in non-streaming mode. " + "Output event with AgentResponseUpdate data cannot be emitted in non-streaming mode. " "Please ensure executors emit AgentResponse for non-streaming workflows." ) @@ -484,74 +484,125 @@ def _convert_workflow_events_to_agent_response( raw_representation=raw_representations, ) - def _convert_workflow_event_to_agent_response_update( + def _process_request_info_event( + self, + event: "WorkflowEvent[Any]", + ) -> tuple[Content, Content]: + """Convert a request_info event to FunctionCallContent and FunctionApprovalRequestContent. + + Args: + event: A WorkflowEvent with type='request_info'. + + Returns: + A tuple of (FunctionCallContent, FunctionApprovalRequestContent). + """ + request_id = event.request_id + if not request_id: + raise ValueError("request_info event must have a request_id") + + self.pending_requests[request_id] = event + + args = self.RequestInfoFunctionArgs(request_id=request_id, data=event.data).to_dict() + + function_call = Content.from_function_call( + call_id=request_id, + name=self.REQUEST_INFO_FUNCTION_NAME, + arguments=args, + ) + approval_request = Content.from_function_approval_request( + id=request_id, + function_call=function_call, + additional_properties={"request_id": request_id}, + ) + return function_call, approval_request + + def _convert_workflow_event_to_agent_response_updates( self, response_id: str, event: "WorkflowEvent[Any]", - ) -> AgentResponseUpdate | None: - """Convert a workflow event to an AgentResponseUpdate. + ) -> list[AgentResponseUpdate]: + """Convert a workflow event to a list of AgentResponseUpdate objects. - Events with type='data', type='request_info', and type='output' are processed. + Events with type='output' and type='request_info' are processed. Other workflow events are ignored as they are workflow-internal. 
- For 'data' events (AgentResponseUpdate) from AgentExecutor instances, only events from executors - with output_response=True are converted to agent updates. This prevents agent - responses from executors that were not explicitly marked to surface their output. - Non-AgentExecutor executors that emit 'data' events directly are allowed - through since they explicitly chose to emit the event. - """ - if event.type == "data" and isinstance(event.data, AgentResponseUpdate): - # For AgentExecutor instances, only pass through if output_response=True. - # Non-AgentExecutor executors that emit 'data' events are allowed through. - executor_id = event.executor_id - executor = self.workflow.executors.get(executor_id) if executor_id else None - if isinstance(executor, AgentExecutor) and not executor.output_response: - return None - update = event.data - if update: - # Enrich with executor identity if author_name is not already set - if not update.author_name: - update.author_name = executor_id - return update - return None + For 'output' events, AgentExecutor yields AgentResponseUpdate for streaming updates + via ctx.yield_output(). This method converts those to agent response updates. + Returns: + A list of AgentResponseUpdate objects. Empty list if the event is not relevant. + """ if event.type == "output": - # Convert workflow output to an agent response update. + # Convert workflow output to agent response updates. # Handle different data types appropriately. data = event.data executor_id = event.executor_id - # Skip AgentResponse from AgentExecutor with output_response=True - # since streaming events already surfaced the content. 
- if isinstance(data, AgentResponse): - executor = self.workflow.executors.get(executor_id) if executor_id else None - if isinstance(executor, AgentExecutor) and executor.output_response: - return None - if isinstance(data, AgentResponseUpdate): - return data + # Pass through AgentResponseUpdate directly (streaming from AgentExecutor) + if not data.author_name: + data.author_name = executor_id + return [data] + if isinstance(data, AgentResponse): + # Convert each message in AgentResponse to an AgentResponseUpdate + updates: list[AgentResponseUpdate] = [] + for msg in data.messages: + updates.append( + AgentResponseUpdate( + contents=list(msg.contents), + role=msg.role, + author_name=msg.author_name or executor_id, + response_id=data.response_id or response_id, + message_id=msg.message_id or str(uuid.uuid4()), + created_at=data.created_at + or datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ"), + raw_representation=msg, + ) + ) + return updates if isinstance(data, ChatMessage): - return AgentResponseUpdate( - contents=list(data.contents), - role=data.role, - author_name=data.author_name or executor_id, + return [ + AgentResponseUpdate( + contents=list(data.contents), + role=data.role, + author_name=data.author_name or executor_id, + response_id=response_id, + message_id=str(uuid.uuid4()), + created_at=datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ"), + raw_representation=data, + ) + ] + if is_instance_of(data, list[ChatMessage]): + # Convert each ChatMessage to an AgentResponseUpdate + chat_messages = cast(list[ChatMessage], data) + updates = [] + for msg in chat_messages: + updates.append( + AgentResponseUpdate( + contents=list(msg.contents), + role=msg.role, + author_name=msg.author_name or executor_id, + response_id=response_id, + message_id=msg.message_id or str(uuid.uuid4()), + created_at=datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ"), + raw_representation=msg, + ) + ) + return updates + contents = 
self._extract_contents(data) + if not contents: + return [] + return [ + AgentResponseUpdate( + contents=contents, + role="assistant", + author_name=executor_id, response_id=response_id, message_id=str(uuid.uuid4()), created_at=datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ"), raw_representation=data, ) - contents = self._extract_contents(data) - if not contents: - return None - return AgentResponseUpdate( - contents=contents, - role=Role.ASSISTANT, - author_name=executor_id, - response_id=response_id, - message_id=str(uuid.uuid4()), - created_at=datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ"), - raw_representation=data, - ) + ] if event.type == "request_info": # Store the pending request for later correlation @@ -571,17 +622,19 @@ def _convert_workflow_event_to_agent_response_update( function_call=function_call, additional_properties={"request_id": request_id}, ) - return AgentResponseUpdate( - contents=[function_call, approval_request], - role=Role.ASSISTANT, - author_name=self.name, - response_id=response_id, - message_id=str(uuid.uuid4()), - created_at=datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ"), - ) + return [ + AgentResponseUpdate( + contents=[function_call, approval_request], + role="assistant", + author_name=self.name, + response_id=response_id, + message_id=str(uuid.uuid4()), + created_at=datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ"), + ) + ] # Ignore workflow-internal events - return None + return [] def _extract_function_responses(self, input_messages: list[ChatMessage]) -> dict[str, Any]: """Extract function responses from input messages.""" diff --git a/python/packages/core/agent_framework/_workflows/_agent_executor.py b/python/packages/core/agent_framework/_workflows/_agent_executor.py index 4510efeebf..2627eb8e66 100644 --- a/python/packages/core/agent_framework/_workflows/_agent_executor.py +++ b/python/packages/core/agent_framework/_workflows/_agent_executor.py @@ -16,7 +16,6 @@ 
from ._checkpoint_encoding import decode_checkpoint_value, encode_checkpoint_value from ._const import WORKFLOW_RUN_KWARGS_KEY from ._conversation_state import encode_chat_messages -from ._events import WorkflowEvent from ._executor import Executor, handler from ._message_utils import normalize_messages_input from ._request_info_mixin import response_handler @@ -66,8 +65,8 @@ class AgentExecutor(Executor): """built-in executor that wraps an agent for handling messages. AgentExecutor adapts its behavior based on the workflow execution mode: - - run_stream(): Emits incremental WorkflowOutputEvents as the agent produces tokens - - run(): Emits a single WorkflowOutputEvent containing the complete response + - run_stream(): Emits incremental output events (type='output') as the agent produces tokens + - run(): Emits a single output event (type='output') containing the complete response Use `with_output_from` in WorkflowBuilder to control whether the AgentResponse or AgentResponseUpdate objects are yielded as workflow outputs. @@ -297,8 +296,8 @@ async def _run_agent_and_emit( ) -> None: """Execute the underlying agent, emit events, and enqueue response. - Checks ctx.is_streaming() to determine whether to emit WorkflowOutputEvents - containing incremental updates (streaming mode) or a single WorkflowOutputEvent + Checks ctx.is_streaming() to determine whether to emit output events (type='output') + containing incremental updates (streaming mode) or a single output event (type='output') containing the complete response (non-streaming mode). 
""" if ctx.is_streaming(): @@ -338,7 +337,7 @@ async def _run_agent(self, ctx: WorkflowContext[Never, AgentResponse]) -> AgentR thread=self._agent_thread, **run_kwargs, ) - await ctx.add_event(WorkflowEvent.emit(self.id, response)) + await ctx.yield_output(response) # Handle any user input requests if response.user_input_requests: @@ -368,7 +367,7 @@ async def _run_agent_streaming(self, ctx: WorkflowContext[Never, AgentResponseUp **run_kwargs, ): updates.append(update) - await ctx.add_event(WorkflowEvent.emit(self.id, update)) + await ctx.yield_output(update) if update.user_input_requests: user_input_requests.extend(update.user_input_requests) diff --git a/python/packages/core/agent_framework/_workflows/_base_group_chat_orchestrator.py b/python/packages/core/agent_framework/_workflows/_base_group_chat_orchestrator.py index 542b3c2116..c0f523a3d7 100644 --- a/python/packages/core/agent_framework/_workflows/_base_group_chat_orchestrator.py +++ b/python/packages/core/agent_framework/_workflows/_base_group_chat_orchestrator.py @@ -71,7 +71,7 @@ def __init__(self, round_index: int, data: Any | None = None) -> None: round_index: Current round index data: Optional event-specific data """ - super().__init__(data) + super().__init__("custom", data=data) self.round_index = round_index diff --git a/python/packages/core/agent_framework/_workflows/_events.py b/python/packages/core/agent_framework/_workflows/_events.py index 4f5155a2ae..c36c946bbc 100644 --- a/python/packages/core/agent_framework/_workflows/_events.py +++ b/python/packages/core/agent_framework/_workflows/_events.py @@ -7,17 +7,16 @@ from contextvars import ContextVar from dataclasses import dataclass from enum import Enum -from typing import Any, Generic, Literal, TypeAlias +from typing import Any, Generic, Literal, cast + +from ._checkpoint_encoding import decode_checkpoint_value, encode_checkpoint_value +from ._typing_utils import deserialize_type, serialize_type if sys.version_info >= (3, 13): from typing 
import TypeVar # type: ignore # pragma: no cover else: from typing_extensions import TypeVar # type: ignore[import] # pragma: no cover -from ._checkpoint_encoding import decode_checkpoint_value, encode_checkpoint_value -from ._typing_utils import deserialize_type, serialize_type - -# TypeVar with default parameter (Python 3.13+ feature, backported via typing_extensions) DataT = TypeVar("DataT", default=Any) @@ -309,9 +308,7 @@ def executor_completed(cls, executor_id: str, data: DataT | None = None) -> "Wor return cls("executor_completed", executor_id=executor_id, data=data) @classmethod - def executor_failed( - cls, executor_id: str, details: WorkflowErrorDetails - ) -> "WorkflowEvent[WorkflowErrorDetails]": + def executor_failed(cls, executor_id: str, details: WorkflowErrorDetails) -> "WorkflowEvent[WorkflowErrorDetails]": """Create an 'executor_failed' event when an executor handler raises an error.""" return WorkflowEvent("executor_failed", executor_id=executor_id, data=details, details=details) @@ -353,13 +350,6 @@ def from_dict(cls, data: dict[str, Any]) -> "WorkflowEvent[Any]": return cls.request_info( request_id=data["request_id"], source_executor_id=data["source_executor_id"], - request_data=request_data, + request_data=cast(Any, request_data), # type: ignore response_type=deserialize_type(data["response_type"]), ) - - -# Type alias for backwards compatibility -WorkflowLifecycleEvent: TypeAlias = WorkflowEvent[Any] - -# Backwards compatibility alias - ExecutorEvent is now just WorkflowEvent -ExecutorEvent: TypeAlias = WorkflowEvent[DataT] # type: ignore[type-arg] diff --git a/python/packages/core/agent_framework/_workflows/_runner_context.py b/python/packages/core/agent_framework/_workflows/_runner_context.py index 4f3d35efb2..696f9455b9 100644 --- a/python/packages/core/agent_framework/_workflows/_runner_context.py +++ b/python/packages/core/agent_framework/_workflows/_runner_context.py @@ -499,11 +499,15 @@ async def send_request_info_response(self, 
request_id: str, response: Any) -> No f"expected {event.response_type.__name__}, got {type(response).__name__}" ) + source_executor_id = event.source_executor_id + if source_executor_id is None: + raise RuntimeError("request_info event must have a source_executor_id for response routing") + # Create ResponseMessage instance response_msg = Message( data=response, - source_id=INTERNAL_SOURCE_ID(event.source_executor_id), - target_id=event.source_executor_id, + source_id=INTERNAL_SOURCE_ID(source_executor_id), + target_id=source_executor_id, type=MessageType.RESPONSE, original_request_info_event=event, ) diff --git a/python/packages/core/agent_framework/_workflows/_workflow.py b/python/packages/core/agent_framework/_workflows/_workflow.py index e256ebb463..417e41bffa 100644 --- a/python/packages/core/agent_framework/_workflows/_workflow.py +++ b/python/packages/core/agent_framework/_workflows/_workflow.py @@ -142,7 +142,7 @@ class Workflow(DictConvertible): Executors within a workflow can request external input using `ctx.request_info()`: 1. Executor calls `ctx.request_info()` to request input 2. Executor implements `response_handler()` to process the response - 3. Requests are emitted as RequestInfoEvent instances in the event stream + 3. Requests are emitted as request_info events (WorkflowEvent with type='request_info') in the event stream 4. Workflow enters IDLE_WITH_PENDING_REQUESTS state 5. Caller handles requests and provides responses via the `send_responses` or `send_responses_streaming` methods 6. Responses are routed to the requesting executors and response handlers are invoked @@ -202,7 +202,7 @@ def __init__( self.name = name self.description = description - # `WorkflowOutputEvent`s from these executors are treated as workflow outputs. + # Output events (WorkflowEvent with type='output') from these executors are treated as workflow outputs. # If None or empty, all executor outputs are considered workflow outputs. 
self._output_executors = list(output_executors) if output_executors else list(self.executors.keys()) @@ -603,7 +603,7 @@ async def run( - With checkpoint_id: Used to load and restore the specified checkpoint - Without checkpoint_id: Enables checkpointing for this run, overriding build-time configuration - include_status_events: Whether to include WorkflowStatusEvent instances in the result list. + include_status_events: Whether to include status events (WorkflowEvent with type='status') in the result list. **kwargs: Additional keyword arguments to pass through to agent invocations. These are stored in State and accessible in @tool functions via the **kwargs parameter. diff --git a/python/packages/core/agent_framework/_workflows/_workflow_context.py b/python/packages/core/agent_framework/_workflows/_workflow_context.py index 0fbd65a9c6..3286f6a15f 100644 --- a/python/packages/core/agent_framework/_workflows/_workflow_context.py +++ b/python/packages/core/agent_framework/_workflows/_workflow_context.py @@ -277,10 +277,10 @@ def __init__( self._runner_context = runner_context self._state = state - # Track messages sent via send_message() for ExecutorCompletedEvent + # Track messages sent via send_message() for executor_completed event (type='executor_completed') self._sent_messages: list[Any] = [] - # Track outputs yielded via yield_output() for ExecutorCompletedEvent + # Track outputs yielded via yield_output() for executor_completed event (type='executor_completed') self._yielded_outputs: list[Any] = [] # Store trace contexts and source span IDs for linking (supporting multiple sources) @@ -321,7 +321,7 @@ async def send_message(self, message: OutT, target_id: str | None = None) -> Non # Create Message wrapper msg = Message(data=message, source_id=self._executor_id, target_id=target_id) - # Track sent message for ExecutorCompletedEvent + # Track sent message for executor_completed event (type='executor_completed') self._sent_messages.append(message) # Inject current 
trace context if tracing enabled @@ -341,7 +341,8 @@ async def yield_output(self, output: W_OutT) -> None: output: The output to yield. This must conform to the workflow output type(s) declared on this context. """ - # Track yielded output for ExecutorCompletedEvent (deepcopy to capture state at yield time) + # Track yielded output for executor_completed event (type='executor_completed') + # (deepcopy to capture state at yield time) self._yielded_outputs.append(copy.deepcopy(output)) with _framework_event_origin(): @@ -364,7 +365,7 @@ async def add_event(self, event: "WorkflowEvent[Any]") -> None: async def request_info(self, request_data: object, response_type: type, *, request_id: str | None = None) -> None: """Request information from outside of the workflow. - Calling this method will cause the workflow to emit a RequestInfoEvent, carrying the + Calling this method will cause the workflow to emit a request_info event (type='request_info'), carrying the provided request_data and request_type. External systems listening for such events can then process the request and respond accordingly. diff --git a/python/packages/core/agent_framework/_workflows/_workflow_executor.py b/python/packages/core/agent_framework/_workflows/_workflow_executor.py index 1eba8b8538..5ac35a6ead 100644 --- a/python/packages/core/agent_framework/_workflows/_workflow_executor.py +++ b/python/packages/core/agent_framework/_workflows/_workflow_executor.py @@ -189,8 +189,8 @@ class WorkflowExecutor(Executor): ## Error Handling WorkflowExecutor propagates sub-workflow failures: - - Captures WorkflowFailedEvent from sub-workflow - - Converts to WorkflowErrorEvent in parent context + - Captures failed event (type='failed') from sub-workflow + - Converts to error event in parent context - Provides detailed error information including sub-workflow ID ## Concurrent Execution Support @@ -419,8 +419,11 @@ async def handle_message_wrapped_request_response( response: The response to a previous request. 
ctx: The workflow context. """ + request_id = response.source_event.request_id + if request_id is None: + raise RuntimeError("SubWorkflowResponseMessage source_event must have a request_id") await self._handle_response( - request_id=response.source_event.request_id, + request_id=request_id, response=response.data, ctx=ctx, ) @@ -548,15 +551,21 @@ async def _process_workflow_result( # Process request info events for event in request_info_events: + request_id = event.request_id + response_type = event.response_type + if request_id is None: + raise RuntimeError("request_info event must have a request_id") + if response_type is None: + raise RuntimeError("request_info event must have a response_type") # Track the pending request in execution context - execution_context.pending_requests[event.request_id] = event + execution_context.pending_requests[request_id] = event # Map request to execution for response routing - self._request_to_execution[event.request_id] = execution_context.execution_id + self._request_to_execution[request_id] = execution_context.execution_id if self._propagate_request: # In a workflow where the parent workflow does not handle the request, the request # should be propagated via the `request_info` mechanism to an external source. And # a @response_handler would be required in the WorkflowExecutor to handle the response. - await ctx.request_info(event.data, event.response_type, request_id=event.request_id) + await ctx.request_info(event.data, response_type, request_id=request_id) else: # In a workflow where the parent workflow has an executor that may intercept the # request and handle it directly, a message should be sent. @@ -567,15 +576,18 @@ async def _process_workflow_result( # Handle final state if workflow_run_state == WorkflowRunState.FAILED: - # Find the WorkflowFailedEvent. + # Find the failed event (type='failed'). 
failed_events = [e for e in result if isinstance(e, WorkflowEvent) and e.type == "failed"] if failed_events: failed_event = failed_events[0] - error_type = failed_event.details.error_type - error_message = failed_event.details.message - exception = Exception( - f"Sub-workflow {self.workflow.id} failed with error: {error_type} - {error_message}" - ) + if failed_event.details is not None: + error_type = failed_event.details.error_type + error_message = failed_event.details.message + exception = Exception( + f"Sub-workflow {self.workflow.id} failed with error: {error_type} - {error_message}" + ) + else: + exception = Exception(f"Sub-workflow {self.workflow.id} failed with unknown error") error_event = WorkflowEvent.error(exception) await ctx.add_event(error_event) elif workflow_run_state == WorkflowRunState.IDLE: diff --git a/python/packages/core/tests/workflow/test_agent_executor.py b/python/packages/core/tests/workflow/test_agent_executor.py index cb5ed5f22f..8a8c2880b7 100644 --- a/python/packages/core/tests/workflow/test_agent_executor.py +++ b/python/packages/core/tests/workflow/test_agent_executor.py @@ -12,9 +12,8 @@ ChatMessage, ChatMessageStore, Content, - WorkflowOutputEvent, + WorkflowEvent, WorkflowRunState, - WorkflowStatusEvent, ) from agent_framework._workflows._agent_executor import AgentExecutorResponse from agent_framework._workflows._checkpoint import InMemoryCheckpointStorage @@ -73,9 +72,9 @@ async def test_agent_executor_checkpoint_stores_and_restores_state() -> None: # Run the workflow with a user message first_run_output: AgentExecutorResponse | None = None async for ev in wf.run_stream("First workflow run"): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": first_run_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: break assert first_run_output is not None @@ -127,9 +126,9 @@ async def 
test_agent_executor_checkpoint_stores_and_restores_state() -> None: # Resume from checkpoint resumed_output: AgentExecutorResponse | None = None async for ev in wf_resume.run_stream(checkpoint_id=restore_checkpoint.checkpoint_id): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": resumed_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state in ( + if ev.type == "status" and ev.state in ( WorkflowRunState.IDLE, WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, ): diff --git a/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py b/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py index 08f74ec848..62e07eeff8 100644 --- a/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py +++ b/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py @@ -99,7 +99,7 @@ async def test_agent_executor_emits_tool_calls_in_streaming_mode() -> None: # Act: run in streaming mode events: list[WorkflowEvent[AgentResponseUpdate]] = [] async for event in workflow.run_stream("What's the weather?"): - if event.type == "data" and isinstance(event.data, AgentResponseUpdate): + if event.type == "output" and isinstance(event.data, AgentResponseUpdate): events.append(event) # Assert: we should receive 4 events (text, function call, function result, text) @@ -270,7 +270,7 @@ async def test_agent_executor_tool_call_with_approval_streaming() -> None: workflow = WorkflowBuilder().set_start_executor(agent).add_edge(agent, test_executor).build() # Act - request_info_events: list[RequestInfoEvent] = [] + request_info_events: list[WorkflowEvent] = [] async for event in workflow.run_stream("Invoke tool requiring approval"): if event.type == "request_info": request_info_events.append(event) @@ -347,7 +347,7 @@ async def test_agent_executor_parallel_tool_call_with_approval_streaming() -> No workflow = WorkflowBuilder().set_start_executor(agent).add_edge(agent, test_executor).build() # Act - 
request_info_events: list[RequestInfoEvent] = [] + request_info_events: list[WorkflowEvent] = [] async for event in workflow.run_stream("Invoke tool requiring approval"): if event.type == "request_info": request_info_events.append(event) diff --git a/python/packages/core/tests/workflow/test_agent_run_event_typing.py b/python/packages/core/tests/workflow/test_agent_run_event_typing.py index cedf7371aa..410f57f962 100644 --- a/python/packages/core/tests/workflow/test_agent_run_event_typing.py +++ b/python/packages/core/tests/workflow/test_agent_run_event_typing.py @@ -2,13 +2,13 @@ """Tests for WorkflowEvent[T] generic type annotations.""" -from agent_framework import AgentResponse, AgentResponseUpdate, ChatMessage, Role +from agent_framework import AgentResponse, AgentResponseUpdate, ChatMessage from agent_framework._workflows._events import WorkflowEvent def test_workflow_event_with_agent_response_data_type() -> None: """Verify WorkflowEvent[AgentResponse].data is typed as AgentResponse.""" - response = AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="Hello")]) + response = AgentResponse(messages=[ChatMessage(role="assistant", text="Hello")]) event: WorkflowEvent[AgentResponse] = WorkflowEvent.emit(executor_id="test", data=response) # This assignment should pass type checking without a cast @@ -29,7 +29,7 @@ def test_workflow_event_with_agent_response_update_data_type() -> None: def test_workflow_event_repr() -> None: """Verify WorkflowEvent.__repr__ uses consistent format.""" - response = AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="Hello")]) + response = AgentResponse(messages=[ChatMessage(role="assistant", text="Hello")]) event: WorkflowEvent[AgentResponse] = WorkflowEvent.emit(executor_id="test", data=response) repr_str = repr(event) diff --git a/python/packages/core/tests/workflow/test_checkpoint_validation.py b/python/packages/core/tests/workflow/test_checkpoint_validation.py index 746baea2a2..35ea2c3753 100644 --- 
a/python/packages/core/tests/workflow/test_checkpoint_validation.py +++ b/python/packages/core/tests/workflow/test_checkpoint_validation.py @@ -7,8 +7,8 @@ WorkflowBuilder, WorkflowCheckpointException, WorkflowContext, + WorkflowEvent, WorkflowRunState, - WorkflowStatusEvent, handler, ) from agent_framework._workflows._checkpoint import InMemoryCheckpointStorage diff --git a/python/packages/core/tests/workflow/test_executor.py b/python/packages/core/tests/workflow/test_executor.py index 8a12f6d5e8..32f2342ea3 100644 --- a/python/packages/core/tests/workflow/test_executor.py +++ b/python/packages/core/tests/workflow/test_executor.py @@ -138,7 +138,7 @@ async def handle_integer(self, number: int, ctx: WorkflowContext[int]) -> None: async def test_executor_invoked_event_contains_input_data(): - """Test that ExecutorEvent (kind=INVOKED) contains the input message data.""" + """Test that executor_invoked event (type='executor_invoked') contains the input message data.""" class UpperCaseExecutor(Executor): @handler @@ -172,7 +172,7 @@ async def handle(self, text: str, ctx: WorkflowContext) -> None: async def test_executor_completed_event_contains_sent_messages(): - """Test that ExecutorEvent (kind=COMPLETED) contains the messages sent via ctx.send_message().""" + """Test that executor_completed event (type='executor_completed') contains the messages sent via ctx.send_message().""" class MultiSenderExecutor(Executor): @handler @@ -230,10 +230,10 @@ async def handle(self, text: str, ctx: WorkflowContext[Never, str]) -> None: assert len(completed_events) == 1 assert completed_events[0].executor_id == "yielder" - # Yielded outputs are now included in ExecutorEvent (kind=COMPLETED).data + # Yielded outputs are now included in executor_completed event (type='executor_completed').data assert completed_events[0].data == ["TEST"] - # Verify the output was also yielded as WorkflowOutputEvent + # Verify the output was also yielded as an output event (type='output') output_events = 
[e for e in events if e.type == "output"] assert len(output_events) == 1 assert output_events[0].data == "TEST" @@ -538,7 +538,7 @@ async def handle_response( async def test_executor_invoked_event_data_not_mutated_by_handler(): - """Test that ExecutorEvent (kind=INVOKED).data captures original input, not mutated input.""" + """Test that executor_invoked event (type='executor_invoked').data captures original input, not mutated input.""" @executor(id="Mutator") async def mutator(messages: list[ChatMessage], ctx: WorkflowContext[list[ChatMessage]]) -> None: diff --git a/python/packages/core/tests/workflow/test_full_conversation.py b/python/packages/core/tests/workflow/test_full_conversation.py index b7c6e0d39a..10bd93f8de 100644 --- a/python/packages/core/tests/workflow/test_full_conversation.py +++ b/python/packages/core/tests/workflow/test_full_conversation.py @@ -18,8 +18,8 @@ Executor, WorkflowBuilder, WorkflowContext, + WorkflowEvent, WorkflowRunState, - WorkflowStatusEvent, handler, ) from agent_framework.orchestrations import SequentialBuilder @@ -153,7 +153,7 @@ async def test_sequential_adapter_uses_full_conversation() -> None: # Act async for ev in wf.run_stream("hello seq"): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: break # Assert: second agent should have seen the user prompt and A1's assistant reply diff --git a/python/packages/core/tests/workflow/test_request_info_and_response.py b/python/packages/core/tests/workflow/test_request_info_and_response.py index 1f93a085b0..64287b7488 100644 --- a/python/packages/core/tests/workflow/test_request_info_and_response.py +++ b/python/packages/core/tests/workflow/test_request_info_and_response.py @@ -4,11 +4,10 @@ from agent_framework import ( FileCheckpointStorage, - RequestInfoEvent, WorkflowBuilder, WorkflowContext, + WorkflowEvent, WorkflowRunState, - WorkflowStatusEvent, handler, response_handler, ) @@ -182,7 
+181,7 @@ async def test_approval_workflow(self): workflow = WorkflowBuilder().set_start_executor(executor).build() # First run the workflow until it emits a request - request_info_event: RequestInfoEvent | None = None + request_info_event: WorkflowEvent | None = None async for event in workflow.run_stream("test operation"): if event.type == "request_info": request_info_event = event @@ -207,7 +206,7 @@ async def test_calculation_workflow(self): workflow = WorkflowBuilder().set_start_executor(executor).build() # First run the workflow until it emits a calculation request - request_info_event: RequestInfoEvent | None = None + request_info_event: WorkflowEvent | None = None async for event in workflow.run_stream("multiply 15.5 2.0"): if event.type == "request_info": request_info_event = event @@ -234,7 +233,7 @@ async def test_multiple_requests_workflow(self): workflow = WorkflowBuilder().set_start_executor(executor).build() # Collect all request events by running the full stream - request_events: list[RequestInfoEvent] = [] + request_events: list[WorkflowEvent] = [] async for event in workflow.run_stream("start batch"): if event.type == "request_info": request_events.append(event) @@ -242,10 +241,10 @@ async def test_multiple_requests_workflow(self): assert len(request_events) == 2 # Find the approval and calculation requests - approval_event: RequestInfoEvent | None = next( + approval_event: WorkflowEvent | None = next( (e for e in request_events if isinstance(e.data, UserApprovalRequest)), None ) - calc_event: RequestInfoEvent | None = next( + calc_event: WorkflowEvent | None = next( (e for e in request_events if isinstance(e.data, CalculationRequest)), None ) @@ -268,7 +267,7 @@ async def test_denied_approval_workflow(self): workflow = WorkflowBuilder().set_start_executor(executor).build() # First run the workflow until it emits a request - request_info_event: RequestInfoEvent | None = None + request_info_event: WorkflowEvent | None = None async for event in 
workflow.run_stream("sensitive operation"): if event.type == "request_info": request_info_event = event @@ -291,7 +290,7 @@ async def test_workflow_state_with_pending_requests(self): workflow = WorkflowBuilder().set_start_executor(executor).build() # Run workflow until idle with pending requests - request_info_event: RequestInfoEvent | None = None + request_info_event: WorkflowEvent | None = None idle_with_pending = False async for event in workflow.run_stream("test operation"): if event.type == "request_info": @@ -338,7 +337,7 @@ async def test_checkpoint_with_pending_request_info_events(self): workflow = WorkflowBuilder().set_start_executor(executor).with_checkpointing(storage).build() # Step 1: Run workflow to completion to ensure checkpoints are created - request_info_event: RequestInfoEvent | None = None + request_info_event: WorkflowEvent | None = None async for event in workflow.run_stream("checkpoint test operation"): if event.type == "request_info": request_info_event = event @@ -377,7 +376,7 @@ async def test_checkpoint_with_pending_request_info_events(self): # Step 5: Resume from checkpoint and verify the request can be continued completed = False - restored_request_event: RequestInfoEvent | None = None + restored_request_event: WorkflowEvent | None = None async for event in restored_workflow.run_stream(checkpoint_id=checkpoint_with_request.checkpoint_id): # Should re-emit the pending request info event if event.type == "request_info" and event.request_id == request_info_event.request_id: diff --git a/python/packages/core/tests/workflow/test_runner.py b/python/packages/core/tests/workflow/test_runner.py index 8814316828..da4a2fcaff 100644 --- a/python/packages/core/tests/workflow/test_runner.py +++ b/python/packages/core/tests/workflow/test_runner.py @@ -12,10 +12,8 @@ WorkflowContext, WorkflowConvergenceException, WorkflowEvent, - WorkflowOutputEvent, WorkflowRunnerException, WorkflowRunState, - WorkflowStatusEvent, handler, ) from 
agent_framework._workflows._edge import SingleEdgeGroup diff --git a/python/packages/core/tests/workflow/test_workflow.py b/python/packages/core/tests/workflow/test_workflow.py index 524a77f6f9..23600d3d87 100644 --- a/python/packages/core/tests/workflow/test_workflow.py +++ b/python/packages/core/tests/workflow/test_workflow.py @@ -193,9 +193,9 @@ async def test_fan_out(): events = await workflow.run(NumberMessage(data=0)) - # Each executor will emit two events: ExecutorEvent (kind=INVOKED) and ExecutorEvent (kind=COMPLETED) - # executor_b will also emit a WorkflowOutputEvent (no WorkflowCompletedEvent anymore) - # Each superstep will emit also emit a WorkflowStartedEvent and WorkflowCompletedEvent + # Each executor will emit two events: executor_invoked (type='executor_invoked') and executor_completed (type='executor_completed') + # executor_b will also emit an output event (type='output') + # Each superstep will emit a started event (type='started') and status event (type='status') # This workflow will converge in 2 supersteps because executor_c will send one more message # after executor_b completes assert len(events) == 11 @@ -217,9 +217,9 @@ async def test_fan_out_multiple_completed_events(): events = await workflow.run(NumberMessage(data=0)) - # Each executor will emit two events: ExecutorEvent (kind=INVOKED) and ExecutorEvent (kind=COMPLETED) - # executor_b and executor_c will also emit a WorkflowOutputEvent (no WorkflowCompletedEvent anymore) - # Each superstep will emit also emit a WorkflowStartedEvent and WorkflowCompletedEvent + # Each executor will emit two events: executor_invoked (type='executor_invoked') and executor_completed (type='executor_completed') + # executor_b and executor_c will also emit an output event (type='output') + # Each superstep will emit a started event (type='started') and status event (type='status') # This workflow will converge in 1 superstep because executor_a and executor_b will not send further messages assert len(events) 
== 10 @@ -245,9 +245,9 @@ async def test_fan_in(): events = await workflow.run(NumberMessage(data=0)) - # Each executor will emit two events: ExecutorEvent (kind=INVOKED) and ExecutorEvent (kind=COMPLETED) - # aggregator will also emit a WorkflowOutputEvent (no WorkflowCompletedEvent anymore) - # Each superstep will emit also emit a WorkflowStartedEvent and WorkflowCompletedEvent + # Each executor will emit two events: executor_invoked (type='executor_invoked') and executor_completed (type='executor_completed') + # aggregator will also emit an output event (type='output') + # Each superstep will emit a started event (type='started') and status event (type='status') assert len(events) == 13 assert events.get_final_state() == WorkflowRunState.IDLE @@ -423,7 +423,7 @@ async def test_workflow_run_from_checkpoint_non_streaming(simple_executor: Execu async def test_workflow_run_stream_from_checkpoint_with_responses( simple_executor: Executor, ): - """Test that workflow can be resumed from checkpoint with pending RequestInfoEvents.""" + """Test that workflow can be resumed from checkpoint with pending request_info events.""" with tempfile.TemporaryDirectory() as temp_dir: storage = FileCheckpointStorage(temp_dir) @@ -435,7 +435,7 @@ async def test_workflow_run_stream_from_checkpoint_with_responses( messages={}, state={}, pending_request_info_events={ - "request_123": RequestInfoEvent( + "request_123": WorkflowEvent.request_info( request_id="request_123", source_executor_id=simple_executor.id, request_data="Mock", @@ -726,14 +726,14 @@ async def test_workflow_with_simple_cycle_and_exit_condition(): assert outputs[0] is not None and outputs[0] >= 6 # Should complete when executor_a reaches its limit # Verify cycling occurred (should have events from both executors) - # Check for ExecutorEvent types that have executor_id - from agent_framework import ExecutorEvent, WorkflowEventKind + # Check for executor events that have executor_id + from agent_framework import 
WorkflowEvent executor_events = [ e for e in events - if isinstance(e, ExecutorEvent) - and e.kind in (WorkflowEventKind.EXECUTOR_INVOKED, WorkflowEventKind.EXECUTOR_COMPLETED) + if isinstance(e, WorkflowEvent) + and e.type in ("executor_invoked", "executor_completed") ] executor_ids = {e.executor_id for e in executor_events} assert "exec_a" in executor_ids, "Should have events from executor A" @@ -881,7 +881,7 @@ async def run_stream( async def test_agent_streaming_vs_non_streaming() -> None: - """Test run() emits data events with AgentResponse while run_stream() emits data events with AgentResponseUpdate.""" + """Test run() emits output events with AgentResponse while run_stream() emits output events with AgentResponseUpdate.""" agent = _StreamingTestAgent(id="test_agent", name="TestAgent", reply_text="Hello World") agent_exec = AgentExecutor(agent, id="agent_exec") @@ -891,14 +891,14 @@ async def test_agent_streaming_vs_non_streaming() -> None: result = await workflow.run("test message") # Filter for agent events (result is a list of events) - agent_run_events = [e for e in result if e.type == "data" and isinstance(e.data, AgentResponse)] + agent_run_events = [e for e in result if e.type == "output" and isinstance(e.data, AgentResponse)] agent_update_events = [ - e for e in result if e.type == "data" and isinstance(e.data, AgentResponseUpdate) + e for e in result if e.type == "output" and isinstance(e.data, AgentResponseUpdate) ] - # In non-streaming mode, should have data event with AgentResponse, no AgentResponseUpdate - assert len(agent_run_events) == 1, "Expected exactly one data event with AgentResponse in non-streaming mode" - assert len(agent_update_events) == 0, "Expected no data event with AgentResponseUpdate in non-streaming mode" + # In non-streaming mode, should have output event with AgentResponse, no AgentResponseUpdate + assert len(agent_run_events) == 1, "Expected exactly one output event with AgentResponse in non-streaming mode" + assert 
len(agent_update_events) == 0, "Expected no output event with AgentResponseUpdate in non-streaming mode" assert agent_run_events[0].executor_id == "agent_exec" assert agent_run_events[0].data is not None assert agent_run_events[0].data.messages[0].text == "Hello World" @@ -912,10 +912,10 @@ async def test_agent_streaming_vs_non_streaming() -> None: agent_response = [ cast(AgentResponse, e.data) for e in stream_events - if e.type == "data" and isinstance(e.data, AgentResponse) + if e.type == "output" and isinstance(e.data, AgentResponse) ] agent_response_updates = [ - e.data for e in stream_events if e.type == "data" and isinstance(e.data, AgentResponseUpdate) + e.data for e in stream_events if e.type == "output" and isinstance(e.data, AgentResponseUpdate) ] # In streaming mode, should have AgentResponseUpdate, no AgentResponse @@ -1028,7 +1028,7 @@ async def test_output_executors_empty_yields_all_outputs() -> None: assert len(outputs) == 2 assert outputs == [10, 20] - output_events = [event for event in result if isinstance(event, WorkflowOutputEvent)] + output_events = [event for event in result if event.type == "output"] assert len(output_events) == 2 assert output_events[0].executor_id == "executor_a" assert output_events[1].executor_id == "executor_b" @@ -1056,7 +1056,7 @@ async def test_output_executors_filters_outputs_non_streaming() -> None: assert len(outputs) == 1 assert outputs[0] == 20 - output_events = [event for event in result if isinstance(event, WorkflowOutputEvent)] + output_events = [event for event in result if event.type == "output"] assert len(output_events) == 1 assert output_events[0].executor_id == "executor_b" @@ -1077,9 +1077,9 @@ async def test_output_executors_filters_outputs_streaming() -> None: ) # Collect outputs from streaming - output_events: list[WorkflowOutputEvent] = [] + output_events: list[WorkflowEvent] = [] async for event in workflow.run_stream(NumberMessage(data=0)): - if isinstance(event, WorkflowOutputEvent): + if 
event.type == "output": output_events.append(event) # Only executor_a's output should be present @@ -1214,7 +1214,7 @@ async def test_output_executors_filtering_with_send_responses_streaming() -> Non events_list.append(event) # Get request info events - request_events = [e for e in events_list if isinstance(e, RequestInfoEvent)] + request_events = [e for e in events_list if e.type == "request_info"] assert len(request_events) == 1 # Set output_executors to exclude the approval executor @@ -1222,9 +1222,9 @@ async def test_output_executors_filtering_with_send_responses_streaming() -> Non # Send approval response via streaming responses = {request_events[0].request_id: ApprovalMessage(approved=True)} - output_events: list[WorkflowOutputEvent] = [] + output_events: list[WorkflowEvent] = [] async for event in workflow.send_responses_streaming(responses): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": output_events.append(event) # No outputs should be yielded since approval_executor is not in output_executors diff --git a/python/packages/core/tests/workflow/test_workflow_agent.py b/python/packages/core/tests/workflow/test_workflow_agent.py index 18182fb272..991c5a7b1f 100644 --- a/python/packages/core/tests/workflow/test_workflow_agent.py +++ b/python/packages/core/tests/workflow/test_workflow_agent.py @@ -217,7 +217,7 @@ async def test_end_to_end_basic_workflow_streaming(self): assert "Streaming2: Streaming1: Test input" in second_content.text async def test_end_to_end_request_info_handling(self): - """Test end-to-end workflow with RequestInfoEvent handling.""" + """Test end-to-end workflow with request_info event (type='request_info') handling.""" # Create workflow with requesting executor -> request info executor (no cycle) simple_executor = SimpleExecutor(id="simple", response_text="SimpleResponse", streaming=False) requesting_executor = RequestingExecutor(id="requester", streaming=False) @@ -330,7 +330,7 @@ async def handle_bool(self, 
message: bool, context: WorkflowContext[Any]) -> Non async def test_workflow_as_agent_yield_output_surfaces_as_agent_response(self) -> None: """Test that ctx.yield_output() in a workflow executor surfaces as agent output when using .as_agent(). - This validates the fix for issue #2813: WorkflowOutputEvent should be converted to + This validates the fix for issue #2813: output event (type='output') should be converted to AgentResponseUpdate when the workflow is wrapped via .as_agent(). """ @@ -342,7 +342,7 @@ async def yielding_executor(messages: list[ChatMessage], ctx: WorkflowContext[Ne workflow = WorkflowBuilder().set_start_executor(yielding_executor).build() - # Run directly - should return WorkflowOutputEvent in result + # Run directly - should return output event (type='output') in result direct_result = await workflow.run([ChatMessage("user", [Content.from_text(text="hello")])]) direct_outputs = direct_result.get_outputs() assert len(direct_outputs) == 1 @@ -741,7 +741,7 @@ async def start_executor(messages: list[ChatMessage], ctx: WorkflowContext[Agent # Count occurrences of the unique response text unique_text_count = sum(1 for msg in result.messages if msg.text and "Unique response text" in msg.text) - # Should appear exactly once (not duplicated from both streaming and WorkflowOutputEvent) + # Should appear exactly once (not duplicated from both streaming and output event) assert unique_text_count == 1, f"Response should appear exactly once, but appeared {unique_text_count} times" diff --git a/python/packages/core/tests/workflow/test_workflow_context.py b/python/packages/core/tests/workflow/test_workflow_context.py index e3fafc4144..8115fbb858 100644 --- a/python/packages/core/tests/workflow/test_workflow_context.py +++ b/python/packages/core/tests/workflow/test_workflow_context.py @@ -13,7 +13,6 @@ WorkflowContext, WorkflowEvent, WorkflowRunState, - WorkflowStatusEvent, executor, handler, ) @@ -62,15 +61,15 @@ async def 
test_executor_cannot_emit_framework_lifecycle_event(caplog: "LogCaptur async with make_context() as (ctx, runner_ctx): caplog.clear() with caplog.at_level("WARNING"): - await ctx.add_event(WorkflowStatusEvent(state=WorkflowRunState.IN_PROGRESS)) + await ctx.add_event(WorkflowEvent.status(state=WorkflowRunState.IN_PROGRESS)) events: list[WorkflowEvent] = await runner_ctx.drain_events() assert len(events) == 1 - assert type(events[0]).__name__ == "WorkflowWarningEvent" - data = getattr(events[0], "data", None) + assert events[0].type == "warning" + data = events[0].data assert isinstance(data, str) assert "reserved for framework lifecycle notifications" in data - assert any("attempted to emit WorkflowStatusEvent" in message for message in list(caplog.messages)) + assert any("attempted to emit" in message and "'status'" in message for message in list(caplog.messages)) async def test_executor_emits_normal_event() -> None: @@ -84,7 +83,8 @@ async def test_executor_emits_normal_event() -> None: class _TestEvent(WorkflowEvent): - pass + def __init__(self, data: Any = None) -> None: + super().__init__("custom", data=data) async def test_workflow_context_type_annotations_no_parameter() -> None: diff --git a/python/packages/core/tests/workflow/test_workflow_kwargs.py b/python/packages/core/tests/workflow/test_workflow_kwargs.py index 6029c166bd..2100b28053 100644 --- a/python/packages/core/tests/workflow/test_workflow_kwargs.py +++ b/python/packages/core/tests/workflow/test_workflow_kwargs.py @@ -13,7 +13,6 @@ ChatMessage, Content, WorkflowRunState, - WorkflowStatusEvent, tool, ) from agent_framework._workflows._const import WORKFLOW_RUN_KWARGS_KEY diff --git a/python/packages/core/tests/workflow/test_workflow_states.py b/python/packages/core/tests/workflow/test_workflow_states.py index 7ad7649263..2fddc912a2 100644 --- a/python/packages/core/tests/workflow/test_workflow_states.py +++ b/python/packages/core/tests/workflow/test_workflow_states.py @@ -5,19 +5,14 @@ from 
agent_framework import ( Executor, - ExecutorEvent, InProcRunnerContext, - RequestInfoEvent, Workflow, WorkflowBuilder, WorkflowContext, - WorkflowEventKind, + WorkflowEvent, WorkflowEventSource, - WorkflowFailedEvent, WorkflowRunResult, WorkflowRunState, - WorkflowStartedEvent, - WorkflowStatusEvent, handler, ) from agent_framework._workflows._state import State @@ -40,27 +35,27 @@ async def test_executor_failed_and_workflow_failed_events_streaming(): async for ev in wf.run_stream(0): events.append(ev) - # ExecutorEvent (kind=FAILED) should be emitted before WorkflowFailedEvent + # executor_failed event (type='executor_failed') should be emitted before workflow failed event executor_failed_events = [ - e for e in events if isinstance(e, ExecutorEvent) and e.kind == WorkflowEventKind.EXECUTOR_FAILED + e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_failed" ] - assert executor_failed_events, "ExecutorEvent (kind=FAILED) should be emitted when start executor fails" + assert executor_failed_events, "executor_failed event should be emitted when start executor fails" assert executor_failed_events[0].executor_id == "f" assert executor_failed_events[0].origin is WorkflowEventSource.FRAMEWORK # Workflow-level failure and FAILED status should be surfaced - failed_events = [e for e in events if isinstance(e, WorkflowFailedEvent)] + failed_events = [e for e in events if isinstance(e, WorkflowEvent) and e.type == "failed"] assert failed_events assert all(e.origin is WorkflowEventSource.FRAMEWORK for e in failed_events) - status = [e for e in events if e.type == "status"] + status = [e for e in events if isinstance(e, WorkflowEvent) and e.type == "status"] assert status and status[-1].state == WorkflowRunState.FAILED assert all(e.origin is WorkflowEventSource.FRAMEWORK for e in status) - # Verify ExecutorEvent (kind=FAILED) comes before WorkflowFailedEvent + # Verify executor_failed event comes before workflow failed event executor_failed_idx = 
events.index(executor_failed_events[0]) workflow_failed_idx = events.index(failed_events[0]) assert executor_failed_idx < workflow_failed_idx, ( - "ExecutorEvent (kind=FAILED) should be emitted before WorkflowFailedEvent" + "executor_failed event should be emitted before workflow failed event" ) @@ -76,7 +71,7 @@ async def test_executor_failed_event_emitted_on_direct_execute(): ctx, ) drained = await ctx.drain_events() - failed = [e for e in drained if isinstance(e, ExecutorEvent) and e.kind == WorkflowEventKind.EXECUTOR_FAILED] + failed = [e for e in drained if isinstance(e, WorkflowEvent) and e.type == "executor_failed"] assert failed assert all(e.origin is WorkflowEventSource.FRAMEWORK for e in failed) @@ -90,7 +85,7 @@ async def passthrough(self, msg: int, ctx: WorkflowContext[int]) -> None: async def test_executor_failed_event_from_second_executor_in_chain(): - """Test that ExecutorEvent (kind=FAILED) is emitted when a non-start executor fails.""" + """Test that executor_failed event is emitted when a non-start executor fails.""" passthrough = PassthroughExecutor(id="passthrough") failing = FailingExecutor(id="failing") wf: Workflow = WorkflowBuilder().set_start_executor(passthrough).add_edge(passthrough, failing).build() @@ -100,24 +95,24 @@ async def test_executor_failed_event_from_second_executor_in_chain(): async for ev in wf.run_stream(0): events.append(ev) - # ExecutorEvent (kind=FAILED) should be emitted for the failing executor + # executor_failed event should be emitted for the failing executor executor_failed_events = [ - e for e in events if isinstance(e, ExecutorEvent) and e.kind == WorkflowEventKind.EXECUTOR_FAILED + e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_failed" ] - assert executor_failed_events, "ExecutorEvent (kind=FAILED) should be emitted when second executor fails" + assert executor_failed_events, "executor_failed event should be emitted when second executor fails" assert 
executor_failed_events[0].executor_id == "failing" assert executor_failed_events[0].origin is WorkflowEventSource.FRAMEWORK # Workflow-level failure should also be surfaced - failed_events = [e for e in events if isinstance(e, WorkflowFailedEvent)] + failed_events = [e for e in events if isinstance(e, WorkflowEvent) and e.type == "failed"] assert failed_events assert all(e.origin is WorkflowEventSource.FRAMEWORK for e in failed_events) - # Verify ExecutorEvent (kind=FAILED) comes before WorkflowFailedEvent + # Verify executor_failed event comes before workflow failed event executor_failed_idx = events.index(executor_failed_events[0]) workflow_failed_idx = events.index(failed_events[0]) assert executor_failed_idx < workflow_failed_idx, ( - "ExecutorEvent (kind=FAILED) should be emitted before WorkflowFailedEvent" + "executor_failed event should be emitted before workflow failed event" ) @@ -145,8 +140,8 @@ async def test_idle_with_pending_requests_status_streaming(): events = [ev async for ev in wf.run_stream("start")] # Consume stream fully # Ensure a request was emitted - assert any(isinstance(e, RequestInfoEvent) for e in events) - status_events = [e for e in events if e.type == "status"] + assert any(isinstance(e, WorkflowEvent) and e.type == "request_info" for e in events) + status_events = [e for e in events if isinstance(e, WorkflowEvent) and e.type == "status"] assert len(status_events) >= 3 assert status_events[-2].state == WorkflowRunState.IN_PROGRESS_PENDING_REQUESTS assert status_events[-1].state == WorkflowRunState.IDLE_WITH_PENDING_REQUESTS @@ -165,7 +160,7 @@ async def test_completed_status_streaming(): wf = WorkflowBuilder().set_start_executor(c).build() events = [ev async for ev in wf.run_stream("ok")] # no raise # Last status should be IDLE - status = [e for e in events if e.type == "status"] + status = [e for e in events if isinstance(e, WorkflowEvent) and e.type == "status"] assert status and status[-1].state == WorkflowRunState.IDLE assert 
all(e.origin is WorkflowEventSource.FRAMEWORK for e in status) @@ -175,12 +170,12 @@ async def test_started_and_completed_event_origins(): wf = WorkflowBuilder().set_start_executor(c).build() events = [ev async for ev in wf.run_stream("payload")] - started = next(e for e in events if isinstance(e, WorkflowStartedEvent)) + started = next(e for e in events if isinstance(e, WorkflowEvent) and e.type == "started") assert started.origin is WorkflowEventSource.FRAMEWORK # Check for IDLE status indicating completion idle_status = next( - (e for e in events if e.type == "status" and e.state == WorkflowRunState.IDLE), None + (e for e in events if isinstance(e, WorkflowEvent) and e.type == "status" and e.state == WorkflowRunState.IDLE), None ) assert idle_status is not None assert idle_status.origin is WorkflowEventSource.FRAMEWORK diff --git a/python/packages/declarative/tests/test_workflow_factory.py b/python/packages/declarative/tests/test_workflow_factory.py index 8bad4651f0..04bd57587b 100644 --- a/python/packages/declarative/tests/test_workflow_factory.py +++ b/python/packages/declarative/tests/test_workflow_factory.py @@ -145,7 +145,7 @@ async def test_execute_if_workflow(self): result = await workflow.run({}) outputs = result.get_outputs() - # Check for the expected text in WorkflowOutputEvent + # Check for the expected text in output event (type='output') _text_outputs = [str(o) for o in outputs if isinstance(o, str) or hasattr(o, "data")] # noqa: F841 assert any("Condition was true" in str(o) for o in outputs) diff --git a/python/packages/devui/README.md b/python/packages/devui/README.md index 520b03e56f..fb14469905 100644 --- a/python/packages/devui/README.md +++ b/python/packages/devui/README.md @@ -249,9 +249,9 @@ Given that DevUI offers an OpenAI Responses API, it internally maps messages and | `response.created` + `response.in_progress` | `AgentStartedEvent` | OpenAI | | `response.completed` | `AgentCompletedEvent` | OpenAI | | `response.failed` | 
`AgentFailedEvent` | OpenAI | -| `response.created` + `response.in_progress` | `WorkflowStartedEvent` | OpenAI | -| `response.completed` | `WorkflowCompletedEvent` | OpenAI | -| `response.failed` | `WorkflowFailedEvent` | OpenAI | +| `response.created` + `response.in_progress` | `WorkflowEvent (type='started')` | OpenAI | +| `response.completed` | `WorkflowEvent (type='status')` | OpenAI | +| `response.failed` | `WorkflowEvent (type='failed')` | OpenAI | | | **Content Types** | | | `response.content_part.added` + `response.output_text.delta` | `TextContent` | OpenAI | | `response.reasoning_text.delta` | `TextReasoningContent` | OpenAI | @@ -267,13 +267,13 @@ Given that DevUI offers an OpenAI Responses API, it internally maps messages and | `error` | `ErrorContent` | OpenAI | | Final `Response.usage` field (not streamed) | `UsageContent` | OpenAI | | | **Workflow Events** | | -| `response.output_item.added` (ExecutorActionItem)* | `ExecutorInvokedEvent` | OpenAI | -| `response.output_item.done` (ExecutorActionItem)* | `ExecutorCompletedEvent` | OpenAI | -| `response.output_item.done` (ExecutorActionItem with error)* | `ExecutorFailedEvent` | OpenAI | -| `response.output_item.added` (ResponseOutputMessage) | `WorkflowOutputEvent` | OpenAI | -| `response.workflow_event.complete` | `WorkflowEvent` (other) | DevUI | -| `response.trace.complete` | `WorkflowStatusEvent` | DevUI | -| `response.trace.complete` | `WorkflowWarningEvent` | DevUI | +| `response.output_item.added` (ExecutorActionItem)* | `WorkflowEvent (type='executor_invoked')` | OpenAI | +| `response.output_item.done` (ExecutorActionItem)* | `WorkflowEvent (type='executor_completed')` | OpenAI | +| `response.output_item.done` (ExecutorActionItem with error)* | `WorkflowEvent (type='executor_failed')` | OpenAI | +| `response.output_item.added` (ResponseOutputMessage) | `WorkflowEvent (type='output')` | OpenAI | +| `response.workflow_event.complete` | `WorkflowEvent` (other types) | DevUI | +| 
`response.trace.complete` | `WorkflowEvent (type='status')` | DevUI | +| `response.trace.complete` | `WorkflowEvent (type='warning')` | DevUI | | | **Trace Content** | | | `response.trace.complete` | `DataContent` (no data/errors) | DevUI | | `response.trace.complete` | `UriContent` (unsupported MIME) | DevUI | diff --git a/python/packages/devui/agent_framework_devui/_executor.py b/python/packages/devui/agent_framework_devui/_executor.py index 3987654291..d8e5279b83 100644 --- a/python/packages/devui/agent_framework_devui/_executor.py +++ b/python/packages/devui/agent_framework_devui/_executor.py @@ -261,10 +261,10 @@ async def execute_entity(self, entity_id: str, request: AgentFrameworkRequest) - yield event elif entity_info.type == "workflow": async for event in self._execute_workflow(entity_obj, request, trace_collector): - # Log RequestInfoEvent for debugging HIL flow + # Log request_info event (type='request_info') for debugging HIL flow event_class = event.__class__.__name__ if hasattr(event, "__class__") else type(event).__name__ if event_class == "RequestInfoEvent": - logger.info("🔔 [EXECUTOR] RequestInfoEvent detected from workflow!") + logger.info("🔔 [EXECUTOR] request_info event (type='request_info') detected from workflow!") logger.info(f" request_id: {getattr(event, 'request_id', 'N/A')}") logger.info(f" source_executor_id: {getattr(event, 'source_executor_id', 'N/A')}") logger.info(f" request_type: {getattr(event, 'request_type', 'N/A')}") @@ -526,7 +526,7 @@ async def _execute_workflow( logger.warning(f"Could not convert HIL responses to proper types: {e}") async for event in workflow.send_responses_streaming(hil_responses): - # Enrich new RequestInfoEvents that may come from subsequent HIL requests + # Enrich new request_info events (type='request_info') that may come from subsequent HIL requests if event.type == "request_info": self._enrich_request_info_event_with_response_schema(event, workflow) @@ -555,7 +555,7 @@ async def _execute_workflow( 
yield event - # Note: Removed break on RequestInfoEvent - continue yielding all events + # Note: Removed break on request_info event (type='request_info') - continue yielding all events # The workflow is already paused by ctx.request_info() in the framework # DevUI should continue yielding events even during HIL pause @@ -579,7 +579,7 @@ async def _execute_workflow( yield event - # Note: Removed break on RequestInfoEvent - continue yielding all events + # Note: Removed break on request_info event (type='request_info') - continue yielding all events # The workflow is already paused by ctx.request_info() in the framework # DevUI should continue yielding events even during HIL pause @@ -1024,10 +1024,10 @@ def _parse_raw_workflow_input(self, workflow: Any, raw_input: str) -> Any: return raw_input def _enrich_request_info_event_with_response_schema(self, event: Any, workflow: Any) -> None: - """Extract response type from workflow executor and attach response schema to RequestInfoEvent. + """Extract response type from workflow executor and attach response schema to request_info event (type='request_info'). 
Args: - event: RequestInfoEvent to enrich + event: request_info event (type='request_info') to enrich workflow: Workflow object containing executors """ try: @@ -1038,7 +1038,7 @@ def _enrich_request_info_event_with_response_schema(self, event: Any, workflow: request_type = getattr(event, "request_type", None) if not source_executor_id or not request_type: - logger.debug("RequestInfoEvent missing source_executor_id or request_type") + logger.debug("request_info event (type='request_info') missing source_executor_id or request_type") return # Find the source executor in the workflow @@ -1071,4 +1071,4 @@ def _enrich_request_info_event_with_response_schema(self, event: Any, workflow: event._response_schema = response_schema except Exception as e: - logger.warning(f"Failed to enrich RequestInfoEvent with response schema: {e}") + logger.warning(f"Failed to enrich request_info event (type='request_info') with response schema: {e}") diff --git a/python/packages/devui/agent_framework_devui/_mapper.py b/python/packages/devui/agent_framework_devui/_mapper.py index 07901f1e99..a5eb22e8e7 100644 --- a/python/packages/devui/agent_framework_devui/_mapper.py +++ b/python/packages/devui/agent_framework_devui/_mapper.py @@ -934,7 +934,7 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> # Emit output_item.added for each yield_output logger.debug( - f"WorkflowOutputEvent converted to output_item.added " + f"output event (type='output') converted to output_item.added " f"(executor: {executor_id}, length: {len(text)})" ) return [ @@ -953,8 +953,8 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> if event_type == "failed": workflow_id = context.get("workflow_id", str(uuid4())) - # WorkflowFailedEvent uses 'details' field (WorkflowErrorDetails), not 'error' - # This matches ExecutorFailedEvent which also uses 'details' + # failed event (type='failed') uses 'details' field (WorkflowErrorDetails), not 'error' + # This 
matches executor_failed event which also uses 'details' details = getattr(event, "details", None) # Import Response and ResponseError types @@ -1039,7 +1039,7 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> context.pop("current_executor_id", None) # Create ExecutorActionItem with completed status - # ExecutorEvent (kind=EXECUTOR_COMPLETED) uses 'data' field, not 'result' + # executor_completed event (type='executor_completed') uses 'data' field, not 'result' # Serialize the result data to ensure it's JSON-serializable # (AgentExecutorResponse contains AgentResponse/ChatMessage which are SerializationMixin) raw_result = getattr(event, "data", None) @@ -1065,8 +1065,8 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> if event_type == "executor_failed": executor_id = getattr(event, "executor_id", "unknown") item_id = context.get(f"exec_item_{executor_id}", f"exec_{executor_id}_unknown") - # ExecutorEvent (kind=EXECUTOR_FAILED) uses 'details' property (WorkflowErrorDetails), not 'error' - # This matches ExecutorEvent.details which returns self.data for EXECUTOR_FAILED kind + # executor_failed event (type='executor_failed') uses 'details' property (WorkflowErrorDetails), not 'error' + # This matches WorkflowEvent.details which returns self.data for executor_failed type details = getattr(event, "details", None) if details: err_msg = getattr(details, "message", None) or str(details) @@ -1104,7 +1104,7 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> request_type_class = getattr(event, "request_type", None) request_data = getattr(event, "data", None) - logger.info("📨 [MAPPER] Processing RequestInfoEvent") + logger.info("📨 [MAPPER] Processing request_info event (type='request_info')") logger.info(f" request_id: {request_id}") logger.info(f" source_executor_id: {source_executor_id}") logger.info(f" request_type_class: {request_type_class}") diff --git 
a/python/packages/devui/tests/test_checkpoints.py b/python/packages/devui/tests/test_checkpoints.py index 006715dc61..ac450c447b 100644 --- a/python/packages/devui/tests/test_checkpoints.py +++ b/python/packages/devui/tests/test_checkpoints.py @@ -432,7 +432,7 @@ async def test_workflow_auto_saves_checkpoints_to_injected_storage(self, checkpo if event.type == "status" and "IDLE_WITH_PENDING_REQUESTS" in str(event.state): break - assert saw_request_event, "Test workflow should have emitted RequestInfoEvent" + assert saw_request_event, "Test workflow should have emitted request_info event (type='request_info')" # Verify checkpoint was AUTOMATICALLY saved to our storage by the framework checkpoints_after = await checkpoint_storage.list_checkpoints() diff --git a/python/packages/devui/tests/test_execution.py b/python/packages/devui/tests/test_execution.py index ce763d227e..ce3f82fac3 100644 --- a/python/packages/devui/tests/test_execution.py +++ b/python/packages/devui/tests/test_execution.py @@ -326,7 +326,7 @@ async def test_full_pipeline_workflow_events_are_json_serializable(): """CRITICAL TEST: Verify ALL events from workflow execution can be JSON serialized. This is particularly important for workflows with AgentExecutor because: - - AgentExecutor produces ExecutorCompletedEvent with AgentExecutorResponse + - AgentExecutor produces executor_completed event (type='executor_completed') with AgentExecutorResponse - AgentExecutorResponse contains AgentResponse and ChatMessage objects - These are SerializationMixin objects, not Pydantic, which caused the original bug @@ -693,10 +693,10 @@ async def test_full_pipeline_concurrent_workflow(concurrent_workflow_fixture): @pytest.mark.asyncio async def test_full_pipeline_workflow_output_event_serialization(): - """Test that WorkflowOutputEvent from ctx.yield_output() serializes correctly. + """Test that output event (type='output') from ctx.yield_output() serializes correctly. 
This tests the pattern where executors yield output via ctx.yield_output(), - which emits WorkflowOutputEvent that DevUI must serialize for SSE. + which emits output event (type='output') that DevUI must serialize for SSE. """ from agent_framework import Executor, WorkflowBuilder, WorkflowContext, handler diff --git a/python/packages/devui/tests/test_helpers.py b/python/packages/devui/tests/test_helpers.py index 3da6f775ee..6e06380826 100644 --- a/python/packages/devui/tests/test_helpers.py +++ b/python/packages/devui/tests/test_helpers.py @@ -296,7 +296,7 @@ def create_agent_executor_response( executor_id: str = "test_executor", response_text: str = "Executor response", ) -> AgentExecutorResponse: - """Create an AgentExecutorResponse - the type that's nested in ExecutorEvent (kind=COMPLETED).data.""" + """Create an AgentExecutorResponse - the type that's nested in executor_completed event (type='executor_completed').data.""" agent_response = create_agent_run_response(response_text) return AgentExecutorResponse( executor_id=executor_id, diff --git a/python/packages/devui/tests/test_mapper.py b/python/packages/devui/tests/test_mapper.py index 24c76b7584..2b365eba35 100644 --- a/python/packages/devui/tests/test_mapper.py +++ b/python/packages/devui/tests/test_mapper.py @@ -573,18 +573,18 @@ def __init__(self): # ============================================================================= -# WorkflowOutputEvent Tests +# output event (type='output') Tests # ============================================================================= async def test_workflow_output_event(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test WorkflowEvent(type='output') is converted to output_item.added.""" + """Test output event (type='output') is converted to output_item.added.""" from agent_framework._workflows._events import WorkflowEvent event = WorkflowEvent.output(executor_id="final_executor", data="Final workflow output") events = await 
mapper.convert_event(event, test_request) - # WorkflowOutputEvent should emit output_item.added + # output event (type='output') should emit output_item.added assert len(events) == 1 assert events[0].type == "response.output_item.added" # Check item contains the output text @@ -594,7 +594,7 @@ async def test_workflow_output_event(mapper: MessageMapper, test_request: AgentF async def test_workflow_output_event_with_list_data(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test WorkflowEvent(type='output') with list data (common for sequential/concurrent workflows).""" + """Test output event (type='output') with list data (common for sequential/concurrent workflows).""" from agent_framework import ChatMessage from agent_framework._workflows._events import WorkflowEvent @@ -611,12 +611,12 @@ async def test_workflow_output_event_with_list_data(mapper: MessageMapper, test_ # ============================================================================= -# WorkflowFailedEvent Tests +# failed event (type='failed') Tests # ============================================================================= async def test_workflow_failed_event(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test WorkflowEvent(type='failed') is converted to response.failed.""" + """Test failed event (type='failed') is converted to response.failed.""" from agent_framework._workflows._events import WorkflowErrorDetails, WorkflowEvent details = WorkflowErrorDetails( @@ -627,7 +627,7 @@ async def test_workflow_failed_event(mapper: MessageMapper, test_request: AgentF event = WorkflowEvent.failed(details=details) events = await mapper.convert_event(event, test_request) - # WorkflowFailedEvent should emit response.failed + # failed event (type='failed') should emit response.failed assert len(events) >= 1 # Find the failed event failed_events = [e for e in events if getattr(e, "type", "") == "response.failed"] @@ -642,7 +642,7 @@ async def 
test_workflow_failed_event(mapper: MessageMapper, test_request: AgentF async def test_workflow_failed_event_with_extra(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test WorkflowEvent(type='failed') includes extra context when available.""" + """Test failed event (type='failed') includes extra context when available.""" from agent_framework._workflows._events import WorkflowErrorDetails, WorkflowEvent details = WorkflowErrorDetails( @@ -664,7 +664,7 @@ async def test_workflow_failed_event_with_extra(mapper: MessageMapper, test_requ async def test_workflow_failed_event_with_traceback(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test WorkflowEvent(type='failed') includes traceback when available.""" + """Test failed event (type='failed') includes traceback when available.""" from agent_framework._workflows._events import WorkflowErrorDetails, WorkflowEvent details = WorkflowErrorDetails( @@ -712,12 +712,12 @@ async def test_workflow_error_event(mapper: MessageMapper, test_request: AgentFr # ============================================================================= -# RequestInfoEvent Tests (Human-in-the-Loop) +# request_info event (type='request_info') Tests (Human-in-the-Loop) # ============================================================================= async def test_request_info_event(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test WorkflowEvent(type='request_info') is converted to HIL request event.""" + """Test request_info event (type='request_info') is converted to HIL request event.""" from agent_framework._workflows._events import WorkflowEvent event = WorkflowEvent.request_info( @@ -728,7 +728,7 @@ async def test_request_info_event(mapper: MessageMapper, test_request: AgentFram ) events = await mapper.convert_event(event, test_request) - # RequestInfoEvent should emit response.request_info.requested + # request_info event (type='request_info') should emit 
response.request_info.requested assert len(events) >= 1 # Check that request info is captured has_hil_event = any(getattr(e, "type", "") == "response.request_info.requested" for e in events) @@ -746,24 +746,24 @@ async def test_request_info_event(mapper: MessageMapper, test_request: AgentFram async def test_superstep_started_event(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test WorkflowEvent(type='superstep_started') is handled gracefully.""" + """Test superstep_started event (type='superstep_started') is handled gracefully.""" from agent_framework._workflows._events import WorkflowEvent event = WorkflowEvent.superstep_started(iteration=1) events = await mapper.convert_event(event, test_request) - # SuperStepStartedEvent may not emit events (internal workflow signal) + # superstep_started event (type='superstep_started') may not emit events (internal workflow signal) # Just ensure it doesn't crash assert isinstance(events, list) async def test_superstep_completed_event(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test WorkflowEvent(type='superstep_completed') is handled gracefully.""" + """Test superstep_completed event (type='superstep_completed') is handled gracefully.""" from agent_framework._workflows._events import WorkflowEvent event = WorkflowEvent.superstep_completed(iteration=1) events = await mapper.convert_event(event, test_request) - # SuperStepCompletedEvent may not emit events (internal workflow signal) + # superstep_completed event (type='superstep_completed') may not emit events (internal workflow signal) # Just ensure it doesn't crash assert isinstance(events, list) diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_concurrent.py b/python/packages/orchestrations/agent_framework_orchestrations/_concurrent.py index d426afd415..6f55da124d 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_concurrent.py +++ 
b/python/packages/orchestrations/agent_framework_orchestrations/_concurrent.py @@ -462,9 +462,9 @@ def with_request_info( ) -> "ConcurrentBuilder": """Enable request info after agent participant responses. - This enables human-in-the-loop (HIL) scenarios for the sequential orchestration. + This enables human-in-the-loop (HIL) scenarios for the concurrent orchestration. When enabled, the workflow pauses after each agent participant runs, emitting - a RequestInfoEvent that allows the caller to review the conversation and optionally + a request_info event (type='request_info') that allows the caller to review the conversation and optionally inject guidance for the agent participant to iterate. The caller provides input via the standard response_handler/request_info pattern. diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_group_chat.py b/python/packages/orchestrations/agent_framework_orchestrations/_group_chat.py index 5fb5d9db17..b76f813135 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_group_chat.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_group_chat.py @@ -860,7 +860,7 @@ def with_request_info(self, *, agents: Sequence[str | AgentProtocol] | None = No This enables human-in-the-loop (HIL) scenarios for the group chat orchestration. When enabled, the workflow pauses after each agent participant runs, emitting - a RequestInfoEvent that allows the caller to review the conversation and optionally + a request_info event (type='request_info') that allows the caller to review the conversation and optionally inject guidance for the agent participant to iterate. The caller provides input via the standard response_handler/request_info pattern. 
diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py b/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py index a26bf1ea37..2fd6f678e1 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py @@ -74,7 +74,7 @@ def __init__(self, source: str, target: str, data: Any | None = None) -> None: target: Identifier of the target agent receiving the handoff data: Optional event-specific data """ - super().__init__(data) + super().__init__("custom", data=data) self.source = source self.target = target diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_sequential.py b/python/packages/orchestrations/agent_framework_orchestrations/_sequential.py index f619473857..a53d3c6d6c 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_sequential.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_sequential.py @@ -218,7 +218,7 @@ def with_request_info( This enables human-in-the-loop (HIL) scenarios for the sequential orchestration. When enabled, the workflow pauses after each agent participant runs, emitting - a RequestInfoEvent that allows the caller to review the conversation and optionally + a request_info event (type='request_info') that allows the caller to review the conversation and optionally inject guidance for the agent participant to iterate. The caller provides input via the standard response_handler/request_info pattern. 
diff --git a/python/packages/orchestrations/tests/test_concurrent.py b/python/packages/orchestrations/tests/test_concurrent.py index edc937a75e..5eb9d2ac67 100644 --- a/python/packages/orchestrations/tests/test_concurrent.py +++ b/python/packages/orchestrations/tests/test_concurrent.py @@ -10,9 +10,8 @@ ChatMessage, Executor, WorkflowContext, - WorkflowOutputEvent, + WorkflowEvent, WorkflowRunState, - WorkflowStatusEvent, handler, ) from agent_framework._workflows._checkpoint import InMemoryCheckpointStorage @@ -111,9 +110,9 @@ async def test_concurrent_default_aggregator_emits_single_user_and_assistants() completed = False output: list[ChatMessage] | None = None async for ev in wf.run_stream("prompt: hello world"): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: completed = True - elif isinstance(ev, WorkflowOutputEvent): + elif ev.type == "output": output = cast(list[ChatMessage], ev.data) if completed and output is not None: break @@ -149,9 +148,9 @@ async def summarize(results: list[AgentExecutorResponse]) -> str: completed = False output: str | None = None async for ev in wf.run_stream("prompt: custom"): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: completed = True - elif isinstance(ev, WorkflowOutputEvent): + elif ev.type == "output": output = cast(str, ev.data) if completed and output is not None: break @@ -180,9 +179,9 @@ def summarize_sync(results: list[AgentExecutorResponse], _ctx: WorkflowContext[A completed = False output: str | None = None async for ev in wf.run_stream("prompt: custom sync"): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: completed = True - elif isinstance(ev, WorkflowOutputEvent): + elif ev.type == "output": output = cast(str, ev.data) if completed 
and output is not None: break @@ -228,9 +227,9 @@ async def aggregate(self, results: list[AgentExecutorResponse], ctx: WorkflowCon completed = False output: str | None = None async for ev in wf.run_stream("prompt: instance test"): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: completed = True - elif isinstance(ev, WorkflowOutputEvent): + elif ev.type == "output": output = cast(str, ev.data) if completed and output is not None: break @@ -266,9 +265,9 @@ async def aggregate(self, results: list[AgentExecutorResponse], ctx: WorkflowCon completed = False output: str | None = None async for ev in wf.run_stream("prompt: factory test"): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: completed = True - elif isinstance(ev, WorkflowOutputEvent): + elif ev.type == "output": output = cast(str, ev.data) if completed and output is not None: break @@ -302,9 +301,9 @@ async def aggregate(self, results: list[AgentExecutorResponse], ctx: WorkflowCon completed = False output: str | None = None async for ev in wf.run_stream("prompt: factory test"): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: completed = True - elif isinstance(ev, WorkflowOutputEvent): + elif ev.type == "output": output = cast(str, ev.data) if completed and output is not None: break @@ -352,9 +351,9 @@ async def test_concurrent_checkpoint_resume_round_trip() -> None: baseline_output: list[ChatMessage] | None = None async for ev in wf.run_stream("checkpoint concurrent"): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": baseline_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: 
break assert baseline_output is not None @@ -376,9 +375,9 @@ async def test_concurrent_checkpoint_resume_round_trip() -> None: resumed_output: list[ChatMessage] | None = None async for ev in wf_resume.run_stream(checkpoint_id=resume_checkpoint.checkpoint_id): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": resumed_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state in ( + if ev.type == "status" and ev.state in ( WorkflowRunState.IDLE, WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, ): @@ -398,9 +397,9 @@ async def test_concurrent_checkpoint_runtime_only() -> None: baseline_output: list[ChatMessage] | None = None async for ev in wf.run_stream("runtime checkpoint test", checkpoint_storage=storage): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": baseline_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: break assert baseline_output is not None @@ -419,9 +418,9 @@ async def test_concurrent_checkpoint_runtime_only() -> None: resumed_output: list[ChatMessage] | None = None async for ev in wf_resume.run_stream(checkpoint_id=resume_checkpoint.checkpoint_id, checkpoint_storage=storage): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": resumed_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state in ( + if ev.type == "status" and ev.state in ( WorkflowRunState.IDLE, WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, ): @@ -446,9 +445,9 @@ async def test_concurrent_checkpoint_runtime_overrides_buildtime() -> None: baseline_output: list[ChatMessage] | None = None async for ev in wf.run_stream("override test", checkpoint_storage=runtime_storage): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": baseline_output = ev.data # type: ignore[assignment] - if isinstance(ev, 
WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: break assert baseline_output is not None @@ -528,9 +527,9 @@ def create_agent3() -> Executor: completed = False output: list[ChatMessage] | None = None async for ev in wf.run_stream("test prompt"): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: completed = True - elif isinstance(ev, WorkflowOutputEvent): + elif ev.type == "output": output = cast(list[ChatMessage], ev.data) if completed and output is not None: break diff --git a/python/packages/orchestrations/tests/test_group_chat.py b/python/packages/orchestrations/tests/test_group_chat.py index 4e18ac6ca2..891ae4c6c1 100644 --- a/python/packages/orchestrations/tests/test_group_chat.py +++ b/python/packages/orchestrations/tests/test_group_chat.py @@ -17,10 +17,8 @@ ChatResponse, ChatResponseUpdate, Content, - RequestInfoEvent, - WorkflowOutputEvent, + WorkflowEvent, WorkflowRunState, - WorkflowStatusEvent, ) from agent_framework._workflows._checkpoint import InMemoryCheckpointStorage from agent_framework.orchestrations import ( @@ -729,9 +727,9 @@ async def test_group_chat_checkpoint_runtime_only() -> None: baseline_output: list[ChatMessage] | None = None async for ev in wf.run_stream("runtime checkpoint test", checkpoint_storage=storage): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": baseline_output = cast(list[ChatMessage], ev.data) if isinstance(ev.data, list) else None # type: ignore - if isinstance(ev, WorkflowStatusEvent) and ev.state in ( + if ev.type == "status" and ev.state in ( WorkflowRunState.IDLE, WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, ): @@ -767,9 +765,9 @@ async def test_group_chat_checkpoint_runtime_overrides_buildtime() -> None: ) baseline_output: list[ChatMessage] | None = None async for ev in wf.run_stream("override test", 
checkpoint_storage=runtime_storage): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": baseline_output = cast(list[ChatMessage], ev.data) if isinstance(ev.data, list) else None # type: ignore - if isinstance(ev, WorkflowStatusEvent) and ev.state in ( + if ev.type == "status" and ev.state in ( WorkflowRunState.IDLE, WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, ): @@ -813,7 +811,7 @@ async def selector(state: GroupChatState) -> str: ) # Run until we get a request info event (should be before beta, not alpha) - request_events: list[RequestInfoEvent] = [] + request_events: list[WorkflowEvent] = [] async for event in workflow.run_stream("test task"): if event.type == "request_info" and isinstance(event.data, AgentExecutorResponse): request_events.append(event) @@ -828,7 +826,7 @@ async def selector(state: GroupChatState) -> str: assert request_event.source_executor_id == "beta" # Continue the workflow with a response - outputs: list[WorkflowOutputEvent] = [] + outputs: list[WorkflowEvent] = [] async for event in workflow.send_responses_streaming({ request_event.request_id: AgentRequestInfoResponse.approve() }): @@ -865,7 +863,7 @@ async def selector(state: GroupChatState) -> str: ) # Run until we get a request info event - request_events: list[RequestInfoEvent] = [] + request_events: list[WorkflowEvent] = [] async for event in workflow.run_stream("test task"): if event.type == "request_info" and isinstance(event.data, AgentExecutorResponse): request_events.append(event) @@ -969,7 +967,7 @@ def create_beta() -> StubAgent: # Factories should be called during build assert call_count == 2 - outputs: list[WorkflowOutputEvent] = [] + outputs: list[WorkflowEvent] = [] async for event in workflow.run_stream("coordinate task"): if event.type == "output": outputs.append(event) @@ -1034,7 +1032,7 @@ def create_beta() -> StubAgent: .build() ) - outputs: list[WorkflowOutputEvent] = [] + outputs: list[WorkflowEvent] = [] async for event in 
workflow.run_stream("checkpoint test"): if event.type == "output": outputs.append(event) @@ -1162,7 +1160,7 @@ def agent_factory() -> ChatAgent: # Factory should be called during build assert factory_call_count == 1 - outputs: list[WorkflowOutputEvent] = [] + outputs: list[WorkflowEvent] = [] async for event in workflow.run_stream("coordinate task"): if event.type == "output": outputs.append(event) diff --git a/python/packages/orchestrations/tests/test_handoff.py b/python/packages/orchestrations/tests/test_handoff.py index d1fe70eff6..18f540d516 100644 --- a/python/packages/orchestrations/tests/test_handoff.py +++ b/python/packages/orchestrations/tests/test_handoff.py @@ -11,9 +11,7 @@ ChatResponse, ChatResponseUpdate, Content, - RequestInfoEvent, WorkflowEvent, - WorkflowOutputEvent, resolve_agent_id, use_function_invocation, ) @@ -128,7 +126,7 @@ async def test_handoff(): # escalation won't trigger a handoff, so the response from it will become # a request for user input because autonomous mode is not enabled by default. 
events = await _drain(workflow.run_stream("Need technical support")) - requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] + requests = [ev for ev in events if ev.type == "request_info"] assert requests assert len(requests) == 1 @@ -162,10 +160,10 @@ async def test_autonomous_mode_yields_output_without_user_request(): ) events = await _drain(workflow.run_stream("Package arrived broken")) - requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] + requests = [ev for ev in events if ev.type == "request_info"] assert not requests, "Autonomous mode should not request additional user input" - outputs = [ev for ev in events if isinstance(ev, WorkflowOutputEvent)] + outputs = [ev for ev in events if ev.type == "output"] assert outputs, "Autonomous mode should yield a workflow output" final_conversation = outputs[-1].data @@ -188,7 +186,7 @@ async def test_autonomous_mode_resumes_user_input_on_turn_limit(): ) events = await _drain(workflow.run_stream("Start")) - requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] + requests = [ev for ev in events if ev.type == "request_info"] assert requests and len(requests) == 1, "Turn limit should force a user input request" assert requests[0].source_executor_id == worker.name @@ -231,13 +229,13 @@ async def async_termination(conv: list[ChatMessage]) -> bool: ) events = await _drain(workflow.run_stream("First user message")) - requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] + requests = [ev for ev in events if ev.type == "request_info"] assert requests events = await _drain( workflow.send_responses_streaming({requests[-1].request_id: [ChatMessage("user", ["Second user message"])]}) ) - outputs = [ev for ev in events if isinstance(ev, WorkflowOutputEvent)] + outputs = [ev for ev in events if ev.type == "output"] assert len(outputs) == 1 final_conversation = outputs[0].data @@ -481,14 +479,14 @@ def create_specialist() -> MockHandoffAgent: assert call_count == 2 events = 
await _drain(workflow.run_stream("Need help")) - requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] + requests = [ev for ev in events if ev.type == "request_info"] assert requests # Follow-up message events = await _drain( workflow.send_responses_streaming({requests[-1].request_id: [ChatMessage("user", ["More details"])]}) ) - outputs = [ev for ev in events if isinstance(ev, WorkflowOutputEvent)] + outputs = [ev for ev in events if ev.type == "output"] assert outputs @@ -552,7 +550,7 @@ def create_specialist_b() -> MockHandoffAgent: # Start conversation - triage hands off to specialist_a events = await _drain(workflow.run_stream("Initial request")) - requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] + requests = [ev for ev in events if ev.type == "request_info"] assert requests # Verify specialist_a executor exists and was called @@ -562,7 +560,7 @@ def create_specialist_b() -> MockHandoffAgent: events = await _drain( workflow.send_responses_streaming({requests[-1].request_id: [ChatMessage("user", ["Need escalation"])]}) ) - requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] + requests = [ev for ev in events if ev.type == "request_info"] assert requests # Verify specialist_b executor exists @@ -591,13 +589,13 @@ def create_specialist() -> MockHandoffAgent: # Run workflow and capture output events = await _drain(workflow.run_stream("checkpoint test")) - requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] + requests = [ev for ev in events if ev.type == "request_info"] assert requests events = await _drain( workflow.send_responses_streaming({requests[-1].request_id: [ChatMessage("user", ["follow up"])]}) ) - outputs = [ev for ev in events if isinstance(ev, WorkflowOutputEvent)] + outputs = [ev for ev in events if ev.type == "output"] assert outputs, "Should have workflow output after termination condition is met" # List checkpoints - just verify they were created @@ -669,7 +667,7 @@ def 
create_specialist() -> MockHandoffAgent: ) events = await _drain(workflow.run_stream("Issue")) - requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] + requests = [ev for ev in events if ev.type == "request_info"] assert requests and len(requests) == 1 assert requests[0].source_executor_id == "specialist" diff --git a/python/packages/orchestrations/tests/test_magentic.py b/python/packages/orchestrations/tests/test_magentic.py index a2af8b4e6c..542fd05bc8 100644 --- a/python/packages/orchestrations/tests/test_magentic.py +++ b/python/packages/orchestrations/tests/test_magentic.py @@ -245,9 +245,9 @@ async def test_magentic_workflow_plan_review_approval_to_completion(): manager = FakeManager() wf = MagenticBuilder().participants([DummyExec("agentA")]).with_manager(manager=manager).with_plan_review().build() - req_event: RequestInfoEvent | None = None + req_event: WorkflowEvent | None = None async for ev in wf.run_stream("do work"): - if isinstance(ev, RequestInfoEvent) and ev.request_type is MagenticPlanReviewRequest: + if ev.type == "request_info" and ev.request_type is MagenticPlanReviewRequest: req_event = ev assert req_event is not None assert isinstance(req_event.data, MagenticPlanReviewRequest) @@ -255,9 +255,9 @@ async def test_magentic_workflow_plan_review_approval_to_completion(): completed = False output: list[ChatMessage] | None = None async for ev in wf.send_responses_streaming(responses={req_event.request_id: req_event.data.approve()}): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: completed = True - elif isinstance(ev, WorkflowOutputEvent): + elif ev.type == "output": output = ev.data # type: ignore[assignment] if completed and output is not None: break @@ -290,9 +290,9 @@ async def replan(self, magentic_context: MagenticContext) -> ChatMessage: # typ ) # Wait for the initial plan review request - req_event: RequestInfoEvent | None = None + 
req_event: WorkflowEvent | None = None async for ev in wf.run_stream("do work"): - if isinstance(ev, RequestInfoEvent) and ev.request_type is MagenticPlanReviewRequest: + if ev.type == "request_info" and ev.request_type is MagenticPlanReviewRequest: req_event = ev assert req_event is not None assert isinstance(req_event.data, MagenticPlanReviewRequest) @@ -303,7 +303,7 @@ async def replan(self, magentic_context: MagenticContext) -> ChatMessage: # typ async for ev in wf.send_responses_streaming( responses={req_event.request_id: req_event.data.revise("Looks good; consider Z")} ): - if isinstance(ev, RequestInfoEvent) and ev.request_type is MagenticPlanReviewRequest: + if ev.type == "request_info" and ev.request_type is MagenticPlanReviewRequest: saw_second_review = True req_event = ev @@ -311,7 +311,7 @@ async def replan(self, magentic_context: MagenticContext) -> ChatMessage: # typ async for ev in wf.send_responses_streaming( responses={req_event.request_id: req_event.data.approve()} # type: ignore[union-attr] ): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: completed = True break @@ -342,7 +342,7 @@ async def test_magentic_orchestrator_round_limit_produces_partial_result(): None, ) assert idle_status is not None - # Check that we got workflow output via WorkflowOutputEvent + # Check that we got workflow output via WorkflowEvent with type "output" output_event = next((e for e in events if e.type == "output"), None) assert output_event is not None data = output_event.data @@ -366,9 +366,9 @@ async def test_magentic_checkpoint_resume_round_trip(): ) task_text = "checkpoint task" - req_event: RequestInfoEvent | None = None + req_event: WorkflowEvent | None = None async for ev in wf.run_stream(task_text): - if isinstance(ev, RequestInfoEvent) and ev.request_type is MagenticPlanReviewRequest: + if ev.type == "request_info" and ev.request_type is MagenticPlanReviewRequest: 
req_event = ev assert req_event is not None assert isinstance(req_event.data, MagenticPlanReviewRequest) @@ -388,7 +388,7 @@ async def test_magentic_checkpoint_resume_round_trip(): .build() ) - completed: WorkflowOutputEvent | None = None + completed: WorkflowEvent | None = None req_event = None async for event in wf_resume.run_stream( resume_checkpoint.checkpoint_id, @@ -583,9 +583,8 @@ async def _collect_agent_responses_setup(participant: AgentProtocol) -> list[Cha events: list[WorkflowEvent] = [] async for ev in wf.run_stream("task"): # plan review disabled events.append(ev) - if ev.type == "output": - break - if ev.type == "data" and isinstance(ev.data, AgentResponseUpdate): + # Capture streaming updates (type="output" with AgentResponseUpdate data) + if ev.type == "output" and isinstance(ev.data, AgentResponseUpdate): captured.append( ChatMessage( role=ev.data.role or "assistant", @@ -593,6 +592,9 @@ async def _collect_agent_responses_setup(participant: AgentProtocol) -> list[Cha author_name=ev.data.author_name, ) ) + # Break on final AgentResponse output + elif ev.type == "output" and isinstance(ev.data, AgentResponse): + break return captured @@ -644,7 +646,7 @@ async def test_magentic_checkpoint_resume_inner_loop_superstep(): .build() ) - completed: WorkflowOutputEvent | None = None + completed: WorkflowEvent | None = None async for event in resumed.run_stream(checkpoint_id=inner_loop_checkpoint.checkpoint_id): # type: ignore[reportUnknownMemberType] if event.type == "output": completed = event @@ -684,7 +686,7 @@ async def test_magentic_checkpoint_resume_from_saved_state(): .build() ) - completed: WorkflowOutputEvent | None = None + completed: WorkflowEvent | None = None async for event in resumed_workflow.run_stream(checkpoint_id=resumed_state.checkpoint_id): if event.type == "output": completed = event @@ -706,7 +708,7 @@ async def test_magentic_checkpoint_resume_rejects_participant_renames(): .build() ) - req_event: RequestInfoEvent | None = None + 
req_event: WorkflowEvent | None = None async for event in workflow.run_stream("task"): if event.type == "request_info" and event.request_type is MagenticPlanReviewRequest: req_event = event @@ -789,9 +791,9 @@ async def test_magentic_checkpoint_runtime_only() -> None: baseline_output: ChatMessage | None = None async for ev in wf.run_stream("runtime checkpoint test", checkpoint_storage=storage): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": baseline_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state in ( + if ev.type == "status" and ev.state in ( WorkflowRunState.IDLE, WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, ): @@ -827,9 +829,9 @@ async def test_magentic_checkpoint_runtime_overrides_buildtime() -> None: baseline_output: ChatMessage | None = None async for ev in wf.run_stream("override test", checkpoint_storage=runtime_storage): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": baseline_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state in ( + if ev.type == "status" and ev.state in ( WorkflowRunState.IDLE, WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, ): @@ -994,7 +996,7 @@ def create_agent() -> StubAgent: # Factory should be called during build assert call_count == 1 - outputs: list[WorkflowOutputEvent] = [] + outputs: list[WorkflowEvent] = [] async for event in workflow.run_stream("test task"): if event.type == "output": outputs.append(event) @@ -1041,7 +1043,7 @@ def create_agent() -> StubAgent: .build() ) - outputs: list[WorkflowOutputEvent] = [] + outputs: list[WorkflowEvent] = [] async for event in workflow.run_stream("checkpoint test"): if event.type == "output": outputs.append(event) @@ -1098,7 +1100,7 @@ def manager_factory() -> MagenticManagerBase: # Factory should be called during build assert factory_call_count == 1 - outputs: list[WorkflowOutputEvent] = [] + outputs: list[WorkflowEvent] = [] async for event in 
workflow.run_stream("test task"): if event.type == "output": outputs.append(event) diff --git a/python/packages/orchestrations/tests/test_sequential.py b/python/packages/orchestrations/tests/test_sequential.py index b6441ff592..7ee37b980d 100644 --- a/python/packages/orchestrations/tests/test_sequential.py +++ b/python/packages/orchestrations/tests/test_sequential.py @@ -15,9 +15,8 @@ Executor, TypeCompatibilityError, WorkflowContext, - WorkflowOutputEvent, + WorkflowEvent, WorkflowRunState, - WorkflowStatusEvent, handler, ) from agent_framework._workflows._checkpoint import InMemoryCheckpointStorage @@ -105,9 +104,9 @@ async def test_sequential_agents_append_to_context() -> None: completed = False output: list[ChatMessage] | None = None async for ev in wf.run_stream("hello sequential"): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: completed = True - elif isinstance(ev, WorkflowOutputEvent): + elif ev.type == "output": output = ev.data # type: ignore[assignment] if completed and output is not None: break @@ -138,9 +137,9 @@ def create_agent2() -> _EchoAgent: completed = False output: list[ChatMessage] | None = None async for ev in wf.run_stream("hello factories"): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: completed = True - elif isinstance(ev, WorkflowOutputEvent): + elif ev.type == "output": output = ev.data if completed and output is not None: break @@ -164,9 +163,9 @@ async def test_sequential_with_custom_executor_summary() -> None: completed = False output: list[ChatMessage] | None = None async for ev in wf.run_stream("topic X"): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: completed = True - elif isinstance(ev, WorkflowOutputEvent): + elif ev.type == "output": 
output = ev.data if completed and output is not None: break @@ -195,9 +194,9 @@ def create_summarizer() -> _SummarizerExec: completed = False output: list[ChatMessage] | None = None async for ev in wf.run_stream("topic Y"): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: completed = True - elif isinstance(ev, WorkflowOutputEvent): + elif ev.type == "output": output = ev.data if completed and output is not None: break @@ -220,9 +219,9 @@ async def test_sequential_checkpoint_resume_round_trip() -> None: baseline_output: list[ChatMessage] | None = None async for ev in wf.run_stream("checkpoint sequential"): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": baseline_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: break assert baseline_output is not None @@ -241,9 +240,9 @@ async def test_sequential_checkpoint_resume_round_trip() -> None: resumed_output: list[ChatMessage] | None = None async for ev in wf_resume.run_stream(checkpoint_id=resume_checkpoint.checkpoint_id): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": resumed_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state in ( + if ev.type == "status" and ev.state in ( WorkflowRunState.IDLE, WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, ): @@ -263,9 +262,9 @@ async def test_sequential_checkpoint_runtime_only() -> None: baseline_output: list[ChatMessage] | None = None async for ev in wf.run_stream("runtime checkpoint test", checkpoint_storage=storage): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": baseline_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == 
WorkflowRunState.IDLE: break assert baseline_output is not None @@ -284,9 +283,9 @@ async def test_sequential_checkpoint_runtime_only() -> None: resumed_output: list[ChatMessage] | None = None async for ev in wf_resume.run_stream(checkpoint_id=resume_checkpoint.checkpoint_id, checkpoint_storage=storage): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": resumed_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state in ( + if ev.type == "status" and ev.state in ( WorkflowRunState.IDLE, WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, ): @@ -312,9 +311,9 @@ async def test_sequential_checkpoint_runtime_overrides_buildtime() -> None: baseline_output: list[ChatMessage] | None = None async for ev in wf.run_stream("override test", checkpoint_storage=runtime_storage): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": baseline_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: break assert baseline_output is not None @@ -340,9 +339,9 @@ def create_agent2() -> _EchoAgent: baseline_output: list[ChatMessage] | None = None async for ev in wf.run_stream("checkpoint with factories"): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": baseline_output = ev.data - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: break assert baseline_output is not None @@ -362,9 +361,9 @@ def create_agent2() -> _EchoAgent: resumed_output: list[ChatMessage] | None = None async for ev in wf_resume.run_stream(checkpoint_id=resume_checkpoint.checkpoint_id): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": resumed_output = ev.data - if isinstance(ev, WorkflowStatusEvent) and ev.state in ( + if ev.type == "status" and ev.state in ( WorkflowRunState.IDLE, 
WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, ): @@ -398,9 +397,9 @@ def create_agent() -> _EchoAgent: completed = False output: list[ChatMessage] | None = None async for ev in wf.run_stream("test factories timing"): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: completed = True - elif isinstance(ev, WorkflowOutputEvent): + elif ev.type == "output": output = ev.data # type: ignore[assignment] if completed and output is not None: break diff --git a/python/samples/getting_started/orchestrations/README.md b/python/samples/getting_started/orchestrations/README.md index d1fb0e0ef0..14f0be5fad 100644 --- a/python/samples/getting_started/orchestrations/README.md +++ b/python/samples/getting_started/orchestrations/README.md @@ -57,9 +57,9 @@ from agent_framework.orchestrations import ( **Sequential orchestration note**: Sequential orchestration uses a few small adapter nodes for plumbing: - `input-conversation` normalizes input to `list[ChatMessage]` - `to-conversation:` converts agent responses into the shared conversation -- `complete` publishes the final `WorkflowOutputEvent` +- `complete` publishes the final output event (type='output') -These may appear in event streams (ExecutorInvoke/Completed). They're analogous to concurrent's dispatcher and aggregator and can be ignored if you only care about agent activity. +These may appear in event streams (executor_invoked/executor_completed). They're analogous to concurrent's dispatcher and aggregator and can be ignored if you only care about agent activity. 
## Environment Variables diff --git a/python/samples/getting_started/orchestrations/group_chat_agent_manager.py b/python/samples/getting_started/orchestrations/group_chat_agent_manager.py index d6d9bc5606..aa3380fa84 100644 --- a/python/samples/getting_started/orchestrations/group_chat_agent_manager.py +++ b/python/samples/getting_started/orchestrations/group_chat_agent_manager.py @@ -73,7 +73,7 @@ async def main() -> None: # The agent orchestrator will intelligently decide when to end before this limit but just in case .with_termination_condition(lambda messages: sum(1 for msg in messages if msg.role == "assistant") >= 4) # Enable intermediate outputs to observe the conversation as it unfolds - # Intermediate outputs will be emitted as WorkflowOutputEvent events + # Intermediate outputs will be emitted as WorkflowEvent with type "output" events .with_intermediate_outputs() .build() ) diff --git a/python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py b/python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py index 6ceeb613f6..55cb63434c 100644 --- a/python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py +++ b/python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py @@ -213,7 +213,7 @@ async def main() -> None: .participants([farmer, developer, teacher, activist, spiritual_leader, artist, immigrant, doctor]) .with_termination_condition(lambda messages: sum(1 for msg in messages if msg.role == "assistant") >= 10) # Enable intermediate outputs to observe the conversation as it unfolds - # Intermediate outputs will be emitted as WorkflowOutputEvent events + # Intermediate outputs will be emitted as WorkflowEvent with type "output" events .with_intermediate_outputs() .build() ) diff --git a/python/samples/getting_started/orchestrations/group_chat_simple_selector.py b/python/samples/getting_started/orchestrations/group_chat_simple_selector.py index 93f2e7ef4f..de3907d9c4 
100644 --- a/python/samples/getting_started/orchestrations/group_chat_simple_selector.py +++ b/python/samples/getting_started/orchestrations/group_chat_simple_selector.py @@ -91,7 +91,7 @@ async def main() -> None: # have nothing to add, but for demo purposes we want to see at least one full round of interaction. .with_termination_condition(lambda conversation: len(conversation) >= 6) # Enable intermediate outputs to observe the conversation as it unfolds - # Intermediate outputs will be emitted as WorkflowOutputEvent events + # Intermediate outputs will be emitted as WorkflowEvent with type "output" events .with_intermediate_outputs() .build() ) diff --git a/python/samples/getting_started/orchestrations/magentic_human_plan_review.py b/python/samples/getting_started/orchestrations/magentic_human_plan_review.py index 2413a4c47e..e13706e8ce 100644 --- a/python/samples/getting_started/orchestrations/magentic_human_plan_review.py +++ b/python/samples/getting_started/orchestrations/magentic_human_plan_review.py @@ -9,9 +9,7 @@ AgentResponseUpdate, ChatAgent, ChatMessage, - RequestInfoEvent, WorkflowEvent, - WorkflowOutputEvent, ) from agent_framework.openai import OpenAIChatClient from agent_framework.orchestrations import MagenticBuilder, MagenticPlanReviewRequest, MagenticPlanReviewResponse @@ -46,10 +44,10 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str requests: dict[str, MagenticPlanReviewRequest] = {} async for event in stream: - if isinstance(event, RequestInfoEvent) and event.request_type is MagenticPlanReviewRequest: + if event.type == "request_info" and event.request_type is MagenticPlanReviewRequest: requests[event.request_id] = cast(MagenticPlanReviewRequest, event.data) - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": data = event.data if isinstance(data, AgentResponseUpdate): rid = data.response_id @@ -129,7 +127,7 @@ async def main() -> None: # Request human input for plan review 
.with_plan_review() # Enable intermediate outputs to observe the conversation as it unfolds - # Intermediate outputs will be emitted as WorkflowOutputEvent events + # Intermediate outputs will be emitted as WorkflowEvent with type "output" .with_intermediate_outputs() .build() ) diff --git a/python/samples/getting_started/workflows/README.md b/python/samples/getting_started/workflows/README.md index 3e4b6f0a72..1d16f8f24b 100644 --- a/python/samples/getting_started/workflows/README.md +++ b/python/samples/getting_started/workflows/README.md @@ -102,7 +102,7 @@ Tool approval samples demonstrate using `@tool(approval_mode="always_require")` | Sample | File | Concepts | | ------------------------ | -------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | -| Executor I/O Observation | [observability/executor_io_observation.py](./observability/executor_io_observation.py) | Observe executor input/output data via ExecutorInvokedEvent and ExecutorCompletedEvent without modifying executor code | +| Executor I/O Observation | [observability/executor_io_observation.py](./observability/executor_io_observation.py) | Observe executor input/output data via executor_invoked events (type='executor_invoked') and executor_completed events (type='executor_completed') without modifying executor code | For additional observability samples in Agent Framework, see the [observability getting started samples](../observability/README.md). The [sample](../observability/workflow_observability.py) demonstrates integrating observability into workflows. 
@@ -162,8 +162,8 @@ Sequential orchestration uses a few small adapter nodes for plumbing: - "input-conversation" normalizes input to `list[ChatMessage]` - "to-conversation:" converts agent responses into the shared conversation -- "complete" publishes the final `WorkflowOutputEvent` - These may appear in event streams (ExecutorInvoke/Completed). They’re analogous to +- "complete" publishes the final output event (type='output') + These may appear in event streams (executor_invoked/executor_completed). They're analogous to concurrent’s dispatcher and aggregator and can be ignored if you only care about agent activity. ### Environment Variables diff --git a/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py b/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py index 61b23b5712..51926f14b6 100644 --- a/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py +++ b/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py @@ -36,7 +36,7 @@ -> Coordinator -> final_editor_agent -> Coordinator -> output The writer agent calls tools to gather product facts before drafting copy. A custom executor -packages the draft and emits a RequestInfoEvent so a human can comment, then replays the human +packages the draft and emits a request_info event (type='request_info') so a human can comment, then replays the human guidance back into the conversation before the final editor agent produces the polished output. 
Demonstrates: diff --git a/python/samples/getting_started/workflows/agents/concurrent_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/concurrent_workflow_as_agent.py index 75e7e07573..4afd08c4dc 100644 --- a/python/samples/getting_started/workflows/agents/concurrent_workflow_as_agent.py +++ b/python/samples/getting_started/workflows/agents/concurrent_workflow_as_agent.py @@ -20,7 +20,7 @@ Prerequisites: - Azure OpenAI access configured for AzureOpenAIChatClient (use az login + env vars) -- Familiarity with Workflow events (WorkflowOutputEvent) +- Familiarity with Workflow events (WorkflowEvent with type "output") """ diff --git a/python/samples/getting_started/workflows/agents/group_chat_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/group_chat_workflow_as_agent.py index fa227826d0..6b6737c6c6 100644 --- a/python/samples/getting_started/workflows/agents/group_chat_workflow_as_agent.py +++ b/python/samples/getting_started/workflows/agents/group_chat_workflow_as_agent.py @@ -42,7 +42,7 @@ async def main() -> None: ) .participants([researcher, writer]) # Enable intermediate outputs to observe the conversation as it unfolds - # Intermediate outputs will be emitted as WorkflowOutputEvent events + # Intermediate outputs will be emitted as WorkflowEvent with type "output" events .with_intermediate_outputs() .build() ) diff --git a/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py index 4e5b700e66..4d78fca6dc 100644 --- a/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py +++ b/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py @@ -62,7 +62,7 @@ async def main() -> None: max_reset_count=2, ) # Enable intermediate outputs to observe the conversation as it unfolds - # Intermediate outputs will be emitted as WorkflowOutputEvent events + # Intermediate outputs will be 
emitted as WorkflowEvent with type "output" events .with_intermediate_outputs() .build() ) diff --git a/python/samples/getting_started/workflows/checkpoint/checkpoint_with_resume.py b/python/samples/getting_started/workflows/checkpoint/checkpoint_with_resume.py index 1f8a3d28fb..166892ad57 100644 --- a/python/samples/getting_started/workflows/checkpoint/checkpoint_with_resume.py +++ b/python/samples/getting_started/workflows/checkpoint/checkpoint_with_resume.py @@ -24,22 +24,25 @@ """ import asyncio +import sys from dataclasses import dataclass from random import random -from typing import Any, override +from typing import Any from agent_framework import ( - WorkflowEvent, Executor, InMemoryCheckpointStorage, - SuperStepCompletedEvent, WorkflowBuilder, WorkflowCheckpoint, WorkflowContext, - handler, ) +if sys.version_info >= (3, 12): + from typing import override # type: ignore # pragma: no cover +else: + from typing_extensions import override # type: ignore[import] # pragma: no cover + @dataclass class ComputeTask: @@ -130,9 +133,9 @@ async def main(): if event.type == "output": output = event.data break - if isinstance(event, SuperStepCompletedEvent) and random() < 0.5: + if event.type == "superstep_completed" and random() < 0.5: # Randomly simulate system interruptions - # The `SuperStepCompletedEvent` ensures we only interrupt after + # The type="superstep_completed" event ensures we only interrupt after # the current super-step is fully complete and checkpointed. # If we interrupt mid-step, the workflow may resume from an earlier point. print("\n** Simulating workflow interruption. Stopping execution. 
**") diff --git a/python/samples/getting_started/workflows/composition/sub_workflow_parallel_requests.py b/python/samples/getting_started/workflows/composition/sub_workflow_parallel_requests.py index 0959f591f0..58ee575684 100644 --- a/python/samples/getting_started/workflows/composition/sub_workflow_parallel_requests.py +++ b/python/samples/getting_started/workflows/composition/sub_workflow_parallel_requests.py @@ -3,16 +3,16 @@ import asyncio import uuid from dataclasses import dataclass -from typing import Literal +from typing import Any, Literal from agent_framework import ( Executor, - RequestInfoEvent, SubWorkflowRequestMessage, SubWorkflowResponseMessage, Workflow, WorkflowBuilder, WorkflowContext, + WorkflowEvent, WorkflowExecutor, handler, response_handler, @@ -192,7 +192,7 @@ def __init__(self, id: str) -> None: super().__init__(id) self._cache: dict[str, int] = {"cpu": 10, "memory": 50, "disk": 100} # Record pending requests to match responses - self._pending_requests: dict[str, RequestInfoEvent] = {} + self._pending_requests: dict[str, WorkflowEvent[Any]] = {} async def _handle_resource_request(self, request: ResourceRequest) -> ResourceResponse | None: """Allocates resources based on request and available cache.""" @@ -207,7 +207,7 @@ async def handle_subworkflow_request( self, request: SubWorkflowRequestMessage, ctx: WorkflowContext[SubWorkflowResponseMessage] ) -> None: """Handles requests from sub-workflows.""" - source_event: RequestInfoEvent = request.source_event + source_event: WorkflowEvent[Any] = request.source_event if not isinstance(source_event.data, ResourceRequest): return @@ -246,14 +246,14 @@ def __init__(self, id: str) -> None: "disk": 1000, # Liberal disk policy } # Record pending requests to match responses - self._pending_requests: dict[str, RequestInfoEvent] = {} + self._pending_requests: dict[str, WorkflowEvent[Any]] = {} @handler async def handle_subworkflow_request( self, request: SubWorkflowRequestMessage, ctx: 
WorkflowContext[SubWorkflowResponseMessage] ) -> None: """Handles requests from sub-workflows.""" - source_event: RequestInfoEvent = request.source_event + source_event: WorkflowEvent[Any] = request.source_event if not isinstance(source_event.data, PolicyRequest): return diff --git a/python/samples/getting_started/workflows/control-flow/sequential_streaming.py b/python/samples/getting_started/workflows/control-flow/sequential_streaming.py index cf2943c32a..0d1f1de89f 100644 --- a/python/samples/getting_started/workflows/control-flow/sequential_streaming.py +++ b/python/samples/getting_started/workflows/control-flow/sequential_streaming.py @@ -14,7 +14,7 @@ Purpose: Show how to declare executors with the @executor decorator, connect them with WorkflowBuilder, pass intermediate values using ctx.send_message, and yield final output using ctx.yield_output(). -Demonstrate how streaming exposes ExecutorInvokedEvent and ExecutorCompletedEvent for observability. +Demonstrate how streaming exposes executor_invoked events (type='executor_invoked') and executor_completed events (type='executor_completed') for observability. Prerequisites: - No external services required. 
@@ -73,11 +73,11 @@ async def main(): """ Sample Output: - Event: ExecutorInvokedEvent(executor_id=upper_case_executor) - Event: ExecutorCompletedEvent(executor_id=upper_case_executor) - Event: ExecutorInvokedEvent(executor_id=reverse_text_executor) - Event: ExecutorCompletedEvent(executor_id=reverse_text_executor) - Event: WorkflowOutputEvent(data='DLROW OLLEH', executor_id=reverse_text_executor) + Event: executor_invoked event (type='executor_invoked', executor_id=upper_case_executor) + Event: executor_completed event (type='executor_completed', executor_id=upper_case_executor) + Event: executor_invoked event (type='executor_invoked', executor_id=reverse_text_executor) + Event: executor_completed event (type='executor_completed', executor_id=reverse_text_executor) + Event: output event (type='output', data='DLROW OLLEH', executor_id=reverse_text_executor) Workflow completed with result: DLROW OLLEH """ diff --git a/python/samples/getting_started/workflows/control-flow/simple_loop.py b/python/samples/getting_started/workflows/control-flow/simple_loop.py index 348a014f9f..ce9f0b2f28 100644 --- a/python/samples/getting_started/workflows/control-flow/simple_loop.py +++ b/python/samples/getting_started/workflows/control-flow/simple_loop.py @@ -9,9 +9,9 @@ ChatAgent, ChatMessage, Executor, - ExecutorCompletedEvent, WorkflowBuilder, WorkflowContext, + WorkflowEvent, handler, ) from agent_framework.azure import AzureOpenAIChatClient @@ -143,7 +143,7 @@ async def main(): # Step 2: Run the workflow and print the events. 
iterations = 0 async for event in workflow.run_stream(NumberSignal.INIT): - if isinstance(event, ExecutorCompletedEvent) and event.executor_id == "guess_number": + if event.type == "executor_completed" and event.executor_id == "guess_number": iterations += 1 print(f"Event: {event}") diff --git a/python/samples/getting_started/workflows/human-in-the-loop/agents_with_HITL.py b/python/samples/getting_started/workflows/human-in-the-loop/agents_with_HITL.py index d2db9ac1c7..fafabfb540 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/agents_with_HITL.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/agents_with_HITL.py @@ -11,12 +11,9 @@ AgentResponseUpdate, ChatMessage, Executor, - RequestInfoEvent, - Role, WorkflowBuilder, WorkflowContext, WorkflowEvent, - WorkflowOutputEvent, handler, response_handler, ) @@ -30,13 +27,13 @@ Pipeline layout: writer_agent -> Coordinator -> writer_agent -> Coordinator -> final_editor_agent -> Coordinator -> output -The writer agent drafts marketing copy. A custom executor emits a RequestInfoEvent so a human can comment, -then relays the human guidance back into the conversation before the final editor agent produces the polished -output. +The writer agent drafts marketing copy. A custom executor emits a request_info event (type='request_info') so a +human can comment, then relays the human guidance back into the conversation before the final editor agent +produces the polished output. Demonstrates: - Capturing agent responses in a custom executor. -- Emitting RequestInfoEvent to request human input. +- Emitting request_info events (type='request_info') to request human input. - Handling human feedback and routing it to the appropriate agents. Prerequisites: @@ -103,8 +100,7 @@ async def on_human_feedback( # Human approved the draft as-is; forward it unchanged. 
await ctx.send_message( AgentExecutorRequest( - messages=original_request.conversation - + [ChatMessage(Role.USER, text="The draft is approved as-is.")], + messages=original_request.conversation + [ChatMessage("user", text="The draft is approved as-is.")], should_respond=True, ), target_id=self.final_editor_name, @@ -119,7 +115,7 @@ async def on_human_feedback( "Rewrite the draft from the previous assistant message into a polished final version. " "Keep the response under 120 words and reflect any requested tone adjustments." ) - conversation.append(ChatMessage(Role.USER, text=instruction)) + conversation.append(ChatMessage("user", text=instruction)) await ctx.send_message( AgentExecutorRequest(messages=conversation, should_respond=True), target_id=self.writer_name ) @@ -132,9 +128,13 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str requests: list[tuple[str, DraftFeedbackRequest]] = [] async for event in stream: - if isinstance(event, RequestInfoEvent) and isinstance(event.data, DraftFeedbackRequest): + if ( + event.type == "request_info" + and isinstance(event.data, DraftFeedbackRequest) + and event.request_id is not None + ): requests.append((event.request_id, event.data)) - elif isinstance(event, WorkflowOutputEvent) and isinstance(event.data, AgentResponseUpdate): + elif event.type == "output" and isinstance(event.data, AgentResponseUpdate): # This workflow should only produce AgentResponseUpdate as outputs. # Streaming updates from an agent will be consecutive, because no two agents run simultaneously # in this workflow. So we can use last_author to format output nicely. 
diff --git a/python/samples/getting_started/workflows/human-in-the-loop/agents_with_approval_requests.py b/python/samples/getting_started/workflows/human-in-the-loop/agents_with_approval_requests.py index b82f41b545..fff5185a76 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/agents_with_approval_requests.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/agents_with_approval_requests.py @@ -3,7 +3,7 @@ import asyncio import json from dataclasses import dataclass -from typing import Annotated, Never +from typing import Annotated from agent_framework import ( AgentExecutorResponse, @@ -16,6 +16,7 @@ tool, ) from agent_framework.openai import OpenAIChatClient +from typing_extensions import Never """ Sample: Agents in a workflow with AI functions requiring approval @@ -46,7 +47,7 @@ Prerequisites: - Azure AI Agent Service configured, along with the required environment variables. - Authentication via azure-identity. Use AzureCliCredential and run az login before executing the sample. -- Basic familiarity with WorkflowBuilder, edges, events, RequestInfoEvent, and streaming runs. +- Basic familiarity with WorkflowBuilder, edges, events, request_info events (type='request_info'), and streaming runs. 
""" diff --git a/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py b/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py index d16663290e..126bea78b3 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py @@ -26,13 +26,13 @@ from typing import Any from agent_framework import ( + AgentExecutorResponse, AgentRequestInfoResponse, ChatMessage, - ConcurrentBuilder, WorkflowEvent, ) -from agent_framework._workflows._agent_executor import AgentExecutorResponse from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.orchestrations import ConcurrentBuilder from azure.identity import AzureCliCredential # Store chat client at module level for aggregator access @@ -96,8 +96,11 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str requests: dict[str, AgentExecutorResponse] = {} async for event in stream: - if event.type == "request_info" and isinstance(event.data, AgentExecutorResponse): - # Display agent output for review and potential modification + if ( + event.type == "request_info" + and isinstance(event.data, AgentExecutorResponse) + and event.request_id is not None + ): requests[event.request_id] = event.data if event.type == "output": diff --git a/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py b/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py index 5ae3e2070a..617570b641 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py @@ -30,10 +30,10 @@ AgentExecutorResponse, AgentRequestInfoResponse, ChatMessage, - GroupChatBuilder, WorkflowEvent, ) from agent_framework.azure import AzureOpenAIChatClient +from 
agent_framework.orchestrations import GroupChatBuilder from azure.identity import AzureCliCredential @@ -42,7 +42,11 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str requests: dict[str, AgentExecutorResponse] = {} async for event in stream: - if event.type == "request_info" and isinstance(event.data, AgentExecutorResponse): + if ( + event.type == "request_info" + and isinstance(event.data, AgentExecutorResponse) + and event.request_id is not None + ): requests[event.request_id] = event.data if event.type == "output": diff --git a/python/samples/getting_started/workflows/observability/executor_io_observation.py b/python/samples/getting_started/workflows/observability/executor_io_observation.py index 5214464947..1308640613 100644 --- a/python/samples/getting_started/workflows/observability/executor_io_observation.py +++ b/python/samples/getting_started/workflows/observability/executor_io_observation.py @@ -4,13 +4,10 @@ from typing import Any, cast from agent_framework import ( - WorkflowEvent, Executor, - ExecutorCompletedEvent, - ExecutorInvokedEvent, WorkflowBuilder, WorkflowContext, - + WorkflowEvent, handler, ) from typing_extensions import Never @@ -22,8 +19,8 @@ executor code. This is useful for debugging, logging, or building monitoring tools. 
What this example shows: -- ExecutorInvokedEvent.data contains the input message received by the executor -- ExecutorCompletedEvent.data contains the messages sent via ctx.send_message() +- executor_invoked events (type='executor_invoked') contain the input message in event.data +- executor_completed events (type='executor_completed') contain the messages sent via ctx.send_message() in event.data - How to generically observe all executor I/O through workflow streaming events This approach allows you to enable_instrumentation any workflow for observability without @@ -93,12 +90,12 @@ async def main() -> None: print("Running workflow with executor I/O observation...\n") async for event in workflow.run_stream("hello world"): - if isinstance(event, ExecutorInvokedEvent): + if event.type == "executor_invoked": # The input message received by the executor is in event.data print(f"[INVOKED] {event.executor_id}") print(f" Input: {format_io_data(event.data)}") - elif isinstance(event, ExecutorCompletedEvent): + elif event.type == "executor_completed": # Messages sent via ctx.send_message() are in event.data print(f"[COMPLETED] {event.executor_id}") if event.data: diff --git a/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py b/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py index 2595b2c2f7..3d9b1ea8de 100644 --- a/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py +++ b/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py @@ -4,17 +4,14 @@ from dataclasses import dataclass from agent_framework import ( - WorkflowEvent, # Core chat primitives to build LLM requests AgentExecutorRequest, # The message bundle sent to an AgentExecutor AgentExecutorResponse, # The structured result returned by an AgentExecutor ChatAgent, # Tracing event for agent execution steps ChatMessage, # Chat message structure Executor, # Base class for custom Python executors - ExecutorCompletedEvent, 
- ExecutorInvokedEvent, WorkflowBuilder, # Fluent builder for wiring the workflow graph WorkflowContext, # Per run context and event bus - # Event emitted when workflow yields output + WorkflowEvent, # Unified event class for workflow events handler, # Decorator to mark an Executor method as invokable ) from agent_framework.azure import AzureOpenAIChatClient @@ -141,10 +138,10 @@ async def main() -> None: # 3) Run with a single prompt and print progress plus the final consolidated output async for event in workflow.run_stream("We are launching a new budget-friendly electric bike for urban commuters."): - if isinstance(event, ExecutorInvokedEvent): + if event.type == "executor_invoked": # Show when executors are invoked and completed for lightweight observability. print(f"{event.executor_id} invoked") - elif isinstance(event, ExecutorCompletedEvent): + elif event.type == "executor_completed": print(f"{event.executor_id} completed") elif event.type == "output": print("===== Final Aggregated Output =====") diff --git a/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py index 3dbafc581e..44d1e496d4 100644 --- a/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py +++ b/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py @@ -34,7 +34,7 @@ Demonstrate: - Using set_select_speakers_func with agents that have approval-required tools. -- Handling RequestInfoEvent in group chat scenarios. +- Handling request_info events (type='request_info') in group chat scenarios. - Multi-round group chat with tool approval interruption and resumption. 
Prerequisites: diff --git a/python/uv.lock b/python/uv.lock index 1eba1ebdc7..dd87ebbce5 100644 --- a/python/uv.lock +++ b/python/uv.lock @@ -3213,7 +3213,7 @@ wheels = [ [[package]] name = "litellm" -version = "1.81.7" +version = "1.81.8" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohttp", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -3229,9 +3229,9 @@ dependencies = [ { name = "tiktoken", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "tokenizers", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/77/69/cfa8a1d68cd10223a9d9741c411e131aece85c60c29c1102d762738b3e5c/litellm-1.81.7.tar.gz", hash = "sha256:442ff38708383ebee21357b3d936e58938172bae892f03bc5be4019ed4ff4a17", size = 14039864, upload-time = "2026-02-03T19:43:10.633Z" } +sdist = { url = "https://files.pythonhosted.org/packages/eb/1d/e8f95dd1fc0eed36f2698ca82d8a0693d5388c6f2f1718f3f5ed472daaf4/litellm-1.81.8.tar.gz", hash = "sha256:5cc6547697748b8ca38d17d755662871da125df6e378cc987eaf2208a15626fb", size = 14066801, upload-time = "2026-02-05T05:56:03.37Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/60/95/8cecc7e6377171e4ac96f23d65236af8706d99c1b7b71a94c72206672810/litellm-1.81.7-py3-none-any.whl", hash = "sha256:58466c88c3289c6a3830d88768cf8f307581d9e6c87861de874d1128bb2de90d", size = 12254178, upload-time = "2026-02-03T19:43:08.035Z" }, + { url = "https://files.pythonhosted.org/packages/d8/5a/6f391c2f251553dae98b6edca31c070d7e2291cef6153ae69e0688159093/litellm-1.81.8-py3-none-any.whl", hash = "sha256:78cca92f36bc6c267c191d1fe1e2630c812bff6daec32c58cade75748c2692f6", size = 12286316, upload-time = "2026-02-05T05:56:00.248Z" }, ] [package.optional-dependencies] @@ -3273,11 +3273,11 @@ wheels = [ [[package]] name = "litellm-proxy-extras" -version = 
"0.4.29" +version = "0.4.30" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/42/c5/9c4325452b3b3fc144e942f0f0e6582374d588f3159a0706594e3422943c/litellm_proxy_extras-0.4.29.tar.gz", hash = "sha256:1a8266911e0546f1e17e6714ca20b72e9fef47c1683f9c16399cf2d1786437a0", size = 23561, upload-time = "2026-01-31T23:13:58.707Z" } +sdist = { url = "https://files.pythonhosted.org/packages/83/a1/00d2e91a7a91335a7d7f43dfb8316142879782c22ef59eca5d0ced055bf0/litellm_proxy_extras-0.4.30.tar.gz", hash = "sha256:5d32f8dc3d37d36fb15ab6995fea706dd8a453ff7f12e70b47cba35e5368da10", size = 23752, upload-time = "2026-02-05T03:54:00.351Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b0/d6/7393367fdf4b65d80ba0c32d517743a7aa8975a36b32cc70a0352b9514aa/litellm_proxy_extras-0.4.29-py3-none-any.whl", hash = "sha256:c36c1b69675c61acccc6b61dd610eb37daeb72c6fd819461cefb5b0cc7e0550f", size = 50734, upload-time = "2026-01-31T23:13:56.986Z" }, + { url = "https://files.pythonhosted.org/packages/bd/80/5b7ae7b39a79ca79722dd9049b3b4227b4540cb97006c8ef26c43af74db8/litellm_proxy_extras-0.4.30-py3-none-any.whl", hash = "sha256:0b7df68f0968eb817462b847eaee81bba23d935adb2e84d2e342a77711887051", size = 51217, upload-time = "2026-02-05T03:54:02.128Z" }, ] [[package]] @@ -4057,7 +4057,7 @@ wheels = [ [[package]] name = "openai-agents" -version = "0.7.0" +version = "0.8.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "griffe", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -4068,9 +4068,9 @@ dependencies = [ { name = "types-requests", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "typing-extensions", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/f8/a2/63a5ff78d89fa0861fe461a7b91d2123315115dcbf2c3fdab051b99185e5/openai_agents-0.7.0.tar.gz", hash = "sha256:5a283e02ee0d7c0d869421de9918691711bf19d1b1dc4d2840548335f2d24de6", size = 2169530, upload-time = "2026-01-23T00:06:35.746Z" } +sdist = { url = "https://files.pythonhosted.org/packages/97/57/724c73f158dec760a6e689e2415ab1b85bc5ff21508d82af91d23c9580e9/openai_agents-0.8.0.tar.gz", hash = "sha256:0ea66356ace1e158b09ab173534cacbc435d4a06e3203d04978dd69531729fc3", size = 2342265, upload-time = "2026-02-05T02:51:52.293Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/a2/92/9cbbdd604f858056d4e4f105a1b99779128bae61b6a3681db0f035ef73b4/openai_agents-0.7.0-py3-none-any.whl", hash = "sha256:4446935a65d3bb1c2c1cd0546b1bc286ced9dde0adba947ab390b2e74802aa49", size = 288537, upload-time = "2026-01-23T00:06:33.78Z" }, + { url = "https://files.pythonhosted.org/packages/b5/61/7c590176c664845e75961a7755f58997b404fb633073a9ddba1151582033/openai_agents-0.8.0-py3-none-any.whl", hash = "sha256:1a8b63f10f8828fb5516fa4917ee26d03956893f8f09e38cfcf33ec60ffcd546", size = 373746, upload-time = "2026-02-05T02:51:50.501Z" }, ] [[package]] @@ -4605,11 +4605,11 @@ wheels = [ [[package]] name = "pip" -version = "26.0" +version = "26.0.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/44/c2/65686a7783a7c27a329706207147e82f23c41221ee9ae33128fc331670a0/pip-26.0.tar.gz", hash = "sha256:3ce220a0a17915972fbf1ab451baae1521c4539e778b28127efa79b974aff0fa", size = 1812654, upload-time = "2026-01-31T01:40:54.361Z" } +sdist = { url = "https://files.pythonhosted.org/packages/48/83/0d7d4e9efe3344b8e2fe25d93be44f64b65364d3c8d7bc6dc90198d5422e/pip-26.0.1.tar.gz", hash = "sha256:c4037d8a277c89b320abe636d59f91e6d0922d08a05b60e85e53b296613346d8", size = 1812747, upload-time = "2026-02-05T02:20:18.702Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/69/00/5ac7aa77688ec4d34148b423d34dc0c9bc4febe0d872a9a1ad9860b2f6f1/pip-26.0-py3-none-any.whl", hash = "sha256:98436feffb9e31bc9339cf369fd55d3331b1580b6a6f1173bacacddcf9c34754", size = 1787564, upload-time = "2026-01-31T01:40:52.252Z" }, + { url = "https://files.pythonhosted.org/packages/de/f0/c81e05b613866b76d2d1066490adf1a3dbc4ee9d9c839961c3fc8a6997af/pip-26.0.1-py3-none-any.whl", hash = "sha256:bdb1b08f4274833d62c1aa29e20907365a2ceb950410df15fc9521bad440122b", size = 1787723, upload-time = "2026-02-05T02:20:16.416Z" }, ] [[package]] @@ -4880,16 +4880,16 @@ wheels = [ [[package]] name = "protobuf" -version = "5.29.5" +version = "5.29.6" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/43/29/d09e70352e4e88c9c7a198d5645d7277811448d76c23b00345670f7c8a38/protobuf-5.29.5.tar.gz", hash = "sha256:bc1463bafd4b0929216c35f437a8e28731a2b7fe3d98bb77a600efced5a15c84", size = 425226, upload-time = "2025-05-28T23:51:59.82Z" } +sdist = { url = "https://files.pythonhosted.org/packages/7e/57/394a763c103e0edf87f0938dafcd918d53b4c011dfc5c8ae80f3b0452dbb/protobuf-5.29.6.tar.gz", hash = "sha256:da9ee6a5424b6b30fd5e45c5ea663aef540ca95f9ad99d1e887e819cdf9b8723", size = 425623, upload-time = "2026-02-04T22:54:40.584Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/5f/11/6e40e9fc5bba02988a214c07cf324595789ca7820160bfd1f8be96e48539/protobuf-5.29.5-cp310-abi3-win32.whl", hash = "sha256:3f1c6468a2cfd102ff4703976138844f78ebd1fb45f49011afc5139e9e283079", size = 422963, upload-time = "2025-05-28T23:51:41.204Z" }, - { url = "https://files.pythonhosted.org/packages/81/7f/73cefb093e1a2a7c3ffd839e6f9fcafb7a427d300c7f8aef9c64405d8ac6/protobuf-5.29.5-cp310-abi3-win_amd64.whl", hash = "sha256:3f76e3a3675b4a4d867b52e4a5f5b78a2ef9565549d4037e06cf7b0942b1d3fc", size = 434818, upload-time = "2025-05-28T23:51:44.297Z" }, - { url = 
"https://files.pythonhosted.org/packages/dd/73/10e1661c21f139f2c6ad9b23040ff36fee624310dc28fba20d33fdae124c/protobuf-5.29.5-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e38c5add5a311f2a6eb0340716ef9b039c1dfa428b28f25a7838ac329204a671", size = 418091, upload-time = "2025-05-28T23:51:45.907Z" }, - { url = "https://files.pythonhosted.org/packages/6c/04/98f6f8cf5b07ab1294c13f34b4e69b3722bb609c5b701d6c169828f9f8aa/protobuf-5.29.5-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:fa18533a299d7ab6c55a238bf8629311439995f2e7eca5caaff08663606e9015", size = 319824, upload-time = "2025-05-28T23:51:47.545Z" }, - { url = "https://files.pythonhosted.org/packages/85/e4/07c80521879c2d15f321465ac24c70efe2381378c00bf5e56a0f4fbac8cd/protobuf-5.29.5-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:63848923da3325e1bf7e9003d680ce6e14b07e55d0473253a690c3a8b8fd6e61", size = 319942, upload-time = "2025-05-28T23:51:49.11Z" }, - { url = "https://files.pythonhosted.org/packages/7e/cc/7e77861000a0691aeea8f4566e5d3aa716f2b1dece4a24439437e41d3d25/protobuf-5.29.5-py3-none-any.whl", hash = "sha256:6cf42630262c59b2d8de33954443d94b746c952b01434fc58a417fdbd2e84bd5", size = 172823, upload-time = "2025-05-28T23:51:58.157Z" }, + { url = "https://files.pythonhosted.org/packages/d4/88/9ee58ff7863c479d6f8346686d4636dd4c415b0cbeed7a6a7d0617639c2a/protobuf-5.29.6-cp310-abi3-win32.whl", hash = "sha256:62e8a3114992c7c647bce37dcc93647575fc52d50e48de30c6fcb28a6a291eb1", size = 423357, upload-time = "2026-02-04T22:54:25.805Z" }, + { url = "https://files.pythonhosted.org/packages/1c/66/2dc736a4d576847134fb6d80bd995c569b13cdc7b815d669050bf0ce2d2c/protobuf-5.29.6-cp310-abi3-win_amd64.whl", hash = "sha256:7e6ad413275be172f67fdee0f43484b6de5a904cc1c3ea9804cb6fe2ff366eda", size = 435175, upload-time = "2026-02-04T22:54:28.592Z" }, + { url = "https://files.pythonhosted.org/packages/06/db/49b05966fd208ae3f44dcd33837b6243b4915c57561d730a43f881f24dea/protobuf-5.29.6-cp38-abi3-macosx_10_9_universal2.whl", hash 
= "sha256:b5a169e664b4057183a34bdc424540e86eea47560f3c123a0d64de4e137f9269", size = 418619, upload-time = "2026-02-04T22:54:30.266Z" }, + { url = "https://files.pythonhosted.org/packages/b7/d7/48cbf6b0c3c39761e47a99cb483405f0fde2be22cf00d71ef316ce52b458/protobuf-5.29.6-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:a8866b2cff111f0f863c1b3b9e7572dc7eaea23a7fae27f6fc613304046483e6", size = 320284, upload-time = "2026-02-04T22:54:31.782Z" }, + { url = "https://files.pythonhosted.org/packages/e3/dd/cadd6ec43069247d91f6345fa7a0d2858bef6af366dbd7ba8f05d2c77d3b/protobuf-5.29.6-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:e3387f44798ac1106af0233c04fb8abf543772ff241169946f698b3a9a3d3ab9", size = 320478, upload-time = "2026-02-04T22:54:32.909Z" }, + { url = "https://files.pythonhosted.org/packages/5a/cb/e3065b447186cb70aa65acc70c86baf482d82bf75625bf5a2c4f6919c6a3/protobuf-5.29.6-py3-none-any.whl", hash = "sha256:6b9edb641441b2da9fa8f428760fc136a49cf97a52076010cf22a2ff73438a86", size = 173126, upload-time = "2026-02-04T22:54:39.462Z" }, ] [[package]] From a6517e1ac13f8252b04b8511052d68917cdd9f65 Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Thu, 5 Feb 2026 16:26:15 +0900 Subject: [PATCH 03/12] Fixes --- .../core/agent_framework/_workflows/_agent.py | 46 ++--- .../agent_framework/_workflows/_executor.py | 4 +- .../_workflows/_runner_context.py | 2 +- .../agent_framework/_workflows/_workflow.py | 3 +- .../tests/workflow/test_agent_executor.py | 1 - .../workflow/test_checkpoint_validation.py | 1 - .../core/tests/workflow/test_executor.py | 26 +-- .../tests/workflow/test_full_conversation.py | 1 - .../test_request_info_and_response.py | 5 +- .../core/tests/workflow/test_runner.py | 2 +- .../core/tests/workflow/test_workflow.py | 28 ++- .../tests/workflow/test_workflow_states.py | 11 +- .../devui/agent_framework_devui/_executor.py | 11 +- .../devui/agent_framework_devui/_mapper.py | 185 +++++++++--------- python/packages/devui/tests/test_helpers.py | 3 +- 
.../lab/lightning/tests/test_lightning.py | 6 +- .../orchestrations/tests/test_concurrent.py | 1 - .../orchestrations/tests/test_sequential.py | 1 - 18 files changed, 159 insertions(+), 178 deletions(-) diff --git a/python/packages/core/agent_framework/_workflows/_agent.py b/python/packages/core/agent_framework/_workflows/_agent.py index dd0b7646ac..eb0a94f7d1 100644 --- a/python/packages/core/agent_framework/_workflows/_agent.py +++ b/python/packages/core/agent_framework/_workflows/_agent.py @@ -607,31 +607,33 @@ def _convert_workflow_event_to_agent_response_updates( if event.type == "request_info": # Store the pending request for later correlation request_id = event.request_id - if request_id: - self.pending_requests[request_id] = event + if not request_id: + raise ValueError("request_info event must have a request_id") - args = self.RequestInfoFunctionArgs(request_id=request_id, data=event.data).to_dict() + self.pending_requests[request_id] = event - function_call = Content.from_function_call( - call_id=request_id, - name=self.REQUEST_INFO_FUNCTION_NAME, - arguments=args, - ) - approval_request = Content.from_function_approval_request( - id=request_id, - function_call=function_call, - additional_properties={"request_id": request_id}, + args = self.RequestInfoFunctionArgs(request_id=request_id, data=event.data).to_dict() + + function_call = Content.from_function_call( + call_id=request_id, + name=self.REQUEST_INFO_FUNCTION_NAME, + arguments=args, + ) + approval_request = Content.from_function_approval_request( + id=request_id, + function_call=function_call, + additional_properties={"request_id": request_id}, + ) + return [ + AgentResponseUpdate( + contents=[function_call, approval_request], + role="assistant", + author_name=self.name, + response_id=response_id, + message_id=str(uuid.uuid4()), + created_at=datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ"), ) - return [ - AgentResponseUpdate( - contents=[function_call, approval_request], - 
role="assistant", - author_name=self.name, - response_id=response_id, - message_id=str(uuid.uuid4()), - created_at=datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ"), - ) - ] + ] # Ignore workflow-internal events return [] diff --git a/python/packages/core/agent_framework/_workflows/_executor.py b/python/packages/core/agent_framework/_workflows/_executor.py index d2f948b2c9..b1eaa05559 100644 --- a/python/packages/core/agent_framework/_workflows/_executor.py +++ b/python/packages/core/agent_framework/_workflows/_executor.py @@ -287,7 +287,9 @@ async def execute( sent_messages = context.get_sent_messages() yielded_outputs = context.get_yielded_outputs() completion_data = sent_messages + yielded_outputs - completed_event = WorkflowEvent.executor_completed(self.id, completion_data if completion_data else None) + completed_event = WorkflowEvent.executor_completed( + self.id, completion_data if completion_data else None + ) await context.add_event(completed_event) def _create_context_for_handler( diff --git a/python/packages/core/agent_framework/_workflows/_runner_context.py b/python/packages/core/agent_framework/_workflows/_runner_context.py index 696f9455b9..01b544d211 100644 --- a/python/packages/core/agent_framework/_workflows/_runner_context.py +++ b/python/packages/core/agent_framework/_workflows/_runner_context.py @@ -7,7 +7,7 @@ from copy import copy from dataclasses import dataclass from enum import Enum -from typing import TYPE_CHECKING, Any, Protocol, TypeVar, runtime_checkable +from typing import Any, Protocol, TypeVar, runtime_checkable from ._checkpoint import CheckpointStorage, WorkflowCheckpoint from ._checkpoint_encoding import decode_checkpoint_value, encode_checkpoint_value diff --git a/python/packages/core/agent_framework/_workflows/_workflow.py b/python/packages/core/agent_framework/_workflows/_workflow.py index 417e41bffa..8cb1f7ed7f 100644 --- a/python/packages/core/agent_framework/_workflows/_workflow.py +++ 
b/python/packages/core/agent_framework/_workflows/_workflow.py @@ -603,7 +603,8 @@ async def run( - With checkpoint_id: Used to load and restore the specified checkpoint - Without checkpoint_id: Enables checkpointing for this run, overriding build-time configuration - include_status_events: Whether to include status events (WorkflowEvent with type='status') in the result list. + include_status_events: Whether to include status events (WorkflowEvent with type='status') + in the result list. **kwargs: Additional keyword arguments to pass through to agent invocations. These are stored in State and accessible in @tool functions via the **kwargs parameter. diff --git a/python/packages/core/tests/workflow/test_agent_executor.py b/python/packages/core/tests/workflow/test_agent_executor.py index 8a8c2880b7..c35595778a 100644 --- a/python/packages/core/tests/workflow/test_agent_executor.py +++ b/python/packages/core/tests/workflow/test_agent_executor.py @@ -12,7 +12,6 @@ ChatMessage, ChatMessageStore, Content, - WorkflowEvent, WorkflowRunState, ) from agent_framework._workflows._agent_executor import AgentExecutorResponse diff --git a/python/packages/core/tests/workflow/test_checkpoint_validation.py b/python/packages/core/tests/workflow/test_checkpoint_validation.py index 35ea2c3753..c3aff3da35 100644 --- a/python/packages/core/tests/workflow/test_checkpoint_validation.py +++ b/python/packages/core/tests/workflow/test_checkpoint_validation.py @@ -7,7 +7,6 @@ WorkflowBuilder, WorkflowCheckpointException, WorkflowContext, - WorkflowEvent, WorkflowRunState, handler, ) diff --git a/python/packages/core/tests/workflow/test_executor.py b/python/packages/core/tests/workflow/test_executor.py index 32f2342ea3..0f0897b193 100644 --- a/python/packages/core/tests/workflow/test_executor.py +++ b/python/packages/core/tests/workflow/test_executor.py @@ -156,9 +156,7 @@ async def handle(self, text: str, ctx: WorkflowContext) -> None: workflow = WorkflowBuilder().add_edge(upper, 
collector).set_start_executor(upper).build() events = await workflow.run("hello world") - invoked_events = [ - e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_invoked" - ] + invoked_events = [e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_invoked"] assert len(invoked_events) == 2 @@ -172,7 +170,7 @@ async def handle(self, text: str, ctx: WorkflowContext) -> None: async def test_executor_completed_event_contains_sent_messages(): - """Test that executor_completed event (type='executor_completed') contains the messages sent via ctx.send_message().""" + """Test that event (type='executor_completed') contains the messages sent via ctx.send_message().""" class MultiSenderExecutor(Executor): @handler @@ -195,9 +193,7 @@ async def handle(self, text: str, ctx: WorkflowContext) -> None: workflow = WorkflowBuilder().add_edge(sender, collector).set_start_executor(sender).build() events = await workflow.run("hello") - completed_events = [ - e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_completed" - ] + completed_events = [e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_completed"] # Sender should have completed with the sent messages sender_completed = next(e for e in completed_events if e.executor_id == "sender") @@ -224,9 +220,7 @@ async def handle(self, text: str, ctx: WorkflowContext[Never, str]) -> None: workflow = WorkflowBuilder().set_start_executor(executor).build() events = await workflow.run("test") - completed_events = [ - e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_completed" - ] + completed_events = [e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_completed"] assert len(completed_events) == 1 assert completed_events[0].executor_id == "yielder" @@ -271,12 +265,8 @@ async def handle(self, response: Response, ctx: WorkflowContext) -> None: input_request = Request(query="hello", limit=3) events = await 
workflow.run(input_request) - invoked_events = [ - e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_invoked" - ] - completed_events = [ - e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_completed" - ] + invoked_events = [e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_invoked"] + completed_events = [e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_completed"] # Check processor invoked event has the Request object processor_invoked = next(e for e in invoked_events if e.executor_id == "processor") @@ -556,9 +546,7 @@ async def mutator(messages: list[ChatMessage], ctx: WorkflowContext[list[ChatMes events = await workflow.run(input_messages) # Find the invoked event for the Mutator executor - invoked_events = [ - e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_invoked" - ] + invoked_events = [e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_invoked"] assert len(invoked_events) == 1 mutator_invoked = invoked_events[0] diff --git a/python/packages/core/tests/workflow/test_full_conversation.py b/python/packages/core/tests/workflow/test_full_conversation.py index 10bd93f8de..4ae899d70c 100644 --- a/python/packages/core/tests/workflow/test_full_conversation.py +++ b/python/packages/core/tests/workflow/test_full_conversation.py @@ -18,7 +18,6 @@ Executor, WorkflowBuilder, WorkflowContext, - WorkflowEvent, WorkflowRunState, handler, ) diff --git a/python/packages/core/tests/workflow/test_request_info_and_response.py b/python/packages/core/tests/workflow/test_request_info_and_response.py index 64287b7488..473b7367b7 100644 --- a/python/packages/core/tests/workflow/test_request_info_and_response.py +++ b/python/packages/core/tests/workflow/test_request_info_and_response.py @@ -381,10 +381,7 @@ async def test_checkpoint_with_pending_request_info_events(self): # Should re-emit the pending request info event if event.type == 
"request_info" and event.request_id == request_info_event.request_id: restored_request_event = event - elif ( - event.type == "status" - and event.state == WorkflowRunState.IDLE_WITH_PENDING_REQUESTS - ): + elif event.type == "status" and event.state == WorkflowRunState.IDLE_WITH_PENDING_REQUESTS: completed = True assert completed, "Workflow should reach idle with pending requests state after restoration" diff --git a/python/packages/core/tests/workflow/test_runner.py b/python/packages/core/tests/workflow/test_runner.py index da4a2fcaff..7af722e45a 100644 --- a/python/packages/core/tests/workflow/test_runner.py +++ b/python/packages/core/tests/workflow/test_runner.py @@ -135,7 +135,7 @@ async def test_runner_run_until_convergence_not_completed(): match="Runner did not converge after 5 iterations.", ): async for event in runner.run_until_convergence(): - assert not event.type == "status" or event.state != WorkflowRunState.IDLE + assert event.type != "status" or event.state != WorkflowRunState.IDLE async def test_runner_already_running(): diff --git a/python/packages/core/tests/workflow/test_workflow.py b/python/packages/core/tests/workflow/test_workflow.py index 23600d3d87..378e6ef218 100644 --- a/python/packages/core/tests/workflow/test_workflow.py +++ b/python/packages/core/tests/workflow/test_workflow.py @@ -193,7 +193,8 @@ async def test_fan_out(): events = await workflow.run(NumberMessage(data=0)) - # Each executor will emit two events: executor_invoked (type='executor_invoked') and executor_completed (type='executor_completed') + # Each executor will emit two events: executor_invoked (type='executor_invoked') + # and executor_completed (type='executor_completed') # executor_b will also emit an output event (type='output') # Each superstep will emit a started event (type='started') and status event (type='status') # This workflow will converge in 2 supersteps because executor_c will send one more message @@ -217,7 +218,8 @@ async def 
test_fan_out_multiple_completed_events(): events = await workflow.run(NumberMessage(data=0)) - # Each executor will emit two events: executor_invoked (type='executor_invoked') and executor_completed (type='executor_completed') + # Each executor will emit two events: executor_invoked (type='executor_invoked') + # and executor_completed (type='executor_completed') # executor_b and executor_c will also emit an output event (type='output') # Each superstep will emit a started event (type='started') and status event (type='status') # This workflow will converge in 1 superstep because executor_a and executor_b will not send further messages @@ -245,7 +247,8 @@ async def test_fan_in(): events = await workflow.run(NumberMessage(data=0)) - # Each executor will emit two events: executor_invoked (type='executor_invoked') and executor_completed (type='executor_completed') + # Each executor will emit two events: executor_invoked (type='executor_invoked') + # and executor_completed (type='executor_completed') # aggregator will also emit an output event (type='output') # Each superstep will emit a started event (type='started') and status event (type='status') assert len(events) == 13 @@ -461,9 +464,7 @@ async def test_workflow_run_stream_from_checkpoint_with_responses( events.append(event) # Verify that the pending request event was emitted - assert next( - event for event in events if event.type == "request_info" and event.request_id == "request_123" - ) + assert next(event for event in events if event.type == "request_info" and event.request_id == "request_123") assert len(events) > 0 # Just ensure we processed some events @@ -730,10 +731,7 @@ async def test_workflow_with_simple_cycle_and_exit_condition(): from agent_framework import WorkflowEvent executor_events = [ - e - for e in events - if isinstance(e, WorkflowEvent) - and e.type in ("executor_invoked", "executor_completed") + e for e in events if isinstance(e, WorkflowEvent) and e.type in ("executor_invoked", 
"executor_completed") ] executor_ids = {e.executor_id for e in executor_events} assert "exec_a" in executor_ids, "Should have events from executor A" @@ -881,7 +879,7 @@ async def run_stream( async def test_agent_streaming_vs_non_streaming() -> None: - """Test run() emits output events with AgentResponse while run_stream() emits output events with AgentResponseUpdate.""" + """Test run() emits output events with AgentResponse while run_stream() emits events with AgentResponseUpdate.""" agent = _StreamingTestAgent(id="test_agent", name="TestAgent", reply_text="Hello World") agent_exec = AgentExecutor(agent, id="agent_exec") @@ -892,9 +890,7 @@ async def test_agent_streaming_vs_non_streaming() -> None: # Filter for agent events (result is a list of events) agent_run_events = [e for e in result if e.type == "output" and isinstance(e.data, AgentResponse)] - agent_update_events = [ - e for e in result if e.type == "output" and isinstance(e.data, AgentResponseUpdate) - ] + agent_update_events = [e for e in result if e.type == "output" and isinstance(e.data, AgentResponseUpdate)] # In non-streaming mode, should have output event with AgentResponse, no AgentResponseUpdate assert len(agent_run_events) == 1, "Expected exactly one output event with AgentResponse in non-streaming mode" @@ -910,9 +906,7 @@ async def test_agent_streaming_vs_non_streaming() -> None: # Filter for agent events agent_response = [ - cast(AgentResponse, e.data) - for e in stream_events - if e.type == "output" and isinstance(e.data, AgentResponse) + cast(AgentResponse, e.data) for e in stream_events if e.type == "output" and isinstance(e.data, AgentResponse) ] agent_response_updates = [ e.data for e in stream_events if e.type == "output" and isinstance(e.data, AgentResponseUpdate) diff --git a/python/packages/core/tests/workflow/test_workflow_states.py b/python/packages/core/tests/workflow/test_workflow_states.py index 2fddc912a2..e0d644bb3a 100644 --- 
a/python/packages/core/tests/workflow/test_workflow_states.py +++ b/python/packages/core/tests/workflow/test_workflow_states.py @@ -36,9 +36,7 @@ async def test_executor_failed_and_workflow_failed_events_streaming(): events.append(ev) # executor_failed event (type='executor_failed') should be emitted before workflow failed event - executor_failed_events = [ - e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_failed" - ] + executor_failed_events = [e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_failed"] assert executor_failed_events, "executor_failed event should be emitted when start executor fails" assert executor_failed_events[0].executor_id == "f" assert executor_failed_events[0].origin is WorkflowEventSource.FRAMEWORK @@ -96,9 +94,7 @@ async def test_executor_failed_event_from_second_executor_in_chain(): events.append(ev) # executor_failed event should be emitted for the failing executor - executor_failed_events = [ - e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_failed" - ] + executor_failed_events = [e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_failed"] assert executor_failed_events, "executor_failed event should be emitted when second executor fails" assert executor_failed_events[0].executor_id == "failing" assert executor_failed_events[0].origin is WorkflowEventSource.FRAMEWORK @@ -175,7 +171,8 @@ async def test_started_and_completed_event_origins(): # Check for IDLE status indicating completion idle_status = next( - (e for e in events if isinstance(e, WorkflowEvent) and e.type == "status" and e.state == WorkflowRunState.IDLE), None + (e for e in events if isinstance(e, WorkflowEvent) and e.type == "status" and e.state == WorkflowRunState.IDLE), + None, ) assert idle_status is not None assert idle_status.origin is WorkflowEventSource.FRAMEWORK diff --git a/python/packages/devui/agent_framework_devui/_executor.py 
b/python/packages/devui/agent_framework_devui/_executor.py index d8e5279b83..fee8433f79 100644 --- a/python/packages/devui/agent_framework_devui/_executor.py +++ b/python/packages/devui/agent_framework_devui/_executor.py @@ -264,7 +264,9 @@ async def execute_entity(self, entity_id: str, request: AgentFrameworkRequest) - # Log request_info event (type='request_info') for debugging HIL flow event_class = event.__class__.__name__ if hasattr(event, "__class__") else type(event).__name__ if event_class == "RequestInfoEvent": - logger.info("🔔 [EXECUTOR] request_info event (type='request_info') detected from workflow!") + logger.info( + "🔔 [EXECUTOR] request_info event (type='request_info') detected from workflow!" + ) logger.info(f" request_id: {getattr(event, 'request_id', 'N/A')}") logger.info(f" source_executor_id: {getattr(event, 'source_executor_id', 'N/A')}") logger.info(f" request_type: {getattr(event, 'request_type', 'N/A')}") @@ -526,7 +528,8 @@ async def _execute_workflow( logger.warning(f"Could not convert HIL responses to proper types: {e}") async for event in workflow.send_responses_streaming(hil_responses): - # Enrich new request_info events (type='request_info') that may come from subsequent HIL requests + # Enrich new request_info events (type='request_info') + # that may come from subsequent HIL requests if event.type == "request_info": self._enrich_request_info_event_with_response_schema(event, workflow) @@ -1024,7 +1027,9 @@ def _parse_raw_workflow_input(self, workflow: Any, raw_input: str) -> Any: return raw_input def _enrich_request_info_event_with_response_schema(self, event: Any, workflow: Any) -> None: - """Extract response type from workflow executor and attach response schema to request_info event (type='request_info'). + """Extract response type from workflow executor. + + Attach response schema to request_info event (type='request_info'). 
Args: event: request_info event (type='request_info') to enrich diff --git a/python/packages/devui/agent_framework_devui/_mapper.py b/python/packages/devui/agent_framework_devui/_mapper.py index a5eb22e8e7..38e50c676b 100644 --- a/python/packages/devui/agent_framework_devui/_mapper.py +++ b/python/packages/devui/agent_framework_devui/_mapper.py @@ -182,12 +182,15 @@ async def convert_event(self, raw_event: Any, request: AgentFrameworkRequest) -> # Handle WorkflowEvent with type='data' wrapping AgentResponseUpdate # This must be checked BEFORE generic WorkflowEvent check - if isinstance(raw_event, WorkflowEvent) and raw_event.type == "data": - # Extract the AgentResponseUpdate from the event's data attribute - if raw_event.data and isinstance(raw_event.data, AgentResponseUpdate): - # Preserve executor_id in context for proper output routing - context["current_executor_id"] = raw_event.executor_id - return await self._convert_agent_update(raw_event.data, context) + if ( + isinstance(raw_event, WorkflowEvent) + and raw_event.type == "data" + and raw_event.data + and isinstance(raw_event.data, AgentResponseUpdate) + ): + # Preserve executor_id in context for proper output routing + context["current_executor_id"] = raw_event.executor_id + return await self._convert_agent_update(raw_event.data, context) # Handle complete agent response (AgentResponse) - for non-streaming agent execution if isinstance(raw_event, AgentResponse): @@ -1002,98 +1005,98 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> # Executor-level events (output items) # Check for executor lifecycle events via event.type if event_type == "executor_invoked": - executor_id = getattr(event, "executor_id", "unknown") - item_id = f"exec_{executor_id}_{uuid4().hex[:8]}" - context[f"exec_item_{executor_id}"] = item_id - context["output_index"] = context.get("output_index", -1) + 1 + executor_id = getattr(event, "executor_id", "unknown") + item_id = 
f"exec_{executor_id}_{uuid4().hex[:8]}" + context[f"exec_item_{executor_id}"] = item_id + context["output_index"] = context.get("output_index", -1) + 1 - # Track current executor for routing Magentic agent events - # This allows MagenticAgentDeltaEvent to route to the executor's item - context["current_executor_id"] = executor_id + # Track current executor for routing Magentic agent events + # This allows MagenticAgentDeltaEvent to route to the executor's item + context["current_executor_id"] = executor_id - # Create ExecutorActionItem with proper type - executor_item = ExecutorActionItem( - type="executor_action", - id=item_id, - executor_id=executor_id, - status="in_progress", - metadata=getattr(event, "metadata", {}), - ) + # Create ExecutorActionItem with proper type + executor_item = ExecutorActionItem( + type="executor_action", + id=item_id, + executor_id=executor_id, + status="in_progress", + metadata=getattr(event, "metadata", {}), + ) - # Use our custom event type that accepts ExecutorActionItem - return [ - CustomResponseOutputItemAddedEvent( - type="response.output_item.added", - output_index=context["output_index"], - sequence_number=self._next_sequence(context), - item=executor_item, - ) - ] + # Use our custom event type that accepts ExecutorActionItem + return [ + CustomResponseOutputItemAddedEvent( + type="response.output_item.added", + output_index=context["output_index"], + sequence_number=self._next_sequence(context), + item=executor_item, + ) + ] if event_type == "executor_completed": - executor_id = getattr(event, "executor_id", "unknown") - item_id = context.get(f"exec_item_{executor_id}", f"exec_{executor_id}_unknown") - - # Clear current executor tracking when executor completes - if context.get("current_executor_id") == executor_id: - context.pop("current_executor_id", None) - - # Create ExecutorActionItem with completed status - # executor_completed event (type='executor_completed') uses 'data' field, not 'result' - # Serialize the result 
data to ensure it's JSON-serializable - # (AgentExecutorResponse contains AgentResponse/ChatMessage which are SerializationMixin) - raw_result = getattr(event, "data", None) - serialized_result = self._serialize_value(raw_result) if raw_result is not None else None - executor_item = ExecutorActionItem( - type="executor_action", - id=item_id, - executor_id=executor_id, - status="completed", - result=serialized_result, - ) + executor_id = getattr(event, "executor_id", "unknown") + item_id = context.get(f"exec_item_{executor_id}", f"exec_{executor_id}_unknown") + + # Clear current executor tracking when executor completes + if context.get("current_executor_id") == executor_id: + context.pop("current_executor_id", None) + + # Create ExecutorActionItem with completed status + # executor_completed event (type='executor_completed') uses 'data' field, not 'result' + # Serialize the result data to ensure it's JSON-serializable + # (AgentExecutorResponse contains AgentResponse/ChatMessage which are SerializationMixin) + raw_result = getattr(event, "data", None) + serialized_result = self._serialize_value(raw_result) if raw_result is not None else None + executor_item = ExecutorActionItem( + type="executor_action", + id=item_id, + executor_id=executor_id, + status="completed", + result=serialized_result, + ) - # Use our custom event type - return [ - CustomResponseOutputItemDoneEvent( - type="response.output_item.done", - output_index=context.get("output_index", 0), - sequence_number=self._next_sequence(context), - item=executor_item, - ) - ] + # Use our custom event type + return [ + CustomResponseOutputItemDoneEvent( + type="response.output_item.done", + output_index=context.get("output_index", 0), + sequence_number=self._next_sequence(context), + item=executor_item, + ) + ] if event_type == "executor_failed": - executor_id = getattr(event, "executor_id", "unknown") - item_id = context.get(f"exec_item_{executor_id}", f"exec_{executor_id}_unknown") - # executor_failed event 
(type='executor_failed') uses 'details' property (WorkflowErrorDetails), not 'error' - # This matches WorkflowEvent.details which returns self.data for executor_failed type - details = getattr(event, "details", None) - if details: - err_msg = getattr(details, "message", None) or str(details) - extra = getattr(details, "extra", None) - if extra: - err_msg = f"{err_msg} (extra: {extra})" - else: - err_msg = None - - # Create ExecutorActionItem with failed status - executor_item = ExecutorActionItem( - type="executor_action", - id=item_id, - executor_id=executor_id, - status="failed", - error={"message": err_msg} if err_msg else None, - ) + executor_id = getattr(event, "executor_id", "unknown") + item_id = context.get(f"exec_item_{executor_id}", f"exec_{executor_id}_unknown") + # executor_failed event (type='executor_failed') uses 'details' property (WorkflowErrorDetails) + # not 'error'. This matches WorkflowEvent.details which returns self.data for executor_failed type + details = getattr(event, "details", None) + if details: + err_msg = getattr(details, "message", None) or str(details) + extra = getattr(details, "extra", None) + if extra: + err_msg = f"{err_msg} (extra: {extra})" + else: + err_msg = None - # Use our custom event type - return [ - CustomResponseOutputItemDoneEvent( - type="response.output_item.done", - output_index=context.get("output_index", 0), - sequence_number=self._next_sequence(context), - item=executor_item, - ) - ] + # Create ExecutorActionItem with failed status + executor_item = ExecutorActionItem( + type="executor_action", + id=item_id, + executor_id=executor_id, + status="failed", + error={"message": err_msg} if err_msg else None, + ) + + # Use our custom event type + return [ + CustomResponseOutputItemDoneEvent( + type="response.output_item.done", + output_index=context.get("output_index", 0), + sequence_number=self._next_sequence(context), + item=executor_item, + ) + ] # Handle request_info events specially - emit as HIL event with 
schema if event_type == "request_info": @@ -1173,9 +1176,7 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> # Extract relevant data based on event type if event_type == "status": event_data["state"] = str(getattr(event, "state", "unknown")) - elif event_type == "warning": - event_data["message"] = str(getattr(event, "data", "")) - elif event_type == "error": + elif event_type == "warning" or event_type == "error": event_data["message"] = str(getattr(event, "data", "")) # Create a trace event for debugging diff --git a/python/packages/devui/tests/test_helpers.py b/python/packages/devui/tests/test_helpers.py index 6e06380826..05f5513f20 100644 --- a/python/packages/devui/tests/test_helpers.py +++ b/python/packages/devui/tests/test_helpers.py @@ -296,7 +296,8 @@ def create_agent_executor_response( executor_id: str = "test_executor", response_text: str = "Executor response", ) -> AgentExecutorResponse: - """Create an AgentExecutorResponse - the type that's nested in executor_completed event (type='executor_completed').data.""" + """Create an AgentExecutorResponse - the type that's nested in + executor_completed event (type='executor_completed').data.""" agent_response = create_agent_run_response(response_text) return AgentExecutorResponse( executor_id=executor_id, diff --git a/python/packages/lab/lightning/tests/test_lightning.py b/python/packages/lab/lightning/tests/test_lightning.py index 88075cd74a..d337a0de16 100644 --- a/python/packages/lab/lightning/tests/test_lightning.py +++ b/python/packages/lab/lightning/tests/test_lightning.py @@ -109,10 +109,8 @@ def workflow_two_agents(): async def test_openai_workflow_two_agents(workflow_two_agents: Workflow): events = await workflow_two_agents.run("Please analyze the quarterly sales data") - # Get all data events with AgentResponse - agent_outputs = [ - event.data for event in events if event.type == "data" and isinstance(event.data, AgentResponse) - ] + # Get all output events with 
AgentResponse + agent_outputs = [event.data for event in events if event.type == "output" and isinstance(event.data, AgentResponse)] # Check that we have outputs from both agents assert len(agent_outputs) == 2 diff --git a/python/packages/orchestrations/tests/test_concurrent.py b/python/packages/orchestrations/tests/test_concurrent.py index 5eb9d2ac67..2642f09349 100644 --- a/python/packages/orchestrations/tests/test_concurrent.py +++ b/python/packages/orchestrations/tests/test_concurrent.py @@ -10,7 +10,6 @@ ChatMessage, Executor, WorkflowContext, - WorkflowEvent, WorkflowRunState, handler, ) diff --git a/python/packages/orchestrations/tests/test_sequential.py b/python/packages/orchestrations/tests/test_sequential.py index 7ee37b980d..ec7d28548b 100644 --- a/python/packages/orchestrations/tests/test_sequential.py +++ b/python/packages/orchestrations/tests/test_sequential.py @@ -15,7 +15,6 @@ Executor, TypeCompatibilityError, WorkflowContext, - WorkflowEvent, WorkflowRunState, handler, ) From 61076d7b58ab58d3b73f140090e80f339c9d2bdf Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Thu, 5 Feb 2026 16:40:07 +0900 Subject: [PATCH 04/12] Cleanup --- .../core/agent_framework/_workflows/_agent.py | 18 ++++++----- .../agent_framework/_workflows/_checkpoint.py | 4 ++- .../core/agent_framework/_workflows/_edge.py | 12 ++++--- .../agent_framework/_workflows/_events.py | 32 ++++++++++--------- .../_workflows/_orchestration_state.py | 4 ++- .../_workflows/_runner_context.py | 14 ++++---- .../agent_framework/_workflows/_workflow.py | 12 +++---- .../_workflows/_workflow_context.py | 6 ++-- 8 files changed, 58 insertions(+), 44 deletions(-) diff --git a/python/packages/core/agent_framework/_workflows/_agent.py b/python/packages/core/agent_framework/_workflows/_agent.py index eb0a94f7d1..d4da0327cb 100644 --- a/python/packages/core/agent_framework/_workflows/_agent.py +++ b/python/packages/core/agent_framework/_workflows/_agent.py @@ -1,5 +1,7 @@ # Copyright (c) Microsoft. 
All rights reserved. +from __future__ import annotations + import json import logging import sys @@ -57,11 +59,11 @@ def to_json(self) -> str: return json.dumps(self.to_dict()) @classmethod - def from_dict(cls, payload: dict[str, Any]) -> "WorkflowAgent.RequestInfoFunctionArgs": + def from_dict(cls, payload: dict[str, Any]) -> WorkflowAgent.RequestInfoFunctionArgs: return cls(request_id=payload.get("request_id", ""), data=payload.get("data")) @classmethod - def from_json(cls, raw: str) -> "WorkflowAgent.RequestInfoFunctionArgs": + def from_json(cls, raw: str) -> WorkflowAgent.RequestInfoFunctionArgs: try: parsed: Any = json.loads(raw) except json.JSONDecodeError as exc: @@ -72,7 +74,7 @@ def from_json(cls, raw: str) -> "WorkflowAgent.RequestInfoFunctionArgs": def __init__( self, - workflow: "Workflow", + workflow: Workflow, *, id: str | None = None, name: str | None = None, @@ -109,15 +111,15 @@ def __init__( raise ValueError("Workflow's start executor cannot handle list[ChatMessage]") super().__init__(id=id, name=name, description=description, **kwargs) - self._workflow: "Workflow" = workflow + self._workflow: Workflow = workflow self._pending_requests: dict[str, WorkflowEvent[Any]] = {} @property - def workflow(self) -> "Workflow": + def workflow(self) -> Workflow: return self._workflow @property - def pending_requests(self) -> "dict[str, WorkflowEvent[Any]]": + def pending_requests(self) -> dict[str, WorkflowEvent[Any]]: return self._pending_requests # region Run Methods @@ -486,7 +488,7 @@ def _convert_workflow_events_to_agent_response( def _process_request_info_event( self, - event: "WorkflowEvent[Any]", + event: WorkflowEvent[Any], ) -> tuple[Content, Content]: """Convert a request_info event to FunctionCallContent and FunctionApprovalRequestContent. 
@@ -519,7 +521,7 @@ def _process_request_info_event( def _convert_workflow_event_to_agent_response_updates( self, response_id: str, - event: "WorkflowEvent[Any]", + event: WorkflowEvent[Any], ) -> list[AgentResponseUpdate]: """Convert a workflow event to a list of AgentResponseUpdate objects. diff --git a/python/packages/core/agent_framework/_workflows/_checkpoint.py b/python/packages/core/agent_framework/_workflows/_checkpoint.py index 874ded5568..0334ee3893 100644 --- a/python/packages/core/agent_framework/_workflows/_checkpoint.py +++ b/python/packages/core/agent_framework/_workflows/_checkpoint.py @@ -1,5 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. +from __future__ import annotations + import asyncio import json import logging @@ -59,7 +61,7 @@ def to_dict(self) -> dict[str, Any]: return asdict(self) @classmethod - def from_dict(cls, data: Mapping[str, Any]) -> "WorkflowCheckpoint": + def from_dict(cls, data: Mapping[str, Any]) -> WorkflowCheckpoint: return cls(**data) diff --git a/python/packages/core/agent_framework/_workflows/_edge.py b/python/packages/core/agent_framework/_workflows/_edge.py index 3212eff41a..02544ad3df 100644 --- a/python/packages/core/agent_framework/_workflows/_edge.py +++ b/python/packages/core/agent_framework/_workflows/_edge.py @@ -1,5 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. +from __future__ import annotations + import inspect import logging import uuid @@ -214,7 +216,7 @@ def to_dict(self) -> dict[str, Any]: return payload @classmethod - def from_dict(cls, data: dict[str, Any]) -> "Edge": + def from_dict(cls, data: dict[str, Any]) -> Edge: """Reconstruct an `Edge` from its serialised dictionary form. 
The deserialised edge will lack the executable predicate because we do @@ -311,7 +313,7 @@ class EdgeGroup(DictConvertible): from builtins import type as builtin_type - _TYPE_REGISTRY: ClassVar[dict[str, builtin_type["EdgeGroup"]]] = {} + _TYPE_REGISTRY: ClassVar[dict[str, builtin_type[EdgeGroup]]] = {} def __init__( self, @@ -415,7 +417,7 @@ class CustomGroup(EdgeGroup): return subclass @classmethod - def from_dict(cls, data: dict[str, Any]) -> "EdgeGroup": + def from_dict(cls, data: dict[str, Any]) -> EdgeGroup: """Hydrate the correct `EdgeGroup` subclass from serialised state. The method inspects the `type` field, allocates the corresponding class @@ -735,7 +737,7 @@ def to_dict(self) -> dict[str, Any]: return payload @classmethod - def from_dict(cls, data: dict[str, Any]) -> "SwitchCaseEdgeGroupCase": + def from_dict(cls, data: dict[str, Any]) -> SwitchCaseEdgeGroupCase: """Instantiate a case from its serialised dictionary payload. Examples: @@ -789,7 +791,7 @@ def to_dict(self) -> dict[str, Any]: return {"target_id": self.target_id, "type": self.type} @classmethod - def from_dict(cls, data: dict[str, Any]) -> "SwitchCaseEdgeGroupDefault": + def from_dict(cls, data: dict[str, Any]) -> SwitchCaseEdgeGroupDefault: """Recreate the default branch from its persisted form. Examples: diff --git a/python/packages/core/agent_framework/_workflows/_events.py b/python/packages/core/agent_framework/_workflows/_events.py index c36c946bbc..2eaa2aa0fd 100644 --- a/python/packages/core/agent_framework/_workflows/_events.py +++ b/python/packages/core/agent_framework/_workflows/_events.py @@ -1,5 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. 
+from __future__ import annotations + import sys import traceback as _traceback from collections.abc import Iterator @@ -81,7 +83,7 @@ def from_exception( *, executor_id: str | None = None, extra: dict[str, Any] | None = None, - ) -> "WorkflowErrorDetails": + ) -> WorkflowErrorDetails: tb = None try: tb = "".join(_traceback.format_exception(type(exc), exc, exc.__traceback__)) @@ -231,37 +233,37 @@ def __repr__(self) -> str: # ========================================================================== @classmethod - def started(cls, data: DataT | None = None) -> "WorkflowEvent[DataT]": + def started(cls, data: DataT | None = None) -> WorkflowEvent[DataT]: """Create a 'started' event when a workflow run begins.""" return cls("started", data=data) @classmethod - def status(cls, state: WorkflowRunState, data: DataT | None = None) -> "WorkflowEvent[DataT]": + def status(cls, state: WorkflowRunState, data: DataT | None = None) -> WorkflowEvent[DataT]: """Create a 'status' event for workflow state transitions.""" return cls("status", data=data, state=state) @classmethod - def failed(cls, details: WorkflowErrorDetails, data: DataT | None = None) -> "WorkflowEvent[DataT]": + def failed(cls, details: WorkflowErrorDetails, data: DataT | None = None) -> WorkflowEvent[DataT]: """Create a 'failed' event when a workflow terminates with error.""" return cls("failed", data=data, details=details) @classmethod - def warning(cls, message: str) -> "WorkflowEvent[str]": + def warning(cls, message: str) -> WorkflowEvent[str]: """Create a 'warning' event from user code.""" return WorkflowEvent("warning", data=message) @classmethod - def error(cls, exception: Exception) -> "WorkflowEvent[Exception]": + def error(cls, exception: Exception) -> WorkflowEvent[Exception]: """Create an 'error' event from user code.""" return WorkflowEvent("error", data=exception) @classmethod - def output(cls, executor_id: str, data: DataT) -> "WorkflowEvent[DataT]": + def output(cls, executor_id: str, data: 
DataT) -> WorkflowEvent[DataT]: """Create an 'output' event when an executor yields final output.""" return cls("output", executor_id=executor_id, data=data) @classmethod - def emit(cls, executor_id: str, data: DataT) -> "WorkflowEvent[DataT]": + def emit(cls, executor_id: str, data: DataT) -> WorkflowEvent[DataT]: """Create a 'data' event when an executor emits data during execution. This is the primary method for executors to emit typed data @@ -276,7 +278,7 @@ def request_info( source_executor_id: str, request_data: DataT, response_type: type[Any], - ) -> "WorkflowEvent[DataT]": + ) -> WorkflowEvent[DataT]: """Create a 'request_info' event when an executor requests external information.""" return cls( "request_info", @@ -288,27 +290,27 @@ def request_info( ) @classmethod - def superstep_started(cls, iteration: int, data: DataT | None = None) -> "WorkflowEvent[DataT]": + def superstep_started(cls, iteration: int, data: DataT | None = None) -> WorkflowEvent[DataT]: """Create a 'superstep_started' event when a superstep begins.""" return cls("superstep_started", iteration=iteration, data=data) @classmethod - def superstep_completed(cls, iteration: int, data: DataT | None = None) -> "WorkflowEvent[DataT]": + def superstep_completed(cls, iteration: int, data: DataT | None = None) -> WorkflowEvent[DataT]: """Create a 'superstep_completed' event when a superstep ends.""" return cls("superstep_completed", iteration=iteration, data=data) @classmethod - def executor_invoked(cls, executor_id: str, data: DataT | None = None) -> "WorkflowEvent[DataT]": + def executor_invoked(cls, executor_id: str, data: DataT | None = None) -> WorkflowEvent[DataT]: """Create an 'executor_invoked' event when an executor handler is called.""" return cls("executor_invoked", executor_id=executor_id, data=data) @classmethod - def executor_completed(cls, executor_id: str, data: DataT | None = None) -> "WorkflowEvent[DataT]": + def executor_completed(cls, executor_id: str, data: DataT | None = 
None) -> WorkflowEvent[DataT]: """Create an 'executor_completed' event when an executor handler completes.""" return cls("executor_completed", executor_id=executor_id, data=data) @classmethod - def executor_failed(cls, executor_id: str, details: WorkflowErrorDetails) -> "WorkflowEvent[WorkflowErrorDetails]": + def executor_failed(cls, executor_id: str, details: WorkflowErrorDetails) -> WorkflowEvent[WorkflowErrorDetails]: """Create an 'executor_failed' event when an executor handler raises an error.""" return WorkflowEvent("executor_failed", executor_id=executor_id, data=details, details=details) @@ -333,7 +335,7 @@ def to_dict(self) -> dict[str, Any]: } @classmethod - def from_dict(cls, data: dict[str, Any]) -> "WorkflowEvent[Any]": + def from_dict(cls, data: dict[str, Any]) -> WorkflowEvent[Any]: """Create a REQUEST_INFO event from a dictionary.""" for prop in ["data", "request_id", "source_executor_id", "request_type", "response_type"]: if prop not in data: diff --git a/python/packages/core/agent_framework/_workflows/_orchestration_state.py b/python/packages/core/agent_framework/_workflows/_orchestration_state.py index 8210d7d4bb..68e4bee873 100644 --- a/python/packages/core/agent_framework/_workflows/_orchestration_state.py +++ b/python/packages/core/agent_framework/_workflows/_orchestration_state.py @@ -6,6 +6,8 @@ across GroupChat, Handoff, and Magentic patterns. """ +from __future__ import annotations + from dataclasses import dataclass, field from typing import Any @@ -69,7 +71,7 @@ def to_dict(self) -> dict[str, Any]: return result @classmethod - def from_dict(cls, data: dict[str, Any]) -> "OrchestrationState": + def from_dict(cls, data: dict[str, Any]) -> OrchestrationState: """Deserialize from checkpointed dict. 
Args: diff --git a/python/packages/core/agent_framework/_workflows/_runner_context.py b/python/packages/core/agent_framework/_workflows/_runner_context.py index 01b544d211..75f25126ca 100644 --- a/python/packages/core/agent_framework/_workflows/_runner_context.py +++ b/python/packages/core/agent_framework/_workflows/_runner_context.py @@ -1,5 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. +from __future__ import annotations + import asyncio import logging import sys @@ -51,7 +53,7 @@ class Message: source_span_ids: list[str] | None = None # Publishing span IDs for linking from multiple sources # For response messages, the original request data - original_request_info_event: "WorkflowEvent[Any] | None" = None + original_request_info_event: WorkflowEvent[Any] | None = None # Backward compatibility properties @property @@ -77,7 +79,7 @@ def to_dict(self) -> dict[str, Any]: } @staticmethod - def from_dict(data: dict[str, Any]) -> "Message": + def from_dict(data: dict[str, Any]) -> Message: """Create a Message from a dictionary.""" # Validation if "data" not in data: @@ -254,7 +256,7 @@ async def apply_checkpoint(self, checkpoint: WorkflowCheckpoint) -> None: """ ... - async def add_request_info_event(self, event: "WorkflowEvent[Any]") -> None: + async def add_request_info_event(self, event: WorkflowEvent[Any]) -> None: """Add a request_info event to the context and track it for correlation. Args: @@ -271,7 +273,7 @@ async def send_request_info_response(self, request_id: str, response: Any) -> No """ ... - async def get_pending_request_info_events(self) -> "dict[str, WorkflowEvent[Any]]": + async def get_pending_request_info_events(self) -> dict[str, WorkflowEvent[Any]]: """Get the mapping of request IDs to their corresponding request_info events. 
Returns: @@ -470,7 +472,7 @@ def _get_serialized_workflow_state(self, state: State, iteration_count: int) -> "pending_request_info_events": serialized_pending_request_info_events, } - async def add_request_info_event(self, event: "WorkflowEvent[Any]") -> None: + async def add_request_info_event(self, event: WorkflowEvent[Any]) -> None: """Add a request_info event to the context and track it for correlation. Args: @@ -514,7 +516,7 @@ async def send_request_info_response(self, request_id: str, response: Any) -> No await self.send_message(response_msg) - async def get_pending_request_info_events(self) -> "dict[str, WorkflowEvent[Any]]": + async def get_pending_request_info_events(self) -> dict[str, WorkflowEvent[Any]]: """Get the mapping of request IDs to their corresponding request_info events. Returns: diff --git a/python/packages/core/agent_framework/_workflows/_workflow.py b/python/packages/core/agent_framework/_workflows/_workflow.py index 8cb1f7ed7f..4674691733 100644 --- a/python/packages/core/agent_framework/_workflows/_workflow.py +++ b/python/packages/core/agent_framework/_workflows/_workflow.py @@ -1,5 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. 
+from __future__ import annotations + import asyncio import functools import hashlib @@ -54,9 +56,7 @@ class WorkflowRunResult(list[WorkflowEvent]): - status_timeline(): Access the complete status event history """ - def __init__( - self, events: "list[WorkflowEvent[Any]]", status_events: "list[WorkflowEvent[Any]] | None" = None - ) -> None: + def __init__(self, events: list[WorkflowEvent[Any]], status_events: list[WorkflowEvent[Any]] | None = None) -> None: super().__init__(events) self._status_events: list[WorkflowEvent[Any]] = status_events or [] @@ -68,7 +68,7 @@ def get_outputs(self) -> list[Any]: """ return [event.data for event in self if event.type == "output"] - def get_request_info_events(self) -> "list[WorkflowEvent[Any]]": + def get_request_info_events(self) -> list[WorkflowEvent[Any]]: """Get all request info events from the workflow run result. Returns: @@ -89,7 +89,7 @@ def get_final_state(self) -> WorkflowRunState: "or handle the absence of status explicitly." ) - def status_timeline(self) -> "list[WorkflowEvent[Any]]": + def status_timeline(self) -> list[WorkflowEvent[Any]]: """Return the list of status events emitted during the run (control-plane).""" return list(self._status_events) @@ -773,7 +773,7 @@ def _get_executor_by_id(self, executor_id: str) -> Executor: raise ValueError(f"Executor with ID {executor_id} not found.") return self.executors[executor_id] - def _should_yield_output_event(self, event: "WorkflowEvent[Any]") -> bool: + def _should_yield_output_event(self, event: WorkflowEvent[Any]) -> bool: """Determine if an output event should be yielded as a workflow output. 
Args: diff --git a/python/packages/core/agent_framework/_workflows/_workflow_context.py b/python/packages/core/agent_framework/_workflows/_workflow_context.py index 3286f6a15f..8fb2b765b9 100644 --- a/python/packages/core/agent_framework/_workflows/_workflow_context.py +++ b/python/packages/core/agent_framework/_workflows/_workflow_context.py @@ -1,5 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. +from __future__ import annotations + import copy import inspect import logging @@ -250,7 +252,7 @@ async def flexible(message: str, ctx: WorkflowContext[int | str, bool | dict]) - def __init__( self, - executor: "Executor", + executor: Executor, source_executor_ids: list[str], state: State, runner_context: RunnerContext, @@ -349,7 +351,7 @@ async def yield_output(self, output: W_OutT) -> None: event = WorkflowEvent.output(self._executor_id, output) await self._runner_context.add_event(event) - async def add_event(self, event: "WorkflowEvent[Any]") -> None: + async def add_event(self, event: WorkflowEvent[Any]) -> None: """Add an event to the workflow context.""" if event.origin == WorkflowEventSource.EXECUTOR and event.type in _FRAMEWORK_LIFECYCLE_EVENT_TYPES: warning_msg = ( From fd663ed67e675ee3cfbea99d6776ae36f3cf95d1 Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Thu, 5 Feb 2026 17:15:47 +0900 Subject: [PATCH 05/12] Update samples and tests --- .../packages/lab/lightning/tests/test_lightning.py | 2 +- .../orchestrations/01_round_robin_group_chat.py | 4 ++-- .../orchestrations/02_selector_group_chat.py | 2 +- .../autogen-migration/orchestrations/03_swarm.py | 12 ++++++------ .../orchestrations/04_magentic_one.py | 2 +- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/python/packages/lab/lightning/tests/test_lightning.py b/python/packages/lab/lightning/tests/test_lightning.py index d337a0de16..6770f9d974 100644 --- a/python/packages/lab/lightning/tests/test_lightning.py +++ b/python/packages/lab/lightning/tests/test_lightning.py @@ -9,7 +9,7 
@@ agentlightning = pytest.importorskip("agentlightning") -from agent_framework import AgentExecutor, AgentResponse, ChatAgent, WorkflowBuilder, Workflow, WorkflowEvent +from agent_framework import AgentExecutor, AgentResponse, ChatAgent, WorkflowBuilder, Workflow from agent_framework_lab_lightning import AgentFrameworkTracer from agent_framework.openai import OpenAIChatClient from agentlightning import TracerTraceToTriplet diff --git a/python/samples/autogen-migration/orchestrations/01_round_robin_group_chat.py b/python/samples/autogen-migration/orchestrations/01_round_robin_group_chat.py index 7fe3ad07bb..486fd2cbfa 100644 --- a/python/samples/autogen-migration/orchestrations/01_round_robin_group_chat.py +++ b/python/samples/autogen-migration/orchestrations/01_round_robin_group_chat.py @@ -7,7 +7,7 @@ import asyncio -from agent_framework import AgentResponseUpdate, WorkflowEvent +from agent_framework import AgentResponseUpdate async def run_autogen() -> None: @@ -55,8 +55,8 @@ async def run_autogen() -> None: async def run_agent_framework() -> None: """Agent Framework's SequentialBuilder for sequential agent orchestration.""" - from agent_framework import SequentialBuilder from agent_framework.openai import OpenAIChatClient + from agent_framework.orchestrations import SequentialBuilder client = OpenAIChatClient(model_id="gpt-4.1-mini") diff --git a/python/samples/autogen-migration/orchestrations/02_selector_group_chat.py b/python/samples/autogen-migration/orchestrations/02_selector_group_chat.py index ec6a05c9c8..f0b4f3ee47 100644 --- a/python/samples/autogen-migration/orchestrations/02_selector_group_chat.py +++ b/python/samples/autogen-migration/orchestrations/02_selector_group_chat.py @@ -61,8 +61,8 @@ async def run_autogen() -> None: async def run_agent_framework() -> None: """Agent Framework's GroupChatBuilder with LLM-based speaker selection.""" - from agent_framework import GroupChatBuilder from agent_framework.openai import OpenAIChatClient + from 
agent_framework.orchestrations import GroupChatBuilder client = OpenAIChatClient(model_id="gpt-4.1-mini") diff --git a/python/samples/autogen-migration/orchestrations/03_swarm.py b/python/samples/autogen-migration/orchestrations/03_swarm.py index 34631c4300..34f4b2deef 100644 --- a/python/samples/autogen-migration/orchestrations/03_swarm.py +++ b/python/samples/autogen-migration/orchestrations/03_swarm.py @@ -7,7 +7,8 @@ import asyncio -from agent_framework import AgentResponseUpdate, WorkflowEvent +from agent_framework import WorkflowEvent +from orderedmultidict import Any async def run_autogen() -> None: @@ -99,11 +100,10 @@ async def run_agent_framework() -> None: """Agent Framework's HandoffBuilder for agent coordination.""" from agent_framework import ( AgentResponseUpdate, - HandoffBuilder, - HandoffUserInputRequest, WorkflowRunState, ) from agent_framework.openai import OpenAIChatClient + from agent_framework.orchestrations import HandoffAgentUserRequest, HandoffBuilder client = OpenAIChatClient(model_id="gpt-4.1-mini") @@ -174,7 +174,7 @@ async def run_agent_framework() -> None: if event.data: print(event.data.text, end="", flush=True) elif event.type == "request_info": - if isinstance(event.data, HandoffUserInputRequest): + if isinstance(event.data, HandoffAgentUserRequest): pending_requests.append(event) elif event.type == "status": if event.state in {WorkflowRunState.IDLE_WITH_PENDING_REQUESTS} and stream_line_open: @@ -188,7 +188,7 @@ async def run_agent_framework() -> None: print("---------- user ----------") print(user_response) - responses = {req.request_id: user_response for req in pending_requests} + responses: dict[str, Any] = {req.request_id: user_response for req in pending_requests} # type: ignore pending_requests = [] current_executor = None stream_line_open = False @@ -206,7 +206,7 @@ async def run_agent_framework() -> None: if event.data: print(event.data.text, end="", flush=True) elif event.type == "request_info": - if 
isinstance(event.data, HandoffUserInputRequest): + if isinstance(event.data, HandoffAgentUserRequest): pending_requests.append(event) elif event.type == "status": if ( diff --git a/python/samples/autogen-migration/orchestrations/04_magentic_one.py b/python/samples/autogen-migration/orchestrations/04_magentic_one.py index 1bac81774c..2a8f61ae53 100644 --- a/python/samples/autogen-migration/orchestrations/04_magentic_one.py +++ b/python/samples/autogen-migration/orchestrations/04_magentic_one.py @@ -66,8 +66,8 @@ async def run_autogen() -> None: async def run_agent_framework() -> None: """Agent Framework's MagenticBuilder for orchestrated collaboration.""" - from agent_framework.orchestrations import MagenticBuilder from agent_framework.openai import OpenAIChatClient + from agent_framework.orchestrations import MagenticBuilder client = OpenAIChatClient(model_id="gpt-4.1-mini") From 7169140bca70dac4b394d0c75fb608fea5b7f78d Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Thu, 5 Feb 2026 17:20:01 +0900 Subject: [PATCH 06/12] Remove unused imports --- .../workflows/control-flow/sequential_streaming.py | 5 +++-- .../workflows/declarative/customer_support/main.py | 1 - .../workflows/declarative/deep_research/main.py | 1 - .../getting_started/workflows/declarative/marketing/main.py | 1 - .../workflows/declarative/student_teacher/main.py | 1 - 5 files changed, 3 insertions(+), 6 deletions(-) diff --git a/python/samples/getting_started/workflows/control-flow/sequential_streaming.py b/python/samples/getting_started/workflows/control-flow/sequential_streaming.py index 0d1f1de89f..733cce14ff 100644 --- a/python/samples/getting_started/workflows/control-flow/sequential_streaming.py +++ b/python/samples/getting_started/workflows/control-flow/sequential_streaming.py @@ -2,7 +2,7 @@ import asyncio -from agent_framework import WorkflowBuilder, WorkflowContext, WorkflowEvent, executor +from agent_framework import WorkflowBuilder, WorkflowContext, executor from typing_extensions 
import Never """ @@ -14,7 +14,8 @@ Purpose: Show how to declare executors with the @executor decorator, connect them with WorkflowBuilder, pass intermediate values using ctx.send_message, and yield final output using ctx.yield_output(). -Demonstrate how streaming exposes executor_invoked events (type='executor_invoked') and executor_completed events (type='executor_completed') for observability. +Demonstrate how streaming exposes executor_invoked events (type='executor_invoked') and +executor_completed events (type='executor_completed') for observability. Prerequisites: - No external services required. diff --git a/python/samples/getting_started/workflows/declarative/customer_support/main.py b/python/samples/getting_started/workflows/declarative/customer_support/main.py index 5b54f6a20b..34821b668c 100644 --- a/python/samples/getting_started/workflows/declarative/customer_support/main.py +++ b/python/samples/getting_started/workflows/declarative/customer_support/main.py @@ -26,7 +26,6 @@ import uuid from pathlib import Path -from agent_framework import WorkflowEvent from agent_framework.azure import AzureOpenAIChatClient from agent_framework.declarative import ( AgentExternalInputRequest, diff --git a/python/samples/getting_started/workflows/declarative/deep_research/main.py b/python/samples/getting_started/workflows/declarative/deep_research/main.py index f4d5f149bf..676727a71f 100644 --- a/python/samples/getting_started/workflows/declarative/deep_research/main.py +++ b/python/samples/getting_started/workflows/declarative/deep_research/main.py @@ -24,7 +24,6 @@ import asyncio from pathlib import Path -from agent_framework import WorkflowEvent from agent_framework.azure import AzureOpenAIChatClient from agent_framework.declarative import WorkflowFactory from azure.identity import AzureCliCredential diff --git a/python/samples/getting_started/workflows/declarative/marketing/main.py b/python/samples/getting_started/workflows/declarative/marketing/main.py index 
b8de3ff069..e145daf6d7 100644 --- a/python/samples/getting_started/workflows/declarative/marketing/main.py +++ b/python/samples/getting_started/workflows/declarative/marketing/main.py @@ -15,7 +15,6 @@ import asyncio from pathlib import Path -from agent_framework import WorkflowEvent from agent_framework.azure import AzureOpenAIChatClient from agent_framework.declarative import WorkflowFactory from azure.identity import AzureCliCredential diff --git a/python/samples/getting_started/workflows/declarative/student_teacher/main.py b/python/samples/getting_started/workflows/declarative/student_teacher/main.py index efae717a16..5d9fd8d7e9 100644 --- a/python/samples/getting_started/workflows/declarative/student_teacher/main.py +++ b/python/samples/getting_started/workflows/declarative/student_teacher/main.py @@ -22,7 +22,6 @@ import asyncio from pathlib import Path -from agent_framework import WorkflowEvent from agent_framework.azure import AzureOpenAIChatClient from agent_framework.declarative import WorkflowFactory from azure.identity import AzureCliCredential From 599464695f68a005fe638ac1b7dcc51c406fe63b Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Thu, 5 Feb 2026 19:43:29 +0900 Subject: [PATCH 07/12] PR feedback --- .../_base_group_chat_orchestrator.py | 2 +- .../agent_framework/_workflows/_events.py | 4 +--- .../tests/workflow/test_workflow_context.py | 2 +- .../devui/agent_framework_devui/_mapper.py | 5 ++-- .../_handoff.py | 2 +- .../_magentic.py | 2 +- .../01_round_robin_group_chat.py | 4 ++-- .../orchestrations/02_selector_group_chat.py | 2 +- .../orchestrations/03_swarm.py | 4 ++-- .../orchestrations/04_magentic_one.py | 2 +- .../workflow_evaluation/create_workflow.py | 2 +- .../group_chat_agent_manager.py | 2 +- .../group_chat_philosophical_debate.py | 2 +- .../group_chat_simple_selector.py | 2 +- .../orchestrations/handoff_autonomous.py | 2 +- .../handoff_participant_factory.py | 4 ++-- .../orchestrations/handoff_simple.py | 4 ++-- 
.../handoff_with_code_interpreter_file.py | 2 +- .../orchestrations/magentic.py | 2 +- .../workflows/_start-here/step3_streaming.py | 2 +- .../_start-here/step4_using_factories.py | 2 +- .../agents/azure_ai_agents_streaming.py | 2 +- .../agents/azure_chat_agents_and_executor.py | 2 +- .../agents/azure_chat_agents_streaming.py | 2 +- ...re_chat_agents_tool_calls_with_feedback.py | 23 ++++++++++--------- .../magentic_human_plan_review.py | 5 ++-- 26 files changed, 44 insertions(+), 45 deletions(-) diff --git a/python/packages/core/agent_framework/_workflows/_base_group_chat_orchestrator.py b/python/packages/core/agent_framework/_workflows/_base_group_chat_orchestrator.py index c0f523a3d7..e5ea0af3e5 100644 --- a/python/packages/core/agent_framework/_workflows/_base_group_chat_orchestrator.py +++ b/python/packages/core/agent_framework/_workflows/_base_group_chat_orchestrator.py @@ -71,7 +71,7 @@ def __init__(self, round_index: int, data: Any | None = None) -> None: round_index: Current round index data: Optional event-specific data """ - super().__init__("custom", data=data) + super().__init__("group_chat", data=data) self.round_index = round_index diff --git a/python/packages/core/agent_framework/_workflows/_events.py b/python/packages/core/agent_framework/_workflows/_events.py index 2eaa2aa0fd..2b94d0915f 100644 --- a/python/packages/core/agent_framework/_workflows/_events.py +++ b/python/packages/core/agent_framework/_workflows/_events.py @@ -119,8 +119,6 @@ def from_exception( "executor_invoked", # Executor handler was called (use .executor_id, .data) "executor_completed", # Executor handler completed (use .executor_id, .data) "executor_failed", # Executor handler raised error (use .executor_id, .details) - # Extension point - "custom", # User-defined event (for subclassing) ] @@ -176,7 +174,7 @@ class WorkflowEvent(Generic[DataT]): def __init__( self, - type: WorkflowEventType, + type: WorkflowEventType | str, data: DataT | None = None, *, # Event context fields 
diff --git a/python/packages/core/tests/workflow/test_workflow_context.py b/python/packages/core/tests/workflow/test_workflow_context.py index 8115fbb858..03aa1d78d9 100644 --- a/python/packages/core/tests/workflow/test_workflow_context.py +++ b/python/packages/core/tests/workflow/test_workflow_context.py @@ -84,7 +84,7 @@ async def test_executor_emits_normal_event() -> None: class _TestEvent(WorkflowEvent): def __init__(self, data: Any = None) -> None: - super().__init__("custom", data=data) + super().__init__("test_event", data=data) async def test_workflow_context_type_annotations_no_parameter() -> None: diff --git a/python/packages/devui/agent_framework_devui/_mapper.py b/python/packages/devui/agent_framework_devui/_mapper.py index 38e50c676b..b956be3ac0 100644 --- a/python/packages/devui/agent_framework_devui/_mapper.py +++ b/python/packages/devui/agent_framework_devui/_mapper.py @@ -180,11 +180,12 @@ async def convert_event(self, raw_event: Any, request: AgentFrameworkRequest) -> try: from agent_framework import AgentResponse, AgentResponseUpdate, WorkflowEvent - # Handle WorkflowEvent with type='data' wrapping AgentResponseUpdate + # Handle WorkflowEvent with type='output' or 'data' wrapping AgentResponseUpdate # This must be checked BEFORE generic WorkflowEvent check + # Note: AgentExecutor uses type='output' for streaming updates if ( isinstance(raw_event, WorkflowEvent) - and raw_event.type == "data" + and raw_event.type in ("output", "data") and raw_event.data and isinstance(raw_event.data, AgentResponseUpdate) ): diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py b/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py index 2fd6f678e1..60243e48c8 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py @@ -74,7 +74,7 @@ def __init__(self, source: str, target: str, data: Any | None = None) 
-> None: target: Identifier of the target agent receiving the handoff data: Optional event-specific data """ - super().__init__("custom", data=data) + super().__init__("handoff_sent", data=data) self.source = source self.target = target diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_magentic.py b/python/packages/orchestrations/agent_framework_orchestrations/_magentic.py index 14c0389c97..196e88babd 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_magentic.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_magentic.py @@ -784,7 +784,7 @@ def __init__( event_type: MagenticOrchestratorEventType, data: ChatMessage | MagenticProgressLedger, ) -> None: - super().__init__("custom", data=data, executor_id=executor_id) + super().__init__("magentic_orchestrator", data=data, executor_id=executor_id) self.event_type = event_type def __repr__(self) -> str: diff --git a/python/samples/autogen-migration/orchestrations/01_round_robin_group_chat.py b/python/samples/autogen-migration/orchestrations/01_round_robin_group_chat.py index 486fd2cbfa..9523fb900b 100644 --- a/python/samples/autogen-migration/orchestrations/01_round_robin_group_chat.py +++ b/python/samples/autogen-migration/orchestrations/01_round_robin_group_chat.py @@ -83,7 +83,7 @@ async def run_agent_framework() -> None: print("[Agent Framework] Sequential conversation:") current_executor = None async for event in workflow.run_stream("Create a brief summary about electric vehicles"): - if event.type == "data" and isinstance(event.data, AgentResponseUpdate): + if event.type == "output" and isinstance(event.data, AgentResponseUpdate): # Print executor name header when switching to a new agent if current_executor != event.executor_id: if current_executor is not None: @@ -156,7 +156,7 @@ async def check_approval( if event.type == "output": print("\n---------- Workflow Output ----------") print(event.data) - elif event.type == "data" and 
isinstance(event.data, AgentResponseUpdate): + elif event.type == "output" and isinstance(event.data, AgentResponseUpdate): # Print executor name header when switching to a new agent if current_executor != event.executor_id: if current_executor is not None: diff --git a/python/samples/autogen-migration/orchestrations/02_selector_group_chat.py b/python/samples/autogen-migration/orchestrations/02_selector_group_chat.py index f0b4f3ee47..7010b1347b 100644 --- a/python/samples/autogen-migration/orchestrations/02_selector_group_chat.py +++ b/python/samples/autogen-migration/orchestrations/02_selector_group_chat.py @@ -102,7 +102,7 @@ async def run_agent_framework() -> None: print("[Agent Framework] Group chat conversation:") current_executor = None async for event in workflow.run_stream("How do I connect to a PostgreSQL database using Python?"): - if event.type == "data" and isinstance(event.data, AgentResponseUpdate): + if event.type == "output" and isinstance(event.data, AgentResponseUpdate): # Print executor name header when switching to a new agent if current_executor != event.executor_id: if current_executor is not None: diff --git a/python/samples/autogen-migration/orchestrations/03_swarm.py b/python/samples/autogen-migration/orchestrations/03_swarm.py index 34f4b2deef..604fff7fba 100644 --- a/python/samples/autogen-migration/orchestrations/03_swarm.py +++ b/python/samples/autogen-migration/orchestrations/03_swarm.py @@ -162,7 +162,7 @@ async def run_agent_framework() -> None: pending_requests: list[WorkflowEvent] = [] async for event in workflow.run_stream(scripted_responses[0]): - if event.type == "data" and isinstance(event.data, AgentResponseUpdate): + if event.type == "output" and isinstance(event.data, AgentResponseUpdate): # Print executor name header when switching to a new agent if current_executor != event.executor_id: if stream_line_open: @@ -194,7 +194,7 @@ async def run_agent_framework() -> None: stream_line_open = False async for event in 
workflow.send_responses_streaming(responses): - if event.type == "data" and isinstance(event.data, AgentResponseUpdate): + if event.type == "output" and isinstance(event.data, AgentResponseUpdate): # Print executor name header when switching to a new agent if current_executor != event.executor_id: if stream_line_open: diff --git a/python/samples/autogen-migration/orchestrations/04_magentic_one.py b/python/samples/autogen-migration/orchestrations/04_magentic_one.py index 2a8f61ae53..07acc4f472 100644 --- a/python/samples/autogen-migration/orchestrations/04_magentic_one.py +++ b/python/samples/autogen-migration/orchestrations/04_magentic_one.py @@ -112,7 +112,7 @@ async def run_agent_framework() -> None: output_event: WorkflowEvent | None = None print("[Agent Framework] Magentic conversation:") async for event in workflow.run_stream("Research Python async patterns and write a simple example"): - if event.type == "data" and isinstance(event.data, AgentResponseUpdate): + if event.type == "output" and isinstance(event.data, AgentResponseUpdate): message_id = event.data.message_id if message_id != last_message_id: if last_message_id is not None: diff --git a/python/samples/demos/workflow_evaluation/create_workflow.py b/python/samples/demos/workflow_evaluation/create_workflow.py index a50c1b4536..029b9e0850 100644 --- a/python/samples/demos/workflow_evaluation/create_workflow.py +++ b/python/samples/demos/workflow_evaluation/create_workflow.py @@ -362,7 +362,7 @@ async def _process_workflow_events(events, conversation_ids, response_ids): output_str = str(event.data).encode("ascii", "replace").decode("ascii") print(f"\nWorkflow Output: {output_str}\n") - elif event.type == "data" and isinstance(event.data, AgentResponseUpdate): + elif event.type == "output" and isinstance(event.data, AgentResponseUpdate): _track_agent_ids(event, event.executor_id, response_ids, conversation_ids) return workflow_output diff --git 
a/python/samples/getting_started/orchestrations/group_chat_agent_manager.py b/python/samples/getting_started/orchestrations/group_chat_agent_manager.py index aa3380fa84..a830e43216 100644 --- a/python/samples/getting_started/orchestrations/group_chat_agent_manager.py +++ b/python/samples/getting_started/orchestrations/group_chat_agent_manager.py @@ -87,7 +87,7 @@ async def main() -> None: # Keep track of the last response to format output nicely in streaming mode last_response_id: str | None = None async for event in workflow.run_stream(task): - if event.type == "output" or event.type == "data": + if event.type == "output": data = event.data if isinstance(data, AgentResponseUpdate): rid = data.response_id diff --git a/python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py b/python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py index 55cb63434c..50cf7321c8 100644 --- a/python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py +++ b/python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py @@ -240,7 +240,7 @@ async def main() -> None: # Keep track of the last response to format output nicely in streaming mode last_response_id: str | None = None async for event in workflow.run_stream(f"Please begin the discussion on: {topic}"): - if event.type == "output" or event.type == "data": + if event.type == "output": data = event.data if isinstance(data, AgentResponseUpdate): rid = data.response_id diff --git a/python/samples/getting_started/orchestrations/group_chat_simple_selector.py b/python/samples/getting_started/orchestrations/group_chat_simple_selector.py index de3907d9c4..4c21fd87ff 100644 --- a/python/samples/getting_started/orchestrations/group_chat_simple_selector.py +++ b/python/samples/getting_started/orchestrations/group_chat_simple_selector.py @@ -105,7 +105,7 @@ async def main() -> None: # Keep track of the last response to format output nicely in streaming mode 
last_response_id: str | None = None async for event in workflow.run_stream(task): - if event.type == "output" or event.type == "data": + if event.type == "output": data = event.data if isinstance(data, AgentResponseUpdate): rid = data.response_id diff --git a/python/samples/getting_started/orchestrations/handoff_autonomous.py b/python/samples/getting_started/orchestrations/handoff_autonomous.py index 8e21e8cd4d..fa92e8137e 100644 --- a/python/samples/getting_started/orchestrations/handoff_autonomous.py +++ b/python/samples/getting_started/orchestrations/handoff_autonomous.py @@ -112,7 +112,7 @@ async def main() -> None: async for event in workflow.run_stream(request): if isinstance(event, HandoffSentEvent): print(f"\nHandoff Event: from {event.source} to {event.target}\n") - elif event.type == "output" or event.type == "data": + elif event.type == "output": data = event.data if isinstance(data, AgentResponseUpdate): if not data.text: diff --git a/python/samples/getting_started/orchestrations/handoff_participant_factory.py b/python/samples/getting_started/orchestrations/handoff_participant_factory.py index dd11a68211..c66f9a5e22 100644 --- a/python/samples/getting_started/orchestrations/handoff_participant_factory.py +++ b/python/samples/getting_started/orchestrations/handoff_participant_factory.py @@ -131,8 +131,8 @@ def _handle_events(events: list[WorkflowEvent]) -> list[WorkflowEvent[HandoffAge }: # Status event: Indicates workflow state changes print(f"\n[Workflow Status] {event.state.name}") - elif event.type == "output" or event.type == "data": - # Output/data event: Contains contents generated by the workflow + elif event.type == "output": + # Output event: Contains contents generated by the workflow data = event.data if isinstance(data, AgentResponse): for message in data.messages: diff --git a/python/samples/getting_started/orchestrations/handoff_simple.py b/python/samples/getting_started/orchestrations/handoff_simple.py index e1c0b896dc..1df4c05bc5 100644 
--- a/python/samples/getting_started/orchestrations/handoff_simple.py +++ b/python/samples/getting_started/orchestrations/handoff_simple.py @@ -126,8 +126,8 @@ def _handle_events(events: list[WorkflowEvent]) -> list[WorkflowEvent[HandoffAge }: # Status event: Indicates workflow state changes print(f"\n[Workflow Status] {event.state.name}") - elif event.type == "output" or event.type == "data": - # Output/data event: Contains contents generated by the workflow + elif event.type == "output": + # Output event: Contains contents generated by the workflow data = event.data if isinstance(data, AgentResponse): for message in data.messages: diff --git a/python/samples/getting_started/orchestrations/handoff_with_code_interpreter_file.py b/python/samples/getting_started/orchestrations/handoff_with_code_interpreter_file.py index 97e53680db..9e09147ebb 100644 --- a/python/samples/getting_started/orchestrations/handoff_with_code_interpreter_file.py +++ b/python/samples/getting_started/orchestrations/handoff_with_code_interpreter_file.py @@ -71,7 +71,7 @@ def _handle_events(events: list[WorkflowEvent]) -> tuple[list[WorkflowEvent[Hand print(f"[status] {event.state.name}") elif event.type == "request_info" and isinstance(event.data, HandoffAgentUserRequest): requests.append(cast(WorkflowEvent[HandoffAgentUserRequest], event)) - elif event.type == "output" or event.type == "data": + elif event.type == "output": data = event.data if isinstance(data, AgentResponseUpdate): for content in data.contents: diff --git a/python/samples/getting_started/orchestrations/magentic.py b/python/samples/getting_started/orchestrations/magentic.py index ff6b4aa470..014a235de7 100644 --- a/python/samples/getting_started/orchestrations/magentic.py +++ b/python/samples/getting_started/orchestrations/magentic.py @@ -106,7 +106,7 @@ async def main() -> None: last_response_id: str | None = None output_event: WorkflowEvent | None = None async for event in workflow.run_stream(task): - if event.type == "data" 
and isinstance(event.data, AgentResponseUpdate): + if event.type == "output" and isinstance(event.data, AgentResponseUpdate): response_id = event.data.response_id if response_id != last_response_id: if last_response_id is not None: diff --git a/python/samples/getting_started/workflows/_start-here/step3_streaming.py b/python/samples/getting_started/workflows/_start-here/step3_streaming.py index 2b6eadf448..51b42c5217 100644 --- a/python/samples/getting_started/workflows/_start-here/step3_streaming.py +++ b/python/samples/getting_started/workflows/_start-here/step3_streaming.py @@ -56,7 +56,7 @@ async def main(): ): # The outputs of the workflow are whatever the agents produce. So the events are expected to # contain `AgentResponseUpdate` from the agents in the workflow. - if event.type == "data" and isinstance(event.data, AgentResponseUpdate): + if event.type == "output" and isinstance(event.data, AgentResponseUpdate): update = event.data author = update.author_name if author != last_author: diff --git a/python/samples/getting_started/workflows/_start-here/step4_using_factories.py b/python/samples/getting_started/workflows/_start-here/step4_using_factories.py index d73feaf027..4b82acef8f 100644 --- a/python/samples/getting_started/workflows/_start-here/step4_using_factories.py +++ b/python/samples/getting_started/workflows/_start-here/step4_using_factories.py @@ -87,7 +87,7 @@ async def main(): async for event in workflow.run_stream("hello world"): # The outputs of the workflow are whatever the agents produce. So the events are expected to # contain `AgentResponseUpdate` from the agents in the workflow. 
- if event.type == "data" and isinstance(event.data, AgentResponseUpdate): + if event.type == "output" and isinstance(event.data, AgentResponseUpdate): update = event.data if first_update: print(f"{update.author_name}: {update.text}", end="", flush=True) diff --git a/python/samples/getting_started/workflows/agents/azure_ai_agents_streaming.py b/python/samples/getting_started/workflows/agents/azure_ai_agents_streaming.py index 4630ec9737..29d2603651 100644 --- a/python/samples/getting_started/workflows/agents/azure_ai_agents_streaming.py +++ b/python/samples/getting_started/workflows/agents/azure_ai_agents_streaming.py @@ -48,7 +48,7 @@ async def main() -> None: async for event in events: # The outputs of the workflow are whatever the agents produce. So the events are expected to # contain `AgentResponseUpdate` from the agents in the workflow. - if event.type == "data" and isinstance(event.data, AgentResponseUpdate): + if event.type == "output" and isinstance(event.data, AgentResponseUpdate): update = event.data author = update.author_name if author != last_author: diff --git a/python/samples/getting_started/workflows/agents/azure_chat_agents_and_executor.py b/python/samples/getting_started/workflows/agents/azure_chat_agents_and_executor.py index 235a760557..f5a8c11525 100644 --- a/python/samples/getting_started/workflows/agents/azure_chat_agents_and_executor.py +++ b/python/samples/getting_started/workflows/agents/azure_chat_agents_and_executor.py @@ -127,7 +127,7 @@ async def main() -> None: async for event in events: # The outputs of the workflow are whatever the agents produce. So the events are expected to # contain `AgentResponseUpdate` from the agents in the workflow. 
- if event.type == "data" and isinstance(event.data, AgentResponseUpdate): + if event.type == "output" and isinstance(event.data, AgentResponseUpdate): update = event.data author = update.author_name if author != last_author: diff --git a/python/samples/getting_started/workflows/agents/azure_chat_agents_streaming.py b/python/samples/getting_started/workflows/agents/azure_chat_agents_streaming.py index fe31c23c49..4e8739c76d 100644 --- a/python/samples/getting_started/workflows/agents/azure_chat_agents_streaming.py +++ b/python/samples/getting_started/workflows/agents/azure_chat_agents_streaming.py @@ -49,7 +49,7 @@ async def main(): async for event in events: # The outputs of the workflow are whatever the agents produce. So the events are expected to # contain `AgentResponseUpdate` from the agents in the workflow. - if event.type == "data" and isinstance(event.data, AgentResponseUpdate): + if event.type == "output" and isinstance(event.data, AgentResponseUpdate): update = event.data author = update.author_name if author != last_author: diff --git a/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py b/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py index 51926f14b6..3c0b2084c3 100644 --- a/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py +++ b/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py @@ -12,10 +12,8 @@ AgentResponseUpdate, ChatAgent, ChatMessage, + Content, Executor, - FunctionCallContent, - FunctionResultContent, - Role, WorkflowBuilder, WorkflowContext, WorkflowEvent, @@ -49,9 +47,9 @@ - Authentication via azure-identity. Run `az login` before executing. """ + # NOTE: approval_mode="never_require" is for sample brevity. 
Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. @tool(approval_mode="never_require") - def fetch_product_brief( product_name: Annotated[str, Field(description="Product name to look up.")], ) -> str: @@ -67,8 +65,8 @@ def fetch_product_brief( } return briefs.get(product_name.lower(), f"No stored brief for '{product_name}'.") -@tool(approval_mode="never_require") +@tool(approval_mode="never_require") def get_brand_voice_profile( voice_name: Annotated[str, Field(description="Brand or campaign voice to emulate.")], ) -> str: @@ -147,8 +145,7 @@ async def on_human_feedback( # Human approved the draft as-is; forward it unchanged. await ctx.send_message( AgentExecutorRequest( - messages=original_request.conversation - + [ChatMessage(Role.USER, text="The draft is approved as-is.")], + messages=original_request.conversation + [ChatMessage("user", text="The draft is approved as-is.")], should_respond=True, ), target_id=self.final_editor_id, @@ -163,7 +160,7 @@ async def on_human_feedback( "Rewrite the draft from the previous assistant message into a polished final version. " "Keep the response under 120 words and reflect any requested tone adjustments." ) - conversation.append(ChatMessage(Role.USER, text=instruction)) + conversation.append(ChatMessage("user", text=instruction)) await ctx.send_message( AgentExecutorRequest(messages=conversation, should_respond=True), target_id=self.writer_id ) @@ -201,8 +198,8 @@ def display_agent_run_update(event: WorkflowEvent[AgentResponseUpdate], last_exe executor_id = event.executor_id update = event.data # Extract and print any new tool calls or results from the update. 
- function_calls = [c for c in update.contents if isinstance(c, FunctionCallContent)] # type: ignore[union-attr] - function_results = [c for c in update.contents if isinstance(c, FunctionResultContent)] # type: ignore[union-attr] + function_calls = [c for c in update.contents if isinstance(c, Content.from_function_call(c))] # type: ignore[union-attr] + function_results = [c for c in update.contents if isinstance(c, Content.from_function_result(c))] # type: ignore[union-attr] if executor_id != last_executor: if last_executor is not None: print() @@ -290,7 +287,11 @@ async def main() -> None: requests: list[tuple[str, DraftFeedbackRequest]] = [] async for event in stream: - if event.type == "data" and isinstance(event.data, AgentResponseUpdate) and display_agent_run_update_switch: + if ( + event.type == "output" + and isinstance(event.data, AgentResponseUpdate) + and display_agent_run_update_switch + ): display_agent_run_update(event, last_executor) # type: ignore[arg-type] if event.type == "request_info" and isinstance(event.data, DraftFeedbackRequest): # Stash the request so we can prompt the human after the stream completes. 
diff --git a/python/samples/getting_started/workflows/orchestration/magentic_human_plan_review.py b/python/samples/getting_started/workflows/orchestration/magentic_human_plan_review.py index 024a481f37..731038039c 100644 --- a/python/samples/getting_started/workflows/orchestration/magentic_human_plan_review.py +++ b/python/samples/getting_started/workflows/orchestration/magentic_human_plan_review.py @@ -8,11 +8,10 @@ AgentResponseUpdate, ChatAgent, ChatMessage, - MagenticBuilder, - MagenticPlanReviewRequest, WorkflowEvent, ) from agent_framework.openai import OpenAIChatClient +from agent_framework.orchestrations import MagenticBuilder, MagenticPlanReviewRequest """ Sample: Magentic Orchestration with Human Plan Review @@ -90,7 +89,7 @@ async def main() -> None: last_message_id: str | None = None async for event in stream: - if event.type == "data" and isinstance(event.data, AgentResponseUpdate): + if event.type == "output" and isinstance(event.data, AgentResponseUpdate): message_id = event.data.message_id if message_id != last_message_id: if last_message_id is not None: From ac96b01a1a552ed3bd746ca11393cda4b2111f47 Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Fri, 6 Feb 2026 10:48:00 +0900 Subject: [PATCH 08/12] Merge main. 
Add properties for events to help typing --- .../packages/core/agent_framework/_tools.py | 3 + .../_workflows/_agent_executor.py | 12 +++ .../agent_framework/_workflows/_events.py | 86 ++++++++++++++++--- .../agent_framework/_workflows/_executor.py | 16 ++-- .../_workflows/_runner_context.py | 2 - .../_workflows/_workflow_executor.py | 6 -- .../magentic_human_plan_review copy.py} | 2 +- .../_start-here/step1_executors_and_edges.py | 4 +- .../_start-here/step2_agents_in_a_workflow.py | 1 - .../_start-here/step4_using_factories.py | 1 - ...re_chat_agents_tool_calls_with_feedback.py | 11 +-- .../agents/concurrent_workflow_as_agent.py | 2 +- .../agents/group_chat_workflow_as_agent.py | 3 +- .../agents/handoff_workflow_as_agent.py | 3 +- .../agents/magentic_workflow_as_agent.py | 2 +- .../agents/sequential_workflow_as_agent.py | 2 +- .../agents/workflow_as_agent_kwargs.py | 4 +- .../agents/workflow_as_agent_with_thread.py | 3 +- .../checkpoint_with_human_in_the_loop.py | 9 +- ...ff_with_tool_approval_checkpoint_resume.py | 10 ++- .../checkpoint/sub_workflow_checkpoint.py | 13 +-- .../workflow_as_agent_checkpoint.py | 13 ++- .../composition/sub_workflow_kwargs.py | 52 ++++++++++- .../sub_workflow_request_interception.py | 2 - .../multi_selection_edge_group.py | 1 - .../control-flow/sequential_executors.py | 2 - .../workflows/control-flow/simple_loop.py | 1 - .../declarative/human_in_loop/main.py | 2 +- .../human-in-the-loop/agents_with_HITL.py | 6 +- .../concurrent_request_info.py | 6 +- .../group_chat_request_info.py | 6 +- .../guessing_game_with_human_input.py | 10 ++- .../sequential_request_info.py | 2 +- .../observability/executor_io_observation.py | 1 - .../aggregate_results_of_different_types.py | 2 +- .../parallelism/fan_out_fan_in_edges.py | 4 +- .../state-management/workflow_kwargs.py | 8 +- .../concurrent_builder_tool_approval.py | 2 +- .../group_chat_builder_tool_approval.py | 5 +- .../sequential_builder_tool_approval.py | 6 +- 40 files changed, 226 
insertions(+), 100 deletions(-) rename python/samples/getting_started/{workflows/orchestration/magentic_human_plan_review.py => orchestrations/magentic_human_plan_review copy.py} (98%) diff --git a/python/packages/core/agent_framework/_tools.py b/python/packages/core/agent_framework/_tools.py index 6638e71dac..7e22b78827 100644 --- a/python/packages/core/agent_framework/_tools.py +++ b/python/packages/core/agent_framework/_tools.py @@ -2135,6 +2135,9 @@ def get_response( filtered_kwargs = {k: v for k, v in kwargs.items() if k != "thread"} # Make options mutable so we can update conversation_id during function invocation loop mutable_options: dict[str, Any] = dict(options) if options else {} + # Remove additional_function_arguments from options passed to underlying chat client + # It's for tool invocation only and not recognized by chat service APIs + mutable_options.pop("additional_function_arguments", None) if not stream: diff --git a/python/packages/core/agent_framework/_workflows/_agent_executor.py b/python/packages/core/agent_framework/_workflows/_agent_executor.py index 98c2bc4ce3..f13b7b65fd 100644 --- a/python/packages/core/agent_framework/_workflows/_agent_executor.py +++ b/python/packages/core/agent_framework/_workflows/_agent_executor.py @@ -332,10 +332,16 @@ async def _run_agent(self, ctx: WorkflowContext[Never, AgentResponse]) -> AgentR """ run_kwargs: dict[str, Any] = ctx.get_state(WORKFLOW_RUN_KWARGS_KEY, {}) + # Build options dict with additional_function_arguments for tool kwargs propagation + options: dict[str, Any] | None = None + if run_kwargs: + options = {"additional_function_arguments": run_kwargs} + response = await self._agent.run( self._cache, stream=False, thread=self._agent_thread, + options=options, **run_kwargs, ) await ctx.yield_output(response) @@ -360,12 +366,18 @@ async def _run_agent_streaming(self, ctx: WorkflowContext[Never, AgentResponseUp """ run_kwargs: dict[str, Any] = ctx.get_state(WORKFLOW_RUN_KWARGS_KEY) or {} + # Build 
options dict with additional_function_arguments for tool kwargs propagation + options: dict[str, Any] | None = None + if run_kwargs: + options = {"additional_function_arguments": run_kwargs} + updates: list[AgentResponseUpdate] = [] user_input_requests: list[Content] = [] async for update in self._agent.run( self._cache, stream=True, thread=self._agent_thread, + options=options, **run_kwargs, ): updates.append(update) diff --git a/python/packages/core/agent_framework/_workflows/_events.py b/python/packages/core/agent_framework/_workflows/_events.py index 2b94d0915f..b4f5f678fa 100644 --- a/python/packages/core/agent_framework/_workflows/_events.py +++ b/python/packages/core/agent_framework/_workflows/_events.py @@ -205,10 +205,10 @@ def __init__( self.state = state self.details = details self.executor_id = executor_id - self.request_id = request_id - self.source_executor_id = source_executor_id - self.request_type = request_type - self.response_type = response_type + self._request_id = request_id + self._source_executor_id = source_executor_id + self._request_type = request_type + self._response_type = response_type self.iteration = iteration def __repr__(self) -> str: @@ -220,11 +220,11 @@ def __repr__(self) -> str: parts.append(f"executor_id={self.executor_id!r}") if self.iteration is not None: parts.append(f"iteration={self.iteration}") - if self.request_id is not None: - parts.append(f"request_id={self.request_id!r}") + if self._request_id is not None: + parts.append(f"request_id={self._request_id!r}") if self.data is not None: parts.append(f"data={self.data!r}") - return f"WorkflowEvent({', '.join(parts)})" + return f"WorkflowEvent({', '.join(parts)})" # pragma: no cover # ========================================================================== # Factory methods @@ -312,6 +312,70 @@ def executor_failed(cls, executor_id: str, details: WorkflowErrorDetails) -> Wor """Create an 'executor_failed' event when an executor handler raises an error.""" return 
WorkflowEvent("executor_failed", executor_id=executor_id, data=details, details=details) + # ========================================================================== + # Property for type-safe access + # ========================================================================== + + @property + def request_id(self) -> str: + """Get request_id for request_info events. + + Returns: + The request ID as a non-None string. + + Raises: + RuntimeError: If called on an event that is not a request_info event, + or if the event is malformed (request_info without request_id). + """ + if self.type != "request_info" or self._request_id is None: + raise RuntimeError(f"request_id is only available for request_info events, got type={self.type!r}") + return self._request_id + + @property + def source_executor_id(self) -> str: + """Get source_executor_id for request_info events. + + Returns: + The source executor ID as a non-None string. + + Raises: + RuntimeError: If called on an event that is not a request_info event, + or if the event is malformed (request_info without source_executor_id). + """ + if self.type != "request_info" or self._source_executor_id is None: + raise RuntimeError(f"source_executor_id is only available for request_info events, got type={self.type!r}") + return self._source_executor_id + + @property + def request_type(self) -> type[Any]: + """Get request_type for request_info events. + + Returns: + The request data type as a non-None type object. + + Raises: + RuntimeError: If called on an event that is not a request_info event, + or if the event is malformed (request_info without request_type). + """ + if self.type != "request_info" or self._request_type is None: + raise RuntimeError(f"request_type is only available for request_info events, got type={self.type!r}") + return self._request_type + + @property + def response_type(self) -> type[Any]: + """Get response_type for request_info events. + + Returns: + The response data type as a non-None type object. 
+ + Raises: + RuntimeError: If called on an event that is not a request_info event, + or if the event is malformed (request_info without response_type). + """ + if self.type != "request_info" or self._response_type is None: + raise RuntimeError(f"response_type is only available for request_info events, got type={self.type!r}") + return self._response_type + # ========================================================================== # Serialization methods (primarily for REQUEST_INFO events) # ========================================================================== @@ -326,10 +390,10 @@ def to_dict(self) -> dict[str, Any]: return { "type": self.type, "data": encode_checkpoint_value(self.data), - "request_id": self.request_id, - "source_executor_id": self.source_executor_id, - "request_type": serialize_type(self.request_type) if self.request_type else None, - "response_type": serialize_type(self.response_type) if self.response_type else None, + "request_id": self._request_id, + "source_executor_id": self._source_executor_id, + "request_type": serialize_type(self._request_type) if self._request_type else None, + "response_type": serialize_type(self._response_type) if self._response_type else None, } @classmethod diff --git a/python/packages/core/agent_framework/_workflows/_executor.py b/python/packages/core/agent_framework/_workflows/_executor.py index b1eaa05559..ffab65e3a3 100644 --- a/python/packages/core/agent_framework/_workflows/_executor.py +++ b/python/packages/core/agent_framework/_workflows/_executor.py @@ -538,8 +538,8 @@ def handler( output: type | types.UnionType | str | None = None, workflow_output: type | types.UnionType | str | None = None, ) -> Callable[ - [Callable[[ExecutorT, Any, ContextT], Awaitable[Any]]], - Callable[[ExecutorT, Any, ContextT], Awaitable[Any]], + [Callable[..., Awaitable[Any]]], + Callable[..., Awaitable[Any]], ]: ... 
@@ -724,9 +724,15 @@ def _validate_handler_signature( # Validate ctx parameter is WorkflowContext and extract type args ctx_param = params[2] - output_types, workflow_output_types = validate_workflow_context_annotation( - ctx_param.annotation, f"parameter '{ctx_param.name}'", "Handler" - ) + if skip_message_annotation and ctx_param.annotation == inspect.Parameter.empty: + # When explicit types are provided via @handler(input=..., output=...), + # the ctx parameter doesn't need a type annotation - types come from the decorator. + output_types: list[type[Any] | types.UnionType] = [] + workflow_output_types: list[type[Any] | types.UnionType] = [] + else: + output_types, workflow_output_types = validate_workflow_context_annotation( + ctx_param.annotation, f"parameter '{ctx_param.name}'", "Handler" + ) message_type = message_param.annotation if message_param.annotation != inspect.Parameter.empty else None ctx_annotation = ctx_param.annotation diff --git a/python/packages/core/agent_framework/_workflows/_runner_context.py b/python/packages/core/agent_framework/_workflows/_runner_context.py index 8e4f5726d2..ed81026245 100644 --- a/python/packages/core/agent_framework/_workflows/_runner_context.py +++ b/python/packages/core/agent_framework/_workflows/_runner_context.py @@ -502,8 +502,6 @@ async def send_request_info_response(self, request_id: str, response: Any) -> No ) source_executor_id = event.source_executor_id - if source_executor_id is None: - raise RuntimeError("request_info event must have a source_executor_id for response routing") # Create ResponseMessage instance response_msg = Message( diff --git a/python/packages/core/agent_framework/_workflows/_workflow_executor.py b/python/packages/core/agent_framework/_workflows/_workflow_executor.py index d0a2994c6b..b83c826873 100644 --- a/python/packages/core/agent_framework/_workflows/_workflow_executor.py +++ b/python/packages/core/agent_framework/_workflows/_workflow_executor.py @@ -420,8 +420,6 @@ async def 
handle_message_wrapped_request_response( ctx: The workflow context. """ request_id = response.source_event.request_id - if request_id is None: - raise RuntimeError("SubWorkflowResponseMessage source_event must have a request_id") await self._handle_response( request_id=request_id, response=response.data, @@ -553,10 +551,6 @@ async def _process_workflow_result( for event in request_info_events: request_id = event.request_id response_type = event.response_type - if request_id is None: - raise RuntimeError("request_info event must have a request_id") - if response_type is None: - raise RuntimeError("request_info event must have a response_type") # Track the pending request in execution context execution_context.pending_requests[request_id] = event # Map request to execution for response routing diff --git a/python/samples/getting_started/workflows/orchestration/magentic_human_plan_review.py b/python/samples/getting_started/orchestrations/magentic_human_plan_review copy.py similarity index 98% rename from python/samples/getting_started/workflows/orchestration/magentic_human_plan_review.py rename to python/samples/getting_started/orchestrations/magentic_human_plan_review copy.py index 4e0a5b5b6c..dd41457caa 100644 --- a/python/samples/getting_started/workflows/orchestration/magentic_human_plan_review.py +++ b/python/samples/getting_started/orchestrations/magentic_human_plan_review copy.py @@ -107,7 +107,7 @@ async def main() -> None: pending_responses = None # Handle plan review request if any - if pending_request is not None: + if pending_request is not None and pending_request.request_id: event_data = cast(MagenticPlanReviewRequest, pending_request.data) print("\n\n[Magentic Plan Review Request]") diff --git a/python/samples/getting_started/workflows/_start-here/step1_executors_and_edges.py b/python/samples/getting_started/workflows/_start-here/step1_executors_and_edges.py index 7c9f7a4cbb..98460844f6 100644 --- 
a/python/samples/getting_started/workflows/_start-here/step1_executors_and_edges.py +++ b/python/samples/getting_started/workflows/_start-here/step1_executors_and_edges.py @@ -140,7 +140,7 @@ def __init__(self, id: str): super().__init__(id=id) @handler(input=str, output=str) - async def add_exclamation(self, message: str, ctx: WorkflowContext) -> None: + async def add_exclamation(self, message, ctx) -> None: # type: ignore """Add exclamation marks to the input. Note: The input=str and output=str are explicitly specified on @handler, @@ -149,7 +149,7 @@ async def add_exclamation(self, message: str, ctx: WorkflowContext) -> None: on @handler take precedence. """ result = f"{message}!!!" - await ctx.send_message(result) + await ctx.send_message(result) # type: ignore async def main(): diff --git a/python/samples/getting_started/workflows/_start-here/step2_agents_in_a_workflow.py b/python/samples/getting_started/workflows/_start-here/step2_agents_in_a_workflow.py index 6ecfbe55a8..5d8a9d46cc 100644 --- a/python/samples/getting_started/workflows/_start-here/step2_agents_in_a_workflow.py +++ b/python/samples/getting_started/workflows/_start-here/step2_agents_in_a_workflow.py @@ -57,7 +57,6 @@ async def main(): # of `AgentResponse` from the agents in the workflow. 
outputs = cast(list[AgentResponse], outputs) for output in outputs: - # TODO: author_name should be available in AgentResponse print(f"{output.messages[0].author_name}: {output.text}\n") # Summarize the final run state (e.g., COMPLETED) diff --git a/python/samples/getting_started/workflows/_start-here/step4_using_factories.py b/python/samples/getting_started/workflows/_start-here/step4_using_factories.py index 577f105807..166514f7ac 100644 --- a/python/samples/getting_started/workflows/_start-here/step4_using_factories.py +++ b/python/samples/getting_started/workflows/_start-here/step4_using_factories.py @@ -8,7 +8,6 @@ Executor, WorkflowBuilder, WorkflowContext, - WorkflowEvent, executor, handler, ) diff --git a/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py b/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py index 90d00bd88e..457defcf51 100644 --- a/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py +++ b/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py @@ -12,7 +12,6 @@ AgentResponseUpdate, ChatAgent, ChatMessage, - Content, Executor, WorkflowBuilder, WorkflowContext, @@ -48,7 +47,9 @@ """ -# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; +# see samples/getting_started/tools/function_tool_with_approval.py and +# samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
@tool(approval_mode="never_require") def fetch_product_brief( product_name: Annotated[str, Field(description="Product name to look up.")], @@ -298,12 +299,12 @@ async def main() -> None: # Stash the request so we can prompt the human after the stream completes. requests.append((event.request_id, event.data)) last_executor = None - elif event.type == "output": + elif event.type == "output" and not isinstance(event.data, AgentResponseUpdate): + # Only mark as completed for final outputs, not streaming updates last_executor = None response = event.data - print("\n===== Final output =====") final_text = getattr(response, "text", str(response)) - print(final_text.strip()) + print(final_text, flush=True, end="") completed = True if requests and not completed: diff --git a/python/samples/getting_started/workflows/agents/concurrent_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/concurrent_workflow_as_agent.py index 4afd08c4dc..89b003dd5f 100644 --- a/python/samples/getting_started/workflows/agents/concurrent_workflow_as_agent.py +++ b/python/samples/getting_started/workflows/agents/concurrent_workflow_as_agent.py @@ -2,8 +2,8 @@ import asyncio -from agent_framework import ConcurrentBuilder from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.orchestrations import ConcurrentBuilder from azure.identity import AzureCliCredential """ diff --git a/python/samples/getting_started/workflows/agents/group_chat_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/group_chat_workflow_as_agent.py index 6b6737c6c6..4193d1fdfc 100644 --- a/python/samples/getting_started/workflows/agents/group_chat_workflow_as_agent.py +++ b/python/samples/getting_started/workflows/agents/group_chat_workflow_as_agent.py @@ -2,8 +2,9 @@ import asyncio -from agent_framework import ChatAgent, GroupChatBuilder +from agent_framework import ChatAgent from agent_framework.openai import OpenAIChatClient, OpenAIResponsesClient +from 
agent_framework.orchestrations import GroupChatBuilder """ Sample: Group Chat Orchestration diff --git a/python/samples/getting_started/workflows/agents/handoff_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/handoff_workflow_as_agent.py index 99f9cca02a..e083cf7d60 100644 --- a/python/samples/getting_started/workflows/agents/handoff_workflow_as_agent.py +++ b/python/samples/getting_started/workflows/agents/handoff_workflow_as_agent.py @@ -8,12 +8,11 @@ ChatAgent, ChatMessage, Content, - HandoffAgentUserRequest, - HandoffBuilder, WorkflowAgent, tool, ) from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.orchestrations import HandoffAgentUserRequest, HandoffBuilder from azure.identity import AzureCliCredential """Sample: Handoff Workflow as Agent with Human-in-the-Loop. diff --git a/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py index 007bbc21ca..bd70926b08 100644 --- a/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py +++ b/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py @@ -5,9 +5,9 @@ from agent_framework import ( ChatAgent, HostedCodeInterpreterTool, - MagenticBuilder, ) from agent_framework.openai import OpenAIChatClient, OpenAIResponsesClient +from agent_framework.orchestrations import MagenticBuilder """ Sample: Build a Magentic orchestration and wrap it as an agent. 
diff --git a/python/samples/getting_started/workflows/agents/sequential_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/sequential_workflow_as_agent.py index 6339f88ba2..ba09785f0c 100644 --- a/python/samples/getting_started/workflows/agents/sequential_workflow_as_agent.py +++ b/python/samples/getting_started/workflows/agents/sequential_workflow_as_agent.py @@ -2,8 +2,8 @@ import asyncio -from agent_framework import SequentialBuilder from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.orchestrations import SequentialBuilder from azure.identity import AzureCliCredential """ diff --git a/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py b/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py index 1fee49fc1d..23b4d1e5ee 100644 --- a/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py +++ b/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py @@ -33,7 +33,9 @@ # Define tools that accept custom context via **kwargs -# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +# NOTE: approval_mode="never_require" is for sample brevity. +# Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and +# samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
@tool(approval_mode="never_require") def get_user_data( query: Annotated[str, Field(description="What user data to retrieve")], diff --git a/python/samples/getting_started/workflows/agents/workflow_as_agent_with_thread.py b/python/samples/getting_started/workflows/agents/workflow_as_agent_with_thread.py index 0580fe45ab..01d5626589 100644 --- a/python/samples/getting_started/workflows/agents/workflow_as_agent_with_thread.py +++ b/python/samples/getting_started/workflows/agents/workflow_as_agent_with_thread.py @@ -2,8 +2,9 @@ import asyncio -from agent_framework import AgentThread, ChatAgent, ChatMessageStore, SequentialBuilder +from agent_framework import AgentThread, ChatAgent, ChatMessageStore from agent_framework.openai import OpenAIChatClient +from agent_framework.orchestrations import SequentialBuilder """ Sample: Workflow as Agent with Thread Conversation History and Checkpointing diff --git a/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py b/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py index 190c239b09..df7c5b1445 100644 --- a/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py +++ b/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py @@ -1,9 +1,16 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio +import sys from dataclasses import dataclass from pathlib import Path -from typing import Any, override +from typing import Any + +if sys.version_info >= (3, 12): + from typing import override # type: ignore # pragma: no cover +else: + from typing_extensions import override # type: ignore[import] # pragma: no cover + # NOTE: the Azure client imports above are real dependencies. 
When running this # sample outside of Azure-enabled environments you may wish to swap in the diff --git a/python/samples/getting_started/workflows/checkpoint/handoff_with_tool_approval_checkpoint_resume.py b/python/samples/getting_started/workflows/checkpoint/handoff_with_tool_approval_checkpoint_resume.py index 0524ed8595..6e0bcaa00a 100644 --- a/python/samples/getting_started/workflows/checkpoint/handoff_with_tool_approval_checkpoint_resume.py +++ b/python/samples/getting_started/workflows/checkpoint/handoff_with_tool_approval_checkpoint_resume.py @@ -16,8 +16,8 @@ WorkflowEvent, tool, ) -from agent_framework.orchestrations import HandoffAgentUserRequest, HandoffBuilder from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.orchestrations import HandoffAgentUserRequest, HandoffBuilder from azure.identity import AzureCliCredential """ @@ -158,11 +158,15 @@ def _build_responses_for_requests( """Create response payloads for each pending request.""" responses: dict[str, object] = {} for request in pending_requests: - if isinstance(request.data, HandoffAgentUserRequest): + if isinstance(request.data, HandoffAgentUserRequest) and request.request_id: if user_response is None: raise ValueError("User response is required for HandoffAgentUserRequest") responses[request.request_id] = user_response - elif isinstance(request.data, Content) and request.data.type == "function_approval_request": + elif ( + isinstance(request.data, Content) + and request.data.type == "function_approval_request" + and request.request_id + ): if approve_tools is None: raise ValueError("Approval decision is required for function approval request") responses[request.request_id] = request.data.to_function_approval_response(approved=approve_tools) diff --git a/python/samples/getting_started/workflows/checkpoint/sub_workflow_checkpoint.py b/python/samples/getting_started/workflows/checkpoint/sub_workflow_checkpoint.py index 85125c1f34..267cfdfb60 100644 --- 
a/python/samples/getting_started/workflows/checkpoint/sub_workflow_checkpoint.py +++ b/python/samples/getting_started/workflows/checkpoint/sub_workflow_checkpoint.py @@ -3,30 +3,33 @@ import asyncio import contextlib import json +import sys import uuid from dataclasses import dataclass, field, replace from datetime import datetime, timedelta from pathlib import Path -from typing import Any, override +from typing import Any from agent_framework import ( - WorkflowEvent, Executor, FileCheckpointStorage, - SubWorkflowRequestMessage, SubWorkflowResponseMessage, Workflow, WorkflowBuilder, WorkflowContext, + WorkflowEvent, WorkflowExecutor, - WorkflowRunState, - handler, response_handler, ) +if sys.version_info >= (3, 12): + from typing import override # type: ignore # pragma: no cover +else: + from typing_extensions import override # type: ignore[import] # pragma: no cover + CHECKPOINT_DIR = Path(__file__).with_suffix("").parent / "tmp" / "sub_workflow_checkpoints" """ diff --git a/python/samples/getting_started/workflows/checkpoint/workflow_as_agent_checkpoint.py b/python/samples/getting_started/workflows/checkpoint/workflow_as_agent_checkpoint.py index d947330a19..52d2f99843 100644 --- a/python/samples/getting_started/workflows/checkpoint/workflow_as_agent_checkpoint.py +++ b/python/samples/getting_started/workflows/checkpoint/workflow_as_agent_checkpoint.py @@ -30,9 +30,9 @@ ChatAgent, ChatMessageStore, InMemoryCheckpointStorage, - SequentialBuilder, ) from agent_framework.openai import OpenAIChatClient +from agent_framework.orchestrations import SequentialBuilder async def basic_checkpointing() -> None: @@ -157,7 +157,12 @@ def create_assistant() -> ChatAgent: print(f"\nCheckpoints created during stream: {len(checkpoints)}") +async def main() -> None: + """Run all checkpoint examples.""" + await basic_checkpointing() + await checkpointing_with_thread() + await streaming_with_checkpoints() + + if __name__ == "__main__": - asyncio.run(basic_checkpointing()) - 
asyncio.run(checkpointing_with_thread()) - asyncio.run(streaming_with_checkpoints()) + asyncio.run(main()) diff --git a/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py b/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py index 6d283d9125..4c77fc5202 100644 --- a/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py +++ b/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py @@ -5,14 +5,12 @@ from typing import Annotated, Any from agent_framework import ( - WorkflowEvent, ChatMessage, - SequentialBuilder, WorkflowExecutor, - tool, ) from agent_framework.openai import OpenAIChatClient +from agent_framework.orchestrations import SequentialBuilder """ Sample: Sub-Workflow kwargs Propagation @@ -33,7 +31,9 @@ # Define tools that access custom context via **kwargs -# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; +# see samples/getting_started/tools/function_tool_with_approval.py and +# samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
@tool(approval_mode="never_require") def get_authenticated_data( resource: Annotated[str, "The resource to fetch"], @@ -141,6 +141,50 @@ async def main() -> None: print("Sample Complete - kwargs successfully flowed through sub-workflow!") print("=" * 70) + """ + Sample Output: + + ====================================================================== + Sub-Workflow kwargs Propagation Demo + ====================================================================== + + Context being passed to parent workflow: + user_token: { + "user_name": "alice@contoso.com", + "access_level": "admin", + "session_id": "sess_12345" + } + service_config: { + "services": { + "users": "https://api.example.com/v1/users", + "orders": "https://api.example.com/v1/orders", + "inventory": "https://api.example.com/v1/inventory" + }, + "timeout": 30 + } + + ---------------------------------------------------------------------- + Workflow Execution (kwargs flow: parent -> sub-workflow -> agent -> tool): + ---------------------------------------------------------------------- + + [get_authenticated_data] kwargs keys: ['user_token', 'service_config'] + [get_authenticated_data] User: alice@contoso.com, Access: admin + + [call_configured_service] kwargs keys: ['user_token', 'service_config'] + [call_configured_service] Available services: ['users', 'orders', 'inventory'] + + [Final Answer]: Please fetch my profile data and then call the users service. + + [Final Answer]: - Your profile data has been fetched. + - The users service has been called. + + Would you like details from either the profile data or the users service response? + + ====================================================================== + Sample Complete - kwargs successfully flowed through sub-workflow! 
+ ====================================================================== + """ + if __name__ == "__main__": asyncio.run(main()) diff --git a/python/samples/getting_started/workflows/composition/sub_workflow_request_interception.py b/python/samples/getting_started/workflows/composition/sub_workflow_request_interception.py index f09bc40838..9b0637652b 100644 --- a/python/samples/getting_started/workflows/composition/sub_workflow_request_interception.py +++ b/python/samples/getting_started/workflows/composition/sub_workflow_request_interception.py @@ -4,7 +4,6 @@ from dataclasses import dataclass from agent_framework import ( - WorkflowEvent, Executor, SubWorkflowRequestMessage, SubWorkflowResponseMessage, @@ -12,7 +11,6 @@ WorkflowBuilder, WorkflowContext, WorkflowExecutor, - handler, response_handler, ) diff --git a/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py b/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py index 09d282efda..67058435c9 100644 --- a/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py +++ b/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py @@ -16,7 +16,6 @@ WorkflowBuilder, WorkflowContext, WorkflowEvent, - executor, ) from agent_framework.azure import AzureOpenAIChatClient diff --git a/python/samples/getting_started/workflows/control-flow/sequential_executors.py b/python/samples/getting_started/workflows/control-flow/sequential_executors.py index 68e3f71255..d69aafcfe9 100644 --- a/python/samples/getting_started/workflows/control-flow/sequential_executors.py +++ b/python/samples/getting_started/workflows/control-flow/sequential_executors.py @@ -4,11 +4,9 @@ from typing import cast from agent_framework import ( - WorkflowEvent, Executor, WorkflowBuilder, WorkflowContext, - handler, ) from typing_extensions import Never diff --git a/python/samples/getting_started/workflows/control-flow/simple_loop.py 
b/python/samples/getting_started/workflows/control-flow/simple_loop.py index dab1a7313c..e9fca78510 100644 --- a/python/samples/getting_started/workflows/control-flow/simple_loop.py +++ b/python/samples/getting_started/workflows/control-flow/simple_loop.py @@ -11,7 +11,6 @@ Executor, WorkflowBuilder, WorkflowContext, - WorkflowEvent, handler, ) from agent_framework.azure import AzureOpenAIChatClient diff --git a/python/samples/getting_started/workflows/declarative/human_in_loop/main.py b/python/samples/getting_started/workflows/declarative/human_in_loop/main.py index 5217212fb2..8f501ab358 100644 --- a/python/samples/getting_started/workflows/declarative/human_in_loop/main.py +++ b/python/samples/getting_started/workflows/declarative/human_in_loop/main.py @@ -15,7 +15,7 @@ import asyncio from pathlib import Path -from agent_framework import Workflow, WorkflowEvent +from agent_framework import Workflow from agent_framework.declarative import ExternalInputRequest, WorkflowFactory from agent_framework_declarative._workflows._handlers import TextOutputEvent diff --git a/python/samples/getting_started/workflows/human-in-the-loop/agents_with_HITL.py b/python/samples/getting_started/workflows/human-in-the-loop/agents_with_HITL.py index 4d3b34e155..739a0cbe96 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/agents_with_HITL.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/agents_with_HITL.py @@ -128,11 +128,7 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str requests: list[tuple[str, DraftFeedbackRequest]] = [] async for event in stream: - if ( - event.type == "request_info" - and isinstance(event.data, DraftFeedbackRequest) - and event.request_id is not None - ): + if event.type == "request_info" and isinstance(event.data, DraftFeedbackRequest): requests.append((event.request_id, event.data)) elif event.type == "output" and isinstance(event.data, AgentResponseUpdate): # This workflow should only 
produce AgentResponseUpdate as outputs. diff --git a/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py b/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py index 7264d88b73..3513483993 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py @@ -96,11 +96,7 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str requests: dict[str, AgentExecutorResponse] = {} async for event in stream: - if ( - event.type == "request_info" - and isinstance(event.data, AgentExecutorResponse) - and event.request_id is not None - ): + if event.type == "request_info" and isinstance(event.data, AgentExecutorResponse): requests[event.request_id] = event.data if event.type == "output": diff --git a/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py b/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py index 7c2bdedf5e..b754c26309 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py @@ -42,11 +42,7 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str requests: dict[str, AgentExecutorResponse] = {} async for event in stream: - if ( - event.type == "request_info" - and isinstance(event.data, AgentExecutorResponse) - and event.request_id is not None - ): + if event.type == "request_info" and isinstance(event.data, AgentExecutorResponse): requests[event.request_id] = event.data if event.type == "output": diff --git a/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py b/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py index 2222297d2c..bee4aeb61d 
100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py @@ -130,11 +130,13 @@ async def on_human_feedback( return # Provide feedback to the agent to try again. - # We keep the agent's output strictly JSON to ensure stable parsing on the next turn. - user_msg = ChatMessage( - "user", - text=(f'Feedback: {reply}. Return ONLY a JSON object matching the schema {{"guess": }}.'), + # response_format=GuessOutput on the agent ensures JSON output, so we just need to guide the logic. + last_guess = original_request.prompt.split(": ")[1].split(".")[0] + feedback_text = ( + f"Feedback: {reply}. Your last guess was {last_guess}. " + f"Use this feedback to adjust and make your next guess (1-10)." ) + user_msg = ChatMessage("user", text=feedback_text) await ctx.send_message(AgentExecutorRequest(messages=[user_msg], should_respond=True)) diff --git a/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py b/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py index ac1eca4676..7c4e47bf46 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py @@ -29,10 +29,10 @@ AgentExecutorResponse, AgentRequestInfoResponse, ChatMessage, - SequentialBuilder, WorkflowEvent, ) from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.orchestrations import SequentialBuilder from azure.identity import AzureCliCredential diff --git a/python/samples/getting_started/workflows/observability/executor_io_observation.py b/python/samples/getting_started/workflows/observability/executor_io_observation.py index 9bdde3f6c5..822d0a7c72 100644 --- a/python/samples/getting_started/workflows/observability/executor_io_observation.py +++ 
b/python/samples/getting_started/workflows/observability/executor_io_observation.py @@ -7,7 +7,6 @@ Executor, WorkflowBuilder, WorkflowContext, - WorkflowEvent, handler, ) from typing_extensions import Never diff --git a/python/samples/getting_started/workflows/parallelism/aggregate_results_of_different_types.py b/python/samples/getting_started/workflows/parallelism/aggregate_results_of_different_types.py index b06665d03b..e4550c1ab2 100644 --- a/python/samples/getting_started/workflows/parallelism/aggregate_results_of_different_types.py +++ b/python/samples/getting_started/workflows/parallelism/aggregate_results_of_different_types.py @@ -3,7 +3,7 @@ import asyncio import random -from agent_framework import Executor, WorkflowBuilder, WorkflowContext, handler +from agent_framework import Executor, WorkflowBuilder, WorkflowContext, handler from typing_extensions import Never """ diff --git a/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py b/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py index a34275016b..2be9bc09f7 100644 --- a/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py +++ b/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py @@ -9,10 +9,8 @@ ChatAgent, # Tracing event for agent execution steps ChatMessage, # Chat message structure Executor, # Base class for custom Python executors - Role, # Enum of chat roles (user, assistant, system) WorkflowBuilder, # Fluent builder for wiring the workflow graph WorkflowContext, # Per run context and event bus - WorkflowEvent, # Unified event class for workflow events handler, # Decorator to mark an Executor method as invokable ) from agent_framework.azure import AzureOpenAIChatClient @@ -43,7 +41,7 @@ class DispatchToExperts(Executor): @handler async def dispatch(self, prompt: str, ctx: WorkflowContext[AgentExecutorRequest]) -> None: # Wrap the incoming prompt as a user message for each expert and request a 
response. - initial_message = ChatMessage(Role.USER, text=prompt) + initial_message = ChatMessage("user", text=prompt) await ctx.send_message(AgentExecutorRequest(messages=[initial_message], should_respond=True)) diff --git a/python/samples/getting_started/workflows/state-management/workflow_kwargs.py b/python/samples/getting_started/workflows/state-management/workflow_kwargs.py index dd4496918a..25e46ab343 100644 --- a/python/samples/getting_started/workflows/state-management/workflow_kwargs.py +++ b/python/samples/getting_started/workflows/state-management/workflow_kwargs.py @@ -2,7 +2,7 @@ import asyncio import json -from typing import Annotated, Any +from typing import Annotated, Any, cast from agent_framework import ChatMessage, tool from agent_framework.openai import OpenAIChatClient @@ -27,7 +27,9 @@ # Define tools that accept custom context via **kwargs -# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; +# see samples/getting_started/tools/function_tool_with_approval.py +# and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
@tool(approval_mode="never_require") def get_user_data( query: Annotated[str, Field(description="What user data to retrieve")], @@ -119,7 +121,7 @@ async def main() -> None: stream=True, ): if event.type == "output": - output_data = event.data + output_data = cast(list[ChatMessage], event.data) if isinstance(output_data, list): for item in output_data: if isinstance(item, ChatMessage) and item.text: diff --git a/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py index 7582cd84bf..e49c9456d2 100644 --- a/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py +++ b/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py @@ -6,12 +6,12 @@ from agent_framework import ( ChatMessage, - ConcurrentBuilder, Content, WorkflowEvent, tool, ) from agent_framework.openai import OpenAIChatClient +from agent_framework.orchestrations import ConcurrentBuilder """ Sample: Concurrent Workflow with Tool Approval Requests diff --git a/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py index 57b435705d..732b73d746 100644 --- a/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py +++ b/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py @@ -7,12 +7,11 @@ from agent_framework import ( ChatMessage, Content, - GroupChatBuilder, - GroupChatState, WorkflowEvent, tool, ) from agent_framework.openai import OpenAIChatClient +from agent_framework.orchestrations import GroupChatBuilder, GroupChatState """ Sample: Group Chat Workflow with Tool Approval Requests @@ -106,7 +105,7 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str print("Workflow summary:") 
outputs = cast(list[ChatMessage], event.data) for msg in outputs: - speaker = msg.author_name or msg.role.value + speaker = msg.author_name or msg.role print(f"[{speaker}]: {msg.text}") responses: dict[str, Content] = {} diff --git a/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py index dcca7e00c9..3695097363 100644 --- a/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py +++ b/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py @@ -7,11 +7,11 @@ from agent_framework import ( ChatMessage, Content, - SequentialBuilder, WorkflowEvent, tool, ) from agent_framework.openai import OpenAIChatClient +from agent_framework.orchestrations import SequentialBuilder """ Sample: Sequential Workflow with Tool Approval Requests @@ -53,7 +53,9 @@ def execute_database_query( return f"Query executed successfully. Results: 3 rows affected by '{query}'" -# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; +# see samples/getting_started/tools/function_tool_with_approval.py and +# samples/getting_started/tools/function_tool_with_approval_and_threads.py. @tool(approval_mode="never_require") def get_database_schema() -> str: """Get the current database schema. 
Does not require approval.""" From 26c5b8056303dc0389639a9510727d5c05eb070d Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Fri, 6 Feb 2026 13:18:22 +0900 Subject: [PATCH 09/12] Formatting --- .../workflows/_start-here/step2_agents_in_a_workflow.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/samples/getting_started/workflows/_start-here/step2_agents_in_a_workflow.py b/python/samples/getting_started/workflows/_start-here/step2_agents_in_a_workflow.py index 5d8a9d46cc..b2fcbb1aa0 100644 --- a/python/samples/getting_started/workflows/_start-here/step2_agents_in_a_workflow.py +++ b/python/samples/getting_started/workflows/_start-here/step2_agents_in_a_workflow.py @@ -65,7 +65,7 @@ async def main(): """ writer: "Charge Ahead: Affordable Adventure Awaits!" - reviewer: - Consider emphasizing both affordability and fun in a more dynamic way. + reviewer: - Consider emphasizing both affordability and fun in a more dynamic way. - Try using a catchy phrase that includes a play on words, like “Electrify Your Drive: Fun Meets Affordability!” - Ensure the slogan is succinct while capturing the essence of the car's unique selling proposition. 
From 72bc0e04853f1e7410ed6b2282830e2495cfafe9 Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Fri, 6 Feb 2026 13:48:20 +0900 Subject: [PATCH 10/12] Cleanup --- .../devui/agent_framework_devui/_executor.py | 7 +- .../magentic_human_plan_review copy.py | 143 ------------------ .../magentic_human_plan_review.py | 2 +- 3 files changed, 4 insertions(+), 148 deletions(-) delete mode 100644 python/samples/getting_started/orchestrations/magentic_human_plan_review copy.py diff --git a/python/packages/devui/agent_framework_devui/_executor.py b/python/packages/devui/agent_framework_devui/_executor.py index a931bf8b1f..7f395023b6 100644 --- a/python/packages/devui/agent_framework_devui/_executor.py +++ b/python/packages/devui/agent_framework_devui/_executor.py @@ -7,7 +7,7 @@ from collections.abc import AsyncGenerator from typing import Any -from agent_framework import AgentProtocol, Content +from agent_framework import AgentProtocol, Content, Workflow from ._conversations import ConversationStore, InMemoryConversationStore from ._discovery import EntityDiscovery @@ -262,8 +262,7 @@ async def execute_entity(self, entity_id: str, request: AgentFrameworkRequest) - elif entity_info.type == "workflow": async for event in self._execute_workflow(entity_obj, request, trace_collector): # Log request_info event (type='request_info') for debugging HIL flow - event_class = event.__class__.__name__ if hasattr(event, "__class__") else type(event).__name__ - if event_class == "RequestInfoEvent": + if event.type == "request_info": logger.info( "🔔 [EXECUTOR] request_info event (type='request_info') detected from workflow!" 
) @@ -361,7 +360,7 @@ async def _execute_agent( yield {"type": "error", "message": f"Agent execution error: {e!s}"} async def _execute_workflow( - self, workflow: Any, request: AgentFrameworkRequest, trace_collector: Any + self, workflow: Workflow, request: AgentFrameworkRequest, trace_collector: Any ) -> AsyncGenerator[Any, None]: """Execute Agent Framework workflow with checkpoint support via conversation items. diff --git a/python/samples/getting_started/orchestrations/magentic_human_plan_review copy.py b/python/samples/getting_started/orchestrations/magentic_human_plan_review copy.py deleted file mode 100644 index dd41457caa..0000000000 --- a/python/samples/getting_started/orchestrations/magentic_human_plan_review copy.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -import asyncio -import json -from typing import cast - -from agent_framework import ( - AgentResponseUpdate, - ChatAgent, - ChatMessage, - WorkflowEvent, -) -from agent_framework.openai import OpenAIChatClient -from agent_framework.orchestrations import MagenticBuilder, MagenticPlanReviewRequest - -""" -Sample: Magentic Orchestration with Human Plan Review - -This sample demonstrates how humans can review and provide feedback on plans -generated by the Magentic workflow orchestrator. When plan review is enabled, -the workflow requests human approval or revision before executing each plan. - -Key concepts: -- with_plan_review(): Enables human review of generated plans -- MagenticPlanReviewRequest: The event type for plan review requests -- Human can choose to: approve the plan or provide revision feedback - -Plan review options: -- approve(): Accept the proposed plan and continue execution -- revise(feedback): Provide textual feedback to modify the plan - -Prerequisites: -- OpenAI credentials configured for `OpenAIChatClient`. 
-""" - - -async def main() -> None: - researcher_agent = ChatAgent( - name="ResearcherAgent", - description="Specialist in research and information gathering", - instructions="You are a Researcher. You find information and gather facts.", - chat_client=OpenAIChatClient(model_id="gpt-4o"), - ) - - analyst_agent = ChatAgent( - name="AnalystAgent", - description="Data analyst who processes and summarizes research findings", - instructions="You are an Analyst. You analyze findings and create summaries.", - chat_client=OpenAIChatClient(model_id="gpt-4o"), - ) - - manager_agent = ChatAgent( - name="MagenticManager", - description="Orchestrator that coordinates the workflow", - instructions="You coordinate a team to complete tasks efficiently.", - chat_client=OpenAIChatClient(model_id="gpt-4o"), - ) - - print("\nBuilding Magentic Workflow with Human Plan Review...") - - workflow = ( - MagenticBuilder() - .participants([researcher_agent, analyst_agent]) - .with_manager( - agent=manager_agent, - max_round_count=10, - max_stall_count=1, - max_reset_count=2, - ) - .with_plan_review() # Request human input for plan review - .build() - ) - - task = "Research sustainable aviation fuel technology and summarize the findings." 
- - print(f"\nTask: {task}") - print("\nStarting workflow execution...") - print("=" * 60) - - pending_request: WorkflowEvent | None = None - pending_responses: dict[str, object] | None = None - output_event: WorkflowEvent | None = None - - while not output_event: - if pending_responses is not None: - stream = workflow.send_responses_streaming(pending_responses) - else: - stream = workflow.run(task, stream=True) - - last_message_id: str | None = None - async for event in stream: - if event.type == "output" and isinstance(event.data, AgentResponseUpdate): - message_id = event.data.message_id - if message_id != last_message_id: - if last_message_id is not None: - print("\n") - print(f"- {event.executor_id}:", end=" ", flush=True) - last_message_id = message_id - print(event.data, end="", flush=True) - - elif event.type == "request_info" and event.request_type is MagenticPlanReviewRequest: - pending_request = event - - elif event.type == "output": - output_event = event - - pending_responses = None - - # Handle plan review request if any - if pending_request is not None and pending_request.request_id: - event_data = cast(MagenticPlanReviewRequest, pending_request.data) - - print("\n\n[Magentic Plan Review Request]") - if event_data.current_progress is not None: - print("Current Progress Ledger:") - print(json.dumps(event_data.current_progress.to_dict(), indent=2)) - print() - print(f"Proposed Plan:\n{event_data.plan.text}\n") - print("Please provide your feedback (press Enter to approve):") - - reply = await asyncio.get_event_loop().run_in_executor(None, input, "> ") - if reply.strip() == "": - print("Plan approved.\n") - pending_responses = {pending_request.request_id: event_data.approve()} - else: - print("Plan revised by human.\n") - pending_responses = {pending_request.request_id: event_data.revise(reply)} - pending_request = None - - print("\n" + "=" * 60) - print("WORKFLOW COMPLETED") - print("=" * 60) - print("Final Output:") - # The output of the Magentic 
workflow is a list of ChatMessages with only one final message - # generated by the orchestrator. - output_messages = cast(list[ChatMessage], output_event.data) - if output_messages: - output = output_messages[-1].text - print(output) - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/getting_started/orchestrations/magentic_human_plan_review.py b/python/samples/getting_started/orchestrations/magentic_human_plan_review.py index bf4dae612b..eda574b264 100644 --- a/python/samples/getting_started/orchestrations/magentic_human_plan_review.py +++ b/python/samples/getting_started/orchestrations/magentic_human_plan_review.py @@ -66,7 +66,7 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str # To make the type checker happy, we cast event.data to the expected type outputs = cast(list[ChatMessage], event.data) for msg in outputs: - speaker = msg.author_name or msg.role.value + speaker = msg.author_name or msg.role print(f"[{speaker}]: {msg.text}") responses: dict[str, MagenticPlanReviewResponse] = {} From 80d595a9c2ecf41256643ef4f75324f80648d4c0 Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Fri, 6 Feb 2026 14:03:18 +0900 Subject: [PATCH 11/12] use builtins.type to avoid shadowing by WorkflowEvent.type attribute --- .../core/agent_framework/_workflows/_events.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/python/packages/core/agent_framework/_workflows/_events.py b/python/packages/core/agent_framework/_workflows/_events.py index b4f5f678fa..4f7b107187 100644 --- a/python/packages/core/agent_framework/_workflows/_events.py +++ b/python/packages/core/agent_framework/_workflows/_events.py @@ -2,6 +2,7 @@ from __future__ import annotations +import builtins import sys import traceback as _traceback from collections.abc import Iterator @@ -170,11 +171,12 @@ class WorkflowEvent(Generic[DataT]): print(f"Agent response: {event.data.text}") """ + type: WorkflowEventType data: DataT 
def __init__( self, - type: WorkflowEventType | str, + type: WorkflowEventType, data: DataT | None = None, *, # Event context fields @@ -188,8 +190,8 @@ def __init__( # REQUEST_INFO event fields request_id: str | None = None, source_executor_id: str | None = None, - request_type: type[Any] | None = None, - response_type: type[Any] | None = None, + request_type: builtins.type[Any] | None = None, + response_type: builtins.type[Any] | None = None, # SUPERSTEP event fields iteration: int | None = None, ) -> None: @@ -275,7 +277,7 @@ def request_info( request_id: str, source_executor_id: str, request_data: DataT, - response_type: type[Any], + response_type: builtins.type[Any], ) -> WorkflowEvent[DataT]: """Create a 'request_info' event when an executor requests external information.""" return cls( @@ -347,7 +349,7 @@ def source_executor_id(self) -> str: return self._source_executor_id @property - def request_type(self) -> type[Any]: + def request_type(self) -> builtins.type[Any]: """Get request_type for request_info events. Returns: @@ -362,7 +364,7 @@ def request_type(self) -> type[Any]: return self._request_type @property - def response_type(self) -> type[Any]: + def response_type(self) -> builtins.type[Any]: """Get response_type for request_info events. 
Returns: From 10c84059e3cc5a7f8e600e5967024cef64b9ec9b Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Fri, 6 Feb 2026 16:38:05 +0900 Subject: [PATCH 12/12] Final improvements --- .../agent_framework/_workflows/_events.py | 7 +- .../orchestrations/__init__.pyi | 4 + .../_base_group_chat_orchestrator.py | 76 +++++++------------ .../_handoff.py | 22 +++--- .../_magentic.py | 56 +++++++------- .../orchestrations/tests/test_magentic.py | 3 +- .../orchestrations/04_magentic_one.py | 16 ++-- .../orchestrations/handoff_autonomous.py | 6 +- .../handoff_participant_factory.py | 8 +- .../orchestrations/handoff_simple.py | 18 ++--- .../handoff_with_code_interpreter_file.py | 7 +- .../orchestrations/magentic.py | 21 +++-- 12 files changed, 112 insertions(+), 132 deletions(-) diff --git a/python/packages/core/agent_framework/_workflows/_events.py b/python/packages/core/agent_framework/_workflows/_events.py index 4f7b107187..18e974e3e7 100644 --- a/python/packages/core/agent_framework/_workflows/_events.py +++ b/python/packages/core/agent_framework/_workflows/_events.py @@ -99,7 +99,8 @@ def from_exception( ) -# Type discriminator for workflow events (like ContentType in _types.py) +# Type discriminator for workflow events. +# Includes both framework lifecycle types and well-known orchestration types. 
WorkflowEventType = Literal[ # Lifecycle events (workflow-level) "started", # Workflow run began @@ -120,6 +121,10 @@ def from_exception( "executor_invoked", # Executor handler was called (use .executor_id, .data) "executor_completed", # Executor handler completed (use .executor_id, .data) "executor_failed", # Executor handler raised error (use .executor_id, .details) + # Orchestration event types (use .data for typed payload) + "group_chat", # Group chat orchestrator events (use .data as GroupChatRequestSentEvent | GroupChatResponseReceivedEvent) # noqa: E501 + "handoff_sent", # Handoff routing events (use .data as HandoffSentEvent) + "magentic_orchestrator", # Magentic orchestrator events (use .data as MagenticOrchestratorEvent) ] diff --git a/python/packages/core/agent_framework/orchestrations/__init__.pyi b/python/packages/core/agent_framework/orchestrations/__init__.pyi index fcaaf04d00..cf26847972 100644 --- a/python/packages/core/agent_framework/orchestrations/__init__.pyi +++ b/python/packages/core/agent_framework/orchestrations/__init__.pyi @@ -12,6 +12,8 @@ from agent_framework_orchestrations import ( ConcurrentBuilder, GroupChatBuilder, GroupChatOrchestrator, + GroupChatRequestMessage, + GroupChatRequestSentEvent, GroupChatSelectionFunction, GroupChatState, HandoffAgentExecutor, @@ -48,6 +50,8 @@ __all__ = [ "ConcurrentBuilder", "GroupChatBuilder", "GroupChatOrchestrator", + "GroupChatRequestMessage", + "GroupChatRequestSentEvent", "GroupChatSelectionFunction", "GroupChatState", "HandoffAgentExecutor", diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_base_group_chat_orchestrator.py b/python/packages/orchestrations/agent_framework_orchestrations/_base_group_chat_orchestrator.py index f42cfbc4c0..4d93a3e69b 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_base_group_chat_orchestrator.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_base_group_chat_orchestrator.py @@ -61,48 +61,22 
@@ class GroupChatResponseMessage: # region Group chat events -class GroupChatEvent(WorkflowEvent): - """Base class for group chat workflow events.""" - def __init__(self, round_index: int, data: Any | None = None) -> None: - """Initialize group chat event. - Args: - round_index: Current round index - data: Optional event-specific data - """ - super().__init__("group_chat", data=data) - self.round_index = round_index - - -class GroupChatResponseReceivedEvent(GroupChatEvent): - """Event emitted when a participant response is received.""" - - def __init__(self, round_index: int, participant_name: str, data: Any | None = None) -> None: - """Initialize response received event. - - Args: - round_index: Current round index - participant_name: Name of the participant who sent the response - data: Optional event-specific data - """ - super().__init__(round_index, data) - self.participant_name = participant_name +@dataclass +class GroupChatRequestSentEvent: + """Data payload for group_chat request sent events.""" + round_index: int + participant_name: str -class GroupChatRequestSentEvent(GroupChatEvent): - """Event emitted when a request is sent to a participant.""" - def __init__(self, round_index: int, participant_name: str, data: Any | None = None) -> None: - """Initialize request sent event. 
+@dataclass +class GroupChatResponseReceivedEvent: + """Data payload for group_chat response received events.""" - Args: - round_index: Current round index - participant_name: Name of the participant to whom the request was sent - data: Optional event-specific data - """ - super().__init__(round_index, data) - self.participant_name = participant_name + round_index: int + participant_name: str # endregion @@ -273,10 +247,12 @@ async def handle_participant_response( ctx: Workflow context """ await ctx.add_event( - GroupChatResponseReceivedEvent( - round_index=self._round_index, - participant_name=ctx.source_executor_ids[0] if ctx.source_executor_ids else "unknown", - data=response, + WorkflowEvent( + "group_chat", + data=GroupChatResponseReceivedEvent( + round_index=self._round_index, + participant_name=ctx.source_executor_ids[0] if ctx.source_executor_ids else "unknown", + ), ) ) await self._handle_response(response, ctx) @@ -469,10 +445,12 @@ async def _send_request_to_participant( request = AgentExecutorRequest(messages=messages, should_respond=True) await ctx.send_message(request, target_id=target) await ctx.add_event( - GroupChatRequestSentEvent( - round_index=self._round_index, - participant_name=target, - data=request, + WorkflowEvent( + "group_chat", + data=GroupChatRequestSentEvent( + round_index=self._round_index, + participant_name=target, + ), ) ) else: @@ -480,10 +458,12 @@ async def _send_request_to_participant( request = GroupChatRequestMessage(additional_instruction=additional_instruction, metadata=metadata) # type: ignore[assignment] await ctx.send_message(request, target_id=target) await ctx.add_event( - GroupChatRequestSentEvent( - round_index=self._round_index, - participant_name=target, - data=request, + WorkflowEvent( + "group_chat", + data=GroupChatRequestSentEvent( + round_index=self._round_index, + participant_name=target, + ), ) ) diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py 
b/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py index 71d399e77d..a2f9a4eea8 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py @@ -64,20 +64,14 @@ # region Handoff events -class HandoffSentEvent(WorkflowEvent): - """Base class for handoff workflow events.""" - def __init__(self, source: str, target: str, data: Any | None = None) -> None: - """Initialize handoff sent event. - Args: - source: Identifier of the source agent initiating the handoff - target: Identifier of the target agent receiving the handoff - data: Optional event-specific data - """ - super().__init__("handoff_sent", data=data) - self.source = source - self.target = target +@dataclass +class HandoffSentEvent: + """Data payload for handoff_sent events.""" + + source: str + target: str # endregion @@ -421,7 +415,9 @@ async def _run_agent_and_emit( await cast(WorkflowContext[AgentExecutorRequest], ctx).send_message( AgentExecutorRequest(messages=[], should_respond=True), target_id=handoff_target ) - await ctx.add_event(HandoffSentEvent(source=self.id, target=handoff_target)) + await ctx.add_event( + WorkflowEvent("handoff_sent", data=HandoffSentEvent(source=self.id, target=handoff_target)) + ) self._autonomous_mode_turns = 0 # Reset autonomous mode turn counter on handoff return diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_magentic.py b/python/packages/orchestrations/agent_framework_orchestrations/_magentic.py index 4412bbe562..a90f570575 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_magentic.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_magentic.py @@ -771,23 +771,11 @@ class MagenticOrchestratorEventType(str, Enum): @dataclass -class MagenticOrchestratorEvent(WorkflowEvent): - """Base class for Magentic orchestrator events. 
+class MagenticOrchestratorEvent: + """Data payload for magentic_orchestrator events.""" - Uses the 'custom' event type for extensibility. - """ - - def __init__( - self, - executor_id: str, - event_type: MagenticOrchestratorEventType, - data: ChatMessage | MagenticProgressLedger, - ) -> None: - super().__init__("magentic_orchestrator", data=data, executor_id=executor_id) - self.event_type = event_type - - def __repr__(self) -> str: - return f"{self.__class__.__name__}(executor_id={self.executor_id}, event_type={self.event_type})" + event_type: MagenticOrchestratorEventType + content: ChatMessage | MagenticProgressLedger # region Request info related types @@ -931,10 +919,13 @@ async def _handle_messages( # Initial planning using the manager with real model calls self._task_ledger = await self._manager.plan(self._magentic_context.clone(deep=True)) await ctx.add_event( - MagenticOrchestratorEvent( + WorkflowEvent( + "magentic_orchestrator", executor_id=self.id, - event_type=MagenticOrchestratorEventType.PLAN_CREATED, - data=self._task_ledger, + data=MagenticOrchestratorEvent( + event_type=MagenticOrchestratorEventType.PLAN_CREATED, + content=self._task_ledger, + ), ) ) @@ -1009,10 +1000,13 @@ async def handle_plan_review_response( self._magentic_context.chat_history.extend(response.review) self._task_ledger = await self._manager.replan(self._magentic_context.clone(deep=True)) await ctx.add_event( - MagenticOrchestratorEvent( + WorkflowEvent( + "magentic_orchestrator", executor_id=self.id, - event_type=MagenticOrchestratorEventType.REPLANNED, - data=self._task_ledger, + data=MagenticOrchestratorEvent( + event_type=MagenticOrchestratorEventType.REPLANNED, + content=self._task_ledger, + ), ) ) # Continue the review process by sending the new plan for review again until approved @@ -1075,10 +1069,13 @@ async def _run_inner_loop_helper( return await ctx.add_event( - MagenticOrchestratorEvent( + WorkflowEvent( + "magentic_orchestrator", executor_id=self.id, - 
event_type=MagenticOrchestratorEventType.PROGRESS_LEDGER_UPDATED, - data=self._progress_ledger, + data=MagenticOrchestratorEvent( + event_type=MagenticOrchestratorEventType.PROGRESS_LEDGER_UPDATED, + content=self._progress_ledger, + ), ) ) @@ -1152,10 +1149,13 @@ async def _reset_and_replan( # Replan self._task_ledger = await self._manager.replan(self._magentic_context.clone(deep=True)) await ctx.add_event( - MagenticOrchestratorEvent( + WorkflowEvent( + "magentic_orchestrator", executor_id=self.id, - event_type=MagenticOrchestratorEventType.REPLANNED, - data=self._task_ledger, + data=MagenticOrchestratorEvent( + event_type=MagenticOrchestratorEventType.REPLANNED, + content=self._task_ledger, + ), ) ) # If a human must sign off, ask now and return. The response handler will resume. diff --git a/python/packages/orchestrations/tests/test_magentic.py b/python/packages/orchestrations/tests/test_magentic.py index a26105c2ac..d92e6aff47 100644 --- a/python/packages/orchestrations/tests/test_magentic.py +++ b/python/packages/orchestrations/tests/test_magentic.py @@ -30,7 +30,6 @@ MagenticContext, MagenticManagerBase, MagenticOrchestrator, - MagenticOrchestratorEvent, MagenticPlanReviewRequest, MagenticProgressLedger, MagenticProgressLedgerItem, @@ -198,7 +197,7 @@ async def test_magentic_builder_returns_workflow_and_runs() -> None: msg = event.data if isinstance(msg, list): outputs.extend(cast(list[ChatMessage], msg)) - elif isinstance(event, MagenticOrchestratorEvent): + elif event.type == "magentic_orchestrator": orchestrator_event_count += 1 assert outputs, "Expected a final output message" diff --git a/python/samples/autogen-migration/orchestrations/04_magentic_one.py b/python/samples/autogen-migration/orchestrations/04_magentic_one.py index 3ca032951e..201e653693 100644 --- a/python/samples/autogen-migration/orchestrations/04_magentic_one.py +++ b/python/samples/autogen-migration/orchestrations/04_magentic_one.py @@ -14,7 +14,7 @@ ChatMessage, WorkflowEvent, ) -from 
agent_framework.orchestrations import MagenticOrchestratorEvent, MagenticProgressLedger +from agent_framework.orchestrations import MagenticProgressLedger async def run_autogen() -> None: @@ -121,14 +121,14 @@ async def run_agent_framework() -> None: last_message_id = message_id print(event.data, end="", flush=True) - elif isinstance(event, MagenticOrchestratorEvent): - print(f"\n[Magentic Orchestrator Event] Type: {event.event_type.name}") - if isinstance(event.data, ChatMessage): - print(f"Please review the plan:\n{event.data.text}") - elif isinstance(event.data, MagenticProgressLedger): - print(f"Please review progress ledger:\n{json.dumps(event.data.to_dict(), indent=2)}") + elif event.type == "magentic_orchestrator": + print(f"\n[Magentic Orchestrator Event] Type: {event.data.event_type.name}") + if isinstance(event.data.content, ChatMessage): + print(f"Please review the plan:\n{event.data.content.text}") + elif isinstance(event.data.content, MagenticProgressLedger): + print(f"Please review progress ledger:\n{json.dumps(event.data.content.to_dict(), indent=2)}") else: - print(f"Unknown data type in MagenticOrchestratorEvent: {type(event.data)}") + print(f"Unknown data type in MagenticOrchestratorEvent: {type(event.data.content)}") # Block to allow user to read the plan/progress before continuing # Note: this is for demonstration only and is not the recommended way to handle human interaction. 
diff --git a/python/samples/getting_started/orchestrations/handoff_autonomous.py b/python/samples/getting_started/orchestrations/handoff_autonomous.py index b61a1fc202..faadd8486e 100644 --- a/python/samples/getting_started/orchestrations/handoff_autonomous.py +++ b/python/samples/getting_started/orchestrations/handoff_autonomous.py @@ -11,7 +11,7 @@ resolve_agent_id, ) from agent_framework.azure import AzureOpenAIChatClient -from agent_framework.orchestrations import HandoffBuilder, HandoffSentEvent +from agent_framework.orchestrations import HandoffBuilder from azure.identity import AzureCliCredential logging.basicConfig(level=logging.ERROR) @@ -110,8 +110,8 @@ async def main() -> None: last_response_id: str | None = None async for event in workflow.run(request, stream=True): - if isinstance(event, HandoffSentEvent): - print(f"\nHandoff Event: from {event.source} to {event.target}\n") + if event.type == "handoff_sent": + print(f"\nHandoff Event: from {event.data.source} to {event.data.target}\n") elif event.type == "output": data = event.data if isinstance(data, AgentResponseUpdate): diff --git a/python/samples/getting_started/orchestrations/handoff_participant_factory.py b/python/samples/getting_started/orchestrations/handoff_participant_factory.py index c66f9a5e22..100bc1be03 100644 --- a/python/samples/getting_started/orchestrations/handoff_participant_factory.py +++ b/python/samples/getting_started/orchestrations/handoff_participant_factory.py @@ -14,7 +14,7 @@ tool, ) from agent_framework.azure import AzureOpenAIChatClient -from agent_framework.orchestrations import HandoffAgentUserRequest, HandoffBuilder, HandoffSentEvent +from agent_framework.orchestrations import HandoffAgentUserRequest, HandoffBuilder from azure.identity import AzureCliCredential logging.basicConfig(level=logging.ERROR) @@ -122,9 +122,9 @@ def _handle_events(events: list[WorkflowEvent]) -> list[WorkflowEvent[HandoffAge requests: list[WorkflowEvent[HandoffAgentUserRequest]] = [] for event 
in events: - if isinstance(event, HandoffSentEvent): - # HandoffSentEvent: Indicates a handoff has been initiated - print(f"\n[Handoff from {event.source} to {event.target} initiated.]") + if event.type == "handoff_sent": + # handoff_sent event: Indicates a handoff has been initiated + print(f"\n[Handoff from {event.data.source} to {event.data.target} initiated.]") elif event.type == "status" and event.state in { WorkflowRunState.IDLE, WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, diff --git a/python/samples/getting_started/orchestrations/handoff_simple.py b/python/samples/getting_started/orchestrations/handoff_simple.py index 17d407c11f..d32c92aca9 100644 --- a/python/samples/getting_started/orchestrations/handoff_simple.py +++ b/python/samples/getting_started/orchestrations/handoff_simple.py @@ -12,7 +12,7 @@ tool, ) from agent_framework.azure import AzureOpenAIChatClient -from agent_framework.orchestrations import HandoffAgentUserRequest, HandoffBuilder, HandoffSentEvent +from agent_framework.orchestrations import HandoffAgentUserRequest, HandoffBuilder from azure.identity import AzureCliCredential """Sample: Simple handoff workflow. 
@@ -117,15 +117,15 @@ def _handle_events(events: list[WorkflowEvent]) -> list[WorkflowEvent[HandoffAge requests: list[WorkflowEvent[HandoffAgentUserRequest]] = [] for event in events: - if isinstance(event, HandoffSentEvent): - # HandoffSentEvent: Indicates a handoff has been initiated - print(f"\n[Handoff from {event.source} to {event.target} initiated.]") + if event.type == "handoff_sent": + # handoff_sent event: Indicates a handoff has been initiated + print(f"\n[Handoff from {event.data.source} to {event.data.target} initiated.]") elif event.type == "status" and event.state in { WorkflowRunState.IDLE, WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, }: # Status event: Indicates workflow state changes - print(f"\n[Workflow Status] {event.state.name}") + print(f"\n[Workflow Status] {event.state}") elif event.type == "output": # Output event: Contains contents generated by the workflow data = event.data @@ -145,11 +145,9 @@ def _handle_events(events: list[WorkflowEvent]) -> list[WorkflowEvent[HandoffAge speaker = message.author_name or message.role print(f"- {speaker}: {message.text or [content.type for content in message.contents]}") print("===================================") - elif event.type == "request_info": - # Request info event: Workflow is requesting user input - if isinstance(event.data, HandoffAgentUserRequest): - _print_handoff_agent_user_request(event.data.agent_response) - requests.append(cast(WorkflowEvent[HandoffAgentUserRequest], event)) + elif event.type == "request_info" and isinstance(event.data, HandoffAgentUserRequest): + _print_handoff_agent_user_request(event.data.agent_response) + requests.append(cast(WorkflowEvent[HandoffAgentUserRequest], event)) return requests diff --git a/python/samples/getting_started/orchestrations/handoff_with_code_interpreter_file.py b/python/samples/getting_started/orchestrations/handoff_with_code_interpreter_file.py index bd0044fdfb..d0bbb02e2e 100644 --- 
a/python/samples/getting_started/orchestrations/handoff_with_code_interpreter_file.py +++ b/python/samples/getting_started/orchestrations/handoff_with_code_interpreter_file.py @@ -38,7 +38,7 @@ WorkflowEvent, WorkflowRunState, ) -from agent_framework.orchestrations import HandoffAgentUserRequest, HandoffBuilder, HandoffSentEvent +from agent_framework.orchestrations import HandoffAgentUserRequest, HandoffBuilder from azure.identity.aio import AzureCliCredential # Toggle between V1 (AzureAIAgentClient) and V2 (AzureAIClient) @@ -56,14 +56,13 @@ def _handle_events(events: list[WorkflowEvent]) -> tuple[list[WorkflowEvent[Hand Returns: Tuple of (pending_requests, file_ids_found) """ - from typing import cast requests: list[WorkflowEvent[HandoffAgentUserRequest]] = [] file_ids: list[str] = [] for event in events: - if isinstance(event, HandoffSentEvent): - print(f"\n[Handoff from {event.source} to {event.target} initiated.]") + if event.type == "handoff_sent": + print(f"\n[Handoff from {event.data.source} to {event.data.target} initiated.]") elif event.type == "status" and event.state in { WorkflowRunState.IDLE, WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, diff --git a/python/samples/getting_started/orchestrations/magentic.py b/python/samples/getting_started/orchestrations/magentic.py index 196e92346e..cc1cb304ab 100644 --- a/python/samples/getting_started/orchestrations/magentic.py +++ b/python/samples/getting_started/orchestrations/magentic.py @@ -9,12 +9,11 @@ AgentResponseUpdate, ChatAgent, ChatMessage, - GroupChatRequestSentEvent, HostedCodeInterpreterTool, WorkflowEvent, ) from agent_framework.openai import OpenAIChatClient, OpenAIResponsesClient -from agent_framework.orchestrations import MagenticBuilder, MagenticOrchestratorEvent, MagenticProgressLedger +from agent_framework.orchestrations import GroupChatRequestSentEvent, MagenticBuilder, MagenticProgressLedger logging.basicConfig(level=logging.WARNING) logger = logging.getLogger(__name__) @@ -115,22 +114,22 @@ 
async def main() -> None: last_response_id = response_id print(event.data, end="", flush=True) - elif isinstance(event, MagenticOrchestratorEvent): - print(f"\n[Magentic Orchestrator Event] Type: {event.event_type.name}") - if isinstance(event.data, ChatMessage): - print(f"Please review the plan:\n{event.data.text}") - elif isinstance(event.data, MagenticProgressLedger): - print(f"Please review progress ledger:\n{json.dumps(event.data.to_dict(), indent=2)}") + elif event.type == "magentic_orchestrator": + print(f"\n[Magentic Orchestrator Event] Type: {event.data.event_type.name}") + if isinstance(event.data.content, ChatMessage): + print(f"Please review the plan:\n{event.data.content.text}") + elif isinstance(event.data.content, MagenticProgressLedger): + print(f"Please review progress ledger:\n{json.dumps(event.data.content.to_dict(), indent=2)}") else: - print(f"Unknown data type in MagenticOrchestratorEvent: {type(event.data)}") + print(f"Unknown data type in MagenticOrchestratorEvent: {type(event.data.content)}") # Block to allow user to read the plan/progress before continuing # Note: this is for demonstration only and is not the recommended way to handle human interaction. # Please refer to `with_plan_review` for proper human interaction during planning phases. await asyncio.get_event_loop().run_in_executor(None, input, "Press Enter to continue...") - elif isinstance(event, GroupChatRequestSentEvent): - print(f"\n[REQUEST SENT ({event.round_index})] to agent: {event.participant_name}") + elif event.type == "group_chat" and isinstance(event.data, GroupChatRequestSentEvent): + print(f"\n[REQUEST SENT ({event.data.round_index})] to agent: {event.data.participant_name}") elif event.type == "output": output_event = event