diff --git a/python/packages/core/agent_framework/_tools.py b/python/packages/core/agent_framework/_tools.py index 6638e71dac..7e22b78827 100644 --- a/python/packages/core/agent_framework/_tools.py +++ b/python/packages/core/agent_framework/_tools.py @@ -2135,6 +2135,9 @@ def get_response( filtered_kwargs = {k: v for k, v in kwargs.items() if k != "thread"} # Make options mutable so we can update conversation_id during function invocation loop mutable_options: dict[str, Any] = dict(options) if options else {} + # Remove additional_function_arguments from options passed to underlying chat client + # It's for tool invocation only and not recognized by chat service APIs + mutable_options.pop("additional_function_arguments", None) if not stream: diff --git a/python/packages/core/agent_framework/_workflows/__init__.py b/python/packages/core/agent_framework/_workflows/__init__.py index b77a3d4c72..c5666f7b26 100644 --- a/python/packages/core/agent_framework/_workflows/__init__.py +++ b/python/packages/core/agent_framework/_workflows/__init__.py @@ -31,22 +31,11 @@ ) from ._edge_runner import create_edge_runner from ._events import ( - ExecutorCompletedEvent, - ExecutorEvent, - ExecutorFailedEvent, - ExecutorInvokedEvent, - RequestInfoEvent, - SuperStepCompletedEvent, - SuperStepStartedEvent, WorkflowErrorDetails, WorkflowEvent, WorkflowEventSource, - WorkflowFailedEvent, - WorkflowLifecycleEvent, - WorkflowOutputEvent, + WorkflowEventType, WorkflowRunState, - WorkflowStartedEvent, - WorkflowStatusEvent, ) from ._exceptions import ( WorkflowCheckpointException, @@ -96,10 +85,6 @@ "EdgeCondition", "EdgeDuplicationError", "Executor", - "ExecutorCompletedEvent", - "ExecutorEvent", - "ExecutorFailedEvent", - "ExecutorInvokedEvent", "FanInEdgeGroup", "FanOutEdgeGroup", "FileCheckpointStorage", @@ -108,14 +93,11 @@ "InMemoryCheckpointStorage", "InProcRunnerContext", "Message", - "RequestInfoEvent", "Runner", "RunnerContext", "SingleEdgeGroup", "SubWorkflowRequestMessage", 
"SubWorkflowResponseMessage", - "SuperStepCompletedEvent", - "SuperStepStartedEvent", "SwitchCaseEdgeGroup", "SwitchCaseEdgeGroupCase", "SwitchCaseEdgeGroupDefault", @@ -132,16 +114,12 @@ "WorkflowErrorDetails", "WorkflowEvent", "WorkflowEventSource", + "WorkflowEventType", "WorkflowException", "WorkflowExecutor", - "WorkflowFailedEvent", - "WorkflowLifecycleEvent", - "WorkflowOutputEvent", "WorkflowRunResult", "WorkflowRunState", "WorkflowRunnerException", - "WorkflowStartedEvent", - "WorkflowStatusEvent", "WorkflowValidationError", "WorkflowViz", "create_edge_runner", diff --git a/python/packages/core/agent_framework/_workflows/_agent.py b/python/packages/core/agent_framework/_workflows/_agent.py index 70b385c06d..06aa6646af 100644 --- a/python/packages/core/agent_framework/_workflows/_agent.py +++ b/python/packages/core/agent_framework/_workflows/_agent.py @@ -1,5 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. +from __future__ import annotations + import json import logging import sys @@ -23,9 +25,7 @@ from ..exceptions import AgentExecutionException from ._checkpoint import CheckpointStorage from ._events import ( - RequestInfoEvent, WorkflowEvent, - WorkflowOutputEvent, ) from ._message_utils import normalize_messages_input from ._typing_utils import is_instance_of, is_type_compatible @@ -59,11 +59,11 @@ def to_json(self) -> str: return json.dumps(self.to_dict()) @classmethod - def from_dict(cls, payload: dict[str, Any]) -> "WorkflowAgent.RequestInfoFunctionArgs": + def from_dict(cls, payload: dict[str, Any]) -> WorkflowAgent.RequestInfoFunctionArgs: return cls(request_id=payload.get("request_id", ""), data=payload.get("data")) @classmethod - def from_json(cls, raw: str) -> "WorkflowAgent.RequestInfoFunctionArgs": + def from_json(cls, raw: str) -> WorkflowAgent.RequestInfoFunctionArgs: try: parsed: Any = json.loads(raw) except json.JSONDecodeError as exc: @@ -74,7 +74,7 @@ def from_json(cls, raw: str) -> "WorkflowAgent.RequestInfoFunctionArgs": def 
__init__( self, - workflow: "Workflow", + workflow: Workflow, *, id: str | None = None, name: str | None = None, @@ -93,10 +93,10 @@ def __init__( **kwargs: Additional keyword arguments passed to BaseAgent. Note: - Only WorkflowOutputEvents and RequestInfoEvents from the workflow are considered and - converted to agent responses of the WorkflowAgent. Other workflow events are ignored. - Use `with_output_from` in WorkflowBuilder to control which executors' outputs are surfaced - as agent responses. + Only output events (type='output') and request_info events (type='request_info') from + the workflow are considered and converted to agent responses of the WorkflowAgent. + Other workflow events are ignored. Use `with_output_from` in WorkflowBuilder to control + which executors' outputs are surfaced as agent responses. """ if id is None: id = f"WorkflowAgent_{uuid.uuid4().hex[:8]}" @@ -111,15 +111,15 @@ def __init__( raise ValueError("Workflow's start executor cannot handle list[ChatMessage]") super().__init__(id=id, name=name, description=description, **kwargs) - self._workflow: "Workflow" = workflow - self._pending_requests: dict[str, RequestInfoEvent] = {} + self._workflow: Workflow = workflow + self._pending_requests: dict[str, WorkflowEvent[Any]] = {} @property - def workflow(self) -> "Workflow": + def workflow(self) -> Workflow: return self._workflow @property - def pending_requests(self) -> dict[str, RequestInfoEvent]: + def pending_requests(self) -> dict[str, WorkflowEvent[Any]]: return self._pending_requests # region Run Methods @@ -179,6 +179,10 @@ def run( Returns: When stream=True: An AsyncIterable[AgentResponseUpdate] for streaming updates. When stream=False: An Awaitable[AgentResponse] with the complete response. + + Output events (type='output') from the workflow will be converted to ChatMessages + or AgentResponseUpdate objects. Request info events (type='request_info') will be + converted to function call and approval request contents. 
""" if stream: return self._run_streaming( @@ -228,7 +232,12 @@ async def _run_streaming( checkpoint_storage: CheckpointStorage | None = None, **kwargs: Any, ) -> AsyncIterable[AgentResponseUpdate]: - """Internal streaming implementation.""" + """Internal streaming implementation. + + Yields AgentResponseUpdate objects. Output events (type='output') from the workflow + are converted to updates. Request info events (type='request_info') are converted + to function call and approval request contents. + """ input_messages = normalize_messages_input(messages) thread = thread or self.get_new_thread() response_updates: list[AgentResponseUpdate] = [] @@ -269,11 +278,11 @@ async def _run_impl( Returns: An AgentResponse representing the workflow execution results. """ - output_events: list[WorkflowOutputEvent | RequestInfoEvent] = [] + output_events: list[WorkflowEvent[Any]] = [] async for event in self._run_core( input_messages, thread, checkpoint_id, checkpoint_storage, streaming=False, **kwargs ): - if isinstance(event, WorkflowOutputEvent | RequestInfoEvent): + if event.type == "output" or event.type == "request_info": output_events.append(event) return self._convert_workflow_events_to_agent_response(response_id, output_events) @@ -304,7 +313,7 @@ async def _run_stream_impl( async for event in self._run_core( input_messages, thread, checkpoint_id, checkpoint_storage, streaming=True, **kwargs ): - updates = self._convert_workflow_event_to_agent_response_update(response_id, event) + updates = self._convert_workflow_event_to_agent_response_updates(response_id, event) for update in updates: yield update @@ -440,7 +449,7 @@ def _process_pending_requests(self, input_messages: list[ChatMessage]) -> dict[s def _convert_workflow_events_to_agent_response( self, response_id: str, - output_events: list[WorkflowOutputEvent | RequestInfoEvent], + output_events: list[WorkflowEvent[Any]], ) -> AgentResponse: """Convert a list of workflow output events to an AgentResponse.""" messages: 
list[ChatMessage] = [] @@ -449,7 +458,7 @@ def _convert_workflow_events_to_agent_response( latest_created_at: str | None = None for output_event in output_events: - if isinstance(output_event, RequestInfoEvent): + if output_event.type == "request_info": function_call, approval_request = self._process_request_info_event(output_event) messages.append( ChatMessage( @@ -468,7 +477,7 @@ def _convert_workflow_events_to_agent_response( # sequence cannot be guaranteed when there are streaming updates in between non-streaming # responses. raise AgentExecutionException( - "WorkflowOutputEvent with AgentResponseUpdate data cannot be emitted in non-streaming mode. " + "Output event with AgentResponseUpdate data cannot be emitted in non-streaming mode. " "Please ensure executors emit AgentResponse for non-streaming workflows." ) @@ -514,114 +523,159 @@ def _convert_workflow_events_to_agent_response( raw_representation=raw_representations, ) - def _convert_workflow_event_to_agent_response_update( + def _process_request_info_event( + self, + event: WorkflowEvent[Any], + ) -> tuple[Content, Content]: + """Convert a request_info event to FunctionCallContent and FunctionApprovalRequestContent. + + Args: + event: A WorkflowEvent with type='request_info'. + + Returns: + A tuple of (FunctionCallContent, FunctionApprovalRequestContent). 
+ """ + request_id = event.request_id + if not request_id: + raise ValueError("request_info event must have a request_id") + + self.pending_requests[request_id] = event + + args = self.RequestInfoFunctionArgs(request_id=request_id, data=event.data).to_dict() + + function_call = Content.from_function_call( + call_id=request_id, + name=self.REQUEST_INFO_FUNCTION_NAME, + arguments=args, + ) + approval_request = Content.from_function_approval_request( + id=request_id, + function_call=function_call, + additional_properties={"request_id": request_id}, + ) + return function_call, approval_request + + def _convert_workflow_event_to_agent_response_updates( self, response_id: str, - event: WorkflowEvent, + event: WorkflowEvent[Any], ) -> list[AgentResponseUpdate]: - """Convert a workflow event to an AgentResponseUpdate. + """Convert a workflow event to a list of AgentResponseUpdate objects. - Only WorkflowOutputEvent and RequestInfoEvent are processed. + Events with type='output' and type='request_info' are processed. Other workflow events are ignored as they are workflow-internal. - """ - match event: - # Convert workflow output to an agent response update. - case WorkflowOutputEvent(data=data, executor_id=executor_id): - # Handle different data types appropriately. - if isinstance(data, AgentResponse): - return [ - AgentResponseUpdate( - contents=[content for message in data.messages for content in message.contents], - role="assistant", - author_name=executor_id, - response_id=response_id, - created_at=data.created_at, - raw_representation=data, - ) - ] - if isinstance(data, AgentResponseUpdate): - return [data] + For 'output' events, AgentExecutor yields AgentResponseUpdate for streaming updates + via ctx.yield_output(). This method converts those to agent response updates. 
- if isinstance(data, ChatMessage): - return [ - AgentResponseUpdate( - contents=list(data.contents), - role=data.role, - author_name=data.author_name, - response_id=response_id, - message_id=data.message_id or str(uuid.uuid4()), - created_at=datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ"), - raw_representation=data, - ) - ] - - if is_instance_of(data, list[ChatMessage]): - chat_messages = cast(list[ChatMessage], data) - return [ + Returns: + A list of AgentResponseUpdate objects. Empty list if the event is not relevant. + """ + if event.type == "output": + # Convert workflow output to agent response updates. + # Handle different data types appropriately. + data = event.data + executor_id = event.executor_id + + if isinstance(data, AgentResponseUpdate): + # Pass through AgentResponseUpdate directly (streaming from AgentExecutor) + if not data.author_name: + data.author_name = executor_id + return [data] + if isinstance(data, AgentResponse): + # Convert each message in AgentResponse to an AgentResponseUpdate + updates: list[AgentResponseUpdate] = [] + for msg in data.messages: + updates.append( AgentResponseUpdate( contents=list(msg.contents), role=msg.role, - author_name=msg.author_name, - response_id=response_id, + author_name=msg.author_name or executor_id, + response_id=data.response_id or response_id, message_id=msg.message_id or str(uuid.uuid4()), - created_at=datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ"), + created_at=data.created_at + or datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ"), raw_representation=msg, ) - for msg in chat_messages - ] - - contents = self._extract_contents(data) - if not contents: - return [] - + ) + return updates + if isinstance(data, ChatMessage): return [ AgentResponseUpdate( - contents=contents, - role="assistant", - author_name=executor_id, + contents=list(data.contents), + role=data.role, + author_name=data.author_name or executor_id, response_id=response_id, 
message_id=str(uuid.uuid4()), created_at=datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ"), raw_representation=data, ) ] - - case RequestInfoEvent(): - function_call, approval_request = self._process_request_info_event(event) - return [ - AgentResponseUpdate( - contents=[function_call, approval_request], - role="assistant", - author_name=self.name, - response_id=response_id, - message_id=str(uuid.uuid4()), - created_at=datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ"), + if is_instance_of(data, list[ChatMessage]): + # Convert each ChatMessage to an AgentResponseUpdate + chat_messages = cast(list[ChatMessage], data) + updates = [] + for msg in chat_messages: + updates.append( + AgentResponseUpdate( + contents=list(msg.contents), + role=msg.role, + author_name=msg.author_name or executor_id, + response_id=response_id, + message_id=msg.message_id or str(uuid.uuid4()), + created_at=datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ"), + raw_representation=msg, + ) ) - ] - case _: - # Ignore workflow-internal events - pass + return updates + contents = self._extract_contents(data) + if not contents: + return [] + return [ + AgentResponseUpdate( + contents=contents, + role="assistant", + author_name=executor_id, + response_id=response_id, + message_id=str(uuid.uuid4()), + created_at=datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ"), + raw_representation=data, + ) + ] - return [] + if event.type == "request_info": + # Store the pending request for later correlation + request_id = event.request_id + if not request_id: + raise ValueError("request_info event must have a request_id") - def _process_request_info_event(self, event: RequestInfoEvent) -> tuple[Content, Content]: - """Process a RequestInfoEvent by adding it to pending requests.""" - # Store the pending request for later correlation - self.pending_requests[event.request_id] = event + self.pending_requests[request_id] = event - args = 
self.RequestInfoFunctionArgs(request_id=event.request_id, data=event.data).to_dict() - function_call = Content.from_function_call( - call_id=event.request_id, - name=self.REQUEST_INFO_FUNCTION_NAME, - arguments=args, - ) - approval_request = Content.from_function_approval_request( - id=event.request_id, - function_call=function_call, - additional_properties={"request_id": event.request_id}, - ) - return function_call, approval_request + args = self.RequestInfoFunctionArgs(request_id=request_id, data=event.data).to_dict() + + function_call = Content.from_function_call( + call_id=request_id, + name=self.REQUEST_INFO_FUNCTION_NAME, + arguments=args, + ) + approval_request = Content.from_function_approval_request( + id=request_id, + function_call=function_call, + additional_properties={"request_id": request_id}, + ) + return [ + AgentResponseUpdate( + contents=[function_call, approval_request], + role="assistant", + author_name=self.name, + response_id=response_id, + message_id=str(uuid.uuid4()), + created_at=datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ"), + ) + ] + + # Ignore workflow-internal events + return [] def _extract_function_responses(self, input_messages: list[ChatMessage]) -> dict[str, Any]: """Extract function responses from input messages.""" diff --git a/python/packages/core/agent_framework/_workflows/_agent_executor.py b/python/packages/core/agent_framework/_workflows/_agent_executor.py index 2a345ee386..f13b7b65fd 100644 --- a/python/packages/core/agent_framework/_workflows/_agent_executor.py +++ b/python/packages/core/agent_framework/_workflows/_agent_executor.py @@ -65,8 +65,8 @@ class AgentExecutor(Executor): """built-in executor that wraps an agent for handling messages. 
AgentExecutor adapts its behavior based on the workflow execution mode: - - run(stream=True): Emits incremental WorkflowOutputEvents as the agent produces tokens - - run(): Emits a single WorkflowOutputEvent containing the complete response + - run(stream=True): Emits incremental output events (type='output') as the agent produces tokens + - run(): Emits a single output event (type='output') containing the complete response Use `with_output_from` in WorkflowBuilder to control whether the AgentResponse or AgentResponseUpdate objects are yielded as workflow outputs. @@ -296,8 +296,8 @@ async def _run_agent_and_emit( ) -> None: """Execute the underlying agent, emit events, and enqueue response. - Checks ctx.is_streaming() to determine whether to emit WorkflowOutputEvents - containing incremental updates (streaming mode) or a single WorkflowOutputEvent + Checks ctx.is_streaming() to determine whether to emit output events (type='output') + containing incremental updates (streaming mode) or a single output event (type='output') containing the complete response (non-streaming mode). 
""" if ctx.is_streaming(): @@ -332,10 +332,16 @@ async def _run_agent(self, ctx: WorkflowContext[Never, AgentResponse]) -> AgentR """ run_kwargs: dict[str, Any] = ctx.get_state(WORKFLOW_RUN_KWARGS_KEY, {}) + # Build options dict with additional_function_arguments for tool kwargs propagation + options: dict[str, Any] | None = None + if run_kwargs: + options = {"additional_function_arguments": run_kwargs} + response = await self._agent.run( self._cache, stream=False, thread=self._agent_thread, + options=options, **run_kwargs, ) await ctx.yield_output(response) @@ -360,12 +366,18 @@ async def _run_agent_streaming(self, ctx: WorkflowContext[Never, AgentResponseUp """ run_kwargs: dict[str, Any] = ctx.get_state(WORKFLOW_RUN_KWARGS_KEY) or {} + # Build options dict with additional_function_arguments for tool kwargs propagation + options: dict[str, Any] | None = None + if run_kwargs: + options = {"additional_function_arguments": run_kwargs} + updates: list[AgentResponseUpdate] = [] user_input_requests: list[Content] = [] async for update in self._agent.run( self._cache, stream=True, thread=self._agent_thread, + options=options, **run_kwargs, ): updates.append(update) diff --git a/python/packages/core/agent_framework/_workflows/_checkpoint.py b/python/packages/core/agent_framework/_workflows/_checkpoint.py index 874ded5568..0334ee3893 100644 --- a/python/packages/core/agent_framework/_workflows/_checkpoint.py +++ b/python/packages/core/agent_framework/_workflows/_checkpoint.py @@ -1,5 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. 
+from __future__ import annotations + import asyncio import json import logging @@ -59,7 +61,7 @@ def to_dict(self) -> dict[str, Any]: return asdict(self) @classmethod - def from_dict(cls, data: Mapping[str, Any]) -> "WorkflowCheckpoint": + def from_dict(cls, data: Mapping[str, Any]) -> WorkflowCheckpoint: return cls(**data) diff --git a/python/packages/core/agent_framework/_workflows/_checkpoint_summary.py b/python/packages/core/agent_framework/_workflows/_checkpoint_summary.py index b1fd6896ab..fe00c1a287 100644 --- a/python/packages/core/agent_framework/_workflows/_checkpoint_summary.py +++ b/python/packages/core/agent_framework/_workflows/_checkpoint_summary.py @@ -5,7 +5,7 @@ from ._checkpoint import WorkflowCheckpoint from ._const import EXECUTOR_STATE_KEY -from ._events import RequestInfoEvent +from ._events import WorkflowEvent logger = logging.getLogger(__name__) @@ -20,14 +20,14 @@ class WorkflowCheckpointSummary: targets: list[str] executor_ids: list[str] status: str - pending_request_info_events: list[RequestInfoEvent] + pending_request_info_events: list[WorkflowEvent] def get_checkpoint_summary(checkpoint: WorkflowCheckpoint) -> WorkflowCheckpointSummary: targets = sorted(checkpoint.messages.keys()) executor_ids = sorted(checkpoint.state.get(EXECUTOR_STATE_KEY, {}).keys()) pending_request_info_events = [ - RequestInfoEvent.from_dict(request) for request in checkpoint.pending_request_info_events.values() + WorkflowEvent.from_dict(request) for request in checkpoint.pending_request_info_events.values() ] status = "idle" diff --git a/python/packages/core/agent_framework/_workflows/_edge.py b/python/packages/core/agent_framework/_workflows/_edge.py index 3212eff41a..02544ad3df 100644 --- a/python/packages/core/agent_framework/_workflows/_edge.py +++ b/python/packages/core/agent_framework/_workflows/_edge.py @@ -1,5 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. 
+from __future__ import annotations + import inspect import logging import uuid @@ -214,7 +216,7 @@ def to_dict(self) -> dict[str, Any]: return payload @classmethod - def from_dict(cls, data: dict[str, Any]) -> "Edge": + def from_dict(cls, data: dict[str, Any]) -> Edge: """Reconstruct an `Edge` from its serialised dictionary form. The deserialised edge will lack the executable predicate because we do @@ -311,7 +313,7 @@ class EdgeGroup(DictConvertible): from builtins import type as builtin_type - _TYPE_REGISTRY: ClassVar[dict[str, builtin_type["EdgeGroup"]]] = {} + _TYPE_REGISTRY: ClassVar[dict[str, builtin_type[EdgeGroup]]] = {} def __init__( self, @@ -415,7 +417,7 @@ class CustomGroup(EdgeGroup): return subclass @classmethod - def from_dict(cls, data: dict[str, Any]) -> "EdgeGroup": + def from_dict(cls, data: dict[str, Any]) -> EdgeGroup: """Hydrate the correct `EdgeGroup` subclass from serialised state. The method inspects the `type` field, allocates the corresponding class @@ -735,7 +737,7 @@ def to_dict(self) -> dict[str, Any]: return payload @classmethod - def from_dict(cls, data: dict[str, Any]) -> "SwitchCaseEdgeGroupCase": + def from_dict(cls, data: dict[str, Any]) -> SwitchCaseEdgeGroupCase: """Instantiate a case from its serialised dictionary payload. Examples: @@ -789,7 +791,7 @@ def to_dict(self) -> dict[str, Any]: return {"target_id": self.target_id, "type": self.type} @classmethod - def from_dict(cls, data: dict[str, Any]) -> "SwitchCaseEdgeGroupDefault": + def from_dict(cls, data: dict[str, Any]) -> SwitchCaseEdgeGroupDefault: """Recreate the default branch from its persisted form. Examples: diff --git a/python/packages/core/agent_framework/_workflows/_events.py b/python/packages/core/agent_framework/_workflows/_events.py index b43511cbc2..18e974e3e7 100644 --- a/python/packages/core/agent_framework/_workflows/_events.py +++ b/python/packages/core/agent_framework/_workflows/_events.py @@ -1,16 +1,27 @@ # Copyright (c) Microsoft. All rights reserved. 
+from __future__ import annotations + +import builtins +import sys import traceback as _traceback from collections.abc import Iterator from contextlib import contextmanager from contextvars import ContextVar from dataclasses import dataclass from enum import Enum -from typing import Any, TypeAlias +from typing import Any, Generic, Literal, cast from ._checkpoint_encoding import decode_checkpoint_value, encode_checkpoint_value from ._typing_utils import deserialize_type, serialize_type +if sys.version_info >= (3, 13): + from typing import TypeVar # type: ignore # pragma: no cover +else: + from typing_extensions import TypeVar # type: ignore[import] # pragma: no cover + +DataT = TypeVar("DataT", default=Any) + class WorkflowEventSource(str, Enum): """Identifies whether a workflow event came from the framework or an executor. @@ -44,114 +55,16 @@ def _framework_event_origin() -> Iterator[None]: # pyright: ignore[reportUnused _event_origin_context.reset(token) -class WorkflowEvent: - """Base class for workflow events.""" - - def __init__(self, data: Any | None = None): - """Initialize the workflow event with optional data.""" - self.data = data - self.origin = _current_event_origin() - - def __repr__(self) -> str: - """Return a string representation of the workflow event.""" - data_repr = self.data if self.data is not None else "None" - return f"{self.__class__.__name__}(origin={self.origin}, data={data_repr})" - - -class WorkflowStartedEvent(WorkflowEvent): - """Built-in lifecycle event emitted when a workflow run begins.""" - - ... 
- - -class WorkflowWarningEvent(WorkflowEvent): - """Executor-origin event signaling a warning surfaced by user code.""" - - def __init__(self, data: str): - """Initialize the workflow warning event with optional data and warning message.""" - super().__init__(data) - - def __repr__(self) -> str: - """Return a string representation of the workflow warning event.""" - return f"{self.__class__.__name__}(message={self.data}, origin={self.origin})" - - -class WorkflowErrorEvent(WorkflowEvent): - """Executor-origin event signaling an error surfaced by user code.""" - - def __init__(self, data: Exception): - """Initialize the workflow error event with optional data and error message.""" - super().__init__(data) - - def __repr__(self) -> str: - """Return a string representation of the workflow error event.""" - return f"{self.__class__.__name__}(exception={self.data}, origin={self.origin})" - - class WorkflowRunState(str, Enum): - """Run-level state of a workflow execution. - - Semantics: - - STARTED: Run has been initiated and the workflow context has been created. - This is an initial state before any meaningful work is performed. In this - codebase we emit a dedicated `WorkflowStartedEvent` for telemetry, and - typically advance the status directly to `IN_PROGRESS`. Consumers may - still rely on `STARTED` for state machines that need an explicit pre-work - phase. - - - IN_PROGRESS: The workflow is actively executing (e.g., the initial - message has been delivered to the start executor or a superstep is - running). This status is emitted at the beginning of a run and can be - followed by other statuses as the run progresses. - - - IN_PROGRESS_PENDING_REQUESTS: Active execution while one or more - request-for-information operations are outstanding. New work may still - be scheduled while requests are in flight. - - - IDLE: The workflow is quiescent with no outstanding requests and no more - work to do. 
This is the normal terminal state for workflows that have - finished executing, potentially having produced outputs along the way. - - - IDLE_WITH_PENDING_REQUESTS: The workflow is paused awaiting external - input (e.g., emitted a `RequestInfoEvent`). This is a non-terminal - state; the workflow can resume when responses are supplied. - - - FAILED: Terminal state indicating an error surfaced. Accompanied by a - `WorkflowFailedEvent` with structured error details. - - - CANCELLED: Terminal state indicating the run was cancelled by a caller - or orchestrator. Not currently emitted by default runner paths but - included for integrators/orchestrators that support cancellation. - """ - - STARTED = "STARTED" # Explicit pre-work phase (rarely emitted as status; see note above) - IN_PROGRESS = "IN_PROGRESS" # Active execution is underway - IN_PROGRESS_PENDING_REQUESTS = "IN_PROGRESS_PENDING_REQUESTS" # Active execution with outstanding requests - IDLE = "IDLE" # No active work and no outstanding requests - IDLE_WITH_PENDING_REQUESTS = "IDLE_WITH_PENDING_REQUESTS" # Paused awaiting external responses - FAILED = "FAILED" # Finished with an error - CANCELLED = "CANCELLED" # Finished due to cancellation - - -class WorkflowStatusEvent(WorkflowEvent): - """Built-in lifecycle event emitted for workflow run state transitions.""" - - def __init__( - self, - state: WorkflowRunState, - data: Any | None = None, - ): - """Initialize the workflow status event with a new state and optional data. - - Args: - state: The new state of the workflow run. - data: Optional additional data associated with the state change. 
- """ - super().__init__(data) - self.state = state + """Run-level state of a workflow execution.""" - def __repr__(self) -> str: # pragma: no cover - representation only - return f"{self.__class__.__name__}(state={self.state}, data={self.data!r}, origin={self.origin})" + STARTED = "STARTED" + IN_PROGRESS = "IN_PROGRESS" + IN_PROGRESS_PENDING_REQUESTS = "IN_PROGRESS_PENDING_REQUESTS" + IDLE = "IDLE" + IDLE_WITH_PENDING_REQUESTS = "IDLE_WITH_PENDING_REQUESTS" + FAILED = "FAILED" + CANCELLED = "CANCELLED" @dataclass @@ -171,7 +84,7 @@ def from_exception( *, executor_id: str | None = None, extra: dict[str, Any] | None = None, - ) -> "WorkflowErrorDetails": + ) -> WorkflowErrorDetails: tb = None try: tb = "".join(_traceback.format_exception(type(exc), exc, exc.__traceback__)) @@ -186,180 +99,328 @@ def from_exception( ) -class WorkflowFailedEvent(WorkflowEvent): - """Built-in lifecycle event emitted when a workflow run terminates with an error.""" - - def __init__( - self, - details: WorkflowErrorDetails, - data: Any | None = None, - ): - super().__init__(data) - self.details = details - - def __repr__(self) -> str: # pragma: no cover - representation only - return f"{self.__class__.__name__}(details={self.details}, data={self.data!r}, origin={self.origin})" - +# Type discriminator for workflow events. +# Includes both framework lifecycle types and well-known orchestration types. 
+WorkflowEventType = Literal[ + # Lifecycle events (workflow-level) + "started", # Workflow run began + "status", # Workflow state changed (use .state) + "failed", # Workflow terminated with error (use .details) + # Data events + "output", # Executor yielded final output (use .executor_id, .data) + "data", # Executor emitted data during execution (use .executor_id, .data) + # Request events (human-in-the-loop) + "request_info", # Executor requests external info (use .request_id, .source_executor_id) + # Diagnostic events (warnings/errors from user code) + "warning", # Warning from user code (use .data as str) + "error", # Error from user code, non-fatal (use .data as Exception) + # Iteration events (supersteps) + "superstep_started", # Superstep began (use .iteration) + "superstep_completed", # Superstep ended (use .iteration) + # Executor lifecycle events + "executor_invoked", # Executor handler was called (use .executor_id, .data) + "executor_completed", # Executor handler completed (use .executor_id, .data) + "executor_failed", # Executor handler raised error (use .executor_id, .details) + # Orchestration event types (use .data for typed payload) + "group_chat", # Group chat orchestrator events (use .data as GroupChatRequestSentEvent | GroupChatResponseReceivedEvent) # noqa: E501 + "handoff_sent", # Handoff routing events (use .data as HandoffSentEvent) + "magentic_orchestrator", # Magentic orchestrator events (use .data as MagenticOrchestratorEvent) +] + + +class WorkflowEvent(Generic[DataT]): + """Unified event for all workflow emissions. + + This single generic class handles all workflow events through a `type` discriminator, + following the same pattern as the `Content` class. 
+ + Use factory methods for convenient construction: + + - `WorkflowEvent.started()` - workflow run began + - `WorkflowEvent.status(state)` - workflow state changed + - `WorkflowEvent.failed(details)` - workflow terminated with error + - `WorkflowEvent.warning(message)` - warning from user code + - `WorkflowEvent.error(exception)` - error from user code + - `WorkflowEvent.output(executor_id, data)` - executor yielded final output + - `WorkflowEvent.data(executor_id, data)` - executor emitted data (e.g., AgentResponse) + - `WorkflowEvent.request_info(...)` - executor requests external info + - `WorkflowEvent.superstep_started(iteration)` - superstep began + - `WorkflowEvent.superstep_completed(iteration)` - superstep ended + - `WorkflowEvent.executor_invoked(executor_id)` - executor handler called + - `WorkflowEvent.executor_completed(executor_id)` - executor handler completed + - `WorkflowEvent.executor_failed(executor_id, details)` - executor handler failed + + The generic parameter DataT represents the type of the event's data payload: + - Lifecycle events: `WorkflowEvent[None]` (data is None) + - Data events: `WorkflowEvent[DataT]` where DataT is the payload type (e.g., AgentResponse) + + Examples: + .. 
code-block:: python + + # Create events via factory methods + started = WorkflowEvent.started() + status = WorkflowEvent.status(WorkflowRunState.IN_PROGRESS) + output = WorkflowEvent.output("agent1", result_data) + + # Emit typed data from executor + event: WorkflowEvent[AgentResponse] = WorkflowEvent.data("agent1", response) + data: AgentResponse = event.data # Type-safe access + + # Check event type + if event.type == "status": + print(f"State: {event.state}") + elif event.type == "output": + print(f"Output from {event.executor_id}: {event.data}") + elif event.type == "data": + if isinstance(event.data, AgentResponse): + print(f"Agent response: {event.data.text}") + """ -class RequestInfoEvent(WorkflowEvent): - """Event triggered when a workflow executor requests external information.""" + type: WorkflowEventType + data: DataT def __init__( self, - request_id: str, - source_executor_id: str, - request_data: Any, - response_type: type[Any], - ): - """Initialize the request info event. - - Args: - request_id: Unique identifier for the request. - source_executor_id: ID of the executor that made the request. - request_data: The data associated with the request. - response_type: Expected type of the response. + type: WorkflowEventType, + data: DataT | None = None, + *, + # Event context fields + origin: WorkflowEventSource | None = None, + # STATUS event fields + state: WorkflowRunState | None = None, + # FAILED event fields + details: WorkflowErrorDetails | None = None, + # OUTPUT/DATA event fields + executor_id: str | None = None, + # REQUEST_INFO event fields + request_id: str | None = None, + source_executor_id: str | None = None, + request_type: builtins.type[Any] | None = None, + response_type: builtins.type[Any] | None = None, + # SUPERSTEP event fields + iteration: int | None = None, + ) -> None: + """Initialize the workflow event. + + Prefer using factory methods like `WorkflowEvent.started()` instead of __init__ directly. 
""" - super().__init__(request_data) - self.request_id = request_id - self.source_executor_id = source_executor_id - self.request_type: type[Any] = type(request_data) - self.response_type = response_type + self.type = type + self.data = data # type: ignore[assignment] + self.origin = origin if origin is not None else _current_event_origin() + + # Event-specific fields + self.state = state + self.details = details + self.executor_id = executor_id + self._request_id = request_id + self._source_executor_id = source_executor_id + self._request_type = request_type + self._response_type = response_type + self.iteration = iteration def __repr__(self) -> str: - """Return a string representation of the request info event.""" - return ( - f"{self.__class__.__name__}(" - f"request_id={self.request_id}, " - f"source_executor_id={self.source_executor_id}, " - f"request_type={self.request_type.__name__}, " - f"data={self.data}, " - f"response_type={self.response_type.__name__})" - ) + """Return a string representation of the workflow event.""" + parts = [f"type={self.type!r}"] + if self.state is not None: + parts.append(f"state={self.state.value}") + if self.executor_id is not None: + parts.append(f"executor_id={self.executor_id!r}") + if self.iteration is not None: + parts.append(f"iteration={self.iteration}") + if self._request_id is not None: + parts.append(f"request_id={self._request_id!r}") + if self.data is not None: + parts.append(f"data={self.data!r}") + return f"WorkflowEvent({', '.join(parts)})" # pragma: no cover + + # ========================================================================== + # Factory methods + # ========================================================================== - def to_dict(self) -> dict[str, Any]: - """Convert the request info event to a dictionary for serialization.""" - return { - "data": encode_checkpoint_value(self.data), - "request_id": self.request_id, - "source_executor_id": self.source_executor_id, - "request_type": 
serialize_type(self.request_type), - "response_type": serialize_type(self.response_type), - } + @classmethod + def started(cls, data: DataT | None = None) -> WorkflowEvent[DataT]: + """Create a 'started' event when a workflow run begins.""" + return cls("started", data=data) - @staticmethod - def from_dict(data: dict[str, Any]) -> "RequestInfoEvent": - """Create a RequestInfoEvent from a dictionary.""" - # Validation - for property in ["data", "request_id", "source_executor_id", "request_type", "response_type"]: - if property not in data: - raise KeyError(f"Missing '{property}' field in RequestInfoEvent dictionary.") + @classmethod + def status(cls, state: WorkflowRunState, data: DataT | None = None) -> WorkflowEvent[DataT]: + """Create a 'status' event for workflow state transitions.""" + return cls("status", data=data, state=state) - request_info_event = RequestInfoEvent( - request_id=data["request_id"], - source_executor_id=data["source_executor_id"], - request_data=decode_checkpoint_value(data["data"]), - response_type=deserialize_type(data["response_type"]), - ) + @classmethod + def failed(cls, details: WorkflowErrorDetails, data: DataT | None = None) -> WorkflowEvent[DataT]: + """Create a 'failed' event when a workflow terminates with error.""" + return cls("failed", data=data, details=details) - # Verify that the deserialized request_data matches the declared request_type - if deserialize_type(data["request_type"]) is not type(request_info_event.data): - raise TypeError( - "Mismatch between deserialized request_data type and request_type field in RequestInfoEvent dictionary." 
- ) + @classmethod + def warning(cls, message: str) -> WorkflowEvent[str]: + """Create a 'warning' event from user code.""" + return WorkflowEvent("warning", data=message) - return request_info_event + @classmethod + def error(cls, exception: Exception) -> WorkflowEvent[Exception]: + """Create an 'error' event from user code.""" + return WorkflowEvent("error", data=exception) + @classmethod + def output(cls, executor_id: str, data: DataT) -> WorkflowEvent[DataT]: + """Create an 'output' event when an executor yields final output.""" + return cls("output", executor_id=executor_id, data=data) -class WorkflowOutputEvent(WorkflowEvent): - """Event triggered when a workflow executor yields output.""" + @classmethod + def emit(cls, executor_id: str, data: DataT) -> WorkflowEvent[DataT]: + """Create a 'data' event when an executor emits data during execution. - def __init__( - self, - data: Any, - executor_id: str, - ): - """Initialize the workflow output event. - - Args: - data: The output yielded by the executor. - executor_id: ID of the executor that yielded the output. + This is the primary method for executors to emit typed data + (e.g., AgentResponse, AgentResponseUpdate, custom data). 
""" - super().__init__(data) - self.executor_id = executor_id - - def __repr__(self) -> str: - """Return a string representation of the workflow output event.""" - return f"{self.__class__.__name__}(data={self.data}, executor_id={self.executor_id})" - + return cls("data", executor_id=executor_id, data=data) -class SuperStepEvent(WorkflowEvent): - """Event triggered when a superstep starts or ends.""" + @classmethod + def request_info( + cls, + request_id: str, + source_executor_id: str, + request_data: DataT, + response_type: builtins.type[Any], + ) -> WorkflowEvent[DataT]: + """Create a 'request_info' event when an executor requests external information.""" + return cls( + "request_info", + data=request_data, + request_id=request_id, + source_executor_id=source_executor_id, + request_type=type(request_data), + response_type=response_type, + ) - def __init__(self, iteration: int, data: Any | None = None): - """Initialize the superstep event. + @classmethod + def superstep_started(cls, iteration: int, data: DataT | None = None) -> WorkflowEvent[DataT]: + """Create a 'superstep_started' event when a superstep begins.""" + return cls("superstep_started", iteration=iteration, data=data) - Args: - iteration: The number of the superstep (1-based index). - data: Optional data associated with the superstep event. 
- """ - super().__init__(data) - self.iteration = iteration + @classmethod + def superstep_completed(cls, iteration: int, data: DataT | None = None) -> WorkflowEvent[DataT]: + """Create a 'superstep_completed' event when a superstep ends.""" + return cls("superstep_completed", iteration=iteration, data=data) - def __repr__(self) -> str: - """Return a string representation of the superstep event.""" - return f"{self.__class__.__name__}(iteration={self.iteration}, data={self.data})" + @classmethod + def executor_invoked(cls, executor_id: str, data: DataT | None = None) -> WorkflowEvent[DataT]: + """Create an 'executor_invoked' event when an executor handler is called.""" + return cls("executor_invoked", executor_id=executor_id, data=data) + @classmethod + def executor_completed(cls, executor_id: str, data: DataT | None = None) -> WorkflowEvent[DataT]: + """Create an 'executor_completed' event when an executor handler completes.""" + return cls("executor_completed", executor_id=executor_id, data=data) -class SuperStepStartedEvent(SuperStepEvent): - """Event triggered when a superstep starts.""" + @classmethod + def executor_failed(cls, executor_id: str, details: WorkflowErrorDetails) -> WorkflowEvent[WorkflowErrorDetails]: + """Create an 'executor_failed' event when an executor handler raises an error.""" + return WorkflowEvent("executor_failed", executor_id=executor_id, data=details, details=details) - ... + # ========================================================================== + # Property for type-safe access + # ========================================================================== + @property + def request_id(self) -> str: + """Get request_id for request_info events. -class SuperStepCompletedEvent(SuperStepEvent): - """Event triggered when a superstep ends.""" + Returns: + The request ID as a non-None string. - ... 
+ Raises: + RuntimeError: If called on an event that is not a request_info event, + or if the event is malformed (request_info without request_id). + """ + if self.type != "request_info" or self._request_id is None: + raise RuntimeError(f"request_id is only available for request_info events, got type={self.type!r}") + return self._request_id + @property + def source_executor_id(self) -> str: + """Get source_executor_id for request_info events. -class ExecutorEvent(WorkflowEvent): - """Base class for executor events.""" + Returns: + The source executor ID as a non-None string. - def __init__(self, executor_id: str, data: Any | None = None): - """Initialize the executor event with an executor ID and optional data.""" - super().__init__(data) - self.executor_id = executor_id + Raises: + RuntimeError: If called on an event that is not a request_info event, + or if the event is malformed (request_info without source_executor_id). + """ + if self.type != "request_info" or self._source_executor_id is None: + raise RuntimeError(f"source_executor_id is only available for request_info events, got type={self.type!r}") + return self._source_executor_id - def __repr__(self) -> str: - """Return a string representation of the executor event.""" - return f"{self.__class__.__name__}(executor_id={self.executor_id}, data={self.data})" + @property + def request_type(self) -> builtins.type[Any]: + """Get request_type for request_info events. + Returns: + The request data type as a non-None type object. -class ExecutorInvokedEvent(ExecutorEvent): - """Event triggered when an executor handler is invoked.""" + Raises: + RuntimeError: If called on an event that is not a request_info event, + or if the event is malformed (request_info without request_type). + """ + if self.type != "request_info" or self._request_type is None: + raise RuntimeError(f"request_type is only available for request_info events, got type={self.type!r}") + return self._request_type - ... 
+ @property + def response_type(self) -> builtins.type[Any]: + """Get response_type for request_info events. + Returns: + The response data type as a non-None type object. -class ExecutorCompletedEvent(ExecutorEvent): - """Event triggered when an executor handler is completed.""" + Raises: + RuntimeError: If called on an event that is not a request_info event, + or if the event is malformed (request_info without response_type). + """ + if self.type != "request_info" or self._response_type is None: + raise RuntimeError(f"response_type is only available for request_info events, got type={self.type!r}") + return self._response_type - ... + # ========================================================================== + # Serialization methods (primarily for REQUEST_INFO events) + # ========================================================================== + def to_dict(self) -> dict[str, Any]: + """Convert to dictionary for serialization. -class ExecutorFailedEvent(ExecutorEvent): - """Event triggered when an executor handler raises an error.""" + Currently only implemented for 'request_info' events for checkpoint storage. 
+ """ + if self.type != "request_info": + raise ValueError(f"to_dict() only supported for 'request_info' events, got '{self.type}'") + return { + "type": self.type, + "data": encode_checkpoint_value(self.data), + "request_id": self._request_id, + "source_executor_id": self._source_executor_id, + "request_type": serialize_type(self._request_type) if self._request_type else None, + "response_type": serialize_type(self._response_type) if self._response_type else None, + } - def __init__( - self, - executor_id: str, - details: WorkflowErrorDetails, - ): - super().__init__(executor_id, details) - self.details = details + @classmethod + def from_dict(cls, data: dict[str, Any]) -> WorkflowEvent[Any]: + """Create a REQUEST_INFO event from a dictionary.""" + for prop in ["data", "request_id", "source_executor_id", "request_type", "response_type"]: + if prop not in data: + raise KeyError(f"Missing '{prop}' field in WorkflowEvent dictionary.") - def __repr__(self) -> str: # pragma: no cover - representation only - return f"{self.__class__.__name__}(executor_id={self.executor_id}, details={self.details})" + request_data = decode_checkpoint_value(data["data"]) + request_type = deserialize_type(data["request_type"]) + if request_type is not type(request_data): + raise TypeError( + "Mismatch between deserialized request_data type and request_type field in WorkflowEvent dictionary." 
+ ) -WorkflowLifecycleEvent: TypeAlias = WorkflowStartedEvent | WorkflowStatusEvent | WorkflowFailedEvent + return cls.request_info( + request_id=data["request_id"], + source_executor_id=data["source_executor_id"], + request_data=cast(Any, request_data), # type: ignore + response_type=deserialize_type(data["response_type"]), + ) diff --git a/python/packages/core/agent_framework/_workflows/_executor.py b/python/packages/core/agent_framework/_workflows/_executor.py index d7e58c9c20..ffab65e3a3 100644 --- a/python/packages/core/agent_framework/_workflows/_executor.py +++ b/python/packages/core/agent_framework/_workflows/_executor.py @@ -11,10 +11,8 @@ from ..observability import create_processing_span from ._events import ( - ExecutorCompletedEvent, - ExecutorFailedEvent, - ExecutorInvokedEvent, WorkflowErrorDetails, + WorkflowEvent, _framework_event_origin, # type: ignore[reportPrivateUsage] ) from ._model_utils import DictConvertible @@ -274,14 +272,14 @@ async def execute( # Invoke the handler with the message and context # Use deepcopy to capture original input state before handler can mutate it with _framework_event_origin(): - invoke_event = ExecutorInvokedEvent(self.id, copy.deepcopy(message)) + invoke_event = WorkflowEvent.executor_invoked(self.id, copy.deepcopy(message)) await context.add_event(invoke_event) try: await handler(message, context) except Exception as exc: # Surface structured executor failure before propagating with _framework_event_origin(): - failure_event = ExecutorFailedEvent(self.id, WorkflowErrorDetails.from_exception(exc)) + failure_event = WorkflowEvent.executor_failed(self.id, WorkflowErrorDetails.from_exception(exc)) await context.add_event(failure_event) raise with _framework_event_origin(): @@ -289,7 +287,9 @@ async def execute( sent_messages = context.get_sent_messages() yielded_outputs = context.get_yielded_outputs() completion_data = sent_messages + yielded_outputs - completed_event = ExecutorCompletedEvent(self.id, 
completion_data if completion_data else None) + completed_event = WorkflowEvent.executor_completed( + self.id, completion_data if completion_data else None + ) await context.add_event(completed_event) def _create_context_for_handler( @@ -538,8 +538,8 @@ def handler( output: type | types.UnionType | str | None = None, workflow_output: type | types.UnionType | str | None = None, ) -> Callable[ - [Callable[[ExecutorT, Any, ContextT], Awaitable[Any]]], - Callable[[ExecutorT, Any, ContextT], Awaitable[Any]], + [Callable[..., Awaitable[Any]]], + Callable[..., Awaitable[Any]], ]: ... @@ -724,9 +724,15 @@ def _validate_handler_signature( # Validate ctx parameter is WorkflowContext and extract type args ctx_param = params[2] - output_types, workflow_output_types = validate_workflow_context_annotation( - ctx_param.annotation, f"parameter '{ctx_param.name}'", "Handler" - ) + if skip_message_annotation and ctx_param.annotation == inspect.Parameter.empty: + # When explicit types are provided via @handler(input=..., output=...), + # the ctx parameter doesn't need a type annotation - types come from the decorator. 
+ output_types: list[type[Any] | types.UnionType] = [] + workflow_output_types: list[type[Any] | types.UnionType] = [] + else: + output_types, workflow_output_types = validate_workflow_context_annotation( + ctx_param.annotation, f"parameter '{ctx_param.name}'", "Handler" + ) message_type = message_param.annotation if message_param.annotation != inspect.Parameter.empty else None ctx_annotation = ctx_param.annotation diff --git a/python/packages/core/agent_framework/_workflows/_runner.py b/python/packages/core/agent_framework/_workflows/_runner.py index da8473613e..f3a475e034 100644 --- a/python/packages/core/agent_framework/_workflows/_runner.py +++ b/python/packages/core/agent_framework/_workflows/_runner.py @@ -16,7 +16,7 @@ from ._const import EXECUTOR_STATE_KEY from ._edge import EdgeGroup from ._edge_runner import EdgeRunner, create_edge_runner -from ._events import SuperStepCompletedEvent, SuperStepStartedEvent, WorkflowEvent +from ._events import WorkflowEvent from ._exceptions import ( WorkflowCheckpointException, WorkflowConvergenceException, @@ -102,7 +102,7 @@ async def run_until_convergence(self) -> AsyncGenerator[WorkflowEvent, None]: while self._iteration < self._max_iterations: logger.info(f"Starting superstep {self._iteration + 1}") - yield SuperStepStartedEvent(iteration=self._iteration + 1) + yield WorkflowEvent.superstep_started(iteration=self._iteration + 1) # Run iteration concurrently with live event streaming: we poll # for new events while the iteration coroutine progresses. 
@@ -147,7 +147,7 @@ async def run_until_convergence(self) -> AsyncGenerator[WorkflowEvent, None]: # Create checkpoint after each superstep iteration await self._create_checkpoint_if_enabled(f"superstep_{self._iteration}") - yield SuperStepCompletedEvent(iteration=self._iteration) + yield WorkflowEvent.superstep_completed(iteration=self._iteration) # Check for convergence: no more messages to process if not await self._ctx.has_messages(): diff --git a/python/packages/core/agent_framework/_workflows/_runner_context.py b/python/packages/core/agent_framework/_workflows/_runner_context.py index c3bf6ce262..ed81026245 100644 --- a/python/packages/core/agent_framework/_workflows/_runner_context.py +++ b/python/packages/core/agent_framework/_workflows/_runner_context.py @@ -1,5 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. +from __future__ import annotations + import asyncio import logging import sys @@ -12,7 +14,7 @@ from ._checkpoint import CheckpointStorage, WorkflowCheckpoint from ._checkpoint_encoding import decode_checkpoint_value, encode_checkpoint_value from ._const import INTERNAL_SOURCE_ID -from ._events import RequestInfoEvent, WorkflowEvent +from ._events import WorkflowEvent from ._state import State from ._typing_utils import is_instance_of @@ -51,7 +53,7 @@ class Message: source_span_ids: list[str] | None = None # Publishing span IDs for linking from multiple sources # For response messages, the original request data - original_request_info_event: RequestInfoEvent | None = None + original_request_info_event: WorkflowEvent[Any] | None = None # Backward compatibility properties @property @@ -77,7 +79,7 @@ def to_dict(self) -> dict[str, Any]: } @staticmethod - def from_dict(data: dict[str, Any]) -> "Message": + def from_dict(data: dict[str, Any]) -> Message: """Create a Message from a dictionary.""" # Validation if "data" not in data: @@ -254,11 +256,11 @@ async def apply_checkpoint(self, checkpoint: WorkflowCheckpoint) -> None: """ ... 
- async def add_request_info_event(self, event: RequestInfoEvent) -> None: - """Add a RequestInfoEvent to the context and track it for correlation. + async def add_request_info_event(self, event: WorkflowEvent[Any]) -> None: + """Add a request_info event to the context and track it for correlation. Args: - event: The RequestInfoEvent to be added. + event: The WorkflowEvent with type='request_info' to be added. """ ... @@ -271,11 +273,11 @@ async def send_request_info_response(self, request_id: str, response: Any) -> No """ ... - async def get_pending_request_info_events(self) -> dict[str, RequestInfoEvent]: - """Get the mapping of request IDs to their corresponding RequestInfoEvent. + async def get_pending_request_info_events(self) -> dict[str, WorkflowEvent[Any]]: + """Get the mapping of request IDs to their corresponding request_info events. Returns: - A dictionary mapping request IDs to their corresponding RequestInfoEvent. + A dictionary mapping request IDs to their corresponding WorkflowEvent (type='request_info'). """ ... 
@@ -294,7 +296,7 @@ def __init__(self, checkpoint_storage: CheckpointStorage | None = None): self._event_queue: asyncio.Queue[WorkflowEvent] = asyncio.Queue() # An additional storage for pending request info events - self._pending_request_info_events: dict[str, RequestInfoEvent] = {} + self._pending_request_info_events: dict[str, WorkflowEvent[Any]] = {} # Checkpointing configuration/state self._checkpoint_storage = checkpoint_storage @@ -426,7 +428,7 @@ async def apply_checkpoint(self, checkpoint: WorkflowCheckpoint) -> None: self._pending_request_info_events.clear() pending_requests_data = checkpoint.pending_request_info_events for request_id, request_data in pending_requests_data.items(): - request_info_event = RequestInfoEvent.from_dict(request_data) + request_info_event = WorkflowEvent.from_dict(request_data) self._pending_request_info_events[request_id] = request_info_event await self.add_event(request_info_event) @@ -470,12 +472,14 @@ def _get_serialized_workflow_state(self, state: State, iteration_count: int) -> "pending_request_info_events": serialized_pending_request_info_events, } - async def add_request_info_event(self, event: RequestInfoEvent) -> None: - """Add a RequestInfoEvent to the context and track it for correlation. + async def add_request_info_event(self, event: WorkflowEvent[Any]) -> None: + """Add a request_info event to the context and track it for correlation. Args: - event: The RequestInfoEvent to be added. + event: The WorkflowEvent with type='request_info' to be added. 
""" + if event.request_id is None: + raise ValueError("request_info event must have a request_id") self._pending_request_info_events[event.request_id] = event await self.add_event(event) @@ -497,21 +501,23 @@ async def send_request_info_response(self, request_id: str, response: Any) -> No f"expected {event.response_type.__name__}, got {type(response).__name__}" ) + source_executor_id = event.source_executor_id + # Create ResponseMessage instance response_msg = Message( data=response, - source_id=INTERNAL_SOURCE_ID(event.source_executor_id), - target_id=event.source_executor_id, + source_id=INTERNAL_SOURCE_ID(source_executor_id), + target_id=source_executor_id, type=MessageType.RESPONSE, original_request_info_event=event, ) await self.send_message(response_msg) - async def get_pending_request_info_events(self) -> dict[str, RequestInfoEvent]: - """Get the mapping of request IDs to their corresponding RequestInfoEvent. + async def get_pending_request_info_events(self) -> dict[str, WorkflowEvent[Any]]: + """Get the mapping of request IDs to their corresponding request_info events. Returns: - A dictionary mapping request IDs to their corresponding RequestInfoEvent. + A dictionary mapping request IDs to their corresponding WorkflowEvent (type='request_info'). """ return dict(self._pending_request_info_events) diff --git a/python/packages/core/agent_framework/_workflows/_workflow.py b/python/packages/core/agent_framework/_workflows/_workflow.py index 665e6541f3..f12e9c9b2a 100644 --- a/python/packages/core/agent_framework/_workflows/_workflow.py +++ b/python/packages/core/agent_framework/_workflows/_workflow.py @@ -1,5 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. 
+from __future__ import annotations + import asyncio import functools import hashlib @@ -19,14 +21,9 @@ FanOutEdgeGroup, ) from ._events import ( - RequestInfoEvent, WorkflowErrorDetails, WorkflowEvent, - WorkflowFailedEvent, - WorkflowOutputEvent, WorkflowRunState, - WorkflowStartedEvent, - WorkflowStatusEvent, _framework_event_origin, # type: ignore ) from ._executor import Executor @@ -59,9 +56,9 @@ class WorkflowRunResult(list[WorkflowEvent]): - status_timeline(): Access the complete status event history """ - def __init__(self, events: list[WorkflowEvent], status_events: list[WorkflowStatusEvent] | None = None) -> None: + def __init__(self, events: list[WorkflowEvent[Any]], status_events: list[WorkflowEvent[Any]] | None = None) -> None: super().__init__(events) - self._status_events: list[WorkflowStatusEvent] = status_events or [] + self._status_events: list[WorkflowEvent[Any]] = status_events or [] def get_outputs(self) -> list[Any]: """Get all outputs from the workflow run result. @@ -69,30 +66,30 @@ def get_outputs(self) -> list[Any]: Returns: A list of outputs produced by the workflow during its execution. """ - return [event.data for event in self if isinstance(event, WorkflowOutputEvent)] + return [event.data for event in self if event.type == "output"] - def get_request_info_events(self) -> list[RequestInfoEvent]: + def get_request_info_events(self) -> list[WorkflowEvent[Any]]: """Get all request info events from the workflow run result. Returns: - A list of RequestInfoEvent instances found in the workflow run result. + A list of WorkflowEvent instances with type='request_info' found in the workflow run result. """ - return [event for event in self if isinstance(event, RequestInfoEvent)] + return [event for event in self if event.type == "request_info"] def get_final_state(self) -> WorkflowRunState: """Return the final run state based on explicit status events. - Returns the last WorkflowStatusEvent.state observed. Raises if none were emitted. 
+ Returns the last status event's state observed. Raises if none were emitted. """ if self._status_events: return self._status_events[-1].state # type: ignore[return-value] raise RuntimeError( - "Final state is unknown because no WorkflowStatusEvent was emitted. " + "Final state is unknown because no status event was emitted. " "Ensure your workflow entry points are used (which emit status events) " "or handle the absence of status explicitly." ) - def status_timeline(self) -> list[WorkflowStatusEvent]: + def status_timeline(self) -> list[WorkflowEvent[Any]]: """Return the list of status events emitted during the run (control-plane).""" return list(self._status_events) @@ -145,7 +142,7 @@ class Workflow(DictConvertible): Executors within a workflow can request external input using `ctx.request_info()`: 1. Executor calls `ctx.request_info()` to request input 2. Executor implements `response_handler()` to process the response - 3. Requests are emitted as RequestInfoEvent instances in the event stream + 3. Requests are emitted as request_info events (WorkflowEvent with type='request_info') in the event stream 4. Workflow enters IDLE_WITH_PENDING_REQUESTS state 5. Caller handles requests and provides responses via the `send_responses` or `send_responses_streaming` methods 6. Responses are routed to the requesting executors and response handlers are invoked @@ -205,7 +202,7 @@ def __init__( self.name = name self.description = description - # `WorkflowOutputEvent`s from these executors are treated as workflow outputs. + # Output events (WorkflowEvent with type='output') from these executors are treated as workflow outputs. # If None or empty, all executor outputs are considered workflow outputs. 
self._output_executors = list(output_executors) if output_executors else list(self.executors.keys()) @@ -332,10 +329,10 @@ async def _run_workflow_with_tracing( span.add_event(OtelAttr.WORKFLOW_STARTED) # Emit explicit start/status events to the stream with _framework_event_origin(): - started = WorkflowStartedEvent() + started = WorkflowEvent.started() yield started with _framework_event_origin(): - in_progress = WorkflowStatusEvent(WorkflowRunState.IN_PROGRESS) + in_progress = WorkflowEvent.status(WorkflowRunState.IN_PROGRESS) yield in_progress # Reset context for a new run if supported @@ -359,39 +356,39 @@ async def _run_workflow_with_tracing( # All executor executions happen within workflow span async for event in self._runner.run_until_convergence(): # Track request events for final status determination - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": saw_request = True yield event - if isinstance(event, RequestInfoEvent) and not emitted_in_progress_pending: + if event.type == "request_info" and not emitted_in_progress_pending: emitted_in_progress_pending = True with _framework_event_origin(): - pending_status = WorkflowStatusEvent(WorkflowRunState.IN_PROGRESS_PENDING_REQUESTS) + pending_status = WorkflowEvent.status(WorkflowRunState.IN_PROGRESS_PENDING_REQUESTS) yield pending_status # Workflow runs until idle - emit final status based on whether requests are pending if saw_request: with _framework_event_origin(): - terminal_status = WorkflowStatusEvent(WorkflowRunState.IDLE_WITH_PENDING_REQUESTS) + terminal_status = WorkflowEvent.status(WorkflowRunState.IDLE_WITH_PENDING_REQUESTS) yield terminal_status else: with _framework_event_origin(): - terminal_status = WorkflowStatusEvent(WorkflowRunState.IDLE) + terminal_status = WorkflowEvent.status(WorkflowRunState.IDLE) yield terminal_status span.add_event(OtelAttr.WORKFLOW_COMPLETED) except Exception as exc: - # Drain any pending events (for example, ExecutorFailedEvent) before 
yielding WorkflowFailedEvent + # Drain any pending events (for example, executor_failed) before yielding failed event for event in await self._runner.context.drain_events(): yield event # Surface structured failure details before propagating exception details = WorkflowErrorDetails.from_exception(exc) with _framework_event_origin(): - failed_event = WorkflowFailedEvent(details) + failed_event = WorkflowEvent.failed(details) yield failed_event with _framework_event_origin(): - failed_status = WorkflowStatusEvent(WorkflowRunState.FAILED) + failed_status = WorkflowEvent.status(WorkflowRunState.FAILED) yield failed_status span.add_event( name=OtelAttr.WORKFLOW_ERROR, @@ -554,7 +551,7 @@ async def _run_streaming( streaming=True, run_kwargs=kwargs if kwargs else None, ): - if isinstance(event, WorkflowOutputEvent) and not self._should_yield_output_event(event): + if event.type == "output" and not self._should_yield_output_event(event): continue yield event finally: @@ -579,7 +576,7 @@ async def send_responses_streaming(self, responses: dict[str, Any]) -> AsyncIter reset_context=False, # Don't reset context when sending responses streaming=True, ): - if isinstance(event, WorkflowOutputEvent) and not self._should_yield_output_event(event): + if event.type == "output" and not self._should_yield_output_event(event): continue yield event finally: @@ -628,20 +625,20 @@ async def _run_non_streaming( self._reset_running_flag() # Filter events for non-streaming mode - filtered: list[WorkflowEvent] = [] - status_events: list[WorkflowStatusEvent] = [] + filtered: list[WorkflowEvent[Any]] = [] + status_events: list[WorkflowEvent[Any]] = [] for ev in raw_events: - # Omit WorkflowStartedEvent from non-streaming (telemetry-only) - if isinstance(ev, WorkflowStartedEvent): + # Omit started events from non-streaming (telemetry-only) + if ev.type == "started": continue # Track status; include inline only if explicitly requested - if isinstance(ev, WorkflowStatusEvent): + if ev.type == 
"status": status_events.append(ev) if include_status_events: filtered.append(ev) continue - if isinstance(ev, WorkflowOutputEvent) and not self._should_yield_output_event(ev): + if ev.type == "output" and not self._should_yield_output_event(ev): continue filtered.append(ev) @@ -665,12 +662,12 @@ async def send_responses(self, responses: dict[str, Any]) -> WorkflowRunResult: reset_context=False, # Don't reset context when sending responses ) ] - status_events = [e for e in events if isinstance(e, WorkflowStatusEvent)] - filtered_events: list[WorkflowEvent] = [] + status_events = [e for e in events if e.type == "status"] + filtered_events: list[WorkflowEvent[Any]] = [] for e in events: - if isinstance(e, WorkflowOutputEvent) and not self._should_yield_output_event(e): + if e.type == "output" and not self._should_yield_output_event(e): continue - if isinstance(e, (WorkflowStatusEvent, WorkflowStartedEvent)): + if e.type in ("status", "started"): continue filtered_events.append(e) return WorkflowRunResult(filtered_events, status_events) @@ -712,11 +709,11 @@ def _get_executor_by_id(self, executor_id: str) -> Executor: raise ValueError(f"Executor with ID {executor_id} not found.") return self.executors[executor_id] - def _should_yield_output_event(self, event: WorkflowOutputEvent) -> bool: - """Determine if a WorkflowOutputEvent should be yielded as a workflow output. + def _should_yield_output_event(self, event: WorkflowEvent[Any]) -> bool: + """Determine if an output event should be yielded as a workflow output. Args: - event: The WorkflowOutputEvent to evaluate. + event: The WorkflowEvent with type='output' to evaluate. Returns: True if the event should be yielded as a workflow output, False otherwise. 
diff --git a/python/packages/core/agent_framework/_workflows/_workflow_context.py b/python/packages/core/agent_framework/_workflows/_workflow_context.py index 3558e30fd9..2bdd81ef41 100644 --- a/python/packages/core/agent_framework/_workflows/_workflow_context.py +++ b/python/packages/core/agent_framework/_workflows/_workflow_context.py @@ -1,5 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. +from __future__ import annotations + import copy import inspect import logging @@ -13,15 +15,8 @@ from ..observability import OtelAttr, create_workflow_span from ._events import ( - RequestInfoEvent, WorkflowEvent, WorkflowEventSource, - WorkflowFailedEvent, - WorkflowLifecycleEvent, - WorkflowOutputEvent, - WorkflowStartedEvent, - WorkflowStatusEvent, - WorkflowWarningEvent, _framework_event_origin, # type: ignore ) from ._runner_context import Message, RunnerContext @@ -204,15 +199,8 @@ def _is_type_like(x: Any) -> bool: return infer_output_types_from_ctx_annotation(annotation) -_FRAMEWORK_LIFECYCLE_EVENT_TYPES: tuple[type[WorkflowEvent], ...] 
= cast( - tuple[type[WorkflowEvent], ...], - tuple(get_args(WorkflowLifecycleEvent)) - or ( - WorkflowStartedEvent, - WorkflowStatusEvent, - WorkflowFailedEvent, - ), -) +# Event types reserved for framework lifecycle (not allowed from user code) +_FRAMEWORK_LIFECYCLE_EVENT_TYPES: frozenset[str] = frozenset({"started", "status", "failed"}) class WorkflowContext(Generic[OutT, W_OutT]): @@ -264,7 +252,7 @@ async def flexible(message: str, ctx: WorkflowContext[int | str, bool | dict]) - def __init__( self, - executor: "Executor", + executor: Executor, source_executor_ids: list[str], state: State, runner_context: RunnerContext, @@ -291,10 +279,10 @@ def __init__( self._runner_context = runner_context self._state = state - # Track messages sent via send_message() for ExecutorCompletedEvent + # Track messages sent via send_message() for executor_completed event (type='executor_completed') self._sent_messages: list[Any] = [] - # Track outputs yielded via yield_output() for ExecutorCompletedEvent + # Track outputs yielded via yield_output() for executor_completed event (type='executor_completed') self._yielded_outputs: list[Any] = [] # Store trace contexts and source span IDs for linking (supporting multiple sources) @@ -335,7 +323,7 @@ async def send_message(self, message: OutT, target_id: str | None = None) -> Non # Create Message wrapper msg = Message(data=message, source_id=self._executor_id, target_id=target_id) - # Track sent message for ExecutorCompletedEvent + # Track sent message for executor_completed event (type='executor_completed') self._sent_messages.append(message) # Inject current trace context if tracing enabled @@ -355,31 +343,31 @@ async def yield_output(self, output: W_OutT) -> None: output: The output to yield. This must conform to the workflow output type(s) declared on this context. 
""" - # Track yielded output for ExecutorCompletedEvent (deepcopy to capture state at yield time) + # Track yielded output for executor_completed event (type='executor_completed') + # (deepcopy to capture state at yield time) self._yielded_outputs.append(copy.deepcopy(output)) with _framework_event_origin(): - event = WorkflowOutputEvent(data=output, executor_id=self._executor_id) + event = WorkflowEvent.output(self._executor_id, output) await self._runner_context.add_event(event) - async def add_event(self, event: WorkflowEvent) -> None: + async def add_event(self, event: WorkflowEvent[Any]) -> None: """Add an event to the workflow context.""" - if event.origin == WorkflowEventSource.EXECUTOR and isinstance(event, _FRAMEWORK_LIFECYCLE_EVENT_TYPES): - event_name = event.__class__.__name__ + if event.origin == WorkflowEventSource.EXECUTOR and event.type in _FRAMEWORK_LIFECYCLE_EVENT_TYPES: warning_msg = ( - f"Executor '{self._executor_id}' attempted to emit {event_name}, " + f"Executor '{self._executor_id}' attempted to emit a '{event.type}' event, " "which is reserved for framework lifecycle notifications. The " "event was ignored." ) logger.warning(warning_msg) - await self._runner_context.add_event(WorkflowWarningEvent(warning_msg)) + await self._runner_context.add_event(WorkflowEvent.warning(warning_msg)) return await self._runner_context.add_event(event) async def request_info(self, request_data: object, response_type: type, *, request_id: str | None = None) -> None: """Request information from outside of the workflow. - Calling this method will cause the workflow to emit a RequestInfoEvent, carrying the + Calling this method will cause the workflow to emit a request_info event (type='request_info'), carrying the provided request_data and request_type. External systems listening for such events can then process the request and respond accordingly. 
@@ -401,7 +389,7 @@ async def request_info(self, request_data: object, response_type: type, *, reque "not be processed. Please define a response handler using the @response_handler decorator." ) - request_info_event = RequestInfoEvent( + request_info_event = WorkflowEvent.request_info( request_id=request_id or str(uuid.uuid4()), source_executor_id=self._executor_id, request_data=request_data, diff --git a/python/packages/core/agent_framework/_workflows/_workflow_executor.py b/python/packages/core/agent_framework/_workflows/_workflow_executor.py index 029e89e000..b83c826873 100644 --- a/python/packages/core/agent_framework/_workflows/_workflow_executor.py +++ b/python/packages/core/agent_framework/_workflows/_workflow_executor.py @@ -14,9 +14,7 @@ from ._checkpoint_encoding import decode_checkpoint_value, encode_checkpoint_value from ._const import WORKFLOW_RUN_KWARGS_KEY from ._events import ( - RequestInfoEvent, - WorkflowErrorEvent, - WorkflowFailedEvent, + WorkflowEvent, WorkflowRunState, ) from ._executor import Executor, handler @@ -52,38 +50,38 @@ class ExecutionContext: # Pending requests to be fulfilled. This will get updated as the # WorkflowExecutor receives responses. - pending_requests: dict[str, RequestInfoEvent] # request_id -> request_info_event + pending_requests: dict[str, WorkflowEvent] # request_id -> request_info_event @dataclass class SubWorkflowResponseMessage: """Message sent from a parent workflow to a sub-workflow via WorkflowExecutor to provide requested information. - This message wraps the response data along with the original RequestInfoEvent emitted by the sub-workflow executor. + This message wraps the response data along with the original WorkflowEvent emitted by the sub-workflow executor. Attributes: data: The response data to the original request. - source_event: The original RequestInfoEvent emitted by the sub-workflow executor. + source_event: The original WorkflowEvent emitted by the sub-workflow executor. 
""" data: Any - source_event: RequestInfoEvent + source_event: WorkflowEvent @dataclass class SubWorkflowRequestMessage: """Message sent from a sub-workflow to an executor in the parent workflow to request information. - This message wraps a RequestInfoEvent emitted by the executor in the sub-workflow. + This message wraps a WorkflowEvent emitted by the executor in the sub-workflow. Attributes: - source_event: The original RequestInfoEvent emitted by the sub-workflow executor. + source_event: The original WorkflowEvent emitted by the sub-workflow executor. executor_id: The ID of the WorkflowExecutor in the parent workflow that is responsible for this sub-workflow. This can be used to ensure that the response is sent back to the correct sub-workflow instance. """ - source_event: RequestInfoEvent + source_event: WorkflowEvent executor_id: str def create_response(self, data: Any) -> SubWorkflowResponseMessage: @@ -153,7 +151,7 @@ class WorkflowExecutor(Executor): # An executor in the sub-workflow makes request request = MyDataRequest(query="user info") - # WorkflowExecutor captures RequestInfoEvent and wraps it in a SubWorkflowRequestMessage + # WorkflowExecutor captures WorkflowEvent and wraps it in a SubWorkflowRequestMessage # then send it to the receiving executor in parent workflow. The executor in parent workflow # can handle the request locally or forward it to an external source. # The WorkflowExecutor tracks the pending request, and implements a response handler. @@ -191,8 +189,8 @@ class WorkflowExecutor(Executor): ## Error Handling WorkflowExecutor propagates sub-workflow failures: - - Captures WorkflowFailedEvent from sub-workflow - - Converts to WorkflowErrorEvent in parent context + - Captures failed event (type='failed') from sub-workflow + - Converts to error event in parent context - Provides detailed error information including sub-workflow ID ## Concurrent Execution Support @@ -285,7 +283,7 @@ def __init__( workflow's event stream. 
propagate_request: Whether to propagate requests from the sub-workflow to the parent workflow. If set to true, requests from the sub-workflow - will be propagated as the original RequestInfoEvent to the parent + will be propagated as the original WorkflowEvent to the parent workflow. Otherwise, they will be wrapped in a SubWorkflowRequestMessage, which should be handled by an executor in the parent workflow. @@ -421,8 +419,9 @@ async def handle_message_wrapped_request_response( response: The response to a previous request. ctx: The workflow context. """ + request_id = response.source_event.request_id await self._handle_response( - request_id=response.source_event.request_id, + request_id=request_id, response=response.data, ctx=ctx, ) @@ -437,7 +436,7 @@ async def handle_propagated_request_response( """Handle response for a request that was propagated to the parent workflow. Args: - original_request: The original RequestInfoEvent. + original_request: The original WorkflowEvent. response: The response data. ctx: The workflow context. """ @@ -550,15 +549,17 @@ async def _process_workflow_result( # Process request info events for event in request_info_events: + request_id = event.request_id + response_type = event.response_type # Track the pending request in execution context - execution_context.pending_requests[event.request_id] = event + execution_context.pending_requests[request_id] = event # Map request to execution for response routing - self._request_to_execution[event.request_id] = execution_context.execution_id + self._request_to_execution[request_id] = execution_context.execution_id if self._propagate_request: # In a workflow where the parent workflow does not handle the request, the request # should be propagated via the `request_info` mechanism to an external source. And # a @response_handler would be required in the WorkflowExecutor to handle the response. 
- await ctx.request_info(event.data, event.response_type, request_id=event.request_id) + await ctx.request_info(event.data, response_type, request_id=request_id) else: # In a workflow where the parent workflow has an executor that may intercept the # request and handle it directly, a message should be sent. @@ -569,18 +570,19 @@ async def _process_workflow_result( # Handle final state if workflow_run_state == WorkflowRunState.FAILED: - # Find the WorkflowFailedEvent. - failed_events = [e for e in result if isinstance(e, WorkflowFailedEvent)] + # Find the failed event (type='failed'). + failed_events = [e for e in result if isinstance(e, WorkflowEvent) and e.type == "failed"] if failed_events: failed_event = failed_events[0] - error_type = failed_event.details.error_type - error_message = failed_event.details.message - exception = Exception( - f"Sub-workflow {self.workflow.id} failed with error: {error_type} - {error_message}" - ) - error_event = WorkflowErrorEvent( - data=exception, - ) + if failed_event.details is not None: + error_type = failed_event.details.error_type + error_message = failed_event.details.message + exception = Exception( + f"Sub-workflow {self.workflow.id} failed with error: {error_type} - {error_message}" + ) + else: + exception = Exception(f"Sub-workflow {self.workflow.id} failed with unknown error") + error_event = WorkflowEvent.error(exception) await ctx.add_event(error_event) elif workflow_run_state == WorkflowRunState.IDLE: # Sub-workflow is idle - nothing more to do now @@ -661,11 +663,7 @@ async def _handle_response( # requesting the same information again. 
for request_id in responses_to_send: event_to_remove = next( - ( - event - for event in result - if isinstance(event, RequestInfoEvent) and event.request_id == request_id - ), + (event for event in result if event.type == "request_info" and event.request_id == request_id), None, ) if event_to_remove: diff --git a/python/packages/core/agent_framework/orchestrations/__init__.pyi b/python/packages/core/agent_framework/orchestrations/__init__.pyi index fcaaf04d00..cf26847972 100644 --- a/python/packages/core/agent_framework/orchestrations/__init__.pyi +++ b/python/packages/core/agent_framework/orchestrations/__init__.pyi @@ -12,6 +12,8 @@ from agent_framework_orchestrations import ( ConcurrentBuilder, GroupChatBuilder, GroupChatOrchestrator, + GroupChatRequestMessage, + GroupChatRequestSentEvent, GroupChatSelectionFunction, GroupChatState, HandoffAgentExecutor, @@ -48,6 +50,8 @@ __all__ = [ "ConcurrentBuilder", "GroupChatBuilder", "GroupChatOrchestrator", + "GroupChatRequestMessage", + "GroupChatRequestSentEvent", "GroupChatSelectionFunction", "GroupChatState", "HandoffAgentExecutor", diff --git a/python/packages/core/tests/workflow/test_agent_executor.py b/python/packages/core/tests/workflow/test_agent_executor.py index 560eb10091..3cbd369bf4 100644 --- a/python/packages/core/tests/workflow/test_agent_executor.py +++ b/python/packages/core/tests/workflow/test_agent_executor.py @@ -13,9 +13,7 @@ ChatMessageStore, Content, ResponseStream, - WorkflowOutputEvent, WorkflowRunState, - WorkflowStatusEvent, ) from agent_framework._workflows._agent_executor import AgentExecutorResponse from agent_framework._workflows._checkpoint import InMemoryCheckpointStorage @@ -77,9 +75,9 @@ async def test_agent_executor_checkpoint_stores_and_restores_state() -> None: # Run the workflow with a user message first_run_output: AgentExecutorResponse | None = None async for ev in wf.run("First workflow run", stream=True): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": 
first_run_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: break assert first_run_output is not None @@ -131,9 +129,9 @@ async def test_agent_executor_checkpoint_stores_and_restores_state() -> None: # Resume from checkpoint resumed_output: AgentExecutorResponse | None = None async for ev in wf_resume.run(checkpoint_id=restore_checkpoint.checkpoint_id, stream=True): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": resumed_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state in ( + if ev.type == "status" and ev.state in ( WorkflowRunState.IDLE, WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, ): diff --git a/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py b/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py index 7f2e4931e5..9b69fe7034 100644 --- a/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py +++ b/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py @@ -19,11 +19,10 @@ ChatResponse, ChatResponseUpdate, Content, - RequestInfoEvent, ResponseStream, WorkflowBuilder, WorkflowContext, - WorkflowOutputEvent, + WorkflowEvent, executor, tool, ) @@ -100,9 +99,9 @@ async def test_agent_executor_emits_tool_calls_in_streaming_mode() -> None: workflow = WorkflowBuilder().set_start_executor(agent_exec).build() # Act: run in streaming mode - events: list[WorkflowOutputEvent] = [] + events: list[WorkflowEvent[AgentResponseUpdate]] = [] async for event in workflow.run("What's the weather?", stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output" and isinstance(event.data, AgentResponseUpdate): events.append(event) # Assert: we should receive 4 events (text, function call, function result, text) @@ -290,9 +289,9 @@ async def 
test_agent_executor_tool_call_with_approval_streaming() -> None: workflow = WorkflowBuilder().set_start_executor(agent).add_edge(agent, test_executor).build() # Act - request_info_events: list[RequestInfoEvent] = [] + request_info_events: list[WorkflowEvent] = [] async for event in workflow.run("Invoke tool requiring approval", stream=True): - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": request_info_events.append(event) # Assert @@ -307,7 +306,7 @@ async def test_agent_executor_tool_call_with_approval_streaming() -> None: async for event in workflow.send_responses_streaming({ approval_request.request_id: approval_request.data.to_function_approval_response(True) }): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": output = event.data # Assert @@ -367,9 +366,9 @@ async def test_agent_executor_parallel_tool_call_with_approval_streaming() -> No workflow = WorkflowBuilder().set_start_executor(agent).add_edge(agent, test_executor).build() # Act - request_info_events: list[RequestInfoEvent] = [] + request_info_events: list[WorkflowEvent] = [] async for event in workflow.run("Invoke tool requiring approval", stream=True): - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": request_info_events.append(event) # Assert @@ -387,7 +386,7 @@ async def test_agent_executor_parallel_tool_call_with_approval_streaming() -> No output: str | None = None async for event in workflow.send_responses_streaming(responses): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": output = event.data # Assert diff --git a/python/packages/core/tests/workflow/test_agent_run_event_typing.py b/python/packages/core/tests/workflow/test_agent_run_event_typing.py index 58ac2cbf27..410f57f962 100644 --- a/python/packages/core/tests/workflow/test_agent_run_event_typing.py +++ b/python/packages/core/tests/workflow/test_agent_run_event_typing.py @@ -1,27 +1,38 @@ # Copyright (c) Microsoft. All rights reserved. 
-"""Tests for agent run event typing.""" +"""Tests for WorkflowEvent[T] generic type annotations.""" from agent_framework import AgentResponse, AgentResponseUpdate, ChatMessage -from agent_framework._workflows._events import WorkflowOutputEvent +from agent_framework._workflows._events import WorkflowEvent -def test_agent_run_event_data_type() -> None: - """Verify WorkflowOutputEvent.data is typed as AgentResponse | None.""" +def test_workflow_event_with_agent_response_data_type() -> None: + """Verify WorkflowEvent[AgentResponse].data is typed as AgentResponse.""" response = AgentResponse(messages=[ChatMessage(role="assistant", text="Hello")]) - event = WorkflowOutputEvent(data=response, executor_id="test") + event: WorkflowEvent[AgentResponse] = WorkflowEvent.emit(executor_id="test", data=response) # This assignment should pass type checking without a cast - data: AgentResponse | None = event.data + data: AgentResponse = event.data assert data is not None assert data.text == "Hello" -def test_agent_run_update_event_data_type() -> None: - """Verify WorkflowOutputEvent.data is typed as AgentResponseUpdate | None.""" +def test_workflow_event_with_agent_response_update_data_type() -> None: + """Verify WorkflowEvent[AgentResponseUpdate].data is typed as AgentResponseUpdate.""" update = AgentResponseUpdate() - event = WorkflowOutputEvent(data=update, executor_id="test") + event: WorkflowEvent[AgentResponseUpdate] = WorkflowEvent.emit(executor_id="test", data=update) # This assignment should pass type checking without a cast - data: AgentResponseUpdate | None = event.data + data: AgentResponseUpdate = event.data assert data is not None + + +def test_workflow_event_repr() -> None: + """Verify WorkflowEvent.__repr__ uses consistent format.""" + response = AgentResponse(messages=[ChatMessage(role="assistant", text="Hello")]) + event: WorkflowEvent[AgentResponse] = WorkflowEvent.emit(executor_id="test", data=response) + + repr_str = repr(event) + assert "WorkflowEvent" in 
repr_str + assert "executor_id='test'" in repr_str + assert "data=" in repr_str diff --git a/python/packages/core/tests/workflow/test_checkpoint_validation.py b/python/packages/core/tests/workflow/test_checkpoint_validation.py index 4313c0cc5e..3139fa302a 100644 --- a/python/packages/core/tests/workflow/test_checkpoint_validation.py +++ b/python/packages/core/tests/workflow/test_checkpoint_validation.py @@ -8,7 +8,6 @@ WorkflowCheckpointException, WorkflowContext, WorkflowRunState, - WorkflowStatusEvent, handler, ) from agent_framework._workflows._checkpoint import InMemoryCheckpointStorage @@ -80,4 +79,4 @@ async def test_resume_succeeds_when_graph_matches() -> None: ) ] - assert any(isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE for event in events) + assert any(event.type == "status" and event.state == WorkflowRunState.IDLE for event in events) diff --git a/python/packages/core/tests/workflow/test_executor.py b/python/packages/core/tests/workflow/test_executor.py index e7c2a31aec..b08bd2be81 100644 --- a/python/packages/core/tests/workflow/test_executor.py +++ b/python/packages/core/tests/workflow/test_executor.py @@ -8,11 +8,10 @@ from agent_framework import ( ChatMessage, Executor, - ExecutorCompletedEvent, - ExecutorInvokedEvent, Message, WorkflowBuilder, WorkflowContext, + WorkflowEvent, executor, handler, response_handler, @@ -139,7 +138,7 @@ async def handle_integer(self, number: int, ctx: WorkflowContext[int]) -> None: async def test_executor_invoked_event_contains_input_data(): - """Test that ExecutorInvokedEvent contains the input message data.""" + """Test that executor_invoked event (type='executor_invoked') contains the input message data.""" class UpperCaseExecutor(Executor): @handler @@ -157,7 +156,7 @@ async def handle(self, text: str, ctx: WorkflowContext) -> None: workflow = WorkflowBuilder().add_edge(upper, collector).set_start_executor(upper).build() events = await workflow.run("hello world") - invoked_events = 
[e for e in events if isinstance(e, ExecutorInvokedEvent)] + invoked_events = [e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_invoked"] assert len(invoked_events) == 2 @@ -171,7 +170,7 @@ async def handle(self, text: str, ctx: WorkflowContext) -> None: async def test_executor_completed_event_contains_sent_messages(): - """Test that ExecutorCompletedEvent contains the messages sent via ctx.send_message().""" + """Test that event (type='executor_completed') contains the messages sent via ctx.send_message().""" class MultiSenderExecutor(Executor): @handler @@ -194,7 +193,7 @@ async def handle(self, text: str, ctx: WorkflowContext) -> None: workflow = WorkflowBuilder().add_edge(sender, collector).set_start_executor(sender).build() events = await workflow.run("hello") - completed_events = [e for e in events if isinstance(e, ExecutorCompletedEvent)] + completed_events = [e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_completed"] # Sender should have completed with the sent messages sender_completed = next(e for e in completed_events if e.executor_id == "sender") @@ -210,9 +209,7 @@ async def handle(self, text: str, ctx: WorkflowContext) -> None: async def test_executor_completed_event_includes_yielded_outputs(): - """Test that ExecutorCompletedEvent.data includes yielded outputs.""" - - from agent_framework import WorkflowOutputEvent + """Test that WorkflowEvent(type='executor_completed').data includes yielded outputs.""" class YieldOnlyExecutor(Executor): @handler @@ -223,15 +220,15 @@ async def handle(self, text: str, ctx: WorkflowContext[Never, str]) -> None: workflow = WorkflowBuilder().set_start_executor(executor).build() events = await workflow.run("test") - completed_events = [e for e in events if isinstance(e, ExecutorCompletedEvent)] + completed_events = [e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_completed"] assert len(completed_events) == 1 assert 
completed_events[0].executor_id == "yielder" - # Yielded outputs are now included in ExecutorCompletedEvent.data + # Yielded outputs are now included in executor_completed event (type='executor_completed').data assert completed_events[0].data == ["TEST"] - # Verify the output was also yielded as WorkflowOutputEvent - output_events = [e for e in events if isinstance(e, WorkflowOutputEvent)] + # Verify the output was also yielded as an output event (type='output') + output_events = [e for e in events if e.type == "output"] assert len(output_events) == 1 assert output_events[0].data == "TEST" @@ -268,8 +265,8 @@ async def handle(self, response: Response, ctx: WorkflowContext) -> None: input_request = Request(query="hello", limit=3) events = await workflow.run(input_request) - invoked_events = [e for e in events if isinstance(e, ExecutorInvokedEvent)] - completed_events = [e for e in events if isinstance(e, ExecutorCompletedEvent)] + invoked_events = [e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_invoked"] + completed_events = [e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_completed"] # Check processor invoked event has the Request object processor_invoked = next(e for e in invoked_events if e.executor_id == "processor") @@ -531,7 +528,7 @@ async def handle_response( async def test_executor_invoked_event_data_not_mutated_by_handler(): - """Test that ExecutorInvokedEvent.data captures original input, not mutated input.""" + """Test that executor_invoked event (type='executor_invoked').data captures original input, not mutated input.""" @executor(id="Mutator") async def mutator(messages: list[ChatMessage], ctx: WorkflowContext[list[ChatMessage]]) -> None: @@ -549,7 +546,7 @@ async def mutator(messages: list[ChatMessage], ctx: WorkflowContext[list[ChatMes events = await workflow.run(input_messages) # Find the invoked event for the Mutator executor - invoked_events = [e for e in events if isinstance(e, 
ExecutorInvokedEvent)] + invoked_events = [e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_invoked"] assert len(invoked_events) == 1 mutator_invoked = invoked_events[0] diff --git a/python/packages/core/tests/workflow/test_full_conversation.py b/python/packages/core/tests/workflow/test_full_conversation.py index 343a9848e2..7ebb9b03d6 100644 --- a/python/packages/core/tests/workflow/test_full_conversation.py +++ b/python/packages/core/tests/workflow/test_full_conversation.py @@ -20,7 +20,6 @@ WorkflowBuilder, WorkflowContext, WorkflowRunState, - WorkflowStatusEvent, handler, ) from agent_framework.orchestrations import SequentialBuilder @@ -149,7 +148,7 @@ async def test_sequential_adapter_uses_full_conversation() -> None: # Act async for ev in wf.run("hello seq", stream=True): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: break # Assert: second agent should have seen the user prompt and A1's assistant reply diff --git a/python/packages/core/tests/workflow/test_request_info_and_response.py b/python/packages/core/tests/workflow/test_request_info_and_response.py index 210cebd340..e545869a86 100644 --- a/python/packages/core/tests/workflow/test_request_info_and_response.py +++ b/python/packages/core/tests/workflow/test_request_info_and_response.py @@ -4,11 +4,10 @@ from agent_framework import ( FileCheckpointStorage, - RequestInfoEvent, WorkflowBuilder, WorkflowContext, + WorkflowEvent, WorkflowRunState, - WorkflowStatusEvent, handler, response_handler, ) @@ -182,9 +181,9 @@ async def test_approval_workflow(self): workflow = WorkflowBuilder().set_start_executor(executor).build() # First run the workflow until it emits a request - request_info_event: RequestInfoEvent | None = None + request_info_event: WorkflowEvent | None = None async for event in workflow.run("test operation", stream=True): - if isinstance(event, RequestInfoEvent): + if 
event.type == "request_info": request_info_event = event assert request_info_event is not None @@ -194,7 +193,7 @@ async def test_approval_workflow(self): # Send response and continue workflow completed = False async for event in workflow.send_responses_streaming({request_info_event.request_id: True}): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: completed = True assert completed @@ -207,9 +206,9 @@ async def test_calculation_workflow(self): workflow = WorkflowBuilder().set_start_executor(executor).build() # First run the workflow until it emits a calculation request - request_info_event: RequestInfoEvent | None = None + request_info_event: WorkflowEvent | None = None async for event in workflow.run("multiply 15.5 2.0", stream=True): - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": request_info_event = event assert request_info_event is not None @@ -221,7 +220,7 @@ async def test_calculation_workflow(self): calculated_result = 31.0 completed = False async for event in workflow.send_responses_streaming({request_info_event.request_id: calculated_result}): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: completed = True assert completed @@ -234,18 +233,18 @@ async def test_multiple_requests_workflow(self): workflow = WorkflowBuilder().set_start_executor(executor).build() # Collect all request events by running the full stream - request_events: list[RequestInfoEvent] = [] + request_events: list[WorkflowEvent] = [] async for event in workflow.run("start batch", stream=True): - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": request_events.append(event) assert len(request_events) == 2 # Find the approval and calculation requests - approval_event: RequestInfoEvent | None = next( + approval_event: 
WorkflowEvent | None = next( (e for e in request_events if isinstance(e.data, UserApprovalRequest)), None ) - calc_event: RequestInfoEvent | None = next( + calc_event: WorkflowEvent | None = next( (e for e in request_events if isinstance(e.data, CalculationRequest)), None ) @@ -256,7 +255,7 @@ async def test_multiple_requests_workflow(self): responses = {approval_event.request_id: True, calc_event.request_id: 50.0} completed = False async for event in workflow.send_responses_streaming(responses): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: completed = True assert completed @@ -268,9 +267,9 @@ async def test_denied_approval_workflow(self): workflow = WorkflowBuilder().set_start_executor(executor).build() # First run the workflow until it emits a request - request_info_event: RequestInfoEvent | None = None + request_info_event: WorkflowEvent | None = None async for event in workflow.run("sensitive operation", stream=True): - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": request_info_event = event assert request_info_event is not None @@ -278,7 +277,7 @@ async def test_denied_approval_workflow(self): # Deny the request completed = False async for event in workflow.send_responses_streaming({request_info_event.request_id: False}): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: completed = True assert completed @@ -291,12 +290,12 @@ async def test_workflow_state_with_pending_requests(self): workflow = WorkflowBuilder().set_start_executor(executor).build() # Run workflow until idle with pending requests - request_info_event: RequestInfoEvent | None = None + request_info_event: WorkflowEvent | None = None idle_with_pending = False async for event in workflow.run("test operation", stream=True): - if isinstance(event, 
RequestInfoEvent): + if event.type == "request_info": request_info_event = event - elif isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE_WITH_PENDING_REQUESTS: + elif event.type == "status" and event.state == WorkflowRunState.IDLE_WITH_PENDING_REQUESTS: idle_with_pending = True assert request_info_event is not None @@ -305,7 +304,7 @@ async def test_workflow_state_with_pending_requests(self): # Continue with response completed = False async for event in workflow.send_responses_streaming({request_info_event.request_id: True}): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: completed = True assert completed @@ -318,7 +317,7 @@ async def test_invalid_calculation_input(self): # Send invalid input (no numbers) completed = False async for event in workflow.run("invalid input", stream=True): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: completed = True assert completed @@ -338,9 +337,9 @@ async def test_checkpoint_with_pending_request_info_events(self): workflow = WorkflowBuilder().set_start_executor(executor).with_checkpointing(storage).build() # Step 1: Run workflow to completion to ensure checkpoints are created - request_info_event: RequestInfoEvent | None = None + request_info_event: WorkflowEvent | None = None async for event in workflow.run("checkpoint test operation", stream=True): - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": request_info_event = event # Verify request was emitted @@ -377,15 +376,12 @@ async def test_checkpoint_with_pending_request_info_events(self): # Step 5: Resume from checkpoint and verify the request can be continued completed = False - restored_request_event: RequestInfoEvent | None = None + restored_request_event: WorkflowEvent | None = None async for event in 
restored_workflow.run(checkpoint_id=checkpoint_with_request.checkpoint_id, stream=True): # Should re-emit the pending request info event - if isinstance(event, RequestInfoEvent) and event.request_id == request_info_event.request_id: + if event.type == "request_info" and event.request_id == request_info_event.request_id: restored_request_event = event - elif ( - isinstance(event, WorkflowStatusEvent) - and event.state == WorkflowRunState.IDLE_WITH_PENDING_REQUESTS - ): + elif event.type == "status" and event.state == WorkflowRunState.IDLE_WITH_PENDING_REQUESTS: completed = True assert completed, "Workflow should reach idle with pending requests state after restoration" @@ -402,7 +398,7 @@ async def test_checkpoint_with_pending_request_info_events(self): async for event in restored_workflow.send_responses_streaming({ request_info_event.request_id: True # Approve the request }): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: final_completed = True assert final_completed, "Workflow should complete after providing response to restored request" diff --git a/python/packages/core/tests/workflow/test_request_info_event_rehydrate.py b/python/packages/core/tests/workflow/test_request_info_event_rehydrate.py index 8442af9445..73b4b938c1 100644 --- a/python/packages/core/tests/workflow/test_request_info_event_rehydrate.py +++ b/python/packages/core/tests/workflow/test_request_info_event_rehydrate.py @@ -9,7 +9,7 @@ from agent_framework import InMemoryCheckpointStorage, InProcRunnerContext from agent_framework._workflows._checkpoint_encoding import DATACLASS_MARKER, encode_checkpoint_value from agent_framework._workflows._checkpoint_summary import get_checkpoint_summary -from agent_framework._workflows._events import RequestInfoEvent +from agent_framework._workflows._events import WorkflowEvent from agent_framework._workflows._state import State @@ -36,7 +36,7 @@ class 
TimedApproval: async def test_rehydrate_request_info_event() -> None: """Rehydration should succeed for valid request info events.""" - request_info_event = RequestInfoEvent( + request_info_event = WorkflowEvent.request_info( request_id="request-123", source_executor_id="review_gateway", request_data=MockRequest(), @@ -69,7 +69,7 @@ async def test_rehydrate_request_info_event() -> None: async def test_rehydrate_fails_when_request_type_missing() -> None: """Rehydration should fail is the request type is missing or fails to import.""" - request_info_event = RequestInfoEvent( + request_info_event = WorkflowEvent.request_info( request_id="request-123", source_executor_id="review_gateway", request_data=MockRequest(), @@ -97,7 +97,7 @@ async def test_rehydrate_fails_when_request_type_missing() -> None: async def test_rehydrate_fails_when_request_type_mismatch() -> None: """Rehydration should fail if the request type is mismatched.""" - request_info_event = RequestInfoEvent( + request_info_event = WorkflowEvent.request_info( request_id="request-123", source_executor_id="review_gateway", request_data=MockRequest(), @@ -127,7 +127,7 @@ async def test_rehydrate_fails_when_request_type_mismatch() -> None: async def test_pending_requests_in_summary() -> None: """Test that pending requests are correctly summarized in the checkpoint summary.""" - request_info_event = RequestInfoEvent( + request_info_event = WorkflowEvent.request_info( request_id="request-123", source_executor_id="review_gateway", request_data=MockRequest(), @@ -148,7 +148,8 @@ async def test_pending_requests_in_summary() -> None: assert len(summary.pending_request_info_events) == 1 pending_event = summary.pending_request_info_events[0] - assert isinstance(pending_event, RequestInfoEvent) + assert isinstance(pending_event, WorkflowEvent) + assert pending_event.type == "request_info" assert pending_event.request_id == "request-123" assert pending_event.source_executor_id == "review_gateway" @@ -158,13 +159,13 @@ 
async def test_pending_requests_in_summary() -> None: async def test_request_info_event_serializes_non_json_payloads() -> None: - req_1 = RequestInfoEvent( + req_1 = WorkflowEvent.request_info( request_id="req-1", source_executor_id="source", request_data=TimedApproval(issued_at=datetime(2024, 5, 4, 12, 30, 45)), response_type=bool, ) - req_2 = RequestInfoEvent( + req_2 = WorkflowEvent.request_info( request_id="req-2", source_executor_id="source", request_data=SlottedApproval(note="slot-based"), diff --git a/python/packages/core/tests/workflow/test_runner.py b/python/packages/core/tests/workflow/test_runner.py index b3c97126c8..7af722e45a 100644 --- a/python/packages/core/tests/workflow/test_runner.py +++ b/python/packages/core/tests/workflow/test_runner.py @@ -12,10 +12,8 @@ WorkflowContext, WorkflowConvergenceException, WorkflowEvent, - WorkflowOutputEvent, WorkflowRunnerException, WorkflowRunState, - WorkflowStatusEvent, handler, ) from agent_framework._workflows._edge import SingleEdgeGroup @@ -97,7 +95,7 @@ async def test_runner_run_until_convergence(): ) async for event in runner.run_until_convergence(): assert isinstance(event, WorkflowEvent) - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": result = event.data assert result is not None and result == 10 @@ -137,7 +135,7 @@ async def test_runner_run_until_convergence_not_completed(): match="Runner did not converge after 5 iterations.", ): async for event in runner.run_until_convergence(): - assert not isinstance(event, WorkflowStatusEvent) or event.state != WorkflowRunState.IDLE + assert event.type != "status" or event.state != WorkflowRunState.IDLE async def test_runner_already_running(): diff --git a/python/packages/core/tests/workflow/test_sub_workflow.py b/python/packages/core/tests/workflow/test_sub_workflow.py index 33333d2906..a06980eba2 100644 --- a/python/packages/core/tests/workflow/test_sub_workflow.py +++ b/python/packages/core/tests/workflow/test_sub_workflow.py @@ -8,12 
+8,12 @@ from agent_framework import ( Executor, - RequestInfoEvent, SubWorkflowRequestMessage, SubWorkflowResponseMessage, Workflow, WorkflowBuilder, WorkflowContext, + WorkflowEvent, WorkflowExecutor, handler, response_handler, @@ -592,7 +592,7 @@ async def test_sub_workflow_checkpoint_restore_no_duplicate_requests() -> None: first_request_id: str | None = None async for event in workflow1.run("test_value", stream=True): - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": first_request_id = event.request_id assert first_request_id is not None @@ -606,15 +606,15 @@ async def test_sub_workflow_checkpoint_restore_no_duplicate_requests() -> None: resumed_first_request_id: str | None = None async for event in workflow2.run(checkpoint_id=checkpoint_id, stream=True): - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": resumed_first_request_id = event.request_id assert resumed_first_request_id is not None assert resumed_first_request_id == first_request_id - request_events: list[RequestInfoEvent] = [] + request_events: list[WorkflowEvent] = [] async for event in workflow2.send_responses_streaming({resumed_first_request_id: "first_answer"}): - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": request_events.append(event) # Key assertion: Only the second request should be received, not a duplicate of the first diff --git a/python/packages/core/tests/workflow/test_typing_utils.py b/python/packages/core/tests/workflow/test_typing_utils.py index 3e8d1051e7..19973276f5 100644 --- a/python/packages/core/tests/workflow/test_typing_utils.py +++ b/python/packages/core/tests/workflow/test_typing_utils.py @@ -5,7 +5,7 @@ import pytest -from agent_framework import RequestInfoEvent +from agent_framework import WorkflowEvent from agent_framework._workflows._typing_utils import ( deserialize_type, is_instance_of, @@ -308,18 +308,19 @@ def test_serialize_deserialize_roundtrip() -> None: # Test agent framework type 
roundtrip - serialized = serialize_type(RequestInfoEvent) + serialized = serialize_type(WorkflowEvent) deserialized = deserialize_type(serialized) - assert deserialized is RequestInfoEvent + assert deserialized is WorkflowEvent - # Verify we can instantiate the deserialized type - instance = deserialized( + # Verify we can instantiate the deserialized type via factory method + instance = WorkflowEvent.request_info( request_id="request-123", source_executor_id="executor_1", request_data="test", response_type=str, ) - assert isinstance(instance, RequestInfoEvent) + assert isinstance(instance, WorkflowEvent) + assert instance.type == "request_info" def test_deserialize_type_error_handling() -> None: diff --git a/python/packages/core/tests/workflow/test_workflow.py b/python/packages/core/tests/workflow/test_workflow.py index 314fad89a0..1ab77096ac 100644 --- a/python/packages/core/tests/workflow/test_workflow.py +++ b/python/packages/core/tests/workflow/test_workflow.py @@ -20,16 +20,13 @@ Executor, FileCheckpointStorage, Message, - RequestInfoEvent, ResponseStream, WorkflowBuilder, WorkflowCheckpointException, WorkflowContext, WorkflowConvergenceException, WorkflowEvent, - WorkflowOutputEvent, WorkflowRunState, - WorkflowStatusEvent, handler, response_handler, ) @@ -123,7 +120,7 @@ async def test_workflow_run_streaming() -> None: result: int | None = None async for event in workflow.run(NumberMessage(data=0), stream=True): assert isinstance(event, WorkflowEvent) - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": result = event.data assert result is not None and result == 10 @@ -197,9 +194,10 @@ async def test_fan_out(): events = await workflow.run(NumberMessage(data=0)) - # Each executor will emit two events: ExecutorInvokedEvent and ExecutorCompletedEvent - # executor_b will also emit a WorkflowOutputEvent (no WorkflowCompletedEvent anymore) - # Each superstep will emit also emit a WorkflowStartedEvent and WorkflowCompletedEvent + # Each 
executor will emit two events: executor_invoked (type='executor_invoked') + # and executor_completed (type='executor_completed') + # executor_b will also emit an output event (type='output') + # Each superstep will emit a started event (type='started') and status event (type='status') # This workflow will converge in 2 supersteps because executor_c will send one more message # after executor_b completes assert len(events) == 11 @@ -221,9 +219,10 @@ async def test_fan_out_multiple_completed_events(): events = await workflow.run(NumberMessage(data=0)) - # Each executor will emit two events: ExecutorInvokedEvent and ExecutorCompletedEvent - # executor_b and executor_c will also emit a WorkflowOutputEvent (no WorkflowCompletedEvent anymore) - # Each superstep will emit also emit a WorkflowStartedEvent and WorkflowCompletedEvent + # Each executor will emit two events: executor_invoked (type='executor_invoked') + # and executor_completed (type='executor_completed') + # executor_b and executor_c will also emit an output event (type='output') + # Each superstep will emit a started event (type='started') and status event (type='status') # This workflow will converge in 1 superstep because executor_a and executor_b will not send further messages assert len(events) == 10 @@ -249,9 +248,10 @@ async def test_fan_in(): events = await workflow.run(NumberMessage(data=0)) - # Each executor will emit two events: ExecutorInvokedEvent and ExecutorCompletedEvent - # aggregator will also emit a WorkflowOutputEvent (no WorkflowCompletedEvent anymore) - # Each superstep will emit also emit a WorkflowStartedEvent and WorkflowCompletedEvent + # Each executor will emit two events: executor_invoked (type='executor_invoked') + # and executor_completed (type='executor_completed') + # aggregator will also emit an output event (type='output') + # Each superstep will emit a started event (type='started') and status event (type='status') assert len(events) == 13 assert events.get_final_state() == 
WorkflowRunState.IDLE @@ -427,7 +427,7 @@ async def test_workflow_run_from_checkpoint_non_streaming(simple_executor: Execu async def test_workflow_run_stream_from_checkpoint_with_responses( simple_executor: Executor, ): - """Test that workflow can be resumed from checkpoint with pending RequestInfoEvents.""" + """Test that workflow can be resumed from checkpoint with pending request_info events.""" with tempfile.TemporaryDirectory() as temp_dir: storage = FileCheckpointStorage(temp_dir) @@ -439,7 +439,7 @@ async def test_workflow_run_stream_from_checkpoint_with_responses( messages={}, state={}, pending_request_info_events={ - "request_123": RequestInfoEvent( + "request_123": WorkflowEvent.request_info( request_id="request_123", source_executor_id=simple_executor.id, request_data="Mock", @@ -465,9 +465,7 @@ async def test_workflow_run_stream_from_checkpoint_with_responses( events.append(event) # Verify that the pending request event was emitted - assert next( - event for event in events if isinstance(event, RequestInfoEvent) and event.request_id == "request_123" - ) + assert next(event for event in events if event.type == "request_info" and event.request_id == "request_123") assert len(events) > 0 # Just ensure we processed some events @@ -730,10 +728,12 @@ async def test_workflow_with_simple_cycle_and_exit_condition(): assert outputs[0] is not None and outputs[0] >= 6 # Should complete when executor_a reaches its limit # Verify cycling occurred (should have events from both executors) - # Check for ExecutorInvokedEvent and ExecutorCompletedEvent types that have executor_id - from agent_framework import ExecutorCompletedEvent, ExecutorInvokedEvent + # Check for executor events that have executor_id + from agent_framework import WorkflowEvent - executor_events = [e for e in events if isinstance(e, (ExecutorInvokedEvent, ExecutorCompletedEvent))] + executor_events = [ + e for e in events if isinstance(e, WorkflowEvent) and e.type in ("executor_invoked", 
"executor_completed") + ] executor_ids = {e.executor_id for e in executor_events} assert "exec_a" in executor_ids, "Should have events from executor A" assert "exec_b" in executor_ids, "Should have events from executor B" @@ -880,7 +880,7 @@ async def _run() -> AgentResponse: async def test_agent_streaming_vs_non_streaming() -> None: - """Test that stream=True/False both emits WorkflowOutputEvents correctly with the right data types.""" + """Test that stream=True/False both emit output events (type='output') with the right data types.""" agent = _StreamingTestAgent(id="test_agent", name="TestAgent", reply_text="Hello World") agent_exec = AgentExecutor(agent, id="agent_exec") @@ -890,17 +890,15 @@ async def test_agent_streaming_vs_non_streaming() -> None: result = await workflow.run("test message") # Filter for agent events (result is a list of events) - agent_response = [e for e in result if isinstance(e, WorkflowOutputEvent) and isinstance(e.data, AgentResponse)] - agent_response_updates = [ - e for e in result if isinstance(e, WorkflowOutputEvent) and isinstance(e.data, AgentResponseUpdate) - ] + agent_run_events = [e for e in result if e.type == "output" and isinstance(e.data, AgentResponse)] + agent_update_events = [e for e in result if e.type == "output" and isinstance(e.data, AgentResponseUpdate)] - # In non-streaming mode, should have AgentResponse, no AgentResponseUpdate - assert len(agent_response) == 1, "Expected exactly one AgentResponse in non-streaming mode" - assert len(agent_response_updates) == 0, "Expected no AgentResponseUpdate in non-streaming mode" - assert agent_response[0].executor_id == "agent_exec" - assert agent_response[0].data is not None - assert agent_response[0].data.messages[0].text == "Hello World" + # In non-streaming mode, should have output event with AgentResponse, no AgentResponseUpdate + assert len(agent_run_events) == 1, "Expected exactly one output event with AgentResponse in non-streaming mode" + assert 
len(agent_update_events) == 0, "Expected no output event with AgentResponseUpdate in non-streaming mode" + assert agent_run_events[0].executor_id == "agent_exec" + assert agent_run_events[0].data is not None + assert agent_run_events[0].data.messages[0].text == "Hello World" # Test streaming mode with run(stream=True) stream_events: list[WorkflowEvent] = [] @@ -909,12 +907,10 @@ async def test_agent_streaming_vs_non_streaming() -> None: # Filter for agent events agent_response = [ - cast(AgentResponse, e.data) # type: ignore - for e in stream_events - if isinstance(e, WorkflowOutputEvent) and isinstance(e.data, AgentResponse) + cast(AgentResponse, e.data) for e in stream_events if e.type == "output" and isinstance(e.data, AgentResponse) ] agent_response_updates = [ - e.data for e in stream_events if isinstance(e, WorkflowOutputEvent) and isinstance(e.data, AgentResponseUpdate) + e.data for e in stream_events if e.type == "output" and isinstance(e.data, AgentResponseUpdate) ] # In streaming mode, should have AgentResponseUpdate, no AgentResponse @@ -977,7 +973,7 @@ async def test_workflow_run_stream_parameter_validation( events: list[WorkflowEvent] = [] async for event in workflow.run(test_message, stream=True): events.append(event) - assert any(isinstance(e, WorkflowStatusEvent) and e.state == WorkflowRunState.IDLE for e in events) + assert any(e.type == "status" and e.state == WorkflowRunState.IDLE for e in events) # Invalid combinations already tested in test_workflow_run_parameter_validation # This test ensures streaming works correctly for valid parameters @@ -1027,7 +1023,7 @@ async def test_output_executors_empty_yields_all_outputs() -> None: assert len(outputs) == 2 assert outputs == [10, 20] - output_events = [event for event in result if isinstance(event, WorkflowOutputEvent)] + output_events = [event for event in result if event.type == "output"] assert len(output_events) == 2 assert output_events[0].executor_id == "executor_a" assert 
output_events[1].executor_id == "executor_b" @@ -1055,7 +1051,7 @@ async def test_output_executors_filters_outputs_non_streaming() -> None: assert len(outputs) == 1 assert outputs[0] == 20 - output_events = [event for event in result if isinstance(event, WorkflowOutputEvent)] + output_events = [event for event in result if event.type == "output"] assert len(output_events) == 1 assert output_events[0].executor_id == "executor_b" @@ -1076,9 +1072,9 @@ async def test_output_executors_filters_outputs_streaming() -> None: ) # Collect outputs from streaming - output_events: list[WorkflowOutputEvent] = [] + output_events: list[WorkflowEvent] = [] async for event in workflow.run(NumberMessage(data=0), stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": output_events.append(event) # Only executor_a's output should be present @@ -1213,7 +1209,7 @@ async def test_output_executors_filtering_with_send_responses_streaming() -> Non events_list.append(event) # Get request info events - request_events = [e for e in events_list if isinstance(e, RequestInfoEvent)] + request_events = [e for e in events_list if e.type == "request_info"] assert len(request_events) == 1 # Set output_executors to exclude the approval executor @@ -1221,9 +1217,9 @@ async def test_output_executors_filtering_with_send_responses_streaming() -> Non # Send approval response via streaming responses = {request_events[0].request_id: ApprovalMessage(approved=True)} - output_events: list[WorkflowOutputEvent] = [] + output_events: list[WorkflowEvent] = [] async for event in workflow.send_responses_streaming(responses): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": output_events.append(event) # No outputs should be yielded since approval_executor is not in output_executors diff --git a/python/packages/core/tests/workflow/test_workflow_agent.py b/python/packages/core/tests/workflow/test_workflow_agent.py index 4a0cf60955..b067cb5841 100644 --- 
a/python/packages/core/tests/workflow/test_workflow_agent.py +++ b/python/packages/core/tests/workflow/test_workflow_agent.py @@ -218,7 +218,7 @@ async def test_end_to_end_basic_workflow_streaming(self): assert "Streaming2: Streaming1: Test input" in second_content.text async def test_end_to_end_request_info_handling(self): - """Test end-to-end workflow with RequestInfoEvent handling.""" + """Test end-to-end workflow with request_info event (type='request_info') handling.""" # Create workflow with requesting executor -> request info executor (no cycle) simple_executor = SimpleExecutor(id="simple", response_text="SimpleResponse", streaming=False) requesting_executor = RequestingExecutor(id="requester", streaming=False) @@ -331,7 +331,7 @@ async def handle_bool(self, message: bool, context: WorkflowContext[Any]) -> Non async def test_workflow_as_agent_yield_output_surfaces_as_agent_response(self) -> None: """Test that ctx.yield_output() in a workflow executor surfaces as agent output when using .as_agent(). - This validates the fix for issue #2813: WorkflowOutputEvent should be converted to + This validates the fix for issue #2813: output event (type='output') should be converted to AgentResponseUpdate when the workflow is wrapped via .as_agent(). 
""" @@ -343,7 +343,7 @@ async def yielding_executor(messages: list[ChatMessage], ctx: WorkflowContext[Ne workflow = WorkflowBuilder().set_start_executor(yielding_executor).build() - # Run directly - should return WorkflowOutputEvent in result + # Run directly - should return output event (type='output') in result direct_result = await workflow.run([ChatMessage(role="user", text="hello")]) direct_outputs = direct_result.get_outputs() assert len(direct_outputs) == 1 @@ -779,7 +779,7 @@ async def start_executor(messages: list[ChatMessage], ctx: WorkflowContext[Agent # Count occurrences of the unique response text unique_text_count = sum(1 for msg in result.messages if msg.text and "Unique response text" in msg.text) - # Should appear exactly once (not duplicated from both streaming and WorkflowOutputEvent) + # Should appear exactly once (not duplicated from both streaming and output event) assert unique_text_count == 1, f"Response should appear exactly once, but appeared {unique_text_count} times" @@ -793,7 +793,7 @@ async def test_agent_response_update_gets_executor_id_as_author_name(self): identification of which agent produced them in multi-agent workflows. 
""" # Create workflow with executor that emits AgentResponseUpdate without author_name - executor1 = SimpleExecutor(id="my_executor_id", response_text="Response") + executor1 = SimpleExecutor(id="my_executor_id", response_text="Response", streaming=True) workflow = WorkflowBuilder().set_start_executor(executor1).build() agent = WorkflowAgent(workflow=workflow, name="Test Agent") diff --git a/python/packages/core/tests/workflow/test_workflow_context.py b/python/packages/core/tests/workflow/test_workflow_context.py index e3fafc4144..03aa1d78d9 100644 --- a/python/packages/core/tests/workflow/test_workflow_context.py +++ b/python/packages/core/tests/workflow/test_workflow_context.py @@ -13,7 +13,6 @@ WorkflowContext, WorkflowEvent, WorkflowRunState, - WorkflowStatusEvent, executor, handler, ) @@ -62,15 +61,15 @@ async def test_executor_cannot_emit_framework_lifecycle_event(caplog: "LogCaptur async with make_context() as (ctx, runner_ctx): caplog.clear() with caplog.at_level("WARNING"): - await ctx.add_event(WorkflowStatusEvent(state=WorkflowRunState.IN_PROGRESS)) + await ctx.add_event(WorkflowEvent.status(state=WorkflowRunState.IN_PROGRESS)) events: list[WorkflowEvent] = await runner_ctx.drain_events() assert len(events) == 1 - assert type(events[0]).__name__ == "WorkflowWarningEvent" - data = getattr(events[0], "data", None) + assert events[0].type == "warning" + data = events[0].data assert isinstance(data, str) assert "reserved for framework lifecycle notifications" in data - assert any("attempted to emit WorkflowStatusEvent" in message for message in list(caplog.messages)) + assert any("attempted to emit" in message and "'status'" in message for message in list(caplog.messages)) async def test_executor_emits_normal_event() -> None: @@ -84,7 +83,8 @@ async def test_executor_emits_normal_event() -> None: class _TestEvent(WorkflowEvent): - pass + def __init__(self, data: Any = None) -> None: + super().__init__("test_event", data=data) async def 
test_workflow_context_type_annotations_no_parameter() -> None: diff --git a/python/packages/core/tests/workflow/test_workflow_kwargs.py b/python/packages/core/tests/workflow/test_workflow_kwargs.py index 99d9de5b32..e35430f453 100644 --- a/python/packages/core/tests/workflow/test_workflow_kwargs.py +++ b/python/packages/core/tests/workflow/test_workflow_kwargs.py @@ -14,7 +14,6 @@ Content, ResponseStream, WorkflowRunState, - WorkflowStatusEvent, tool, ) from agent_framework._workflows._const import WORKFLOW_RUN_KWARGS_KEY @@ -90,7 +89,7 @@ async def test_sequential_kwargs_flow_to_agent() -> None: custom_data=custom_data, user_token=user_token, ): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: break # Verify agent received kwargs @@ -111,7 +110,7 @@ async def test_sequential_kwargs_flow_to_multiple_agents() -> None: custom_data = {"key": "value"} async for event in workflow.run("test", custom_data=custom_data, stream=True): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: break # Both agents should have received kwargs @@ -153,7 +152,7 @@ async def test_concurrent_kwargs_flow_to_agents() -> None: custom_data=custom_data, user_token=user_token, ): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: break # Both agents should have received kwargs @@ -200,7 +199,7 @@ def simple_selector(state: GroupChatState) -> str: custom_data = {"session_id": "group123"} async for event in workflow.run("group chat test", custom_data=custom_data, stream=True): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: break # At least one agent should have received kwargs @@ 
-234,7 +233,7 @@ async def inspect(self, msgs: list[ChatMessage], ctx: WorkflowContext[list[ChatM workflow = SequentialBuilder().participants([inspector]).build() async for event in workflow.run("test", my_kwarg="my_value", another=123, stream=True): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: break assert stored_kwargs is not None, "kwargs should be stored in State" @@ -260,7 +259,7 @@ async def check(self, msgs: list[ChatMessage], ctx: WorkflowContext[list[ChatMes # Run without any kwargs async for event in workflow.run("test", stream=True): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: break # State should have empty dict when no kwargs provided @@ -279,7 +278,7 @@ async def test_kwargs_with_none_values() -> None: workflow = SequentialBuilder().participants([agent]).build() async for event in workflow.run("test", optional_param=None, other_param="value", stream=True): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: break assert len(agent.captured_kwargs) >= 1 @@ -306,7 +305,7 @@ async def test_kwargs_with_complex_nested_data() -> None: } async for event in workflow.run("test", complex_data=complex_data, stream=True): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: break assert len(agent.captured_kwargs) >= 1 @@ -324,12 +323,12 @@ async def test_kwargs_preserved_across_workflow_reruns() -> None: # First run async for event in workflow1.run("run1", run_id="first", stream=True): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: 
break # Second run with different kwargs (using fresh workflow) async for event in workflow2.run("run2", run_id="second", stream=True): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: break assert len(agent.captured_kwargs) >= 2 @@ -361,7 +360,7 @@ async def test_handoff_kwargs_flow_to_agents() -> None: custom_data = {"session_id": "handoff123"} async for event in workflow.run("handoff test", custom_data=custom_data, stream=True): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: break # Coordinator agent should have received kwargs @@ -419,7 +418,7 @@ async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatM custom_data = {"session_id": "magentic123"} async for event in workflow.run("magentic test", custom_data=custom_data, stream=True): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: break # The workflow completes immediately via prepare_final_answer without invoking agents @@ -470,7 +469,7 @@ async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatM custom_data = {"magentic_key": "magentic_value"} async for event in magentic_workflow.run("test task", custom_data=custom_data, stream=True): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: break # Verify the workflow completed (kwargs were stored, even if agent wasn't invoked) @@ -626,7 +625,7 @@ async def test_subworkflow_kwargs_propagation() -> None: custom_data=custom_data, user_token=user_token, ): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == 
WorkflowRunState.IDLE: break # Verify that the inner agent was called @@ -686,7 +685,7 @@ async def read_kwargs(self, msgs: list[ChatMessage], ctx: WorkflowContext[list[C my_custom_kwarg="should_be_propagated", another_kwarg=42, ): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: break # Verify the state reader was invoked @@ -732,7 +731,7 @@ async def test_nested_subworkflow_kwargs_propagation() -> None: stream=True, deep_kwarg="should_reach_inner", ): - if isinstance(event, WorkflowStatusEvent) and event.state == WorkflowRunState.IDLE: + if event.type == "status" and event.state == WorkflowRunState.IDLE: break # Verify inner agent was called diff --git a/python/packages/core/tests/workflow/test_workflow_states.py b/python/packages/core/tests/workflow/test_workflow_states.py index 81ead39ec8..90b4a8dd58 100644 --- a/python/packages/core/tests/workflow/test_workflow_states.py +++ b/python/packages/core/tests/workflow/test_workflow_states.py @@ -5,18 +5,14 @@ from agent_framework import ( Executor, - ExecutorFailedEvent, InProcRunnerContext, - RequestInfoEvent, Workflow, WorkflowBuilder, WorkflowContext, + WorkflowEvent, WorkflowEventSource, - WorkflowFailedEvent, WorkflowRunResult, WorkflowRunState, - WorkflowStartedEvent, - WorkflowStatusEvent, handler, ) from agent_framework._workflows._state import State @@ -39,24 +35,26 @@ async def test_executor_failed_and_workflow_failed_events_streaming(): async for ev in wf.run(0, stream=True): events.append(ev) - # ExecutorFailedEvent should be emitted before WorkflowFailedEvent - executor_failed_events = [e for e in events if isinstance(e, ExecutorFailedEvent)] - assert executor_failed_events, "ExecutorFailedEvent should be emitted when start executor fails" + # executor_failed event (type='executor_failed') should be emitted before workflow failed event + executor_failed_events = [e for e in events if isinstance(e, 
WorkflowEvent) and e.type == "executor_failed"] + assert executor_failed_events, "executor_failed event should be emitted when start executor fails" assert executor_failed_events[0].executor_id == "f" assert executor_failed_events[0].origin is WorkflowEventSource.FRAMEWORK # Workflow-level failure and FAILED status should be surfaced - failed_events = [e for e in events if isinstance(e, WorkflowFailedEvent)] + failed_events = [e for e in events if isinstance(e, WorkflowEvent) and e.type == "failed"] assert failed_events assert all(e.origin is WorkflowEventSource.FRAMEWORK for e in failed_events) - status = [e for e in events if isinstance(e, WorkflowStatusEvent)] + status = [e for e in events if isinstance(e, WorkflowEvent) and e.type == "status"] assert status and status[-1].state == WorkflowRunState.FAILED assert all(e.origin is WorkflowEventSource.FRAMEWORK for e in status) - # Verify ExecutorFailedEvent comes before WorkflowFailedEvent + # Verify executor_failed event comes before workflow failed event executor_failed_idx = events.index(executor_failed_events[0]) workflow_failed_idx = events.index(failed_events[0]) - assert executor_failed_idx < workflow_failed_idx, "ExecutorFailedEvent should be emitted before WorkflowFailedEvent" + assert executor_failed_idx < workflow_failed_idx, ( + "executor_failed event should be emitted before workflow failed event" + ) async def test_executor_failed_event_emitted_on_direct_execute(): @@ -71,7 +69,7 @@ async def test_executor_failed_event_emitted_on_direct_execute(): ctx, ) drained = await ctx.drain_events() - failed = [e for e in drained if isinstance(e, ExecutorFailedEvent)] + failed = [e for e in drained if isinstance(e, WorkflowEvent) and e.type == "executor_failed"] assert failed assert all(e.origin is WorkflowEventSource.FRAMEWORK for e in failed) @@ -85,7 +83,7 @@ async def passthrough(self, msg: int, ctx: WorkflowContext[int]) -> None: async def test_executor_failed_event_from_second_executor_in_chain(): - 
"""Test that ExecutorFailedEvent is emitted when a non-start executor fails.""" + """Test that executor_failed event is emitted when a non-start executor fails.""" passthrough = PassthroughExecutor(id="passthrough") failing = FailingExecutor(id="failing") wf: Workflow = WorkflowBuilder().set_start_executor(passthrough).add_edge(passthrough, failing).build() @@ -95,21 +93,23 @@ async def test_executor_failed_event_from_second_executor_in_chain(): async for ev in wf.run(0, stream=True): events.append(ev) - # ExecutorFailedEvent should be emitted for the failing executor - executor_failed_events = [e for e in events if isinstance(e, ExecutorFailedEvent)] - assert executor_failed_events, "ExecutorFailedEvent should be emitted when second executor fails" + # executor_failed event should be emitted for the failing executor + executor_failed_events = [e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_failed"] + assert executor_failed_events, "executor_failed event should be emitted when second executor fails" assert executor_failed_events[0].executor_id == "failing" assert executor_failed_events[0].origin is WorkflowEventSource.FRAMEWORK # Workflow-level failure should also be surfaced - failed_events = [e for e in events if isinstance(e, WorkflowFailedEvent)] + failed_events = [e for e in events if isinstance(e, WorkflowEvent) and e.type == "failed"] assert failed_events assert all(e.origin is WorkflowEventSource.FRAMEWORK for e in failed_events) - # Verify ExecutorFailedEvent comes before WorkflowFailedEvent + # Verify executor_failed event comes before workflow failed event executor_failed_idx = events.index(executor_failed_events[0]) workflow_failed_idx = events.index(failed_events[0]) - assert executor_failed_idx < workflow_failed_idx, "ExecutorFailedEvent should be emitted before WorkflowFailedEvent" + assert executor_failed_idx < workflow_failed_idx, ( + "executor_failed event should be emitted before workflow failed event" + ) class 
SimpleExecutor(Executor): @@ -136,8 +136,8 @@ async def test_idle_with_pending_requests_status_streaming(): events = [ev async for ev in wf.run("start", stream=True)] # Consume stream fully # Ensure a request was emitted - assert any(isinstance(e, RequestInfoEvent) for e in events) - status_events = [e for e in events if isinstance(e, WorkflowStatusEvent)] + assert any(isinstance(e, WorkflowEvent) and e.type == "request_info" for e in events) + status_events = [e for e in events if isinstance(e, WorkflowEvent) and e.type == "status"] assert len(status_events) >= 3 assert status_events[-2].state == WorkflowRunState.IN_PROGRESS_PENDING_REQUESTS assert status_events[-1].state == WorkflowRunState.IDLE_WITH_PENDING_REQUESTS @@ -156,7 +156,7 @@ async def test_completed_status_streaming(): wf = WorkflowBuilder().set_start_executor(c).build() events = [ev async for ev in wf.run("ok", stream=True)] # no raise # Last status should be IDLE - status = [e for e in events if isinstance(e, WorkflowStatusEvent)] + status = [e for e in events if isinstance(e, WorkflowEvent) and e.type == "status"] assert status and status[-1].state == WorkflowRunState.IDLE assert all(e.origin is WorkflowEventSource.FRAMEWORK for e in status) @@ -166,12 +166,13 @@ async def test_started_and_completed_event_origins(): wf = WorkflowBuilder().set_start_executor(c).build() events = [ev async for ev in wf.run("payload", stream=True)] - started = next(e for e in events if isinstance(e, WorkflowStartedEvent)) + started = next(e for e in events if isinstance(e, WorkflowEvent) and e.type == "started") assert started.origin is WorkflowEventSource.FRAMEWORK # Check for IDLE status indicating completion idle_status = next( - (e for e in events if isinstance(e, WorkflowStatusEvent) and e.state == WorkflowRunState.IDLE), None + (e for e in events if isinstance(e, WorkflowEvent) and e.type == "status" and e.state == WorkflowRunState.IDLE), + None, ) assert idle_status is not None assert idle_status.origin is 
WorkflowEventSource.FRAMEWORK diff --git a/python/packages/declarative/tests/test_workflow_factory.py b/python/packages/declarative/tests/test_workflow_factory.py index 8bad4651f0..04bd57587b 100644 --- a/python/packages/declarative/tests/test_workflow_factory.py +++ b/python/packages/declarative/tests/test_workflow_factory.py @@ -145,7 +145,7 @@ async def test_execute_if_workflow(self): result = await workflow.run({}) outputs = result.get_outputs() - # Check for the expected text in WorkflowOutputEvent + # Check for the expected text in output event (type='output') _text_outputs = [str(o) for o in outputs if isinstance(o, str) or hasattr(o, "data")] # noqa: F841 assert any("Condition was true" in str(o) for o in outputs) diff --git a/python/packages/devui/README.md b/python/packages/devui/README.md index 520b03e56f..fb14469905 100644 --- a/python/packages/devui/README.md +++ b/python/packages/devui/README.md @@ -249,9 +249,9 @@ Given that DevUI offers an OpenAI Responses API, it internally maps messages and | `response.created` + `response.in_progress` | `AgentStartedEvent` | OpenAI | | `response.completed` | `AgentCompletedEvent` | OpenAI | | `response.failed` | `AgentFailedEvent` | OpenAI | -| `response.created` + `response.in_progress` | `WorkflowStartedEvent` | OpenAI | -| `response.completed` | `WorkflowCompletedEvent` | OpenAI | -| `response.failed` | `WorkflowFailedEvent` | OpenAI | +| `response.created` + `response.in_progress` | `WorkflowEvent (type='started')` | OpenAI | +| `response.completed` | `WorkflowEvent (type='status')` | OpenAI | +| `response.failed` | `WorkflowEvent (type='failed')` | OpenAI | | | **Content Types** | | | `response.content_part.added` + `response.output_text.delta` | `TextContent` | OpenAI | | `response.reasoning_text.delta` | `TextReasoningContent` | OpenAI | @@ -267,13 +267,13 @@ Given that DevUI offers an OpenAI Responses API, it internally maps messages and | `error` | `ErrorContent` | OpenAI | | Final `Response.usage` field 
(not streamed) | `UsageContent` | OpenAI | | | **Workflow Events** | | -| `response.output_item.added` (ExecutorActionItem)* | `ExecutorInvokedEvent` | OpenAI | -| `response.output_item.done` (ExecutorActionItem)* | `ExecutorCompletedEvent` | OpenAI | -| `response.output_item.done` (ExecutorActionItem with error)* | `ExecutorFailedEvent` | OpenAI | -| `response.output_item.added` (ResponseOutputMessage) | `WorkflowOutputEvent` | OpenAI | -| `response.workflow_event.complete` | `WorkflowEvent` (other) | DevUI | -| `response.trace.complete` | `WorkflowStatusEvent` | DevUI | -| `response.trace.complete` | `WorkflowWarningEvent` | DevUI | +| `response.output_item.added` (ExecutorActionItem)* | `WorkflowEvent (type='executor_invoked')` | OpenAI | +| `response.output_item.done` (ExecutorActionItem)* | `WorkflowEvent (type='executor_completed')` | OpenAI | +| `response.output_item.done` (ExecutorActionItem with error)* | `WorkflowEvent (type='executor_failed')` | OpenAI | +| `response.output_item.added` (ResponseOutputMessage) | `WorkflowEvent (type='output')` | OpenAI | +| `response.workflow_event.complete` | `WorkflowEvent` (other types) | DevUI | +| `response.trace.complete` | `WorkflowEvent (type='status')` | DevUI | +| `response.trace.complete` | `WorkflowEvent (type='warning')` | DevUI | | | **Trace Content** | | | `response.trace.complete` | `DataContent` (no data/errors) | DevUI | | `response.trace.complete` | `UriContent` (unsupported MIME) | DevUI | diff --git a/python/packages/devui/agent_framework_devui/_executor.py b/python/packages/devui/agent_framework_devui/_executor.py index ca06a6a951..7f395023b6 100644 --- a/python/packages/devui/agent_framework_devui/_executor.py +++ b/python/packages/devui/agent_framework_devui/_executor.py @@ -7,8 +7,7 @@ from collections.abc import AsyncGenerator from typing import Any -from agent_framework import AgentProtocol, Content -from agent_framework._workflows._events import RequestInfoEvent +from agent_framework import 
AgentProtocol, Content, Workflow from ._conversations import ConversationStore, InMemoryConversationStore from ._discovery import EntityDiscovery @@ -262,10 +261,11 @@ async def execute_entity(self, entity_id: str, request: AgentFrameworkRequest) - yield event elif entity_info.type == "workflow": async for event in self._execute_workflow(entity_obj, request, trace_collector): - # Log RequestInfoEvent for debugging HIL flow - event_class = event.__class__.__name__ if hasattr(event, "__class__") else type(event).__name__ - if event_class == "RequestInfoEvent": - logger.info("šŸ”” [EXECUTOR] RequestInfoEvent detected from workflow!") + # Log request_info event (type='request_info') for debugging HIL flow + if event.type == "request_info": + logger.info( + "šŸ”” [EXECUTOR] request_info event (type='request_info') detected from workflow!" + ) logger.info(f" request_id: {getattr(event, 'request_id', 'N/A')}") logger.info(f" source_executor_id: {getattr(event, 'source_executor_id', 'N/A')}") logger.info(f" request_type: {getattr(event, 'request_type', 'N/A')}") @@ -360,7 +360,7 @@ async def _execute_agent( yield {"type": "error", "message": f"Agent execution error: {e!s}"} async def _execute_workflow( - self, workflow: Any, request: AgentFrameworkRequest, trace_collector: Any + self, workflow: Workflow, request: AgentFrameworkRequest, trace_collector: Any ) -> AsyncGenerator[Any, None]: """Execute Agent Framework workflow with checkpoint support via conversation items. 
@@ -515,8 +515,9 @@ async def _execute_workflow( logger.warning(f"Could not convert HIL responses to proper types: {e}") async for event in workflow.send_responses_streaming(hil_responses): - # Enrich new RequestInfoEvents that may come from subsequent HIL requests - if isinstance(event, RequestInfoEvent): + # Enrich new request_info events (type='request_info') + # that may come from subsequent HIL requests + if event.type == "request_info": self._enrich_request_info_event_with_response_schema(event, workflow) for trace_event in trace_collector.get_pending_events(): @@ -538,7 +539,7 @@ async def _execute_workflow( checkpoint_id=checkpoint_id, checkpoint_storage=checkpoint_storage, ): - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": self._enrich_request_info_event_with_response_schema(event, workflow) for trace_event in trace_collector.get_pending_events(): @@ -546,7 +547,7 @@ async def _execute_workflow( yield event - # Note: Removed break on RequestInfoEvent - continue yielding all events + # Note: Removed break on request_info event (type='request_info') - continue yielding all events # The workflow is already paused by ctx.request_info() in the framework # DevUI should continue yielding events even during HIL pause @@ -562,7 +563,7 @@ async def _execute_workflow( parsed_input = await self._parse_workflow_input(workflow, request.input) async for event in workflow.run(parsed_input, stream=True, checkpoint_storage=checkpoint_storage): - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": self._enrich_request_info_event_with_response_schema(event, workflow) for trace_event in trace_collector.get_pending_events(): @@ -570,7 +571,7 @@ async def _execute_workflow( yield event - # Note: Removed break on RequestInfoEvent - continue yielding all events + # Note: Removed break on request_info event (type='request_info') - continue yielding all events # The workflow is already paused by ctx.request_info() in the framework 
# DevUI should continue yielding events even during HIL pause @@ -1015,10 +1016,12 @@ def _parse_raw_workflow_input(self, workflow: Any, raw_input: str) -> Any: return raw_input def _enrich_request_info_event_with_response_schema(self, event: Any, workflow: Any) -> None: - """Extract response type from workflow executor and attach response schema to RequestInfoEvent. + """Extract response type from workflow executor. + + Attach response schema to request_info event (type='request_info'). Args: - event: RequestInfoEvent to enrich + event: request_info event (type='request_info') to enrich workflow: Workflow object containing executors """ try: @@ -1029,7 +1032,7 @@ def _enrich_request_info_event_with_response_schema(self, event: Any, workflow: request_type = getattr(event, "request_type", None) if not source_executor_id or not request_type: - logger.debug("RequestInfoEvent missing source_executor_id or request_type") + logger.debug("request_info event (type='request_info') missing source_executor_id or request_type") return # Find the source executor in the workflow @@ -1062,4 +1065,4 @@ def _enrich_request_info_event_with_response_schema(self, event: Any, workflow: event._response_schema = response_schema except Exception as e: - logger.warning(f"Failed to enrich RequestInfoEvent with response schema: {e}") + logger.warning(f"Failed to enrich request_info event (type='request_info') with response schema: {e}") diff --git a/python/packages/devui/agent_framework_devui/_mapper.py b/python/packages/devui/agent_framework_devui/_mapper.py index 7acb247c20..b956be3ac0 100644 --- a/python/packages/devui/agent_framework_devui/_mapper.py +++ b/python/packages/devui/agent_framework_devui/_mapper.py @@ -12,7 +12,7 @@ from typing import Any, Union from uuid import uuid4 -from agent_framework import ChatMessage, Content, WorkflowOutputEvent +from agent_framework import ChatMessage, Content from openai.types.responses import ( Response, ResponseContentPartAddedEvent, @@ -180,16 
+180,18 @@ async def convert_event(self, raw_event: Any, request: AgentFrameworkRequest) -> try: from agent_framework import AgentResponse, AgentResponseUpdate, WorkflowEvent - # Handle AgentRunUpdateEvent - workflow event wrapping AgentResponseUpdate + # Handle WorkflowEvent with type='output' or 'data' wrapping AgentResponseUpdate # This must be checked BEFORE generic WorkflowEvent check - if isinstance(raw_event, WorkflowOutputEvent): - # Extract the AgentResponseUpdate from the event's data attribute - if raw_event.data and isinstance(raw_event.data, AgentResponseUpdate): - # Preserve executor_id in context for proper output routing - context["current_executor_id"] = raw_event.executor_id - return await self._convert_agent_update(raw_event.data, context) - # If no data, treat as generic workflow event - return await self._convert_workflow_event(raw_event, context) + # Note: AgentExecutor uses type='output' for streaming updates + if ( + isinstance(raw_event, WorkflowEvent) + and raw_event.type in ("output", "data") + and raw_event.data + and isinstance(raw_event.data, AgentResponseUpdate) + ): + # Preserve executor_id in context for proper output routing + context["current_executor_id"] = raw_event.executor_id + return await self._convert_agent_update(raw_event.data, context) # Handle complete agent response (AgentResponse) - for non-streaming agent execution if isinstance(raw_event, AgentResponse): @@ -824,10 +826,12 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> List of OpenAI response stream events """ try: - event_class = event.__class__.__name__ + # Use event.type for discriminated union pattern (similar to Content class) + event_type = getattr(event, "type", None) + event_class = event.__class__.__name__ # Fallback for non-workflow events # Response-level events - construct proper OpenAI objects - if event_class == "WorkflowStartedEvent": + if event_type == "started": workflow_id = getattr(event, "workflow_id", 
str(uuid4())) context["workflow_id"] = workflow_id @@ -871,8 +875,8 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> return events - # Handle WorkflowOutputEvent separately to preserve output data - if event_class == "WorkflowOutputEvent": + # Handle output events separately to preserve output data + if event_type == "output": output_data = getattr(event, "data", None) executor_id = getattr(event, "executor_id", "unknown") @@ -934,7 +938,7 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> # Emit output_item.added for each yield_output logger.debug( - f"WorkflowOutputEvent converted to output_item.added " + f"output event (type='output') converted to output_item.added " f"(executor: {executor_id}, length: {len(text)})" ) return [ @@ -946,15 +950,15 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> ) ] - # Handle WorkflowCompletedEvent - Don't emit response.completed here + # Handle completed event - Don't emit response.completed here # The server will emit a proper one with usage data after aggregating all events - if event_class == "WorkflowCompletedEvent": + if event_type == "completed": return [] - if event_class == "WorkflowFailedEvent": + if event_type == "failed": workflow_id = context.get("workflow_id", str(uuid4())) - # WorkflowFailedEvent uses 'details' field (WorkflowErrorDetails), not 'error' - # This matches ExecutorFailedEvent which also uses 'details' + # failed event (type='failed') uses 'details' field (WorkflowErrorDetails), not 'error' + # This matches executor_failed event which also uses 'details' details = getattr(event, "details", None) # Import Response and ResponseError types @@ -1000,7 +1004,8 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> ] # Executor-level events (output items) - if event_class == "ExecutorInvokedEvent": + # Check for executor lifecycle events via event.type + if event_type == 
"executor_invoked": executor_id = getattr(event, "executor_id", "unknown") item_id = f"exec_{executor_id}_{uuid4().hex[:8]}" context[f"exec_item_{executor_id}"] = item_id @@ -1029,7 +1034,7 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> ) ] - if event_class == "ExecutorCompletedEvent": + if event_type == "executor_completed": executor_id = getattr(event, "executor_id", "unknown") item_id = context.get(f"exec_item_{executor_id}", f"exec_{executor_id}_unknown") @@ -1038,7 +1043,7 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> context.pop("current_executor_id", None) # Create ExecutorActionItem with completed status - # ExecutorCompletedEvent uses 'data' field, not 'result' + # executor_completed event (type='executor_completed') uses 'data' field, not 'result' # Serialize the result data to ensure it's JSON-serializable # (AgentExecutorResponse contains AgentResponse/ChatMessage which are SerializationMixin) raw_result = getattr(event, "data", None) @@ -1061,10 +1066,11 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> ) ] - if event_class == "ExecutorFailedEvent": + if event_type == "executor_failed": executor_id = getattr(event, "executor_id", "unknown") item_id = context.get(f"exec_item_{executor_id}", f"exec_{executor_id}_unknown") - # ExecutorFailedEvent uses 'details' field (WorkflowErrorDetails), not 'error' + # executor_failed event (type='executor_failed') uses 'details' property (WorkflowErrorDetails) + # not 'error'. 
This matches WorkflowEvent.details which returns self.data for executor_failed type details = getattr(event, "details", None) if details: err_msg = getattr(details, "message", None) or str(details) @@ -1093,8 +1099,8 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> ) ] - # Handle RequestInfoEvent specially - emit as HIL event with schema - if event_class == "RequestInfoEvent": + # Handle request_info events specially - emit as HIL event with schema + if event_type == "request_info": from .models._openai_custom import ResponseRequestInfoEvent request_id = getattr(event, "request_id", "") @@ -1102,7 +1108,7 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> request_type_class = getattr(event, "request_type", None) request_data = getattr(event, "data", None) - logger.info("šŸ“Ø [MAPPER] Processing RequestInfoEvent") + logger.info("šŸ“Ø [MAPPER] Processing request_info event (type='request_info')") logger.info(f" request_id: {request_id}") logger.info(f" source_executor_id: {source_executor_id}") logger.info(f" request_type_class: {request_type_class}") @@ -1163,26 +1169,23 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> return [hil_event] # Handle other informational workflow events (status, warnings, errors) - if event_class in ["WorkflowStatusEvent", "WorkflowWarningEvent", "WorkflowErrorEvent"]: + if event_type in ["status", "warning", "error"]: # These are informational events that don't map to OpenAI lifecycle events # Convert them to trace events for debugging visibility event_data: dict[str, Any] = {} # Extract relevant data based on event type - if event_class == "WorkflowStatusEvent": + if event_type == "status": event_data["state"] = str(getattr(event, "state", "unknown")) - elif event_class == "WorkflowWarningEvent": - event_data["message"] = str(getattr(event, "message", "")) - elif event_class == "WorkflowErrorEvent": - event_data["message"] = 
str(getattr(event, "message", "")) - event_data["error"] = str(getattr(event, "error", "")) + elif event_type == "warning" or event_type == "error": + event_data["message"] = str(getattr(event, "data", "")) # Create a trace event for debugging trace_event = ResponseTraceEventComplete( type="response.trace.completed", data={ "trace_type": "workflow_info", - "event_type": event_class, + "event_type": event_type, "data": event_data, "timestamp": datetime.now().isoformat(), }, diff --git a/python/packages/devui/tests/devui/conftest.py b/python/packages/devui/tests/devui/conftest.py index a9a1bcb971..a6240108c6 100644 --- a/python/packages/devui/tests/devui/conftest.py +++ b/python/packages/devui/tests/devui/conftest.py @@ -32,10 +32,8 @@ from agent_framework._clients import TOptions_co from agent_framework._workflows._agent_executor import AgentExecutorResponse from agent_framework._workflows._events import ( - ExecutorCompletedEvent, - ExecutorFailedEvent, - ExecutorInvokedEvent, WorkflowErrorDetails, + WorkflowEvent, ) from agent_framework.orchestrations import ConcurrentBuilder, SequentialBuilder @@ -284,7 +282,8 @@ def _create_agent_executor_response( executor_id: str = "test_executor", response_text: str = "Executor response", ) -> AgentExecutorResponse: - """Create an AgentExecutorResponse - the type that's nested in ExecutorCompletedEvent.data.""" + """Create an AgentExecutorResponse - the type that's nested in + executor_completed event (type='executor_completed').data.""" agent_response = _create_agent_run_response(response_text) return AgentExecutorResponse( executor_id=executor_id, @@ -306,32 +305,32 @@ def create_agent_run_response(text: str = "Test response") -> AgentResponse: return _create_agent_run_response(text) -def create_executor_invoked_event(executor_id: str = "test_executor") -> ExecutorInvokedEvent: - """Create an ExecutorInvokedEvent.""" - return ExecutorInvokedEvent(executor_id=executor_id) +def create_executor_invoked_event(executor_id: str = 
"test_executor") -> WorkflowEvent[Any]: + """Create a WorkflowEvent(type='executor_invoked').""" + return WorkflowEvent.executor_invoked(executor_id=executor_id) def create_executor_completed_event( executor_id: str = "test_executor", with_agent_response: bool = True, -) -> ExecutorCompletedEvent: - """Create an ExecutorCompletedEvent with realistic nested data. +) -> WorkflowEvent[Any]: + """Create a WorkflowEvent(type='executor_completed') with realistic nested data. This creates the exact data structure that caused the serialization bug: - ExecutorCompletedEvent.data contains AgentExecutorResponse which contains + WorkflowEvent.data contains AgentExecutorResponse which contains AgentResponse and ChatMessage objects (SerializationMixin, not Pydantic). """ data = _create_agent_executor_response(executor_id) if with_agent_response else {"simple": "dict"} - return ExecutorCompletedEvent(executor_id=executor_id, data=data) + return WorkflowEvent.executor_completed(executor_id=executor_id, data=data) def create_executor_failed_event( executor_id: str = "test_executor", error_message: str = "Test error", -) -> ExecutorFailedEvent: - """Create an ExecutorFailedEvent.""" +) -> WorkflowEvent[WorkflowErrorDetails]: + """Create a WorkflowEvent(type='executor_failed').""" details = WorkflowErrorDetails(error_type="TestError", message=error_message) - return ExecutorFailedEvent(executor_id=executor_id, details=details) + return WorkflowEvent.executor_failed(executor_id=executor_id, details=details) # ============================================================================= @@ -386,28 +385,28 @@ def agent_run_response() -> AgentResponse: @pytest.fixture -def executor_completed_event() -> ExecutorCompletedEvent: - """Create an ExecutorCompletedEvent with realistic nested data. +def executor_completed_event() -> WorkflowEvent[Any]: + """Create a WorkflowEvent(type='executor_completed') with realistic nested data. 
This creates the exact data structure that caused the serialization bug: - ExecutorCompletedEvent.data contains AgentExecutorResponse which contains + executor_completed event (type='executor_completed').data contains AgentExecutorResponse which contains AgentResponse and ChatMessage objects (SerializationMixin, not Pydantic). """ data = _create_agent_executor_response("test_executor") - return ExecutorCompletedEvent(executor_id="test_executor", data=data) + return WorkflowEvent.executor_completed(executor_id="test_executor", data=data) @pytest.fixture -def executor_invoked_event() -> ExecutorInvokedEvent: - """Create an ExecutorInvokedEvent.""" - return ExecutorInvokedEvent(executor_id="test_executor") +def executor_invoked_event() -> WorkflowEvent[Any]: + """Create a WorkflowEvent(type='executor_invoked').""" + return WorkflowEvent.executor_invoked(executor_id="test_executor") @pytest.fixture -def executor_failed_event() -> ExecutorFailedEvent: - """Create an ExecutorFailedEvent.""" +def executor_failed_event() -> WorkflowEvent[WorkflowErrorDetails]: + """Create a WorkflowEvent(type='executor_failed').""" details = WorkflowErrorDetails(error_type="TestError", message="Test error") - return ExecutorFailedEvent(executor_id="test_executor", details=details) + return WorkflowEvent.executor_failed(executor_id="test_executor", details=details) @pytest.fixture diff --git a/python/packages/devui/tests/devui/test_checkpoints.py b/python/packages/devui/tests/devui/test_checkpoints.py index e1a3114f14..dddb51cdb2 100644 --- a/python/packages/devui/tests/devui/test_checkpoints.py +++ b/python/packages/devui/tests/devui/test_checkpoints.py @@ -8,10 +8,8 @@ from agent_framework import ( Executor, InMemoryCheckpointStorage, - RequestInfoEvent, WorkflowBuilder, WorkflowContext, - WorkflowStatusEvent, handler, response_handler, ) @@ -428,13 +426,13 @@ async def test_workflow_auto_saves_checkpoints_to_injected_storage(self, checkpo # Run workflow until it reaches 
IDLE_WITH_PENDING_REQUESTS (after checkpoint is created) saw_request_event = False async for event in test_workflow.run(WorkflowTestData(value="test"), stream=True): - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": saw_request_event = True # Wait for IDLE_WITH_PENDING_REQUESTS status (comes after checkpoint creation) - if isinstance(event, WorkflowStatusEvent) and "IDLE_WITH_PENDING_REQUESTS" in str(event.state): + if event.type == "status" and "IDLE_WITH_PENDING_REQUESTS" in str(event.state): break - assert saw_request_event, "Test workflow should have emitted RequestInfoEvent" + assert saw_request_event, "Test workflow should have emitted request_info event (type='request_info')" # Verify checkpoint was AUTOMATICALLY saved to our storage by the framework checkpoints_after = await checkpoint_storage.list_checkpoints() diff --git a/python/packages/devui/tests/devui/test_execution.py b/python/packages/devui/tests/devui/test_execution.py index 12ee7d8a7a..2a92f48486 100644 --- a/python/packages/devui/tests/devui/test_execution.py +++ b/python/packages/devui/tests/devui/test_execution.py @@ -292,7 +292,7 @@ async def test_full_pipeline_workflow_events_are_json_serializable(): """CRITICAL TEST: Verify ALL events from workflow execution can be JSON serialized. This is particularly important for workflows with AgentExecutor because: - - AgentExecutor produces ExecutorCompletedEvent with AgentExecutorResponse + - AgentExecutor produces executor_completed event (type='executor_completed') with AgentExecutorResponse - AgentExecutorResponse contains AgentResponse and ChatMessage objects - These are SerializationMixin objects, not Pydantic, which caused the original bug @@ -672,10 +672,10 @@ async def test_full_pipeline_concurrent_workflow(concurrent_workflow): @pytest.mark.asyncio async def test_full_pipeline_workflow_output_event_serialization(): - """Test that WorkflowOutputEvent from ctx.yield_output() serializes correctly. 
+ """Test that output event (type='output') from ctx.yield_output() serializes correctly. This tests the pattern where executors yield output via ctx.yield_output(), - which emits WorkflowOutputEvent that DevUI must serialize for SSE. + which emits output event (type='output') that DevUI must serialize for SSE. """ from agent_framework import Executor, WorkflowBuilder, WorkflowContext, handler diff --git a/python/packages/devui/tests/devui/test_mapper.py b/python/packages/devui/tests/devui/test_mapper.py index 3d3cf2194c..3609cd774b 100644 --- a/python/packages/devui/tests/devui/test_mapper.py +++ b/python/packages/devui/tests/devui/test_mapper.py @@ -19,9 +19,8 @@ # Import real workflow event classes - NOT mocks! from agent_framework._workflows._events import ( - ExecutorCompletedEvent, - WorkflowStartedEvent, - WorkflowStatusEvent, + WorkflowEvent, + WorkflowRunState, ) # Import factory functions from conftest for parameterized test data creation @@ -261,7 +260,7 @@ async def test_agent_run_response_mapping(mapper: MessageMapper, test_request: A async def test_executor_invoked_event(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test ExecutorInvokedEvent using the REAL class from agent_framework.""" + """Test WorkflowEvent(type='executor_invoked') using the REAL class from agent_framework.""" # Use real class, not mock! 
event = create_executor_invoked_event(executor_id="exec_123") @@ -277,9 +276,9 @@ async def test_executor_invoked_event(mapper: MessageMapper, test_request: Agent async def test_executor_completed_event_simple_data(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test ExecutorCompletedEvent with simple dict data.""" + """Test WorkflowEvent(type='executor_completed') with simple dict data.""" # Create event with simple data - event = ExecutorCompletedEvent(executor_id="exec_123", data={"simple": "result"}) + event = WorkflowEvent.executor_completed(executor_id="exec_123", data={"simple": "result"}) # First need to invoke the executor to set up context invoke_event = create_executor_invoked_event(executor_id="exec_123") @@ -301,10 +300,10 @@ async def test_executor_completed_event_simple_data(mapper: MessageMapper, test_ async def test_executor_completed_event_with_agent_response( mapper: MessageMapper, test_request: AgentFrameworkRequest ) -> None: - """Test ExecutorCompletedEvent with nested AgentExecutorResponse. + """Test WorkflowEvent(type='executor_completed') with nested AgentExecutorResponse. This is a REGRESSION TEST for the serialization bug where - ExecutorCompletedEvent.data contained AgentExecutorResponse with nested + WorkflowEvent.data contained AgentExecutorResponse with nested AgentResponse and ChatMessage objects (SerializationMixin) that Pydantic couldn't serialize. 
""" @@ -374,7 +373,7 @@ async def test_executor_completed_event_serialization_to_json( async def test_executor_failed_event(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test ExecutorFailedEvent using the REAL class.""" + """Test WorkflowEvent(type='executor_failed') using the REAL class.""" # First invoke the executor invoke_event = create_executor_invoked_event(executor_id="exec_fail") await mapper.convert_event(invoke_event, test_request) @@ -398,22 +397,21 @@ async def test_executor_failed_event(mapper: MessageMapper, test_request: AgentF async def test_workflow_started_event(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test WorkflowStartedEvent using the REAL class.""" + """Test WorkflowEvent(type='started') using the REAL class.""" - event = WorkflowStartedEvent(data=None) + event = WorkflowEvent.started() events = await mapper.convert_event(event, test_request) - # WorkflowStartedEvent should emit response.created and response.in_progress + # WorkflowEvent(type='started') should emit response.created and response.in_progress assert len(events) == 2 assert events[0].type == "response.created" assert events[1].type == "response.in_progress" async def test_workflow_status_event(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test WorkflowStatusEvent using the REAL class.""" - from agent_framework._workflows._events import WorkflowRunState + """Test WorkflowEvent(type='status') using the REAL class.""" - event = WorkflowStatusEvent(state=WorkflowRunState.IN_PROGRESS) + event = WorkflowEvent.status(state=WorkflowRunState.IN_PROGRESS) events = await mapper.convert_event(event, test_request) # Should emit some status-related event @@ -421,20 +419,20 @@ async def test_workflow_status_event(mapper: MessageMapper, test_request: AgentF # ============================================================================= -# Magentic Event Tests - Testing WorkflowOutputEvent with 
additional_properties +# Magentic Event Tests - Testing WorkflowEvent[AgentResponseUpdate] with additional_properties # ============================================================================= -async def test_magentic_agent_run_update_event_with_agent_delta_metadata( +async def test_magentic_executor_event_with_agent_delta_metadata( mapper: MessageMapper, test_request: AgentFrameworkRequest ) -> None: - """Test that WorkflowOutputEvent with magentic_event_type='agent_delta' is handled correctly. + """Test that WorkflowEvent[AgentResponseUpdate] with magentic_event_type='agent_delta' is handled correctly. This tests the ACTUAL event format Magentic emits - not a fake MagenticAgentDeltaEvent class. - Magentic uses WorkflowOutputEvent wrapping AgentResponseUpdate with additional_properties. + Magentic uses WorkflowEvent.emit() with additional_properties containing magentic_event_type. """ from agent_framework._types import AgentResponseUpdate - from agent_framework._workflows._events import WorkflowOutputEvent + from agent_framework._workflows._events import WorkflowEvent # Create the REAL event format that Magentic emits update = AgentResponseUpdate( @@ -446,11 +444,11 @@ async def test_magentic_agent_run_update_event_with_agent_delta_metadata( "agent_id": "writer_agent", }, ) - event = WorkflowOutputEvent(executor_id="magentic_executor", data=update) + event = WorkflowEvent.emit(executor_id="magentic_executor", data=update) events = await mapper.convert_event(event, test_request) - # Should be treated as a regular WorkflowOutputEvent with text content + # Should be treated as a regular WorkflowEvent[AgentResponseUpdate] with text content # The mapper should emit text delta events assert len(events) >= 1 text_events = [e for e in events if getattr(e, "type", "") == "response.output_text.delta"] @@ -459,13 +457,13 @@ async def test_magentic_agent_run_update_event_with_agent_delta_metadata( async def test_magentic_orchestrator_message_event(mapper: MessageMapper, 
test_request: AgentFrameworkRequest) -> None: - """Test that WorkflowOutputEvent with magentic_event_type='orchestrator_message' is handled. + """Test that WorkflowEvent[AgentResponseUpdate] with magentic_event_type='orchestrator_message' is handled. - Magentic emits orchestrator planning/instruction messages using WorkflowOutputEvent - wrapping AgentResponseUpdate with additional_properties. + Magentic emits orchestrator planning/instruction messages using WorkflowEvent.emit() + with additional_properties containing magentic_event_type='orchestrator_message'. """ from agent_framework._types import AgentResponseUpdate - from agent_framework._workflows._events import WorkflowOutputEvent + from agent_framework._workflows._events import WorkflowEvent # Create orchestrator message event (REAL format from Magentic) update = AgentResponseUpdate( @@ -478,11 +476,11 @@ async def test_magentic_orchestrator_message_event(mapper: MessageMapper, test_r "orchestrator_id": "magentic_orchestrator", }, ) - event = WorkflowOutputEvent(executor_id="magentic_orchestrator", data=update) + event = WorkflowEvent.emit(executor_id="magentic_orchestrator", data=update) events = await mapper.convert_event(event, test_request) - # Currently, mapper treats this as regular WorkflowOutputEvent (no special handling) + # Currently, mapper treats this as regular WorkflowEvent[AgentResponseUpdate] (no special handling) # This test documents the current behavior assert len(events) >= 1 text_events = [e for e in events if getattr(e, "type", "") == "response.output_text.delta"] @@ -493,15 +491,15 @@ async def test_magentic_orchestrator_message_event(mapper: MessageMapper, test_r async def test_magentic_events_use_same_event_class_as_other_workflows( mapper: MessageMapper, test_request: AgentFrameworkRequest ) -> None: - """Verify Magentic uses the same WorkflowOutputEvent class as other workflows. + """Verify Magentic uses the same WorkflowEvent class as other workflows. 
This test documents that Magentic does NOT define separate event classes like - MagenticAgentDeltaEvent - it reuses WorkflowOutputEvent with metadata in + MagenticAgentDeltaEvent - it reuses WorkflowEvent with metadata in additional_properties. Any mapper code checking for 'MagenticAgentDeltaEvent' class names is dead code. """ from agent_framework._types import AgentResponseUpdate - from agent_framework._workflows._events import WorkflowOutputEvent + from agent_framework._workflows._events import WorkflowEvent # Create events the way different workflows do it # 1. Regular workflow (no additional_properties) @@ -509,7 +507,7 @@ class names is dead code. contents=[Content.from_text(text="Regular workflow response")], role="assistant", ) - regular_event = WorkflowOutputEvent(executor_id="regular_executor", data=regular_update) + regular_event = WorkflowEvent.emit(executor_id="regular_executor", data=regular_update) # 2. Magentic workflow (with additional_properties) magentic_update = AgentResponseUpdate( @@ -517,12 +515,12 @@ class names is dead code. 
role="assistant", additional_properties={"magentic_event_type": "agent_delta"}, ) - magentic_event = WorkflowOutputEvent(executor_id="magentic_executor", data=magentic_update) + magentic_event = WorkflowEvent.emit(executor_id="magentic_executor", data=magentic_update) # Both should be the SAME class assert type(regular_event) is type(magentic_event) - assert isinstance(regular_event, WorkflowOutputEvent) - assert isinstance(magentic_event, WorkflowOutputEvent) + assert isinstance(regular_event, WorkflowEvent) + assert isinstance(magentic_event, WorkflowEvent) # Both should be handled by the same isinstance check in mapper regular_events = await mapper.convert_event(regular_event, test_request) @@ -559,18 +557,18 @@ def __init__(self): # ============================================================================= -# WorkflowOutputEvent Tests +# output event (type='output') Tests # ============================================================================= async def test_workflow_output_event(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test WorkflowOutputEvent is converted to output_item.added.""" - from agent_framework._workflows._events import WorkflowOutputEvent + """Test output event (type='output') is converted to output_item.added.""" + from agent_framework._workflows._events import WorkflowEvent - event = WorkflowOutputEvent(data="Final workflow output", executor_id="final_executor") + event = WorkflowEvent.output(executor_id="final_executor", data="Final workflow output") events = await mapper.convert_event(event, test_request) - # WorkflowOutputEvent should emit output_item.added + # output event (type='output') should emit output_item.added assert len(events) == 1 assert events[0].type == "response.output_item.added" # Check item contains the output text @@ -580,16 +578,16 @@ async def test_workflow_output_event(mapper: MessageMapper, test_request: AgentF async def test_workflow_output_event_with_list_data(mapper: 
MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test WorkflowOutputEvent with list data (common for sequential/concurrent workflows).""" + """Test output event (type='output') with list data (common for sequential/concurrent workflows).""" from agent_framework import ChatMessage - from agent_framework._workflows._events import WorkflowOutputEvent + from agent_framework._workflows._events import WorkflowEvent # Sequential/Concurrent workflows often output list[ChatMessage] messages = [ ChatMessage(role="user", contents=[Content.from_text(text="Hello")]), ChatMessage(role="assistant", contents=[Content.from_text(text="World")]), ] - event = WorkflowOutputEvent(data=messages, executor_id="complete") + event = WorkflowEvent.output(executor_id="complete", data=messages) events = await mapper.convert_event(event, test_request) assert len(events) == 1 @@ -597,23 +595,23 @@ async def test_workflow_output_event_with_list_data(mapper: MessageMapper, test_ # ============================================================================= -# WorkflowFailedEvent Tests +# failed event (type='failed') Tests # ============================================================================= async def test_workflow_failed_event(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test WorkflowFailedEvent is converted to response.failed.""" - from agent_framework._workflows._events import WorkflowErrorDetails, WorkflowFailedEvent + """Test failed event (type='failed') is converted to response.failed.""" + from agent_framework._workflows._events import WorkflowErrorDetails, WorkflowEvent details = WorkflowErrorDetails( error_type="TestError", message="Workflow failed due to test error", executor_id="failing_executor", ) - event = WorkflowFailedEvent(details=details) + event = WorkflowEvent.failed(details=details) events = await mapper.convert_event(event, test_request) - # WorkflowFailedEvent should emit response.failed + # failed event (type='failed') 
should emit response.failed assert len(events) >= 1 # Find the failed event failed_events = [e for e in events if getattr(e, "type", "") == "response.failed"] @@ -628,8 +626,8 @@ async def test_workflow_failed_event(mapper: MessageMapper, test_request: AgentF async def test_workflow_failed_event_with_extra(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test WorkflowFailedEvent includes extra context when available.""" - from agent_framework._workflows._events import WorkflowErrorDetails, WorkflowFailedEvent + """Test failed event (type='failed') includes extra context when available.""" + from agent_framework._workflows._events import WorkflowErrorDetails, WorkflowEvent details = WorkflowErrorDetails( error_type="ValidationError", @@ -637,7 +635,7 @@ async def test_workflow_failed_event_with_extra(mapper: MessageMapper, test_requ executor_id="validation_executor", extra={"field": "email", "reason": "invalid format"}, ) - event = WorkflowFailedEvent(details=details) + event = WorkflowEvent.failed(details=details) events = await mapper.convert_event(event, test_request) assert len(events) == 1 @@ -650,8 +648,8 @@ async def test_workflow_failed_event_with_extra(mapper: MessageMapper, test_requ async def test_workflow_failed_event_with_traceback(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test WorkflowFailedEvent includes traceback when available.""" - from agent_framework._workflows._events import WorkflowErrorDetails, WorkflowFailedEvent + """Test failed event (type='failed') includes traceback when available.""" + from agent_framework._workflows._events import WorkflowErrorDetails, WorkflowEvent details = WorkflowErrorDetails( error_type="ValueError", @@ -659,7 +657,7 @@ async def test_workflow_failed_event_with_traceback(mapper: MessageMapper, test_ traceback="Traceback (most recent call last):\n File ...\nValueError: Invalid input", executor_id="validation_executor", ) - event = 
WorkflowFailedEvent(details=details) + event = WorkflowEvent.failed(details=details) events = await mapper.convert_event(event, test_request) assert len(events) == 1 @@ -672,41 +670,41 @@ async def test_workflow_failed_event_with_traceback(mapper: MessageMapper, test_ async def test_workflow_warning_event(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test WorkflowWarningEvent is converted to trace event.""" - from agent_framework._workflows._events import WorkflowWarningEvent + """Test WorkflowEvent(type='warning') is converted to trace event.""" + from agent_framework._workflows._events import WorkflowEvent - event = WorkflowWarningEvent(data="This is a warning message") + event = WorkflowEvent.warning("This is a warning message") events = await mapper.convert_event(event, test_request) - # WorkflowWarningEvent should emit a trace event + # WorkflowEvent(type='warning') should emit a trace event assert len(events) == 1 assert events[0].type == "response.trace.completed" - assert events[0].data["event_type"] == "WorkflowWarningEvent" + assert events[0].data["event_type"] == "warning" async def test_workflow_error_event(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test WorkflowErrorEvent is converted to trace event.""" - from agent_framework._workflows._events import WorkflowErrorEvent + """Test WorkflowEvent(type='error') is converted to trace event.""" + from agent_framework._workflows._events import WorkflowEvent - event = WorkflowErrorEvent(data=ValueError("Something went wrong")) + event = WorkflowEvent.error(ValueError("Something went wrong")) events = await mapper.convert_event(event, test_request) - # WorkflowErrorEvent should emit a trace event + # WorkflowEvent(type='error') should emit a trace event assert len(events) == 1 assert events[0].type == "response.trace.completed" - assert events[0].data["event_type"] == "WorkflowErrorEvent" + assert events[0].data["event_type"] == "error" # 
============================================================================= -# RequestInfoEvent Tests (Human-in-the-Loop) +# request_info event (type='request_info') Tests (Human-in-the-Loop) # ============================================================================= async def test_request_info_event(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test RequestInfoEvent is converted to HIL request event.""" - from agent_framework._workflows._events import RequestInfoEvent + """Test request_info event (type='request_info') is converted to HIL request event.""" + from agent_framework._workflows._events import WorkflowEvent - event = RequestInfoEvent( + event = WorkflowEvent.request_info( request_id="req_123", source_executor_id="approval_executor", request_data={"action": "approve", "details": "Please approve this action"}, @@ -714,7 +712,7 @@ async def test_request_info_event(mapper: MessageMapper, test_request: AgentFram ) events = await mapper.convert_event(event, test_request) - # RequestInfoEvent should emit response.request_info.requested + # request_info event (type='request_info') should emit response.request_info.requested assert len(events) >= 1 # Check that request info is captured has_hil_event = any(getattr(e, "type", "") == "response.request_info.requested" for e in events) @@ -732,24 +730,24 @@ async def test_request_info_event(mapper: MessageMapper, test_request: AgentFram async def test_superstep_started_event(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test SuperStepStartedEvent is handled gracefully.""" - from agent_framework._workflows._events import SuperStepStartedEvent + """Test superstep_started event (type='superstep_started') is handled gracefully.""" + from agent_framework._workflows._events import WorkflowEvent - event = SuperStepStartedEvent(iteration=1) + event = WorkflowEvent.superstep_started(iteration=1) events = await mapper.convert_event(event, test_request) - # 
SuperStepStartedEvent may not emit events (internal workflow signal) + # superstep_started event (type='superstep_started') may not emit events (internal workflow signal) # Just ensure it doesn't crash assert isinstance(events, list) async def test_superstep_completed_event(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: - """Test SuperStepCompletedEvent is handled gracefully.""" - from agent_framework._workflows._events import SuperStepCompletedEvent + """Test superstep_completed event (type='superstep_completed') is handled gracefully.""" + from agent_framework._workflows._events import WorkflowEvent - event = SuperStepCompletedEvent(iteration=1) + event = WorkflowEvent.superstep_completed(iteration=1) events = await mapper.convert_event(event, test_request) - # SuperStepCompletedEvent may not emit events (internal workflow signal) + # superstep_completed event (type='superstep_completed') may not emit events (internal workflow signal) # Just ensure it doesn't crash assert isinstance(events, list) diff --git a/python/packages/lab/lightning/tests/test_lightning.py b/python/packages/lab/lightning/tests/test_lightning.py index ae64ec772f..6770f9d974 100644 --- a/python/packages/lab/lightning/tests/test_lightning.py +++ b/python/packages/lab/lightning/tests/test_lightning.py @@ -9,7 +9,7 @@ agentlightning = pytest.importorskip("agentlightning") -from agent_framework import AgentExecutor, ChatAgent, WorkflowBuilder, Workflow, WorkflowOutputEvent +from agent_framework import AgentExecutor, AgentResponse, ChatAgent, WorkflowBuilder, Workflow from agent_framework_lab_lightning import AgentFrameworkTracer from agent_framework.openai import OpenAIChatClient from agentlightning import TracerTraceToTriplet @@ -109,8 +109,8 @@ def workflow_two_agents(): async def test_openai_workflow_two_agents(workflow_two_agents: Workflow): events = await workflow_two_agents.run("Please analyze the quarterly sales data") - # Get all WorkflowOutputEvent data - 
agent_outputs = [event.data for event in events if isinstance(event, WorkflowOutputEvent)] + # Get all output events with AgentResponse + agent_outputs = [event.data for event in events if event.type == "output" and isinstance(event.data, AgentResponse)] # Check that we have outputs from both agents assert len(agent_outputs) == 2 diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_base_group_chat_orchestrator.py b/python/packages/orchestrations/agent_framework_orchestrations/_base_group_chat_orchestrator.py index 5dc01cf242..4d93a3e69b 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_base_group_chat_orchestrator.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_base_group_chat_orchestrator.py @@ -61,48 +61,22 @@ class GroupChatResponseMessage: # region Group chat events -class GroupChatEvent(WorkflowEvent): - """Base class for group chat workflow events.""" - def __init__(self, round_index: int, data: Any | None = None) -> None: - """Initialize group chat event. - Args: - round_index: Current round index - data: Optional event-specific data - """ - super().__init__(data) - self.round_index = round_index - - -class GroupChatResponseReceivedEvent(GroupChatEvent): - """Event emitted when a participant response is received.""" - - def __init__(self, round_index: int, participant_name: str, data: Any | None = None) -> None: - """Initialize response received event. 
- - Args: - round_index: Current round index - participant_name: Name of the participant who sent the response - data: Optional event-specific data - """ - super().__init__(round_index, data) - self.participant_name = participant_name +@dataclass +class GroupChatRequestSentEvent: + """Data payload for group_chat request sent events.""" + round_index: int + participant_name: str -class GroupChatRequestSentEvent(GroupChatEvent): - """Event emitted when a request is sent to a participant.""" - def __init__(self, round_index: int, participant_name: str, data: Any | None = None) -> None: - """Initialize request sent event. +@dataclass +class GroupChatResponseReceivedEvent: + """Data payload for group_chat response received events.""" - Args: - round_index: Current round index - participant_name: Name of the participant to whom the request was sent - data: Optional event-specific data - """ - super().__init__(round_index, data) - self.participant_name = participant_name + round_index: int + participant_name: str # endregion @@ -273,10 +247,12 @@ async def handle_participant_response( ctx: Workflow context """ await ctx.add_event( - GroupChatResponseReceivedEvent( - round_index=self._round_index, - participant_name=ctx.source_executor_ids[0] if ctx.source_executor_ids else "unknown", - data=response, + WorkflowEvent( + "group_chat", + data=GroupChatResponseReceivedEvent( + round_index=self._round_index, + participant_name=ctx.source_executor_ids[0] if ctx.source_executor_ids else "unknown", + ), ) ) await self._handle_response(response, ctx) @@ -469,10 +445,12 @@ async def _send_request_to_participant( request = AgentExecutorRequest(messages=messages, should_respond=True) await ctx.send_message(request, target_id=target) await ctx.add_event( - GroupChatRequestSentEvent( - round_index=self._round_index, - participant_name=target, - data=request, + WorkflowEvent( + "group_chat", + data=GroupChatRequestSentEvent( + round_index=self._round_index, + participant_name=target, + 
), ) ) else: @@ -480,10 +458,12 @@ async def _send_request_to_participant( request = GroupChatRequestMessage(additional_instruction=additional_instruction, metadata=metadata) # type: ignore[assignment] await ctx.send_message(request, target_id=target) await ctx.add_event( - GroupChatRequestSentEvent( - round_index=self._round_index, - participant_name=target, - data=request, + WorkflowEvent( + "group_chat", + data=GroupChatRequestSentEvent( + round_index=self._round_index, + participant_name=target, + ), ) ) diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_concurrent.py b/python/packages/orchestrations/agent_framework_orchestrations/_concurrent.py index 20149435d4..610350f1fd 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_concurrent.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_concurrent.py @@ -463,9 +463,9 @@ def with_request_info( ) -> "ConcurrentBuilder": """Enable request info after agent participant responses. - This enables human-in-the-loop (HIL) scenarios for the sequential orchestration. + This enables human-in-the-loop (HIL) scenarios for the concurrent orchestration. When enabled, the workflow pauses after each agent participant runs, emitting - a RequestInfoEvent that allows the caller to review the conversation and optionally + a request_info event (type='request_info') that allows the caller to review the conversation and optionally inject guidance for the agent participant to iterate. The caller provides input via the standard response_handler/request_info pattern. 
diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_group_chat.py b/python/packages/orchestrations/agent_framework_orchestrations/_group_chat.py index d1d98b9e18..5ee8982617 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_group_chat.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_group_chat.py @@ -866,7 +866,7 @@ def with_request_info(self, *, agents: Sequence[str | AgentProtocol] | None = No This enables human-in-the-loop (HIL) scenarios for the group chat orchestration. When enabled, the workflow pauses after each agent participant runs, emitting - a RequestInfoEvent that allows the caller to review the conversation and optionally + a request_info event (type='request_info') that allows the caller to review the conversation and optionally inject guidance for the agent participant to iterate. The caller provides input via the standard response_handler/request_info pattern. diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py b/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py index 9be67a3b52..a2f9a4eea8 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py @@ -64,20 +64,14 @@ # region Handoff events -class HandoffSentEvent(WorkflowEvent): - """Base class for handoff workflow events.""" - def __init__(self, source: str, target: str, data: Any | None = None) -> None: - """Initialize handoff sent event. 
- Args: - source: Identifier of the source agent initiating the handoff - target: Identifier of the target agent receiving the handoff - data: Optional event-specific data - """ - super().__init__(data) - self.source = source - self.target = target +@dataclass +class HandoffSentEvent: + """Data payload for handoff_sent events.""" + + source: str + target: str # endregion @@ -421,7 +415,9 @@ async def _run_agent_and_emit( await cast(WorkflowContext[AgentExecutorRequest], ctx).send_message( AgentExecutorRequest(messages=[], should_respond=True), target_id=handoff_target ) - await ctx.add_event(HandoffSentEvent(source=self.id, target=handoff_target)) + await ctx.add_event( + WorkflowEvent("handoff_sent", data=HandoffSentEvent(source=self.id, target=handoff_target)) + ) self._autonomous_mode_turns = 0 # Reset autonomous mode turn counter on handoff return diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_magentic.py b/python/packages/orchestrations/agent_framework_orchestrations/_magentic.py index 51996f09a0..a90f570575 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_magentic.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_magentic.py @@ -19,7 +19,7 @@ ) from agent_framework._workflows._agent_executor import AgentExecutor, AgentExecutorRequest, AgentExecutorResponse from agent_framework._workflows._checkpoint import CheckpointStorage -from agent_framework._workflows._events import ExecutorEvent +from agent_framework._workflows._events import WorkflowEvent from agent_framework._workflows._executor import Executor, handler from agent_framework._workflows._model_utils import DictConvertible, encode_value from agent_framework._workflows._request_info_mixin import response_handler @@ -771,20 +771,11 @@ class MagenticOrchestratorEventType(str, Enum): @dataclass -class MagenticOrchestratorEvent(ExecutorEvent): - """Base class for Magentic orchestrator events.""" +class MagenticOrchestratorEvent: + 
"""Data payload for magentic_orchestrator events.""" - def __init__( - self, - executor_id: str, - event_type: MagenticOrchestratorEventType, - data: ChatMessage | MagenticProgressLedger, - ) -> None: - super().__init__(executor_id, data) - self.event_type = event_type - - def __repr__(self) -> str: - return f"{self.__class__.__name__}(executor_id={self.executor_id}, event_type={self.event_type})" + event_type: MagenticOrchestratorEventType + content: ChatMessage | MagenticProgressLedger # region Request info related types @@ -928,10 +919,13 @@ async def _handle_messages( # Initial planning using the manager with real model calls self._task_ledger = await self._manager.plan(self._magentic_context.clone(deep=True)) await ctx.add_event( - MagenticOrchestratorEvent( + WorkflowEvent( + "magentic_orchestrator", executor_id=self.id, - event_type=MagenticOrchestratorEventType.PLAN_CREATED, - data=self._task_ledger, + data=MagenticOrchestratorEvent( + event_type=MagenticOrchestratorEventType.PLAN_CREATED, + content=self._task_ledger, + ), ) ) @@ -1006,10 +1000,13 @@ async def handle_plan_review_response( self._magentic_context.chat_history.extend(response.review) self._task_ledger = await self._manager.replan(self._magentic_context.clone(deep=True)) await ctx.add_event( - MagenticOrchestratorEvent( + WorkflowEvent( + "magentic_orchestrator", executor_id=self.id, - event_type=MagenticOrchestratorEventType.REPLANNED, - data=self._task_ledger, + data=MagenticOrchestratorEvent( + event_type=MagenticOrchestratorEventType.REPLANNED, + content=self._task_ledger, + ), ) ) # Continue the review process by sending the new plan for review again until approved @@ -1072,10 +1069,13 @@ async def _run_inner_loop_helper( return await ctx.add_event( - MagenticOrchestratorEvent( + WorkflowEvent( + "magentic_orchestrator", executor_id=self.id, - event_type=MagenticOrchestratorEventType.PROGRESS_LEDGER_UPDATED, - data=self._progress_ledger, + data=MagenticOrchestratorEvent( + 
event_type=MagenticOrchestratorEventType.PROGRESS_LEDGER_UPDATED, + content=self._progress_ledger, + ), ) ) @@ -1149,10 +1149,13 @@ async def _reset_and_replan( # Replan self._task_ledger = await self._manager.replan(self._magentic_context.clone(deep=True)) await ctx.add_event( - MagenticOrchestratorEvent( + WorkflowEvent( + "magentic_orchestrator", executor_id=self.id, - event_type=MagenticOrchestratorEventType.REPLANNED, - data=self._task_ledger, + data=MagenticOrchestratorEvent( + event_type=MagenticOrchestratorEventType.REPLANNED, + content=self._task_ledger, + ), ) ) # If a human must sign off, ask now and return. The response handler will resume. @@ -1515,7 +1518,7 @@ def with_plan_review(self, enable: bool = True) -> "MagenticBuilder": # During execution, handle plan review async for event in workflow.run("task", stream=True): - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": request = event.data if isinstance(request, MagenticHumanInterventionRequest): if request.kind == MagenticHumanInterventionKind.PLAN_REVIEW: diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_orchestration_state.py b/python/packages/orchestrations/agent_framework_orchestrations/_orchestration_state.py index 95894d37dc..fe8ba64126 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_orchestration_state.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_orchestration_state.py @@ -6,6 +6,8 @@ across GroupChat, Handoff, and Magentic patterns. """ +from __future__ import annotations + from dataclasses import dataclass, field from typing import Any @@ -69,7 +71,7 @@ def to_dict(self) -> dict[str, Any]: return result @classmethod - def from_dict(cls, data: dict[str, Any]) -> "OrchestrationState": + def from_dict(cls, data: dict[str, Any]) -> OrchestrationState: """Deserialize from checkpointed dict. 
Args: diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_sequential.py b/python/packages/orchestrations/agent_framework_orchestrations/_sequential.py index b54ddea6d6..5fa3598c6f 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_sequential.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_sequential.py @@ -219,7 +219,7 @@ def with_request_info( This enables human-in-the-loop (HIL) scenarios for the sequential orchestration. When enabled, the workflow pauses after each agent participant runs, emitting - a RequestInfoEvent that allows the caller to review the conversation and optionally + a request_info event (type='request_info') that allows the caller to review the conversation and optionally inject guidance for the agent participant to iterate. The caller provides input via the standard response_handler/request_info pattern. diff --git a/python/packages/orchestrations/tests/test_concurrent.py b/python/packages/orchestrations/tests/test_concurrent.py index f1853eb2e7..0b0c279b14 100644 --- a/python/packages/orchestrations/tests/test_concurrent.py +++ b/python/packages/orchestrations/tests/test_concurrent.py @@ -10,9 +10,7 @@ ChatMessage, Executor, WorkflowContext, - WorkflowOutputEvent, WorkflowRunState, - WorkflowStatusEvent, handler, ) from agent_framework._workflows._checkpoint import InMemoryCheckpointStorage @@ -111,9 +109,9 @@ async def test_concurrent_default_aggregator_emits_single_user_and_assistants() completed = False output: list[ChatMessage] | None = None async for ev in wf.run("prompt: hello world", stream=True): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: completed = True - elif isinstance(ev, WorkflowOutputEvent): + elif ev.type == "output": output = cast(list[ChatMessage], ev.data) if completed and output is not None: break @@ -149,9 +147,9 @@ async def summarize(results: 
list[AgentExecutorResponse]) -> str: completed = False output: str | None = None async for ev in wf.run("prompt: custom", stream=True): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: completed = True - elif isinstance(ev, WorkflowOutputEvent): + elif ev.type == "output": output = cast(str, ev.data) if completed and output is not None: break @@ -180,9 +178,9 @@ def summarize_sync(results: list[AgentExecutorResponse], _ctx: WorkflowContext[A completed = False output: str | None = None async for ev in wf.run("prompt: custom sync", stream=True): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: completed = True - elif isinstance(ev, WorkflowOutputEvent): + elif ev.type == "output": output = cast(str, ev.data) if completed and output is not None: break @@ -228,9 +226,9 @@ async def aggregate(self, results: list[AgentExecutorResponse], ctx: WorkflowCon completed = False output: str | None = None async for ev in wf.run("prompt: instance test", stream=True): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: completed = True - elif isinstance(ev, WorkflowOutputEvent): + elif ev.type == "output": output = cast(str, ev.data) if completed and output is not None: break @@ -266,9 +264,9 @@ async def aggregate(self, results: list[AgentExecutorResponse], ctx: WorkflowCon completed = False output: str | None = None async for ev in wf.run("prompt: factory test", stream=True): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: completed = True - elif isinstance(ev, WorkflowOutputEvent): + elif ev.type == "output": output = cast(str, ev.data) if completed and output is not None: break @@ -302,9 +300,9 @@ async def 
aggregate(self, results: list[AgentExecutorResponse], ctx: WorkflowCon completed = False output: str | None = None async for ev in wf.run("prompt: factory test", stream=True): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: completed = True - elif isinstance(ev, WorkflowOutputEvent): + elif ev.type == "output": output = cast(str, ev.data) if completed and output is not None: break @@ -352,9 +350,9 @@ async def test_concurrent_checkpoint_resume_round_trip() -> None: baseline_output: list[ChatMessage] | None = None async for ev in wf.run("checkpoint concurrent", stream=True): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": baseline_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: break assert baseline_output is not None @@ -376,9 +374,9 @@ async def test_concurrent_checkpoint_resume_round_trip() -> None: resumed_output: list[ChatMessage] | None = None async for ev in wf_resume.run(checkpoint_id=resume_checkpoint.checkpoint_id, stream=True): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": resumed_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state in ( + if ev.type == "status" and ev.state in ( WorkflowRunState.IDLE, WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, ): @@ -398,9 +396,9 @@ async def test_concurrent_checkpoint_runtime_only() -> None: baseline_output: list[ChatMessage] | None = None async for ev in wf.run("runtime checkpoint test", checkpoint_storage=storage, stream=True): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": baseline_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: break 
assert baseline_output is not None @@ -421,9 +419,9 @@ async def test_concurrent_checkpoint_runtime_only() -> None: async for ev in wf_resume.run( checkpoint_id=resume_checkpoint.checkpoint_id, checkpoint_storage=storage, stream=True ): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": resumed_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state in ( + if ev.type == "status" and ev.state in ( WorkflowRunState.IDLE, WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, ): @@ -448,9 +446,9 @@ async def test_concurrent_checkpoint_runtime_overrides_buildtime() -> None: baseline_output: list[ChatMessage] | None = None async for ev in wf.run("override test", checkpoint_storage=runtime_storage, stream=True): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": baseline_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: break assert baseline_output is not None @@ -530,9 +528,9 @@ def create_agent3() -> Executor: completed = False output: list[ChatMessage] | None = None async for ev in wf.run("test prompt", stream=True): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: completed = True - elif isinstance(ev, WorkflowOutputEvent): + elif ev.type == "output": output = cast(list[ChatMessage], ev.data) if completed and output is not None: break diff --git a/python/packages/orchestrations/tests/test_group_chat.py b/python/packages/orchestrations/tests/test_group_chat.py index 77e707d6f7..1b7f02b5f5 100644 --- a/python/packages/orchestrations/tests/test_group_chat.py +++ b/python/packages/orchestrations/tests/test_group_chat.py @@ -15,10 +15,8 @@ ChatResponse, ChatResponseUpdate, Content, - RequestInfoEvent, - WorkflowOutputEvent, + WorkflowEvent, WorkflowRunState, - 
WorkflowStatusEvent, ) from agent_framework._workflows._checkpoint import InMemoryCheckpointStorage from agent_framework.orchestrations import ( @@ -190,7 +188,7 @@ async def test_group_chat_builder_basic_flow() -> None: outputs: list[list[ChatMessage]] = [] async for event in workflow.run("coordinate task", stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": data = event.data if isinstance(data, list): outputs.append(cast(list[ChatMessage], data)) @@ -362,7 +360,7 @@ def selector(state: GroupChatState) -> str: outputs: list[list[ChatMessage]] = [] async for event in workflow.run("test task", stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": data = event.data if isinstance(data, list): outputs.append(cast(list[ChatMessage], data)) @@ -397,7 +395,7 @@ def termination_condition(conversation: list[ChatMessage]) -> bool: outputs: list[list[ChatMessage]] = [] async for event in workflow.run("test task", stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": data = event.data if isinstance(data, list): outputs.append(cast(list[ChatMessage], data)) @@ -425,7 +423,7 @@ async def test_termination_condition_agent_manager_finalizes(self) -> None: outputs: list[list[ChatMessage]] = [] async for event in workflow.run("test task", stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": data = event.data if isinstance(data, list): outputs.append(cast(list[ChatMessage], data)) @@ -473,7 +471,7 @@ def selector(state: GroupChatState) -> str: outputs: list[list[ChatMessage]] = [] async for event in workflow.run("test task", stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": data = event.data if isinstance(data, list): outputs.append(cast(list[ChatMessage], data)) @@ -526,7 +524,7 @@ def selector(state: GroupChatState) -> str: outputs: list[list[ChatMessage]] = [] async for event in workflow.run("test string", 
stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": data = event.data if isinstance(data, list): outputs.append(cast(list[ChatMessage], data)) @@ -555,7 +553,7 @@ def selector(state: GroupChatState) -> str: outputs: list[list[ChatMessage]] = [] async for event in workflow.run(task_message, stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": data = event.data if isinstance(data, list): outputs.append(cast(list[ChatMessage], data)) @@ -587,7 +585,7 @@ def selector(state: GroupChatState) -> str: outputs: list[list[ChatMessage]] = [] async for event in workflow.run(conversation, stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": data = event.data if isinstance(data, list): outputs.append(cast(list[ChatMessage], data)) @@ -619,7 +617,7 @@ def selector(state: GroupChatState) -> str: outputs: list[list[ChatMessage]] = [] async for event in workflow.run("test", stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": data = event.data if isinstance(data, list): outputs.append(cast(list[ChatMessage], data)) @@ -654,7 +652,7 @@ def selector(state: GroupChatState) -> str: outputs: list[list[ChatMessage]] = [] async for event in workflow.run("test", stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": data = event.data if isinstance(data, list): outputs.append(cast(list[ChatMessage], data)) @@ -686,9 +684,9 @@ async def test_group_chat_checkpoint_runtime_only() -> None: baseline_output: list[ChatMessage] | None = None async for ev in wf.run("runtime checkpoint test", checkpoint_storage=storage, stream=True): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": baseline_output = cast(list[ChatMessage], ev.data) if isinstance(ev.data, list) else None # type: ignore - if isinstance(ev, WorkflowStatusEvent) and ev.state in ( + if ev.type == "status" and ev.state in ( WorkflowRunState.IDLE, 
WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, ): @@ -724,9 +722,9 @@ async def test_group_chat_checkpoint_runtime_overrides_buildtime() -> None: ) baseline_output: list[ChatMessage] | None = None async for ev in wf.run("override test", checkpoint_storage=runtime_storage, stream=True): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": baseline_output = cast(list[ChatMessage], ev.data) if isinstance(ev.data, list) else None # type: ignore - if isinstance(ev, WorkflowStatusEvent) and ev.state in ( + if ev.type == "status" and ev.state in ( WorkflowRunState.IDLE, WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, ): @@ -770,9 +768,9 @@ async def selector(state: GroupChatState) -> str: ) # Run until we get a request info event (should be before beta, not alpha) - request_events: list[RequestInfoEvent] = [] + request_events: list[WorkflowEvent] = [] async for event in workflow.run("test task", stream=True): - if isinstance(event, RequestInfoEvent) and isinstance(event.data, AgentExecutorResponse): + if event.type == "request_info" and isinstance(event.data, AgentExecutorResponse): request_events.append(event) # Don't break - let stream complete naturally when paused @@ -785,11 +783,11 @@ async def selector(state: GroupChatState) -> str: assert request_event.source_executor_id == "beta" # Continue the workflow with a response - outputs: list[WorkflowOutputEvent] = [] + outputs: list[WorkflowEvent] = [] async for event in workflow.send_responses_streaming({ request_event.request_id: AgentRequestInfoResponse.approve() }): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": outputs.append(event) # Workflow should complete @@ -822,9 +820,9 @@ async def selector(state: GroupChatState) -> str: ) # Run until we get a request info event - request_events: list[RequestInfoEvent] = [] + request_events: list[WorkflowEvent] = [] async for event in workflow.run("test task", stream=True): - if isinstance(event, RequestInfoEvent) and 
isinstance(event.data, AgentExecutorResponse): + if event.type == "request_info" and isinstance(event.data, AgentExecutorResponse): request_events.append(event) break @@ -926,9 +924,9 @@ def create_beta() -> StubAgent: # Factories should be called during build assert call_count == 2 - outputs: list[WorkflowOutputEvent] = [] + outputs: list[WorkflowEvent] = [] async for event in workflow.run("coordinate task", stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": outputs.append(event) assert len(outputs) == 1 @@ -991,9 +989,9 @@ def create_beta() -> StubAgent: .build() ) - outputs: list[WorkflowOutputEvent] = [] + outputs: list[WorkflowEvent] = [] async for event in workflow.run("checkpoint test", stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": outputs.append(event) assert outputs, "Should have workflow output" @@ -1119,9 +1117,9 @@ def agent_factory() -> ChatAgent: # Factory should be called during build assert factory_call_count == 1 - outputs: list[WorkflowOutputEvent] = [] + outputs: list[WorkflowEvent] = [] async for event in workflow.run("coordinate task", stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": outputs.append(event) assert len(outputs) == 1 diff --git a/python/packages/orchestrations/tests/test_handoff.py b/python/packages/orchestrations/tests/test_handoff.py index 2242508aa7..ab9f6e45cb 100644 --- a/python/packages/orchestrations/tests/test_handoff.py +++ b/python/packages/orchestrations/tests/test_handoff.py @@ -11,10 +11,8 @@ ChatResponse, ChatResponseUpdate, Content, - RequestInfoEvent, ResponseStream, WorkflowEvent, - WorkflowOutputEvent, resolve_agent_id, ) from agent_framework._clients import BaseChatClient @@ -150,7 +148,7 @@ async def test_handoff(): # escalation won't trigger a handoff, so the response from it will become # a request for user input because autonomous mode is not enabled by default. 
events = await _drain(workflow.run("Need technical support", stream=True)) - requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] + requests = [ev for ev in events if ev.type == "request_info"] assert requests assert len(requests) == 1 @@ -184,10 +182,10 @@ async def test_autonomous_mode_yields_output_without_user_request(): ) events = await _drain(workflow.run("Package arrived broken", stream=True)) - requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] + requests = [ev for ev in events if ev.type == "request_info"] assert not requests, "Autonomous mode should not request additional user input" - outputs = [ev for ev in events if isinstance(ev, WorkflowOutputEvent)] + outputs = [ev for ev in events if ev.type == "output"] assert outputs, "Autonomous mode should yield a workflow output" final_conversation = outputs[-1].data @@ -210,7 +208,7 @@ async def test_autonomous_mode_resumes_user_input_on_turn_limit(): ) events = await _drain(workflow.run("Start", stream=True)) - requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] + requests = [ev for ev in events if ev.type == "request_info"] assert requests and len(requests) == 1, "Turn limit should force a user input request" assert requests[0].source_executor_id == worker.name @@ -253,7 +251,7 @@ async def async_termination(conv: list[ChatMessage]) -> bool: ) events = await _drain(workflow.run("First user message", stream=True)) - requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] + requests = [ev for ev in events if ev.type == "request_info"] assert requests events = await _drain( @@ -261,7 +259,7 @@ async def async_termination(conv: list[ChatMessage]) -> bool: requests[-1].request_id: [ChatMessage(role="user", text="Second user message")] }) ) - outputs = [ev for ev in events if isinstance(ev, WorkflowOutputEvent)] + outputs = [ev for ev in events if ev.type == "output"] assert len(outputs) == 1 final_conversation = outputs[0].data @@ -505,14 +503,14 @@ 
def create_specialist() -> MockHandoffAgent: assert call_count == 2 events = await _drain(workflow.run("Need help", stream=True)) - requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] + requests = [ev for ev in events if ev.type == "request_info"] assert requests # Follow-up message events = await _drain( workflow.send_responses_streaming({requests[-1].request_id: [ChatMessage(role="user", text="More details")]}) ) - outputs = [ev for ev in events if isinstance(ev, WorkflowOutputEvent)] + outputs = [ev for ev in events if ev.type == "output"] assert outputs @@ -576,7 +574,7 @@ def create_specialist_b() -> MockHandoffAgent: # Start conversation - triage hands off to specialist_a events = await _drain(workflow.run("Initial request", stream=True)) - requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] + requests = [ev for ev in events if ev.type == "request_info"] assert requests # Verify specialist_a executor exists and was called @@ -586,7 +584,7 @@ def create_specialist_b() -> MockHandoffAgent: events = await _drain( workflow.send_responses_streaming({requests[-1].request_id: [ChatMessage(role="user", text="Need escalation")]}) ) - requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] + requests = [ev for ev in events if ev.type == "request_info"] assert requests # Verify specialist_b executor exists @@ -615,13 +613,13 @@ def create_specialist() -> MockHandoffAgent: # Run workflow and capture output events = await _drain(workflow.run("checkpoint test", stream=True)) - requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] + requests = [ev for ev in events if ev.type == "request_info"] assert requests events = await _drain( workflow.send_responses_streaming({requests[-1].request_id: [ChatMessage(role="user", text="follow up")]}) ) - outputs = [ev for ev in events if isinstance(ev, WorkflowOutputEvent)] + outputs = [ev for ev in events if ev.type == "output"] assert outputs, "Should have workflow output 
after termination condition is met" # List checkpoints - just verify they were created @@ -693,7 +691,7 @@ def create_specialist() -> MockHandoffAgent: ) events = await _drain(workflow.run("Issue", stream=True)) - requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] + requests = [ev for ev in events if ev.type == "request_info"] assert requests and len(requests) == 1 assert requests[0].source_executor_id == "specialist" diff --git a/python/packages/orchestrations/tests/test_magentic.py b/python/packages/orchestrations/tests/test_magentic.py index 58943cdad4..d92e6aff47 100644 --- a/python/packages/orchestrations/tests/test_magentic.py +++ b/python/packages/orchestrations/tests/test_magentic.py @@ -15,15 +15,12 @@ ChatMessage, Content, Executor, - RequestInfoEvent, Workflow, WorkflowCheckpoint, WorkflowCheckpointException, WorkflowContext, WorkflowEvent, - WorkflowOutputEvent, WorkflowRunState, - WorkflowStatusEvent, handler, ) from agent_framework._workflows._checkpoint import InMemoryCheckpointStorage @@ -33,7 +30,6 @@ MagenticContext, MagenticManagerBase, MagenticOrchestrator, - MagenticOrchestratorEvent, MagenticPlanReviewRequest, MagenticProgressLedger, MagenticProgressLedgerItem, @@ -197,11 +193,11 @@ async def test_magentic_builder_returns_workflow_and_runs() -> None: outputs: list[ChatMessage] = [] orchestrator_event_count = 0 async for event in workflow.run("compose summary", stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": msg = event.data if isinstance(msg, list): outputs.extend(cast(list[ChatMessage], msg)) - elif isinstance(event, MagenticOrchestratorEvent): + elif event.type == "magentic_orchestrator": orchestrator_event_count += 1 assert outputs, "Expected a final output message" @@ -246,9 +242,9 @@ async def test_magentic_workflow_plan_review_approval_to_completion(): manager = FakeManager() wf = 
MagenticBuilder().participants([DummyExec("agentA")]).with_manager(manager=manager).with_plan_review().build() - req_event: RequestInfoEvent | None = None + req_event: WorkflowEvent | None = None async for ev in wf.run("do work", stream=True): - if isinstance(ev, RequestInfoEvent) and ev.request_type is MagenticPlanReviewRequest: + if ev.type == "request_info" and ev.request_type is MagenticPlanReviewRequest: req_event = ev assert req_event is not None assert isinstance(req_event.data, MagenticPlanReviewRequest) @@ -256,9 +252,9 @@ async def test_magentic_workflow_plan_review_approval_to_completion(): completed = False output: list[ChatMessage] | None = None async for ev in wf.send_responses_streaming(responses={req_event.request_id: req_event.data.approve()}): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: completed = True - elif isinstance(ev, WorkflowOutputEvent): + elif ev.type == "output": output = ev.data # type: ignore[assignment] if completed and output is not None: break @@ -291,9 +287,9 @@ async def replan(self, magentic_context: MagenticContext) -> ChatMessage: # typ ) # Wait for the initial plan review request - req_event: RequestInfoEvent | None = None + req_event: WorkflowEvent | None = None async for ev in wf.run("do work", stream=True): - if isinstance(ev, RequestInfoEvent) and ev.request_type is MagenticPlanReviewRequest: + if ev.type == "request_info" and ev.request_type is MagenticPlanReviewRequest: req_event = ev assert req_event is not None assert isinstance(req_event.data, MagenticPlanReviewRequest) @@ -304,7 +300,7 @@ async def replan(self, magentic_context: MagenticContext) -> ChatMessage: # typ async for ev in wf.send_responses_streaming( responses={req_event.request_id: req_event.data.revise("Looks good; consider Z")} ): - if isinstance(ev, RequestInfoEvent) and ev.request_type is MagenticPlanReviewRequest: + if ev.type == "request_info" and 
ev.request_type is MagenticPlanReviewRequest: saw_second_review = True req_event = ev @@ -312,7 +308,7 @@ async def replan(self, magentic_context: MagenticContext) -> ChatMessage: # typ async for ev in wf.send_responses_streaming( responses={req_event.request_id: req_event.data.approve()} # type: ignore[union-attr] ): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: completed = True break @@ -339,12 +335,12 @@ async def test_magentic_orchestrator_round_limit_produces_partial_result(): events.append(ev) idle_status = next( - (e for e in events if isinstance(e, WorkflowStatusEvent) and e.state == WorkflowRunState.IDLE), + (e for e in events if e.type == "status" and e.state == WorkflowRunState.IDLE), None, ) assert idle_status is not None - # Check that we got workflow output via WorkflowOutputEvent - output_event = next((e for e in events if isinstance(e, WorkflowOutputEvent)), None) + # Check that we got workflow output via WorkflowEvent with type "output" + output_event = next((e for e in events if e.type == "output"), None) assert output_event is not None data = output_event.data assert isinstance(data, list) @@ -367,9 +363,9 @@ async def test_magentic_checkpoint_resume_round_trip(): ) task_text = "checkpoint task" - req_event: RequestInfoEvent | None = None + req_event: WorkflowEvent | None = None async for ev in wf.run(task_text, stream=True): - if isinstance(ev, RequestInfoEvent) and ev.request_type is MagenticPlanReviewRequest: + if ev.type == "request_info" and ev.request_type is MagenticPlanReviewRequest: req_event = ev assert req_event is not None assert isinstance(req_event.data, MagenticPlanReviewRequest) @@ -389,20 +385,20 @@ async def test_magentic_checkpoint_resume_round_trip(): .build() ) - completed: WorkflowOutputEvent | None = None + completed: WorkflowEvent | None = None req_event = None async for event in wf_resume.run( resume_checkpoint.checkpoint_id, 
stream=True, ): - if isinstance(event, RequestInfoEvent) and event.request_type is MagenticPlanReviewRequest: + if event.type == "request_info" and event.request_type is MagenticPlanReviewRequest: req_event = event assert req_event is not None assert isinstance(req_event.data, MagenticPlanReviewRequest) responses = {req_event.request_id: req_event.data.approve()} async for event in wf_resume.send_responses_streaming(responses=responses): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": completed = event assert completed is not None @@ -595,7 +591,8 @@ async def _collect_agent_responses_setup(participant: AgentProtocol) -> list[Cha events: list[WorkflowEvent] = [] async for ev in wf.run("task", stream=True): # plan review disabled events.append(ev) - if isinstance(ev, WorkflowOutputEvent) and isinstance(ev.data, AgentResponseUpdate): + # Capture streaming updates (type="output" with AgentResponseUpdate data) + if ev.type == "output" and isinstance(ev.data, AgentResponseUpdate): captured.append( ChatMessage( role=ev.data.role or "assistant", @@ -603,6 +600,9 @@ async def _collect_agent_responses_setup(participant: AgentProtocol) -> list[Cha author_name=ev.data.author_name, ) ) + # Break on final AgentResponse output + elif ev.type == "output" and isinstance(ev.data, AgentResponse): + break return captured @@ -640,7 +640,7 @@ async def test_magentic_checkpoint_resume_inner_loop_superstep(): ) async for event in workflow.run("inner-loop task", stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": break checkpoints = await _collect_checkpoints(storage) @@ -654,9 +654,9 @@ async def test_magentic_checkpoint_resume_inner_loop_superstep(): .build() ) - completed: WorkflowOutputEvent | None = None + completed: WorkflowEvent | None = None async for event in resumed.run(checkpoint_id=inner_loop_checkpoint.checkpoint_id, stream=True): # type: ignore[reportUnknownMemberType] - if isinstance(event, WorkflowOutputEvent): 
+ if event.type == "output": completed = event assert completed is not None @@ -678,7 +678,7 @@ async def test_magentic_checkpoint_resume_from_saved_state(): ) async for event in workflow.run("checkpoint resume task", stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": break checkpoints = await _collect_checkpoints(storage) @@ -694,9 +694,9 @@ async def test_magentic_checkpoint_resume_from_saved_state(): .build() ) - completed: WorkflowOutputEvent | None = None + completed: WorkflowEvent | None = None async for event in resumed_workflow.run(checkpoint_id=resumed_state.checkpoint_id, stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": completed = event assert completed is not None @@ -716,9 +716,9 @@ async def test_magentic_checkpoint_resume_rejects_participant_renames(): .build() ) - req_event: RequestInfoEvent | None = None + req_event: WorkflowEvent | None = None async for event in workflow.run("task", stream=True): - if isinstance(event, RequestInfoEvent) and event.request_type is MagenticPlanReviewRequest: + if event.type == "request_info" and event.request_type is MagenticPlanReviewRequest: req_event = event assert req_event is not None @@ -778,11 +778,11 @@ async def test_magentic_stall_and_reset_reach_limits(): events.append(ev) idle_status = next( - (e for e in events if isinstance(e, WorkflowStatusEvent) and e.state == WorkflowRunState.IDLE), + (e for e in events if e.type == "status" and e.state == WorkflowRunState.IDLE), None, ) assert idle_status is not None - output_event = next((e for e in events if isinstance(e, WorkflowOutputEvent)), None) + output_event = next((e for e in events if e.type == "output"), None) assert output_event is not None assert isinstance(output_event.data, list) assert all(isinstance(msg, ChatMessage) for msg in output_event.data) # type: ignore @@ -800,9 +800,9 @@ async def test_magentic_checkpoint_runtime_only() -> None: baseline_output: ChatMessage | None 
= None async for ev in wf.run("runtime checkpoint test", checkpoint_storage=storage, stream=True): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": baseline_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state in ( + if ev.type == "status" and ev.state in ( WorkflowRunState.IDLE, WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, ): @@ -838,9 +838,9 @@ async def test_magentic_checkpoint_runtime_overrides_buildtime() -> None: baseline_output: ChatMessage | None = None async for ev in wf.run("override test", checkpoint_storage=runtime_storage, stream=True): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": baseline_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state in ( + if ev.type == "status" and ev.state in ( WorkflowRunState.IDLE, WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, ): @@ -897,7 +897,7 @@ async def test_magentic_checkpoint_restore_no_duplicate_history(): ] async for event in wf.run(conversation, stream=True): - if isinstance(event, WorkflowStatusEvent) and event.state in ( + if event.type == "status" and event.state in ( WorkflowRunState.IDLE, WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, ): @@ -1005,9 +1005,9 @@ def create_agent() -> StubAgent: # Factory should be called during build assert call_count == 1 - outputs: list[WorkflowOutputEvent] = [] + outputs: list[WorkflowEvent] = [] async for event in workflow.run("test task", stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": outputs.append(event) assert len(outputs) == 1 @@ -1052,9 +1052,9 @@ def create_agent() -> StubAgent: .build() ) - outputs: list[WorkflowOutputEvent] = [] + outputs: list[WorkflowEvent] = [] async for event in workflow.run("checkpoint test", stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": outputs.append(event) assert outputs, "Should have workflow output" @@ -1109,9 +1109,9 @@ def 
manager_factory() -> MagenticManagerBase: # Factory should be called during build assert factory_call_count == 1 - outputs: list[WorkflowOutputEvent] = [] + outputs: list[WorkflowEvent] = [] async for event in workflow.run("test task", stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": outputs.append(event) assert len(outputs) == 1 diff --git a/python/packages/orchestrations/tests/test_sequential.py b/python/packages/orchestrations/tests/test_sequential.py index 322f3ba7c0..68d78b1fa9 100644 --- a/python/packages/orchestrations/tests/test_sequential.py +++ b/python/packages/orchestrations/tests/test_sequential.py @@ -15,9 +15,7 @@ Executor, TypeCompatibilityError, WorkflowContext, - WorkflowOutputEvent, WorkflowRunState, - WorkflowStatusEvent, handler, ) from agent_framework._workflows._checkpoint import InMemoryCheckpointStorage @@ -106,9 +104,9 @@ async def test_sequential_agents_append_to_context() -> None: completed = False output: list[ChatMessage] | None = None async for ev in wf.run("hello sequential", stream=True): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: completed = True - elif isinstance(ev, WorkflowOutputEvent): + elif ev.type == "output": output = ev.data # type: ignore[assignment] if completed and output is not None: break @@ -139,9 +137,9 @@ def create_agent2() -> _EchoAgent: completed = False output: list[ChatMessage] | None = None async for ev in wf.run("hello factories", stream=True): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: completed = True - elif isinstance(ev, WorkflowOutputEvent): + elif ev.type == "output": output = ev.data if completed and output is not None: break @@ -165,9 +163,9 @@ async def test_sequential_with_custom_executor_summary() -> None: completed = False output: list[ChatMessage] | None = 
None async for ev in wf.run("topic X", stream=True): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: completed = True - elif isinstance(ev, WorkflowOutputEvent): + elif ev.type == "output": output = ev.data if completed and output is not None: break @@ -196,9 +194,9 @@ def create_summarizer() -> _SummarizerExec: completed = False output: list[ChatMessage] | None = None async for ev in wf.run("topic Y", stream=True): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: completed = True - elif isinstance(ev, WorkflowOutputEvent): + elif ev.type == "output": output = ev.data if completed and output is not None: break @@ -221,9 +219,9 @@ async def test_sequential_checkpoint_resume_round_trip() -> None: baseline_output: list[ChatMessage] | None = None async for ev in wf.run("checkpoint sequential", stream=True): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": baseline_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: break assert baseline_output is not None @@ -242,9 +240,9 @@ async def test_sequential_checkpoint_resume_round_trip() -> None: resumed_output: list[ChatMessage] | None = None async for ev in wf_resume.run(checkpoint_id=resume_checkpoint.checkpoint_id, stream=True): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": resumed_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state in ( + if ev.type == "status" and ev.state in ( WorkflowRunState.IDLE, WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, ): @@ -264,9 +262,9 @@ async def test_sequential_checkpoint_runtime_only() -> None: baseline_output: list[ChatMessage] | None = None async for ev in wf.run("runtime 
checkpoint test", checkpoint_storage=storage, stream=True): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": baseline_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: break assert baseline_output is not None @@ -287,9 +285,9 @@ async def test_sequential_checkpoint_runtime_only() -> None: async for ev in wf_resume.run( checkpoint_id=resume_checkpoint.checkpoint_id, checkpoint_storage=storage, stream=True ): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": resumed_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state in ( + if ev.type == "status" and ev.state in ( WorkflowRunState.IDLE, WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, ): @@ -315,9 +313,9 @@ async def test_sequential_checkpoint_runtime_overrides_buildtime() -> None: baseline_output: list[ChatMessage] | None = None async for ev in wf.run("override test", checkpoint_storage=runtime_storage, stream=True): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": baseline_output = ev.data # type: ignore[assignment] - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: break assert baseline_output is not None @@ -343,9 +341,9 @@ def create_agent2() -> _EchoAgent: baseline_output: list[ChatMessage] | None = None async for ev in wf.run("checkpoint with factories", stream=True): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": baseline_output = ev.data - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: break assert baseline_output is not None @@ -365,9 +363,9 @@ def create_agent2() -> _EchoAgent: resumed_output: list[ChatMessage] | None = None async for ev in 
wf_resume.run(checkpoint_id=resume_checkpoint.checkpoint_id, stream=True): - if isinstance(ev, WorkflowOutputEvent): + if ev.type == "output": resumed_output = ev.data - if isinstance(ev, WorkflowStatusEvent) and ev.state in ( + if ev.type == "status" and ev.state in ( WorkflowRunState.IDLE, WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, ): @@ -401,9 +399,9 @@ def create_agent() -> _EchoAgent: completed = False output: list[ChatMessage] | None = None async for ev in wf.run("test factories timing", stream=True): - if isinstance(ev, WorkflowStatusEvent) and ev.state == WorkflowRunState.IDLE: + if ev.type == "status" and ev.state == WorkflowRunState.IDLE: completed = True - elif isinstance(ev, WorkflowOutputEvent): + elif ev.type == "output": output = ev.data # type: ignore[assignment] if completed and output is not None: break diff --git a/python/samples/autogen-migration/orchestrations/01_round_robin_group_chat.py b/python/samples/autogen-migration/orchestrations/01_round_robin_group_chat.py index f89891ddc7..f00aafe91e 100644 --- a/python/samples/autogen-migration/orchestrations/01_round_robin_group_chat.py +++ b/python/samples/autogen-migration/orchestrations/01_round_robin_group_chat.py @@ -7,7 +7,7 @@ import asyncio -from agent_framework import AgentResponseUpdate, WorkflowOutputEvent +from agent_framework import AgentResponseUpdate async def run_autogen() -> None: @@ -55,8 +55,8 @@ async def run_autogen() -> None: async def run_agent_framework() -> None: """Agent Framework's SequentialBuilder for sequential agent orchestration.""" - from agent_framework import SequentialBuilder from agent_framework.openai import OpenAIChatClient + from agent_framework.orchestrations import SequentialBuilder client = OpenAIChatClient(model_id="gpt-4.1-mini") @@ -83,15 +83,14 @@ async def run_agent_framework() -> None: print("[Agent Framework] Sequential conversation:") current_executor = None async for event in workflow.run("Create a brief summary about electric vehicles", 
stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output" and isinstance(event.data, AgentResponseUpdate): # Print executor name header when switching to a new agent if current_executor != event.executor_id: if current_executor is not None: print() # Newline after previous agent's message print(f"---------- {event.executor_id} ----------") current_executor = event.executor_id - if isinstance(event.data, AgentResponseUpdate): - print(event.data.text, end="", flush=True) + print(event.data.text, end="", flush=True) print() # Final newline after conversation @@ -100,9 +99,9 @@ async def run_agent_framework_with_cycle() -> None: from agent_framework import ( AgentExecutorRequest, AgentExecutorResponse, + AgentResponseUpdate, WorkflowBuilder, WorkflowContext, - WorkflowOutputEvent, executor, ) from agent_framework.openai import OpenAIChatClient @@ -154,7 +153,10 @@ async def check_approval( print("[Agent Framework with Cycle] Cyclic conversation:") current_executor = None async for event in workflow.run("Create a brief summary about electric vehicles", stream=True): - if isinstance(event, WorkflowOutputEvent) and isinstance(event.data, AgentResponseUpdate): + if event.type == "output" and not isinstance(event.data, AgentResponseUpdate): + print("\n---------- Workflow Output ----------") + print(event.data) + elif event.type == "output" and isinstance(event.data, AgentResponseUpdate): # Print executor name header when switching to a new agent if current_executor != event.executor_id: if current_executor is not None: diff --git a/python/samples/autogen-migration/orchestrations/02_selector_group_chat.py b/python/samples/autogen-migration/orchestrations/02_selector_group_chat.py index 6eae117432..476d8008e9 100644 --- a/python/samples/autogen-migration/orchestrations/02_selector_group_chat.py +++ b/python/samples/autogen-migration/orchestrations/02_selector_group_chat.py @@ -7,7 +7,7 @@ import asyncio -from agent_framework import 
AgentResponseUpdate, WorkflowOutputEvent +from agent_framework import AgentResponseUpdate async def run_autogen() -> None: @@ -61,8 +61,8 @@ async def run_autogen() -> None: async def run_agent_framework() -> None: """Agent Framework's GroupChatBuilder with LLM-based speaker selection.""" - from agent_framework import GroupChatBuilder from agent_framework.openai import OpenAIChatClient + from agent_framework.orchestrations import GroupChatBuilder client = OpenAIChatClient(model_id="gpt-4.1-mini") @@ -102,7 +102,7 @@ async def run_agent_framework() -> None: print("[Agent Framework] Group chat conversation:") current_executor = None async for event in workflow.run("How do I connect to a PostgreSQL database using Python?", stream=True): - if isinstance(event, WorkflowOutputEvent) and isinstance(event.data, AgentResponseUpdate): + if event.type == "output" and isinstance(event.data, AgentResponseUpdate): # Print executor name header when switching to a new agent if current_executor != event.executor_id: if current_executor is not None: diff --git a/python/samples/autogen-migration/orchestrations/03_swarm.py b/python/samples/autogen-migration/orchestrations/03_swarm.py index df398a96ea..20466fde98 100644 --- a/python/samples/autogen-migration/orchestrations/03_swarm.py +++ b/python/samples/autogen-migration/orchestrations/03_swarm.py @@ -7,7 +7,8 @@ import asyncio -from agent_framework import AgentResponseUpdate, HandoffAgentUserRequest, WorkflowOutputEvent +from agent_framework import WorkflowEvent +from typing import Any async def run_autogen() -> None: @@ -98,12 +99,11 @@ async def run_autogen() -> None: async def run_agent_framework() -> None: """Agent Framework's HandoffBuilder for agent coordination.""" from agent_framework import ( - HandoffBuilder, - RequestInfoEvent, + AgentResponseUpdate, WorkflowRunState, - WorkflowStatusEvent, ) from agent_framework.openai import OpenAIChatClient + from agent_framework.orchestrations import HandoffAgentUserRequest, 
HandoffBuilder client = OpenAIChatClient(model_id="gpt-4.1-mini") @@ -159,10 +159,10 @@ async def run_agent_framework() -> None: current_executor = None stream_line_open = False - pending_requests: list[RequestInfoEvent] = [] + pending_requests: list[WorkflowEvent] = [] async for event in workflow.run(scripted_responses[0], stream=True): - if isinstance(event, WorkflowOutputEvent) and isinstance(event.data, AgentResponseUpdate): + if event.type == "output" and isinstance(event.data, AgentResponseUpdate): # Print executor name header when switching to a new agent if current_executor != event.executor_id: if stream_line_open: @@ -173,10 +173,10 @@ async def run_agent_framework() -> None: stream_line_open = True if event.data: print(event.data.text, end="", flush=True) - elif isinstance(event, RequestInfoEvent): + elif event.type == "request_info": if isinstance(event.data, HandoffAgentUserRequest): pending_requests.append(event) - elif isinstance(event, WorkflowStatusEvent): + elif event.type == "status": if event.state in {WorkflowRunState.IDLE_WITH_PENDING_REQUESTS} and stream_line_open: print() stream_line_open = False @@ -188,13 +188,13 @@ async def run_agent_framework() -> None: print("---------- user ----------") print(user_response) - responses = {req.request_id: user_response for req in pending_requests} + responses: dict[str, Any] = {req.request_id: user_response for req in pending_requests} # type: ignore pending_requests = [] current_executor = None stream_line_open = False async for event in workflow.send_responses_streaming(responses): - if isinstance(event, WorkflowOutputEvent) and isinstance(event.data, AgentResponseUpdate): + if event.type == "output" and isinstance(event.data, AgentResponseUpdate): # Print executor name header when switching to a new agent if current_executor != event.executor_id: if stream_line_open: @@ -205,10 +205,10 @@ async def run_agent_framework() -> None: stream_line_open = True if event.data: print(event.data.text, end="", 
flush=True) - elif isinstance(event, RequestInfoEvent): + elif event.type == "request_info": if isinstance(event.data, HandoffAgentUserRequest): pending_requests.append(event) - elif isinstance(event, WorkflowStatusEvent): + elif event.type == "status": if ( event.state in {WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, WorkflowRunState.IDLE} and stream_line_open diff --git a/python/samples/autogen-migration/orchestrations/04_magentic_one.py b/python/samples/autogen-migration/orchestrations/04_magentic_one.py index 1fc4e88d31..201e653693 100644 --- a/python/samples/autogen-migration/orchestrations/04_magentic_one.py +++ b/python/samples/autogen-migration/orchestrations/04_magentic_one.py @@ -12,10 +12,9 @@ from agent_framework import ( AgentResponseUpdate, ChatMessage, - MagenticOrchestratorEvent, - MagenticProgressLedger, - WorkflowOutputEvent, + WorkflowEvent, ) +from agent_framework.orchestrations import MagenticProgressLedger async def run_autogen() -> None: @@ -67,8 +66,8 @@ async def run_autogen() -> None: async def run_agent_framework() -> None: """Agent Framework's MagenticBuilder for orchestrated collaboration.""" - from agent_framework import MagenticBuilder from agent_framework.openai import OpenAIChatClient + from agent_framework.orchestrations import MagenticBuilder client = OpenAIChatClient(model_id="gpt-4.1-mini") @@ -110,10 +109,10 @@ async def run_agent_framework() -> None: # Run complex task last_message_id: str | None = None - output_event: WorkflowOutputEvent | None = None + output_event: WorkflowEvent | None = None print("[Agent Framework] Magentic conversation:") async for event in workflow.run("Research Python async patterns and write a simple example", stream=True): - if isinstance(event, WorkflowOutputEvent) and isinstance(event.data, AgentResponseUpdate): + if event.type == "output" and isinstance(event.data, AgentResponseUpdate): message_id = event.data.message_id if message_id != last_message_id: if last_message_id is not None: @@ -122,21 
+121,21 @@ async def run_agent_framework() -> None: last_message_id = message_id print(event.data, end="", flush=True) - elif isinstance(event, MagenticOrchestratorEvent): - print(f"\n[Magentic Orchestrator Event] Type: {event.event_type.name}") - if isinstance(event.data, ChatMessage): - print(f"Please review the plan:\n{event.data.text}") - elif isinstance(event.data, MagenticProgressLedger): - print(f"Please review progress ledger:\n{json.dumps(event.data.to_dict(), indent=2)}") + elif event.type == "magentic_orchestrator": + print(f"\n[Magentic Orchestrator Event] Type: {event.data.event_type.name}") + if isinstance(event.data.content, ChatMessage): + print(f"Please review the plan:\n{event.data.content.text}") + elif isinstance(event.data.content, MagenticProgressLedger): + print(f"Please review progress ledger:\n{json.dumps(event.data.content.to_dict(), indent=2)}") else: - print(f"Unknown data type in MagenticOrchestratorEvent: {type(event.data)}") + print(f"Unknown data type in MagenticOrchestratorEvent: {type(event.data.content)}") # Block to allow user to read the plan/progress before continuing # Note: this is for demonstration only and is not the recommended way to handle human interaction. # Please refer to `with_plan_review` for proper human interaction during planning phases. 
await asyncio.get_event_loop().run_in_executor(None, input, "Press Enter to continue...") - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": output_event = event if not output_event: diff --git a/python/samples/demos/workflow_evaluation/create_workflow.py b/python/samples/demos/workflow_evaluation/create_workflow.py index e32916a864..c8033fd2ae 100644 --- a/python/samples/demos/workflow_evaluation/create_workflow.py +++ b/python/samples/demos/workflow_evaluation/create_workflow.py @@ -48,12 +48,10 @@ from agent_framework import ( AgentExecutorResponse, AgentResponseUpdate, - AgentRunUpdateEvent, ChatMessage, Executor, WorkflowBuilder, WorkflowContext, - WorkflowOutputEvent, executor, handler, ) @@ -355,7 +353,7 @@ async def _process_workflow_events(events, conversation_ids, response_ids): workflow_output = None async for event in events: - if isinstance(event, WorkflowOutputEvent): + if event.type == "output" and not isinstance(event.data, AgentResponseUpdate): workflow_output = event.data # Handle Unicode characters that may not be displayable in Windows console try: @@ -364,7 +362,7 @@ async def _process_workflow_events(events, conversation_ids, response_ids): output_str = str(event.data).encode("ascii", "replace").decode("ascii") print(f"\nWorkflow Output: {output_str}\n") - elif isinstance(event, AgentRunUpdateEvent): + elif event.type == "output" and isinstance(event.data, AgentResponseUpdate): _track_agent_ids(event, event.executor_id, response_ids, conversation_ids) return workflow_output diff --git a/python/samples/getting_started/observability/workflow_observability.py b/python/samples/getting_started/observability/workflow_observability.py index 96a3565476..e08eaa37af 100644 --- a/python/samples/getting_started/observability/workflow_observability.py +++ b/python/samples/getting_started/observability/workflow_observability.py @@ -6,7 +6,7 @@ Executor, WorkflowBuilder, WorkflowContext, - WorkflowOutputEvent, + handler, ) from agent_framework.observability import 
configure_otel_providers, get_tracer @@ -93,7 +93,7 @@ async def run_sequential_workflow() -> None: output_event = None async for event in workflow.run("Hello world", stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": # The WorkflowOutputEvent contains the final result. output_event = event diff --git a/python/samples/getting_started/orchestrations/README.md b/python/samples/getting_started/orchestrations/README.md index d1fb0e0ef0..14f0be5fad 100644 --- a/python/samples/getting_started/orchestrations/README.md +++ b/python/samples/getting_started/orchestrations/README.md @@ -57,9 +57,9 @@ from agent_framework.orchestrations import ( **Sequential orchestration note**: Sequential orchestration uses a few small adapter nodes for plumbing: - `input-conversation` normalizes input to `list[ChatMessage]` - `to-conversation:` converts agent responses into the shared conversation -- `complete` publishes the final `WorkflowOutputEvent` +- `complete` publishes the final output event (type='output') -These may appear in event streams (ExecutorInvoke/Completed). They're analogous to concurrent's dispatcher and aggregator and can be ignored if you only care about agent activity. +These may appear in event streams (executor_invoked/executor_completed). They're analogous to concurrent's dispatcher and aggregator and can be ignored if you only care about agent activity. 
## Environment Variables diff --git a/python/samples/getting_started/orchestrations/concurrent_agents.py b/python/samples/getting_started/orchestrations/concurrent_agents.py index cece1f616a..b2886f8497 100644 --- a/python/samples/getting_started/orchestrations/concurrent_agents.py +++ b/python/samples/getting_started/orchestrations/concurrent_agents.py @@ -23,7 +23,7 @@ Prerequisites: - Azure OpenAI access configured for AzureOpenAIChatClient (use az login + env vars) -- Familiarity with Workflow events (WorkflowOutputEvent) +- Familiarity with Workflow events (WorkflowEvent) """ diff --git a/python/samples/getting_started/orchestrations/group_chat_agent_manager.py b/python/samples/getting_started/orchestrations/group_chat_agent_manager.py index f9e7a072a1..9624e2ed5b 100644 --- a/python/samples/getting_started/orchestrations/group_chat_agent_manager.py +++ b/python/samples/getting_started/orchestrations/group_chat_agent_manager.py @@ -7,7 +7,6 @@ AgentResponseUpdate, ChatAgent, ChatMessage, - WorkflowOutputEvent, ) from agent_framework.azure import AzureOpenAIChatClient from agent_framework.orchestrations import GroupChatBuilder @@ -74,7 +73,7 @@ async def main() -> None: # The agent orchestrator will intelligently decide when to end before this limit but just in case .with_termination_condition(lambda messages: sum(1 for msg in messages if msg.role == "assistant") >= 4) # Enable intermediate outputs to observe the conversation as it unfolds - # Intermediate outputs will be emitted as WorkflowOutputEvent events + # Intermediate outputs will be emitted as WorkflowEvent with type "output" events .with_intermediate_outputs() .build() ) @@ -88,7 +87,7 @@ async def main() -> None: # Keep track of the last response to format output nicely in streaming mode last_response_id: str | None = None async for event in workflow.run(task, stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": data = event.data if isinstance(data, 
AgentResponseUpdate): rid = data.response_id @@ -98,7 +97,7 @@ async def main() -> None: print(f"{data.author_name}:", end=" ", flush=True) last_response_id = rid print(data.text, end="", flush=True) - else: + elif event.type == "output": # The output of the group chat workflow is a collection of chat messages from all participants outputs = cast(list[ChatMessage], event.data) print("\n" + "=" * 80) diff --git a/python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py b/python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py index 70154d07f4..a8e06e55d7 100644 --- a/python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py +++ b/python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py @@ -8,7 +8,6 @@ AgentResponseUpdate, ChatAgent, ChatMessage, - WorkflowOutputEvent, ) from agent_framework.azure import AzureOpenAIChatClient from agent_framework.orchestrations import GroupChatBuilder @@ -214,7 +213,7 @@ async def main() -> None: .participants([farmer, developer, teacher, activist, spiritual_leader, artist, immigrant, doctor]) .with_termination_condition(lambda messages: sum(1 for msg in messages if msg.role == "assistant") >= 10) # Enable intermediate outputs to observe the conversation as it unfolds - # Intermediate outputs will be emitted as WorkflowOutputEvent events + # Intermediate outputs will be emitted as WorkflowEvent with type "output" events .with_intermediate_outputs() .build() ) @@ -241,7 +240,7 @@ async def main() -> None: # Keep track of the last response to format output nicely in streaming mode last_response_id: str | None = None async for event in workflow.run(f"Please begin the discussion on: {topic}", stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": data = event.data if isinstance(data, AgentResponseUpdate): rid = data.response_id @@ -251,7 +250,7 @@ async def main() -> None: print(f"{data.author_name}:", end=" ", 
flush=True) last_response_id = rid print(data.text, end="", flush=True) - else: + elif event.type == "output": # The output of the group chat workflow is a collection of chat messages from all participants outputs = cast(list[ChatMessage], event.data) print("\n" + "=" * 80) diff --git a/python/samples/getting_started/orchestrations/group_chat_simple_selector.py b/python/samples/getting_started/orchestrations/group_chat_simple_selector.py index f2e5560128..3e7ea3fe11 100644 --- a/python/samples/getting_started/orchestrations/group_chat_simple_selector.py +++ b/python/samples/getting_started/orchestrations/group_chat_simple_selector.py @@ -7,7 +7,6 @@ AgentResponseUpdate, ChatAgent, ChatMessage, - WorkflowOutputEvent, ) from agent_framework.azure import AzureOpenAIChatClient from agent_framework.orchestrations import GroupChatBuilder, GroupChatState @@ -92,7 +91,7 @@ async def main() -> None: # have nothing to add, but for demo purposes we want to see at least one full round of interaction. 
.with_termination_condition(lambda conversation: len(conversation) >= 6) # Enable intermediate outputs to observe the conversation as it unfolds - # Intermediate outputs will be emitted as WorkflowOutputEvent events + # Intermediate outputs will be emitted as WorkflowEvent with type "output" events .with_intermediate_outputs() .build() ) @@ -106,7 +105,7 @@ async def main() -> None: # Keep track of the last response to format output nicely in streaming mode last_response_id: str | None = None async for event in workflow.run(task, stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": data = event.data if isinstance(data, AgentResponseUpdate): rid = data.response_id @@ -116,7 +115,7 @@ async def main() -> None: print(f"{data.author_name}:", end=" ", flush=True) last_response_id = rid print(data.text, end="", flush=True) - else: + elif event.type == "output": # The output of the group chat workflow is a collection of chat messages from all participants outputs = cast(list[ChatMessage], event.data) print("\n" + "=" * 80) diff --git a/python/samples/getting_started/orchestrations/handoff_autonomous.py b/python/samples/getting_started/orchestrations/handoff_autonomous.py index 76a5c7cfd2..faadd8486e 100644 --- a/python/samples/getting_started/orchestrations/handoff_autonomous.py +++ b/python/samples/getting_started/orchestrations/handoff_autonomous.py @@ -8,8 +8,6 @@ AgentResponseUpdate, ChatAgent, ChatMessage, - HandoffSentEvent, - WorkflowOutputEvent, resolve_agent_id, ) from agent_framework.azure import AzureOpenAIChatClient @@ -112,9 +110,9 @@ async def main() -> None: last_response_id: str | None = None async for event in workflow.run(request, stream=True): - if isinstance(event, HandoffSentEvent): - print(f"\nHandoff Event: from {event.source} to {event.target}\n") - elif isinstance(event, WorkflowOutputEvent): + if event.type == "handoff_sent": + print(f"\nHandoff Event: from {event.data.source} to {event.data.target}\n") + elif 
event.type == "output": data = event.data if isinstance(data, AgentResponseUpdate): if not data.text: @@ -128,8 +126,8 @@ async def main() -> None: print(f"{data.author_name}:", end=" ", flush=True) last_response_id = rid print(data.text, end="", flush=True) - else: - # The output of the group chat workflow is a collection of chat messages from all participants + elif event.type == "output": + # The output of the handoff workflow is a collection of chat messages from all participants outputs = cast(list[ChatMessage], event.data) print("\n" + "=" * 80) print("\nFinal Conversation Transcript:\n") diff --git a/python/samples/getting_started/orchestrations/handoff_participant_factory.py b/python/samples/getting_started/orchestrations/handoff_participant_factory.py index ee5c8830bc..100bc1be03 100644 --- a/python/samples/getting_started/orchestrations/handoff_participant_factory.py +++ b/python/samples/getting_started/orchestrations/handoff_participant_factory.py @@ -8,16 +8,13 @@ AgentResponse, ChatAgent, ChatMessage, - RequestInfoEvent, Workflow, WorkflowEvent, - WorkflowOutputEvent, WorkflowRunState, - WorkflowStatusEvent, tool, ) from agent_framework.azure import AzureOpenAIChatClient -from agent_framework.orchestrations import HandoffAgentUserRequest, HandoffBuilder, HandoffSentEvent +from agent_framework.orchestrations import HandoffAgentUserRequest, HandoffBuilder from azure.identity import AzureCliCredential logging.basicConfig(level=logging.ERROR) @@ -107,35 +104,35 @@ def create_return_agent() -> ChatAgent: ) -def _handle_events(events: list[WorkflowEvent]) -> list[RequestInfoEvent]: +def _handle_events(events: list[WorkflowEvent]) -> list[WorkflowEvent[HandoffAgentUserRequest]]: """Process workflow events and extract any pending user input requests. This function inspects each event type and: - Prints workflow status changes (IDLE, IDLE_WITH_PENDING_REQUESTS, etc.) 
- Displays final conversation snapshots when workflow completes - Prints user input request prompts - - Collects all RequestInfoEvent instances for response handling + - Collects all request_info events for response handling Args: events: List of WorkflowEvent to process Returns: - List of RequestInfoEvent representing pending user input requests + List of WorkflowEvent[HandoffAgentUserRequest] representing pending user input requests """ - requests: list[RequestInfoEvent] = [] + requests: list[WorkflowEvent[HandoffAgentUserRequest]] = [] for event in events: - if isinstance(event, HandoffSentEvent): - # HandoffSentEvent: Indicates a handoff has been initiated - print(f"\n[Handoff from {event.source} to {event.target} initiated.]") - elif isinstance(event, WorkflowStatusEvent) and event.state in { + if event.type == "handoff_sent": + # handoff_sent event: Indicates a handoff has been initiated + print(f"\n[Handoff from {event.data.source} to {event.data.target} initiated.]") + elif event.type == "status" and event.state in { WorkflowRunState.IDLE, WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, }: - # WorkflowStatusEvent: Indicates workflow state changes + # Status event: Indicates workflow state changes print(f"\n[Workflow Status] {event.state.name}") - elif isinstance(event, WorkflowOutputEvent): - # WorkflowOutputEvent: Contains contents generated by the workflow + elif event.type == "output": + # Output event: Contains contents generated by the workflow data = event.data if isinstance(data, AgentResponse): for message in data.messages: @@ -144,7 +141,7 @@ def _handle_events(events: list[WorkflowEvent]) -> list[RequestInfoEvent]: continue speaker = message.author_name or message.role print(f"- {speaker}: {message.text}") - else: + elif event.type == "output": # The output of the handoff workflow is a collection of chat messages from all participants conversation = cast(list[ChatMessage], event.data) if isinstance(conversation, list): @@ -153,11 +150,11 @@ def 
_handle_events(events: list[WorkflowEvent]) -> list[RequestInfoEvent]: speaker = message.author_name or message.role print(f"- {speaker}: {message.text or [content.type for content in message.contents]}") print("===================================") - elif isinstance(event, RequestInfoEvent): - # RequestInfoEvent: Workflow is requesting user input + elif event.type == "request_info": + # Request info event: Workflow is requesting user input if isinstance(event.data, HandoffAgentUserRequest): _print_handoff_agent_user_request(event.data.agent_response) - requests.append(event) + requests.append(cast(WorkflowEvent[HandoffAgentUserRequest], event)) return requests diff --git a/python/samples/getting_started/orchestrations/handoff_simple.py b/python/samples/getting_started/orchestrations/handoff_simple.py index d439d5a719..d32c92aca9 100644 --- a/python/samples/getting_started/orchestrations/handoff_simple.py +++ b/python/samples/getting_started/orchestrations/handoff_simple.py @@ -7,15 +7,12 @@ AgentResponse, ChatAgent, ChatMessage, - RequestInfoEvent, WorkflowEvent, - WorkflowOutputEvent, WorkflowRunState, - WorkflowStatusEvent, tool, ) from agent_framework.azure import AzureOpenAIChatClient -from agent_framework.orchestrations import HandoffAgentUserRequest, HandoffBuilder, HandoffSentEvent +from agent_framework.orchestrations import HandoffAgentUserRequest, HandoffBuilder from azure.identity import AzureCliCredential """Sample: Simple handoff workflow. @@ -102,35 +99,35 @@ def create_agents(chat_client: AzureOpenAIChatClient) -> tuple[ChatAgent, ChatAg return triage_agent, refund_agent, order_agent, return_agent -def _handle_events(events: list[WorkflowEvent]) -> list[RequestInfoEvent]: +def _handle_events(events: list[WorkflowEvent]) -> list[WorkflowEvent[HandoffAgentUserRequest]]: """Process workflow events and extract any pending user input requests. 
This function inspects each event type and: - Prints workflow status changes (IDLE, IDLE_WITH_PENDING_REQUESTS, etc.) - Displays final conversation snapshots when workflow completes - Prints user input request prompts - - Collects all RequestInfoEvent instances for response handling + - Collects all request_info events for response handling Args: events: List of WorkflowEvent to process Returns: - List of RequestInfoEvent representing pending user input requests + List of WorkflowEvent[HandoffAgentUserRequest] representing pending user input requests """ - requests: list[RequestInfoEvent] = [] + requests: list[WorkflowEvent[HandoffAgentUserRequest]] = [] for event in events: - if isinstance(event, HandoffSentEvent): - # HandoffSentEvent: Indicates a handoff has been initiated - print(f"\n[Handoff from {event.source} to {event.target} initiated.]") - elif isinstance(event, WorkflowStatusEvent) and event.state in { + if event.type == "handoff_sent": + # handoff_sent event: Indicates a handoff has been initiated + print(f"\n[Handoff from {event.data.source} to {event.data.target} initiated.]") + elif event.type == "status" and event.state in { WorkflowRunState.IDLE, WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, }: - # WorkflowStatusEvent: Indicates workflow state changes - print(f"\n[Workflow Status] {event.state.name}") - elif isinstance(event, WorkflowOutputEvent): - # WorkflowOutputEvent: Contains contents generated by the workflow + # Status event: Indicates workflow state changes + print(f"\n[Workflow Status] {event.state}") + elif event.type == "output": + # Output event: Contains contents generated by the workflow data = event.data if isinstance(data, AgentResponse): for message in data.messages: @@ -139,7 +136,7 @@ def _handle_events(events: list[WorkflowEvent]) -> list[RequestInfoEvent]: continue speaker = message.author_name or message.role print(f"- {speaker}: {message.text}") - else: + elif event.type == "output": # The output of the handoff workflow is a 
collection of chat messages from all participants conversation = cast(list[ChatMessage], event.data) if isinstance(conversation, list): @@ -148,11 +145,9 @@ def _handle_events(events: list[WorkflowEvent]) -> list[RequestInfoEvent]: speaker = message.author_name or message.role print(f"- {speaker}: {message.text or [content.type for content in message.contents]}") print("===================================") - elif isinstance(event, RequestInfoEvent): - # RequestInfoEvent: Workflow is requesting user input - if isinstance(event.data, HandoffAgentUserRequest): - _print_handoff_agent_user_request(event.data.agent_response) - requests.append(event) + elif event.type == "request_info" and isinstance(event.data, HandoffAgentUserRequest): + _print_handoff_agent_user_request(event.data.agent_response) + requests.append(cast(WorkflowEvent[HandoffAgentUserRequest], event)) return requests diff --git a/python/samples/getting_started/orchestrations/handoff_with_code_interpreter_file.py b/python/samples/getting_started/orchestrations/handoff_with_code_interpreter_file.py index d6b335e15c..d0bbb02e2e 100644 --- a/python/samples/getting_started/orchestrations/handoff_with_code_interpreter_file.py +++ b/python/samples/getting_started/orchestrations/handoff_with_code_interpreter_file.py @@ -6,7 +6,7 @@ This sample demonstrates retrieving file IDs from code interpreter output in a handoff workflow context. A triage agent routes to a code specialist that generates a text file, and we verify the file_id is captured correctly -from the streaming WorkflowOutputEvent events. +from the streaming workflow events. Verifies GitHub issue #2718: files generated by code interpreter in HandoffBuilder workflows can be properly retrieved. 
@@ -34,13 +34,9 @@ AgentResponseUpdate, ChatAgent, ChatMessage, - HandoffSentEvent, HostedCodeInterpreterTool, - RequestInfoEvent, WorkflowEvent, - WorkflowOutputEvent, WorkflowRunState, - WorkflowStatusEvent, ) from agent_framework.orchestrations import HandoffAgentUserRequest, HandoffBuilder from azure.identity.aio import AzureCliCredential @@ -54,30 +50,29 @@ async def _drain(stream: AsyncIterable[WorkflowEvent]) -> list[WorkflowEvent]: return [event async for event in stream] -def _handle_events(events: list[WorkflowEvent]) -> tuple[list[RequestInfoEvent], list[str]]: +def _handle_events(events: list[WorkflowEvent]) -> tuple[list[WorkflowEvent[HandoffAgentUserRequest]], list[str]]: """Process workflow events and extract file IDs and pending requests. Returns: Tuple of (pending_requests, file_ids_found) """ - requests: list[RequestInfoEvent] = [] + + requests: list[WorkflowEvent[HandoffAgentUserRequest]] = [] file_ids: list[str] = [] for event in events: - if isinstance(event, HandoffSentEvent): - # HandoffSentEvent: Indicates a handoff has been initiated - print(f"\n[Handoff from {event.source} to {event.target} initiated.]") - elif isinstance(event, WorkflowStatusEvent) and event.state in { + if event.type == "handoff_sent": + print(f"\n[Handoff from {event.data.source} to {event.data.target} initiated.]") + elif event.type == "status" and event.state in { WorkflowRunState.IDLE, WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, }: - # WorkflowStatusEvent: Indicates workflow state changes - print(f"\n[Workflow Status] {event.state.name}") - elif isinstance(event, WorkflowOutputEvent): - # WorkflowOutputEvent: Contains contents generated by the workflow + print(f"[status] {event.state.name}") + elif event.type == "request_info" and isinstance(event.data, HandoffAgentUserRequest): + requests.append(cast(WorkflowEvent[HandoffAgentUserRequest], event)) + elif event.type == "output": data = event.data if isinstance(data, AgentResponseUpdate): - # AgentResponseUpdate: 
Intermediate output from an agent for content in data.contents: if content.type == "hosted_file": file_ids.append(content.file_id) # type: ignore @@ -87,8 +82,7 @@ def _handle_events(events: list[WorkflowEvent]) -> tuple[list[RequestInfoEvent], file_id = annotation["file_id"] # type: ignore file_ids.append(file_id) print(f"[Found file annotation: file_id={file_id}]") - else: - # The output of the handoff workflow is a collection of chat messages from all participants + elif event.type == "output": conversation = cast(list[ChatMessage], event.data) if isinstance(conversation, list): print("\n=== Final Conversation Snapshot ===") @@ -96,9 +90,6 @@ def _handle_events(events: list[WorkflowEvent]) -> tuple[list[RequestInfoEvent], speaker = message.author_name or message.role print(f"- {speaker}: {message.text or [content.type for content in message.contents]}") print("===================================") - elif isinstance(event, RequestInfoEvent): - # RequestInfoEvent: Workflow is requesting user input - requests.append(event) return requests, file_ids diff --git a/python/samples/getting_started/orchestrations/magentic.py b/python/samples/getting_started/orchestrations/magentic.py index ae426685d9..cc1cb304ab 100644 --- a/python/samples/getting_started/orchestrations/magentic.py +++ b/python/samples/getting_started/orchestrations/magentic.py @@ -9,12 +9,11 @@ AgentResponseUpdate, ChatAgent, ChatMessage, - GroupChatRequestSentEvent, HostedCodeInterpreterTool, - WorkflowOutputEvent, + WorkflowEvent, ) from agent_framework.openai import OpenAIChatClient, OpenAIResponsesClient -from agent_framework.orchestrations import MagenticBuilder, MagenticOrchestratorEvent, MagenticProgressLedger +from agent_framework.orchestrations import GroupChatRequestSentEvent, MagenticBuilder, MagenticProgressLedger logging.basicConfig(level=logging.WARNING) logger = logging.getLogger(__name__) @@ -85,7 +84,7 @@ async def main() -> None: max_reset_count=2, ) # Enable intermediate outputs to 
observe the conversation as it unfolds - # Intermediate outputs will be emitted as WorkflowOutputEvent events + # Intermediate outputs will be emitted as WorkflowEvent events .with_intermediate_outputs() .build() ) @@ -104,41 +103,44 @@ async def main() -> None: # Keep track of the last executor to format output nicely in streaming mode last_response_id: str | None = None + output_event: WorkflowEvent | None = None async for event in workflow.run(task, stream=True): - if isinstance(event, MagenticOrchestratorEvent): - print(f"\n[Magentic Orchestrator Event] Type: {event.event_type.name}") - if isinstance(event.data, ChatMessage): - print(f"Please review the plan:\n{event.data.text}") - elif isinstance(event.data, MagenticProgressLedger): - print(f"Please review progress ledger:\n{json.dumps(event.data.to_dict(), indent=2)}") + if event.type == "output" and isinstance(event.data, AgentResponseUpdate): + response_id = event.data.response_id + if response_id != last_response_id: + if last_response_id is not None: + print("\n") + print(f"- {event.executor_id}:", end=" ", flush=True) + last_response_id = response_id + print(event.data, end="", flush=True) + + elif event.type == "magentic_orchestrator": + print(f"\n[Magentic Orchestrator Event] Type: {event.data.event_type.name}") + if isinstance(event.data.content, ChatMessage): + print(f"Please review the plan:\n{event.data.content.text}") + elif isinstance(event.data.content, MagenticProgressLedger): + print(f"Please review progress ledger:\n{json.dumps(event.data.content.to_dict(), indent=2)}") else: - print(f"Unknown data type in MagenticOrchestratorEvent: {type(event.data)}") + print(f"Unknown data type in MagenticOrchestratorEvent: {type(event.data.content)}") # Block to allow user to read the plan/progress before continuing # Note: this is for demonstration only and is not the recommended way to handle human interaction. # Please refer to `with_plan_review` for proper human interaction during planning phases. 
await asyncio.get_event_loop().run_in_executor(None, input, "Press Enter to continue...") - elif isinstance(event, GroupChatRequestSentEvent): - print(f"\n[REQUEST SENT ({event.round_index})] to agent: {event.participant_name}") - - elif isinstance(event, WorkflowOutputEvent): - data = event.data - if isinstance(data, AgentResponseUpdate): - response_id = data.response_id - if response_id != last_response_id: - if last_response_id is not None: - print("\n") - print(f"- {event.executor_id}:", end=" ", flush=True) - last_response_id = response_id - print(event.data, end="", flush=True) - else: - # The output of the magentic workflow is a collection of chat messages from all participants - outputs = cast(list[ChatMessage], event.data) - print("\n" + "=" * 80) - print("\nFinal Conversation Transcript:\n") - for message in outputs: - print(f"{message.author_name or message.role}: {message.text}\n") + elif event.type == "group_chat" and isinstance(event.data, GroupChatRequestSentEvent): + print(f"\n[REQUEST SENT ({event.data.round_index})] to agent: {event.data.participant_name}") + + elif event.type == "output": + output_event = event + + if output_event: + # The output of the magentic workflow is a collection of chat messages from all participants + outputs = cast(list[ChatMessage], output_event.data) + print("\n" + "=" * 80) + print("\nFinal Conversation Transcript:\n") + for message in outputs: + print(f"{message.author_name or message.role}: {message.text}\n") if __name__ == "__main__": diff --git a/python/samples/getting_started/orchestrations/magentic_checkpoint.py b/python/samples/getting_started/orchestrations/magentic_checkpoint.py index 08b233661b..0b91193ca3 100644 --- a/python/samples/getting_started/orchestrations/magentic_checkpoint.py +++ b/python/samples/getting_started/orchestrations/magentic_checkpoint.py @@ -9,11 +9,9 @@ ChatAgent, ChatMessage, FileCheckpointStorage, - RequestInfoEvent, WorkflowCheckpoint, - WorkflowOutputEvent, + WorkflowEvent, 
WorkflowRunState, - WorkflowStatusEvent, ) from agent_framework.azure import AzureOpenAIChatClient from agent_framework.orchestrations import MagenticBuilder, MagenticPlanReviewRequest @@ -105,16 +103,16 @@ async def main() -> None: print("\n=== Stage 1: run until plan review request (checkpointing active) ===") workflow = build_workflow(checkpoint_storage) - # Run the workflow until the first RequestInfoEvent is surfaced. The event carries the + # Run the workflow until the first is surfaced. The event carries the # request_id we must reuse on resume. In a real system this is where the UI would present # the plan for human review. plan_review_request: MagenticPlanReviewRequest | None = None async for event in workflow.run(TASK, stream=True): - if isinstance(event, RequestInfoEvent) and event.request_type is MagenticPlanReviewRequest: + if event.type == "request_info" and event.request_type is MagenticPlanReviewRequest: plan_review_request = event.data print(f"Captured plan review request: {event.request_id}") - if isinstance(event, WorkflowStatusEvent) and event.state is WorkflowRunState.IDLE_WITH_PENDING_REQUESTS: + if event.type == "status" and event.state is WorkflowRunState.IDLE_WITH_PENDING_REQUESTS: break if plan_review_request is None: @@ -147,9 +145,9 @@ async def main() -> None: approval = plan_review_request.approve() # Resume execution and capture the re-emitted plan review request. 
- request_info_event: RequestInfoEvent | None = None + request_info_event: WorkflowEvent | None = None async for event in resumed_workflow.run(checkpoint_id=resume_checkpoint.checkpoint_id, stream=True): - if isinstance(event, RequestInfoEvent) and isinstance(event.data, MagenticPlanReviewRequest): + if event.type == "request_info" and isinstance(event.data, MagenticPlanReviewRequest): request_info_event = event if request_info_event is None: @@ -158,9 +156,9 @@ async def main() -> None: print(f"Resumed plan review request: {request_info_event.request_id}") # Supply the approval and continue to run to completion. - final_event: WorkflowOutputEvent | None = None + final_event: WorkflowEvent | None = None async for event in resumed_workflow.send_responses_streaming({request_info_event.request_id: approval}): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": final_event = event if final_event is None: @@ -218,12 +216,12 @@ def _pending_message_count(cp: WorkflowCheckpoint) -> int: if pending_messages == 0: print("Checkpoint has no pending messages; no additional work expected on resume.") - final_event_post: WorkflowOutputEvent | None = None + final_event_post: WorkflowEvent | None = None post_emitted_events = False post_plan_workflow = build_workflow(checkpoint_storage) async for event in post_plan_workflow.run(checkpoint_id=post_plan_checkpoint.checkpoint_id, stream=True): post_emitted_events = True - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": final_event_post = event if final_event_post is None: diff --git a/python/samples/getting_started/orchestrations/magentic_human_plan_review.py b/python/samples/getting_started/orchestrations/magentic_human_plan_review.py index 9af07ae13f..eda574b264 100644 --- a/python/samples/getting_started/orchestrations/magentic_human_plan_review.py +++ b/python/samples/getting_started/orchestrations/magentic_human_plan_review.py @@ -9,9 +9,7 @@ AgentResponseUpdate, ChatAgent, 
ChatMessage, - RequestInfoEvent, WorkflowEvent, - WorkflowOutputEvent, ) from agent_framework.openai import OpenAIChatClient from agent_framework.orchestrations import MagenticBuilder, MagenticPlanReviewRequest, MagenticPlanReviewResponse @@ -46,10 +44,10 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str requests: dict[str, MagenticPlanReviewRequest] = {} async for event in stream: - if isinstance(event, RequestInfoEvent) and event.request_type is MagenticPlanReviewRequest: + if event.type == "request_info" and event.request_type is MagenticPlanReviewRequest: requests[event.request_id] = cast(MagenticPlanReviewRequest, event.data) - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": data = event.data if isinstance(data, AgentResponseUpdate): rid = data.response_id @@ -68,7 +66,7 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str # To make the type checker happy, we cast event.data to the expected type outputs = cast(list[ChatMessage], event.data) for msg in outputs: - speaker = msg.author_name or msg.role.value + speaker = msg.author_name or msg.role print(f"[{speaker}]: {msg.text}") responses: dict[str, MagenticPlanReviewResponse] = {} @@ -129,7 +127,7 @@ async def main() -> None: # Request human input for plan review .with_plan_review() # Enable intermediate outputs to observe the conversation as it unfolds - # Intermediate outputs will be emitted as WorkflowOutputEvent events + # Intermediate outputs will be emitted as WorkflowEvent with type "output" .with_intermediate_outputs() .build() ) diff --git a/python/samples/getting_started/orchestrations/sequential_agents.py b/python/samples/getting_started/orchestrations/sequential_agents.py index b0cea780a7..03e5c42e9a 100644 --- a/python/samples/getting_started/orchestrations/sequential_agents.py +++ b/python/samples/getting_started/orchestrations/sequential_agents.py @@ -3,7 +3,7 @@ import asyncio from typing import cast -from 
agent_framework import ChatMessage, WorkflowOutputEvent +from agent_framework import ChatMessage from agent_framework.azure import AzureOpenAIChatClient from agent_framework.orchestrations import SequentialBuilder from azure.identity import AzureCliCredential @@ -48,7 +48,7 @@ async def main() -> None: # 3) Run and collect outputs outputs: list[list[ChatMessage]] = [] async for event in workflow.run("Write a tagline for a budget-friendly eBike.", stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": outputs.append(cast(list[ChatMessage], event.data)) if outputs: diff --git a/python/samples/getting_started/workflows/README.md b/python/samples/getting_started/workflows/README.md index 3e4b6f0a72..1d16f8f24b 100644 --- a/python/samples/getting_started/workflows/README.md +++ b/python/samples/getting_started/workflows/README.md @@ -102,7 +102,7 @@ Tool approval samples demonstrate using `@tool(approval_mode="always_require")` | Sample | File | Concepts | | ------------------------ | -------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | -| Executor I/O Observation | [observability/executor_io_observation.py](./observability/executor_io_observation.py) | Observe executor input/output data via ExecutorInvokedEvent and ExecutorCompletedEvent without modifying executor code | +| Executor I/O Observation | [observability/executor_io_observation.py](./observability/executor_io_observation.py) | Observe executor input/output data via executor_invoked events (type='executor_invoked') and executor_completed events (type='executor_completed') without modifying executor code | For additional observability samples in Agent Framework, see the [observability getting started samples](../observability/README.md). 
The [sample](../observability/workflow_observability.py) demonstrates integrating observability into workflows. @@ -162,8 +162,8 @@ Sequential orchestration uses a few small adapter nodes for plumbing: - "input-conversation" normalizes input to `list[ChatMessage]` - "to-conversation:" converts agent responses into the shared conversation -- "complete" publishes the final `WorkflowOutputEvent` - These may appear in event streams (ExecutorInvoke/Completed). They’re analogous to +- "complete" publishes the final output event (type='output') + These may appear in event streams (executor_invoked/executor_completed). They're analogous to concurrent’s dispatcher and aggregator and can be ignored if you only care about agent activity. ### Environment Variables diff --git a/python/samples/getting_started/workflows/_start-here/step1_executors_and_edges.py b/python/samples/getting_started/workflows/_start-here/step1_executors_and_edges.py index 7c9f7a4cbb..98460844f6 100644 --- a/python/samples/getting_started/workflows/_start-here/step1_executors_and_edges.py +++ b/python/samples/getting_started/workflows/_start-here/step1_executors_and_edges.py @@ -140,7 +140,7 @@ def __init__(self, id: str): super().__init__(id=id) @handler(input=str, output=str) - async def add_exclamation(self, message: str, ctx: WorkflowContext) -> None: + async def add_exclamation(self, message, ctx) -> None: # type: ignore """Add exclamation marks to the input. Note: The input=str and output=str are explicitly specified on @handler, @@ -149,7 +149,7 @@ async def add_exclamation(self, message: str, ctx: WorkflowContext) -> None: on @handler take precedence. """ result = f"{message}!!!" 
- await ctx.send_message(result) + await ctx.send_message(result) # type: ignore async def main(): diff --git a/python/samples/getting_started/workflows/_start-here/step2_agents_in_a_workflow.py b/python/samples/getting_started/workflows/_start-here/step2_agents_in_a_workflow.py index 6ecfbe55a8..b2fcbb1aa0 100644 --- a/python/samples/getting_started/workflows/_start-here/step2_agents_in_a_workflow.py +++ b/python/samples/getting_started/workflows/_start-here/step2_agents_in_a_workflow.py @@ -57,7 +57,6 @@ async def main(): # of `AgentResponse` from the agents in the workflow. outputs = cast(list[AgentResponse], outputs) for output in outputs: - # TODO: author_name should be available in AgentResponse print(f"{output.messages[0].author_name}: {output.text}\n") # Summarize the final run state (e.g., COMPLETED) @@ -66,7 +65,7 @@ async def main(): """ writer: "Charge Ahead: Affordable Adventure Awaits!" - reviewer: - Consider emphasizing both affordability and fun in a more dynamic way. + reviewer: - Consider emphasizing both affordability and fun in a more dynamic way. - Try using a catchy phrase that includes a play on words, like ā€œElectrify Your Drive: Fun Meets Affordability!ā€ - Ensure the slogan is succinct while capturing the essence of the car's unique selling proposition. 
diff --git a/python/samples/getting_started/workflows/_start-here/step3_streaming.py b/python/samples/getting_started/workflows/_start-here/step3_streaming.py index 2ac0f64ca8..8ca951aa0a 100644 --- a/python/samples/getting_started/workflows/_start-here/step3_streaming.py +++ b/python/samples/getting_started/workflows/_start-here/step3_streaming.py @@ -3,7 +3,6 @@ import asyncio from agent_framework import AgentResponseUpdate, ChatMessage, WorkflowBuilder -from agent_framework._workflows._events import WorkflowOutputEvent from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential @@ -58,7 +57,7 @@ async def main(): ): # The outputs of the workflow are whatever the agents produce. So the events are expected to # contain `AgentResponseUpdate` from the agents in the workflow. - if isinstance(event, WorkflowOutputEvent) and isinstance(event.data, AgentResponseUpdate): + if event.type == "output" and isinstance(event.data, AgentResponseUpdate): update = event.data author = update.author_name if author != last_author: diff --git a/python/samples/getting_started/workflows/_start-here/step4_using_factories.py b/python/samples/getting_started/workflows/_start-here/step4_using_factories.py index d5e333ddbc..166514f7ac 100644 --- a/python/samples/getting_started/workflows/_start-here/step4_using_factories.py +++ b/python/samples/getting_started/workflows/_start-here/step4_using_factories.py @@ -8,7 +8,6 @@ Executor, WorkflowBuilder, WorkflowContext, - WorkflowOutputEvent, executor, handler, ) @@ -87,7 +86,7 @@ async def main(): async for event in workflow.run("hello world", stream=True): # The outputs of the workflow are whatever the agents produce. So the events are expected to # contain `AgentResponseUpdate` from the agents in the workflow. 
- if isinstance(event, WorkflowOutputEvent) and isinstance(event.data, AgentResponseUpdate): + if event.type == "output" and isinstance(event.data, AgentResponseUpdate): update = event.data if first_update: print(f"{update.author_name}: {update.text}", end="", flush=True) diff --git a/python/samples/getting_started/workflows/agents/azure_ai_agents_streaming.py b/python/samples/getting_started/workflows/agents/azure_ai_agents_streaming.py index 4b4ddbc38b..43c35a8082 100644 --- a/python/samples/getting_started/workflows/agents/azure_ai_agents_streaming.py +++ b/python/samples/getting_started/workflows/agents/azure_ai_agents_streaming.py @@ -2,7 +2,7 @@ import asyncio -from agent_framework import AgentResponseUpdate, WorkflowBuilder, WorkflowOutputEvent +from agent_framework import AgentResponseUpdate, WorkflowBuilder from agent_framework.azure import AzureAIAgentClient from azure.identity.aio import AzureCliCredential @@ -50,7 +50,7 @@ async def main() -> None: async for event in events: # The outputs of the workflow are whatever the agents produce. So the events are expected to # contain `AgentResponseUpdate` from the agents in the workflow. 
- if isinstance(event, WorkflowOutputEvent) and isinstance(event.data, AgentResponseUpdate): + if event.type == "output" and isinstance(event.data, AgentResponseUpdate): update = event.data author = update.author_name if author != last_author: diff --git a/python/samples/getting_started/workflows/agents/azure_chat_agents_and_executor.py b/python/samples/getting_started/workflows/agents/azure_chat_agents_and_executor.py index 7d51660336..c9a31cf6f7 100644 --- a/python/samples/getting_started/workflows/agents/azure_chat_agents_and_executor.py +++ b/python/samples/getting_started/workflows/agents/azure_chat_agents_and_executor.py @@ -10,7 +10,6 @@ ChatMessage, WorkflowBuilder, WorkflowContext, - WorkflowOutputEvent, executor, ) from agent_framework.azure import AzureOpenAIChatClient @@ -128,7 +127,7 @@ async def main() -> None: async for event in events: # The outputs of the workflow are whatever the agents produce. So the events are expected to # contain `AgentResponseUpdate` from the agents in the workflow. 
- if isinstance(event, WorkflowOutputEvent) and isinstance(event.data, AgentResponseUpdate): + if event.type == "output" and isinstance(event.data, AgentResponseUpdate): update = event.data author = update.author_name if author != last_author: diff --git a/python/samples/getting_started/workflows/agents/azure_chat_agents_streaming.py b/python/samples/getting_started/workflows/agents/azure_chat_agents_streaming.py index 627febb99a..73d520b182 100644 --- a/python/samples/getting_started/workflows/agents/azure_chat_agents_streaming.py +++ b/python/samples/getting_started/workflows/agents/azure_chat_agents_streaming.py @@ -2,7 +2,7 @@ import asyncio -from agent_framework import AgentResponseUpdate, WorkflowBuilder, WorkflowOutputEvent +from agent_framework import AgentResponseUpdate, WorkflowBuilder from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential @@ -49,7 +49,7 @@ async def main(): async for event in events: # The outputs of the workflow are whatever the agents produce. So the events are expected to # contain `AgentResponseUpdate` from the agents in the workflow. 
- if isinstance(event, WorkflowOutputEvent) and isinstance(event.data, AgentResponseUpdate): + if event.type == "output" and isinstance(event.data, AgentResponseUpdate): update = event.data author = update.author_name if author != last_author: diff --git a/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py b/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py index 4b7eabf9ba..457defcf51 100644 --- a/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py +++ b/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py @@ -9,16 +9,13 @@ AgentExecutorRequest, AgentExecutorResponse, AgentResponse, - AgentRunUpdateEvent, + AgentResponseUpdate, ChatAgent, ChatMessage, Executor, - FunctionCallContent, - FunctionResultContent, - RequestInfoEvent, WorkflowBuilder, WorkflowContext, - WorkflowOutputEvent, + WorkflowEvent, handler, response_handler, tool, @@ -36,7 +33,7 @@ -> Coordinator -> final_editor_agent -> Coordinator -> output The writer agent calls tools to gather product facts before drafting copy. A custom executor -packages the draft and emits a RequestInfoEvent so a human can comment, then replays the human +packages the draft and emits a request_info event (type='request_info') so a human can comment, then replays the human guidance back into the conversation before the final editor agent produces the polished output. Demonstrates: @@ -50,7 +47,9 @@ """ -# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +# NOTE: approval_mode="never_require" is for sample brevity. 
Use "always_require" in production; +# see samples/getting_started/tools/function_tool_with_approval.py and +# samples/getting_started/tools/function_tool_with_approval_and_threads.py. @tool(approval_mode="never_require") def fetch_product_brief( product_name: Annotated[str, Field(description="Product name to look up.")], @@ -147,8 +146,7 @@ async def on_human_feedback( # Human approved the draft as-is; forward it unchanged. await ctx.send_message( AgentExecutorRequest( - messages=original_request.conversation - + [ChatMessage("user", text="The draft is approved as-is.")], + messages=original_request.conversation + [ChatMessage("user", text="The draft is approved as-is.")], should_respond=True, ), target_id=self.final_editor_id, @@ -194,15 +192,15 @@ def create_final_editor_agent() -> ChatAgent: ) -def display_agent_run_update(event: AgentRunUpdateEvent, last_executor: str | None) -> None: +def display_agent_run_update(event: WorkflowEvent, last_executor: str | None) -> None: """Display an AgentRunUpdateEvent in a readable format.""" printed_tool_calls: set[str] = set() printed_tool_results: set[str] = set() executor_id = event.executor_id update = event.data # Extract and print any new tool calls or results from the update. 
- function_calls = [c for c in update.contents if isinstance(c, FunctionCallContent)] # type: ignore[union-attr] - function_results = [c for c in update.contents if isinstance(c, FunctionResultContent)] # type: ignore[union-attr] + function_calls = [c for c in update.contents if c.type == "function_call"] # type: ignore[union-attr] + function_results = [c for c in update.contents if c.type == "function_result"] # type: ignore[union-attr] if executor_id != last_executor: if last_executor is not None: print() @@ -291,18 +289,22 @@ async def main() -> None: requests: list[tuple[str, DraftFeedbackRequest]] = [] async for event in stream: - if isinstance(event, AgentRunUpdateEvent) and display_agent_run_update_switch: + if ( + event.type == "output" + and isinstance(event.data, AgentResponseUpdate) + and display_agent_run_update_switch + ): display_agent_run_update(event, last_executor) - if isinstance(event, RequestInfoEvent) and isinstance(event.data, DraftFeedbackRequest): + if event.type == "request_info" and isinstance(event.data, DraftFeedbackRequest): # Stash the request so we can prompt the human after the stream completes. 
requests.append((event.request_id, event.data)) last_executor = None - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output" and not isinstance(event.data, AgentResponseUpdate): + # Only mark as completed for final outputs, not streaming updates last_executor = None response = event.data - print("\n===== Final output =====") final_text = getattr(response, "text", str(response)) - print(final_text.strip()) + print(final_text, flush=True, end="") completed = True if requests and not completed: diff --git a/python/samples/getting_started/workflows/agents/concurrent_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/concurrent_workflow_as_agent.py index 75e7e07573..89b003dd5f 100644 --- a/python/samples/getting_started/workflows/agents/concurrent_workflow_as_agent.py +++ b/python/samples/getting_started/workflows/agents/concurrent_workflow_as_agent.py @@ -2,8 +2,8 @@ import asyncio -from agent_framework import ConcurrentBuilder from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.orchestrations import ConcurrentBuilder from azure.identity import AzureCliCredential """ @@ -20,7 +20,7 @@ Prerequisites: - Azure OpenAI access configured for AzureOpenAIChatClient (use az login + env vars) -- Familiarity with Workflow events (WorkflowOutputEvent) +- Familiarity with Workflow events (WorkflowEvent with type "output") """ diff --git a/python/samples/getting_started/workflows/agents/group_chat_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/group_chat_workflow_as_agent.py index fa227826d0..4193d1fdfc 100644 --- a/python/samples/getting_started/workflows/agents/group_chat_workflow_as_agent.py +++ b/python/samples/getting_started/workflows/agents/group_chat_workflow_as_agent.py @@ -2,8 +2,9 @@ import asyncio -from agent_framework import ChatAgent, GroupChatBuilder +from agent_framework import ChatAgent from agent_framework.openai import OpenAIChatClient, OpenAIResponsesClient +from 
agent_framework.orchestrations import GroupChatBuilder """ Sample: Group Chat Orchestration @@ -42,7 +43,7 @@ async def main() -> None: ) .participants([researcher, writer]) # Enable intermediate outputs to observe the conversation as it unfolds - # Intermediate outputs will be emitted as WorkflowOutputEvent events + # Intermediate outputs will be emitted as WorkflowEvent with type "output" events .with_intermediate_outputs() .build() ) diff --git a/python/samples/getting_started/workflows/agents/handoff_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/handoff_workflow_as_agent.py index 99f9cca02a..e083cf7d60 100644 --- a/python/samples/getting_started/workflows/agents/handoff_workflow_as_agent.py +++ b/python/samples/getting_started/workflows/agents/handoff_workflow_as_agent.py @@ -8,12 +8,11 @@ ChatAgent, ChatMessage, Content, - HandoffAgentUserRequest, - HandoffBuilder, WorkflowAgent, tool, ) from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.orchestrations import HandoffAgentUserRequest, HandoffBuilder from azure.identity import AzureCliCredential """Sample: Handoff Workflow as Agent with Human-in-the-Loop. diff --git a/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py index c0d51777f3..bd70926b08 100644 --- a/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py +++ b/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py @@ -5,9 +5,9 @@ from agent_framework import ( ChatAgent, HostedCodeInterpreterTool, - MagenticBuilder, ) from agent_framework.openai import OpenAIChatClient, OpenAIResponsesClient +from agent_framework.orchestrations import MagenticBuilder """ Sample: Build a Magentic orchestration and wrap it as an agent. 
@@ -62,7 +62,7 @@ async def main() -> None: max_reset_count=2, ) # Enable intermediate outputs to observe the conversation as it unfolds - # Intermediate outputs will be emitted as WorkflowOutputEvent events + # Intermediate outputs will be emitted as WorkflowEvent with type "output" events .with_intermediate_outputs() .build() ) diff --git a/python/samples/getting_started/workflows/agents/sequential_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/sequential_workflow_as_agent.py index 6339f88ba2..ba09785f0c 100644 --- a/python/samples/getting_started/workflows/agents/sequential_workflow_as_agent.py +++ b/python/samples/getting_started/workflows/agents/sequential_workflow_as_agent.py @@ -2,8 +2,8 @@ import asyncio -from agent_framework import SequentialBuilder from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.orchestrations import SequentialBuilder from azure.identity import AzureCliCredential """ diff --git a/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py b/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py index 1fee49fc1d..23b4d1e5ee 100644 --- a/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py +++ b/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py @@ -33,7 +33,9 @@ # Define tools that accept custom context via **kwargs -# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +# NOTE: approval_mode="never_require" is for sample brevity. +# Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and +# samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
@tool(approval_mode="never_require") def get_user_data( query: Annotated[str, Field(description="What user data to retrieve")], diff --git a/python/samples/getting_started/workflows/agents/workflow_as_agent_with_thread.py b/python/samples/getting_started/workflows/agents/workflow_as_agent_with_thread.py index 0580fe45ab..01d5626589 100644 --- a/python/samples/getting_started/workflows/agents/workflow_as_agent_with_thread.py +++ b/python/samples/getting_started/workflows/agents/workflow_as_agent_with_thread.py @@ -2,8 +2,9 @@ import asyncio -from agent_framework import AgentThread, ChatAgent, ChatMessageStore, SequentialBuilder +from agent_framework import AgentThread, ChatAgent, ChatMessageStore from agent_framework.openai import OpenAIChatClient +from agent_framework.orchestrations import SequentialBuilder """ Sample: Workflow as Agent with Thread Conversation History and Checkpointing diff --git a/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py b/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py index 1f7f5659af..df7c5b1445 100644 --- a/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py +++ b/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py @@ -1,9 +1,16 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio +import sys from dataclasses import dataclass from pathlib import Path -from typing import Any, override +from typing import Any + +if sys.version_info >= (3, 12): + from typing import override # type: ignore # pragma: no cover +else: + from typing_extensions import override # type: ignore[import] # pragma: no cover + # NOTE: the Azure client imports above are real dependencies. 
When running this # sample outside of Azure-enabled environments you may wish to swap in the @@ -15,13 +22,10 @@ ChatMessage, Executor, FileCheckpointStorage, - RequestInfoEvent, Workflow, WorkflowBuilder, WorkflowCheckpoint, WorkflowContext, - WorkflowOutputEvent, - WorkflowStatusEvent, get_checkpoint_summary, handler, response_handler, @@ -53,7 +57,7 @@ 3. Later, restart the script, select that checkpoint, and provide the stored human decision when prompted to pre-supply responses. Doing so applies the answer immediately on resume, so the system does **not** - re-emit the same `RequestInfoEvent`. + re-emit the same `request_info` event. """ # Directory used for the sample's temporary checkpoint files. We isolate the @@ -259,11 +263,11 @@ async def run_interactive_session( raise ValueError("Either initial_message or checkpoint_id must be provided") async for event in event_stream: - if isinstance(event, WorkflowStatusEvent): + if event.type == "status": print(event) - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": completed_output = event.data - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": if isinstance(event.data, HumanApprovalRequest): requests[event.request_id] = event.data else: diff --git a/python/samples/getting_started/workflows/checkpoint/checkpoint_with_resume.py b/python/samples/getting_started/workflows/checkpoint/checkpoint_with_resume.py index b82eaf80e9..ff23b1af5b 100644 --- a/python/samples/getting_started/workflows/checkpoint/checkpoint_with_resume.py +++ b/python/samples/getting_started/workflows/checkpoint/checkpoint_with_resume.py @@ -24,21 +24,25 @@ """ import asyncio +import sys from dataclasses import dataclass from random import random -from typing import Any, override +from typing import Any from agent_framework import ( Executor, InMemoryCheckpointStorage, - SuperStepCompletedEvent, WorkflowBuilder, WorkflowCheckpoint, WorkflowContext, - WorkflowOutputEvent, handler, ) +if sys.version_info >= (3, 12):
+ from typing import override # type: ignore # pragma: no cover +else: + from typing_extensions import override # type: ignore[import] # pragma: no cover + @dataclass class ComputeTask: @@ -126,12 +130,12 @@ async def main(): output: str | None = None async for event in event_stream: - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": output = event.data break - if isinstance(event, SuperStepCompletedEvent) and random() < 0.5: + if event.type == "superstep_completed" and random() < 0.5: # Randomly simulate system interruptions - # The `SuperStepCompletedEvent` ensures we only interrupt after + # The type="superstep_completed" event ensures we only interrupt after # the current super-step is fully complete and checkpointed. # If we interrupt mid-step, the workflow may resume from an earlier point. print("\n** Simulating workflow interruption. Stopping execution. **") diff --git a/python/samples/getting_started/workflows/checkpoint/handoff_with_tool_approval_checkpoint_resume.py b/python/samples/getting_started/workflows/checkpoint/handoff_with_tool_approval_checkpoint_resume.py index 5ab80e37ee..6e0bcaa00a 100644 --- a/python/samples/getting_started/workflows/checkpoint/handoff_with_tool_approval_checkpoint_resume.py +++ b/python/samples/getting_started/workflows/checkpoint/handoff_with_tool_approval_checkpoint_resume.py @@ -12,15 +12,12 @@ ChatMessage, Content, FileCheckpointStorage, - HandoffAgentUserRequest, - HandoffBuilder, - RequestInfoEvent, Workflow, - WorkflowOutputEvent, - WorkflowStatusEvent, + WorkflowEvent, tool, ) from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.orchestrations import HandoffAgentUserRequest, HandoffBuilder from azure.identity import AzureCliCredential """ @@ -153,7 +150,7 @@ def _print_function_approval_request(request: Content, request_id: str) -> None: def _build_responses_for_requests( - pending_requests: list[RequestInfoEvent], + pending_requests: list[WorkflowEvent], *, 
user_response: str | None, approve_tools: bool | None, @@ -161,11 +158,15 @@ def _build_responses_for_requests( """Create response payloads for each pending request.""" responses: dict[str, object] = {} for request in pending_requests: - if isinstance(request.data, HandoffAgentUserRequest): + if isinstance(request.data, HandoffAgentUserRequest) and request.request_id: if user_response is None: raise ValueError("User response is required for HandoffAgentUserRequest") responses[request.request_id] = user_response - elif isinstance(request.data, Content) and request.data.type == "function_approval_request": + elif ( + isinstance(request.data, Content) + and request.data.type == "function_approval_request" + and request.request_id + ): if approve_tools is None: raise ValueError("Approval decision is required for function approval request") responses[request.request_id] = request.data.to_function_approval_response(approved=approve_tools) @@ -178,14 +179,14 @@ async def run_until_user_input_needed( workflow: Workflow, initial_message: str | None = None, checkpoint_id: str | None = None, -) -> tuple[list[RequestInfoEvent], str | None]: +) -> tuple[list[WorkflowEvent], str | None]: """ Run the workflow until it needs user input or approval, or completes. 
Returns: Tuple of (pending_requests, checkpoint_id_to_use_for_resume) """ - pending_requests: list[RequestInfoEvent] = [] + pending_requests: list[WorkflowEvent] = [] latest_checkpoint_id: str | None = checkpoint_id if initial_message: @@ -198,17 +199,17 @@ async def run_until_user_input_needed( raise ValueError("Must provide either initial_message or checkpoint_id") async for event in event_stream: - if isinstance(event, WorkflowStatusEvent): + if event.type == "status": print(f"[Status] {event.state}") - elif isinstance(event, RequestInfoEvent): + elif event.type == "request_info": pending_requests.append(event) if isinstance(event.data, HandoffAgentUserRequest): _print_handoff_request(event.data, event.request_id) elif isinstance(event.data, Content) and event.data.type == "function_approval_request": _print_function_approval_request(event.data, event.request_id) - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": print("\n[Workflow Completed]") if event.data: print(f"Final conversation length: {len(event.data)} messages") @@ -225,7 +226,7 @@ async def resume_with_responses( checkpoint_storage: FileCheckpointStorage, user_response: str | None = None, approve_tools: bool | None = None, -) -> tuple[list[RequestInfoEvent], str | None]: +) -> tuple[list[WorkflowEvent], str | None]: """ Two-step resume pattern (answers customer questions and tool approvals): @@ -255,10 +256,10 @@ async def resume_with_responses( print(f"Step 1: Restoring checkpoint {latest_checkpoint.checkpoint_id}") # Step 1: Restore the checkpoint to load pending requests into memory - # The checkpoint restoration re-emits pending RequestInfoEvents - restored_requests: list[RequestInfoEvent] = [] + # The checkpoint restoration re-emits pending request_info events + restored_requests: list[WorkflowEvent] = [] async for event in workflow.run(checkpoint_id=latest_checkpoint.checkpoint_id, stream=True): # type: ignore[attr-defined] - if isinstance(event, RequestInfoEvent): + 
if event.type == "request_info": restored_requests.append(event) if isinstance(event.data, HandoffAgentUserRequest): _print_handoff_request(event.data, event.request_id) @@ -275,13 +276,13 @@ async def resume_with_responses( ) print(f"Step 2: Sending responses for {len(responses)} request(s)") - new_pending_requests: list[RequestInfoEvent] = [] + new_pending_requests: list[WorkflowEvent] = [] async for event in workflow.send_responses_streaming(responses): - if isinstance(event, WorkflowStatusEvent): + if event.type == "status": print(f"[Status] {event.state}") - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": print("\n[Workflow Output Event - Conversation Update]") if event.data and isinstance(event.data, list) and all(isinstance(msg, ChatMessage) for msg in event.data): # type: ignore # Now safe to cast event.data to list[ChatMessage] @@ -291,7 +292,7 @@ async def resume_with_responses( text = msg.text[:100] + "..." if len(msg.text) > 100 else msg.text print(f" {author}: {text}") - elif isinstance(event, RequestInfoEvent): + elif event.type == "request_info": new_pending_requests.append(event) if isinstance(event.data, HandoffAgentUserRequest): _print_handoff_request(event.data, event.request_id) diff --git a/python/samples/getting_started/workflows/checkpoint/sub_workflow_checkpoint.py b/python/samples/getting_started/workflows/checkpoint/sub_workflow_checkpoint.py index 6f8567d02c..267cfdfb60 100644 --- a/python/samples/getting_started/workflows/checkpoint/sub_workflow_checkpoint.py +++ b/python/samples/getting_started/workflows/checkpoint/sub_workflow_checkpoint.py @@ -3,29 +3,33 @@ import asyncio import contextlib import json +import sys import uuid from dataclasses import dataclass, field, replace from datetime import datetime, timedelta from pathlib import Path -from typing import Any, override +from typing import Any from agent_framework import ( Executor, FileCheckpointStorage, - RequestInfoEvent, SubWorkflowRequestMessage, 
SubWorkflowResponseMessage, Workflow, WorkflowBuilder, WorkflowContext, + WorkflowEvent, WorkflowExecutor, - WorkflowOutputEvent, WorkflowRunState, - WorkflowStatusEvent, handler, response_handler, ) +if sys.version_info >= (3, 12): + from typing import override # type: ignore # pragma: no cover +else: + from typing_extensions import override # type: ignore[import] # pragma: no cover + CHECKPOINT_DIR = Path(__file__).with_suffix("").parent / "tmp" / "sub_workflow_checkpoints" """ @@ -335,10 +339,10 @@ async def main() -> None: request_id: str | None = None async for event in workflow.run("Contoso Gadget Launch", stream=True): - if isinstance(event, RequestInfoEvent) and request_id is None: + if event.type == "request_info" and request_id is None: request_id = event.request_id print(f"Captured review request id: {request_id}") - if isinstance(event, WorkflowStatusEvent) and event.state is WorkflowRunState.IDLE_WITH_PENDING_REQUESTS: + if event.type == "status" and event.state is WorkflowRunState.IDLE_WITH_PENDING_REQUESTS: break if request_id is None: @@ -364,9 +368,9 @@ async def main() -> None: # Rebuild fresh instances to mimic a separate process resuming workflow2 = build_parent_workflow(storage) - request_info_event: RequestInfoEvent | None = None + request_info_event: WorkflowEvent | None = None async for event in workflow2.run(checkpoint_id=resume_checkpoint.checkpoint_id, stream=True): - if isinstance(event, RequestInfoEvent): + if event.type == "request_info": request_info_event = event if request_info_event is None: @@ -375,9 +379,9 @@ async def main() -> None: print("\n=== Stage 3: approve draft ==") approval_response = "approve" - output_event: WorkflowOutputEvent | None = None + output_event: WorkflowEvent | None = None async for event in workflow2.send_responses_streaming({request_info_event.request_id: approval_response}): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": output_event = event if output_event is None: diff --git 
a/python/samples/getting_started/workflows/checkpoint/workflow_as_agent_checkpoint.py b/python/samples/getting_started/workflows/checkpoint/workflow_as_agent_checkpoint.py index d947330a19..52d2f99843 100644 --- a/python/samples/getting_started/workflows/checkpoint/workflow_as_agent_checkpoint.py +++ b/python/samples/getting_started/workflows/checkpoint/workflow_as_agent_checkpoint.py @@ -30,9 +30,9 @@ ChatAgent, ChatMessageStore, InMemoryCheckpointStorage, - SequentialBuilder, ) from agent_framework.openai import OpenAIChatClient +from agent_framework.orchestrations import SequentialBuilder async def basic_checkpointing() -> None: @@ -157,7 +157,12 @@ def create_assistant() -> ChatAgent: print(f"\nCheckpoints created during stream: {len(checkpoints)}") +async def main() -> None: + """Run all checkpoint examples.""" + await basic_checkpointing() + await checkpointing_with_thread() + await streaming_with_checkpoints() + + if __name__ == "__main__": - asyncio.run(basic_checkpointing()) - asyncio.run(checkpointing_with_thread()) - asyncio.run(streaming_with_checkpoints()) + asyncio.run(main()) diff --git a/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py b/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py index bf95a980fd..4c77fc5202 100644 --- a/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py +++ b/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py @@ -6,12 +6,11 @@ from agent_framework import ( ChatMessage, - SequentialBuilder, WorkflowExecutor, - WorkflowOutputEvent, tool, ) from agent_framework.openai import OpenAIChatClient +from agent_framework.orchestrations import SequentialBuilder """ Sample: Sub-Workflow kwargs Propagation @@ -32,7 +31,9 @@ # Define tools that access custom context via **kwargs -# NOTE: approval_mode="never_require" is for sample brevity. 
Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; +# see samples/getting_started/tools/function_tool_with_approval.py and +# samples/getting_started/tools/function_tool_with_approval_and_threads.py. @tool(approval_mode="never_require") def get_authenticated_data( resource: Annotated[str, "The resource to fetch"], @@ -129,7 +130,7 @@ async def main() -> None: user_token=user_token, service_config=service_config, ): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": output_data = event.data if isinstance(output_data, list): for item in output_data: # type: ignore @@ -140,6 +141,50 @@ async def main() -> None: print("Sample Complete - kwargs successfully flowed through sub-workflow!") print("=" * 70) + """ + Sample Output: + + ====================================================================== + Sub-Workflow kwargs Propagation Demo + ====================================================================== + + Context being passed to parent workflow: + user_token: { + "user_name": "alice@contoso.com", + "access_level": "admin", + "session_id": "sess_12345" + } + service_config: { + "services": { + "users": "https://api.example.com/v1/users", + "orders": "https://api.example.com/v1/orders", + "inventory": "https://api.example.com/v1/inventory" + }, + "timeout": 30 + } + + ---------------------------------------------------------------------- + Workflow Execution (kwargs flow: parent -> sub-workflow -> agent -> tool): + ---------------------------------------------------------------------- + + [get_authenticated_data] kwargs keys: ['user_token', 'service_config'] + [get_authenticated_data] User: alice@contoso.com, Access: admin + + [call_configured_service] kwargs keys: ['user_token', 'service_config'] + 
[call_configured_service] Available services: ['users', 'orders', 'inventory'] + + [Final Answer]: Please fetch my profile data and then call the users service. + + [Final Answer]: - Your profile data has been fetched. + - The users service has been called. + + Would you like details from either the profile data or the users service response? + + ====================================================================== + Sample Complete - kwargs successfully flowed through sub-workflow! + ====================================================================== + """ + if __name__ == "__main__": asyncio.run(main()) diff --git a/python/samples/getting_started/workflows/composition/sub_workflow_parallel_requests.py b/python/samples/getting_started/workflows/composition/sub_workflow_parallel_requests.py index 0959f591f0..58ee575684 100644 --- a/python/samples/getting_started/workflows/composition/sub_workflow_parallel_requests.py +++ b/python/samples/getting_started/workflows/composition/sub_workflow_parallel_requests.py @@ -3,16 +3,16 @@ import asyncio import uuid from dataclasses import dataclass -from typing import Literal +from typing import Any, Literal from agent_framework import ( Executor, - RequestInfoEvent, SubWorkflowRequestMessage, SubWorkflowResponseMessage, Workflow, WorkflowBuilder, WorkflowContext, + WorkflowEvent, WorkflowExecutor, handler, response_handler, @@ -192,7 +192,7 @@ def __init__(self, id: str) -> None: super().__init__(id) self._cache: dict[str, int] = {"cpu": 10, "memory": 50, "disk": 100} # Record pending requests to match responses - self._pending_requests: dict[str, RequestInfoEvent] = {} + self._pending_requests: dict[str, WorkflowEvent[Any]] = {} async def _handle_resource_request(self, request: ResourceRequest) -> ResourceResponse | None: """Allocates resources based on request and available cache.""" @@ -207,7 +207,7 @@ async def handle_subworkflow_request( self, request: SubWorkflowRequestMessage, ctx: 
WorkflowContext[SubWorkflowResponseMessage] ) -> None: """Handles requests from sub-workflows.""" - source_event: RequestInfoEvent = request.source_event + source_event: WorkflowEvent[Any] = request.source_event if not isinstance(source_event.data, ResourceRequest): return @@ -246,14 +246,14 @@ def __init__(self, id: str) -> None: "disk": 1000, # Liberal disk policy } # Record pending requests to match responses - self._pending_requests: dict[str, RequestInfoEvent] = {} + self._pending_requests: dict[str, WorkflowEvent[Any]] = {} @handler async def handle_subworkflow_request( self, request: SubWorkflowRequestMessage, ctx: WorkflowContext[SubWorkflowResponseMessage] ) -> None: """Handles requests from sub-workflows.""" - source_event: RequestInfoEvent = request.source_event + source_event: WorkflowEvent[Any] = request.source_event if not isinstance(source_event.data, PolicyRequest): return diff --git a/python/samples/getting_started/workflows/composition/sub_workflow_request_interception.py b/python/samples/getting_started/workflows/composition/sub_workflow_request_interception.py index b06a2ce82a..9b0637652b 100644 --- a/python/samples/getting_started/workflows/composition/sub_workflow_request_interception.py +++ b/python/samples/getting_started/workflows/composition/sub_workflow_request_interception.py @@ -11,7 +11,6 @@ WorkflowBuilder, WorkflowContext, WorkflowExecutor, - WorkflowOutputEvent, handler, response_handler, ) @@ -303,7 +302,7 @@ async def main() -> None: for email in test_emails: print(f"\nšŸš€ Processing email to '{email.recipient}'") async for event in workflow.run(email, stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": print(f"šŸŽ‰ Final result for '{email.recipient}': {'Delivered' if event.data else 'Blocked'}") diff --git a/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py b/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py index 
23fd5601c4..67058435c9 100644 --- a/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py +++ b/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py @@ -16,7 +16,6 @@ WorkflowBuilder, WorkflowContext, WorkflowEvent, - WorkflowOutputEvent, executor, ) from agent_framework.azure import AzureOpenAIChatClient @@ -279,7 +278,7 @@ def select_targets(analysis: AnalysisResult, target_ids: list[str]) -> list[str] async for event in workflow.run(email, stream=True): if isinstance(event, DatabaseEvent): print(f"{event}") - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": print(f"Workflow output: {event.data}") """ diff --git a/python/samples/getting_started/workflows/control-flow/sequential_executors.py b/python/samples/getting_started/workflows/control-flow/sequential_executors.py index 41bba945f3..d69aafcfe9 100644 --- a/python/samples/getting_started/workflows/control-flow/sequential_executors.py +++ b/python/samples/getting_started/workflows/control-flow/sequential_executors.py @@ -7,7 +7,6 @@ Executor, WorkflowBuilder, WorkflowContext, - WorkflowOutputEvent, handler, ) from typing_extensions import Never @@ -77,7 +76,7 @@ async def main() -> None: outputs: list[str] = [] async for event in workflow.run("hello world", stream=True): print(f"Event: {event}") - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": outputs.append(cast(str, event.data)) if outputs: diff --git a/python/samples/getting_started/workflows/control-flow/sequential_streaming.py b/python/samples/getting_started/workflows/control-flow/sequential_streaming.py index 1e31bcafc8..cb06157d1a 100644 --- a/python/samples/getting_started/workflows/control-flow/sequential_streaming.py +++ b/python/samples/getting_started/workflows/control-flow/sequential_streaming.py @@ -2,7 +2,7 @@ import asyncio -from agent_framework import WorkflowBuilder, WorkflowContext, WorkflowOutputEvent, executor +from 
agent_framework import WorkflowBuilder, WorkflowContext, executor from typing_extensions import Never """ @@ -14,7 +14,8 @@ Purpose: Show how to declare executors with the @executor decorator, connect them with WorkflowBuilder, pass intermediate values using ctx.send_message, and yield final output using ctx.yield_output(). -Demonstrate how streaming exposes ExecutorInvokedEvent and ExecutorCompletedEvent for observability. +Demonstrate how streaming exposes executor_invoked events (type='executor_invoked') and +executor_completed events (type='executor_completed') for observability. Prerequisites: - No external services required. @@ -67,17 +68,17 @@ async def main(): async for event in workflow.run("hello world", stream=True): # You will see executor invoke and completion events as the workflow progresses. print(f"Event: {event}") - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": print(f"Workflow completed with result: {event.data}") """ Sample Output: - Event: ExecutorInvokedEvent(executor_id=upper_case_executor) - Event: ExecutorCompletedEvent(executor_id=upper_case_executor) - Event: ExecutorInvokedEvent(executor_id=reverse_text_executor) - Event: ExecutorCompletedEvent(executor_id=reverse_text_executor) - Event: WorkflowOutputEvent(data='DLROW OLLEH', executor_id=reverse_text_executor) + Event: executor_invoked event (type='executor_invoked', executor_id=upper_case_executor) + Event: executor_completed event (type='executor_completed', executor_id=upper_case_executor) + Event: executor_invoked event (type='executor_invoked', executor_id=reverse_text_executor) + Event: executor_completed event (type='executor_completed', executor_id=reverse_text_executor) + Event: output event (type='output', data='DLROW OLLEH', executor_id=reverse_text_executor) Workflow completed with result: DLROW OLLEH """ diff --git a/python/samples/getting_started/workflows/control-flow/simple_loop.py 
b/python/samples/getting_started/workflows/control-flow/simple_loop.py index 36a09241ed..e9fca78510 100644 --- a/python/samples/getting_started/workflows/control-flow/simple_loop.py +++ b/python/samples/getting_started/workflows/control-flow/simple_loop.py @@ -9,7 +9,6 @@ ChatAgent, ChatMessage, Executor, - ExecutorCompletedEvent, WorkflowBuilder, WorkflowContext, handler, @@ -143,7 +142,7 @@ async def main(): # Step 2: Run the workflow and print the events. iterations = 0 async for event in workflow.run(NumberSignal.INIT, stream=True): - if isinstance(event, ExecutorCompletedEvent) and event.executor_id == "guess_number": + if event.type == "executor_completed" and event.executor_id == "guess_number": iterations += 1 print(f"Event: {event}") diff --git a/python/samples/getting_started/workflows/declarative/customer_support/main.py b/python/samples/getting_started/workflows/declarative/customer_support/main.py index 685ff905d5..91ddbed268 100644 --- a/python/samples/getting_started/workflows/declarative/customer_support/main.py +++ b/python/samples/getting_started/workflows/declarative/customer_support/main.py @@ -26,7 +26,6 @@ import uuid from pathlib import Path -from agent_framework import RequestInfoEvent, WorkflowOutputEvent from agent_framework.azure import AzureOpenAIChatClient from agent_framework.declarative import ( AgentExternalInputRequest, @@ -259,7 +258,7 @@ async def main() -> None: stream = workflow.run(user_input, stream=True) async for event in stream: - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": data = event.data source_id = getattr(event, "source_executor_id", "") @@ -286,7 +285,7 @@ async def main() -> None: else: accumulated_response += str(data) - elif isinstance(event, RequestInfoEvent) and isinstance(event.data, AgentExternalInputRequest): + elif event.type == "request_info" and isinstance(event.data, AgentExternalInputRequest): request = event.data # The agent_response from the request contains the structured 
response diff --git a/python/samples/getting_started/workflows/declarative/deep_research/main.py b/python/samples/getting_started/workflows/declarative/deep_research/main.py index 947c5d288c..3e4ecf7d19 100644 --- a/python/samples/getting_started/workflows/declarative/deep_research/main.py +++ b/python/samples/getting_started/workflows/declarative/deep_research/main.py @@ -24,7 +24,6 @@ import asyncio from pathlib import Path -from agent_framework import WorkflowOutputEvent from agent_framework.azure import AzureOpenAIChatClient from agent_framework.declarative import WorkflowFactory from azure.identity import AzureCliCredential @@ -193,7 +192,7 @@ async def main() -> None: task = "What is the weather like in Seattle and how does it compare to the average for this time of year?" async for event in workflow.run(task, stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": print(f"{event.data}", end="", flush=True) print("\n" + "=" * 60) diff --git a/python/samples/getting_started/workflows/declarative/function_tools/main.py b/python/samples/getting_started/workflows/declarative/function_tools/main.py index 0fd8dce643..745b965e2f 100644 --- a/python/samples/getting_started/workflows/declarative/function_tools/main.py +++ b/python/samples/getting_started/workflows/declarative/function_tools/main.py @@ -10,7 +10,7 @@ from pathlib import Path from typing import Annotated, Any -from agent_framework import FileCheckpointStorage, RequestInfoEvent, WorkflowOutputEvent, tool +from agent_framework import FileCheckpointStorage, tool from agent_framework.azure import AzureOpenAIChatClient from agent_framework_declarative import ExternalInputRequest, ExternalInputResponse, WorkflowFactory from azure.identity import AzureCliCredential @@ -98,12 +98,12 @@ async def main(): first_response = True async for event in stream: - if isinstance(event, WorkflowOutputEvent) and isinstance(event.data, str): + if event.type == "output" and 
isinstance(event.data, str): if first_response: print("MenuAgent: ", end="") first_response = False print(event.data, end="", flush=True) - elif isinstance(event, RequestInfoEvent) and isinstance(event.data, ExternalInputRequest): + elif event.type == "request_info" and isinstance(event.data, ExternalInputRequest): pending_request_id = event.request_id print() diff --git a/python/samples/getting_started/workflows/declarative/human_in_loop/main.py b/python/samples/getting_started/workflows/declarative/human_in_loop/main.py index aaf2faf613..8f501ab358 100644 --- a/python/samples/getting_started/workflows/declarative/human_in_loop/main.py +++ b/python/samples/getting_started/workflows/declarative/human_in_loop/main.py @@ -15,7 +15,7 @@ import asyncio from pathlib import Path -from agent_framework import Workflow, WorkflowOutputEvent +from agent_framework import Workflow from agent_framework.declarative import ExternalInputRequest, WorkflowFactory from agent_framework_declarative._workflows._handlers import TextOutputEvent @@ -27,7 +27,7 @@ async def run_with_streaming(workflow: Workflow) -> None: async for event in workflow.run({}, stream=True): # WorkflowOutputEvent wraps the actual output data - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": data = event.data if isinstance(data, TextOutputEvent): print(f"[Bot]: {data.text}") diff --git a/python/samples/getting_started/workflows/declarative/marketing/main.py b/python/samples/getting_started/workflows/declarative/marketing/main.py index 639fbdddc3..2f5e999aa7 100644 --- a/python/samples/getting_started/workflows/declarative/marketing/main.py +++ b/python/samples/getting_started/workflows/declarative/marketing/main.py @@ -15,7 +15,6 @@ import asyncio from pathlib import Path -from agent_framework import WorkflowOutputEvent from agent_framework.azure import AzureOpenAIChatClient from agent_framework.declarative import WorkflowFactory from azure.identity import AzureCliCredential @@ -85,7 +84,7 
@@ async def main() -> None: product = "An eco-friendly stainless steel water bottle that keeps drinks cold for 24 hours." async for event in workflow.run(product, stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": print(f"{event.data}", end="", flush=True) print("\n" + "=" * 60) diff --git a/python/samples/getting_started/workflows/declarative/student_teacher/main.py b/python/samples/getting_started/workflows/declarative/student_teacher/main.py index dc252255a7..ec06c4fc7d 100644 --- a/python/samples/getting_started/workflows/declarative/student_teacher/main.py +++ b/python/samples/getting_started/workflows/declarative/student_teacher/main.py @@ -22,7 +22,6 @@ import asyncio from pathlib import Path -from agent_framework import WorkflowOutputEvent from agent_framework.azure import AzureOpenAIChatClient from agent_framework.declarative import WorkflowFactory from azure.identity import AzureCliCredential @@ -82,7 +81,7 @@ async def main() -> None: print("=" * 50) async for event in workflow.run("How would you compute the value of PI?", stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": print(f"{event.data}", flush=True, end="") print("\n" + "=" * 50) diff --git a/python/samples/getting_started/workflows/human-in-the-loop/agents_with_HITL.py b/python/samples/getting_started/workflows/human-in-the-loop/agents_with_HITL.py index 39b4d72086..739a0cbe96 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/agents_with_HITL.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/agents_with_HITL.py @@ -11,12 +11,9 @@ AgentResponseUpdate, ChatMessage, Executor, - RequestInfoEvent, - Role, WorkflowBuilder, WorkflowContext, WorkflowEvent, - WorkflowOutputEvent, handler, response_handler, ) @@ -30,13 +27,13 @@ Pipeline layout: writer_agent -> Coordinator -> writer_agent -> Coordinator -> final_editor_agent -> Coordinator -> output -The writer agent drafts marketing copy. 
A custom executor emits a RequestInfoEvent so a human can comment, -then relays the human guidance back into the conversation before the final editor agent produces the polished -output. +The writer agent drafts marketing copy. A custom executor emits a request_info event (type='request_info') so a +human can comment, then relays the human guidance back into the conversation before the final editor agent +produces the polished output. Demonstrates: - Capturing agent responses in a custom executor. -- Emitting RequestInfoEvent to request human input. +- Emitting request_info events (type='request_info') to request human input. - Handling human feedback and routing it to the appropriate agents. Prerequisites: @@ -103,8 +100,7 @@ async def on_human_feedback( # Human approved the draft as-is; forward it unchanged. await ctx.send_message( AgentExecutorRequest( - messages=original_request.conversation - + [ChatMessage(Role.USER, text="The draft is approved as-is.")], + messages=original_request.conversation + [ChatMessage("user", text="The draft is approved as-is.")], should_respond=True, ), target_id=self.final_editor_name, @@ -119,7 +115,7 @@ async def on_human_feedback( "Rewrite the draft from the previous assistant message into a polished final version. " "Keep the response under 120 words and reflect any requested tone adjustments." 
) - conversation.append(ChatMessage(Role.USER, text=instruction)) + conversation.append(ChatMessage("user", text=instruction)) await ctx.send_message( AgentExecutorRequest(messages=conversation, should_respond=True), target_id=self.writer_name ) @@ -132,9 +128,9 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str requests: list[tuple[str, DraftFeedbackRequest]] = [] async for event in stream: - if isinstance(event, RequestInfoEvent) and isinstance(event.data, DraftFeedbackRequest): + if event.type == "request_info" and isinstance(event.data, DraftFeedbackRequest): requests.append((event.request_id, event.data)) - elif isinstance(event, WorkflowOutputEvent) and isinstance(event.data, AgentResponseUpdate): + elif event.type == "output" and isinstance(event.data, AgentResponseUpdate): # This workflow should only produce AgentResponseUpdate as outputs. # Streaming updates from an agent will be consecutive, because no two agents run simultaneously # in this workflow. So we can use last_author to format output nicely. diff --git a/python/samples/getting_started/workflows/human-in-the-loop/agents_with_approval_requests.py b/python/samples/getting_started/workflows/human-in-the-loop/agents_with_approval_requests.py index 8f73b26438..fff5185a76 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/agents_with_approval_requests.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/agents_with_approval_requests.py @@ -47,7 +47,7 @@ Prerequisites: - Azure AI Agent Service configured, along with the required environment variables. - Authentication via azure-identity. Use AzureCliCredential and run az login before executing the sample. -- Basic familiarity with WorkflowBuilder, edges, events, RequestInfoEvent, and streaming runs. +- Basic familiarity with WorkflowBuilder, edges, events, request_info events (type='request_info'), and streaming runs. 
""" diff --git a/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py b/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py index 178fe028a5..4b82839ffb 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py @@ -26,12 +26,10 @@ from typing import Any from agent_framework import ( + AgentExecutorResponse, ChatMessage, - RequestInfoEvent, WorkflowEvent, - WorkflowOutputEvent, ) -from agent_framework._workflows._agent_executor import AgentExecutorResponse from agent_framework.azure import AzureOpenAIChatClient from agent_framework.orchestrations import AgentRequestInfoResponse, ConcurrentBuilder from azure.identity import AzureCliCredential @@ -97,11 +95,10 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str requests: dict[str, AgentExecutorResponse] = {} async for event in stream: - if isinstance(event, RequestInfoEvent) and isinstance(event.data, AgentExecutorResponse): - # Display agent output for review and potential modification + if event.type == "request_info" and isinstance(event.data, AgentExecutorResponse): requests[event.request_id] = event.data - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": # The output of the workflow comes from the aggregator and it's a single string print("\n" + "=" * 60) print("ANALYSIS COMPLETE") diff --git a/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py b/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py index fb51c5b530..33b6c151b7 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py @@ -29,9 +29,7 @@ from agent_framework import ( AgentExecutorResponse, ChatMessage, 
- RequestInfoEvent, WorkflowEvent, - WorkflowOutputEvent, ) from agent_framework.azure import AzureOpenAIChatClient from agent_framework.orchestrations import AgentRequestInfoResponse, GroupChatBuilder @@ -43,10 +41,10 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str requests: dict[str, AgentExecutorResponse] = {} async for event in stream: - if isinstance(event, RequestInfoEvent) and isinstance(event.data, AgentExecutorResponse): + if event.type == "request_info" and isinstance(event.data, AgentExecutorResponse): requests[event.request_id] = event.data - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": # The output of the workflow comes from the orchestrator and it's a list of messages print("\n" + "=" * 60) print("DISCUSSION COMPLETE") diff --git a/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py b/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py index ef03d7bd05..bee4aeb61d 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py @@ -10,11 +10,9 @@ AgentResponseUpdate, ChatMessage, Executor, - RequestInfoEvent, WorkflowBuilder, WorkflowContext, WorkflowEvent, - WorkflowOutputEvent, handler, response_handler, ) @@ -46,7 +44,7 @@ # How human-in-the-loop is achieved via `request_info` and `send_responses_streaming`: # - An executor (TurnManager) calls `ctx.request_info` with a payload (HumanFeedbackRequest). -# - The workflow run pauses and emits a RequestInfoEvent with the payload and the request_id. +# - The workflow run pauses and emits a request_info event (type='request_info') with the payload and the request_id. # - The application captures the event, prompts the user, and collects replies. # - The application calls `send_responses_streaming` with a map of request_ids to replies.
# - The workflow resumes, and the response is delivered to the executor method decorated with @response_handler. @@ -132,11 +130,13 @@ async def on_human_feedback( return # Provide feedback to the agent to try again. - # We keep the agent's output strictly JSON to ensure stable parsing on the next turn. - user_msg = ChatMessage( - "user", - text=(f'Feedback: {reply}. Return ONLY a JSON object matching the schema {{"guess": }}.'), + # response_format=GuessOutput on the agent ensures JSON output, so we just need to guide the logic. + last_guess = original_request.prompt.split(": ")[1].split(".")[0] + feedback_text = ( + f"Feedback: {reply}. Your last guess was {last_guess}. " + f"Use this feedback to adjust and make your next guess (1-10)." ) + user_msg = ChatMessage("user", text=feedback_text) await ctx.send_message(AgentExecutorRequest(messages=[user_msg], should_respond=True)) @@ -147,9 +147,9 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str requests: list[tuple[str, HumanFeedbackRequest]] = [] async for event in stream: - if isinstance(event, RequestInfoEvent) and isinstance(event.data, HumanFeedbackRequest): + if event.type == "request_info" and isinstance(event.data, HumanFeedbackRequest): requests.append((event.request_id, event.data)) - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": if isinstance(event.data, AgentResponseUpdate): update = event.data response_id = update.response_id diff --git a/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py b/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py index bc9eff94f9..f545d46b0a 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py @@ -13,7 +13,7 @@ Demonstrate: - Configuring request info with `.with_request_info()` -- Handling RequestInfoEvent 
with AgentInputRequest data +- Handling request_info events (type='request_info') with AgentInputRequest data - Injecting responses back into the workflow via send_responses_streaming Prerequisites: @@ -28,9 +28,7 @@ from agent_framework import ( AgentExecutorResponse, ChatMessage, - RequestInfoEvent, WorkflowEvent, - WorkflowOutputEvent, ) from agent_framework.azure import AzureOpenAIChatClient from agent_framework.orchestrations import AgentRequestInfoResponse, SequentialBuilder @@ -42,10 +40,10 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str requests: dict[str, AgentExecutorResponse] = {} async for event in stream: - if isinstance(event, RequestInfoEvent) and isinstance(event.data, AgentExecutorResponse): + if event.type == "request_info" and isinstance(event.data, AgentExecutorResponse): requests[event.request_id] = event.data - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": # The output of the sequential workflow is a list of ChatMessages print("\n" + "=" * 60) print("WORKFLOW COMPLETE") diff --git a/python/samples/getting_started/workflows/observability/executor_io_observation.py b/python/samples/getting_started/workflows/observability/executor_io_observation.py index a8f7576fcb..822d0a7c72 100644 --- a/python/samples/getting_started/workflows/observability/executor_io_observation.py +++ b/python/samples/getting_started/workflows/observability/executor_io_observation.py @@ -5,11 +5,8 @@ from agent_framework import ( Executor, - ExecutorCompletedEvent, - ExecutorInvokedEvent, WorkflowBuilder, WorkflowContext, - WorkflowOutputEvent, handler, ) from typing_extensions import Never @@ -21,8 +18,8 @@ executor code. This is useful for debugging, logging, or building monitoring tools.
What this example shows: -- ExecutorInvokedEvent.data contains the input message received by the executor -- ExecutorCompletedEvent.data contains the messages sent via ctx.send_message() +- executor_invoked events (type='executor_invoked') contain the input message in event.data +- executor_completed events (type='executor_completed') contain the messages sent via ctx.send_message() in event.data - How to generically observe all executor I/O through workflow streaming events This approach allows you to enable_instrumentation any workflow for observability without @@ -92,18 +89,18 @@ async def main() -> None: print("Running workflow with executor I/O observation...\n") async for event in workflow.run("hello world", stream=True): - if isinstance(event, ExecutorInvokedEvent): + if event.type == "executor_invoked": # The input message received by the executor is in event.data print(f"[INVOKED] {event.executor_id}") print(f" Input: {format_io_data(event.data)}") - elif isinstance(event, ExecutorCompletedEvent): + elif event.type == "executor_completed": # Messages sent via ctx.send_message() are in event.data print(f"[COMPLETED] {event.executor_id}") if event.data: print(f" Output: {format_io_data(event.data)}") - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": print(f"[WORKFLOW OUTPUT] {format_io_data(event.data)}") """ diff --git a/python/samples/getting_started/workflows/orchestration/magentic_human_plan_review.py b/python/samples/getting_started/workflows/orchestration/magentic_human_plan_review.py deleted file mode 100644 index aa7b9b5f8c..0000000000 --- a/python/samples/getting_started/workflows/orchestration/magentic_human_plan_review.py +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. 
- -import asyncio -import json -from typing import cast - -from agent_framework import ( - AgentRunUpdateEvent, - ChatAgent, - ChatMessage, - MagenticBuilder, - MagenticPlanReviewRequest, - RequestInfoEvent, - WorkflowOutputEvent, -) -from agent_framework.openai import OpenAIChatClient - -""" -Sample: Magentic Orchestration with Human Plan Review - -This sample demonstrates how humans can review and provide feedback on plans -generated by the Magentic workflow orchestrator. When plan review is enabled, -the workflow requests human approval or revision before executing each plan. - -Key concepts: -- with_plan_review(): Enables human review of generated plans -- MagenticPlanReviewRequest: The event type for plan review requests -- Human can choose to: approve the plan or provide revision feedback - -Plan review options: -- approve(): Accept the proposed plan and continue execution -- revise(feedback): Provide textual feedback to modify the plan - -Prerequisites: -- OpenAI credentials configured for `OpenAIChatClient`. -""" - - -async def main() -> None: - researcher_agent = ChatAgent( - name="ResearcherAgent", - description="Specialist in research and information gathering", - instructions="You are a Researcher. You find information and gather facts.", - chat_client=OpenAIChatClient(model_id="gpt-4o"), - ) - - analyst_agent = ChatAgent( - name="AnalystAgent", - description="Data analyst who processes and summarizes research findings", - instructions="You are an Analyst. 
You analyze findings and create summaries.", - chat_client=OpenAIChatClient(model_id="gpt-4o"), - ) - - manager_agent = ChatAgent( - name="MagenticManager", - description="Orchestrator that coordinates the workflow", - instructions="You coordinate a team to complete tasks efficiently.", - chat_client=OpenAIChatClient(model_id="gpt-4o"), - ) - - print("\nBuilding Magentic Workflow with Human Plan Review...") - - workflow = ( - MagenticBuilder() - .participants([researcher_agent, analyst_agent]) - .with_manager( - agent=manager_agent, - max_round_count=10, - max_stall_count=1, - max_reset_count=2, - ) - .with_plan_review() # Request human input for plan review - .build() - ) - - task = "Research sustainable aviation fuel technology and summarize the findings." - - print(f"\nTask: {task}") - print("\nStarting workflow execution...") - print("=" * 60) - - pending_request: RequestInfoEvent | None = None - pending_responses: dict[str, object] | None = None - output_event: WorkflowOutputEvent | None = None - - while not output_event: - if pending_responses is not None: - stream = workflow.send_responses_streaming(pending_responses) - else: - stream = workflow.run(task, stream=True) - - last_message_id: str | None = None - async for event in stream: - if isinstance(event, AgentRunUpdateEvent): - message_id = event.data.message_id - if message_id != last_message_id: - if last_message_id is not None: - print("\n") - print(f"- {event.executor_id}:", end=" ", flush=True) - last_message_id = message_id - print(event.data, end="", flush=True) - - elif isinstance(event, RequestInfoEvent) and event.request_type is MagenticPlanReviewRequest: - pending_request = event - - elif isinstance(event, WorkflowOutputEvent): - output_event = event - - pending_responses = None - - # Handle plan review request if any - if pending_request is not None: - event_data = cast(MagenticPlanReviewRequest, pending_request.data) - - print("\n\n[Magentic Plan Review Request]") - if 
event_data.current_progress is not None: - print("Current Progress Ledger:") - print(json.dumps(event_data.current_progress.to_dict(), indent=2)) - print() - print(f"Proposed Plan:\n{event_data.plan.text}\n") - print("Please provide your feedback (press Enter to approve):") - - reply = await asyncio.get_event_loop().run_in_executor(None, input, "> ") - if reply.strip() == "": - print("Plan approved.\n") - pending_responses = {pending_request.request_id: event_data.approve()} - else: - print("Plan revised by human.\n") - pending_responses = {pending_request.request_id: event_data.revise(reply)} - pending_request = None - - print("\n" + "=" * 60) - print("WORKFLOW COMPLETED") - print("=" * 60) - print("Final Output:") - # The output of the Magentic workflow is a list of ChatMessages with only one final message - # generated by the orchestrator. - output_messages = cast(list[ChatMessage], output_event.data) - if output_messages: - output = output_messages[-1].text - print(output) - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/getting_started/workflows/parallelism/aggregate_results_of_different_types.py b/python/samples/getting_started/workflows/parallelism/aggregate_results_of_different_types.py index 8c01a81bc9..e4550c1ab2 100644 --- a/python/samples/getting_started/workflows/parallelism/aggregate_results_of_different_types.py +++ b/python/samples/getting_started/workflows/parallelism/aggregate_results_of_different_types.py @@ -3,7 +3,7 @@ import asyncio import random -from agent_framework import Executor, WorkflowBuilder, WorkflowContext, WorkflowOutputEvent, handler +from agent_framework import Executor, WorkflowBuilder, WorkflowContext, handler from typing_extensions import Never """ @@ -87,7 +87,7 @@ async def main() -> None: # 2) Run the workflow output: list[int | float] | None = None async for event in workflow.run([random.randint(1, 100) for _ in range(10)], stream=True): - if isinstance(event, WorkflowOutputEvent): + if 
event.type == "output": output = event.data if output is not None: diff --git a/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py b/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py index 0652fd86ed..2be9bc09f7 100644 --- a/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py +++ b/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py @@ -3,18 +3,14 @@ import asyncio from dataclasses import dataclass -from agent_framework import ( # Core chat primitives to build LLM requests +from agent_framework import ( AgentExecutorRequest, # The message bundle sent to an AgentExecutor AgentExecutorResponse, # The structured result returned by an AgentExecutor ChatAgent, # Tracing event for agent execution steps ChatMessage, # Chat message structure Executor, # Base class for custom Python executors - ExecutorCompletedEvent, - ExecutorInvokedEvent, - Role, # Enum of chat roles (user, assistant, system) WorkflowBuilder, # Fluent builder for wiring the workflow graph WorkflowContext, # Per run context and event bus - WorkflowOutputEvent, # Event emitted when workflow yields output handler, # Decorator to mark an Executor method as invokable ) from agent_framework.azure import AzureOpenAIChatClient @@ -45,7 +41,7 @@ class DispatchToExperts(Executor): @handler async def dispatch(self, prompt: str, ctx: WorkflowContext[AgentExecutorRequest]) -> None: # Wrap the incoming prompt as a user message for each expert and request a response. 
- initial_message = ChatMessage(Role.USER, text=prompt) + initial_message = ChatMessage("user", text=prompt) await ctx.send_message(AgentExecutorRequest(messages=[initial_message], should_respond=True)) @@ -143,12 +139,12 @@ async def main() -> None: async for event in workflow.run( "We are launching a new budget-friendly electric bike for urban commuters.", stream=True ): - if isinstance(event, ExecutorInvokedEvent): + if event.type == "executor_invoked": # Show when executors are invoked and completed for lightweight observability. print(f"{event.executor_id} invoked") - elif isinstance(event, ExecutorCompletedEvent): + elif event.type == "executor_completed": print(f"{event.executor_id} completed") - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": print("===== Final Aggregated Output =====") print(event.data) diff --git a/python/samples/getting_started/workflows/parallelism/map_reduce_and_visualization.py b/python/samples/getting_started/workflows/parallelism/map_reduce_and_visualization.py index c7ac2dee55..99494c59f4 100644 --- a/python/samples/getting_started/workflows/parallelism/map_reduce_and_visualization.py +++ b/python/samples/getting_started/workflows/parallelism/map_reduce_and_visualization.py @@ -10,8 +10,7 @@ from agent_framework import ( Executor, # Base class for custom workflow steps WorkflowBuilder, # Fluent builder for executors and edges - WorkflowContext, # Per run context with workflow state and messaging - WorkflowOutputEvent, # Event emitted when workflow yields output + WorkflowContext, # Per run context with shared state and messaging WorkflowViz, # Utility to visualize a workflow graph handler, # Decorator to expose an Executor method as a step ) @@ -332,7 +331,7 @@ async def main(): # Step 4: Run the workflow with the raw text as input. 
async for event in workflow.run(raw_text, stream=True): print(f"Event: {event}") - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": print(f"Final Output: {event.data}") diff --git a/python/samples/getting_started/workflows/state-management/workflow_kwargs.py b/python/samples/getting_started/workflows/state-management/workflow_kwargs.py index aeb8bbeaf0..25e46ab343 100644 --- a/python/samples/getting_started/workflows/state-management/workflow_kwargs.py +++ b/python/samples/getting_started/workflows/state-management/workflow_kwargs.py @@ -2,9 +2,9 @@ import asyncio import json -from typing import Annotated, Any +from typing import Annotated, Any, cast -from agent_framework import ChatMessage, WorkflowOutputEvent, tool +from agent_framework import ChatMessage, tool from agent_framework.openai import OpenAIChatClient from agent_framework.orchestrations import SequentialBuilder from pydantic import Field @@ -27,7 +27,9 @@ # Define tools that accept custom context via **kwargs -# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; +# see samples/getting_started/tools/function_tool_with_approval.py +# and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
@tool(approval_mode="never_require") def get_user_data( query: Annotated[str, Field(description="What user data to retrieve")], @@ -118,8 +120,8 @@ async def main() -> None: additional_function_arguments={"custom_data": custom_data, "user_token": user_token}, stream=True, ): - if isinstance(event, WorkflowOutputEvent): - output_data = event.data + if event.type == "output": + output_data = cast(list[ChatMessage], event.data) if isinstance(output_data, list): for item in output_data: if isinstance(item, ChatMessage) and item.text: diff --git a/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py index cfb425ae7e..e49c9456d2 100644 --- a/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py +++ b/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py @@ -6,14 +6,12 @@ from agent_framework import ( ChatMessage, - ConcurrentBuilder, Content, - RequestInfoEvent, WorkflowEvent, - WorkflowOutputEvent, tool, ) from agent_framework.openai import OpenAIChatClient +from agent_framework.orchestrations import ConcurrentBuilder """ Sample: Concurrent Workflow with Tool Approval Requests @@ -36,7 +34,7 @@ Demonstrate: - Handling multiple approval requests from different agents in concurrent workflows. -- Handling RequestInfoEvent during concurrent agent execution. +- Handling request_info events (type='request_info') during concurrent agent execution. - Understanding that approval pauses only the agent that triggered it, not all agents. Prerequisites:
-def _print_output(event: WorkflowOutputEvent) -> None: +def _print_output(event: WorkflowEvent) -> None: if not event.data: - raise ValueError("WorkflowOutputEvent has no data") + raise ValueError("WorkflowEvent has no data") if not isinstance(event.data, list) and not all(isinstance(msg, ChatMessage) for msg in event.data): - raise ValueError("WorkflowOutputEvent data is not a list of ChatMessage") + raise ValueError("WorkflowEvent data is not a list of ChatMessage") messages: list[ChatMessage] = event.data # type: ignore @@ -109,10 +107,10 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str """Process events from the workflow stream to capture human feedback requests.""" requests: dict[str, Content] = {} async for event in stream: - if isinstance(event, RequestInfoEvent) and isinstance(event.data, Content): + if event.type == "request_info" and isinstance(event.data, Content): # We are only expecting tool approval requests in this sample requests[event.request_id] = event.data - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": _print_output(event) responses: dict[str, Content] = {} diff --git a/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py index eeee1abfb2..732b73d746 100644 --- a/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py +++ b/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py @@ -7,14 +7,11 @@ from agent_framework import ( ChatMessage, Content, - GroupChatBuilder, - GroupChatState, - RequestInfoEvent, WorkflowEvent, - WorkflowOutputEvent, tool, ) from agent_framework.openai import OpenAIChatClient +from agent_framework.orchestrations import GroupChatBuilder, GroupChatState """ Sample: Group Chat Workflow with Tool Approval Requests @@ -36,7 +33,7 @@ Demonstrate: - Using 
set_select_speakers_func with agents that have approval-required tools. -- Handling RequestInfoEvent in group chat scenarios. +- Handling request_info events (type='request_info') in group chat scenarios. - Multi-round group chat with tool approval interruption and resumption. Prerequisites: @@ -99,16 +96,16 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str """Process events from the workflow stream to capture human feedback requests.""" requests: dict[str, Content] = {} async for event in stream: - if isinstance(event, RequestInfoEvent) and isinstance(event.data, Content): + if event.type == "request_info" and isinstance(event.data, Content): # We are only expecting tool approval requests in this sample requests[event.request_id] = event.data - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": # The output of the workflow comes from the orchestrator and it's a list of messages print("\n" + "=" * 60) print("Workflow summary:") outputs = cast(list[ChatMessage], event.data) for msg in outputs: - speaker = msg.author_name or msg.role.value + speaker = msg.author_name or msg.role print(f"[{speaker}]: {msg.text}") responses: dict[str, Content] = {} diff --git a/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py index d0e234e1db..3695097363 100644 --- a/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py +++ b/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py @@ -7,13 +7,11 @@ from agent_framework import ( ChatMessage, Content, - RequestInfoEvent, - SequentialBuilder, WorkflowEvent, - WorkflowOutputEvent, tool, ) from agent_framework.openai import OpenAIChatClient +from agent_framework.orchestrations import SequentialBuilder """ Sample: Sequential Workflow with Tool Approval Requests @@ -26,7 +24,7 @@ 
1. A SequentialBuilder workflow is created with a single agent that has tools requiring approval. 2. The agent receives a user task and determines it needs to call a sensitive tool. 3. The tool call triggers a function_approval_request Content, pausing the workflow. -4. The sample simulates human approval by responding to the RequestInfoEvent. +4. The sample simulates human approval by responding to the request_info event (type='request_info'). 5. Once approved, the tool executes and the agent completes its response. 6. The workflow outputs the final conversation with all messages. @@ -36,7 +34,7 @@ Demonstrate: - Using @tool(approval_mode="always_require") for sensitive operations. -- Handling RequestInfoEvent with function_approval_request Content in sequential workflows. +- Handling request_info events (type='request_info') with function_approval_request Content in sequential workflows. - Resuming workflow execution after approval via send_responses_streaming. Prerequisites: @@ -55,7 +53,9 @@ def execute_database_query( return f"Query executed successfully. Results: 3 rows affected by '{query}'" -# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; +# see samples/getting_started/tools/function_tool_with_approval.py and +# samples/getting_started/tools/function_tool_with_approval_and_threads.py. @tool(approval_mode="never_require") def get_database_schema() -> str: """Get the current database schema.
Does not require approval.""" @@ -71,10 +71,10 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str """Process events from the workflow stream to capture human feedback requests.""" requests: dict[str, Content] = {} async for event in stream: - if isinstance(event, RequestInfoEvent) and isinstance(event.data, Content): + if event.type == "request_info" and isinstance(event.data, Content): # We are only expecting tool approval requests in this sample requests[event.request_id] = event.data - elif isinstance(event, WorkflowOutputEvent): + elif event.type == "output": # The output of the workflow comes from the orchestrator and it's a list of messages print("\n" + "=" * 60) print("Workflow summary:") diff --git a/python/samples/semantic-kernel-migration/orchestrations/concurrent_basic.py b/python/samples/semantic-kernel-migration/orchestrations/concurrent_basic.py index efd3d80e5d..18afcda4d0 100644 --- a/python/samples/semantic-kernel-migration/orchestrations/concurrent_basic.py +++ b/python/samples/semantic-kernel-migration/orchestrations/concurrent_basic.py @@ -6,7 +6,7 @@ from collections.abc import Sequence from typing import cast -from agent_framework import ChatMessage, ConcurrentBuilder, WorkflowOutputEvent +from agent_framework import ChatMessage, ConcurrentBuilder, WorkflowEvent from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential from semantic_kernel.agents import Agent, ChatCompletionAgent, ConcurrentOrchestration @@ -91,7 +91,7 @@ async def run_agent_framework_example(prompt: str) -> Sequence[list[ChatMessage] outputs: list[list[ChatMessage]] = [] async for event in workflow.run(prompt, stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": outputs.append(cast(list[ChatMessage], event.data)) return outputs diff --git a/python/samples/semantic-kernel-migration/orchestrations/group_chat.py
b/python/samples/semantic-kernel-migration/orchestrations/group_chat.py index 76ab8ee692..2c8e82e9bd 100644 --- a/python/samples/semantic-kernel-migration/orchestrations/group_chat.py +++ b/python/samples/semantic-kernel-migration/orchestrations/group_chat.py @@ -7,7 +7,7 @@ from collections.abc import Sequence from typing import Any, cast -from agent_framework import ChatAgent, ChatMessage, GroupChatBuilder, WorkflowOutputEvent +from agent_framework import ChatAgent, ChatMessage, GroupChatBuilder, WorkflowEvent from agent_framework.azure import AzureOpenAIChatClient, AzureOpenAIResponsesClient from azure.identity import AzureCliCredential from semantic_kernel.agents import Agent, ChatCompletionAgent, GroupChatOrchestration @@ -240,7 +240,7 @@ async def run_agent_framework_example(task: str) -> str: final_response = "" async for event in workflow.run(task, stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": data = event.data if isinstance(data, list) and len(data) > 0: # Get the final message from the conversation diff --git a/python/samples/semantic-kernel-migration/orchestrations/handoff.py b/python/samples/semantic-kernel-migration/orchestrations/handoff.py index f2333c0fb5..550429448c 100644 --- a/python/samples/semantic-kernel-migration/orchestrations/handoff.py +++ b/python/samples/semantic-kernel-migration/orchestrations/handoff.py @@ -8,12 +8,9 @@ from agent_framework import ( ChatMessage, - HandoffBuilder, - HandoffUserInputRequest, - RequestInfoEvent, WorkflowEvent, - WorkflowOutputEvent, ) +from agent_framework.orchestrations import HandoffBuilder, HandoffUserInputRequest from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential from semantic_kernel.agents import Agent, ChatCompletionAgent, HandoffOrchestration, OrchestrationHandoffs @@ -214,17 +211,17 @@ async def _drain_events(stream: AsyncIterable[WorkflowEvent]) -> list[WorkflowEv return [event async for event in stream]
-def _collect_handoff_requests(events: list[WorkflowEvent]) -> list[RequestInfoEvent]: - requests: list[RequestInfoEvent] = [] +def _collect_handoff_requests(events: list[WorkflowEvent]) -> list[WorkflowEvent]: + requests: list[WorkflowEvent] = [] for event in events: - if isinstance(event, RequestInfoEvent) and isinstance(event.data, HandoffUserInputRequest): + if event.type == "request_info" and isinstance(event.data, HandoffUserInputRequest): requests.append(event) return requests def _extract_final_conversation(events: list[WorkflowEvent]) -> list[ChatMessage]: for event in events: - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": data = cast(list[ChatMessage], event.data) return data return [] diff --git a/python/samples/semantic-kernel-migration/orchestrations/magentic.py b/python/samples/semantic-kernel-migration/orchestrations/magentic.py index db201da443..9c4aea6187 100644 --- a/python/samples/semantic-kernel-migration/orchestrations/magentic.py +++ b/python/samples/semantic-kernel-migration/orchestrations/magentic.py @@ -6,7 +6,7 @@ from collections.abc import Sequence from typing import cast -from agent_framework import ChatAgent, HostedCodeInterpreterTool, MagenticBuilder, WorkflowOutputEvent +from agent_framework import ChatAgent, HostedCodeInterpreterTool, MagenticBuilder, WorkflowEvent from agent_framework.openai import OpenAIChatClient, OpenAIResponsesClient from semantic_kernel.agents import ( Agent, @@ -148,7 +148,7 @@ async def run_agent_framework_example(prompt: str) -> str | None: final_text: str | None = None async for event in workflow.run(prompt, stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": final_text = cast(str, event.data) return final_text diff --git a/python/samples/semantic-kernel-migration/orchestrations/sequential.py b/python/samples/semantic-kernel-migration/orchestrations/sequential.py index e433c8c3d4..91d23b02c8 100644 ---
a/python/samples/semantic-kernel-migration/orchestrations/sequential.py +++ b/python/samples/semantic-kernel-migration/orchestrations/sequential.py @@ -6,8 +6,9 @@ from collections.abc import Sequence from typing import cast -from agent_framework import ChatMessage, SequentialBuilder, WorkflowOutputEvent +from agent_framework import ChatMessage from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.orchestrations import SequentialBuilder from azure.identity import AzureCliCredential from semantic_kernel.agents import Agent, ChatCompletionAgent, SequentialOrchestration from semantic_kernel.agents.runtime import InProcessRuntime @@ -77,7 +78,7 @@ async def run_agent_framework_example(prompt: str) -> list[ChatMessage]: conversation_outputs: list[list[ChatMessage]] = [] async for event in workflow.run(prompt, stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": conversation_outputs.append(cast(list[ChatMessage], event.data)) return conversation_outputs[-1] if conversation_outputs else [] diff --git a/python/samples/semantic-kernel-migration/processes/fan_out_fan_in_process.py b/python/samples/semantic-kernel-migration/processes/fan_out_fan_in_process.py index cb27e53cc0..3ddb656abf 100644 --- a/python/samples/semantic-kernel-migration/processes/fan_out_fan_in_process.py +++ b/python/samples/semantic-kernel-migration/processes/fan_out_fan_in_process.py @@ -11,7 +11,7 @@ ###################################################################### # region Agent Framework imports ###################################################################### -from agent_framework import Executor, WorkflowBuilder, WorkflowContext, WorkflowOutputEvent, handler +from agent_framework import Executor, WorkflowBuilder, WorkflowContext, handler from pydantic import BaseModel, Field ###################################################################### @@ -232,7 +232,7 @@ async def run_agent_framework_workflow_example() -> str | None: 
final_text: str | None = None async for event in workflow.run(CommonEvents.START_PROCESS, stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": final_text = cast(str, event.data) return final_text diff --git a/python/samples/semantic-kernel-migration/processes/nested_process.py b/python/samples/semantic-kernel-migration/processes/nested_process.py index 40c682a805..849457d324 100644 --- a/python/samples/semantic-kernel-migration/processes/nested_process.py +++ b/python/samples/semantic-kernel-migration/processes/nested_process.py @@ -17,7 +17,7 @@ WorkflowBuilder, WorkflowContext, WorkflowExecutor, - WorkflowOutputEvent, + handler, ) from pydantic import BaseModel, Field @@ -257,7 +257,7 @@ async def run_agent_framework_nested_workflow(initial_message: str) -> Sequence[ results: list[str] = [] async for event in outer_workflow.run(initial_message, stream=True): - if isinstance(event, WorkflowOutputEvent): + if event.type == "output": results.append(cast(str, event.data)) return results