From a68e3fa0dab0b3e134de1620658456ef2f8a04a3 Mon Sep 17 00:00:00 2001 From: Ahmed Muhsin Date: Thu, 18 Dec 2025 20:05:13 -0600 Subject: [PATCH 01/18] initial commit - migrate code from daf repo --- .../__init__.py | 2 + .../agent_framework_azurefunctions/_app.py | 199 ++++++++- .../_orchestration.py | 2 + .../_shared_state.py | 252 +++++++++++ .../agent_framework_azurefunctions/_utils.py | 317 +++++++++++++ .../_workflow.py | 416 ++++++++++++++++++ .../09_workflow_shared_state/.gitignore | 18 + .../09_workflow_shared_state/README.md | 98 +++++ .../09_workflow_shared_state/demo.http | 31 ++ .../09_workflow_shared_state/function_app.py | 198 +++++++++ .../09_workflow_shared_state/host.json | 7 + .../local.settings.json.sample | 10 + .../09_workflow_shared_state/requirements.txt | 6 + .../10_workflow_no_shared_state/.env.sample | 4 + .../10_workflow_no_shared_state/.gitignore | 2 + .../10_workflow_no_shared_state/README.md | 217 +++++++++ .../10_workflow_no_shared_state/demo.http | 32 ++ .../function_app.py | 229 ++++++++++ .../10_workflow_no_shared_state/host.json | 23 + .../local.settings.json.sample | 11 + .../requirements.txt | 3 + 21 files changed, 2076 insertions(+), 1 deletion(-) create mode 100644 python/packages/azurefunctions/agent_framework_azurefunctions/_shared_state.py create mode 100644 python/packages/azurefunctions/agent_framework_azurefunctions/_utils.py create mode 100644 python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py create mode 100644 python/samples/getting_started/azure_functions/09_workflow_shared_state/.gitignore create mode 100644 python/samples/getting_started/azure_functions/09_workflow_shared_state/README.md create mode 100644 python/samples/getting_started/azure_functions/09_workflow_shared_state/demo.http create mode 100644 python/samples/getting_started/azure_functions/09_workflow_shared_state/function_app.py create mode 100644 python/samples/getting_started/azure_functions/09_workflow_shared_state/host.json 
create mode 100644 python/samples/getting_started/azure_functions/09_workflow_shared_state/local.settings.json.sample create mode 100644 python/samples/getting_started/azure_functions/09_workflow_shared_state/requirements.txt create mode 100644 python/samples/getting_started/azure_functions/10_workflow_no_shared_state/.env.sample create mode 100644 python/samples/getting_started/azure_functions/10_workflow_no_shared_state/.gitignore create mode 100644 python/samples/getting_started/azure_functions/10_workflow_no_shared_state/README.md create mode 100644 python/samples/getting_started/azure_functions/10_workflow_no_shared_state/demo.http create mode 100644 python/samples/getting_started/azure_functions/10_workflow_no_shared_state/function_app.py create mode 100644 python/samples/getting_started/azure_functions/10_workflow_no_shared_state/host.json create mode 100644 python/samples/getting_started/azure_functions/10_workflow_no_shared_state/local.settings.json.sample create mode 100644 python/samples/getting_started/azure_functions/10_workflow_no_shared_state/requirements.txt diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/__init__.py b/python/packages/azurefunctions/agent_framework_azurefunctions/__init__.py index 960b8d4023..18f13d7728 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/__init__.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/__init__.py @@ -6,6 +6,7 @@ from ._app import AgentFunctionApp from ._orchestration import DurableAIAgent +from ._shared_state import DurableSharedState try: __version__ = importlib.metadata.version(__name__) @@ -17,5 +18,6 @@ "AgentFunctionApp", "AgentResponseCallbackProtocol", "DurableAIAgent", + "DurableSharedState", "__version__", ] diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py index 61e678a38f..40cfd8f701 100644 --- 
a/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py @@ -6,6 +6,7 @@ with Azure Durable Entities, enabling stateful and durable AI agent execution. """ +import asyncio import json import re from collections.abc import Callable, Mapping @@ -15,7 +16,8 @@ import azure.durable_functions as df import azure.functions as func -from agent_framework import AgentProtocol, get_logger +from agent_framework import AgentExecutor, AgentProtocol, Workflow, get_logger +from agent_framework._workflows._typing_utils import is_instance_of from agent_framework_durabletask import ( DEFAULT_MAX_POLL_RETRIES, DEFAULT_POLL_INTERVAL_SECONDS, @@ -36,6 +38,9 @@ from ._errors import IncomingRequestError from ._models import AgentSessionId from ._orchestration import AgentOrchestrationContextType, DurableAIAgent +from ._shared_state import SHARED_STATE_ENTITY_NAME, DurableSharedState, create_shared_state_entity_function +from ._utils import CapturingWorkflowContext, reconstruct_message_for_handler, serialize_message +from ._workflow import run_workflow_orchestrator logger = get_logger("agent_framework.azurefunctions") @@ -148,16 +153,21 @@ def my_orchestration(context): enable_mcp_tool_trigger: Whether MCP tool triggers are created for agents max_poll_retries: Maximum polling attempts when waiting for responses poll_interval_seconds: Delay (seconds) between polling attempts + workflow: Optional Workflow instance for workflow orchestration + enable_shared_state: Whether SharedState entity is enabled for workflows """ _agent_metadata: dict[str, AgentMetadata] enable_health_check: bool enable_http_endpoints: bool enable_mcp_tool_trigger: bool + workflow: Workflow | None + enable_shared_state: bool def __init__( self, agents: list[AgentProtocol] | None = None, + workflow: Workflow | None = None, http_auth_level: func.AuthLevel = func.AuthLevel.FUNCTION, enable_health_check: bool = True, 
enable_http_endpoints: bool = True, @@ -165,10 +175,12 @@ def __init__( poll_interval_seconds: float = DEFAULT_POLL_INTERVAL_SECONDS, enable_mcp_tool_trigger: bool = False, default_callback: AgentResponseCallbackProtocol | None = None, + enable_shared_state: bool = True, ): """Initialize the AgentFunctionApp. :param agents: List of agent instances to register. + :param workflow: Optional Workflow instance to extract agents from and set up orchestration. :param http_auth_level: HTTP authentication level (default: ``func.AuthLevel.FUNCTION``). :param enable_health_check: Enable the built-in health check endpoint (default: ``True``). :param enable_http_endpoints: Enable HTTP endpoints for agents (default: ``True``). @@ -179,6 +191,7 @@ def __init__( :param poll_interval_seconds: Delay in seconds between polling attempts. Defaults to ``DEFAULT_POLL_INTERVAL_SECONDS``. :param default_callback: Optional callback invoked for agents without specific callbacks. + :param enable_shared_state: Enable SharedState entity for workflow executors (default: ``True``). :note: If no agents are provided, they can be added later using :meth:`add_agent`. 
""" @@ -193,6 +206,8 @@ def __init__( self.enable_http_endpoints = enable_http_endpoints self.enable_mcp_tool_trigger = enable_mcp_tool_trigger self.default_callback = default_callback + self.workflow = workflow + self.enable_shared_state = enable_shared_state try: retries = int(max_poll_retries) @@ -206,6 +221,18 @@ def __init__( interval = DEFAULT_POLL_INTERVAL_SECONDS self.poll_interval_seconds = interval if interval > 0 else DEFAULT_POLL_INTERVAL_SECONDS + # If workflow is provided, extract agents and set up orchestration + if workflow: + if agents is None: + agents = [] + logger.debug("[AgentFunctionApp] Extracting agents from workflow") + for executor in workflow.executors.values(): + if isinstance(executor, AgentExecutor): + agents.append(executor._agent) + + self._setup_executor_activity() + self._setup_workflow_orchestration() + if agents: # Register all provided agents logger.debug(f"[AgentFunctionApp] Registering {len(agents)} agent(s)") @@ -218,6 +245,176 @@ def __init__( logger.debug("[AgentFunctionApp] Initialization complete") + def _setup_executor_activity(self) -> None: + """Register the activity for executing standard executors.""" + + @self.activity_trigger(input_name="inputData") + def ExecuteExecutor(inputData: str) -> str: + """Activity to execute non-agent executors. + + Note: We use str type annotations instead of dict to work around + Azure Functions worker type validation issues with dict[str, Any]. 
+ """ + import json as json_module + + data = json_module.loads(inputData) + executor_id = data["executor_id"] + message_data = data["message"] + shared_state_snapshot = data.get("shared_state_snapshot", {}) + + if not self.workflow: + raise RuntimeError("Workflow not initialized in AgentFunctionApp") + + executor = self.workflow.executors.get(executor_id) + if not executor: + raise ValueError(f"Unknown executor: {executor_id}") + + # Reconstruct message - try to match handler's expected types + message = reconstruct_message_for_handler(message_data, executor._handlers) + + ctx = CapturingWorkflowContext( + shared_state_snapshot=shared_state_snapshot, + ) + + async def run() -> None: + # Find handler + handler = None + for message_type, handler_func in executor._handlers.items(): + if is_instance_of(message, message_type): + handler = handler_func + break + + if handler: + await handler(message, ctx) + else: + raise ValueError(f"Executor {executor_id} cannot handle message of type {type(message)}") + + asyncio.run(run()) + + updates, deletes = ctx.get_shared_state_changes() + + # Serialize all outputs for JSON compatibility + serialized_sent_messages = [ + { + "message": serialize_message(msg["message"]), + "target_id": msg.get("target_id"), + } + for msg in ctx.sent_messages + ] + serialized_outputs = [serialize_message(o) for o in ctx.outputs] + serialized_updates = {k: serialize_message(v) for k, v in updates.items()} + + result = { + "sent_messages": serialized_sent_messages, + "outputs": serialized_outputs, + "shared_state_updates": serialized_updates, + "shared_state_deletes": list(deletes), + } + return json_module.dumps(result) + + def _setup_shared_state_entity(self) -> None: + """Register the SharedState durable entity for workflow state sharing.""" + entity_function = create_shared_state_entity_function() + entity_function.__name__ = SHARED_STATE_ENTITY_NAME + self.entity_trigger(context_name="context", 
entity_name=SHARED_STATE_ENTITY_NAME)(entity_function) + logger.debug(f"[AgentFunctionApp] Registered SharedState entity: {SHARED_STATE_ENTITY_NAME}") + + def _setup_workflow_orchestration(self) -> None: + """Register the workflow orchestration and related HTTP endpoints.""" + + # Only register the SharedState entity if enabled + if self.enable_shared_state: + self._setup_shared_state_entity() + + # Capture enable_shared_state for use in nested function + _enable_shared_state = self.enable_shared_state + + @self.orchestration_trigger(context_name="context") + def workflow_orchestrator(context: df.DurableOrchestrationContext): # type: ignore[type-arg] + """Generic orchestrator for running the configured workflow.""" + input_data = context.get_input() + + # Ensure input is a string for the agent + if isinstance(input_data, (dict, list)): + initial_message = json.dumps(input_data) + else: + initial_message = str(input_data) + + # Only create DurableSharedState if enabled to avoid extra entity calls + shared_state = None + if _enable_shared_state: + shared_state = DurableSharedState(context, context.instance_id) + + outputs = yield from run_workflow_orchestrator(context, self.workflow, initial_message, shared_state) + return outputs + + @self.route(route="workflow/run", methods=["POST"]) + @self.durable_client_input(client_name="client") + async def start_workflow_orchestration( + req: func.HttpRequest, client: df.DurableOrchestrationClient + ) -> func.HttpResponse: + """HTTP endpoint to start the workflow.""" + try: + req_body = req.get_json() + except ValueError: + return func.HttpResponse( + json.dumps({"error": "Invalid JSON body"}), + status_code=400, + mimetype="application/json", + ) + + instance_id = await client.start_new("workflow_orchestrator", client_input=req_body) + + status_url = self._build_status_url(req.url, instance_id) + + return func.HttpResponse( + json.dumps({ + "instanceId": instance_id, + "statusQueryGetUri": status_url, + "message": "Workflow 
started", + }), + status_code=202, + mimetype="application/json", + ) + + @self.route(route="workflow/status/{instanceId}", methods=["GET"]) + @self.durable_client_input(client_name="client") + async def get_workflow_status( + req: func.HttpRequest, client: df.DurableOrchestrationClient + ) -> func.HttpResponse: + """HTTP endpoint to get workflow status.""" + instance_id = req.route_params.get("instanceId") + status = await client.get_status(instance_id) + + if not status: + return func.HttpResponse( + json.dumps({"error": "Instance not found"}), + status_code=404, + mimetype="application/json", + ) + + response = { + "instanceId": status.instance_id, + "runtimeStatus": status.runtime_status.name if status.runtime_status else None, + "output": status.output, + "error": status.output if status.runtime_status == df.OrchestrationRuntimeStatus.Failed else None, + "createdTime": status.created_time.isoformat() if status.created_time else None, + "lastUpdatedTime": status.last_updated_time.isoformat() if status.last_updated_time else None, + } + + return func.HttpResponse( + json.dumps(response), + status_code=200, + mimetype="application/json", + ) + + def _build_status_url(self, request_url: str, instance_id: str) -> str: + """Build the status URL for a workflow instance.""" + base_url, _, _ = request_url.partition("/api/") + if not base_url: + base_url = request_url.rstrip("/") + return f"{base_url}/api/workflow/status/{instance_id}" + @property def agents(self) -> dict[str, AgentProtocol]: """Returns dict of agent names to agent instances. 
diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_orchestration.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_orchestration.py index 24b1b27368..bf4678ae0a 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/_orchestration.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_orchestration.py @@ -7,6 +7,7 @@ import uuid from collections.abc import AsyncIterator, Callable +from datetime import datetime, timezone from typing import TYPE_CHECKING, Any, TypeAlias, cast from agent_framework import ( @@ -281,6 +282,7 @@ def my_orchestration(context): thread_id=session_id.key, response_format=response_format, orchestration_id=self.context.instance_id, + created_at=datetime.now(timezone.utc), ) logger.debug("[DurableAIAgent] Calling entity %s with message: %s", entity_id, message_str[:100]) diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_shared_state.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_shared_state.py new file mode 100644 index 0000000000..8608c8ddf6 --- /dev/null +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_shared_state.py @@ -0,0 +1,252 @@ +# Copyright (c) Microsoft. All rights reserved. + +""" +Durable Shared State for Workflow Execution + +This module provides a durable SharedState implementation that allows executors +in a workflow to share state across the execution lifecycle. Unlike MAF's in-memory +SharedState which uses async locks, this implementation is backed by Azure Durable +Entities for durability and replay-safety. 
+ +Key features: +- DurableSharedState: Orchestration-side wrapper for shared state operations +- SharedStateEntity: Entity function that stores the shared state +- Compatible API with agent_framework SharedState + +Usage: + In run_workflow_orchestrator: + shared_state = DurableSharedState(context, session_id) + value = yield shared_state.get("my_key") + yield shared_state.set("my_key", "my_value") +""" + +from __future__ import annotations + +import logging +from collections.abc import Generator +from dataclasses import dataclass, field +from typing import Any + +from azure.durable_functions import DurableOrchestrationContext, EntityId + +logger = logging.getLogger(__name__) + +# Entity name for SharedState +SHARED_STATE_ENTITY_NAME = "SharedStateEntity" + + +@dataclass +class SharedStateData: + """ + The underlying data structure for shared state. + + This is stored as the state of the SharedStateEntity. + """ + + state: dict[str, Any] = field(default_factory=dict) + + def to_dict(self) -> dict[str, Any]: + """Serialize to dictionary for entity storage.""" + return {"state": self.state} + + @classmethod + def from_dict(cls, data: dict[str, Any] | None) -> SharedStateData: + """Deserialize from entity state.""" + if data is None: + return cls() + return cls(state=data.get("state", {})) + + +class DurableSharedState: + """ + Orchestration-side wrapper for shared state operations. + + This class provides a generator-based API compatible with Durable Functions + orchestrations. Each operation (get, set, has, delete) returns a generator + that yields entity calls. + + The shared state is scoped to a workflow session using the session_id as + the entity instance id. 
+ + Example: + shared_state = DurableSharedState(context, "session-123") + + # Get a value + value = yield from shared_state.get("my_key") + + # Set a value + yield from shared_state.set("my_key", {"data": "value"}) + + # Check if key exists + exists = yield from shared_state.has("my_key") + + # Delete a key + yield from shared_state.delete("my_key") + + # Get all state + all_state = yield from shared_state.get_all() + """ + + def __init__(self, context: DurableOrchestrationContext, session_id: str) -> None: + """ + Initialize the shared state wrapper. + + Args: + context: The Durable Functions orchestration context + session_id: The session identifier used as the entity instance id + """ + self._context = context + self._session_id = session_id + self._entity_id = EntityId(SHARED_STATE_ENTITY_NAME, session_id) + + @property + def entity_id(self) -> EntityId: + """Get the entity ID for this shared state instance.""" + return self._entity_id + + def get(self, key: str, default: Any = None) -> Generator[Any, Any, Any]: + """ + Get a value from the shared state. + + Args: + key: The key to retrieve + default: Default value if key doesn't exist + + Returns: + Generator that yields the value or default + """ + result = yield self._context.call_entity(self._entity_id, "get", {"key": key, "default": default}) + return result + + def set(self, key: str, value: Any) -> Generator[Any, Any, None]: + """ + Set a value in the shared state. + + Args: + key: The key to set + value: The value to store (must be JSON serializable) + """ + yield self._context.call_entity(self._entity_id, "set", {"key": key, "value": value}) + + def has(self, key: str) -> Generator[Any, Any, bool]: + """ + Check if a key exists in the shared state. 
+ + Args: + key: The key to check + + Returns: + Generator that yields True if key exists, False otherwise + """ + result = yield self._context.call_entity(self._entity_id, "has", {"key": key}) + return result + + def delete(self, key: str) -> Generator[Any, Any, bool]: + """ + Delete a key from the shared state. + + Args: + key: The key to delete + + Returns: + Generator that yields True if key was deleted, False if it didn't exist + """ + result = yield self._context.call_entity(self._entity_id, "delete", {"key": key}) + return result + + def get_all(self) -> Generator[Any, Any, dict[str, Any]]: + """ + Get all shared state as a dictionary. + + Returns: + Generator that yields the complete state dictionary + """ + result = yield self._context.call_entity(self._entity_id, "get_all", None) + return result if result else {} + + def update(self, updates: dict[str, Any]) -> Generator[Any, Any, None]: + """ + Update multiple keys at once. + + Args: + updates: Dictionary of key-value pairs to update + """ + yield self._context.call_entity(self._entity_id, "update", {"updates": updates}) + + def clear(self) -> Generator[Any, Any, None]: + """ + Clear all shared state. + """ + yield self._context.call_entity(self._entity_id, "clear", None) + + +def create_shared_state_entity_function(): + """ + Create the entity function for SharedState. 
+ + This function handles all shared state operations: + - get: Retrieve a value by key + - set: Store a value by key + - has: Check if a key exists + - delete: Remove a key + - get_all: Get the complete state dictionary + - update: Update multiple keys at once + - clear: Clear all state + + Returns: + The entity function to be registered with the Durable Functions app + """ + + def shared_state_entity(context): + """Entity function for SharedState storage.""" + # Get or initialize state + current_state = context.get_state(lambda: {"state": {}}) + state_data = SharedStateData.from_dict(current_state) + + operation = context.operation_name + operation_input = context.get_input() + + logger.debug("[SharedState] Operation: %s, Input: %s", operation, operation_input) + + if operation == "get": + key = operation_input.get("key") + default = operation_input.get("default") + result = state_data.state.get(key, default) + context.set_result(result) + + elif operation == "set": + key = operation_input.get("key") + value = operation_input.get("value") + state_data.state[key] = value + context.set_state(state_data.to_dict()) + + elif operation == "has": + key = operation_input.get("key") + result = key in state_data.state + context.set_result(result) + + elif operation == "delete": + key = operation_input.get("key") + if key in state_data.state: + del state_data.state[key] + context.set_state(state_data.to_dict()) + context.set_result(True) + else: + context.set_result(False) + + elif operation == "get_all": + context.set_result(state_data.state.copy()) + + elif operation == "update": + updates = operation_input.get("updates", {}) + state_data.state.update(updates) + context.set_state(state_data.to_dict()) + + elif operation == "clear": + state_data.state.clear() + context.set_state(state_data.to_dict()) + + else: + logger.warning("[SharedState] Unknown operation: %s", operation) + + return shared_state_entity diff --git 
a/python/packages/azurefunctions/agent_framework_azurefunctions/_utils.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_utils.py new file mode 100644 index 0000000000..7a87acf49e --- /dev/null +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_utils.py @@ -0,0 +1,317 @@ +# Copyright (c) Microsoft. All rights reserved. + +""" +Utility functions for workflow execution. + +This module provides helper functions for serialization, deserialization, and +context management used by the workflow orchestrator and executors. +""" + +from __future__ import annotations + +import logging +from dataclasses import asdict, fields, is_dataclass +from typing import Any + +from agent_framework import AgentExecutorRequest, AgentExecutorResponse, AgentRunResponse, ChatMessage +from agent_framework._workflows._shared_state import SharedState +from pydantic import BaseModel + +logger = logging.getLogger(__name__) + + +class CapturingWorkflowContext: + """ + Context that captures outputs, sent messages, and shared state changes. + + Provides a WorkflowContext-compatible API for custom executors running + in activities. Uses MAF's SharedState class internally, initialized with + a snapshot from the orchestrator. After execution, changes are diffed + against the original snapshot to determine updates and deletes. + + This class does NOT inherit from WorkflowContext to avoid requiring + RunnerContext instances. Instead, it duck-types the interface that + executor handlers expect. + """ + + def __init__( + self, + shared_state_snapshot: dict[str, Any] | None = None, + ) -> None: + """ + Initialize the capturing context. 
+ + Args: + shared_state_snapshot: Snapshot of current shared state from orchestrator + """ + # Keep original snapshot for diffing later + self._original_snapshot: dict[str, Any] = dict(shared_state_snapshot or {}) + + # Create real SharedState, pre-populated with snapshot + self._shared_state = SharedState() + self._shared_state._state = dict(shared_state_snapshot or {}) + + # Captured outputs + self.sent_messages: list[dict[str, Any]] = [] + self.outputs: list[Any] = [] + + @property + def shared_state(self) -> SharedState: + """Get the shared state object for direct access.""" + return self._shared_state + + async def send_message(self, message: Any, target_id: str | None = None) -> None: + """Capture a message to be routed by the orchestrator.""" + self.sent_messages.append({"message": message, "target_id": target_id}) + + async def yield_output(self, output: Any) -> None: + """Capture a workflow output.""" + self.outputs.append(output) + + async def get_shared_state(self, key: str) -> Any: + """ + Get a value from shared state. + + If the stored value has type metadata (__type__, __module__), + attempts to reconstruct the original typed object. + + Args: + key: The key to retrieve + + Returns: + The value associated with the key (possibly reconstructed) + + Raises: + KeyError: If the key doesn't exist + """ + value = await self._shared_state.get(key) + return deserialize_value(value) + + async def set_shared_state(self, key: str, value: Any) -> None: + """ + Set a value in shared state. + + Args: + key: The key to set + value: The value to store (must be JSON serializable) + """ + await self._shared_state.set(key, value) + + def get_shared_state_changes(self) -> tuple[dict[str, Any], set[str]]: + """ + Get all shared state changes made during execution. 
+ + Compares current state against the original snapshot to find: + - Updates: keys that were added or modified + - Deletes: keys that were removed + + Returns: + Tuple of (updates dict, deletes set) + """ + current_state = self._shared_state._state + original_keys = set(self._original_snapshot.keys()) + current_keys = set(current_state.keys()) + + # Deleted = was in original, not in current + deletes = original_keys - current_keys + + # Updates = keys in current that are new or have different values + updates = {k: v for k, v in current_state.items() if k not in self._original_snapshot or self._original_snapshot[k] != v} + + return updates, deletes + + +def _serialize_value(value: Any) -> Any: + """Recursively serialize a value for JSON compatibility.""" + # Handle None + if value is None: + return None + + # Handle objects with to_dict() method (like ChatMessage) + if hasattr(value, "to_dict") and callable(value.to_dict): + return value.to_dict() + + # Handle dataclasses + if is_dataclass(value) and not isinstance(value, type): + d: dict[str, Any] = {} + for k, v in asdict(value).items(): + d[k] = _serialize_value(v) + d["__type__"] = type(value).__name__ + d["__module__"] = type(value).__module__ + return d + + # Handle Pydantic models + if isinstance(value, BaseModel): + d = value.model_dump() + d["__type__"] = type(value).__name__ + d["__module__"] = type(value).__module__ + return d + + # Handle lists + if isinstance(value, list): + return [_serialize_value(item) for item in value] + + # Handle dicts + if isinstance(value, dict): + return {k: _serialize_value(v) for k, v in value.items()} + + # Handle primitives and other types + return value + + +def serialize_message(message: Any) -> Any: + """Helper to serialize messages for activity input. + + Adds type metadata (__type__, __module__) to dataclasses and Pydantic models + to enable reconstruction on the receiving end. Handles nested ChatMessage + and other objects with to_dict() methods. 
+ """ + return _serialize_value(message) + + +def deserialize_value(data: Any, type_registry: dict[str, type] | None = None) -> Any: + """ + Attempt to deserialize a value using embedded type metadata. + + Args: + data: The serialized data (could be dict with __type__ metadata) + type_registry: Optional dict mapping type names to types for reconstruction + + Returns: + Reconstructed object if type metadata found and type available, otherwise original data + """ + if not isinstance(data, dict): + return data + + type_name = data.get("__type__") + module_name = data.get("__module__") + + # Special handling for MAF types with nested objects + if type_name == "AgentExecutorRequest" or ("messages" in data and "should_respond" in data): + try: + return reconstruct_agent_executor_request(data) + except Exception: + pass + + if type_name == "AgentExecutorResponse" or ("executor_id" in data and "agent_run_response" in data): + try: + return reconstruct_agent_executor_response(data) + except Exception: + pass + + if not type_name: + return data + + # Try to find the type + target_type = None + + # First check the registry + if type_registry and type_name in type_registry: + target_type = type_registry[type_name] + else: + # Try to import from module + if module_name: + try: + import importlib + + module = importlib.import_module(module_name) + target_type = getattr(module, type_name, None) + except Exception: + pass + + if target_type: + # Remove metadata before reconstruction + clean_data = {k: v for k, v in data.items() if not k.startswith("__")} + try: + if is_dataclass(target_type): + return target_type(**clean_data) + elif issubclass(target_type, BaseModel): + return target_type(**clean_data) + except Exception: + pass + + return data + + +def reconstruct_agent_executor_request(data: dict[str, Any]) -> AgentExecutorRequest: + """Helper to reconstruct AgentExecutorRequest from dict.""" + # Reconstruct ChatMessage objects in messages + messages_data = data.get("messages", 
[]) + messages = [ChatMessage.from_dict(m) if isinstance(m, dict) else m for m in messages_data] + + return AgentExecutorRequest(messages=messages, should_respond=data.get("should_respond", True)) + + +def reconstruct_agent_executor_response(data: dict[str, Any]) -> AgentExecutorResponse: + """Helper to reconstruct AgentExecutorResponse from dict.""" + # Reconstruct AgentRunResponse + arr_data = data.get("agent_run_response", {}) + + agent_run_response = None + if isinstance(arr_data, dict): + # Use from_dict for proper reconstruction + agent_run_response = AgentRunResponse.from_dict(arr_data) + else: + agent_run_response = arr_data + + # Reconstruct full_conversation + fc_data = data.get("full_conversation", []) + full_conversation = None + if fc_data: + full_conversation = [ChatMessage.from_dict(m) if isinstance(m, dict) else m for m in fc_data] + + return AgentExecutorResponse( + executor_id=data["executor_id"], agent_run_response=agent_run_response, full_conversation=full_conversation + ) + + +def reconstruct_message_for_handler(data: Any, handler_types: dict[type, Any]) -> Any: + """ + Attempt to reconstruct a message to match one of the handler's expected types. + + Args: + data: The serialized message data (could be dict, str, etc.) 
+ handler_types: Dict of message types the handler can accept + + Returns: + Reconstructed message if possible, otherwise the original data + """ + if not isinstance(data, dict): + return data + + # Try AgentExecutorResponse first - it needs special handling for nested objects + if "executor_id" in data and "agent_run_response" in data: + try: + return reconstruct_agent_executor_response(data) + except Exception: + pass + + # Try AgentExecutorRequest - also needs special handling for nested ChatMessage objects + if "messages" in data and "should_respond" in data: + try: + return reconstruct_agent_executor_request(data) + except Exception: + pass + + # Try deserialize_value which uses embedded type metadata (__type__, __module__) + if "__type__" in data: + deserialized = deserialize_value(data) + if deserialized is not data: + return deserialized + + # Try to match against handler types by checking dict keys vs dataclass fields + # Filter out metadata keys when comparing + data_keys = {k for k in data.keys() if not k.startswith("__")} + for msg_type in handler_types.keys(): + if is_dataclass(msg_type): + # Check if the dict keys match the dataclass fields + field_names = {f.name for f in fields(msg_type)} + if field_names == data_keys or field_names.issubset(data_keys): + try: + # Remove metadata before constructing + clean_data = {k: v for k, v in data.items() if not k.startswith("__")} + return msg_type(**clean_data) + except Exception: + pass + + return data diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py new file mode 100644 index 0000000000..8de69214c1 --- /dev/null +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py @@ -0,0 +1,416 @@ +# Copyright (c) Microsoft. All rights reserved. 
"""
Workflow Execution for Durable Functions

This module provides the workflow orchestration engine that executes MAF Workflows
using Azure Durable Functions. It reuses MAF's edge group routing logic while
adapting execution to the DF generator-based model (yield instead of await).

Key components:
- run_workflow_orchestrator: Main orchestration function for workflow execution
- route_message_through_edge_groups: Routing helper using MAF edge group APIs
- build_agent_executor_response: Helper to construct AgentExecutorResponse
"""

from __future__ import annotations

import json
import logging
from typing import Any

from agent_framework import (
    AgentExecutor,
    AgentExecutorRequest,
    AgentExecutorResponse,
    AgentRunResponse,
    AgentThread,
    ChatMessage,
    Workflow,
)
from agent_framework._workflows._edge import (
    EdgeGroup,
    FanInEdgeGroup,
    FanOutEdgeGroup,
    SingleEdgeGroup,
    SwitchCaseEdgeGroup,
)
from azure.durable_functions import DurableOrchestrationContext

from ._models import AgentSessionId
from ._orchestration import DurableAIAgent
from ._shared_state import DurableSharedState
from ._utils import deserialize_value, serialize_message

logger = logging.getLogger(__name__)


def route_message_through_edge_groups(
    edge_groups: list[EdgeGroup],
    source_id: str,
    message: Any,
) -> list[str]:
    """
    Route a message through edge groups to find target executor IDs.

    Delegates to MAF's edge group routing logic instead of manual inspection.

    Args:
        edge_groups: List of EdgeGroup instances from the workflow
        source_id: The ID of the source executor
        message: The message to route

    Returns:
        List of target executor IDs that should receive the message
    """
    targets: list[str] = []

    for group in edge_groups:
        if source_id not in group.source_executor_ids:
            continue

        # SwitchCaseEdgeGroup and FanOutEdgeGroup use selection_func.
        # NOTE: _selection_func is a private MAF attribute; there is no public
        # accessor, so this couples us to the MAF internal API.
        if isinstance(group, (SwitchCaseEdgeGroup, FanOutEdgeGroup)):
            if group._selection_func is not None:
                selected = group._selection_func(message, group.target_executor_ids)
                targets.extend(selected)
            else:
                # No selection func means broadcast to all targets
                targets.extend(group.target_executor_ids)

        elif isinstance(group, SingleEdgeGroup):
            # SingleEdgeGroup has exactly one edge
            edge = group.edges[0]
            if edge.should_route(message):
                targets.append(edge.target_id)

        elif isinstance(group, FanInEdgeGroup):
            # FanIn is handled separately in the orchestrator loop
            # since it requires aggregation
            pass

        else:
            # Generic EdgeGroup: check each edge's condition
            for edge in group.edges:
                if edge.source_id == source_id and edge.should_route(message):
                    targets.append(edge.target_id)

    return targets


def build_agent_executor_response(
    executor_id: str,
    response_text: str | None,
    structured_response: dict[str, Any] | None,
    previous_message: Any,
) -> AgentExecutorResponse:
    """
    Build an AgentExecutorResponse from entity response data.

    Shared helper to construct the response object consistently.

    Args:
        executor_id: The ID of the executor that produced the response
        response_text: Plain text response from the agent (if any)
        structured_response: Structured JSON response (if any)
        previous_message: The input message that triggered this response

    Returns:
        AgentExecutorResponse with reconstructed conversation
    """
    # A structured response takes precedence and is serialized as the text body.
    final_text = response_text
    if structured_response:
        final_text = json.dumps(structured_response)

    assistant_message = ChatMessage(role="assistant", text=final_text)

    agent_run_response = AgentRunResponse(
        messages=[assistant_message],
    )

    # Build conversation history: carry forward an upstream conversation if the
    # previous message has one, or seed it from a plain user string.
    full_conversation: list[ChatMessage] = []
    if isinstance(previous_message, AgentExecutorResponse) and previous_message.full_conversation:
        full_conversation.extend(previous_message.full_conversation)
    elif isinstance(previous_message, str):
        full_conversation.append(ChatMessage(role="user", text=previous_message))

    full_conversation.append(assistant_message)

    return AgentExecutorResponse(
        executor_id=executor_id,
        agent_run_response=agent_run_response,
        full_conversation=full_conversation,
    )


def run_workflow_orchestrator(
    context: DurableOrchestrationContext,
    workflow: Workflow,
    initial_message: Any,
    shared_state: DurableSharedState | None = None,
):
    """
    Traverse and execute the workflow graph using Durable Functions.

    This orchestrator reuses MAF's edge group routing logic while adapting
    execution to the DF generator-based model (yield instead of await).

    Supports:
    - SingleEdgeGroup: Direct 1:1 routing with optional condition
    - SwitchCaseEdgeGroup: First matching condition wins
    - FanOutEdgeGroup: Broadcast to multiple targets (with optional selection)
    - FanInEdgeGroup: Aggregates messages from multiple sources before delivery
    - SharedState: Durable shared state accessible to all executors

    Args:
        context: The Durable Functions orchestration context
        workflow: The MAF Workflow instance to execute
        initial_message: The initial message to send to the start executor
        shared_state: Optional DurableSharedState for cross-executor state sharing

    Returns:
        List of workflow outputs collected from executor activities
    """
    pending_messages: dict[str, list[Any]] = {workflow.start_executor_id: [initial_message]}
    iteration = 0
    max_iterations = workflow.max_iterations
    workflow_outputs: list[Any] = []

    # Track pending sources for FanInEdgeGroups
    # Structure: {group_id: {source_id: [messages]}}
    fan_in_pending: dict[str, dict[str, list[Any]]] = {}

    # Initialize fan-in tracking for all FanInEdgeGroups
    for group in workflow.edge_groups:
        if isinstance(group, FanInEdgeGroup):
            fan_in_pending[group.id] = {}

    while pending_messages and iteration < max_iterations:
        logger.debug("Orchestrator iteration %d", iteration)
        next_pending_messages: dict[str, list[Any]] = {}

        for executor_id, messages in pending_messages.items():
            logger.debug("Processing executor: %s with %d messages", executor_id, len(messages))
            executor = workflow.executors[executor_id]

            for message in messages:
                output_message: Any | None = None
                result: dict[str, Any] | None = None  # Activity result (only set for standard executors)

                # Execute
                if isinstance(executor, AgentExecutor):
                    # Durable Agent Execution
                    agent_def = executor._agent
                    agent_name = agent_def.name
                    logger.debug("Calling Durable Entity: %s", agent_name)

                    # Extract message content
                    message_content = _extract_message_content(message)

                    # Create unique session for this orchestration instance
                    session_id = AgentSessionId(name=agent_name, key=context.instance_id)

                    # Create a thread with the session ID
                    thread = AgentThread()
                    thread._durable_session_id = session_id  # type: ignore[attr-defined]

                    # Create DurableAIAgent wrapper to call the entity
                    agent = DurableAIAgent(context, agent_name)
                    agent_response: AgentRunResponse = yield agent.run(
                        message_content,
                        thread=thread,
                    )
                    logger.debug("Durable Entity %s returned: %s", agent_name, agent_response)

                    # Build AgentExecutorResponse from the typed AgentRunResponse
                    # AgentRunResponse has .text property for response text and .value for structured response
                    response_text = agent_response.text if agent_response else None
                    structured_response = None
                    if agent_response and agent_response.value is not None:
                        # If value is a Pydantic model, convert to dict
                        if hasattr(agent_response.value, "model_dump"):
                            structured_response = agent_response.value.model_dump()
                        elif isinstance(agent_response.value, dict):
                            structured_response = agent_response.value

                    output_message = build_agent_executor_response(
                        executor_id=executor_id,
                        response_text=response_text,
                        structured_response=structured_response,
                        previous_message=message,
                    )

                else:
                    # Standard Executor Execution via Activity
                    logger.debug("Calling Activity for executor: %s", executor_id)

                    # Get shared state snapshot before activity execution (if shared_state is available)
                    # Only needed for activities since they can access SharedState
                    shared_state_snapshot: dict[str, Any] | None = None
                    if shared_state:
                        shared_state_snapshot = yield from shared_state.get_all()
                        logger.debug("[workflow] SharedState snapshot for activity: %s", shared_state_snapshot)

                    activity_input = {
                        "executor_id": executor_id,
                        "message": serialize_message(message),
                        "shared_state_snapshot": shared_state_snapshot,
                    }

                    # Serialize to JSON string to work around Azure Functions type validation issues
                    activity_input_json = json.dumps(activity_input)
                    result_json = yield context.call_activity("ExecuteExecutor", activity_input_json)
                    result = json.loads(result_json) if result_json else None
                    logger.debug("Activity for executor %s returned", executor_id)

                    # Apply any shared state updates from the activity result
                    if shared_state and result:
                        if result.get("shared_state_updates"):
                            updates = result["shared_state_updates"]
                            logger.debug("[workflow] Applying SharedState updates from activity: %s", updates)
                            yield from shared_state.update(updates)
                        if result.get("shared_state_deletes"):
                            deletes = result["shared_state_deletes"]
                            logger.debug("[workflow] Applying SharedState deletes from activity: %s", deletes)
                            for key in deletes:
                                yield from shared_state.delete(key)

                    # Collect outputs
                    if result and result.get("outputs"):
                        workflow_outputs.extend(result["outputs"])

                # Routing - handles both agent output_message and activity sent_messages
                messages_to_route: list[tuple[Any, str | None]] = []  # List of (message, explicit_target_or_none)

                if output_message:
                    messages_to_route.append((output_message, None))

                # Also route sent_messages from activities
                if result and result.get("sent_messages"):
                    for msg_data in result["sent_messages"]:
                        sent_msg = msg_data.get("message")
                        target_id = msg_data.get("target_id")
                        # Explicit None check: a falsy-but-valid payload
                        # ("", 0, [], {}) must still be routed, not dropped.
                        if sent_msg is not None:
                            # Deserialize the message to reconstruct typed objects
                            # This is needed for condition functions that check message types
                            sent_msg = deserialize_value(sent_msg)
                            messages_to_route.append((sent_msg, target_id))

                for msg_to_route, explicit_target in messages_to_route:
                    logger.debug("Routing output from %s", executor_id)

                    # If explicit target specified, route directly
                    if explicit_target:
                        if explicit_target not in next_pending_messages:
                            next_pending_messages[explicit_target] = []
                        next_pending_messages[explicit_target].append(msg_to_route)
                        logger.debug("Routed message from %s to explicit target %s", executor_id, explicit_target)
                        continue

                    # Check for FanInEdgeGroup sources first
                    for group in workflow.edge_groups:
                        if isinstance(group, FanInEdgeGroup) and executor_id in group.source_executor_ids:
                            # Accumulate message for fan-in
                            if executor_id not in fan_in_pending[group.id]:
                                fan_in_pending[group.id][executor_id] = []
                            fan_in_pending[group.id][executor_id].append(msg_to_route)
                            logger.debug("Accumulated message for FanIn group %s from %s", group.id, executor_id)

                    # Use MAF's edge group routing for other edge types
                    targets = route_message_through_edge_groups(
                        workflow.edge_groups,
                        executor_id,
                        msg_to_route,
                    )

                    for target_id in targets:
                        logger.debug("Routing to %s", target_id)
                        if target_id not in next_pending_messages:
                            next_pending_messages[target_id] = []
                        next_pending_messages[target_id].append(msg_to_route)

        # Check if any FanInEdgeGroups are ready to deliver
        for group in workflow.edge_groups:
            if isinstance(group, FanInEdgeGroup):
                pending_sources = fan_in_pending.get(group.id, {})
                # Check if all sources have contributed at least one message
                if all(src in pending_sources for src in group.source_executor_ids):
                    # Aggregate all messages into a single list
                    aggregated: list[Any] = []
                    for src in group.source_executor_ids:
                        aggregated.extend(pending_sources[src])

                    target_id = group.target_executor_ids[0]
                    logger.debug(
                        "FanIn group %s ready, delivering %d messages to %s", group.id, len(aggregated), target_id
                    )

                    if target_id not in next_pending_messages:
                        next_pending_messages[target_id] = []
                    next_pending_messages[target_id].append(aggregated)

                    # Clear the pending sources for this group
                    fan_in_pending[group.id] = {}

        pending_messages = next_pending_messages
        iteration += 1

    # Surface silent termination: reaching the iteration cap with work still
    # pending previously dropped those messages without any signal.
    if pending_messages:
        logger.warning(
            "Workflow stopped at max_iterations=%d with %d executors still pending",
            max_iterations,
            len(pending_messages),
        )

    return workflow_outputs


def _extract_message_content(message: Any) -> str:
    """Extract text content from various message types.

    Handles typed AgentExecutorResponse/AgentExecutorRequest objects, their
    serialized dict forms, and plain strings. Returns "" when no text is found.
    """
    message_content = ""
    if isinstance(message, AgentExecutorResponse) and message.agent_run_response:
        if message.agent_run_response.text:
            message_content = message.agent_run_response.text
        elif message.agent_run_response.messages:
            message_content = message.agent_run_response.messages[-1].text or ""
    elif isinstance(message, AgentExecutorRequest) and message.messages:
        # Extract text from the last message in the request
        message_content = message.messages[-1].text or ""
    elif isinstance(message, dict):
        message_content = _extract_message_content_from_dict(message)
    elif isinstance(message, str):
        message_content = message

    return message_content


def _extract_message_content_from_dict(message: dict[str, Any]) -> str:
    """Extract text content from serialized message dictionaries.

    Understands both AgentExecutorRequest-shaped dicts ("messages" key) and
    AgentExecutorResponse-shaped dicts ("agent_run_response" key). Returns ""
    when no text can be located.
    """
    message_content = ""

    if "messages" in message and message["messages"]:
        # AgentExecutorRequest dict - messages is a list of ChatMessage dicts
        last_msg = message["messages"][-1]
        if isinstance(last_msg, dict):
            # ChatMessage serialized via to_dict() has structure:
            # {"type": "chat_message", "contents": [{"type": "text", "text": "..."}], ...}
            if "contents" in last_msg and last_msg["contents"]:
                first_content = last_msg["contents"][0]
                if isinstance(first_content, dict):
                    message_content = first_content.get("text") or ""
            # Fallback to direct text field if not in contents structure
            if not message_content:
                message_content = last_msg.get("text") or last_msg.get("_text") or ""
        elif hasattr(last_msg, "text"):
            message_content = last_msg.text or ""
    elif "agent_run_response" in message:
        # AgentExecutorResponse dict
        arr = message.get("agent_run_response", {})
        if isinstance(arr, dict):
            message_content = arr.get("text") or ""
            if not message_content and arr.get("messages"):
                last_msg = arr["messages"][-1]
                if isinstance(last_msg, dict):
                    # Check for contents structure first
                    if "contents" in last_msg and last_msg["contents"]:
                        first_content = last_msg["contents"][0]
                        if isinstance(first_content, dict):
                            message_content = first_content.get("text") or ""
                    if not message_content:
                        message_content = last_msg.get("text") or last_msg.get("_text") or ""

    return message_content
**State persistence** - State passed between executors via `DurableSharedState` entity + +## Workflow Architecture + +``` +store_email → spam_detector (agent) → to_detection_result → [branch]: + ├── If spam: handle_spam → yield "Email marked as spam: {reason}" + └── If not spam: submit_to_email_assistant → email_assistant (agent) → finalize_and_send → yield "Email sent: {response}" +``` + +### SharedState Usage by Executor + +| Executor | SharedState Operations | +|----------|----------------------| +| `store_email` | `set_shared_state("email:{id}", email)`, `set_shared_state("current_email_id", id)` | +| `to_detection_result` | `get_shared_state("current_email_id")` | +| `submit_to_email_assistant` | `get_shared_state("email:{id}")` | +| `finalize_and_send` | `shared_state.delete("email:{id}")`, `shared_state.delete("current_email_id")` | +| `handle_spam` | `shared_state.delete("email:{id}")`, `shared_state.delete("current_email_id")` | + +## Prerequisites + +1. **Azure OpenAI** - Endpoint and deployment configured +2. **Durable Task Scheduler** - Running locally or in Azure +3. **Azurite** - For local storage emulation + +## Setup + +1. Copy `local.settings.json.sample` to `local.settings.json` and configure: + ```json + { + "Values": { + "AZURE_OPENAI_ENDPOINT": "https://your-resource.openai.azure.com/", + "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME": "gpt-4o" + } + } + ``` + +2. Install dependencies: + ```bash + pip install -r requirements.txt + ``` + +3. Start Azurite: + ```bash + azurite --silent + ``` + +4. Start Durable Task Scheduler: + ```bash + durabletask-scheduler start + ``` + +5. Run the function app: + ```bash + func start + ``` + +## Testing + +Use the `demo.http` file with REST Client extension or curl: + +### Test Spam Email +```bash +curl -X POST http://localhost:7071/api/workflow/run \ + -H "Content-Type: application/json" \ + -d '"URGENT! You have won $1,000,000! 
Click here to claim!"' +``` + +### Test Legitimate Email +```bash +curl -X POST http://localhost:7071/api/workflow/run \ + -H "Content-Type: application/json" \ + -d '"Hi team, reminder about our meeting tomorrow at 10 AM."' +``` + +## Expected Output + +**Spam email:** +``` +Email marked as spam: This email exhibits spam characteristics including urgent language, unrealistic claims of monetary winnings, and requests to click suspicious links. +``` + +**Legitimate email:** +``` +Email sent: Hi, Thank you for the reminder about the sprint planning meeting tomorrow at 10 AM. I will review the agenda in Jira and come prepared with my updates. See you then! +``` diff --git a/python/samples/getting_started/azure_functions/09_workflow_shared_state/demo.http b/python/samples/getting_started/azure_functions/09_workflow_shared_state/demo.http new file mode 100644 index 0000000000..48b6a73f72 --- /dev/null +++ b/python/samples/getting_started/azure_functions/09_workflow_shared_state/demo.http @@ -0,0 +1,31 @@ +@endpoint = http://localhost:7071 + +### Start the workflow with a spam email +POST {{endpoint}}/api/workflow/run +Content-Type: application/json + +"URGENT! You have won $1,000,000! Click here to claim your prize now before it expires!" + +### Start the workflow with a legitimate email +POST {{endpoint}}/api/workflow/run +Content-Type: application/json + +"Hi team, just a reminder about the sprint planning meeting tomorrow at 10 AM. Please review the agenda items in Jira before the call." + +### Start the workflow with another legitimate email +POST {{endpoint}}/api/workflow/run +Content-Type: application/json + +"Hello, I wanted to follow up on our conversation from last week regarding the project timeline. Could we schedule a brief call this afternoon to discuss the next steps?" + +### Start the workflow with a phishing attempt +POST {{endpoint}}/api/workflow/run +Content-Type: application/json + +"Dear Customer, Your account has been compromised! 
# Copyright (c) Microsoft. All rights reserved.

import os
from dataclasses import dataclass
from typing import Any
from uuid import uuid4

from agent_framework import (
    AgentExecutorRequest,
    AgentExecutorResponse,
    ChatMessage,
    Role,
    WorkflowBuilder,
    WorkflowContext,
    executor,
)
from agent_framework.azure import AzureOpenAIChatClient
from azure.identity import DefaultAzureCredential
from pydantic import BaseModel
from typing_extensions import Never

from agent_framework_azurefunctions import AgentFunctionApp

"""
Sample: Shared state with agents and conditional routing.

An email is stored once under a generated id, classified by a spam-detection
agent, and then either answered by an assistant agent or finished with a spam
notice. Events stream as the workflow runs.

Demonstrates how to:
- Keep large payloads in shared state and pass only lightweight id references
  between executors.
- Enforce structured agent output with Pydantic models via response_format.
- Branch with conditional edges keyed on a typed intermediate DetectionResult.
- Mix agent-backed executors with function-style executors and yield the final
  output when the workflow completes.

Prerequisites:
- Azure OpenAI configured for AzureOpenAIChatClient with required environment variables.
- Authentication via azure-identity (DefaultAzureCredential; run `az login` first).
- Familiarity with WorkflowBuilder, executors, conditional edges, and streaming runs.
"""

EMAIL_STATE_PREFIX = "email:"
CURRENT_EMAIL_ID_KEY = "current_email_id"


class DetectionResultAgent(BaseModel):
    """Structured verdict emitted by the spam detection agent."""

    is_spam: bool
    reason: str


class EmailResponse(BaseModel):
    """Structured reply emitted by the email assistant agent."""

    response: str


@dataclass
class DetectionResult:
    """Detection verdict enriched with the shared-state email_id for later lookups."""

    is_spam: bool
    reason: str
    email_id: str


@dataclass
class Email:
    """Email payload kept in shared state so edges carry only lightweight ids."""

    email_id: str
    email_content: str


def get_condition(expected_result: bool):
    """Build a routing predicate keyed on DetectionResult.is_spam.

    Contract:
    - Non-DetectionResult messages always pass, so unexpected payloads are not
      silently dead-ended.
    - Otherwise the predicate is True exactly when is_spam equals expected_result.
    """

    def condition(message: Any) -> bool:
        if isinstance(message, DetectionResult):
            return message.is_spam == expected_result
        return True

    return condition


@executor(id="store_email")
async def store_email(email_text: str, ctx: WorkflowContext[AgentExecutorRequest]) -> None:
    """Store the raw email in shared state, then kick off spam detection.

    Steps:
    - Mint a UUID email_id for downstream lookups.
    - Persist the Email record under a namespaced key and record the current id.
    - Ask the detector agent to respond via an AgentExecutorRequest.
    """
    record = Email(email_id=str(uuid4()), email_content=email_text)
    await ctx.set_shared_state(f"{EMAIL_STATE_PREFIX}{record.email_id}", record)
    await ctx.set_shared_state(CURRENT_EMAIL_ID_KEY, record.email_id)

    request = AgentExecutorRequest(
        messages=[ChatMessage(Role.USER, text=record.email_content)],
        should_respond=True,
    )
    await ctx.send_message(request)


@executor(id="to_detection_result")
async def to_detection_result(response: AgentExecutorResponse, ctx: WorkflowContext[DetectionResult]) -> None:
    """Convert the detector's JSON reply into a typed DetectionResult.

    Steps:
    1) Validate the agent's JSON into DetectionResultAgent.
    2) Look up the current email_id in shared state.
    3) Emit a typed DetectionResult for conditional routing.
    """
    verdict = DetectionResultAgent.model_validate_json(response.agent_run_response.text)
    email_id: str = await ctx.get_shared_state(CURRENT_EMAIL_ID_KEY)
    await ctx.send_message(DetectionResult(is_spam=verdict.is_spam, reason=verdict.reason, email_id=email_id))


@executor(id="submit_to_email_assistant")
async def submit_to_email_assistant(detection: DetectionResult, ctx: WorkflowContext[AgentExecutorRequest]) -> None:
    """Fetch the stored email by id and hand it to the drafting agent.

    Guard: only non-spam traffic should reach this executor; raise if misrouted.
    """
    if detection.is_spam:
        raise RuntimeError("This executor should only handle non-spam messages.")

    # Recover the original content from shared state and forward it.
    email: Email = await ctx.get_shared_state(f"{EMAIL_STATE_PREFIX}{detection.email_id}")
    await ctx.send_message(
        AgentExecutorRequest(messages=[ChatMessage(Role.USER, text=email.email_content)], should_respond=True)
    )


@executor(id="finalize_and_send")
async def finalize_and_send(response: AgentExecutorResponse, ctx: WorkflowContext[Never, str]) -> None:
    """Validate the drafted reply and yield the workflow's final output."""
    drafted = EmailResponse.model_validate_json(response.agent_run_response.text)
    await ctx.yield_output(f"Email sent: {drafted.response}")


@executor(id="handle_spam")
async def handle_spam(detection: DetectionResult, ctx: WorkflowContext[Never, str]) -> None:
    """Yield the spam verdict as the workflow's final output.

    Guard: only spam traffic should reach this executor; raise if misrouted.
    """
    if not detection.is_spam:
        raise RuntimeError("This executor should only handle spam messages.")
    await ctx.yield_output(f"Email marked as spam: {detection.reason}")


# Chat client and agents. response_format enforces structured JSON from each agent.
chat_client = AzureOpenAIChatClient(credential=DefaultAzureCredential())

spam_detection_agent = chat_client.create_agent(
    instructions=(
        "You are a spam detection assistant that identifies spam emails. "
        "Always return JSON with fields is_spam (bool) and reason (string)."
    ),
    response_format=DetectionResultAgent,
    name="spam_detection_agent",
)

email_assistant_agent = chat_client.create_agent(
    instructions=(
        "You are an email assistant that helps users draft responses to emails with professionalism. "
        "Return JSON with a single field 'response' containing the drafted reply."
    ),
    response_format=EmailResponse,
    name="email_assistant_agent",
)

# Workflow graph with conditional edges.
# Flow:
#   store_email -> spam_detection_agent -> to_detection_result -> branch:
#     False -> submit_to_email_assistant -> email_assistant_agent -> finalize_and_send
#     True  -> handle_spam
workflow = (
    WorkflowBuilder()
    .set_start_executor(store_email)
    .add_edge(store_email, spam_detection_agent)
    .add_edge(spam_detection_agent, to_detection_result)
    .add_edge(to_detection_result, submit_to_email_assistant, condition=get_condition(False))
    .add_edge(to_detection_result, handle_spam, condition=get_condition(True))
    .add_edge(submit_to_email_assistant, email_assistant_agent)
    .add_edge(email_assistant_agent, finalize_and_send)
    .build()
)

# Wrap the workflow with AgentFunctionApp for durable execution.
# SharedState is enabled by default, which this sample relies on for storing emails.
app = AgentFunctionApp(workflow=workflow, enable_health_check=True)
"FUNCTIONS_WORKER_RUNTIME": "python", + "AZURE_OPENAI_ENDPOINT": "", + "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME": "" + } +} diff --git a/python/samples/getting_started/azure_functions/09_workflow_shared_state/requirements.txt b/python/samples/getting_started/azure_functions/09_workflow_shared_state/requirements.txt new file mode 100644 index 0000000000..fe2d14bbae --- /dev/null +++ b/python/samples/getting_started/azure_functions/09_workflow_shared_state/requirements.txt @@ -0,0 +1,6 @@ +agent-framework-azurefunctions +azure-functions +azure-functions-durable +azure-identity +openai +agents-maf diff --git a/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/.env.sample b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/.env.sample new file mode 100644 index 0000000000..cf8fe3d05c --- /dev/null +++ b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/.env.sample @@ -0,0 +1,4 @@ +# Azure OpenAI Configuration +AZURE_OPENAI_ENDPOINT=https://.openai.azure.com/ +AZURE_OPENAI_CHAT_DEPLOYMENT_NAME= +AZURE_OPENAI_API_KEY= diff --git a/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/.gitignore b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/.gitignore new file mode 100644 index 0000000000..1d5b48c35f --- /dev/null +++ b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/.gitignore @@ -0,0 +1,2 @@ +.env +local.settings.json diff --git a/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/README.md b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/README.md new file mode 100644 index 0000000000..4b80d7cc0a --- /dev/null +++ b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/README.md @@ -0,0 +1,217 @@ +# Hybrid Multi-Agent Workflow with Durable Functions – Python + +This sample demonstrates a **hybrid approach** that combines Durable Functions orchestration 
with Agent Framework workflows. + +## Overview + +This sample bridges the gap between the two previous samples: +- **`06_multi_agent_orchestration_conditionals`** - Pure Durable Functions orchestration +- **`09_workflow_shared_state`** - Workflow with shared state + +The hybrid approach: +1. Registers agents via `AgentFunctionApp` (Durable Functions style) +2. Retrieves agents from `DurableOrchestrationContext` during orchestration +3. Builds a `WorkflowBuilder` graph using those agents +4. Executes the workflow logic within the durable orchestration + +## Architecture + +```text +AgentFunctionApp + ├─ Register SpamDetectionAgent + └─ Register EmailAssistantAgent + +DurableOrchestration + ├─ Get agents from context + ├─ Build WorkflowBuilder graph with agents + executors + └─ Execute workflow logic (procedural with agents) +``` + +## Components + +### AI Agents (registered via AgentFunctionApp) + +1. **SpamDetectionAgent** - Analyzes emails for spam +2. **EmailAssistantAgent** - Drafts professional responses + +### Executors (defined as classes) + +1. **SpamHandlerExecutor** - Handles spam emails (non-AI activity) +2. **EmailSenderExecutor** - Sends email responses (non-AI activity) + +## Key Features + +- **Agent Registration**: Uses `AgentFunctionApp` for centralized agent management +- **Context-Based Retrieval**: Gets agents from `context.get_agent()` +- **Workflow Builder**: Constructs declarative workflow graphs +- **Executor Pattern**: Uses Executor classes (no traditional activity triggers) +- **Durable Orchestration**: Runs within durable context with state persistence + +## Running the Sample + +### Prerequisites + +1. Install dependencies: + + ```bash + pip install -r requirements.txt + ``` + +2. Configure your environment: + + Copy the sample configuration files: + ```bash + cp .env.sample .env + cp local.settings.json.sample local.settings.json + ``` + + Update `.env` and `local.settings.json` with your Azure OpenAI credentials. + +3. 
Start Azurite (for local storage): + + ```bash + azurite + ``` + +### Execution Modes + +This sample can be run in two modes by modifying the `launch(durable=...)` call at the bottom of `function_app.py`. + +#### 1. Durable Functions Mode (Default) + +Set `launch(durable=True)` in `function_app.py`. + +- **Configuration**: Requires `local.settings.json`. +- **Command**: + + ```bash + func start + ``` + +- **Description**: Runs the workflow as a Durable Functions orchestration. The app will start on `http://localhost:7071`. + +#### 2. Standalone Workflow Mode (DevUI) + +Set `launch(durable=False)` in `function_app.py`. + +- **Configuration**: Requires `.env`. +- **Command**: + + ```bash + python function_app.py + ``` + +- **Description**: Runs the workflow locally using the Agent Framework DevUI (available at `http://localhost:8094`). + +### Test with HTTP Requests + +Use `demo.http` or curl: + +```bash +# Start orchestration +curl -X POST http://localhost:7071/api/workflow/run \ + -H "Content-Type: application/json" \ + -d '{"email_id": "test-001", "email_content": "URGENT! 
Click here now!"}' + +# Check status +curl http://localhost:7071/api/workflow/status/{instanceId} +``` + +## Comparison with Other Samples + +| Feature | Orchestration | Workflow | **Hybrid** | +|---------|--------------|----------|------------| +| **Agent Registration** | AgentFunctionApp | Direct creation | **AgentFunctionApp** | +| **Agent Retrieval** | context.get_agent() | Direct reference | **context.get_agent()** | +| **Workflow Definition** | Procedural (yield) | Declarative (WorkflowBuilder) | **Both** | +| **Activity Style** | @activity_trigger | Executor classes | **Executor classes** | +| **Execution Model** | Durable orchestration | In-memory workflow | **Durable orchestration** | +| **State Persistence** | ✅ Azure Storage | ❌ In-memory | **✅ Azure Storage** | +| **Scalability** | ✅ Cloud-native | ❌ Single process | **✅ Cloud-native** | + +## When to Use This Approach + +**Use the Hybrid approach** when you: + +- Want the declarative nature of WorkflowBuilder for documentation +- Need durable state persistence and cloud scalability +- Want to use Executor classes instead of activity triggers +- Want to visualize workflow structure programmatically +- Need both agent-based reasoning and executor-based activities +- Are exploring workflow patterns within durable orchestrations + +**Don't use it** when: + +- You only need simple orchestration (use pure Durable Functions) +- You want standalone workflows without Azure infrastructure (use pure WorkflowBuilder) +- The added complexity doesn't provide value + +## Code Highlights + +### Agent Registration + +```python +app = AgentFunctionApp(agents=_create_agents(), enable_health_check=True) +``` + +### Agent Retrieval in Orchestration + +```python +@app.orchestration_trigger(context_name="context") +def spam_detection_workflow_orchestration(context: DurableOrchestrationContext): + # Get agents from context + spam_agent = context.get_agent(SPAM_AGENT_NAME) + email_agent = context.get_agent(EMAIL_AGENT_NAME) + 
+ # Build workflow graph + workflow = ( + WorkflowBuilder() + .set_start_executor(spam_agent) + .add_switch_case_edge_group(...) + .build() + ) +``` + +### Mixed Execution Styles + +```python +# Executor-based activity (no @activity_trigger needed) +class SpamHandlerExecutor(Executor): + @handler + async def handle_spam_result(self, agent_response, ctx): + spam_result = SpamDetectionResult.model_validate_json(agent_response.agent_run_response.text) + message = f"Email marked as spam: {spam_result.reason}" + await ctx.yield_output(message) + +# Workflow execution follows the graph structure but runs procedurally +if spam_result.is_spam: + result = f"Email marked as spam: {spam_result.reason}" + return result +``` + +## Benefits of Hybrid Approach + +1. **Documentation** - WorkflowBuilder graph serves as living documentation +2. **Flexibility** - Mix declarative structure with procedural execution +3. **Scalability** - Leverage durable orchestration for production workloads +4. **Type Safety** - Workflow validation catches type mismatches +5. 
**Executor Pattern** - Modern class-based activity definitions instead of decorators + +## Limitations + +- WorkflowBuilder used for structure/documentation only (not native execution) +- Execution is still procedural with yield-based orchestration +- Requires understanding of both workflow and orchestration paradigms +- Executor classes defined but not automatically invoked by workflow engine + +## Next Steps + +- Explore pure workflow execution without orchestration +- Add workflow visualization endpoints +- Implement workflow-native execution within durable context +- Add telemetry and monitoring integration + +## Related Samples + +- `06_multi_agent_orchestration_conditionals` - Pure Durable Functions approach +- `09_workflow_shared_state` - Workflow with shared state diff --git a/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/demo.http b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/demo.http new file mode 100644 index 0000000000..2c81ddc9bc --- /dev/null +++ b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/demo.http @@ -0,0 +1,32 @@ +### Start Workflow Orchestration - Spam Email +POST http://localhost:7071/api/workflow/run +Content-Type: application/json + +{ + "email_id": "email-001", + "email_content": "URGENT! You've won $1,000,000! Click here immediately to claim your prize! Limited time offer - act now!" +} + +### + +### Start Workflow Orchestration - Legitimate Email +POST http://localhost:7071/api/workflow/run +Content-Type: application/json + +{ + "email_id": "email-002", + "email_content": "Hi team, just a reminder about our sprint planning meeting tomorrow at 10 AM. Please review the agenda in Jira." 
+} + +### + +### Get Workflow Status +# Replace {instanceId} with the actual instance ID from the start response +GET http://localhost:7071/api/workflow/status/{instanceId} + +### + +### Health Check +GET http://localhost:7071/api/health + +### diff --git a/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/function_app.py b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/function_app.py new file mode 100644 index 0000000000..dde515d737 --- /dev/null +++ b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/function_app.py @@ -0,0 +1,229 @@ +# Copyright (c) Microsoft. All rights reserved. +"""Workflow Execution within Durable Functions Orchestrator. + +This sample demonstrates running agent framework WorkflowBuilder workflows inside +a Durable Functions orchestrator by manually traversing the workflow graph and +delegating execution to Durable Entities (for agents) and Activities (for other logic). + +Key architectural points: +- AgentFunctionApp registers agents as DurableAIAgents. +- WorkflowBuilder uses `DurableAgentDefinition` (a placeholder) to define the graph. +- The orchestrator (`workflow_orchestration`) iterates through the workflow graph. +- When an agent node is encountered, it calls the corresponding `DurableAIAgent` entity. +- When a standard executor node is encountered, it calls an Activity (`ExecuteExecutor`). + +This approach allows using the rich structure of `WorkflowBuilder` while leveraging +the statefulness and durability of `DurableAIAgent`s. 
+""" + +import logging +import os +from typing import Any, Dict + +from anyio import Path +from agent_framework import ( + AgentExecutorResponse, + Case, + Default, + Executor, + Workflow, + WorkflowBuilder, + WorkflowContext, + handler, +) +from agent_framework.azure import AzureOpenAIChatClient +from azure.identity import AzureCliCredential +from pydantic import BaseModel +from agent_framework_azurefunctions import AgentFunctionApp +from typing_extensions import Never + +logger = logging.getLogger(__name__) +app: AgentFunctionApp = None + +AZURE_OPENAI_ENDPOINT_ENV = "AZURE_OPENAI_ENDPOINT" +AZURE_OPENAI_DEPLOYMENT_ENV = "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME" +AZURE_OPENAI_API_KEY_ENV = "AZURE_OPENAI_API_KEY" +SPAM_AGENT_NAME = "SpamDetectionAgent" +EMAIL_AGENT_NAME = "EmailAssistantAgent" + +SPAM_DETECTION_INSTRUCTIONS = ( + "You are a spam detection assistant that identifies spam emails.\n\n" + "Analyze the email content for spam indicators including:\n" + "1. Suspicious language (urgent, limited time, act now, free money, etc.)\n" + "2. Suspicious links or requests for personal information\n" + "3. Poor grammar or spelling\n" + "4. Requests for money or financial information\n" + "5. Impersonation attempts\n\n" + "Return a JSON response with:\n" + "- is_spam: boolean indicating if it's spam\n" + "- confidence: float between 0.0 and 1.0\n" + "- reason: detailed explanation of your classification" +) + +EMAIL_ASSISTANT_INSTRUCTIONS = ( + "You are an email assistant that helps users draft responses to legitimate emails.\n\n" + "When you receive an email that has been verified as legitimate:\n" + "1. Draft a professional and appropriate response\n" + "2. Match the tone and formality of the original email\n" + "3. Be helpful and courteous\n" + "4. 
Keep the response concise but complete\n\n" + "Return a JSON response with:\n" + "- response: the drafted email response" +) + + +class SpamDetectionResult(BaseModel): + is_spam: bool + confidence: float + reason: str + + +class EmailResponse(BaseModel): + response: str + + +class EmailPayload(BaseModel): + email_id: str + email_content: str + + +def _build_client_kwargs() -> Dict[str, Any]: + endpoint = os.getenv(AZURE_OPENAI_ENDPOINT_ENV) + if not endpoint: + raise RuntimeError(f"{AZURE_OPENAI_ENDPOINT_ENV} environment variable is required.") + + deployment = os.getenv(AZURE_OPENAI_DEPLOYMENT_ENV) + if not deployment: + raise RuntimeError(f"{AZURE_OPENAI_DEPLOYMENT_ENV} environment variable is required.") + + client_kwargs: Dict[str, Any] = { + "endpoint": endpoint, + "deployment_name": deployment, + } + + api_key = os.getenv(AZURE_OPENAI_API_KEY_ENV) + if api_key: + client_kwargs["api_key"] = api_key + else: + client_kwargs["credential"] = AzureCliCredential() + + return client_kwargs + + +# Executors for non-AI activities (defined at module level) +class SpamHandlerExecutor(Executor): + """Executor that handles spam emails (non-AI activity).""" + + @handler + async def handle_spam_result( + self, + agent_response: AgentExecutorResponse, + ctx: WorkflowContext[Never, str], + ) -> None: + """Mark email as spam and log the reason.""" + text = agent_response.agent_run_response.text + spam_result = SpamDetectionResult.model_validate_json(text) + message = f"Email marked as spam: {spam_result.reason}" + await ctx.yield_output(message) + + +class EmailSenderExecutor(Executor): + """Executor that sends email responses (non-AI activity).""" + + @handler + async def handle_email_response( + self, + agent_response: AgentExecutorResponse, + ctx: WorkflowContext[Never, str], + ) -> None: + """Send the drafted email response.""" + text = agent_response.agent_run_response.text + email_response = EmailResponse.model_validate_json(text) + message = f"Email sent: 
{email_response.response}" + await ctx.yield_output(message) + + +# Condition function for routing +def is_spam_detected(message: Any) -> bool: + """Check if spam was detected in the email.""" + if not isinstance(message, AgentExecutorResponse): + return False + try: + result = SpamDetectionResult.model_validate_json(message.agent_run_response.text) + return result.is_spam + except Exception: + return False + + +def _create_workflow() -> Workflow: + """Create the workflow definition.""" + client_kwargs = _build_client_kwargs() + chat_client = AzureOpenAIChatClient(**client_kwargs) + + spam_agent = chat_client.create_agent( + name=SPAM_AGENT_NAME, + instructions=SPAM_DETECTION_INSTRUCTIONS, + response_format=SpamDetectionResult, + ) + + email_agent = chat_client.create_agent( + name=EMAIL_AGENT_NAME, + instructions=EMAIL_ASSISTANT_INSTRUCTIONS, + response_format=EmailResponse, + ) + + # Executors + spam_handler = SpamHandlerExecutor(id="spam_handler") + email_sender = EmailSenderExecutor(id="email_sender") + + # Build workflow + workflow = ( + WorkflowBuilder() + .set_start_executor(spam_agent) + .add_switch_case_edge_group( + spam_agent, + [ + Case(condition=is_spam_detected, target=spam_handler), + Default(target=email_agent), + ], + ) + .add_edge(email_agent, email_sender) + .build() + ) + return workflow + + +def launch(durable: bool = True) -> None: + + global app + workflow = None + + if durable: + # Initialize app + workflow = _create_workflow() + app = AgentFunctionApp(workflow=workflow, enable_health_check=True, enable_shared_state=False) + else: + # Launch the spam detection workflow in DevUI + from agent_framework.devui import serve + from dotenv import load_dotenv + + # Load environment variables from .env file + env_path = Path(__file__).parent / ".env" + load_dotenv(dotenv_path=env_path) + + # logging.basicConfig(level=logging.INFO, format="%(message)s") + logger = logging.getLogger(__name__) + + logger.info("Starting Multi-Agent Spam Detection 
Workflow") + logger.info("Available at: http://localhost:8094") + logger.info("\nThis workflow demonstrates:") + logger.info("- Conditional routing based on spam detection") + logger.info("- Mixing AI agents with non-AI executors (like activity functions)") + logger.info("- Path 1 (spam): SpamDetector Agent → SpamHandler Executor") + logger.info("- Path 2 (legitimate): SpamDetector Agent → EmailAssistant Agent → EmailSender Executor") + + workflow = _create_workflow() + serve(entities=[workflow], port=8094, auto_open=True) + + +launch(durable=True) diff --git a/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/host.json b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/host.json new file mode 100644 index 0000000000..4ef61f4578 --- /dev/null +++ b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/host.json @@ -0,0 +1,23 @@ +{ + "version": "2.0", + "logging": { + "applicationInsights": { + "samplingSettings": { + "isEnabled": true, + "maxTelemetryItemsPerSecond": 20 + } + } + }, + "extensionBundle": { + "id": "Microsoft.Azure.Functions.ExtensionBundle.Preview", + "version": "[4.*, 5.0.0)" + }, + "extensions": { + "durableTask": { + "storageProvider": { + "type": "azureManaged", + "connectionStringName": "DURABLE_TASK_SCHEDULER_CONNECTION_STRING" + } + } + } +} diff --git a/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/local.settings.json.sample b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/local.settings.json.sample new file mode 100644 index 0000000000..4c43714b01 --- /dev/null +++ b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/local.settings.json.sample @@ -0,0 +1,11 @@ +{ + "IsEncrypted": false, + "Values": { + "FUNCTIONS_WORKER_RUNTIME": "python", + "AzureWebJobsStorage": "UseDevelopmentStorage=true", + "DURABLE_TASK_SCHEDULER_CONNECTION_STRING": 
"Endpoint=http://localhost:8080;TaskHub=default;Authentication=None", + "AZURE_OPENAI_ENDPOINT": "https://.openai.azure.com/", + "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME": "", + "AZURE_OPENAI_API_KEY": "" + } +} diff --git a/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/requirements.txt b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/requirements.txt new file mode 100644 index 0000000000..792ae4864e --- /dev/null +++ b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/requirements.txt @@ -0,0 +1,3 @@ +agent-framework-azurefunctions +agent-framework +azure-identity From 829cbf384f7e861ae986f583dba4582d521374ef Mon Sep 17 00:00:00 2001 From: Ahmed Muhsin Date: Fri, 19 Dec 2025 11:19:10 -0600 Subject: [PATCH 02/18] use import and export state --- .../agent_framework_azurefunctions/_app.py | 50 ++++++++++--------- .../agent_framework_azurefunctions/_utils.py | 39 +++++++++------ 2 files changed, 49 insertions(+), 40 deletions(-) diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py index 40cfd8f701..539e19b23f 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py @@ -272,11 +272,12 @@ def ExecuteExecutor(inputData: str) -> str: # Reconstruct message - try to match handler's expected types message = reconstruct_message_for_handler(message_data, executor._handlers) - ctx = CapturingWorkflowContext( - shared_state_snapshot=shared_state_snapshot, - ) + async def run() -> dict[str, Any]: + # Create context asynchronously using factory method + ctx = await CapturingWorkflowContext.create( + shared_state_snapshot=shared_state_snapshot, + ) - async def run() -> None: # Find handler handler = None for message_type, handler_func in executor._handlers.items(): @@ -289,27 +290,28 @@ async def run() -> 
None: else: raise ValueError(f"Executor {executor_id} cannot handle message of type {type(message)}") - asyncio.run(run()) - - updates, deletes = ctx.get_shared_state_changes() - - # Serialize all outputs for JSON compatibility - serialized_sent_messages = [ - { - "message": serialize_message(msg["message"]), - "target_id": msg.get("target_id"), + # Get changes asynchronously + updates, deletes = await ctx.get_shared_state_changes() + + # Serialize all outputs for JSON compatibility + serialized_sent_messages = [ + { + "message": serialize_message(msg["message"]), + "target_id": msg.get("target_id"), + } + for msg in ctx.sent_messages + ] + serialized_outputs = [serialize_message(o) for o in ctx.outputs] + serialized_updates = {k: serialize_message(v) for k, v in updates.items()} + + return { + "sent_messages": serialized_sent_messages, + "outputs": serialized_outputs, + "shared_state_updates": serialized_updates, + "shared_state_deletes": list(deletes), } - for msg in ctx.sent_messages - ] - serialized_outputs = [serialize_message(o) for o in ctx.outputs] - serialized_updates = {k: serialize_message(v) for k, v in updates.items()} - - result = { - "sent_messages": serialized_sent_messages, - "outputs": serialized_outputs, - "shared_state_updates": serialized_updates, - "shared_state_deletes": list(deletes), - } + + result = asyncio.run(run()) return json_module.dumps(result) def _setup_shared_state_entity(self) -> None: diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_utils.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_utils.py index 7a87acf49e..dc9109cc9c 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/_utils.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_utils.py @@ -32,28 +32,35 @@ class CapturingWorkflowContext: This class does NOT inherit from WorkflowContext to avoid requiring RunnerContext instances. 
Instead, it duck-types the interface that executor handlers expect. + + Use the async `create()` factory method to instantiate this class. """ - def __init__( - self, + def __init__(self) -> None: + """Initialize the capturing context. Use create() factory method instead.""" + self._original_snapshot: dict[str, Any] = {} + self._shared_state = SharedState() + self.sent_messages: list[dict[str, Any]] = [] + self.outputs: list[Any] = [] + + @classmethod + async def create( + cls, shared_state_snapshot: dict[str, Any] | None = None, - ) -> None: + ) -> "CapturingWorkflowContext": """ - Initialize the capturing context. + Create a new CapturingWorkflowContext asynchronously. Args: shared_state_snapshot: Snapshot of current shared state from orchestrator - """ - # Keep original snapshot for diffing later - self._original_snapshot: dict[str, Any] = dict(shared_state_snapshot or {}) - # Create real SharedState, pre-populated with snapshot - self._shared_state = SharedState() - self._shared_state._state = dict(shared_state_snapshot or {}) - - # Captured outputs - self.sent_messages: list[dict[str, Any]] = [] - self.outputs: list[Any] = [] + Returns: + A new CapturingWorkflowContext instance + """ + instance = cls() + instance._original_snapshot = dict(shared_state_snapshot or {}) + await instance._shared_state.import_state(shared_state_snapshot or {}) + return instance @property def shared_state(self) -> SharedState: @@ -97,7 +104,7 @@ async def set_shared_state(self, key: str, value: Any) -> None: """ await self._shared_state.set(key, value) - def get_shared_state_changes(self) -> tuple[dict[str, Any], set[str]]: + async def get_shared_state_changes(self) -> tuple[dict[str, Any], set[str]]: """ Get all shared state changes made during execution. 
@@ -108,7 +115,7 @@ def get_shared_state_changes(self) -> tuple[dict[str, Any], set[str]]: Returns: Tuple of (updates dict, deletes set) """ - current_state = self._shared_state._state + current_state = await self._shared_state.export_state() original_keys = set(self._original_snapshot.keys()) current_keys = set(current_state.keys()) From efdff2d248c2fd12e15ec6a5aa138a51316c9fc5 Mon Sep 17 00:00:00 2001 From: Ahmed Muhsin Date: Fri, 19 Dec 2025 11:45:26 -0600 Subject: [PATCH 03/18] use runnercontext instead of workflowcontext for executors --- .../agent_framework_azurefunctions/_app.py | 93 ++++--- .../agent_framework_azurefunctions/_utils.py | 231 +++++++++++------- .../_workflow.py | 37 ++- 3 files changed, 228 insertions(+), 133 deletions(-) diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py index 539e19b23f..0afafba3c7 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py @@ -16,8 +16,7 @@ import azure.durable_functions as df import azure.functions as func -from agent_framework import AgentExecutor, AgentProtocol, Workflow, get_logger -from agent_framework._workflows._typing_utils import is_instance_of +from agent_framework import AgentExecutor, AgentProtocol, Workflow, WorkflowOutputEvent, get_logger from agent_framework_durabletask import ( DEFAULT_MAX_POLL_RETRIES, DEFAULT_POLL_INTERVAL_SECONDS, @@ -39,7 +38,7 @@ from ._models import AgentSessionId from ._orchestration import AgentOrchestrationContextType, DurableAIAgent from ._shared_state import SHARED_STATE_ENTITY_NAME, DurableSharedState, create_shared_state_entity_function -from ._utils import CapturingWorkflowContext, reconstruct_message_for_handler, serialize_message +from ._utils import CapturingRunnerContext, deserialize_value, reconstruct_message_for_handler, serialize_message from ._workflow 
import run_workflow_orchestrator logger = get_logger("agent_framework.azurefunctions") @@ -257,10 +256,13 @@ def ExecuteExecutor(inputData: str) -> str: """ import json as json_module + from agent_framework import SharedState + data = json_module.loads(inputData) executor_id = data["executor_id"] message_data = data["message"] shared_state_snapshot = data.get("shared_state_snapshot", {}) + source_executor_ids = data.get("source_executor_ids", ["__orchestrator__"]) if not self.workflow: raise RuntimeError("Workflow not initialized in AgentFunctionApp") @@ -269,44 +271,69 @@ def ExecuteExecutor(inputData: str) -> str: if not executor: raise ValueError(f"Unknown executor: {executor_id}") - # Reconstruct message - try to match handler's expected types - message = reconstruct_message_for_handler(message_data, executor._handlers) + # Reconstruct message - try to match handler's expected types using public input_types + message = reconstruct_message_for_handler(message_data, executor.input_types) async def run() -> dict[str, Any]: - # Create context asynchronously using factory method - ctx = await CapturingWorkflowContext.create( - shared_state_snapshot=shared_state_snapshot, + # Create runner context and shared state + runner_context = CapturingRunnerContext() + shared_state = SharedState() + + # Deserialize shared state values to reconstruct dataclasses/Pydantic models + deserialized_state = { + k: deserialize_value(v) for k, v in (shared_state_snapshot or {}).items() + } + original_snapshot = dict(deserialized_state) + await shared_state.import_state(deserialized_state) + + # Execute using the public execute() method + await executor.execute( + message=message, + source_executor_ids=source_executor_ids, + shared_state=shared_state, + runner_context=runner_context, ) - # Find handler - handler = None - for message_type, handler_func in executor._handlers.items(): - if is_instance_of(message, message_type): - handler = handler_func - break - - if handler: - await 
handler(message, ctx) - else: - raise ValueError(f"Executor {executor_id} cannot handle message of type {type(message)}") - - # Get changes asynchronously - updates, deletes = await ctx.get_shared_state_changes() - - # Serialize all outputs for JSON compatibility - serialized_sent_messages = [ - { - "message": serialize_message(msg["message"]), - "target_id": msg.get("target_id"), - } - for msg in ctx.sent_messages - ] - serialized_outputs = [serialize_message(o) for o in ctx.outputs] + # Export current state and compute changes + current_state = await shared_state.export_state() + original_keys = set(original_snapshot.keys()) + current_keys = set(current_state.keys()) + + # Deleted = was in original, not in current + deletes = original_keys - current_keys + + # Updates = keys in current that are new or have different values + updates = { + k: v + for k, v in current_state.items() + if k not in original_snapshot or original_snapshot[k] != v + } + + # Drain messages and events from runner context + sent_messages = await runner_context.drain_messages() + events = await runner_context.drain_events() + + # Extract outputs from WorkflowOutputEvent instances + outputs: list[Any] = [] + for event in events: + if isinstance(event, WorkflowOutputEvent): + outputs.append(serialize_message(event.data)) + + # Serialize messages for JSON compatibility + serialized_sent_messages = [] + for source_id, msg_list in sent_messages.items(): + for msg in msg_list: + serialized_sent_messages.append({ + "message": serialize_message(msg.data), + "target_id": msg.target_id, + "source_id": msg.source_id, + }) + serialized_updates = {k: serialize_message(v) for k, v in updates.items()} return { "sent_messages": serialized_sent_messages, - "outputs": serialized_outputs, + "outputs": outputs, "shared_state_updates": serialized_updates, "shared_state_deletes": list(deletes), } diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_utils.py 
b/python/packages/azurefunctions/agent_framework_azurefunctions/_utils.py index dc9109cc9c..2dcd3375e8 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/_utils.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_utils.py @@ -9,123 +9,180 @@ from __future__ import annotations +import asyncio import logging from dataclasses import asdict, fields, is_dataclass from typing import Any -from agent_framework import AgentExecutorRequest, AgentExecutorResponse, AgentRunResponse, ChatMessage -from agent_framework._workflows._shared_state import SharedState +from agent_framework import ( + AgentExecutorRequest, + AgentExecutorResponse, + AgentRunResponse, + ChatMessage, + CheckpointStorage, + Message, + RequestInfoEvent, + RunnerContext, + SharedState, + WorkflowCheckpoint, + WorkflowEvent, + WorkflowOutputEvent, +) from pydantic import BaseModel logger = logging.getLogger(__name__) -class CapturingWorkflowContext: +class CapturingRunnerContext(RunnerContext): """ - Context that captures outputs, sent messages, and shared state changes. + A RunnerContext implementation that captures messages and events for Azure Functions activities. - Provides a WorkflowContext-compatible API for custom executors running - in activities. Uses MAF's SharedState class internally, initialized with - a snapshot from the orchestrator. After execution, changes are diffed - against the original snapshot to determine updates and deletes. + This context is designed for executing standard Executors within Azure Functions activities. + It captures all messages and events produced during execution without requiring durable + entity storage, allowing the results to be returned to the orchestrator. - This class does NOT inherit from WorkflowContext to avoid requiring - RunnerContext instances. Instead, it duck-types the interface that - executor handlers expect. 
+ Unlike the full InProcRunnerContext, this implementation: + - Does NOT support checkpointing (always returns False for has_checkpointing) + - Does NOT support streaming (always returns False for is_streaming) + - Captures messages and events in memory for later retrieval - Use the async `create()` factory method to instantiate this class. + The orchestrator manages state coordination; this context just captures execution output. """ def __init__(self) -> None: - """Initialize the capturing context. Use create() factory method instead.""" - self._original_snapshot: dict[str, Any] = {} - self._shared_state = SharedState() - self.sent_messages: list[dict[str, Any]] = [] - self.outputs: list[Any] = [] - - @classmethod - async def create( - cls, - shared_state_snapshot: dict[str, Any] | None = None, - ) -> "CapturingWorkflowContext": - """ - Create a new CapturingWorkflowContext asynchronously. + """Initialize the capturing runner context.""" + self._messages: dict[str, list[Message]] = {} + self._event_queue: asyncio.Queue[WorkflowEvent] = asyncio.Queue() + self._pending_request_info_events: dict[str, RequestInfoEvent] = {} + self._workflow_id: str | None = None + self._streaming: bool = False - Args: - shared_state_snapshot: Snapshot of current shared state from orchestrator + # region Messaging - Returns: - A new CapturingWorkflowContext instance - """ - instance = cls() - instance._original_snapshot = dict(shared_state_snapshot or {}) - await instance._shared_state.import_state(shared_state_snapshot or {}) - return instance + async def send_message(self, message: Message) -> None: + """Capture a message sent by an executor.""" + self._messages.setdefault(message.source_id, []) + self._messages[message.source_id].append(message) - @property - def shared_state(self) -> SharedState: - """Get the shared state object for direct access.""" - return self._shared_state + async def drain_messages(self) -> dict[str, list[Message]]: + """Drain and return all captured 
messages.""" + from copy import copy - async def send_message(self, message: Any, target_id: str | None = None) -> None: - """Capture a message to be routed by the orchestrator.""" - self.sent_messages.append({"message": message, "target_id": target_id}) + messages = copy(self._messages) + self._messages.clear() + return messages - async def yield_output(self, output: Any) -> None: - """Capture a workflow output.""" - self.outputs.append(output) + async def has_messages(self) -> bool: + """Check if there are any captured messages.""" + return bool(self._messages) - async def get_shared_state(self, key: str) -> Any: - """ - Get a value from shared state. + # endregion Messaging - If the stored value has type metadata (__type__, __module__), - attempts to reconstruct the original typed object. + # region Events - Args: - key: The key to retrieve + async def add_event(self, event: WorkflowEvent) -> None: + """Capture an event produced during execution.""" + await self._event_queue.put(event) - Returns: - The value associated with the key (possibly reconstructed) + async def drain_events(self) -> list[WorkflowEvent]: + """Drain all currently queued events without blocking.""" + events: list[WorkflowEvent] = [] + while True: + try: + events.append(self._event_queue.get_nowait()) + except asyncio.QueueEmpty: + break + return events - Raises: - KeyError: If the key doesn't exist - """ - value = await self._shared_state.get(key) - return deserialize_value(value) + async def has_events(self) -> bool: + """Check if there are any queued events.""" + return not self._event_queue.empty() - async def set_shared_state(self, key: str, value: Any) -> None: - """ - Set a value in shared state. 
+ async def next_event(self) -> WorkflowEvent: + """Wait for and return the next event.""" + return await self._event_queue.get() - Args: - key: The key to set - value: The value to store (must be JSON serializable) - """ - await self._shared_state.set(key, value) + # endregion Events - async def get_shared_state_changes(self) -> tuple[dict[str, Any], set[str]]: - """ - Get all shared state changes made during execution. + # region Checkpointing (not supported in activity context) - Compares current state against the original snapshot to find: - - Updates: keys that were added or modified - - Deletes: keys that were removed + def has_checkpointing(self) -> bool: + """Checkpointing is not supported in activity context.""" + return False - Returns: - Tuple of (updates dict, deletes set) - """ - current_state = await self._shared_state.export_state() - original_keys = set(self._original_snapshot.keys()) - current_keys = set(current_state.keys()) + def set_runtime_checkpoint_storage(self, storage: CheckpointStorage) -> None: + """No-op: checkpointing not supported in activity context.""" + pass + + def clear_runtime_checkpoint_storage(self) -> None: + """No-op: checkpointing not supported in activity context.""" + pass + + async def create_checkpoint( + self, + shared_state: SharedState, + iteration_count: int, + metadata: dict[str, Any] | None = None, + ) -> str: + """Checkpointing not supported in activity context.""" + raise NotImplementedError("Checkpointing is not supported in Azure Functions activity context") + + async def load_checkpoint(self, checkpoint_id: str) -> WorkflowCheckpoint | None: + """Checkpointing not supported in activity context.""" + raise NotImplementedError("Checkpointing is not supported in Azure Functions activity context") - # Deleted = was in original, not in current - deletes = original_keys - current_keys + async def apply_checkpoint(self, checkpoint: WorkflowCheckpoint) -> None: + """Checkpointing not supported in activity context.""" 
+ raise NotImplementedError("Checkpointing is not supported in Azure Functions activity context") + + # endregion Checkpointing + + # region Workflow Configuration + + def set_workflow_id(self, workflow_id: str) -> None: + """Set the workflow ID.""" + self._workflow_id = workflow_id + + def reset_for_new_run(self) -> None: + """Reset the context for a new run.""" + self._messages.clear() + self._event_queue = asyncio.Queue() + self._pending_request_info_events.clear() + self._streaming = False + + def set_streaming(self, streaming: bool) -> None: + """Set streaming mode (not used in activity context).""" + self._streaming = streaming + + def is_streaming(self) -> bool: + """Check if streaming mode is enabled (always False in activity context).""" + return self._streaming + + # endregion Workflow Configuration + + # region Request Info Events + + async def add_request_info_event(self, event: RequestInfoEvent) -> None: + """Add a RequestInfoEvent and track it for correlation.""" + self._pending_request_info_events[event.request_id] = event + await self.add_event(event) + + async def send_request_info_response(self, request_id: str, response: Any) -> None: + """Send a response correlated to a pending request. + + Note: This is not supported in activity context since human-in-the-loop + scenarios require orchestrator-level coordination. + """ + raise NotImplementedError( + "send_request_info_response is not supported in Azure Functions activity context. " + "Human-in-the-loop scenarios should be handled at the orchestrator level." 
+ ) - # Updates = keys in current that are new or have different values - updates = {k: v for k, v in current_state.items() if k not in self._original_snapshot or self._original_snapshot[k] != v} + async def get_pending_request_info_events(self) -> dict[str, RequestInfoEvent]: + """Get the mapping of request IDs to their corresponding RequestInfoEvent.""" + return dict(self._pending_request_info_events) - return updates, deletes + # endregion Request Info Events def _serialize_value(value: Any) -> Any: @@ -272,13 +329,13 @@ def reconstruct_agent_executor_response(data: dict[str, Any]) -> AgentExecutorRe ) -def reconstruct_message_for_handler(data: Any, handler_types: dict[type, Any]) -> Any: +def reconstruct_message_for_handler(data: Any, input_types: list[type[Any]]) -> Any: """ Attempt to reconstruct a message to match one of the handler's expected types. Args: data: The serialized message data (could be dict, str, etc.) - handler_types: Dict of message types the handler can accept + input_types: List of message types the executor can accept Returns: Reconstructed message if possible, otherwise the original data @@ -306,10 +363,10 @@ def reconstruct_message_for_handler(data: Any, handler_types: dict[type, Any]) - if deserialized is not data: return deserialized - # Try to match against handler types by checking dict keys vs dataclass fields + # Try to match against input types by checking dict keys vs dataclass fields # Filter out metadata keys when comparing data_keys = {k for k in data.keys() if not k.startswith("__")} - for msg_type in handler_types.keys(): + for msg_type in input_types: if is_dataclass(msg_type): # Check if the dict keys match the dataclass fields field_names = {f.name for f in fields(msg_type)} diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py index 8de69214c1..91de79a6e8 100644 --- 
a/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py @@ -172,14 +172,18 @@ def run_workflow_orchestrator( Returns: List of workflow outputs collected from executor activities """ - pending_messages: dict[str, list[Any]] = {workflow.start_executor_id: [initial_message]} + # pending_messages stores {target_executor_id: [(message, source_executor_id), ...]} + # This allows executors to know who sent them each message + pending_messages: dict[str, list[tuple[Any, str]]] = { + workflow.start_executor_id: [(initial_message, "__workflow_start__")] + } iteration = 0 max_iterations = workflow.max_iterations workflow_outputs: list[Any] = [] # Track pending sources for FanInEdgeGroups - # Structure: {group_id: {source_id: [messages]}} - fan_in_pending: dict[str, dict[str, list[Any]]] = {} + # Structure: {group_id: {source_id: [(message, source_executor_id)]}} + fan_in_pending: dict[str, dict[str, list[tuple[Any, str]]]] = {} # Initialize fan-in tracking for all FanInEdgeGroups for group in workflow.edge_groups: @@ -188,13 +192,13 @@ def run_workflow_orchestrator( while pending_messages and iteration < max_iterations: logger.debug("Orchestrator iteration %d", iteration) - next_pending_messages: dict[str, list[Any]] = {} + next_pending_messages: dict[str, list[tuple[Any, str]]] = {} - for executor_id, messages in pending_messages.items(): - logger.debug("Processing executor: %s with %d messages", executor_id, len(messages)) + for executor_id, messages_with_sources in pending_messages.items(): + logger.debug("Processing executor: %s with %d messages", executor_id, len(messages_with_sources)) executor = workflow.executors[executor_id] - for message in messages: + for message, source_executor_id in messages_with_sources: output_message: Any | None = None result: dict[str, Any] | None = None # Activity result (only set for standard executors) @@ -256,6 +260,7 @@ def 
run_workflow_orchestrator( "executor_id": executor_id, "message": serialize_message(message), "shared_state_snapshot": shared_state_snapshot, + "source_executor_ids": [source_executor_id], } # Serialize to JSON string to work around Azure Functions type validation issues @@ -304,7 +309,7 @@ def run_workflow_orchestrator( if explicit_target: if explicit_target not in next_pending_messages: next_pending_messages[explicit_target] = [] - next_pending_messages[explicit_target].append(msg_to_route) + next_pending_messages[explicit_target].append((msg_to_route, executor_id)) logger.debug("Routed message from %s to explicit target %s", executor_id, explicit_target) continue @@ -314,7 +319,7 @@ def run_workflow_orchestrator( # Accumulate message for fan-in if executor_id not in fan_in_pending[group.id]: fan_in_pending[group.id][executor_id] = [] - fan_in_pending[group.id][executor_id].append(msg_to_route) + fan_in_pending[group.id][executor_id].append((msg_to_route, executor_id)) logger.debug("Accumulated message for FanIn group %s from %s", group.id, executor_id) # Use MAF's edge group routing for other edge types @@ -328,7 +333,7 @@ def run_workflow_orchestrator( logger.debug("Routing to %s", target_id) if target_id not in next_pending_messages: next_pending_messages[target_id] = [] - next_pending_messages[target_id].append(msg_to_route) + next_pending_messages[target_id].append((msg_to_route, executor_id)) # Check if any FanInEdgeGroups are ready to deliver for group in workflow.edge_groups: @@ -336,10 +341,13 @@ def run_workflow_orchestrator( pending_sources = fan_in_pending.get(group.id, {}) # Check if all sources have contributed at least one message if all(src in pending_sources for src in group.source_executor_ids): - # Aggregate all messages into a single list + # Aggregate all messages into a single list (extract just the messages) aggregated: list[Any] = [] + aggregated_sources: list[str] = [] for src in group.source_executor_ids: - 
aggregated.extend(pending_sources[src]) + for msg, msg_source in pending_sources[src]: + aggregated.append(msg) + aggregated_sources.append(msg_source) target_id = group.target_executor_ids[0] logger.debug( @@ -348,7 +356,10 @@ def run_workflow_orchestrator( if target_id not in next_pending_messages: next_pending_messages[target_id] = [] - next_pending_messages[target_id].append(aggregated) + # For fan-in, the aggregated list is the message, sources are all contributors + # Use first source as representative (or could join them) + first_source = aggregated_sources[0] if aggregated_sources else "__fan_in__" + next_pending_messages[target_id].append((aggregated, first_source)) # Clear the pending sources for this group fan_in_pending[group.id] = {} From 428ddf18aee9d1509ff6d849b13dcdff00cfda90 Mon Sep 17 00:00:00 2001 From: Ahmed Muhsin Date: Fri, 19 Dec 2025 12:01:28 -0600 Subject: [PATCH 04/18] expose agent as property --- .../azurefunctions/agent_framework_azurefunctions/_app.py | 2 +- .../core/agent_framework/_workflows/_agent_executor.py | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py index 0afafba3c7..924d484e6a 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py @@ -227,7 +227,7 @@ def __init__( logger.debug("[AgentFunctionApp] Extracting agents from workflow") for executor in workflow.executors.values(): if isinstance(executor, AgentExecutor): - agents.append(executor._agent) + agents.append(executor.agent) self._setup_executor_activity() self._setup_workflow_orchestration() diff --git a/python/packages/core/agent_framework/_workflows/_agent_executor.py b/python/packages/core/agent_framework/_workflows/_agent_executor.py index 26300ad473..081e1e1510 100644 --- 
a/python/packages/core/agent_framework/_workflows/_agent_executor.py +++ b/python/packages/core/agent_framework/_workflows/_agent_executor.py @@ -106,6 +106,11 @@ def workflow_output_types(self) -> list[type[Any]]: return [AgentRunResponse] return [] + @property + def agent(self) -> AgentProtocol: + """Get the underlying agent wrapped by this executor.""" + return self._agent + @property def description(self) -> str | None: """Get the description of the underlying agent.""" From b3a32adfb515270a10ebd5dda918bacba07cbbc4 Mon Sep 17 00:00:00 2001 From: Ahmed Muhsin Date: Fri, 19 Dec 2025 12:01:46 -0600 Subject: [PATCH 05/18] use durable agent thread --- .../agent_framework_azurefunctions/_workflow.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py index 91de79a6e8..c95bdb7d41 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py @@ -24,7 +24,6 @@ AgentExecutorRequest, AgentExecutorResponse, AgentRunResponse, - AgentThread, ChatMessage, Workflow, ) @@ -37,7 +36,7 @@ ) from azure.durable_functions import DurableOrchestrationContext -from ._models import AgentSessionId +from ._models import AgentSessionId, DurableAgentThread from ._orchestration import DurableAIAgent from ._shared_state import DurableSharedState from ._utils import deserialize_value, serialize_message @@ -205,8 +204,8 @@ def run_workflow_orchestrator( # Execute if isinstance(executor, AgentExecutor): # Durable Agent Execution - agent_def = executor._agent - agent_name = agent_def.name + # Use executor.id which equals agent.name (set during AgentExecutor construction) + agent_name = executor.id logger.debug("Calling Durable Entity: %s", agent_name) # Extract message content @@ -215,9 +214,8 @@ def 
run_workflow_orchestrator( # Create unique session for this orchestration instance session_id = AgentSessionId(name=agent_name, key=context.instance_id) - # Create a thread with the session ID - thread = AgentThread() - thread._durable_session_id = session_id # type: ignore[attr-defined] + # Create a durable thread with the session ID using proper class + thread = DurableAgentThread(session_id=session_id) # Create DurableAIAgent wrapper to call the entity agent = DurableAIAgent(context, agent_name) From 66714582dde5050d3bf7c58a377f766fb3fd5dda Mon Sep 17 00:00:00 2001 From: Ahmed Muhsin Date: Fri, 19 Dec 2025 12:05:23 -0600 Subject: [PATCH 06/18] use public selection_func property --- .../agent_framework_azurefunctions/_workflow.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py index c95bdb7d41..81c26fe0af 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py @@ -70,8 +70,8 @@ def route_message_through_edge_groups( # SwitchCaseEdgeGroup and FanOutEdgeGroup use selection_func if isinstance(group, (SwitchCaseEdgeGroup, FanOutEdgeGroup)): - if group._selection_func is not None: - selected = group._selection_func(message, group.target_executor_ids) + if group.selection_func is not None: + selected = group.selection_func(message, group.target_executor_ids) targets.extend(selected) else: # No selection func means broadcast to all targets From 323832b76ad7e5d0be79472962ceeb2c6e056c64 Mon Sep 17 00:00:00 2001 From: Ahmed Muhsin Date: Fri, 19 Dec 2025 12:19:27 -0600 Subject: [PATCH 07/18] clean up samples --- .../09_workflow_shared_state/function_app.py | 1 - .../10_workflow_no_shared_state/function_app.py | 10 ++++------ 2 files changed, 4 insertions(+), 7 deletions(-) diff --git 
a/python/samples/getting_started/azure_functions/09_workflow_shared_state/function_app.py b/python/samples/getting_started/azure_functions/09_workflow_shared_state/function_app.py index af2aacfb12..70456ce3f4 100644 --- a/python/samples/getting_started/azure_functions/09_workflow_shared_state/function_app.py +++ b/python/samples/getting_started/azure_functions/09_workflow_shared_state/function_app.py @@ -1,6 +1,5 @@ # Copyright (c) Microsoft. All rights reserved. -import os from dataclasses import dataclass from typing import Any from uuid import uuid4 diff --git a/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/function_app.py b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/function_app.py index dde515d737..f566ba429e 100644 --- a/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/function_app.py +++ b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/function_app.py @@ -38,7 +38,6 @@ from typing_extensions import Never logger = logging.getLogger(__name__) -app: AgentFunctionApp = None AZURE_OPENAI_ENDPOINT_ENV = "AZURE_OPENAI_ENDPOINT" AZURE_OPENAI_DEPLOYMENT_ENV = "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME" @@ -195,7 +194,7 @@ def _create_workflow() -> Workflow: def launch(durable: bool = True) -> None: - global app + app: AgentFunctionApp = None workflow = None if durable: @@ -211,9 +210,6 @@ def launch(durable: bool = True) -> None: env_path = Path(__file__).parent / ".env" load_dotenv(dotenv_path=env_path) - # logging.basicConfig(level=logging.INFO, format="%(message)s") - logger = logging.getLogger(__name__) - logger.info("Starting Multi-Agent Spam Detection Workflow") logger.info("Available at: http://localhost:8094") logger.info("\nThis workflow demonstrates:") @@ -224,6 +220,8 @@ def launch(durable: bool = True) -> None: workflow = _create_workflow() serve(entities=[workflow], port=8094, auto_open=True) + + return app -launch(durable=True) +app = 
launch(durable=True) From 86034ca60e156d907ff5a3a6bc648f5dd3401a12 Mon Sep 17 00:00:00 2001 From: Ahmed Muhsin Date: Fri, 19 Dec 2025 12:21:32 -0600 Subject: [PATCH 08/18] update readmes --- .../09_workflow_shared_state/README.md | 39 +-- .../10_workflow_no_shared_state/README.md | 256 +++++++----------- 2 files changed, 123 insertions(+), 172 deletions(-) diff --git a/python/samples/getting_started/azure_functions/09_workflow_shared_state/README.md b/python/samples/getting_started/azure_functions/09_workflow_shared_state/README.md index 31a92058a9..0385f65d98 100644 --- a/python/samples/getting_started/azure_functions/09_workflow_shared_state/README.md +++ b/python/samples/getting_started/azure_functions/09_workflow_shared_state/README.md @@ -1,15 +1,17 @@ -# Workflow Shared State Sample +# Workflow with SharedState Sample -This sample demonstrates **Workflow SharedState functionality** with the Agent Framework Azure Functions package. +This sample demonstrates running **Agent Framework workflows with SharedState** in Azure Durable Functions. -## What This Sample Demonstrates +## Overview + +This sample shows how to use `AgentFunctionApp` to execute a `WorkflowBuilder` workflow that uses SharedState to pass data between executors. SharedState is backed by a Durable Entity for persistence across workflow steps. -This sample validates the SharedState implementation in workflow orchestrations: +## What This Sample Demonstrates -1. **`ctx.set_shared_state(key, value)`** - Store values in SharedState -2. **`ctx.get_shared_state(key)`** - Retrieve values from SharedState -3. **`ctx.shared_state.delete(key)`** - Delete keys from SharedState -4. **State persistence** - State passed between executors via `DurableSharedState` entity +1. **Workflow Execution** - Running `WorkflowBuilder` workflows in Azure Durable Functions +2. **SharedState APIs** - Using `ctx.set_shared_state()` and `ctx.get_shared_state()` to share data +3. 
**Conditional Routing** - Routing messages based on spam detection results +4. **Agent + Executor Composition** - Combining AI agents with non-AI function executors ## Workflow Architecture @@ -26,14 +28,13 @@ store_email → spam_detector (agent) → to_detection_result → [branch]: | `store_email` | `set_shared_state("email:{id}", email)`, `set_shared_state("current_email_id", id)` | | `to_detection_result` | `get_shared_state("current_email_id")` | | `submit_to_email_assistant` | `get_shared_state("email:{id}")` | -| `finalize_and_send` | `shared_state.delete("email:{id}")`, `shared_state.delete("current_email_id")` | -| `handle_spam` | `shared_state.delete("email:{id}")`, `shared_state.delete("current_email_id")` | + +SharedState allows executors to pass large payloads (like email content) by reference rather than through message routing. ## Prerequisites 1. **Azure OpenAI** - Endpoint and deployment configured -2. **Durable Task Scheduler** - Running locally or in Azure -3. **Azurite** - For local storage emulation +2. **Azurite** - For local storage emulation ## Setup @@ -57,12 +58,7 @@ store_email → spam_detector (agent) → to_detection_result → [branch]: azurite --silent ``` -4. Start Durable Task Scheduler: - ```bash - durabletask-scheduler start - ``` - -5. Run the function app: +4. Run the function app: ```bash func start ``` @@ -94,5 +90,10 @@ Email marked as spam: This email exhibits spam characteristics including urgent **Legitimate email:** ``` -Email sent: Hi, Thank you for the reminder about the sprint planning meeting tomorrow at 10 AM. I will review the agenda in Jira and come prepared with my updates. See you then! +Email sent: Hi, Thank you for the reminder about the sprint planning meeting tomorrow at 10 AM. I will review the agenda and come prepared with my updates. See you then! 
``` + +## Related Samples + +- `10_workflow_no_shared_state` - Workflow execution without SharedState +- `06_multi_agent_orchestration_conditionals` - Manual Durable Functions orchestration with agents diff --git a/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/README.md b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/README.md index 4b80d7cc0a..105012c280 100644 --- a/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/README.md +++ b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/README.md @@ -1,217 +1,167 @@ -# Hybrid Multi-Agent Workflow with Durable Functions – Python +# Workflow Execution Sample (No SharedState) -This sample demonstrates a **hybrid approach** that combines Durable Functions orchestration with Agent Framework workflows. +This sample demonstrates running **Agent Framework workflows** in Azure Durable Functions without using SharedState. ## Overview -This sample bridges the gap between the two previous samples: -- **`06_multi_agent_orchestration_conditionals`** - Pure Durable Functions orchestration -- **`09_workflow_shared_state`** - Workflow with shared state +This sample shows how to use `AgentFunctionApp` with a `WorkflowBuilder` workflow. The workflow is passed directly to `AgentFunctionApp`, which orchestrates execution using Durable Functions: -The hybrid approach: -1. Registers agents via `AgentFunctionApp` (Durable Functions style) -2. Retrieves agents from `DurableOrchestrationContext` during orchestration -3. Builds a `WorkflowBuilder` graph using those agents -4. 
Executes the workflow logic within the durable orchestration - -## Architecture - -```text -AgentFunctionApp - ├─ Register SpamDetectionAgent - └─ Register EmailAssistantAgent - -DurableOrchestration - ├─ Get agents from context - ├─ Build WorkflowBuilder graph with agents + executors - └─ Execute workflow logic (procedural with agents) +```python +workflow = _create_workflow() # Build the workflow graph +app = AgentFunctionApp(workflow=workflow, enable_shared_state=False) ``` -## Components +This approach provides durable, fault-tolerant workflow execution with minimal code. -### AI Agents (registered via AgentFunctionApp) +## What This Sample Demonstrates -1. **SpamDetectionAgent** - Analyzes emails for spam -2. **EmailAssistantAgent** - Drafts professional responses +1. **Workflow Registration** - Pass a `Workflow` directly to `AgentFunctionApp` +2. **Durable Execution** - Workflow executes with Durable Functions durability and scalability +3. **Conditional Routing** - Route messages based on spam detection (is_spam → spam handler, not spam → email assistant) +4. **Agent + Executor Composition** - Combine AI agents with non-AI executor classes -### Executors (defined as classes) +## Workflow Architecture -1. **SpamHandlerExecutor** - Handles spam emails (non-AI activity) -2. 
**EmailSenderExecutor** - Sends email responses (non-AI activity) +``` +SpamDetectionAgent → [branch based on is_spam]: + ├── If spam: SpamHandlerExecutor → yield "Email marked as spam: {reason}" + └── If not spam: EmailAssistantAgent → EmailSenderExecutor → yield "Email sent: {response}" +``` -## Key Features +### Components -- **Agent Registration**: Uses `AgentFunctionApp` for centralized agent management -- **Context-Based Retrieval**: Gets agents from `context.get_agent()` -- **Workflow Builder**: Constructs declarative workflow graphs -- **Executor Pattern**: Uses Executor classes (no traditional activity triggers) -- **Durable Orchestration**: Runs within durable context with state persistence +| Component | Type | Description | +|-----------|------|-------------| +| `SpamDetectionAgent` | AI Agent | Analyzes emails for spam indicators | +| `EmailAssistantAgent` | AI Agent | Drafts professional email responses | +| `SpamHandlerExecutor` | Executor | Handles spam emails (non-AI) | +| `EmailSenderExecutor` | Executor | Sends email responses (non-AI) | -## Running the Sample +## Prerequisites -### Prerequisites +1. **Azure OpenAI** - Endpoint and deployment configured +2. **Azurite** - For local storage emulation -1. Install dependencies: +## Setup +1. Copy configuration files: ```bash - pip install -r requirements.txt + cp local.settings.json.sample local.settings.json ``` -2. Configure your environment: +2. Configure `local.settings.json`: + ```json + { + "Values": { + "AZURE_OPENAI_ENDPOINT": "https://your-resource.openai.azure.com/", + "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME": "gpt-4o" + } + } + ``` - Copy the sample configuration files: +3. Install dependencies: ```bash - cp .env.sample .env - cp local.settings.json.sample local.settings.json + pip install -r requirements.txt ``` - Update `.env` and `local.settings.json` with your Azure OpenAI credentials. - -3. Start Azurite (for local storage): - +4. 
Start Azurite: ```bash - azurite + azurite --silent ``` -### Execution Modes - -This sample can be run in two modes by modifying the `launch(durable=...)` call at the bottom of `function_app.py`. - -#### 1. Durable Functions Mode (Default) - -Set `launch(durable=True)` in `function_app.py`. - -- **Configuration**: Requires `local.settings.json`. -- **Command**: - - ```bash - func start - ``` - -- **Description**: Runs the workflow as a Durable Functions orchestration. The app will start on `http://localhost:7071`. - -#### 2. Standalone Workflow Mode (DevUI) - -Set `launch(durable=False)` in `function_app.py`. - -- **Configuration**: Requires `.env`. -- **Command**: - - ```bash - python function_app.py - ``` +5. Run the function app: + ```bash + func start + ``` -- **Description**: Runs the workflow locally using the Agent Framework DevUI (available at `http://localhost:8094`). +## Testing -### Test with HTTP Requests +Use the `demo.http` file with REST Client extension or curl: -Use `demo.http` or curl: +### Test Spam Email +```bash +curl -X POST http://localhost:7071/api/workflow/run \ + -H "Content-Type: application/json" \ + -d '{"email_id": "test-001", "email_content": "URGENT! You have won $1,000,000! Click here!"}' +``` +### Test Legitimate Email ```bash -# Start orchestration curl -X POST http://localhost:7071/api/workflow/run \ -H "Content-Type: application/json" \ - -d '{"email_id": "test-001", "email_content": "URGENT! 
Click here now!"}' + -d '{"email_id": "test-002", "email_content": "Hi team, reminder about our meeting tomorrow at 10 AM."}' +``` -# Check status +### Check Status +```bash curl http://localhost:7071/api/workflow/status/{instanceId} ``` -## Comparison with Other Samples - -| Feature | Orchestration | Workflow | **Hybrid** | -|---------|--------------|----------|------------| -| **Agent Registration** | AgentFunctionApp | Direct creation | **AgentFunctionApp** | -| **Agent Retrieval** | context.get_agent() | Direct reference | **context.get_agent()** | -| **Workflow Definition** | Procedural (yield) | Declarative (WorkflowBuilder) | **Both** | -| **Activity Style** | @activity_trigger | Executor classes | **Executor classes** | -| **Execution Model** | Durable orchestration | In-memory workflow | **Durable orchestration** | -| **State Persistence** | ✅ Azure Storage | ❌ In-memory | **✅ Azure Storage** | -| **Scalability** | ✅ Cloud-native | ❌ Single process | **✅ Cloud-native** | - -## When to Use This Approach - -**Use the Hybrid approach** when you: - -- Want the declarative nature of WorkflowBuilder for documentation -- Need durable state persistence and cloud scalability -- Want to use Executor classes instead of activity triggers -- Want to visualize workflow structure programmatically -- Need both agent-based reasoning and executor-based activities -- Are exploring workflow patterns within durable orchestrations +## Expected Output -**Don't use it** when: +**Spam email:** +``` +Email marked as spam: This email exhibits spam characteristics including urgent language, unrealistic claims of monetary winnings, and requests to click suspicious links. 
+``` -- You only need simple orchestration (use pure Durable Functions) -- You want standalone workflows without Azure infrastructure (use pure WorkflowBuilder) -- The added complexity doesn't provide value +**Legitimate email:** +``` +Email sent: Hi, Thank you for the reminder about the sprint planning meeting tomorrow at 10 AM. I will be there. +``` ## Code Highlights -### Agent Registration +### Creating the Workflow ```python -app = AgentFunctionApp(agents=_create_agents(), enable_health_check=True) +workflow = ( + WorkflowBuilder() + .set_start_executor(spam_agent) + .add_switch_case_edge_group( + spam_agent, + [ + Case(condition=is_spam_detected, target=spam_handler), + Default(target=email_agent), + ], + ) + .add_edge(email_agent, email_sender) + .build() +) ``` -### Agent Retrieval in Orchestration +### Registering with AgentFunctionApp ```python -@app.orchestration_trigger(context_name="context") -def spam_detection_workflow_orchestration(context: DurableOrchestrationContext): - # Get agents from context - spam_agent = context.get_agent(SPAM_AGENT_NAME) - email_agent = context.get_agent(EMAIL_AGENT_NAME) - - # Build workflow graph - workflow = ( - WorkflowBuilder() - .set_start_executor(spam_agent) - .add_switch_case_edge_group(...) 
- .build() - ) +app = AgentFunctionApp(workflow=workflow, enable_health_check=True, enable_shared_state=False) ``` -### Mixed Execution Styles +### Executor Classes ```python -# Executor-based activity (no @activity_trigger needed) class SpamHandlerExecutor(Executor): @handler - async def handle_spam_result(self, agent_response, ctx): + async def handle_spam_result( + self, + agent_response: AgentExecutorResponse, + ctx: WorkflowContext[Never, str], + ) -> None: spam_result = SpamDetectionResult.model_validate_json(agent_response.agent_run_response.text) - message = f"Email marked as spam: {spam_result.reason}" - await ctx.yield_output(message) - -# Workflow execution follows the graph structure but runs procedurally -if spam_result.is_spam: - result = f"Email marked as spam: {spam_result.reason}" - return result + await ctx.yield_output(f"Email marked as spam: {spam_result.reason}") ``` -## Benefits of Hybrid Approach - -1. **Documentation** - WorkflowBuilder graph serves as living documentation -2. **Flexibility** - Mix declarative structure with procedural execution -3. **Scalability** - Leverage durable orchestration for production workloads -4. **Type Safety** - Workflow validation catches type mismatches -5. 
**Executor Pattern** - Modern class-based activity definitions instead of decorators - -## Limitations +## Standalone Mode (DevUI) -- WorkflowBuilder used for structure/documentation only (not native execution) -- Execution is still procedural with yield-based orchestration -- Requires understanding of both workflow and orchestration paradigms -- Executor classes defined but not automatically invoked by workflow engine +This sample also supports running standalone for local development: -## Next Steps +```python +# Change launch(durable=True) to launch(durable=False) in function_app.py +# Then run: +python function_app.py +``` -- Explore pure workflow execution without orchestration -- Add workflow visualization endpoints -- Implement workflow-native execution within durable context -- Add telemetry and monitoring integration +This starts the DevUI at `http://localhost:8094` for interactive testing. ## Related Samples -- `06_multi_agent_orchestration_conditionals` - Pure Durable Functions approach -- `09_workflow_shared_state` - Workflow with shared state +- `09_workflow_shared_state` - Workflow with SharedState for passing data between executors +- `06_multi_agent_orchestration_conditionals` - Manual Durable Functions orchestration with agents From d5735cb16bb69c57c2307503cf5835da4077fff6 Mon Sep 17 00:00:00 2001 From: Ahmed Muhsin Date: Fri, 19 Dec 2025 12:59:54 -0600 Subject: [PATCH 09/18] some more cleanup --- .../agent_framework_azurefunctions/_utils.py | 5 ++++- .../local.settings.json.sample | 2 +- .../09_workflow_shared_state/requirements.txt | 5 +---- .../10_workflow_no_shared_state/README.md | 8 -------- .../function_app.py | 13 ++++++------- .../10_workflow_no_shared_state/host.json | 18 +----------------- 6 files changed, 13 insertions(+), 38 deletions(-) diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_utils.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_utils.py index 2dcd3375e8..affde2237a 100644 --- 
a/python/packages/azurefunctions/agent_framework_azurefunctions/_utils.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_utils.py @@ -26,7 +26,6 @@ SharedState, WorkflowCheckpoint, WorkflowEvent, - WorkflowOutputEvent, ) from pydantic import BaseModel @@ -281,6 +280,8 @@ def deserialize_value(data: Any, type_registry: dict[str, type] | None = None) - module = importlib.import_module(module_name) target_type = getattr(module, type_name, None) except Exception: + # Ignore import errors - type may not be available in this context + # Will fall back to returning the raw dict below pass if target_type: @@ -292,6 +293,8 @@ def deserialize_value(data: Any, type_registry: dict[str, type] | None = None) - elif issubclass(target_type, BaseModel): return target_type(**clean_data) except Exception: + # Ignore reconstruction errors (e.g., missing fields, type mismatches) + # Will fall back to returning the raw dict below pass return data diff --git a/python/samples/getting_started/azure_functions/09_workflow_shared_state/local.settings.json.sample b/python/samples/getting_started/azure_functions/09_workflow_shared_state/local.settings.json.sample index 46708be91c..3d972041cb 100644 --- a/python/samples/getting_started/azure_functions/09_workflow_shared_state/local.settings.json.sample +++ b/python/samples/getting_started/azure_functions/09_workflow_shared_state/local.settings.json.sample @@ -2,7 +2,7 @@ "IsEncrypted": false, "Values": { "AzureWebJobsStorage": "UseDevelopmentStorage=true", - "MSSQL_CONNECTION_STRING": "Server=localhost,1435;Database=DurableDB;User Id=sa;Password=;TrustServerCertificate=True;", + "DURABLE_TASK_SCHEDULER_CONNECTION_STRING": "Endpoint=http://localhost:8080;TaskHub=default;Authentication=None", "FUNCTIONS_WORKER_RUNTIME": "python", "AZURE_OPENAI_ENDPOINT": "", "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME": "" diff --git a/python/samples/getting_started/azure_functions/09_workflow_shared_state/requirements.txt 
b/python/samples/getting_started/azure_functions/09_workflow_shared_state/requirements.txt index fe2d14bbae..5739f93aa3 100644 --- a/python/samples/getting_started/azure_functions/09_workflow_shared_state/requirements.txt +++ b/python/samples/getting_started/azure_functions/09_workflow_shared_state/requirements.txt @@ -1,6 +1,3 @@ agent-framework-azurefunctions -azure-functions -azure-functions-durable azure-identity -openai -agents-maf +agents-maf \ No newline at end of file diff --git a/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/README.md b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/README.md index 105012c280..f027e40596 100644 --- a/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/README.md +++ b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/README.md @@ -50,14 +50,6 @@ SpamDetectionAgent → [branch based on is_spam]: ``` 2. Configure `local.settings.json`: - ```json - { - "Values": { - "AZURE_OPENAI_ENDPOINT": "https://your-resource.openai.azure.com/", - "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME": "gpt-4o" - } - } - ``` 3. 
Install dependencies: ```bash diff --git a/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/function_app.py b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/function_app.py index f566ba429e..83a583a351 100644 --- a/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/function_app.py +++ b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/function_app.py @@ -20,7 +20,7 @@ import os from typing import Any, Dict -from anyio import Path +from pathlib import Path from agent_framework import ( AgentExecutorResponse, Case, @@ -192,15 +192,14 @@ def _create_workflow() -> Workflow: return workflow -def launch(durable: bool = True) -> None: - - app: AgentFunctionApp = None - workflow = None +def launch(durable: bool = True) -> AgentFunctionApp | None: + workflow: Workflow | None = None if durable: # Initialize app workflow = _create_workflow() app = AgentFunctionApp(workflow=workflow, enable_health_check=True, enable_shared_state=False) + return app else: # Launch the spam detection workflow in DevUI from agent_framework.devui import serve @@ -220,8 +219,8 @@ def launch(durable: bool = True) -> None: workflow = _create_workflow() serve(entities=[workflow], port=8094, auto_open=True) - - return app + return None + app = launch(durable=True) diff --git a/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/host.json b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/host.json index 4ef61f4578..b7e5ad1c0b 100644 --- a/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/host.json +++ b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/host.json @@ -1,23 +1,7 @@ { "version": "2.0", - "logging": { - "applicationInsights": { - "samplingSettings": { - "isEnabled": true, - "maxTelemetryItemsPerSecond": 20 - } - } - }, "extensionBundle": { - "id": 
"Microsoft.Azure.Functions.ExtensionBundle.Preview", + "id": "Microsoft.Azure.Functions.ExtensionBundle", "version": "[4.*, 5.0.0)" - }, - "extensions": { - "durableTask": { - "storageProvider": { - "type": "azureManaged", - "connectionStringName": "DURABLE_TASK_SCHEDULER_CONNECTION_STRING" - } - } } } From 16f1e622ab8986d72b24071ef6c7f8512d0bf596 Mon Sep 17 00:00:00 2001 From: Ahmed Muhsin Date: Fri, 19 Dec 2025 13:30:32 -0600 Subject: [PATCH 10/18] add unit tests --- .../packages/azurefunctions/tests/test_app.py | 107 ++++ .../azurefunctions/tests/test_shared_state.py | 301 ++++++++++++ .../azurefunctions/tests/test_utils.py | 463 ++++++++++++++++++ .../azurefunctions/tests/test_workflow.py | 397 +++++++++++++++ 4 files changed, 1268 insertions(+) create mode 100644 python/packages/azurefunctions/tests/test_shared_state.py create mode 100644 python/packages/azurefunctions/tests/test_utils.py create mode 100644 python/packages/azurefunctions/tests/test_workflow.py diff --git a/python/packages/azurefunctions/tests/test_app.py b/python/packages/azurefunctions/tests/test_app.py index 1fbfa57e39..54014a5941 100644 --- a/python/packages/azurefunctions/tests/test_app.py +++ b/python/packages/azurefunctions/tests/test_app.py @@ -1095,5 +1095,112 @@ def decorator(func): assert body["agents"][0]["mcp_tool_enabled"] is True +class TestAgentFunctionAppWorkflow: + """Test suite for AgentFunctionApp workflow support.""" + + def test_init_with_workflow_stores_workflow(self) -> None: + """Test that workflow is stored when provided.""" + mock_workflow = Mock() + mock_workflow.executors = {} + + with patch.object(AgentFunctionApp, "_setup_executor_activity"): + with patch.object(AgentFunctionApp, "_setup_workflow_orchestration"): + app = AgentFunctionApp(workflow=mock_workflow) + + assert app.workflow is mock_workflow + + def test_init_with_workflow_extracts_agents(self) -> None: + """Test that agents are extracted from workflow executors.""" + from agent_framework import 
AgentExecutor + + mock_agent = Mock() + mock_agent.name = "WorkflowAgent" + + mock_executor = Mock(spec=AgentExecutor) + mock_executor.agent = mock_agent + + mock_workflow = Mock() + mock_workflow.executors = {"WorkflowAgent": mock_executor} + + with patch.object(AgentFunctionApp, "_setup_executor_activity"): + with patch.object(AgentFunctionApp, "_setup_workflow_orchestration"): + with patch.object(AgentFunctionApp, "_setup_agent_functions"): + app = AgentFunctionApp(workflow=mock_workflow) + + assert "WorkflowAgent" in app.agents + + def test_init_with_workflow_calls_setup_methods(self) -> None: + """Test that workflow setup methods are called.""" + mock_workflow = Mock() + mock_workflow.executors = {} + + with patch.object(AgentFunctionApp, "_setup_executor_activity") as setup_exec: + with patch.object(AgentFunctionApp, "_setup_workflow_orchestration") as setup_orch: + AgentFunctionApp(workflow=mock_workflow) + + setup_exec.assert_called_once() + setup_orch.assert_called_once() + + def test_init_shared_state_enabled_by_default(self) -> None: + """Test that SharedState is enabled by default.""" + mock_workflow = Mock() + mock_workflow.executors = {} + + with patch.object(AgentFunctionApp, "_setup_executor_activity"): + with patch.object(AgentFunctionApp, "_setup_workflow_orchestration"): + app = AgentFunctionApp(workflow=mock_workflow) + + assert app.enable_shared_state is True + + def test_init_shared_state_can_be_disabled(self) -> None: + """Test that SharedState can be disabled.""" + mock_workflow = Mock() + mock_workflow.executors = {} + + with patch.object(AgentFunctionApp, "_setup_executor_activity"): + with patch.object(AgentFunctionApp, "_setup_workflow_orchestration"): + app = AgentFunctionApp(workflow=mock_workflow, enable_shared_state=False) + + assert app.enable_shared_state is False + + def test_init_without_workflow_does_not_call_workflow_setup(self) -> None: + """Test that workflow setup is not called when no workflow provided.""" + mock_agent = 
Mock() + mock_agent.name = "TestAgent" + + with patch.object(AgentFunctionApp, "_setup_executor_activity") as setup_exec: + with patch.object(AgentFunctionApp, "_setup_workflow_orchestration") as setup_orch: + AgentFunctionApp(agents=[mock_agent]) + + setup_exec.assert_not_called() + setup_orch.assert_not_called() + + def test_build_status_url(self) -> None: + """Test _build_status_url constructs correct URL.""" + mock_workflow = Mock() + mock_workflow.executors = {} + + with patch.object(AgentFunctionApp, "_setup_executor_activity"): + with patch.object(AgentFunctionApp, "_setup_workflow_orchestration"): + app = AgentFunctionApp(workflow=mock_workflow) + + url = app._build_status_url("http://localhost:7071/api/workflow/run", "instance-123") + + assert url == "http://localhost:7071/api/workflow/status/instance-123" + + def test_build_status_url_handles_trailing_slash(self) -> None: + """Test _build_status_url handles URLs without /api/ correctly.""" + mock_workflow = Mock() + mock_workflow.executors = {} + + with patch.object(AgentFunctionApp, "_setup_executor_activity"): + with patch.object(AgentFunctionApp, "_setup_workflow_orchestration"): + app = AgentFunctionApp(workflow=mock_workflow) + + url = app._build_status_url("http://localhost:7071/", "instance-456") + + assert "instance-456" in url + + if __name__ == "__main__": pytest.main([__file__, "-v", "--tb=short"]) diff --git a/python/packages/azurefunctions/tests/test_shared_state.py b/python/packages/azurefunctions/tests/test_shared_state.py new file mode 100644 index 0000000000..76e6722961 --- /dev/null +++ b/python/packages/azurefunctions/tests/test_shared_state.py @@ -0,0 +1,301 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +"""Unit tests for DurableSharedState and SharedState entity.""" + +from typing import Any +from unittest.mock import Mock + +import pytest +from azure.durable_functions import EntityId + +from agent_framework_azurefunctions._shared_state import ( + SHARED_STATE_ENTITY_NAME, + DurableSharedState, + SharedStateData, + create_shared_state_entity_function, +) + + +class TestSharedStateData: + """Test suite for SharedStateData dataclass.""" + + def test_default_initialization(self) -> None: + """Test default initialization creates empty state.""" + data = SharedStateData() + + assert data.state == {} + + def test_initialization_with_state(self) -> None: + """Test initialization with provided state.""" + data = SharedStateData(state={"key": "value"}) + + assert data.state == {"key": "value"} + + def test_to_dict(self) -> None: + """Test serialization to dictionary.""" + data = SharedStateData(state={"a": 1, "b": 2}) + + result = data.to_dict() + + assert result == {"state": {"a": 1, "b": 2}} + + def test_from_dict_with_none(self) -> None: + """Test deserialization from None.""" + result = SharedStateData.from_dict(None) + + assert result.state == {} + + def test_from_dict_with_empty_dict(self) -> None: + """Test deserialization from empty dict.""" + result = SharedStateData.from_dict({}) + + assert result.state == {} + + def test_from_dict_with_state(self) -> None: + """Test deserialization from dict with state.""" + result = SharedStateData.from_dict({"state": {"x": 10, "y": 20}}) + + assert result.state == {"x": 10, "y": 20} + + +class TestDurableSharedState: + """Test suite for DurableSharedState orchestration wrapper.""" + + @pytest.fixture + def mock_context(self) -> Mock: + """Create a mock DurableOrchestrationContext.""" + context = Mock() + context.call_entity = Mock(return_value="mocked_result") + return context + + @pytest.fixture + def shared_state(self, mock_context: Mock) -> DurableSharedState: + """Create a DurableSharedState instance for testing.""" + 
return DurableSharedState(mock_context, "test-session-123") + + def test_initialization(self, mock_context: Mock) -> None: + """Test DurableSharedState initialization.""" + state = DurableSharedState(mock_context, "my-session") + + assert state._context == mock_context + assert state._session_id == "my-session" + assert state._entity_id.name == SHARED_STATE_ENTITY_NAME + assert state._entity_id.key == "my-session" + + def test_entity_id_property(self, shared_state: DurableSharedState) -> None: + """Test entity_id property returns correct EntityId.""" + entity_id = shared_state.entity_id + + assert isinstance(entity_id, EntityId) + assert entity_id.name == SHARED_STATE_ENTITY_NAME + assert entity_id.key == "test-session-123" + + def test_get_generator_yields_entity_call(self, shared_state: DurableSharedState, mock_context: Mock) -> None: + """Test get() yields a call_entity operation.""" + gen = shared_state.get("my_key", default="default_val") + + # The generator should yield the entity call + yielded = next(gen) + + # Verify the call was made with correct parameters + mock_context.call_entity.assert_called_once_with( + shared_state._entity_id, "get", {"key": "my_key", "default": "default_val"} + ) + + def test_set_generator_yields_entity_call(self, shared_state: DurableSharedState, mock_context: Mock) -> None: + """Test set() yields a call_entity operation.""" + gen = shared_state.set("my_key", {"data": "value"}) + + # Consume the generator + yielded = next(gen) + + mock_context.call_entity.assert_called_once_with( + shared_state._entity_id, "set", {"key": "my_key", "value": {"data": "value"}} + ) + + def test_has_generator_yields_entity_call(self, shared_state: DurableSharedState, mock_context: Mock) -> None: + """Test has() yields a call_entity operation.""" + gen = shared_state.has("check_key") + + yielded = next(gen) + + mock_context.call_entity.assert_called_once_with(shared_state._entity_id, "has", {"key": "check_key"}) + + def 
test_delete_generator_yields_entity_call(self, shared_state: DurableSharedState, mock_context: Mock) -> None: + """Test delete() yields a call_entity operation.""" + gen = shared_state.delete("remove_key") + + yielded = next(gen) + + mock_context.call_entity.assert_called_once_with(shared_state._entity_id, "delete", {"key": "remove_key"}) + + def test_get_all_generator_yields_entity_call(self, shared_state: DurableSharedState, mock_context: Mock) -> None: + """Test get_all() yields a call_entity operation.""" + gen = shared_state.get_all() + + yielded = next(gen) + + mock_context.call_entity.assert_called_once_with(shared_state._entity_id, "get_all", None) + + def test_update_generator_yields_entity_call(self, shared_state: DurableSharedState, mock_context: Mock) -> None: + """Test update() yields a call_entity operation.""" + updates = {"key1": "val1", "key2": "val2"} + gen = shared_state.update(updates) + + yielded = next(gen) + + mock_context.call_entity.assert_called_once_with(shared_state._entity_id, "update", {"updates": updates}) + + def test_clear_generator_yields_entity_call(self, shared_state: DurableSharedState, mock_context: Mock) -> None: + """Test clear() yields a call_entity operation.""" + gen = shared_state.clear() + + yielded = next(gen) + + mock_context.call_entity.assert_called_once_with(shared_state._entity_id, "clear", None) + + +class TestSharedStateEntityFunction: + """Test suite for the SharedState entity function.""" + + @pytest.fixture + def entity_function(self): + """Create the entity function.""" + return create_shared_state_entity_function() + + @pytest.fixture + def mock_entity_context(self) -> Mock: + """Create a mock entity context.""" + context = Mock() + context.get_state = Mock(return_value={"state": {}}) + context.set_state = Mock() + context.set_result = Mock() + return context + + def test_get_operation_returns_value(self, entity_function, mock_entity_context: Mock) -> None: + """Test get operation returns the stored 
value.""" + mock_entity_context.get_state.return_value = {"state": {"my_key": "my_value"}} + mock_entity_context.operation_name = "get" + mock_entity_context.get_input.return_value = {"key": "my_key", "default": None} + + entity_function(mock_entity_context) + + mock_entity_context.set_result.assert_called_once_with("my_value") + + def test_get_operation_returns_default_when_key_missing( + self, entity_function, mock_entity_context: Mock + ) -> None: + """Test get operation returns default when key doesn't exist.""" + mock_entity_context.get_state.return_value = {"state": {}} + mock_entity_context.operation_name = "get" + mock_entity_context.get_input.return_value = {"key": "missing_key", "default": "fallback"} + + entity_function(mock_entity_context) + + mock_entity_context.set_result.assert_called_once_with("fallback") + + def test_set_operation_stores_value(self, entity_function, mock_entity_context: Mock) -> None: + """Test set operation stores a value.""" + mock_entity_context.get_state.return_value = {"state": {}} + mock_entity_context.operation_name = "set" + mock_entity_context.get_input.return_value = {"key": "new_key", "value": {"data": 123}} + + entity_function(mock_entity_context) + + mock_entity_context.set_state.assert_called_once() + saved_state = mock_entity_context.set_state.call_args[0][0] + assert saved_state["state"]["new_key"] == {"data": 123} + + def test_has_operation_returns_true_when_exists(self, entity_function, mock_entity_context: Mock) -> None: + """Test has operation returns True when key exists.""" + mock_entity_context.get_state.return_value = {"state": {"existing_key": "value"}} + mock_entity_context.operation_name = "has" + mock_entity_context.get_input.return_value = {"key": "existing_key"} + + entity_function(mock_entity_context) + + mock_entity_context.set_result.assert_called_once_with(True) + + def test_has_operation_returns_false_when_missing(self, entity_function, mock_entity_context: Mock) -> None: + """Test has operation 
returns False when key doesn't exist.""" + mock_entity_context.get_state.return_value = {"state": {}} + mock_entity_context.operation_name = "has" + mock_entity_context.get_input.return_value = {"key": "missing_key"} + + entity_function(mock_entity_context) + + mock_entity_context.set_result.assert_called_once_with(False) + + def test_delete_operation_removes_key(self, entity_function, mock_entity_context: Mock) -> None: + """Test delete operation removes a key and returns True.""" + mock_entity_context.get_state.return_value = {"state": {"to_delete": "value"}} + mock_entity_context.operation_name = "delete" + mock_entity_context.get_input.return_value = {"key": "to_delete"} + + entity_function(mock_entity_context) + + mock_entity_context.set_result.assert_called_once_with(True) + saved_state = mock_entity_context.set_state.call_args[0][0] + assert "to_delete" not in saved_state["state"] + + def test_delete_operation_returns_false_when_missing( + self, entity_function, mock_entity_context: Mock + ) -> None: + """Test delete operation returns False when key doesn't exist.""" + mock_entity_context.get_state.return_value = {"state": {}} + mock_entity_context.operation_name = "delete" + mock_entity_context.get_input.return_value = {"key": "nonexistent"} + + entity_function(mock_entity_context) + + mock_entity_context.set_result.assert_called_once_with(False) + mock_entity_context.set_state.assert_not_called() + + def test_get_all_operation_returns_all_state(self, entity_function, mock_entity_context: Mock) -> None: + """Test get_all operation returns complete state.""" + state_data = {"key1": "val1", "key2": "val2"} + mock_entity_context.get_state.return_value = {"state": state_data} + mock_entity_context.operation_name = "get_all" + mock_entity_context.get_input.return_value = None + + entity_function(mock_entity_context) + + mock_entity_context.set_result.assert_called_once() + result = mock_entity_context.set_result.call_args[0][0] + assert result == state_data + + 
def test_update_operation_merges_updates(self, entity_function, mock_entity_context: Mock) -> None: + """Test update operation merges multiple key-value pairs.""" + mock_entity_context.get_state.return_value = {"state": {"existing": "old"}} + mock_entity_context.operation_name = "update" + mock_entity_context.get_input.return_value = {"updates": {"new1": "val1", "new2": "val2"}} + + entity_function(mock_entity_context) + + saved_state = mock_entity_context.set_state.call_args[0][0] + assert saved_state["state"]["existing"] == "old" + assert saved_state["state"]["new1"] == "val1" + assert saved_state["state"]["new2"] == "val2" + + def test_clear_operation_removes_all_state(self, entity_function, mock_entity_context: Mock) -> None: + """Test clear operation removes all state.""" + mock_entity_context.get_state.return_value = {"state": {"key1": "val1", "key2": "val2"}} + mock_entity_context.operation_name = "clear" + mock_entity_context.get_input.return_value = None + + entity_function(mock_entity_context) + + saved_state = mock_entity_context.set_state.call_args[0][0] + assert saved_state["state"] == {} + + def test_unknown_operation_is_handled(self, entity_function, mock_entity_context: Mock) -> None: + """Test unknown operation doesn't crash.""" + mock_entity_context.get_state.return_value = {"state": {}} + mock_entity_context.operation_name = "unknown_op" + mock_entity_context.get_input.return_value = {} + + # Should not raise + entity_function(mock_entity_context) + + # No result should be set for unknown operations + mock_entity_context.set_result.assert_not_called() diff --git a/python/packages/azurefunctions/tests/test_utils.py b/python/packages/azurefunctions/tests/test_utils.py new file mode 100644 index 0000000000..85a6c388ba --- /dev/null +++ b/python/packages/azurefunctions/tests/test_utils.py @@ -0,0 +1,463 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +"""Unit tests for workflow utility functions.""" + +import asyncio +from dataclasses import dataclass +from typing import Any +from unittest.mock import Mock + +import pytest +from agent_framework import ( + AgentExecutorRequest, + AgentExecutorResponse, + AgentRunResponse, + ChatMessage, + Message, + WorkflowOutputEvent, +) +from pydantic import BaseModel + +from agent_framework_azurefunctions._utils import ( + CapturingRunnerContext, + deserialize_value, + reconstruct_agent_executor_request, + reconstruct_agent_executor_response, + reconstruct_message_for_handler, + serialize_message, +) + + +class TestCapturingRunnerContext: + """Test suite for CapturingRunnerContext.""" + + @pytest.fixture + def context(self) -> CapturingRunnerContext: + """Create a fresh CapturingRunnerContext for each test.""" + return CapturingRunnerContext() + + @pytest.mark.asyncio + async def test_send_message_captures_message(self, context: CapturingRunnerContext) -> None: + """Test that send_message captures messages correctly.""" + message = Message(data="test data", target_id="target_1", source_id="source_1") + + await context.send_message(message) + + messages = await context.drain_messages() + assert "source_1" in messages + assert len(messages["source_1"]) == 1 + assert messages["source_1"][0].data == "test data" + + @pytest.mark.asyncio + async def test_send_multiple_messages_groups_by_source(self, context: CapturingRunnerContext) -> None: + """Test that messages are grouped by source_id.""" + msg1 = Message(data="msg1", target_id="target", source_id="source_a") + msg2 = Message(data="msg2", target_id="target", source_id="source_a") + msg3 = Message(data="msg3", target_id="target", source_id="source_b") + + await context.send_message(msg1) + await context.send_message(msg2) + await context.send_message(msg3) + + messages = await context.drain_messages() + assert len(messages["source_a"]) == 2 + assert len(messages["source_b"]) == 1 + + @pytest.mark.asyncio + async def 
test_drain_messages_clears_messages(self, context: CapturingRunnerContext) -> None: + """Test that drain_messages clears the message store.""" + message = Message(data="test", target_id="t", source_id="s") + await context.send_message(message) + + await context.drain_messages() # First drain + messages = await context.drain_messages() # Second drain + + assert messages == {} + + @pytest.mark.asyncio + async def test_has_messages_returns_correct_status(self, context: CapturingRunnerContext) -> None: + """Test has_messages returns correct boolean.""" + assert await context.has_messages() is False + + await context.send_message(Message(data="test", target_id="t", source_id="s")) + + assert await context.has_messages() is True + + @pytest.mark.asyncio + async def test_add_event_queues_event(self, context: CapturingRunnerContext) -> None: + """Test that add_event queues events correctly.""" + event = WorkflowOutputEvent(data="output", source_executor_id="exec_1") + + await context.add_event(event) + + events = await context.drain_events() + assert len(events) == 1 + assert isinstance(events[0], WorkflowOutputEvent) + assert events[0].data == "output" + + @pytest.mark.asyncio + async def test_drain_events_clears_queue(self, context: CapturingRunnerContext) -> None: + """Test that drain_events clears the event queue.""" + await context.add_event(WorkflowOutputEvent(data="test", source_executor_id="e")) + + await context.drain_events() # First drain + events = await context.drain_events() # Second drain + + assert events == [] + + @pytest.mark.asyncio + async def test_has_events_returns_correct_status(self, context: CapturingRunnerContext) -> None: + """Test has_events returns correct boolean.""" + assert await context.has_events() is False + + await context.add_event(WorkflowOutputEvent(data="test", source_executor_id="e")) + + assert await context.has_events() is True + + @pytest.mark.asyncio + async def test_next_event_waits_for_event(self, context: 
CapturingRunnerContext) -> None: + """Test that next_event returns queued events.""" + event = WorkflowOutputEvent(data="waited", source_executor_id="e") + await context.add_event(event) + + result = await context.next_event() + + assert result.data == "waited" + + def test_has_checkpointing_returns_false(self, context: CapturingRunnerContext) -> None: + """Test that checkpointing is not supported.""" + assert context.has_checkpointing() is False + + def test_is_streaming_returns_false_by_default(self, context: CapturingRunnerContext) -> None: + """Test streaming is disabled by default.""" + assert context.is_streaming() is False + + def test_set_streaming(self, context: CapturingRunnerContext) -> None: + """Test setting streaming mode.""" + context.set_streaming(True) + assert context.is_streaming() is True + + context.set_streaming(False) + assert context.is_streaming() is False + + def test_set_workflow_id(self, context: CapturingRunnerContext) -> None: + """Test setting workflow ID.""" + context.set_workflow_id("workflow-123") + assert context._workflow_id == "workflow-123" + + @pytest.mark.asyncio + async def test_reset_for_new_run_clears_state(self, context: CapturingRunnerContext) -> None: + """Test that reset_for_new_run clears all state.""" + await context.send_message(Message(data="test", target_id="t", source_id="s")) + await context.add_event(WorkflowOutputEvent(data="event", source_executor_id="e")) + context.set_streaming(True) + + context.reset_for_new_run() + + assert await context.has_messages() is False + assert await context.has_events() is False + assert context.is_streaming() is False + + @pytest.mark.asyncio + async def test_create_checkpoint_raises_not_implemented(self, context: CapturingRunnerContext) -> None: + """Test that checkpointing methods raise NotImplementedError.""" + from agent_framework import SharedState + + with pytest.raises(NotImplementedError): + await context.create_checkpoint(SharedState(), 1) + + @pytest.mark.asyncio + 
async def test_load_checkpoint_raises_not_implemented(self, context: CapturingRunnerContext) -> None: + """Test that load_checkpoint raises NotImplementedError.""" + with pytest.raises(NotImplementedError): + await context.load_checkpoint("some-id") + + @pytest.mark.asyncio + async def test_apply_checkpoint_raises_not_implemented(self, context: CapturingRunnerContext) -> None: + """Test that apply_checkpoint raises NotImplementedError.""" + with pytest.raises(NotImplementedError): + await context.apply_checkpoint(Mock()) + + +class TestSerializeMessage: + """Test suite for serialize_message function.""" + + def test_serialize_none(self) -> None: + """Test serializing None.""" + assert serialize_message(None) is None + + def test_serialize_primitive_types(self) -> None: + """Test serializing primitive types.""" + assert serialize_message("hello") == "hello" + assert serialize_message(42) == 42 + assert serialize_message(3.14) == 3.14 + assert serialize_message(True) is True + + def test_serialize_list(self) -> None: + """Test serializing lists.""" + result = serialize_message([1, 2, 3]) + assert result == [1, 2, 3] + + def test_serialize_dict(self) -> None: + """Test serializing dicts.""" + result = serialize_message({"key": "value", "num": 42}) + assert result == {"key": "value", "num": 42} + + def test_serialize_dataclass(self) -> None: + """Test serializing dataclasses with type metadata.""" + + @dataclass + class TestData: + name: str + value: int + + data = TestData(name="test", value=123) + result = serialize_message(data) + + assert result["name"] == "test" + assert result["value"] == 123 + assert result["__type__"] == "TestData" + assert "__module__" in result + + def test_serialize_pydantic_model(self) -> None: + """Test serializing Pydantic models with type metadata.""" + + class TestModel(BaseModel): + title: str + count: int + + model = TestModel(title="Hello", count=5) + result = serialize_message(model) + + assert result["title"] == "Hello" + assert 
result["count"] == 5 + assert result["__type__"] == "TestModel" + assert "__module__" in result + + def test_serialize_nested_structures(self) -> None: + """Test serializing nested structures.""" + + @dataclass + class Inner: + x: int + + @dataclass + class Outer: + inner: Inner + items: list[int] + + outer = Outer(inner=Inner(x=10), items=[1, 2, 3]) + result = serialize_message(outer) + + assert result["__type__"] == "Outer" + # Nested dataclass is serialized via asdict, which doesn't add __type__ recursively + assert result["inner"]["x"] == 10 + assert result["items"] == [1, 2, 3] + + def test_serialize_object_with_to_dict(self) -> None: + """Test serializing objects with to_dict method.""" + message = ChatMessage(role="user", text="Hello") + result = serialize_message(message) + + # ChatMessage has to_dict() method which returns a specific structure + assert isinstance(result, dict) + assert "contents" in result # ChatMessage uses contents structure + + +class TestDeserializeValue: + """Test suite for deserialize_value function.""" + + def test_deserialize_non_dict_returns_original(self) -> None: + """Test that non-dict values are returned as-is.""" + assert deserialize_value("string") == "string" + assert deserialize_value(42) == 42 + assert deserialize_value([1, 2, 3]) == [1, 2, 3] + + def test_deserialize_dict_without_type_returns_original(self) -> None: + """Test that dicts without type metadata are returned as-is.""" + data = {"key": "value", "num": 42} + result = deserialize_value(data) + assert result == data + + def test_deserialize_agent_executor_request(self) -> None: + """Test deserializing AgentExecutorRequest.""" + data = { + "messages": [{"type": "chat_message", "role": "user", "contents": [{"type": "text", "text": "Hello"}]}], + "should_respond": True, + } + + result = deserialize_value(data) + + assert isinstance(result, AgentExecutorRequest) + assert len(result.messages) == 1 + assert result.should_respond is True + + def 
test_deserialize_agent_executor_response(self) -> None: + """Test deserializing AgentExecutorResponse.""" + data = { + "executor_id": "test_exec", + "agent_run_response": { + "type": "agent_run_response", + "messages": [ + {"type": "chat_message", "role": "assistant", "contents": [{"type": "text", "text": "Hi there"}]} + ], + }, + } + + result = deserialize_value(data) + + assert isinstance(result, AgentExecutorResponse) + assert result.executor_id == "test_exec" + + def test_deserialize_with_type_registry(self) -> None: + """Test deserializing with type registry.""" + + @dataclass + class CustomType: + name: str + + data = {"name": "test", "__type__": "CustomType"} + result = deserialize_value(data, type_registry={"CustomType": CustomType}) + + assert isinstance(result, CustomType) + assert result.name == "test" + + +class TestReconstructAgentExecutorRequest: + """Test suite for reconstruct_agent_executor_request function.""" + + def test_reconstruct_with_chat_messages(self) -> None: + """Test reconstructing request with ChatMessage dicts.""" + data = { + "messages": [ + {"type": "chat_message", "role": "user", "contents": [{"type": "text", "text": "Hello"}]}, + {"type": "chat_message", "role": "assistant", "contents": [{"type": "text", "text": "Hi"}]}, + ], + "should_respond": True, + } + + result = reconstruct_agent_executor_request(data) + + assert isinstance(result, AgentExecutorRequest) + assert len(result.messages) == 2 + assert result.should_respond is True + + def test_reconstruct_defaults_should_respond_to_true(self) -> None: + """Test that should_respond defaults to True.""" + data = {"messages": []} + + result = reconstruct_agent_executor_request(data) + + assert result.should_respond is True + + +class TestReconstructAgentExecutorResponse: + """Test suite for reconstruct_agent_executor_response function.""" + + def test_reconstruct_with_agent_run_response(self) -> None: + """Test reconstructing response with agent_run_response.""" + data = { + 
"executor_id": "my_executor", + "agent_run_response": { + "type": "agent_run_response", + "messages": [{"type": "chat_message", "role": "assistant", "contents": [{"type": "text", "text": "Response"}]}], + }, + "full_conversation": [], + } + + result = reconstruct_agent_executor_response(data) + + assert isinstance(result, AgentExecutorResponse) + assert result.executor_id == "my_executor" + assert isinstance(result.agent_run_response, AgentRunResponse) + + def test_reconstruct_with_full_conversation(self) -> None: + """Test reconstructing response with full_conversation.""" + data = { + "executor_id": "exec", + "agent_run_response": {"type": "agent_run_response", "messages": []}, + "full_conversation": [ + {"type": "chat_message", "role": "user", "contents": [{"type": "text", "text": "Q"}]}, + {"type": "chat_message", "role": "assistant", "contents": [{"type": "text", "text": "A"}]}, + ], + } + + result = reconstruct_agent_executor_response(data) + + assert result.full_conversation is not None + assert len(result.full_conversation) == 2 + + +class TestReconstructMessageForHandler: + """Test suite for reconstruct_message_for_handler function.""" + + def test_reconstruct_non_dict_returns_original(self) -> None: + """Test that non-dict messages are returned as-is.""" + assert reconstruct_message_for_handler("string", []) == "string" + assert reconstruct_message_for_handler(42, []) == 42 + + def test_reconstruct_agent_executor_response(self) -> None: + """Test reconstructing AgentExecutorResponse.""" + data = { + "executor_id": "exec", + "agent_run_response": {"type": "agent_run_response", "messages": []}, + } + + result = reconstruct_message_for_handler(data, [AgentExecutorResponse]) + + assert isinstance(result, AgentExecutorResponse) + + def test_reconstruct_agent_executor_request(self) -> None: + """Test reconstructing AgentExecutorRequest.""" + data = { + "messages": [{"type": "chat_message", "role": "user", "contents": [{"type": "text", "text": "Hi"}]}], + 
"should_respond": True, + } + + result = reconstruct_message_for_handler(data, [AgentExecutorRequest]) + + assert isinstance(result, AgentExecutorRequest) + + def test_reconstruct_with_type_metadata(self) -> None: + """Test reconstructing using __type__ metadata.""" + + @dataclass + class CustomMsg: + content: str + + # Serialize includes type metadata + serialized = serialize_message(CustomMsg(content="test")) + + result = reconstruct_message_for_handler(serialized, [CustomMsg]) + + assert isinstance(result, CustomMsg) + assert result.content == "test" + + def test_reconstruct_matches_dataclass_fields(self) -> None: + """Test reconstruction by matching dataclass field names.""" + + @dataclass + class MyData: + field_a: str + field_b: int + + data = {"field_a": "hello", "field_b": 42} + + result = reconstruct_message_for_handler(data, [MyData]) + + assert isinstance(result, MyData) + assert result.field_a == "hello" + assert result.field_b == 42 + + def test_reconstruct_returns_original_if_no_match(self) -> None: + """Test that original dict is returned if no type matches.""" + + @dataclass + class UnrelatedType: + completely_different_field: str + + data = {"some_key": "some_value"} + + result = reconstruct_message_for_handler(data, [UnrelatedType]) + + assert result == data diff --git a/python/packages/azurefunctions/tests/test_workflow.py b/python/packages/azurefunctions/tests/test_workflow.py new file mode 100644 index 0000000000..fef583d1c2 --- /dev/null +++ b/python/packages/azurefunctions/tests/test_workflow.py @@ -0,0 +1,397 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +"""Unit tests for workflow orchestration functions.""" + +import json +from dataclasses import dataclass +from typing import Any +from unittest.mock import Mock, patch + +import pytest +from agent_framework import ( + AgentExecutorRequest, + AgentExecutorResponse, + AgentRunResponse, + ChatMessage, +) +from agent_framework._workflows._edge import ( + FanInEdgeGroup, + FanOutEdgeGroup, + SingleEdgeGroup, + SwitchCaseEdgeGroup, + SwitchCaseEdgeGroupCase, + SwitchCaseEdgeGroupDefault, +) + +from agent_framework_azurefunctions._workflow import ( + _extract_message_content, + _extract_message_content_from_dict, + build_agent_executor_response, + route_message_through_edge_groups, +) + + +class TestRouteMessageThroughEdgeGroups: + """Test suite for route_message_through_edge_groups function.""" + + def test_single_edge_group_routes_when_condition_matches(self) -> None: + """Test SingleEdgeGroup routes when condition is satisfied.""" + group = SingleEdgeGroup(source_id="src", target_id="tgt", condition=lambda m: True) + + targets = route_message_through_edge_groups([group], "src", "any message") + + assert targets == ["tgt"] + + def test_single_edge_group_does_not_route_when_condition_fails(self) -> None: + """Test SingleEdgeGroup does not route when condition fails.""" + group = SingleEdgeGroup(source_id="src", target_id="tgt", condition=lambda m: False) + + targets = route_message_through_edge_groups([group], "src", "any message") + + assert targets == [] + + def test_single_edge_group_ignores_different_source(self) -> None: + """Test SingleEdgeGroup ignores messages from different sources.""" + group = SingleEdgeGroup(source_id="src", target_id="tgt", condition=lambda m: True) + + targets = route_message_through_edge_groups([group], "other_src", "any message") + + assert targets == [] + + def test_switch_case_with_selection_func(self) -> None: + """Test SwitchCaseEdgeGroup uses selection_func.""" + + def select_first_target(msg: Any, targets: list[str]) -> 
list[str]: + return [targets[0]] + + group = SwitchCaseEdgeGroup( + source_id="src", + cases=[ + SwitchCaseEdgeGroupCase(condition=lambda m: True, target_id="target_a"), + SwitchCaseEdgeGroupDefault(target_id="target_b"), + ], + ) + # Manually set the selection function + group._selection_func = select_first_target + + targets = route_message_through_edge_groups([group], "src", "test") + + assert targets == ["target_a"] + + def test_switch_case_without_selection_func_broadcasts(self) -> None: + """Test SwitchCaseEdgeGroup without selection_func broadcasts to all.""" + group = SwitchCaseEdgeGroup( + source_id="src", + cases=[ + SwitchCaseEdgeGroupCase(condition=lambda m: True, target_id="target_a"), + SwitchCaseEdgeGroupDefault(target_id="target_b"), + ], + ) + group._selection_func = None + + targets = route_message_through_edge_groups([group], "src", "test") + + assert set(targets) == {"target_a", "target_b"} + + def test_fan_out_with_selection_func(self) -> None: + """Test FanOutEdgeGroup uses selection_func.""" + + def select_all(msg: Any, targets: list[str]) -> list[str]: + return targets + + group = FanOutEdgeGroup( + source_id="src", + target_ids=["fan_a", "fan_b", "fan_c"], + selection_func=select_all, + ) + + targets = route_message_through_edge_groups([group], "src", "broadcast") + + assert set(targets) == {"fan_a", "fan_b", "fan_c"} + + def test_fan_in_is_not_routed_directly(self) -> None: + """Test FanInEdgeGroup is handled separately (not routed here).""" + group = FanInEdgeGroup( + source_ids=["src_a", "src_b"], + target_id="aggregator", + ) + + # Fan-in should not add targets through this function + targets = route_message_through_edge_groups([group], "src_a", "message") + + assert targets == [] + + def test_multiple_edge_groups_aggregated(self) -> None: + """Test that targets from multiple edge groups are aggregated.""" + group1 = SingleEdgeGroup(source_id="src", target_id="t1", condition=lambda m: True) + group2 = SingleEdgeGroup(source_id="src", 
target_id="t2", condition=lambda m: True) + + targets = route_message_through_edge_groups([group1, group2], "src", "msg") + + assert set(targets) == {"t1", "t2"} + + +class TestBuildAgentExecutorResponse: + """Test suite for build_agent_executor_response function.""" + + def test_builds_response_with_text(self) -> None: + """Test building response with plain text.""" + response = build_agent_executor_response( + executor_id="my_executor", + response_text="Hello, world!", + structured_response=None, + previous_message="User input", + ) + + assert response.executor_id == "my_executor" + assert response.agent_run_response.text == "Hello, world!" + assert len(response.full_conversation) == 2 # User + Assistant + + def test_builds_response_with_structured_response(self) -> None: + """Test building response with structured JSON response.""" + structured = {"answer": 42, "reason": "because"} + + response = build_agent_executor_response( + executor_id="calc", + response_text="Original text", + structured_response=structured, + previous_message="Calculate", + ) + + # Structured response overrides text + assert response.agent_run_response.text == json.dumps(structured) + + def test_conversation_includes_previous_string_message(self) -> None: + """Test that string previous_message is included in conversation.""" + response = build_agent_executor_response( + executor_id="exec", + response_text="Response", + structured_response=None, + previous_message="User said this", + ) + + assert len(response.full_conversation) == 2 + assert response.full_conversation[0].role.value == "user" + assert response.full_conversation[0].text == "User said this" + assert response.full_conversation[1].role.value == "assistant" + + def test_conversation_extends_previous_agent_executor_response(self) -> None: + """Test that previous AgentExecutorResponse's conversation is extended.""" + # Create a previous response with conversation history + previous = AgentExecutorResponse( + executor_id="prev", + 
agent_run_response=AgentRunResponse(messages=[ChatMessage(role="assistant", text="Previous")]), + full_conversation=[ + ChatMessage(role="user", text="First"), + ChatMessage(role="assistant", text="Previous"), + ], + ) + + response = build_agent_executor_response( + executor_id="current", + response_text="Current response", + structured_response=None, + previous_message=previous, + ) + + # Should have 3 messages: First + Previous + Current + assert len(response.full_conversation) == 3 + assert response.full_conversation[0].text == "First" + assert response.full_conversation[1].text == "Previous" + assert response.full_conversation[2].text == "Current response" + + +class TestExtractMessageContent: + """Test suite for _extract_message_content function.""" + + def test_extract_from_string(self) -> None: + """Test extracting content from plain string.""" + result = _extract_message_content("Hello, world!") + + assert result == "Hello, world!" + + def test_extract_from_agent_executor_response_with_text(self) -> None: + """Test extracting from AgentExecutorResponse with text.""" + response = AgentExecutorResponse( + executor_id="exec", + agent_run_response=AgentRunResponse(messages=[ChatMessage(role="assistant", text="Response text")]), + ) + + result = _extract_message_content(response) + + assert result == "Response text" + + def test_extract_from_agent_executor_response_with_messages(self) -> None: + """Test extracting from AgentExecutorResponse with messages.""" + response = AgentExecutorResponse( + executor_id="exec", + agent_run_response=AgentRunResponse( + messages=[ + ChatMessage(role="user", text="First"), + ChatMessage(role="assistant", text="Last message"), + ] + ), + ) + + result = _extract_message_content(response) + + # AgentRunResponse.text concatenates all message texts + assert result == "FirstLast message" + + def test_extract_from_agent_executor_request(self) -> None: + """Test extracting from AgentExecutorRequest.""" + request = AgentExecutorRequest( 
+ messages=[ + ChatMessage(role="user", text="First"), + ChatMessage(role="user", text="Last request"), + ] + ) + + result = _extract_message_content(request) + + assert result == "Last request" + + def test_extract_from_dict_agent_executor_request(self) -> None: + """Test extracting from serialized AgentExecutorRequest dict.""" + msg_dict = { + "messages": [ + { + "type": "chat_message", + "contents": [{"type": "text", "text": "Hello from dict"}], + } + ] + } + + result = _extract_message_content(msg_dict) + + assert result == "Hello from dict" + + def test_extract_returns_empty_for_unknown_type(self) -> None: + """Test that unknown types return empty string.""" + result = _extract_message_content(12345) + + assert result == "" + + +class TestExtractMessageContentFromDict: + """Test suite for _extract_message_content_from_dict function.""" + + def test_extract_from_messages_with_contents(self) -> None: + """Test extracting from messages with contents structure.""" + msg_dict = { + "messages": [ + { + "contents": [{"type": "text", "text": "Content text"}] + } + ] + } + + result = _extract_message_content_from_dict(msg_dict) + + assert result == "Content text" + + def test_extract_from_messages_with_direct_text(self) -> None: + """Test extracting from messages with direct text field.""" + msg_dict = { + "messages": [{"text": "Direct text"}] + } + + result = _extract_message_content_from_dict(msg_dict) + + assert result == "Direct text" + + def test_extract_from_agent_run_response(self) -> None: + """Test extracting from agent_run_response dict.""" + msg_dict = { + "agent_run_response": {"text": "Response text"} + } + + result = _extract_message_content_from_dict(msg_dict) + + assert result == "Response text" + + def test_extract_from_agent_run_response_with_messages(self) -> None: + """Test extracting from agent_run_response with messages.""" + msg_dict = { + "agent_run_response": { + "messages": [ + {"contents": [{"type": "text", "text": "Nested content"}]} + ] + } 
+ } + + result = _extract_message_content_from_dict(msg_dict) + + assert result == "Nested content" + + def test_extract_returns_empty_for_empty_dict(self) -> None: + """Test that empty dict returns empty string.""" + result = _extract_message_content_from_dict({}) + + assert result == "" + + def test_extract_returns_empty_for_empty_messages(self) -> None: + """Test that empty messages list returns empty string.""" + result = _extract_message_content_from_dict({"messages": []}) + + assert result == "" + + +class TestEdgeGroupIntegration: + """Integration tests for edge group routing with realistic scenarios.""" + + def test_conditional_routing_by_message_type(self) -> None: + """Test routing based on message content/type.""" + + @dataclass + class SpamResult: + is_spam: bool + reason: str + + def is_spam_condition(msg: Any) -> bool: + if isinstance(msg, SpamResult): + return msg.is_spam + return False + + def is_not_spam_condition(msg: Any) -> bool: + if isinstance(msg, SpamResult): + return not msg.is_spam + return False + + spam_group = SingleEdgeGroup( + source_id="detector", + target_id="spam_handler", + condition=is_spam_condition, + ) + legit_group = SingleEdgeGroup( + source_id="detector", + target_id="email_handler", + condition=is_not_spam_condition, + ) + + # Test spam message + spam_msg = SpamResult(is_spam=True, reason="Suspicious content") + targets = route_message_through_edge_groups([spam_group, legit_group], "detector", spam_msg) + assert targets == ["spam_handler"] + + # Test legitimate message + legit_msg = SpamResult(is_spam=False, reason="Clean") + targets = route_message_through_edge_groups([spam_group, legit_group], "detector", legit_msg) + assert targets == ["email_handler"] + + def test_fan_out_to_multiple_workers(self) -> None: + """Test fan-out to multiple parallel workers.""" + + def select_all_workers(msg: Any, targets: list[str]) -> list[str]: + return targets + + group = FanOutEdgeGroup( + source_id="coordinator", + 
target_ids=["worker_1", "worker_2", "worker_3"], + selection_func=select_all_workers, + ) + + targets = route_message_through_edge_groups([group], "coordinator", {"task": "process"}) + + assert len(targets) == 3 + assert set(targets) == {"worker_1", "worker_2", "worker_3"} From 8482cf532f4caae490f22ef6b4a2b00e2e6b6c7c Mon Sep 17 00:00:00 2001 From: Ahmed Muhsin Date: Wed, 14 Jan 2026 16:25:41 -0600 Subject: [PATCH 11/18] add missing app setting --- .../azure_functions/09_workflow_shared_state/host.json | 5 +++++ .../09_workflow_shared_state/local.settings.json.sample | 1 + .../azure_functions/10_workflow_no_shared_state/host.json | 5 +++++ .../10_workflow_no_shared_state/local.settings.json.sample | 1 + 4 files changed, 12 insertions(+) diff --git a/python/samples/getting_started/azure_functions/09_workflow_shared_state/host.json b/python/samples/getting_started/azure_functions/09_workflow_shared_state/host.json index b7e5ad1c0b..9e7fd873dd 100644 --- a/python/samples/getting_started/azure_functions/09_workflow_shared_state/host.json +++ b/python/samples/getting_started/azure_functions/09_workflow_shared_state/host.json @@ -3,5 +3,10 @@ "extensionBundle": { "id": "Microsoft.Azure.Functions.ExtensionBundle", "version": "[4.*, 5.0.0)" + }, + "extensions": { + "durableTask": { + "hubName": "%TASKHUB_NAME%" + } } } diff --git a/python/samples/getting_started/azure_functions/09_workflow_shared_state/local.settings.json.sample b/python/samples/getting_started/azure_functions/09_workflow_shared_state/local.settings.json.sample index 3d972041cb..69c08a3386 100644 --- a/python/samples/getting_started/azure_functions/09_workflow_shared_state/local.settings.json.sample +++ b/python/samples/getting_started/azure_functions/09_workflow_shared_state/local.settings.json.sample @@ -3,6 +3,7 @@ "Values": { "AzureWebJobsStorage": "UseDevelopmentStorage=true", "DURABLE_TASK_SCHEDULER_CONNECTION_STRING": "Endpoint=http://localhost:8080;TaskHub=default;Authentication=None", + 
"TASKHUB_NAME": "default", "FUNCTIONS_WORKER_RUNTIME": "python", "AZURE_OPENAI_ENDPOINT": "", "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME": "" diff --git a/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/host.json b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/host.json index b7e5ad1c0b..9e7fd873dd 100644 --- a/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/host.json +++ b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/host.json @@ -3,5 +3,10 @@ "extensionBundle": { "id": "Microsoft.Azure.Functions.ExtensionBundle", "version": "[4.*, 5.0.0)" + }, + "extensions": { + "durableTask": { + "hubName": "%TASKHUB_NAME%" + } } } diff --git a/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/local.settings.json.sample b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/local.settings.json.sample index 4c43714b01..30edea6c08 100644 --- a/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/local.settings.json.sample +++ b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/local.settings.json.sample @@ -4,6 +4,7 @@ "FUNCTIONS_WORKER_RUNTIME": "python", "AzureWebJobsStorage": "UseDevelopmentStorage=true", "DURABLE_TASK_SCHEDULER_CONNECTION_STRING": "Endpoint=http://localhost:8080;TaskHub=default;Authentication=None", + "TASKHUB_NAME": "default", "AZURE_OPENAI_ENDPOINT": "https://.openai.azure.com/", "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME": "", "AZURE_OPENAI_API_KEY": "" From 67e0a302ea212cc2c6d4d6be0d266258383ed4e4 Mon Sep 17 00:00:00 2001 From: Ahmed Muhsin Date: Thu, 15 Jan 2026 14:04:26 -0600 Subject: [PATCH 12/18] update host.json to use dts --- .../azure_functions/09_workflow_shared_state/host.json | 6 +++++- .../azure_functions/10_workflow_no_shared_state/host.json | 6 +++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git 
a/python/samples/getting_started/azure_functions/09_workflow_shared_state/host.json b/python/samples/getting_started/azure_functions/09_workflow_shared_state/host.json index 9e7fd873dd..292562af8e 100644 --- a/python/samples/getting_started/azure_functions/09_workflow_shared_state/host.json +++ b/python/samples/getting_started/azure_functions/09_workflow_shared_state/host.json @@ -6,7 +6,11 @@ }, "extensions": { "durableTask": { - "hubName": "%TASKHUB_NAME%" + "hubName": "%TASKHUB_NAME%", + "storageProvider": { + "type": "AzureManaged", + "connectionStringName": "DURABLE_TASK_SCHEDULER_CONNECTION_STRING" + } } } } diff --git a/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/host.json b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/host.json index 9e7fd873dd..292562af8e 100644 --- a/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/host.json +++ b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/host.json @@ -6,7 +6,11 @@ }, "extensions": { "durableTask": { - "hubName": "%TASKHUB_NAME%" + "hubName": "%TASKHUB_NAME%", + "storageProvider": { + "type": "AzureManaged", + "connectionStringName": "DURABLE_TASK_SCHEDULER_CONNECTION_STRING" + } } } } From 787203db702780dd51eebed7b36146fb55baa795 Mon Sep 17 00:00:00 2001 From: Ahmed Muhsin Date: Fri, 16 Jan 2026 11:06:11 -0600 Subject: [PATCH 13/18] add concurrency support --- .../agent_framework_azurefunctions/_utils.py | 44 +- .../_workflow.py | 340 ++++++++---- .../11_workflow_parallel/.env.template | 14 + .../11_workflow_parallel/.gitignore | 4 + .../11_workflow_parallel/README.md | 193 +++++++ .../11_workflow_parallel/demo.http | 29 + .../11_workflow_parallel/function_app.py | 525 ++++++++++++++++++ .../11_workflow_parallel/host.json | 16 + .../local.settings.json.sample | 12 + .../11_workflow_parallel/requirements.txt | 3 + 10 files changed, 1057 insertions(+), 123 deletions(-) create mode 100644 
python/samples/getting_started/azure_functions/11_workflow_parallel/.env.template create mode 100644 python/samples/getting_started/azure_functions/11_workflow_parallel/.gitignore create mode 100644 python/samples/getting_started/azure_functions/11_workflow_parallel/README.md create mode 100644 python/samples/getting_started/azure_functions/11_workflow_parallel/demo.http create mode 100644 python/samples/getting_started/azure_functions/11_workflow_parallel/function_app.py create mode 100644 python/samples/getting_started/azure_functions/11_workflow_parallel/host.json create mode 100644 python/samples/getting_started/azure_functions/11_workflow_parallel/local.settings.json.sample create mode 100644 python/samples/getting_started/azure_functions/11_workflow_parallel/requirements.txt diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_utils.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_utils.py index 4731a7551b..9e16609c27 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/_utils.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_utils.py @@ -11,7 +11,8 @@ import asyncio import logging from dataclasses import asdict, fields, is_dataclass -from typing import Any +import types +from typing import Any, Union, get_args, get_origin from agent_framework import ( AgentExecutorRequest, @@ -330,13 +331,50 @@ def reconstruct_agent_executor_response(data: dict[str, Any]) -> AgentExecutorRe def reconstruct_message_for_handler(data: Any, input_types: list[type[Any]]) -> Any: """Attempt to reconstruct a message to match one of the handler's expected types. 
+ Handles: + - Dicts with __type__ metadata -> reconstructs to original dataclass/Pydantic model + - Lists (from fan-in) -> recursively reconstructs each item + - Union types (T | U) -> tries each type in the union + - AgentExecutorRequest/Response -> special handling for nested ChatMessage objects + Args: - data: The serialized message data (could be dict, str, etc.) + data: The serialized message data (could be dict, str, list, etc.) input_types: List of message types the executor can accept Returns: Reconstructed message if possible, otherwise the original data """ + # Flatten union types in input_types (e.g., T | U becomes [T, U]) + flattened_types: list[type[Any]] = [] + for input_type in input_types: + origin = get_origin(input_type) + # Handle both typing.Union and types.UnionType (Python 3.10+ | syntax) + if origin is Union or isinstance(input_type, types.UnionType): + # This is a Union type (T | U), extract the component types + flattened_types.extend(get_args(input_type)) + else: + flattened_types.append(input_type) + + # Handle lists (fan-in aggregation) - recursively reconstruct each item + if isinstance(data, list): + # Extract element types from list[T] annotations in input_types if possible + element_types: list[type[Any]] = [] + for input_type in input_types: + origin = get_origin(input_type) + if origin is list: + args = get_args(input_type) + if args: + # Handle union types inside list[T | U] + for arg in args: + arg_origin = get_origin(arg) + if arg_origin is Union or isinstance(arg, types.UnionType): + element_types.extend(get_args(arg)) + else: + element_types.append(arg) + + # Recursively reconstruct each item in the list + return [reconstruct_message_for_handler(item, element_types or flattened_types) for item in data] + if not isinstance(data, dict): return data @@ -363,7 +401,7 @@ def reconstruct_message_for_handler(data: Any, input_types: list[type[Any]]) -> # Try to match against input types by checking dict keys vs dataclass fields # 
Filter out metadata keys when comparing data_keys = {k for k in data.keys() if not k.startswith("__")} - for msg_type in input_types: + for msg_type in flattened_types: if is_dataclass(msg_type): # Check if the dict keys match the dataclass fields field_names = {f.name for f in fields(msg_type)} diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py index 9f440f7e87..088dd68e0b 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py @@ -154,10 +154,20 @@ def run_workflow_orchestrator( Supports: - SingleEdgeGroup: Direct 1:1 routing with optional condition - SwitchCaseEdgeGroup: First matching condition wins - - FanOutEdgeGroup: Broadcast to multiple targets (with optional selection) + - FanOutEdgeGroup: Broadcast to multiple targets - **executed in parallel** - FanInEdgeGroup: Aggregates messages from multiple sources before delivery - SharedState: Durable shared state accessible to all executors + Execution model: + - Different executors pending in the same iteration run in parallel + - Agent executors (entities): Different agents run in parallel; multiple messages + to the SAME agent are processed sequentially to maintain conversation coherence + - Standard executors (activities): All batched and executed in parallel using task_all() + + Note: When running in parallel with shared state, updates are applied + in order after all tasks complete. This may cause conflicts if multiple + executors modify the same state keys. 
+ Args: context: The Durable Functions orchestration context workflow: The MAF Workflow instance to execute @@ -189,146 +199,236 @@ def run_workflow_orchestrator( logger.debug("Orchestrator iteration %d", iteration) next_pending_messages: dict[str, list[tuple[Any, str]]] = {} + # Separate executors into agents (entities) and standard executors (activities) + # Agents must be processed sequentially due to entity semantics + # Activities can be processed in parallel + agent_executor_tasks: list[tuple[str, Any, str]] = [] # (executor_id, message, source_id) + activity_executor_tasks: list[tuple[str, Any, str]] = [] # (executor_id, message, source_id) + for executor_id, messages_with_sources in pending_messages.items(): - logger.debug("Processing executor: %s with %d messages", executor_id, len(messages_with_sources)) executor = workflow.executors[executor_id] - for message, source_executor_id in messages_with_sources: - output_message: Any | None = None - result: dict[str, Any] | None = None # Activity result (only set for standard executors) - - # Execute if isinstance(executor, AgentExecutor): - # Durable Agent Execution - # Use executor.id which equals agent.name (set during AgentExecutor construction) - agent_name = executor.id - logger.debug("Calling Durable Entity: %s", agent_name) - - # Extract message content + agent_executor_tasks.append((executor_id, message, source_executor_id)) + else: + activity_executor_tasks.append((executor_id, message, source_executor_id)) + + # Results collected from all executor types + # Structure: list of (executor_id, output_message, result_dict_or_none) + all_results: list[tuple[str, Any | None, dict[str, Any] | None]] = [] + + # Process Agent Executors (entities) in parallel when they are different agents + # Messages to the SAME agent are processed sequentially to maintain conversation coherence + if agent_executor_tasks: + # Group tasks by executor_id (agent_name) - same agent needs sequential processing + agent_groups: 
dict[str, list[tuple[str, Any, str]]] = {} + for executor_id, message, source_executor_id in agent_executor_tasks: + if executor_id not in agent_groups: + agent_groups[executor_id] = [] + agent_groups[executor_id].append((executor_id, message, source_executor_id)) + + # Process groups - if only one message per agent, can run all in parallel + # If multiple messages to same agent, need sequential within that agent + + # First pass: create tasks for the first message of each agent (parallel) + agent_tasks = [] + agent_task_metadata = [] # (executor_id, message, source_executor_id, remaining_messages) + + for executor_id, messages_list in agent_groups.items(): + first_msg = messages_list[0] + remaining = messages_list[1:] + + message = first_msg[1] + source_executor_id = first_msg[2] + + agent_name = executor_id + logger.debug("Preparing agent task for: %s", agent_name) + + message_content = _extract_message_content(message) + session_id = AgentSessionId(name=agent_name, key=context.instance_id) + thread = DurableAgentThread(session_id=session_id) + + az_executor = AzureFunctionsAgentExecutor(context) + agent = DurableAIAgent(az_executor, agent_name) + task = agent.run(message_content, thread=thread) + + agent_tasks.append(task) + agent_task_metadata.append((executor_id, message, source_executor_id, remaining)) + + # Execute first batch of agent tasks in parallel + if agent_tasks: + logger.debug("Executing %d agent tasks in parallel", len(agent_tasks)) + agent_responses = yield context.task_all(agent_tasks) + logger.debug("All %d agent tasks completed", len(agent_tasks)) + + # Process results and handle remaining messages for agents with multiple inputs + remaining_to_process: list[tuple[str, Any, str]] = [] + + for idx, agent_response in enumerate(agent_responses): + executor_id, message, source_executor_id, remaining = agent_task_metadata[idx] + logger.debug("Durable Entity %s returned: %s", executor_id, agent_response) + + # Build AgentExecutorResponse from the 
typed AgentRunResponse + response_text = agent_response.text if agent_response else None + structured_response = None + if agent_response and agent_response.value is not None: + if hasattr(agent_response.value, "model_dump"): + structured_response = agent_response.value.model_dump() + elif isinstance(agent_response.value, dict): + structured_response = agent_response.value + + output_message = build_agent_executor_response( + executor_id=executor_id, + response_text=response_text, + structured_response=structured_response, + previous_message=message, + ) + + all_results.append((executor_id, output_message, None)) + + # Queue remaining messages for sequential processing + remaining_to_process.extend(remaining) + + # Process remaining messages sequentially (these are additional messages to same agent) + for executor_id, message, source_executor_id in remaining_to_process: + agent_name = executor_id + logger.debug("Processing additional message for agent: %s (sequential)", agent_name) + message_content = _extract_message_content(message) - - # Create unique session for this orchestration instance session_id = AgentSessionId(name=agent_name, key=context.instance_id) - - # Create a durable thread with the session ID using proper class thread = DurableAgentThread(session_id=session_id) - - # Create DurableAIAgent wrapper to call the entity - executor = AzureFunctionsAgentExecutor(context) - agent = DurableAIAgent(executor, agent_name) - agent_response: AgentRunResponse = yield agent.run( - message_content, - thread=thread, - ) + + az_executor = AzureFunctionsAgentExecutor(context) + agent = DurableAIAgent(az_executor, agent_name) + agent_response: AgentRunResponse = yield agent.run(message_content, thread=thread) logger.debug("Durable Entity %s returned: %s", agent_name, agent_response) - - # Build AgentExecutorResponse from the typed AgentRunResponse - # AgentRunResponse has .text property for response text and .value for structured response + response_text = 
agent_response.text if agent_response else None structured_response = None if agent_response and agent_response.value is not None: - # If value is a Pydantic model, convert to dict if hasattr(agent_response.value, "model_dump"): structured_response = agent_response.value.model_dump() elif isinstance(agent_response.value, dict): structured_response = agent_response.value - + output_message = build_agent_executor_response( executor_id=executor_id, response_text=response_text, structured_response=structured_response, previous_message=message, ) - - else: - # Standard Executor Execution via Activity - logger.debug("Calling Activity for executor: %s", executor_id) - - # Get shared state snapshot before activity execution (if shared_state is available) - # Only needed for activities since they can access SharedState - shared_state_snapshot: dict[str, Any] | None = None - if shared_state: - shared_state_snapshot = yield from shared_state.get_all() - logger.debug("[workflow] SharedState snapshot for activity: %s", shared_state_snapshot) - - activity_input = { - "executor_id": executor_id, - "message": serialize_message(message), - "shared_state_snapshot": shared_state_snapshot, - "source_executor_ids": [source_executor_id], - } - - # Serialize to JSON string to work around Azure Functions type validation issues - activity_input_json = json.dumps(activity_input) - result_json = yield context.call_activity("ExecuteExecutor", activity_input_json) - result = json.loads(result_json) if result_json else None - logger.debug("Activity for executor %s returned", executor_id) - - # Apply any shared state updates from the activity result - if shared_state and result: - if result.get("shared_state_updates"): - updates = result["shared_state_updates"] - logger.debug("[workflow] Applying SharedState updates from activity: %s", updates) - yield from shared_state.update(updates) - if result.get("shared_state_deletes"): - deletes = result["shared_state_deletes"] - logger.debug("[workflow] 
Applying SharedState deletes from activity: %s", deletes) - for key in deletes: - yield from shared_state.delete(key) - - # Collect outputs - if result and result.get("outputs"): - workflow_outputs.extend(result["outputs"]) - - # Routing - handles both agent output_message and activity sent_messages - messages_to_route: list[tuple[Any, str | None]] = [] # List of (message, explicit_target_or_none) - - if output_message: - messages_to_route.append((output_message, None)) - - # Also route sent_messages from activities - if result and result.get("sent_messages"): - for msg_data in result["sent_messages"]: - sent_msg = msg_data.get("message") - target_id = msg_data.get("target_id") - if sent_msg: - # Deserialize the message to reconstruct typed objects - # This is needed for condition functions that check message types - sent_msg = deserialize_value(sent_msg) - messages_to_route.append((sent_msg, target_id)) - - for msg_to_route, explicit_target in messages_to_route: - logger.debug("Routing output from %s", executor_id) - - # If explicit target specified, route directly - if explicit_target: - if explicit_target not in next_pending_messages: - next_pending_messages[explicit_target] = [] - next_pending_messages[explicit_target].append((msg_to_route, executor_id)) - logger.debug("Routed message from %s to explicit target %s", executor_id, explicit_target) - continue - - # Check for FanInEdgeGroup sources first - for group in workflow.edge_groups: - if isinstance(group, FanInEdgeGroup) and executor_id in group.source_executor_ids: - # Accumulate message for fan-in - if executor_id not in fan_in_pending[group.id]: - fan_in_pending[group.id][executor_id] = [] - fan_in_pending[group.id][executor_id].append((msg_to_route, executor_id)) - logger.debug("Accumulated message for FanIn group %s from %s", group.id, executor_id) - - # Use MAF's edge group routing for other edge types - targets = route_message_through_edge_groups( - workflow.edge_groups, - executor_id, - 
msg_to_route, - ) - - for target_id in targets: - logger.debug("Routing to %s", target_id) - if target_id not in next_pending_messages: - next_pending_messages[target_id] = [] - next_pending_messages[target_id].append((msg_to_route, executor_id)) + + all_results.append((executor_id, output_message, None)) + + # Process Activity Executors in parallel + if activity_executor_tasks: + logger.debug("Processing %d activity executors in parallel", len(activity_executor_tasks)) + + # Get shared state snapshot once before all activity executions (if shared_state is available) + shared_state_snapshot: dict[str, Any] | None = None + if shared_state: + shared_state_snapshot = yield from shared_state.get_all() + logger.debug("[workflow] SharedState snapshot for activities: %s", shared_state_snapshot) + + # Create all activity tasks without yielding (to enable parallel execution) + activity_tasks = [] + task_metadata = [] # Track which task corresponds to which executor + + for executor_id, message, source_executor_id in activity_executor_tasks: + logger.debug("Preparing activity task for executor: %s", executor_id) + + activity_input = { + "executor_id": executor_id, + "message": serialize_message(message), + "shared_state_snapshot": shared_state_snapshot, + "source_executor_ids": [source_executor_id], + } + + # Create the task (don't yield yet - this enables parallelism) + activity_input_json = json.dumps(activity_input) + task = context.call_activity("ExecuteExecutor", activity_input_json) + activity_tasks.append(task) + task_metadata.append((executor_id, message, source_executor_id)) + + # Execute all activities in parallel using task_all + logger.debug("Executing %d activities in parallel", len(activity_tasks)) + results_json_list = yield context.task_all(activity_tasks) + logger.debug("All %d activities completed", len(activity_tasks)) + + # Process results and apply shared state updates + # Note: When running in parallel, shared state updates may conflict + # We apply 
them in order, but this is a limitation of parallel execution + for idx, result_json in enumerate(results_json_list): + executor_id, message, source_executor_id = task_metadata[idx] + result = json.loads(result_json) if result_json else None + logger.debug("Activity for executor %s returned", executor_id) + + # Apply any shared state updates from the activity result + if shared_state and result: + if result.get("shared_state_updates"): + updates = result["shared_state_updates"] + logger.debug("[workflow] Applying SharedState updates from activity %s: %s", executor_id, updates) + yield from shared_state.update(updates) + if result.get("shared_state_deletes"): + deletes = result["shared_state_deletes"] + logger.debug("[workflow] Applying SharedState deletes from activity %s: %s", executor_id, deletes) + for key in deletes: + yield from shared_state.delete(key) + + # Collect outputs + if result and result.get("outputs"): + workflow_outputs.extend(result["outputs"]) + + # Add to results for routing + all_results.append((executor_id, None, result)) + + # Routing phase - process all results + for executor_id, output_message, result in all_results: + messages_to_route: list[tuple[Any, str | None]] = [] + + if output_message: + messages_to_route.append((output_message, None)) + + # Also route sent_messages from activities + if result and result.get("sent_messages"): + for msg_data in result["sent_messages"]: + sent_msg = msg_data.get("message") + target_id = msg_data.get("target_id") + if sent_msg: + sent_msg = deserialize_value(sent_msg) + messages_to_route.append((sent_msg, target_id)) + + for msg_to_route, explicit_target in messages_to_route: + logger.debug("Routing output from %s", executor_id) + + # If explicit target specified, route directly + if explicit_target: + if explicit_target not in next_pending_messages: + next_pending_messages[explicit_target] = [] + next_pending_messages[explicit_target].append((msg_to_route, executor_id)) + logger.debug("Routed message 
from %s to explicit target %s", executor_id, explicit_target) + continue + + # Check for FanInEdgeGroup sources first + for group in workflow.edge_groups: + if isinstance(group, FanInEdgeGroup) and executor_id in group.source_executor_ids: + if executor_id not in fan_in_pending[group.id]: + fan_in_pending[group.id][executor_id] = [] + fan_in_pending[group.id][executor_id].append((msg_to_route, executor_id)) + logger.debug("Accumulated message for FanIn group %s from %s", group.id, executor_id) + + # Use MAF's edge group routing for other edge types + targets = route_message_through_edge_groups( + workflow.edge_groups, + executor_id, + msg_to_route, + ) + + for target_id in targets: + logger.debug("Routing to %s", target_id) + if target_id not in next_pending_messages: + next_pending_messages[target_id] = [] + next_pending_messages[target_id].append((msg_to_route, executor_id)) # Check if any FanInEdgeGroups are ready to deliver for group in workflow.edge_groups: diff --git a/python/samples/getting_started/azure_functions/11_workflow_parallel/.env.template b/python/samples/getting_started/azure_functions/11_workflow_parallel/.env.template new file mode 100644 index 0000000000..1ef634f442 --- /dev/null +++ b/python/samples/getting_started/azure_functions/11_workflow_parallel/.env.template @@ -0,0 +1,14 @@ +# Azure Functions Runtime Configuration +FUNCTIONS_WORKER_RUNTIME=python +AzureWebJobsStorage=UseDevelopmentStorage=true + +# Durable Task Scheduler Configuration +# For local development with DTS emulator: Endpoint=http://localhost:8080;TaskHub=default;Authentication=None +# For Azure: Get connection string from Azure portal +DURABLE_TASK_SCHEDULER_CONNECTION_STRING=Endpoint=http://localhost:8080;TaskHub=default;Authentication=None +TASKHUB_NAME=default + +# Azure OpenAI Configuration +AZURE_OPENAI_ENDPOINT=https://your-resource.openai.azure.com/ +AZURE_OPENAI_CHAT_DEPLOYMENT_NAME=your-deployment-name +AZURE_OPENAI_API_KEY=your-api-key diff --git 
a/python/samples/getting_started/azure_functions/11_workflow_parallel/.gitignore b/python/samples/getting_started/azure_functions/11_workflow_parallel/.gitignore new file mode 100644 index 0000000000..41f350a67c --- /dev/null +++ b/python/samples/getting_started/azure_functions/11_workflow_parallel/.gitignore @@ -0,0 +1,4 @@ +.venv/ +__pycache__/ +local.settings.json +.env diff --git a/python/samples/getting_started/azure_functions/11_workflow_parallel/README.md b/python/samples/getting_started/azure_functions/11_workflow_parallel/README.md new file mode 100644 index 0000000000..07c48b73e6 --- /dev/null +++ b/python/samples/getting_started/azure_functions/11_workflow_parallel/README.md @@ -0,0 +1,193 @@ +# Parallel Workflow Execution Sample + +This sample demonstrates **parallel execution** of executors and agents in Azure Durable Functions workflows. + +## Overview + +This sample showcases three different parallel execution patterns: + +1. **Two Executors in Parallel** - Fan-out to multiple activities +2. **Two Agents in Parallel** - Fan-out to multiple entities +3. 
**Mixed Execution** - Agents and executors can run concurrently + +## Workflow Architecture + +``` +┌─────────────────────────────────────────────────────────────────────────┐ +│ PARALLEL WORKFLOW │ +├─────────────────────────────────────────────────────────────────────────┤ +│ │ +│ Pattern 1: Two Executors in Parallel (Activities) │ +│ ───────────────────────────────────────────────── │ +│ │ +│ input_router ──┬──> [word_count_processor] ────┐ │ +│ │ │ │ +│ └──> [format_analyzer_processor]┴──> [aggregator] │ +│ │ +│ Pattern 2: Two Agents in Parallel (Entities) │ +│ ───────────────────────────────────────────── │ +│ │ +│ [prepare_for_agents] ──┬──> [SentimentAgent] ──────┐ │ +│ │ │ │ +│ └──> [KeywordAgent] ────────┴──> [prepare_for_│ +│ mixed] │ +│ │ +│ Pattern 3: Mixed Agent + Executor in Parallel │ +│ ──────────────────────────────────────────────── │ +│ │ +│ [prepare_for_mixed] ──┬──> [SummaryAgent] ─────────┐ │ +│ │ │ │ +│ └──> [statistics_processor] ─┴──> [final_report│ +│ _executor] │ +│ │ +└─────────────────────────────────────────────────────────────────────────┘ +``` + +## How Parallel Execution Works + +### Activities (Executors) +When multiple executors are pending in the same iteration (e.g., after a fan-out edge), they are batched and executed using `task_all()`: + +```python +# In _workflow.py - activities execute in parallel +activity_tasks = [context.call_activity("ExecuteExecutor", input) for ...] +results = yield context.task_all(activity_tasks) # All run concurrently! +``` + +### Agents (Entities) +Different agents can also run in parallel when they're pending in the same iteration: + +```python +# Different agents run in parallel +agent_tasks = [agent_a.run(...), agent_b.run(...)] +responses = yield context.task_all(agent_tasks) # Both agents run concurrently! +``` + +**Note:** Multiple messages to the *same* agent are processed sequentially to maintain conversation coherence. 
+ +## Components + +| Component | Type | Description | +|-----------|------|-------------| +| `input_router` | Executor | Routes input JSON to parallel processors | +| `word_count_processor` | Executor | Counts words and characters | +| `format_analyzer_processor` | Executor | Analyzes document format | +| `aggregator` | Executor | Combines results from parallel processors | +| `prepare_for_agents` | Executor | Prepares content for agent analysis | +| `SentimentAnalysisAgent` | AI Agent | Analyzes text sentiment | +| `KeywordExtractionAgent` | AI Agent | Extracts keywords and categories | +| `prepare_for_mixed` | Executor | Prepares content for mixed parallel execution | +| `SummaryAgent` | AI Agent | Summarizes the document | +| `statistics_processor` | Executor | Computes document statistics | +| `FinalReportExecutor` | Executor | Compiles final report from all analyses | + +## Prerequisites + +1. **Azure OpenAI** - Endpoint and deployment configured +2. **DTS Emulator** - For durable task scheduling (recommended) +3. **Azurite** - For Azure Functions internal storage + +## Setup + +### Option 1: DevUI Mode (Local Development - No Durable Functions) + +The sample can run locally without Azure Functions infrastructure using DevUI: + +1. Copy the environment template: + ```bash + cp .env.template .env + ``` + +2. Configure `.env` with your Azure OpenAI credentials + +3. Install dependencies: + ```bash + pip install -r requirements.txt + ``` + +4. Run in DevUI mode (set `durable=False` in `function_app.py`): + ```bash + python function_app.py + ``` + +5. Open `http://localhost:8095` and provide input: + ```json + { + "document_id": "doc-001", + "content": "Your document text here..." + } + ``` + +### Option 2: Durable Functions Mode (Full Azure Functions) + +1. Copy configuration files: + ```bash + cp .env.template .env + cp local.settings.json.sample local.settings.json + ``` + +2. Configure `local.settings.json` with your Azure OpenAI credentials + +3. 
Install dependencies: + ```bash + pip install -r requirements.txt + ``` + +4. Start DTS Emulator: + ```bash + docker run -d --name dts-emulator -p 8080:8080 -p 8082:8082 mcr.microsoft.com/dts/dts-emulator:latest + ``` + +5. Start Azurite (or use VS Code extension): + ```bash + azurite --silent + ``` + +6. Run the function app (ensure `durable=True` in `function_app.py`): + ```bash + func start + ``` + +## Testing + +Use the `demo.http` file with REST Client extension or curl: + +### Analyze a Document +```bash +curl -X POST http://localhost:7071/api/workflow/run \ + -H "Content-Type: application/json" \ + -d '{ + "document_id": "doc-001", + "content": "The quarterly earnings report shows strong growth in cloud services. Revenue increased by 25%." + }' +``` + +### Check Status +```bash +curl http://localhost:7071/api/workflow/status/{instanceId} +``` + +## Observing Parallel Execution + +Open the DTS Dashboard at `http://localhost:8082` to observe: + +1. **Activity Execution Timeline** - You'll see `word_count_processor` and `format_analyzer_processor` starting at approximately the same time +2. **Agent Execution Timeline** - `SentimentAnalysisAgent` and `KeywordExtractionAgent` also start concurrently +3. **Sequential vs Parallel** - Compare with non-parallel samples to see the time savings + +## Expected Output + +```json +{ + "output": [ + "=== Document Analysis Report ===\n\n--- SentimentAnalysisAgent ---\n{\"sentiment\": \"positive\", \"confidence\": 0.85, \"explanation\": \"...\"}\n\n--- KeywordExtractionAgent ---\n{\"keywords\": [\"earnings\", \"growth\", \"cloud\"], \"categories\": [\"finance\", \"technology\"]}" + ] +} +``` + +## Key Takeaways + +1. **Parallel execution is automatic** - When multiple executors/agents are pending in the same iteration, they run in parallel +2. **Workflow graph determines parallelism** - Fan-out edges create parallel execution opportunities +3. 
**Mixed parallelism** - Agents and executors can run concurrently if they're in the same iteration +4. **Same-agent messages are sequential** - To maintain conversation coherence diff --git a/python/samples/getting_started/azure_functions/11_workflow_parallel/demo.http b/python/samples/getting_started/azure_functions/11_workflow_parallel/demo.http new file mode 100644 index 0000000000..a8ae96e452 --- /dev/null +++ b/python/samples/getting_started/azure_functions/11_workflow_parallel/demo.http @@ -0,0 +1,29 @@ +### Analyze a document (triggers parallel workflow) +POST http://localhost:7071/api/workflow/run +Content-Type: application/json + +{ + "document_id": "doc-001", + "content": "The quarterly earnings report shows strong growth in our cloud services division. Revenue increased by 25% compared to last year, driven by enterprise adoption. Customer satisfaction remains high at 92%. However, we face challenges in the mobile segment where competition is intense. Overall, the outlook is positive with expected continued growth in the coming quarters." +} + +### + +### Short document test +POST http://localhost:7071/api/workflow/run +Content-Type: application/json + +{ + "document_id": "doc-002", + "content": "Quick update: Project completed successfully. Team performance exceeded expectations." +} + +### + +### Check workflow status +GET http://localhost:7071/api/workflow/status/{{instanceId}} + +### + +### Health check +GET http://localhost:7071/api/health diff --git a/python/samples/getting_started/azure_functions/11_workflow_parallel/function_app.py b/python/samples/getting_started/azure_functions/11_workflow_parallel/function_app.py new file mode 100644 index 0000000000..e19da877ac --- /dev/null +++ b/python/samples/getting_started/azure_functions/11_workflow_parallel/function_app.py @@ -0,0 +1,525 @@ +# Copyright (c) Microsoft. All rights reserved. +"""Parallel Workflow Execution Sample. 
+ +This sample demonstrates parallel execution of executors and agents in Azure Durable Functions. +It showcases three different parallel execution patterns: + +1. Two executors running concurrently (fan-out to activities) +2. Two agents running concurrently (fan-out to entities) +3. One executor and one agent running concurrently (mixed fan-out) + +The workflow simulates a document processing pipeline where: +- A document is analyzed by multiple processors in parallel +- Results are aggregated and then processed by agents +- A summary agent and statistics executor run in parallel +- Finally, combined into a single output + +Key architectural points: +- FanOut edges enable parallel execution +- Different agents run in parallel when they're in the same iteration +- Activities (executors) also run in parallel when pending together +- Mixed agent/executor fan-outs execute concurrently +""" + +import json +import logging +import os +from dataclasses import dataclass +from typing import Any + +from agent_framework import ( + AgentExecutorResponse, + Executor, + Workflow, + WorkflowBuilder, + WorkflowContext, + executor, + handler, +) +from agent_framework.azure import AzureOpenAIChatClient +from azure.identity import AzureCliCredential +from pydantic import BaseModel +from typing_extensions import Never + +from agent_framework_azurefunctions import AgentFunctionApp + +logger = logging.getLogger(__name__) + +AZURE_OPENAI_ENDPOINT_ENV = "AZURE_OPENAI_ENDPOINT" +AZURE_OPENAI_DEPLOYMENT_ENV = "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME" +AZURE_OPENAI_API_KEY_ENV = "AZURE_OPENAI_API_KEY" + +# Agent names +SENTIMENT_AGENT_NAME = "SentimentAnalysisAgent" +KEYWORD_AGENT_NAME = "KeywordExtractionAgent" +SUMMARY_AGENT_NAME = "SummaryAgent" +RECOMMENDATION_AGENT_NAME = "RecommendationAgent" + + +# ============================================================================ +# Pydantic Models for structured outputs +# 
============================================================================ + + +class SentimentResult(BaseModel): + """Result from sentiment analysis.""" + sentiment: str # positive, negative, neutral + confidence: float + explanation: str + + +class KeywordResult(BaseModel): + """Result from keyword extraction.""" + keywords: list[str] + categories: list[str] + + +class SummaryResult(BaseModel): + """Result from summarization.""" + summary: str + key_points: list[str] + + +class RecommendationResult(BaseModel): + """Result from recommendation engine.""" + recommendations: list[str] + priority: str + + +@dataclass +class DocumentInput: + """Input document to be processed.""" + document_id: str + content: str + + +@dataclass +class ProcessorResult: + """Result from a document processor (executor).""" + processor_name: str + document_id: str + content: str + word_count: int + char_count: int + has_numbers: bool + + +@dataclass +class AggregatedResults: + """Aggregated results from parallel processors.""" + document_id: str + content: str + processor_results: list[ProcessorResult] + + +@dataclass +class AgentAnalysis: + """Analysis result from an agent.""" + agent_name: str + result: str + + +@dataclass +class FinalReport: + """Final combined report.""" + document_id: str + analyses: list[AgentAnalysis] + + +# ============================================================================ +# Executor Definitions (Activities - run in parallel when pending together) +# ============================================================================ + + +@executor(id="input_router") +async def input_router( + doc: str, + ctx: WorkflowContext[DocumentInput] +) -> None: + """Route input document to parallel processors. + + Accepts a JSON string from the HTTP request and converts to DocumentInput. 
+ """ + # Parse the JSON string input + data = json.loads(doc) if isinstance(doc, str) else doc + document = DocumentInput( + document_id=data.get("document_id", "unknown"), + content=data.get("content", ""), + ) + logger.info("[input_router] Routing document: %s", document.document_id) + await ctx.send_message(document) + + +@executor(id="word_count_processor") +async def word_count_processor( + doc: DocumentInput, + ctx: WorkflowContext[ProcessorResult] +) -> None: + """Process document and count words - runs as an activity.""" + logger.info("[word_count_processor] Processing document: %s", doc.document_id) + + word_count = len(doc.content.split()) + char_count = len(doc.content) + has_numbers = any(c.isdigit() for c in doc.content) + + result = ProcessorResult( + processor_name="word_count", + document_id=doc.document_id, + content=doc.content, + word_count=word_count, + char_count=char_count, + has_numbers=has_numbers, + ) + + await ctx.send_message(result) + + +@executor(id="format_analyzer_processor") +async def format_analyzer_processor( + doc: DocumentInput, + ctx: WorkflowContext[ProcessorResult] +) -> None: + """Analyze document format - runs as an activity in parallel with word_count.""" + logger.info("[format_analyzer_processor] Processing document: %s", doc.document_id) + + # Simple format analysis + lines = doc.content.split('\n') + word_count = len(lines) # Using line count as "word count" for this processor + char_count = sum(len(line) for line in lines) + has_numbers = doc.content.count('.') > 0 # Check for sentences + + result = ProcessorResult( + processor_name="format_analyzer", + document_id=doc.document_id, + content=doc.content, + word_count=word_count, + char_count=char_count, + has_numbers=has_numbers, + ) + + await ctx.send_message(result) + + +@executor(id="aggregator") +async def aggregator( + results: list[ProcessorResult], + ctx: WorkflowContext[AggregatedResults] +) -> None: + """Aggregate results from parallel processors - receives 
fan-in input.""" + logger.info("[aggregator] Aggregating %d results", len(results)) + + # Extract document info from the first result (all have the same content) + document_id = results[0].document_id if results else "unknown" + content = results[0].content if results else "" + + aggregated = AggregatedResults( + document_id=document_id, + content=content, + processor_results=results, + ) + + await ctx.send_message(aggregated) + + +@executor(id="prepare_for_agents") +async def prepare_for_agents( + aggregated: AggregatedResults, + ctx: WorkflowContext[str] +) -> None: + """Prepare content for agent analysis - broadcasts to multiple agents.""" + logger.info("[prepare_for_agents] Preparing content for agents") + + # Send the original content to agents for analysis + await ctx.send_message(aggregated.content) + + +@executor(id="prepare_for_mixed") +async def prepare_for_mixed( + analyses: list[AgentExecutorResponse], + ctx: WorkflowContext[str] +) -> None: + """Prepare results for mixed agent+executor parallel processing. + + Combines agent analysis results into a string that can be consumed by + both the SummaryAgent and the statistics_processor in parallel. 
+ """ + logger.info("[prepare_for_mixed] Preparing for mixed parallel pattern") + + sentiment_text = "" + keyword_text = "" + + for analysis in analyses: + executor_id = analysis.executor_id + text = analysis.agent_run_response.text if analysis.agent_run_response else "" + + if executor_id == SENTIMENT_AGENT_NAME: + sentiment_text = text + elif executor_id == KEYWORD_AGENT_NAME: + keyword_text = text + + # Combine into a string that both agent and executor can process + combined = f"Sentiment Analysis: {sentiment_text}\n\nKeyword Extraction: {keyword_text}" + await ctx.send_message(combined) + + +@executor(id="statistics_processor") +async def statistics_processor( + analysis_text: str, + ctx: WorkflowContext[ProcessorResult] +) -> None: + """Calculate statistics from the analysis - runs in parallel with SummaryAgent.""" + logger.info("[statistics_processor] Calculating statistics") + + # Calculate some statistics from the combined analysis + word_count = len(analysis_text.split()) + char_count = len(analysis_text) + has_numbers = any(c.isdigit() for c in analysis_text) + + result = ProcessorResult( + processor_name="statistics", + document_id="analysis", + content=analysis_text, + word_count=word_count, + char_count=char_count, + has_numbers=has_numbers, + ) + await ctx.send_message(result) + + +class FinalReportExecutor(Executor): + """Executor that compiles the final report from agent analyses.""" + + @handler + async def compile_report( + self, + analyses: list[AgentExecutorResponse | ProcessorResult], + ctx: WorkflowContext[Never, str], + ) -> None: + """Compile final report from mixed agent + processor results.""" + logger.info("[final_report] Compiling report from %d analyses", len(analyses)) + + report_parts = ["=== Document Analysis Report ===\n"] + + for analysis in analyses: + if isinstance(analysis, AgentExecutorResponse): + agent_name = analysis.executor_id + text = analysis.agent_run_response.text if analysis.agent_run_response else "No response" + 
elif isinstance(analysis, ProcessorResult): + agent_name = f"Processor: {analysis.processor_name}" + text = f"Words: {analysis.word_count}, Chars: {analysis.char_count}" + else: + continue + + report_parts.append(f"\n--- {agent_name} ---") + report_parts.append(text) + + final_report = "\n".join(report_parts) + await ctx.yield_output(final_report) + + +class MixedResultCollector(Executor): + """Collector for mixed agent/executor results.""" + + @handler + async def collect_mixed_results( + self, + results: list[Any], + ctx: WorkflowContext[Never, str], + ) -> None: + """Collect and format results from mixed parallel execution.""" + logger.info("[mixed_collector] Collecting %d mixed results", len(results)) + + output_parts = ["=== Mixed Parallel Execution Results ===\n"] + + for result in results: + if isinstance(result, AgentExecutorResponse): + output_parts.append(f"[Agent: {result.executor_id}]") + output_parts.append(result.agent_run_response.text if result.agent_run_response else "No response") + elif isinstance(result, ProcessorResult): + output_parts.append(f"[Processor: {result.processor_name}]") + output_parts.append(f" Words: {result.word_count}, Chars: {result.char_count}") + + await ctx.yield_output("\n".join(output_parts)) + + +# ============================================================================ +# Workflow Construction +# ============================================================================ + + +def _build_client_kwargs() -> dict[str, Any]: + """Build Azure OpenAI client kwargs from environment variables.""" + endpoint = os.getenv(AZURE_OPENAI_ENDPOINT_ENV) + if not endpoint: + raise RuntimeError(f"{AZURE_OPENAI_ENDPOINT_ENV} environment variable is required.") + + deployment = os.getenv(AZURE_OPENAI_DEPLOYMENT_ENV) + if not deployment: + raise RuntimeError(f"{AZURE_OPENAI_DEPLOYMENT_ENV} environment variable is required.") + + client_kwargs: dict[str, Any] = { + "endpoint": endpoint, + "deployment_name": deployment, + } + + api_key = 
os.getenv(AZURE_OPENAI_API_KEY_ENV) + if api_key: + client_kwargs["api_key"] = api_key + else: + client_kwargs["credential"] = AzureCliCredential() + + return client_kwargs + + +def _create_workflow() -> Workflow: + """Create the parallel workflow definition. + + Workflow structure demonstrating three parallel patterns: + + Pattern 1: Two Executors in Parallel (Fan-out/Fan-in to activities) + ──────────────────────────────────────────────────────────────────── + ┌─> word_count_processor ─────┐ + input_router ──┤ ├──> aggregator + └─> format_analyzer_processor ─┘ + + Pattern 2: Two Agents in Parallel (Fan-out to entities) + ──────────────────────────────────────────────────────── + prepare_for_agents ─┬─> SentimentAgent ──┐ + └─> KeywordAgent ────┤ + └──> prepare_for_mixed + + Pattern 3: Mixed Agent + Executor in Parallel + ────────────────────────────────────────────── + prepare_for_mixed ─┬─> SummaryAgent ────────┐ + └─> statistics_processor ─┤ + └──> final_report + """ + client_kwargs = _build_client_kwargs() + chat_client = AzureOpenAIChatClient(**client_kwargs) + + # Create agents for parallel analysis + sentiment_agent = chat_client.create_agent( + name=SENTIMENT_AGENT_NAME, + instructions=( + "You are a sentiment analysis expert. Analyze the sentiment of the given text. " + "Return JSON with fields: sentiment (positive/negative/neutral), " + "confidence (0.0-1.0), and explanation (brief reasoning)." + ), + response_format=SentimentResult, + ) + + keyword_agent = chat_client.create_agent( + name=KEYWORD_AGENT_NAME, + instructions=( + "You are a keyword extraction expert. Extract important keywords and categories " + "from the given text. Return JSON with fields: keywords (list of strings), " + "and categories (list of topic categories)." + ), + response_format=KeywordResult, + ) + + # Create summary agent for Pattern 3 (mixed parallel) + summary_agent = chat_client.create_agent( + name=SUMMARY_AGENT_NAME, + instructions=( + "You are a summarization expert. 
Given analysis results (sentiment and keywords), " + "provide a concise summary. Return JSON with fields: summary (brief text), " + "and key_points (list of main takeaways)." + ), + response_format=SummaryResult, + ) + + # Create executor instances + final_report_executor = FinalReportExecutor(id="final_report") + + # Build workflow with parallel patterns + workflow = ( + WorkflowBuilder() + # Start: Route input to parallel processors + .set_start_executor(input_router) + + # Pattern 1: Fan-out to two executors (run in parallel) + .add_fan_out_edges( + source=input_router, + targets=[word_count_processor, format_analyzer_processor], + ) + + # Fan-in: Both processors send results to aggregator + .add_fan_in_edges( + sources=[word_count_processor, format_analyzer_processor], + target=aggregator, + ) + + # Prepare content for agent analysis + .add_edge(aggregator, prepare_for_agents) + + # Pattern 2: Fan-out to two agents (run in parallel) + .add_fan_out_edges( + source=prepare_for_agents, + targets=[sentiment_agent, keyword_agent], + ) + + # Fan-in: Collect agent results into prepare_for_mixed + .add_fan_in_edges( + sources=[sentiment_agent, keyword_agent], + target=prepare_for_mixed, + ) + + # Pattern 3: Fan-out to one agent + one executor (mixed parallel) + .add_fan_out_edges( + source=prepare_for_mixed, + targets=[summary_agent, statistics_processor], + ) + + # Final fan-in: Collect mixed results + .add_fan_in_edges( + sources=[summary_agent, statistics_processor], + target=final_report_executor, + ) + + .build() + ) + + return workflow + + +# ============================================================================ +# Application Entry Point +# ============================================================================ + + +def launch(durable: bool = True) -> AgentFunctionApp | None: + """Launch the function app or DevUI.""" + workflow: Workflow | None = None + + if durable: + workflow = _create_workflow() + app = AgentFunctionApp( + workflow=workflow, + 
enable_health_check=True, + enable_shared_state=False, + ) + return app + else: + from pathlib import Path + from agent_framework.devui import serve + from dotenv import load_dotenv + + env_path = Path(__file__).parent / ".env" + load_dotenv(dotenv_path=env_path) + + logger.info("Starting Parallel Workflow Sample") + logger.info("Available at: http://localhost:8095") + logger.info("\nThis workflow demonstrates:") + logger.info("- Pattern 1: Two executors running in parallel") + logger.info("- Pattern 2: Two agents running in parallel") + logger.info("- Pattern 3: Mixed agent + executor running in parallel") + logger.info("- Fan-in aggregation of parallel results") + + workflow = _create_workflow() + serve(entities=[workflow], port=8095, auto_open=True) + + return None + + +app = launch(durable=False) diff --git a/python/samples/getting_started/azure_functions/11_workflow_parallel/host.json b/python/samples/getting_started/azure_functions/11_workflow_parallel/host.json new file mode 100644 index 0000000000..292562af8e --- /dev/null +++ b/python/samples/getting_started/azure_functions/11_workflow_parallel/host.json @@ -0,0 +1,16 @@ +{ + "version": "2.0", + "extensionBundle": { + "id": "Microsoft.Azure.Functions.ExtensionBundle", + "version": "[4.*, 5.0.0)" + }, + "extensions": { + "durableTask": { + "hubName": "%TASKHUB_NAME%", + "storageProvider": { + "type": "AzureManaged", + "connectionStringName": "DURABLE_TASK_SCHEDULER_CONNECTION_STRING" + } + } + } +} diff --git a/python/samples/getting_started/azure_functions/11_workflow_parallel/local.settings.json.sample b/python/samples/getting_started/azure_functions/11_workflow_parallel/local.settings.json.sample new file mode 100644 index 0000000000..30edea6c08 --- /dev/null +++ b/python/samples/getting_started/azure_functions/11_workflow_parallel/local.settings.json.sample @@ -0,0 +1,12 @@ +{ + "IsEncrypted": false, + "Values": { + "FUNCTIONS_WORKER_RUNTIME": "python", + "AzureWebJobsStorage": 
"UseDevelopmentStorage=true", + "DURABLE_TASK_SCHEDULER_CONNECTION_STRING": "Endpoint=http://localhost:8080;TaskHub=default;Authentication=None", + "TASKHUB_NAME": "default", + "AZURE_OPENAI_ENDPOINT": "https://.openai.azure.com/", + "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME": "", + "AZURE_OPENAI_API_KEY": "" + } +} diff --git a/python/samples/getting_started/azure_functions/11_workflow_parallel/requirements.txt b/python/samples/getting_started/azure_functions/11_workflow_parallel/requirements.txt new file mode 100644 index 0000000000..792ae4864e --- /dev/null +++ b/python/samples/getting_started/azure_functions/11_workflow_parallel/requirements.txt @@ -0,0 +1,3 @@ +agent-framework-azurefunctions +agent-framework +azure-identity From 29f1d0de3f28c687846ea28144a0e56a06484fd9 Mon Sep 17 00:00:00 2001 From: Ahmed Muhsin Date: Tue, 20 Jan 2026 11:24:00 -0600 Subject: [PATCH 14/18] fix issues brought up by pre-commit checks --- .../agent_framework_azurefunctions/_app.py | 8 +-- .../_shared_state.py | 14 ++-- .../agent_framework_azurefunctions/_utils.py | 32 ++++----- .../_workflow.py | 59 +++++++++-------- .../packages/azurefunctions/tests/test_app.py | 66 ++++++++++++------- .../function_app.py | 4 +- 6 files changed, 98 insertions(+), 85 deletions(-) diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py index b13907685b..07021a9507 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py @@ -362,10 +362,7 @@ def workflow_orchestrator(context: df.DurableOrchestrationContext): # type: ign input_data = context.get_input() # Ensure input is a string for the agent - if isinstance(input_data, (dict, list)): - initial_message = json.dumps(input_data) - else: - initial_message = str(input_data) + initial_message = json.dumps(input_data) if isinstance(input_data, (dict, list)) else 
str(input_data) # Only create DurableSharedState if enabled to avoid extra entity calls shared_state = None @@ -373,7 +370,8 @@ def workflow_orchestrator(context: df.DurableOrchestrationContext): # type: ign shared_state = DurableSharedState(context, context.instance_id) outputs = yield from run_workflow_orchestrator(context, self.workflow, initial_message, shared_state) - return outputs + # Durable Functions runtime extracts return value from StopIteration + return outputs # noqa: B901 @self.route(route="workflow/run", methods=["POST"]) @self.durable_client_input(client_name="client") diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_shared_state.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_shared_state.py index 9177ac5a7d..f51bed842a 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/_shared_state.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_shared_state.py @@ -1,6 +1,6 @@ # Copyright (c) Microsoft. All rights reserved. -"""Durable Shared State for Workflow Execution +"""Durable Shared State for Workflow Execution. This module provides a durable SharedState implementation that allows executors in a workflow to share state across the execution lifecycle. Unlike MAF's in-memory @@ -111,7 +111,8 @@ def get(self, key: str, default: Any = None) -> Generator[Any, Any, Any]: Generator that yields the value or default """ result = yield self._context.call_entity(self._entity_id, "get", {"key": key, "default": default}) - return result + # Durable Functions runtime extracts return value from StopIteration + return result # noqa: B901 def set(self, key: str, value: Any) -> Generator[Any, Any, None]: """Set a value in the shared state. 
@@ -132,7 +133,8 @@ def has(self, key: str) -> Generator[Any, Any, bool]: Generator that yields True if key exists, False otherwise """ result = yield self._context.call_entity(self._entity_id, "has", {"key": key}) - return result + # Durable Functions runtime extracts return value from StopIteration + return result # noqa: B901 def delete(self, key: str) -> Generator[Any, Any, bool]: """Delete a key from the shared state. @@ -144,7 +146,8 @@ def delete(self, key: str) -> Generator[Any, Any, bool]: Generator that yields True if key was deleted, False if it didn't exist """ result = yield self._context.call_entity(self._entity_id, "delete", {"key": key}) - return result + # Durable Functions runtime extracts return value from StopIteration + return result # noqa: B901 def get_all(self) -> Generator[Any, Any, dict[str, Any]]: """Get all shared state as a dictionary. @@ -153,7 +156,8 @@ def get_all(self) -> Generator[Any, Any, dict[str, Any]]: Generator that yields the complete state dictionary """ result = yield self._context.call_entity(self._entity_id, "get_all", None) - return result if result else {} + # Durable Functions runtime extracts return value from StopIteration + return result if result else {} # noqa: B901 def update(self, updates: dict[str, Any]) -> Generator[Any, Any, None]: """Update multiple keys at once. 
diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_utils.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_utils.py index 9e16609c27..5ef9fd7ea6 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/_utils.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_utils.py @@ -10,8 +10,8 @@ import asyncio import logging -from dataclasses import asdict, fields, is_dataclass import types +from dataclasses import asdict, fields, is_dataclass from typing import Any, Union, get_args, get_origin from agent_framework import ( @@ -252,13 +252,13 @@ def deserialize_value(data: Any, type_registry: dict[str, type] | None = None) - try: return reconstruct_agent_executor_request(data) except Exception: - pass + logger.debug("Could not reconstruct as AgentExecutorRequest, trying next strategy") if type_name == "AgentExecutorResponse" or ("executor_id" in data and "agent_run_response" in data): try: return reconstruct_agent_executor_response(data) except Exception: - pass + logger.debug("Could not reconstruct as AgentExecutorResponse, trying next strategy") if not type_name: return data @@ -278,9 +278,7 @@ def deserialize_value(data: Any, type_registry: dict[str, type] | None = None) - module = importlib.import_module(module_name) target_type = getattr(module, type_name, None) except Exception: - # Ignore import errors - type may not be available in this context - # Will fall back to returning the raw dict below - pass + logger.debug("Could not import module %s for type %s", module_name, type_name) if target_type: # Remove metadata before reconstruction @@ -289,9 +287,7 @@ def deserialize_value(data: Any, type_registry: dict[str, type] | None = None) - if is_dataclass(target_type) or issubclass(target_type, BaseModel): return target_type(**clean_data) except Exception: - # Ignore reconstruction errors (e.g., missing fields, type mismatches) - # Will fall back to returning the raw dict below - pass + 
logger.debug("Could not reconstruct type %s from data", type_name) return data @@ -309,13 +305,7 @@ def reconstruct_agent_executor_response(data: dict[str, Any]) -> AgentExecutorRe """Helper to reconstruct AgentExecutorResponse from dict.""" # Reconstruct AgentRunResponse arr_data = data.get("agent_run_response", {}) - - agent_run_response = None - if isinstance(arr_data, dict): - # Use from_dict for proper reconstruction - agent_run_response = AgentRunResponse.from_dict(arr_data) - else: - agent_run_response = arr_data + agent_run_response = AgentRunResponse.from_dict(arr_data) if isinstance(arr_data, dict) else arr_data # Reconstruct full_conversation fc_data = data.get("full_conversation", []) @@ -371,7 +361,7 @@ def reconstruct_message_for_handler(data: Any, input_types: list[type[Any]]) -> element_types.extend(get_args(arg)) else: element_types.append(arg) - + # Recursively reconstruct each item in the list return [reconstruct_message_for_handler(item, element_types or flattened_types) for item in data] @@ -383,14 +373,14 @@ def reconstruct_message_for_handler(data: Any, input_types: list[type[Any]]) -> try: return reconstruct_agent_executor_response(data) except Exception: - pass + logger.debug("Could not reconstruct as AgentExecutorResponse in handler context") # Try AgentExecutorRequest - also needs special handling for nested ChatMessage objects if "messages" in data and "should_respond" in data: try: return reconstruct_agent_executor_request(data) except Exception: - pass + logger.debug("Could not reconstruct as AgentExecutorRequest in handler context") # Try deserialize_value which uses embedded type metadata (__type__, __module__) if "__type__" in data: @@ -400,7 +390,7 @@ def reconstruct_message_for_handler(data: Any, input_types: list[type[Any]]) -> # Try to match against input types by checking dict keys vs dataclass fields # Filter out metadata keys when comparing - data_keys = {k for k in data.keys() if not k.startswith("__")} + data_keys = {k for 
k in data if not k.startswith("__")} for msg_type in flattened_types: if is_dataclass(msg_type): # Check if the dict keys match the dataclass fields @@ -411,6 +401,6 @@ def reconstruct_message_for_handler(data: Any, input_types: list[type[Any]]) -> clean_data = {k: v for k, v in data.items() if not k.startswith("__")} return msg_type(**clean_data) except Exception: - pass + logger.debug("Could not construct %s from matching fields", msg_type.__name__) return data diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py index 088dd68e0b..8efed6c922 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py @@ -1,6 +1,6 @@ # Copyright (c) Microsoft. All rights reserved. -"""Workflow Execution for Durable Functions +"""Workflow Execution for Durable Functions. This module provides the workflow orchestration engine that executes MAF Workflows using Azure Durable Functions. It reuses MAF's edge group routing logic while @@ -186,8 +186,8 @@ def run_workflow_orchestrator( max_iterations = workflow.max_iterations workflow_outputs: list[Any] = [] - # Track pending sources for FanInEdgeGroups - # Structure: {group_id: {source_id: [(message, source_executor_id)]}} + # Track pending sources for FanInEdgeGroups. + # Maps group_id to a dict of source_id to list of (message, source_executor_id) tuples. 
fan_in_pending: dict[str, dict[str, list[tuple[Any, str]]]] = {} # Initialize fan-in tracking for all FanInEdgeGroups @@ -229,45 +229,45 @@ def run_workflow_orchestrator( # Process groups - if only one message per agent, can run all in parallel # If multiple messages to same agent, need sequential within that agent - + # First pass: create tasks for the first message of each agent (parallel) agent_tasks = [] agent_task_metadata = [] # (executor_id, message, source_executor_id, remaining_messages) - + for executor_id, messages_list in agent_groups.items(): first_msg = messages_list[0] remaining = messages_list[1:] - + message = first_msg[1] source_executor_id = first_msg[2] - + agent_name = executor_id logger.debug("Preparing agent task for: %s", agent_name) - + message_content = _extract_message_content(message) session_id = AgentSessionId(name=agent_name, key=context.instance_id) thread = DurableAgentThread(session_id=session_id) - + az_executor = AzureFunctionsAgentExecutor(context) agent = DurableAIAgent(az_executor, agent_name) task = agent.run(message_content, thread=thread) - + agent_tasks.append(task) agent_task_metadata.append((executor_id, message, source_executor_id, remaining)) - + # Execute first batch of agent tasks in parallel if agent_tasks: logger.debug("Executing %d agent tasks in parallel", len(agent_tasks)) agent_responses = yield context.task_all(agent_tasks) logger.debug("All %d agent tasks completed", len(agent_tasks)) - + # Process results and handle remaining messages for agents with multiple inputs remaining_to_process: list[tuple[str, Any, str]] = [] - + for idx, agent_response in enumerate(agent_responses): executor_id, message, source_executor_id, remaining = agent_task_metadata[idx] logger.debug("Durable Entity %s returned: %s", executor_id, agent_response) - + # Build AgentExecutorResponse from the typed AgentRunResponse response_text = agent_response.text if agent_response else None structured_response = None @@ -276,33 +276,33 @@ def 
run_workflow_orchestrator( structured_response = agent_response.value.model_dump() elif isinstance(agent_response.value, dict): structured_response = agent_response.value - + output_message = build_agent_executor_response( executor_id=executor_id, response_text=response_text, structured_response=structured_response, previous_message=message, ) - + all_results.append((executor_id, output_message, None)) - + # Queue remaining messages for sequential processing remaining_to_process.extend(remaining) - + # Process remaining messages sequentially (these are additional messages to same agent) - for executor_id, message, source_executor_id in remaining_to_process: + for executor_id, message, _source_executor_id in remaining_to_process: agent_name = executor_id logger.debug("Processing additional message for agent: %s (sequential)", agent_name) - + message_content = _extract_message_content(message) session_id = AgentSessionId(name=agent_name, key=context.instance_id) thread = DurableAgentThread(session_id=session_id) - + az_executor = AzureFunctionsAgentExecutor(context) agent = DurableAIAgent(az_executor, agent_name) agent_response: AgentRunResponse = yield agent.run(message_content, thread=thread) logger.debug("Durable Entity %s returned: %s", agent_name, agent_response) - + response_text = agent_response.text if agent_response else None structured_response = None if agent_response and agent_response.value is not None: @@ -310,14 +310,14 @@ def run_workflow_orchestrator( structured_response = agent_response.value.model_dump() elif isinstance(agent_response.value, dict): structured_response = agent_response.value - + output_message = build_agent_executor_response( executor_id=executor_id, response_text=response_text, structured_response=structured_response, previous_message=message, ) - + all_results.append((executor_id, output_message, None)) # Process Activity Executors in parallel @@ -367,11 +367,15 @@ def run_workflow_orchestrator( if shared_state and result: if 
result.get("shared_state_updates"): updates = result["shared_state_updates"] - logger.debug("[workflow] Applying SharedState updates from activity %s: %s", executor_id, updates) + logger.debug( + "[workflow] Applying SharedState updates from activity %s: %s", executor_id, updates + ) yield from shared_state.update(updates) if result.get("shared_state_deletes"): deletes = result["shared_state_deletes"] - logger.debug("[workflow] Applying SharedState deletes from activity %s: %s", executor_id, deletes) + logger.debug( + "[workflow] Applying SharedState deletes from activity %s: %s", executor_id, deletes + ) for key in deletes: yield from shared_state.delete(key) @@ -462,7 +466,8 @@ def run_workflow_orchestrator( pending_messages = next_pending_messages iteration += 1 - return workflow_outputs + # Durable Functions runtime extracts return value from StopIteration + return workflow_outputs # noqa: B901 def _extract_message_content(message: Any) -> str: diff --git a/python/packages/azurefunctions/tests/test_app.py b/python/packages/azurefunctions/tests/test_app.py index 51360636a4..ac461cc233 100644 --- a/python/packages/azurefunctions/tests/test_app.py +++ b/python/packages/azurefunctions/tests/test_app.py @@ -1116,9 +1116,11 @@ def test_init_with_workflow_stores_workflow(self) -> None: mock_workflow = Mock() mock_workflow.executors = {} - with patch.object(AgentFunctionApp, "_setup_executor_activity"): - with patch.object(AgentFunctionApp, "_setup_workflow_orchestration"): - app = AgentFunctionApp(workflow=mock_workflow) + with ( + patch.object(AgentFunctionApp, "_setup_executor_activity"), + patch.object(AgentFunctionApp, "_setup_workflow_orchestration"), + ): + app = AgentFunctionApp(workflow=mock_workflow) assert app.workflow is mock_workflow @@ -1135,10 +1137,12 @@ def test_init_with_workflow_extracts_agents(self) -> None: mock_workflow = Mock() mock_workflow.executors = {"WorkflowAgent": mock_executor} - with patch.object(AgentFunctionApp, 
"_setup_executor_activity"): - with patch.object(AgentFunctionApp, "_setup_workflow_orchestration"): - with patch.object(AgentFunctionApp, "_setup_agent_functions"): - app = AgentFunctionApp(workflow=mock_workflow) + with ( + patch.object(AgentFunctionApp, "_setup_executor_activity"), + patch.object(AgentFunctionApp, "_setup_workflow_orchestration"), + patch.object(AgentFunctionApp, "_setup_agent_functions"), + ): + app = AgentFunctionApp(workflow=mock_workflow) assert "WorkflowAgent" in app.agents @@ -1147,9 +1151,11 @@ def test_init_with_workflow_calls_setup_methods(self) -> None: mock_workflow = Mock() mock_workflow.executors = {} - with patch.object(AgentFunctionApp, "_setup_executor_activity") as setup_exec: - with patch.object(AgentFunctionApp, "_setup_workflow_orchestration") as setup_orch: - AgentFunctionApp(workflow=mock_workflow) + with ( + patch.object(AgentFunctionApp, "_setup_executor_activity") as setup_exec, + patch.object(AgentFunctionApp, "_setup_workflow_orchestration") as setup_orch, + ): + AgentFunctionApp(workflow=mock_workflow) setup_exec.assert_called_once() setup_orch.assert_called_once() @@ -1159,9 +1165,11 @@ def test_init_shared_state_enabled_by_default(self) -> None: mock_workflow = Mock() mock_workflow.executors = {} - with patch.object(AgentFunctionApp, "_setup_executor_activity"): - with patch.object(AgentFunctionApp, "_setup_workflow_orchestration"): - app = AgentFunctionApp(workflow=mock_workflow) + with ( + patch.object(AgentFunctionApp, "_setup_executor_activity"), + patch.object(AgentFunctionApp, "_setup_workflow_orchestration"), + ): + app = AgentFunctionApp(workflow=mock_workflow) assert app.enable_shared_state is True @@ -1170,9 +1178,11 @@ def test_init_shared_state_can_be_disabled(self) -> None: mock_workflow = Mock() mock_workflow.executors = {} - with patch.object(AgentFunctionApp, "_setup_executor_activity"): - with patch.object(AgentFunctionApp, "_setup_workflow_orchestration"): - app = 
AgentFunctionApp(workflow=mock_workflow, enable_shared_state=False) + with ( + patch.object(AgentFunctionApp, "_setup_executor_activity"), + patch.object(AgentFunctionApp, "_setup_workflow_orchestration"), + ): + app = AgentFunctionApp(workflow=mock_workflow, enable_shared_state=False) assert app.enable_shared_state is False @@ -1181,9 +1191,11 @@ def test_init_without_workflow_does_not_call_workflow_setup(self) -> None: mock_agent = Mock() mock_agent.name = "TestAgent" - with patch.object(AgentFunctionApp, "_setup_executor_activity") as setup_exec: - with patch.object(AgentFunctionApp, "_setup_workflow_orchestration") as setup_orch: - AgentFunctionApp(agents=[mock_agent]) + with ( + patch.object(AgentFunctionApp, "_setup_executor_activity") as setup_exec, + patch.object(AgentFunctionApp, "_setup_workflow_orchestration") as setup_orch, + ): + AgentFunctionApp(agents=[mock_agent]) setup_exec.assert_not_called() setup_orch.assert_not_called() @@ -1193,9 +1205,11 @@ def test_build_status_url(self) -> None: mock_workflow = Mock() mock_workflow.executors = {} - with patch.object(AgentFunctionApp, "_setup_executor_activity"): - with patch.object(AgentFunctionApp, "_setup_workflow_orchestration"): - app = AgentFunctionApp(workflow=mock_workflow) + with ( + patch.object(AgentFunctionApp, "_setup_executor_activity"), + patch.object(AgentFunctionApp, "_setup_workflow_orchestration"), + ): + app = AgentFunctionApp(workflow=mock_workflow) url = app._build_status_url("http://localhost:7071/api/workflow/run", "instance-123") @@ -1206,9 +1220,11 @@ def test_build_status_url_handles_trailing_slash(self) -> None: mock_workflow = Mock() mock_workflow.executors = {} - with patch.object(AgentFunctionApp, "_setup_executor_activity"): - with patch.object(AgentFunctionApp, "_setup_workflow_orchestration"): - app = AgentFunctionApp(workflow=mock_workflow) + with ( + patch.object(AgentFunctionApp, "_setup_executor_activity"), + patch.object(AgentFunctionApp, 
"_setup_workflow_orchestration"), + ): + app = AgentFunctionApp(workflow=mock_workflow) url = app._build_status_url("http://localhost:7071/", "instance-456") diff --git a/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/function_app.py b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/function_app.py index 83a583a351..4cb156f7b4 100644 --- a/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/function_app.py +++ b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/function_app.py @@ -86,7 +86,7 @@ class EmailPayload(BaseModel): email_content: str -def _build_client_kwargs() -> Dict[str, Any]: +def _build_client_kwargs() -> dict[str, Any]: endpoint = os.getenv(AZURE_OPENAI_ENDPOINT_ENV) if not endpoint: raise RuntimeError(f"{AZURE_OPENAI_ENDPOINT_ENV} environment variable is required.") @@ -95,7 +95,7 @@ def _build_client_kwargs() -> Dict[str, Any]: if not deployment: raise RuntimeError(f"{AZURE_OPENAI_DEPLOYMENT_ENV} environment variable is required.") - client_kwargs: Dict[str, Any] = { + client_kwargs: dict[str, Any] = { "endpoint": endpoint, "deployment_name": deployment, } From 105c15b85d3792eb12d620d34bfc43774b56ecce Mon Sep 17 00:00:00 2001 From: Ahmed Muhsin Date: Tue, 20 Jan 2026 16:47:53 -0600 Subject: [PATCH 15/18] use local orchestration variable for share state instead of a durable entity --- .../__init__.py | 2 - .../agent_framework_azurefunctions/_app.py | 25 +- .../_shared_state.py | 243 -------------- .../_workflow.py | 20 +- .../packages/azurefunctions/tests/test_app.py | 26 -- .../azurefunctions/tests/test_shared_state.py | 296 ------------------ .../09_workflow_shared_state/README.md | 4 +- .../10_workflow_no_shared_state/README.md | 6 +- .../function_app.py | 2 +- .../11_workflow_parallel/function_app.py | 5 +- 10 files changed, 19 insertions(+), 610 deletions(-) delete mode 100644 
python/packages/azurefunctions/agent_framework_azurefunctions/_shared_state.py delete mode 100644 python/packages/azurefunctions/tests/test_shared_state.py diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/__init__.py b/python/packages/azurefunctions/agent_framework_azurefunctions/__init__.py index 5f1894bec1..e5be2aa36e 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/__init__.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/__init__.py @@ -5,7 +5,6 @@ from agent_framework_durabletask import AgentCallbackContext, AgentResponseCallbackProtocol, DurableAIAgent from ._app import AgentFunctionApp -from ._shared_state import DurableSharedState try: __version__ = importlib.metadata.version(__name__) @@ -17,6 +16,5 @@ "AgentFunctionApp", "AgentResponseCallbackProtocol", "DurableAIAgent", - "DurableSharedState", "__version__", ] diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py index 07021a9507..828ece1617 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py @@ -40,7 +40,6 @@ from ._entities import create_agent_entity from ._errors import IncomingRequestError from ._orchestration import AgentOrchestrationContextType, AgentTask, AzureFunctionsAgentExecutor -from ._shared_state import SHARED_STATE_ENTITY_NAME, DurableSharedState, create_shared_state_entity_function from ._utils import CapturingRunnerContext, deserialize_value, reconstruct_message_for_handler, serialize_message from ._workflow import run_workflow_orchestrator @@ -156,7 +155,6 @@ def my_orchestration(context): max_poll_retries: Maximum polling attempts when waiting for responses poll_interval_seconds: Delay (seconds) between polling attempts workflow: Optional Workflow instance for workflow orchestration - enable_shared_state: 
Whether SharedState entity is enabled for workflows """ _agent_metadata: dict[str, AgentMetadata] @@ -164,7 +162,6 @@ def my_orchestration(context): enable_http_endpoints: bool enable_mcp_tool_trigger: bool workflow: Workflow | None - enable_shared_state: bool def __init__( self, @@ -177,7 +174,6 @@ def __init__( poll_interval_seconds: float = DEFAULT_POLL_INTERVAL_SECONDS, enable_mcp_tool_trigger: bool = False, default_callback: AgentResponseCallbackProtocol | None = None, - enable_shared_state: bool = True, ): """Initialize the AgentFunctionApp. @@ -193,7 +189,6 @@ def __init__( :param poll_interval_seconds: Delay in seconds between polling attempts. Defaults to ``DEFAULT_POLL_INTERVAL_SECONDS``. :param default_callback: Optional callback invoked for agents without specific callbacks. - :param enable_shared_state: Enable SharedState entity for workflow executors (default: ``True``). :note: If no agents are provided, they can be added later using :meth:`add_agent`. """ @@ -209,7 +204,6 @@ def __init__( self.enable_mcp_tool_trigger = enable_mcp_tool_trigger self.default_callback = default_callback self.workflow = workflow - self.enable_shared_state = enable_shared_state try: retries = int(max_poll_retries) @@ -340,21 +334,8 @@ async def run() -> dict[str, Any]: result = asyncio.run(run()) return json_module.dumps(result) - def _setup_shared_state_entity(self) -> None: - """Register the SharedState durable entity for workflow state sharing.""" - entity_function = create_shared_state_entity_function() - entity_function.__name__ = SHARED_STATE_ENTITY_NAME - self.entity_trigger(context_name="context", entity_name=SHARED_STATE_ENTITY_NAME)(entity_function) - logger.debug(f"[AgentFunctionApp] Registered SharedState entity: {SHARED_STATE_ENTITY_NAME}") - def _setup_workflow_orchestration(self) -> None: """Register the workflow orchestration and related HTTP endpoints.""" - # Only register the SharedState entity if enabled - if self.enable_shared_state: - 
self._setup_shared_state_entity() - - # Capture enable_shared_state for use in nested function - enable_shared_state = self.enable_shared_state @self.orchestration_trigger(context_name="context") def workflow_orchestrator(context: df.DurableOrchestrationContext): # type: ignore[type-arg] @@ -364,10 +345,8 @@ def workflow_orchestrator(context: df.DurableOrchestrationContext): # type: ign # Ensure input is a string for the agent initial_message = json.dumps(input_data) if isinstance(input_data, (dict, list)) else str(input_data) - # Only create DurableSharedState if enabled to avoid extra entity calls - shared_state = None - if enable_shared_state: - shared_state = DurableSharedState(context, context.instance_id) + # Create local shared state dict for cross-executor state sharing + shared_state: dict[str, Any] = {} outputs = yield from run_workflow_orchestrator(context, self.workflow, initial_message, shared_state) # Durable Functions runtime extracts return value from StopIteration diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_shared_state.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_shared_state.py deleted file mode 100644 index f51bed842a..0000000000 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/_shared_state.py +++ /dev/null @@ -1,243 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -"""Durable Shared State for Workflow Execution. - -This module provides a durable SharedState implementation that allows executors -in a workflow to share state across the execution lifecycle. Unlike MAF's in-memory -SharedState which uses async locks, this implementation is backed by Azure Durable -Entities for durability and replay-safety. 
- -Key features: -- DurableSharedState: Orchestration-side wrapper for shared state operations -- SharedStateEntity: Entity function that stores the shared state -- Compatible API with agent_framework SharedState - -Usage: - In run_workflow_orchestrator: - shared_state = DurableSharedState(context, session_id) - value = yield shared_state.get("my_key") - yield shared_state.set("my_key", "my_value") -""" - -from __future__ import annotations - -import logging -from collections.abc import Generator -from dataclasses import dataclass, field -from typing import Any - -from azure.durable_functions import DurableOrchestrationContext, EntityId - -logger = logging.getLogger(__name__) - -# Entity name for SharedState -SHARED_STATE_ENTITY_NAME = "SharedStateEntity" - - -@dataclass -class SharedStateData: - """The underlying data structure for shared state. - - This is stored as the state of the SharedStateEntity. - """ - - state: dict[str, Any] = field(default_factory=dict) - - def to_dict(self) -> dict[str, Any]: - """Serialize to dictionary for entity storage.""" - return {"state": self.state} - - @classmethod - def from_dict(cls, data: dict[str, Any] | None) -> SharedStateData: - """Deserialize from entity state.""" - if data is None: - return cls() - return cls(state=data.get("state", {})) - - -class DurableSharedState: - """Orchestration-side wrapper for shared state operations. - - This class provides a generator-based API compatible with Durable Functions - orchestrations. Each operation (get, set, has, delete) returns a generator - that yields entity calls. - - The shared state is scoped to a workflow session using the session_id as - the entity instance id. 
- - Example: - shared_state = DurableSharedState(context, "session-123") - - # Get a value - value = yield from shared_state.get("my_key") - - # Set a value - yield from shared_state.set("my_key", {"data": "value"}) - - # Check if key exists - exists = yield from shared_state.has("my_key") - - # Delete a key - yield from shared_state.delete("my_key") - - # Get all state - all_state = yield from shared_state.get_all() - """ - - def __init__(self, context: DurableOrchestrationContext, session_id: str) -> None: - """Initialize the shared state wrapper. - - Args: - context: The Durable Functions orchestration context - session_id: The session identifier used as the entity instance id - """ - self._context = context - self._session_id = session_id - self._entity_id = EntityId(SHARED_STATE_ENTITY_NAME, session_id) - - @property - def entity_id(self) -> EntityId: - """Get the entity ID for this shared state instance.""" - return self._entity_id - - def get(self, key: str, default: Any = None) -> Generator[Any, Any, Any]: - """Get a value from the shared state. - - Args: - key: The key to retrieve - default: Default value if key doesn't exist - - Returns: - Generator that yields the value or default - """ - result = yield self._context.call_entity(self._entity_id, "get", {"key": key, "default": default}) - # Durable Functions runtime extracts return value from StopIteration - return result # noqa: B901 - - def set(self, key: str, value: Any) -> Generator[Any, Any, None]: - """Set a value in the shared state. - - Args: - key: The key to set - value: The value to store (must be JSON serializable) - """ - yield self._context.call_entity(self._entity_id, "set", {"key": key, "value": value}) - - def has(self, key: str) -> Generator[Any, Any, bool]: - """Check if a key exists in the shared state. 
- - Args: - key: The key to check - - Returns: - Generator that yields True if key exists, False otherwise - """ - result = yield self._context.call_entity(self._entity_id, "has", {"key": key}) - # Durable Functions runtime extracts return value from StopIteration - return result # noqa: B901 - - def delete(self, key: str) -> Generator[Any, Any, bool]: - """Delete a key from the shared state. - - Args: - key: The key to delete - - Returns: - Generator that yields True if key was deleted, False if it didn't exist - """ - result = yield self._context.call_entity(self._entity_id, "delete", {"key": key}) - # Durable Functions runtime extracts return value from StopIteration - return result # noqa: B901 - - def get_all(self) -> Generator[Any, Any, dict[str, Any]]: - """Get all shared state as a dictionary. - - Returns: - Generator that yields the complete state dictionary - """ - result = yield self._context.call_entity(self._entity_id, "get_all", None) - # Durable Functions runtime extracts return value from StopIteration - return result if result else {} # noqa: B901 - - def update(self, updates: dict[str, Any]) -> Generator[Any, Any, None]: - """Update multiple keys at once. - - Args: - updates: Dictionary of key-value pairs to update - """ - yield self._context.call_entity(self._entity_id, "update", {"updates": updates}) - - def clear(self) -> Generator[Any, Any, None]: - """Clear all shared state.""" - yield self._context.call_entity(self._entity_id, "clear", None) - - -def create_shared_state_entity_function(): - """Create the entity function for SharedState. 
- - This function handles all shared state operations: - - get: Retrieve a value by key - - set: Store a value by key - - has: Check if a key exists - - delete: Remove a key - - get_all: Get the complete state dictionary - - update: Update multiple keys at once - - clear: Clear all state - - Returns: - The entity function to be registered with the Durable Functions app - """ - - def shared_state_entity(context): - """Entity function for SharedState storage.""" - # Get or initialize state - current_state = context.get_state(lambda: {"state": {}}) - state_data = SharedStateData.from_dict(current_state) - - operation = context.operation_name - operation_input = context.get_input() - - logger.debug("[SharedState] Operation: %s, Input: %s", operation, operation_input) - - if operation == "get": - key = operation_input.get("key") - default = operation_input.get("default") - result = state_data.state.get(key, default) - context.set_result(result) - - elif operation == "set": - key = operation_input.get("key") - value = operation_input.get("value") - state_data.state[key] = value - context.set_state(state_data.to_dict()) - - elif operation == "has": - key = operation_input.get("key") - result = key in state_data.state - context.set_result(result) - - elif operation == "delete": - key = operation_input.get("key") - if key in state_data.state: - del state_data.state[key] - context.set_state(state_data.to_dict()) - context.set_result(True) - else: - context.set_result(False) - - elif operation == "get_all": - context.set_result(state_data.state.copy()) - - elif operation == "update": - updates = operation_input.get("updates", {}) - state_data.state.update(updates) - context.set_state(state_data.to_dict()) - - elif operation == "clear": - state_data.state.clear() - context.set_state(state_data.to_dict()) - - else: - logger.warning("[SharedState] Unknown operation: %s", operation) - - return shared_state_entity diff --git 
a/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py index 8efed6c922..21ba4ef8cb 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py @@ -37,7 +37,6 @@ from azure.durable_functions import DurableOrchestrationContext from ._orchestration import AzureFunctionsAgentExecutor -from ._shared_state import DurableSharedState from ._utils import deserialize_value, serialize_message logger = logging.getLogger(__name__) @@ -144,7 +143,7 @@ def run_workflow_orchestrator( context: DurableOrchestrationContext, workflow: Workflow, initial_message: Any, - shared_state: DurableSharedState | None = None, + shared_state: dict[str, Any] | None = None, ): """Traverse and execute the workflow graph using Durable Functions. @@ -156,7 +155,7 @@ def run_workflow_orchestrator( - SwitchCaseEdgeGroup: First matching condition wins - FanOutEdgeGroup: Broadcast to multiple targets - **executed in parallel** - FanInEdgeGroup: Aggregates messages from multiple sources before delivery - - SharedState: Durable shared state accessible to all executors + - SharedState: Local shared state accessible to all executors Execution model: - Different executors pending in the same iteration run in parallel @@ -172,7 +171,7 @@ def run_workflow_orchestrator( context: The Durable Functions orchestration context workflow: The MAF Workflow instance to execute initial_message: The initial message to send to the start executor - shared_state: Optional DurableSharedState for cross-executor state sharing + shared_state: Optional dict for cross-executor state sharing (local to orchestration) Returns: List of workflow outputs collected from executor activities @@ -324,10 +323,9 @@ def run_workflow_orchestrator( if activity_executor_tasks: logger.debug("Processing %d activity executors in parallel", 
len(activity_executor_tasks)) - # Get shared state snapshot once before all activity executions (if shared_state is available) - shared_state_snapshot: dict[str, Any] | None = None - if shared_state: - shared_state_snapshot = yield from shared_state.get_all() + # Use shared state dict directly (no entity calls needed) + shared_state_snapshot: dict[str, Any] | None = shared_state + if shared_state_snapshot: logger.debug("[workflow] SharedState snapshot for activities: %s", shared_state_snapshot) # Create all activity tasks without yielding (to enable parallel execution) @@ -364,20 +362,20 @@ def run_workflow_orchestrator( logger.debug("Activity for executor %s returned", executor_id) # Apply any shared state updates from the activity result - if shared_state and result: + if shared_state is not None and result: if result.get("shared_state_updates"): updates = result["shared_state_updates"] logger.debug( "[workflow] Applying SharedState updates from activity %s: %s", executor_id, updates ) - yield from shared_state.update(updates) + shared_state.update(updates) if result.get("shared_state_deletes"): deletes = result["shared_state_deletes"] logger.debug( "[workflow] Applying SharedState deletes from activity %s: %s", executor_id, deletes ) for key in deletes: - yield from shared_state.delete(key) + shared_state.pop(key, None) # Collect outputs if result and result.get("outputs"): diff --git a/python/packages/azurefunctions/tests/test_app.py b/python/packages/azurefunctions/tests/test_app.py index ac461cc233..827e3374db 100644 --- a/python/packages/azurefunctions/tests/test_app.py +++ b/python/packages/azurefunctions/tests/test_app.py @@ -1160,32 +1160,6 @@ def test_init_with_workflow_calls_setup_methods(self) -> None: setup_exec.assert_called_once() setup_orch.assert_called_once() - def test_init_shared_state_enabled_by_default(self) -> None: - """Test that SharedState is enabled by default.""" - mock_workflow = Mock() - mock_workflow.executors = {} - - with ( - 
patch.object(AgentFunctionApp, "_setup_executor_activity"), - patch.object(AgentFunctionApp, "_setup_workflow_orchestration"), - ): - app = AgentFunctionApp(workflow=mock_workflow) - - assert app.enable_shared_state is True - - def test_init_shared_state_can_be_disabled(self) -> None: - """Test that SharedState can be disabled.""" - mock_workflow = Mock() - mock_workflow.executors = {} - - with ( - patch.object(AgentFunctionApp, "_setup_executor_activity"), - patch.object(AgentFunctionApp, "_setup_workflow_orchestration"), - ): - app = AgentFunctionApp(workflow=mock_workflow, enable_shared_state=False) - - assert app.enable_shared_state is False - def test_init_without_workflow_does_not_call_workflow_setup(self) -> None: """Test that workflow setup is not called when no workflow provided.""" mock_agent = Mock() diff --git a/python/packages/azurefunctions/tests/test_shared_state.py b/python/packages/azurefunctions/tests/test_shared_state.py deleted file mode 100644 index 5b1bd0bf79..0000000000 --- a/python/packages/azurefunctions/tests/test_shared_state.py +++ /dev/null @@ -1,296 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. 
- -"""Unit tests for DurableSharedState and SharedState entity.""" - -from unittest.mock import Mock - -import pytest -from azure.durable_functions import EntityId - -from agent_framework_azurefunctions._shared_state import ( - SHARED_STATE_ENTITY_NAME, - DurableSharedState, - SharedStateData, - create_shared_state_entity_function, -) - - -class TestSharedStateData: - """Test suite for SharedStateData dataclass.""" - - def test_default_initialization(self) -> None: - """Test default initialization creates empty state.""" - data = SharedStateData() - - assert data.state == {} - - def test_initialization_with_state(self) -> None: - """Test initialization with provided state.""" - data = SharedStateData(state={"key": "value"}) - - assert data.state == {"key": "value"} - - def test_to_dict(self) -> None: - """Test serialization to dictionary.""" - data = SharedStateData(state={"a": 1, "b": 2}) - - result = data.to_dict() - - assert result == {"state": {"a": 1, "b": 2}} - - def test_from_dict_with_none(self) -> None: - """Test deserialization from None.""" - result = SharedStateData.from_dict(None) - - assert result.state == {} - - def test_from_dict_with_empty_dict(self) -> None: - """Test deserialization from empty dict.""" - result = SharedStateData.from_dict({}) - - assert result.state == {} - - def test_from_dict_with_state(self) -> None: - """Test deserialization from dict with state.""" - result = SharedStateData.from_dict({"state": {"x": 10, "y": 20}}) - - assert result.state == {"x": 10, "y": 20} - - -class TestDurableSharedState: - """Test suite for DurableSharedState orchestration wrapper.""" - - @pytest.fixture - def mock_context(self) -> Mock: - """Create a mock DurableOrchestrationContext.""" - context = Mock() - context.call_entity = Mock(return_value="mocked_result") - return context - - @pytest.fixture - def shared_state(self, mock_context: Mock) -> DurableSharedState: - """Create a DurableSharedState instance for testing.""" - return 
DurableSharedState(mock_context, "test-session-123") - - def test_initialization(self, mock_context: Mock) -> None: - """Test DurableSharedState initialization.""" - state = DurableSharedState(mock_context, "my-session") - - assert state._context == mock_context - assert state._session_id == "my-session" - assert state._entity_id.name == SHARED_STATE_ENTITY_NAME - assert state._entity_id.key == "my-session" - - def test_entity_id_property(self, shared_state: DurableSharedState) -> None: - """Test entity_id property returns correct EntityId.""" - entity_id = shared_state.entity_id - - assert isinstance(entity_id, EntityId) - assert entity_id.name == SHARED_STATE_ENTITY_NAME - assert entity_id.key == "test-session-123" - - def test_get_generator_yields_entity_call(self, shared_state: DurableSharedState, mock_context: Mock) -> None: - """Test get() yields a call_entity operation.""" - gen = shared_state.get("my_key", default="default_val") - - # The generator should yield the entity call - next(gen) - - # Verify the call was made with correct parameters - mock_context.call_entity.assert_called_once_with( - shared_state._entity_id, "get", {"key": "my_key", "default": "default_val"} - ) - - def test_set_generator_yields_entity_call(self, shared_state: DurableSharedState, mock_context: Mock) -> None: - """Test set() yields a call_entity operation.""" - gen = shared_state.set("my_key", {"data": "value"}) - - # Consume the generator - next(gen) - - mock_context.call_entity.assert_called_once_with( - shared_state._entity_id, "set", {"key": "my_key", "value": {"data": "value"}} - ) - - def test_has_generator_yields_entity_call(self, shared_state: DurableSharedState, mock_context: Mock) -> None: - """Test has() yields a call_entity operation.""" - gen = shared_state.has("check_key") - - next(gen) - - mock_context.call_entity.assert_called_once_with(shared_state._entity_id, "has", {"key": "check_key"}) - - def test_delete_generator_yields_entity_call(self, shared_state: 
DurableSharedState, mock_context: Mock) -> None: - """Test delete() yields a call_entity operation.""" - gen = shared_state.delete("remove_key") - - next(gen) - - mock_context.call_entity.assert_called_once_with(shared_state._entity_id, "delete", {"key": "remove_key"}) - - def test_get_all_generator_yields_entity_call(self, shared_state: DurableSharedState, mock_context: Mock) -> None: - """Test get_all() yields a call_entity operation.""" - gen = shared_state.get_all() - - next(gen) - - mock_context.call_entity.assert_called_once_with(shared_state._entity_id, "get_all", None) - - def test_update_generator_yields_entity_call(self, shared_state: DurableSharedState, mock_context: Mock) -> None: - """Test update() yields a call_entity operation.""" - updates = {"key1": "val1", "key2": "val2"} - gen = shared_state.update(updates) - - next(gen) - - mock_context.call_entity.assert_called_once_with(shared_state._entity_id, "update", {"updates": updates}) - - def test_clear_generator_yields_entity_call(self, shared_state: DurableSharedState, mock_context: Mock) -> None: - """Test clear() yields a call_entity operation.""" - gen = shared_state.clear() - - next(gen) - - mock_context.call_entity.assert_called_once_with(shared_state._entity_id, "clear", None) - - -class TestSharedStateEntityFunction: - """Test suite for the SharedState entity function.""" - - @pytest.fixture - def entity_function(self): - """Create the entity function.""" - return create_shared_state_entity_function() - - @pytest.fixture - def mock_entity_context(self) -> Mock: - """Create a mock entity context.""" - context = Mock() - context.get_state = Mock(return_value={"state": {}}) - context.set_state = Mock() - context.set_result = Mock() - return context - - def test_get_operation_returns_value(self, entity_function, mock_entity_context: Mock) -> None: - """Test get operation returns the stored value.""" - mock_entity_context.get_state.return_value = {"state": {"my_key": "my_value"}} - 
mock_entity_context.operation_name = "get" - mock_entity_context.get_input.return_value = {"key": "my_key", "default": None} - - entity_function(mock_entity_context) - - mock_entity_context.set_result.assert_called_once_with("my_value") - - def test_get_operation_returns_default_when_key_missing(self, entity_function, mock_entity_context: Mock) -> None: - """Test get operation returns default when key doesn't exist.""" - mock_entity_context.get_state.return_value = {"state": {}} - mock_entity_context.operation_name = "get" - mock_entity_context.get_input.return_value = {"key": "missing_key", "default": "fallback"} - - entity_function(mock_entity_context) - - mock_entity_context.set_result.assert_called_once_with("fallback") - - def test_set_operation_stores_value(self, entity_function, mock_entity_context: Mock) -> None: - """Test set operation stores a value.""" - mock_entity_context.get_state.return_value = {"state": {}} - mock_entity_context.operation_name = "set" - mock_entity_context.get_input.return_value = {"key": "new_key", "value": {"data": 123}} - - entity_function(mock_entity_context) - - mock_entity_context.set_state.assert_called_once() - saved_state = mock_entity_context.set_state.call_args[0][0] - assert saved_state["state"]["new_key"] == {"data": 123} - - def test_has_operation_returns_true_when_exists(self, entity_function, mock_entity_context: Mock) -> None: - """Test has operation returns True when key exists.""" - mock_entity_context.get_state.return_value = {"state": {"existing_key": "value"}} - mock_entity_context.operation_name = "has" - mock_entity_context.get_input.return_value = {"key": "existing_key"} - - entity_function(mock_entity_context) - - mock_entity_context.set_result.assert_called_once_with(True) - - def test_has_operation_returns_false_when_missing(self, entity_function, mock_entity_context: Mock) -> None: - """Test has operation returns False when key doesn't exist.""" - mock_entity_context.get_state.return_value = {"state": 
{}} - mock_entity_context.operation_name = "has" - mock_entity_context.get_input.return_value = {"key": "missing_key"} - - entity_function(mock_entity_context) - - mock_entity_context.set_result.assert_called_once_with(False) - - def test_delete_operation_removes_key(self, entity_function, mock_entity_context: Mock) -> None: - """Test delete operation removes a key and returns True.""" - mock_entity_context.get_state.return_value = {"state": {"to_delete": "value"}} - mock_entity_context.operation_name = "delete" - mock_entity_context.get_input.return_value = {"key": "to_delete"} - - entity_function(mock_entity_context) - - mock_entity_context.set_result.assert_called_once_with(True) - saved_state = mock_entity_context.set_state.call_args[0][0] - assert "to_delete" not in saved_state["state"] - - def test_delete_operation_returns_false_when_missing(self, entity_function, mock_entity_context: Mock) -> None: - """Test delete operation returns False when key doesn't exist.""" - mock_entity_context.get_state.return_value = {"state": {}} - mock_entity_context.operation_name = "delete" - mock_entity_context.get_input.return_value = {"key": "nonexistent"} - - entity_function(mock_entity_context) - - mock_entity_context.set_result.assert_called_once_with(False) - mock_entity_context.set_state.assert_not_called() - - def test_get_all_operation_returns_all_state(self, entity_function, mock_entity_context: Mock) -> None: - """Test get_all operation returns complete state.""" - state_data = {"key1": "val1", "key2": "val2"} - mock_entity_context.get_state.return_value = {"state": state_data} - mock_entity_context.operation_name = "get_all" - mock_entity_context.get_input.return_value = None - - entity_function(mock_entity_context) - - mock_entity_context.set_result.assert_called_once() - result = mock_entity_context.set_result.call_args[0][0] - assert result == state_data - - def test_update_operation_merges_updates(self, entity_function, mock_entity_context: Mock) -> None: - 
"""Test update operation merges multiple key-value pairs.""" - mock_entity_context.get_state.return_value = {"state": {"existing": "old"}} - mock_entity_context.operation_name = "update" - mock_entity_context.get_input.return_value = {"updates": {"new1": "val1", "new2": "val2"}} - - entity_function(mock_entity_context) - - saved_state = mock_entity_context.set_state.call_args[0][0] - assert saved_state["state"]["existing"] == "old" - assert saved_state["state"]["new1"] == "val1" - assert saved_state["state"]["new2"] == "val2" - - def test_clear_operation_removes_all_state(self, entity_function, mock_entity_context: Mock) -> None: - """Test clear operation removes all state.""" - mock_entity_context.get_state.return_value = {"state": {"key1": "val1", "key2": "val2"}} - mock_entity_context.operation_name = "clear" - mock_entity_context.get_input.return_value = None - - entity_function(mock_entity_context) - - saved_state = mock_entity_context.set_state.call_args[0][0] - assert saved_state["state"] == {} - - def test_unknown_operation_is_handled(self, entity_function, mock_entity_context: Mock) -> None: - """Test unknown operation doesn't crash.""" - mock_entity_context.get_state.return_value = {"state": {}} - mock_entity_context.operation_name = "unknown_op" - mock_entity_context.get_input.return_value = {} - - # Should not raise - entity_function(mock_entity_context) - - # No result should be set for unknown operations - mock_entity_context.set_result.assert_not_called() diff --git a/python/samples/getting_started/azure_functions/09_workflow_shared_state/README.md b/python/samples/getting_started/azure_functions/09_workflow_shared_state/README.md index 0385f65d98..bd6e33c916 100644 --- a/python/samples/getting_started/azure_functions/09_workflow_shared_state/README.md +++ b/python/samples/getting_started/azure_functions/09_workflow_shared_state/README.md @@ -4,7 +4,7 @@ This sample demonstrates running **Agent Framework workflows with SharedState** ## Overview -This 
sample shows how to use `AgentFunctionApp` to execute a `WorkflowBuilder` workflow that uses SharedState to pass data between executors. SharedState is backed by a Durable Entity for persistence across workflow steps. +This sample shows how to use `AgentFunctionApp` to execute a `WorkflowBuilder` workflow that uses SharedState to pass data between executors. SharedState is a local dictionary maintained by the orchestration that allows executors to share data across workflow steps. ## What This Sample Demonstrates @@ -95,5 +95,5 @@ Email sent: Hi, Thank you for the reminder about the sprint planning meeting tom ## Related Samples -- `10_workflow_no_shared_state` - Workflow execution without SharedState +- `10_workflow_no_shared_state` - Workflow execution without SharedState usage - `06_multi_agent_orchestration_conditionals` - Manual Durable Functions orchestration with agents diff --git a/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/README.md b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/README.md index f027e40596..f5f77f3c91 100644 --- a/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/README.md +++ b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/README.md @@ -1,4 +1,4 @@ -# Workflow Execution Sample (No SharedState) +# Workflow Execution Sample This sample demonstrates running **Agent Framework workflows** in Azure Durable Functions without using SharedState. @@ -8,7 +8,7 @@ This sample shows how to use `AgentFunctionApp` with a `WorkflowBuilder` workflo ```python workflow = _create_workflow() # Build the workflow graph -app = AgentFunctionApp(workflow=workflow, enable_shared_state=False) +app = AgentFunctionApp(workflow=workflow) ``` This approach provides durable, fault-tolerant workflow execution with minimal code. 
@@ -124,7 +124,7 @@ workflow = ( ### Registering with AgentFunctionApp ```python -app = AgentFunctionApp(workflow=workflow, enable_health_check=True, enable_shared_state=False) +app = AgentFunctionApp(workflow=workflow, enable_health_check=True) ``` ### Executor Classes diff --git a/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/function_app.py b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/function_app.py index 4cb156f7b4..8bf1e063e9 100644 --- a/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/function_app.py +++ b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/function_app.py @@ -198,7 +198,7 @@ def launch(durable: bool = True) -> AgentFunctionApp | None: if durable: # Initialize app workflow = _create_workflow() - app = AgentFunctionApp(workflow=workflow, enable_health_check=True, enable_shared_state=False) + app = AgentFunctionApp(workflow=workflow, enable_health_check=True) return app else: # Launch the spam detection workflow in DevUI diff --git a/python/samples/getting_started/azure_functions/11_workflow_parallel/function_app.py b/python/samples/getting_started/azure_functions/11_workflow_parallel/function_app.py index e19da877ac..1c626bae20 100644 --- a/python/samples/getting_started/azure_functions/11_workflow_parallel/function_app.py +++ b/python/samples/getting_started/azure_functions/11_workflow_parallel/function_app.py @@ -496,8 +496,7 @@ def launch(durable: bool = True) -> AgentFunctionApp | None: workflow = _create_workflow() app = AgentFunctionApp( workflow=workflow, - enable_health_check=True, - enable_shared_state=False, + enable_health_check=True, ) return app else: @@ -522,4 +521,4 @@ def launch(durable: bool = True) -> AgentFunctionApp | None: return None -app = launch(durable=False) +app = launch(durable=True) From 3f14e038d43f97087d39e5f2c3d71d39665112f5 Mon Sep 17 00:00:00 2001 From: Ahmed Muhsin Date: Tue, 20 Jan 2026 17:11:59 
-0600 Subject: [PATCH 16/18] refactor worfklow handling logic --- .../_workflow.py | 685 +++++++++++------- 1 file changed, 413 insertions(+), 272 deletions(-) diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py index 21ba4ef8cb..43c2d7977e 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py @@ -16,6 +16,9 @@ import json import logging +from collections import defaultdict +from dataclasses import dataclass +from enum import Enum from typing import Any from agent_framework import ( @@ -42,6 +45,44 @@ logger = logging.getLogger(__name__) +# ============================================================================ +# Task Types and Data Structures +# ============================================================================ + + +class TaskType(Enum): + """Type of executor task.""" + + AGENT = "agent" + ACTIVITY = "activity" + + +@dataclass +class TaskMetadata: + """Metadata for a pending task.""" + + executor_id: str + message: Any + source_executor_id: str + task_type: TaskType + remaining_messages: list[tuple[str, Any, str]] | None = None # For agents with multiple messages + + +@dataclass +class ExecutorResult: + """Result from executing an agent or activity.""" + + executor_id: str + output_message: AgentExecutorResponse | None + activity_result: dict[str, Any] | None + task_type: TaskType + + +# ============================================================================ +# Routing Functions +# ============================================================================ + + def route_message_through_edge_groups( edge_groups: list[EdgeGroup], source_id: str, @@ -139,6 +180,262 @@ def build_agent_executor_response( ) +# ============================================================================ +# Task Preparation Helpers +# 
============================================================================ + + +def _prepare_agent_task( + context: DurableOrchestrationContext, + executor_id: str, + message: Any, +) -> Any: + """Prepare an agent task for execution. + + Args: + context: The Durable Functions orchestration context + executor_id: The agent executor ID (agent name) + message: The input message for the agent + + Returns: + A task that can be yielded to execute the agent + """ + message_content = _extract_message_content(message) + session_id = AgentSessionId(name=executor_id, key=context.instance_id) + thread = DurableAgentThread(session_id=session_id) + + az_executor = AzureFunctionsAgentExecutor(context) + agent = DurableAIAgent(az_executor, executor_id) + return agent.run(message_content, thread=thread) + + +def _prepare_activity_task( + context: DurableOrchestrationContext, + executor_id: str, + message: Any, + source_executor_id: str, + shared_state_snapshot: dict[str, Any] | None, +) -> Any: + """Prepare an activity task for execution. 
+ + Args: + context: The Durable Functions orchestration context + executor_id: The activity executor ID + message: The input message for the activity + source_executor_id: The ID of the executor that sent the message + shared_state_snapshot: Current shared state snapshot + + Returns: + A task that can be yielded to execute the activity + """ + activity_input = { + "executor_id": executor_id, + "message": serialize_message(message), + "shared_state_snapshot": shared_state_snapshot, + "source_executor_ids": [source_executor_id], + } + activity_input_json = json.dumps(activity_input) + return context.call_activity("ExecuteExecutor", activity_input_json) + + +# ============================================================================ +# Result Processing Helpers +# ============================================================================ + + +def _process_agent_response( + agent_response: AgentRunResponse, + executor_id: str, + message: Any, +) -> ExecutorResult: + """Process an agent response into an ExecutorResult. 
+ + Args: + agent_response: The response from the agent + executor_id: The agent executor ID + message: The original input message + + Returns: + ExecutorResult containing the processed response + """ + response_text = agent_response.text if agent_response else None + structured_response = None + + if agent_response and agent_response.value is not None: + if hasattr(agent_response.value, "model_dump"): + structured_response = agent_response.value.model_dump() + elif isinstance(agent_response.value, dict): + structured_response = agent_response.value + + output_message = build_agent_executor_response( + executor_id=executor_id, + response_text=response_text, + structured_response=structured_response, + previous_message=message, + ) + + return ExecutorResult( + executor_id=executor_id, + output_message=output_message, + activity_result=None, + task_type=TaskType.AGENT, + ) + + +def _process_activity_result( + result_json: str | None, + executor_id: str, + shared_state: dict[str, Any] | None, + workflow_outputs: list[Any], +) -> ExecutorResult: + """Process an activity result and apply shared state updates. 
+ + Args: + result_json: The JSON result from the activity + executor_id: The activity executor ID + shared_state: The shared state dict to update (mutated in place) + workflow_outputs: List to append outputs to (mutated in place) + + Returns: + ExecutorResult containing the processed result + """ + result = json.loads(result_json) if result_json else None + + # Apply shared state updates + if shared_state is not None and result: + if result.get("shared_state_updates"): + updates = result["shared_state_updates"] + logger.debug("[workflow] Applying SharedState updates from %s: %s", executor_id, updates) + shared_state.update(updates) + if result.get("shared_state_deletes"): + deletes = result["shared_state_deletes"] + logger.debug("[workflow] Applying SharedState deletes from %s: %s", executor_id, deletes) + for key in deletes: + shared_state.pop(key, None) + + # Collect outputs + if result and result.get("outputs"): + workflow_outputs.extend(result["outputs"]) + + return ExecutorResult( + executor_id=executor_id, + output_message=None, + activity_result=result, + task_type=TaskType.ACTIVITY, + ) + + +# ============================================================================ +# Routing Helpers +# ============================================================================ + + +def _route_result_messages( + result: ExecutorResult, + workflow: Workflow, + next_pending_messages: dict[str, list[tuple[Any, str]]], + fan_in_pending: dict[str, dict[str, list[tuple[Any, str]]]], +) -> None: + """Route messages from an executor result to their targets. 
+ + Args: + result: The executor result containing messages to route + workflow: The workflow definition + next_pending_messages: Dict to accumulate next iteration's messages (mutated) + fan_in_pending: Dict tracking fan-in state (mutated) + """ + executor_id = result.executor_id + messages_to_route: list[tuple[Any, str | None]] = [] + + # Collect messages from agent response + if result.output_message: + messages_to_route.append((result.output_message, None)) + + # Collect sent_messages from activity results + if result.activity_result and result.activity_result.get("sent_messages"): + for msg_data in result.activity_result["sent_messages"]: + sent_msg = msg_data.get("message") + target_id = msg_data.get("target_id") + if sent_msg: + sent_msg = deserialize_value(sent_msg) + messages_to_route.append((sent_msg, target_id)) + + # Route each message + for msg_to_route, explicit_target in messages_to_route: + logger.debug("Routing output from %s", executor_id) + + # If explicit target specified, route directly + if explicit_target: + if explicit_target not in next_pending_messages: + next_pending_messages[explicit_target] = [] + next_pending_messages[explicit_target].append((msg_to_route, executor_id)) + logger.debug("Routed message from %s to explicit target %s", executor_id, explicit_target) + continue + + # Check for FanInEdgeGroup sources + for group in workflow.edge_groups: + if isinstance(group, FanInEdgeGroup) and executor_id in group.source_executor_ids: + fan_in_pending[group.id][executor_id].append((msg_to_route, executor_id)) + logger.debug("Accumulated message for FanIn group %s from %s", group.id, executor_id) + + # Use MAF's edge group routing for other edge types + targets = route_message_through_edge_groups(workflow.edge_groups, executor_id, msg_to_route) + + for target_id in targets: + logger.debug("Routing to %s", target_id) + if target_id not in next_pending_messages: + next_pending_messages[target_id] = [] + 
next_pending_messages[target_id].append((msg_to_route, executor_id)) + + +def _check_fan_in_ready( + workflow: Workflow, + fan_in_pending: dict[str, dict[str, list[tuple[Any, str]]]], + next_pending_messages: dict[str, list[tuple[Any, str]]], +) -> None: + """Check if any FanInEdgeGroups are ready and deliver their messages. + + Args: + workflow: The workflow definition + fan_in_pending: Dict tracking fan-in state (mutated - cleared when delivered) + next_pending_messages: Dict to add aggregated messages to (mutated) + """ + for group in workflow.edge_groups: + if not isinstance(group, FanInEdgeGroup): + continue + + pending_sources = fan_in_pending.get(group.id, {}) + + # Check if all sources have contributed at least one message + if not all(src in pending_sources and pending_sources[src] for src in group.source_executor_ids): + continue + + # Aggregate all messages into a single list + aggregated: list[Any] = [] + aggregated_sources: list[str] = [] + for src in group.source_executor_ids: + for msg, msg_source in pending_sources[src]: + aggregated.append(msg) + aggregated_sources.append(msg_source) + + target_id = group.target_executor_ids[0] + logger.debug("FanIn group %s ready, delivering %d messages to %s", group.id, len(aggregated), target_id) + + if target_id not in next_pending_messages: + next_pending_messages[target_id] = [] + + first_source = aggregated_sources[0] if aggregated_sources else "__fan_in__" + next_pending_messages[target_id].append((aggregated, first_source)) + + # Clear the pending sources for this group + fan_in_pending[group.id] = defaultdict(list) + + +# ============================================================================ +# Main Orchestrator +# ============================================================================ + + def run_workflow_orchestrator( context: DurableOrchestrationContext, workflow: Workflow, @@ -158,14 +455,9 @@ def run_workflow_orchestrator( - SharedState: Local shared state accessible to all executors 
Execution model: - - Different executors pending in the same iteration run in parallel - - Agent executors (entities): Different agents run in parallel; multiple messages - to the SAME agent are processed sequentially to maintain conversation coherence - - Standard executors (activities): All batched and executed in parallel using task_all() - - Note: When running in parallel with shared state, updates are applied - in order after all tasks complete. This may cause conflicts if multiple - executors modify the same state keys. + - All pending executors (agents AND activities) run in parallel via single task_all() + - Multiple messages to the SAME agent are processed sequentially for conversation coherence + - SharedState updates are applied in order after parallel tasks complete Args: context: The Durable Functions orchestration context @@ -176,296 +468,145 @@ def run_workflow_orchestrator( Returns: List of workflow outputs collected from executor activities """ - # pending_messages stores {target_executor_id: [(message, source_executor_id), ...]} - # This allows executors to know who sent them each message pending_messages: dict[str, list[tuple[Any, str]]] = { workflow.start_executor_id: [(initial_message, "__workflow_start__")] } - iteration = 0 - max_iterations = workflow.max_iterations workflow_outputs: list[Any] = [] + iteration = 0 - # Track pending sources for FanInEdgeGroups. - # Maps group_id to a dict of source_id to list of (message, source_executor_id) tuples. 
- fan_in_pending: dict[str, dict[str, list[tuple[Any, str]]]] = {} - - # Initialize fan-in tracking for all FanInEdgeGroups - for group in workflow.edge_groups: - if isinstance(group, FanInEdgeGroup): - fan_in_pending[group.id] = {} + # Track pending sources for FanInEdgeGroups using defaultdict for cleaner access + fan_in_pending: dict[str, dict[str, list[tuple[Any, str]]]] = { + group.id: defaultdict(list) for group in workflow.edge_groups if isinstance(group, FanInEdgeGroup) + } - while pending_messages and iteration < max_iterations: + while pending_messages and iteration < workflow.max_iterations: logger.debug("Orchestrator iteration %d", iteration) next_pending_messages: dict[str, list[tuple[Any, str]]] = {} - # Separate executors into agents (entities) and standard executors (activities) - # Agents must be processed sequentially due to entity semantics - # Activities can be processed in parallel - agent_executor_tasks: list[tuple[str, Any, str]] = [] # (executor_id, message, source_id) - activity_executor_tasks: list[tuple[str, Any, str]] = [] # (executor_id, message, source_id) - - for executor_id, messages_with_sources in pending_messages.items(): - executor = workflow.executors[executor_id] - for message, source_executor_id in messages_with_sources: - if isinstance(executor, AgentExecutor): - agent_executor_tasks.append((executor_id, message, source_executor_id)) + # Phase 1: Prepare all tasks (agents and activities unified) + all_tasks, task_metadata_list, remaining_agent_messages = _prepare_all_tasks( + context, workflow, pending_messages, shared_state + ) + + # Phase 2: Execute all tasks in parallel (single task_all for true parallelism) + all_results: list[ExecutorResult] = [] + if all_tasks: + logger.debug("Executing %d tasks in parallel (agents + activities)", len(all_tasks)) + raw_results = yield context.task_all(all_tasks) + logger.debug("All %d tasks completed", len(all_tasks)) + + # Process results based on task type + for idx, raw_result in 
enumerate(raw_results): + metadata = task_metadata_list[idx] + if metadata.task_type == TaskType.AGENT: + result = _process_agent_response(raw_result, metadata.executor_id, metadata.message) else: - activity_executor_tasks.append((executor_id, message, source_executor_id)) - - # Results collected from all executor types - # Structure: list of (executor_id, output_message, result_dict_or_none) - all_results: list[tuple[str, Any | None, dict[str, Any] | None]] = [] - - # Process Agent Executors (entities) in parallel when they are different agents - # Messages to the SAME agent are processed sequentially to maintain conversation coherence - if agent_executor_tasks: - # Group tasks by executor_id (agent_name) - same agent needs sequential processing - agent_groups: dict[str, list[tuple[str, Any, str]]] = {} - for executor_id, message, source_executor_id in agent_executor_tasks: - if executor_id not in agent_groups: - agent_groups[executor_id] = [] - agent_groups[executor_id].append((executor_id, message, source_executor_id)) - - # Process groups - if only one message per agent, can run all in parallel - # If multiple messages to same agent, need sequential within that agent - - # First pass: create tasks for the first message of each agent (parallel) - agent_tasks = [] - agent_task_metadata = [] # (executor_id, message, source_executor_id, remaining_messages) - - for executor_id, messages_list in agent_groups.items(): - first_msg = messages_list[0] - remaining = messages_list[1:] - - message = first_msg[1] - source_executor_id = first_msg[2] - - agent_name = executor_id - logger.debug("Preparing agent task for: %s", agent_name) - - message_content = _extract_message_content(message) - session_id = AgentSessionId(name=agent_name, key=context.instance_id) - thread = DurableAgentThread(session_id=session_id) - - az_executor = AzureFunctionsAgentExecutor(context) - agent = DurableAIAgent(az_executor, agent_name) - task = agent.run(message_content, thread=thread) - - 
agent_tasks.append(task) - agent_task_metadata.append((executor_id, message, source_executor_id, remaining)) - - # Execute first batch of agent tasks in parallel - if agent_tasks: - logger.debug("Executing %d agent tasks in parallel", len(agent_tasks)) - agent_responses = yield context.task_all(agent_tasks) - logger.debug("All %d agent tasks completed", len(agent_tasks)) - - # Process results and handle remaining messages for agents with multiple inputs - remaining_to_process: list[tuple[str, Any, str]] = [] - - for idx, agent_response in enumerate(agent_responses): - executor_id, message, source_executor_id, remaining = agent_task_metadata[idx] - logger.debug("Durable Entity %s returned: %s", executor_id, agent_response) - - # Build AgentExecutorResponse from the typed AgentRunResponse - response_text = agent_response.text if agent_response else None - structured_response = None - if agent_response and agent_response.value is not None: - if hasattr(agent_response.value, "model_dump"): - structured_response = agent_response.value.model_dump() - elif isinstance(agent_response.value, dict): - structured_response = agent_response.value - - output_message = build_agent_executor_response( - executor_id=executor_id, - response_text=response_text, - structured_response=structured_response, - previous_message=message, - ) + result = _process_activity_result(raw_result, metadata.executor_id, shared_state, workflow_outputs) + all_results.append(result) + + # Phase 3: Process sequential agent messages (for same-agent conversation coherence) + for executor_id, message, _source_executor_id in remaining_agent_messages: + logger.debug("Processing sequential message for agent: %s", executor_id) + task = _prepare_agent_task(context, executor_id, message) + agent_response: AgentRunResponse = yield task + logger.debug("Agent %s sequential response completed", executor_id) + + result = _process_agent_response(agent_response, executor_id, message) + all_results.append(result) + + # 
Phase 4: Route all results to next iteration + for result in all_results: + _route_result_messages(result, workflow, next_pending_messages, fan_in_pending) + + # Phase 5: Check if any FanInEdgeGroups are ready to deliver + _check_fan_in_ready(workflow, fan_in_pending, next_pending_messages) + + pending_messages = next_pending_messages + iteration += 1 + + # Durable Functions runtime extracts return value from StopIteration + return workflow_outputs # noqa: B901 + - all_results.append((executor_id, output_message, None)) +def _prepare_all_tasks( + context: DurableOrchestrationContext, + workflow: Workflow, + pending_messages: dict[str, list[tuple[Any, str]]], + shared_state: dict[str, Any] | None, +) -> tuple[list[Any], list[TaskMetadata], list[tuple[str, Any, str]]]: + """Prepare all pending tasks for parallel execution. - # Queue remaining messages for sequential processing - remaining_to_process.extend(remaining) + Groups agent messages by executor ID so that only the first message per agent + runs in the parallel batch. Additional messages to the same agent are returned + for sequential processing. 
- # Process remaining messages sequentially (these are additional messages to same agent) - for executor_id, message, _source_executor_id in remaining_to_process: - agent_name = executor_id - logger.debug("Processing additional message for agent: %s (sequential)", agent_name) + Args: + context: The Durable Functions orchestration context + workflow: The workflow definition + pending_messages: Messages pending for each executor + shared_state: Current shared state snapshot - message_content = _extract_message_content(message) - session_id = AgentSessionId(name=agent_name, key=context.instance_id) - thread = DurableAgentThread(session_id=session_id) + Returns: + Tuple of (tasks, metadata, remaining_agent_messages): + - tasks: List of tasks ready for task_all() + - metadata: TaskMetadata for each task (same order as tasks) + - remaining_agent_messages: Agent messages requiring sequential processing + """ + all_tasks: list[Any] = [] + task_metadata_list: list[TaskMetadata] = [] + remaining_agent_messages: list[tuple[str, Any, str]] = [] - az_executor = AzureFunctionsAgentExecutor(context) - agent = DurableAIAgent(az_executor, agent_name) - agent_response: AgentRunResponse = yield agent.run(message_content, thread=thread) - logger.debug("Durable Entity %s returned: %s", agent_name, agent_response) + # Group agent messages by executor_id for sequential handling of same-agent messages + agent_messages_by_executor: dict[str, list[tuple[str, Any, str]]] = defaultdict(list) - response_text = agent_response.text if agent_response else None - structured_response = None - if agent_response and agent_response.value is not None: - if hasattr(agent_response.value, "model_dump"): - structured_response = agent_response.value.model_dump() - elif isinstance(agent_response.value, dict): - structured_response = agent_response.value + # Categorize all pending messages + for executor_id, messages_with_sources in pending_messages.items(): + executor = workflow.executors[executor_id] + 
is_agent = isinstance(executor, AgentExecutor) - output_message = build_agent_executor_response( + for message, source_executor_id in messages_with_sources: + if is_agent: + agent_messages_by_executor[executor_id].append((executor_id, message, source_executor_id)) + else: + # Activity tasks can all run in parallel + logger.debug("Preparing activity task: %s", executor_id) + task = _prepare_activity_task(context, executor_id, message, source_executor_id, shared_state) + all_tasks.append(task) + task_metadata_list.append( + TaskMetadata( executor_id=executor_id, - response_text=response_text, - structured_response=structured_response, - previous_message=message, + message=message, + source_executor_id=source_executor_id, + task_type=TaskType.ACTIVITY, ) - - all_results.append((executor_id, output_message, None)) - - # Process Activity Executors in parallel - if activity_executor_tasks: - logger.debug("Processing %d activity executors in parallel", len(activity_executor_tasks)) - - # Use shared state dict directly (no entity calls needed) - shared_state_snapshot: dict[str, Any] | None = shared_state - if shared_state_snapshot: - logger.debug("[workflow] SharedState snapshot for activities: %s", shared_state_snapshot) - - # Create all activity tasks without yielding (to enable parallel execution) - activity_tasks = [] - task_metadata = [] # Track which task corresponds to which executor - - for executor_id, message, source_executor_id in activity_executor_tasks: - logger.debug("Preparing activity task for executor: %s", executor_id) - - activity_input = { - "executor_id": executor_id, - "message": serialize_message(message), - "shared_state_snapshot": shared_state_snapshot, - "source_executor_ids": [source_executor_id], - } - - # Create the task (don't yield yet - this enables parallelism) - activity_input_json = json.dumps(activity_input) - task = context.call_activity("ExecuteExecutor", activity_input_json) - activity_tasks.append(task) - 
task_metadata.append((executor_id, message, source_executor_id)) - - # Execute all activities in parallel using task_all - logger.debug("Executing %d activities in parallel", len(activity_tasks)) - results_json_list = yield context.task_all(activity_tasks) - logger.debug("All %d activities completed", len(activity_tasks)) - - # Process results and apply shared state updates - # Note: When running in parallel, shared state updates may conflict - # We apply them in order, but this is a limitation of parallel execution - for idx, result_json in enumerate(results_json_list): - executor_id, message, source_executor_id = task_metadata[idx] - result = json.loads(result_json) if result_json else None - logger.debug("Activity for executor %s returned", executor_id) - - # Apply any shared state updates from the activity result - if shared_state is not None and result: - if result.get("shared_state_updates"): - updates = result["shared_state_updates"] - logger.debug( - "[workflow] Applying SharedState updates from activity %s: %s", executor_id, updates - ) - shared_state.update(updates) - if result.get("shared_state_deletes"): - deletes = result["shared_state_deletes"] - logger.debug( - "[workflow] Applying SharedState deletes from activity %s: %s", executor_id, deletes - ) - for key in deletes: - shared_state.pop(key, None) - - # Collect outputs - if result and result.get("outputs"): - workflow_outputs.extend(result["outputs"]) - - # Add to results for routing - all_results.append((executor_id, None, result)) - - # Routing phase - process all results - for executor_id, output_message, result in all_results: - messages_to_route: list[tuple[Any, str | None]] = [] - - if output_message: - messages_to_route.append((output_message, None)) - - # Also route sent_messages from activities - if result and result.get("sent_messages"): - for msg_data in result["sent_messages"]: - sent_msg = msg_data.get("message") - target_id = msg_data.get("target_id") - if sent_msg: - sent_msg = 
deserialize_value(sent_msg) - messages_to_route.append((sent_msg, target_id)) - - for msg_to_route, explicit_target in messages_to_route: - logger.debug("Routing output from %s", executor_id) - - # If explicit target specified, route directly - if explicit_target: - if explicit_target not in next_pending_messages: - next_pending_messages[explicit_target] = [] - next_pending_messages[explicit_target].append((msg_to_route, executor_id)) - logger.debug("Routed message from %s to explicit target %s", executor_id, explicit_target) - continue - - # Check for FanInEdgeGroup sources first - for group in workflow.edge_groups: - if isinstance(group, FanInEdgeGroup) and executor_id in group.source_executor_ids: - if executor_id not in fan_in_pending[group.id]: - fan_in_pending[group.id][executor_id] = [] - fan_in_pending[group.id][executor_id].append((msg_to_route, executor_id)) - logger.debug("Accumulated message for FanIn group %s from %s", group.id, executor_id) - - # Use MAF's edge group routing for other edge types - targets = route_message_through_edge_groups( - workflow.edge_groups, - executor_id, - msg_to_route, ) - for target_id in targets: - logger.debug("Routing to %s", target_id) - if target_id not in next_pending_messages: - next_pending_messages[target_id] = [] - next_pending_messages[target_id].append((msg_to_route, executor_id)) + # Process agent messages: first message per agent goes to parallel batch + for executor_id, messages_list in agent_messages_by_executor.items(): + first_msg = messages_list[0] + remaining = messages_list[1:] - # Check if any FanInEdgeGroups are ready to deliver - for group in workflow.edge_groups: - if isinstance(group, FanInEdgeGroup): - pending_sources = fan_in_pending.get(group.id, {}) - # Check if all sources have contributed at least one message - if all(src in pending_sources for src in group.source_executor_ids): - # Aggregate all messages into a single list (extract just the messages) - aggregated: list[Any] = [] - 
aggregated_sources: list[str] = [] - for src in group.source_executor_ids: - for msg, msg_source in pending_sources[src]: - aggregated.append(msg) - aggregated_sources.append(msg_source) - - target_id = group.target_executor_ids[0] - logger.debug( - "FanIn group %s ready, delivering %d messages to %s", group.id, len(aggregated), target_id - ) + logger.debug("Preparing agent task: %s", executor_id) + task = _prepare_agent_task(context, first_msg[0], first_msg[1]) + all_tasks.append(task) + task_metadata_list.append( + TaskMetadata( + executor_id=first_msg[0], + message=first_msg[1], + source_executor_id=first_msg[2], + task_type=TaskType.AGENT, + ) + ) - if target_id not in next_pending_messages: - next_pending_messages[target_id] = [] - # For fan-in, the aggregated list is the message, sources are all contributors - # Use first source as representative (or could join them) - first_source = aggregated_sources[0] if aggregated_sources else "__fan_in__" - next_pending_messages[target_id].append((aggregated, first_source)) + # Queue remaining messages for sequential processing + remaining_agent_messages.extend(remaining) - # Clear the pending sources for this group - fan_in_pending[group.id] = {} + return all_tasks, task_metadata_list, remaining_agent_messages - pending_messages = next_pending_messages - iteration += 1 - # Durable Functions runtime extracts return value from StopIteration - return workflow_outputs # noqa: B901 +# ============================================================================ +# Message Content Extraction +# ============================================================================ def _extract_message_content(message: Any) -> str: From 33e7ed4d2730254aba2e5fc62aa602edbc33b793 Mon Sep 17 00:00:00 2001 From: Ahmed Muhsin Date: Tue, 20 Jan 2026 17:26:34 -0600 Subject: [PATCH 17/18] register an activity trigger for each non agent executor --- .../agent_framework_azurefunctions/_app.py | 30 ++++++++++++++----- .../_workflow.py | 4 ++- 2 
files changed, 25 insertions(+), 9 deletions(-) diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py index 828ece1617..4857a82ef6 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py @@ -225,8 +225,10 @@ def __init__( for executor in workflow.executors.values(): if isinstance(executor, AgentExecutor): agents.append(executor.agent) + else: + # Setup individual activity for each non-agent executor + self._setup_executor_activity(executor.id) - self._setup_executor_activity() self._setup_workflow_orchestration() if agents: @@ -241,12 +243,22 @@ def __init__( logger.debug("[AgentFunctionApp] Initialization complete") - def _setup_executor_activity(self) -> None: - """Register the activity for executing standard executors.""" + def _setup_executor_activity(self, executor_id: str) -> None: + """Register an activity for executing a specific non-agent executor. + Args: + executor_id: The ID of the executor to create an activity for. + """ + activity_name = f"dafx-{executor_id}" + logger.debug(f"[AgentFunctionApp] Registering activity '{activity_name}' for executor '{executor_id}'") + + # Capture executor_id in closure + captured_executor_id = executor_id + + @self.function_name(activity_name) @self.activity_trigger(input_name="inputData") - def ExecuteExecutor(inputData: str) -> str: - """Activity to execute non-agent executors. + def executor_activity(inputData: str) -> str: + """Activity to execute a specific non-agent executor. Note: We use str type annotations instead of dict to work around Azure Functions worker type validation issues with dict[str, Any]. 
@@ -256,7 +268,6 @@ def ExecuteExecutor(inputData: str) -> str: from agent_framework import SharedState data = json_module.loads(inputData) - executor_id = data["executor_id"] message_data = data["message"] shared_state_snapshot = data.get("shared_state_snapshot", {}) source_executor_ids = data.get("source_executor_ids", ["__orchestrator__"]) @@ -264,9 +275,9 @@ def ExecuteExecutor(inputData: str) -> str: if not self.workflow: raise RuntimeError("Workflow not initialized in AgentFunctionApp") - executor = self.workflow.executors.get(executor_id) + executor = self.workflow.executors.get(captured_executor_id) if not executor: - raise ValueError(f"Unknown executor: {executor_id}") + raise ValueError(f"Unknown executor: {captured_executor_id}") # Reconstruct message - try to match handler's expected types using public input_types message = reconstruct_message_for_handler(message_data, executor.input_types) @@ -334,6 +345,9 @@ async def run() -> dict[str, Any]: result = asyncio.run(run()) return json_module.dumps(result) + # Ensure the function is registered (prevents garbage collection) + _ = executor_activity + def _setup_workflow_orchestration(self) -> None: """Register the workflow orchestration and related HTTP endpoints.""" diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py index 43c2d7977e..9813f70294 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py @@ -235,7 +235,9 @@ def _prepare_activity_task( "source_executor_ids": [source_executor_id], } activity_input_json = json.dumps(activity_input) - return context.call_activity("ExecuteExecutor", activity_input_json) + # Use the prefixed activity name that matches the registered function + activity_name = f"dafx-{executor_id}" + return context.call_activity(activity_name, 
activity_input_json) # ============================================================================ From 15976391dfa57bc7bb594d054860f60fabdd97e9 Mon Sep 17 00:00:00 2001 From: Ahmed Muhsin Date: Fri, 30 Jan 2026 17:27:02 -0600 Subject: [PATCH 18/18] add support for hitl --- .../agent_framework_azurefunctions/_app.py | 126 ++++- .../agent_framework_azurefunctions/_utils.py | 240 ++++++++- .../_workflow.py | 190 ++++++- .../demo.http | 2 +- .../function_app.py | 4 +- .../09_workflow_shared_state/function_app.py | 207 ++++++-- .../function_app.py | 22 +- .../11_workflow_parallel/function_app.py | 14 + .../12_workflow_hitl/.gitignore | 5 + .../12_workflow_hitl/README.md | 141 ++++++ .../12_workflow_hitl/demo.http | 123 +++++ .../12_workflow_hitl/function_app.py | 468 ++++++++++++++++++ .../12_workflow_hitl/host.json | 16 + .../local.settings.json.sample | 11 + .../12_workflow_hitl/requirements.txt | 3 + 15 files changed, 1496 insertions(+), 76 deletions(-) create mode 100644 python/samples/getting_started/azure_functions/12_workflow_hitl/.gitignore create mode 100644 python/samples/getting_started/azure_functions/12_workflow_hitl/README.md create mode 100644 python/samples/getting_started/azure_functions/12_workflow_hitl/demo.http create mode 100644 python/samples/getting_started/azure_functions/12_workflow_hitl/function_app.py create mode 100644 python/samples/getting_started/azure_functions/12_workflow_hitl/host.json create mode 100644 python/samples/getting_started/azure_functions/12_workflow_hitl/local.settings.json.sample create mode 100644 python/samples/getting_started/azure_functions/12_workflow_hitl/requirements.txt diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py index 4857a82ef6..324d43faba 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py @@ 
-40,7 +40,13 @@ from ._entities import create_agent_entity from ._errors import IncomingRequestError from ._orchestration import AgentOrchestrationContextType, AgentTask, AzureFunctionsAgentExecutor -from ._utils import CapturingRunnerContext, deserialize_value, reconstruct_message_for_handler, serialize_message +from ._utils import ( + CapturingRunnerContext, + _execute_hitl_response_handler, + deserialize_value, + reconstruct_message_for_handler, + serialize_message, +) from ._workflow import run_workflow_orchestrator logger = get_logger("agent_framework.azurefunctions") @@ -282,6 +288,9 @@ def executor_activity(inputData: str) -> str: # Reconstruct message - try to match handler's expected types using public input_types message = reconstruct_message_for_handler(message_data, executor.input_types) + # Check if this is a HITL response message + is_hitl_response = isinstance(message_data, dict) and message_data.get("__hitl_response__") + async def run() -> dict[str, Any]: # Create runner context and shared state runner_context = CapturingRunnerContext() @@ -292,13 +301,22 @@ async def run() -> dict[str, Any]: original_snapshot = dict(deserialized_state) await shared_state.import_state(deserialized_state) - # Execute using the public execute() method - await executor.execute( - message=message, - source_executor_ids=source_executor_ids, - shared_state=shared_state, - runner_context=runner_context, - ) + if is_hitl_response: + # Handle HITL response by calling the executor's @response_handler + await _execute_hitl_response_handler( + executor=executor, + hitl_message=message_data, + shared_state=shared_state, + runner_context=runner_context, + ) + else: + # Execute using the public execute() method + await executor.execute( + message=message, + source_executor_ids=source_executor_ids, + shared_state=shared_state, + runner_context=runner_context, + ) # Export current state and compute changes current_state = await shared_state.export_state() @@ -323,6 +341,22 @@ async 
def run() -> dict[str, Any]: if isinstance(event, WorkflowOutputEvent): outputs.append(serialize_message(event.data)) + # Get pending request info events for HITL + pending_request_info_events = await runner_context.get_pending_request_info_events() + + # Serialize pending request info events for orchestrator + serialized_pending_requests = [] + for _request_id, event in pending_request_info_events.items(): + serialized_pending_requests.append({ + "request_id": event.request_id, + "source_executor_id": event.source_executor_id, + "data": serialize_message(event.data), + "request_type": f"{type(event.data).__module__}:{type(event.data).__name__}", + "response_type": f"{event.response_type.__module__}:{event.response_type.__name__}" + if event.response_type + else None, + }) + # Serialize messages for JSON compatibility serialized_sent_messages = [] for _source_id, msg_list in sent_messages.items(): @@ -340,6 +374,7 @@ async def run() -> dict[str, Any]: "outputs": outputs, "shared_state_updates": serialized_updates, "shared_state_deletes": list(deletes), + "pending_request_info_events": serialized_pending_requests, } result = asyncio.run(run()) @@ -383,12 +418,14 @@ async def start_workflow_orchestration( instance_id = await client.start_new("workflow_orchestrator", client_input=req_body) - status_url = self._build_status_url(req.url, instance_id) + base_url = self._build_base_url(req.url) + status_url = f"{base_url}/api/workflow/status/{instance_id}" return func.HttpResponse( json.dumps({ "instanceId": instance_id, "statusQueryGetUri": status_url, + "respondUri": f"{base_url}/api/workflow/respond/{instance_id}/{{requestId}}", "message": "Workflow started", }), status_code=202, @@ -414,24 +451,91 @@ async def get_workflow_status( response = { "instanceId": status.instance_id, "runtimeStatus": status.runtime_status.name if status.runtime_status else None, + "customStatus": status.custom_status, "output": status.output, "error": status.output if status.runtime_status 
== df.OrchestrationRuntimeStatus.Failed else None, "createdTime": status.created_time.isoformat() if status.created_time else None, "lastUpdatedTime": status.last_updated_time.isoformat() if status.last_updated_time else None, } + # Add pending HITL requests info if available + custom_status = status.custom_status or {} + if isinstance(custom_status, dict) and custom_status.get("pending_requests"): + base_url = self._build_base_url(req.url) + pending_requests = [] + for req_id, req_data in custom_status["pending_requests"].items(): + pending_requests.append({ + "requestId": req_id, + "sourceExecutor": req_data.get("source_executor_id"), + "requestData": req_data.get("data"), + "requestType": req_data.get("request_type"), + "responseType": req_data.get("response_type"), + "respondUrl": f"{base_url}/api/workflow/respond/{instance_id}/{req_id}", + }) + response["pendingHumanInputRequests"] = pending_requests + + return func.HttpResponse( + json.dumps(response, default=str), + status_code=200, + mimetype="application/json", + ) + + @self.route(route="workflow/respond/{instanceId}/{requestId}", methods=["POST"]) + @self.durable_client_input(client_name="client") + async def send_hitl_response(req: func.HttpRequest, client: df.DurableOrchestrationClient) -> func.HttpResponse: + """HTTP endpoint to send a response to a pending HITL request. + + The requestId in the URL corresponds to the request_id from the RequestInfoEvent. + The request body should contain the response data matching the expected response_type. 
+ """ + instance_id = req.route_params.get("instanceId") + request_id = req.route_params.get("requestId") + + if not instance_id or not request_id: + return func.HttpResponse( + json.dumps({"error": "Instance ID and Request ID are required."}), + status_code=400, + mimetype="application/json", + ) + + try: + response_data = req.get_json() + except ValueError: + return func.HttpResponse( + json.dumps({"error": "Request body must be valid JSON."}), + status_code=400, + mimetype="application/json", + ) + + # Send the response as an external event + # The request_id is used as the event name for correlation + await client.raise_event( + instance_id=instance_id, + event_name=request_id, + event_data=response_data, + ) + return func.HttpResponse( - json.dumps(response), + json.dumps({ + "message": "Response delivered successfully", + "instanceId": instance_id, + "requestId": request_id, + }), status_code=200, mimetype="application/json", ) def _build_status_url(self, request_url: str, instance_id: str) -> str: """Build the status URL for a workflow instance.""" + base_url = self._build_base_url(request_url) + return f"{base_url}/api/workflow/status/{instance_id}" + + def _build_base_url(self, request_url: str) -> str: + """Extract the base URL from a request URL.""" base_url, _, _ = request_url.partition("/api/") if not base_url: base_url = request_url.rstrip("/") - return f"{base_url}/api/workflow/status/{instance_id}" + return base_url @property def agents(self) -> dict[str, AgentProtocol]: diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_utils.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_utils.py index 5ef9fd7ea6..3b25f5db85 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/_utils.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_utils.py @@ -284,14 +284,129 @@ def deserialize_value(data: Any, type_registry: dict[str, type] | None = None) - # Remove metadata before 
reconstruction clean_data = {k: v for k, v in data.items() if not k.startswith("__")} try: - if is_dataclass(target_type) or issubclass(target_type, BaseModel): - return target_type(**clean_data) + if is_dataclass(target_type): + # Recursively reconstruct nested fields for dataclasses + reconstructed_data = _reconstruct_dataclass_fields(target_type, clean_data) + return target_type(**reconstructed_data) + if issubclass(target_type, BaseModel): + # Pydantic handles nested model validation automatically + return target_type.model_validate(clean_data) except Exception: logger.debug("Could not reconstruct type %s from data", type_name) return data +def _reconstruct_dataclass_fields(dataclass_type: type, data: dict[str, Any]) -> dict[str, Any]: + """Recursively reconstruct nested dataclass and Pydantic fields. + + This function processes each field of a dataclass, looking up the expected type + from type hints and reconstructing nested objects (dataclasses, Pydantic models, lists). + + Args: + dataclass_type: The dataclass type being constructed + data: The dict of field values + + Returns: + Dict with nested objects properly reconstructed + """ + if not is_dataclass(dataclass_type): + return data + + result = {} + type_hints = {} + + # Get type hints for the dataclass + try: + import typing + + type_hints = typing.get_type_hints(dataclass_type) + except Exception: + # Fall back to field annotations if get_type_hints fails + for f in fields(dataclass_type): + type_hints[f.name] = f.type + + for key, value in data.items(): + if key not in type_hints: + result[key] = value + continue + + field_type = type_hints[key] + + # Handle Optional types (Union with None) + origin = get_origin(field_type) + if origin is Union or isinstance(field_type, types.UnionType): + args = get_args(field_type) + # Filter out NoneType to get the actual type + non_none_types = [t for t in args if t is not type(None)] + if len(non_none_types) == 1: + field_type = non_none_types[0] + + # 
Recursively reconstruct the value + result[key] = _reconstruct_typed_value(value, field_type) + + return result + + +def _reconstruct_typed_value(value: Any, target_type: type) -> Any: + """Reconstruct a single value to the target type. + + Handles dataclasses, Pydantic models, and lists with typed elements. + + Args: + value: The value to reconstruct + target_type: The expected type + + Returns: + The reconstructed value + """ + if value is None: + return None + + # If already the correct type, return as-is + try: + if isinstance(value, target_type): + return value + except TypeError: + # target_type might not be a valid type for isinstance + pass + + # Handle dict values that need reconstruction + if isinstance(value, dict): + # First try deserialize_value which uses embedded type metadata + if "__type__" in value: + deserialized = deserialize_value(value) + if deserialized is not value: + return deserialized + + # Handle Pydantic models + if hasattr(target_type, "model_validate"): + try: + return target_type.model_validate(value) + except Exception: + logger.debug("Could not validate Pydantic model %s", target_type) + + # Handle dataclasses + if is_dataclass(target_type) and isinstance(target_type, type): + try: + # Recursively reconstruct nested fields + reconstructed = _reconstruct_dataclass_fields(target_type, value) + return target_type(**reconstructed) + except Exception: + logger.debug("Could not construct dataclass %s", target_type) + + # Handle list values + if isinstance(value, list): + origin = get_origin(target_type) + if origin is list: + args = get_args(target_type) + if args: + element_type = args[0] + return [_reconstruct_typed_value(item, element_type) for item in value] + + return value + + def reconstruct_agent_executor_request(data: dict[str, Any]) -> AgentExecutorRequest: """Helper to reconstruct AgentExecutorRequest from dict.""" # Reconstruct ChatMessage objects in messages @@ -399,8 +514,127 @@ def reconstruct_message_for_handler(data: 
Any, input_types: list[type[Any]]) -> try: # Remove metadata before constructing clean_data = {k: v for k, v in data.items() if not k.startswith("__")} - return msg_type(**clean_data) + # Recursively reconstruct nested objects based on field types + reconstructed_data = _reconstruct_dataclass_fields(msg_type, clean_data) + return msg_type(**reconstructed_data) except Exception: logger.debug("Could not construct %s from matching fields", msg_type.__name__) return data + + +# ============================================================================ +# HITL Response Handler Execution +# ============================================================================ + + +async def _execute_hitl_response_handler( + executor: Any, + hitl_message: dict[str, Any], + shared_state: SharedState, + runner_context: CapturingRunnerContext, +) -> None: + """Execute a HITL response handler on an executor. + + This function handles the delivery of a HITL response to the executor's + @response_handler method. It: + 1. Deserializes the original request and response + 2. Finds the matching response handler based on types + 3. 
Creates a WorkflowContext and invokes the handler + + Args: + executor: The executor instance that has a @response_handler + hitl_message: The HITL response message containing original_request and response + shared_state: The shared state for the workflow context + runner_context: The runner context for capturing outputs + """ + from agent_framework._workflows._workflow_context import WorkflowContext + + # Extract the response data + original_request_data = hitl_message.get("original_request") + response_data = hitl_message.get("response") + response_type_str = hitl_message.get("response_type") + + # Deserialize the original request + original_request = deserialize_value(original_request_data) + + # Deserialize the response - try to match expected type + response = _deserialize_hitl_response(response_data, response_type_str) + + # Find the matching response handler + handler = executor._find_response_handler(original_request, response) + + if handler is None: + logger.warning( + "No response handler found for HITL response in executor %s. Request type: %s, Response type: %s", + executor.id, + type(original_request).__name__, + type(response).__name__, + ) + return + + # Create a WorkflowContext for the handler + # Use a special source ID to indicate this is a HITL response + ctx = WorkflowContext( + executor=executor, + source_executor_ids=["__hitl_response__"], + runner_context=runner_context, + shared_state=shared_state, + ) + + # Call the response handler + # Note: handler is already a partial with original_request bound + logger.debug( + "Invoking response handler for HITL request in executor %s", + executor.id, + ) + await handler(response, ctx) + + +def _deserialize_hitl_response(response_data: Any, response_type_str: str | None) -> Any: + """Deserialize a HITL response to its expected type. 
+ + Args: + response_data: The raw response data (typically a dict from JSON) + response_type_str: The fully qualified type name (module:classname) + + Returns: + The deserialized response, or the original data if deserialization fails + """ + logger.debug( + "Deserializing HITL response. response_type_str=%s, response_data type=%s", + response_type_str, + type(response_data).__name__, + ) + + if response_data is None: + return None + + # If already a primitive, return as-is + if not isinstance(response_data, dict): + logger.debug("Response data is not a dict, returning as-is: %s", type(response_data).__name__) + return response_data + + # Try to deserialize using the type hint + if response_type_str: + try: + module_name, class_name = response_type_str.rsplit(":", 1) + import importlib + + module = importlib.import_module(module_name) + response_type = getattr(module, class_name, None) + + if response_type: + logger.debug("Found response type %s, attempting reconstruction", response_type) + # Use the shared reconstruction logic which handles nested objects + result = _reconstruct_typed_value(response_data, response_type) + logger.debug("Reconstructed response type: %s", type(result).__name__) + return result + logger.warning("Could not find class %s in module %s", class_name, module_name) + + except Exception as e: + logger.warning("Could not deserialize HITL response to %s: %s", response_type_str, e) + + # Fall back to generic deserialization + logger.debug("Falling back to generic deserialization") + return deserialize_value(response_data) diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py index 9813f70294..20bb12db63 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_workflow.py @@ -10,6 +10,11 @@ - run_workflow_orchestrator: Main orchestration 
function for workflow execution - route_message_through_edge_groups: Routing helper using MAF edge group APIs - build_agent_executor_response: Helper to construct AgentExecutorResponse + +HITL (Human-in-the-Loop) Support: +- Detects pending RequestInfoEvents from executor activities +- Uses wait_for_external_event to pause for human input +- Routes responses back to executor's @response_handler methods """ from __future__ import annotations @@ -18,6 +23,7 @@ import logging from collections import defaultdict from dataclasses import dataclass +from datetime import timedelta from enum import Enum from typing import Any @@ -78,6 +84,29 @@ class ExecutorResult: task_type: TaskType +@dataclass +class PendingHITLRequest: + """Tracks a pending Human-in-the-Loop request in the orchestrator. + + Attributes: + request_id: Unique identifier for correlation with external events + source_executor_id: The executor that called ctx.request_info() + request_data: The serialized request payload + request_type: Fully qualified type name of the request data + response_type: Fully qualified type name of expected response + """ + + request_id: str + source_executor_id: str + request_data: Any + request_type: str | None + response_type: str | None + + +# Default timeout for HITL requests (72 hours) +DEFAULT_HITL_TIMEOUT_HOURS = 72.0 + + # ============================================================================ # Routing Functions # ============================================================================ @@ -433,6 +462,79 @@ def _check_fan_in_ready( fan_in_pending[group.id] = defaultdict(list) +# ============================================================================ +# HITL (Human-in-the-Loop) Helpers +# ============================================================================ + + +def _collect_hitl_requests( + result: ExecutorResult, + pending_hitl_requests: dict[str, PendingHITLRequest], +) -> None: + """Collect pending HITL requests from an activity result. 
+ + Args: + result: The executor result that may contain pending request info events + pending_hitl_requests: Dict to accumulate pending requests (mutated) + """ + if result.activity_result and result.activity_result.get("pending_request_info_events"): + for req_data in result.activity_result["pending_request_info_events"]: + request_id = req_data.get("request_id") + if request_id: + pending_hitl_requests[request_id] = PendingHITLRequest( + request_id=request_id, + source_executor_id=req_data.get("source_executor_id", result.executor_id), + request_data=req_data.get("data"), + request_type=req_data.get("request_type"), + response_type=req_data.get("response_type"), + ) + logger.debug( + "Collected HITL request %s from executor %s", + request_id, + result.executor_id, + ) + + +def _route_hitl_response( + hitl_request: PendingHITLRequest, + raw_response: Any, + pending_messages: dict[str, list[tuple[Any, str]]], +) -> None: + """Route a HITL response back to the source executor's @response_handler. + + The response is packaged as a special HITL response message that the executor + activity can recognize and route to the appropriate @response_handler method. 
+ + Args: + hitl_request: The original HITL request + raw_response: The raw response data from the external event + pending_messages: Dict to add the response message to (mutated) + """ + # Create a message structure that the executor can recognize + # This mimics what the InProcRunnerContext does for request_info responses + response_message = { + "__hitl_response__": True, + "request_id": hitl_request.request_id, + "original_request": hitl_request.request_data, + "response": raw_response, + "response_type": hitl_request.response_type, + } + + target_id = hitl_request.source_executor_id + if target_id not in pending_messages: + pending_messages[target_id] = [] + + # Use a special source ID to indicate this is a HITL response + source_id = f"__hitl_response__{hitl_request.request_id}" + pending_messages[target_id].append((response_message, source_id)) + + logger.debug( + "Routed HITL response for request %s to executor %s", + hitl_request.request_id, + target_id, + ) + + # ============================================================================ # Main Orchestrator # ============================================================================ @@ -443,6 +545,7 @@ def run_workflow_orchestrator( workflow: Workflow, initial_message: Any, shared_state: dict[str, Any] | None = None, + hitl_timeout_hours: float = DEFAULT_HITL_TIMEOUT_HOURS, ): """Traverse and execute the workflow graph using Durable Functions. 
@@ -455,17 +558,20 @@ def run_workflow_orchestrator( - FanOutEdgeGroup: Broadcast to multiple targets - **executed in parallel** - FanInEdgeGroup: Aggregates messages from multiple sources before delivery - SharedState: Local shared state accessible to all executors + - HITL: Human-in-the-loop via request_info / @response_handler pattern Execution model: - All pending executors (agents AND activities) run in parallel via single task_all() - Multiple messages to the SAME agent are processed sequentially for conversation coherence - SharedState updates are applied in order after parallel tasks complete + - HITL requests pause the orchestration until external events are received Args: context: The Durable Functions orchestration context workflow: The MAF Workflow instance to execute initial_message: The initial message to send to the start executor shared_state: Optional dict for cross-executor state sharing (local to orchestration) + hitl_timeout_hours: Timeout in hours for HITL requests (default: 72 hours) Returns: List of workflow outputs collected from executor activities @@ -481,6 +587,9 @@ def run_workflow_orchestrator( group.id: defaultdict(list) for group in workflow.edge_groups if isinstance(group, FanInEdgeGroup) } + # Track pending HITL requests + pending_hitl_requests: dict[str, PendingHITLRequest] = {} + while pending_messages and iteration < workflow.max_iterations: logger.debug("Orchestrator iteration %d", iteration) next_pending_messages: dict[str, list[tuple[Any, str]]] = {} @@ -516,14 +625,91 @@ def run_workflow_orchestrator( result = _process_agent_response(agent_response, executor_id, message) all_results.append(result) - # Phase 4: Route all results to next iteration + # Phase 4: Collect pending HITL requests from activity results + for result in all_results: + _collect_hitl_requests(result, pending_hitl_requests) + + # Phase 5: Route all results to next iteration for result in all_results: _route_result_messages(result, workflow, 
next_pending_messages, fan_in_pending) - # Phase 5: Check if any FanInEdgeGroups are ready to deliver + # Phase 6: Check if any FanInEdgeGroups are ready to deliver _check_fan_in_ready(workflow, fan_in_pending, next_pending_messages) pending_messages = next_pending_messages + + # Phase 7: Handle HITL - if no pending work but HITL requests exist, wait for responses + if not pending_messages and pending_hitl_requests: + logger.debug("Workflow paused for HITL - %d pending requests", len(pending_hitl_requests)) + + # Update custom status to expose pending requests + context.set_custom_status({ + "state": "waiting_for_human_input", + "pending_requests": { + req_id: { + "request_id": req.request_id, + "source_executor_id": req.source_executor_id, + "data": req.request_data, + "request_type": req.request_type, + "response_type": req.response_type, + } + for req_id, req in pending_hitl_requests.items() + }, + }) + + # Wait for external events for each pending request + # Process responses one at a time to maintain ordering + for request_id, hitl_request in list(pending_hitl_requests.items()): + logger.debug("Waiting for HITL response for request: %s", request_id) + + # Create tasks for approval and timeout + approval_task = context.wait_for_external_event(request_id) + timeout_task = context.create_timer(context.current_utc_datetime + timedelta(hours=hitl_timeout_hours)) + + winner = yield context.task_any([approval_task, timeout_task]) + + if winner == approval_task: + # Cancel the timeout + timeout_task.cancel() + + # Get the response + raw_response = approval_task.result + logger.debug( + "Received HITL response for request %s. 
Type: %s, Value: %s", + request_id, + type(raw_response).__name__, + raw_response, + ) + + # Durable Functions may return a JSON string; parse it if so + if isinstance(raw_response, str): + try: + import json + + raw_response = json.loads(raw_response) + logger.debug("Parsed JSON string response to: %s", type(raw_response).__name__) + except (json.JSONDecodeError, TypeError): + logger.debug("Response is not JSON, keeping as string") + + # Remove from pending + del pending_hitl_requests[request_id] + + # Route the response back to the source executor's @response_handler + _route_hitl_response( + hitl_request, + raw_response, + pending_messages, + ) + else: + # Timeout occurred + logger.warning("HITL request %s timed out after %s hours", request_id, hitl_timeout_hours) + raise TimeoutError( + f"Human-in-the-loop request '{request_id}' timed out after {hitl_timeout_hours} hours." + ) + + # Clear custom status after HITL is resolved + context.set_custom_status({"state": "running"}) + iteration += 1 # Durable Functions runtime extracts return value from StopIteration diff --git a/python/samples/getting_started/azure_functions/07_single_agent_orchestration_hitl/demo.http b/python/samples/getting_started/azure_functions/07_single_agent_orchestration_hitl/demo.http index 42f93b8543..28231a08a8 100644 --- a/python/samples/getting_started/azure_functions/07_single_agent_orchestration_hitl/demo.http +++ b/python/samples/getting_started/azure_functions/07_single_agent_orchestration_hitl/demo.http @@ -20,7 +20,7 @@ Content-Type: application/json ### Replace INSTANCE_ID_GOES_HERE below with the value returned from the POST call -@instanceId= +@instanceId=ccf3950407b5496893df93d1357a5afa ### Check the status of the orchestration GET http://localhost:7071/api/hitl/status/{{instanceId}} diff --git a/python/samples/getting_started/azure_functions/07_single_agent_orchestration_hitl/function_app.py 
b/python/samples/getting_started/azure_functions/07_single_agent_orchestration_hitl/function_app.py index 08a14ffe11..8985b0245e 100644 --- a/python/samples/getting_started/azure_functions/07_single_agent_orchestration_hitl/function_app.py +++ b/python/samples/getting_started/azure_functions/07_single_agent_orchestration_hitl/function_app.py @@ -62,7 +62,7 @@ def _create_writer_agent() -> Any: # 3. Activities encapsulate external work for review notifications and publishing. @app.activity_trigger(input_name="content") -def notify_user_for_approval(content: Any) -> None: +def notify_user_for_approval(content: dict) -> None: model = GeneratedContent.model_validate(content) logger.info("NOTIFICATION: Please review the following content for approval:") logger.info("Title: %s", model.title or "(untitled)") @@ -71,7 +71,7 @@ def notify_user_for_approval(content: Any) -> None: @app.activity_trigger(input_name="content") -def publish_content(content: Any) -> None: +def publish_content(content: dict) -> None: model = GeneratedContent.model_validate(content) logger.info("PUBLISHING: Content has been published successfully:") logger.info("Title: %s", model.title or "(untitled)") diff --git a/python/samples/getting_started/azure_functions/09_workflow_shared_state/function_app.py b/python/samples/getting_started/azure_functions/09_workflow_shared_state/function_app.py index 70456ce3f4..bf38dfc72b 100644 --- a/python/samples/getting_started/azure_functions/09_workflow_shared_state/function_app.py +++ b/python/samples/getting_started/azure_functions/09_workflow_shared_state/function_app.py @@ -1,5 +1,25 @@ # Copyright (c) Microsoft. All rights reserved. +""" +Sample: Shared state with agents and conditional routing. + +Store an email once by id, classify it with a detector agent, then either draft a reply with an assistant +agent or finish with a spam notice. Stream events as the workflow runs. 
+ +Purpose: +Show how to: +- Use shared state to decouple large payloads from messages and pass around lightweight references. +- Enforce structured agent outputs with Pydantic models via response_format for robust parsing. +- Route using conditional edges based on a typed intermediate DetectionResult. +- Compose agent backed executors with function style executors and yield the final output when the workflow completes. + +Prerequisites: +- Azure OpenAI configured for AzureOpenAIChatClient with required environment variables. +- Authentication via azure-identity. Use DefaultAzureCredential and run az login before executing the sample. +- Familiarity with WorkflowBuilder, executors, conditional edges, and streaming runs. +""" +import logging +import os from dataclasses import dataclass from typing import Any from uuid import uuid4 @@ -9,35 +29,24 @@ AgentExecutorResponse, ChatMessage, Role, + Workflow, WorkflowBuilder, WorkflowContext, executor, ) from agent_framework.azure import AzureOpenAIChatClient -from azure.identity import DefaultAzureCredential +from azure.identity import AzureCliCredential from pydantic import BaseModel from typing_extensions import Never from agent_framework_azurefunctions import AgentFunctionApp -""" -Sample: Shared state with agents and conditional routing. +logger = logging.getLogger(__name__) -Store an email once by id, classify it with a detector agent, then either draft a reply with an assistant -agent or finish with a spam notice. Stream events as the workflow runs. - -Purpose: -Show how to: -- Use shared state to decouple large payloads from messages and pass around lightweight references. -- Enforce structured agent outputs with Pydantic models via response_format for robust parsing. -- Route using conditional edges based on a typed intermediate DetectionResult. -- Compose agent backed executors with function style executors and yield the final output when the workflow completes. 
- -Prerequisites: -- Azure OpenAI configured for AzureOpenAIChatClient with required environment variables. -- Authentication via azure-identity. Use DefaultAzureCredential and run az login before executing the sample. -- Familiarity with WorkflowBuilder, executors, conditional edges, and streaming runs. -""" +# Environment variable names +AZURE_OPENAI_ENDPOINT_ENV = "AZURE_OPENAI_ENDPOINT" +AZURE_OPENAI_DEPLOYMENT_ENV = "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME" +AZURE_OPENAI_API_KEY_ENV = "AZURE_OPENAI_API_KEY" EMAIL_STATE_PREFIX = "email:" CURRENT_EMAIL_ID_KEY = "current_email_id" @@ -154,44 +163,132 @@ async def handle_spam(detection: DetectionResult, ctx: WorkflowContext[Never, st raise RuntimeError("This executor should only handle spam messages.") -# Create chat client and agents. response_format enforces structured JSON from each agent. -chat_client = AzureOpenAIChatClient(credential=DefaultAzureCredential()) +# ============================================================================ +# Workflow Creation +# ============================================================================ -spam_detection_agent = chat_client.create_agent( - instructions=( - "You are a spam detection assistant that identifies spam emails. " - "Always return JSON with fields is_spam (bool) and reason (string)." - ), - response_format=DetectionResultAgent, - name="spam_detection_agent", -) -email_assistant_agent = chat_client.create_agent( - instructions=( - "You are an email assistant that helps users draft responses to emails with professionalism. " - "Return JSON with a single field 'response' containing the drafted reply." 
- ), - response_format=EmailResponse, - name="email_assistant_agent", -) +def _build_client_kwargs() -> dict[str, Any]: + """Build Azure OpenAI client configuration from environment variables.""" + endpoint = os.getenv(AZURE_OPENAI_ENDPOINT_ENV) + if not endpoint: + raise RuntimeError(f"{AZURE_OPENAI_ENDPOINT_ENV} environment variable is required.") -# Build the workflow graph with conditional edges. -# Flow: -# store_email -> spam_detection_agent -> to_detection_result -> branch: -# False -> submit_to_email_assistant -> email_assistant_agent -> finalize_and_send -# True -> handle_spam -workflow = ( - WorkflowBuilder() - .set_start_executor(store_email) - .add_edge(store_email, spam_detection_agent) - .add_edge(spam_detection_agent, to_detection_result) - .add_edge(to_detection_result, submit_to_email_assistant, condition=get_condition(False)) - .add_edge(to_detection_result, handle_spam, condition=get_condition(True)) - .add_edge(submit_to_email_assistant, email_assistant_agent) - .add_edge(email_assistant_agent, finalize_and_send) - .build() -) + deployment = os.getenv(AZURE_OPENAI_DEPLOYMENT_ENV) + if not deployment: + raise RuntimeError(f"{AZURE_OPENAI_DEPLOYMENT_ENV} environment variable is required.") + + client_kwargs: dict[str, Any] = { + "endpoint": endpoint, + "deployment_name": deployment, + } + + api_key = os.getenv(AZURE_OPENAI_API_KEY_ENV) + if api_key: + client_kwargs["api_key"] = api_key + else: + client_kwargs["credential"] = AzureCliCredential() + + return client_kwargs + + +def _create_workflow() -> Workflow: + """Create the email classification workflow with conditional routing.""" + client_kwargs = _build_client_kwargs() + chat_client = AzureOpenAIChatClient(**client_kwargs) + + spam_detection_agent = chat_client.create_agent( + instructions=( + "You are a spam detection assistant that identifies spam emails. " + "Always return JSON with fields is_spam (bool) and reason (string)." 
+ ), + response_format=DetectionResultAgent, + name="spam_detection_agent", + ) + + email_assistant_agent = chat_client.create_agent( + instructions=( + "You are an email assistant that helps users draft responses to emails with professionalism. " + "Return JSON with a single field 'response' containing the drafted reply." + ), + response_format=EmailResponse, + name="email_assistant_agent", + ) + + # Build the workflow graph with conditional edges. + # Flow: + # store_email -> spam_detection_agent -> to_detection_result -> branch: + # False -> submit_to_email_assistant -> email_assistant_agent -> finalize_and_send + # True -> handle_spam + workflow = ( + WorkflowBuilder() + .set_start_executor(store_email) + .add_edge(store_email, spam_detection_agent) + .add_edge(spam_detection_agent, to_detection_result) + .add_edge(to_detection_result, submit_to_email_assistant, condition=get_condition(False)) + .add_edge(to_detection_result, handle_spam, condition=get_condition(True)) + .add_edge(submit_to_email_assistant, email_assistant_agent) + .add_edge(email_assistant_agent, finalize_and_send) + .build() + ) + + return workflow -# Wrap workflow with AgentFunctionApp for durable execution -# SharedState is enabled by default, which this sample requires for storing emails -app = AgentFunctionApp(workflow=workflow, enable_health_check=True) + +# ============================================================================ +# Application Entry Point +# ============================================================================ + + +def launch(durable: bool = True) -> AgentFunctionApp | None: + """Launch the function app or DevUI. + + Args: + durable: If True, returns AgentFunctionApp for Azure Functions. + If False, launches DevUI for local MAF development. 
+ """ + if durable: + # Azure Functions mode with Durable Functions + # SharedState is enabled by default, which this sample requires for storing emails + workflow = _create_workflow() + app = AgentFunctionApp(workflow=workflow, enable_health_check=True) + return app + else: + # Pure MAF mode with DevUI for local development + from pathlib import Path + + from agent_framework.devui import serve + from dotenv import load_dotenv + + env_path = Path(__file__).parent / ".env" + load_dotenv(dotenv_path=env_path) + + logger.info("Starting Workflow Shared State Sample in MAF mode") + logger.info("Available at: http://localhost:8096") + logger.info("\nThis workflow demonstrates:") + logger.info("- Shared state to decouple large payloads from messages") + logger.info("- Structured agent outputs with Pydantic models") + logger.info("- Conditional routing based on detection results") + logger.info("\nFlow: store_email -> spam_detection -> branch (spam/not spam)") + + workflow = _create_workflow() + serve(entities=[workflow], port=8096, auto_open=True) + + return None + + +# Default: Azure Functions mode +# Run with `python function_app.py --maf` for pure MAF mode with DevUI +app = launch(durable=True) + + +if __name__ == "__main__": + import sys + + if "--maf" in sys.argv: + # Run in pure MAF mode with DevUI + launch(durable=False) + else: + print("Usage: python function_app.py --maf") + print(" --maf Run in pure MAF mode with DevUI (http://localhost:8096)") + print("\nFor Azure Functions mode, use: func start") diff --git a/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/function_app.py b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/function_app.py index 8bf1e063e9..b55fef58b8 100644 --- a/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/function_app.py +++ b/python/samples/getting_started/azure_functions/10_workflow_no_shared_state/function_app.py @@ -198,7 +198,11 @@ def launch(durable: bool 
= True) -> AgentFunctionApp | None: if durable: # Initialize app workflow = _create_workflow() - app = AgentFunctionApp(workflow=workflow, enable_health_check=True) + + + app = AgentFunctionApp(workflow=workflow) + + return app else: # Launch the spam detection workflow in DevUI @@ -222,5 +226,19 @@ def launch(durable: bool = True) -> AgentFunctionApp | None: return None - + +# Default: Azure Functions mode +# Run with `python function_app.py --maf` for pure MAF mode with DevUI app = launch(durable=True) + + +if __name__ == "__main__": + import sys + + if "--maf" in sys.argv: + # Run in pure MAF mode with DevUI + launch(durable=False) + else: + print("Usage: python function_app.py --maf") + print(" --maf Run in pure MAF mode with DevUI (http://localhost:8096)") + print("\nFor Azure Functions mode, use: func start") diff --git a/python/samples/getting_started/azure_functions/11_workflow_parallel/function_app.py b/python/samples/getting_started/azure_functions/11_workflow_parallel/function_app.py index 1c626bae20..a51a1b6a04 100644 --- a/python/samples/getting_started/azure_functions/11_workflow_parallel/function_app.py +++ b/python/samples/getting_started/azure_functions/11_workflow_parallel/function_app.py @@ -521,4 +521,18 @@ def launch(durable: bool = True) -> AgentFunctionApp | None: return None +# Default: Azure Functions mode +# Run with `python function_app.py --maf` for pure MAF mode with DevUI app = launch(durable=True) + + +if __name__ == "__main__": + import sys + + if "--maf" in sys.argv: + # Run in pure MAF mode with DevUI + launch(durable=False) + else: + print("Usage: python function_app.py --maf") + print(" --maf Run in pure MAF mode with DevUI (http://localhost:8095)") + print("\nFor Azure Functions mode, use: func start") diff --git a/python/samples/getting_started/azure_functions/12_workflow_hitl/.gitignore b/python/samples/getting_started/azure_functions/12_workflow_hitl/.gitignore new file mode 100644 index 0000000000..7097fe0170 --- /dev/null 
+++ b/python/samples/getting_started/azure_functions/12_workflow_hitl/.gitignore @@ -0,0 +1,5 @@ +# Local settings - copy from local.settings.json.sample and fill in your values +local.settings.json +__pycache__/ +*.pyc +.venv/ diff --git a/python/samples/getting_started/azure_functions/12_workflow_hitl/README.md b/python/samples/getting_started/azure_functions/12_workflow_hitl/README.md new file mode 100644 index 0000000000..2bb84f16dc --- /dev/null +++ b/python/samples/getting_started/azure_functions/12_workflow_hitl/README.md @@ -0,0 +1,141 @@ +# 12. Workflow with Human-in-the-Loop (HITL) + +This sample demonstrates how to integrate human approval into a MAF workflow running on Azure Durable Functions using the MAF `request_info` and `@response_handler` pattern. + +## Overview + +The sample implements a content moderation pipeline: + +1. **User starts workflow** with content for publication via HTTP endpoint +2. **AI Agent analyzes** the content for policy compliance +3. **Workflow pauses** and requests human reviewer approval +4. **Human responds** via HTTP endpoint with approval/rejection +5. **Workflow resumes** and publishes or rejects the content + +## Key Concepts + +### MAF HITL Pattern + +This sample uses MAF's built-in human-in-the-loop pattern: + +```python +# In an executor, request human input +await ctx.request_info( + request_data=HumanApprovalRequest(...), + response_type=HumanApprovalResponse, +) + +# Handle the response in a separate method +@response_handler +async def handle_approval_response( + self, + original_request: HumanApprovalRequest, + response: HumanApprovalResponse, + ctx: WorkflowContext, +) -> None: + # Process the human's decision + ... 
+``` + +### Automatic HITL Endpoints + +`AgentFunctionApp` automatically provides all the HTTP endpoints needed for HITL: + +| Endpoint | Description | +|----------|-------------| +| `POST /api/workflow/run` | Start the workflow | +| `GET /api/workflow/status/{instanceId}` | Check status and pending HITL requests | +| `POST /api/workflow/respond/{instanceId}/{requestId}` | Send human response | +| `GET /api/health` | Health check | + +### Durable Functions Integration + +When running on Durable Functions, the HITL pattern maps to: + +| MAF Concept | Durable Functions | +|-------------|-------------------| +| `ctx.request_info()` | Workflow pauses, custom status updated | +| `RequestInfoEvent` | Exposed via status endpoint | +| HTTP response | `client.raise_event(instance_id, request_id, data)` | +| `@response_handler` | Workflow resumes, handler invoked | + +## Workflow Architecture + +``` +┌─────────────────┐ ┌──────────────────────┐ ┌────────────────────────┐ +│ Input Router │ ──► │ Content Analyzer │ ──► │ Content Analyzer │ +│ Executor │ │ Agent (AI) │ │ Executor (Parse JSON) │ +└─────────────────┘ └──────────────────────┘ └────────────────────────┘ + │ + ▼ +┌─────────────────┐ ┌──────────────────────┐ +│ Publish │ ◄── │ Human Review │ ◄── HITL PAUSE +│ Executor │ │ Executor │ (wait for external event) +└─────────────────┘ └──────────────────────┘ +``` + +## Prerequisites + +1. **Azure OpenAI** - Access to Azure OpenAI with a deployed chat model +2. **Durable Task Scheduler** - Local emulator or Azure deployment +3. **Azurite** - Local Azure Storage emulator +4. **Azure CLI** - For authentication (`az login`) + +## Setup + +1. Copy the sample settings file: + ```bash + cp local.settings.json.sample local.settings.json + ``` + +2. Update `local.settings.json` with your Azure OpenAI credentials: + ```json + { + "Values": { + "AZURE_OPENAI_ENDPOINT": "https://your-resource.openai.azure.com/", + "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME": "gpt-4o" + } + } + ``` + +3. 
Start the local emulators: + ```bash + # Terminal 1: Start Azurite + azurite --silent --location . + + # Terminal 2: Start Durable Task Scheduler (if using local emulator) + # Follow Durable Task Scheduler setup instructions + ``` + +4. Start the Function App: + ```bash + func start + ``` + +## Running in Pure MAF Mode + +You can also run this sample in pure MAF mode (without Durable Functions) using the DevUI: + +```bash +python function_app.py --maf +``` + +This launches the DevUI at http://localhost:8096 where you can interact with the workflow directly. This is useful for: +- Local development and debugging +- Testing the HITL pattern without Durable Functions infrastructure +- Comparing behavior between MAF and Durable modes + +## Testing + +Use the `demo.http` file with the VS Code REST Client extension: + +1. **Start workflow** - `POST /api/workflow/run` with content payload +2. **Check status** - `GET /api/workflow/status/{instanceId}` to see pending HITL requests +3. **Send response** - `POST /api/workflow/respond/{instanceId}/{requestId}` with approval +4. 
**Check result** - `GET /api/workflow/status/{instanceId}` to see final output + +## Related Samples + +- [07_single_agent_orchestration_hitl](../07_single_agent_orchestration_hitl/) - HITL at orchestrator level (not using MAF pattern) +- [09_workflow_shared_state](../09_workflow_shared_state/) - Workflow with shared state +- [guessing_game_with_human_input](../../workflows/human-in-the-loop/guessing_game_with_human_input.py) - MAF HITL pattern (non-durable) diff --git a/python/samples/getting_started/azure_functions/12_workflow_hitl/demo.http b/python/samples/getting_started/azure_functions/12_workflow_hitl/demo.http new file mode 100644 index 0000000000..b59ae8b61c --- /dev/null +++ b/python/samples/getting_started/azure_functions/12_workflow_hitl/demo.http @@ -0,0 +1,123 @@ +### ============================================================================ +### Workflow HITL Sample - Content Moderation with Human Approval +### ============================================================================ +### This sample demonstrates MAF workflows with human-in-the-loop using the +### request_info / @response_handler pattern on Azure Durable Functions. +### +### The AgentFunctionApp automatically provides all HITL endpoints. +### +### Prerequisites: +### 1. Start Azurite: azurite --silent --location . +### 2. Start Durable Task Scheduler emulator +### 3. Configure local.settings.json with Azure OpenAI credentials +### 4. Run: func start +### ============================================================================ + + +### ============================================================================ +### 1. Start the Workflow with Content for Moderation +### ============================================================================ +### This starts the workflow. The AI will analyze the content, then the workflow +### will pause waiting for human approval. 
+ +POST http://localhost:7071/api/workflow/run +Content-Type: application/json + +{ + "content_id": "article-001", + "title": "Introduction to AI in Healthcare", + "body": "Artificial intelligence is revolutionizing healthcare by enabling faster diagnosis, personalized treatment plans, and improved patient outcomes. Machine learning algorithms can analyze medical images with remarkable accuracy, often detecting issues that human radiologists might miss.", + "author": "Dr. Jane Smith" +} + + +### ============================================================================ +### 2. Start Workflow with Potentially Problematic Content +### ============================================================================ +### This content should trigger higher risk assessment from the AI analyzer. + +POST http://localhost:7071/api/workflow/run +Content-Type: application/json + +{ + "content_id": "article-002", + "title": "Get Rich Quick Scheme", + "body": "Click here NOW to make $10,000 overnight! This SECRET method is GUARANTEED to work! Limited time offer - act NOW before it's too late! Send your bank details immediately!", + "author": "Definitely Not Spam" +} + + +### ============================================================================ +### 3. Check Workflow Status +### ============================================================================ +### Replace INSTANCE_ID with the value returned from the run call. +### The status will show pending HITL requests if waiting for human approval. + +@instanceId = 3130c486c9374e4e87125cbd9a238dfc + +GET http://localhost:7071/api/workflow/status/{{instanceId}} + + +### ============================================================================ +### 4. Send Human Approval +### ============================================================================ +### Approve the content for publication. +### Replace INSTANCE_ID and REQUEST_ID with values from the status response. 
+ +@requestId = 1682e5f8-0917-4b68-aa04-d4688cfa2e69 + +POST http://localhost:7071/api/workflow/respond/{{instanceId}}/{{requestId}} +Content-Type: application/json + +{ + "approved": true, + "reviewer_notes": "Content is appropriate and well-written. Approved for publication." +} + + +### ============================================================================ +### 5. Send Human Rejection +### ============================================================================ +### Reject the content with feedback. + +POST http://localhost:7071/api/workflow/respond/{{instanceId}}/{{requestId}} +Content-Type: application/json + +{ + "approved": false, + "reviewer_notes": "Content appears to be spam. Contains multiple spam indicators including urgency language, promises of easy money, and requests for personal information." +} + + +### ============================================================================ +### Example Workflow - Complete Happy Path +### ============================================================================ +### +### Step 1: Start workflow with content +### POST http://localhost:7071/api/workflow/run +### -> Returns instanceId: "abc123..." +### +### Step 2: Check status (workflow is waiting for human input) +### GET http://localhost:7071/api/workflow/status/abc123 +### -> Returns pendingHumanInputRequests with requestId: "req-456..." +### +### Step 3: Approve content +### POST http://localhost:7071/api/workflow/respond/abc123/req-456 +### { +### "approved": true, +### "reviewer_notes": "Looks good!" +### } +### -> Returns success +### +### Step 4: Check final status +### GET http://localhost:7071/api/workflow/status/abc123 +### -> Returns runtimeStatus: "Completed", output: "✅ Content approved..." 
+### +### ============================================================================ + + +### ============================================================================ +### Health Check +### ============================================================================ + +GET http://localhost:7071/api/health diff --git a/python/samples/getting_started/azure_functions/12_workflow_hitl/function_app.py b/python/samples/getting_started/azure_functions/12_workflow_hitl/function_app.py new file mode 100644 index 0000000000..bb36832b17 --- /dev/null +++ b/python/samples/getting_started/azure_functions/12_workflow_hitl/function_app.py @@ -0,0 +1,468 @@ +# Copyright (c) Microsoft. All rights reserved. +"""Workflow with Human-in-the-Loop (HITL) using MAF request_info Pattern. + +This sample demonstrates how to integrate human approval into a MAF workflow +running on Azure Durable Functions. It uses the MAF `request_info` and +`@response_handler` pattern for structured HITL interactions. + +The workflow simulates a content moderation pipeline: +1. User submits content for publication +2. An AI agent analyzes the content for policy compliance +3. A human reviewer is prompted to approve/reject the content +4. 
Based on approval, content is either published or rejected + +Key architectural points: +- Uses MAF's `ctx.request_info()` to pause workflow and request human input +- Uses `@response_handler` decorator to handle the human's response +- AgentFunctionApp automatically provides HITL endpoints for status and response +- Durable Functions provides durability while waiting for human input + +Prerequisites: +- Azure OpenAI configured with required environment variables +- Durable Task Scheduler connection string +- Authentication via Azure CLI (az login) +""" + +import json +import logging +import os +from dataclasses import dataclass +from typing import Any + +from agent_framework import ( + AgentExecutorRequest, + AgentExecutorResponse, + ChatMessage, + Executor, + Role, + Workflow, + WorkflowBuilder, + WorkflowContext, + handler, + response_handler, +) +from agent_framework.azure import AzureOpenAIChatClient +from azure.identity import AzureCliCredential +from pydantic import BaseModel +from typing_extensions import Never + +from agent_framework_azurefunctions import AgentFunctionApp + +logger = logging.getLogger(__name__) + +# Environment variable names +AZURE_OPENAI_ENDPOINT_ENV = "AZURE_OPENAI_ENDPOINT" +AZURE_OPENAI_DEPLOYMENT_ENV = "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME" +AZURE_OPENAI_API_KEY_ENV = "AZURE_OPENAI_API_KEY" + +# Agent names +CONTENT_ANALYZER_AGENT_NAME = "ContentAnalyzerAgent" + + +# ============================================================================ +# Data Models +# ============================================================================ + + +class ContentAnalysisResult(BaseModel): + """Structured output from the content analysis agent.""" + + is_appropriate: bool + risk_level: str # low, medium, high + concerns: list[str] + recommendation: str + + +@dataclass +class ContentSubmission: + """Content submitted for moderation.""" + + content_id: str + title: str + body: str + author: str + + +@dataclass +class HumanApprovalRequest: + 
"""Request sent to human reviewer for approval. + + This is the payload passed to ctx.request_info() and will be + exposed via the orchestration status for external systems to retrieve. + """ + + content_id: str + title: str + body: str + author: str + ai_analysis: ContentAnalysisResult + prompt: str + + +class HumanApprovalResponse(BaseModel): + """Response from human reviewer. + + This is what the external system must send back via the HITL response endpoint. + """ + + approved: bool + reviewer_notes: str = "" + + +@dataclass +class ModerationResult: + """Final result of the moderation workflow.""" + + content_id: str + status: str # "approved", "rejected" + ai_analysis: ContentAnalysisResult | None + reviewer_notes: str + + +# ============================================================================ +# Agent Instructions +# ============================================================================ + +CONTENT_ANALYZER_INSTRUCTIONS = """You are a content moderation assistant that analyzes user-submitted content +for policy compliance. Evaluate the content for: + +1. Appropriateness - Is the content suitable for a general audience? +2. Risk level - Rate as 'low', 'medium', or 'high' based on potential issues +3. Concerns - List any specific issues found (empty list if none) +4. 
Recommendation - Provide a brief recommendation for human reviewers + +Return a JSON response with: +- is_appropriate: boolean +- risk_level: string ('low', 'medium', 'high') +- concerns: list of strings +- recommendation: string + +Be thorough but fair in your analysis.""" + + +# ============================================================================ +# Executors +# ============================================================================ + + +@dataclass +class AnalysisWithSubmission: + """Combines the AI analysis with the original submission for downstream processing.""" + + submission: ContentSubmission + analysis: ContentAnalysisResult + + +class ContentAnalyzerExecutor(Executor): + """Parses the AI agent's response and prepares for human review.""" + + def __init__(self): + super().__init__(id="content_analyzer_executor") + + @handler + async def handle_analysis( + self, + response: AgentExecutorResponse, + ctx: WorkflowContext[AnalysisWithSubmission], + ) -> None: + """Parse the AI analysis and forward with submission context.""" + analysis = ContentAnalysisResult.model_validate_json(response.agent_run_response.text) + + # Retrieve the original submission from shared state + submission: ContentSubmission = await ctx.get_shared_state("current_submission") + + await ctx.send_message(AnalysisWithSubmission(submission=submission, analysis=analysis)) + + +class HumanReviewExecutor(Executor): + """Requests human approval using MAF's request_info pattern. + + This executor demonstrates the core HITL pattern: + 1. Receives the AI analysis result + 2. Calls ctx.request_info() to pause and request human input + 3. The @response_handler method processes the human's response + """ + + def __init__(self): + super().__init__(id="human_review_executor") + + @handler + async def request_review( + self, + data: AnalysisWithSubmission, + ctx: WorkflowContext, + ) -> None: + """Request human review for the content. + + This method: + 1. 
Constructs the approval request with all context + 2. Calls request_info to pause the workflow + 3. The workflow will resume when a response is provided via the HITL endpoint + """ + submission = data.submission + analysis = data.analysis + + # Construct the human-readable prompt + prompt = ( + f"Please review the following content for publication:\n\n" + f"Title: {submission.title}\n" + f"Author: {submission.author}\n" + f"Content: {submission.body}\n\n" + f"AI Analysis:\n" + f"- Appropriate: {analysis.is_appropriate}\n" + f"- Risk Level: {analysis.risk_level}\n" + f"- Concerns: {', '.join(analysis.concerns) if analysis.concerns else 'None'}\n" + f"- Recommendation: {analysis.recommendation}\n\n" + f"Please approve or reject this content." + ) + + approval_request = HumanApprovalRequest( + content_id=submission.content_id, + title=submission.title, + body=submission.body, + author=submission.author, + ai_analysis=analysis, + prompt=prompt, + ) + + # Store analysis in shared state for the response handler + await ctx.set_shared_state("pending_analysis", data) + + # Request human input - workflow will pause here + # The response_type specifies what we expect back + await ctx.request_info( + request_data=approval_request, + response_type=HumanApprovalResponse, + ) + + @response_handler + async def handle_approval_response( + self, + original_request: HumanApprovalRequest, + response: HumanApprovalResponse, + ctx: WorkflowContext[ModerationResult], + ) -> None: + """Process the human reviewer's decision. + + This method is called automatically when a response to request_info is received. + The original_request contains the HumanApprovalRequest we sent. + The response contains the HumanApprovalResponse from the reviewer. 
+ """ + logger.info( + "Human review received for content %s: approved=%s, notes=%s", + original_request.content_id, + response.approved, + response.reviewer_notes, + ) + + # Create the final moderation result + status = "approved" if response.approved else "rejected" + result = ModerationResult( + content_id=original_request.content_id, + status=status, + ai_analysis=original_request.ai_analysis, + reviewer_notes=response.reviewer_notes, + ) + + await ctx.send_message(result) + + +class PublishExecutor(Executor): + """Handles the final publication or rejection of content.""" + + def __init__(self): + super().__init__(id="publish_executor") + + @handler + async def handle_result( + self, + result: ModerationResult, + ctx: WorkflowContext[Never, str], + ) -> None: + """Finalize the moderation and yield output.""" + if result.status == "approved": + message = ( + f"✅ Content '{result.content_id}' has been APPROVED and published.\n" + f"Reviewer notes: {result.reviewer_notes or 'None'}" + ) + else: + message = ( + f"❌ Content '{result.content_id}' has been REJECTED.\n" + f"Reviewer notes: {result.reviewer_notes or 'None'}" + ) + + logger.info(message) + await ctx.yield_output(message) + + +# ============================================================================ +# Input Router Executor +# ============================================================================ + + +def _build_client_kwargs() -> dict[str, Any]: + """Build Azure OpenAI client configuration from environment variables.""" + endpoint = os.getenv(AZURE_OPENAI_ENDPOINT_ENV) + if not endpoint: + raise RuntimeError(f"{AZURE_OPENAI_ENDPOINT_ENV} environment variable is required.") + + deployment = os.getenv(AZURE_OPENAI_DEPLOYMENT_ENV) + if not deployment: + raise RuntimeError(f"{AZURE_OPENAI_DEPLOYMENT_ENV} environment variable is required.") + + client_kwargs: dict[str, Any] = { + "endpoint": endpoint, + "deployment_name": deployment, + } + + api_key = os.getenv(AZURE_OPENAI_API_KEY_ENV) + if 
api_key: + client_kwargs["api_key"] = api_key + else: + client_kwargs["credential"] = AzureCliCredential() + + return client_kwargs + + +class InputRouterExecutor(Executor): + """Routes incoming content submission to the analysis agent.""" + + def __init__(self): + super().__init__(id="input_router") + + @handler + async def route_input( + self, + input_json: str, + ctx: WorkflowContext[AgentExecutorRequest], + ) -> None: + """Parse input and create agent request.""" + data = json.loads(input_json) if isinstance(input_json, str) else input_json + + submission = ContentSubmission( + content_id=data.get("content_id", "unknown"), + title=data.get("title", "Untitled"), + body=data.get("body", ""), + author=data.get("author", "Anonymous"), + ) + + # Store submission in shared state for later retrieval + await ctx.set_shared_state("current_submission", submission) + + # Create the agent request + message = ( + f"Please analyze the following content for policy compliance:\n\n" + f"Title: {submission.title}\n" + f"Author: {submission.author}\n" + f"Content:\n{submission.body}" + ) + + await ctx.send_message( + AgentExecutorRequest( + messages=[ChatMessage(Role.USER, text=message)], + should_respond=True, + ) + ) + + +# ============================================================================ +# Workflow Creation +# ============================================================================ + + +def _create_workflow() -> Workflow: + """Create the content moderation workflow with HITL.""" + client_kwargs = _build_client_kwargs() + chat_client = AzureOpenAIChatClient(**client_kwargs) + + # Create the content analysis agent + content_analyzer_agent = chat_client.create_agent( + name=CONTENT_ANALYZER_AGENT_NAME, + instructions=CONTENT_ANALYZER_INSTRUCTIONS, + response_format=ContentAnalysisResult, + ) + + # Create executors + input_router = InputRouterExecutor() + content_analyzer_executor = ContentAnalyzerExecutor() + human_review_executor = HumanReviewExecutor() + 
publish_executor = PublishExecutor() + + # Build the workflow graph + # Flow: + # input_router -> content_analyzer_agent -> content_analyzer_executor + # -> human_review_executor (HITL pause here) -> publish_executor + workflow = ( + WorkflowBuilder() + .set_start_executor(input_router) + .add_edge(input_router, content_analyzer_agent) + .add_edge(content_analyzer_agent, content_analyzer_executor) + .add_edge(content_analyzer_executor, human_review_executor) + .add_edge(human_review_executor, publish_executor) + .build() + ) + + return workflow + + +# ============================================================================ +# Application Entry Point +# ============================================================================ + + +def launch(durable: bool = True) -> AgentFunctionApp | None: + """Launch the function app or DevUI. + + Args: + durable: If True, returns AgentFunctionApp for Azure Functions. + If False, launches DevUI for local MAF development. + """ + if durable: + # Azure Functions mode with Durable Functions + # The app automatically provides HITL endpoints: + # - POST /api/workflow/run - Start the workflow + # - GET /api/workflow/status/{instanceId} - Check status and pending HITL requests + # - POST /api/workflow/respond/{instanceId}/{requestId} - Send HITL response + # - GET /api/health - Health check + workflow = _create_workflow() + app = AgentFunctionApp(workflow=workflow, enable_health_check=True) + return app + else: + # Pure MAF mode with DevUI for local development + from pathlib import Path + + from agent_framework.devui import serve + from dotenv import load_dotenv + + env_path = Path(__file__).parent / ".env" + load_dotenv(dotenv_path=env_path) + + logger.info("Starting Workflow HITL Sample in MAF mode") + logger.info("Available at: http://localhost:8096") + logger.info("\nThis workflow demonstrates:") + logger.info("- Human-in-the-loop using request_info / @response_handler pattern") + logger.info("- AI content analysis with 
structured output") + logger.info("- Human approval workflow integration") + logger.info("\nFlow: InputRouter -> ContentAnalyzer Agent -> HumanReview -> Publish") + + workflow = _create_workflow() + serve(entities=[workflow], port=8096, auto_open=True) + + return None + + +# Default: Azure Functions mode +# Run with `python function_app.py --maf` for pure MAF mode with DevUI +app = launch(durable=True) + + +if __name__ == "__main__": + import sys + + if "--maf" in sys.argv: + # Run in pure MAF mode with DevUI + launch(durable=False) + else: + print("Usage: python function_app.py --maf") + print(" --maf Run in pure MAF mode with DevUI (http://localhost:8096)") + print("\nFor Azure Functions mode, use: func start") diff --git a/python/samples/getting_started/azure_functions/12_workflow_hitl/host.json b/python/samples/getting_started/azure_functions/12_workflow_hitl/host.json new file mode 100644 index 0000000000..292562af8e --- /dev/null +++ b/python/samples/getting_started/azure_functions/12_workflow_hitl/host.json @@ -0,0 +1,16 @@ +{ + "version": "2.0", + "extensionBundle": { + "id": "Microsoft.Azure.Functions.ExtensionBundle", + "version": "[4.*, 5.0.0)" + }, + "extensions": { + "durableTask": { + "hubName": "%TASKHUB_NAME%", + "storageProvider": { + "type": "AzureManaged", + "connectionStringName": "DURABLE_TASK_SCHEDULER_CONNECTION_STRING" + } + } + } +} diff --git a/python/samples/getting_started/azure_functions/12_workflow_hitl/local.settings.json.sample b/python/samples/getting_started/azure_functions/12_workflow_hitl/local.settings.json.sample new file mode 100644 index 0000000000..69c08a3386 --- /dev/null +++ b/python/samples/getting_started/azure_functions/12_workflow_hitl/local.settings.json.sample @@ -0,0 +1,11 @@ +{ + "IsEncrypted": false, + "Values": { + "AzureWebJobsStorage": "UseDevelopmentStorage=true", + "DURABLE_TASK_SCHEDULER_CONNECTION_STRING": "Endpoint=http://localhost:8080;TaskHub=default;Authentication=None", + "TASKHUB_NAME": "default", + 
"FUNCTIONS_WORKER_RUNTIME": "python", + "AZURE_OPENAI_ENDPOINT": "", + "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME": "" + } +} diff --git a/python/samples/getting_started/azure_functions/12_workflow_hitl/requirements.txt b/python/samples/getting_started/azure_functions/12_workflow_hitl/requirements.txt new file mode 100644 index 0000000000..85e158b8d4 --- /dev/null +++ b/python/samples/getting_started/azure_functions/12_workflow_hitl/requirements.txt @@ -0,0 +1,3 @@ +agent-framework-azurefunctions +azure-identity +agents-maf