From cdabd32817b0625e4769066b0954bc9dad5df8ad Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Thu, 12 Feb 2026 12:22:10 +0900 Subject: [PATCH 1/3] adopt AzureOpenAIResponsesClient, reorganize orchestration examples, and fix workflow/orchestration bugs --- .../_workflows/_agent_executor.py | 81 ++++-- .../agent_framework/_workflows/_runner.py | 4 + .../openai/_responses_client.py | 10 +- .../openai/test_openai_responses_client.py | 34 +++ .../tests/workflow/test_full_conversation.py | 62 +++++ .../tests/workflow/test_workflow_kwargs.py | 135 ++++++++++ .../_group_chat.py | 58 ++++- .../orchestrations/tests/test_group_chat.py | 68 +++++ .../orchestrations/tests/test_magentic.py | 7 +- .../getting_started/orchestrations/README.md | 70 +++-- .../{ => concurrent}/concurrent_agents.py | 14 +- .../concurrent_builder_tool_approval.py | 11 +- .../concurrent_custom_agent_executors.py | 20 +- .../concurrent_custom_aggregator.py | 18 +- .../concurrent}/concurrent_request_info.py | 14 +- .../concurrent_workflow_as_agent.py | 14 +- .../group_chat_agent_manager.py | 59 +++-- .../group_chat_builder_tool_approval.py | 11 +- .../group_chat_philosophical_debate.py | 61 +++-- .../group-chat}/group_chat_request_info.py | 12 +- .../group_chat_simple_selector.py | 12 +- .../group_chat_workflow_as_agent.py | 25 +- .../{ => handoff}/handoff_autonomous.py | 14 +- .../{ => handoff}/handoff_simple.py | 16 +- .../handoff_with_code_interpreter_file.py | 186 ++++++++++++++ ...ff_with_tool_approval_checkpoint_resume.py | 14 +- .../handoff}/handoff_workflow_as_agent.py | 16 +- .../handoff_with_code_interpreter_file.py | 241 ------------------ .../orchestrations/{ => magentic}/magentic.py | 25 +- .../{ => magentic}/magentic_checkpoint.py | 26 +- .../magentic_human_plan_review.py | 25 +- .../magentic}/magentic_workflow_as_agent.py | 25 +- .../{ => sequential}/sequential_agents.py | 12 +- .../sequential_builder_tool_approval.py | 11 +- .../sequential_custom_executors.py | 12 +- 
.../sequential}/sequential_request_info.py | 12 +- .../sequential_workflow_as_agent.py | 12 +- .../getting_started/workflows/README.md | 23 +- .../_start-here/step2_agents_in_a_workflow.py | 14 +- .../workflows/_start-here/step3_streaming.py | 14 +- .../agents/azure_ai_agents_streaming.py | 87 ++++--- .../azure_ai_agents_with_shared_thread.py | 96 +++---- .../agents/azure_chat_agents_and_executor.py | 18 +- .../agents/azure_chat_agents_streaming.py | 18 +- ...re_chat_agents_tool_calls_with_feedback.py | 18 +- .../agents/custom_agent_executors.py | 22 +- .../workflow_as_agent_human_in_the_loop.py | 12 +- .../agents/workflow_as_agent_kwargs.py | 11 +- .../workflow_as_agent_reflection_pattern.py | 25 +- .../agents/workflow_as_agent_with_thread.py | 19 +- .../checkpoint_with_human_in_the_loop.py | 12 +- .../workflow_as_agent_checkpoint.py | 25 +- .../composition/sub_workflow_basics.py | 38 +-- .../composition/sub_workflow_kwargs.py | 11 +- .../sub_workflow_parallel_requests.py | 6 +- .../sub_workflow_request_interception.py | 32 +-- .../workflows/control-flow/edge_condition.py | 19 +- .../multi_selection_edge_group.py | 28 +- .../workflows/control-flow/simple_loop.py | 21 +- .../control-flow/switch_case_edge_group.py | 17 +- .../declarative/customer_support/main.py | 13 +- .../declarative/deep_research/main.py | 9 +- .../declarative/function_tools/main.py | 9 +- .../workflows/declarative/marketing/main.py | 9 +- .../declarative/student_teacher/main.py | 13 +- .../human-in-the-loop/agents_with_HITL.py | 18 +- .../agents_with_approval_requests.py | 26 +- .../agents_with_declaration_only_tools.py | 10 +- .../guessing_game_with_human_input.py | 12 +- .../parallelism/fan_out_fan_in_edges.py | 69 ++++- .../state-management/state_with_agents.py | 18 +- .../state-management/workflow_kwargs.py | 11 +- .../concurrent_with_visualization.py | 24 +- 73 files changed, 1595 insertions(+), 679 deletions(-) rename python/samples/getting_started/orchestrations/{ => 
concurrent}/concurrent_agents.py (89%) rename python/samples/getting_started/{workflows/tool-approval => orchestrations/concurrent}/concurrent_builder_tool_approval.py (95%) rename python/samples/getting_started/orchestrations/{ => concurrent}/concurrent_custom_agent_executors.py (89%) rename python/samples/getting_started/orchestrations/{ => concurrent}/concurrent_custom_aggregator.py (88%) rename python/samples/getting_started/{workflows/human-in-the-loop => orchestrations/concurrent}/concurrent_request_info.py (93%) rename python/samples/getting_started/{workflows/agents => orchestrations/concurrent}/concurrent_workflow_as_agent.py (83%) rename python/samples/getting_started/orchestrations/{ => group-chat}/group_chat_agent_manager.py (67%) rename python/samples/getting_started/{workflows/tool-approval => orchestrations/group-chat}/group_chat_builder_tool_approval.py (95%) rename python/samples/getting_started/orchestrations/{ => group-chat}/group_chat_philosophical_debate.py (90%) rename python/samples/getting_started/{workflows/human-in-the-loop => orchestrations/group-chat}/group_chat_request_info.py (93%) rename python/samples/getting_started/orchestrations/{ => group-chat}/group_chat_simple_selector.py (92%) rename python/samples/getting_started/{workflows/agents => orchestrations/group-chat}/group_chat_workflow_as_agent.py (68%) rename python/samples/getting_started/orchestrations/{ => handoff}/handoff_autonomous.py (92%) rename python/samples/getting_started/orchestrations/{ => handoff}/handoff_simple.py (95%) create mode 100644 python/samples/getting_started/orchestrations/handoff/handoff_with_code_interpreter_file.py rename python/samples/getting_started/orchestrations/{ => handoff}/handoff_with_tool_approval_checkpoint_resume.py (94%) rename python/samples/getting_started/{workflows/agents => orchestrations/handoff}/handoff_workflow_as_agent.py (93%) delete mode 100644 python/samples/getting_started/orchestrations/handoff_with_code_interpreter_file.py 
rename python/samples/getting_started/orchestrations/{ => magentic}/magentic.py (85%) rename python/samples/getting_started/orchestrations/{ => magentic}/magentic_checkpoint.py (92%) rename python/samples/getting_started/orchestrations/{ => magentic}/magentic_human_plan_review.py (84%) rename python/samples/getting_started/{workflows/agents => orchestrations/magentic}/magentic_workflow_as_agent.py (78%) rename python/samples/getting_started/orchestrations/{ => sequential}/sequential_agents.py (85%) rename python/samples/getting_started/{workflows/tool-approval => orchestrations/sequential}/sequential_builder_tool_approval.py (93%) rename python/samples/getting_started/orchestrations/{ => sequential}/sequential_custom_executors.py (89%) rename python/samples/getting_started/{workflows/human-in-the-loop => orchestrations/sequential}/sequential_request_info.py (91%) rename python/samples/getting_started/{workflows/agents => orchestrations/sequential}/sequential_workflow_as_agent.py (87%) diff --git a/python/packages/core/agent_framework/_workflows/_agent_executor.py b/python/packages/core/agent_framework/_workflows/_agent_executor.py index 8290391fb9..e96e802e64 100644 --- a/python/packages/core/agent_framework/_workflows/_agent_executor.py +++ b/python/packages/core/agent_framework/_workflows/_agent_executor.py @@ -2,6 +2,7 @@ import logging import sys +from collections.abc import Mapping from dataclasses import dataclass from typing import Any, cast @@ -292,10 +293,10 @@ async def _run_agent_and_emit( # Non-streaming mode: use run() and emit single event response = await self._run_agent(cast(WorkflowContext[Never, AgentResponse], ctx)) - # Always extend full conversation with cached messages plus agent outputs - # (agent_response.messages) after each run. This is to avoid losing context - # when agent did not complete and the cache is cleared when responses come back. 
- self._full_conversation.extend(list(self._cache) + (list(response.messages) if response else [])) + # Snapshot current conversation as cache + latest agent outputs. + # Do not append to prior snapshots: callers may provide full-history messages + # in request.messages, and extending would duplicate prior turns. + self._full_conversation = list(self._cache) + (list(response.messages) if response else []) if response is None: # Agent did not complete (e.g., waiting for user input); do not emit response @@ -315,12 +316,7 @@ async def _run_agent(self, ctx: WorkflowContext[Never, AgentResponse]) -> AgentR Returns: The complete AgentResponse, or None if waiting for user input. """ - run_kwargs: dict[str, Any] = ctx.get_state(WORKFLOW_RUN_KWARGS_KEY, {}) - - # Build options dict with additional_function_arguments for tool kwargs propagation - options: dict[str, Any] | None = None - if run_kwargs: - options = {"additional_function_arguments": run_kwargs} + run_kwargs, options = self._prepare_agent_run_args(ctx.get_state(WORKFLOW_RUN_KWARGS_KEY, {})) response = await self._agent.run( self._cache, @@ -349,12 +345,7 @@ async def _run_agent_streaming(self, ctx: WorkflowContext[Never, AgentResponseUp Returns: The complete AgentResponse, or None if waiting for user input. 
""" - run_kwargs: dict[str, Any] = ctx.get_state(WORKFLOW_RUN_KWARGS_KEY) or {} - - # Build options dict with additional_function_arguments for tool kwargs propagation - options: dict[str, Any] | None = None - if run_kwargs: - options = {"additional_function_arguments": run_kwargs} + run_kwargs, options = self._prepare_agent_run_args(ctx.get_state(WORKFLOW_RUN_KWARGS_KEY) or {}) updates: list[AgentResponseUpdate] = [] user_input_requests: list[Content] = [] @@ -389,3 +380,61 @@ async def _run_agent_streaming(self, ctx: WorkflowContext[Never, AgentResponseUp return None return response + + @staticmethod + def _prepare_agent_run_args(raw_run_kwargs: dict[str, Any]) -> tuple[dict[str, Any], dict[str, Any] | None]: + """Prepare kwargs and options for agent.run(), avoiding duplicate option passing. + + Workflow-level kwargs are propagated to tool calls through + `options.additional_function_arguments`. If workflow kwargs include an + `options` key, merge it into the final options object and remove it from + kwargs before spreading `**run_kwargs`. 
+ """ + run_kwargs = dict(raw_run_kwargs) + options_from_workflow = run_kwargs.pop("options", None) + workflow_additional_args = run_kwargs.pop("additional_function_arguments", None) + + options: dict[str, Any] = {} + if options_from_workflow is not None: + if isinstance(options_from_workflow, Mapping): + for key, value in cast(Mapping[Any, Any], options_from_workflow).items(): + if isinstance(key, str): + options[key] = value + else: + logger.warning( + "Ignoring non-mapping workflow 'options' kwarg of type %s for AgentExecutor %s.", + type(options_from_workflow).__name__, + AgentExecutor.__name__, + ) + + existing_additional_args = options.get("additional_function_arguments") + if isinstance(existing_additional_args, Mapping): + additional_args = { + key: value + for key, value in cast(Mapping[Any, Any], existing_additional_args).items() + if isinstance(key, str) + } + else: + additional_args = {} + + if workflow_additional_args is not None: + if isinstance(workflow_additional_args, Mapping): + additional_args.update({ + key: value + for key, value in cast(Mapping[Any, Any], workflow_additional_args).items() + if isinstance(key, str) + }) + else: + logger.warning( + "Ignoring non-mapping workflow 'additional_function_arguments' kwarg of type %s for AgentExecutor %s.", # noqa: E501 + type(workflow_additional_args).__name__, + AgentExecutor.__name__, + ) + + if run_kwargs: + additional_args.update(run_kwargs) + + if additional_args: + options["additional_function_arguments"] = additional_args + + return run_kwargs, options or None diff --git a/python/packages/core/agent_framework/_workflows/_runner.py b/python/packages/core/agent_framework/_workflows/_runner.py index 88281597a2..bad37148b7 100644 --- a/python/packages/core/agent_framework/_workflows/_runner.py +++ b/python/packages/core/agent_framework/_workflows/_runner.py @@ -190,6 +190,10 @@ async def _create_checkpoint_if_enabled(self, previous_checkpoint_id: Checkpoint # Save executor states into the shared 
state before creating the checkpoint, # so that they are included in the checkpoint payload. await self._save_executor_states() + # `on_checkpoint_save()` writes via State.set(), which stages values in the + # pending buffer. Checkpoints serialize committed state only, so commit here + # to ensure executor snapshots are captured in this checkpoint. + self._state.commit() checkpoint_id = await self._ctx.create_checkpoint( self._workflow_name, diff --git a/python/packages/core/agent_framework/openai/_responses_client.py b/python/packages/core/agent_framework/openai/_responses_client.py index f239221c49..fc127ded25 100644 --- a/python/packages/core/agent_framework/openai/_responses_client.py +++ b/python/packages/core/agent_framework/openai/_responses_client.py @@ -941,8 +941,16 @@ def _prepare_content_for_openai( """Prepare content for the OpenAI Responses API format.""" match content.type: case "text": + if role == "assistant": + # Assistant history is represented as output text items; Azure validation + # requires `annotations` to be present for this type. 
+ return { + "type": "output_text", + "text": content.text, + "annotations": [], + } return { - "type": "output_text" if role == "assistant" else "input_text", + "type": "input_text", "text": content.text, } case "text_reasoning": diff --git a/python/packages/core/tests/openai/test_openai_responses_client.py b/python/packages/core/tests/openai/test_openai_responses_client.py index e3f982d826..a83c4a398b 100644 --- a/python/packages/core/tests/openai/test_openai_responses_client.py +++ b/python/packages/core/tests/openai/test_openai_responses_client.py @@ -677,6 +677,40 @@ def test_prepare_content_for_openai_hosted_vector_store_content() -> None: assert result == {} +def test_prepare_content_for_openai_text_uses_role_specific_type() -> None: + """Text content should use input_text for user and output_text for assistant.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + + text_content = Content.from_text(text="hello") + + user_result = client._prepare_content_for_openai("user", text_content, {}) + assistant_result = client._prepare_content_for_openai("assistant", text_content, {}) + + assert user_result["type"] == "input_text" + assert assistant_result["type"] == "output_text" + assert assistant_result["annotations"] == [] + assert user_result["text"] == "hello" + assert assistant_result["text"] == "hello" + + +def test_prepare_messages_for_openai_assistant_history_uses_output_text_with_annotations() -> None: + """Assistant history should be output_text and include required annotations.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + + messages = [ + Message(role="user", text="What is async/await?"), + Message(role="assistant", text="Async/await enables non-blocking concurrency."), + ] + + prepared = client._prepare_messages_for_openai(messages) + + assert prepared[0]["role"] == "user" + assert prepared[0]["content"][0]["type"] == "input_text" + assert prepared[1]["role"] == "assistant" + assert 
prepared[1]["content"][0]["type"] == "output_text" + assert prepared[1]["content"][0]["annotations"] == [] + + def test_parse_response_from_openai_with_mcp_server_tool_result() -> None: """Test _parse_response_from_openai with MCP server tool result.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") diff --git a/python/packages/core/tests/workflow/test_full_conversation.py b/python/packages/core/tests/workflow/test_full_conversation.py index 3eb47803fc..41003f6544 100644 --- a/python/packages/core/tests/workflow/test_full_conversation.py +++ b/python/packages/core/tests/workflow/test_full_conversation.py @@ -8,6 +8,7 @@ from agent_framework import ( AgentExecutor, + AgentExecutorRequest, AgentExecutorResponse, AgentResponse, AgentResponseUpdate, @@ -150,3 +151,64 @@ async def test_sequential_adapter_uses_full_conversation() -> None: assert len(seen) == 2 assert seen[0].role == "user" and "hello seq" in (seen[0].text or "") assert seen[1].role == "assistant" and "A1 reply" in (seen[1].text or "") + + +class _RoundTripCoordinator(Executor): + """Loops once back to the same agent with full conversation + feedback.""" + + def __init__(self, *, target_agent_id: str, id: str = "round_trip_coordinator") -> None: + super().__init__(id=id) + self._target_agent_id = target_agent_id + self._seen = 0 + + @handler + async def handle_response( + self, + response: AgentExecutorResponse, + ctx: WorkflowContext[Never, dict[str, Any]], + ) -> None: + self._seen += 1 + if self._seen == 1: + assert response.full_conversation is not None + await ctx.send_message( + AgentExecutorRequest( + messages=list(response.full_conversation) + [Message(role="user", text="apply feedback")], + should_respond=True, + ), + target_id=self._target_agent_id, + ) + return + + assert response.full_conversation is not None + await ctx.yield_output({ + "roles": [m.role for m in response.full_conversation], + "texts": [m.text for m in response.full_conversation], + }) + + +async def 
test_agent_executor_full_conversation_round_trip_does_not_duplicate_history() -> None: + """When full history is replayed, AgentExecutor should not duplicate prior turns.""" + agent = _SimpleAgent(id="writer_agent", name="Writer", reply_text="draft reply") + agent_exec = AgentExecutor(agent, id="writer_agent") + coordinator = _RoundTripCoordinator(target_agent_id="writer_agent") + + wf = ( + WorkflowBuilder(start_executor=agent_exec, output_executors=[coordinator]) + .add_edge(agent_exec, coordinator) + .add_edge(coordinator, agent_exec) + .build() + ) + + result = await wf.run("initial prompt") + outputs = result.get_outputs() + assert len(outputs) == 1 + payload = outputs[0] + assert isinstance(payload, dict) + + # Expected conversation after one loop: + # user(initial), assistant(first reply), user(feedback), assistant(second reply) + assert payload["roles"] == ["user", "assistant", "user", "assistant"] + assert payload["texts"][0] == "initial prompt" + assert payload["texts"][1] == "draft reply" + assert payload["texts"][2] == "apply feedback" + assert payload["texts"][3] == "draft reply" diff --git a/python/packages/core/tests/workflow/test_workflow_kwargs.py b/python/packages/core/tests/workflow/test_workflow_kwargs.py index 83e13975db..9c21652281 100644 --- a/python/packages/core/tests/workflow/test_workflow_kwargs.py +++ b/python/packages/core/tests/workflow/test_workflow_kwargs.py @@ -72,6 +72,41 @@ async def _run() -> AgentResponse: return _run() +class _OptionsAwareAgent(BaseAgent): + """Test agent that captures explicit `options` and kwargs passed to run().""" + + captured_options: list[dict[str, Any] | None] + captured_kwargs: list[dict[str, Any]] + + def __init__(self, name: str = "options_agent") -> None: + super().__init__(name=name, description="Test agent for options capture") + self.captured_options = [] + self.captured_kwargs = [] + + def run( + self, + messages: str | Message | Sequence[str | Message] | None = None, + *, + stream: bool = False, 
+ thread: AgentThread | None = None, + options: dict[str, Any] | None = None, + **kwargs: Any, + ) -> Awaitable[AgentResponse] | ResponseStream[AgentResponseUpdate, AgentResponse]: + self.captured_options.append(dict(options) if options is not None else None) + self.captured_kwargs.append(dict(kwargs)) + if stream: + + async def _stream() -> AsyncIterable[AgentResponseUpdate]: + yield AgentResponseUpdate(contents=[Content.from_text(text=f"{self.name} response")]) + + return ResponseStream(_stream(), finalizer=AgentResponse.from_updates) + + async def _run() -> AgentResponse: + return AgentResponse(messages=[Message("assistant", [f"{self.name} response"])]) + + return _run() + + # region Sequential Builder Tests @@ -131,6 +166,106 @@ async def test_sequential_run_kwargs_flow() -> None: assert agent.captured_kwargs[0].get("custom_data") == {"test": True} +async def test_sequential_run_options_does_not_conflict_with_agent_options() -> None: + """Test workflow.run(options=...) does not conflict with Agent.run(options=...).""" + agent = _OptionsAwareAgent(name="options_agent") + workflow = SequentialBuilder(participants=[agent]).build() + + custom_data = {"session_id": "abc123"} + user_token = {"user_name": "alice"} + provided_options = { + "store": False, + "additional_function_arguments": {"source": "workflow-options"}, + } + + async for event in workflow.run( + "test message", + stream=True, + options=provided_options, + custom_data=custom_data, + user_token=user_token, + ): + if event.type == "status" and event.state == WorkflowRunState.IDLE: + break + + assert len(agent.captured_options) >= 1 + captured_options = agent.captured_options[0] + assert captured_options is not None + assert captured_options.get("store") is False + + additional_args = captured_options.get("additional_function_arguments") + assert isinstance(additional_args, dict) + assert additional_args.get("source") == "workflow-options" + assert additional_args.get("custom_data") == custom_data + 
assert additional_args.get("user_token") == user_token + + # "options" should be passed once via the dedicated options parameter, + # not duplicated in **kwargs. + assert len(agent.captured_kwargs) >= 1 + captured_kwargs = agent.captured_kwargs[0] + assert "options" not in captured_kwargs + assert captured_kwargs.get("custom_data") == custom_data + assert captured_kwargs.get("user_token") == user_token + + +async def test_sequential_run_additional_function_arguments_flattened() -> None: + """Test workflow.run(additional_function_arguments=...) maps directly to tool kwargs.""" + agent = _OptionsAwareAgent(name="options_agent") + workflow = SequentialBuilder(participants=[agent]).build() + + custom_data = {"session_id": "abc123"} + user_token = {"user_name": "alice"} + + async for event in workflow.run( + "test message", + stream=True, + additional_function_arguments={"custom_data": custom_data, "user_token": user_token}, + ): + if event.type == "status" and event.state == WorkflowRunState.IDLE: + break + + assert len(agent.captured_options) >= 1 + captured_options = agent.captured_options[0] + assert captured_options is not None + + additional_args = captured_options.get("additional_function_arguments") + assert isinstance(additional_args, dict) + assert additional_args.get("custom_data") == custom_data + assert additional_args.get("user_token") == user_token + assert "additional_function_arguments" not in additional_args + + assert len(agent.captured_kwargs) >= 1 + captured_kwargs = agent.captured_kwargs[0] + assert "additional_function_arguments" not in captured_kwargs + + +async def test_sequential_run_additional_function_arguments_merges_with_options() -> None: + """Test workflow additional_function_arguments merges with workflow options.""" + agent = _OptionsAwareAgent(name="options_agent") + workflow = SequentialBuilder(participants=[agent]).build() + + async for event in workflow.run( + "test message", + stream=True, + 
options={"additional_function_arguments": {"source": "workflow-options"}}, + additional_function_arguments={"custom_data": {"session_id": "abc123"}}, + user_token={"user_name": "alice"}, + ): + if event.type == "status" and event.state == WorkflowRunState.IDLE: + break + + assert len(agent.captured_options) >= 1 + captured_options = agent.captured_options[0] + assert captured_options is not None + + additional_args = captured_options.get("additional_function_arguments") + assert isinstance(additional_args, dict) + assert additional_args.get("source") == "workflow-options" + assert additional_args.get("custom_data") == {"session_id": "abc123"} + assert additional_args.get("user_token") == {"user_name": "alice"} + assert "additional_function_arguments" not in additional_args + + # endregion diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_group_chat.py b/python/packages/orchestrations/agent_framework_orchestrations/_group_chat.py index d7ac1576c7..384bbea687 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_group_chat.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_group_chat.py @@ -21,6 +21,7 @@ from __future__ import annotations import inspect +import json import logging import sys from collections import OrderedDict @@ -393,6 +394,61 @@ async def _handle_response( ) self._increment_round() + @staticmethod + def _parse_last_json_object(text: str) -> AgentOrchestrationOutput | None: + """Parse one or more concatenated JSON values and return the last object.""" + decoder = json.JSONDecoder() + index = 0 + parsed: Any | None = None + + while index < len(text): + while index < len(text) and text[index].isspace(): + index += 1 + if index >= len(text): + break + parsed, index = decoder.raw_decode(text, index) + + if parsed is None: + return None + return AgentOrchestrationOutput.model_validate(parsed) + + @classmethod + def _parse_agent_output(cls, agent_response: Any) -> 
AgentOrchestrationOutput: + """Parse manager output, handling both structured values and concatenated JSON text.""" + try: + structured_value = agent_response.value + except Exception: + structured_value = None + + if structured_value is not None: + return AgentOrchestrationOutput.model_validate(structured_value) + + text_candidates: list[str] = [] + for message in reversed(agent_response.messages): + if message.role == "assistant" and message.text.strip(): + text_candidates.append(message.text.strip()) + break + + response_text = agent_response.text.strip() + if response_text and response_text not in text_candidates: + text_candidates.append(response_text) + + last_error: Exception | None = None + for candidate in text_candidates: + try: + return AgentOrchestrationOutput.model_validate_json(candidate) + except Exception as ex: + last_error = ex + + try: + parsed = cls._parse_last_json_object(candidate) + if parsed is not None: + return parsed + except Exception as ex: + last_error = ex + + raise ValueError("Failed to parse agent orchestration output.") from last_error + async def _invoke_agent(self) -> AgentOrchestrationOutput: """Invoke the orchestrator agent to determine the next speaker and termination.""" @@ -404,7 +460,7 @@ async def _invoke_agent_helper(conversation: list[Message]) -> AgentOrchestratio options={"response_format": AgentOrchestrationOutput}, ) # Parse and validate the structured output - agent_orchestration_output = AgentOrchestrationOutput.model_validate_json(agent_response.text) + agent_orchestration_output = self._parse_agent_output(agent_response) if not agent_orchestration_output.terminate and not agent_orchestration_output.next_speaker: raise ValueError("next_speaker must be provided if not terminating the conversation.") diff --git a/python/packages/orchestrations/tests/test_group_chat.py b/python/packages/orchestrations/tests/test_group_chat.py index 6544b681a0..79bc62d6c5 100644 --- 
a/python/packages/orchestrations/tests/test_group_chat.py +++ b/python/packages/orchestrations/tests/test_group_chat.py @@ -121,6 +121,51 @@ async def run( ) +class ConcatenatedJsonManagerAgent(Agent): + """Manager agent that emits concatenated JSON in a single assistant message.""" + + def __init__(self) -> None: + super().__init__(client=MockChatClient(), name="concat_manager", description="Concatenated JSON manager") + self._call_count = 0 + + async def run( + self, + messages: str | Message | Sequence[str | Message] | None = None, + *, + thread: AgentThread | None = None, + **kwargs: Any, + ) -> AgentResponse: + if self._call_count == 0: + self._call_count += 1 + return AgentResponse( + messages=[ + Message( + role="assistant", + text=( + '{"terminate": false, "reason": "invalid candidate", ' + '"next_speaker": "unknown", "final_message": null} ' + '{"terminate": false, "reason": "pick known participant", ' + '"next_speaker": "agent", "final_message": null}' + ), + author_name=self.name, + ) + ] + ) + + return AgentResponse( + messages=[ + Message( + role="assistant", + text=( + '{"terminate": true, "reason": "Task complete", ' + '"next_speaker": null, "final_message": "concatenated manager final"}' + ), + author_name=self.name, + ) + ] + ) + + def make_sequence_selector() -> Callable[[GroupChatState], str]: state_counter = {"value": 0} @@ -221,6 +266,29 @@ async def test_group_chat_as_agent_accepts_conversation() -> None: assert response.messages, "Expected agent conversation output" +async def test_agent_manager_handles_concatenated_json_output() -> None: + manager = ConcatenatedJsonManagerAgent() + worker = StubAgent("agent", "worker response") + + workflow = GroupChatBuilder( + participants=[worker], + orchestrator_agent=manager, + ).build() + + outputs: list[list[Message]] = [] + async for event in workflow.run("coordinate task", stream=True): + if event.type == "output": + data = event.data + if isinstance(data, list): + outputs.append(cast(list[Message], 
data)) + + assert outputs + conversation = outputs[-1] + assert any(msg.author_name == "agent" and msg.text == "worker response" for msg in conversation) + assert conversation[-1].author_name == manager.name + assert conversation[-1].text == "concatenated manager final" + + # Comprehensive tests for group chat functionality diff --git a/python/packages/orchestrations/tests/test_magentic.py b/python/packages/orchestrations/tests/test_magentic.py index 17b6957205..c887833545 100644 --- a/python/packages/orchestrations/tests/test_magentic.py +++ b/python/packages/orchestrations/tests/test_magentic.py @@ -366,6 +366,11 @@ async def test_magentic_checkpoint_resume_round_trip(): assert checkpoints checkpoints.sort(key=lambda cp: cp.timestamp) resume_checkpoint = checkpoints[-1] + loaded_checkpoint = await storage.load(resume_checkpoint.checkpoint_id) + assert loaded_checkpoint is not None + # Regression check: checkpoints with pending request_info must include executor state. + assert "_executor_state" in loaded_checkpoint.state + assert "magentic_orchestrator" in loaded_checkpoint.state["_executor_state"] manager2 = FakeManager() wf_resume = MagenticBuilder( @@ -378,7 +383,7 @@ async def test_magentic_checkpoint_resume_round_trip(): completed: WorkflowEvent | None = None req_event = None async for event in wf_resume.run( - resume_checkpoint.checkpoint_id, + checkpoint_id=resume_checkpoint.checkpoint_id, stream=True, ): if event.type == "request_info" and event.request_type is MagenticPlanReviewRequest: diff --git a/python/samples/getting_started/orchestrations/README.md b/python/samples/getting_started/orchestrations/README.md index 9b603eda34..a346e660ca 100644 --- a/python/samples/getting_started/orchestrations/README.md +++ b/python/samples/getting_started/orchestrations/README.md @@ -26,24 +26,58 @@ from agent_framework.orchestrations import ( ) ``` -## Samples Overview - -| Sample | File | Concepts | -| ------------------------------------------------- | 
------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------- | -| Concurrent Orchestration (Default Aggregator) | [concurrent_agents.py](./concurrent_agents.py) | Fan-out to multiple agents; fan-in with default aggregator returning combined Messages | -| Concurrent Orchestration (Custom Aggregator) | [concurrent_custom_aggregator.py](./concurrent_custom_aggregator.py) | Override aggregator via callback; summarize results with an LLM | -| Concurrent Orchestration (Custom Agent Executors) | [concurrent_custom_agent_executors.py](./concurrent_custom_agent_executors.py) | Child executors own Agents; concurrent fan-out/fan-in via ConcurrentBuilder | -| Group Chat with Agent Manager | [group_chat_agent_manager.py](./group_chat_agent_manager.py) | Agent-based manager using `with_orchestrator(agent=)` to select next speaker | -| Group Chat Philosophical Debate | [group_chat_philosophical_debate.py](./group_chat_philosophical_debate.py) | Agent manager moderates long-form, multi-round debate across diverse participants | -| Group Chat with Simple Function Selector | [group_chat_simple_selector.py](./group_chat_simple_selector.py) | Group chat with a simple function selector for next speaker | -| Handoff (Simple) | [handoff_simple.py](./handoff_simple.py) | Single-tier routing: triage agent routes to specialists, control returns to user after each specialist response | -| Handoff (Autonomous) | [handoff_autonomous.py](./handoff_autonomous.py) | Autonomous mode: specialists iterate independently until invoking a handoff tool using `.with_autonomous_mode()` | -| Handoff with Code Interpreter | [handoff_with_code_interpreter_file.py](./handoff_with_code_interpreter_file.py) | Retrieve file IDs from code interpreter output in handoff workflow | -| Magentic Workflow (Multi-Agent) | [magentic.py](./magentic.py) | Orchestrate multiple agents 
with Magentic manager and streaming | -| Magentic + Human Plan Review | [magentic_human_plan_review.py](./magentic_human_plan_review.py) | Human reviews/updates the plan before execution | -| Magentic + Checkpoint Resume | [magentic_checkpoint.py](./magentic_checkpoint.py) | Resume Magentic orchestration from saved checkpoints | -| Sequential Orchestration (Agents) | [sequential_agents.py](./sequential_agents.py) | Chain agents sequentially with shared conversation context | -| Sequential Orchestration (Custom Executor) | [sequential_custom_executors.py](./sequential_custom_executors.py) | Mix agents with a summarizer that appends a compact summary | +## Samples Overview (by directory) + +### concurrent + +| Sample | File | Concepts | +| ------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------- | +| Concurrent Orchestration (Default Aggregator) | [concurrent/concurrent_agents.py](./concurrent/concurrent_agents.py) | Fan-out to multiple agents; fan-in with default aggregator returning combined Messages | +| Concurrent Orchestration (Custom Aggregator) | [concurrent/concurrent_custom_aggregator.py](./concurrent/concurrent_custom_aggregator.py) | Override aggregator via callback; summarize results with an LLM | +| Concurrent Orchestration (Custom Agent Executors) | [concurrent/concurrent_custom_agent_executors.py](./concurrent/concurrent_custom_agent_executors.py) | Child executors own Agents; concurrent fan-out/fan-in via ConcurrentBuilder | +| Concurrent Orchestration as Agent | [concurrent/concurrent_workflow_as_agent.py](./concurrent/concurrent_workflow_as_agent.py) | Build a ConcurrentBuilder workflow and expose it as an agent via `workflow.as_agent(...)` | +| Tool Approval with ConcurrentBuilder | 
[concurrent/concurrent_builder_tool_approval.py](./concurrent/concurrent_builder_tool_approval.py) | Require human approval for sensitive tools across concurrent participants | +| ConcurrentBuilder Request Info | [concurrent/concurrent_request_info.py](./concurrent/concurrent_request_info.py) | Review concurrent agent outputs before aggregation using `.with_request_info()` | + +### sequential + +| Sample | File | Concepts | +| ------------------------------------------ | ---------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | +| Sequential Orchestration (Agents) | [sequential/sequential_agents.py](./sequential/sequential_agents.py) | Chain agents sequentially with shared conversation context | +| Sequential Orchestration (Custom Executor) | [sequential/sequential_custom_executors.py](./sequential/sequential_custom_executors.py) | Mix agents with a summarizer that appends a compact summary | +| Sequential Orchestration as Agent | [sequential/sequential_workflow_as_agent.py](./sequential/sequential_workflow_as_agent.py) | Build a SequentialBuilder workflow and expose it as an agent via `workflow.as_agent(...)` | +| Tool Approval with SequentialBuilder | [sequential/sequential_builder_tool_approval.py](./sequential/sequential_builder_tool_approval.py) | Require human approval for sensitive tools in SequentialBuilder workflows | +| SequentialBuilder Request Info | [sequential/sequential_request_info.py](./sequential/sequential_request_info.py) | Request info for agent responses mid-orchestration using `.with_request_info()` | + +### group-chat + +| Sample | File | Concepts | +| ------------------------------------ | ------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------------------------------------------------------- | 
+| Group Chat with Agent Manager | [group-chat/group_chat_agent_manager.py](./group-chat/group_chat_agent_manager.py) | Agent-based manager using `with_orchestrator(agent=)` to select next speaker | +| Group Chat Philosophical Debate | [group-chat/group_chat_philosophical_debate.py](./group-chat/group_chat_philosophical_debate.py) | Agent manager moderates long-form, multi-round debate across diverse participants | +| Group Chat with Simple Selector | [group-chat/group_chat_simple_selector.py](./group-chat/group_chat_simple_selector.py) | Group chat with a simple function selector for next speaker | +| Group Chat Orchestration as Agent | [group-chat/group_chat_workflow_as_agent.py](./group-chat/group_chat_workflow_as_agent.py) | Build a GroupChatBuilder workflow and wrap it as an agent for composition | +| Tool Approval with GroupChatBuilder | [group-chat/group_chat_builder_tool_approval.py](./group-chat/group_chat_builder_tool_approval.py) | Require human approval for sensitive tools in group chat orchestration | +| GroupChatBuilder Request Info | [group-chat/group_chat_request_info.py](./group-chat/group_chat_request_info.py) | Steer group discussions with periodic guidance using `.with_request_info()` | + +### handoff + +| Sample | File | Concepts | +| ---------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | +| Handoff (Simple) | [handoff/handoff_simple.py](./handoff/handoff_simple.py) | Single-tier routing: triage agent routes to specialists, control returns to user after each specialist response | +| Handoff (Autonomous) | [handoff/handoff_autonomous.py](./handoff/handoff_autonomous.py) | Autonomous mode: specialists iterate independently until invoking a handoff tool using `.with_autonomous_mode()` | +| Handoff with Code Interpreter | 
[handoff/handoff_with_code_interpreter_file.py](./handoff/handoff_with_code_interpreter_file.py) | Retrieve file IDs from code interpreter output in handoff workflow | +| Handoff with Tool Approval + Checkpoint | [handoff/handoff_with_tool_approval_checkpoint_resume.py](./handoff/handoff_with_tool_approval_checkpoint_resume.py) | Capture tool-approval decisions in checkpoints and resume from persisted state | +| Handoff Orchestration as Agent | [handoff/handoff_workflow_as_agent.py](./handoff/handoff_workflow_as_agent.py) | Build a HandoffBuilder workflow and expose it as an agent, including HITL request/response flow | + +### magentic + +| Sample | File | Concepts | +| ---------------------------- | ------------------------------------------------------------------------------------------ | --------------------------------------------------------------------- | +| Magentic Workflow | [magentic/magentic.py](./magentic/magentic.py) | Orchestrate multiple agents with a Magentic manager and streaming | +| Magentic + Human Plan Review | [magentic/magentic_human_plan_review.py](./magentic/magentic_human_plan_review.py) | Human reviews or updates the plan before execution | +| Magentic + Checkpoint Resume | [magentic/magentic_checkpoint.py](./magentic/magentic_checkpoint.py) | Resume Magentic orchestration from saved checkpoints | +| Magentic Orchestration as Agent | [magentic/magentic_workflow_as_agent.py](./magentic/magentic_workflow_as_agent.py) | Build a MagenticBuilder workflow and reuse it as an agent | ## Tips diff --git a/python/samples/getting_started/orchestrations/concurrent_agents.py b/python/samples/getting_started/orchestrations/concurrent/concurrent_agents.py similarity index 89% rename from python/samples/getting_started/orchestrations/concurrent_agents.py rename to python/samples/getting_started/orchestrations/concurrent/concurrent_agents.py index 2d216a131b..8a2cccd7d2 100644 --- a/python/samples/getting_started/orchestrations/concurrent_agents.py +++ 
b/python/samples/getting_started/orchestrations/concurrent/concurrent_agents.py @@ -1,10 +1,11 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio +import os from typing import Any from agent_framework import Message -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.orchestrations import ConcurrentBuilder from azure.identity import AzureCliCredential @@ -22,14 +23,19 @@ - Workflow completion when idle with no pending work Prerequisites: -- Azure OpenAI access configured for AzureOpenAIChatClient (use az login + env vars) +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. +- Azure OpenAI access configured for AzureOpenAIResponsesClient (use az login + env vars) - Familiarity with Workflow events (WorkflowEvent) """ async def main() -> None: - # 1) Create three domain agents using AzureOpenAIChatClient - client = AzureOpenAIChatClient(credential=AzureCliCredential()) + # 1) Create three domain agents using AzureOpenAIResponsesClient + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) researcher = client.as_agent( instructions=( diff --git a/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py b/python/samples/getting_started/orchestrations/concurrent/concurrent_builder_tool_approval.py similarity index 95% rename from python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py rename to python/samples/getting_started/orchestrations/concurrent/concurrent_builder_tool_approval.py index 34d59b62d7..80e06d9d2b 100644 --- a/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py +++ b/python/samples/getting_started/orchestrations/concurrent/concurrent_builder_tool_approval.py 
@@ -1,6 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio +import os from collections.abc import AsyncIterable from typing import Annotated @@ -10,8 +11,9 @@ WorkflowEvent, tool, ) -from agent_framework.openai import OpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.orchestrations import ConcurrentBuilder +from azure.identity import AzureCliCredential """ Sample: Concurrent Workflow with Tool Approval Requests @@ -38,6 +40,7 @@ - Understanding that approval pauses only the agent that triggered it, not all agents. Prerequisites: +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. - OpenAI or Azure OpenAI configured with the required environment variables. - Basic familiarity with ConcurrentBuilder and streaming workflow events. """ @@ -126,7 +129,11 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str async def main() -> None: # 3. Create two agents focused on different stocks but with the same tool sets - client = OpenAIChatClient() + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) microsoft_agent = client.as_agent( name="MicrosoftAgent", diff --git a/python/samples/getting_started/orchestrations/concurrent_custom_agent_executors.py b/python/samples/getting_started/orchestrations/concurrent/concurrent_custom_agent_executors.py similarity index 89% rename from python/samples/getting_started/orchestrations/concurrent_custom_agent_executors.py rename to python/samples/getting_started/orchestrations/concurrent/concurrent_custom_agent_executors.py index bd3b8b93a5..99c0093831 100644 --- a/python/samples/getting_started/orchestrations/concurrent_custom_agent_executors.py +++ b/python/samples/getting_started/orchestrations/concurrent/concurrent_custom_agent_executors.py @@ -1,6 
+1,7 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio +import os from typing import Any from agent_framework import ( @@ -12,7 +13,7 @@ WorkflowContext, handler, ) -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.orchestrations import ConcurrentBuilder from azure.identity import AzureCliCredential @@ -25,21 +26,22 @@ ConcurrentBuilder API and the default aggregator. Demonstrates: -- Executors that create their Agent in __init__ (via AzureOpenAIChatClient) +- Executors that create their Agent in __init__ (via AzureOpenAIResponsesClient) - A @handler that converts AgentExecutorRequest -> AgentExecutorResponse - ConcurrentBuilder(participants=[...]) to build fan-out/fan-in - Default aggregator returning list[Message] (one user + one assistant per agent) - Workflow completion when all participants become idle Prerequisites: -- Azure OpenAI configured for AzureOpenAIChatClient (az login + required env vars) +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. +- Azure OpenAI configured for AzureOpenAIResponsesClient (az login + required env vars) """ class ResearcherExec(Executor): agent: Agent - def __init__(self, client: AzureOpenAIChatClient, id: str = "researcher"): + def __init__(self, client: AzureOpenAIResponsesClient, id: str = "researcher"): self.agent = client.as_agent( instructions=( "You're an expert market and product researcher. Given a prompt, provide concise, factual insights," @@ -59,7 +61,7 @@ async def run(self, request: AgentExecutorRequest, ctx: WorkflowContext[AgentExe class MarketerExec(Executor): agent: Agent - def __init__(self, client: AzureOpenAIChatClient, id: str = "marketer"): + def __init__(self, client: AzureOpenAIResponsesClient, id: str = "marketer"): self.agent = client.as_agent( instructions=( "You're a creative marketing strategist. 
Craft compelling value propositions and target messaging" @@ -79,7 +81,7 @@ async def run(self, request: AgentExecutorRequest, ctx: WorkflowContext[AgentExe class LegalExec(Executor): agent: Agent - def __init__(self, client: AzureOpenAIChatClient, id: str = "legal"): + def __init__(self, client: AzureOpenAIResponsesClient, id: str = "legal"): self.agent = client.as_agent( instructions=( "You're a cautious legal/compliance reviewer. Highlight constraints, disclaimers, and policy concerns" @@ -97,7 +99,11 @@ async def run(self, request: AgentExecutorRequest, ctx: WorkflowContext[AgentExe async def main() -> None: - client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) researcher = ResearcherExec(client) marketer = MarketerExec(client) diff --git a/python/samples/getting_started/orchestrations/concurrent_custom_aggregator.py b/python/samples/getting_started/orchestrations/concurrent/concurrent_custom_aggregator.py similarity index 88% rename from python/samples/getting_started/orchestrations/concurrent_custom_aggregator.py rename to python/samples/getting_started/orchestrations/concurrent/concurrent_custom_aggregator.py index 17b1496e0b..f7870814dc 100644 --- a/python/samples/getting_started/orchestrations/concurrent_custom_aggregator.py +++ b/python/samples/getting_started/orchestrations/concurrent/concurrent_custom_aggregator.py @@ -1,10 +1,11 @@ # Copyright (c) Microsoft. All rights reserved. 
import asyncio +import os from typing import Any from agent_framework import Message -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.orchestrations import ConcurrentBuilder from azure.identity import AzureCliCredential @@ -13,7 +14,7 @@ Build a concurrent workflow with ConcurrentBuilder that fans out one prompt to multiple domain agents and fans in their responses. Override the default -aggregator with a custom async callback that uses AzureOpenAIChatClient.get_response() +aggregator with a custom async callback that uses AzureOpenAIResponsesClient.get_response() to synthesize a concise, consolidated summary from the experts' outputs. The workflow completes when all participants become idle. @@ -24,12 +25,17 @@ - Workflow output yielded with the synthesized summary string Prerequisites: -- Azure OpenAI configured for AzureOpenAIChatClient (az login + required env vars) +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. +- Azure OpenAI configured for AzureOpenAIResponsesClient (az login + required env vars) """ async def main() -> None: - client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) researcher = client.as_agent( instructions=( @@ -86,9 +92,7 @@ async def summarize_results(results: list[Any]) -> str: # • Default aggregator -> returns list[Message] (one user + one assistant per agent) # • Custom callback -> return value becomes workflow output (string here) # The callback can be sync or async; it receives list[AgentExecutorResponse]. 
- workflow = ( - ConcurrentBuilder(participants=[researcher, marketer, legal]).with_aggregator(summarize_results).build() - ) + workflow = ConcurrentBuilder(participants=[researcher, marketer, legal]).with_aggregator(summarize_results).build() events = await workflow.run("We are launching a new budget-friendly electric bike for urban commuters.") outputs = events.get_outputs() diff --git a/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py b/python/samples/getting_started/orchestrations/concurrent/concurrent_request_info.py similarity index 93% rename from python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py rename to python/samples/getting_started/orchestrations/concurrent/concurrent_request_info.py index 56b3a49a99..80a98df1bb 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py +++ b/python/samples/getting_started/orchestrations/concurrent/concurrent_request_info.py @@ -17,11 +17,13 @@ - Injecting human guidance for specific agents before aggregation Prerequisites: -- Azure OpenAI configured for AzureOpenAIChatClient with required environment variables +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. 
+- Azure OpenAI configured for AzureOpenAIResponsesClient with required environment variables - Authentication via azure-identity (run az login before executing) """ import asyncio +import os from collections.abc import AsyncIterable from typing import Any @@ -30,12 +32,12 @@ Message, WorkflowEvent, ) -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.orchestrations import AgentRequestInfoResponse, ConcurrentBuilder from azure.identity import AzureCliCredential # Store chat client at module level for aggregator access -_chat_client: AzureOpenAIChatClient | None = None +_chat_client: AzureOpenAIResponsesClient | None = None async def aggregate_with_synthesis(results: list[AgentExecutorResponse]) -> Any: @@ -142,7 +144,11 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str async def main() -> None: global _chat_client - _chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) + _chat_client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) # Create agents that analyze from different perspectives technical_analyst = _chat_client.as_agent( diff --git a/python/samples/getting_started/workflows/agents/concurrent_workflow_as_agent.py b/python/samples/getting_started/orchestrations/concurrent/concurrent_workflow_as_agent.py similarity index 83% rename from python/samples/getting_started/workflows/agents/concurrent_workflow_as_agent.py rename to python/samples/getting_started/orchestrations/concurrent/concurrent_workflow_as_agent.py index 42202aec5f..c9d3a55920 100644 --- a/python/samples/getting_started/workflows/agents/concurrent_workflow_as_agent.py +++ b/python/samples/getting_started/orchestrations/concurrent/concurrent_workflow_as_agent.py @@ -1,8 +1,9 @@ # Copyright (c) Microsoft. 
All rights reserved. import asyncio +import os -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.orchestrations import ConcurrentBuilder from azure.identity import AzureCliCredential @@ -19,7 +20,8 @@ - Workflow completion when idle with no pending work Prerequisites: -- Azure OpenAI access configured for AzureOpenAIChatClient (use az login + env vars) +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. +- Azure OpenAI access configured for AzureOpenAIResponsesClient (use az login + env vars) - Familiarity with Workflow events (WorkflowEvent with type "output") """ @@ -37,8 +39,12 @@ def clear_and_redraw(buffers: dict[str, str], agent_order: list[str]) -> None: async def main() -> None: - # 1) Create three domain agents using AzureOpenAIChatClient - client = AzureOpenAIChatClient(credential=AzureCliCredential()) + # 1) Create three domain agents using AzureOpenAIResponsesClient + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) researcher = client.as_agent( instructions=( diff --git a/python/samples/getting_started/orchestrations/group_chat_agent_manager.py b/python/samples/getting_started/orchestrations/group-chat/group_chat_agent_manager.py similarity index 67% rename from python/samples/getting_started/orchestrations/group_chat_agent_manager.py rename to python/samples/getting_started/orchestrations/group-chat/group_chat_agent_manager.py index 78eb8535ae..813056a956 100644 --- a/python/samples/getting_started/orchestrations/group_chat_agent_manager.py +++ b/python/samples/getting_started/orchestrations/group-chat/group_chat_agent_manager.py @@ -1,6 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. 
import asyncio +import os from typing import cast from agent_framework import ( @@ -8,7 +9,7 @@ AgentResponseUpdate, Message, ) -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.orchestrations import GroupChatBuilder from azure.identity import AzureCliCredential @@ -21,7 +22,8 @@ - Coordinates a researcher and writer agent to solve tasks collaboratively Prerequisites: -- OpenAI environment variables configured for OpenAIChatClient +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. +- OpenAI environment variables configured for AzureOpenAIResponsesClient """ ORCHESTRATOR_AGENT_INSTRUCTIONS = """ @@ -36,7 +38,11 @@ async def main() -> None: # Create a chat client using Azure OpenAI and Azure CLI credentials for all agents - client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) # Orchestrator agent that manages the conversation # Note: This agent (and the underlying chat client) must support structured outputs. @@ -88,26 +94,35 @@ async def main() -> None: print(f"TASK: {task}\n") print("=" * 80) - # Keep track of the last response to format output nicely in streaming mode - last_response_id: str | None = None + # Track current speaker for readable streaming output. 
+ pending_speaker: str | None = None + current_speaker: str | None = None async for event in workflow.run(task, stream=True): - if event.type == "output": - data = event.data - if isinstance(data, AgentResponseUpdate): - rid = data.response_id - if rid != last_response_id: - if last_response_id is not None: - print("\n") - print(f"{data.author_name}:", end=" ", flush=True) - last_response_id = rid - print(data.text, end="", flush=True) - elif event.type == "output": - # The output of the group chat workflow is a collection of chat messages from all participants - outputs = cast(list[Message], event.data) - print("\n" + "=" * 80) - print("\nFinal Conversation Transcript:\n") - for message in outputs: - print(f"{message.author_name or message.role}: {message.text}\n") + if event.type != "output": + continue + + data = event.data + if isinstance(data, AgentResponseUpdate): + if data.author_name: + pending_speaker = data.author_name + if not data.text: + continue + + speaker = data.author_name or pending_speaker or "assistant" + if speaker != current_speaker: + if current_speaker is not None: + print("\n") + print(f"{speaker}:", end=" ", flush=True) + current_speaker = speaker + print(data.text, end="", flush=True) + continue + + # The output of the group chat workflow is a collection of chat messages from all participants + outputs = cast(list[Message], data) + print("\n" + "=" * 80) + print("\nFinal Conversation Transcript:\n") + for message in outputs: + print(f"{message.author_name or message.role}: {message.text}\n") if __name__ == "__main__": diff --git a/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py b/python/samples/getting_started/orchestrations/group-chat/group_chat_builder_tool_approval.py similarity index 95% rename from python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py rename to python/samples/getting_started/orchestrations/group-chat/group_chat_builder_tool_approval.py 
index 159299b9b8..2ac0738e66 100644 --- a/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py +++ b/python/samples/getting_started/orchestrations/group-chat/group_chat_builder_tool_approval.py @@ -1,6 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio +import os from collections.abc import AsyncIterable from typing import Annotated, cast @@ -10,8 +11,9 @@ WorkflowEvent, tool, ) -from agent_framework.openai import OpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.orchestrations import GroupChatBuilder, GroupChatState +from azure.identity import AzureCliCredential """ Sample: Group Chat Workflow with Tool Approval Requests @@ -37,6 +39,7 @@ - Multi-round group chat with tool approval interruption and resumption. Prerequisites: +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. - OpenAI or Azure OpenAI configured with the required environment variables. - Basic familiarity with GroupChatBuilder and streaming workflow events. """ @@ -126,7 +129,11 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str async def main() -> None: # 3. 
Create specialized agents - client = OpenAIChatClient() + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) qa_engineer = client.as_agent( name="QAEngineer", diff --git a/python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py b/python/samples/getting_started/orchestrations/group-chat/group_chat_philosophical_debate.py similarity index 90% rename from python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py rename to python/samples/getting_started/orchestrations/group-chat/group_chat_philosophical_debate.py index e4723c01e0..12c14a8c93 100644 --- a/python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py +++ b/python/samples/getting_started/orchestrations/group-chat/group_chat_philosophical_debate.py @@ -2,6 +2,7 @@ import asyncio import logging +import os from typing import cast from agent_framework import ( @@ -9,7 +10,7 @@ AgentResponseUpdate, Message, ) -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.orchestrations import GroupChatBuilder from azure.identity import AzureCliCredential @@ -37,12 +38,17 @@ - Doctor from Scandinavia (public health, equity, societal support) Prerequisites: -- OpenAI environment variables configured for OpenAIChatClient +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. 
+- OpenAI environment variables configured for AzureOpenAIResponsesClient """ -def _get_chat_client() -> AzureOpenAIChatClient: - return AzureOpenAIChatClient(credential=AzureCliCredential()) +def _get_chat_client() -> AzureOpenAIResponsesClient: + return AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) async def main() -> None: @@ -240,26 +246,35 @@ async def main() -> None: print("DISCUSSION BEGINS") print("=" * 80 + "\n") - # Keep track of the last response to format output nicely in streaming mode - last_response_id: str | None = None + # Track current speaker for readable streaming output. + pending_speaker: str | None = None + current_speaker: str | None = None async for event in workflow.run(f"Please begin the discussion on: {topic}", stream=True): - if event.type == "output": - data = event.data - if isinstance(data, AgentResponseUpdate): - rid = data.response_id - if rid != last_response_id: - if last_response_id is not None: - print("\n") - print(f"{data.author_name}:", end=" ", flush=True) - last_response_id = rid - print(data.text, end="", flush=True) - elif event.type == "output": - # The output of the group chat workflow is a collection of chat messages from all participants - outputs = cast(list[Message], event.data) - print("\n" + "=" * 80) - print("\nFinal Conversation Transcript:\n") - for message in outputs: - print(f"{message.author_name or message.role}: {message.text}\n") + if event.type != "output": + continue + + data = event.data + if isinstance(data, AgentResponseUpdate): + if data.author_name: + pending_speaker = data.author_name + if not data.text: + continue + + speaker = data.author_name or pending_speaker or "assistant" + if speaker != current_speaker: + if current_speaker is not None: + print("\n") + print(f"{speaker}:", end=" ", flush=True) + current_speaker = speaker + print(data.text, 
end="", flush=True) + continue + + # The output of the group chat workflow is a collection of chat messages from all participants + outputs = cast(list[Message], data) + print("\n" + "=" * 80) + print("\nFinal Conversation Transcript:\n") + for message in outputs: + print(f"{message.author_name or message.role}: {message.text}\n") """ Sample Output: diff --git a/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py b/python/samples/getting_started/orchestrations/group-chat/group_chat_request_info.py similarity index 93% rename from python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py rename to python/samples/getting_started/orchestrations/group-chat/group_chat_request_info.py index 85417a0f91..40186ac7fd 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py +++ b/python/samples/getting_started/orchestrations/group-chat/group_chat_request_info.py @@ -18,11 +18,13 @@ - Steering agent behavior with pre-agent human input Prerequisites: -- Azure OpenAI configured for AzureOpenAIChatClient with required environment variables +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. 
+- Azure OpenAI configured for AzureOpenAIResponsesClient with required environment variables - Authentication via azure-identity (run az login before executing) """ import asyncio +import os from collections.abc import AsyncIterable from typing import cast @@ -31,7 +33,7 @@ Message, WorkflowEvent, ) -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.orchestrations import AgentRequestInfoResponse, GroupChatBuilder from azure.identity import AzureCliCredential @@ -91,7 +93,11 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str async def main() -> None: - client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) # Create agents for a group discussion optimist = client.as_agent( diff --git a/python/samples/getting_started/orchestrations/group_chat_simple_selector.py b/python/samples/getting_started/orchestrations/group-chat/group_chat_simple_selector.py similarity index 92% rename from python/samples/getting_started/orchestrations/group_chat_simple_selector.py rename to python/samples/getting_started/orchestrations/group-chat/group_chat_simple_selector.py index 13cd3d3e5a..903943e35f 100644 --- a/python/samples/getting_started/orchestrations/group_chat_simple_selector.py +++ b/python/samples/getting_started/orchestrations/group-chat/group_chat_simple_selector.py @@ -1,6 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. 
import asyncio +import os from typing import cast from agent_framework import ( @@ -8,7 +9,7 @@ AgentResponseUpdate, Message, ) -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.orchestrations import GroupChatBuilder, GroupChatState from azure.identity import AzureCliCredential @@ -20,7 +21,8 @@ - Uses a pure Python function to control speaker selection based on conversation state Prerequisites: -- OpenAI environment variables configured for OpenAIChatClient +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. +- OpenAI environment variables configured for AzureOpenAIResponsesClient """ @@ -33,7 +35,11 @@ def round_robin_selector(state: GroupChatState) -> str: async def main() -> None: # Create a chat client using Azure OpenAI and Azure CLI credentials for all agents - client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) # Participant agents expert = Agent( diff --git a/python/samples/getting_started/workflows/agents/group_chat_workflow_as_agent.py b/python/samples/getting_started/orchestrations/group-chat/group_chat_workflow_as_agent.py similarity index 68% rename from python/samples/getting_started/workflows/agents/group_chat_workflow_as_agent.py rename to python/samples/getting_started/orchestrations/group-chat/group_chat_workflow_as_agent.py index 9bf24c82e1..2c25f0498e 100644 --- a/python/samples/getting_started/workflows/agents/group_chat_workflow_as_agent.py +++ b/python/samples/getting_started/orchestrations/group-chat/group_chat_workflow_as_agent.py @@ -1,10 +1,12 @@ # Copyright (c) Microsoft. All rights reserved. 
import asyncio +import os from agent_framework import Agent -from agent_framework.openai import OpenAIChatClient, OpenAIResponsesClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.orchestrations import GroupChatBuilder +from azure.identity import AzureCliCredential """ Sample: Group Chat Orchestration @@ -14,7 +16,8 @@ - The orchestrator coordinates a researcher (chat completions) and a writer (responses API) to solve a task. Prerequisites: -- OpenAI environment variables configured for `OpenAIChatClient` and `OpenAIResponsesClient`. +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. +- Azure OpenAI environment variables configured for `AzureOpenAIResponsesClient`. """ @@ -23,14 +26,22 @@ async def main() -> None: name="Researcher", description="Collects relevant background information.", instructions="Gather concise facts that help a teammate answer the question.", - client=OpenAIChatClient(model_id="gpt-4o-mini"), + client=AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ), ) writer = Agent( name="Writer", description="Synthesizes a polished answer using the gathered notes.", instructions="Compose clear and structured answers using any notes provided.", - client=OpenAIResponsesClient(), + client=AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ), ) # intermediate_outputs=True: Enable intermediate outputs to observe the conversation as it unfolds @@ -38,7 +49,11 @@ async def main() -> None: workflow = GroupChatBuilder( participants=[researcher, writer], intermediate_outputs=True, - orchestrator_agent=OpenAIChatClient().as_agent( + orchestrator_agent=AzureOpenAIResponsesClient( + 
project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ).as_agent( name="Orchestrator", instructions="You coordinate a team conversation to solve the user's task.", ), diff --git a/python/samples/getting_started/orchestrations/handoff_autonomous.py b/python/samples/getting_started/orchestrations/handoff/handoff_autonomous.py similarity index 92% rename from python/samples/getting_started/orchestrations/handoff_autonomous.py rename to python/samples/getting_started/orchestrations/handoff/handoff_autonomous.py index 997d854ef2..2d30144742 100644 --- a/python/samples/getting_started/orchestrations/handoff_autonomous.py +++ b/python/samples/getting_started/orchestrations/handoff/handoff_autonomous.py @@ -2,6 +2,7 @@ import asyncio import logging +import os from typing import cast from agent_framework import ( @@ -10,7 +11,7 @@ Message, resolve_agent_id, ) -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.orchestrations import HandoffBuilder from azure.identity import AzureCliCredential @@ -27,8 +28,9 @@ User -> Coordinator -> Specialist (iterates N times) -> Handoff -> Final Output Prerequisites: + - AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. - `az login` (Azure CLI authentication) - - Environment variables for AzureOpenAIChatClient (AZURE_OPENAI_ENDPOINT, etc.) 
+ - Environment variables for AzureOpenAIResponsesClient (AZURE_AI_MODEL_DEPLOYMENT_NAME) Key Concepts: - Autonomous interaction mode: agents iterate until they handoff @@ -37,7 +39,7 @@ def create_agents( - client: AzureOpenAIChatClient, + client: AzureOpenAIResponsesClient, ) -> tuple[Agent, Agent, Agent]: """Create coordinator and specialists for autonomous iteration.""" coordinator = client.as_agent( @@ -73,7 +75,11 @@ def create_agents( async def main() -> None: """Run an autonomous handoff workflow with specialist iteration enabled.""" - client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) coordinator, research_agent, summary_agent = create_agents(client) # Build the workflow with autonomous mode diff --git a/python/samples/getting_started/orchestrations/handoff_simple.py b/python/samples/getting_started/orchestrations/handoff/handoff_simple.py similarity index 95% rename from python/samples/getting_started/orchestrations/handoff_simple.py rename to python/samples/getting_started/orchestrations/handoff/handoff_simple.py index b2f40f438f..23c957e5d7 100644 --- a/python/samples/getting_started/orchestrations/handoff_simple.py +++ b/python/samples/getting_started/orchestrations/handoff/handoff_simple.py @@ -1,6 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio +import os from typing import Annotated, cast from agent_framework import ( @@ -11,7 +12,7 @@ WorkflowRunState, tool, ) -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.orchestrations import HandoffAgentUserRequest, HandoffBuilder from azure.identity import AzureCliCredential @@ -21,8 +22,9 @@ them to transfer control to each other based on the conversation context. 
Prerequisites: + - AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. - `az login` (Azure CLI authentication) - - Environment variables configured for AzureOpenAIChatClient (AZURE_OPENAI_ENDPOINT, etc.) + - Environment variables configured for AzureOpenAIResponsesClient (AZURE_AI_MODEL_DEPLOYMENT_NAME) Key Concepts: - Auto-registered handoff tools: HandoffBuilder automatically creates handoff tools @@ -54,11 +56,11 @@ def process_return(order_number: Annotated[str, "Order number to process return return f"Return initiated successfully for order {order_number}. You will receive return instructions via email." -def create_agents(client: AzureOpenAIChatClient) -> tuple[Agent, Agent, Agent, Agent]: +def create_agents(client: AzureOpenAIResponsesClient) -> tuple[Agent, Agent, Agent, Agent]: """Create and configure the triage and specialist agents. Args: - client: The AzureOpenAIChatClient to use for creating agents. + client: The AzureOpenAIResponsesClient to use for creating agents. Returns: Tuple of (triage_agent, refund_agent, order_agent, return_agent) @@ -189,7 +191,11 @@ async def main() -> None: replace the scripted_responses with actual user input collection. 
""" # Initialize the Azure OpenAI chat client - client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) # Create all agents: triage + specialists triage, refund, order, support = create_agents(client) diff --git a/python/samples/getting_started/orchestrations/handoff/handoff_with_code_interpreter_file.py b/python/samples/getting_started/orchestrations/handoff/handoff_with_code_interpreter_file.py new file mode 100644 index 0000000000..9a4f0ba76a --- /dev/null +++ b/python/samples/getting_started/orchestrations/handoff/handoff_with_code_interpreter_file.py @@ -0,0 +1,186 @@ +# Copyright (c) Microsoft. All rights reserved. + +""" +Handoff Workflow with Code Interpreter File Generation Sample + +This sample demonstrates retrieving file IDs from code interpreter output +in a handoff workflow context. A triage agent routes to a code specialist +that generates a text file, and we verify the file_id is captured correctly +from the streaming workflow events. + +Verifies GitHub issue #2718: files generated by code interpreter in +HandoffBuilder workflows can be properly retrieved. + +Prerequisites: + - AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. 
+ - `az login` (Azure CLI authentication) + - AZURE_AI_MODEL_DEPLOYMENT_NAME +""" + +import asyncio +import os +from collections.abc import AsyncIterable +from typing import cast + +from agent_framework import ( + AgentResponseUpdate, + Message, + WorkflowEvent, + WorkflowRunState, +) +from agent_framework.azure import AzureOpenAIResponsesClient +from agent_framework.orchestrations import HandoffAgentUserRequest, HandoffBuilder +from azure.identity import AzureCliCredential + + +async def _drain(stream: AsyncIterable[WorkflowEvent]) -> list[WorkflowEvent]: + """Collect all events from an async stream.""" + return [event async for event in stream] + + +def _handle_events(events: list[WorkflowEvent]) -> tuple[list[WorkflowEvent[HandoffAgentUserRequest]], list[str]]: + """Process workflow events and extract file IDs and pending requests. + + Returns: + Tuple of (pending_requests, file_ids_found) + """ + + requests: list[WorkflowEvent[HandoffAgentUserRequest]] = [] + file_ids: list[str] = [] + + for event in events: + if event.type == "handoff_sent": + print(f"\n[Handoff from {event.data.source} to {event.data.target} initiated.]") + elif event.type == "status" and event.state in { + WorkflowRunState.IDLE, + WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, + }: + print(f"[status] {event.state}") + elif event.type == "request_info" and isinstance(event.data, HandoffAgentUserRequest): + requests.append(cast(WorkflowEvent[HandoffAgentUserRequest], event)) + elif event.type == "output" and isinstance(event.data, AgentResponseUpdate): + data = event.data + if isinstance(data, AgentResponseUpdate): + for content in data.contents: + if content.type == "hosted_file": + file_ids.append(content.file_id) # type: ignore + print(f"[Found HostedFileContent: file_id={content.file_id}]") + elif content.type == "text" and content.annotations: + for annotation in content.annotations: + file_id = annotation["file_id"] # type: ignore + file_ids.append(file_id) + print(f"[Found file annotation: file_id={file_id}]") + elif event.type == 
"output": + conversation = cast(list[Message], event.data) + if isinstance(conversation, list): + print("\n=== Final Conversation Snapshot ===") + for message in conversation: + speaker = message.author_name or message.role + print(f"- {speaker}: {message.text or [content.type for content in message.contents]}") + print("===================================") + + return requests, file_ids + + +async def main() -> None: + """Run a simple handoff workflow with code interpreter file generation.""" + print("=== Handoff Workflow with Code Interpreter File Generation ===\n") + + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) + + triage = client.as_agent( + name="triage_agent", + instructions=( + "You are a triage agent. Route code-related requests to the code_specialist. " + "When the user asks to create or generate files, hand off to code_specialist " + "by calling handoff_to_code_specialist." + ), + ) + + code_interpreter_tool = client.get_code_interpreter_tool() + + code_specialist = client.as_agent( + name="code_specialist", + instructions=( + "You are a Python code specialist. Use the code interpreter to execute Python code " + "and create files when requested. Always save files to /mnt/data/ directory." + ), + tools=[code_interpreter_tool], + ) + + workflow = ( + HandoffBuilder( + termination_condition=lambda conv: sum(1 for msg in conv if msg.role == "user") >= 2, + ) + .participants([triage, code_specialist]) + .with_start_agent(triage) + .build() + ) + + user_inputs = [ + "Please create a text file called hello.txt with 'Hello from handoff workflow!' 
inside it.", + "exit", + ] + input_index = 0 + all_file_ids: list[str] = [] + + print(f"User: {user_inputs[0]}") + events = await _drain(workflow.run(user_inputs[0], stream=True)) + requests, file_ids = _handle_events(events) + all_file_ids.extend(file_ids) + input_index += 1 + + while requests: + request = requests[0] + if input_index >= len(user_inputs): + break + user_input = user_inputs[input_index] + print(f"\nUser: {user_input}") + + responses = {request.request_id: HandoffAgentUserRequest.create_response(user_input)} + events = await _drain(workflow.run(stream=True, responses=responses)) + requests, file_ids = _handle_events(events) + all_file_ids.extend(file_ids) + input_index += 1 + + print("\n" + "=" * 50) + if all_file_ids: + print(f"SUCCESS: Found {len(all_file_ids)} file ID(s) in handoff workflow:") + for fid in all_file_ids: + print(f" - {fid}") + else: + print("WARNING: No file IDs captured from the handoff workflow.") + print("=" * 50) + + """ + Sample Output: + + User: Please create a text file called hello.txt with 'Hello from handoff workflow!' inside it. + [Found HostedFileContent: file_id=assistant-JT1sA...] + + === Conversation So Far === + - user: Please create a text file called hello.txt with 'Hello from handoff workflow!' inside it. + - triage_agent: I am handing off your request to create the text file "hello.txt" with the specified content to the code specialist. They will assist you shortly. + - code_specialist: The file "hello.txt" has been created with the content "Hello from handoff workflow!". You can download it using the link below: + + [hello.txt](sandbox:/mnt/data/hello.txt) + =========================== + + [status] IDLE_WITH_PENDING_REQUESTS + + User: exit + [status] IDLE + + ================================================== + SUCCESS: Found 1 file ID(s) in handoff workflow: + - assistant-JT1sA... 
+ ================================================== + """ # noqa: E501 + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started/orchestrations/handoff_with_tool_approval_checkpoint_resume.py b/python/samples/getting_started/orchestrations/handoff/handoff_with_tool_approval_checkpoint_resume.py similarity index 94% rename from python/samples/getting_started/orchestrations/handoff_with_tool_approval_checkpoint_resume.py rename to python/samples/getting_started/orchestrations/handoff/handoff_with_tool_approval_checkpoint_resume.py index ce377b654d..a774674c5d 100644 --- a/python/samples/getting_started/orchestrations/handoff_with_tool_approval_checkpoint_resume.py +++ b/python/samples/getting_started/orchestrations/handoff/handoff_with_tool_approval_checkpoint_resume.py @@ -2,6 +2,7 @@ import asyncio import json +import os from pathlib import Path from typing import Any @@ -12,7 +13,7 @@ Workflow, tool, ) -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.orchestrations import HandoffAgentUserRequest, HandoffBuilder from azure.identity import AzureCliCredential @@ -39,8 +40,9 @@ workflow.run(stream=True, checkpoint_id=..., responses=responses).) Prerequisites: +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. - Azure CLI authentication (az login). -- Environment variables configured for AzureOpenAIChatClient. +- Environment variables configured for AzureOpenAIResponsesClient. 
""" CHECKPOINT_DIR = Path(__file__).parent / "tmp" / "handoff_checkpoints" @@ -53,7 +55,7 @@ def submit_refund(refund_description: str, amount: str, order_id: str) -> str: return f"refund recorded for order {order_id} (amount: {amount}) with details: {refund_description}" -def create_agents(client: AzureOpenAIChatClient) -> tuple[Agent, Agent, Agent]: +def create_agents(client: AzureOpenAIResponsesClient) -> tuple[Agent, Agent, Agent]: """Create a simple handoff scenario: triage, refund, and order specialists.""" triage = client.as_agent( @@ -90,7 +92,11 @@ def create_agents(client: AzureOpenAIChatClient) -> tuple[Agent, Agent, Agent]: def create_workflow(checkpoint_storage: FileCheckpointStorage) -> Workflow: """Build the handoff workflow with checkpointing enabled.""" - client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) triage, refund, order = create_agents(client) # checkpoint_storage: Enable checkpointing for resume diff --git a/python/samples/getting_started/workflows/agents/handoff_workflow_as_agent.py b/python/samples/getting_started/orchestrations/handoff/handoff_workflow_as_agent.py similarity index 93% rename from python/samples/getting_started/workflows/agents/handoff_workflow_as_agent.py rename to python/samples/getting_started/orchestrations/handoff/handoff_workflow_as_agent.py index 955446ca80..0a209f1884 100644 --- a/python/samples/getting_started/workflows/agents/handoff_workflow_as_agent.py +++ b/python/samples/getting_started/orchestrations/handoff/handoff_workflow_as_agent.py @@ -1,6 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. 
import asyncio +import os from typing import Annotated from agent_framework import ( @@ -11,7 +12,7 @@ WorkflowAgent, tool, ) -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.orchestrations import HandoffAgentUserRequest, HandoffBuilder from azure.identity import AzureCliCredential @@ -24,8 +25,9 @@ them to transfer control to each other based on the conversation context. Prerequisites: + - AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. - `az login` (Azure CLI authentication) - - Environment variables configured for AzureOpenAIChatClient (AZURE_OPENAI_ENDPOINT, etc.) + - Environment variables configured for AzureOpenAIResponsesClient (AZURE_AI_MODEL_DEPLOYMENT_NAME) Key Concepts: - Auto-registered handoff tools: HandoffBuilder automatically creates handoff tools @@ -57,11 +59,11 @@ def process_return(order_number: Annotated[str, "Order number to process return return f"Return initiated successfully for order {order_number}. You will receive return instructions via email." -def create_agents(client: AzureOpenAIChatClient) -> tuple[Agent, Agent, Agent, Agent]: +def create_agents(client: AzureOpenAIResponsesClient) -> tuple[Agent, Agent, Agent, Agent]: """Create and configure the triage and specialist agents. Args: - client: The AzureOpenAIChatClient to use for creating agents. + client: The AzureOpenAIResponsesClient to use for creating agents. Returns: Tuple of (triage_agent, refund_agent, order_agent, return_agent) @@ -147,7 +149,11 @@ async def main() -> None: replace the scripted_responses with actual user input collection. 
""" # Initialize the Azure OpenAI chat client - client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) # Create all agents: triage + specialists triage, refund, order, support = create_agents(client) diff --git a/python/samples/getting_started/orchestrations/handoff_with_code_interpreter_file.py b/python/samples/getting_started/orchestrations/handoff_with_code_interpreter_file.py deleted file mode 100644 index bc65e3bb20..0000000000 --- a/python/samples/getting_started/orchestrations/handoff_with_code_interpreter_file.py +++ /dev/null @@ -1,241 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -""" -Handoff Workflow with Code Interpreter File Generation Sample - -This sample demonstrates retrieving file IDs from code interpreter output -in a handoff workflow context. A triage agent routes to a code specialist -that generates a text file, and we verify the file_id is captured correctly -from the streaming workflow events. - -Verifies GitHub issue #2718: files generated by code interpreter in -HandoffBuilder workflows can be properly retrieved. - -Toggle USE_V2_CLIENT to switch between: - - V1: AzureAIAgentClient (azure-ai-agents SDK) - - V2: AzureAIClient (azure-ai-projects 2.x with Responses API) - -IMPORTANT: When using V2 AzureAIClient with HandoffBuilder, each agent must -have its own client instance. The V2 client binds to a single server-side -agent name, so sharing a client between agents causes routing issues. 
- -Prerequisites: - - `az login` (Azure CLI authentication) - - V1: AZURE_AI_AGENT_PROJECT_CONNECTION_STRING - - V2: AZURE_AI_PROJECT_ENDPOINT, AZURE_AI_MODEL_DEPLOYMENT_NAME -""" - -import asyncio -from collections.abc import AsyncIterable, AsyncIterator -from contextlib import asynccontextmanager -from typing import cast - -from agent_framework import ( - Agent, - AgentResponseUpdate, - Message, - WorkflowEvent, - WorkflowRunState, -) -from agent_framework.orchestrations import HandoffAgentUserRequest, HandoffBuilder -from azure.identity.aio import AzureCliCredential - -# Toggle between V1 (AzureAIAgentClient) and V2 (AzureAIClient) -USE_V2_CLIENT = False - - -async def _drain(stream: AsyncIterable[WorkflowEvent]) -> list[WorkflowEvent]: - """Collect all events from an async stream.""" - return [event async for event in stream] - - -def _handle_events(events: list[WorkflowEvent]) -> tuple[list[WorkflowEvent[HandoffAgentUserRequest]], list[str]]: - """Process workflow events and extract file IDs and pending requests. 
- - Returns: - Tuple of (pending_requests, file_ids_found) - """ - - requests: list[WorkflowEvent[HandoffAgentUserRequest]] = [] - file_ids: list[str] = [] - - for event in events: - if event.type == "handoff_sent": - print(f"\n[Handoff from {event.data.source} to {event.data.target} initiated.]") - elif event.type == "status" and event.state in { - WorkflowRunState.IDLE, - WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, - }: - print(f"[status] {event.state.name}") - elif event.type == "request_info" and isinstance(event.data, HandoffAgentUserRequest): - requests.append(cast(WorkflowEvent[HandoffAgentUserRequest], event)) - elif event.type == "output": - data = event.data - if isinstance(data, AgentResponseUpdate): - for content in data.contents: - if content.type == "hosted_file": - file_ids.append(content.file_id) # type: ignore - print(f"[Found HostedFileContent: file_id={content.file_id}]") - elif content.type == "text" and content.annotations: - for annotation in content.annotations: - file_id = annotation["file_id"] # type: ignore - file_ids.append(file_id) - print(f"[Found file annotation: file_id={file_id}]") - elif event.type == "output": - conversation = cast(list[Message], event.data) - if isinstance(conversation, list): - print("\n=== Final Conversation Snapshot ===") - for message in conversation: - speaker = message.author_name or message.role - print(f"- {speaker}: {message.text or [content.type for content in message.contents]}") - print("===================================") - - return requests, file_ids - - -@asynccontextmanager -async def create_agents_v1(credential: AzureCliCredential) -> AsyncIterator[tuple[Agent, Agent]]: - """Create agents using V1 AzureAIAgentClient.""" - from agent_framework.azure import AzureAIAgentClient - - async with AzureAIAgentClient(credential=credential) as client: - triage = client.as_agent( - name="triage_agent", - instructions=( - "You are a triage agent. Route code-related requests to the code_specialist. 
" - "When the user asks to create or generate files, hand off to code_specialist " - "by calling handoff_to_code_specialist." - ), - ) - - # Create code interpreter tool using instance method - code_interpreter_tool = client.get_code_interpreter_tool() - - code_specialist = client.as_agent( - name="code_specialist", - instructions=( - "You are a Python code specialist. Use the code interpreter to execute Python code " - "and create files when requested. Always save files to /mnt/data/ directory." - ), - tools=[code_interpreter_tool], - ) - - yield triage, code_specialist # type: ignore - - -@asynccontextmanager -async def create_agents_v2(credential: AzureCliCredential) -> AsyncIterator[tuple[Agent, Agent]]: - """Create agents using V2 AzureAIClient. - - Each agent needs its own client instance because the V2 client binds - to a single server-side agent name. - """ - from agent_framework.azure import AzureAIClient - - async with ( - AzureAIClient(credential=credential) as triage_client, - AzureAIClient(credential=credential) as code_client, - ): - triage = triage_client.as_agent( - name="TriageAgent", - instructions="You are a triage agent. Your ONLY job is to route requests to the appropriate specialist.", - ) - - # Create code interpreter tool using instance method - code_interpreter_tool = code_client.get_code_interpreter_tool() - - code_specialist = code_client.as_agent( - name="CodeSpecialist", - instructions=( - "You are a Python code specialist. You have access to a code interpreter tool. " - "Use the code interpreter to execute Python code and create files. " - "Always save files to /mnt/data/ directory. " - "Do NOT discuss handoffs or routing - just complete the coding task directly." 
- ), - tools=[code_interpreter_tool], - ) - - yield triage, code_specialist - - -async def main() -> None: - """Run a simple handoff workflow with code interpreter file generation.""" - client_version = "V2 (AzureAIClient)" if USE_V2_CLIENT else "V1 (AzureAIAgentClient)" - print(f"=== Handoff Workflow with Code Interpreter File Generation [{client_version}] ===\n") - - async with AzureCliCredential() as credential: - create_agents = create_agents_v2 if USE_V2_CLIENT else create_agents_v1 - - async with create_agents(credential) as (triage, code_specialist): - workflow = ( - HandoffBuilder( - termination_condition=lambda conv: sum(1 for msg in conv if msg.role == "user") >= 2, - ) - .participants([triage, code_specialist]) - .with_start_agent(triage) - .build() - ) - - user_inputs = [ - "Please create a text file called hello.txt with 'Hello from handoff workflow!' inside it.", - "exit", - ] - input_index = 0 - all_file_ids: list[str] = [] - - print(f"User: {user_inputs[0]}") - events = await _drain(workflow.run(user_inputs[0], stream=True)) - requests, file_ids = _handle_events(events) - all_file_ids.extend(file_ids) - input_index += 1 - - while requests: - request = requests[0] - if input_index >= len(user_inputs): - break - user_input = user_inputs[input_index] - print(f"\nUser: {user_input}") - - responses = {request.request_id: HandoffAgentUserRequest.create_response(user_input)} - events = await _drain(workflow.run(stream=True, responses=responses)) - requests, file_ids = _handle_events(events) - all_file_ids.extend(file_ids) - input_index += 1 - - print("\n" + "=" * 50) - if all_file_ids: - print(f"SUCCESS: Found {len(all_file_ids)} file ID(s) in handoff workflow:") - for fid in all_file_ids: - print(f" - {fid}") - else: - print("WARNING: No file IDs captured from the handoff workflow.") - print("=" * 50) - - """ - Sample Output: - - User: Please create a text file called hello.txt with 'Hello from handoff workflow!' inside it. 
- [Found HostedFileContent: file_id=assistant-JT1sA...] - - === Conversation So Far === - - user: Please create a text file called hello.txt with 'Hello from handoff workflow!' inside it. - - triage_agent: I am handing off your request to create the text file "hello.txt" with the specified content to the code specialist. They will assist you shortly. - - code_specialist: The file "hello.txt" has been created with the content "Hello from handoff workflow!". You can download it using the link below: - - [hello.txt](sandbox:/mnt/data/hello.txt) - =========================== - - [status] IDLE_WITH_PENDING_REQUESTS - - User: exit - [status] IDLE - - ================================================== - SUCCESS: Found 1 file ID(s) in handoff workflow: - - assistant-JT1sA... - ================================================== - """ # noqa: E501 - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/getting_started/orchestrations/magentic.py b/python/samples/getting_started/orchestrations/magentic/magentic.py similarity index 85% rename from python/samples/getting_started/orchestrations/magentic.py rename to python/samples/getting_started/orchestrations/magentic/magentic.py index 7ff0a08b1b..52d12b4ce1 100644 --- a/python/samples/getting_started/orchestrations/magentic.py +++ b/python/samples/getting_started/orchestrations/magentic/magentic.py @@ -3,6 +3,7 @@ import asyncio import json import logging +import os from typing import cast from agent_framework import ( @@ -11,8 +12,9 @@ Message, WorkflowEvent, ) -from agent_framework.openai import OpenAIChatClient, OpenAIResponsesClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.orchestrations import GroupChatRequestSentEvent, MagenticBuilder, MagenticProgressLedger +from azure.identity import AzureCliCredential logging.basicConfig(level=logging.WARNING) logger = logging.getLogger(__name__) @@ -38,7 +40,8 @@ events, and prints the final answer. 
The workflow completes when idle. Prerequisites: -- OpenAI credentials configured for `OpenAIChatClient` and `OpenAIResponsesClient`. +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. +- Azure OpenAI credentials configured for `AzureOpenAIResponsesClient`. """ @@ -50,11 +53,19 @@ async def main() -> None: "You are a Researcher. You find information without additional computation or quantitative analysis." ), # This agent requires the gpt-4o-search-preview model to perform web searches. - client=OpenAIChatClient(model_id="gpt-4o-search-preview"), + client=AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ), ) # Create code interpreter tool using instance method - coder_client = OpenAIResponsesClient() + coder_client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) code_interpreter_tool = coder_client.get_code_interpreter_tool() coder_agent = Agent( @@ -70,7 +81,11 @@ async def main() -> None: name="MagenticManager", description="Orchestrator that coordinates the research and coding workflow", instructions="You coordinate a team to complete complex tasks efficiently.", - client=OpenAIChatClient(), + client=AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ), ) print("\nBuilding Magentic Workflow...") diff --git a/python/samples/getting_started/orchestrations/magentic_checkpoint.py b/python/samples/getting_started/orchestrations/magentic/magentic_checkpoint.py similarity index 92% rename from python/samples/getting_started/orchestrations/magentic_checkpoint.py rename to 
python/samples/getting_started/orchestrations/magentic/magentic_checkpoint.py index adce878f0d..54827a2d3b 100644 --- a/python/samples/getting_started/orchestrations/magentic_checkpoint.py +++ b/python/samples/getting_started/orchestrations/magentic/magentic_checkpoint.py @@ -2,6 +2,7 @@ import asyncio import json +import os from datetime import datetime from pathlib import Path from typing import cast @@ -14,9 +15,9 @@ WorkflowEvent, WorkflowRunState, ) -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.orchestrations import MagenticBuilder, MagenticPlanReviewRequest -from azure.identity._credentials import AzureCliCredential +from azure.identity import AzureCliCredential """ Sample: Magentic Orchestration + Checkpointing @@ -34,7 +35,8 @@ `responses` mapping so we can inject the stored human reply during restoration. Prerequisites: -- OpenAI environment variables configured for `OpenAIChatClient`. +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. +- OpenAI environment variables configured for `AzureOpenAIResponsesClient`. """ TASK = ( @@ -57,14 +59,22 @@ def build_workflow(checkpoint_storage: FileCheckpointStorage): name="ResearcherAgent", description="Collects background facts and references for the project.", instructions=("You are the research lead. 
Gather crisp bullet points the team should know."), - client=AzureOpenAIChatClient(credential=AzureCliCredential()), + client=AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ), ) writer = Agent( name="WriterAgent", description="Synthesizes the final brief for stakeholders.", instructions=("You convert the research notes into a structured brief with milestones and risks."), - client=AzureOpenAIChatClient(credential=AzureCliCredential()), + client=AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ), ) # Create a manager agent for orchestration @@ -72,7 +82,11 @@ def build_workflow(checkpoint_storage: FileCheckpointStorage): name="MagenticManager", description="Orchestrator that coordinates the research and writing workflow", instructions="You coordinate a team to complete complex tasks efficiently.", - client=AzureOpenAIChatClient(credential=AzureCliCredential()), + client=AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ), ) # The builder wires in the Magentic orchestrator, sets the plan review path, and diff --git a/python/samples/getting_started/orchestrations/magentic_human_plan_review.py b/python/samples/getting_started/orchestrations/magentic/magentic_human_plan_review.py similarity index 84% rename from python/samples/getting_started/orchestrations/magentic_human_plan_review.py rename to python/samples/getting_started/orchestrations/magentic/magentic_human_plan_review.py index 95f8de5f46..53f2d59df8 100644 --- a/python/samples/getting_started/orchestrations/magentic_human_plan_review.py +++ 
b/python/samples/getting_started/orchestrations/magentic/magentic_human_plan_review.py @@ -2,6 +2,7 @@ import asyncio import json +import os from collections.abc import AsyncIterable from typing import cast @@ -11,8 +12,9 @@ Message, WorkflowEvent, ) -from agent_framework.openai import OpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.orchestrations import MagenticBuilder, MagenticPlanReviewRequest, MagenticPlanReviewResponse +from azure.identity import AzureCliCredential """ Sample: Magentic Orchestration with Human Plan Review @@ -31,7 +33,8 @@ - revise(feedback): Provide textual feedback to modify the plan Prerequisites: -- OpenAI credentials configured for `OpenAIChatClient`. +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. +- OpenAI credentials configured for `AzureOpenAIResponsesClient`. """ # Keep track of the last response to format output nicely in streaming mode @@ -96,21 +99,33 @@ async def main() -> None: name="ResearcherAgent", description="Specialist in research and information gathering", instructions="You are a Researcher. You find information and gather facts.", - client=OpenAIChatClient(model_id="gpt-4o"), + client=AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ), ) analyst_agent = Agent( name="AnalystAgent", description="Data analyst who processes and summarizes research findings", instructions="You are an Analyst. 
You analyze findings and create summaries.", - client=OpenAIChatClient(model_id="gpt-4o"), + client=AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ), ) manager_agent = Agent( name="MagenticManager", description="Orchestrator that coordinates the workflow", instructions="You coordinate a team to complete tasks efficiently.", - client=OpenAIChatClient(model_id="gpt-4o"), + client=AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ), ) print("\nBuilding Magentic Workflow with Human Plan Review...") diff --git a/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py b/python/samples/getting_started/orchestrations/magentic/magentic_workflow_as_agent.py similarity index 78% rename from python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py rename to python/samples/getting_started/orchestrations/magentic/magentic_workflow_as_agent.py index 6255b18d0b..ecceeeacd4 100644 --- a/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py +++ b/python/samples/getting_started/orchestrations/magentic/magentic_workflow_as_agent.py @@ -1,12 +1,14 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio +import os from agent_framework import ( Agent, ) -from agent_framework.openai import OpenAIChatClient, OpenAIResponsesClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.orchestrations import MagenticBuilder +from azure.identity import AzureCliCredential """ Sample: Build a Magentic orchestration and wrap it as an agent. @@ -16,7 +18,8 @@ like any other agent while still emitting callback telemetry. Prerequisites: -- OpenAI credentials configured for `OpenAIChatClient` and `OpenAIResponsesClient`. 
+- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. +- OpenAI credentials configured for `AzureOpenAIResponsesClient`. """ @@ -28,11 +31,19 @@ async def main() -> None: "You are a Researcher. You find information without additional computation or quantitative analysis." ), # This agent requires the gpt-4o-search-preview model to perform web searches. - client=OpenAIChatClient(model_id="gpt-4o-search-preview"), + client=AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ), ) # Create code interpreter tool using instance method - coder_client = OpenAIResponsesClient() + coder_client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) code_interpreter_tool = coder_client.get_code_interpreter_tool() coder_agent = Agent( @@ -48,7 +59,11 @@ async def main() -> None: name="MagenticManager", description="Orchestrator that coordinates the research and coding workflow", instructions="You coordinate a team to complete complex tasks efficiently.", - client=OpenAIChatClient(), + client=AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ), ) print("\nBuilding Magentic Workflow...") diff --git a/python/samples/getting_started/orchestrations/sequential_agents.py b/python/samples/getting_started/orchestrations/sequential/sequential_agents.py similarity index 85% rename from python/samples/getting_started/orchestrations/sequential_agents.py rename to python/samples/getting_started/orchestrations/sequential/sequential_agents.py index 7d77ef35c6..083b2e42ed 100644 ---
a/python/samples/getting_started/orchestrations/sequential_agents.py +++ b/python/samples/getting_started/orchestrations/sequential/sequential_agents.py @@ -1,10 +1,11 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio +import os from typing import cast from agent_framework import Message -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.orchestrations import SequentialBuilder from azure.identity import AzureCliCredential @@ -24,13 +25,18 @@ You can safely ignore them when focusing on agent progress. Prerequisites: -- Azure OpenAI access configured for AzureOpenAIChatClient (use az login + env vars) +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. +- Azure OpenAI access configured for AzureOpenAIResponsesClient (use az login + env vars) """ async def main() -> None: # 1) Create agents - client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) writer = client.as_agent( instructions=("You are a concise copywriter. 
Provide a single, punchy marketing sentence based on the prompt."), diff --git a/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py b/python/samples/getting_started/orchestrations/sequential/sequential_builder_tool_approval.py similarity index 93% rename from python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py rename to python/samples/getting_started/orchestrations/sequential/sequential_builder_tool_approval.py index 2f7ecea0ac..554cca926f 100644 --- a/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py +++ b/python/samples/getting_started/orchestrations/sequential/sequential_builder_tool_approval.py @@ -1,6 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio +import os from collections.abc import AsyncIterable from typing import Annotated, cast @@ -10,8 +11,9 @@ WorkflowEvent, tool, ) -from agent_framework.openai import OpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.orchestrations import SequentialBuilder +from azure.identity import AzureCliCredential """ Sample: Sequential Workflow with Tool Approval Requests @@ -38,6 +40,7 @@ - Resuming workflow execution after approval via run(responses=..., stream=True). Prerequisites: +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. - OpenAI or Azure OpenAI configured with the required environment variables. - Basic familiarity with SequentialBuilder and streaming workflow events. """ @@ -99,7 +102,11 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str async def main() -> None: # 2. 
Create the agent with tools (approval mode is set per-tool via decorator) - client = OpenAIChatClient() + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) database_agent = client.as_agent( name="DatabaseAgent", instructions=( diff --git a/python/samples/getting_started/orchestrations/sequential_custom_executors.py b/python/samples/getting_started/orchestrations/sequential/sequential_custom_executors.py similarity index 89% rename from python/samples/getting_started/orchestrations/sequential_custom_executors.py rename to python/samples/getting_started/orchestrations/sequential/sequential_custom_executors.py index 7f3e61fe2e..9110a828d8 100644 --- a/python/samples/getting_started/orchestrations/sequential_custom_executors.py +++ b/python/samples/getting_started/orchestrations/sequential/sequential_custom_executors.py @@ -1,6 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio +import os from typing import Any from agent_framework import ( @@ -10,7 +11,7 @@ WorkflowContext, handler, ) -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.orchestrations import SequentialBuilder from azure.identity import AzureCliCredential @@ -28,7 +29,8 @@ - Emit the updated conversation via ctx.send_message([...]) Prerequisites: -- Azure OpenAI access configured for AzureOpenAIChatClient (use az login + env vars) +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. 
+- Azure OpenAI access configured for AzureOpenAIResponsesClient (use az login + env vars) """ @@ -58,7 +60,11 @@ async def summarize(self, agent_response: AgentExecutorResponse, ctx: WorkflowCo async def main() -> None: # 1) Create a content agent - client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) content = client.as_agent( instructions="Produce a concise paragraph answering the user's request.", name="content", diff --git a/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py b/python/samples/getting_started/orchestrations/sequential/sequential_request_info.py similarity index 91% rename from python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py rename to python/samples/getting_started/orchestrations/sequential/sequential_request_info.py index eb3578c6b0..633f218632 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py +++ b/python/samples/getting_started/orchestrations/sequential/sequential_request_info.py @@ -17,11 +17,13 @@ - Injecting responses back into the workflow via run(responses=..., stream=True) Prerequisites: -- Azure OpenAI configured for AzureOpenAIChatClient with required environment variables +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. 
+- Azure OpenAI configured for AzureOpenAIResponsesClient with required environment variables - Authentication via azure-identity (run az login before executing) """ import asyncio +import os from collections.abc import AsyncIterable from typing import cast @@ -30,7 +32,7 @@ Message, WorkflowEvent, ) -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.orchestrations import AgentRequestInfoResponse, SequentialBuilder from azure.identity import AzureCliCredential @@ -88,7 +90,11 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str async def main() -> None: - client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) # Create agents for a sequential document review workflow drafter = client.as_agent( diff --git a/python/samples/getting_started/workflows/agents/sequential_workflow_as_agent.py b/python/samples/getting_started/orchestrations/sequential/sequential_workflow_as_agent.py similarity index 87% rename from python/samples/getting_started/workflows/agents/sequential_workflow_as_agent.py rename to python/samples/getting_started/orchestrations/sequential/sequential_workflow_as_agent.py index 73e8cbb2c7..1b2a6c6af4 100644 --- a/python/samples/getting_started/workflows/agents/sequential_workflow_as_agent.py +++ b/python/samples/getting_started/orchestrations/sequential/sequential_workflow_as_agent.py @@ -1,8 +1,9 @@ # Copyright (c) Microsoft. All rights reserved. 
import asyncio +import os -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.orchestrations import SequentialBuilder from azure.identity import AzureCliCredential @@ -21,13 +22,18 @@ You can safely ignore them when focusing on agent progress. Prerequisites: -- Azure OpenAI access configured for AzureOpenAIChatClient (use az login + env vars) +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. +- Azure OpenAI access configured for AzureOpenAIResponsesClient (use az login + env vars) """ async def main() -> None: # 1) Create agents - client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) writer = client.as_agent( instructions=("You are a concise copywriter. Provide a single, punchy marketing sentence based on the prompt."), diff --git a/python/samples/getting_started/workflows/README.md b/python/samples/getting_started/workflows/README.md index ce4aee4172..dd2b4b06e1 100644 --- a/python/samples/getting_started/workflows/README.md +++ b/python/samples/getting_started/workflows/README.md @@ -38,14 +38,10 @@ Once comfortable with these, explore the rest of the samples below. 
| Azure AI Agents (Streaming) | [agents/azure_ai_agents_streaming.py](./agents/azure_ai_agents_streaming.py) | Add Azure AI agents as edges and handle streaming events | | Azure AI Agents (Shared Thread) | [agents/azure_ai_agents_with_shared_thread.py](./agents/azure_ai_agents_with_shared_thread.py) | Share a common message thread between multiple Azure AI agents in a workflow | | Custom Agent Executors | [agents/custom_agent_executors.py](./agents/custom_agent_executors.py) | Create executors to handle agent run methods | -| Sequential Workflow as Agent | [agents/sequential_workflow_as_agent.py](./agents/sequential_workflow_as_agent.py) | Build a sequential workflow orchestrating agents, then expose it as a reusable agent | -| Concurrent Workflow as Agent | [agents/concurrent_workflow_as_agent.py](./agents/concurrent_workflow_as_agent.py) | Build a concurrent fan-out/fan-in workflow, then expose it as a reusable agent | -| Magentic Workflow as Agent | [agents/magentic_workflow_as_agent.py](./agents/magentic_workflow_as_agent.py) | Configure Magentic orchestration with callbacks, then expose the workflow as an agent | | Workflow as Agent (Reflection Pattern) | [agents/workflow_as_agent_reflection_pattern.py](./agents/workflow_as_agent_reflection_pattern.py) | Wrap a workflow so it can behave like an agent (reflection pattern) | | Workflow as Agent + HITL | [agents/workflow_as_agent_human_in_the_loop.py](./agents/workflow_as_agent_human_in_the_loop.py) | Extend workflow-as-agent with human-in-the-loop capability | | Workflow as Agent with Thread | [agents/workflow_as_agent_with_thread.py](./agents/workflow_as_agent_with_thread.py) | Use AgentThread to maintain conversation history across workflow-as-agent invocations | | Workflow as Agent kwargs | [agents/workflow_as_agent_kwargs.py](./agents/workflow_as_agent_kwargs.py) | Pass custom context (data, user tokens) via kwargs through workflow.as_agent() to @ai_function tools | -| Handoff Workflow as Agent | 
[agents/handoff_workflow_as_agent.py](./agents/handoff_workflow_as_agent.py) | Use a HandoffBuilder workflow as an agent with HITL via FunctionCallContent/FunctionResultContent | ### checkpoint @@ -54,7 +50,7 @@ Once comfortable with these, explore the rest of the samples below. | Checkpoint & Resume | [checkpoint/checkpoint_with_resume.py](./checkpoint/checkpoint_with_resume.py) | Create checkpoints, inspect them, and resume execution | | Checkpoint & HITL Resume | [checkpoint/checkpoint_with_human_in_the_loop.py](./checkpoint/checkpoint_with_human_in_the_loop.py) | Combine checkpointing with human approvals and resume pending HITL requests | | Checkpointed Sub-Workflow | [checkpoint/sub_workflow_checkpoint.py](./checkpoint/sub_workflow_checkpoint.py) | Save and resume a sub-workflow that pauses for human approval | -| Handoff + Tool Approval Resume | [checkpoint/handoff_with_tool_approval_checkpoint_resume.py](./checkpoint/handoff_with_tool_approval_checkpoint_resume.py) | Handoff workflow that captures tool-call approvals in checkpoints and resumes with human decisions | +| Handoff + Tool Approval Resume | Moved to orchestration samples | Handoff workflow that captures tool-call approvals in checkpoints and resumes with human decisions | | Workflow as Agent Checkpoint | [checkpoint/workflow_as_agent_checkpoint.py](./checkpoint/workflow_as_agent_checkpoint.py) | Enable checkpointing when using workflow.as_agent() with checkpoint_storage parameter | ### composition @@ -85,19 +81,13 @@ Once comfortable with these, explore the rest of the samples below. 
| Human-In-The-Loop (Guessing Game) | [human-in-the-loop/guessing_game_with_human_input.py](./human-in-the-loop/guessing_game_with_human_input.py) | Interactive request/response prompts with a human via `ctx.request_info()` | | Agents with Approval Requests in Workflows | [human-in-the-loop/agents_with_approval_requests.py](./human-in-the-loop/agents_with_approval_requests.py) | Agents that create approval requests during workflow execution and wait for human approval to proceed | | Agents with Declaration-Only Tools | [human-in-the-loop/agents_with_declaration_only_tools.py](./human-in-the-loop/agents_with_declaration_only_tools.py) | Workflow pauses when agent calls a client-side tool (`func=None`), caller supplies the result | -| SequentialBuilder Request Info | [human-in-the-loop/sequential_request_info.py](./human-in-the-loop/sequential_request_info.py) | Request info for agent responses mid-workflow using `.with_request_info()` on SequentialBuilder | -| ConcurrentBuilder Request Info | [human-in-the-loop/concurrent_request_info.py](./human-in-the-loop/concurrent_request_info.py) | Review concurrent agent outputs before aggregation using `.with_request_info()` on ConcurrentBuilder | -| GroupChatBuilder Request Info | [human-in-the-loop/group_chat_request_info.py](./human-in-the-loop/group_chat_request_info.py) | Steer group discussions with periodic guidance using `.with_request_info()` on GroupChatBuilder | -### tool-approval +Builder-oriented request-info samples are maintained in the orchestration sample set +(sequential, concurrent, and group-chat builder variants). -Tool approval samples demonstrate using `@tool(approval_mode="always_require")` to gate sensitive tool executions with human approval. These work with the high-level builder APIs. 
+### tool-approval -| Sample | File | Concepts | -| ------------------------------- | -------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------- | -| SequentialBuilder Tool Approval | [tool-approval/sequential_builder_tool_approval.py](./tool-approval/sequential_builder_tool_approval.py) | Sequential workflow with tool approval gates for sensitive operations | -| ConcurrentBuilder Tool Approval | [tool-approval/concurrent_builder_tool_approval.py](./tool-approval/concurrent_builder_tool_approval.py) | Concurrent workflow with tool approvals across parallel agents | -| GroupChatBuilder Tool Approval | [tool-approval/group_chat_builder_tool_approval.py](./tool-approval/group_chat_builder_tool_approval.py) | Group chat workflow with tool approval for multi-agent collaboration | +Builder-based tool approval samples are maintained in the orchestration sample set. ### observability @@ -109,7 +99,8 @@ For additional observability samples in Agent Framework, see the [observability ### orchestration -Orchestration samples (Sequential, Concurrent, Handoff, GroupChat, Magentic) have moved to the dedicated [orchestrations samples directory](../orchestrations/README.md). +Orchestration-focused samples (Sequential, Concurrent, Handoff, GroupChat, Magentic), including builder-based +`workflow.as_agent(...)` variants, are documented in the [orchestrations](../orchestrations/README.md) directory. ### parallelism diff --git a/python/samples/getting_started/workflows/_start-here/step2_agents_in_a_workflow.py b/python/samples/getting_started/workflows/_start-here/step2_agents_in_a_workflow.py index 8a8ac369e4..5330cf4973 100644 --- a/python/samples/getting_started/workflows/_start-here/step2_agents_in_a_workflow.py +++ b/python/samples/getting_started/workflows/_start-here/step2_agents_in_a_workflow.py @@ -1,10 +1,11 @@ # Copyright (c) Microsoft. All rights reserved. 
import asyncio +import os from typing import cast from agent_framework import AgentResponse, WorkflowBuilder -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import AzureCliCredential """ @@ -14,11 +15,12 @@ evaluates and provides feedback. Purpose: -Show how to create agents from AzureOpenAIChatClient and use them directly in a workflow. Demonstrate +Show how to create agents from AzureOpenAIResponsesClient and use them directly in a workflow. Demonstrate how agents can be used in a workflow. Prerequisites: -- Azure OpenAI configured for AzureOpenAIChatClient with required environment variables. +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. +- Azure OpenAI configured for AzureOpenAIResponsesClient with required environment variables. - Authentication via azure-identity. Use AzureCliCredential and run az login before executing the sample. - Basic familiarity with WorkflowBuilder, edges, events, and streaming or non-streaming runs. """ @@ -27,7 +29,11 @@ async def main(): """Build and run a simple two node agent workflow: Writer then Reviewer.""" # Create the Azure chat client. AzureCliCredential uses your current az login. - client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) writer_agent = client.as_agent( instructions=( "You are an excellent content writer. You create new content and edit contents based on the feedback." 
diff --git a/python/samples/getting_started/workflows/_start-here/step3_streaming.py b/python/samples/getting_started/workflows/_start-here/step3_streaming.py index 7c5a7c86a7..15e3512c02 100644 --- a/python/samples/getting_started/workflows/_start-here/step3_streaming.py +++ b/python/samples/getting_started/workflows/_start-here/step3_streaming.py @@ -1,9 +1,10 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio +import os from agent_framework import AgentResponseUpdate, Message, WorkflowBuilder -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import AzureCliCredential """ @@ -13,11 +14,12 @@ evaluates and provides feedback. Purpose: -Show how to create agents from AzureOpenAIChatClient and use them directly in a workflow. Demonstrate +Show how to create agents from AzureOpenAIResponsesClient and use them directly in a workflow. Demonstrate how agents can be used in a workflow. Prerequisites: -- Azure OpenAI configured for AzureOpenAIChatClient with required environment variables. +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. +- Azure OpenAI configured for AzureOpenAIResponsesClient with required environment variables. - Authentication via azure-identity. Use AzureCliCredential and run az login before executing the sample. - Basic familiarity with WorkflowBuilder, executors, edges, events, and streaming runs. """ @@ -26,7 +28,11 @@ async def main(): """Build the two node workflow and run it with streaming to observe events.""" # Create the Azure chat client. AzureCliCredential uses your current az login. 
- client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) writer_agent = client.as_agent( instructions=( "You are an excellent content writer. You create new content and edit contents based on the feedback." diff --git a/python/samples/getting_started/workflows/agents/azure_ai_agents_streaming.py b/python/samples/getting_started/workflows/agents/azure_ai_agents_streaming.py index d05fcbf319..ccca56cf36 100644 --- a/python/samples/getting_started/workflows/agents/azure_ai_agents_streaming.py +++ b/python/samples/getting_started/workflows/agents/azure_ai_agents_streaming.py @@ -1,65 +1,70 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio +import os from agent_framework import AgentResponseUpdate, WorkflowBuilder -from agent_framework.azure import AzureAIAgentClient -from azure.identity.aio import AzureCliCredential +from agent_framework.azure import AzureOpenAIResponsesClient +from azure.identity import AzureCliCredential """ Sample: Azure AI Agents in a Workflow with Streaming -This sample shows how to create Azure AI Agents and use them in a workflow with streaming. +This sample shows how to create agents backed by Azure OpenAI Responses and use them in a workflow with streaming. Prerequisites: -- Azure AI Agent Service configured, along with the required environment variables. +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. +- AZURE_AI_MODEL_DEPLOYMENT_NAME must be set to your Azure OpenAI model deployment name. - Authentication via azure-identity. Use AzureCliCredential and run az login before executing the sample. - Basic familiarity with WorkflowBuilder, edges, events, and streaming runs. 
""" async def main() -> None: - async with AzureCliCredential() as cred, AzureAIAgentClient(credential=cred) as client: - # Create two agents: a Writer and a Reviewer. - writer_agent = client.as_agent( - name="Writer", - instructions=( - "You are an excellent content writer. You create new content and edit contents based on the feedback." - ), - ) + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) - reviewer_agent = client.as_agent( - name="Reviewer", - instructions=( - "You are an excellent content reviewer. " - "Provide actionable feedback to the writer about the provided content. " - "Provide the feedback in the most concise manner possible." - ), - ) + # Create two agents: a Writer and a Reviewer. + writer_agent = client.as_agent( + name="Writer", + instructions=( + "You are an excellent content writer. You create new content and edit contents based on the feedback." + ), + ) - # Build the workflow by adding agents directly as edges. - # Agents adapt to workflow mode: run(stream=True) for incremental updates, run() for complete responses. - workflow = WorkflowBuilder(start_executor=writer_agent).add_edge(writer_agent, reviewer_agent).build() + reviewer_agent = client.as_agent( + name="Reviewer", + instructions=( + "You are an excellent content reviewer. " + "Provide actionable feedback to the writer about the provided content. " + "Provide the feedback in the most concise manner possible." + ), + ) - # Track the last author to format streaming output. - last_author: str | None = None + # Build the workflow by adding agents directly as edges. + # Agents adapt to workflow mode: run(stream=True) for incremental updates, run() for complete responses. 
+ workflow = WorkflowBuilder(start_executor=writer_agent).add_edge(writer_agent, reviewer_agent).build() - events = workflow.run( - "Create a slogan for a new electric SUV that is affordable and fun to drive.", stream=True - ) - async for event in events: - # The outputs of the workflow are whatever the agents produce. So the events are expected to - # contain `AgentResponseUpdate` from the agents in the workflow. - if event.type == "output" and isinstance(event.data, AgentResponseUpdate): - update = event.data - author = update.author_name - if author != last_author: - if last_author is not None: - print() # Newline between different authors - print(f"{author}: {update.text}", end="", flush=True) - last_author = author - else: - print(update.text, end="", flush=True) + # Track the last author to format streaming output. + last_author: str | None = None + + events = workflow.run("Create a slogan for a new electric SUV that is affordable and fun to drive.", stream=True) + async for event in events: + # The outputs of the workflow are whatever the agents produce. So the events are expected to + # contain `AgentResponseUpdate` from the agents in the workflow. 
+ if event.type == "output" and isinstance(event.data, AgentResponseUpdate): + update = event.data + author = update.author_name + if author != last_author: + if last_author is not None: + print() # Newline between different authors + print(f"{author}: {update.text}", end="", flush=True) + last_author = author + else: + print(update.text, end="", flush=True) if __name__ == "__main__": diff --git a/python/samples/getting_started/workflows/agents/azure_ai_agents_with_shared_thread.py b/python/samples/getting_started/workflows/agents/azure_ai_agents_with_shared_thread.py index c5ab83e3e7..3492be6474 100644 --- a/python/samples/getting_started/workflows/agents/azure_ai_agents_with_shared_thread.py +++ b/python/samples/getting_started/workflows/agents/azure_ai_agents_with_shared_thread.py @@ -1,6 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio +import os from agent_framework import ( AgentExecutor, @@ -12,8 +13,8 @@ WorkflowRunState, executor, ) -from agent_framework.azure import AzureAIProjectAgentProvider -from azure.identity.aio import AzureCliCredential +from agent_framework.azure import AzureOpenAIResponsesClient +from azure.identity import AzureCliCredential """ Sample: Agents with a shared thread in a workflow @@ -28,11 +29,12 @@ - Not all agents can share threads; usually only the same type of agents can share threads. Demonstrate: -- Creating multiple agents with Azure AI Agent Service (V2 API). +- Creating multiple agents with AzureOpenAIResponsesClient. - Setting up a shared thread between agents. Prerequisites: -- Azure AI Agent Service configured, along with the required environment variables. +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. +- AZURE_AI_MODEL_DEPLOYMENT_NAME must be set to your Azure OpenAI model deployment name. - Authentication via azure-identity. Use AzureCliCredential and run az login before executing the sample. 
- Basic familiarity with agents, workflows, and executors in the agent framework. """ @@ -51,49 +53,49 @@ async def intercept_agent_response( async def main() -> None: - async with ( - AzureCliCredential() as credential, - AzureAIProjectAgentProvider(credential=credential) as provider, - ): - writer = await provider.create_agent( - instructions=( - "You are a concise copywriter. Provide a single, punchy marketing sentence based on the prompt." - ), - name="writer", - ) - - reviewer = await provider.create_agent( - instructions=("You are a thoughtful reviewer. Give brief feedback on the previous assistant message."), - name="reviewer", - ) - - shared_thread = writer.get_new_thread() - # Set the message store to store messages in memory. - shared_thread.message_store = ChatMessageStore() - - writer_executor = AgentExecutor(writer, agent_thread=shared_thread) - reviewer_executor = AgentExecutor(reviewer, agent_thread=shared_thread) - - workflow = ( - WorkflowBuilder(start_executor=writer_executor) - .add_chain([writer_executor, intercept_agent_response, reviewer_executor]) - .build() - ) - - result = await workflow.run( - "Write a tagline for a budget-friendly eBike.", - # Keyword arguments will be passed to each agent call. - # Setting store=False to avoid storing messages in the service for this example. - options={"store": False}, - ) - # The final state should be IDLE since the workflow no longer has messages to - # process after the reviewer agent responds. - assert result.get_final_state() == WorkflowRunState.IDLE - - # The shared thread now contains the conversation between the writer and reviewer. Print it out. 
- print("=== Shared Thread Conversation ===") - for message in shared_thread.message_store.messages: - print(f"{message.author_name or message.role}: {message.text}") + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) + + writer = client.as_agent( + instructions=("You are a concise copywriter. Provide a single, punchy marketing sentence based on the prompt."), + name="writer", + ) + + reviewer = client.as_agent( + instructions=("You are a thoughtful reviewer. Give brief feedback on the previous assistant message."), + name="reviewer", + ) + + shared_thread = writer.get_new_thread() + # Set the message store to store messages in memory. + shared_thread.message_store = ChatMessageStore() + + writer_executor = AgentExecutor(writer, agent_thread=shared_thread) + reviewer_executor = AgentExecutor(reviewer, agent_thread=shared_thread) + + workflow = ( + WorkflowBuilder(start_executor=writer_executor) + .add_chain([writer_executor, intercept_agent_response, reviewer_executor]) + .build() + ) + + result = await workflow.run( + "Write a tagline for a budget-friendly eBike.", + # Keyword arguments will be passed to each agent call. + # Setting store=False to avoid storing messages in the service for this example. + options={"store": False}, + ) + # The final state should be IDLE since the workflow no longer has messages to + # process after the reviewer agent responds. + assert result.get_final_state() == WorkflowRunState.IDLE + + # The shared thread now contains the conversation between the writer and reviewer. Print it out. 
+ print("=== Shared Thread Conversation ===") + for message in shared_thread.message_store.messages: + print(f"{message.author_name or message.role}: {message.text}") if __name__ == "__main__": diff --git a/python/samples/getting_started/workflows/agents/azure_chat_agents_and_executor.py b/python/samples/getting_started/workflows/agents/azure_chat_agents_and_executor.py index 8de5b71b73..ed724332b9 100644 --- a/python/samples/getting_started/workflows/agents/azure_chat_agents_and_executor.py +++ b/python/samples/getting_started/workflows/agents/azure_chat_agents_and_executor.py @@ -1,6 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio +import os from typing import Final from agent_framework import ( @@ -12,7 +13,7 @@ WorkflowContext, executor, ) -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import AzureCliCredential """ @@ -30,7 +31,8 @@ - Consuming an AgentExecutorResponse and forwarding an AgentExecutorRequest for the next agent. Prerequisites: -- Azure OpenAI configured for AzureOpenAIChatClient with required environment variables. +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. +- Azure OpenAI configured for AzureOpenAIResponsesClient with required environment variables. - Authentication via azure-identity. Run `az login` before executing. 
""" @@ -94,14 +96,22 @@ async def enrich_with_references( async def main() -> None: """Run the workflow and stream combined updates from both agents.""" # Create the agents - research_agent = AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + research_agent = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ).as_agent( name="research_agent", instructions=( "Produce a short, bullet-style briefing with two actionable ideas. Label the section as 'Initial Draft'." ), ) - final_editor_agent = AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + final_editor_agent = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ).as_agent( name="final_editor_agent", instructions=( "Use all conversation context (including external notes) to produce the final answer. " diff --git a/python/samples/getting_started/workflows/agents/azure_chat_agents_streaming.py b/python/samples/getting_started/workflows/agents/azure_chat_agents_streaming.py index 04c08a0602..a18a6e7086 100644 --- a/python/samples/getting_started/workflows/agents/azure_chat_agents_streaming.py +++ b/python/samples/getting_started/workflows/agents/azure_chat_agents_streaming.py @@ -1,9 +1,10 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio +import os from agent_framework import AgentResponseUpdate, WorkflowBuilder -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import AzureCliCredential """ @@ -12,7 +13,8 @@ This sample shows how to create AzureOpenAI Chat Agents and use them in a workflow with streaming. Prerequisites: -- Azure OpenAI configured for AzureOpenAIChatClient with required environment variables. 
+- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. +- Azure OpenAI configured for AzureOpenAIResponsesClient with required environment variables. - Authentication via azure-identity. Use AzureCliCredential and run az login before executing the sample. - Basic familiarity with WorkflowBuilder, edges, events, and streaming runs. """ @@ -21,14 +23,22 @@ async def main(): """Build and run a simple two node agent workflow: Writer then Reviewer.""" # Create the agents - writer_agent = AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + writer_agent = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ).as_agent( instructions=( "You are an excellent content writer. You create new content and edit contents based on the feedback." ), name="writer", ) - reviewer_agent = AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + reviewer_agent = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ).as_agent( instructions=( "You are an excellent content reviewer." "Provide actionable feedback to the writer about the provided content." 
diff --git a/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py b/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py index cacaa2b493..10afd3304f 100644 --- a/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py +++ b/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py @@ -2,6 +2,7 @@ import asyncio import json +import os from dataclasses import dataclass, field from typing import Annotated @@ -21,7 +22,7 @@ response_handler, tool, ) -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import AzureCliCredential from pydantic import Field from typing_extensions import Never @@ -43,7 +44,8 @@ - Streaming AgentRunUpdateEvent updates alongside human-in-the-loop pauses. Prerequisites: -- Azure OpenAI configured for AzureOpenAIChatClient with required environment variables. +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. +- Azure OpenAI configured for AzureOpenAIResponsesClient with required environment variables. - Authentication via azure-identity. Run `az login` before executing. """ @@ -170,7 +172,11 @@ async def on_human_feedback( def create_writer_agent() -> Agent: """Creates a writer agent with tools.""" - return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + return AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ).as_agent( name="writer_agent", instructions=( "You are a marketing writer. Call the available tools before drafting copy so you are precise. 
" @@ -184,7 +190,11 @@ def create_writer_agent() -> Agent: def create_final_editor_agent() -> Agent: """Creates a final editor agent.""" - return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + return AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ).as_agent( name="final_editor_agent", instructions=( "You are an editor who polishes marketing copy after human approval. " diff --git a/python/samples/getting_started/workflows/agents/custom_agent_executors.py b/python/samples/getting_started/workflows/agents/custom_agent_executors.py index a44aff4f09..fa727efdf9 100644 --- a/python/samples/getting_started/workflows/agents/custom_agent_executors.py +++ b/python/samples/getting_started/workflows/agents/custom_agent_executors.py @@ -1,6 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio +import os from agent_framework import ( Agent, @@ -10,7 +11,7 @@ WorkflowContext, handler, ) -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import AzureCliCredential """ @@ -20,14 +21,15 @@ then hands the conversation to a Reviewer agent which evaluates and finalizes the result. Purpose: -Show how to wrap chat agents created by AzureOpenAIChatClient inside workflow executors. Demonstrate the @handler +Show how to wrap chat agents created by AzureOpenAIResponsesClient inside workflow executors. Demonstrate the @handler pattern with typed inputs and typed WorkflowContext[T] outputs, connect executors with the fluent WorkflowBuilder, and finish by yielding outputs from the terminal node. Note: When an agent is passed to a workflow, the workflow essenatially wrap the agent in a more sophisticated executor. Prerequisites: -- Azure OpenAI configured for AzureOpenAIChatClient with required environment variables. 
+- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. +- Azure OpenAI configured for AzureOpenAIResponsesClient with required environment variables. - Authentication via azure-identity. Use AzureCliCredential and run az login before executing the sample. - Basic familiarity with WorkflowBuilder, executors, edges, events, and streaming or non streaming runs. """ @@ -44,8 +46,12 @@ class Writer(Executor): agent: Agent def __init__(self, id: str = "writer"): - # Create a domain specific agent using your configured AzureOpenAIChatClient. - self.agent = AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + # Create a domain specific agent using your configured AzureOpenAIResponsesClient. + self.agent = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ).as_agent( instructions=( "You are an excellent content writer. You create new content and edit contents based on the feedback." ), @@ -87,7 +93,11 @@ class Reviewer(Executor): def __init__(self, id: str = "reviewer"): # Create a domain specific agent that evaluates and refines content. - self.agent = AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + self.agent = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ).as_agent( instructions=( "You are an excellent content reviewer. You review the content and provide feedback to the writer." 
), diff --git a/python/samples/getting_started/workflows/agents/workflow_as_agent_human_in_the_loop.py b/python/samples/getting_started/workflows/agents/workflow_as_agent_human_in_the_loop.py index 30c1d78a3e..d0a679455a 100644 --- a/python/samples/getting_started/workflows/agents/workflow_as_agent_human_in_the_loop.py +++ b/python/samples/getting_started/workflows/agents/workflow_as_agent_human_in_the_loop.py @@ -1,13 +1,14 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio +import os import sys from collections.abc import Mapping from dataclasses import dataclass from pathlib import Path from typing import Any -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import AzureCliCredential # Ensure local getting_started package can be imported when running as a script. @@ -42,7 +43,8 @@ to the Worker. The workflow completes when idle. Prerequisites: -- OpenAI account configured and accessible for OpenAIChatClient. +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. +- OpenAI account configured and accessible for AzureOpenAIResponsesClient. - Familiarity with WorkflowBuilder, Executor, and WorkflowContext from agent_framework. - Understanding of request-response message handling in executors. - (Optional) Review of reflection and escalation patterns, such as those in @@ -100,7 +102,11 @@ async def main() -> None: # and escalation paths for human review. 
worker = Worker( id="worker", - chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), + chat_client=AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ), ) reviewer = ReviewerWithHumanInTheLoop(worker_id="worker") diff --git a/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py b/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py index a41ede52d1..a58095f3f0 100644 --- a/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py +++ b/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py @@ -2,11 +2,13 @@ import asyncio import json +import os from typing import Annotated, Any from agent_framework import tool -from agent_framework.openai import OpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.orchestrations import SequentialBuilder +from azure.identity import AzureCliCredential from pydantic import Field """ @@ -28,6 +30,7 @@ - To maintain a consistent agent interface for callers Prerequisites: +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. 
- OpenAI environment variables configured """ @@ -80,7 +83,11 @@ async def main() -> None: print("=" * 70) # Create chat client - client = OpenAIChatClient() + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) # Create agent with tools that use kwargs agent = client.as_agent( diff --git a/python/samples/getting_started/workflows/agents/workflow_as_agent_reflection_pattern.py b/python/samples/getting_started/workflows/agents/workflow_as_agent_reflection_pattern.py index d2aa65c9a2..ed885d5ade 100644 --- a/python/samples/getting_started/workflows/agents/workflow_as_agent_reflection_pattern.py +++ b/python/samples/getting_started/workflows/agents/workflow_as_agent_reflection_pattern.py @@ -1,6 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio +import os from dataclasses import dataclass from uuid import uuid4 @@ -13,7 +14,8 @@ WorkflowContext, handler, ) -from agent_framework.openai import OpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient +from azure.identity import AzureCliCredential from pydantic import BaseModel """ @@ -33,7 +35,8 @@ - State management for pending requests and retry logic. Prerequisites: -- OpenAI account configured and accessible for OpenAIChatClient. +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. +- OpenAI account configured and accessible for AzureOpenAIResponsesClient. - Familiarity with WorkflowBuilder, Executor, WorkflowContext, and event handling. - Understanding of how agent messages are generated, reviewed, and re-submitted. 
""" @@ -186,8 +189,22 @@ async def main() -> None: print("=" * 50) print("Building workflow with Worker ↔ Reviewer cycle...") - worker = Worker(id="worker", chat_client=OpenAIChatClient(model_id="gpt-4.1-nano")) - reviewer = Reviewer(id="reviewer", chat_client=OpenAIChatClient(model_id="gpt-4.1")) + worker = Worker( + id="worker", + chat_client=AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ), + ) + reviewer = Reviewer( + id="reviewer", + chat_client=AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ), + ) agent = ( WorkflowBuilder(start_executor=worker) diff --git a/python/samples/getting_started/workflows/agents/workflow_as_agent_with_thread.py b/python/samples/getting_started/workflows/agents/workflow_as_agent_with_thread.py index 0e84b10821..d2b8234ac0 100644 --- a/python/samples/getting_started/workflows/agents/workflow_as_agent_with_thread.py +++ b/python/samples/getting_started/workflows/agents/workflow_as_agent_with_thread.py @@ -1,10 +1,12 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio +import os from agent_framework import AgentThread, ChatMessageStore -from agent_framework.openai import OpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.orchestrations import SequentialBuilder +from azure.identity import AzureCliCredential """ Sample: Workflow as Agent with Thread Conversation History and Checkpointing @@ -31,13 +33,18 @@ - Long-running workflows that need pause/resume capability Prerequisites: -- OpenAI environment variables configured for OpenAIChatClient +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. 
+- OpenAI environment variables configured for AzureOpenAIResponsesClient """ async def main() -> None: # Create a chat client - client = OpenAIChatClient() + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) assistant = client.as_agent( name="assistant", @@ -119,7 +126,11 @@ async def demonstrate_thread_serialization() -> None: This shows how conversation history can be persisted and restored, enabling long-running conversational workflows. """ - client = OpenAIChatClient() + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) memory_assistant = client.as_agent( name="memory_assistant", diff --git a/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py b/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py index 12cb08a8be..b26d4dd8e8 100644 --- a/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py +++ b/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py @@ -1,12 +1,15 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio +import os import sys from dataclasses import dataclass from datetime import datetime from pathlib import Path from typing import Any +from azure.identity import AzureCliCredential + if sys.version_info >= (3, 12): from typing import override # type: ignore # pragma: no cover else: @@ -30,8 +33,7 @@ handler, response_handler, ) -from agent_framework.azure import AzureOpenAIChatClient -from azure.identity import AzureCliCredential +from agent_framework.azure import AzureOpenAIResponsesClient """ Sample: Checkpoint + human-in-the-loop quickstart. 
@@ -178,7 +180,11 @@ def create_workflow(checkpoint_storage: FileCheckpointStorage) -> Workflow: # Wire the workflow DAG. Edges mirror the numbered steps described in the # module docstring. Because `WorkflowBuilder` is declarative, reading these # edges is often the quickest way to understand execution order. - writer_agent = AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + writer_agent = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ).as_agent( instructions="Write concise, warm release notes that sound human and helpful.", name="writer", ) diff --git a/python/samples/getting_started/workflows/checkpoint/workflow_as_agent_checkpoint.py b/python/samples/getting_started/workflows/checkpoint/workflow_as_agent_checkpoint.py index 552ced2892..3593ad6a97 100644 --- a/python/samples/getting_started/workflows/checkpoint/workflow_as_agent_checkpoint.py +++ b/python/samples/getting_started/workflows/checkpoint/workflow_as_agent_checkpoint.py @@ -20,18 +20,21 @@ - These are complementary: threads track conversation, checkpoints track workflow state Prerequisites: -- OpenAI environment variables configured for OpenAIChatClient +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. 
+- OpenAI environment variables configured for AzureOpenAIResponsesClient """ import asyncio +import os from agent_framework import ( AgentThread, ChatMessageStore, InMemoryCheckpointStorage, ) -from agent_framework.openai import OpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.orchestrations import SequentialBuilder +from azure.identity import AzureCliCredential async def basic_checkpointing() -> None: @@ -40,7 +43,11 @@ async def basic_checkpointing() -> None: print("Basic Checkpointing with Workflow as Agent") print("=" * 60) - client = OpenAIChatClient() + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) assistant = client.as_agent( name="assistant", @@ -81,7 +88,11 @@ async def checkpointing_with_thread() -> None: print("Checkpointing with Thread Conversation History") print("=" * 60) - client = OpenAIChatClient() + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) assistant = client.as_agent( name="memory_assistant", @@ -124,7 +135,11 @@ async def streaming_with_checkpoints() -> None: print("Streaming with Checkpointing") print("=" * 60) - client = OpenAIChatClient() + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) assistant = client.as_agent( name="streaming_assistant", diff --git a/python/samples/getting_started/workflows/composition/sub_workflow_basics.py b/python/samples/getting_started/workflows/composition/sub_workflow_basics.py index 1eeac824b5..fff4efbc7b 100644 --- a/python/samples/getting_started/workflows/composition/sub_workflow_basics.py +++ 
b/python/samples/getting_started/workflows/composition/sub_workflow_basics.py @@ -58,13 +58,13 @@ async def process_text( ) -> None: """Process a text string and return statistics.""" text_preview = f"'{request.text[:50]}{'...' if len(request.text) > 50 else ''}'" - print(f"šŸ” Sub-workflow processing text (Task {request.task_id}): {text_preview}") + print(f"Sub-workflow processing text (Task {request.task_id}): {text_preview}") # Simple text processing word_count = len(request.text.split()) if request.text.strip() else 0 char_count = len(request.text) - print(f"šŸ“Š Task {request.task_id}: {word_count} words, {char_count} characters") + print(f"Task {request.task_id}: {word_count} words, {char_count} characters") # Create result result = TextProcessingResult( @@ -74,7 +74,7 @@ async def process_text( char_count=char_count, ) - print(f"āœ… Sub-workflow completed task {request.task_id}") + print(f"Sub-workflow completed task {request.task_id}") # Signal completion by yielding the result await ctx.yield_output(result) @@ -92,7 +92,7 @@ def __init__(self): @handler async def start_processing(self, texts: list[str], ctx: WorkflowContext[TextProcessingRequest]) -> None: """Start processing multiple text strings.""" - print(f"šŸ“„ Starting processing of {len(texts)} text strings") + print(f"Starting processing of {len(texts)} text strings") print("=" * 60) self.expected_count = len(texts) @@ -101,7 +101,7 @@ async def start_processing(self, texts: list[str], ctx: WorkflowContext[TextProc for i, text in enumerate(texts): task_id = f"task_{i + 1}" request = TextProcessingRequest(text=text, task_id=task_id) - print(f"šŸ“¤ Dispatching {task_id} to sub-workflow") + print(f"Dispatching {task_id} to sub-workflow") await ctx.send_message(request, target_id="text_processor_workflow") @handler @@ -111,12 +111,12 @@ async def collect_result( ctx: WorkflowContext[Never, list[TextProcessingResult]], ) -> None: """Collect results from sub-workflows.""" - print(f"šŸ“„ Collected result 
from {result.task_id}") + print(f"Collected result from {result.task_id}") self.results.append(result) # Check if all results are collected if len(self.results) == self.expected_count: - print("\nšŸŽ‰ All tasks completed!") + print("\nAll tasks completed!") await ctx.yield_output(self.results) @@ -138,7 +138,7 @@ def get_result_summary(results: list[TextProcessingResult]) -> dict[str, Any]: def create_sub_workflow() -> WorkflowExecutor: """Create the text processing sub-workflow.""" - print("šŸš€ Setting up sub-workflow...") + print("Setting up sub-workflow...") text_processor = TextProcessor() processing_workflow = ( @@ -151,7 +151,7 @@ def create_sub_workflow() -> WorkflowExecutor: async def main(): """Main function to run the basic sub-workflow example.""" - print("šŸ”§ Setting up parent workflow...") + print("Setting up parent workflow...") # Step 1: Create the parent workflow orchestrator = TextProcessingOrchestrator() sub_workflow_executor = create_sub_workflow() @@ -172,14 +172,14 @@ async def main(): " Spaces around text ", ] - print(f"\n🧪 Testing with {len(test_texts)} text strings") + print(f"\nTesting with {len(test_texts)} text strings") print("=" * 60) # Step 3: Run the workflow result = await main_workflow.run(test_texts) # Step 4: Display results - print("\nšŸ“Š Processing Results:") + print("\nProcessing Results:") print("=" * 60) # Sort results by task_id for consistent display @@ -190,19 +190,19 @@ async def main(): for result in sorted_results: preview = result.text[:30] + "..." 
if len(result.text) > 30 else result.text preview = preview.replace("\n", " ").strip() or "(empty)" - print(f"āœ… {result.task_id}: '{preview}' -> {result.word_count} words, {result.char_count} chars") + print(f"{result.task_id}: '{preview}' -> {result.word_count} words, {result.char_count} chars") # Step 6: Display summary summary = get_result_summary(sorted_results) - print("\nšŸ“ˆ Summary:") + print("\nSummary:") print("=" * 60) - print(f"šŸ“„ Total texts processed: {summary['total_texts']}") - print(f"šŸ“ Total words: {summary['total_words']}") - print(f"šŸ”¤ Total characters: {summary['total_characters']}") - print(f"šŸ“Š Average words per text: {summary['average_words_per_text']}") - print(f"šŸ“ Average characters per text: {summary['average_characters_per_text']}") + print(f"Total texts processed: {summary['total_texts']}") + print(f"Total words: {summary['total_words']}") + print(f"Total characters: {summary['total_characters']}") + print(f"Average words per text: {summary['average_words_per_text']}") + print(f"Average characters per text: {summary['average_characters_per_text']}") - print("\nšŸ Processing complete!") + print("\nProcessing complete!") if __name__ == "__main__": diff --git a/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py b/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py index af6ed4d61a..ce19c27f95 100644 --- a/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py +++ b/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py @@ -2,6 +2,7 @@ import asyncio import json +import os from typing import Annotated, Any from agent_framework import ( @@ -9,8 +10,9 @@ WorkflowExecutor, tool, ) -from agent_framework.openai import OpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.orchestrations import SequentialBuilder +from azure.identity import AzureCliCredential """ Sample: Sub-Workflow kwargs 
Propagation @@ -26,6 +28,7 @@ - Useful for passing authentication tokens, configuration, or request context Prerequisites: +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. - OpenAI environment variables configured """ @@ -74,7 +77,11 @@ async def main() -> None: print("=" * 70) # Create chat client - client = OpenAIChatClient() + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) # Create an agent with tools that use kwargs inner_agent = client.as_agent( diff --git a/python/samples/getting_started/workflows/composition/sub_workflow_parallel_requests.py b/python/samples/getting_started/workflows/composition/sub_workflow_parallel_requests.py index 70030021ca..b373883db3 100644 --- a/python/samples/getting_started/workflows/composition/sub_workflow_parallel_requests.py +++ b/python/samples/getting_started/workflows/composition/sub_workflow_parallel_requests.py @@ -319,14 +319,14 @@ async def main() -> None: ] # Run the workflow - print(f"🧪 Testing with {len(test_requests)} mixed requests.") - print("šŸš€ Starting main workflow...") + print(f"Testing with {len(test_requests)} mixed requests.") + print("Starting main workflow...") run_result = await main_workflow.run(test_requests) # Handle request info events request_info_events = run_result.get_request_info_events() if request_info_events: - print(f"\nšŸ” Handling {len(request_info_events)} request info events...\n") + print(f"\nHandling {len(request_info_events)} request info events...\n") responses: dict[str, ResourceResponse | PolicyResponse] = {} for event in request_info_events: diff --git a/python/samples/getting_started/workflows/composition/sub_workflow_request_interception.py b/python/samples/getting_started/workflows/composition/sub_workflow_request_interception.py index 7324ecd5c7..4e475f6c3a 100644 --- 
a/python/samples/getting_started/workflows/composition/sub_workflow_request_interception.py +++ b/python/samples/getting_started/workflows/composition/sub_workflow_request_interception.py @@ -73,7 +73,7 @@ async def handle(self, email_address: str, ctx: WorkflowContext[SanitizedEmailRe email address to the next executor in the workflow. """ sanitized = email_address.strip() - print(f"āœ‚ļø Sanitized email address: '{sanitized}'") + print(f"Sanitized email address: '{sanitized}'") await ctx.send_message(SanitizedEmailResult(original=email_address, sanitized=sanitized, is_valid=False)) class EmailFormatValidator(Executor): @@ -91,14 +91,14 @@ async def handle( When the format is valid, it sends the validated email address to the next executor in the workflow. """ if "@" not in partial_result.sanitized or "." not in partial_result.sanitized.split("@")[-1]: - print(f"āŒ Invalid email format: '{partial_result.sanitized}'") + print(f"Invalid email format: '{partial_result.sanitized}'") await ctx.yield_output( SanitizedEmailResult( original=partial_result.original, sanitized=partial_result.sanitized, is_valid=False ) ) return - print(f"āœ… Validated email format: '{partial_result.sanitized}'") + print(f"Validated email format: '{partial_result.sanitized}'") await ctx.send_message( SanitizedEmailResult( original=partial_result.original, sanitized=partial_result.sanitized, is_valid=False @@ -120,7 +120,7 @@ async def handle(self, partial_result: SanitizedEmailResult, ctx: WorkflowContex to an external system to user for validation. 
""" domain = partial_result.sanitized.split("@")[-1] - print(f"šŸ” Validating domain: '{domain}'") + print(f"Validating domain: '{domain}'") self._pending_domains[domain] = partial_result # Send a request to the external system via the request_info mechanism await ctx.request_info(request_data=domain, response_type=bool) @@ -138,14 +138,14 @@ async def handle_domain_validation_response( raise ValueError(f"Received response for unknown domain: '{original_request}'") partial_result = self._pending_domains.pop(original_request) if is_valid: - print(f"āœ… Domain '{original_request}' is valid.") + print(f"Domain '{original_request}' is valid.") await ctx.yield_output( SanitizedEmailResult( original=partial_result.original, sanitized=partial_result.sanitized, is_valid=True ) ) else: - print(f"āŒ Domain '{original_request}' is invalid.") + print(f"Domain '{original_request}' is invalid.") await ctx.yield_output( SanitizedEmailResult( original=partial_result.original, sanitized=partial_result.sanitized, is_valid=False @@ -201,15 +201,15 @@ async def run(self, email: Email, ctx: WorkflowContext[Email | str, bool]) -> No """ recipient = email.recipient if recipient in self._approved_recipients: - print(f"šŸ“§ Recipient '{recipient}' has been previously approved.") + print(f"Recipient '{recipient}' has been previously approved.") await ctx.send_message(email) return if recipient in self._disapproved_recipients: - print(f"🚫 Blocking email to previously disapproved recipient: '{recipient}'") + print(f"Blocking email to previously disapproved recipient: '{recipient}'") await ctx.yield_output(False) return - print(f"šŸ” Validating new recipient email address: '{recipient}'") + print(f"Validating new recipient email address: '{recipient}'") self._pending_emails[recipient] = email await ctx.send_message(recipient) @@ -227,7 +227,7 @@ async def handler_domain_validation_request( raise TypeError(f"Expected domain string, got {type(request.source_event.data)}") domain = 
request.source_event.data is_valid = domain in self._approved_domains - print(f"🌐 External domain validation for '{domain}': {'valid' if is_valid else 'invalid'}") + print(f"External domain validation for '{domain}': {'valid' if is_valid else 'invalid'}") await ctx.send_message(request.create_response(is_valid), target_id=request.executor_id) @handler @@ -243,11 +243,11 @@ async def handle_validation_result(self, result: SanitizedEmailResult, ctx: Work email = self._pending_emails.pop(result.original) email.recipient = result.sanitized # Use the sanitized email address if result.is_valid: - print(f"āœ… Email address '{result.original}' is valid.") + print(f"Email address '{result.original}' is valid.") self._approved_recipients.add(result.original) await ctx.send_message(email) else: - print(f"🚫 Email address '{result.original}' is invalid. Blocking email.") + print(f"Email address '{result.original}' is invalid. Blocking email.") self._disapproved_recipients.add(result.original) await ctx.yield_output(False) @@ -258,9 +258,9 @@ class EmailDelivery(Executor): @handler async def handle(self, email: Email, ctx: WorkflowContext[Never, bool]) -> None: """Simulate sending the email and yield True as the final result.""" - print(f"šŸ“¤ Sending email to '{email.recipient}' with subject '{email.subject}'") + print(f"Sending email to '{email.recipient}' with subject '{email.subject}'") await asyncio.sleep(1) # Simulate network delay - print(f"āœ… Email sent to '{email.recipient}' successfully.") + print(f"Email sent to '{email.recipient}' successfully.") await ctx.yield_output(True) @@ -294,10 +294,10 @@ async def main() -> None: # Execute the workflow for email in test_emails: - print(f"\nšŸš€ Processing email to '{email.recipient}'") + print(f"\nProcessing email to '{email.recipient}'") async for event in workflow.run(email, stream=True): if event.type == "output": - print(f"šŸŽ‰ Final result for '{email.recipient}': {'Delivered' if event.data else 'Blocked'}") + 
print(f"Final result for '{email.recipient}': {'Delivered' if event.data else 'Blocked'}") if __name__ == "__main__": diff --git a/python/samples/getting_started/workflows/control-flow/edge_condition.py b/python/samples/getting_started/workflows/control-flow/edge_condition.py index c7d8cbeb2d..5970dde420 100644 --- a/python/samples/getting_started/workflows/control-flow/edge_condition.py +++ b/python/samples/getting_started/workflows/control-flow/edge_condition.py @@ -14,7 +14,7 @@ WorkflowContext, # Per-run context and event bus executor, # Decorator to declare a Python function as a workflow executor ) -from agent_framework.azure import AzureOpenAIChatClient # Thin client wrapper for Azure OpenAI chat models +from agent_framework.azure import AzureOpenAIResponsesClient # Thin client wrapper for the Azure OpenAI Responses API from azure.identity import AzureCliCredential # Uses your az CLI login for credentials from pydantic import BaseModel # Structured outputs for safer parsing from typing_extensions import Never @@ -32,9 +32,10 @@ - Illustrate how to transform one agent's structured result into a new AgentExecutorRequest for a downstream agent. Prerequisites: +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. - You understand the basics of WorkflowBuilder, executors, and events in this framework. - You know the concept of edge conditions and how they gate routes using a predicate function. -- Azure OpenAI access is configured for AzureOpenAIChatClient. You should be logged in with Azure CLI (AzureCliCredential) +- Azure OpenAI access is configured for AzureOpenAIResponsesClient. You should be logged in with Azure CLI (AzureCliCredential) and have the Azure OpenAI environment variables set as documented in the getting started chat client README. - The sample email resource file exists at workflow/resources/email.txt.
@@ -131,7 +132,11 @@ async def to_email_assistant_request( def create_spam_detector_agent() -> Agent: """Helper to create a spam detection agent.""" # AzureCliCredential uses your current az login. This avoids embedding secrets in code. - return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + return AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ).as_agent( instructions=( "You are a spam detection assistant that identifies spam emails. " "Always return JSON with fields is_spam (bool), reason (string), and email_content (string). " @@ -145,7 +150,11 @@ def create_spam_detector_agent() -> Agent: def create_email_assistant_agent() -> Agent: """Helper to create an email assistant agent.""" # AzureCliCredential uses your current az login. This avoids embedding secrets in code. - return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + return AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ).as_agent( instructions=( "You are an email assistant that helps users draft professional responses to emails. " "Your input may be a JSON object that includes 'email_content'; base your reply on that content. " @@ -178,7 +187,7 @@ async def main() -> None: # Read Email content from the sample resource file. # This keeps the sample deterministic since the model sees the same email every run. 
- email_path = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "resources", "email.txt") + email_path = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "resources", "email.txt") # noqa: ASYNC240 with open(email_path) as email_file: # noqa: ASYNC230 email = email_file.read() diff --git a/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py b/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py index f6c32c7882..05002a2f0c 100644 --- a/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py +++ b/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py @@ -13,13 +13,14 @@ AgentExecutor, AgentExecutorRequest, AgentExecutorResponse, + AgentResponseUpdate, Message, WorkflowBuilder, WorkflowContext, WorkflowEvent, executor, ) -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import AzureCliCredential from pydantic import BaseModel from typing_extensions import Never @@ -42,6 +43,7 @@ - Apply conditional persistence logic (short vs long emails). Prerequisites: +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. - Familiarity with WorkflowBuilder, executors, edges, and events. - Understanding of multi-selection edge groups and how their selection function maps to target ids. - Experience with workflow state for persisting and reusing objects. 
@@ -177,12 +179,16 @@ async def handle_uncertain(analysis: AnalysisResult, ctx: WorkflowContext[Never, async def database_access(analysis: AnalysisResult, ctx: WorkflowContext[Never, str]) -> None: # Simulate DB writes for email and analysis (and summary if present) await asyncio.sleep(0.05) - await ctx.add_event(DatabaseEvent(f"Email {analysis.email_id} saved to database.")) + await ctx.add_event(DatabaseEvent(type="database_event", data=f"Email {analysis.email_id} saved to database.")) # type: ignore def create_email_analysis_agent() -> Agent: """Creates the email analysis agent.""" - return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + return AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ).as_agent( instructions=( "You are a spam detection assistant that identifies spam emails. " "Always return JSON with fields 'spam_decision' (one of NotSpam, Spam, Uncertain) " @@ -195,7 +201,11 @@ def create_email_analysis_agent() -> Agent: def create_email_assistant_agent() -> Agent: """Creates the email assistant agent.""" - return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + return AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ).as_agent( instructions=("You are an email assistant that helps users draft responses to emails with professionalism."), name="email_assistant_agent", default_options={"response_format": EmailResponse}, @@ -204,7 +214,11 @@ def create_email_assistant_agent() -> Agent: def create_email_summary_agent() -> Agent: """Creates the email summary agent.""" - return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + return AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + 
deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ).as_agent( instructions=("You are an assistant that helps users summarize emails."), name="email_summary_agent", default_options={"response_format": EmailSummaryModel}, @@ -267,6 +281,10 @@ def select_targets(analysis: AnalysisResult, target_ids: list[str]) -> list[str] if isinstance(event, DatabaseEvent): print(f"{event}") elif event.type == "output": + if isinstance(event.data, AgentResponseUpdate): + # Agent executors stream token-level updates. Skip these to keep sample + # output focused on final workflow results. + continue print(f"Workflow output: {event.data}") """ diff --git a/python/samples/getting_started/workflows/control-flow/simple_loop.py b/python/samples/getting_started/workflows/control-flow/simple_loop.py index f0232863bc..571d90c70b 100644 --- a/python/samples/getting_started/workflows/control-flow/simple_loop.py +++ b/python/samples/getting_started/workflows/control-flow/simple_loop.py @@ -1,6 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio +import os from enum import Enum from agent_framework import ( @@ -8,13 +9,14 @@ AgentExecutor, AgentExecutorRequest, AgentExecutorResponse, + AgentResponseUpdate, Executor, Message, WorkflowBuilder, WorkflowContext, handler, ) -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import AzureCliCredential """ @@ -26,7 +28,8 @@ - The workflow completes when the correct number is guessed. Prerequisites: -- Azure AI/ Azure OpenAI for `AzureOpenAIChatClient` agent. +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. +- Azure AI/ Azure OpenAI for `AzureOpenAIResponsesClient` agent. - Authentication via `azure-identity` — uses `AzureCliCredential()` (run `az login`). 
""" @@ -116,7 +119,11 @@ async def parse(self, response: AgentExecutorResponse, ctx: WorkflowContext[Numb def create_judge_agent() -> Agent: """Create a judge agent that evaluates guesses.""" - return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + return AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ).as_agent( instructions=("You strictly respond with one of: MATCHED, ABOVE, BELOW based on the given target and guess."), name="judge_agent", ) @@ -140,12 +147,16 @@ async def main(): .build() ) - # Step 2: Run the workflow and print the events. + # Step 2: Run the workflow with concise streaming output. iterations = 0 async for event in workflow.run(NumberSignal.INIT, stream=True): if event.type == "executor_completed" and event.executor_id == "guess_number": iterations += 1 - print(f"Event: {event}") + elif event.type == "output": + if isinstance(event.data, AgentResponseUpdate): + # Agent executor streams token-level updates; skip to avoid noisy logs. + continue + print(f"Workflow output: {event.data}") # This is essentially a binary search, so the number of iterations should be logarithmic. # The maximum number of iterations is [log2(range size)]. For a range of 1 to 100, this is log2(100) which is 7. 
diff --git a/python/samples/getting_started/workflows/control-flow/switch_case_edge_group.py b/python/samples/getting_started/workflows/control-flow/switch_case_edge_group.py index 43c5a2354d..994796e096 100644 --- a/python/samples/getting_started/workflows/control-flow/switch_case_edge_group.py +++ b/python/samples/getting_started/workflows/control-flow/switch_case_edge_group.py @@ -18,7 +18,7 @@ WorkflowContext, # Per-run context and event bus executor, # Decorator to turn a function into a workflow executor ) -from agent_framework.azure import AzureOpenAIChatClient # Thin client for Azure OpenAI chat models +from agent_framework.azure import AzureOpenAIResponsesClient # Thin client for the Azure OpenAI Responses API from azure.identity import AzureCliCredential # Uses your az CLI login for credentials from pydantic import BaseModel # Structured outputs with validation from typing_extensions import Never @@ -39,9 +39,10 @@ - Use ctx.yield_output() to provide workflow results - the workflow completes when idle with no pending work. Prerequisites: +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. - Familiarity with WorkflowBuilder, executors, edges, and events. - Understanding of switch-case edge groups and how Case and Default are evaluated in order. -- Working Azure OpenAI configuration for AzureOpenAIChatClient, with Azure CLI login and required environment variables. +- Working Azure OpenAI configuration for AzureOpenAIResponsesClient, with Azure CLI login and required environment variables. - Access to workflow/resources/ambiguous_email.txt, or accept the inline fallback string.
""" @@ -154,7 +155,11 @@ async def handle_uncertain(detection: DetectionResult, ctx: WorkflowContext[Neve def create_spam_detection_agent() -> Agent: """Create and return the spam detection agent.""" - return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + return AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ).as_agent( instructions=( "You are a spam detection assistant that identifies spam emails. " "Be less confident in your assessments. " @@ -168,7 +173,11 @@ def create_spam_detection_agent() -> Agent: def create_email_assistant_agent() -> Agent: """Create and return the email assistant agent.""" - return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + return AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ).as_agent( instructions=("You are an email assistant that helps users draft responses to emails with professionalism."), name="email_assistant_agent", default_options={"response_format": EmailResponse}, diff --git a/python/samples/getting_started/workflows/declarative/customer_support/main.py b/python/samples/getting_started/workflows/declarative/customer_support/main.py index 7b47fa2930..cbb4eefb9e 100644 --- a/python/samples/getting_started/workflows/declarative/customer_support/main.py +++ b/python/samples/getting_started/workflows/declarative/customer_support/main.py @@ -23,10 +23,11 @@ import asyncio import json import logging +import os import uuid from pathlib import Path -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.declarative import ( AgentExternalInputRequest, AgentExternalInputResponse, @@ -164,7 +165,11 @@ async def main() -> None: plugin 
= TicketingPlugin() # Create Azure OpenAI client - client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) # Create agents with structured outputs self_service_agent = client.as_agent( @@ -260,7 +265,9 @@ async def main() -> None: async for event in stream: if event.type == "output": data = event.data - source_id = getattr(event, "source_executor_id", "") + # source_executor_id is only available on request_info events. + # For output events, use executor_id to identify the emitting node. + source_id = event.executor_id or "" # Check if this is a SendActivity output (activity text from log_ticket, log_route, etc.) if "log_" in source_id.lower(): diff --git a/python/samples/getting_started/workflows/declarative/deep_research/main.py b/python/samples/getting_started/workflows/declarative/deep_research/main.py index d949a210f9..a36f3f49a1 100644 --- a/python/samples/getting_started/workflows/declarative/deep_research/main.py +++ b/python/samples/getting_started/workflows/declarative/deep_research/main.py @@ -22,9 +22,10 @@ """ import asyncio +import os from pathlib import Path -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.declarative import WorkflowFactory from azure.identity import AzureCliCredential from pydantic import BaseModel, Field @@ -122,7 +123,11 @@ class ManagerResponse(BaseModel): async def main() -> None: """Run the deep research workflow.""" # Create Azure OpenAI client - client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) # Create agents 
research_agent = client.as_agent( diff --git a/python/samples/getting_started/workflows/declarative/function_tools/main.py b/python/samples/getting_started/workflows/declarative/function_tools/main.py index 056cf419a4..e6169b68e3 100644 --- a/python/samples/getting_started/workflows/declarative/function_tools/main.py +++ b/python/samples/getting_started/workflows/declarative/function_tools/main.py @@ -6,12 +6,13 @@ """ import asyncio +import os from dataclasses import dataclass from pathlib import Path from typing import Annotated, Any from agent_framework import FileCheckpointStorage, tool -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework_declarative import ExternalInputRequest, ExternalInputResponse, WorkflowFactory from azure.identity import AzureCliCredential from pydantic import Field @@ -62,7 +63,11 @@ def get_item_price(name: Annotated[str, Field(description="Menu item name")]) -> async def main(): # Create agent with tools - client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) menu_agent = client.as_agent( name="MenuAgent", instructions="Answer questions about menu items, specials, and prices.", diff --git a/python/samples/getting_started/workflows/declarative/marketing/main.py b/python/samples/getting_started/workflows/declarative/marketing/main.py index 7e5b5ec7c2..f59b19947e 100644 --- a/python/samples/getting_started/workflows/declarative/marketing/main.py +++ b/python/samples/getting_started/workflows/declarative/marketing/main.py @@ -13,9 +13,10 @@ """ import asyncio +import os from pathlib import Path -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.declarative 
import WorkflowFactory from azure.identity import AzureCliCredential @@ -49,7 +50,11 @@ async def main() -> None: """Run the marketing workflow with real Azure AI agents.""" - client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) analyst_agent = client.as_agent( name="AnalystAgent", diff --git a/python/samples/getting_started/workflows/declarative/student_teacher/main.py b/python/samples/getting_started/workflows/declarative/student_teacher/main.py index 28c9ab0446..1984265aa8 100644 --- a/python/samples/getting_started/workflows/declarative/student_teacher/main.py +++ b/python/samples/getting_started/workflows/declarative/student_teacher/main.py @@ -15,14 +15,15 @@ Prerequisites: - Azure OpenAI deployment with chat completion capability - Environment variables: - AZURE_OPENAI_ENDPOINT: Your Azure OpenAI endpoint - AZURE_OPENAI_DEPLOYMENT_NAME: Your deployment name (optional, defaults to gpt-4o) + AZURE_AI_PROJECT_ENDPOINT: Your Azure AI Foundry Agent Service (V2) project endpoint + AZURE_AI_MODEL_DEPLOYMENT_NAME: Your model deployment name """ import asyncio +import os from pathlib import Path -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.declarative import WorkflowFactory from azure.identity import AzureCliCredential @@ -51,7 +52,11 @@ async def main() -> None: """Run the student-teacher workflow with real Azure AI agents.""" # Create chat client - client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) # Create student and teacher agents student_agent = 
client.as_agent( diff --git a/python/samples/getting_started/workflows/human-in-the-loop/agents_with_HITL.py b/python/samples/getting_started/workflows/human-in-the-loop/agents_with_HITL.py index 6cf292ce4f..4398b269d9 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/agents_with_HITL.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/agents_with_HITL.py @@ -1,6 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio +import os from collections.abc import AsyncIterable from dataclasses import dataclass, field @@ -17,7 +18,7 @@ handler, response_handler, ) -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import AzureCliCredential from typing_extensions import Never @@ -37,7 +38,8 @@ - Handling human feedback and routing it to the appropriate agents. Prerequisites: -- Azure OpenAI configured for AzureOpenAIChatClient with required environment variables. +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. +- Azure OpenAI configured for AzureOpenAIResponsesClient with required environment variables. - Authentication via azure-identity. Run `az login` before executing. 
""" @@ -161,13 +163,21 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str async def main() -> None: """Run the workflow and bridge human feedback between two agents.""" # Create the agents - writer_agent = AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + writer_agent = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ).as_agent( name="writer_agent", instructions=("You are a marketing writer."), tool_choice="required", ) - final_editor_agent = AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + final_editor_agent = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ).as_agent( name="final_editor_agent", instructions=( "You are an editor who polishes marketing copy after human approval. " diff --git a/python/samples/getting_started/workflows/human-in-the-loop/agents_with_approval_requests.py b/python/samples/getting_started/workflows/human-in-the-loop/agents_with_approval_requests.py index c0d935bc03..1a70ff977d 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/agents_with_approval_requests.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/agents_with_approval_requests.py @@ -2,6 +2,7 @@ import asyncio import json +import os from dataclasses import dataclass from typing import Annotated @@ -15,7 +16,8 @@ handler, tool, ) -from agent_framework.openai import OpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient +from azure.identity import AzureCliCredential from typing_extensions import Never """ @@ -45,6 +47,7 @@ - Handling approval requests during workflow execution. 
Prerequisites: +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. - Azure AI Agent Service configured, along with the required environment variables. - Authentication via azure-identity. Use AzureCliCredential and run az login before executing the sample. - Basic familiarity with WorkflowBuilder, edges, events, request_info events (type='request_info'), and streaming runs. @@ -193,13 +196,20 @@ def __init__(self, special_email_addresses: set[str]) -> None: @handler async def preprocess(self, email: Email, ctx: WorkflowContext[str]) -> None: """Preprocess the incoming email.""" - message = str(email) + email_payload = ( + f"Incoming email:\n" + f"From: {email.sender}\n" + f"Subject: {email.subject}\n" + f"Body: {email.body}" + ) + message = email_payload if email.sender in self.special_email_addresses: note = ( - "Pay special attention to this sender. This email is very important. " - "Gather relevant information from all previous emails within my team before responding." + "Priority sender context: this message is business-critical. " + "If additional context is needed, use available tools to retrieve only the minimum relevant " + "prior team communication related to this request." ) - message = f"{note}\n\n{message}" + message = f"{note}\n\n{email_payload}" await ctx.send_message(message) @@ -215,7 +225,11 @@ async def conclude_workflow( async def main() -> None: # Create agent - email_writer_agent = OpenAIChatClient().as_agent( + email_writer_agent = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ).as_agent( name="EmailWriter", instructions=("You are an excellent email assistant. 
You respond to incoming emails."), # tools with `approval_mode="always_require"` will trigger approval requests diff --git a/python/samples/getting_started/workflows/human-in-the-loop/agents_with_declaration_only_tools.py b/python/samples/getting_started/workflows/human-in-the-loop/agents_with_declaration_only_tools.py index b203c2d522..a9a68593be 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/agents_with_declaration_only_tools.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/agents_with_declaration_only_tools.py @@ -16,16 +16,18 @@ 4. The workflow resumes — the agent sees the tool result and finishes. Prerequisites: + - AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. - Azure OpenAI endpoint configured via environment variables. - `az login` for AzureCliCredential. """ import asyncio import json +import os from typing import Any from agent_framework import Content, FunctionTool, WorkflowBuilder -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import AzureCliCredential # A declaration-only tool: the schema is sent to the LLM, but the framework @@ -45,7 +47,11 @@ async def main() -> None: - agent = AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + agent = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ).as_agent( name="WeatherBot", instructions=( "You are a helpful weather assistant. 
" diff --git a/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py b/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py index d6b8161f98..3ceb284ed6 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py @@ -1,6 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio +import os from collections.abc import AsyncIterable from dataclasses import dataclass @@ -16,7 +17,7 @@ handler, response_handler, ) -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import AzureCliCredential from pydantic import BaseModel @@ -37,7 +38,8 @@ - Driving the loop in application code with run and responses parameter. Prerequisites: -- Azure OpenAI configured for AzureOpenAIChatClient with required environment variables. +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. +- Azure OpenAI configured for AzureOpenAIResponsesClient with required environment variables. - Authentication via azure-identity. Use AzureCliCredential and run az login before executing the sample. - Basic familiarity with WorkflowBuilder, executors, edges, events, and streaming runs. 
""" @@ -183,7 +185,11 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str async def main() -> None: """Run the human-in-the-loop guessing game workflow.""" # Create agent and executor - guessing_agent = AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + guessing_agent = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ).as_agent( name="GuessingAgent", instructions=( "You guess a number between 1 and 10. " diff --git a/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py b/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py index 1dd78a1d76..e1722cd9ef 100644 --- a/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py +++ b/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py @@ -1,19 +1,21 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio +import os from dataclasses import dataclass from agent_framework import ( AgentExecutor, # Wraps a ChatAgent as an Executor for use in workflows AgentExecutorRequest, # The message bundle sent to an AgentExecutor AgentExecutorResponse, # The structured result returned by an AgentExecutor + AgentResponseUpdate, Executor, # Base class for custom Python executors Message, # Chat message structure WorkflowBuilder, # Fluent builder for wiring the workflow graph WorkflowContext, # Per run context and event bus handler, # Decorator to mark an Executor method as invokable ) -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import AzureCliCredential # Uses your az CLI login for credentials from typing_extensions import Never @@ -29,8 +31,9 @@ - Fan in by collecting a list of AgentExecutorResponse objects and reducing them to a single result. 
Prerequisites: +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. - Familiarity with WorkflowBuilder, executors, edges, events, and streaming runs. -- Azure OpenAI access configured for AzureOpenAIChatClient. Log in with Azure CLI and set any required environment variables. +- Azure OpenAI access configured for AzureOpenAIResponsesClient. Log in with Azure CLI and set any required environment variables. - Comfort reading AgentExecutorResponse.agent_response.text for assistant output aggregation. """ @@ -87,13 +90,31 @@ async def aggregate(self, results: list[AgentExecutorResponse], ctx: WorkflowCon await ctx.yield_output(consolidated) +def render_live_streams(buffers: dict[str, str], order: list[str], completed: set[str]) -> None: + """Render concurrent agent streams in separate sections.""" + # Clear terminal and move cursor to top-left for a live dashboard effect. + print("\033[2J\033[H", end="") + print("=== Expert Streams (Live) ===") + print("Concurrent agent updates are shown below as they stream.\n") + for agent_id in order: + state = "completed" if agent_id in completed else "streaming" + print(f"[{agent_id}] ({state})") + print(buffers.get(agent_id, "")) + print("-" * 80) + print("", end="", flush=True) + + async def main() -> None: # 1) Create executor and agent instances dispatcher = DispatchToExperts(id="dispatcher") aggregator = AggregateInsights(id="aggregator") researcher = AgentExecutor( - AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ).as_agent( instructions=( "You're an expert market and product researcher. Given a prompt, provide concise, factual insights," " opportunities, and risks." 
@@ -102,7 +123,11 @@ async def main() -> None: ) ) marketer = AgentExecutor( - AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ).as_agent( instructions=( "You're a creative marketing strategist. Craft compelling value propositions and target messaging" " aligned to the prompt." @@ -111,7 +136,11 @@ async def main() -> None: ) ) legal = AgentExecutor( - AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ).as_agent( instructions=( "You're a cautious legal/compliance reviewer. Highlight constraints, disclaimers, and policy concerns" " based on the prompt." @@ -128,18 +157,32 @@ async def main() -> None: .build() ) - # 3) Run with a single prompt and print progress plus the final consolidated output + # 3) Run with a single prompt and render live expert streams plus final consolidated output. + expert_order = ["researcher", "marketer", "legal"] + expert_buffers: dict[str, str] = {expert_id: "" for expert_id in expert_order} + completed_experts: set[str] = set() + final_output: str | None = None + async for event in workflow.run( "We are launching a new budget-friendly electric bike for urban commuters.", stream=True ): - if event.type == "executor_invoked": - # Show when executors are invoked and completed for lightweight observability. 
- print(f"{event.executor_id} invoked") - elif event.type == "executor_completed": - print(f"{event.executor_id} completed") + if event.type == "executor_completed" and event.executor_id in expert_buffers: + completed_experts.add(event.executor_id) + render_live_streams(expert_buffers, expert_order, completed_experts) elif event.type == "output": - print("===== Final Aggregated Output =====") - print(event.data) + if isinstance(event.data, AgentResponseUpdate): + executor_id = event.executor_id or "" + if executor_id in expert_buffers: + expert_buffers[executor_id] += event.data.text + render_live_streams(expert_buffers, expert_order, completed_experts) + continue + + if event.executor_id == "aggregator": + final_output = str(event.data) + + if final_output: + print("\n=== Final Consolidated Output ===\n") + print(final_output) if __name__ == "__main__": diff --git a/python/samples/getting_started/workflows/state-management/state_with_agents.py b/python/samples/getting_started/workflows/state-management/state_with_agents.py index 97b9fab240..e09ab23eda 100644 --- a/python/samples/getting_started/workflows/state-management/state_with_agents.py +++ b/python/samples/getting_started/workflows/state-management/state_with_agents.py @@ -1,6 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio +import os from dataclasses import dataclass from pathlib import Path from typing import Any @@ -15,7 +16,7 @@ WorkflowContext, executor, ) -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import AzureCliCredential from pydantic import BaseModel from typing_extensions import Never @@ -34,7 +35,8 @@ - Compose agent backed executors with function style executors and yield the final output when the workflow completes. Prerequisites: -- Azure OpenAI configured for AzureOpenAIChatClient with required environment variables. 
+- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. +- Azure OpenAI configured for AzureOpenAIResponsesClient with required environment variables. - Authentication via azure-identity. Use AzureCliCredential and run az login before executing the sample. - Familiarity with WorkflowBuilder, executors, conditional edges, and streaming runs. """ @@ -156,7 +158,11 @@ async def handle_spam(detection: DetectionResult, ctx: WorkflowContext[Never, st def create_spam_detection_agent() -> Agent: """Creates a spam detection agent.""" - return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + return AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ).as_agent( instructions=( "You are a spam detection assistant that identifies spam emails. " "Always return JSON with fields is_spam (bool) and reason (string)." @@ -169,7 +175,11 @@ def create_spam_detection_agent() -> Agent: def create_email_assistant_agent() -> Agent: """Creates an email assistant agent.""" - return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + return AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ).as_agent( instructions=( "You are an email assistant that helps users draft responses to emails with professionalism. " "Return JSON with a single field 'response' containing the drafted reply." 
diff --git a/python/samples/getting_started/workflows/state-management/workflow_kwargs.py b/python/samples/getting_started/workflows/state-management/workflow_kwargs.py index 5125464a1a..65dc4f8535 100644 --- a/python/samples/getting_started/workflows/state-management/workflow_kwargs.py +++ b/python/samples/getting_started/workflows/state-management/workflow_kwargs.py @@ -2,11 +2,13 @@ import asyncio import json +import os from typing import Annotated, Any, cast from agent_framework import Message, tool -from agent_framework.openai import OpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.orchestrations import SequentialBuilder +from azure.identity import AzureCliCredential from pydantic import Field """ @@ -22,6 +24,7 @@ - Works with Sequential, Concurrent, GroupChat, Handoff, and Magentic patterns Prerequisites: +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. - OpenAI environment variables configured """ @@ -74,7 +77,11 @@ async def main() -> None: print("=" * 70) # Create chat client - client = OpenAIChatClient() + client = AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ) # Create agent with tools that use kwargs agent = client.as_agent( diff --git a/python/samples/getting_started/workflows/visualization/concurrent_with_visualization.py b/python/samples/getting_started/workflows/visualization/concurrent_with_visualization.py index e9e042020d..c7c2ce2d0c 100644 --- a/python/samples/getting_started/workflows/visualization/concurrent_with_visualization.py +++ b/python/samples/getting_started/workflows/visualization/concurrent_with_visualization.py @@ -1,6 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. 
import asyncio +import os from dataclasses import dataclass from agent_framework import ( @@ -14,7 +15,7 @@ WorkflowViz, handler, ) -from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import AzureCliCredential from typing_extensions import Never @@ -27,7 +28,8 @@ - Visualization: generate Mermaid and GraphViz representations via `WorkflowViz` and optionally export SVG. Prerequisites: -- Azure AI/ Azure OpenAI for `AzureOpenAIChatClient` agents. +- AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. +- Azure AI/ Azure OpenAI for `AzureOpenAIResponsesClient` agents. - Authentication via `azure-identity` — uses `AzureCliCredential()` (run `az login`). - For visualization export: `pip install graphviz>=0.20.0` and install GraphViz binaries. """ @@ -90,7 +92,11 @@ async def main() -> None: # Create agent instances researcher = AgentExecutor( - AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ).as_agent( instructions=( "You're an expert market and product researcher. Given a prompt, provide concise, factual insights," " opportunities, and risks." @@ -100,7 +106,11 @@ async def main() -> None: ) marketer = AgentExecutor( - AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ).as_agent( instructions=( "You're a creative marketing strategist. Craft compelling value propositions and target messaging" " aligned to the prompt." 
@@ -110,7 +120,11 @@ async def main() -> None: ) legal = AgentExecutor( - AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + AzureOpenAIResponsesClient( + project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], + deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], + credential=AzureCliCredential(), + ).as_agent( instructions=( "You're a cautious legal/compliance reviewer. Highlight constraints, disclaimers, and policy concerns" " based on the prompt." From dcc4905e878444dd568ab81ca6b8645a3b368441 Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Thu, 12 Feb 2026 12:48:35 +0900 Subject: [PATCH 2/3] Updates --- .../agent_framework/_workflows/_agent_executor.py | 12 +++--------- .../samples/getting_started/orchestrations/README.md | 9 +++++---- .../group-chat/group_chat_agent_manager.py | 2 +- .../group-chat/group_chat_philosophical_debate.py | 2 +- .../group-chat/group_chat_simple_selector.py | 2 +- .../group-chat/group_chat_workflow_as_agent.py | 2 +- .../handoff/handoff_with_code_interpreter_file.py | 4 ++-- .../orchestrations/magentic/magentic_checkpoint.py | 2 +- python/samples/getting_started/workflows/README.md | 10 +++++----- .../workflows/agents/custom_agent_executors.py | 2 +- .../workflows/agents/workflow_as_agent_kwargs.py | 2 +- .../agents/workflow_as_agent_reflection_pattern.py | 4 ++-- .../agents/workflow_as_agent_with_thread.py | 2 +- .../checkpoint/workflow_as_agent_checkpoint.py | 2 +- .../workflows/composition/sub_workflow_kwargs.py | 2 +- .../workflows/control-flow/edge_condition.py | 2 +- .../workflows/state-management/workflow_kwargs.py | 2 +- 17 files changed, 29 insertions(+), 34 deletions(-) diff --git a/python/packages/core/agent_framework/_workflows/_agent_executor.py b/python/packages/core/agent_framework/_workflows/_agent_executor.py index e96e802e64..85bc236982 100644 --- a/python/packages/core/agent_framework/_workflows/_agent_executor.py +++ b/python/packages/core/agent_framework/_workflows/_agent_executor.py 
@@ -397,7 +397,7 @@ def _prepare_agent_run_args(raw_run_kwargs: dict[str, Any]) -> tuple[dict[str, A options: dict[str, Any] = {} if options_from_workflow is not None: if isinstance(options_from_workflow, Mapping): - for key, value in cast(Mapping[Any, Any], options_from_workflow).items(): + for key, value in options_from_workflow.items(): if isinstance(key, str): options[key] = value else: @@ -409,20 +409,14 @@ def _prepare_agent_run_args(raw_run_kwargs: dict[str, Any]) -> tuple[dict[str, A existing_additional_args = options.get("additional_function_arguments") if isinstance(existing_additional_args, Mapping): - additional_args = { - key: value - for key, value in cast(Mapping[Any, Any], existing_additional_args).items() - if isinstance(key, str) - } + additional_args = {key: value for key, value in existing_additional_args.items() if isinstance(key, str)} else: additional_args = {} if workflow_additional_args is not None: if isinstance(workflow_additional_args, Mapping): additional_args.update({ - key: value - for key, value in cast(Mapping[Any, Any], workflow_additional_args).items() - if isinstance(key, str) + key: value for key, value in workflow_additional_args.items() if isinstance(key, str) }) else: logger.warning( diff --git a/python/samples/getting_started/orchestrations/README.md b/python/samples/getting_started/orchestrations/README.md index a346e660ca..8ea5470038 100644 --- a/python/samples/getting_started/orchestrations/README.md +++ b/python/samples/getting_started/orchestrations/README.md @@ -94,8 +94,9 @@ These may appear in event streams (executor_invoked/executor_completed). They're ## Environment Variables -- **AzureOpenAIChatClient**: Set Azure OpenAI environment variables as documented [here](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/chat_client/README.md#environment-variables). 
+Orchestration samples that use `AzureOpenAIResponsesClient` expect: -- **OpenAI** (used in some orchestration samples): - - [OpenAIChatClient env vars](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/agents/openai_chat_client/README.md) - - [OpenAIResponsesClient env vars](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/agents/openai_responses_client/README.md) +- `AZURE_AI_PROJECT_ENDPOINT` (Azure AI Foundry Agent Service (V2) project endpoint) +- `AZURE_AI_MODEL_DEPLOYMENT_NAME` (model deployment name) + +These values are passed directly into the client constructor via `os.environ[...]` in sample code. diff --git a/python/samples/getting_started/orchestrations/group-chat/group_chat_agent_manager.py b/python/samples/getting_started/orchestrations/group-chat/group_chat_agent_manager.py index 813056a956..baeaa79197 100644 --- a/python/samples/getting_started/orchestrations/group-chat/group_chat_agent_manager.py +++ b/python/samples/getting_started/orchestrations/group-chat/group_chat_agent_manager.py @@ -23,7 +23,7 @@ Prerequisites: - AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. -- OpenAI environment variables configured for AzureOpenAIResponsesClient +- Environment variables configured for AzureOpenAIResponsesClient """ ORCHESTRATOR_AGENT_INSTRUCTIONS = """ diff --git a/python/samples/getting_started/orchestrations/group-chat/group_chat_philosophical_debate.py b/python/samples/getting_started/orchestrations/group-chat/group_chat_philosophical_debate.py index 12c14a8c93..7cb522fb25 100644 --- a/python/samples/getting_started/orchestrations/group-chat/group_chat_philosophical_debate.py +++ b/python/samples/getting_started/orchestrations/group-chat/group_chat_philosophical_debate.py @@ -39,7 +39,7 @@ Prerequisites: - AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. 
-- OpenAI environment variables configured for AzureOpenAIResponsesClient +- Environment variables configured for AzureOpenAIResponsesClient """ diff --git a/python/samples/getting_started/orchestrations/group-chat/group_chat_simple_selector.py b/python/samples/getting_started/orchestrations/group-chat/group_chat_simple_selector.py index 903943e35f..b57f00cc1b 100644 --- a/python/samples/getting_started/orchestrations/group-chat/group_chat_simple_selector.py +++ b/python/samples/getting_started/orchestrations/group-chat/group_chat_simple_selector.py @@ -22,7 +22,7 @@ Prerequisites: - AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. -- OpenAI environment variables configured for AzureOpenAIResponsesClient +- Environment variables configured for AzureOpenAIResponsesClient """ diff --git a/python/samples/getting_started/orchestrations/group-chat/group_chat_workflow_as_agent.py b/python/samples/getting_started/orchestrations/group-chat/group_chat_workflow_as_agent.py index 2c25f0498e..f5da892d6d 100644 --- a/python/samples/getting_started/orchestrations/group-chat/group_chat_workflow_as_agent.py +++ b/python/samples/getting_started/orchestrations/group-chat/group_chat_workflow_as_agent.py @@ -17,7 +17,7 @@ Prerequisites: - AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. -- OpenAI environment variables configured for `AzureOpenAIResponsesClient` and `AzureOpenAIResponsesClient`. +- Environment variables configured for `AzureOpenAIResponsesClient`. 
""" diff --git a/python/samples/getting_started/orchestrations/handoff/handoff_with_code_interpreter_file.py b/python/samples/getting_started/orchestrations/handoff/handoff_with_code_interpreter_file.py index 9a4f0ba76a..9801d64888 100644 --- a/python/samples/getting_started/orchestrations/handoff/handoff_with_code_interpreter_file.py +++ b/python/samples/getting_started/orchestrations/handoff/handoff_with_code_interpreter_file.py @@ -70,8 +70,8 @@ def _handle_events(events: list[WorkflowEvent]) -> tuple[list[WorkflowEvent[Hand file_id = annotation["file_id"] # type: ignore file_ids.append(file_id) print(f"[Found file annotation: file_id={file_id}]") - elif event.type == "output": - conversation = cast(list[Message], event.data) + elif isinstance(data, list): + conversation = cast(list[Message], data) if isinstance(conversation, list): print("\n=== Final Conversation Snapshot ===") for message in conversation: diff --git a/python/samples/getting_started/orchestrations/magentic/magentic_checkpoint.py b/python/samples/getting_started/orchestrations/magentic/magentic_checkpoint.py index 54827a2d3b..87442598b4 100644 --- a/python/samples/getting_started/orchestrations/magentic/magentic_checkpoint.py +++ b/python/samples/getting_started/orchestrations/magentic/magentic_checkpoint.py @@ -36,7 +36,7 @@ Prerequisites: - AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. -- OpenAI environment variables configured for `AzureOpenAIResponsesClient`. +- Environment variables configured for `AzureOpenAIResponsesClient`. 
""" TASK = ( diff --git a/python/samples/getting_started/workflows/README.md b/python/samples/getting_started/workflows/README.md index dd2b4b06e1..1e132f9475 100644 --- a/python/samples/getting_started/workflows/README.md +++ b/python/samples/getting_started/workflows/README.md @@ -160,9 +160,9 @@ Sequential orchestration uses a few small adapter nodes for plumbing: ### Environment Variables -- **AzureOpenAIChatClient**: Set Azure OpenAI environment variables as documented [here](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/chat_client/README.md#environment-variables). - These variables are required for samples that construct `AzureOpenAIChatClient` +Workflow samples that use `AzureOpenAIResponsesClient` expect: -- **OpenAI** (used in orchestration samples): - - [OpenAIChatClient env vars](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/agents/openai_chat_client/README.md) - - [OpenAIResponsesClient env vars](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/agents/openai_responses_client/README.md) +- `AZURE_AI_PROJECT_ENDPOINT` (Azure AI Foundry Agent Service (V2) project endpoint) +- `AZURE_AI_MODEL_DEPLOYMENT_NAME` (model deployment name) + +These values are passed directly into the client constructor via `os.getenv()` in sample code. diff --git a/python/samples/getting_started/workflows/agents/custom_agent_executors.py b/python/samples/getting_started/workflows/agents/custom_agent_executors.py index fa727efdf9..3d6b34a2eb 100644 --- a/python/samples/getting_started/workflows/agents/custom_agent_executors.py +++ b/python/samples/getting_started/workflows/agents/custom_agent_executors.py @@ -25,7 +25,7 @@ pattern with typed inputs and typed WorkflowContext[T] outputs, connect executors with the fluent WorkflowBuilder, and finish by yielding outputs from the terminal node. 
-Note: When an agent is passed to a workflow, the workflow essenatially wrap the agent in a more sophisticated executor. +Note: When an agent is passed to a workflow, the workflow wraps the agent in a more sophisticated executor. Prerequisites: - AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. diff --git a/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py b/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py index a58095f3f0..a6fb51f8d8 100644 --- a/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py +++ b/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py @@ -31,7 +31,7 @@ Prerequisites: - AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. -- OpenAI environment variables configured +- Environment variables configured """ diff --git a/python/samples/getting_started/workflows/agents/workflow_as_agent_reflection_pattern.py b/python/samples/getting_started/workflows/agents/workflow_as_agent_reflection_pattern.py index ed885d5ade..e0dde3eacf 100644 --- a/python/samples/getting_started/workflows/agents/workflow_as_agent_reflection_pattern.py +++ b/python/samples/getting_started/workflows/agents/workflow_as_agent_reflection_pattern.py @@ -191,7 +191,7 @@ async def main() -> None: print("Building workflow with Worker ↔ Reviewer cycle...") worker = Worker( id="worker", - chat_client=AzureOpenAIResponsesClient( + client=AzureOpenAIResponsesClient( project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], credential=AzureCliCredential(), @@ -199,7 +199,7 @@ async def main() -> None: ) reviewer = Reviewer( id="reviewer", - chat_client=AzureOpenAIResponsesClient( + client=AzureOpenAIResponsesClient( project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"], 
credential=AzureCliCredential(), diff --git a/python/samples/getting_started/workflows/agents/workflow_as_agent_with_thread.py b/python/samples/getting_started/workflows/agents/workflow_as_agent_with_thread.py index d2b8234ac0..7e56f6618a 100644 --- a/python/samples/getting_started/workflows/agents/workflow_as_agent_with_thread.py +++ b/python/samples/getting_started/workflows/agents/workflow_as_agent_with_thread.py @@ -34,7 +34,7 @@ Prerequisites: - AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. -- OpenAI environment variables configured for AzureOpenAIResponsesClient +- Environment variables configured for AzureOpenAIResponsesClient """ diff --git a/python/samples/getting_started/workflows/checkpoint/workflow_as_agent_checkpoint.py b/python/samples/getting_started/workflows/checkpoint/workflow_as_agent_checkpoint.py index 3593ad6a97..2e0362dd73 100644 --- a/python/samples/getting_started/workflows/checkpoint/workflow_as_agent_checkpoint.py +++ b/python/samples/getting_started/workflows/checkpoint/workflow_as_agent_checkpoint.py @@ -21,7 +21,7 @@ Prerequisites: - AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. -- OpenAI environment variables configured for AzureOpenAIResponsesClient +- Environment variables configured for AzureOpenAIResponsesClient """ import asyncio diff --git a/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py b/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py index ce19c27f95..bd1c60ecfa 100644 --- a/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py +++ b/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py @@ -29,7 +29,7 @@ Prerequisites: - AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. 
-- OpenAI environment variables configured +- Environment variables configured """ diff --git a/python/samples/getting_started/workflows/control-flow/edge_condition.py b/python/samples/getting_started/workflows/control-flow/edge_condition.py index 5970dde420..01fdd9a256 100644 --- a/python/samples/getting_started/workflows/control-flow/edge_condition.py +++ b/python/samples/getting_started/workflows/control-flow/edge_condition.py @@ -36,7 +36,7 @@ - You understand the basics of WorkflowBuilder, executors, and events in this framework. - You know the concept of edge conditions and how they gate routes using a predicate function. - Azure OpenAI access is configured for AzureOpenAIResponsesClient. You should be logged in with Azure CLI (AzureCliCredential) -and have the Azure OpenAI environment variables set as documented in the getting started chat client README. +and have the Foundry V2 Project environment variables set as documented in the getting started chat client README. - The sample email resource file exists at workflow/resources/email.txt. High level flow: diff --git a/python/samples/getting_started/workflows/state-management/workflow_kwargs.py b/python/samples/getting_started/workflows/state-management/workflow_kwargs.py index 65dc4f8535..3728ae6ff9 100644 --- a/python/samples/getting_started/workflows/state-management/workflow_kwargs.py +++ b/python/samples/getting_started/workflows/state-management/workflow_kwargs.py @@ -25,7 +25,7 @@ Prerequisites: - AZURE_AI_PROJECT_ENDPOINT must be your Azure AI Foundry Agent Service (V2) project endpoint. 
-- OpenAI environment variables configured +- Environment variables configured """ From 9cc2c4091a90525d6651da1ef13495fb85e33470 Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Thu, 12 Feb 2026 12:52:47 +0900 Subject: [PATCH 3/3] add comment --- .../_group_chat.py | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_group_chat.py b/python/packages/orchestrations/agent_framework_orchestrations/_group_chat.py index 384bbea687..747d6efb10 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_group_chat.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_group_chat.py @@ -396,7 +396,15 @@ async def _handle_response( @staticmethod def _parse_last_json_object(text: str) -> AgentOrchestrationOutput | None: - """Parse one or more concatenated JSON values and return the last object.""" + """Best-effort parser for concatenated JSON and return the last object. + + Stop-gap workaround: + In some runs, the orchestrator manager text can contain multiple JSON objects + concatenated back-to-back (for example: `{...}{...}`), which causes + `model_validate_json` to fail with trailing characters. Until the root cause + is fully understood and fixed, decode sequential top-level JSON values and + validate the last one. + """ decoder = json.JSONDecoder() index = 0 parsed: Any | None = None @@ -414,7 +422,12 @@ def _parse_last_json_object(text: str) -> AgentOrchestrationOutput | None: @classmethod def _parse_agent_output(cls, agent_response: Any) -> AgentOrchestrationOutput: - """Parse manager output, handling both structured values and concatenated JSON text.""" + """Parse manager output with defensive fallbacks. + + Preferred path is structured output (`agent_response.value`) when available. + If only text is available, first attempt strict JSON parsing and then apply a + temporary concatenated-JSON fallback as a stop-gap. 
+ """ try: structured_value = agent_response.value except Exception: @@ -441,6 +454,8 @@ def _parse_agent_output(cls, agent_response: Any) -> AgentOrchestrationOutput: last_error = ex try: + # Stop-gap fallback for rare cases where multiple JSON objects are + # returned in one text payload (concatenated with no separator). parsed = cls._parse_last_json_object(candidate) if parsed is not None: return parsed