From df70940690e3524b7d0ef970504153381be216e9 Mon Sep 17 00:00:00 2001 From: Eduard van Valkenburg Date: Sat, 7 Feb 2026 07:37:46 +0100 Subject: [PATCH 01/16] [BREAKING] Rename ChatAgent -> Agent, ChatMessage -> Message, ChatClientProtocol -> SupportsChatGetResponse Simplify the public API by removing redundant 'Chat' prefix from core types: - ChatAgent -> Agent - RawChatAgent -> RawAgent - ChatMessage -> Message - ChatClientProtocol -> SupportsChatGetResponse Also renamed internal WorkflowMessage (was Message in _runner_context) to avoid collision. No backward compatibility aliases - this is a clean breaking change. --- python/CHANGELOG.md | 11 +- python/CODING_STANDARD.md | 16 +- python/README.md | 20 +- .../a2a/agent_framework_a2a/_agent.py | 27 +- python/packages/a2a/tests/test_a2a_agent.py | 34 +- python/packages/ag-ui/README.md | 10 +- .../ag-ui/agent_framework_ag_ui/_client.py | 30 +- .../_message_adapters.py | 50 +- .../_orchestration/_helpers.py | 8 +- .../_orchestration/_tooling.py | 6 +- .../ag-ui/agent_framework_ag_ui/_run.py | 22 +- .../agent_framework_ag_ui_examples/README.md | 26 +- .../agents/document_writer_agent.py | 6 +- .../agents/human_in_the_loop_agent.py | 8 +- .../agents/recipe_agent.py | 6 +- .../agents/research_assistant_agent.py | 6 +- .../agents/simple_agent.py | 8 +- .../agents/task_planner_agent.py | 6 +- .../agents/task_steps_agent.py | 18 +- .../agents/ui_generator_agent.py | 6 +- .../agents/weather_agent.py | 8 +- .../server/api/backend_tool_rendering.py | 4 +- .../server/main.py | 6 +- .../packages/ag-ui/getting_started/README.md | 24 +- .../ag-ui/getting_started/client_advanced.py | 8 +- .../getting_started/client_with_agent.py | 14 +- .../packages/ag-ui/getting_started/server.py | 4 +- python/packages/ag-ui/tests/ag_ui/conftest.py | 30 +- .../ag-ui/tests/ag_ui/test_ag_ui_client.py | 38 +- .../ag_ui/test_agent_wrapper_comprehensive.py | 98 ++-- .../ag-ui/tests/ag_ui/test_endpoint.py | 48 +- .../ag-ui/tests/ag_ui/test_helpers.py | 
30 +- .../tests/ag_ui/test_message_adapters.py | 26 +- .../ag-ui/tests/ag_ui/test_message_hygiene.py | 32 +- python/packages/ag-ui/tests/ag_ui/test_run.py | 16 +- .../tests/ag_ui/test_structured_output.py | 26 +- .../ag-ui/tests/ag_ui/test_tooling.py | 12 +- .../packages/ag-ui/tests/ag_ui/test_utils.py | 4 +- .../agent_framework_anthropic/_chat_client.py | 20 +- .../anthropic/tests/test_anthropic_client.py | 72 +-- python/packages/azure-ai-search/AGENTS.md | 2 +- .../_search_provider.py | 12 +- .../tests/test_search_provider.py | 36 +- .../_agent_provider.py | 58 ++- .../agent_framework_azure_ai/_chat_client.py | 28 +- .../agent_framework_azure_ai/_client.py | 20 +- .../_project_provider.py | 38 +- .../azure-ai/tests/test_agent_provider.py | 46 +- .../tests/test_azure_ai_agent_client.py | 100 ++-- .../azure-ai/tests/test_azure_ai_client.py | 42 +- .../packages/azure-ai/tests/test_provider.py | 14 +- .../packages/azurefunctions/tests/test_app.py | 20 +- .../azurefunctions/tests/test_entities.py | 4 +- .../tests/test_orchestration.py | 6 +- .../agent_framework_bedrock/_chat_client.py | 12 +- .../bedrock/samples/bedrock_sample.py | 4 +- .../bedrock/tests/test_bedrock_client.py | 8 +- .../bedrock/tests/test_bedrock_settings.py | 10 +- python/packages/chatkit/README.md | 8 +- .../agent_framework_chatkit/_converter.py | 92 ++-- .../packages/chatkit/tests/test_converter.py | 20 +- .../claude/agent_framework_claude/_agent.py | 14 +- .../claude/tests/test_claude_agent.py | 10 +- .../agent_framework_copilotstudio/_agent.py | 20 +- .../copilotstudio/tests/test_copilot_agent.py | 6 +- python/packages/core/AGENTS.md | 20 +- python/packages/core/README.md | 20 +- .../packages/core/agent_framework/_agents.py | 108 ++-- .../packages/core/agent_framework/_clients.py | 48 +- python/packages/core/agent_framework/_mcp.py | 38 +- .../packages/core/agent_framework/_memory.py | 20 +- .../core/agent_framework/_middleware.py | 56 +-- .../core/agent_framework/_serialization.py | 20 +- 
.../packages/core/agent_framework/_threads.py | 52 +- .../packages/core/agent_framework/_tools.py | 42 +- .../packages/core/agent_framework/_types.py | 138 +++--- .../agent_framework/_workflows/__init__.py | 4 +- .../core/agent_framework/_workflows/_agent.py | 56 +-- .../_workflows/_agent_executor.py | 20 +- .../_workflows/_conversation_history.py | 8 +- .../_workflows/_conversation_state.py | 12 +- .../_workflows/_edge_runner.py | 20 +- .../agent_framework/_workflows/_executor.py | 20 +- .../_workflows/_message_utils.py | 24 +- .../agent_framework/_workflows/_runner.py | 8 +- .../_workflows/_runner_context.py | 46 +- .../_workflows/_typing_utils.py | 12 +- .../agent_framework/_workflows/_workflow.py | 14 +- .../_workflows/_workflow_context.py | 4 +- .../_workflows/_workflow_executor.py | 4 +- .../core/agent_framework/observability.py | 30 +- .../openai/_assistant_provider.py | 58 +-- .../openai/_assistants_client.py | 6 +- .../agent_framework/openai/_chat_client.py | 14 +- .../openai/_responses_client.py | 14 +- python/packages/core/tests/azure/conftest.py | 4 +- .../azure/test_azure_assistants_client.py | 68 +-- .../tests/azure/test_azure_chat_client.py | 108 ++-- .../azure/test_azure_responses_client.py | 28 +- python/packages/core/tests/core/conftest.py | 28 +- .../packages/core/tests/core/test_agents.py | 246 +++++----- .../core/test_as_tool_kwargs_propagation.py | 40 +- .../packages/core/tests/core/test_clients.py | 34 +- .../core/test_function_invocation_logic.py | 290 +++++------ .../test_kwargs_propagation_to_ai_function.py | 30 +- python/packages/core/tests/core/test_mcp.py | 12 +- .../packages/core/tests/core/test_memory.py | 18 +- .../core/tests/core/test_middleware.py | 136 ++--- .../core/test_middleware_context_result.py | 50 +- .../tests/core/test_middleware_with_agent.py | 300 ++++++------ .../tests/core/test_middleware_with_chat.py | 58 +-- .../core/tests/core/test_observability.py | 54 +- .../packages/core/tests/core/test_threads.py | 58 +-- 
python/packages/core/tests/core/test_types.py | 126 ++--- .../tests/openai/test_assistant_provider.py | 30 +- .../openai/test_openai_assistants_client.py | 104 ++-- .../tests/openai/test_openai_chat_client.py | 58 ++- .../openai/test_openai_chat_client_base.py | 48 +- .../openai/test_openai_responses_client.py | 72 +-- .../tests/workflow/test_agent_executor.py | 20 +- .../test_agent_executor_tool_calls.py | 38 +- .../workflow/test_agent_run_event_typing.py | 6 +- .../core/tests/workflow/test_agent_utils.py | 4 +- .../core/tests/workflow/test_executor.py | 7 +- .../tests/workflow/test_full_conversation.py | 18 +- .../core/tests/workflow/test_typing_utils.py | 8 +- .../core/tests/workflow/test_workflow.py | 5 +- .../tests/workflow/test_workflow_agent.py | 100 ++-- .../tests/workflow/test_workflow_builder.py | 8 +- .../tests/workflow/test_workflow_kwargs.py | 36 +- .../agent_framework_declarative/_loader.py | 80 +-- .../_workflows/_actions_agents.py | 32 +- .../_workflows/_declarative_base.py | 6 +- .../_workflows/_executors_agents.py | 22 +- .../_workflows/_factory.py | 2 +- .../_workflows/_state.py | 2 +- .../declarative/tests/test_graph_coverage.py | 4 +- python/packages/devui/AGENTS.md | 2 +- python/packages/devui/README.md | 12 +- .../devui/agent_framework_devui/__init__.py | 4 +- .../agent_framework_devui/_conversations.py | 16 +- .../devui/agent_framework_devui/_discovery.py | 4 +- .../devui/agent_framework_devui/_executor.py | 30 +- .../devui/agent_framework_devui/_mapper.py | 20 +- .../devui/agent_framework_devui/_utils.py | 16 +- .../agent_framework_devui/ui/assets/index.js | 2 +- .../features/workflow/run-workflow-button.tsx | 2 +- .../workflow/schema-form-renderer.tsx | 6 +- .../features/workflow/workflow-input-form.tsx | 4 +- .../src/components/layout/debug-panel.tsx | 2 +- .../frontend/src/types/agent-framework.ts | 4 +- .../devui/frontend/src/types/index.ts | 4 +- .../frontend/src/utils/workflow-utils.ts | 12 +- 
python/packages/devui/tests/devui/conftest.py | 76 +-- .../devui/tests/devui/test_cleanup_hooks.py | 12 +- .../devui/tests/devui/test_conversations.py | 14 +- .../devui/tests/devui/test_discovery.py | 8 +- .../devui/tests/devui/test_execution.py | 28 +- .../packages/devui/tests/devui/test_mapper.py | 10 +- .../tests/devui/test_multimodal_workflow.py | 26 +- .../tests/devui/test_schema_generation.py | 8 +- .../packages/devui/tests/devui/test_server.py | 12 +- .../_durable_agent_state.py | 14 +- .../agent_framework_durabletask/_entities.py | 6 +- .../agent_framework_durabletask/_executors.py | 8 +- .../agent_framework_durabletask/_shim.py | 10 +- .../agent_framework_durabletask/_worker.py | 4 +- .../tests/test_durable_entities.py | 6 +- .../packages/durabletask/tests/test_shim.py | 12 +- .../samples/foundry_local_agent.py | 6 +- .../tests/test_foundry_local_client.py | 4 +- .../agent_framework_github_copilot/_agent.py | 16 +- .../tests/test_github_copilot_agent.py | 6 +- .../lab/gaia/samples/azure_ai_agent.py | 6 +- .../packages/lab/gaia/samples/openai_agent.py | 6 +- python/packages/lab/lightning/README.md | 2 +- .../lab/lightning/samples/train_math_agent.py | 4 +- .../lab/lightning/tests/test_lightning.py | 6 +- python/packages/lab/tau2/README.md | 6 +- .../_message_utils.py | 10 +- .../_sliding_window.py | 10 +- .../agent_framework_lab_tau2/_tau2_utils.py | 12 +- .../tau2/agent_framework_lab_tau2/runner.py | 34 +- .../lab/tau2/tests/test_message_utils.py | 48 +- .../lab/tau2/tests/test_sliding_window.py | 44 +- .../lab/tau2/tests/test_tau2_utils.py | 26 +- python/packages/mem0/AGENTS.md | 2 +- .../mem0/agent_framework_mem0/_provider.py | 18 +- .../mem0/tests/test_mem0_context_provider.py | 56 +-- .../agent_framework_ollama/_chat_client.py | 22 +- .../ollama/tests/test_ollama_chat_client.py | 68 +-- .../_base_group_chat_orchestrator.py | 68 +-- .../_concurrent.py | 79 ++- .../_group_chat.py | 251 ++++++---- .../_handoff.py | 79 ++- .../_magentic.py | 463 
+++++++++++++----- .../_orchestration_request_info.py | 12 +- .../_orchestration_state.py | 10 +- .../_orchestrator_helpers.py | 14 +- .../_sequential.py | 84 ++-- .../orchestrations/tests/test_concurrent.py | 26 +- .../orchestrations/tests/test_group_chat.py | 394 +++++++++------ .../orchestrations/tests/test_handoff.py | 26 +- .../orchestrations/tests/test_magentic.py | 134 ++--- .../tests/test_orchestration_request_info.py | 22 +- .../orchestrations/tests/test_sequential.py | 30 +- python/packages/purview/AGENTS.md | 2 +- python/packages/purview/README.md | 20 +- .../agent_framework_purview/_middleware.py | 20 +- .../agent_framework_purview/_processor.py | 6 +- .../purview/tests/test_chat_middleware.py | 36 +- .../packages/purview/tests/test_middleware.py | 66 +-- .../packages/purview/tests/test_processor.py | 48 +- python/packages/redis/AGENTS.md | 2 +- .../_chat_message_store.py | 64 +-- .../redis/agent_framework_redis/_provider.py | 18 +- .../tests/test_redis_chat_message_store.py | 28 +- .../redis/tests/test_redis_provider.py | 38 +- python/samples/autogen-migration/README.md | 4 +- .../orchestrations/04_magentic_one.py | 6 +- .../single_agent/01_basic_assistant_agent.py | 6 +- .../02_assistant_agent_with_tool.py | 2 +- python/samples/concepts/response_stream.py | 10 +- python/samples/concepts/tools/README.md | 12 +- python/samples/concepts/typed_options.py | 18 +- .../demos/chatkit-integration/README.md | 6 +- .../samples/demos/chatkit-integration/app.py | 19 +- .../agent_with_text_search_rag/main.py | 10 +- .../demos/m365-agent/m365_agent_demo/app.py | 4 +- .../workflow_evaluation/create_workflow.py | 10 +- .../azure_ai/azure_ai_provider_methods.py | 6 +- .../azure_ai_with_application_endpoint.py | 4 +- ..._ai_with_code_interpreter_file_download.py | 65 ++- ...i_with_code_interpreter_file_generation.py | 2 +- .../azure_ai/azure_ai_with_existing_agent.py | 2 +- .../azure_ai/azure_ai_with_hosted_mcp.py | 8 +- .../agents/azure_ai_agent/README.md | 2 +- 
.../azure_ai_with_hosted_mcp.py | 4 +- .../azure_ai_agent/azure_ai_with_local_mcp.py | 2 +- .../azure_ai_with_multiple_tools.py | 4 +- .../agents/azure_openai/README.md | 6 +- .../azure_assistants_with_code_interpreter.py | 4 +- ...zure_assistants_with_existing_assistant.py | 4 +- .../azure_assistants_with_function_tools.py | 8 +- .../azure_assistants_with_thread.py | 10 +- .../azure_chat_client_with_function_tools.py | 8 +- .../azure_chat_client_with_thread.py | 10 +- ...responses_client_code_interpreter_files.py | 4 +- .../azure_responses_client_image_analysis.py | 4 +- ..._responses_client_with_code_interpreter.py | 4 +- ...azure_responses_client_with_file_search.py | 8 +- ...re_responses_client_with_function_tools.py | 8 +- .../azure_responses_client_with_hosted_mcp.py | 28 +- .../azure_responses_client_with_local_mcp.py | 4 +- .../azure_responses_client_with_thread.py | 10 +- .../getting_started/agents/custom/README.md | 4 +- .../agents/custom/custom_agent.py | 23 +- .../agents/ollama/ollama_chat_multimodal.py | 4 +- .../getting_started/agents/openai/README.md | 4 +- .../openai_chat_client_with_function_tools.py | 8 +- .../openai_chat_client_with_local_mcp.py | 4 +- .../openai/openai_chat_client_with_thread.py | 10 +- .../openai_chat_client_with_web_search.py | 4 +- .../openai/openai_responses_client_basic.py | 21 +- .../openai_responses_client_image_analysis.py | 4 +- ..._responses_client_with_code_interpreter.py | 4 +- ...nses_client_with_code_interpreter_files.py | 4 +- ...penai_responses_client_with_file_search.py | 4 +- ...ai_responses_client_with_function_tools.py | 8 +- ...openai_responses_client_with_hosted_mcp.py | 28 +- .../openai_responses_client_with_local_mcp.py | 6 +- .../openai_responses_client_with_thread.py | 10 +- ...openai_responses_client_with_web_search.py | 4 +- .../azure_functions/02_multi_agent/README.md | 4 +- .../getting_started/chat_client/README.md | 2 +- .../chat_client/custom_chat_client.py | 8 +- .../context_providers/README.md | 
8 +- .../aggregate_context_provider.py | 28 +- .../azure_ai_search/README.md | 6 +- .../azure_ai_with_search_context_agentic.py | 4 +- .../azure_ai_with_search_context_semantic.py | 4 +- .../context_providers/redis/redis_basics.py | 10 +- .../simple_context_provider.py | 12 +- .../getting_started/declarative/README.md | 2 +- .../samples/getting_started/devui/README.md | 6 +- .../devui/azure_responses_agent/agent.py | 4 +- .../devui/foundry_agent/agent.py | 4 +- .../getting_started/devui/in_memory_mode.py | 6 +- .../devui/weather_agent_azure/agent.py | 29 +- .../durabletask/01_single_agent/worker.py | 6 +- .../durabletask/02_multi_agent/worker.py | 4 +- .../03_single_agent_streaming/worker.py | 6 +- .../worker.py | 6 +- .../worker.py | 10 +- .../worker.py | 10 +- .../worker.py | 6 +- .../self_reflection/self_reflection.py | 12 +- .../getting_started/mcp/mcp_api_key_auth.py | 4 +- .../getting_started/mcp/mcp_github_pat.py | 4 +- .../middleware/chat_middleware.py | 8 +- .../middleware/class_based_middleware.py | 4 +- .../middleware/middleware_termination.py | 10 +- .../override_result_with_middleware.py | 50 +- .../multimodal_input/azure_chat_multimodal.py | 4 +- .../azure_responses_multimodal.py | 6 +- .../openai_chat_multimodal.py | 8 +- .../observability/advanced_zero_code.py | 4 +- .../observability/agent_observability.py | 4 +- .../agent_with_foundry_tracing.py | 6 +- .../azure_ai_agent_observability.py | 4 +- .../configure_otel_providers_with_env_var.py | 4 +- ...onfigure_otel_providers_with_parameters.py | 4 +- .../getting_started/orchestrations/README.md | 4 +- .../orchestrations/concurrent_agents.py | 6 +- .../concurrent_custom_agent_executors.py | 22 +- .../concurrent_custom_aggregator.py | 8 +- .../group_chat_agent_manager.py | 14 +- .../group_chat_philosophical_debate.py | 24 +- .../group_chat_simple_selector.py | 14 +- .../orchestrations/handoff_autonomous.py | 8 +- .../orchestrations/handoff_simple.py | 8 +- .../handoff_with_code_interpreter_file.py | 10 
+- .../orchestrations/magentic.py | 18 +- .../orchestrations/magentic_checkpoint.py | 14 +- .../magentic_human_plan_review.py | 12 +- .../orchestrations/sequential_agents.py | 8 +- .../sequential_custom_executors.py | 16 +- .../getting_started/purview_agent/README.md | 4 +- .../purview_agent/sample_purview_agent.py | 26 +- .../custom_chat_message_store_thread.py | 10 +- .../tools/function_tool_with_approval.py | 12 +- ...function_tool_with_approval_and_threads.py | 10 +- .../getting_started/workflows/README.md | 2 +- .../workflows/_start-here/step3_streaming.py | 4 +- .../agents/azure_chat_agents_and_executor.py | 4 +- ...re_chat_agents_tool_calls_with_feedback.py | 18 +- .../agents/custom_agent_executors.py | 22 +- .../agents/group_chat_workflow_as_agent.py | 6 +- .../agents/handoff_workflow_as_agent.py | 8 +- .../agents/magentic_workflow_as_agent.py | 8 +- .../workflow_as_agent_human_in_the_loop.py | 4 +- .../workflow_as_agent_reflection_pattern.py | 26 +- .../checkpoint_with_human_in_the_loop.py | 6 +- ...ff_with_tool_approval_checkpoint_resume.py | 14 +- .../composition/sub_workflow_kwargs.py | 4 +- .../workflows/control-flow/edge_condition.py | 14 +- .../multi_selection_edge_group.py | 16 +- .../workflows/control-flow/simple_loop.py | 8 +- .../control-flow/switch_case_edge_group.py | 12 +- .../human-in-the-loop/agents_with_HITL.py | 12 +- .../concurrent_request_info.py | 6 +- .../group_chat_request_info.py | 4 +- .../guessing_game_with_human_input.py | 6 +- .../sequential_request_info.py | 4 +- .../parallelism/fan_out_fan_in_edges.py | 4 +- .../state-management/state_with_agents.py | 12 +- .../state-management/workflow_kwargs.py | 6 +- .../concurrent_builder_tool_approval.py | 8 +- .../group_chat_builder_tool_approval.py | 4 +- .../sequential_builder_tool_approval.py | 4 +- .../concurrent_with_visualization.py | 4 +- .../semantic-kernel-migration/README.md | 2 +- .../01_basic_chat_completion.py | 6 +- .../01_basic_responses_agent.py | 6 +- 
.../02_responses_agent_with_tool.py | 4 +- .../03_responses_agent_structured_output.py | 4 +- .../orchestrations/concurrent_basic.py | 16 +- .../orchestrations/group_chat.py | 21 +- .../orchestrations/handoff.py | 6 +- .../orchestrations/magentic.py | 12 +- .../orchestrations/sequential.py | 10 +- 371 files changed, 4723 insertions(+), 4317 deletions(-) diff --git a/python/CHANGELOG.md b/python/CHANGELOG.md index 366d2e76d8..6c7e264395 100644 --- a/python/CHANGELOG.md +++ b/python/CHANGELOG.md @@ -10,6 +10,13 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added - **agent-framework-core**: Add long-running agents and background responses support with `ContinuationToken` TypedDict, `background` option in `OpenAIResponsesOptions`, and continuation token propagation through response types ([#2478](https://github.com/microsoft/agent-framework/issues/2478)) +### Changed + +- **agent-framework-core**: [BREAKING] Renamed core types for simpler API: + - `ChatAgent` → `Agent` + - `RawChatAgent` → `RawAgent` + - `ChatMessage` → `Message` + - `ChatClientProtocol` → `SupportsChatGetResponse` ## [1.0.0b260130] - 2026-01-30 @@ -272,7 +279,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed - **agent-framework-core**: [BREAKING] Support Magentic agent tool call approvals and plan stalling HITL behavior (#2569) -- **agent-framework-core**: [BREAKING] Standardize orchestration outputs as list of `ChatMessage`; allow agent as group chat manager (#2291) +- **agent-framework-core**: [BREAKING] Standardize orchestration outputs as list of `Message`; allow agent as group chat manager (#2291) - **agent-framework-core**: [BREAKING] Respond with `AgentRunResponse` including serialized structured output (#2285) - **observability**: Use `executor_id` and `edge_group_id` as span names for clearer traces (#2538) - **agent-framework-devui**: Add multimodal input support for workflows and refactor chat input 
(#2593) @@ -318,7 +325,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - **agent-framework-core**: Fix tool execution bleed-over in aiohttp/Bot Framework scenarios ([#2314](https://github.com/microsoft/agent-framework/pull/2314)) - **agent-framework-core**: `@ai_function` now correctly handles `self` parameter ([#2266](https://github.com/microsoft/agent-framework/pull/2266)) - **agent-framework-core**: Resolve string annotations in `FunctionExecutor` ([#2308](https://github.com/microsoft/agent-framework/pull/2308)) -- **agent-framework-core**: Langfuse observability captures ChatAgent system instructions ([#2316](https://github.com/microsoft/agent-framework/pull/2316)) +- **agent-framework-core**: Langfuse observability captures Agent system instructions ([#2316](https://github.com/microsoft/agent-framework/pull/2316)) - **agent-framework-core**: Incomplete URL substring sanitization fix ([#2274](https://github.com/microsoft/agent-framework/pull/2274)) - **observability**: Handle datetime serialization in tool results ([#2248](https://github.com/microsoft/agent-framework/pull/2248)) diff --git a/python/CODING_STANDARD.md b/python/CODING_STANDARD.md index 16f34be54c..c05f6c2e18 100644 --- a/python/CODING_STANDARD.md +++ b/python/CODING_STANDARD.md @@ -118,10 +118,10 @@ Prefer attributes over inheritance when parameters are mostly the same: ```python # ✅ Preferred - using attributes -from agent_framework import ChatMessage +from agent_framework import Message -user_msg = ChatMessage("user", ["Hello, world!"]) -asst_msg = ChatMessage("assistant", ["Hello, world!"]) +user_msg = Message("user", ["Hello, world!"]) +asst_msg = Message("assistant", ["Hello, world!"]) # ❌ Not preferred - unnecessary inheritance from agent_framework import UserMessage, AssistantMessage @@ -157,7 +157,7 @@ The package follows a flat import structure: - **Core**: Import directly from `agent_framework` ```python - from agent_framework import ChatAgent, tool 
+ from agent_framework import Agent, tool ``` - **Components**: Import from `agent_framework.` @@ -381,7 +381,7 @@ def create_client( Use Google-style docstrings for all public APIs: ```python -def create_agent(name: str, chat_client: ChatClientProtocol) -> Agent: +def create_agent(name: str, chat_client: SupportsChatGetResponse) -> Agent: """Create a new agent with the specified configuration. Args: @@ -409,10 +409,10 @@ Define `__all__` in each module to explicitly declare the public API. Avoid usin ```python # ✅ Preferred - explicit __all__ and imports -__all__ = ["ChatAgent", "ChatMessage", "ChatResponse"] +__all__ = ["Agent", "Message", "ChatResponse"] -from ._agents import ChatAgent -from ._types import ChatMessage, ChatResponse +from ._agents import Agent +from ._types import Message, ChatResponse # ❌ Avoid - star imports from ._agents import * diff --git a/python/README.md b/python/README.md index 80cb85e4f4..4e7ac202f1 100644 --- a/python/README.md +++ b/python/README.md @@ -78,11 +78,11 @@ Create agents and invoke them directly: ```python import asyncio -from agent_framework import ChatAgent +from agent_framework import Agent from agent_framework.openai import OpenAIChatClient async def main(): - agent = ChatAgent( + agent = Agent( chat_client=OpenAIChatClient(), instructions=""" 1) A robot may not injure a human being... 
@@ -106,15 +106,15 @@ You can use the chat client classes directly for advanced workflows: ```python import asyncio -from agent_framework import ChatMessage +from agent_framework import Message from agent_framework.openai import OpenAIChatClient async def main(): client = OpenAIChatClient() messages = [ - ChatMessage("system", ["You are a helpful assistant."]), - ChatMessage("user", ["Write a haiku about Agent Framework."]) + Message("system", ["You are a helpful assistant."]), + Message("user", ["Write a haiku about Agent Framework."]) ] response = await client.get_response(messages) @@ -140,7 +140,7 @@ import asyncio from typing import Annotated from random import randint from pydantic import Field -from agent_framework import ChatAgent +from agent_framework import Agent from agent_framework.openai import OpenAIChatClient @@ -162,7 +162,7 @@ def get_menu_specials() -> str: async def main(): - agent = ChatAgent( + agent = Agent( chat_client=OpenAIChatClient(), instructions="You are a helpful assistant that can provide weather and restaurant information.", tools=[get_weather, get_menu_specials] @@ -189,19 +189,19 @@ Coordinate multiple agents to collaborate on complex tasks using orchestration p ```python import asyncio -from agent_framework import ChatAgent +from agent_framework import Agent from agent_framework.openai import OpenAIChatClient async def main(): # Create specialized agents - writer = ChatAgent( + writer = Agent( chat_client=OpenAIChatClient(), name="Writer", instructions="You are a creative content writer. Generate and refine slogans based on feedback." ) - reviewer = ChatAgent( + reviewer = Agent( chat_client=OpenAIChatClient(), name="Reviewer", instructions="You are a critical reviewer. Provide detailed feedback on proposed slogans." 
diff --git a/python/packages/a2a/agent_framework_a2a/_agent.py b/python/packages/a2a/agent_framework_a2a/_agent.py index 8c6fffa66a..b93ca1516b 100644 --- a/python/packages/a2a/agent_framework_a2a/_agent.py +++ b/python/packages/a2a/agent_framework_a2a/_agent.py @@ -18,7 +18,6 @@ FilePart, FileWithBytes, FileWithUri, - Message, Task, TaskIdParams, TaskQueryParams, @@ -34,9 +33,9 @@ AgentResponseUpdate, AgentThread, BaseAgent, - ChatMessage, Content, ContinuationToken, + Message, ResponseStream, normalize_messages, prepend_agent_framework_to_user_agent, @@ -209,7 +208,7 @@ async def __aexit__( @overload def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: Literal[False] = ..., thread: AgentThread | None = None, @@ -221,7 +220,7 @@ def run( @overload def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: Literal[True], thread: AgentThread | None = None, @@ -232,7 +231,7 @@ def run( def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: bool = False, thread: AgentThread | None = None, @@ -268,7 +267,7 @@ def run( response = ResponseStream( self._map_a2a_stream(a2a_stream, background=background), - finalizer=lambda updates: AgentResponse.from_updates(list(updates)), + finalizer=AgentResponse.from_updates, ) if stream: return response @@ -380,7 +379,7 @@ async def poll_task(self, continuation_token: A2AContinuationToken) -> AgentResp def _prepare_message_for_a2a(self, message: ChatMessage) -> A2AMessage: """Prepare a ChatMessage for the A2A protocol. 
- Transforms Agent Framework ChatMessage objects into A2A protocol Messages by: + Transforms Agent Framework Message objects into A2A protocol Messages by: - Converting all message contents to appropriate A2A Part types - Mapping text content to TextPart objects - Converting file references (URI/data/hosted_file) to FilePart objects @@ -389,7 +388,7 @@ def _prepare_message_for_a2a(self, message: ChatMessage) -> A2AMessage: """ parts: list[A2APart] = [] if not message.contents: - raise ValueError("ChatMessage.contents is empty; cannot convert to A2AMessage.") + raise ValueError("Message.contents is empty; cannot convert to A2AMessage.") # Process ALL contents for content in message.contents: @@ -511,9 +510,9 @@ def _parse_contents_from_a2a(self, parts: Sequence[A2APart]) -> list[Content]: raise ValueError(f"Unknown Part kind: {inner_part.kind}") return contents - def _parse_messages_from_task(self, task: Task) -> list[ChatMessage]: + def _parse_messages_from_task(self, task: Task) -> list[Message]: """Parse A2A Task artifacts into ChatMessages with ASSISTANT role.""" - messages: list[ChatMessage] = [] + messages: list[Message] = [] if task.artifacts is not None: for artifact in task.artifacts: @@ -523,7 +522,7 @@ def _parse_messages_from_task(self, task: Task) -> list[ChatMessage]: history_item = task.history[-1] contents = self._parse_contents_from_a2a(history_item.parts) messages.append( - ChatMessage( + Message( role="assistant" if history_item.role == A2ARole.agent else "user", contents=contents, raw_representation=history_item, @@ -532,10 +531,10 @@ def _parse_messages_from_task(self, task: Task) -> list[ChatMessage]: return messages - def _parse_message_from_artifact(self, artifact: Artifact) -> ChatMessage: - """Parse A2A Artifact into ChatMessage using part contents.""" + def _parse_message_from_artifact(self, artifact: Artifact) -> Message: + """Parse A2A Artifact into Message using part contents.""" contents = self._parse_contents_from_a2a(artifact.parts) 
- return ChatMessage( + return Message( role="assistant", contents=contents, raw_representation=artifact, diff --git a/python/packages/a2a/tests/test_a2a_agent.py b/python/packages/a2a/tests/test_a2a_agent.py index a76583324d..61123df5ab 100644 --- a/python/packages/a2a/tests/test_a2a_agent.py +++ b/python/packages/a2a/tests/test_a2a_agent.py @@ -12,19 +12,19 @@ DataPart, FilePart, FileWithUri, - Message, Part, Task, TaskState, TaskStatus, TextPart, ) +from a2a.types import Message as A2AMessage from a2a.types import Role as A2ARole from agent_framework import ( AgentResponse, AgentResponseUpdate, - ChatMessage, Content, + Message, ) from agent_framework.a2a import A2AAgent from pytest import fixture, raises @@ -49,7 +49,7 @@ def add_message_response(self, message_id: str, text: str, role: str = "agent") text_part = Part(root=TextPart(text=text)) # Create actual Message instance - message = Message( + message = A2AMessage( message_id=message_id, role=A2ARole.agent if role == "agent" else A2ARole.user, parts=[text_part] ) self.responses.append(message) @@ -281,7 +281,7 @@ def test_parse_message_from_artifact(a2a_agent: A2AAgent) -> None: result = a2a_agent._parse_message_from_artifact(artifact) - assert isinstance(result, ChatMessage) + assert isinstance(result, Message) assert result.role == "assistant" assert result.text == "Artifact content" assert result.raw_representation == artifact @@ -324,9 +324,9 @@ def test_parse_contents_from_a2a_conversion(a2a_agent: A2AAgent) -> None: def test_prepare_message_for_a2a_with_error_content(a2a_agent: A2AAgent) -> None: """Test _prepare_message_for_a2a with ErrorContent.""" - # Create ChatMessage with ErrorContent + # Create Message with ErrorContent error_content = Content.from_error(message="Test error message") - message = ChatMessage(role="user", contents=[error_content]) + message = Message(role="user", contents=[error_content]) # Convert to A2A message a2a_message = a2a_agent._prepare_message_for_a2a(message) @@ -339,9 
+339,9 @@ def test_prepare_message_for_a2a_with_error_content(a2a_agent: A2AAgent) -> None def test_prepare_message_for_a2a_with_uri_content(a2a_agent: A2AAgent) -> None: """Test _prepare_message_for_a2a with UriContent.""" - # Create ChatMessage with UriContent + # Create Message with UriContent uri_content = Content.from_uri(uri="http://example.com/file.pdf", media_type="application/pdf") - message = ChatMessage(role="user", contents=[uri_content]) + message = Message(role="user", contents=[uri_content]) # Convert to A2A message a2a_message = a2a_agent._prepare_message_for_a2a(message) @@ -355,9 +355,9 @@ def test_prepare_message_for_a2a_with_uri_content(a2a_agent: A2AAgent) -> None: def test_prepare_message_for_a2a_with_data_content(a2a_agent: A2AAgent) -> None: """Test _prepare_message_for_a2a with DataContent.""" - # Create ChatMessage with DataContent (base64 data URI) + # Create Message with DataContent (base64 data URI) data_content = Content.from_uri(uri="data:text/plain;base64,SGVsbG8gV29ybGQ=", media_type="text/plain") - message = ChatMessage(role="user", contents=[data_content]) + message = Message(role="user", contents=[data_content]) # Convert to A2A message a2a_message = a2a_agent._prepare_message_for_a2a(message) @@ -370,11 +370,11 @@ def test_prepare_message_for_a2a_with_data_content(a2a_agent: A2AAgent) -> None: def test_prepare_message_for_a2a_empty_contents_raises_error(a2a_agent: A2AAgent) -> None: """Test _prepare_message_for_a2a with empty contents raises ValueError.""" - # Create ChatMessage with no contents - message = ChatMessage(role="user", contents=[]) + # Create Message with no contents + message = Message(role="user", contents=[]) # Should raise ValueError for empty contents - with raises(ValueError, match="ChatMessage.contents is empty"): + with raises(ValueError, match="Message.contents is empty"): a2a_agent._prepare_message_for_a2a(message) @@ -432,12 +432,12 @@ async def test_context_manager_no_cleanup_when_no_http_client() -> 
None: def test_prepare_message_for_a2a_with_multiple_contents() -> None: - """Test conversion of ChatMessage with multiple contents.""" + """Test conversion of Message with multiple contents.""" agent = A2AAgent(client=MagicMock(), _http_client=None) # Create message with multiple content types - message = ChatMessage( + message = Message( role="user", contents=[ Content.from_text(text="Here's the analysis:"), @@ -489,12 +489,12 @@ def test_parse_contents_from_a2a_unknown_part_kind() -> None: def test_prepare_message_for_a2a_with_hosted_file() -> None: - """Test conversion of ChatMessage with HostedFileContent to A2A message.""" + """Test conversion of Message with HostedFileContent to A2A message.""" agent = A2AAgent(client=MagicMock(), _http_client=None) # Create message with hosted file content - message = ChatMessage( + message = Message( role="user", contents=[Content.from_hosted_file(file_id="hosted://storage/document.pdf")], ) diff --git a/python/packages/ag-ui/README.md b/python/packages/ag-ui/README.md index d2ff4c3d10..7ac367611c 100644 --- a/python/packages/ag-ui/README.md +++ b/python/packages/ag-ui/README.md @@ -14,12 +14,12 @@ pip install agent-framework-ag-ui ```python from fastapi import FastAPI -from agent_framework import ChatAgent +from agent_framework import Agent from agent_framework.azure import AzureOpenAIChatClient from agent_framework.ag_ui import add_agent_framework_fastapi_endpoint # Create your agent -agent = ChatAgent( +agent = Agent( name="my_agent", instructions="You are a helpful assistant.", chat_client=AzureOpenAIChatClient( @@ -58,7 +58,7 @@ The `AGUIChatClient` supports: - Streaming and non-streaming responses - Hybrid tool execution (client-side + server-side tools) - Automatic thread management for conversation continuity -- Integration with `ChatAgent` for client-side history management +- Integration with `Agent` for client-side history management ## Documentation @@ -91,7 +91,7 @@ The AG-UI endpoint does not enforce 
authentication by default. **For production import os from fastapi import Depends, FastAPI, HTTPException, Security from fastapi.security import APIKeyHeader -from agent_framework import ChatAgent +from agent_framework import Agent from agent_framework.ag_ui import add_agent_framework_fastapi_endpoint # Configure API key authentication @@ -104,7 +104,7 @@ async def verify_api_key(api_key: str | None = Security(API_KEY_HEADER)) -> None raise HTTPException(status_code=401, detail="Invalid or missing API key") # Create agent and app -agent = ChatAgent(name="my_agent", instructions="...", chat_client=...) +agent = Agent(name="my_agent", instructions="...", chat_client=...) app = FastAPI() # Register endpoint WITH authentication diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_client.py b/python/packages/ag-ui/agent_framework_ag_ui/_client.py index c30e4e5926..717d8cdefa 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_client.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_client.py @@ -15,11 +15,11 @@ import httpx from agent_framework import ( BaseChatClient, - ChatMessage, ChatResponse, ChatResponseUpdate, Content, FunctionTool, + Message, ResponseStream, ) from agent_framework._middleware import ChatMiddlewareLayer @@ -130,8 +130,8 @@ class AGUIChatClient( This client sends exactly the messages it receives to the server. It does NOT automatically maintain conversation history. The server must handle history via thread_id. - For stateless servers: Use ChatAgent wrapper which will send full message history on each - request. However, even with ChatAgent, the server must echo back all context for the + For stateless servers: Use Agent wrapper which will send full message history on each + request. However, even with Agent, the server must echo back all context for the agent to maintain history across turns. Important: Tool Handling (Hybrid Execution - matches .NET) @@ -140,7 +140,7 @@ class AGUIChatClient( 3. 
When LLM calls a client tool, function invocation executes it locally 4. Both client and server tools work together (hybrid pattern) - The wrapping ChatAgent's function invocation handles client tool execution + The wrapping Agent's function invocation handles client tool execution automatically when the server's LLM decides to call them. Examples: @@ -162,18 +162,18 @@ class AGUIChatClient( metadata={"thread_id": thread_id} ) - Recommended usage with ChatAgent (client manages history): + Recommended usage with Agent (client manages history): .. code-block:: python - from agent_framework import ChatAgent + from agent_framework import Agent from agent_framework.ag_ui import AGUIChatClient client = AGUIChatClient(endpoint="http://localhost:8888/") - agent = ChatAgent(name="assistant", client=client) + agent = Agent(name="assistant", client=client) thread = await agent.get_new_thread() - # ChatAgent automatically maintains history and sends full context + # Agent automatically maintains history and sends full context response = await agent.run("Hello!", thread=thread) response2 = await agent.run("How are you?", thread=thread) @@ -282,9 +282,7 @@ def _register_server_tool_placeholder(self, tool_name: str) -> None: logger = get_logger() logger.debug(f"[AGUIChatClient] Registered server placeholder: {tool_name}") - def _extract_state_from_messages( - self, messages: Sequence[ChatMessage] - ) -> tuple[list[ChatMessage], dict[str, Any] | None]: + def _extract_state_from_messages(self, messages: Sequence[Message]) -> tuple[list[Message], dict[str, Any] | None]: """Extract state from last message if present. Args: @@ -319,11 +317,11 @@ def _extract_state_from_messages( return list(messages), None - def _convert_messages_to_agui_format(self, messages: list[ChatMessage]) -> list[dict[str, Any]]: + def _convert_messages_to_agui_format(self, messages: list[Message]) -> list[dict[str, Any]]: """Convert Agent Framework messages to AG-UI format. 
Args: - messages: List of ChatMessage objects + messages: List of Message objects Returns: List of AG-UI formatted message dictionaries @@ -353,7 +351,7 @@ def _get_thread_id(self, options: Mapping[str, Any]) -> str: def _inner_get_response( self, *, - messages: Sequence[ChatMessage], + messages: Sequence[Message], stream: bool = False, options: Mapping[str, Any], **kwargs: Any, @@ -393,7 +391,7 @@ async def _get_response() -> ChatResponse: async def _streaming_impl( self, *, - messages: Sequence[ChatMessage], + messages: Sequence[Message], options: Mapping[str, Any], **kwargs: Any, ) -> AsyncIterable[ChatResponseUpdate]: @@ -415,7 +413,7 @@ async def _streaming_impl( agui_messages = self._convert_messages_to_agui_format(messages_to_send) # Send client tools to server so LLM knows about them - # Client tools execute via ChatAgent's function invocation wrapper + # Client tools execute via Agent's function invocation wrapper agui_tools = convert_tools_to_agui_format(options.get("tools")) # Build set of client tool names (matches .NET clientToolSet) diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_message_adapters.py b/python/packages/ag-ui/agent_framework_ag_ui/_message_adapters.py index 3f35572f78..709d8f4887 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_message_adapters.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_message_adapters.py @@ -9,8 +9,8 @@ from typing import Any, cast from agent_framework import ( - ChatMessage, Content, + Message, prepare_function_call_results, ) @@ -25,9 +25,9 @@ logger = logging.getLogger(__name__) -def _sanitize_tool_history(messages: list[ChatMessage]) -> list[ChatMessage]: +def _sanitize_tool_history(messages: list[Message]) -> list[Message]: """Normalize tool ordering and inject synthetic results for AG-UI edge cases.""" - sanitized: list[ChatMessage] = [] + sanitized: list[Message] = [] pending_tool_call_ids: set[str] | None = None pending_confirm_changes_id: str | None = None @@ -60,7 +60,7 @@ def 
_sanitize_tool_history(messages: list[ChatMessage]) -> list[ChatMessage]: ] if filtered_contents: # Create a new message without confirm_changes to avoid mutating the input - filtered_msg = ChatMessage(role=msg.role, contents=filtered_contents) + filtered_msg = Message(role=msg.role, contents=filtered_contents) sanitized.append(filtered_msg) # If no contents left after filtering, don't append anything @@ -99,7 +99,7 @@ def _sanitize_tool_history(messages: list[ChatMessage]) -> list[ChatMessage]: if pending_confirm_changes_id and approval_accepted is not None: logger.info(f"Injecting synthetic tool result for confirm_changes call_id={pending_confirm_changes_id}") - synthetic_result = ChatMessage( + synthetic_result = Message( role="tool", contents=[ Content.from_function_result( @@ -128,7 +128,7 @@ def _sanitize_tool_history(messages: list[ChatMessage]) -> list[ChatMessage]: logger.info( f"Injecting synthetic tool result for confirm_changes call_id={pending_confirm_changes_id}" ) - synthetic_result = ChatMessage( + synthetic_result = Message( role="tool", contents=[ Content.from_function_result( @@ -152,7 +152,7 @@ def _sanitize_tool_history(messages: list[ChatMessage]) -> list[ChatMessage]: ) for pending_call_id in pending_tool_call_ids: logger.info(f"Injecting synthetic tool result for pending call_id={pending_call_id}") - synthetic_result = ChatMessage( + synthetic_result = Message( role="tool", contents=[ Content.from_function_result( @@ -196,10 +196,10 @@ def _sanitize_tool_history(messages: list[ChatMessage]) -> list[ChatMessage]: return sanitized -def _deduplicate_messages(messages: list[ChatMessage]) -> list[ChatMessage]: +def _deduplicate_messages(messages: list[Message]) -> list[Message]: """Remove duplicate messages while preserving order.""" seen_keys: dict[Any, int] = {} - unique_messages: list[ChatMessage] = [] + unique_messages: list[Message] = [] for idx, msg in enumerate(messages): role_value = get_role_value(msg) @@ -256,7 +256,7 @@ def 
_deduplicate_messages(messages: list[ChatMessage]) -> list[ChatMessage]: def normalize_agui_input_messages( messages: list[dict[str, Any]], -) -> tuple[list[ChatMessage], list[dict[str, Any]]]: +) -> tuple[list[Message], list[dict[str, Any]]]: """Normalize raw AG-UI messages into provider and snapshot formats.""" provider_messages = agui_messages_to_agent_framework(messages) provider_messages = _sanitize_tool_history(provider_messages) @@ -265,14 +265,14 @@ def normalize_agui_input_messages( return provider_messages, snapshot_messages -def agui_messages_to_agent_framework(messages: list[dict[str, Any]]) -> list[ChatMessage]: +def agui_messages_to_agent_framework(messages: list[dict[str, Any]]) -> list[Message]: """Convert AG-UI messages to Agent Framework format. Args: messages: List of AG-UI messages Returns: - List of Agent Framework ChatMessage objects + List of Agent Framework Message objects """ def _update_tool_call_arguments( @@ -367,7 +367,7 @@ def _filter_modified_args( allowed_keys = set(original_args.keys()) return {key: value for key, value in modified_args.items() if key in allowed_keys} - result: list[ChatMessage] = [] + result: list[Message] = [] for msg in messages: # Handle standard tool result messages early (role="tool") to preserve provider invariants # This path maps AG‑UI tool messages to function_result content with the correct tool_call_id @@ -480,7 +480,7 @@ def _filter_modified_args( merged_args["steps"] = merged_steps state_args = merged_args - # Update the ChatMessage tool call with only enabled steps (for LLM context). + # Update the Message tool call with only enabled steps (for LLM context). # The LLM should only see the steps that were actually approved/executed. 
updated_args_for_llm = ( json.dumps(filtered_args) @@ -510,14 +510,14 @@ def _filter_modified_args( function_call=func_call_for_approval, additional_properties={"ag_ui_state_args": state_args} if state_args else None, ) - chat_msg = ChatMessage( + chat_msg = Message( role="user", contents=[approval_response], ) else: # No matching function call found - this is likely a confirm_changes approval # Keep the old behavior for backwards compatibility - chat_msg = ChatMessage( + chat_msg = Message( role="user", contents=[Content.from_text(text=approval_payload_text)], additional_properties={"is_tool_result": True, "tool_call_id": str(tool_call_id or "")}, @@ -537,7 +537,7 @@ def _filter_modified_args( func_result = result_content else: func_result = str(result_content) - chat_msg = ChatMessage( + chat_msg = Message( role="tool", contents=[Content.from_function_result(call_id=str(tool_call_id), result=func_result)], ) @@ -553,7 +553,7 @@ def _filter_modified_args( tool_call_id = msg.get("toolCallId") or msg.get("tool_call_id") or msg.get("actionExecutionId", "") result_content = msg.get("result", msg.get("content", "")) - chat_msg = ChatMessage( + chat_msg = Message( role="tool", contents=[Content.from_function_result(call_id=str(tool_call_id), result=result_content)], ) @@ -592,7 +592,7 @@ def _filter_modified_args( arguments=arguments, ) ) - chat_msg = ChatMessage(role="assistant", contents=contents) + chat_msg = Message(role="assistant", contents=contents) if "id" in msg: chat_msg.message_id = msg["id"] result.append(chat_msg) @@ -622,14 +622,14 @@ def _filter_modified_args( ) approval_contents.append(approval_response) - chat_msg = ChatMessage(role=role, contents=approval_contents) # type: ignore[call-overload] + chat_msg = Message(role=role, contents=approval_contents) # type: ignore[call-overload] else: # Regular text message content = msg.get("content", "") if isinstance(content, str): - chat_msg = ChatMessage(role=role, contents=[Content.from_text(text=content)]) # 
type: ignore[call-overload] + chat_msg = Message(role=role, contents=[Content.from_text(text=content)]) # type: ignore[call-overload] else: - chat_msg = ChatMessage(role=role, contents=[Content.from_text(text=str(content))]) # type: ignore[call-overload] + chat_msg = Message(role=role, contents=[Content.from_text(text=str(content))]) # type: ignore[call-overload] if "id" in msg: chat_msg.message_id = msg["id"] @@ -639,11 +639,11 @@ def _filter_modified_args( return result -def agent_framework_messages_to_agui(messages: list[ChatMessage] | list[dict[str, Any]]) -> list[dict[str, Any]]: +def agent_framework_messages_to_agui(messages: list[Message] | list[dict[str, Any]]) -> list[dict[str, Any]]: """Convert Agent Framework messages to AG-UI format. Args: - messages: List of Agent Framework ChatMessage objects or AG-UI dicts (already converted) + messages: List of Agent Framework Message objects or AG-UI dicts (already converted) Returns: List of AG-UI message dictionaries @@ -672,7 +672,7 @@ def agent_framework_messages_to_agui(messages: list[ChatMessage] | list[dict[str result.append(normalized_msg) continue - # Convert ChatMessage to AG-UI format + # Convert Message to AG-UI format role_value: str = msg.role if hasattr(msg.role, "value") else msg.role # type: ignore[assignment] role = FRAMEWORK_TO_AGUI_ROLE.get(role_value, "user") diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_orchestration/_helpers.py b/python/packages/ag-ui/agent_framework_ag_ui/_orchestration/_helpers.py index 277b5effce..aea3eb66c5 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_orchestration/_helpers.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_orchestration/_helpers.py @@ -13,8 +13,8 @@ from typing import Any from agent_framework import ( - ChatMessage, Content, + Message, ) from .._utils import get_role_value @@ -22,7 +22,7 @@ logger = logging.getLogger(__name__) -def pending_tool_call_ids(messages: list[ChatMessage]) -> set[str]: +def 
pending_tool_call_ids(messages: list[Message]) -> set[str]: """Get IDs of tool calls without corresponding results. Args: @@ -42,7 +42,7 @@ def pending_tool_call_ids(messages: list[ChatMessage]) -> set[str]: return pending_ids - resolved_ids -def is_state_context_message(message: ChatMessage) -> bool: +def is_state_context_message(message: Message) -> bool: """Check if a message is a state context system message. Args: @@ -178,7 +178,7 @@ def build_safe_metadata(thread_metadata: dict[str, Any] | None) -> dict[str, Any return safe_metadata -def latest_approval_response(messages: list[ChatMessage]) -> Content | None: +def latest_approval_response(messages: list[Message]) -> Content | None: """Get the latest approval response from messages. Args: diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_orchestration/_tooling.py b/python/packages/ag-ui/agent_framework_ag_ui/_orchestration/_tooling.py index 069622f490..ede8a49985 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_orchestration/_tooling.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_orchestration/_tooling.py @@ -39,7 +39,7 @@ def collect_server_tools(agent: SupportsAgentRun) -> list[Any]: functions need to be included for tool execution during approval flows. Args: - agent: Agent instance to collect tools from. Works with ChatAgent + agent: Agent instance to collect tools from. Works with Agent or any agent with default_options and optional mcp_tools attributes. 
Returns: @@ -53,7 +53,7 @@ def collect_server_tools(agent: SupportsAgentRun) -> list[Any]: tools_from_agent = default_options.get("tools") if isinstance(default_options, dict) else None server_tools = list(tools_from_agent) if tools_from_agent else [] - # Include functions from connected MCP tools (only available on ChatAgent) + # Include functions from connected MCP tools (only available on Agent) mcp_tools = getattr(agent, "mcp_tools", None) if mcp_tools: server_tools.extend(_collect_mcp_tool_functions(mcp_tools)) @@ -70,7 +70,7 @@ def register_additional_client_tools(agent: SupportsAgentRun, client_tools: list """Register client tools as additional declaration-only tools to avoid server execution. Args: - agent: Agent instance to register tools on. Works with ChatAgent + agent: Agent instance to register tools on. Works with Agent or any agent with a chat_client attribute. client_tools: List of client tools to register. """ diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_run.py b/python/packages/ag-ui/agent_framework_ag_ui/_run.py index d47fdc4d67..eda625f0ef 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_run.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_run.py @@ -28,8 +28,8 @@ ) from agent_framework import ( AgentThread, - ChatMessage, Content, + Message, SupportsAgentRun, prepare_function_call_results, ) @@ -195,7 +195,7 @@ def get_pending_without_end(self) -> list[dict[str, Any]]: def _create_state_context_message( current_state: dict[str, Any], state_schema: dict[str, Any], -) -> ChatMessage | None: +) -> Message | None: """Create a system message with current state context. 
This injects the current state into the conversation so the model @@ -206,13 +206,13 @@ def _create_state_context_message( state_schema: The state schema (used to determine if injection is needed) Returns: - ChatMessage with state context, or None if not needed + Message with state context, or None if not needed """ if not current_state or not state_schema: return None state_json = json.dumps(current_state, indent=2) - return ChatMessage( + return Message( role="system", contents=[ Content.from_text( @@ -229,10 +229,10 @@ def _create_state_context_message( def _inject_state_context( - messages: list[ChatMessage], + messages: list[Message], current_state: dict[str, Any], state_schema: dict[str, Any], -) -> list[ChatMessage]: +) -> list[Message]: """Inject state context message into messages if appropriate. The state context is injected before the last user message to give @@ -672,7 +672,7 @@ def _convert_approval_results_to_tool_messages(messages: list[Any]) -> None: This modifies the messages list in place. 
Args: - messages: List of ChatMessage objects to process + messages: List of Message objects to process """ result: list[Any] = [] @@ -694,11 +694,11 @@ def _convert_approval_results_to_tool_messages(messages: list[Any]) -> None: # Tool messages first (right after the preceding assistant message per OpenAI requirements) for func_result in function_results: - result.append(ChatMessage(role="tool", contents=[func_result])) + result.append(Message(role="tool", contents=[func_result])) # Then user message with remaining content (if any) if other_contents: - result.append(ChatMessage(role=msg.role, contents=other_contents)) + result.append(Message(role=msg.role, contents=other_contents)) messages[:] = result @@ -793,9 +793,9 @@ async def run_agent_stream( # Check for structured output mode (skip text content) skip_text = False response_format = None - from agent_framework import ChatAgent + from agent_framework import Agent - if isinstance(agent, ChatAgent): + if isinstance(agent, Agent): response_format = agent.default_options.get("response_format") skip_text = response_format is not None diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/README.md b/python/packages/ag-ui/agent_framework_ag_ui_examples/README.md index df07cff85d..97d6a3b911 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/README.md +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/README.md @@ -12,7 +12,7 @@ pip install agent-framework-ag-ui ### Using Example Agents with Any Chat Client -All example agents are factory functions that accept any `ChatClientProtocol`-compatible chat client: +All example agents are factory functions that accept any `SupportsChatGetResponse`-compatible chat client: ```python from fastapi import FastAPI @@ -38,12 +38,12 @@ add_agent_framework_fastapi_endpoint(app, weather_agent(openai_client), "/weathe ```python from fastapi import FastAPI -from agent_framework import ChatAgent +from agent_framework import Agent from 
agent_framework.azure import AzureOpenAIChatClient from agent_framework.ag_ui import add_agent_framework_fastapi_endpoint # Create your agent -agent = ChatAgent( +agent = Agent( name="my_agent", instructions="You are a helpful assistant.", chat_client=AzureOpenAIChatClient(model_id="gpt-4o"), @@ -70,7 +70,7 @@ This integration supports all 7 AG-UI features: ## Examples -All example agents are implemented as **factory functions** that accept any chat client implementing `ChatClientProtocol`. This provides maximum flexibility to use Azure OpenAI, OpenAI, Anthropic, or any custom chat client implementation. +All example agents are implemented as **factory functions** that accept any chat client implementing `SupportsChatGetResponse`. This provides maximum flexibility to use Azure OpenAI, OpenAI, Anthropic, or any custom chat client implementation. ### Available Example Agents @@ -97,7 +97,7 @@ from agent_framework_ag_ui_examples.agents import ( recipe_agent, ) -# Create a chat client (use any ChatClientProtocol implementation) +# Create a chat client (use any SupportsChatGetResponse implementation) azure_client = AzureOpenAIChatClient(model_id="gpt-4") openai_client = OpenAIChatClient(model_id="gpt-4o") @@ -187,8 +187,8 @@ The package uses a clean, orchestrator-based architecture: You can create your own agent factories following the same pattern as the examples: ```python -from agent_framework import ChatAgent, tool -from agent_framework import ChatClientProtocol +from agent_framework import Agent, tool +from agent_framework import SupportsChatGetResponse from agent_framework.ag_ui import AgentFrameworkAgent @tool @@ -196,7 +196,7 @@ def my_tool(param: str) -> str: """My custom tool.""" return f"Result: {param}" -def my_custom_agent(chat_client: ChatClientProtocol) -> AgentFrameworkAgent: +def my_custom_agent(chat_client: SupportsChatGetResponse) -> AgentFrameworkAgent: """Create a custom agent with the specified chat client. 
Args: @@ -205,7 +205,7 @@ def my_custom_agent(chat_client: ChatClientProtocol) -> AgentFrameworkAgent: Returns: A configured AgentFrameworkAgent instance """ - agent = ChatAgent( + agent = Agent( name="my_custom_agent", instructions="Custom instructions here", chat_client=chat_client, @@ -229,12 +229,12 @@ agent = my_custom_agent(chat_client) State is injected as system messages and updated via predictive state updates: ```python -from agent_framework import ChatAgent +from agent_framework import Agent from agent_framework.azure import AzureOpenAIChatClient from agent_framework.ag_ui import AgentFrameworkAgent # Create your agent -agent = ChatAgent( +agent = Agent( name="recipe_agent", chat_client=AzureOpenAIChatClient(model_id="gpt-4o"), ) @@ -266,12 +266,12 @@ wrapped_agent = AgentFrameworkAgent( Predictive state updates automatically stream tool arguments as optimistic state updates: ```python -from agent_framework import ChatAgent +from agent_framework import Agent from agent_framework.azure import AzureOpenAIChatClient from agent_framework.ag_ui import AgentFrameworkAgent # Create your agent -agent = ChatAgent( +agent = Agent( name="document_writer", chat_client=AzureOpenAIChatClient(model_id="gpt-4o"), ) diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/document_writer_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/document_writer_agent.py index 3a74af346a..80c109b6bd 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/document_writer_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/document_writer_agent.py @@ -4,7 +4,7 @@ from __future__ import annotations -from agent_framework import ChatAgent, ChatClientProtocol, tool +from agent_framework import Agent, SupportsChatGetResponse, tool from agent_framework.ag_ui import AgentFrameworkAgent @@ -40,7 +40,7 @@ def write_document(document: str) -> str: ) -def document_writer_agent(chat_client: ChatClientProtocol) -> 
AgentFrameworkAgent: +def document_writer_agent(chat_client: SupportsChatGetResponse) -> AgentFrameworkAgent: """Create a document writer agent with predictive state updates. Args: @@ -49,7 +49,7 @@ def document_writer_agent(chat_client: ChatClientProtocol) -> AgentFrameworkAgen Returns: A configured AgentFrameworkAgent instance with document writing capabilities """ - agent = ChatAgent( + agent = Agent( name="document_writer", instructions=_DOCUMENT_WRITER_INSTRUCTIONS, chat_client=chat_client, diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/human_in_the_loop_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/human_in_the_loop_agent.py index 368c4e47ed..34a5ee952b 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/human_in_the_loop_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/human_in_the_loop_agent.py @@ -5,7 +5,7 @@ from enum import Enum from typing import Any -from agent_framework import ChatAgent, ChatClientProtocol, tool +from agent_framework import Agent, SupportsChatGetResponse, tool from pydantic import BaseModel, Field @@ -43,16 +43,16 @@ def generate_task_steps(steps: list[TaskStep]) -> str: return f"Generated {len(steps)} execution steps for the task." -def human_in_the_loop_agent(chat_client: ChatClientProtocol[Any]) -> ChatAgent[Any]: +def human_in_the_loop_agent(chat_client: SupportsChatGetResponse[Any]) -> Agent[Any]: """Create a human-in-the-loop agent using tool-based approach for predictive state. Args: chat_client: The chat client to use for the agent Returns: - A configured ChatAgent instance with human-in-the-loop capabilities + A configured Agent instance with human-in-the-loop capabilities """ - return ChatAgent( + return Agent( name="human_in_the_loop_agent", instructions="""You are a helpful assistant that can perform any task by breaking it down into steps. 
diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/recipe_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/recipe_agent.py index 2d9bb066ba..10c4969bdd 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/recipe_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/recipe_agent.py @@ -7,7 +7,7 @@ from enum import Enum from typing import Any -from agent_framework import ChatAgent, ChatClientProtocol, tool +from agent_framework import Agent, SupportsChatGetResponse, tool from agent_framework.ag_ui import AgentFrameworkAgent from pydantic import BaseModel, Field @@ -104,7 +104,7 @@ def update_recipe(recipe: Recipe) -> str: """ -def recipe_agent(chat_client: ChatClientProtocol[Any]) -> AgentFrameworkAgent: +def recipe_agent(chat_client: SupportsChatGetResponse[Any]) -> AgentFrameworkAgent: """Create a recipe agent with streaming state updates. Args: @@ -113,7 +113,7 @@ def recipe_agent(chat_client: ChatClientProtocol[Any]) -> AgentFrameworkAgent: Returns: A configured AgentFrameworkAgent instance with recipe management """ - agent = ChatAgent( + agent = Agent( name="recipe_agent", instructions=_RECIPE_INSTRUCTIONS, chat_client=chat_client, diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/research_assistant_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/research_assistant_agent.py index b92874421a..8847cf0aab 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/research_assistant_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/research_assistant_agent.py @@ -5,7 +5,7 @@ import asyncio from typing import Any -from agent_framework import ChatAgent, ChatClientProtocol, tool +from agent_framework import Agent, SupportsChatGetResponse, tool from agent_framework.ag_ui import AgentFrameworkAgent @@ -88,7 +88,7 @@ async def analyze_data(dataset: str) -> str: ) -def 
research_assistant_agent(chat_client: ChatClientProtocol[Any]) -> AgentFrameworkAgent: +def research_assistant_agent(chat_client: SupportsChatGetResponse[Any]) -> AgentFrameworkAgent: """Create a research assistant agent. Args: @@ -97,7 +97,7 @@ def research_assistant_agent(chat_client: ChatClientProtocol[Any]) -> AgentFrame Returns: A configured AgentFrameworkAgent instance with research capabilities """ - agent = ChatAgent( + agent = Agent( name="research_assistant", instructions=_RESEARCH_ASSISTANT_INSTRUCTIONS, chat_client=chat_client, diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/simple_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/simple_agent.py index 3e72fd3a11..99cb67db31 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/simple_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/simple_agent.py @@ -4,19 +4,19 @@ from typing import Any -from agent_framework import ChatAgent, ChatClientProtocol +from agent_framework import Agent, SupportsChatGetResponse -def simple_agent(chat_client: ChatClientProtocol[Any]) -> ChatAgent[Any]: +def simple_agent(chat_client: SupportsChatGetResponse[Any]) -> Agent[Any]: """Create a simple chat agent. Args: chat_client: The chat client to use for the agent Returns: - A configured ChatAgent instance + A configured Agent instance """ - return ChatAgent[Any]( + return Agent[Any]( name="simple_chat_agent", instructions="You are a helpful assistant. 
Be concise and friendly.", chat_client=chat_client, diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_planner_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_planner_agent.py index 57e14bb6c3..085e6ff1c4 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_planner_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_planner_agent.py @@ -4,7 +4,7 @@ from typing import Any -from agent_framework import ChatAgent, ChatClientProtocol, tool +from agent_framework import Agent, SupportsChatGetResponse, tool from agent_framework.ag_ui import AgentFrameworkAgent @@ -61,7 +61,7 @@ def book_meeting_room(room_name: str, date: str, start_time: str, end_time: str) ) -def task_planner_agent(chat_client: ChatClientProtocol[Any]) -> AgentFrameworkAgent: +def task_planner_agent(chat_client: SupportsChatGetResponse[Any]) -> AgentFrameworkAgent: """Create a task planner agent with user approval for actions. 
Args: @@ -70,7 +70,7 @@ def task_planner_agent(chat_client: ChatClientProtocol[Any]) -> AgentFrameworkAg Returns: A configured AgentFrameworkAgent instance with task planning capabilities """ - agent = ChatAgent( + agent = Agent( name="task_planner", instructions=_TASK_PLANNER_INSTRUCTIONS, chat_client=chat_client, diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_steps_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_steps_agent.py index 2fe79d063f..8bc31a55a4 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_steps_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_steps_agent.py @@ -20,7 +20,7 @@ TextMessageStartEvent, ToolCallStartEvent, ) -from agent_framework import ChatAgent, ChatClientProtocol, ChatMessage, Content, tool +from agent_framework import Agent, Content, Message, SupportsChatGetResponse, tool from agent_framework.ag_ui import AgentFrameworkAgent from pydantic import BaseModel, Field @@ -54,7 +54,7 @@ def generate_task_steps(steps: list[TaskStep]) -> str: return "Steps generated." -def _create_task_steps_agent(chat_client: ChatClientProtocol[Any]) -> AgentFrameworkAgent: +def _create_task_steps_agent(chat_client: SupportsChatGetResponse[Any]) -> AgentFrameworkAgent: """Create the task steps agent using tool-based approach for streaming. Args: @@ -63,7 +63,7 @@ def _create_task_steps_agent(chat_client: ChatClientProtocol[Any]) -> AgentFrame Returns: A configured AgentFrameworkAgent instance """ - agent = ChatAgent[Any]( + agent = Agent[Any]( name="task_steps_agent", instructions="""You are a helpful assistant that breaks down tasks into actionable steps. 
@@ -226,24 +226,24 @@ async def run_agent(self, input_data: dict[str, Any]) -> AsyncGenerator[Any]: original_messages = input_data.get("messages", []) - # Convert to ChatMessage objects if needed - messages: list[ChatMessage] = [] + # Convert to Message objects if needed + messages: list[Message] = [] for msg in original_messages: if isinstance(msg, dict): content_str = msg.get("content", "") if isinstance(content_str, str): messages.append( - ChatMessage( + Message( role=msg.get("role", "user"), contents=[Content.from_text(text=content_str)], ) ) - elif isinstance(msg, ChatMessage): + elif isinstance(msg, Message): messages.append(msg) # Add completion message messages.append( - ChatMessage( + Message( role="user", contents=[ Content.from_text( @@ -332,7 +332,7 @@ async def run_agent(self, input_data: dict[str, Any]) -> AsyncGenerator[Any]: yield run_finished_event -def task_steps_agent_wrapped(chat_client: ChatClientProtocol[Any]) -> TaskStepsAgentWithExecution: +def task_steps_agent_wrapped(chat_client: SupportsChatGetResponse[Any]) -> TaskStepsAgentWithExecution: """Create a task steps agent with execution simulation. 
Args: diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/ui_generator_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/ui_generator_agent.py index 3f50fc9c07..e4fb275494 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/ui_generator_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/ui_generator_agent.py @@ -7,7 +7,7 @@ import sys from typing import TYPE_CHECKING, Any, TypedDict -from agent_framework import ChatAgent, ChatClientProtocol, FunctionTool +from agent_framework import Agent, FunctionTool, SupportsChatGetResponse from agent_framework.ag_ui import AgentFrameworkAgent if sys.version_info >= (3, 13): @@ -168,7 +168,7 @@ OptionsT = TypeVar("OptionsT", bound=TypedDict, default="ChatOptions") # type: ignore[valid-type] -def ui_generator_agent(chat_client: ChatClientProtocol[OptionsT]) -> AgentFrameworkAgent: +def ui_generator_agent(chat_client: SupportsChatGetResponse[OptionsT]) -> AgentFrameworkAgent: """Create a UI generator agent with custom React component rendering. 
Args: @@ -177,7 +177,7 @@ def ui_generator_agent(chat_client: ChatClientProtocol[OptionsT]) -> AgentFramew Returns: A configured AgentFrameworkAgent instance with UI generation capabilities """ - agent = ChatAgent( + agent = Agent( name="ui_generator", instructions=_UI_GENERATOR_INSTRUCTIONS, chat_client=chat_client, diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/weather_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/weather_agent.py index f8b03c2d0e..23616af7a5 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/weather_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/weather_agent.py @@ -6,7 +6,7 @@ from typing import Any -from agent_framework import ChatAgent, ChatClientProtocol, tool +from agent_framework import Agent, SupportsChatGetResponse, tool @tool @@ -59,16 +59,16 @@ def get_forecast(location: str, days: int = 3) -> str: return f"{days}-day forecast for {location}:\n" + "\n".join(forecast) -def weather_agent(chat_client: ChatClientProtocol[Any]) -> ChatAgent[Any]: +def weather_agent(chat_client: SupportsChatGetResponse[Any]) -> Agent[Any]: """Create a weather agent with get_weather and get_forecast tools. Args: chat_client: The chat client to use for the agent Returns: - A configured ChatAgent instance with weather tools + A configured Agent instance with weather tools """ - return ChatAgent[Any]( + return Agent[Any]( name="weather_agent", instructions=( "You are a helpful weather assistant. 
" diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/server/api/backend_tool_rendering.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/server/api/backend_tool_rendering.py index 915e57c6e2..44c30290c2 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/server/api/backend_tool_rendering.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/server/api/backend_tool_rendering.py @@ -4,7 +4,7 @@ from typing import Any, cast -from agent_framework._clients import ChatClientProtocol +from agent_framework._clients import SupportsChatGetResponse from agent_framework.ag_ui import add_agent_framework_fastapi_endpoint from agent_framework.azure import AzureOpenAIChatClient from fastapi import FastAPI @@ -19,7 +19,7 @@ def register_backend_tool_rendering(app: FastAPI) -> None: app: The FastAPI application. """ # Create a chat client and call the factory function - chat_client = cast(ChatClientProtocol[Any], AzureOpenAIChatClient()) + chat_client = cast(SupportsChatGetResponse[Any], AzureOpenAIChatClient()) add_agent_framework_fastapi_endpoint( app, diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/server/main.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/server/main.py index 8c2f4be261..53b494c4de 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/server/main.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/server/main.py @@ -10,7 +10,7 @@ import uvicorn from agent_framework import ChatOptions -from agent_framework._clients import ChatClientProtocol +from agent_framework._clients import SupportsChatGetResponse from agent_framework.ag_ui import add_agent_framework_fastapi_endpoint from agent_framework.anthropic import AnthropicClient from agent_framework.azure import AzureOpenAIChatClient @@ -67,8 +67,8 @@ # Create a shared chat client for all agents # You can use different chat clients for different agents if needed # Set CHAT_CLIENT=anthropic to use Anthropic, defaults to 
Azure OpenAI -chat_client: ChatClientProtocol[ChatOptions] = cast( - ChatClientProtocol[ChatOptions], +chat_client: SupportsChatGetResponse[ChatOptions] = cast( + SupportsChatGetResponse[ChatOptions], AnthropicClient() if os.getenv("CHAT_CLIENT", "").lower() == "anthropic" else AzureOpenAIChatClient(), ) diff --git a/python/packages/ag-ui/getting_started/README.md b/python/packages/ag-ui/getting_started/README.md index 9421935a4d..5422f8621e 100644 --- a/python/packages/ag-ui/getting_started/README.md +++ b/python/packages/ag-ui/getting_started/README.md @@ -35,9 +35,9 @@ python client_advanced.py **Note:** This example shows direct `AGUIChatClient` usage. Tool execution and conversation continuity depend on server-side configuration and capabilities. -### ChatAgent Integration (`client_with_agent.py`) +### Agent Integration (`client_with_agent.py`) -Best practice example using `ChatAgent` wrapper with **AgentThread** +Best practice example using `Agent` wrapper with **AgentThread** - **AgentThread** maintains conversation state - Client-side conversation history management via `thread.message_store` - **Hybrid tool execution**: client-side + server-side tools simultaneously @@ -77,7 +77,7 @@ The AG-UI protocol supports two approaches to conversation history: - Full message history sent with each request - Works with any AG-UI server (stateful or stateless) -The `ChatAgent` wrapper (used in client_with_agent.py) collects messages from local storage and sends the full history to `AGUIChatClient`, which then forwards everything to the server. +The `Agent` wrapper (used in client_with_agent.py) collects messages from local storage and sends the full history to `AGUIChatClient`, which then forwards everything to the server. ### Tool/Function Calling @@ -91,14 +91,14 @@ Client defines: Server defines: User: "What's the weather in SF and what time is it?" 
↓ -ChatAgent sends: full history + tool definitions for get_weather, read_sensors +Agent sends: full history + tool definitions for get_weather, read_sensors ↓ Server LLM decides: "I need get_weather('SF') and get_current_time()" ↓ Server executes get_current_time() → "2025-11-11 14:30:00 UTC" Server sends function call request → get_weather('SF') ↓ -ChatAgent intercepts get_weather call → executes locally +Agent intercepts get_weather call → executes locally ↓ Client sends result → "Sunny, 72°F" ↓ @@ -110,7 +110,7 @@ Client receives final response **How it works:** 1. **Client-Side Tools** (`client_with_agent.py`): - - Tools defined in ChatAgent's `tools` parameter execute locally + - Tools defined in Agent's `tools` parameter execute locally - Tool metadata (name, description, schema) sent to server for planning - When server requests client tool → client intercepts → executes locally → sends result @@ -126,7 +126,7 @@ Client receives final response - Client tools execute client-side **Direct AGUIChatClient Usage** (client_advanced.py): -Even without ChatAgent wrapper, client-side tools work: +Even without Agent wrapper, client-side tools work: - Tools passed in ChatOptions execute locally - Server can also have its own tools - Hybrid execution works automatically @@ -184,7 +184,7 @@ Create a file named `server.py`: import os -from agent_framework import ChatAgent +from agent_framework import Agent from agent_framework.azure import AzureOpenAIChatClient from agent_framework.ag_ui import add_agent_framework_fastapi_endpoint from fastapi import FastAPI @@ -202,7 +202,7 @@ if not api_key: raise ValueError("AZURE_OPENAI_API_KEY environment variable is required") # Create the AI agent -agent = ChatAgent( +agent = Agent( name="AGUIAssistant", instructions="You are a helpful assistant.", chat_client=AzureOpenAIChatClient( @@ -227,7 +227,7 @@ if __name__ == "__main__": ### Key Concepts - **`add_agent_framework_fastapi_endpoint`**: Registers the AG-UI endpoint with 
automatic request/response handling and SSE streaming -- **`ChatAgent`**: The agent that will handle incoming requests +- **`Agent`**: The agent that will handle incoming requests - **FastAPI Integration**: Uses FastAPI's native async support for streaming responses - **Instructions**: The agent is created with default instructions, which can be overridden by client messages - **Configuration**: `AzureOpenAIChatClient` can read from environment variables (`AZURE_OPENAI_ENDPOINT`, `AZURE_OPENAI_CHAT_DEPLOYMENT_NAME`, `AZURE_OPENAI_API_KEY`) or accept parameters directly @@ -236,7 +236,7 @@ if __name__ == "__main__": ```python # No need to read environment variables manually -agent = ChatAgent( +agent = Agent( name="AGUIAssistant", instructions="You are a helpful assistant.", chat_client=AzureOpenAIChatClient(), # Reads from environment automatically @@ -354,7 +354,7 @@ if __name__ == "__main__": - **Thread Management**: Pass `thread_id` in metadata to maintain conversation context across requests - **Streaming Responses**: Use `get_response(..., stream=True)` for real-time streaming or `get_response(..., stream=False)` for non-streaming - **Context Manager**: Use `async with` for automatic cleanup of HTTP connections -- **Standard Interface**: Works with all Agent Framework patterns (ChatAgent, tools, etc.) +- **Standard Interface**: Works with all Agent Framework patterns (Agent, tools, etc.) 
- **Hybrid Tool Execution**: Supports both client-side and server-side tools executing together in the same conversation ### Configure and Run the Client diff --git a/python/packages/ag-ui/getting_started/client_advanced.py b/python/packages/ag-ui/getting_started/client_advanced.py index 65f5e896bf..dcb4e5ca3c 100644 --- a/python/packages/ag-ui/getting_started/client_advanced.py +++ b/python/packages/ag-ui/getting_started/client_advanced.py @@ -114,15 +114,15 @@ async def non_streaming_example(client: AGUIChatClient, thread_id: str | None = async def tool_example(client: AGUIChatClient, thread_id: str | None = None): """Demonstrate sending tool definitions to the server. - IMPORTANT: When using AGUIChatClient directly (without ChatAgent wrapper): + IMPORTANT: When using AGUIChatClient directly (without Agent wrapper): - Tools are sent as DEFINITIONS only - No automatic client-side execution (no function invocation middleware) - Server must have matching tool implementations to execute them For CLIENT-SIDE tool execution (like .NET AGUIClient sample): - - Use ChatAgent wrapper with tools + - Use Agent wrapper with tools - See client_with_agent.py for the hybrid pattern - - ChatAgent middleware intercepts and executes client tools locally + - Agent middleware intercepts and executes client tools locally - Server can have its own tools that execute server-side - Both client and server tools work together in same conversation @@ -186,7 +186,7 @@ async def conversation_example(client: AGUIChatClient): # Check if context was maintained if "alice" not in response2.text.lower(): - print("\n[Note: Server may not maintain thread context - consider using ChatAgent for history management]") + print("\n[Note: Server may not maintain thread context - consider using Agent for history management]") # Third turn print("\nUser: Can you also tell me what 10 * 5 is?\n") diff --git a/python/packages/ag-ui/getting_started/client_with_agent.py 
b/python/packages/ag-ui/getting_started/client_with_agent.py index 5d9917327b..01e4a8563b 100644 --- a/python/packages/ag-ui/getting_started/client_with_agent.py +++ b/python/packages/ag-ui/getting_started/client_with_agent.py @@ -1,6 +1,6 @@ # Copyright (c) Microsoft. All rights reserved. -"""Example showing ChatAgent with AGUIChatClient for hybrid tool execution. +"""Example showing Agent with AGUIChatClient for hybrid tool execution. This demonstrates the HYBRID pattern matching .NET AGUIClient implementation: @@ -24,7 +24,7 @@ import logging import os -from agent_framework import ChatAgent, tool +from agent_framework import Agent, tool from agent_framework.ag_ui import AGUIChatClient # Enable debug logging @@ -55,7 +55,7 @@ def get_weather(location: str) -> str: async def main(): - """Demonstrate ChatAgent + AGUIChatClient hybrid tool execution. + """Demonstrate Agent + AGUIChatClient hybrid tool execution. This matches the .NET pattern from Program.cs where: - AIAgent agent = chatClient.CreateAIAgent(tools: [...]) @@ -63,14 +63,14 @@ async def main(): - RunStreamingAsync(messages, thread) Python equivalent: - - agent = ChatAgent(chat_client=AGUIChatClient(...), tools=[...]) + - agent = Agent(chat_client=AGUIChatClient(...), tools=[...]) - thread = agent.get_new_thread() # Creates thread with message_store - agent.run(message, stream=True, thread=thread) # Thread accumulates history """ server_url = os.environ.get("AGUI_SERVER_URL", "http://127.0.0.1:5100/") print("=" * 70) - print("ChatAgent + AGUIChatClient: Hybrid Tool Execution") + print("Agent + AGUIChatClient: Hybrid Tool Execution") print("=" * 70) print(f"\nServer: {server_url}") print("\nThis example demonstrates:") @@ -82,8 +82,8 @@ async def main(): try: # Create remote client in async context manager async with AGUIChatClient(endpoint=server_url) as remote_client: - # Wrap in ChatAgent for conversation history management - agent = ChatAgent( + # Wrap in Agent for conversation history management + 
agent = Agent( name="remote_assistant", instructions="You are a helpful assistant. Remember user information across the conversation.", chat_client=remote_client, diff --git a/python/packages/ag-ui/getting_started/server.py b/python/packages/ag-ui/getting_started/server.py index fa3f21c3e7..4d83832051 100644 --- a/python/packages/ag-ui/getting_started/server.py +++ b/python/packages/ag-ui/getting_started/server.py @@ -7,7 +7,7 @@ import logging import os -from agent_framework import ChatAgent, tool +from agent_framework import Agent, tool from agent_framework.ag_ui import add_agent_framework_fastapi_endpoint from agent_framework.azure import AzureOpenAIChatClient from dotenv import load_dotenv @@ -116,7 +116,7 @@ def get_time_zone(location: str) -> str: # The client will send get_weather tool metadata so the LLM knows about it, # and the function invocation mixin on AGUIChatClient will execute it client-side. # This matches the .NET AG-UI hybrid execution pattern. -agent = ChatAgent( +agent = Agent( name="AGUIAssistant", instructions="You are a helpful assistant. 
Use get_weather for weather and get_time_zone for time zones.", chat_client=AzureOpenAIChatClient( diff --git a/python/packages/ag-ui/tests/ag_ui/conftest.py b/python/packages/ag-ui/tests/ag_ui/conftest.py index 4612750f5f..a86b0f0b67 100644 --- a/python/packages/ag-ui/tests/ag_ui/conftest.py +++ b/python/packages/ag-ui/tests/ag_ui/conftest.py @@ -13,13 +13,13 @@ AgentResponseUpdate, AgentThread, BaseChatClient, - ChatClientProtocol, - ChatMessage, ChatOptions, ChatResponse, ChatResponseUpdate, Content, + Message, SupportsAgentRun, + SupportsChatGetResponse, ) from agent_framework._clients import OptionsCoT from agent_framework._middleware import ChatMiddlewareLayer @@ -43,7 +43,7 @@ class StreamingChatClientStub( BaseChatClient[OptionsCoT], Generic[OptionsCoT], ): - """Typed streaming stub that satisfies ChatClientProtocol.""" + """Typed streaming stub that satisfies SupportsChatGetResponse.""" def __init__(self, stream_fn: StreamFn, response_fn: ResponseFn | None = None) -> None: super().__init__(function_middleware=[]) @@ -55,7 +55,7 @@ def __init__(self, stream_fn: StreamFn, response_fn: ResponseFn | None = None) - @overload def get_response( self, - messages: str | ChatMessage | Sequence[str | ChatMessage], + messages: str | Message | Sequence[str | Message], *, stream: Literal[False] = ..., options: ChatOptions[Any], @@ -65,7 +65,7 @@ def get_response( @overload def get_response( self, - messages: str | ChatMessage | Sequence[str | ChatMessage], + messages: str | Message | Sequence[str | Message], *, stream: Literal[False] = ..., options: OptionsCoT | ChatOptions[None] | None = ..., @@ -75,7 +75,7 @@ def get_response( @overload def get_response( self, - messages: str | ChatMessage | Sequence[str | ChatMessage], + messages: str | Message | Sequence[str | Message], *, stream: Literal[True], options: OptionsCoT | ChatOptions[Any] | None = ..., @@ -84,7 +84,7 @@ def get_response( def get_response( self, - messages: str | ChatMessage | Sequence[str | ChatMessage], 
+ messages: str | Message | Sequence[str | Message], *, stream: bool = False, options: OptionsCoT | ChatOptions[Any] | None = None, @@ -106,7 +106,7 @@ def get_response( def _inner_get_response( self, *, - messages: Sequence[ChatMessage], + messages: Sequence[Message], stream: bool = False, options: Mapping[str, Any], **kwargs: Any, @@ -121,7 +121,7 @@ def _finalize(updates: Sequence[ChatResponseUpdate]) -> ChatResponse: return self._get_response_impl(messages, options, **kwargs) async def _get_response_impl( - self, messages: Sequence[ChatMessage], options: Mapping[str, Any], **kwargs: Any + self, messages: Sequence[Message], options: Mapping[str, Any], **kwargs: Any ) -> ChatResponse: """Non-streaming implementation.""" if self._response_fn is not None: @@ -132,7 +132,7 @@ async def _get_response_impl( contents.extend(update.contents) return ChatResponse( - messages=[ChatMessage(role="assistant", contents=contents)], + messages=[Message(role="assistant", contents=contents)], response_id="stub-response", ) @@ -141,7 +141,7 @@ def stream_from_updates(updates: list[ChatResponseUpdate]) -> StreamFn: """Create a stream function that yields from a static list of updates.""" async def _stream( - messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any + messages: MutableSequence[Message], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: for update in updates: yield update @@ -175,7 +175,7 @@ def __init__( @overload def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: Literal[False] = ..., thread: AgentThread | None = None, @@ -185,7 +185,7 @@ def run( @overload def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: Literal[True], thread: AgentThread | None = None, @@ -194,7 +194,7 @@ def run( 
def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: bool = False, thread: AgentThread | None = None, @@ -226,7 +226,7 @@ def get_new_thread(self, **kwargs: Any) -> AgentThread: @pytest.fixture -def streaming_chat_client_stub() -> type[ChatClientProtocol]: +def streaming_chat_client_stub() -> type[SupportsChatGetResponse]: """Return the StreamingChatClientStub class for creating test instances.""" return StreamingChatClientStub # type: ignore[return-value] diff --git a/python/packages/ag-ui/tests/ag_ui/test_ag_ui_client.py b/python/packages/ag-ui/tests/ag_ui/test_ag_ui_client.py index b5dc73bd02..7e7fd7cded 100644 --- a/python/packages/ag-ui/tests/ag_ui/test_ag_ui_client.py +++ b/python/packages/ag-ui/tests/ag_ui/test_ag_ui_client.py @@ -7,11 +7,11 @@ from typing import Any from agent_framework import ( - ChatMessage, ChatOptions, ChatResponse, ChatResponseUpdate, Content, + Message, ResponseStream, tool, ) @@ -29,13 +29,11 @@ def http_service(self) -> AGUIHttpService: """Expose http service for monkeypatching.""" return self._http_service - def extract_state_from_messages( - self, messages: list[ChatMessage] - ) -> tuple[list[ChatMessage], dict[str, Any] | None]: + def extract_state_from_messages(self, messages: list[Message]) -> tuple[list[Message], dict[str, Any] | None]: """Expose state extraction helper.""" return self._extract_state_from_messages(messages) - def convert_messages_to_agui_format(self, messages: list[ChatMessage]) -> list[dict[str, Any]]: + def convert_messages_to_agui_format(self, messages: list[Message]) -> list[dict[str, Any]]: """Expose message conversion helper.""" return self._convert_messages_to_agui_format(messages) @@ -44,7 +42,7 @@ def get_thread_id(self, options: dict[str, Any]) -> str: return self._get_thread_id(options) def inner_get_response( - self, *, messages: MutableSequence[ChatMessage], options: dict[str, Any], 
stream: bool = False + self, *, messages: MutableSequence[Message], options: dict[str, Any], stream: bool = False ) -> Awaitable[ChatResponse] | ResponseStream[ChatResponseUpdate, ChatResponse]: """Proxy to protected response call.""" return self._inner_get_response(messages=messages, options=options, stream=stream) @@ -69,8 +67,8 @@ async def test_extract_state_from_messages_no_state(self) -> None: """Test state extraction when no state is present.""" client = TestableAGUIChatClient(endpoint="http://localhost:8888/") messages = [ - ChatMessage(role="user", text="Hello"), - ChatMessage(role="assistant", text="Hi there"), + Message(role="user", text="Hello"), + Message(role="assistant", text="Hi there"), ] result_messages, state = client.extract_state_from_messages(messages) @@ -89,8 +87,8 @@ async def test_extract_state_from_messages_with_state(self) -> None: state_b64 = base64.b64encode(state_json.encode("utf-8")).decode("utf-8") messages = [ - ChatMessage(role="user", text="Hello"), - ChatMessage( + Message(role="user", text="Hello"), + Message( role="user", contents=[Content.from_uri(uri=f"data:application/json;base64,{state_b64}")], ), @@ -112,7 +110,7 @@ async def test_extract_state_invalid_json(self) -> None: state_b64 = base64.b64encode(invalid_json.encode("utf-8")).decode("utf-8") messages = [ - ChatMessage( + Message( role="user", contents=[Content.from_uri(uri=f"data:application/json;base64,{state_b64}")], ), @@ -127,8 +125,8 @@ async def test_convert_messages_to_agui_format(self) -> None: """Test message conversion to AG-UI format.""" client = TestableAGUIChatClient(endpoint="http://localhost:8888/") messages = [ - ChatMessage(role="user", text="What is the weather?"), - ChatMessage(role="assistant", text="Let me check.", message_id="msg_123"), + Message(role="user", text="What is the weather?"), + Message(role="assistant", text="Let me check.", message_id="msg_123"), ] agui_messages = client.convert_messages_to_agui_format(messages) @@ -175,7 +173,7 @@ 
async def mock_post_run(*args: object, **kwargs: Any) -> AsyncGenerator[dict[str client = TestableAGUIChatClient(endpoint="http://localhost:8888/") monkeypatch.setattr(client.http_service, "post_run", mock_post_run) - messages = [ChatMessage(role="user", text="Test message")] + messages = [Message(role="user", text="Test message")] chat_options = ChatOptions() updates: list[ChatResponseUpdate] = [] @@ -208,7 +206,7 @@ async def mock_post_run(*args: object, **kwargs: Any) -> AsyncGenerator[dict[str client = TestableAGUIChatClient(endpoint="http://localhost:8888/") monkeypatch.setattr(client.http_service, "post_run", mock_post_run) - messages = [ChatMessage(role="user", text="Test message")] + messages = [Message(role="user", text="Test message")] chat_options = {} response = await client.inner_get_response(messages=messages, options=chat_options) @@ -251,7 +249,7 @@ async def mock_post_run(*args: object, **kwargs: Any) -> AsyncGenerator[dict[str client = TestableAGUIChatClient(endpoint="http://localhost:8888/") monkeypatch.setattr(client.http_service, "post_run", mock_post_run) - messages = [ChatMessage(role="user", text="Test with tools")] + messages = [Message(role="user", text="Test with tools")] chat_options = ChatOptions(tools=[test_tool]) response = await client.inner_get_response(messages=messages, options=chat_options) @@ -275,7 +273,7 @@ async def mock_post_run(*args: object, **kwargs: Any) -> AsyncGenerator[dict[str client = TestableAGUIChatClient(endpoint="http://localhost:8888/") monkeypatch.setattr(client.http_service, "post_run", mock_post_run) - messages = [ChatMessage(role="user", text="Test server tool execution")] + messages = [Message(role="user", text="Test server tool execution")] updates: list[ChatResponseUpdate] = [] async for update in client.get_response(messages, stream=True): @@ -317,7 +315,7 @@ async def fake_auto_invoke(*args: object, **kwargs: Any) -> None: client = TestableAGUIChatClient(endpoint="http://localhost:8888/") 
monkeypatch.setattr(client.http_service, "post_run", mock_post_run) - messages = [ChatMessage(role="user", text="Test server tool execution")] + messages = [Message(role="user", text="Test server tool execution")] async for _ in client.get_response( messages, stream=True, options={"tool_choice": "auto", "tools": [client_tool]} @@ -333,8 +331,8 @@ async def test_state_transmission(self, monkeypatch: MonkeyPatch) -> None: state_b64 = base64.b64encode(state_json.encode("utf-8")).decode("utf-8") messages = [ - ChatMessage(role="user", text="Hello"), - ChatMessage( + Message(role="user", text="Hello"), + Message( role="user", contents=[Content.from_uri(uri=f"data:application/json;base64,{state_b64}")], ), diff --git a/python/packages/ag-ui/tests/ag_ui/test_agent_wrapper_comprehensive.py b/python/packages/ag-ui/tests/ag_ui/test_agent_wrapper_comprehensive.py index b61aa1edd3..f57f2809d3 100644 --- a/python/packages/ag-ui/tests/ag_ui/test_agent_wrapper_comprehensive.py +++ b/python/packages/ag-ui/tests/ag_ui/test_agent_wrapper_comprehensive.py @@ -7,7 +7,7 @@ from typing import Any import pytest -from agent_framework import ChatAgent, ChatMessage, ChatOptions, ChatResponseUpdate, Content +from agent_framework import Agent, ChatOptions, ChatResponseUpdate, Content, Message from pydantic import BaseModel @@ -16,11 +16,11 @@ async def test_agent_initialization_basic(streaming_chat_client_stub): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any + messages: MutableSequence[Message], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) - agent = ChatAgent[ChatOptions]( + agent = Agent[ChatOptions]( chat_client=streaming_chat_client_stub(stream_fn), name="test_agent", instructions="Test", @@ -38,11 +38,11 @@ async def 
test_agent_initialization_with_state_schema(streaming_chat_client_stub from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any + messages: MutableSequence[Message], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) - agent = ChatAgent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) state_schema: dict[str, dict[str, Any]] = {"document": {"type": "string"}} wrapper = AgentFrameworkAgent(agent=agent, state_schema=state_schema) @@ -54,11 +54,11 @@ async def test_agent_initialization_with_predict_state_config(streaming_chat_cli from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any + messages: MutableSequence[Message], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) - agent = ChatAgent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) predict_config = {"document": {"tool": "write_doc", "tool_argument": "content"}} wrapper = AgentFrameworkAgent(agent=agent, predict_state_config=predict_config) @@ -70,7 +70,7 @@ async def test_agent_initialization_with_pydantic_state_schema(streaming_chat_cl from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any + messages: MutableSequence[Message], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield 
ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) @@ -78,7 +78,7 @@ class MyState(BaseModel): document: str tags: list[str] = [] - agent = ChatAgent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) wrapper_class_schema = AgentFrameworkAgent(agent=agent, state_schema=MyState) wrapper_instance_schema = AgentFrameworkAgent(agent=agent, state_schema=MyState(document="hi")) @@ -93,11 +93,11 @@ async def test_run_started_event_emission(streaming_chat_client_stub): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any + messages: MutableSequence[Message], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) - agent = ChatAgent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) wrapper = AgentFrameworkAgent(agent=agent) input_data = {"messages": [{"role": "user", "content": "Hi"}]} @@ -117,11 +117,11 @@ async def test_predict_state_custom_event_emission(streaming_chat_client_stub): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any + messages: MutableSequence[Message], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) - agent = ChatAgent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) predict_config = { "document": {"tool": 
"write_doc", "tool_argument": "content"}, "summary": {"tool": "summarize", "tool_argument": "text"}, @@ -149,11 +149,11 @@ async def test_initial_state_snapshot_with_schema(streaming_chat_client_stub): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any + messages: MutableSequence[Message], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) - agent = ChatAgent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) state_schema = {"document": {"type": "string"}} wrapper = AgentFrameworkAgent(agent=agent, state_schema=state_schema) @@ -179,11 +179,11 @@ async def test_state_initialization_object_type(streaming_chat_client_stub): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any + messages: MutableSequence[Message], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) - agent = ChatAgent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) state_schema: dict[str, dict[str, Any]] = {"recipe": {"type": "object", "properties": {}}} wrapper = AgentFrameworkAgent(agent=agent, state_schema=state_schema) @@ -206,11 +206,11 @@ async def test_state_initialization_array_type(streaming_chat_client_stub): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any + messages: MutableSequence[Message], 
options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) - agent = ChatAgent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) state_schema: dict[str, dict[str, Any]] = {"steps": {"type": "array", "items": {}}} wrapper = AgentFrameworkAgent(agent=agent, state_schema=state_schema) @@ -233,11 +233,11 @@ async def test_run_finished_event_emission(streaming_chat_client_stub): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any + messages: MutableSequence[Message], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) - agent = ChatAgent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) wrapper = AgentFrameworkAgent(agent=agent) input_data = {"messages": [{"role": "user", "content": "Hi"}]} @@ -255,11 +255,11 @@ async def test_tool_result_confirm_changes_accepted(streaming_chat_client_stub): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any + messages: MutableSequence[Message], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[Content.from_text(text="Document updated")]) - agent = ChatAgent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) wrapper = AgentFrameworkAgent( agent=agent, 
state_schema={"document": {"type": "string"}}, @@ -302,11 +302,11 @@ async def test_tool_result_confirm_changes_rejected(streaming_chat_client_stub): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any + messages: MutableSequence[Message], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[Content.from_text(text="OK")]) - agent = ChatAgent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) wrapper = AgentFrameworkAgent(agent=agent) # Simulate tool result message with rejection @@ -336,11 +336,11 @@ async def test_tool_result_function_approval_accepted(streaming_chat_client_stub from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any + messages: MutableSequence[Message], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[Content.from_text(text="OK")]) - agent = ChatAgent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) wrapper = AgentFrameworkAgent(agent=agent) # Simulate tool result with multiple steps @@ -382,11 +382,11 @@ async def test_tool_result_function_approval_rejected(streaming_chat_client_stub from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any + messages: MutableSequence[Message], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[Content.from_text(text="OK")]) - agent = 
ChatAgent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) wrapper = AgentFrameworkAgent(agent=agent) # Simulate tool result rejection with steps @@ -425,13 +425,13 @@ async def test_thread_metadata_tracking(streaming_chat_client_stub): captured_options: dict[str, Any] = {} async def stream_fn( - messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any + messages: MutableSequence[Message], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: # Capture options to verify internal keys are NOT passed to chat client captured_options.update(options) yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) - agent = ChatAgent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) wrapper = AgentFrameworkAgent(agent=agent) input_data = { @@ -467,13 +467,13 @@ async def test_state_context_injection(streaming_chat_client_stub): captured_options: dict[str, Any] = {} async def stream_fn( - messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any + messages: MutableSequence[Message], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: # Capture options to verify internal keys are NOT passed to chat client captured_options.update(options) yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) - agent = ChatAgent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) wrapper = AgentFrameworkAgent( agent=agent, state_schema={"document": {"type": "string"}}, @@ -506,11 +506,11 @@ async def 
test_no_messages_provided(streaming_chat_client_stub): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any + messages: MutableSequence[Message], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) - agent = ChatAgent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) wrapper = AgentFrameworkAgent(agent=agent) input_data: dict[str, Any] = {"messages": []} @@ -530,11 +530,11 @@ async def test_message_end_event_emission(streaming_chat_client_stub): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any + messages: MutableSequence[Message], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[Content.from_text(text="Hello world")]) - agent = ChatAgent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) wrapper = AgentFrameworkAgent(agent=agent) input_data: dict[str, Any] = {"messages": [{"role": "user", "content": "Hi"}]} @@ -558,13 +558,13 @@ async def test_error_handling_with_exception(streaming_chat_client_stub): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any + messages: MutableSequence[Message], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: if False: yield ChatResponseUpdate(contents=[]) raise RuntimeError("Simulated failure") - agent = ChatAgent(name="test_agent", 
instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) wrapper = AgentFrameworkAgent(agent=agent) input_data: dict[str, Any] = {"messages": [{"role": "user", "content": "Hi"}]} @@ -579,13 +579,13 @@ async def test_json_decode_error_in_tool_result(streaming_chat_client_stub): from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any + messages: MutableSequence[Message], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: if False: yield ChatResponseUpdate(contents=[]) raise AssertionError("ChatClient should not be called with orphaned tool result") - agent = ChatAgent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) wrapper = AgentFrameworkAgent(agent=agent) # Send invalid JSON as tool result without preceding tool call @@ -618,13 +618,13 @@ async def test_agent_with_use_service_thread_is_false(streaming_chat_client_stub request_service_thread_id: str | None = None async def stream_fn( - messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + messages: MutableSequence[Message], chat_options: ChatOptions, **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate( contents=[Content.from_text(text="Response")], response_id="resp_67890", conversation_id="conv_12345" ) - agent = ChatAgent(chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(chat_client=streaming_chat_client_stub(stream_fn)) wrapper = AgentFrameworkAgent(agent=agent, use_service_thread=False) input_data = {"messages": [{"role": "user", "content": "Hi"}], "thread_id": "conv_123456"} @@ -642,7 +642,7 @@ async def 
test_agent_with_use_service_thread_is_true(streaming_chat_client_stub) request_service_thread_id: str | None = None async def stream_fn( - messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + messages: MutableSequence[Message], chat_options: ChatOptions, **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: nonlocal request_service_thread_id thread = kwargs.get("thread") @@ -651,7 +651,7 @@ async def stream_fn( contents=[Content.from_text(text="Response")], response_id="resp_67890", conversation_id="conv_12345" ) - agent = ChatAgent(chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(chat_client=streaming_chat_client_stub(stream_fn)) wrapper = AgentFrameworkAgent(agent=agent, use_service_thread=True) input_data = {"messages": [{"role": "user", "content": "Hi"}], "thread_id": "conv_123456"} @@ -679,14 +679,14 @@ def get_datetime() -> str: return "2025/12/01 12:00:00" async def stream_fn( - messages: MutableSequence[ChatMessage], options: ChatOptions, **kwargs: Any + messages: MutableSequence[Message], options: ChatOptions, **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: # Capture the messages received by the chat client messages_received.clear() messages_received.extend(messages) yield ChatResponseUpdate(contents=[Content.from_text(text="Processing completed")]) - agent = ChatAgent( + agent = Agent( chat_client=streaming_chat_client_stub(stream_fn), name="test_agent", instructions="Test", @@ -770,14 +770,14 @@ def delete_all_data() -> str: return "All data deleted" async def stream_fn( - messages: MutableSequence[ChatMessage], options: ChatOptions, **kwargs: Any + messages: MutableSequence[Message], options: ChatOptions, **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: # Capture the messages received by the chat client messages_received.clear() messages_received.extend(messages) yield ChatResponseUpdate(contents=[Content.from_text(text="Operation cancelled")]) - agent = ChatAgent( + agent = Agent( 
name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn), diff --git a/python/packages/ag-ui/tests/ag_ui/test_endpoint.py b/python/packages/ag-ui/tests/ag_ui/test_endpoint.py index 4c1f03a49d..5ebfc471f6 100644 --- a/python/packages/ag-ui/tests/ag_ui/test_endpoint.py +++ b/python/packages/ag-ui/tests/ag_ui/test_endpoint.py @@ -5,7 +5,7 @@ import json import pytest -from agent_framework import ChatAgent, ChatResponseUpdate, Content +from agent_framework import Agent, ChatResponseUpdate, Content from fastapi import FastAPI, Header, HTTPException from fastapi.params import Depends from fastapi.testclient import TestClient @@ -28,7 +28,7 @@ def _build(response_text: str = "Test response"): async def test_add_endpoint_with_agent_protocol(build_chat_client): """Test adding endpoint with raw SupportsAgentRun.""" app = FastAPI() - agent = ChatAgent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) add_agent_framework_fastapi_endpoint(app, agent, path="/test-agent") @@ -42,7 +42,7 @@ async def test_add_endpoint_with_agent_protocol(build_chat_client): async def test_add_endpoint_with_wrapped_agent(build_chat_client): """Test adding endpoint with pre-wrapped AgentFrameworkAgent.""" app = FastAPI() - agent = ChatAgent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) wrapped_agent = AgentFrameworkAgent(agent=agent, name="wrapped") add_agent_framework_fastapi_endpoint(app, wrapped_agent, path="/wrapped-agent") @@ -57,7 +57,7 @@ async def test_add_endpoint_with_wrapped_agent(build_chat_client): async def test_endpoint_with_state_schema(build_chat_client): """Test endpoint with state_schema parameter.""" app = FastAPI() - agent = ChatAgent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = 
Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) state_schema = {"document": {"type": "string"}} add_agent_framework_fastapi_endpoint(app, agent, path="/stateful", state_schema=state_schema) @@ -73,7 +73,7 @@ async def test_endpoint_with_state_schema(build_chat_client): async def test_endpoint_with_default_state_seed(build_chat_client): """Test endpoint seeds default state when client omits it.""" app = FastAPI() - agent = ChatAgent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) state_schema = {"proverbs": {"type": "array"}} default_state = {"proverbs": ["Keep the original."]} @@ -100,7 +100,7 @@ async def test_endpoint_with_default_state_seed(build_chat_client): async def test_endpoint_with_predict_state_config(build_chat_client): """Test endpoint with predict_state_config parameter.""" app = FastAPI() - agent = ChatAgent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) predict_config = {"document": {"tool": "write_doc", "tool_argument": "content"}} add_agent_framework_fastapi_endpoint(app, agent, path="/predictive", predict_state_config=predict_config) @@ -114,7 +114,7 @@ async def test_endpoint_with_predict_state_config(build_chat_client): async def test_endpoint_request_logging(build_chat_client): """Test that endpoint logs request details.""" app = FastAPI() - agent = ChatAgent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) add_agent_framework_fastapi_endpoint(app, agent, path="/logged") @@ -134,7 +134,7 @@ async def test_endpoint_request_logging(build_chat_client): async def test_endpoint_event_streaming(build_chat_client): """Test that endpoint streams events correctly.""" app = FastAPI() - 
agent = ChatAgent(name="test", instructions="Test agent", chat_client=build_chat_client("Streamed response")) + agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client("Streamed response")) add_agent_framework_fastapi_endpoint(app, agent, path="/stream") @@ -168,7 +168,7 @@ async def test_endpoint_event_streaming(build_chat_client): async def test_endpoint_error_handling(build_chat_client): """Test endpoint error handling during request parsing.""" app = FastAPI() - agent = ChatAgent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) add_agent_framework_fastapi_endpoint(app, agent, path="/failing") @@ -184,8 +184,8 @@ async def test_endpoint_error_handling(build_chat_client): async def test_endpoint_multiple_paths(build_chat_client): """Test adding multiple endpoints with different paths.""" app = FastAPI() - agent1 = ChatAgent(name="agent1", instructions="First agent", chat_client=build_chat_client("Response 1")) - agent2 = ChatAgent(name="agent2", instructions="Second agent", chat_client=build_chat_client("Response 2")) + agent1 = Agent(name="agent1", instructions="First agent", chat_client=build_chat_client("Response 1")) + agent2 = Agent(name="agent2", instructions="Second agent", chat_client=build_chat_client("Response 2")) add_agent_framework_fastapi_endpoint(app, agent1, path="/agent1") add_agent_framework_fastapi_endpoint(app, agent2, path="/agent2") @@ -202,7 +202,7 @@ async def test_endpoint_multiple_paths(build_chat_client): async def test_endpoint_default_path(build_chat_client): """Test endpoint with default path.""" app = FastAPI() - agent = ChatAgent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) add_agent_framework_fastapi_endpoint(app, agent) @@ -215,7 +215,7 @@ async def 
test_endpoint_default_path(build_chat_client): async def test_endpoint_response_headers(build_chat_client): """Test that endpoint sets correct response headers.""" app = FastAPI() - agent = ChatAgent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) add_agent_framework_fastapi_endpoint(app, agent, path="/headers") @@ -231,7 +231,7 @@ async def test_endpoint_response_headers(build_chat_client): async def test_endpoint_empty_messages(build_chat_client): """Test endpoint with empty messages list.""" app = FastAPI() - agent = ChatAgent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) add_agent_framework_fastapi_endpoint(app, agent, path="/empty") @@ -244,7 +244,7 @@ async def test_endpoint_empty_messages(build_chat_client): async def test_endpoint_complex_input(build_chat_client): """Test endpoint with complex input data.""" app = FastAPI() - agent = ChatAgent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) add_agent_framework_fastapi_endpoint(app, agent, path="/complex") @@ -269,7 +269,7 @@ async def test_endpoint_complex_input(build_chat_client): async def test_endpoint_openapi_schema(build_chat_client): """Test that endpoint generates proper OpenAPI schema with request model.""" app = FastAPI() - agent = ChatAgent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) add_agent_framework_fastapi_endpoint(app, agent, path="/schema-test") @@ -313,7 +313,7 @@ async def test_endpoint_openapi_schema(build_chat_client): async def test_endpoint_default_tags(build_chat_client): """Test that endpoint uses default 'AG-UI' tag.""" app = 
FastAPI() - agent = ChatAgent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) add_agent_framework_fastapi_endpoint(app, agent, path="/default-tags") @@ -331,7 +331,7 @@ async def test_endpoint_default_tags(build_chat_client): async def test_endpoint_custom_tags(build_chat_client): """Test that endpoint accepts custom tags.""" app = FastAPI() - agent = ChatAgent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) add_agent_framework_fastapi_endpoint(app, agent, path="/custom-tags", tags=["Custom", "Agent"]) @@ -349,7 +349,7 @@ async def test_endpoint_custom_tags(build_chat_client): async def test_endpoint_missing_required_field(build_chat_client): """Test that endpoint validates required fields with Pydantic.""" app = FastAPI() - agent = ChatAgent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) add_agent_framework_fastapi_endpoint(app, agent, path="/validation") @@ -368,7 +368,7 @@ async def test_endpoint_internal_error_handling(build_chat_client): from unittest.mock import patch app = FastAPI() - agent = ChatAgent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) # Use default_state to trigger the code path that can raise an exception add_agent_framework_fastapi_endpoint(app, agent, path="/error-test", default_state={"key": "value"}) @@ -387,7 +387,7 @@ async def test_endpoint_internal_error_handling(build_chat_client): async def test_endpoint_with_dependencies_blocks_unauthorized(build_chat_client): """Test that endpoint blocks requests when authentication dependency fails.""" app = FastAPI() - agent = 
ChatAgent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) async def require_api_key(x_api_key: str | None = Header(None)): if x_api_key != "secret-key": @@ -406,7 +406,7 @@ async def require_api_key(x_api_key: str | None = Header(None)): async def test_endpoint_with_dependencies_allows_authorized(build_chat_client): """Test that endpoint allows requests when authentication dependency passes.""" app = FastAPI() - agent = ChatAgent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) async def require_api_key(x_api_key: str | None = Header(None)): if x_api_key != "secret-key": @@ -429,7 +429,7 @@ async def require_api_key(x_api_key: str | None = Header(None)): async def test_endpoint_with_multiple_dependencies(build_chat_client): """Test that endpoint supports multiple dependencies.""" app = FastAPI() - agent = ChatAgent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) execution_order: list[str] = [] @@ -457,7 +457,7 @@ async def second_dependency(): async def test_endpoint_without_dependencies_is_accessible(build_chat_client): """Test that endpoint without dependencies remains accessible (backward compatibility).""" app = FastAPI() - agent = ChatAgent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) # No dependencies parameter - should be accessible without auth add_agent_framework_fastapi_endpoint(app, agent, path="/open") diff --git a/python/packages/ag-ui/tests/ag_ui/test_helpers.py b/python/packages/ag-ui/tests/ag_ui/test_helpers.py index b4a7e9f047..7173f5c6b3 100644 --- 
a/python/packages/ag-ui/tests/ag_ui/test_helpers.py +++ b/python/packages/ag-ui/tests/ag_ui/test_helpers.py @@ -2,7 +2,7 @@ """Tests for orchestration helper functions.""" -from agent_framework import ChatMessage, Content +from agent_framework import Content, Message from agent_framework_ag_ui._orchestration._helpers import ( approval_steps, @@ -29,8 +29,8 @@ def test_empty_messages(self): def test_no_tool_calls(self): """Returns empty set when no tool calls in messages.""" messages = [ - ChatMessage(role="user", contents=[Content.from_text("Hello")]), - ChatMessage(role="assistant", contents=[Content.from_text("Hi there")]), + Message(role="user", contents=[Content.from_text("Hello")]), + Message(role="assistant", contents=[Content.from_text("Hi there")]), ] result = pending_tool_call_ids(messages) assert result == set() @@ -38,7 +38,7 @@ def test_no_tool_calls(self): def test_pending_tool_call(self): """Returns pending tool call ID when no result exists.""" messages = [ - ChatMessage( + Message( role="assistant", contents=[Content.from_function_call(call_id="call_123", name="get_weather", arguments="{}")], ), @@ -49,11 +49,11 @@ def test_pending_tool_call(self): def test_resolved_tool_call(self): """Returns empty set when tool call has result.""" messages = [ - ChatMessage( + Message( role="assistant", contents=[Content.from_function_call(call_id="call_123", name="get_weather", arguments="{}")], ), - ChatMessage( + Message( role="tool", contents=[Content.from_function_result(call_id="call_123", result="sunny")], ), @@ -64,7 +64,7 @@ def test_resolved_tool_call(self): def test_multiple_tool_calls_some_resolved(self): """Returns only unresolved tool call IDs.""" messages = [ - ChatMessage( + Message( role="assistant", contents=[ Content.from_function_call(call_id="call_1", name="tool_a", arguments="{}"), @@ -72,11 +72,11 @@ def test_multiple_tool_calls_some_resolved(self): Content.from_function_call(call_id="call_3", name="tool_c", arguments="{}"), ], ), - 
ChatMessage( + Message( role="tool", contents=[Content.from_function_result(call_id="call_1", result="result_a")], ), - ChatMessage( + Message( role="tool", contents=[Content.from_function_result(call_id="call_3", result="result_c")], ), @@ -90,7 +90,7 @@ class TestIsStateContextMessage: def test_state_context_message(self): """Returns True for state context message.""" - message = ChatMessage( + message = Message( role="system", contents=[Content.from_text("Current state of the application: {}")], ) @@ -98,7 +98,7 @@ def test_state_context_message(self): def test_non_system_message(self): """Returns False for non-system message.""" - message = ChatMessage( + message = Message( role="user", contents=[Content.from_text("Current state of the application: {}")], ) @@ -106,7 +106,7 @@ def test_non_system_message(self): def test_system_message_without_state_prefix(self): """Returns False for system message without state prefix.""" - message = ChatMessage( + message = Message( role="system", contents=[Content.from_text("You are a helpful assistant.")], ) @@ -114,7 +114,7 @@ def test_system_message_without_state_prefix(self): def test_empty_contents(self): """Returns False for message with empty contents.""" - message = ChatMessage(role="system", contents=[]) + message = Message(role="system", contents=[]) assert is_state_context_message(message) is False @@ -342,7 +342,7 @@ def test_empty_messages(self): def test_no_approval_response(self): """Returns None when no approval response in last message.""" messages = [ - ChatMessage(role="assistant", contents=[Content.from_text("Hello")]), + Message(role="assistant", contents=[Content.from_text("Hello")]), ] result = latest_approval_response(messages) assert result is None @@ -357,7 +357,7 @@ def test_finds_approval_response(self): function_call=fc, ) messages = [ - ChatMessage(role="user", contents=[approval_content]), + Message(role="user", contents=[approval_content]), ] result = latest_approval_response(messages) assert 
result is approval_content diff --git a/python/packages/ag-ui/tests/ag_ui/test_message_adapters.py b/python/packages/ag-ui/tests/ag_ui/test_message_adapters.py index 47970d7005..61cd9f1d06 100644 --- a/python/packages/ag-ui/tests/ag_ui/test_message_adapters.py +++ b/python/packages/ag-ui/tests/ag_ui/test_message_adapters.py @@ -5,7 +5,7 @@ import json import pytest -from agent_framework import ChatMessage, Content +from agent_framework import Content, Message from agent_framework_ag_ui._message_adapters import ( agent_framework_messages_to_agui, @@ -24,7 +24,7 @@ def sample_agui_message(): @pytest.fixture def sample_agent_framework_message(): """Create a sample Agent Framework message.""" - return ChatMessage(role="user", contents=[Content.from_text(text="Hello")], message_id="msg-123") + return Message(role="user", contents=[Content.from_text(text="Hello")], message_id="msg-123") def test_agui_to_agent_framework_basic(sample_agui_message): @@ -100,7 +100,7 @@ def test_agui_tool_result_to_agent_framework(): def test_agui_tool_approval_updates_tool_call_arguments(): """Tool approval updates matching tool call arguments for snapshots and agent context. - The LLM context (ChatMessage) should contain only enabled steps, so the LLM + The LLM context (Message) should contain only enabled steps, so the LLM generates responses based on what was actually approved/executed. 
The raw messages (for MESSAGES_SNAPSHOT) should contain all steps with status, @@ -446,7 +446,7 @@ def test_agui_with_tool_calls_to_agent_framework(): def test_agent_framework_to_agui_with_tool_calls(): """Test converting Agent Framework message with tool calls to AG-UI.""" - msg = ChatMessage( + msg = Message( role="assistant", contents=[ Content.from_text(text="Calling tool"), @@ -471,7 +471,7 @@ def test_agent_framework_to_agui_with_tool_calls(): def test_agent_framework_to_agui_multiple_text_contents(): """Test concatenating multiple text contents.""" - msg = ChatMessage( + msg = Message( role="assistant", contents=[Content.from_text(text="Part 1 "), Content.from_text(text="Part 2")], ) @@ -484,7 +484,7 @@ def test_agent_framework_to_agui_multiple_text_contents(): def test_agent_framework_to_agui_no_message_id(): """Test message without message_id - should auto-generate ID.""" - msg = ChatMessage(role="user", contents=[Content.from_text(text="Hello")]) + msg = Message(role="user", contents=[Content.from_text(text="Hello")]) messages = agent_framework_messages_to_agui([msg]) @@ -496,7 +496,7 @@ def test_agent_framework_to_agui_no_message_id(): def test_agent_framework_to_agui_system_role(): """Test system role conversion.""" - msg = ChatMessage(role="system", contents=[Content.from_text(text="System")]) + msg = Message(role="system", contents=[Content.from_text(text="System")]) messages = agent_framework_messages_to_agui([msg]) @@ -541,7 +541,7 @@ def test_extract_text_from_custom_contents(): def test_agent_framework_to_agui_function_result_dict(): """Test converting FunctionResultContent with dict result to AG-UI.""" - msg = ChatMessage( + msg = Message( role="tool", contents=[Content.from_function_result(call_id="call-123", result={"key": "value", "count": 42})], message_id="msg-789", @@ -558,7 +558,7 @@ def test_agent_framework_to_agui_function_result_dict(): def test_agent_framework_to_agui_function_result_none(): """Test converting FunctionResultContent 
with None result to AG-UI.""" - msg = ChatMessage( + msg = Message( role="tool", contents=[Content.from_function_result(call_id="call-123", result=None)], message_id="msg-789", @@ -574,7 +574,7 @@ def test_agent_framework_to_agui_function_result_none(): def test_agent_framework_to_agui_function_result_string(): """Test converting FunctionResultContent with string result to AG-UI.""" - msg = ChatMessage( + msg = Message( role="tool", contents=[Content.from_function_result(call_id="call-123", result="plain text result")], message_id="msg-789", @@ -589,7 +589,7 @@ def test_agent_framework_to_agui_function_result_string(): def test_agent_framework_to_agui_function_result_empty_list(): """Test converting FunctionResultContent with empty list result to AG-UI.""" - msg = ChatMessage( + msg = Message( role="tool", contents=[Content.from_function_result(call_id="call-123", result=[])], message_id="msg-789", @@ -611,7 +611,7 @@ def test_agent_framework_to_agui_function_result_single_text_content(): class MockTextContent: text: str - msg = ChatMessage( + msg = Message( role="tool", contents=[Content.from_function_result(call_id="call-123", result=[MockTextContent("Hello from MCP!")])], message_id="msg-789", @@ -633,7 +633,7 @@ def test_agent_framework_to_agui_function_result_multiple_text_contents(): class MockTextContent: text: str - msg = ChatMessage( + msg = Message( role="tool", contents=[ Content.from_function_result( diff --git a/python/packages/ag-ui/tests/ag_ui/test_message_hygiene.py b/python/packages/ag-ui/tests/ag_ui/test_message_hygiene.py index d1773bf10c..ed8526e592 100644 --- a/python/packages/ag-ui/tests/ag_ui/test_message_hygiene.py +++ b/python/packages/ag-ui/tests/ag_ui/test_message_hygiene.py @@ -1,6 +1,6 @@ # Copyright (c) Microsoft. All rights reserved. 
-from agent_framework import ChatMessage, Content +from agent_framework import Content, Message from agent_framework_ag_ui._message_adapters import _deduplicate_messages, _sanitize_tool_history @@ -13,7 +13,7 @@ def test_sanitize_tool_history_filters_out_confirm_changes_only_message() -> Non tool for the approval UI flow that shouldn't be sent to the LLM. """ messages = [ - ChatMessage( + Message( role="assistant", contents=[ Content.from_function_call( @@ -23,7 +23,7 @@ def test_sanitize_tool_history_filters_out_confirm_changes_only_message() -> Non ) ], ), - ChatMessage( + Message( role="user", contents=[Content.from_text(text='{"accepted": true}')], ), @@ -44,11 +44,11 @@ def test_sanitize_tool_history_filters_out_confirm_changes_only_message() -> Non def test_deduplicate_messages_prefers_non_empty_tool_results() -> None: messages = [ - ChatMessage( + Message( role="tool", contents=[Content.from_function_result(call_id="call1", result="")], ), - ChatMessage( + Message( role="tool", contents=[Content.from_function_result(call_id="call1", result="result data")], ), @@ -71,13 +71,13 @@ def test_convert_approval_results_to_tool_messages() -> None: # Simulate what happens after _resolve_approval_responses: # A user message contains function_result content (the executed tool result) messages = [ - ChatMessage( + Message( role="assistant", contents=[ Content.from_function_call(call_id="call_123", name="my_mcp_tool", arguments="{}"), ], ), - ChatMessage( + Message( role="user", contents=[ Content.from_function_result(call_id="call_123", result="tool execution result"), @@ -109,13 +109,13 @@ def test_convert_approval_results_preserves_other_user_content() -> None: from agent_framework_ag_ui._run import _convert_approval_results_to_tool_messages messages = [ - ChatMessage( + Message( role="assistant", contents=[ Content.from_function_call(call_id="call_123", name="my_tool", arguments="{}"), ], ), - ChatMessage( + Message( role="user", contents=[ 
Content.from_text(text="User also said something"), @@ -152,12 +152,12 @@ def test_sanitize_tool_history_filters_confirm_changes_keeps_other_tools() -> No """ messages = [ # User asks something - ChatMessage( + Message( role="user", contents=[Content.from_text(text="What time is it?")], ), # Assistant calls MCP tool + confirm_changes - ChatMessage( + Message( role="assistant", contents=[ Content.from_function_call(call_id="call_1", name="get_datetime", arguments="{}"), @@ -165,12 +165,12 @@ def test_sanitize_tool_history_filters_confirm_changes_keeps_other_tools() -> No ], ), # Tool result for the actual MCP tool - ChatMessage( + Message( role="tool", contents=[Content.from_function_result(call_id="call_1", result="2024-01-01 12:00:00")], ), # User asks something else - ChatMessage( + Message( role="user", contents=[Content.from_text(text="What's the date?")], ), @@ -204,12 +204,12 @@ def test_sanitize_tool_history_filters_confirm_changes_from_assistant_messages() respond with "Here's your 5-step plan" instead of "Here's your 2-step plan". 
""" messages = [ - ChatMessage( + Message( role="user", contents=[Content.from_text(text="Build a robot")], ), # Assistant message with both generate_task_steps and confirm_changes - ChatMessage( + Message( role="assistant", contents=[ Content.from_function_call( @@ -225,7 +225,7 @@ def test_sanitize_tool_history_filters_confirm_changes_from_assistant_messages() ], ), # Approval response - ChatMessage( + Message( role="user", contents=[ Content.from_function_approval_response( diff --git a/python/packages/ag-ui/tests/ag_ui/test_run.py b/python/packages/ag-ui/tests/ag_ui/test_run.py index 6428180fc0..9d21bd2d0a 100644 --- a/python/packages/ag-ui/tests/ag_ui/test_run.py +++ b/python/packages/ag-ui/tests/ag_ui/test_run.py @@ -6,7 +6,7 @@ TextMessageEndEvent, TextMessageStartEvent, ) -from agent_framework import ChatMessage, Content +from agent_framework import Content, Message from agent_framework_ag_ui._run import ( FlowState, @@ -212,7 +212,7 @@ class TestInjectStateContext: def test_no_state_message(self): """Returns original messages when no state context needed.""" - messages = [ChatMessage(role="user", contents=[Content.from_text("Hello")])] + messages = [Message(role="user", contents=[Content.from_text("Hello")])] result = _inject_state_context(messages, {}, {}) assert result == messages @@ -224,8 +224,8 @@ def test_empty_messages(self): def test_last_message_not_user(self): """Returns original messages when last message is not from user.""" messages = [ - ChatMessage(role="user", contents=[Content.from_text("Hello")]), - ChatMessage(role="assistant", contents=[Content.from_text("Hi")]), + Message(role="user", contents=[Content.from_text("Hello")]), + Message(role="assistant", contents=[Content.from_text("Hi")]), ] state = {"key": "value"} schema = {"properties": {"key": {"type": "string"}}} @@ -237,8 +237,8 @@ def test_injects_before_last_user_message(self): """Injects state context before last user message.""" messages = [ - ChatMessage(role="system", 
contents=[Content.from_text("You are helpful")]), - ChatMessage(role="user", contents=[Content.from_text("Hello")]), + Message(role="system", contents=[Content.from_text("You are helpful")]), + Message(role="user", contents=[Content.from_text("Hello")]), ] state = {"document": "content"} schema = {"properties": {"document": {"type": "string"}}} @@ -405,7 +405,7 @@ def test_extract_approved_state_updates_no_handler(): """Test _extract_approved_state_updates returns empty with no handler.""" from agent_framework_ag_ui._run import _extract_approved_state_updates - messages = [ChatMessage(role="user", contents=[Content.from_text("Hello")])] + messages = [Message(role="user", contents=[Content.from_text("Hello")])] result = _extract_approved_state_updates(messages, None) assert result == {} @@ -416,7 +416,7 @@ def test_extract_approved_state_updates_no_approval(): from agent_framework_ag_ui._run import _extract_approved_state_updates handler = PredictiveStateHandler(predict_state_config={"doc": {"tool": "write", "tool_argument": "content"}}) - messages = [ChatMessage(role="user", contents=[Content.from_text("Hello")])] + messages = [Message(role="user", contents=[Content.from_text("Hello")])] result = _extract_approved_state_updates(messages, handler) assert result == {} diff --git a/python/packages/ag-ui/tests/ag_ui/test_structured_output.py b/python/packages/ag-ui/tests/ag_ui/test_structured_output.py index d1afdc971c..cd690fcfb7 100644 --- a/python/packages/ag-ui/tests/ag_ui/test_structured_output.py +++ b/python/packages/ag-ui/tests/ag_ui/test_structured_output.py @@ -6,7 +6,7 @@ from collections.abc import AsyncIterator, MutableSequence from typing import Any -from agent_framework import ChatAgent, ChatMessage, ChatOptions, ChatResponseUpdate, Content +from agent_framework import Agent, ChatOptions, ChatResponseUpdate, Content, Message from pydantic import BaseModel @@ -35,13 +35,13 @@ async def test_structured_output_with_recipe(streaming_chat_client_stub, stream_ 
from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], options: ChatOptions, **kwargs: Any + messages: MutableSequence[Message], options: ChatOptions, **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate( contents=[Content.from_text(text='{"recipe": {"name": "Pasta"}, "message": "Here is your recipe"}')] ) - agent = ChatAgent(name="test", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) agent.default_options = ChatOptions(response_format=RecipeOutput) wrapper = AgentFrameworkAgent( @@ -73,7 +73,7 @@ async def test_structured_output_with_steps(streaming_chat_client_stub, stream_f from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], options: ChatOptions, **kwargs: Any + messages: MutableSequence[Message], options: ChatOptions, **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: steps_data = { "steps": [ @@ -83,7 +83,7 @@ async def stream_fn( } yield ChatResponseUpdate(contents=[Content.from_text(text=json.dumps(steps_data))]) - agent = ChatAgent(name="test", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) agent.default_options = ChatOptions(response_format=StepsOutput) wrapper = AgentFrameworkAgent( @@ -116,7 +116,7 @@ async def test_structured_output_with_no_schema_match(streaming_chat_client_stub ChatResponseUpdate(contents=[Content.from_text(text='{"data": {"key": "value"}}')]), ] - agent = ChatAgent( + agent = Agent( name="test", instructions="Test", chat_client=streaming_chat_client_stub(stream_from_updates_fixture(updates)) ) agent.default_options = ChatOptions(response_format=GenericOutput) @@ -149,11 +149,11 @@ class DataOutput(BaseModel): info: str async def 
stream_fn( - messages: MutableSequence[ChatMessage], options: ChatOptions, **kwargs: Any + messages: MutableSequence[Message], options: ChatOptions, **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[Content.from_text(text='{"data": {"key": "value"}, "info": "processed"}')]) - agent = ChatAgent(name="test", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) agent.default_options = ChatOptions(response_format=DataOutput) wrapper = AgentFrameworkAgent( @@ -182,7 +182,7 @@ async def test_no_structured_output_when_no_response_format(streaming_chat_clien updates = [ChatResponseUpdate(contents=[Content.from_text(text="Regular text")])] - agent = ChatAgent( + agent = Agent( name="test", instructions="Test", chat_client=streaming_chat_client_stub(stream_from_updates_fixture(updates)), @@ -208,12 +208,12 @@ async def test_structured_output_with_message_field(streaming_chat_client_stub, from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: MutableSequence[ChatMessage], options: ChatOptions, **kwargs: Any + messages: MutableSequence[Message], options: ChatOptions, **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: output_data = {"recipe": {"name": "Salad"}, "message": "Fresh salad recipe ready"} yield ChatResponseUpdate(contents=[Content.from_text(text=json.dumps(output_data))]) - agent = ChatAgent(name="test", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) agent.default_options = ChatOptions(response_format=RecipeOutput) wrapper = AgentFrameworkAgent( @@ -243,12 +243,12 @@ async def test_empty_updates_no_structured_processing(streaming_chat_client_stub from agent_framework.ag_ui import AgentFrameworkAgent async def stream_fn( - messages: 
MutableSequence[ChatMessage], options: ChatOptions, **kwargs: Any + messages: MutableSequence[Message], options: ChatOptions, **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: if False: yield ChatResponseUpdate(contents=[]) - agent = ChatAgent(name="test", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) agent.default_options = ChatOptions(response_format=RecipeOutput) wrapper = AgentFrameworkAgent(agent=agent) diff --git a/python/packages/ag-ui/tests/ag_ui/test_tooling.py b/python/packages/ag-ui/tests/ag_ui/test_tooling.py index 242f5fd668..f174897087 100644 --- a/python/packages/ag-ui/tests/ag_ui/test_tooling.py +++ b/python/packages/ag-ui/tests/ag_ui/test_tooling.py @@ -2,7 +2,7 @@ from unittest.mock import MagicMock -from agent_framework import ChatAgent, tool +from agent_framework import Agent, tool from agent_framework_ag_ui._orchestration._tooling import ( collect_server_tools, @@ -31,14 +31,14 @@ def regular_tool() -> str: return "result" -def _create_chat_agent_with_tool(tool_name: str = "regular_tool") -> ChatAgent: - """Create a ChatAgent with a mocked chat client and a simple tool. +def _create_chat_agent_with_tool(tool_name: str = "regular_tool") -> Agent: + """Create a Agent with a mocked chat client and a simple tool. Note: tool_name parameter is kept for API compatibility but the tool will always be named 'regular_tool' since tool uses the function name. 
""" mock_chat_client = MagicMock() - return ChatAgent(chat_client=mock_chat_client, tools=[regular_tool]) + return Agent(chat_client=mock_chat_client, tools=[regular_tool]) def test_merge_tools_filters_duplicates() -> None: @@ -59,7 +59,7 @@ def test_register_additional_client_tools_assigns_when_configured() -> None: mock_chat_client = MagicMock(spec=BaseChatClient) mock_chat_client.function_invocation_configuration = normalize_function_invocation_configuration(None) - agent = ChatAgent(chat_client=mock_chat_client) + agent = Agent(chat_client=mock_chat_client) tools = [DummyTool("x")] register_additional_client_tools(agent, tools) @@ -148,7 +148,7 @@ class MockAgent: def test_register_additional_client_tools_no_tools() -> None: """register_additional_client_tools does nothing with None tools.""" mock_chat_client = MagicMock() - agent = ChatAgent(chat_client=mock_chat_client) + agent = Agent(chat_client=mock_chat_client) # Should not raise register_additional_client_tools(agent, None) diff --git a/python/packages/ag-ui/tests/ag_ui/test_utils.py b/python/packages/ag-ui/tests/ag_ui/test_utils.py index 4b680d4b71..0f453132f7 100644 --- a/python/packages/ag-ui/tests/ag_ui/test_utils.py +++ b/python/packages/ag-ui/tests/ag_ui/test_utils.py @@ -404,11 +404,11 @@ def test_safe_json_parse_with_none(): def test_get_role_value_with_enum(): """Test get_role_value with enum role.""" - from agent_framework import ChatMessage, Content + from agent_framework import Content, Message from agent_framework_ag_ui._utils import get_role_value - message = ChatMessage(role="user", contents=[Content.from_text("test")]) + message = Message(role="user", contents=[Content.from_text("test")]) result = get_role_value(message) assert result == "user" diff --git a/python/packages/anthropic/agent_framework_anthropic/_chat_client.py b/python/packages/anthropic/agent_framework_anthropic/_chat_client.py index 1a000ebd69..e14e8ba4d6 100644 --- 
a/python/packages/anthropic/agent_framework_anthropic/_chat_client.py +++ b/python/packages/anthropic/agent_framework_anthropic/_chat_client.py @@ -11,7 +11,6 @@ Annotation, BaseChatClient, ChatAndFunctionMiddlewareTypes, - ChatMessage, ChatMiddlewareLayer, ChatOptions, ChatResponse, @@ -24,6 +23,7 @@ HostedCodeInterpreterTool, HostedMCPTool, HostedWebSearchTool, + Message, ResponseStream, TextSpanRegion, UsageDetails, @@ -356,7 +356,7 @@ class MyOptions(AnthropicChatOptions, total=False): def _inner_get_response( self, *, - messages: Sequence[ChatMessage], + messages: Sequence[Message], options: Mapping[str, Any], stream: bool = False, **kwargs: Any, @@ -385,7 +385,7 @@ async def _get_response() -> ChatResponse: def _prepare_options( self, - messages: Sequence[ChatMessage], + messages: Sequence[Message], options: Mapping[str, Any], **kwargs: Any, ) -> dict[str, Any]: @@ -430,7 +430,7 @@ def _prepare_options( run_options["messages"] = self._prepare_messages_for_anthropic(messages) # system message - first system message is passed as instructions - if messages and isinstance(messages[0], ChatMessage) and messages[0].role == "system": + if messages and isinstance(messages[0], Message) and messages[0].role == "system": run_options["system"] = messages[0].text # betas @@ -516,22 +516,22 @@ def _prepare_response_format(self, response_format: type[BaseModel] | dict[str, "schema": schema, } - def _prepare_messages_for_anthropic(self, messages: Sequence[ChatMessage]) -> list[dict[str, Any]]: + def _prepare_messages_for_anthropic(self, messages: Sequence[Message]) -> list[dict[str, Any]]: """Prepare a list of ChatMessages for the Anthropic client. This skips the first message if it is a system message, as Anthropic expects system instructions as a separate parameter. 
""" # first system message is passed as instructions - if messages and isinstance(messages[0], ChatMessage) and messages[0].role == "system": + if messages and isinstance(messages[0], Message) and messages[0].role == "system": return [self._prepare_message_for_anthropic(msg) for msg in messages[1:]] return [self._prepare_message_for_anthropic(msg) for msg in messages] - def _prepare_message_for_anthropic(self, message: ChatMessage) -> dict[str, Any]: - """Prepare a ChatMessage for the Anthropic client. + def _prepare_message_for_anthropic(self, message: Message) -> dict[str, Any]: + """Prepare a Message for the Anthropic client. Args: - message: The ChatMessage to convert. + message: The Message to convert. Returns: A dictionary representing the message in Anthropic format. @@ -693,7 +693,7 @@ def _process_message(self, message: BetaMessage, options: Mapping[str, Any]) -> return ChatResponse( response_id=message.id, messages=[ - ChatMessage( + Message( role="assistant", contents=self._parse_contents_from_anthropic(message.content), raw_representation=message, diff --git a/python/packages/anthropic/tests/test_anthropic_client.py b/python/packages/anthropic/tests/test_anthropic_client.py index 75c2144258..c3749d247e 100644 --- a/python/packages/anthropic/tests/test_anthropic_client.py +++ b/python/packages/anthropic/tests/test_anthropic_client.py @@ -6,14 +6,14 @@ import pytest from agent_framework import ( - ChatClientProtocol, - ChatMessage, ChatOptions, ChatResponseUpdate, Content, HostedCodeInterpreterTool, HostedMCPTool, HostedWebSearchTool, + Message, + SupportsChatGetResponse, tool, ) from agent_framework.exceptions import ServiceInitializationError @@ -102,7 +102,7 @@ def test_anthropic_client_init_with_client(mock_anthropic_client: MagicMock) -> assert chat_client.anthropic_client is mock_anthropic_client assert chat_client.model_id == "claude-3-5-sonnet-20241022" - assert isinstance(chat_client, ChatClientProtocol) + assert isinstance(chat_client, 
SupportsChatGetResponse) def test_anthropic_client_init_auto_create_client(anthropic_unit_test_env: dict[str, str]) -> None: @@ -148,7 +148,7 @@ def test_anthropic_client_service_url(mock_anthropic_client: MagicMock) -> None: def test_prepare_message_for_anthropic_text(mock_anthropic_client: MagicMock) -> None: """Test converting text message to Anthropic format.""" chat_client = create_test_anthropic_client(mock_anthropic_client) - message = ChatMessage(role="user", text="Hello, world!") + message = Message(role="user", text="Hello, world!") result = chat_client._prepare_message_for_anthropic(message) @@ -161,7 +161,7 @@ def test_prepare_message_for_anthropic_text(mock_anthropic_client: MagicMock) -> def test_prepare_message_for_anthropic_function_call(mock_anthropic_client: MagicMock) -> None: """Test converting function call message to Anthropic format.""" chat_client = create_test_anthropic_client(mock_anthropic_client) - message = ChatMessage( + message = Message( role="assistant", contents=[ Content.from_function_call( @@ -185,7 +185,7 @@ def test_prepare_message_for_anthropic_function_call(mock_anthropic_client: Magi def test_prepare_message_for_anthropic_function_result(mock_anthropic_client: MagicMock) -> None: """Test converting function result message to Anthropic format.""" chat_client = create_test_anthropic_client(mock_anthropic_client) - message = ChatMessage( + message = Message( role="tool", contents=[ Content.from_function_result( @@ -210,7 +210,7 @@ def test_prepare_message_for_anthropic_function_result(mock_anthropic_client: Ma def test_prepare_message_for_anthropic_text_reasoning(mock_anthropic_client: MagicMock) -> None: """Test converting text reasoning message to Anthropic format.""" chat_client = create_test_anthropic_client(mock_anthropic_client) - message = ChatMessage( + message = Message( role="assistant", contents=[Content.from_text_reasoning(text="Let me think about this...")], ) @@ -227,8 +227,8 @@ def 
test_prepare_messages_for_anthropic_with_system(mock_anthropic_client: Magic """Test converting messages list with system message.""" chat_client = create_test_anthropic_client(mock_anthropic_client) messages = [ - ChatMessage(role="system", text="You are a helpful assistant."), - ChatMessage(role="user", text="Hello!"), + Message(role="system", text="You are a helpful assistant."), + Message(role="user", text="Hello!"), ] result = chat_client._prepare_messages_for_anthropic(messages) @@ -243,8 +243,8 @@ def test_prepare_messages_for_anthropic_without_system(mock_anthropic_client: Ma """Test converting messages list without system message.""" chat_client = create_test_anthropic_client(mock_anthropic_client) messages = [ - ChatMessage(role="user", text="Hello!"), - ChatMessage(role="assistant", text="Hi there!"), + Message(role="user", text="Hello!"), + Message(role="assistant", text="Hi there!"), ] result = chat_client._prepare_messages_for_anthropic(messages) @@ -372,7 +372,7 @@ async def test_prepare_options_basic(mock_anthropic_client: MagicMock) -> None: """Test _prepare_options with basic ChatOptions.""" chat_client = create_test_anthropic_client(mock_anthropic_client) - messages = [ChatMessage(role="user", text="Hello")] + messages = [Message(role="user", text="Hello")] chat_options = ChatOptions(max_tokens=100, temperature=0.7) run_options = chat_client._prepare_options(messages, chat_options) @@ -388,8 +388,8 @@ async def test_prepare_options_with_system_message(mock_anthropic_client: MagicM chat_client = create_test_anthropic_client(mock_anthropic_client) messages = [ - ChatMessage(role="system", text="You are helpful."), - ChatMessage(role="user", text="Hello"), + Message(role="system", text="You are helpful."), + Message(role="user", text="Hello"), ] chat_options = ChatOptions() @@ -403,7 +403,7 @@ async def test_prepare_options_with_tool_choice_auto(mock_anthropic_client: Magi """Test _prepare_options with auto tool choice.""" chat_client = 
create_test_anthropic_client(mock_anthropic_client) - messages = [ChatMessage(role="user", text="Hello")] + messages = [Message(role="user", text="Hello")] chat_options = ChatOptions(tool_choice="auto") run_options = chat_client._prepare_options(messages, chat_options) @@ -415,7 +415,7 @@ async def test_prepare_options_with_tool_choice_required(mock_anthropic_client: """Test _prepare_options with required tool choice.""" chat_client = create_test_anthropic_client(mock_anthropic_client) - messages = [ChatMessage(role="user", text="Hello")] + messages = [Message(role="user", text="Hello")] # For required with specific function, need to pass as dict chat_options = ChatOptions(tool_choice={"mode": "required", "required_function_name": "get_weather"}) @@ -429,7 +429,7 @@ async def test_prepare_options_with_tool_choice_none(mock_anthropic_client: Magi """Test _prepare_options with none tool choice.""" chat_client = create_test_anthropic_client(mock_anthropic_client) - messages = [ChatMessage(role="user", text="Hello")] + messages = [Message(role="user", text="Hello")] chat_options = ChatOptions(tool_choice="none") run_options = chat_client._prepare_options(messages, chat_options) @@ -446,7 +446,7 @@ def get_weather(location: str) -> str: """Get weather for a location.""" return f"Weather for {location}" - messages = [ChatMessage(role="user", text="Hello")] + messages = [Message(role="user", text="Hello")] chat_options = ChatOptions(tools=[get_weather]) run_options = chat_client._prepare_options(messages, chat_options) @@ -459,7 +459,7 @@ async def test_prepare_options_with_stop_sequences(mock_anthropic_client: MagicM """Test _prepare_options with stop sequences.""" chat_client = create_test_anthropic_client(mock_anthropic_client) - messages = [ChatMessage(role="user", text="Hello")] + messages = [Message(role="user", text="Hello")] chat_options = ChatOptions(stop=["STOP", "END"]) run_options = chat_client._prepare_options(messages, chat_options) @@ -471,7 +471,7 @@ async 
def test_prepare_options_with_top_p(mock_anthropic_client: MagicMock) -> N """Test _prepare_options with top_p.""" chat_client = create_test_anthropic_client(mock_anthropic_client) - messages = [ChatMessage(role="user", text="Hello")] + messages = [Message(role="user", text="Hello")] chat_options = ChatOptions(top_p=0.9) run_options = chat_client._prepare_options(messages, chat_options) @@ -487,7 +487,7 @@ async def test_prepare_options_filters_internal_kwargs(mock_anthropic_client: Ma """ chat_client = create_test_anthropic_client(mock_anthropic_client) - messages = [ChatMessage(role="user", text="Hello")] + messages = [Message(role="user", text="Hello")] chat_options: ChatOptions = {} # Simulate internal kwargs that get passed through the middleware pipeline @@ -696,7 +696,7 @@ async def test_inner_get_response(mock_anthropic_client: MagicMock) -> None: mock_anthropic_client.beta.messages.create.return_value = mock_message - messages = [ChatMessage(role="user", text="Hi")] + messages = [Message(role="user", text="Hi")] chat_options = ChatOptions(max_tokens=10) response = await chat_client._inner_get_response( # type: ignore[attr-defined] @@ -720,7 +720,7 @@ async def mock_stream(): mock_anthropic_client.beta.messages.create.return_value = mock_stream() - messages = [ChatMessage(role="user", text="Hi")] + messages = [Message(role="user", text="Hi")] chat_options = ChatOptions(max_tokens=10) chunks: list[ChatResponseUpdate] = [] @@ -751,7 +751,7 @@ async def test_anthropic_client_integration_basic_chat() -> None: """Integration test for basic chat completion.""" client = AnthropicClient() - messages = [ChatMessage(role="user", text="Say 'Hello, World!' and nothing else.")] + messages = [Message(role="user", text="Say 'Hello, World!' 
and nothing else.")] response = await client.get_response(messages=messages, options={"max_tokens": 50}) @@ -768,7 +768,7 @@ async def test_anthropic_client_integration_streaming_chat() -> None: """Integration test for streaming chat completion.""" client = AnthropicClient() - messages = [ChatMessage(role="user", text="Count from 1 to 5.")] + messages = [Message(role="user", text="Count from 1 to 5.")] chunks = [] async for chunk in client.get_response(messages=messages, stream=True, options={"max_tokens": 50}): @@ -784,7 +784,7 @@ async def test_anthropic_client_integration_function_calling() -> None: """Integration test for function calling.""" client = AnthropicClient() - messages = [ChatMessage(role="user", text="What's the weather in San Francisco?")] + messages = [Message(role="user", text="What's the weather in San Francisco?")] tools = [get_weather] response = await client.get_response( @@ -804,7 +804,7 @@ async def test_anthropic_client_integration_hosted_tools() -> None: """Integration test for hosted tools.""" client = AnthropicClient() - messages = [ChatMessage(role="user", text="What tools do you have available?")] + messages = [Message(role="user", text="What tools do you have available?")] tools = [ HostedWebSearchTool(), HostedCodeInterpreterTool(), @@ -831,8 +831,8 @@ async def test_anthropic_client_integration_with_system_message() -> None: client = AnthropicClient() messages = [ - ChatMessage(role="system", text="You are a pirate. Always respond like a pirate."), - ChatMessage(role="user", text="Hello!"), + Message(role="system", text="You are a pirate. 
Always respond like a pirate."), + Message(role="user", text="Hello!"), ] response = await client.get_response(messages=messages, options={"max_tokens": 50}) @@ -847,7 +847,7 @@ async def test_anthropic_client_integration_temperature_control() -> None: """Integration test with temperature control.""" client = AnthropicClient() - messages = [ChatMessage(role="user", text="Say hello.")] + messages = [Message(role="user", text="Say hello.")] response = await client.get_response( messages=messages, @@ -865,11 +865,11 @@ async def test_anthropic_client_integration_ordering() -> None: client = AnthropicClient() messages = [ - ChatMessage(role="user", text="Say hello."), - ChatMessage(role="user", text="Then say goodbye."), - ChatMessage(role="assistant", text="Thank you for chatting!"), - ChatMessage(role="assistant", text="Let me know if I can help."), - ChatMessage(role="user", text="Just testing things."), + Message(role="user", text="Say hello."), + Message(role="user", text="Then say goodbye."), + Message(role="assistant", text="Thank you for chatting!"), + Message(role="assistant", text="Let me know if I can help."), + Message(role="user", text="Just testing things."), ] response = await client.get_response(messages=messages) @@ -890,7 +890,7 @@ async def test_anthropic_client_integration_images() -> None: image_bytes = img_file.read() messages = [ - ChatMessage( + Message( role="user", contents=[ Content.from_text(text="Describe this image"), diff --git a/python/packages/azure-ai-search/AGENTS.md b/python/packages/azure-ai-search/AGENTS.md index 14e8f65e96..114ee9d9ab 100644 --- a/python/packages/azure-ai-search/AGENTS.md +++ b/python/packages/azure-ai-search/AGENTS.md @@ -16,7 +16,7 @@ provider = AzureAISearchContextProvider( endpoint="https://your-search.search.windows.net", index_name="your-index", ) -agent = ChatAgent(..., context_provider=provider) +agent = Agent(..., context_provider=provider) ``` ## Import Path diff --git 
a/python/packages/azure-ai-search/agent_framework_azure_ai_search/_search_provider.py b/python/packages/azure-ai-search/agent_framework_azure_ai_search/_search_provider.py index 734d6c08e7..332c477d85 100644 --- a/python/packages/azure-ai-search/agent_framework_azure_ai_search/_search_provider.py +++ b/python/packages/azure-ai-search/agent_framework_azure_ai_search/_search_provider.py @@ -7,7 +7,7 @@ from collections.abc import Awaitable, Callable, MutableSequence from typing import TYPE_CHECKING, Any, ClassVar, Literal -from agent_framework import AGENT_FRAMEWORK_USER_AGENT, ChatMessage, Context, ContextProvider +from agent_framework import AGENT_FRAMEWORK_USER_AGENT, Context, ContextProvider, Message from agent_framework._logging import get_logger from agent_framework._pydantic import AFBaseSettings from agent_framework.exceptions import ServiceInitializationError @@ -511,7 +511,7 @@ async def __aexit__( @override async def invoking( self, - messages: ChatMessage | MutableSequence[ChatMessage], + messages: Message | MutableSequence[Message], **kwargs: Any, ) -> Context: """Retrieve relevant context from Azure AI Search before model invocation. @@ -524,7 +524,7 @@ async def invoking( Context object with retrieved documents as messages. 
""" # Convert to list and filter to USER/ASSISTANT messages with text only - messages_list = [messages] if isinstance(messages, ChatMessage) else list(messages) + messages_list = [messages] if isinstance(messages, Message) else list(messages) def get_role_value(role: str | Any) -> str: return role.value if hasattr(role, "value") else str(role) @@ -553,8 +553,8 @@ def get_role_value(role: str | Any) -> str: return Context() # Create context messages: first message with prompt, then one message per result part - context_messages = [ChatMessage(role="user", text=self.context_prompt)] - context_messages.extend([ChatMessage(role="user", text=part) for part in search_result_parts]) + context_messages = [Message(role="user", text=self.context_prompt)] + context_messages.extend([Message(role="user", text=part) for part in search_result_parts]) return Context(messages=context_messages) @@ -875,7 +875,7 @@ async def _ensure_knowledge_base(self) -> None: user_agent=AGENT_FRAMEWORK_USER_AGENT, ) - async def _agentic_search(self, messages: list[ChatMessage]) -> list[str]: + async def _agentic_search(self, messages: list[Message]) -> list[str]: """Perform agentic retrieval with multi-hop reasoning using Knowledge Bases. 
This mode uses query planning and is slightly slower than semantic search, diff --git a/python/packages/azure-ai-search/tests/test_search_provider.py b/python/packages/azure-ai-search/tests/test_search_provider.py index 4e118df02e..def95cd732 100644 --- a/python/packages/azure-ai-search/tests/test_search_provider.py +++ b/python/packages/azure-ai-search/tests/test_search_provider.py @@ -5,7 +5,7 @@ from unittest.mock import AsyncMock, MagicMock, patch import pytest -from agent_framework import ChatMessage, Context +from agent_framework import Context, Message from agent_framework.azure import AzureAISearchContextProvider, AzureAISearchSettings from agent_framework.exceptions import ServiceInitializationError from azure.core.credentials import AzureKeyCredential @@ -36,10 +36,10 @@ def mock_index_client() -> AsyncMock: @pytest.fixture -def sample_messages() -> list[ChatMessage]: +def sample_messages() -> list[Message]: """Create sample chat messages for testing.""" return [ - ChatMessage(role="user", text="What is in the documents?"), + Message(role="user", text="What is in the documents?"), ] @@ -276,9 +276,7 @@ class TestSemanticSearch: @pytest.mark.asyncio @patch("agent_framework_azure_ai_search._search_provider.SearchClient") - async def test_semantic_search_basic( - self, mock_search_class: MagicMock, sample_messages: list[ChatMessage] - ) -> None: + async def test_semantic_search_basic(self, mock_search_class: MagicMock, sample_messages: list[Message]) -> None: """Test basic semantic search without vector search.""" # Setup mock mock_search_client = AsyncMock() @@ -318,7 +316,7 @@ async def test_semantic_search_empty_query(self, mock_search_class: MagicMock) - ) # Empty message - context = await provider.invoking([ChatMessage(role="user", text="")]) + context = await provider.invoking([Message(role="user", text="")]) assert isinstance(context, Context) assert len(context.messages) == 0 @@ -326,7 +324,7 @@ async def test_semantic_search_empty_query(self, 
mock_search_class: MagicMock) - @pytest.mark.asyncio @patch("agent_framework_azure_ai_search._search_provider.SearchClient") async def test_semantic_search_with_vector_query( - self, mock_search_class: MagicMock, sample_messages: list[ChatMessage] + self, mock_search_class: MagicMock, sample_messages: list[Message] ) -> None: """Test semantic search with vector query.""" # Setup mock @@ -520,10 +518,10 @@ async def test_filters_non_user_assistant_messages(self, mock_search_class: Magi # Mix of message types messages = [ - ChatMessage(role="system", text="System message"), - ChatMessage(role="user", text="User message"), - ChatMessage(role="assistant", text="Assistant message"), - ChatMessage(role="tool", text="Tool message"), + Message(role="system", text="System message"), + Message(role="user", text="User message"), + Message(role="assistant", text="Assistant message"), + Message(role="tool", text="Tool message"), ] context = await provider.invoking(messages) @@ -548,9 +546,9 @@ async def test_filters_empty_messages(self, mock_search_class: MagicMock) -> Non # Messages with empty/whitespace text messages = [ - ChatMessage(role="user", text=""), - ChatMessage(role="user", text=" "), - ChatMessage(role="user", text=""), # ChatMessage with None text becomes empty string + Message(role="user", text=""), + Message(role="user", text=" "), + Message(role="user", text=""), # Message with None text becomes empty string ] context = await provider.invoking(messages) @@ -581,7 +579,7 @@ async def test_citations_included_in_semantic_search(self, mock_search_class: Ma mode="semantic", ) - context = await provider.invoking([ChatMessage(role="user", text="test query")]) + context = await provider.invoking([Message(role="user", text="test query")]) # Check that citation is included assert isinstance(context, Context) @@ -603,7 +601,7 @@ async def test_agentic_search_basic( mock_search_class: MagicMock, mock_index_class: MagicMock, mock_retrieval_class: MagicMock, - 
sample_messages: list[ChatMessage], + sample_messages: list[Message], ) -> None: """Test basic agentic search with Knowledge Base retrieval.""" # Setup search client mock @@ -660,7 +658,7 @@ async def test_agentic_search_no_results( mock_search_class: MagicMock, mock_index_class: MagicMock, mock_retrieval_class: MagicMock, - sample_messages: list[ChatMessage], + sample_messages: list[Message], ) -> None: """Test agentic search when no results are returned.""" # Setup mocks @@ -705,7 +703,7 @@ async def test_agentic_search_with_medium_reasoning( mock_search_class: MagicMock, mock_index_class: MagicMock, mock_retrieval_class: MagicMock, - sample_messages: list[ChatMessage], + sample_messages: list[Message], ) -> None: """Test agentic search with medium reasoning effort.""" # Setup mocks diff --git a/python/packages/azure-ai/agent_framework_azure_ai/_agent_provider.py b/python/packages/azure-ai/agent_framework_azure_ai/_agent_provider.py index dcc0e9db29..c78f359331 100644 --- a/python/packages/azure-ai/agent_framework_azure_ai/_agent_provider.py +++ b/python/packages/azure-ai/agent_framework_azure_ai/_agent_provider.py @@ -4,11 +4,11 @@ import sys from collections.abc import Callable, MutableMapping, Sequence -from typing import TYPE_CHECKING, Any, Generic, cast +from typing import Any, Generic, cast from agent_framework import ( AGENT_FRAMEWORK_USER_AGENT, - ChatAgent, + Agent, ContextProvider, FunctionTool, MiddlewareTypes, @@ -18,16 +18,14 @@ from agent_framework._mcp import MCPTool from agent_framework.exceptions import ServiceInitializationError from azure.ai.agents.aio import AgentsClient -from azure.ai.agents.models import Agent, ResponseFormatJsonSchema, ResponseFormatJsonSchemaType +from azure.ai.agents.models import Agent as AzureAgent +from azure.ai.agents.models import ResponseFormatJsonSchema, ResponseFormatJsonSchemaType from azure.core.credentials_async import AsyncTokenCredential from pydantic import BaseModel, ValidationError -from ._chat_client 
import AzureAIAgentClient +from ._chat_client import AzureAIAgentClient, AzureAIAgentOptions from ._shared import AzureAISettings, from_azure_ai_agent_tools, to_azure_ai_agent_tools -if TYPE_CHECKING: - from ._chat_client import AzureAIAgentOptions - if sys.version_info >= (3, 13): from typing import Self, TypeVar # type: ignore # pragma: no cover else: @@ -38,7 +36,7 @@ from typing_extensions import TypedDict # type: ignore # pragma: no cover -# Type variable for options - allows typed ChatAgent[OptionsCoT] returns +# Type variable for options - allows typed Agent[OptionsCoT] returns # Default matches AzureAIAgentClient's default options type OptionsCoT = TypeVar( "OptionsCoT", @@ -51,7 +49,7 @@ class AzureAIAgentsProvider(Generic[OptionsCoT]): """Provider for Azure AI Agent Service V1 (Persistent Agents API). - This provider enables creating, retrieving, and wrapping Azure AI agents as ChatAgent + This provider enables creating, retrieving, and wrapping Azure AI agents as Agent instances. It manages the underlying AgentsClient lifecycle and provides a high-level interface for agent operations. @@ -179,11 +177,11 @@ async def create_agent( default_options: OptionsCoT | None = None, middleware: Sequence[MiddlewareTypes] | None = None, context_provider: ContextProvider | None = None, - ) -> ChatAgent[OptionsCoT]: - """Create a new agent on the Azure AI service and return a ChatAgent. + ) -> Agent[OptionsCoT]: + """Create a new agent on the Azure AI service and return an Agent. This method creates a persistent agent on the Azure AI service with the specified - configuration and returns a local ChatAgent instance for interaction. + configuration and returns a local Agent instance for interaction. Args: name: The name for the agent. @@ -200,7 +198,7 @@ async def create_agent( context_provider: Context provider to include during agent invocation. Returns: - ChatAgent: A ChatAgent instance configured with the created agent.
+ Agent: An Agent instance configured with the created agent. Raises: ServiceInitializationError: If model deployment name is not available. @@ -240,7 +238,7 @@ async def create_agent( args["response_format"] = self._create_response_format_config(response_format) # Normalize and convert tools - # Local MCP tools (MCPTool) are handled by ChatAgent at runtime, not stored on the Azure agent + # Local MCP tools (MCPTool) are handled by Agent at runtime, not stored on the Azure agent normalized_tools = normalize_tools(tools) if normalized_tools: # Only convert non-MCP tools to Azure AI format @@ -255,7 +253,7 @@ async def create_agent( # Create the agent on the service created_agent = await self._agents_client.create_agent(**args) - # Create ChatAgent wrapper + # Create Agent wrapper return self._to_chat_agent_from_agent( created_agent, normalized_tools, @@ -276,11 +274,11 @@ async def create_agent( default_options: OptionsCoT | None = None, middleware: Sequence[MiddlewareTypes] | None = None, context_provider: ContextProvider | None = None, - ) -> ChatAgent[OptionsCoT]: - """Retrieve an existing agent from the service and return a ChatAgent. + ) -> Agent[OptionsCoT]: + """Retrieve an existing agent from the service and return an Agent. This method fetches an agent by ID from the Azure AI service - and returns a local ChatAgent instance for interaction. + and returns a local Agent instance for interaction. Args: id: The ID of the agent to retrieve from the service. @@ -294,7 +292,7 @@ async def get_agent( context_provider: Context provider to include during agent invocation. Returns: - ChatAgent: A ChatAgent instance configured with the retrieved agent. + Agent: An Agent instance configured with the retrieved agent. Raises: ServiceInitializationError: If required function tools are not provided.
@@ -323,7 +321,7 @@ async def get_agent( def as_agent( self, - agent: Agent, + agent: AzureAgent, tools: ToolProtocol | Callable[..., Any] | MutableMapping[str, Any] @@ -332,8 +330,8 @@ def as_agent( default_options: OptionsCoT | None = None, middleware: Sequence[MiddlewareTypes] | None = None, context_provider: ContextProvider | None = None, - ) -> ChatAgent[OptionsCoT]: - """Wrap an existing Agent SDK object as a ChatAgent without making HTTP calls. + ) -> Agent[OptionsCoT]: + """Wrap an existing Agent SDK object as an Agent without making HTTP calls. Use this method when you already have an Agent object from a previous SDK operation and want to use it with the Agent Framework. @@ -348,7 +346,7 @@ def as_agent( context_provider: Context provider to include during agent invocation. Returns: - ChatAgent: A ChatAgent instance configured with the agent. + Agent: An Agent instance configured with the agent. Raises: ServiceInitializationError: If required function tools are not provided. @@ -363,7 +361,7 @@ def as_agent( instructions="...", ) - # Wrap as ChatAgent + # Wrap as Agent chat_agent = provider.as_agent(sdk_agent) """ # Validate function tools @@ -380,13 +378,13 @@ def as_agent( def _to_chat_agent_from_agent( self, - agent: Agent, + agent: AzureAgent, provided_tools: Sequence[ToolProtocol | MutableMapping[str, Any]] | None = None, default_options: OptionsCoT | None = None, middleware: Sequence[MiddlewareTypes] | None = None, context_provider: ContextProvider | None = None, - ) -> ChatAgent[OptionsCoT]: - """Create a ChatAgent from an Agent SDK object. + ) -> Agent[OptionsCoT]: + """Create an Agent from an Agent SDK object. Args: agent: The Agent SDK object.
@@ -408,7 +406,7 @@ def _to_chat_agent_from_agent( # Merge tools: convert agent's hosted tools + user-provided function tools merged_tools = self._merge_tools(agent.tools, provided_tools) - return ChatAgent( # type: ignore[return-value] + return Agent( # type: ignore[return-value] chat_client=client, id=agent.id, name=agent.name, @@ -433,7 +431,7 @@ def _merge_tools( provided_tools: User-provided tools (Agent Framework format). Returns: - Combined list of tools for the ChatAgent. + Combined list of tools for the Agent. """ merged: list[ToolProtocol | dict[str, Any]] = [] @@ -452,7 +450,7 @@ def _merge_tools( if provided_tools: for provided_tool in provided_tools: # FunctionTool - has implementation for function calling - # MCPTool - ChatAgent handles MCP connection and tool discovery at runtime + # MCPTool - Agent handles MCP connection and tool discovery at runtime if isinstance(provided_tool, (FunctionTool, MCPTool)): merged.append(provided_tool) # type: ignore[reportUnknownArgumentType] diff --git a/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py b/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py index 504f615a0a..563849d529 100644 --- a/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py +++ b/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py @@ -12,11 +12,10 @@ from agent_framework import ( AGENT_FRAMEWORK_USER_AGENT, + Agent, Annotation, BaseChatClient, - ChatAgent, ChatAndFunctionMiddlewareTypes, - ChatMessage, ChatMessageStoreProtocol, ChatMiddlewareLayer, ChatOptions, @@ -31,6 +30,7 @@ HostedFileSearchTool, HostedMCPTool, HostedWebSearchTool, + Message, MiddlewareTypes, ResponseStream, Role, @@ -44,7 +44,9 @@ from agent_framework.observability import ChatTelemetryLayer from azure.ai.agents.aio import AgentsClient from azure.ai.agents.models import ( - Agent, + Agent as AzureAgent, +) +from azure.ai.agents.models import ( AgentsNamedToolChoice, AgentsNamedToolChoiceType, 
AgentsToolChoiceOptionMode, @@ -346,7 +348,7 @@ class MyOptions(AzureAIAgentOptions, total=False): self.should_cleanup_agent = should_cleanup_agent # Track whether we should delete the agent self._agent_created = False # Track whether agent was created inside this class self._should_close_client = should_close_client # Track whether we should close client connection - self._agent_definition: Agent | None = None # Cached definition for existing agent + self._agent_definition: AzureAgent | None = None # Cached definition for existing agent async def __aenter__(self) -> Self: """Async context manager entry.""" @@ -365,7 +367,7 @@ async def close(self) -> None: def _inner_get_response( self, *, - messages: Sequence[ChatMessage], + messages: Sequence[Message], options: Mapping[str, Any], stream: bool = False, **kwargs: Any, @@ -898,7 +900,7 @@ async def _cleanup_agent_if_needed(self) -> None: self.agent_id = None self._agent_created = False - async def _load_agent_definition_if_needed(self) -> Agent | None: + async def _load_agent_definition_if_needed(self) -> AzureAgent | None: """Load and cache agent details if not already loaded.""" if self._agent_definition is None and self.agent_id is not None: self._agent_definition = await self.agents_client.get_agent(self.agent_id) @@ -906,7 +908,7 @@ async def _load_agent_definition_if_needed(self) -> Agent | None: async def _prepare_options( self, - messages: Sequence[ChatMessage], + messages: Sequence[Message], options: Mapping[str, Any], **kwargs: Any, ) -> tuple[dict[str, Any], list[Content] | None]: @@ -1020,7 +1022,7 @@ def _prepare_tool_choice_mode( async def _prepare_tool_definitions_and_resources( self, options: Mapping[str, Any], - agent_definition: Agent | None, + agent_definition: AzureAgent | None, run_options: dict[str, Any], ) -> list[ToolDefinition | dict[str, Any]]: """Prepare tool definitions and resources for the run options.""" @@ -1084,7 +1086,7 @@ def _prepare_mcp_resources(self, tools: 
Sequence[ToolProtocol | MutableMapping[s return mcp_resources def _prepare_messages( - self, messages: Sequence[ChatMessage] + self, messages: Sequence[Message] ) -> tuple[ list[ThreadMessageOptions] | None, list[str], @@ -1301,10 +1303,10 @@ def as_agent( context_provider: ContextProvider | None = None, middleware: Sequence[MiddlewareTypes] | None = None, **kwargs: Any, - ) -> ChatAgent[AzureAIAgentOptionsT]: - """Convert this chat client to a ChatAgent. + ) -> Agent[AzureAIAgentOptionsT]: + """Convert this chat client to an Agent. - This method creates a ChatAgent instance with this client pre-configured. + This method creates an Agent instance with this client pre-configured. It does NOT create an agent on the Azure AI service - the actual agent will be created on the server during the first invocation (run). @@ -1324,7 +1326,7 @@ def as_agent( kwargs: Any additional keyword arguments. Returns: - A ChatAgent instance configured with this chat client. + An Agent instance configured with this chat client. """ return super().as_agent( id=id, diff --git a/python/packages/azure-ai/agent_framework_azure_ai/_client.py b/python/packages/azure-ai/agent_framework_azure_ai/_client.py index 8d262b9b13..0ab52261d8 100644 --- a/python/packages/azure-ai/agent_framework_azure_ai/_client.py +++ b/python/packages/azure-ai/agent_framework_azure_ai/_client.py @@ -8,15 +8,15 @@ from agent_framework import ( AGENT_FRAMEWORK_USER_AGENT, - ChatAgent, + Agent, ChatAndFunctionMiddlewareTypes, - ChatMessage, ChatMessageStoreProtocol, ChatMiddlewareLayer, ContextProvider, FunctionInvocationConfiguration, FunctionInvocationLayer, HostedMCPTool, + Message, MiddlewareTypes, ToolProtocol, get_logger, @@ -329,7 +329,7 @@ async def _get_agent_reference_or_create( if self.agent_name is None: raise ServiceInitializationError( "Agent name is required. Provide 'agent_name' when initializing AzureAIClient " - "or 'name' when initializing ChatAgent." + "or 'name' when initializing Agent."
) # If no agent_version is provided, either use latest version or create a new agent: @@ -396,7 +396,7 @@ async def _close_client_if_needed(self) -> None: @override async def _prepare_options( self, - messages: Sequence[ChatMessage], + messages: Sequence[Message], options: Mapping[str, Any], **kwargs: Any, ) -> dict[str, Any]: @@ -489,9 +489,9 @@ def _get_current_conversation_id(self, options: Mapping[str, Any], **kwargs: Any """Get the current conversation ID from chat options or kwargs.""" return options.get("conversation_id") or kwargs.get("conversation_id") or self.conversation_id - def _prepare_messages_for_azure_ai(self, messages: Sequence[ChatMessage]) -> tuple[list[ChatMessage], str | None]: + def _prepare_messages_for_azure_ai(self, messages: Sequence[Message]) -> tuple[list[Message], str | None]: """Prepare input from messages and convert system/developer messages to instructions.""" - result: list[ChatMessage] = [] + result: list[Message] = [] instructions_list: list[str] = [] instructions: str | None = None @@ -575,10 +575,10 @@ def as_agent( context_provider: ContextProvider | None = None, middleware: Sequence[MiddlewareTypes] | None = None, **kwargs: Any, - ) -> ChatAgent[AzureAIClientOptionsT]: - """Convert this chat client to a ChatAgent. + ) -> Agent[AzureAIClientOptionsT]: + """Convert this chat client to an Agent. - This method creates a ChatAgent instance with this client pre-configured. + This method creates an Agent instance with this client pre-configured. It does NOT create an agent on the Azure AI service - the actual agent will be created on the server during the first invocation (run). @@ -598,7 +598,7 @@ def as_agent( kwargs: Any additional keyword arguments. Returns: - A ChatAgent instance configured with this chat client. + An Agent instance configured with this chat client.
""" return super().as_agent( id=id, diff --git a/python/packages/azure-ai/agent_framework_azure_ai/_project_provider.py b/python/packages/azure-ai/agent_framework_azure_ai/_project_provider.py index e486a14560..e9beac3c7b 100644 --- a/python/packages/azure-ai/agent_framework_azure_ai/_project_provider.py +++ b/python/packages/azure-ai/agent_framework_azure_ai/_project_provider.py @@ -8,7 +8,7 @@ from agent_framework import ( AGENT_FRAMEWORK_USER_AGENT, - ChatAgent, + Agent, ContextProvider, FunctionTool, MiddlewareTypes, @@ -47,7 +47,7 @@ logger = get_logger("agent_framework.azure") -# Type variable for options - allows typed ChatAgent[OptionsT] returns +# Type variable for options - allows typed Agent[OptionsT] returns # Default matches AzureAIClient's default options type OptionsCoT = TypeVar( "OptionsCoT", @@ -170,8 +170,8 @@ async def create_agent( default_options: OptionsCoT | None = None, middleware: Sequence[MiddlewareTypes] | None = None, context_provider: ContextProvider | None = None, - ) -> ChatAgent[OptionsCoT]: - """Create a new agent on the Azure AI service and return a local ChatAgent wrapper. + ) -> Agent[OptionsCoT]: + """Create a new agent on the Azure AI service and return a local Agent wrapper. Args: name: The name of the agent to create. @@ -186,7 +186,7 @@ async def create_agent( context_provider: Context provider to include during agent invocation. Returns: - ChatAgent: A ChatAgent instance configured with the created agent. + Agent: A Agent instance configured with the created agent. Raises: ServiceInitializationError: If required parameters are missing. @@ -272,8 +272,8 @@ async def get_agent( default_options: OptionsCoT | None = None, middleware: Sequence[MiddlewareTypes] | None = None, context_provider: ContextProvider | None = None, - ) -> ChatAgent[OptionsCoT]: - """Retrieve an existing agent from the Azure AI service and return a local ChatAgent wrapper. 
+ ) -> Agent[OptionsCoT]: + """Retrieve an existing agent from the Azure AI service and return a local Agent wrapper. You must provide either name or reference. Use `as_agent()` if you already have AgentVersionDetails and want to avoid an async call. @@ -288,7 +288,7 @@ async def get_agent( context_provider: Context provider to include during agent invocation. Returns: - ChatAgent: A ChatAgent instance configured with the retrieved agent. + Agent: An Agent instance configured with the retrieved agent. Raises: ValueError: If no identifier is provided or required tools are missing. @@ -308,7 +308,7 @@ async def get_agent( raise ValueError("Either name or reference must be provided to get an agent.") if not isinstance(existing_agent.definition, PromptAgentDefinition): - raise ValueError("Agent definition must be PromptAgentDefinition to get a ChatAgent.") + raise ValueError("Agent definition must be PromptAgentDefinition to get a Agent.") # Validate that required function tools are provided self._validate_function_tools(existing_agent.definition.tools, tools) @@ -332,8 +332,8 @@ def as_agent( default_options: OptionsCoT | None = None, middleware: Sequence[MiddlewareTypes] | None = None, context_provider: ContextProvider | None = None, - ) -> ChatAgent[OptionsCoT]: - """Wrap an SDK agent version object into a ChatAgent without making HTTP calls. + ) -> Agent[OptionsCoT]: + """Wrap an SDK agent version object into an Agent without making HTTP calls. Use this when you already have an AgentVersionDetails from a previous API call. @@ -346,13 +346,13 @@ def as_agent( context_provider: Context provider to include during agent invocation. Returns: - ChatAgent: A ChatAgent instance configured with the agent version. + Agent: An Agent instance configured with the agent version. Raises: ValueError: If the agent definition is not a PromptAgentDefinition or required tools are missing.
""" if not isinstance(details.definition, PromptAgentDefinition): - raise ValueError("Agent definition must be PromptAgentDefinition to create a ChatAgent.") + raise ValueError("Agent definition must be PromptAgentDefinition to create a Agent.") # Validate that required function tools are provided self._validate_function_tools(details.definition.tools, tools) @@ -372,8 +372,8 @@ def _to_chat_agent_from_details( default_options: OptionsCoT | None = None, middleware: Sequence[MiddlewareTypes] | None = None, context_provider: ContextProvider | None = None, - ) -> ChatAgent[OptionsCoT]: - """Create a ChatAgent from an AgentVersionDetails. + ) -> Agent[OptionsCoT]: + """Create a Agent from an AgentVersionDetails. Args: details: The AgentVersionDetails containing the agent definition. @@ -385,7 +385,7 @@ def _to_chat_agent_from_details( context_provider: Context provider to include during agent invocation. """ if not isinstance(details.definition, PromptAgentDefinition): - raise ValueError("Agent definition must be PromptAgentDefinition to get a ChatAgent.") + raise ValueError("Agent definition must be PromptAgentDefinition to get a Agent.") client = AzureAIClient( project_client=self._project_client, @@ -400,7 +400,7 @@ def _to_chat_agent_from_details( # but function tools need the actual implementations from provided_tools merged_tools = self._merge_tools(details.definition.tools, provided_tools) - return ChatAgent( # type: ignore[return-value] + return Agent( # type: ignore[return-value] chat_client=client, id=details.id, name=details.name, @@ -425,7 +425,7 @@ def _merge_tools( provided_tools: User-provided tools (Agent Framework format), including function implementations. Returns: - Combined list of tools for the ChatAgent. + Combined list of tools for the Agent. 
""" merged: list[ToolProtocol | dict[str, Any]] = [] @@ -442,7 +442,7 @@ def _merge_tools( if provided_tools: for provided_tool in provided_tools: # FunctionTool - has implementation for function calling - # MCPTool - ChatAgent handles MCP connection and tool discovery at runtime + # MCPTool - Agent handles MCP connection and tool discovery at runtime if isinstance(provided_tool, (FunctionTool, MCPTool)): merged.append(provided_tool) # type: ignore[reportUnknownArgumentType] diff --git a/python/packages/azure-ai/tests/test_agent_provider.py b/python/packages/azure-ai/tests/test_agent_provider.py index c4bcf0e953..07f33736e2 100644 --- a/python/packages/azure-ai/tests/test_agent_provider.py +++ b/python/packages/azure-ai/tests/test_agent_provider.py @@ -6,7 +6,7 @@ import pytest from agent_framework import ( - ChatAgent, + Agent, Content, HostedCodeInterpreterTool, HostedFileSearchTool, @@ -16,7 +16,9 @@ ) from agent_framework.exceptions import ServiceInitializationError from azure.ai.agents.models import ( - Agent, + Agent as AzureAgent, +) +from azure.ai.agents.models import ( CodeInterpreterToolDefinition, ) from azure.identity.aio import AzureCliCredential @@ -156,7 +158,7 @@ async def test_create_agent_basic( mock_agents_client: MagicMock, ) -> None: """Test creating a basic agent.""" - mock_agent = MagicMock(spec=Agent) + mock_agent = MagicMock(spec=AzureAgent) mock_agent.id = "test-agent-id" mock_agent.name = "TestAgent" mock_agent.description = "A test agent" @@ -175,7 +177,7 @@ async def test_create_agent_basic( description="A test agent", ) - assert isinstance(agent, ChatAgent) + assert isinstance(agent, Agent) assert agent.name == "TestAgent" assert agent.id == "test-agent-id" mock_agents_client.create_agent.assert_called_once() @@ -186,7 +188,7 @@ async def test_create_agent_with_model( mock_agents_client: MagicMock, ) -> None: """Test creating an agent with explicit model.""" - mock_agent = MagicMock(spec=Agent) + mock_agent = MagicMock(spec=AzureAgent) 
mock_agent.id = "test-agent-id" mock_agent.name = "TestAgent" mock_agent.description = None @@ -210,7 +212,7 @@ async def test_create_agent_with_tools( mock_agents_client: MagicMock, ) -> None: """Test creating an agent with tools.""" - mock_agent = MagicMock(spec=Agent) + mock_agent = MagicMock(spec=AzureAgent) mock_agent.id = "test-agent-id" mock_agent.name = "TestAgent" mock_agent.description = None @@ -245,7 +247,7 @@ class WeatherResponse(BaseModel): temperature: float description: str - mock_agent = MagicMock(spec=Agent) + mock_agent = MagicMock(spec=AzureAgent) mock_agent.id = "test-agent-id" mock_agent.name = "TestAgent" mock_agent.description = None @@ -297,7 +299,7 @@ async def test_get_agent_by_id( mock_agents_client: MagicMock, ) -> None: """Test getting an agent by ID.""" - mock_agent = MagicMock(spec=Agent) + mock_agent = MagicMock(spec=AzureAgent) mock_agent.id = "existing-agent-id" mock_agent.name = "ExistingAgent" mock_agent.description = "An existing agent" @@ -312,7 +314,7 @@ async def test_get_agent_by_id( agent = await provider.get_agent("existing-agent-id") - assert isinstance(agent, ChatAgent) + assert isinstance(agent, Agent) assert agent.id == "existing-agent-id" mock_agents_client.get_agent.assert_called_once_with("existing-agent-id") @@ -327,7 +329,7 @@ async def test_get_agent_with_function_tools( mock_function_tool.function = MagicMock() mock_function_tool.function.name = "get_weather" - mock_agent = MagicMock(spec=Agent) + mock_agent = MagicMock(spec=AzureAgent) mock_agent.id = "agent-with-tools" mock_agent.name = "AgentWithTools" mock_agent.description = None @@ -356,7 +358,7 @@ async def test_get_agent_with_provided_function_tools( mock_function_tool.function = MagicMock() mock_function_tool.function.name = "get_weather" - mock_agent = MagicMock(spec=Agent) + mock_agent = MagicMock(spec=AzureAgent) mock_agent.id = "agent-with-tools" mock_agent.name = "AgentWithTools" mock_agent.description = None @@ -376,7 +378,7 @@ def 
get_weather(city: str) -> str: agent = await provider.get_agent("agent-with-tools", tools=get_weather) - assert isinstance(agent, ChatAgent) + assert isinstance(agent, Agent) assert agent.id == "agent-with-tools" @@ -391,7 +393,7 @@ def test_as_agent_wraps_without_http( mock_agents_client: MagicMock, ) -> None: """Test as_agent wraps Agent object without making HTTP calls.""" - mock_agent = MagicMock(spec=Agent) + mock_agent = MagicMock(spec=AzureAgent) mock_agent.id = "wrap-agent-id" mock_agent.name = "WrapAgent" mock_agent.description = "Wrapped agent" @@ -405,7 +407,7 @@ def test_as_agent_wraps_without_http( agent = provider.as_agent(mock_agent) - assert isinstance(agent, ChatAgent) + assert isinstance(agent, Agent) assert agent.id == "wrap-agent-id" assert agent.name == "WrapAgent" # Ensure no HTTP calls were made @@ -423,7 +425,7 @@ def test_as_agent_with_function_tools_validates( mock_function_tool.function = MagicMock() mock_function_tool.function.name = "my_function" - mock_agent = MagicMock(spec=Agent) + mock_agent = MagicMock(spec=AzureAgent) mock_agent.id = "agent-id" mock_agent.name = "Agent" mock_agent.description = None @@ -449,7 +451,7 @@ def test_as_agent_with_hosted_tools( mock_code_interpreter = MagicMock() mock_code_interpreter.type = "code_interpreter" - mock_agent = MagicMock(spec=Agent) + mock_agent = MagicMock(spec=AzureAgent) mock_agent.id = "agent-id" mock_agent.name = "Agent" mock_agent.description = None @@ -463,7 +465,7 @@ def test_as_agent_with_hosted_tools( agent = provider.as_agent(mock_agent) - assert isinstance(agent, ChatAgent) + assert isinstance(agent, Agent) # Should have HostedCodeInterpreterTool in the default_options tools assert any(isinstance(t, HostedCodeInterpreterTool) for t in (agent.default_options.get("tools") or [])) # type: ignore @@ -483,7 +485,7 @@ def test_as_agent_with_dict_function_tools_validates( }, } - mock_agent = MagicMock(spec=Agent) + mock_agent = MagicMock(spec=AzureAgent) mock_agent.id = "agent-id" 
mock_agent.name = "Agent" mock_agent.description = None @@ -515,7 +517,7 @@ def test_as_agent_with_dict_function_tools_provided( }, } - mock_agent = MagicMock(spec=Agent) + mock_agent = MagicMock(spec=AzureAgent) mock_agent.id = "agent-id" mock_agent.name = "Agent" mock_agent.description = None @@ -534,7 +536,7 @@ def dict_based_function() -> str: agent = provider.as_agent(mock_agent, tools=dict_based_function) - assert isinstance(agent, ChatAgent) + assert isinstance(agent, Agent) assert agent.id == "agent-id" @@ -810,7 +812,7 @@ async def test_integration_create_agent() -> None: ) try: - assert isinstance(agent, ChatAgent) + assert isinstance(agent, Agent) assert agent.name == "IntegrationTestAgent" assert agent.id is not None finally: @@ -837,7 +839,7 @@ async def test_integration_get_agent() -> None: # Then get it using the provider agent = await provider.get_agent(created.id) - assert isinstance(agent, ChatAgent) + assert isinstance(agent, Agent) assert agent.id == created.id finally: await provider._agents_client.delete_agent(created.id) # type: ignore diff --git a/python/packages/azure-ai/tests/test_azure_ai_agent_client.py b/python/packages/azure-ai/tests/test_azure_ai_agent_client.py index ef1000b12d..2eadc6cafc 100644 --- a/python/packages/azure-ai/tests/test_azure_ai_agent_client.py +++ b/python/packages/azure-ai/tests/test_azure_ai_agent_client.py @@ -8,12 +8,10 @@ import pytest from agent_framework import ( + Agent, AgentResponse, AgentResponseUpdate, AgentThread, - ChatAgent, - ChatClientProtocol, - ChatMessage, ChatOptions, ChatResponse, ChatResponseUpdate, @@ -22,6 +20,8 @@ HostedFileSearchTool, HostedMCPTool, HostedWebSearchTool, + Message, + SupportsChatGetResponse, tool, ) from agent_framework._serialization import SerializationMixin @@ -134,7 +134,7 @@ def test_azure_ai_chat_client_init_with_client(mock_agents_client: MagicMock) -> assert chat_client.agents_client is mock_agents_client assert chat_client.agent_id == "existing-agent-id" assert 
chat_client.thread_id == "test-thread-id" - assert isinstance(chat_client, ChatClientProtocol) + assert isinstance(chat_client, SupportsChatGetResponse) def test_azure_ai_chat_client_init_auto_create_client( @@ -319,7 +319,7 @@ async def empty_async_iter(): mock_stream.__aenter__ = AsyncMock(return_value=empty_async_iter()) mock_stream.__aexit__ = AsyncMock(return_value=None) - messages = [ChatMessage(role="user", text="Hello")] + messages = [Message(role="user", text="Hello")] # Call without existing thread - should create new one response = chat_client.get_response(messages, stream=True) @@ -346,7 +346,7 @@ async def test_azure_ai_chat_client_prepare_options_basic(mock_agents_client: Ma """Test _prepare_options with basic ChatOptions.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client) - messages = [ChatMessage(role="user", text="Hello")] + messages = [Message(role="user", text="Hello")] chat_options: ChatOptions = {"max_tokens": 100, "temperature": 0.7} run_options, tool_results = await chat_client._prepare_options(messages, chat_options) # type: ignore @@ -359,7 +359,7 @@ async def test_azure_ai_chat_client_prepare_options_no_chat_options(mock_agents_ """Test _prepare_options with default ChatOptions.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client) - messages = [ChatMessage(role="user", text="Hello")] + messages = [Message(role="user", text="Hello")] run_options, tool_results = await chat_client._prepare_options(messages, {}) # type: ignore @@ -376,7 +376,7 @@ async def test_azure_ai_chat_client_prepare_options_with_image_content(mock_agen mock_agents_client.get_agent = AsyncMock(return_value=None) image_content = Content.from_uri(uri="https://example.com/image.jpg", media_type="image/jpeg") - messages = [ChatMessage(role="user", contents=[image_content])] + messages = [Message(role="user", contents=[image_content])] run_options, _ = await chat_client._prepare_options(messages, {}) # type: ignore @@ -465,8 +465,8 @@ async 
def test_azure_ai_chat_client_prepare_options_with_messages(mock_agents_cl # Test with system message (becomes instruction) messages = [ - ChatMessage(role="system", text="You are a helpful assistant"), - ChatMessage(role="user", text="Hello"), + Message(role="system", text="You are a helpful assistant"), + Message(role="user", text="Hello"), ] run_options, _ = await chat_client._prepare_options(messages, {}) # type: ignore @@ -488,7 +488,7 @@ async def test_azure_ai_chat_client_prepare_options_with_instructions_from_optio chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") mock_agents_client.get_agent = AsyncMock(return_value=None) - messages = [ChatMessage(role="user", text="Hello")] + messages = [Message(role="user", text="Hello")] chat_options: ChatOptions = { "instructions": "You are a thoughtful reviewer. Give brief feedback.", } @@ -511,8 +511,8 @@ async def test_azure_ai_chat_client_prepare_options_merges_instructions_from_mes mock_agents_client.get_agent = AsyncMock(return_value=None) messages = [ - ChatMessage(role="system", text="Context: You are reviewing marketing copy."), - ChatMessage(role="user", text="Review this tagline"), + Message(role="system", text="Context: You are reviewing marketing copy."), + Message(role="user", text="Review this tagline"), ] chat_options: ChatOptions = { "instructions": "Be concise and constructive in your feedback.", @@ -538,7 +538,7 @@ async def mock_streaming_response(): patch.object(chat_client, "_inner_get_response", return_value=mock_streaming_response()), patch("agent_framework.ChatResponse.from_update_generator") as mock_from_generator, ): - mock_response = ChatResponse(messages=[ChatMessage(role="assistant", text="Hello back")]) + mock_response = ChatResponse(messages=[Message(role="assistant", text="Hello back")]) mock_from_generator.return_value = mock_response result = await ChatResponse.from_update_generator(mock_streaming_response()) @@ -681,7 +681,7 @@ async def 
test_azure_ai_chat_client_prepare_options_tool_choice_required_specifi dict_tool = {"type": "function", "function": {"name": "test_function"}} chat_options = {"tools": [dict_tool], "tool_choice": required_tool_mode} - messages = [ChatMessage(role="user", text="Hello")] + messages = [Message(role="user", text="Hello")] run_options, _ = await chat_client._prepare_options(messages, chat_options) # type: ignore @@ -726,7 +726,7 @@ async def test_azure_ai_chat_client_prepare_options_mcp_never_require(mock_agent mcp_tool = HostedMCPTool(name="Test MCP Tool", url="https://example.com/mcp", approval_mode="never_require") - messages = [ChatMessage(role="user", text="Hello")] + messages = [Message(role="user", text="Hello")] chat_options: ChatOptions = {"tools": [mcp_tool], "tool_choice": "auto"} with patch("agent_framework_azure_ai._shared.McpTool") as mock_mcp_tool_class: @@ -758,7 +758,7 @@ async def test_azure_ai_chat_client_prepare_options_mcp_with_headers(mock_agents name="Test MCP Tool", url="https://example.com/mcp", headers=headers, approval_mode="never_require" ) - messages = [ChatMessage(role="user", text="Hello")] + messages = [Message(role="user", text="Hello")] chat_options: ChatOptions = {"tools": [mcp_tool], "tool_choice": "auto"} with patch("agent_framework_azure_ai._shared.McpTool") as mock_mcp_tool_class: @@ -1407,17 +1407,17 @@ def get_weather( async def test_azure_ai_chat_client_get_response() -> None: """Test Azure AI Chat Client response.""" async with AzureAIAgentClient(credential=AzureCliCredential()) as azure_ai_chat_client: - assert isinstance(azure_ai_chat_client, ChatClientProtocol) + assert isinstance(azure_ai_chat_client, SupportsChatGetResponse) - messages: list[ChatMessage] = [] + messages: list[Message] = [] messages.append( - ChatMessage( + Message( role="user", text="The weather in Seattle is currently sunny with a high of 25°C. 
" "It's a beautiful day for outdoor activities.", ) ) - messages.append(ChatMessage(role="user", text="What's the weather like today?")) + messages.append(Message(role="user", text="What's the weather like today?")) # Test that the agents_client can be used to get a response response = await azure_ai_chat_client.get_response(messages=messages) @@ -1432,10 +1432,10 @@ async def test_azure_ai_chat_client_get_response() -> None: async def test_azure_ai_chat_client_get_response_tools() -> None: """Test Azure AI Chat Client response with tools.""" async with AzureAIAgentClient(credential=AzureCliCredential()) as azure_ai_chat_client: - assert isinstance(azure_ai_chat_client, ChatClientProtocol) + assert isinstance(azure_ai_chat_client, SupportsChatGetResponse) - messages: list[ChatMessage] = [] - messages.append(ChatMessage(role="user", text="What's the weather like in Seattle?")) + messages: list[Message] = [] + messages.append(Message(role="user", text="What's the weather like in Seattle?")) # Test that the agents_client can be used to get a response response = await azure_ai_chat_client.get_response( @@ -1453,17 +1453,17 @@ async def test_azure_ai_chat_client_get_response_tools() -> None: async def test_azure_ai_chat_client_streaming() -> None: """Test Azure AI Chat Client streaming response.""" async with AzureAIAgentClient(credential=AzureCliCredential()) as azure_ai_chat_client: - assert isinstance(azure_ai_chat_client, ChatClientProtocol) + assert isinstance(azure_ai_chat_client, SupportsChatGetResponse) - messages: list[ChatMessage] = [] + messages: list[Message] = [] messages.append( - ChatMessage( + Message( role="user", text="The weather in Seattle is currently sunny with a high of 25°C. 
" "It's a beautiful day for outdoor activities.", ) ) - messages.append(ChatMessage(role="user", text="What's the weather like today?")) + messages.append(Message(role="user", text="What's the weather like today?")) # Test that the agents_client can be used to get a response response = azure_ai_chat_client.get_response(messages=messages, stream=True) @@ -1484,10 +1484,10 @@ async def test_azure_ai_chat_client_streaming() -> None: async def test_azure_ai_chat_client_streaming_tools() -> None: """Test Azure AI Chat Client streaming response with tools.""" async with AzureAIAgentClient(credential=AzureCliCredential()) as azure_ai_chat_client: - assert isinstance(azure_ai_chat_client, ChatClientProtocol) + assert isinstance(azure_ai_chat_client, SupportsChatGetResponse) - messages: list[ChatMessage] = [] - messages.append(ChatMessage(role="user", text="What's the weather like in Seattle?")) + messages: list[Message] = [] + messages.append(Message(role="user", text="What's the weather like in Seattle?")) # Test that the agents_client can be used to get a response response = azure_ai_chat_client.get_response( @@ -1509,8 +1509,8 @@ async def test_azure_ai_chat_client_streaming_tools() -> None: @pytest.mark.flaky @skip_if_azure_ai_integration_tests_disabled async def test_azure_ai_chat_client_agent_basic_run() -> None: - """Test ChatAgent basic run functionality with AzureAIAgentClient.""" - async with ChatAgent( + """Test Agent basic run functionality with AzureAIAgentClient.""" + async with Agent( chat_client=AzureAIAgentClient(credential=AzureCliCredential()), ) as agent: # Run a simple query @@ -1526,8 +1526,8 @@ async def test_azure_ai_chat_client_agent_basic_run() -> None: @pytest.mark.flaky @skip_if_azure_ai_integration_tests_disabled async def test_azure_ai_chat_client_agent_basic_run_streaming() -> None: - """Test ChatAgent basic streaming functionality with AzureAIAgentClient.""" - async with ChatAgent( + """Test Agent basic streaming functionality with 
AzureAIAgentClient.""" + async with Agent( chat_client=AzureAIAgentClient(credential=AzureCliCredential()), ) as agent: # Run streaming query @@ -1546,8 +1546,8 @@ async def test_azure_ai_chat_client_agent_basic_run_streaming() -> None: @pytest.mark.flaky @skip_if_azure_ai_integration_tests_disabled async def test_azure_ai_chat_client_agent_thread_persistence() -> None: - """Test ChatAgent thread persistence across runs with AzureAIAgentClient.""" - async with ChatAgent( + """Test Agent thread persistence across runs with AzureAIAgentClient.""" + async with Agent( chat_client=AzureAIAgentClient(credential=AzureCliCredential()), instructions="You are a helpful assistant with good memory.", ) as agent: @@ -1572,8 +1572,8 @@ async def test_azure_ai_chat_client_agent_thread_persistence() -> None: @pytest.mark.flaky @skip_if_azure_ai_integration_tests_disabled async def test_azure_ai_chat_client_agent_existing_thread_id() -> None: - """Test ChatAgent existing thread ID functionality with AzureAIAgentClient.""" - async with ChatAgent( + """Test Agent existing thread ID functionality with AzureAIAgentClient.""" + async with Agent( chat_client=AzureAIAgentClient(credential=AzureCliCredential()), instructions="You are a helpful assistant with good memory.", ) as first_agent: @@ -1590,7 +1590,7 @@ async def test_azure_ai_chat_client_agent_existing_thread_id() -> None: assert existing_thread_id is not None # Now continue with the same thread ID in a new agent instance - async with ChatAgent( + async with Agent( chat_client=AzureAIAgentClient(thread_id=existing_thread_id, credential=AzureCliCredential()), instructions="You are a helpful assistant with good memory.", ) as second_agent: @@ -1610,9 +1610,9 @@ async def test_azure_ai_chat_client_agent_existing_thread_id() -> None: @pytest.mark.flaky @skip_if_azure_ai_integration_tests_disabled async def test_azure_ai_chat_client_agent_code_interpreter(): - """Test ChatAgent with code interpreter through AzureAIAgentClient.""" + 
"""Test Agent with code interpreter through AzureAIAgentClient.""" - async with ChatAgent( + async with Agent( chat_client=AzureAIAgentClient(credential=AzureCliCredential()), instructions="You are a helpful assistant that can write and execute Python code.", tools=[HostedCodeInterpreterTool()], @@ -1630,7 +1630,7 @@ async def test_azure_ai_chat_client_agent_code_interpreter(): @pytest.mark.flaky @skip_if_azure_ai_integration_tests_disabled async def test_azure_ai_chat_client_agent_file_search(): - """Test ChatAgent with file search through AzureAIAgentClient.""" + """Test Agent with file search through AzureAIAgentClient.""" client = AzureAIAgentClient(credential=AzureCliCredential()) file: FileInfo | None = None @@ -1649,7 +1649,7 @@ async def test_azure_ai_chat_client_agent_file_search(): inputs=[Content.from_hosted_vector_store(vector_store_id=vector_store.id)] ) - async with ChatAgent( + async with Agent( chat_client=client, instructions="You are a helpful assistant that can search through uploaded employee files.", tools=[file_search_tool], @@ -1688,7 +1688,7 @@ async def test_azure_ai_chat_client_agent_hosted_mcp_tool() -> None: approval_mode="never_require", ) - async with ChatAgent( + async with Agent( chat_client=AzureAIAgentClient(credential=AzureCliCredential()), instructions="You are a helpful assistant that can help with microsoft documentation questions.", tools=[mcp_tool], @@ -1715,7 +1715,7 @@ async def test_azure_ai_chat_client_agent_hosted_mcp_tool() -> None: @skip_if_azure_ai_integration_tests_disabled async def test_azure_ai_chat_client_agent_level_tool_persistence(): """Test that agent-level tools persist across multiple runs with AzureAIAgentClient.""" - async with ChatAgent( + async with Agent( chat_client=AzureAIAgentClient(credential=AzureCliCredential()), instructions="You are a helpful assistant that uses available tools.", tools=[get_weather], @@ -1740,7 +1740,7 @@ async def test_azure_ai_chat_client_agent_level_tool_persistence(): 
@skip_if_azure_ai_integration_tests_disabled async def test_azure_ai_chat_client_agent_chat_options_run_level() -> None: """Test ChatOptions parameter coverage at run level.""" - async with ChatAgent( + async with Agent( chat_client=AzureAIAgentClient(credential=AzureCliCredential()), instructions="You are a helpful assistant.", ) as agent: @@ -1764,7 +1764,7 @@ async def test_azure_ai_chat_client_agent_chat_options_run_level() -> None: @skip_if_azure_ai_integration_tests_disabled async def test_azure_ai_chat_client_agent_chat_options_agent_level() -> None: """Test ChatOptions parameter coverage agent level.""" - async with ChatAgent( + async with Agent( chat_client=AzureAIAgentClient(credential=AzureCliCredential()), instructions="You are a helpful assistant.", tools=[get_weather], @@ -2107,7 +2107,7 @@ def test_azure_ai_chat_client_prepare_messages_with_function_result( chat_client = create_test_azure_ai_chat_client(mock_agents_client) function_result = Content.from_function_result(call_id='["run_123", "call_456"]', result="test result") - messages = [ChatMessage(role="user", contents=[function_result])] + messages = [Message(role="user", contents=[function_result])] additional_messages, instructions, required_action_results = chat_client._prepare_messages(messages) # type: ignore @@ -2127,7 +2127,7 @@ def test_azure_ai_chat_client_prepare_messages_with_raw_content_block( # Create content with raw_representation that is a MessageInputContentBlock raw_block = MessageInputTextBlock(text="Raw block text") custom_content = Content(type="custom", raw_representation=raw_block) - messages = [ChatMessage(role="user", contents=[custom_content])] + messages = [Message(role="user", contents=[custom_content])] additional_messages, instructions, required_action_results = chat_client._prepare_messages(messages) # type: ignore diff --git a/python/packages/azure-ai/tests/test_azure_ai_client.py b/python/packages/azure-ai/tests/test_azure_ai_client.py index 38ccfb5ad3..cc15a7bc09 
100644 --- a/python/packages/azure-ai/tests/test_azure_ai_client.py +++ b/python/packages/azure-ai/tests/test_azure_ai_client.py @@ -11,10 +11,8 @@ import pytest from agent_framework import ( + Agent, AgentResponse, - ChatAgent, - ChatClientProtocol, - ChatMessage, ChatOptions, ChatResponse, Content, @@ -22,6 +20,8 @@ HostedFileSearchTool, HostedMCPTool, HostedWebSearchTool, + Message, + SupportsChatGetResponse, tool, ) from agent_framework.exceptions import ServiceInitializationError @@ -88,7 +88,7 @@ async def temporary_chat_client(agent_name: str) -> AsyncIterator[AzureAIClient] """Async context manager that creates an Azure AI agent and yields an `AzureAIClient`. The underlying agent version is cleaned up automatically after use. - Tests can construct their own `ChatAgent` instances from the yielded client. + Tests can construct their own `Agent` instances from the yielded client. """ endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] async with ( @@ -179,7 +179,7 @@ def test_init_with_project_client(mock_project_client: MagicMock) -> None: assert client.agent_name == "test-agent" assert client.agent_version == "1.0" assert not client._should_close_client # type: ignore - assert isinstance(client, ChatClientProtocol) + assert isinstance(client, SupportsChatGetResponse) def test_init_auto_create_client( @@ -298,9 +298,9 @@ async def test_prepare_messages_for_azure_ai_with_system_messages( client = create_test_azure_ai_client(mock_project_client) messages = [ - ChatMessage(role="system", contents=[Content.from_text(text="You are a helpful assistant.")]), - ChatMessage(role="user", contents=[Content.from_text(text="Hello")]), - ChatMessage(role="assistant", contents=[Content.from_text(text="System response")]), + Message(role="system", contents=[Content.from_text(text="You are a helpful assistant.")]), + Message(role="user", contents=[Content.from_text(text="Hello")]), + Message(role="assistant", contents=[Content.from_text(text="System response")]), ] 
result_messages, instructions = client._prepare_messages_for_azure_ai(messages) # type: ignore @@ -318,8 +318,8 @@ async def test_prepare_messages_for_azure_ai_no_system_messages( client = create_test_azure_ai_client(mock_project_client) messages = [ - ChatMessage(role="user", contents=[Content.from_text(text="Hello")]), - ChatMessage(role="assistant", contents=[Content.from_text(text="Hi there!")]), + Message(role="user", contents=[Content.from_text(text="Hello")]), + Message(role="assistant", contents=[Content.from_text(text="Hi there!")]), ] result_messages, instructions = client._prepare_messages_for_azure_ai(messages) # type: ignore @@ -419,7 +419,7 @@ async def test_prepare_options_basic(mock_project_client: MagicMock) -> None: """Test prepare_options basic functionality.""" client = create_test_azure_ai_client(mock_project_client, agent_name="test-agent", agent_version="1.0") - messages = [ChatMessage(role="user", contents=[Content.from_text(text="Hello")])] + messages = [Message(role="user", contents=[Content.from_text(text="Hello")])] with ( patch( @@ -456,7 +456,7 @@ async def test_prepare_options_with_application_endpoint( agent_version="1", ) - messages = [ChatMessage(role="user", contents=[Content.from_text(text="Hello")])] + messages = [Message(role="user", contents=[Content.from_text(text="Hello")])] with ( patch( @@ -498,7 +498,7 @@ async def test_prepare_options_with_application_project_client( agent_version="1", ) - messages = [ChatMessage(role="user", contents=[Content.from_text(text="Hello")])] + messages = [Message(role="user", contents=[Content.from_text(text="Hello")])] with ( patch( @@ -977,7 +977,7 @@ async def test_prepare_options_excludes_response_format( """Test that prepare_options excludes response_format, text, and text_format from final run options.""" client = create_test_azure_ai_client(mock_project_client, agent_name="test-agent", agent_version="1.0") - messages = [ChatMessage(role="user", 
contents=[Content.from_text(text="Hello")])] + messages = [Message(role="user", contents=[Content.from_text(text="Hello")])] chat_options: ChatOptions = {} with ( @@ -1363,10 +1363,10 @@ async def test_integration_options( # Prepare test message if option_name.startswith("tool_choice"): # Use weather-related prompt for tool tests - messages = [ChatMessage(role="user", text="What is the weather in Seattle?")] + messages = [Message(role="user", text="What is the weather in Seattle?")] else: # Generic prompt for simple options - messages = [ChatMessage(role="user", text="Say 'Hello World' briefly.")] + messages = [Message(role="user", text="Say 'Hello World' briefly.")] # Build options dict options: dict[str, Any] = {option_name: option_value, "tools": [get_weather]} @@ -1480,11 +1480,11 @@ async def test_integration_agent_options( # Prepare test message if option_name.startswith("response_format"): # Use prompt that works well with structured output - messages = [ChatMessage(role="user", text="The weather in Seattle is sunny")] - messages.append(ChatMessage(role="user", text="What is the weather in Seattle?")) + messages = [Message(role="user", text="The weather in Seattle is sunny")] + messages.append(Message(role="user", text="What is the weather in Seattle?")) else: # Generic prompt for simple options - messages = [ChatMessage(role="user", text="Say 'Hello World' briefly.")] + messages = [Message(role="user", text="Say 'Hello World' briefly.")] # Build options dict options = {option_name: option_value} @@ -1621,7 +1621,7 @@ async def test_integration_agent_existing_thread(): async with ( temporary_chat_client(agent_name="af-int-test-existing-thread") as client, - ChatAgent( + Agent( chat_client=client, instructions="You are a helpful assistant with good memory.", ) as first_agent, @@ -1640,7 +1640,7 @@ async def test_integration_agent_existing_thread(): if preserved_thread: async with ( temporary_chat_client(agent_name="af-int-test-existing-thread-2") as client, - 
ChatAgent( + Agent( chat_client=client, instructions="You are a helpful assistant with good memory.", ) as second_agent, diff --git a/python/packages/azure-ai/tests/test_provider.py b/python/packages/azure-ai/tests/test_provider.py index c209d14fd6..3174008138 100644 --- a/python/packages/azure-ai/tests/test_provider.py +++ b/python/packages/azure-ai/tests/test_provider.py @@ -4,7 +4,7 @@ from unittest.mock import AsyncMock, MagicMock, patch import pytest -from agent_framework import ChatAgent, FunctionTool +from agent_framework import Agent, FunctionTool from agent_framework._mcp import MCPTool from agent_framework.exceptions import ServiceInitializationError from azure.ai.projects.aio import AIProjectClient @@ -158,7 +158,7 @@ async def test_provider_create_agent( description="Test Agent", ) - assert isinstance(agent, ChatAgent) + assert isinstance(agent, Agent) assert agent.name == "test-agent" mock_project_client.agents.create_version.assert_called_once() @@ -192,7 +192,7 @@ async def test_provider_create_agent_with_env_model( # Call without model parameter - should use env var agent = await provider.create_agent(name="test-agent") - assert isinstance(agent, ChatAgent) + assert isinstance(agent, Agent) # Verify the model from env var was used call_args = mock_project_client.agents.create_version.call_args assert call_args[1]["definition"].model == azure_ai_unit_test_env["AZURE_AI_MODEL_DEPLOYMENT_NAME"] @@ -322,7 +322,7 @@ async def test_provider_get_agent_with_name(mock_project_client: MagicMock) -> N agent = await provider.get_agent(name="test-agent") - assert isinstance(agent, ChatAgent) + assert isinstance(agent, Agent) assert agent.name == "test-agent" mock_project_client.agents.get.assert_called_with(agent_name="test-agent") @@ -350,7 +350,7 @@ async def test_provider_get_agent_with_reference(mock_project_client: MagicMock) agent_reference = AgentReference(name="test-agent", version="1.0") agent = await provider.get_agent(reference=agent_reference) - 
assert isinstance(agent, ChatAgent) + assert isinstance(agent, Agent) assert agent.name == "test-agent" mock_project_client.agents.get_version.assert_called_with(agent_name="test-agent", agent_version="1.0") @@ -410,7 +410,7 @@ def test_provider_as_agent(mock_project_client: MagicMock) -> None: with patch("agent_framework_azure_ai._project_provider.AzureAIClient") as mock_azure_ai_client: agent = provider.as_agent(mock_agent_version) - assert isinstance(agent, ChatAgent) + assert isinstance(agent, Agent) assert agent.name == "test-agent" assert agent.description == "Test Agent" @@ -709,7 +709,7 @@ async def test_provider_create_and_get_agent_integration() -> None: instructions="You are a helpful assistant. Always respond with 'Hello from provider!'", ) - assert isinstance(agent, ChatAgent) + assert isinstance(agent, Agent) assert agent.name == "ProviderTestAgent" # Run the agent diff --git a/python/packages/azurefunctions/tests/test_app.py b/python/packages/azurefunctions/tests/test_app.py index 5a454e6217..cc68176c93 100644 --- a/python/packages/azurefunctions/tests/test_app.py +++ b/python/packages/azurefunctions/tests/test_app.py @@ -12,7 +12,7 @@ import azure.durable_functions as df import azure.functions as func import pytest -from agent_framework import AgentResponse, ChatMessage +from agent_framework import AgentResponse, Message from agent_framework_durabletask import ( MIMETYPE_APPLICATION_JSON, MIMETYPE_TEXT_PLAIN, @@ -356,7 +356,7 @@ async def test_entity_run_agent_operation(self) -> None: """Test that entity can run agent operation.""" mock_agent = Mock() mock_agent.run = AsyncMock( - return_value=AgentResponse(messages=[ChatMessage(role="assistant", text="Test response")]) + return_value=AgentResponse(messages=[Message(role="assistant", text="Test response")]) ) entity = AgentEntity(mock_agent, state_provider=_InMemoryStateProvider(thread_id="test-conv-123")) @@ -373,9 +373,7 @@ async def test_entity_run_agent_operation(self) -> None: async def 
test_entity_stores_conversation_history(self) -> None: """Test that the entity stores conversation history.""" mock_agent = Mock() - mock_agent.run = AsyncMock( - return_value=AgentResponse(messages=[ChatMessage(role="assistant", text="Response 1")]) - ) + mock_agent.run = AsyncMock(return_value=AgentResponse(messages=[Message(role="assistant", text="Response 1")])) entity = AgentEntity(mock_agent, state_provider=_InMemoryStateProvider(thread_id="conv-1")) @@ -407,9 +405,7 @@ async def test_entity_stores_conversation_history(self) -> None: async def test_entity_increments_message_count(self) -> None: """Test that the entity increments the message count.""" mock_agent = Mock() - mock_agent.run = AsyncMock( - return_value=AgentResponse(messages=[ChatMessage(role="assistant", text="Response")]) - ) + mock_agent.run = AsyncMock(return_value=AgentResponse(messages=[Message(role="assistant", text="Response")])) entity = AgentEntity(mock_agent, state_provider=_InMemoryStateProvider(thread_id="conv-1")) @@ -448,9 +444,7 @@ def test_create_agent_entity_returns_function(self) -> None: def test_entity_function_handles_run_operation(self) -> None: """Test that the entity function handles the run operation.""" mock_agent = Mock() - mock_agent.run = AsyncMock( - return_value=AgentResponse(messages=[ChatMessage(role="assistant", text="Response")]) - ) + mock_agent.run = AsyncMock(return_value=AgentResponse(messages=[Message(role="assistant", text="Response")])) entity_function = create_agent_entity(mock_agent) @@ -475,9 +469,7 @@ def test_entity_function_handles_run_operation(self) -> None: def test_entity_function_handles_run_agent_operation(self) -> None: """Test that the entity function handles the deprecated run_agent operation for backward compatibility.""" mock_agent = Mock() - mock_agent.run = AsyncMock( - return_value=AgentResponse(messages=[ChatMessage(role="assistant", text="Response")]) - ) + mock_agent.run = 
AsyncMock(return_value=AgentResponse(messages=[Message(role="assistant", text="Response")])) entity_function = create_agent_entity(mock_agent) diff --git a/python/packages/azurefunctions/tests/test_entities.py b/python/packages/azurefunctions/tests/test_entities.py index eb740daddb..2fdbc3463e 100644 --- a/python/packages/azurefunctions/tests/test_entities.py +++ b/python/packages/azurefunctions/tests/test_entities.py @@ -10,7 +10,7 @@ from unittest.mock import AsyncMock, Mock import pytest -from agent_framework import AgentResponse, ChatMessage +from agent_framework import AgentResponse, Message from agent_framework_azurefunctions._entities import create_agent_entity @@ -19,7 +19,7 @@ def _agent_response(text: str | None) -> AgentResponse: """Create an AgentResponse with a single assistant message.""" - message = ChatMessage(role="assistant", text=text) if text is not None else ChatMessage(role="assistant", text="") + message = Message(role="assistant", text=text) if text is not None else Message(role="assistant", text="") return AgentResponse(messages=[message]) diff --git a/python/packages/azurefunctions/tests/test_orchestration.py b/python/packages/azurefunctions/tests/test_orchestration.py index 989d391e68..e778875887 100644 --- a/python/packages/azurefunctions/tests/test_orchestration.py +++ b/python/packages/azurefunctions/tests/test_orchestration.py @@ -6,7 +6,7 @@ from unittest.mock import Mock import pytest -from agent_framework import AgentResponse, ChatMessage +from agent_framework import AgentResponse, Message from agent_framework_durabletask import DurableAIAgent from azure.durable_functions.models.Task import TaskBase, TaskState @@ -136,7 +136,7 @@ def test_try_set_value_success(self) -> None: # Simulate successful entity task completion entity_task.state = TaskState.SUCCEEDED - entity_task.result = AgentResponse(messages=[ChatMessage(role="assistant", text="Test response")]).to_dict() + entity_task.result = 
AgentResponse(messages=[Message(role="assistant", text="Test response")]).to_dict() # Clear pending_tasks to simulate that parent has processed the child task.pending_tasks.clear() @@ -178,7 +178,7 @@ class TestSchema(BaseModel): # Simulate successful entity task with JSON response entity_task.state = TaskState.SUCCEEDED - entity_task.result = AgentResponse(messages=[ChatMessage(role="assistant", text='{"answer": "42"}')]).to_dict() + entity_task.result = AgentResponse(messages=[Message(role="assistant", text='{"answer": "42"}')]).to_dict() # Clear pending_tasks to simulate that parent has processed the child task.pending_tasks.clear() diff --git a/python/packages/bedrock/agent_framework_bedrock/_chat_client.py b/python/packages/bedrock/agent_framework_bedrock/_chat_client.py index 1a6bf01d59..c2b53b5b71 100644 --- a/python/packages/bedrock/agent_framework_bedrock/_chat_client.py +++ b/python/packages/bedrock/agent_framework_bedrock/_chat_client.py @@ -14,7 +14,6 @@ AGENT_FRAMEWORK_USER_AGENT, BaseChatClient, ChatAndFunctionMiddlewareTypes, - ChatMessage, ChatMiddlewareLayer, ChatOptions, ChatResponse, @@ -24,6 +23,7 @@ FunctionInvocationConfiguration, FunctionInvocationLayer, FunctionTool, + Message, ResponseStream, ToolProtocol, UsageDetails, @@ -325,7 +325,7 @@ def _create_session(settings: BedrockSettings) -> Boto3Session: def _inner_get_response( self, *, - messages: Sequence[ChatMessage], + messages: Sequence[Message], options: Mapping[str, Any], stream: bool = False, **kwargs: Any, @@ -359,7 +359,7 @@ async def _get_response() -> ChatResponse: def _prepare_options( self, - messages: Sequence[ChatMessage], + messages: Sequence[Message], options: Mapping[str, Any], **kwargs: Any, ) -> dict[str, Any]: @@ -410,7 +410,7 @@ def _prepare_options( return run_options def _prepare_bedrock_messages( - self, messages: Sequence[ChatMessage] + self, messages: Sequence[Message] ) -> tuple[list[dict[str, str]], list[dict[str, Any]]]: prompts: list[dict[str, str]] = [] 
conversation: list[dict[str, Any]] = [] @@ -482,7 +482,7 @@ def _align_tool_results_with_pending( return aligned_blocks - def _convert_message_to_content_blocks(self, message: ChatMessage) -> list[dict[str, Any]]: + def _convert_message_to_content_blocks(self, message: Message) -> list[dict[str, Any]]: blocks: list[dict[str, Any]] = [] for content in message.contents: block = self._convert_content_to_bedrock_block(content) @@ -593,7 +593,7 @@ def _process_converse_response(self, response: dict[str, Any]) -> ChatResponse: message = output.get("message", {}) content_blocks = message.get("content", []) or [] contents = self._parse_message_contents(content_blocks) - chat_message = ChatMessage(role="assistant", contents=contents, raw_representation=message) + chat_message = Message(role="assistant", contents=contents, raw_representation=message) usage_details = self._parse_usage(response.get("usage") or output.get("usage")) finish_reason = self._map_finish_reason(output.get("completionReason") or response.get("stopReason")) response_id = response.get("responseId") or message.get("id") diff --git a/python/packages/bedrock/samples/bedrock_sample.py b/python/packages/bedrock/samples/bedrock_sample.py index 15a347997d..b9ecc1101d 100644 --- a/python/packages/bedrock/samples/bedrock_sample.py +++ b/python/packages/bedrock/samples/bedrock_sample.py @@ -3,7 +3,7 @@ import asyncio import logging -from agent_framework import ChatAgent, tool +from agent_framework import Agent, tool from agent_framework_bedrock import BedrockChatClient @@ -17,7 +17,7 @@ def get_weather(city: str) -> dict[str, str]: async def main() -> None: """Run the Bedrock sample agent, invoke the weather tool, and log the response.""" - agent = ChatAgent( + agent = Agent( chat_client=BedrockChatClient(), instructions="You are a concise travel assistant.", name="BedrockWeatherAgent", diff --git a/python/packages/bedrock/tests/test_bedrock_client.py b/python/packages/bedrock/tests/test_bedrock_client.py index 
d267691e71..ef896db9c3 100644 --- a/python/packages/bedrock/tests/test_bedrock_client.py +++ b/python/packages/bedrock/tests/test_bedrock_client.py @@ -5,7 +5,7 @@ from typing import Any import pytest -from agent_framework import ChatMessage, Content +from agent_framework import Content, Message from agent_framework.exceptions import ServiceInitializationError from agent_framework_bedrock import BedrockChatClient @@ -41,8 +41,8 @@ async def test_get_response_invokes_bedrock_runtime() -> None: ) messages = [ - ChatMessage(role="system", contents=[Content.from_text(text="You are concise.")]), - ChatMessage(role="user", contents=[Content.from_text(text="hello")]), + Message(role="system", contents=[Content.from_text(text="You are concise.")]), + Message(role="user", contents=[Content.from_text(text="hello")]), ] response = await client.get_response(messages=messages, options={"max_tokens": 32}) @@ -62,7 +62,7 @@ def test_build_request_requires_non_system_messages() -> None: client=_StubBedrockRuntime(), ) - messages = [ChatMessage(role="system", contents=[Content.from_text(text="Only system text")])] + messages = [Message(role="system", contents=[Content.from_text(text="Only system text")])] with pytest.raises(ServiceInitializationError): client._prepare_options(messages, {}) diff --git a/python/packages/bedrock/tests/test_bedrock_settings.py b/python/packages/bedrock/tests/test_bedrock_settings.py index 25df37b11f..6a1956dd7c 100644 --- a/python/packages/bedrock/tests/test_bedrock_settings.py +++ b/python/packages/bedrock/tests/test_bedrock_settings.py @@ -6,10 +6,10 @@ import pytest from agent_framework import ( - ChatMessage, ChatOptions, Content, FunctionTool, + Message, ) from pydantic import BaseModel @@ -46,7 +46,7 @@ def test_build_request_includes_tool_config() -> None: "tools": [tool], "tool_choice": {"mode": "required", "required_function_name": "get_weather"}, } - messages = [ChatMessage(role="user", contents=[Content.from_text(text="hi")])] + messages = 
[Message(role="user", contents=[Content.from_text(text="hi")])] request = client._prepare_options(messages, options) @@ -58,14 +58,14 @@ def test_build_request_serializes_tool_history() -> None: client = _build_client() options: ChatOptions = {} messages = [ - ChatMessage(role="user", contents=[Content.from_text(text="how's weather?")]), - ChatMessage( + Message(role="user", contents=[Content.from_text(text="how's weather?")]), + Message( role="assistant", contents=[ Content.from_function_call(call_id="call-1", name="get_weather", arguments='{"location": "SEA"}') ], ), - ChatMessage( + Message( role="tool", contents=[Content.from_function_result(call_id="call-1", result={"answer": "72F"})], ), diff --git a/python/packages/chatkit/README.md b/python/packages/chatkit/README.md index cc48016561..c901ce3f58 100644 --- a/python/packages/chatkit/README.md +++ b/python/packages/chatkit/README.md @@ -7,9 +7,9 @@ Specifically, it mirrors the [Agent SDK integration](https://github.com/openai/c - `stream_agent_response`: A helper to convert a streamed `AgentResponseUpdate` from a Microsoft Agent Framework agent that implements `SupportsAgentRun` to ChatKit events. - `ThreadItemConverter`: A extendable helper class to convert ChatKit thread items to - `ChatMessage` objects that can be consumed by an Agent Framework agent. + `Message` objects that can be consumed by an Agent Framework agent. - `simple_to_agent_input`: A helper function that uses the default implementation - of `ThreadItemConverter` to convert a ChatKit thread to a list of `ChatMessage`, + of `ThreadItemConverter` to convert a ChatKit thread to a list of `Message`, useful for getting started quickly. 
## Installation @@ -63,7 +63,7 @@ from azure.identity import AzureCliCredential from fastapi import FastAPI, Request from fastapi.responses import Response, StreamingResponse -from agent_framework import ChatAgent +from agent_framework import Agent from agent_framework.azure import AzureOpenAIChatClient from agent_framework.chatkit import simple_to_agent_input, stream_agent_response @@ -74,7 +74,7 @@ from chatkit.types import ThreadMetadata, UserMessageItem, ThreadStreamEvent from your_store import YourStore # type: ignore[import-not-found] # Replace with your Store implementation # Define your agent with tools -agent = ChatAgent( +agent = Agent( chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), instructions="You are a helpful assistant.", tools=[], # Add your tools here diff --git a/python/packages/chatkit/agent_framework_chatkit/_converter.py b/python/packages/chatkit/agent_framework_chatkit/_converter.py index ca5127e8c7..5aa953e25a 100644 --- a/python/packages/chatkit/agent_framework_chatkit/_converter.py +++ b/python/packages/chatkit/agent_framework_chatkit/_converter.py @@ -9,8 +9,8 @@ from collections.abc import Awaitable, Callable, Sequence from agent_framework import ( - ChatMessage, Content, + Message, ) from chatkit.types import ( AssistantMessageItem, @@ -39,7 +39,7 @@ class ThreadItemConverter: - """Helper class to convert ChatKit thread items to Agent Framework ChatMessage objects. + """Helper class to convert ChatKit thread items to Agent Framework Message objects. This class provides a base implementation for converting ChatKit thread items to Agent Framework messages. It can be extended to handle attachments, @@ -64,8 +64,8 @@ def __init__( async def user_message_to_input( self, item: UserMessageItem, is_last_message: bool = True - ) -> ChatMessage | list[ChatMessage] | None: - """Convert a ChatKit UserMessageItem to Agent Framework ChatMessage(s). 
+ ) -> Message | list[Message] | None: + """Convert a ChatKit UserMessageItem to Agent Framework Message(s). This method is called internally by `to_agent_input()`. Override this method to customize how user messages are converted. @@ -75,7 +75,7 @@ async def user_message_to_input( is_last_message: Whether this is the last message in the thread (used for quoted_text handling). Returns: - A ChatMessage, list of messages, or None to skip. + A Message, list of messages, or None to skip. Note: Instead of calling this method directly, use `to_agent_input()` which handles @@ -102,19 +102,19 @@ async def user_message_to_input( # If only text and no attachments, use text parameter for simplicity if text_content.strip() and not data_contents: - user_message = ChatMessage(role="user", text=text_content.strip()) + user_message = Message(role="user", text=text_content.strip()) else: # Build contents list with both text and attachments contents: list[Content] = [] if text_content.strip(): contents.append(Content.from_text(text=text_content.strip())) contents.extend(data_contents) - user_message = ChatMessage(role="user", contents=contents) + user_message = Message(role="user", contents=contents) # Handle quoted text if this is the last message messages = [user_message] if item.quoted_text and is_last_message: - quoted_context = ChatMessage( + quoted_context = Message( role="user", text=f"The user is referring to this in particular:\n{item.quoted_text}", ) @@ -179,10 +179,8 @@ async def fetch_data(attachment_id: str) -> bytes: # Subclasses can override this method to provide custom handling return None - def hidden_context_to_input( - self, item: HiddenContextItem | SDKHiddenContextItem - ) -> ChatMessage | list[ChatMessage] | None: - """Convert a ChatKit HiddenContextItem or SDKHiddenContextItem to Agent Framework ChatMessage(s). 
+ def hidden_context_to_input(self, item: HiddenContextItem | SDKHiddenContextItem) -> Message | list[Message] | None: + """Convert a ChatKit HiddenContextItem or SDKHiddenContextItem to Agent Framework Message(s). This method is called internally by `to_agent_input()`. Override this method to customize how hidden context is converted. @@ -195,7 +193,7 @@ def hidden_context_to_input( item: The ChatKit hidden context item to convert. Returns: - A ChatMessage with system role, a list of messages, or None to skip. + A Message with system role, a list of messages, or None to skip. Note: Instead of calling this method directly, use `to_agent_input()` which handles @@ -213,9 +211,9 @@ def hidden_context_to_input( content="User's email: user@example.com", ) message = converter.hidden_context_to_input(hidden_item) - # Returns: ChatMessage(role=SYSTEM, text="User's email: ...") + # Returns: Message(role=SYSTEM, text="User's email: ...") """ - return ChatMessage(role="system", text=f"{item.content}") + return Message(role="system", text=f"{item.content}") def tag_to_message_content(self, tag: UserMessageTagContent) -> Content: """Convert a ChatKit tag (@-mention) to Agent Framework content. @@ -250,8 +248,8 @@ def tag_to_message_content(self, tag: UserMessageTagContent) -> Content: name = getattr(tag.data, "name", tag.text if hasattr(tag, "text") else "unknown") return Content.from_text(text=f"Name:{name}") - def task_to_input(self, item: TaskItem) -> ChatMessage | list[ChatMessage] | None: - """Convert a ChatKit TaskItem to Agent Framework ChatMessage(s). + def task_to_input(self, item: TaskItem) -> Message | list[Message] | None: + """Convert a ChatKit TaskItem to Agent Framework Message(s). This method is called internally by `to_agent_input()`. Override this method to customize how tasks are converted. @@ -263,7 +261,7 @@ def task_to_input(self, item: TaskItem) -> ChatMessage | list[ChatMessage] | Non item: The ChatKit task item to convert. 
Returns: - A ChatMessage, a list of messages, or None to skip the task. + A Message, a list of messages, or None to skip the task. Note: Instead of calling this method directly, use `to_agent_input()` which handles @@ -294,10 +292,10 @@ def task_to_input(self, item: TaskItem) -> ChatMessage | list[ChatMessage] | Non f"A message was displayed to the user that the following task was performed:\n\n{task_text}\n" ) - return ChatMessage(role="user", text=text) + return Message(role="user", text=text) - def workflow_to_input(self, item: WorkflowItem) -> ChatMessage | list[ChatMessage] | None: - """Convert a ChatKit WorkflowItem to Agent Framework ChatMessage(s). + def workflow_to_input(self, item: WorkflowItem) -> Message | list[Message] | None: + """Convert a ChatKit WorkflowItem to Agent Framework Message(s). This method is called internally by `to_agent_input()`. Override this method to customize how workflows are converted. @@ -336,7 +334,7 @@ def workflow_to_input(self, item: WorkflowItem) -> ChatMessage | list[ChatMessag messages = converter.workflow_to_input(workflow_item) # Returns list of messages for each task """ - messages: list[ChatMessage] = [] + messages: list[Message] = [] for task in item.workflow.tasks: if task.type != "custom" or (not task.title and not task.content): continue @@ -349,12 +347,12 @@ def workflow_to_input(self, item: WorkflowItem) -> ChatMessage | list[ChatMessag f"\n{task_text}\n" ) - messages.append(ChatMessage(role="user", text=text)) + messages.append(Message(role="user", text=text)) return messages if messages else None - def widget_to_input(self, item: WidgetItem) -> ChatMessage | list[ChatMessage] | None: - """Convert a ChatKit WidgetItem to Agent Framework ChatMessage(s). + def widget_to_input(self, item: WidgetItem) -> Message | list[Message] | None: + """Convert a ChatKit WidgetItem to Agent Framework Message(s). This method is called internally by `to_agent_input()`. Override this method to customize how widgets are converted. 
@@ -367,7 +365,7 @@ def widget_to_input(self, item: WidgetItem) -> ChatMessage | list[ChatMessage] | item: The ChatKit widget item to convert. Returns: - A ChatMessage describing the widget, or None to skip. + A Message describing the widget, or None to skip. Note: Instead of calling this method directly, use `to_agent_input()` which handles @@ -391,13 +389,13 @@ def widget_to_input(self, item: WidgetItem) -> ChatMessage | list[ChatMessage] | try: widget_json = item.widget.model_dump_json(exclude_unset=True, exclude_none=True) text = f"The following graphical UI widget (id: {item.id}) was displayed to the user:{widget_json}" - return ChatMessage(role="user", text=text) + return Message(role="user", text=text) except Exception: # If JSON serialization fails, skip the widget return None - async def assistant_message_to_input(self, item: AssistantMessageItem) -> ChatMessage | list[ChatMessage] | None: - """Convert a ChatKit AssistantMessageItem to Agent Framework ChatMessage(s). + async def assistant_message_to_input(self, item: AssistantMessageItem) -> Message | list[Message] | None: + """Convert a ChatKit AssistantMessageItem to Agent Framework Message(s). The default implementation extracts text from all content parts and creates an assistant message. @@ -406,7 +404,7 @@ async def assistant_message_to_input(self, item: AssistantMessageItem) -> ChatMe item: The ChatKit assistant message item to convert. Returns: - A ChatMessage with assistant role, or None to skip. + A Message with assistant role, or None to skip. 
Note: Instead of calling this method directly, use `to_agent_input()` which handles @@ -417,10 +415,10 @@ async def assistant_message_to_input(self, item: AssistantMessageItem) -> ChatMe if not text_parts: return None - return ChatMessage(role="assistant", text="".join(text_parts)) + return Message(role="assistant", text="".join(text_parts)) - async def client_tool_call_to_input(self, item: ClientToolCallItem) -> ChatMessage | list[ChatMessage] | None: - """Convert a ChatKit ClientToolCallItem to Agent Framework ChatMessage(s). + async def client_tool_call_to_input(self, item: ClientToolCallItem) -> Message | list[Message] | None: + """Convert a ChatKit ClientToolCallItem to Agent Framework Message(s). The default implementation converts completed tool calls into function call and result content. @@ -442,7 +440,7 @@ async def client_tool_call_to_input(self, item: ClientToolCallItem) -> ChatMessa import json # Create function call message - function_call_msg = ChatMessage( + function_call_msg = Message( role="assistant", contents=[ Content.from_function_call( @@ -454,7 +452,7 @@ async def client_tool_call_to_input(self, item: ClientToolCallItem) -> ChatMessa ) # Create function result message - function_result_msg = ChatMessage( + function_result_msg = Message( role="tool", contents=[ Content.from_function_result( @@ -466,8 +464,8 @@ async def client_tool_call_to_input(self, item: ClientToolCallItem) -> ChatMessa return [function_call_msg, function_result_msg] - async def end_of_turn_to_input(self, item: EndOfTurnItem) -> ChatMessage | list[ChatMessage] | None: - """Convert a ChatKit EndOfTurnItem to Agent Framework ChatMessage(s). + async def end_of_turn_to_input(self, item: EndOfTurnItem) -> Message | list[Message] | None: + """Convert a ChatKit EndOfTurnItem to Agent Framework Message(s). The default implementation skips end-of-turn markers as they are only UI hints. 
@@ -488,15 +486,15 @@ async def _thread_item_to_input_item( self, item: ThreadItem, is_last_message: bool = True, - ) -> list[ChatMessage]: - """Internal method to convert a single ThreadItem to ChatMessage(s). + ) -> list[Message]: + """Internal method to convert a single ThreadItem to Message(s). Args: item: The thread item to convert. is_last_message: Whether this is the last item in the thread. Returns: - A list of ChatMessage objects (may be empty). + A list of Message objects (may be empty). """ match item: case UserMessageItem(): @@ -535,7 +533,7 @@ async def _thread_item_to_input_item( async def to_agent_input( self, thread_items: Sequence[ThreadItem] | ThreadItem, - ) -> list[ChatMessage]: + ) -> list[Message]: """Convert ChatKit thread items to Agent Framework ChatMessages. This is the main entry point for converting ChatKit thread items. It handles @@ -546,7 +544,7 @@ async def to_agent_input( thread_items: A single ThreadItem or a sequence of ThreadItems to convert. Returns: - A list of ChatMessage objects that can be sent to an Agent Framework agent. + A list of Message objects that can be sent to an Agent Framework agent. Examples: .. code-block:: python @@ -562,14 +560,14 @@ async def to_agent_input( messages = await converter.to_agent_input([user_message_item, assistant_message_item, task_item]) # Use with agent - from agent_framework import ChatAgent + from agent_framework import Agent - agent = ChatAgent(...) + agent = Agent(...) 
response = await agent.run(messages) """ thread_items = list(thread_items) if isinstance(thread_items, Sequence) else [thread_items] - output: list[ChatMessage] = [] + output: list[Message] = [] for item in thread_items: output.extend( await self._thread_item_to_input_item( @@ -584,7 +582,7 @@ async def to_agent_input( _DEFAULT_CONVERTER = ThreadItemConverter() -async def simple_to_agent_input(thread_items: Sequence[ThreadItem] | ThreadItem) -> list[ChatMessage]: +async def simple_to_agent_input(thread_items: Sequence[ThreadItem] | ThreadItem) -> list[Message]: """Helper function that uses the default ThreadItemConverter. This function provides a quick way to get started with ChatKit integration @@ -594,7 +592,7 @@ async def simple_to_agent_input(thread_items: Sequence[ThreadItem] | ThreadItem) thread_items: A single ThreadItem or a sequence of ThreadItems to convert. Returns: - A list of ChatMessage objects that can be sent to an Agent Framework agent. + A list of Message objects that can be sent to an Agent Framework agent. Examples: .. code-block:: python diff --git a/python/packages/chatkit/tests/test_converter.py b/python/packages/chatkit/tests/test_converter.py index 71400527aa..907a1ad0a9 100644 --- a/python/packages/chatkit/tests/test_converter.py +++ b/python/packages/chatkit/tests/test_converter.py @@ -5,7 +5,7 @@ from unittest.mock import Mock import pytest -from agent_framework import ChatMessage +from agent_framework import Message from chatkit.types import UserMessageTextContent from agent_framework_chatkit import ThreadItemConverter, simple_to_agent_input @@ -43,7 +43,7 @@ async def test_to_agent_input_with_text(self, converter): result = await converter.to_agent_input(input_item) assert len(result) == 1 - assert isinstance(result[0], ChatMessage) + assert isinstance(result[0], Message) assert result[0].role == "user" assert result[0].text == "Hello, how can you help me?" 
@@ -110,13 +110,13 @@ async def test_to_agent_input_multiple_content_parts(self, converter): assert result[0].text == "Hello world!" def test_hidden_context_to_input(self, converter): - """Test converting hidden context item to ChatMessage.""" + """Test converting hidden context item to Message.""" hidden_item = Mock() hidden_item.content = "This is hidden context information" result = converter.hidden_context_to_input(hidden_item) - assert isinstance(result, ChatMessage) + assert isinstance(result, Message) assert result.role == "system" assert result.text == "This is hidden context information" @@ -288,7 +288,7 @@ async def fetch_data(attachment_id: str) -> bytes: assert message.contents[1].media_type == "application/pdf" def test_task_to_input(self, converter): - """Test converting TaskItem to ChatMessage.""" + """Test converting TaskItem to Message.""" from datetime import datetime from chatkit.types import CustomTask, TaskItem @@ -302,7 +302,7 @@ def test_task_to_input(self, converter): ) result = converter.task_to_input(task_item) - assert isinstance(result, ChatMessage) + assert isinstance(result, Message) assert result.role == "user" assert "Analysis: Analyzed the data" in result.text assert "" in result.text @@ -347,7 +347,7 @@ def test_workflow_to_input(self, converter): result = converter.workflow_to_input(workflow_item) assert isinstance(result, list) assert len(result) == 2 - assert all(isinstance(msg, ChatMessage) for msg in result) + assert all(isinstance(msg, Message) for msg in result) assert "Step 1: First step" in result[0].text assert "Step 2: Second step" in result[1].text @@ -369,7 +369,7 @@ def test_workflow_to_input_empty(self, converter): assert result is None def test_widget_to_input(self, converter): - """Test converting WidgetItem to ChatMessage.""" + """Test converting WidgetItem to Message.""" from datetime import datetime from chatkit.types import WidgetItem @@ -384,7 +384,7 @@ def test_widget_to_input(self, converter): ) result = 
converter.widget_to_input(widget_item) - assert isinstance(result, ChatMessage) + assert isinstance(result, Message) assert result.role == "user" assert "widget_1" in result.text assert "graphical UI widget" in result.text @@ -417,6 +417,6 @@ async def test_simple_to_agent_input_with_text(self): result = await simple_to_agent_input(input_item) assert len(result) == 1 - assert isinstance(result[0], ChatMessage) + assert isinstance(result[0], Message) assert result[0].role == "user" assert result[0].text == "Test message" diff --git a/python/packages/claude/agent_framework_claude/_agent.py b/python/packages/claude/agent_framework_claude/_agent.py index f5a343d6b9..ddf228d995 100644 --- a/python/packages/claude/agent_framework_claude/_agent.py +++ b/python/packages/claude/agent_framework_claude/_agent.py @@ -14,10 +14,10 @@ AgentResponseUpdate, AgentThread, BaseAgent, - ChatMessage, Content, ContextProvider, FunctionTool, + Message, ToolProtocol, get_logger, normalize_messages, @@ -541,7 +541,7 @@ async def _apply_runtime_options(self, options: dict[str, Any] | None) -> None: if "permission_mode" in options: await self._client.set_permission_mode(options["permission_mode"]) - def _format_prompt(self, messages: list[ChatMessage] | None) -> str: + def _format_prompt(self, messages: list[Message] | None) -> str: """Format messages into a prompt string. 
Args: @@ -557,7 +557,7 @@ def _format_prompt(self, messages: list[ChatMessage] | None) -> str: @overload def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: Literal[True], thread: AgentThread | None = None, @@ -568,7 +568,7 @@ def run( @overload async def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: Literal[False] = ..., thread: AgentThread | None = None, @@ -578,7 +578,7 @@ async def run( def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: bool = False, thread: AgentThread | None = None, @@ -608,7 +608,7 @@ def run( async def _run_non_streaming( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, thread: AgentThread | None = None, options: OptionsT | MutableMapping[str, Any] | None = None, @@ -622,7 +622,7 @@ async def _run_non_streaming( async def _run_streaming( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, thread: AgentThread | None = None, options: OptionsT | MutableMapping[str, Any] | None = None, diff --git a/python/packages/claude/tests/test_claude_agent.py b/python/packages/claude/tests/test_claude_agent.py index 3025962f26..b3d39d82ab 100644 --- a/python/packages/claude/tests/test_claude_agent.py +++ b/python/packages/claude/tests/test_claude_agent.py @@ -4,7 +4,7 @@ from unittest.mock import AsyncMock, MagicMock, patch import pytest -from agent_framework import AgentResponseUpdate, AgentThread, ChatMessage, Content, tool +from agent_framework import AgentResponseUpdate, AgentThread, Content, Message, tool from 
agent_framework_claude import ClaudeAgent, ClaudeAgentOptions, ClaudeAgentSettings from agent_framework_claude._agent import TOOLS_MCP_SERVER_NAME @@ -686,7 +686,7 @@ def test_format_none_messages(self) -> None: def test_format_user_message(self) -> None: """Test formatting user message.""" agent = ClaudeAgent() - msg = ChatMessage( + msg = Message( role="user", contents=[Content.from_text(text="Hello")], ) @@ -697,9 +697,9 @@ def test_format_multiple_messages(self) -> None: """Test formatting multiple messages.""" agent = ClaudeAgent() messages = [ - ChatMessage(role="user", contents=[Content.from_text(text="Hi")]), - ChatMessage(role="assistant", contents=[Content.from_text(text="Hello!")]), - ChatMessage(role="user", contents=[Content.from_text(text="How are you?")]), + Message(role="user", contents=[Content.from_text(text="Hi")]), + Message(role="assistant", contents=[Content.from_text(text="Hello!")]), + Message(role="user", contents=[Content.from_text(text="How are you?")]), ] result = agent._format_prompt(messages) # type: ignore[reportPrivateUsage] assert "Hi" in result diff --git a/python/packages/copilotstudio/agent_framework_copilotstudio/_agent.py b/python/packages/copilotstudio/agent_framework_copilotstudio/_agent.py index 40f93eee6a..7a2567e48f 100644 --- a/python/packages/copilotstudio/agent_framework_copilotstudio/_agent.py +++ b/python/packages/copilotstudio/agent_framework_copilotstudio/_agent.py @@ -11,9 +11,9 @@ AgentResponseUpdate, AgentThread, BaseAgent, - ChatMessage, Content, ContextProvider, + Message, ResponseStream, normalize_messages, ) @@ -210,7 +210,7 @@ def __init__( @overload def run( self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | Message | list[str] | list[Message] | None = None, *, stream: Literal[False] = False, thread: AgentThread | None = None, @@ -220,7 +220,7 @@ def run( @overload def run( self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + 
messages: str | Message | list[str] | list[Message] | None = None, *, stream: Literal[True], thread: AgentThread | None = None, @@ -229,7 +229,7 @@ def run( def run( self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | Message | list[str] | list[Message] | None = None, *, stream: bool = False, thread: AgentThread | None = None, @@ -259,7 +259,7 @@ def run( async def _run_impl( self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | Message | list[str] | list[Message] | None = None, *, thread: AgentThread | None = None, **kwargs: Any, @@ -274,7 +274,7 @@ async def _run_impl( question = "\n".join([message.text for message in input_messages]) activities = self.client.ask_question(question, thread.service_thread_id) - response_messages: list[ChatMessage] = [] + response_messages: list[Message] = [] response_id: str | None = None response_messages = [message async for message in self._process_activities(activities, streaming=False)] @@ -284,7 +284,7 @@ async def _run_impl( def _run_stream_impl( self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | Message | list[str] | list[Message] | None = None, *, thread: AgentThread | None = None, **kwargs: Any, @@ -338,7 +338,7 @@ async def _start_new_conversation(self) -> str: return conversation_id - async def _process_activities(self, activities: AsyncIterable[Any], streaming: bool) -> AsyncIterable[ChatMessage]: + async def _process_activities(self, activities: AsyncIterable[Any], streaming: bool) -> AsyncIterable[Message]: """Process activities from the Copilot Studio agent. Args: @@ -347,13 +347,13 @@ async def _process_activities(self, activities: AsyncIterable[Any], streaming: b or non-streaming (message activities) responses. Yields: - ChatMessage objects created from the activities. + Message objects created from the activities. 
""" async for activity in activities: if activity.text and ( (activity.type == "message" and not streaming) or (activity.type == "typing" and streaming) ): - yield ChatMessage( + yield Message( role="assistant", contents=[Content.from_text(activity.text)], author_name=activity.from_property.name if activity.from_property else None, diff --git a/python/packages/copilotstudio/tests/test_copilot_agent.py b/python/packages/copilotstudio/tests/test_copilot_agent.py index cd11c7a6ef..6172f871d3 100644 --- a/python/packages/copilotstudio/tests/test_copilot_agent.py +++ b/python/packages/copilotstudio/tests/test_copilot_agent.py @@ -4,7 +4,7 @@ from unittest.mock import MagicMock, patch import pytest -from agent_framework import AgentResponse, AgentResponseUpdate, AgentThread, ChatMessage, Content +from agent_framework import AgentResponse, AgentResponseUpdate, AgentThread, Content, Message from agent_framework.exceptions import ServiceException, ServiceInitializationError from microsoft_agents.copilotstudio.client import CopilotClient @@ -134,7 +134,7 @@ async def test_run_with_string_message(self, mock_copilot_client: MagicMock, moc assert response.messages[0].role == "assistant" async def test_run_with_chat_message(self, mock_copilot_client: MagicMock, mock_activity: MagicMock) -> None: - """Test run method with ChatMessage.""" + """Test run method with Message.""" agent = CopilotStudioAgent(client=mock_copilot_client) conversation_activity = MagicMock() @@ -143,7 +143,7 @@ async def test_run_with_chat_message(self, mock_copilot_client: MagicMock, mock_ mock_copilot_client.start_conversation.return_value = create_async_generator([conversation_activity]) mock_copilot_client.ask_question.return_value = create_async_generator([mock_activity]) - chat_message = ChatMessage(role="user", contents=[Content.from_text("test message")]) + chat_message = Message(role="user", contents=[Content.from_text("test message")]) response = await agent.run(chat_message) assert 
isinstance(response, AgentResponse) diff --git a/python/packages/core/AGENTS.md b/python/packages/core/AGENTS.md index 2a308c245d..ad2965200b 100644 --- a/python/packages/core/AGENTS.md +++ b/python/packages/core/AGENTS.md @@ -9,7 +9,7 @@ agent_framework/ ├── __init__.py # Public API exports ├── _agents.py # Agent implementations ├── _clients.py # Chat client base classes and protocols -├── _types.py # Core types (ChatMessage, ChatResponse, Content, etc.) +├── _types.py # Core types (Message, ChatResponse, Content, etc.) ├── _tools.py # Tool definitions and function invocation ├── _middleware.py # Middleware system for request/response interception ├── _threads.py # AgentThread and message store abstractions @@ -27,16 +27,16 @@ agent_framework/ - **`SupportsAgentRun`** - Protocol defining the agent interface - **`BaseAgent`** - Abstract base class for agents -- **`ChatAgent`** - Main agent class wrapping a chat client with tools, instructions, and middleware +- **`Agent`** - Main agent class wrapping a chat client with tools, instructions, and middleware ### Chat Clients (`_clients.py`) -- **`ChatClientProtocol`** - Protocol for chat client implementations +- **`SupportsChatGetResponse`** - Protocol for chat client implementations - **`BaseChatClient`** - Abstract base class with middleware support; subclasses implement `_inner_get_response()` and `_inner_get_streaming_response()` ### Types (`_types.py`) -- **`ChatMessage`** - Represents a chat message with role, content, and metadata +- **`Message`** - Represents a chat message with role, content, and metadata - **`ChatResponse`** - Response from a chat client containing messages and usage - **`ChatResponseUpdate`** - Streaming response update - **`AgentResponse`** / **`AgentResponseUpdate`** - Agent-level response wrappers @@ -91,10 +91,10 @@ agent_framework/ ### Creating an Agent ```python -from agent_framework import ChatAgent +from agent_framework import Agent from agent_framework.openai import 
OpenAIChatClient -agent = ChatAgent( +agent = Agent( chat_client=OpenAIChatClient(), instructions="You are helpful.", tools=[my_function], @@ -114,7 +114,7 @@ agent = OpenAIChatClient().as_agent( ### Middleware Pipeline ```python -from agent_framework import ChatAgent, AgentMiddleware, AgentContext +from agent_framework import Agent, AgentMiddleware, AgentContext class LoggingMiddleware(AgentMiddleware): async def process(self, context: AgentContext, call_next) -> None: @@ -122,18 +122,18 @@ class LoggingMiddleware(AgentMiddleware): await call_next(context) print(f"Output: {context.result}") -agent = ChatAgent(..., middleware=[LoggingMiddleware()]) +agent = Agent(..., middleware=[LoggingMiddleware()]) ``` ### Custom Chat Client ```python -from agent_framework import BaseChatClient, ChatResponse, ChatMessage +from agent_framework import BaseChatClient, ChatResponse, Message class MyClient(BaseChatClient): async def _inner_get_response(self, *, messages, options, **kwargs) -> ChatResponse: # Call your LLM here - return ChatResponse(messages=[ChatMessage(role="assistant", text="Hi!")]) + return ChatResponse(messages=[Message(role="assistant", text="Hi!")]) async def _inner_get_streaming_response(self, *, messages, options, **kwargs): yield ChatResponseUpdate(...) diff --git a/python/packages/core/README.md b/python/packages/core/README.md index a56badd777..a633a745cb 100644 --- a/python/packages/core/README.md +++ b/python/packages/core/README.md @@ -61,11 +61,11 @@ Create agents and invoke them directly: ```python import asyncio -from agent_framework import ChatAgent +from agent_framework import Agent from agent_framework.openai import OpenAIChatClient async def main(): - agent = ChatAgent( + agent = Agent( chat_client=OpenAIChatClient(), instructions=""" 1) A robot may not injure a human being... 
@@ -90,14 +90,14 @@ You can use the chat client classes directly for advanced workflows: ```python import asyncio from agent_framework.openai import OpenAIChatClient -from agent_framework import ChatMessage, Role +from agent_framework import Message, Role async def main(): client = OpenAIChatClient() messages = [ - ChatMessage("system", ["You are a helpful assistant."]), - ChatMessage("user", ["Write a haiku about Agent Framework."]) + Message("system", ["You are a helpful assistant."]), + Message("user", ["Write a haiku about Agent Framework."]) ] response = await client.get_response(messages) @@ -123,7 +123,7 @@ import asyncio from typing import Annotated from random import randint from pydantic import Field -from agent_framework import ChatAgent +from agent_framework import Agent from agent_framework.openai import OpenAIChatClient @@ -145,7 +145,7 @@ def get_menu_specials() -> str: async def main(): - agent = ChatAgent( + agent = Agent( chat_client=OpenAIChatClient(), instructions="You are a helpful assistant that can provide weather and restaurant information.", tools=[get_weather, get_menu_specials] @@ -169,19 +169,19 @@ Coordinate multiple agents to collaborate on complex tasks using orchestration p ```python import asyncio -from agent_framework import ChatAgent +from agent_framework import Agent from agent_framework.openai import OpenAIChatClient async def main(): # Create specialized agents - writer = ChatAgent( + writer = Agent( chat_client=OpenAIChatClient(), name="Writer", instructions="You are a creative content writer. Generate and refine slogans based on feedback." ) - reviewer = ChatAgent( + reviewer = Agent( chat_client=OpenAIChatClient(), name="Reviewer", instructions="You are a critical reviewer. Provide detailed feedback on proposed slogans." 
diff --git a/python/packages/core/agent_framework/_agents.py b/python/packages/core/agent_framework/_agents.py index 79b4f27187..523528a5f2 100644 --- a/python/packages/core/agent_framework/_agents.py +++ b/python/packages/core/agent_framework/_agents.py @@ -28,7 +28,7 @@ from mcp.shared.exceptions import McpError from pydantic import BaseModel, Field, create_model -from ._clients import BaseChatClient, ChatClientProtocol +from ._clients import BaseChatClient, SupportsChatGetResponse from ._logging import get_logger from ._mcp import LOG_LEVEL_MAPPING, MCPTool from ._memory import Context, ContextProvider @@ -43,9 +43,9 @@ from ._types import ( AgentResponse, AgentResponseUpdate, - ChatMessage, ChatResponse, ChatResponseUpdate, + Message, ResponseStream, map_chat_to_agent_update, normalize_messages, @@ -157,15 +157,15 @@ def _sanitize_agent_name(agent_name: str | None) -> str | None: class _RunContext(TypedDict): thread: AgentThread - input_messages: list[ChatMessage] - thread_messages: list[ChatMessage] + input_messages: list[Message] + thread_messages: list[Message] agent_name: str chat_options: dict[str, Any] filtered_kwargs: dict[str, Any] finalize_kwargs: dict[str, Any] -__all__ = ["BaseAgent", "ChatAgent", "RawChatAgent", "SupportsAgentRun"] +__all__ = ["Agent", "BaseAgent", "RawAgent", "SupportsAgentRun"] # region Agent Protocol @@ -230,7 +230,7 @@ def get_new_thread(self, **kwargs): @overload def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: Literal[False] = ..., thread: AgentThread | None = None, @@ -242,7 +242,7 @@ def run( @overload def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: Literal[True], thread: AgentThread | None = None, @@ -253,7 +253,7 @@ def run( def run( self, - messages: str | ChatMessage | Sequence[str | 
ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: bool = False, thread: AgentThread | None = None, @@ -292,7 +292,7 @@ class BaseAgent(SerializationMixin): """Base class for all Agent Framework agents. This is the minimal base class without middleware or telemetry layers. - For most use cases, prefer :class:`ChatAgent` which includes all standard layers. + For most use cases, prefer :class:`Agent` which includes all standard layers. This class provides core functionality for agent implementations, including context providers, middleware support, and thread management. @@ -300,7 +300,7 @@ class BaseAgent(SerializationMixin): Note: BaseAgent cannot be instantiated directly as it doesn't implement the ``run()`` and other methods required by SupportsAgentRun. - Use a concrete implementation like ChatAgent or create a subclass. + Use a concrete implementation like Agent or create a subclass. Examples: .. code-block:: python @@ -380,8 +380,8 @@ def __init__( async def _notify_thread_of_new_messages( self, thread: AgentThread, - input_messages: ChatMessage | Sequence[ChatMessage], - response_messages: ChatMessage | Sequence[ChatMessage], + input_messages: Message | Sequence[Message], + response_messages: Message | Sequence[Message], **kwargs: Any, ) -> None: """Notify the thread of new messages. @@ -394,9 +394,9 @@ async def _notify_thread_of_new_messages( response_messages: The response messages to notify about. **kwargs: Any extra arguments to pass from the agent run. 
""" - if isinstance(input_messages, ChatMessage) or len(input_messages) > 0: + if isinstance(input_messages, Message) or len(input_messages) > 0: await thread.on_new_messages(input_messages) - if isinstance(response_messages, ChatMessage) or len(response_messages) > 0: + if isinstance(response_messages, Message) or len(response_messages) > 0: await thread.on_new_messages(response_messages) if thread.context_provider: await thread.context_provider.invoked(input_messages, response_messages, **kwargs) @@ -459,16 +459,16 @@ def as_tool( Examples: .. code-block:: python - from agent_framework import ChatAgent + from agent_framework import Agent # Create an agent - agent = ChatAgent(chat_client=client, name="research-agent", description="Performs research tasks") + agent = Agent(chat_client=client, name="research-agent", description="Performs research tasks") # Convert the agent to a tool research_tool = agent.as_tool() # Use the tool with another agent - coordinator = ChatAgent(chat_client=client, name="coordinator", tools=research_tool) + coordinator = Agent(chat_client=client, name="coordinator", tools=research_tool) """ # Verify that self implements SupportsAgentRun if not isinstance(self, SupportsAgentRun): @@ -523,14 +523,14 @@ async def agent_wrapper(**kwargs: Any) -> str: return agent_tool -# region ChatAgent +# region Agent -class RawChatAgent(BaseAgent, Generic[OptionsCoT]): # type: ignore[misc] +class RawAgent(BaseAgent, Generic[OptionsCoT]): # type: ignore[misc] """A Chat Client Agent without middleware or telemetry layers. This is the core chat agent implementation. For most use cases, - prefer :class:`ChatAgent` which includes all standard layers. + prefer :class:`Agent` which includes all standard layers. This is the primary agent implementation that uses a chat client to interact with language models. It supports tools, context providers, middleware, and @@ -544,12 +544,12 @@ class RawChatAgent(BaseAgent, Generic[OptionsCoT]): # type: ignore[misc] .. 
code-block:: python - from agent_framework import ChatAgent + from agent_framework import Agent from agent_framework.openai import OpenAIChatClient # Create a basic chat agent client = OpenAIChatClient(model_id="gpt-4") - agent = ChatAgent(chat_client=client, name="assistant", description="A helpful assistant") + agent = Agent(chat_client=client, name="assistant", description="A helpful assistant") # Run the agent with a simple message response = await agent.run("Hello, how are you?") @@ -564,7 +564,7 @@ def get_weather(location: str) -> str: return f"The weather in {location} is sunny." - agent = ChatAgent( + agent = Agent( chat_client=client, name="weather-agent", instructions="You are a weather assistant.", @@ -583,11 +583,11 @@ def get_weather(location: str) -> str: .. code-block:: python - from agent_framework import ChatAgent + from agent_framework import Agent from agent_framework.openai import OpenAIChatClient, OpenAIChatOptions client = OpenAIChatClient(model_id="gpt-4o") - agent: ChatAgent[OpenAIChatOptions] = ChatAgent( + agent: Agent[OpenAIChatOptions] = Agent( chat_client=client, name="reasoning-agent", instructions="You are a reasoning assistant.", @@ -609,7 +609,7 @@ def get_weather(location: str) -> str: def __init__( self, - chat_client: ChatClientProtocol[OptionsCoT], + client: SupportsChatGetResponse[OptionsCoT], instructions: str | None = None, *, id: str | None = None, @@ -625,10 +625,10 @@ def __init__( context_provider: ContextProvider | None = None, **kwargs: Any, ) -> None: - """Initialize a ChatAgent instance. + """Initialize an Agent instance. Args: - chat_client: The chat client to use for the agent. + client: The chat client to use for the agent. instructions: Optional instructions for the agent. These will be put into the messages sent to the chat client service as a system message. @@ -641,7 +641,7 @@ def __init__( context_provider: The context providers to include during agent invocation.
middleware: List of middleware to intercept agent and function invocations. default_options: A TypedDict containing chat options. When using a typed agent like - ``ChatAgent[OpenAIChatOptions]``, this enables IDE autocomplete for + ``Agent[OpenAIChatOptions]``, this enables IDE autocomplete for provider-specific options including temperature, max_tokens, model_id, tool_choice, and provider-specific options like reasoning_effort. You can also create your own TypedDict for custom chat clients. @@ -663,7 +663,7 @@ def __init__( "Use conversation_id for service-managed threads or chat_message_store_factory for local storage." ) - if not isinstance(chat_client, FunctionInvocationLayer) and isinstance(chat_client, BaseChatClient): + if not isinstance(client, FunctionInvocationLayer) and isinstance(client, BaseChatClient): logger.warning( "The provided chat client does not support function invoking, this might limit agent capabilities." ) @@ -675,7 +675,7 @@ def __init__( context_provider=context_provider, **kwargs, ) - self.chat_client = chat_client + self.client = client self.chat_message_store_factory = chat_message_store_factory # Get tools from options or named parameter (named param takes precedence) @@ -702,7 +702,7 @@ def __init__( # Build chat options dict self.default_options: dict[str, Any] = { - "model_id": opts.pop("model_id", None) or (getattr(self.chat_client, "model_id", None)), + "model_id": opts.pop("model_id", None) or (getattr(self.client, "model_id", None)), "allow_multiple_tool_calls": opts.pop("allow_multiple_tool_calls", None), "conversation_id": conversation_id, "frequency_penalty": opts.pop("frequency_penalty", None), @@ -737,9 +737,9 @@ async def __aenter__(self) -> Self: This list might be extended in the future. Returns: - The ChatAgent instance. + The Agent instance. 
""" - for context_manager in chain([self.chat_client], self.mcp_tools): + for context_manager in chain([self.client], self.mcp_tools): if isinstance(context_manager, AbstractAsyncContextManager): await self._async_exit_stack.enter_async_context(context_manager) return self @@ -768,15 +768,15 @@ def _update_agent_name_and_description(self) -> None: should check if there is already an agent name defined, and if not set it to this value. """ - if hasattr(self.chat_client, "_update_agent_name_and_description") and callable( - self.chat_client._update_agent_name_and_description + if hasattr(self.client, "_update_agent_name_and_description") and callable( + self.client._update_agent_name_and_description ): # type: ignore[reportAttributeAccessIssue, attr-defined] - self.chat_client._update_agent_name_and_description(self.name, self.description) # type: ignore[reportAttributeAccessIssue, attr-defined] + self.client._update_agent_name_and_description(self.name, self.description) # type: ignore[reportAttributeAccessIssue, attr-defined] @overload def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: Literal[False] = ..., thread: AgentThread | None = None, @@ -792,7 +792,7 @@ def run( @overload def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: Literal[False] = ..., thread: AgentThread | None = None, @@ -808,7 +808,7 @@ def run( @overload def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: Literal[True], thread: AgentThread | None = None, @@ -823,7 +823,7 @@ def run( def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: bool 
= False, thread: AgentThread | None = None, @@ -851,7 +851,7 @@ def run( thread: The thread to use for the agent. tools: The tools to use for this specific run (merged with default tools). options: A TypedDict containing chat options. When using a typed agent like - ``ChatAgent[OpenAIChatOptions]``, this enables IDE autocomplete for + ``Agent[OpenAIChatOptions]``, this enables IDE autocomplete for provider-specific options including temperature, max_tokens, model_id, tool_choice, and provider-specific options like reasoning_effort. kwargs: Additional keyword arguments for the agent. @@ -872,7 +872,7 @@ async def _run_non_streaming() -> AgentResponse[Any]: options=options, kwargs=kwargs, ) - response = await self.chat_client.get_response( # type: ignore[call-overload] + response = await self.client.get_response( # type: ignore[call-overload] messages=ctx["thread_messages"], stream=False, options=ctx["chat_options"], @@ -944,7 +944,7 @@ async def _get_stream() -> ResponseStream[ChatResponseUpdate, ChatResponse]: kwargs=kwargs, ) ctx: _RunContext = ctx_holder["ctx"] # type: ignore[assignment] # Safe: we just assigned it - return self.chat_client.get_response( # type: ignore[call-overload, no-any-return] + return self.client.get_response( # type: ignore[call-overload, no-any-return] messages=ctx["thread_messages"], stream=True, options=ctx["chat_options"], @@ -979,7 +979,7 @@ def _finalize_response_updates( async def _prepare_run_context( self, *, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None, + messages: str | Message | Sequence[str | Message] | None, thread: AgentThread | None, tools: ToolProtocol | Callable[..., Any] @@ -1067,7 +1067,7 @@ async def _finalize_response_and_update_thread( response: ChatResponse, agent_name: str, thread: AgentThread, - input_messages: list[ChatMessage], + input_messages: list[Message], kwargs: dict[str, Any], ) -> None: """Finalize response by updating thread and setting author names. 
@@ -1287,9 +1287,9 @@ async def _prepare_thread_and_messages( self, *, thread: AgentThread | None, - input_messages: list[ChatMessage] | None = None, + input_messages: list[Message] | None = None, **kwargs: Any, - ) -> tuple[AgentThread, dict[str, Any], list[ChatMessage]]: + ) -> tuple[AgentThread, dict[str, Any], list[Message]]: """Prepare the thread and messages for agent execution. This method prepares the conversation thread, merges context provider data, @@ -1325,7 +1325,7 @@ async def _prepare_thread_and_messages( thread = thread or self.get_new_thread() if thread.service_thread_id and thread.context_provider: await thread.context_provider.thread_created(thread.service_thread_id) - thread_messages: list[ChatMessage] = [] + thread_messages: list[Message] = [] if thread.message_store: thread_messages.extend(await thread.message_store.list_messages() or []) context: Context | None = None @@ -1369,10 +1369,10 @@ def _get_agent_name(self) -> str: return self.name or "UnnamedAgent" -class ChatAgent( +class Agent( AgentTelemetryLayer, AgentMiddlewareLayer, - RawChatAgent[OptionsCoT], + RawAgent[OptionsCoT], Generic[OptionsCoT], ): """A Chat Client Agent with middleware, telemetry, and full layer support. @@ -1381,12 +1381,12 @@ class ChatAgent( - Agent middleware support for request/response interception - OpenTelemetry-based telemetry for observability - For a minimal implementation without these features, use :class:`RawChatAgent`. + For a minimal implementation without these features, use :class:`RawAgent`. 
""" def __init__( self, - chat_client: ChatClientProtocol[OptionsCoT], + chat_client: SupportsChatGetResponse[OptionsCoT], instructions: str | None = None, *, id: str | None = None, @@ -1403,7 +1403,7 @@ def __init__( middleware: Sequence[MiddlewareTypes] | None = None, **kwargs: Any, ) -> None: - """Initialize a ChatAgent instance.""" + """Initialize an Agent instance.""" super().__init__( - chat_client=chat_client, + client=chat_client, instructions=instructions, diff --git a/python/packages/core/agent_framework/_clients.py b/python/packages/core/agent_framework/_clients.py index 1893f27f80..44af994e26 100644 --- a/python/packages/core/agent_framework/_clients.py +++ b/python/packages/core/agent_framework/_clients.py @@ -36,9 +36,9 @@ ToolProtocol, ) from ._types import ( - ChatMessage, ChatResponse, ChatResponseUpdate, + Message, ResponseStream, prepare_messages, validate_chat_options, @@ -51,7 +51,7 @@ if TYPE_CHECKING: - from ._agents import ChatAgent + from ._agents import Agent from ._middleware import ( MiddlewareTypes, ) @@ -67,11 +67,11 @@ __all__ = [ "BaseChatClient", - "ChatClientProtocol", + "SupportsChatGetResponse", ] -# region ChatClientProtocol Protocol +# region SupportsChatGetResponse Protocol # Contravariant for the Protocol OptionsContraT = TypeVar( @@ -86,7 +86,7 @@ @runtime_checkable -class ChatClientProtocol(Protocol[OptionsContraT]): +class SupportsChatGetResponse(Protocol[OptionsContraT]): """A protocol for a chat client that can generate responses. This protocol defines the interface that all chat clients must implement, @@ -103,7 +103,7 @@ class SupportsChatGetResponse(Protocol[OptionsContraT]): Examples: ..
code-block:: python - from agent_framework import ChatClientProtocol, ChatResponse, ChatMessage + from agent_framework import SupportsChatGetResponse, ChatResponse, Message # Any class implementing the required methods is compatible @@ -128,7 +128,7 @@ async def _response(): # Verify the instance satisfies the protocol client = CustomChatClient() - assert isinstance(client, ChatClientProtocol) + assert isinstance(client, SupportsChatGetResponse) """ additional_properties: dict[str, Any] @@ -136,7 +136,7 @@ async def _response(): @overload def get_response( self, - messages: str | ChatMessage | Sequence[str | ChatMessage], + messages: str | Message | Sequence[str | Message], *, stream: Literal[False] = ..., options: ChatOptions[ResponseModelBoundT], @@ -146,7 +146,7 @@ def get_response( @overload def get_response( self, - messages: str | ChatMessage | Sequence[str | ChatMessage], + messages: str | Message | Sequence[str | Message], *, stream: Literal[False] = ..., options: OptionsContraT | ChatOptions[None] | None = None, @@ -156,7 +156,7 @@ def get_response( @overload def get_response( self, - messages: str | ChatMessage | Sequence[str | ChatMessage], + messages: str | Message | Sequence[str | Message], *, stream: Literal[True], options: OptionsContraT | ChatOptions[Any] | None = None, @@ -165,7 +165,7 @@ def get_response( def get_response( self, - messages: str | ChatMessage | Sequence[str | ChatMessage], + messages: str | Message | Sequence[str | Message], *, stream: bool = False, options: OptionsContraT | ChatOptions[Any] | None = None, @@ -226,7 +226,7 @@ class BaseChatClient(SerializationMixin, ABC, Generic[OptionsCoT]): Examples: .. 
code-block:: python - from agent_framework import BaseChatClient, ChatResponse, ChatMessage + from agent_framework import BaseChatClient, ChatResponse, Message from collections.abc import AsyncIterable @@ -243,7 +243,7 @@ async def _stream(): else: # Non-streaming implementation return ChatResponse( - messages=[ChatMessage(role="assistant", text="Hello!")], response_id="custom-response" + messages=[Message(role="assistant", text="Hello!")], response_id="custom-response" ) @@ -338,7 +338,7 @@ def _build_response_stream( def _inner_get_response( self, *, - messages: Sequence[ChatMessage], + messages: Sequence[Message], stream: bool, options: Mapping[str, Any], **kwargs: Any, @@ -365,7 +365,7 @@ def _inner_get_response( @overload def get_response( self, - messages: str | ChatMessage | Sequence[str | ChatMessage], + messages: str | Message | Sequence[str | Message], *, stream: Literal[False] = ..., options: ChatOptions[ResponseModelBoundT], @@ -375,7 +375,7 @@ def get_response( @overload def get_response( self, - messages: str | ChatMessage | Sequence[str | ChatMessage], + messages: str | Message | Sequence[str | Message], *, stream: Literal[False] = ..., options: OptionsCoT | ChatOptions[None] | None = None, @@ -385,7 +385,7 @@ def get_response( @overload def get_response( self, - messages: str | ChatMessage | Sequence[str | ChatMessage], + messages: str | Message | Sequence[str | Message], *, stream: Literal[True], options: OptionsCoT | ChatOptions[Any] | None = None, @@ -394,7 +394,7 @@ def get_response( def get_response( self, - messages: str | ChatMessage | Sequence[str | ChatMessage], + messages: str | Message | Sequence[str | Message], *, stream: bool = False, options: OptionsCoT | ChatOptions[Any] | None = None, @@ -448,10 +448,10 @@ def as_agent( middleware: Sequence[MiddlewareTypes] | None = None, function_invocation_configuration: FunctionInvocationConfiguration | None = None, **kwargs: Any, - ) -> ChatAgent[OptionsCoT]: - """Create a ChatAgent with this 
client. + ) -> Agent[OptionsCoT]: + """Create an Agent with this client. - This is a convenience method that creates a ChatAgent instance with this + This is a convenience method that creates an Agent instance with this chat client already configured. Keyword Args: @@ -474,7 +474,7 @@ def as_agent( kwargs: Any additional keyword arguments. Will be stored as ``additional_properties``. Returns: - A ChatAgent instance configured with this chat client. + An Agent instance configured with this chat client. Examples: .. code-block:: python @@ -494,9 +494,9 @@ def as_agent( # Run the agent response = await agent.run("Hello!") """ - from ._agents import ChatAgent + from ._agents import Agent - return ChatAgent( + return Agent( chat_client=self, id=id, name=name, diff --git a/python/packages/core/agent_framework/_mcp.py b/python/packages/core/agent_framework/_mcp.py index d716aa0c94..9e960c56d6 100644 --- a/python/packages/core/agent_framework/_mcp.py +++ b/python/packages/core/agent_framework/_mcp.py @@ -32,8 +32,8 @@ _build_pydantic_model_from_json_schema, ) from ._types import ( - ChatMessage, Content, + Message, ) from .exceptions import ToolException, ToolExecutionException @@ -43,7 +43,7 @@ from typing_extensions import Self # pragma: no cover if TYPE_CHECKING: - from ._clients import ChatClientProtocol + from ._clients import SupportsChatGetResponse logger = logging.getLogger(__name__) @@ -69,9 +69,9 @@ def _parse_message_from_mcp( mcp_type: types.PromptMessage | types.SamplingMessage, -) -> ChatMessage: +) -> Message: """Parse an MCP container type into an Agent Framework type.""" - return ChatMessage( + return Message( role=mcp_type.role, contents=_parse_content_from_mcp(mcp_type.content), raw_representation=mcp_type, @@ -256,9 +256,9 @@ def _prepare_content_for_mcp( def _prepare_message_for_mcp( - content: ChatMessage, + content: Message, ) -> list[types.TextContent | types.ImageContent | types.AudioContent | types.EmbeddedResource | types.ResourceLink]: - """Prepare
a ChatMessage for MCP format.""" + """Prepare a Message for MCP format.""" messages: list[ types.TextContent | types.ImageContent | types.AudioContent | types.EmbeddedResource | types.ResourceLink ] = [] @@ -335,7 +335,7 @@ def __init__( parse_prompt_results: Literal[True] | Callable[[types.GetPromptResult], Any] | None = True, session: ClientSession | None = None, request_timeout: int | None = None, - chat_client: ChatClientProtocol | None = None, + chat_client: SupportsChatGetResponse | None = None, additional_properties: dict[str, Any] | None = None, ) -> None: """Initialize the MCP Tool base. @@ -513,7 +513,7 @@ async def sampling_callback( message="No chat client available. Please set a chat client.", ) logger.debug("Sampling callback called with params: %s", params) - messages: list[ChatMessage] = [] + messages: list[Message] = [] for msg in params.messages: messages.append(_parse_message_from_mcp(msg)) try: @@ -634,7 +634,7 @@ async def load_prompts(self) -> None: input_model = _get_input_model_from_mcp_prompt(prompt) approval_mode = self._determine_approval_mode(local_name) - func: FunctionTool[BaseModel, list[ChatMessage] | Any | types.GetPromptResult] = FunctionTool( + func: FunctionTool[BaseModel, list[Message] | Any | types.GetPromptResult] = FunctionTool( func=partial(self.get_prompt, prompt.name), name=local_name, description=prompt.description or "", @@ -801,7 +801,7 @@ async def call_tool(self, tool_name: str, **kwargs: Any) -> list[Content] | Any raise ToolExecutionException(f"Failed to call tool '{tool_name}'.", inner_exception=ex) from ex raise ToolExecutionException(f"Failed to call tool '{tool_name}' after retries.") - async def get_prompt(self, prompt_name: str, **kwargs: Any) -> list[ChatMessage] | Any | types.GetPromptResult: + async def get_prompt(self, prompt_name: str, **kwargs: Any) -> list[Message] | Any | types.GetPromptResult: """Call a prompt with the given arguments. 
Args: @@ -909,7 +909,7 @@ class MCPStdioTool(MCPTool): Examples: .. code-block:: python - from agent_framework import MCPStdioTool, ChatAgent + from agent_framework import MCPStdioTool, Agent # Create an MCP stdio tool mcp_tool = MCPStdioTool( @@ -921,7 +921,7 @@ class MCPStdioTool(MCPTool): # Use with a chat agent async with mcp_tool: - agent = ChatAgent(chat_client=client, name="assistant", tools=mcp_tool) + agent = Agent(chat_client=client, name="assistant", tools=mcp_tool) response = await agent.run("List files in the directory") """ @@ -942,7 +942,7 @@ def __init__( args: list[str] | None = None, env: dict[str, str] | None = None, encoding: str | None = None, - chat_client: ChatClientProtocol | None = None, + chat_client: SupportsChatGetResponse | None = None, additional_properties: dict[str, Any] | None = None, **kwargs: Any, ) -> None: @@ -1031,7 +1031,7 @@ class MCPStreamableHTTPTool(MCPTool): Examples: .. code-block:: python - from agent_framework import MCPStreamableHTTPTool, ChatAgent + from agent_framework import MCPStreamableHTTPTool, Agent # Create an MCP HTTP tool mcp_tool = MCPStreamableHTTPTool( @@ -1042,7 +1042,7 @@ class MCPStreamableHTTPTool(MCPTool): # Use with a chat agent async with mcp_tool: - agent = ChatAgent(chat_client=client, name="assistant", tools=mcp_tool) + agent = Agent(chat_client=client, name="assistant", tools=mcp_tool) response = await agent.run("Fetch data from the API") """ @@ -1061,7 +1061,7 @@ def __init__( approval_mode: (Literal["always_require", "never_require"] | HostedMCPSpecificApproval | None) = None, allowed_tools: Collection[str] | None = None, terminate_on_close: bool | None = None, - chat_client: ChatClientProtocol | None = None, + chat_client: SupportsChatGetResponse | None = None, additional_properties: dict[str, Any] | None = None, http_client: httpx.AsyncClient | None = None, **kwargs: Any, @@ -1148,7 +1148,7 @@ class MCPWebsocketTool(MCPTool): Examples: .. 
code-block:: python - from agent_framework import MCPWebsocketTool, ChatAgent + from agent_framework import MCPWebsocketTool, Agent # Create an MCP WebSocket tool mcp_tool = MCPWebsocketTool( @@ -1157,7 +1157,7 @@ class MCPWebsocketTool(MCPTool): # Use with a chat agent async with mcp_tool: - agent = ChatAgent(chat_client=client, name="assistant", tools=mcp_tool) + agent = Agent(chat_client=client, name="assistant", tools=mcp_tool) response = await agent.run("Connect to the real-time service") """ @@ -1175,7 +1175,7 @@ def __init__( description: str | None = None, approval_mode: (Literal["always_require", "never_require"] | HostedMCPSpecificApproval | None) = None, allowed_tools: Collection[str] | None = None, - chat_client: ChatClientProtocol | None = None, + chat_client: SupportsChatGetResponse | None = None, additional_properties: dict[str, Any] | None = None, **kwargs: Any, ) -> None: diff --git a/python/packages/core/agent_framework/_memory.py b/python/packages/core/agent_framework/_memory.py index 465bc1ffec..f84b614ae0 100644 --- a/python/packages/core/agent_framework/_memory.py +++ b/python/packages/core/agent_framework/_memory.py @@ -8,7 +8,7 @@ from types import TracebackType from typing import TYPE_CHECKING, Any, Final -from ._types import ChatMessage +from ._types import Message if TYPE_CHECKING: from ._tools import ToolProtocol @@ -34,12 +34,12 @@ class Context: Examples: .. 
code-block:: python - from agent_framework import Context, ChatMessage + from agent_framework import Context, Message # Create context with instructions context = Context( instructions="Use a professional tone when responding.", - messages=[ChatMessage(content="Previous context", role="user")], + messages=[Message(content="Previous context", role="user")], tools=[my_tool], ) @@ -51,7 +51,7 @@ class Context: def __init__( self, instructions: str | None = None, - messages: Sequence[ChatMessage] | None = None, + messages: Sequence[Message] | None = None, tools: Sequence[ToolProtocol] | None = None, ): """Create a new Context object. @@ -62,7 +62,7 @@ def __init__( tools: The list of tools to provide to this run. """ self.instructions = instructions - self.messages: Sequence[ChatMessage] = messages or [] + self.messages: Sequence[Message] = messages or [] self.tools: Sequence[ToolProtocol] = tools or [] @@ -85,7 +85,7 @@ class ContextProvider(ABC): Examples: .. code-block:: python - from agent_framework import ContextProvider, Context, ChatMessage + from agent_framework import ContextProvider, Context, Message class CustomContextProvider(ContextProvider): @@ -96,7 +96,7 @@ async def invoking(self, messages, **kwargs): # Use with a chat agent async with CustomContextProvider() as provider: - agent = ChatAgent(chat_client=client, name="assistant", context_provider=provider) + agent = Agent(chat_client=client, name="assistant", context_provider=provider) """ # Default prompt to be used by all context providers when assembling memories/instructions @@ -116,8 +116,8 @@ async def thread_created(self, thread_id: str | None) -> None: async def invoked( self, - request_messages: ChatMessage | Sequence[ChatMessage], - response_messages: ChatMessage | Sequence[ChatMessage] | None = None, + request_messages: Message | Sequence[Message], + response_messages: Message | Sequence[Message] | None = None, invoke_exception: Exception | None = None, **kwargs: Any, ) -> None: @@ -136,7 
+136,7 @@ async def invoked( pass @abstractmethod - async def invoking(self, messages: ChatMessage | MutableSequence[ChatMessage], **kwargs: Any) -> Context: + async def invoking(self, messages: Message | MutableSequence[Message], **kwargs: Any) -> Context: """Called just before the model/agent is invoked. Implementers can load any additional context required at this time, diff --git a/python/packages/core/agent_framework/_middleware.py b/python/packages/core/agent_framework/_middleware.py index 8d63aa2eba..915b6a4f8f 100644 --- a/python/packages/core/agent_framework/_middleware.py +++ b/python/packages/core/agent_framework/_middleware.py @@ -10,13 +10,13 @@ from enum import Enum from typing import TYPE_CHECKING, Any, Generic, Literal, TypeAlias, overload -from ._clients import ChatClientProtocol +from ._clients import SupportsChatGetResponse from ._types import ( AgentResponse, AgentResponseUpdate, - ChatMessage, ChatResponse, ChatResponseUpdate, + Message, ResponseStream, prepare_messages, ) @@ -35,7 +35,7 @@ from pydantic import BaseModel from ._agents import SupportsAgentRun - from ._clients import ChatClientProtocol + from ._clients import SupportsChatGetResponse from ._threads import AgentThread from ._tools import FunctionTool from ._types import ChatOptions, ChatResponse, ChatResponseUpdate @@ -155,7 +155,7 @@ def __init__( self, *, agent: SupportsAgentRun, - messages: list[ChatMessage], + messages: list[Message], thread: AgentThread | None = None, options: Mapping[str, Any] | None = None, stream: bool = False, @@ -302,8 +302,8 @@ async def process(self, context: ChatContext, call_next): def __init__( self, - chat_client: ChatClientProtocol, - messages: Sequence[ChatMessage], + chat_client: SupportsChatGetResponse, + messages: Sequence[Message], options: Mapping[str, Any] | None, stream: bool = False, metadata: Mapping[str, Any] | None = None, @@ -356,7 +356,7 @@ class AgentMiddleware(ABC): Examples: .. 
code-block:: python - from agent_framework import AgentMiddleware, AgentContext, ChatAgent + from agent_framework import AgentMiddleware, AgentContext, Agent class RetryMiddleware(AgentMiddleware): @@ -372,7 +372,7 @@ async def process(self, context: AgentContext, call_next): # Use with an agent - agent = ChatAgent(chat_client=client, name="assistant", middleware=[RetryMiddleware()]) + agent = Agent(chat_client=client, name="assistant", middleware=[RetryMiddleware()]) """ @abstractmethod @@ -415,7 +415,7 @@ class FunctionMiddleware(ABC): Examples: .. code-block:: python - from agent_framework import FunctionMiddleware, FunctionInvocationContext, ChatAgent + from agent_framework import FunctionMiddleware, FunctionInvocationContext, Agent class CachingMiddleware(FunctionMiddleware): @@ -439,7 +439,7 @@ async def process(self, context: FunctionInvocationContext, call_next): # Use with an agent - agent = ChatAgent(chat_client=client, name="assistant", middleware=[CachingMiddleware()]) + agent = Agent(chat_client=client, name="assistant", middleware=[CachingMiddleware()]) """ @abstractmethod @@ -479,7 +479,7 @@ class ChatMiddleware(ABC): Examples: .. 
code-block:: python - from agent_framework import ChatMiddleware, ChatContext, ChatAgent + from agent_framework import ChatMiddleware, ChatContext, Agent class SystemPromptMiddleware(ChatMiddleware): @@ -488,16 +488,16 @@ def __init__(self, system_prompt: str): async def process(self, context: ChatContext, call_next): # Add system prompt to messages - from agent_framework import ChatMessage + from agent_framework import Message - context.messages.insert(0, ChatMessage(role="system", text=self.system_prompt)) + context.messages.insert(0, Message(role="system", text=self.system_prompt)) # Continue execution await call_next(context) # Use with an agent - agent = ChatAgent( + agent = Agent( chat_client=client, name="assistant", middleware=[SystemPromptMiddleware("You are a helpful assistant.")], @@ -572,7 +572,7 @@ def agent_middleware(func: AgentMiddlewareCallable) -> AgentMiddlewareCallable: Examples: .. code-block:: python - from agent_framework import agent_middleware, AgentContext, ChatAgent + from agent_framework import agent_middleware, AgentContext, Agent @agent_middleware @@ -583,7 +583,7 @@ async def logging_middleware(context: AgentContext, call_next): # Use with an agent - agent = ChatAgent(chat_client=client, name="assistant", middleware=[logging_middleware]) + agent = Agent(chat_client=client, name="assistant", middleware=[logging_middleware]) """ # Add marker attribute to identify this as agent middleware func._middleware_type: MiddlewareType = MiddlewareType.AGENT # type: ignore @@ -605,7 +605,7 @@ def function_middleware(func: FunctionMiddlewareCallable) -> FunctionMiddlewareC Examples: .. 
code-block:: python - from agent_framework import function_middleware, FunctionInvocationContext, ChatAgent + from agent_framework import function_middleware, FunctionInvocationContext, Agent @function_middleware @@ -616,7 +616,7 @@ async def logging_middleware(context: FunctionInvocationContext, call_next): # Use with an agent - agent = ChatAgent(chat_client=client, name="assistant", middleware=[logging_middleware]) + agent = Agent(chat_client=client, name="assistant", middleware=[logging_middleware]) """ # Add marker attribute to identify this as function middleware func._middleware_type: MiddlewareType = MiddlewareType.FUNCTION # type: ignore @@ -638,7 +638,7 @@ def chat_middleware(func: ChatMiddlewareCallable) -> ChatMiddlewareCallable: Examples: .. code-block:: python - from agent_framework import chat_middleware, ChatContext, ChatAgent + from agent_framework import chat_middleware, ChatContext, Agent @chat_middleware @@ -649,7 +649,7 @@ async def logging_middleware(context: ChatContext, call_next): # Use with an agent - agent = ChatAgent(chat_client=client, name="assistant", middleware=[logging_middleware]) + agent = Agent(chat_client=client, name="assistant", middleware=[logging_middleware]) """ # Add marker attribute to identify this as chat middleware func._middleware_type: MiddlewareType = MiddlewareType.CHAT # type: ignore @@ -980,7 +980,7 @@ def __init__( @overload def get_response( self, - messages: str | ChatMessage | Sequence[str | ChatMessage], + messages: str | Message | Sequence[str | Message], *, stream: Literal[False] = ..., options: ChatOptions[ResponseModelBoundT], @@ -990,7 +990,7 @@ def get_response( @overload def get_response( self, - messages: str | ChatMessage | Sequence[str | ChatMessage], + messages: str | Message | Sequence[str | Message], *, stream: Literal[False] = ..., options: OptionsCoT | ChatOptions[None] | None = None, @@ -1000,7 +1000,7 @@ def get_response( @overload def get_response( self, - messages: str | ChatMessage | 
Sequence[str | ChatMessage], + messages: str | Message | Sequence[str | Message], *, stream: Literal[True], options: OptionsCoT | ChatOptions[Any] | None = None, @@ -1009,7 +1009,7 @@ def get_response( def get_response( self, - messages: str | ChatMessage | Sequence[str | ChatMessage], + messages: str | Message | Sequence[str | Message], *, stream: bool = False, options: OptionsCoT | ChatOptions[Any] | None = None, @@ -1097,7 +1097,7 @@ def __init__( @overload def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: Literal[False] = ..., thread: AgentThread | None = None, @@ -1109,7 +1109,7 @@ def run( @overload def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: Literal[False] = ..., thread: AgentThread | None = None, @@ -1121,7 +1121,7 @@ def run( @overload def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: Literal[True], thread: AgentThread | None = None, @@ -1132,7 +1132,7 @@ def run( def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: bool = False, thread: AgentThread | None = None, diff --git a/python/packages/core/agent_framework/_serialization.py b/python/packages/core/agent_framework/_serialization.py index 44de6ef848..ed367ffbce 100644 --- a/python/packages/core/agent_framework/_serialization.py +++ b/python/packages/core/agent_framework/_serialization.py @@ -31,16 +31,16 @@ class SerializationProtocol(Protocol): ensuring consistent behavior across the framework. 
Examples: - The framework's ``ChatMessage`` class demonstrates the protocol in action: + The framework's ``Message`` class demonstrates the protocol in action: .. code-block:: python - from agent_framework import ChatMessage + from agent_framework import Message from agent_framework._serialization import SerializationProtocol - # ChatMessage implements SerializationProtocol via SerializationMixin - user_msg = ChatMessage(role="user", text="What's the weather like today?") + # Message implements SerializationProtocol via SerializationMixin + user_msg = Message(role="user", text="What's the weather like today?") # Serialize to dictionary - automatic type identification and nested serialization msg_dict = user_msg.to_dict() @@ -52,8 +52,8 @@ class SerializationProtocol(Protocol): # "additional_properties": {} # } - # Deserialize back to ChatMessage instance - automatic type reconstruction - restored_msg = ChatMessage.from_dict(msg_dict) + # Deserialize back to Message instance - automatic type reconstruction + restored_msg = Message.from_dict(msg_dict) print(restored_msg.text) # "What's the weather like today?" print(restored_msg.role) # "user" @@ -170,15 +170,15 @@ class SerializationMixin: .. code-block:: python - from agent_framework import ChatMessage + from agent_framework import Message from agent_framework._threads import AgentThreadState, ChatMessageStoreState - # ChatMessageStoreState handles nested ChatMessage serialization + # ChatMessageStoreState handles nested Message serialization store_state = ChatMessageStoreState( messages=[ - ChatMessage(role="user", text="Hello agent"), - ChatMessage(role="assistant", text="Hi! How can I help?"), + Message(role="user", text="Hello agent"), + Message(role="assistant", text="Hi! 
How can I help?"), ] ) diff --git a/python/packages/core/agent_framework/_threads.py b/python/packages/core/agent_framework/_threads.py index df142643f6..83b33519d8 100644 --- a/python/packages/core/agent_framework/_threads.py +++ b/python/packages/core/agent_framework/_threads.py @@ -7,7 +7,7 @@ from ._memory import ContextProvider from ._serialization import SerializationMixin -from ._types import ChatMessage +from ._types import Message from .exceptions import AgentThreadException __all__ = ["AgentThread", "ChatMessageStore", "ChatMessageStoreProtocol"] @@ -22,17 +22,17 @@ class ChatMessageStoreProtocol(Protocol): Examples: .. code-block:: python - from agent_framework import ChatMessage + from agent_framework import Message class MyMessageStore: def __init__(self): self._messages = [] - async def list_messages(self) -> list[ChatMessage]: + async def list_messages(self) -> list[Message]: return self._messages - async def add_messages(self, messages: Sequence[ChatMessage]) -> None: + async def add_messages(self, messages: Sequence[Message]) -> None: self._messages.extend(messages) @classmethod @@ -52,7 +52,7 @@ async def serialize(self, **kwargs): store = MyMessageStore() """ - async def list_messages(self) -> list[ChatMessage]: + async def list_messages(self) -> list[Message]: """Gets all the messages from the store that should be used for the next agent invocation. Messages are returned in ascending chronological order, with the oldest message first. @@ -65,11 +65,11 @@ async def list_messages(self) -> list[ChatMessage]: """ ... - async def add_messages(self, messages: Sequence[ChatMessage]) -> None: + async def add_messages(self, messages: Sequence[Message]) -> None: """Adds messages to the store. Args: - messages: The sequence of ChatMessage objects to add to the store. + messages: The sequence of Message objects to add to the store. """ ... 
@@ -128,7 +128,7 @@ class ChatMessageStoreState(SerializationMixin): def __init__( self, - messages: Sequence[ChatMessage] | Sequence[MutableMapping[str, Any]] | None = None, + messages: Sequence[Message] | Sequence[MutableMapping[str, Any]] | None = None, **kwargs: Any, ) -> None: """Create the store state. @@ -141,16 +141,16 @@ def __init__( """ if not messages: - self.messages: list[ChatMessage] = [] + self.messages: list[Message] = [] return if not isinstance(messages, list): raise TypeError("Messages should be a list") - new_messages: list[ChatMessage] = [] + new_messages: list[Message] = [] for msg in messages: - if isinstance(msg, ChatMessage): + if isinstance(msg, Message): new_messages.append(msg) else: - new_messages.append(ChatMessage.from_dict(msg)) + new_messages.append(Message.from_dict(msg)) self.messages = new_messages @@ -198,13 +198,13 @@ class ChatMessageStore: Examples: .. code-block:: python - from agent_framework import ChatMessageStore, ChatMessage + from agent_framework import ChatMessageStore, Message # Create an empty store store = ChatMessageStore() # Add messages - message = ChatMessage(role="user", text="Hello") + message = Message(role="user", text="Hello") await store.add_messages([message]) # Retrieve messages @@ -217,7 +217,7 @@ class ChatMessageStore: restored_store = await ChatMessageStore.deserialize(state) """ - def __init__(self, messages: Sequence[ChatMessage] | None = None): + def __init__(self, messages: Sequence[Message] | None = None): """Create a ChatMessageStore for use in a thread. Args: @@ -225,19 +225,19 @@ def __init__(self, messages: Sequence[ChatMessage] | None = None): """ self.messages = list(messages) if messages else [] - async def add_messages(self, messages: Sequence[ChatMessage]) -> None: + async def add_messages(self, messages: Sequence[Message]) -> None: """Add messages to the store. Args: - messages: Sequence of ChatMessage objects to add to the store. 
+ messages: Sequence of Message objects to add to the store. """ self.messages.extend(messages) - async def list_messages(self) -> list[ChatMessage]: + async def list_messages(self) -> list[Message]: """Get all messages from the store in chronological order. Returns: - List of ChatMessage objects, ordered from oldest to newest. + List of Message objects, ordered from oldest to newest. """ return self.messages @@ -302,21 +302,21 @@ class AgentThread: Examples: .. code-block:: python - from agent_framework import ChatAgent, ChatMessageStore + from agent_framework import Agent, ChatMessageStore from agent_framework.openai import OpenAIChatClient client = OpenAIChatClient(model="gpt-4o") # Create agent with service-managed threads using a service_thread_id - service_agent = ChatAgent(name="assistant", client=client) + service_agent = Agent(name="assistant", client=client) service_thread = await service_agent.get_new_thread(service_thread_id="thread_abc123") # Create agent with service-managed threads using conversation_id - conversation_agent = ChatAgent(name="assistant", client=client, conversation_id="thread_abc123") + conversation_agent = Agent(name="assistant", client=client, conversation_id="thread_abc123") conversation_thread = await conversation_agent.get_new_thread() # Create agent with custom message store factory - local_agent = ChatAgent(name="assistant", client=client, chat_message_store_factory=ChatMessageStore) + local_agent = Agent(name="assistant", client=client, chat_message_store_factory=ChatMessageStore) local_thread = await local_agent.get_new_thread() # Serialize and restore thread state @@ -401,11 +401,11 @@ def message_store(self, message_store: ChatMessageStoreProtocol | None) -> None: self._message_store = message_store - async def on_new_messages(self, new_messages: ChatMessage | Sequence[ChatMessage]) -> None: + async def on_new_messages(self, new_messages: Message | Sequence[Message]) -> None: """Invoked when a new message has been 
contributed to the chat by any participant. Args: - new_messages: The new ChatMessage or sequence of ChatMessage objects to add to the thread. + new_messages: The new Message or sequence of Message objects to add to the thread. """ if self._service_thread_id is not None: # If the thread messages are stored in the service there is nothing to do here, @@ -416,7 +416,7 @@ async def on_new_messages(self, new_messages: ChatMessage | Sequence[ChatMessage # create a default in memory store. self._message_store = ChatMessageStore() # If a store has been provided, we need to add the messages to the store. - if isinstance(new_messages, ChatMessage): + if isinstance(new_messages, Message): new_messages = [new_messages] await self._message_store.add_messages(new_messages) diff --git a/python/packages/core/agent_framework/_tools.py b/python/packages/core/agent_framework/_tools.py index 0d31471aba..2d98982274 100644 --- a/python/packages/core/agent_framework/_tools.py +++ b/python/packages/core/agent_framework/_tools.py @@ -65,14 +65,14 @@ if TYPE_CHECKING: - from ._clients import ChatClientProtocol + from ._clients import SupportsChatGetResponse from ._middleware import FunctionMiddlewarePipeline, FunctionMiddlewareTypes from ._types import ( - ChatMessage, ChatOptions, ChatResponse, ChatResponseUpdate, Content, + Message, ResponseStream, ) @@ -100,7 +100,7 @@ logger = get_logger() DEFAULT_MAX_ITERATIONS: Final[int] = 40 DEFAULT_MAX_CONSECUTIVE_ERRORS_PER_REQUEST: Final[int] = 3 -ChatClientT = TypeVar("ChatClientT", bound="ChatClientProtocol[Any]") +ChatClientT = TypeVar("ChatClientT", bound="SupportsChatGetResponse[Any]") # region Helpers ArgsT = TypeVar("ArgsT", bound=BaseModel, default=BaseModel) @@ -1857,14 +1857,14 @@ def _extract_tools(options: dict[str, Any] | None) -> Any: def _collect_approval_responses( - messages: list[ChatMessage], + messages: list[Message], ) -> dict[str, Content]: """Collect approval responses (both approved and rejected) from messages.""" - from 
._types import ChatMessage + from ._types import Message fcc_todo: dict[str, Content] = {} for msg in messages: - for content in msg.contents if isinstance(msg, ChatMessage) else []: + for content in msg.contents if isinstance(msg, Message) else []: # Collect BOTH approved and rejected responses if content.type == "function_approval_response": fcc_todo[content.id] = content # type: ignore[attr-defined, index] @@ -1872,7 +1872,7 @@ def _collect_approval_responses( def _replace_approval_contents_with_results( - messages: list[ChatMessage], + messages: list[Message], fcc_todo: dict[str, Content], approved_function_results: list[Content], ) -> None: @@ -1941,7 +1941,7 @@ def _extract_function_calls(response: ChatResponse) -> list[Content]: ] -def _prepend_fcc_messages(response: ChatResponse, fcc_messages: list[ChatMessage]) -> None: +def _prepend_fcc_messages(response: ChatResponse, fcc_messages: list[Message]) -> None: if not fcc_messages: return for msg in reversed(fcc_messages): @@ -1961,7 +1961,7 @@ class FunctionRequestResult(TypedDict, total=False): action: Literal["return", "continue", "stop"] errors_in_a_row: int - result_message: ChatMessage | None + result_message: Message | None update_role: Literal["assistant", "tool"] | None function_call_results: list[Content] | None @@ -1970,12 +1970,12 @@ def _handle_function_call_results( *, response: ChatResponse, function_call_results: list[Content], - fcc_messages: list[ChatMessage], + fcc_messages: list[Message], errors_in_a_row: int, had_errors: bool, max_errors: int, ) -> FunctionRequestResult: - from ._types import ChatMessage + from ._types import Message if any(fccr.type in {"function_approval_request", "function_call"} for fccr in function_call_results): # Only add items that aren't already in the message (e.g. function_approval_request wrappers). 
@@ -1985,7 +1985,7 @@ def _handle_function_call_results( if response.messages and response.messages[0].role == "assistant": response.messages[0].contents.extend(new_items) else: - response.messages.append(ChatMessage(role="assistant", contents=new_items)) + response.messages.append(Message(role="assistant", contents=new_items)) return { "action": "return", "errors_in_a_row": errors_in_a_row, @@ -2012,7 +2012,7 @@ def _handle_function_call_results( else: errors_in_a_row = 0 - result_message = ChatMessage(role="tool", contents=function_call_results) + result_message = Message(role="tool", contents=function_call_results) response.messages.append(result_message) fcc_messages.extend(response.messages) return { @@ -2027,10 +2027,10 @@ def _handle_function_call_results( async def _process_function_requests( *, response: ChatResponse | None, - prepped_messages: list[ChatMessage] | None, + prepped_messages: list[Message] | None, tool_options: dict[str, Any] | None, attempt_idx: int, - fcc_messages: list[ChatMessage] | None, + fcc_messages: list[Message] | None, errors_in_a_row: int, max_errors: int, execute_function_calls: Callable[..., Awaitable[tuple[list[Content], bool, bool]]], @@ -2139,7 +2139,7 @@ def __init__( @overload def get_response( self, - messages: str | ChatMessage | Sequence[str | ChatMessage], + messages: str | Message | Sequence[str | Message], *, stream: Literal[False] = ..., options: ChatOptions[ResponseModelBoundT], @@ -2149,7 +2149,7 @@ def get_response( @overload def get_response( self, - messages: str | ChatMessage | Sequence[str | ChatMessage], + messages: str | Message | Sequence[str | Message], *, stream: Literal[False] = ..., options: OptionsCoT | ChatOptions[None] | None = None, @@ -2159,7 +2159,7 @@ def get_response( @overload def get_response( self, - messages: str | ChatMessage | Sequence[str | ChatMessage], + messages: str | Message | Sequence[str | Message], *, stream: Literal[True], options: OptionsCoT | ChatOptions[Any] | None = None, @@ 
-2168,7 +2168,7 @@ def get_response( def get_response( self, - messages: str | ChatMessage | Sequence[str | ChatMessage], + messages: str | Message | Sequence[str | Message], *, stream: bool = False, options: OptionsCoT | ChatOptions[Any] | None = None, @@ -2213,7 +2213,7 @@ async def _get_response() -> ChatResponse: nonlocal filtered_kwargs errors_in_a_row: int = 0 prepped_messages = prepare_messages(messages) - fcc_messages: list[ChatMessage] = [] + fcc_messages: list[Message] = [] response: ChatResponse | None = None for attempt_idx in range( @@ -2307,7 +2307,7 @@ async def _stream() -> AsyncIterable[ChatResponseUpdate]: nonlocal stream_result_hooks errors_in_a_row: int = 0 prepped_messages = prepare_messages(messages) - fcc_messages: list[ChatMessage] = [] + fcc_messages: list[Message] = [] response: ChatResponse | None = None for attempt_idx in range( diff --git a/python/packages/core/agent_framework/_types.py b/python/packages/core/agent_framework/_types.py index 4885781dfc..f9e55ddc13 100644 --- a/python/packages/core/agent_framework/_types.py +++ b/python/packages/core/agent_framework/_types.py @@ -31,7 +31,6 @@ "AgentResponse", "AgentResponseUpdate", "Annotation", - "ChatMessage", "ChatOptions", "ChatResponse", "ChatResponseUpdate", @@ -40,6 +39,7 @@ "FinalT", "FinishReason", "FinishReasonLiteral", + "Message", "OuterFinalT", "OuterUpdateT", "ResponseStream", @@ -1420,14 +1420,14 @@ def prepare_function_call_results(content: Content | Any | list[Content | Any]) Examples: .. 
code-block:: python - from agent_framework import ChatMessage + from agent_framework import Message # Use string values directly - user_msg = ChatMessage("user", ["Hello"]) - assistant_msg = ChatMessage("assistant", ["Hi there!"]) + user_msg = Message("user", ["Hello"]) + assistant_msg = Message("assistant", ["Hi there!"]) # Custom roles are also supported - custom_msg = ChatMessage("custom", ["Custom role message"]) + custom_msg = Message("custom", ["Custom role message"]) # Compare roles directly as strings if user_msg.role == "user": @@ -1461,10 +1461,10 @@ def prepare_function_call_results(content: Content | Any | list[Content | Any]) """ -# region ChatMessage +# region Message -class ChatMessage(SerializationMixin): +class Message(SerializationMixin): """Represents a chat message. Attributes: @@ -1479,17 +1479,17 @@ class ChatMessage(SerializationMixin): Examples: .. code-block:: python - from agent_framework import ChatMessage, Content + from agent_framework import Message, Content # Create a message with text content - user_msg = ChatMessage("user", ["What's the weather?"]) + user_msg = Message("user", ["What's the weather?"]) print(user_msg.text) # "What's the weather?" # Create a system message - system_msg = ChatMessage("system", ["You are a helpful assistant."]) + system_msg = Message("system", ["You are a helpful assistant."]) # Create a message with mixed content types - assistant_msg = ChatMessage( + assistant_msg = Message( "assistant", ["The weather is sunny!", Content.from_image_uri("https://...")], ) @@ -1499,13 +1499,13 @@ class ChatMessage(SerializationMixin): msg_dict = user_msg.to_dict() # {'type': 'chat_message', 'role': 'user', # 'contents': [{'type': 'text', 'text': "What's the weather?"}], 'additional_properties': {}} - restored_msg = ChatMessage.from_dict(msg_dict) + restored_msg = Message.from_dict(msg_dict) print(restored_msg.text) # "What's the weather?" 
# Serialization - to_json and from_json msg_json = user_msg.to_json() # '{"type": "chat_message", "role": "user", "contents": [...], ...}' - restored_from_json = ChatMessage.from_json(msg_json) + restored_from_json = Message.from_json(msg_json) print(restored_from_json.role) # "user" """ @@ -1523,7 +1523,7 @@ def __init__( additional_properties: MutableMapping[str, Any] | None = None, raw_representation: Any | None = None, ) -> None: - """Initialize ChatMessage. + """Initialize Message. Args: role: The role of the author of the message (e.g., "user", "assistant", "system", "tool"). @@ -1568,86 +1568,86 @@ def text(self) -> str: def prepare_messages( - messages: str | Content | ChatMessage | Sequence[str | Content | ChatMessage], + messages: str | Content | Message | Sequence[str | Content | Message], system_instructions: str | Sequence[str] | None = None, -) -> list[ChatMessage]: - """Convert various message input formats into a list of ChatMessage objects. +) -> list[Message]: + """Convert various message input formats into a list of Message objects. Args: messages: The input messages in various supported formats. Can be: - A string (converted to a user message) - - A Content object (wrapped in a user ChatMessage) - - A ChatMessage object + - A Content object (wrapped in a user Message) + - A Message object - A sequence containing any mix of the above system_instructions: The system instructions. They will be inserted to the start of the messages list. Returns: - A list of ChatMessage objects. + A list of Message objects. 
""" if system_instructions is not None: if isinstance(system_instructions, str): system_instructions = [system_instructions] - system_instruction_messages = [ChatMessage("system", [instr]) for instr in system_instructions] + system_instruction_messages = [Message("system", [instr]) for instr in system_instructions] else: system_instruction_messages = [] if isinstance(messages, str): - return [*system_instruction_messages, ChatMessage("user", [messages])] + return [*system_instruction_messages, Message("user", [messages])] if isinstance(messages, Content): - return [*system_instruction_messages, ChatMessage("user", [messages])] - if isinstance(messages, ChatMessage): + return [*system_instruction_messages, Message("user", [messages])] + if isinstance(messages, Message): return [*system_instruction_messages, messages] - return_messages: list[ChatMessage] = system_instruction_messages + return_messages: list[Message] = system_instruction_messages for msg in messages: if isinstance(msg, (str, Content)): - msg = ChatMessage("user", [msg]) + msg = Message("user", [msg]) return_messages.append(msg) return return_messages def normalize_messages( - messages: str | Content | ChatMessage | Sequence[str | Content | ChatMessage] | None = None, -) -> list[ChatMessage]: - """Normalize message inputs to a list of ChatMessage objects. + messages: str | Content | Message | Sequence[str | Content | Message] | None = None, +) -> list[Message]: + """Normalize message inputs to a list of Message objects. Args: messages: The input messages in various supported formats. Can be: - None (returns empty list) - A string (converted to a user message) - - A Content object (wrapped in a user ChatMessage) - - A ChatMessage object + - A Content object (wrapped in a user Message) + - A Message object - A sequence containing any mix of the above Returns: - A list of ChatMessage objects. + A list of Message objects. 
""" if messages is None: return [] if isinstance(messages, str): - return [ChatMessage("user", [messages])] + return [Message("user", [messages])] if isinstance(messages, Content): - return [ChatMessage("user", [messages])] + return [Message("user", [messages])] - if isinstance(messages, ChatMessage): + if isinstance(messages, Message): return [messages] - result: list[ChatMessage] = [] + result: list[Message] = [] for msg in messages: if isinstance(msg, (str, Content)): - result.append(ChatMessage("user", [msg])) + result.append(Message("user", [msg])) else: result.append(msg) return result def prepend_instructions_to_messages( - messages: list[ChatMessage], + messages: list[Message], instructions: str | Sequence[str] | None, role: RoleLiteral | str = "system", -) -> list[ChatMessage]: +) -> list[Message]: """Prepend instructions to a list of messages with a specified role. This is a helper method for chat clients that need to add instructions @@ -1655,7 +1655,7 @@ def prepend_instructions_to_messages( instructions (e.g., OpenAI uses "system", some providers might use "user"). Args: - messages: The existing list of ChatMessage objects. + messages: The existing list of Message objects. instructions: The instructions to prepend. Can be a single string or a sequence of strings. role: The role to use for the instruction messages. Defaults to "system". @@ -1665,9 +1665,9 @@ def prepend_instructions_to_messages( Examples: .. 
code-block:: python - from agent_framework import prepend_instructions_to_messages, ChatMessage + from agent_framework import prepend_instructions_to_messages, Message - messages = [ChatMessage("user", ["Hello"])] + messages = [Message("user", ["Hello"])] instructions = "You are a helpful assistant" # Prepend as system message (default) @@ -1682,7 +1682,7 @@ def prepend_instructions_to_messages( if isinstance(instructions, str): instructions = [instructions] - instruction_messages = [ChatMessage(role, [instr]) for instr in instructions] + instruction_messages = [Message(role, [instr]) for instr in instructions] return [*instruction_messages, *messages] @@ -1704,7 +1704,7 @@ def _process_update(response: ChatResponse | AgentResponse, update: ChatResponse is_new_message = True if is_new_message: - message = ChatMessage("assistant", []) + message = Message("assistant", []) response.messages.append(message) else: message = response.messages[-1] @@ -1847,17 +1847,17 @@ class ChatResponse(SerializationMixin, Generic[ResponseModelT]): raw_representation: The raw representation of the chat response from an underlying implementation. Note: - The `author_name` attribute is available on the `ChatMessage` objects inside `messages`, + The `author_name` attribute is available on the `Message` objects inside `messages`, not on the `ChatResponse` itself. Use `response.messages[0].author_name` to access the author name of individual messages. Examples: .. 
code-block:: python - from agent_framework import ChatResponse, ChatMessage + from agent_framework import ChatResponse, Message # Create a response with messages - msg = ChatMessage("assistant", ["The weather is sunny."]) + msg = Message("assistant", ["The weather is sunny."]) response = ChatResponse( messages=[msg], finish_reason="stop", @@ -1887,7 +1887,7 @@ class ChatResponse(SerializationMixin, Generic[ResponseModelT]): def __init__( self, *, - messages: ChatMessage | Sequence[ChatMessage] | None = None, + messages: Message | Sequence[Message] | None = None, response_id: str | None = None, conversation_id: str | None = None, model_id: str | None = None, @@ -1903,7 +1903,7 @@ def __init__( """Initializes a ChatResponse with the provided parameters. Keyword Args: - messages: A single ChatMessage or sequence of ChatMessage objects to include in the response. + messages: A single Message or sequence of Message objects to include in the response. response_id: Optional ID of the chat response. conversation_id: Optional identifier for the state of the conversation. model_id: Optional model ID used in the creation of the chat response. @@ -1918,17 +1918,17 @@ def __init__( raw_representation: Optional raw representation of the chat response from an underlying implementation. 
""" if messages is None: - self.messages: list[ChatMessage] = [] - elif isinstance(messages, ChatMessage): + self.messages: list[Message] = [] + elif isinstance(messages, Message): self.messages = [messages] else: - # Handle both ChatMessage objects and dicts (for from_dict support) - processed_messages: list[ChatMessage] = [] + # Handle both Message objects and dicts (for from_dict support) + processed_messages: list[Message] = [] for msg in messages: - if isinstance(msg, ChatMessage): + if isinstance(msg, Message): processed_messages.append(msg) elif isinstance(msg, dict): - processed_messages.append(ChatMessage.from_dict(msg)) + processed_messages.append(Message.from_dict(msg)) else: processed_messages.append(msg) self.messages = processed_messages @@ -2057,7 +2057,7 @@ async def from_update_generator( @property def text(self) -> str: """Returns the concatenated text of all messages in the response.""" - return ("\n".join(message.text for message in self.messages if isinstance(message, ChatMessage))).strip() + return ("\n".join(message.text for message in self.messages if isinstance(message, Message))).strip() @property def value(self) -> ResponseModelT | None: @@ -2096,7 +2096,7 @@ class ChatResponseUpdate(SerializationMixin): author_name: The name of the author of the response update. This is primarily used in multi-agent scenarios to identify which agent or participant generated the response. When updates are combined into a `ChatResponse`, the `author_name` is propagated - to the resulting `ChatMessage` objects. + to the resulting `Message` objects. response_id: The ID of the response of which this update is a part. message_id: The ID of the message of which this update is a part. conversation_id: An identifier for the state of the conversation of which this update is a part. @@ -2217,17 +2217,17 @@ class AgentResponse(SerializationMixin, Generic[ResponseModelT]): messages in scenarios involving function calls, RAG retrievals, or complex logic. 
Note: - The `author_name` attribute is available on the `ChatMessage` objects inside `messages`, + The `author_name` attribute is available on the `Message` objects inside `messages`, not on the `AgentResponse` itself. Use `response.messages[0].author_name` to access the author name of individual messages. Examples: .. code-block:: python - from agent_framework import AgentResponse, ChatMessage + from agent_framework import AgentResponse, Message # Create agent response - msg = ChatMessage("assistant", ["Task completed successfully."]) + msg = Message("assistant", ["Task completed successfully."]) response = AgentResponse(messages=[msg], response_id="run_123") print(response.text) # "Task completed successfully." @@ -2258,7 +2258,7 @@ class AgentResponse(SerializationMixin, Generic[ResponseModelT]): def __init__( self, *, - messages: ChatMessage | Sequence[ChatMessage] | None = None, + messages: Message | Sequence[Message] | None = None, response_id: str | None = None, agent_id: str | None = None, created_at: CreatedAtT | None = None, @@ -2272,7 +2272,7 @@ def __init__( """Initialize an AgentResponse. Keyword Args: - messages: A single ChatMessage or sequence of ChatMessage objects to include in the response. + messages: A single Message or sequence of Message objects to include in the response. response_id: The ID of the chat response. agent_id: The identifier of the agent that produced this response. Useful in multi-agent scenarios to track which agent generated the response. @@ -2286,17 +2286,17 @@ def __init__( raw_representation: The raw representation of the chat response from an underlying implementation. 
""" if messages is None: - self.messages: list[ChatMessage] = [] - elif isinstance(messages, ChatMessage): + self.messages: list[Message] = [] + elif isinstance(messages, Message): self.messages = [messages] else: - # Handle both ChatMessage objects and dicts (for from_dict support) - processed_messages: list[ChatMessage] = [] + # Handle both Message objects and dicts (for from_dict support) + processed_messages: list[Message] = [] for msg in messages: - if isinstance(msg, ChatMessage): + if isinstance(msg, Message): processed_messages.append(msg) elif isinstance(msg, dict): - processed_messages.append(ChatMessage.from_dict(msg)) + processed_messages.append(Message.from_dict(msg)) else: processed_messages.append(msg) self.messages = processed_messages @@ -2440,7 +2440,7 @@ class AgentResponseUpdate(SerializationMixin): role: The role of the author of the response update. author_name: The name of the author of the response update. In multi-agent scenarios, this identifies which agent generated this update. When updates are combined into - an `AgentResponse`, the `author_name` is propagated to the resulting `ChatMessage` objects. + an `AgentResponse`, the `author_name` is propagated to the resulting `Message` objects. agent_id: The identifier of the agent that produced this update. Useful in multi-agent scenarios to track which agent generated specific parts of the response. response_id: The ID of the response of which this update is a part. 
diff --git a/python/packages/core/agent_framework/_workflows/__init__.py b/python/packages/core/agent_framework/_workflows/__init__.py index c5666f7b26..e573c51e23 100644 --- a/python/packages/core/agent_framework/_workflows/__init__.py +++ b/python/packages/core/agent_framework/_workflows/__init__.py @@ -52,8 +52,8 @@ from ._runner import Runner from ._runner_context import ( InProcRunnerContext, - Message, RunnerContext, + WorkflowMessage, ) from ._validation import ( EdgeDuplicationError, @@ -92,7 +92,6 @@ "GraphConnectivityError", "InMemoryCheckpointStorage", "InProcRunnerContext", - "Message", "Runner", "RunnerContext", "SingleEdgeGroup", @@ -117,6 +116,7 @@ "WorkflowEventType", "WorkflowException", "WorkflowExecutor", + "WorkflowMessage", "WorkflowRunResult", "WorkflowRunState", "WorkflowRunnerException", diff --git a/python/packages/core/agent_framework/_workflows/_agent.py b/python/packages/core/agent_framework/_workflows/_agent.py index 46161e61e4..962da2bcd4 100644 --- a/python/packages/core/agent_framework/_workflows/_agent.py +++ b/python/packages/core/agent_framework/_workflows/_agent.py @@ -16,8 +16,8 @@ AgentResponseUpdate, AgentThread, BaseAgent, - ChatMessage, Content, + Message, UsageDetails, ) @@ -107,8 +107,8 @@ def __init__( except KeyError as exc: # Defensive: workflow lacks a configured entry point raise ValueError("Workflow's start executor is not defined.") from exc - if not any(is_type_compatible(list[ChatMessage], input_type) for input_type in start_executor.input_types): - raise ValueError("Workflow's start executor cannot handle list[ChatMessage]") + if not any(is_type_compatible(list[Message], input_type) for input_type in start_executor.input_types): + raise ValueError("Workflow's start executor cannot handle list[Message]") super().__init__(id=id, name=name, description=description, **kwargs) self._workflow: Workflow = workflow @@ -127,7 +127,7 @@ def pending_requests(self) -> dict[str, WorkflowEvent[Any]]: @overload def run( self, - 
messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | Message | list[str] | list[Message] | None = None, *, stream: Literal[True], thread: AgentThread | None = None, @@ -139,7 +139,7 @@ def run( @overload async def run( self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | Message | list[str] | list[Message] | None = None, *, stream: Literal[False] = ..., thread: AgentThread | None = None, @@ -150,7 +150,7 @@ async def run( def run( self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | Message | list[str] | list[Message] | None = None, *, stream: bool = False, thread: AgentThread | None = None, @@ -202,7 +202,7 @@ def run( async def _run_non_streaming( self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | Message | list[str] | list[Message] | None = None, *, thread: AgentThread | None = None, checkpoint_id: str | None = None, @@ -225,7 +225,7 @@ async def _run_non_streaming( async def _run_streaming( self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | Message | list[str] | list[Message] | None = None, *, thread: AgentThread | None = None, checkpoint_id: str | None = None, @@ -257,7 +257,7 @@ async def _run_streaming( async def _run_impl( self, - input_messages: list[ChatMessage], + input_messages: list[Message], response_id: str, thread: AgentThread, checkpoint_id: str | None = None, @@ -289,7 +289,7 @@ async def _run_impl( async def _run_stream_impl( self, - input_messages: list[ChatMessage], + input_messages: list[Message], response_id: str, thread: AgentThread, checkpoint_id: str | None = None, @@ -319,7 +319,7 @@ async def _run_stream_impl( async def _run_core( self, - input_messages: list[ChatMessage], + input_messages: list[Message], thread: AgentThread, checkpoint_id: str | None, checkpoint_storage: CheckpointStorage | None, @@ 
-393,8 +393,8 @@ async def _run_core( async def _build_conversation_messages( self, thread: AgentThread, - input_messages: list[ChatMessage], - ) -> list[ChatMessage]: + input_messages: list[Message], + ) -> list[Message]: """Build the complete conversation by prepending thread history to input messages. Args: @@ -402,9 +402,9 @@ async def _build_conversation_messages( input_messages: The new input messages to append. Returns: - A list of ChatMessage objects representing the full conversation. + A list of Message objects representing the full conversation. """ - conversation_messages: list[ChatMessage] = [] + conversation_messages: list[Message] = [] if thread.message_store: history = await thread.message_store.list_messages() if history: @@ -412,7 +412,7 @@ async def _build_conversation_messages( conversation_messages.extend(input_messages) return conversation_messages - def _process_pending_requests(self, input_messages: list[ChatMessage]) -> dict[str, Any]: + def _process_pending_requests(self, input_messages: list[Message]) -> dict[str, Any]: """Process pending requests by extracting function responses and updating state. 
Args: @@ -444,7 +444,7 @@ def _convert_workflow_events_to_agent_response( output_events: list[WorkflowEvent[Any]], ) -> AgentResponse: """Convert a list of workflow output events to an AgentResponse.""" - messages: list[ChatMessage] = [] + messages: list[Message] = [] raw_representations: list[object] = [] merged_usage: UsageDetails | None = None latest_created_at: str | None = None @@ -453,7 +453,7 @@ def _convert_workflow_events_to_agent_response( if output_event.type == "request_info": function_call, approval_request = self._process_request_info_event(output_event) messages.append( - ChatMessage( + Message( contents=[function_call, approval_request], role="assistant", author_name=output_event.source_executor_id, @@ -484,11 +484,11 @@ def _convert_workflow_events_to_agent_response( if data.created_at else latest_created_at ) - elif isinstance(data, ChatMessage): + elif isinstance(data, Message): messages.append(data) raw_representations.append(data.raw_representation) - elif is_instance_of(data, list[ChatMessage]): - chat_messages = cast(list[ChatMessage], data) + elif is_instance_of(data, list[Message]): + chat_messages = cast(list[Message], data) messages.extend(chat_messages) raw_representations.append(data) else: @@ -497,7 +497,7 @@ def _convert_workflow_events_to_agent_response( continue messages.append( - ChatMessage( + Message( contents=contents, role="assistant", author_name=output_event.executor_id, @@ -591,7 +591,7 @@ def _convert_workflow_event_to_agent_response_updates( ) ) return updates - if isinstance(data, ChatMessage): + if isinstance(data, Message): return [ AgentResponseUpdate( contents=list(data.contents), @@ -603,9 +603,9 @@ def _convert_workflow_event_to_agent_response_updates( raw_representation=data, ) ] - if is_instance_of(data, list[ChatMessage]): - # Convert each ChatMessage to an AgentResponseUpdate - chat_messages = cast(list[ChatMessage], data) + if is_instance_of(data, list[Message]): + # Convert each Message to an 
AgentResponseUpdate + chat_messages = cast(list[Message], data) updates = [] for msg in chat_messages: updates.append( @@ -669,7 +669,7 @@ def _convert_workflow_event_to_agent_response_updates( # Ignore workflow-internal events return [] - def _extract_function_responses(self, input_messages: list[ChatMessage]) -> dict[str, Any]: + def _extract_function_responses(self, input_messages: list[Message]) -> dict[str, Any]: """Extract function responses from input messages.""" function_responses: dict[str, Any] = {} for message in input_messages: @@ -820,7 +820,7 @@ def _add_raw(value: object) -> None: ) # PHASE 2: CONVERT GROUPED UPDATES TO RESPONSES AND MERGE - final_messages: list[ChatMessage] = [] + final_messages: list[Message] = [] merged_usage: UsageDetails | None = None latest_created_at: str | None = None merged_additional_properties: dict[str, Any] | None = None diff --git a/python/packages/core/agent_framework/_workflows/_agent_executor.py b/python/packages/core/agent_framework/_workflows/_agent_executor.py index 4158380086..0024f2afe0 100644 --- a/python/packages/core/agent_framework/_workflows/_agent_executor.py +++ b/python/packages/core/agent_framework/_workflows/_agent_executor.py @@ -11,7 +11,7 @@ from .._agents import SupportsAgentRun from .._threads import AgentThread -from .._types import AgentResponse, AgentResponseUpdate, ChatMessage +from .._types import AgentResponse, AgentResponseUpdate, Message from ._agent_utils import resolve_agent_id from ._checkpoint_encoding import decode_checkpoint_value, encode_checkpoint_value from ._const import WORKFLOW_RUN_KWARGS_KEY @@ -40,7 +40,7 @@ class AgentExecutorRequest: If False, the messages will be saved to the executor's cache but not sent to the agent. 
""" - messages: list[ChatMessage] + messages: list[Message] should_respond: bool = True @@ -58,7 +58,7 @@ class AgentExecutorResponse: executor_id: str agent_response: AgentResponse - full_conversation: list[ChatMessage] | None = None + full_conversation: list[Message] | None = None class AgentExecutor(Executor): @@ -104,9 +104,9 @@ def __init__( self._pending_responses_to_agent: list[Content] = [] # AgentExecutor maintains an internal cache of messages in between runs - self._cache: list[ChatMessage] = [] + self._cache: list[Message] = [] # This tracks the full conversation after each run - self._full_conversation: list[ChatMessage] = [] + self._full_conversation: list[Message] = [] @property def description(self) -> str | None: @@ -157,20 +157,20 @@ async def from_str( @handler async def from_message( self, - message: ChatMessage, + message: Message, ctx: WorkflowContext[AgentExecutorResponse, AgentResponse | AgentResponseUpdate], ) -> None: - """Accept a single ChatMessage as input.""" + """Accept a single Message as input.""" self._cache = normalize_messages_input(message) await self._run_agent_and_emit(ctx) @handler async def from_messages( self, - messages: list[str | ChatMessage], + messages: list[str | Message], ctx: WorkflowContext[AgentExecutorResponse, AgentResponse | AgentResponseUpdate], ) -> None: - """Accept a list of chat inputs (strings or ChatMessage) as conversation context.""" + """Accept a list of chat inputs (strings or Message) as conversation context.""" self._cache = normalize_messages_input(messages) await self._run_agent_and_emit(ctx) @@ -198,7 +198,7 @@ async def handle_user_input_response( # Use role="tool" for function_result responses (from declaration-only tools) # so the LLM receives proper tool results instead of orphaned tool_calls. 
role = "tool" if all(r.type == "function_result" for r in self._pending_responses_to_agent) else "user" - self._cache = normalize_messages_input(ChatMessage(role=role, contents=self._pending_responses_to_agent)) + self._cache = normalize_messages_input(Message(role=role, contents=self._pending_responses_to_agent)) self._pending_responses_to_agent.clear() await self._run_agent_and_emit(ctx) diff --git a/python/packages/core/agent_framework/_workflows/_conversation_history.py b/python/packages/core/agent_framework/_workflows/_conversation_history.py index 52d7d99c74..3df5282ee4 100644 --- a/python/packages/core/agent_framework/_workflows/_conversation_history.py +++ b/python/packages/core/agent_framework/_workflows/_conversation_history.py @@ -2,16 +2,16 @@ """Helpers for managing chat conversation history. -These utilities operate on standard `list[ChatMessage]` collections and simple +These utilities operate on standard `list[Message]` collections and simple dictionary snapshots so orchestrators can share logic without new mixins. 
""" from collections.abc import Sequence -from .._types import ChatMessage +from .._types import Message -def latest_user_message(conversation: Sequence[ChatMessage]) -> ChatMessage: +def latest_user_message(conversation: Sequence[Message]) -> Message: """Return the most recent user-authored message from `conversation`.""" for message in reversed(conversation): role_value = getattr(message.role, "value", message.role) @@ -20,7 +20,7 @@ def latest_user_message(conversation: Sequence[ChatMessage]) -> ChatMessage: raise ValueError("No user message in conversation") -def ensure_author(message: ChatMessage, fallback: str) -> ChatMessage: +def ensure_author(message: Message, fallback: str) -> Message: """Attach `fallback` author if message is missing `author_name`.""" message.author_name = message.author_name or fallback return message diff --git a/python/packages/core/agent_framework/_workflows/_conversation_state.py b/python/packages/core/agent_framework/_workflows/_conversation_state.py index 22433e6775..95945998df 100644 --- a/python/packages/core/agent_framework/_workflows/_conversation_state.py +++ b/python/packages/core/agent_framework/_workflows/_conversation_state.py @@ -3,20 +3,20 @@ from collections.abc import Iterable from typing import Any, cast -from agent_framework import ChatMessage +from agent_framework import Message from ._checkpoint_encoding import decode_checkpoint_value, encode_checkpoint_value """Utilities for serializing and deserializing chat conversations for persistence. -These helpers convert rich `ChatMessage` instances to checkpoint-friendly payloads +These helpers convert rich `Message` instances to checkpoint-friendly payloads using the same encoding primitives as the workflow runner. This preserves `additional_properties` and other metadata without relying on unsafe mechanisms such as pickling. 
""" -def encode_chat_messages(messages: Iterable[ChatMessage]) -> list[dict[str, Any]]: +def encode_chat_messages(messages: Iterable[Message]) -> list[dict[str, Any]]: """Serialize chat messages into checkpoint-safe payloads.""" encoded: list[dict[str, Any]] = [] for message in messages: @@ -32,9 +32,9 @@ def encode_chat_messages(messages: Iterable[ChatMessage]) -> list[dict[str, Any] return encoded -def decode_chat_messages(payload: Iterable[dict[str, Any]]) -> list[ChatMessage]: +def decode_chat_messages(payload: Iterable[dict[str, Any]]) -> list[Message]: """Restore chat messages from checkpoint-safe payloads.""" - restored: list[ChatMessage] = [] + restored: list[Message] = [] for item in payload: if not isinstance(item, dict): continue @@ -64,7 +64,7 @@ def decode_chat_messages(payload: Iterable[dict[str, Any]]) -> list[ChatMessage] additional[key] = decode_checkpoint_value(value) restored.append( - ChatMessage( # type: ignore[call-overload] + Message( # type: ignore[call-overload] role=role, contents=contents, author_name=item.get("author_name"), diff --git a/python/packages/core/agent_framework/_workflows/_edge_runner.py b/python/packages/core/agent_framework/_workflows/_edge_runner.py index c87994b4b4..06188e9fb2 100644 --- a/python/packages/core/agent_framework/_workflows/_edge_runner.py +++ b/python/packages/core/agent_framework/_workflows/_edge_runner.py @@ -18,7 +18,7 @@ SwitchCaseEdgeGroup, ) from ._executor import Executor -from ._runner_context import Message, RunnerContext +from ._runner_context import RunnerContext, WorkflowMessage from ._state import State logger = logging.getLogger(__name__) @@ -38,7 +38,7 @@ def __init__(self, edge_group: EdgeGroup, executors: dict[str, Executor]) -> Non self._executors = executors @abstractmethod - async def send_message(self, message: Message, state: State, ctx: RunnerContext) -> bool: + async def send_message(self, message: WorkflowMessage, state: State, ctx: RunnerContext) -> bool: """Send a message through 
the edge group. Args: @@ -52,7 +52,7 @@ async def send_message(self, message: Message, state: State, ctx: RunnerContext) """ raise NotImplementedError - def _can_handle(self, executor_id: str, message: Message) -> bool: + def _can_handle(self, executor_id: str, message: WorkflowMessage) -> bool: """Check if an executor can handle the given message data.""" if executor_id not in self._executors: return False @@ -62,7 +62,7 @@ async def _execute_on_target( self, target_id: str, source_ids: list[str], - message: Message, + message: WorkflowMessage, state: State, ctx: RunnerContext, ) -> None: @@ -90,7 +90,7 @@ def __init__(self, edge_group: SingleEdgeGroup | InternalEdgeGroup, executors: d super().__init__(edge_group, executors) self._edge = edge_group.edges[0] - async def send_message(self, message: Message, state: State, ctx: RunnerContext) -> bool: + async def send_message(self, message: WorkflowMessage, state: State, ctx: RunnerContext) -> bool: """Send a message through the single edge.""" should_execute = False target_id: str | None = None @@ -162,7 +162,7 @@ def __init__(self, edge_group: FanOutEdgeGroup, executors: dict[str, Executor]) Callable[[Any, list[str]], list[str]] | None, getattr(edge_group, "selection_func", None) ) - async def send_message(self, message: Message, state: State, ctx: RunnerContext) -> bool: + async def send_message(self, message: WorkflowMessage, state: State, ctx: RunnerContext) -> bool: """Send a message through all edges in the fan-out edge group.""" deliverable_edges: list[Edge] = [] single_target_edge: Edge | None = None @@ -283,9 +283,9 @@ def __init__(self, edge_group: FanInEdgeGroup, executors: dict[str, Executor]) - self._edges = edge_group.edges # Buffer to hold messages before sending them to the target executor # Key is the source executor ID, value is a list of messages - self._buffer: dict[str, list[Message]] = defaultdict(list) + self._buffer: dict[str, list[WorkflowMessage]] = defaultdict(list) - async def 
send_message(self, message: Message, state: State, ctx: RunnerContext) -> bool: + async def send_message(self, message: WorkflowMessage, state: State, ctx: RunnerContext) -> bool: """Send a message through all edges in the fan-in edge group.""" execution_data: dict[str, Any] | None = None with create_edge_group_processing_span( @@ -306,7 +306,7 @@ async def send_message(self, message: Message, state: State, ctx: RunnerContext) # Check if target can handle list of message data (fan-in aggregates multiple messages) if self._can_handle( - self._edges[0].target_id, Message(data=[message.data], source_id=message.source_id) + self._edges[0].target_id, WorkflowMessage(data=[message.data], source_id=message.source_id) ): # If the edge can handle the data, buffer the message self._buffer[message.source_id].append(message) @@ -334,7 +334,7 @@ async def send_message(self, message: Message, state: State, ctx: RunnerContext) source_span_ids = [msg.source_span_id for msg in messages_to_send if msg.source_span_id] # Create a new Message object for the aggregated data - aggregated_message = Message( + aggregated_message = WorkflowMessage( data=aggregated_data, source_id=self._edge_group.__class__.__name__, # This won't be used in self._execute_on_target. 
trace_contexts=trace_contexts, diff --git a/python/packages/core/agent_framework/_workflows/_executor.py b/python/packages/core/agent_framework/_workflows/_executor.py index ffab65e3a3..2bd4be229c 100644 --- a/python/packages/core/agent_framework/_workflows/_executor.py +++ b/python/packages/core/agent_framework/_workflows/_executor.py @@ -17,7 +17,7 @@ ) from ._model_utils import DictConvertible from ._request_info_mixin import RequestInfoMixin -from ._runner_context import Message, MessageType, RunnerContext +from ._runner_context import MessageType, RunnerContext, WorkflowMessage from ._state import State from ._typing_utils import is_instance_of, normalize_type_to_list, resolve_type_annotation from ._workflow_context import WorkflowContext, validate_workflow_context_annotation @@ -244,16 +244,16 @@ async def execute( with create_processing_span( self.id, self.__class__.__name__, - str(MessageType.STANDARD if not isinstance(message, Message) else message.type), - type(message).__name__, + str(MessageType.STANDARD if not isinstance(message, WorkflowMessage) else message.type), + type(WorkflowMessage).__name__, source_trace_contexts=trace_contexts, source_span_ids=source_span_ids, ): # Find the handler and handler spec that matches the message type. 
- handler = self._find_handler(message) + handler = self._find_handler(WorkflowMessage) original_message = message - if isinstance(message, Message): + if isinstance(message, WorkflowMessage): # Unwrap raw data for handler call message = message.data @@ -265,14 +265,14 @@ async def execute( trace_contexts=trace_contexts, source_span_ids=source_span_ids, request_id=original_message.original_request_info_event.request_id - if isinstance(original_message, Message) and original_message.original_request_info_event + if isinstance(original_message, WorkflowMessage) and original_message.original_request_info_event else None, ) # Invoke the handler with the message and context # Use deepcopy to capture original input state before handler can mutate it with _framework_event_origin(): - invoke_event = WorkflowEvent.executor_invoked(self.id, copy.deepcopy(message)) + invoke_event = WorkflowEvent.executor_invoked(self.id, copy.deepcopy(WorkflowMessage)) await context.add_event(invoke_event) try: await handler(message, context) @@ -351,7 +351,7 @@ def _discover_handlers(self) -> None: # Add to unified handler specs list self._handler_specs.append({**handler_spec}) - def can_handle(self, message: Message) -> bool: + def can_handle(self, message: WorkflowMessage) -> bool: """Check if the executor can handle a given message type. 
Args: @@ -460,7 +460,7 @@ def _find_handler(self, message: Any) -> Callable[[Any, WorkflowContext[Any, Any Returns: The handler function if found, None otherwise """ - if isinstance(message, Message): + if isinstance(message, WorkflowMessage): # Case where Message wrapper is passed instead of raw data # Handler can be a standard handler or a response handler if message.type == MessageType.STANDARD: @@ -487,7 +487,7 @@ def _find_handler(self, message: Any) -> Callable[[Any, WorkflowContext[Any, Any for message_type in self._handlers: if is_instance_of(message, message_type): return self._handlers[message_type] - raise RuntimeError(f"Executor {self.__class__.__name__} cannot handle message of type {type(message)}.") + raise RuntimeError(f"Executor {self.__class__.__name__} cannot handle message of type {type(WorkflowMessage)}.") async def on_checkpoint_save(self) -> dict[str, Any]: """Hook called when the workflow is being saved to a checkpoint. diff --git a/python/packages/core/agent_framework/_workflows/_message_utils.py b/python/packages/core/agent_framework/_workflows/_message_utils.py index 920672cead..6d27a905ee 100644 --- a/python/packages/core/agent_framework/_workflows/_message_utils.py +++ b/python/packages/core/agent_framework/_workflows/_message_utils.py @@ -4,38 +4,38 @@ from collections.abc import Sequence -from agent_framework import ChatMessage +from agent_framework import Message def normalize_messages_input( - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, -) -> list[ChatMessage]: - """Normalize heterogeneous message inputs to a list of ChatMessage objects. + messages: str | Message | Sequence[str | Message] | None = None, +) -> list[Message]: + """Normalize heterogeneous message inputs to a list of Message objects. Args: - messages: String, ChatMessage, or sequence of either. None yields empty list. + messages: String, Message, or sequence of either. None yields empty list. 
Returns: - List of ChatMessage instances suitable for workflow consumption. + List of Message instances suitable for workflow consumption. """ if messages is None: return [] if isinstance(messages, str): - return [ChatMessage(role="user", text=messages)] + return [Message(role="user", text=messages)] - if isinstance(messages, ChatMessage): + if isinstance(messages, Message): return [messages] - normalized: list[ChatMessage] = [] + normalized: list[Message] = [] for item in messages: if isinstance(item, str): - normalized.append(ChatMessage(role="user", text=item)) - elif isinstance(item, ChatMessage): + normalized.append(Message(role="user", text=item)) + elif isinstance(item, Message): normalized.append(item) else: raise TypeError( - f"Messages sequence must contain only str or ChatMessage instances; found {type(item).__name__}." + f"Messages sequence must contain only str or Message instances; found {type(item).__name__}." ) return normalized diff --git a/python/packages/core/agent_framework/_workflows/_runner.py b/python/packages/core/agent_framework/_workflows/_runner.py index f3a475e034..83ce5d8085 100644 --- a/python/packages/core/agent_framework/_workflows/_runner.py +++ b/python/packages/core/agent_framework/_workflows/_runner.py @@ -24,8 +24,8 @@ ) from ._executor import Executor from ._runner_context import ( - Message, RunnerContext, + WorkflowMessage, ) from ._state import State @@ -162,14 +162,14 @@ async def run_until_convergence(self) -> AsyncGenerator[WorkflowEvent, None]: self._running = False async def _run_iteration(self) -> None: - async def _deliver_messages(source_executor_id: str, messages: list[Message]) -> None: + async def _deliver_messages(source_executor_id: str, messages: list[WorkflowMessage]) -> None: """Outer loop to concurrently deliver messages from all sources to their targets.""" - async def _deliver_message_inner(edge_runner: EdgeRunner, message: Message) -> bool: + async def _deliver_message_inner(edge_runner: EdgeRunner, 
message: WorkflowMessage) -> bool: """Inner loop to deliver a single message through an edge runner.""" return await edge_runner.send_message(message, self._state, self._ctx) - def _normalize_message_payload(message: Message) -> None: + def _normalize_message_payload(message: WorkflowMessage) -> None: data = message.data if not isinstance(data, dict): return diff --git a/python/packages/core/agent_framework/_workflows/_runner_context.py b/python/packages/core/agent_framework/_workflows/_runner_context.py index ed81026245..db6558306a 100644 --- a/python/packages/core/agent_framework/_workflows/_runner_context.py +++ b/python/packages/core/agent_framework/_workflows/_runner_context.py @@ -29,25 +29,25 @@ class MessageType(Enum): - """Enumeration of message types in the workflow.""" + """Enumeration of WorkflowMessage types in the workflow.""" STANDARD = "standard" - """A standard message between executors.""" + """A standard WorkflowMessage between executors.""" RESPONSE = "response" - """A response message to a pending request.""" + """A response WorkflowMessage to a pending request.""" @dataclass -class Message: - """A class representing a message in the workflow.""" +class WorkflowMessage: + """A class representing a WorkflowMessage in the workflow.""" data: Any source_id: str target_id: str | None = None type: MessageType = MessageType.STANDARD - # OpenTelemetry trace context fields for message propagation + # OpenTelemetry trace context fields for WorkflowMessage propagation # These are plural to support fan-in scenarios where multiple messages are aggregated trace_contexts: list[dict[str, str]] | None = None # W3C Trace Context headers from multiple sources source_span_ids: list[str] | None = None # Publishing span IDs for linking from multiple sources @@ -67,7 +67,7 @@ def source_span_id(self) -> str | None: return self.source_span_ids[0] if self.source_span_ids else None def to_dict(self) -> dict[str, Any]: - """Convert the Message to a dictionary for 
serialization.""" + """Convert the WorkflowMessage to a dictionary for serialization.""" return { "data": encode_checkpoint_value(self.data), "source_id": self.source_id, @@ -79,16 +79,16 @@ def to_dict(self) -> dict[str, Any]: } @staticmethod - def from_dict(data: dict[str, Any]) -> Message: - """Create a Message from a dictionary.""" + def from_dict(data: dict[str, Any]) -> WorkflowMessage: + """Create a WorkflowMessage from a dictionary.""" # Validation if "data" not in data: - raise KeyError("Missing 'data' field in Message dictionary.") + raise KeyError("Missing 'data' field in WorkflowMessage dictionary.") if "source_id" not in data: - raise KeyError("Missing 'source_id' field in Message dictionary.") + raise KeyError("Missing 'source_id' field in WorkflowMessage dictionary.") - return Message( + return WorkflowMessage( data=decode_checkpoint_value(data["data"]), source_id=data["source_id"], target_id=data.get("target_id"), @@ -119,15 +119,15 @@ class RunnerContext(Protocol): If checkpoint storage is not configured, checkpoint methods may raise. """ - async def send_message(self, message: Message) -> None: - """Send a message from the executor to the context. + async def send_message(self, WorkflowMessage: WorkflowMessage) -> None: + """Send a WorkflowMessage from the executor to the context. Args: - message: The message to be sent. + WorkflowMessage: The WorkflowMessage to be sent. """ ... - async def drain_messages(self) -> dict[str, list[Message]]: + async def drain_messages(self) -> dict[str, list[WorkflowMessage]]: """Drain all messages from the context. Returns: @@ -291,7 +291,7 @@ def __init__(self, checkpoint_storage: CheckpointStorage | None = None): Args: checkpoint_storage: Optional storage to enable checkpointing. 
""" - self._messages: dict[str, list[Message]] = {} + self._messages: dict[str, list[WorkflowMessage]] = {} # Event queue for immediate streaming of events self._event_queue: asyncio.Queue[WorkflowEvent] = asyncio.Queue() @@ -307,11 +307,11 @@ def __init__(self, checkpoint_storage: CheckpointStorage | None = None): self._streaming: bool = False # region Messaging and Events - async def send_message(self, message: Message) -> None: - self._messages.setdefault(message.source_id, []) - self._messages[message.source_id].append(message) + async def send_message(self, message: WorkflowMessage) -> None: + self._messages.setdefault(message.source_id, []) + self._messages[message.source_id].append(message) - async def drain_messages(self) -> dict[str, list[Message]]: + async def drain_messages(self) -> dict[str, list[WorkflowMessage]]: messages = copy(self._messages) self._messages.clear() return messages @@ -422,7 +422,7 @@ async def apply_checkpoint(self, checkpoint: WorkflowCheckpoint) -> None: self._messages.clear() messages_data = checkpoint.messages for source_id, message_list in messages_data.items(): - self._messages[source_id] = [Message.from_dict(msg) for msg in message_list] + self._messages[source_id] = [WorkflowMessage.from_dict(msg) for msg in message_list] # Restore pending request info events self._pending_request_info_events.clear() @@ -504,7 +504,7 @@ async def send_request_info_response(self, request_id: str, response: Any) -> No source_executor_id = event.source_executor_id # Create ResponseMessage instance - response_msg = Message( + response_msg = WorkflowMessage( data=response, source_id=INTERNAL_SOURCE_ID(source_executor_id), target_id=source_executor_id, diff --git a/python/packages/core/agent_framework/_workflows/_typing_utils.py b/python/packages/core/agent_framework/_workflows/_typing_utils.py index 5bff0900b6..41ed071f0a 100644 --- a/python/packages/core/agent_framework/_workflows/_typing_utils.py +++ 
b/python/packages/core/agent_framework/_workflows/_typing_utils.py @@ -3,19 +3,19 @@ from types import UnionType from typing import Any, TypeGuard, Union, cast, get_args, get_origin -from .._agents import ChatAgent +from .._agents import Agent -def is_chat_agent(agent: Any) -> TypeGuard[ChatAgent]: - """Check if the given agent is a ChatAgent. +def is_chat_agent(agent: Any) -> TypeGuard[Agent]: + """Check if the given agent is an Agent. Args: agent (Any): The agent to check. Returns: - TypeGuard[ChatAgent]: True if the agent is a ChatAgent, False otherwise. + TypeGuard[Agent]: True if the agent is an Agent, False otherwise. """ - return isinstance(agent, ChatAgent) + return isinstance(agent, Agent) def resolve_type_annotation( @@ -255,7 +255,7 @@ def is_type_compatible(source_type: type | UnionType | Any, target_type: type | A type is compatible if values of source_type can be assigned to variables of target_type. For example: - - list[ChatMessage] is compatible with list[str | ChatMessage] + - list[Message] is compatible with list[str | Message] - str is compatible with str | int - int is compatible with Any diff --git a/python/packages/core/agent_framework/_workflows/_workflow.py b/python/packages/core/agent_framework/_workflows/_workflow.py index 08e7512234..88a92dc703 100644 --- a/python/packages/core/agent_framework/_workflows/_workflow.py +++ b/python/packages/core/agent_framework/_workflows/_workflow.py @@ -841,14 +841,14 @@ def output_types(self) -> list[type[Any] | types.UnionType]: def as_agent(self, name: str | None = None) -> WorkflowAgent: """Create a WorkflowAgent that wraps this workflow. - The returned agent converts standard agent inputs (strings, ChatMessage, or lists of these) - into a list[ChatMessage] that is passed to the workflow's start executor. This conversion + The returned agent converts standard agent inputs (strings, Message, or lists of these) + into a list[Message] that is passed to the workflow's start executor. 
This conversion happens in WorkflowAgent._normalize_messages() which transforms: - - str -> [ChatMessage(USER, [str])] - - ChatMessage -> [ChatMessage] - - list[str | ChatMessage] -> list[ChatMessage] (with string elements converted) + - str -> [Message(USER, [str])] + - Message -> [Message] + - list[str | Message] -> list[Message] (with string elements converted) - The workflow's start executor must accept list[ChatMessage] as an input type, otherwise + The workflow's start executor must accept list[Message] as an input type, otherwise initialization will fail with a ValueError. Args: @@ -858,7 +858,7 @@ def as_agent(self, name: str | None = None) -> WorkflowAgent: A WorkflowAgent instance that wraps this workflow. Raises: - ValueError: If the workflow's start executor cannot handle list[ChatMessage] input. + ValueError: If the workflow's start executor cannot handle list[Message] input. """ # Import here to avoid circular imports from ._agent import WorkflowAgent diff --git a/python/packages/core/agent_framework/_workflows/_workflow_context.py b/python/packages/core/agent_framework/_workflows/_workflow_context.py index 2bdd81ef41..51add07a5c 100644 --- a/python/packages/core/agent_framework/_workflows/_workflow_context.py +++ b/python/packages/core/agent_framework/_workflows/_workflow_context.py @@ -19,7 +19,7 @@ WorkflowEventSource, _framework_event_origin, # type: ignore ) -from ._runner_context import Message, RunnerContext +from ._runner_context import RunnerContext, WorkflowMessage from ._state import State if TYPE_CHECKING: @@ -321,7 +321,7 @@ async def send_message(self, message: OutT, target_id: str | None = None) -> Non attributes[OtelAttr.MESSAGE_DESTINATION_EXECUTOR_ID] = target_id with create_workflow_span(OtelAttr.MESSAGE_SEND_SPAN, attributes, kind=SpanKind.PRODUCER) as span: # Create Message wrapper - msg = Message(data=message, source_id=self._executor_id, target_id=target_id) + msg = WorkflowMessage(data=message, source_id=self._executor_id, 
target_id=target_id) # Track sent message for executor_completed event (type='executor_completed') self._sent_messages.append(message) diff --git a/python/packages/core/agent_framework/_workflows/_workflow_executor.py b/python/packages/core/agent_framework/_workflows/_workflow_executor.py index 319af46076..3e5fd449bc 100644 --- a/python/packages/core/agent_framework/_workflows/_workflow_executor.py +++ b/python/packages/core/agent_framework/_workflows/_workflow_executor.py @@ -19,7 +19,7 @@ ) from ._executor import Executor, handler from ._request_info_mixin import response_handler -from ._runner_context import Message +from ._runner_context import WorkflowMessage from ._typing_utils import is_instance_of from ._workflow import WorkflowRunResult from ._workflow_context import WorkflowContext @@ -340,7 +340,7 @@ def to_dict(self) -> dict[str, Any]: data["workflow"] = self.workflow.to_dict() return data - def can_handle(self, message: Message) -> bool: + def can_handle(self, message: WorkflowMessage) -> bool: """Override can_handle to only accept messages that the wrapped workflow can handle. 
This prevents the WorkflowExecutor from accepting messages that should go to other diff --git a/python/packages/core/agent_framework/observability.py b/python/packages/core/agent_framework/observability.py index be70288c06..c97ae0168a 100644 --- a/python/packages/core/agent_framework/observability.py +++ b/python/packages/core/agent_framework/observability.py @@ -39,18 +39,18 @@ from pydantic import BaseModel from ._agents import SupportsAgentRun - from ._clients import ChatClientProtocol + from ._clients import SupportsChatGetResponse from ._threads import AgentThread from ._tools import FunctionTool from ._types import ( AgentResponse, AgentResponseUpdate, - ChatMessage, ChatOptions, ChatResponse, ChatResponseUpdate, Content, FinishReason, + Message, ResponseStream, ) @@ -71,7 +71,7 @@ AgentT = TypeVar("AgentT", bound="SupportsAgentRun") -ChatClientT = TypeVar("ChatClientT", bound="ChatClientProtocol[Any]") +ChatClientT = TypeVar("ChatClientT", bound="SupportsChatGetResponse[Any]") logger = get_logger() @@ -122,7 +122,7 @@ # # This is a workaround, we'll find a generic and better solution - see # https://github.com/open-telemetry/semantic-conventions/issues/1701 -class ChatMessageListTimestampFilter(logging.Filter): +class MessageListTimestampFilter(logging.Filter): """A filter to increment the timestamp of INFO logs by 1 microsecond.""" INDEX_KEY: ClassVar[str] = "chat_message_index" @@ -135,7 +135,7 @@ def filter(self, record: logging.LogRecord) -> bool: return True -logger.addFilter(ChatMessageListTimestampFilter()) +logger.addFilter(MessageListTimestampFilter()) class OtelAttr(str, Enum): @@ -1070,7 +1070,7 @@ def __init__(self, *args: Any, otel_provider_name: str | None = None, **kwargs: @overload def get_response( self, - messages: str | ChatMessage | Sequence[str | ChatMessage], + messages: str | Message | Sequence[str | Message], *, stream: Literal[False] = ..., options: ChatOptions[ResponseModelBoundT], @@ -1080,7 +1080,7 @@ def get_response( @overload 
def get_response( self, - messages: str | ChatMessage | Sequence[str | ChatMessage], + messages: str | Message | Sequence[str | Message], *, stream: Literal[False] = ..., options: OptionsCoT | ChatOptions[None] | None = None, @@ -1090,7 +1090,7 @@ def get_response( @overload def get_response( self, - messages: str | ChatMessage | Sequence[str | ChatMessage], + messages: str | Message | Sequence[str | Message], *, stream: Literal[True], options: OptionsCoT | ChatOptions[Any] | None = None, @@ -1099,7 +1099,7 @@ def get_response( def get_response( self, - messages: str | ChatMessage | Sequence[str | ChatMessage], + messages: str | Message | Sequence[str | Message], *, stream: bool = False, options: OptionsCoT | ChatOptions[Any] | None = None, @@ -1263,7 +1263,7 @@ def __init__( @overload def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: Literal[False] = ..., thread: AgentThread | None = None, @@ -1273,7 +1273,7 @@ def run( @overload def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: Literal[True], thread: AgentThread | None = None, @@ -1282,7 +1282,7 @@ def run( def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: bool = False, thread: AgentThread | None = None, @@ -1600,7 +1600,7 @@ def capture_exception(span: trace.Span, exception: Exception, timestamp: int | N def _capture_messages( span: trace.Span, provider_name: str, - messages: str | ChatMessage | Sequence[str | ChatMessage], + messages: str | Message | Sequence[str | Message], system_instructions: str | list[str] | None = None, output: bool = False, finish_reason: FinishReason | None = None, @@ -1620,7 +1620,7 @@ def _capture_messages( extra={ OtelAttr.EVENT_NAME: 
OtelAttr.CHOICE if output else ROLE_EVENT_MAP.get(message.role), OtelAttr.PROVIDER_NAME: provider_name, - ChatMessageListTimestampFilter.INDEX_KEY: index, + MessageListTimestampFilter.INDEX_KEY: index, }, ) if finish_reason: @@ -1633,7 +1633,7 @@ def _capture_messages( span.set_attribute(OtelAttr.SYSTEM_INSTRUCTIONS, json.dumps(otel_sys_instructions)) -def _to_otel_message(message: ChatMessage) -> dict[str, Any]: +def _to_otel_message(message: Message) -> dict[str, Any]: """Create a otel representation of a message.""" return {"role": message.role, "parts": [_to_otel_part(content) for content in message.contents]} diff --git a/python/packages/core/agent_framework/openai/_assistant_provider.py b/python/packages/core/agent_framework/openai/_assistant_provider.py index 7b662e4c2a..7ac165ed09 100644 --- a/python/packages/core/agent_framework/openai/_assistant_provider.py +++ b/python/packages/core/agent_framework/openai/_assistant_provider.py @@ -10,7 +10,7 @@ from openai.types.beta.assistant import Assistant from pydantic import BaseModel, SecretStr, ValidationError -from .._agents import ChatAgent +from .._agents import Agent from .._memory import ContextProvider from .._middleware import MiddlewareTypes from .._tools import FunctionTool, ToolProtocol @@ -51,10 +51,10 @@ class OpenAIAssistantProvider(Generic[OptionsCoT]): - """Provider for creating ChatAgent instances from OpenAI Assistants API. + """Provider for creating Agent instances from OpenAI Assistants API. This provider allows you to create, retrieve, and wrap OpenAI Assistants - as ChatAgent instances for use in the agent framework. + as Agent instances for use in the agent framework. 
Examples: Basic usage with automatic client creation: @@ -208,11 +208,11 @@ async def create_agent( default_options: OptionsCoT | None = None, middleware: Sequence[MiddlewareTypes] | None = None, context_provider: ContextProvider | None = None, - ) -> ChatAgent[OptionsCoT]: - """Create a new assistant on OpenAI and return a ChatAgent. + ) -> Agent[OptionsCoT]: + """Create a new assistant on OpenAI and return an Agent. This method creates a new assistant on the OpenAI service and wraps it - in a ChatAgent instance. The assistant will persist on OpenAI until deleted. + in an Agent instance. The assistant will persist on OpenAI until deleted. Keyword Args: name: The name of the assistant (required). @@ -228,11 +228,11 @@ async def create_agent( default_options: A TypedDict containing default chat options for the agent. These options are applied to every run unless overridden. Include ``response_format`` here for structured output responses. - middleware: MiddlewareTypes for the ChatAgent. - context_provider: Context provider for the ChatAgent. + middleware: MiddlewareTypes for the Agent. + context_provider: Context provider for the Agent. Returns: - A ChatAgent instance wrapping the created assistant. + An Agent instance wrapping the created assistant. Raises: ServiceInitializationError: If assistant creation fails. @@ -297,7 +297,7 @@ async def create_agent( assistant = await self._client.beta.assistants.create(**create_params) - # Create ChatAgent - pass default_options which contains response_format + # Create Agent - pass default_options which contains response_format return self._create_chat_agent_from_assistant( assistant=assistant, tools=normalized_tools, @@ -316,11 +316,11 @@ async def create_agent( default_options: OptionsCoT | None = None, middleware: Sequence[MiddlewareTypes] | None = None, context_provider: ContextProvider | None = None, - ) -> ChatAgent[OptionsCoT]: - """Retrieve an existing assistant by ID and return a ChatAgent. 
+ ) -> Agent[OptionsCoT]: + """Retrieve an existing assistant by ID and return an Agent. This method fetches an existing assistant from OpenAI by its ID - and wraps it in a ChatAgent instance. + and wraps it in an Agent instance. Args: assistant_id: The ID of the assistant to retrieve (e.g., "asst_123"). @@ -333,11 +333,11 @@ async def get_agent( instructions: Override the assistant's instructions (optional). default_options: A TypedDict containing default chat options for the agent. These options are applied to every run unless overridden. - middleware: MiddlewareTypes for the ChatAgent. - context_provider: Context provider for the ChatAgent. + middleware: MiddlewareTypes for the Agent. + context_provider: Context provider for the Agent. Returns: - A ChatAgent instance wrapping the retrieved assistant. + An Agent instance wrapping the retrieved assistant. Raises: ServiceInitializationError: If the assistant cannot be retrieved. @@ -382,11 +382,11 @@ def as_agent( default_options: OptionsCoT | None = None, middleware: Sequence[MiddlewareTypes] | None = None, context_provider: ContextProvider | None = None, - ) -> ChatAgent[OptionsCoT]: - """Wrap an existing SDK Assistant object as a ChatAgent. + ) -> Agent[OptionsCoT]: + """Wrap an existing SDK Assistant object as an Agent. This method does NOT make any HTTP calls. It simply wraps an already- - fetched Assistant object in a ChatAgent. + fetched Assistant object in an Agent. Args: assistant: The OpenAI Assistant SDK object to wrap. @@ -398,11 +398,11 @@ def as_agent( instructions: Override the assistant's instructions (optional). default_options: A TypedDict containing default chat options for the agent. These options are applied to every run unless overridden. - middleware: MiddlewareTypes for the ChatAgent. - context_provider: Context provider for the ChatAgent. + middleware: MiddlewareTypes for the Agent. + context_provider: Context provider for the Agent. Returns: - A ChatAgent instance wrapping the assistant. 
+ An Agent instance wrapping the assistant. Raises: ValueError: If required function tools are missing. @@ -429,7 +429,7 @@ def as_agent( # Merge hosted tools with user-provided function tools merged_tools = self._merge_tools(assistant.tools or [], tools) - # Create ChatAgent + # Create Agent return self._create_chat_agent_from_assistant( assistant=assistant, tools=merged_tools, @@ -526,8 +526,8 @@ def _create_chat_agent_from_assistant( context_provider: ContextProvider | None, default_options: OptionsCoT | None = None, **kwargs: Any, - ) -> ChatAgent[OptionsCoT]: - """Create a ChatAgent from an Assistant. + ) -> Agent[OptionsCoT]: + """Create an Agent from an Assistant. Args: assistant: The OpenAI Assistant object. @@ -536,10 +536,10 @@ def _create_chat_agent_from_assistant( middleware: MiddlewareTypes for the agent. context_provider: Context provider for the agent. default_options: Default chat options for the agent (may include response_format). - **kwargs: Additional arguments passed to ChatAgent. + **kwargs: Additional arguments passed to Agent. Returns: - A configured ChatAgent instance. + A configured Agent instance. 
""" # Create the chat client with the assistant chat_client = OpenAIAssistantsClient( @@ -553,8 +553,8 @@ def _create_chat_agent_from_assistant( # Use instructions from assistant if not overridden final_instructions = instructions if instructions is not None else assistant.instructions - # Create and return ChatAgent - return ChatAgent( + # Create and return Agent + return Agent( chat_client=chat_client, id=assistant.id, name=assistant.name, diff --git a/python/packages/core/agent_framework/openai/_assistants_client.py b/python/packages/core/agent_framework/openai/_assistants_client.py index 914109827b..86c8fa7456 100644 --- a/python/packages/core/agent_framework/openai/_assistants_client.py +++ b/python/packages/core/agent_framework/openai/_assistants_client.py @@ -39,11 +39,11 @@ HostedFileSearchTool, ) from .._types import ( - ChatMessage, ChatOptions, ChatResponse, ChatResponseUpdate, Content, + Message, ResponseStream, UsageDetails, prepare_function_call_results, @@ -352,7 +352,7 @@ async def close(self) -> None: def _inner_get_response( self, *, - messages: Sequence[ChatMessage], + messages: Sequence[Message], options: Mapping[str, Any], stream: bool = False, **kwargs: Any, @@ -605,7 +605,7 @@ def _parse_function_calls_from_assistants(self, event_data: Run, response_id: st def _prepare_options( self, - messages: Sequence[ChatMessage], + messages: Sequence[Message], options: Mapping[str, Any], **kwargs: Any, ) -> tuple[dict[str, Any], list[Content] | None]: diff --git a/python/packages/core/agent_framework/openai/_chat_client.py b/python/packages/core/agent_framework/openai/_chat_client.py index b3d54f251e..b7c33e73f5 100644 --- a/python/packages/core/agent_framework/openai/_chat_client.py +++ b/python/packages/core/agent_framework/openai/_chat_client.py @@ -29,12 +29,12 @@ ToolProtocol, ) from .._types import ( - ChatMessage, ChatOptions, ChatResponse, ChatResponseUpdate, Content, FinishReason, + Message, ResponseStream, UsageDetails, 
prepare_function_call_results, @@ -158,7 +158,7 @@ class RawOpenAIChatClient( # type: ignore[misc] def _inner_get_response( self, *, - messages: Sequence[ChatMessage], + messages: Sequence[Message], options: Mapping[str, Any], stream: bool = False, **kwargs: Any, @@ -252,7 +252,7 @@ def _prepare_tools_for_openai(self, tools: Sequence[ToolProtocol | MutableMappin ret_dict["web_search_options"] = web_search_options return ret_dict - def _prepare_options(self, messages: Sequence[ChatMessage], options: Mapping[str, Any]) -> dict[str, Any]: + def _prepare_options(self, messages: Sequence[Message], options: Mapping[str, Any]) -> dict[str, Any]: # Prepend instructions from options if they exist from .._types import prepend_instructions_to_messages, validate_tool_mode @@ -310,7 +310,7 @@ def _prepare_options(self, messages: Sequence[ChatMessage], options: Mapping[str def _parse_response_from_openai(self, response: ChatCompletion, options: Mapping[str, Any]) -> ChatResponse: """Parse a response from OpenAI into a ChatResponse.""" response_metadata = self._get_metadata_from_chat_response(response) - messages: list[ChatMessage] = [] + messages: list[Message] = [] finish_reason: FinishReason | None = None for choice in response.choices: response_metadata.update(self._get_metadata_from_chat_choice(choice)) @@ -323,7 +323,7 @@ def _parse_response_from_openai(self, response: ChatCompletion, options: Mapping contents.extend(parsed_tool_calls) if reasoning_details := getattr(choice.message, "reasoning_details", None): contents.append(Content.from_text_reasoning(protected_data=json.dumps(reasoning_details))) - messages.append(ChatMessage(role="assistant", contents=contents)) + messages.append(Message(role="assistant", contents=contents)) return ChatResponse( response_id=response.id, created_at=datetime.fromtimestamp(response.created, tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ"), @@ -448,7 +448,7 @@ def _parse_tool_calls_from_openai(self, choice: Choice | ChunkChoice) -> 
list[Co def _prepare_messages_for_openai( self, - chat_messages: Sequence[ChatMessage], + chat_messages: Sequence[Message], role_key: str = "role", content_key: str = "content", ) -> list[dict[str, Any]]: @@ -476,7 +476,7 @@ def _prepare_messages_for_openai( # region Parsers - def _prepare_message_for_openai(self, message: ChatMessage) -> list[dict[str, Any]]: + def _prepare_message_for_openai(self, message: Message) -> list[dict[str, Any]]: """Prepare a chat message for OpenAI.""" all_messages: list[dict[str, Any]] = [] for content in message.contents: diff --git a/python/packages/core/agent_framework/openai/_responses_client.py b/python/packages/core/agent_framework/openai/_responses_client.py index 48b0665b46..da05038730 100644 --- a/python/packages/core/agent_framework/openai/_responses_client.py +++ b/python/packages/core/agent_framework/openai/_responses_client.py @@ -51,12 +51,12 @@ ) from .._types import ( Annotation, - ChatMessage, ChatOptions, ChatResponse, ChatResponseUpdate, Content, ContinuationToken, + Message, ResponseStream, Role, TextSpanRegion, @@ -250,7 +250,7 @@ class RawOpenAIResponsesClient( # type: ignore[misc] async def _prepare_request( self, - messages: Sequence[ChatMessage], + messages: Sequence[Message], options: Mapping[str, Any], **kwargs: Any, ) -> tuple[AsyncOpenAI, dict[str, Any], dict[str, Any]]: @@ -280,7 +280,7 @@ def _handle_request_error(self, ex: Exception) -> NoReturn: def _inner_get_response( self, *, - messages: Sequence[ChatMessage], + messages: Sequence[Message], options: Mapping[str, Any], stream: bool = False, **kwargs: Any, @@ -567,7 +567,7 @@ def _prepare_mcp_tool(tool: HostedMCPTool) -> Mcp: async def _prepare_options( self, - messages: Sequence[ChatMessage], + messages: Sequence[Message], options: Mapping[str, Any], **kwargs: Any, ) -> dict[str, Any]: @@ -673,7 +673,7 @@ def _get_current_conversation_id(self, options: Mapping[str, Any], **kwargs: Any """ return kwargs.get("conversation_id") or 
options.get("conversation_id") - def _prepare_messages_for_openai(self, chat_messages: Sequence[ChatMessage]) -> list[dict[str, Any]]: + def _prepare_messages_for_openai(self, chat_messages: Sequence[Message]) -> list[dict[str, Any]]: """Prepare the chat messages for a request. Allowing customization of the key names for role/author, and optionally overriding the role. @@ -705,7 +705,7 @@ def _prepare_messages_for_openai(self, chat_messages: Sequence[ChatMessage]) -> def _prepare_message_for_openai( self, - message: ChatMessage, + message: Message, call_id_to_id: dict[str, str], ) -> list[dict[str, Any]]: """Prepare a chat message for the OpenAI Responses API format.""" @@ -1095,7 +1095,7 @@ def _parse_response_from_openai( ) case _: logger.debug("Unparsed output of type: %s: %s", item.type, item) - response_message = ChatMessage(role="assistant", contents=contents) + response_message = Message(role="assistant", contents=contents) args: dict[str, Any] = { "response_id": response.id, "created_at": datetime.fromtimestamp(response.created_at, tz=timezone.utc).strftime( diff --git a/python/packages/core/tests/azure/conftest.py b/python/packages/core/tests/azure/conftest.py index a9c03cd664..9d8ce0cebb 100644 --- a/python/packages/core/tests/azure/conftest.py +++ b/python/packages/core/tests/azure/conftest.py @@ -3,7 +3,7 @@ from pytest import fixture -from agent_framework import ChatMessage +from agent_framework import Message # region: Connector Settings fixtures @@ -58,5 +58,5 @@ def azure_openai_unit_test_env(monkeypatch, exclude_list, override_env_param_dic @fixture(scope="function") -def chat_history() -> list[ChatMessage]: +def chat_history() -> list[Message]: return [] diff --git a/python/packages/core/tests/azure/test_azure_assistants_client.py b/python/packages/core/tests/azure/test_azure_assistants_client.py index 9c95bed1c1..e89b38ae5a 100644 --- a/python/packages/core/tests/azure/test_azure_assistants_client.py +++ 
b/python/packages/core/tests/azure/test_azure_assistants_client.py @@ -9,15 +9,15 @@ from pydantic import Field from agent_framework import ( + Agent, AgentResponse, AgentResponseUpdate, AgentThread, - ChatAgent, - ChatClientProtocol, - ChatMessage, ChatResponse, ChatResponseUpdate, HostedCodeInterpreterTool, + Message, + SupportsChatGetResponse, tool, ) from agent_framework.azure import AzureOpenAIAssistantsClient @@ -95,7 +95,7 @@ def test_azure_assistants_client_init_with_client(mock_async_azure_openai: Magic assert chat_client.assistant_id == "existing-assistant-id" assert chat_client.thread_id == "test-thread-id" assert not chat_client._should_delete_assistant # type: ignore - assert isinstance(chat_client, ChatClientProtocol) + assert isinstance(chat_client, SupportsChatGetResponse) def test_azure_assistants_client_init_auto_create_client( @@ -146,7 +146,7 @@ def test_azure_assistants_client_init_with_default_headers(azure_openai_unit_tes ) assert chat_client.model_id == "test_chat_deployment" - assert isinstance(chat_client, ChatClientProtocol) + assert isinstance(chat_client, SupportsChatGetResponse) # Assert that the default header we added is present in the client's default headers for key, value in default_headers.items(): @@ -267,17 +267,17 @@ def get_weather( async def test_azure_assistants_client_get_response() -> None: """Test Azure Assistants Client response.""" async with AzureOpenAIAssistantsClient(credential=AzureCliCredential()) as azure_assistants_client: - assert isinstance(azure_assistants_client, ChatClientProtocol) + assert isinstance(azure_assistants_client, SupportsChatGetResponse) - messages: list[ChatMessage] = [] + messages: list[Message] = [] messages.append( - ChatMessage( + Message( role="user", text="The weather in Seattle is currently sunny with a high of 25°C. 
" "It's a beautiful day for outdoor activities.", ) ) - messages.append(ChatMessage(role="user", text="What's the weather like today?")) + messages.append(Message(role="user", text="What's the weather like today?")) # Test that the client can be used to get a response response = await azure_assistants_client.get_response(messages=messages) @@ -292,10 +292,10 @@ async def test_azure_assistants_client_get_response() -> None: async def test_azure_assistants_client_get_response_tools() -> None: """Test Azure Assistants Client response with tools.""" async with AzureOpenAIAssistantsClient(credential=AzureCliCredential()) as azure_assistants_client: - assert isinstance(azure_assistants_client, ChatClientProtocol) + assert isinstance(azure_assistants_client, SupportsChatGetResponse) - messages: list[ChatMessage] = [] - messages.append(ChatMessage(role="user", text="What's the weather like in Seattle?")) + messages: list[Message] = [] + messages.append(Message(role="user", text="What's the weather like in Seattle?")) # Test that the client can be used to get a response response = await azure_assistants_client.get_response( @@ -313,17 +313,17 @@ async def test_azure_assistants_client_get_response_tools() -> None: async def test_azure_assistants_client_streaming() -> None: """Test Azure Assistants Client streaming response.""" async with AzureOpenAIAssistantsClient(credential=AzureCliCredential()) as azure_assistants_client: - assert isinstance(azure_assistants_client, ChatClientProtocol) + assert isinstance(azure_assistants_client, SupportsChatGetResponse) - messages: list[ChatMessage] = [] + messages: list[Message] = [] messages.append( - ChatMessage( + Message( role="user", text="The weather in Seattle is currently sunny with a high of 25°C. 
" "It's a beautiful day for outdoor activities.", ) ) - messages.append(ChatMessage(role="user", text="What's the weather like today?")) + messages.append(Message(role="user", text="What's the weather like today?")) # Test that the client can be used to get a response response = azure_assistants_client.get_response(messages=messages, stream=True) @@ -344,10 +344,10 @@ async def test_azure_assistants_client_streaming() -> None: async def test_azure_assistants_client_streaming_tools() -> None: """Test Azure Assistants Client streaming response with tools.""" async with AzureOpenAIAssistantsClient(credential=AzureCliCredential()) as azure_assistants_client: - assert isinstance(azure_assistants_client, ChatClientProtocol) + assert isinstance(azure_assistants_client, SupportsChatGetResponse) - messages: list[ChatMessage] = [] - messages.append(ChatMessage(role="user", text="What's the weather like in Seattle?")) + messages: list[Message] = [] + messages.append(Message(role="user", text="What's the weather like in Seattle?")) # Test that the client can be used to get a response response = azure_assistants_client.get_response( @@ -373,7 +373,7 @@ async def test_azure_assistants_client_with_existing_assistant() -> None: # First create an assistant to use in the test async with AzureOpenAIAssistantsClient(credential=AzureCliCredential()) as temp_client: # Get the assistant ID by triggering assistant creation - messages = [ChatMessage(role="user", text="Hello")] + messages = [Message(role="user", text="Hello")] await temp_client.get_response(messages=messages) assistant_id = temp_client.assistant_id @@ -381,10 +381,10 @@ async def test_azure_assistants_client_with_existing_assistant() -> None: async with AzureOpenAIAssistantsClient( assistant_id=assistant_id, credential=AzureCliCredential() ) as azure_assistants_client: - assert isinstance(azure_assistants_client, ChatClientProtocol) + assert isinstance(azure_assistants_client, SupportsChatGetResponse) assert 
azure_assistants_client.assistant_id == assistant_id - messages = [ChatMessage(role="user", text="What can you do?")] + messages = [Message(role="user", text="What can you do?")] # Test that the client can be used to get a response response = await azure_assistants_client.get_response(messages=messages) @@ -397,8 +397,8 @@ async def test_azure_assistants_client_with_existing_assistant() -> None: @pytest.mark.flaky @skip_if_azure_integration_tests_disabled async def test_azure_assistants_agent_basic_run(): - """Test ChatAgent basic run functionality with AzureOpenAIAssistantsClient.""" - async with ChatAgent( + """Test Agent basic run functionality with AzureOpenAIAssistantsClient.""" + async with Agent( chat_client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), ) as agent: # Run a simple query @@ -414,8 +414,8 @@ async def test_azure_assistants_agent_basic_run(): @pytest.mark.flaky @skip_if_azure_integration_tests_disabled async def test_azure_assistants_agent_basic_run_streaming(): - """Test ChatAgent basic streaming functionality with AzureOpenAIAssistantsClient.""" - async with ChatAgent( + """Test Agent basic streaming functionality with AzureOpenAIAssistantsClient.""" + async with Agent( chat_client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), ) as agent: # Run streaming query @@ -434,8 +434,8 @@ async def test_azure_assistants_agent_basic_run_streaming(): @pytest.mark.flaky @skip_if_azure_integration_tests_disabled async def test_azure_assistants_agent_thread_persistence(): - """Test ChatAgent thread persistence across runs with AzureOpenAIAssistantsClient.""" - async with ChatAgent( + """Test Agent thread persistence across runs with AzureOpenAIAssistantsClient.""" + async with Agent( chat_client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), instructions="You are a helpful assistant with good memory.", ) as agent: @@ -463,11 +463,11 @@ async def test_azure_assistants_agent_thread_persistence(): 
@pytest.mark.flaky @skip_if_azure_integration_tests_disabled async def test_azure_assistants_agent_existing_thread_id(): - """Test ChatAgent with existing thread ID to continue conversations across agent instances.""" + """Test Agent with existing thread ID to continue conversations across agent instances.""" # First, create a conversation and capture the thread ID existing_thread_id = None - async with ChatAgent( + async with Agent( chat_client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), instructions="You are a helpful weather agent.", tools=[get_weather], @@ -487,7 +487,7 @@ async def test_azure_assistants_agent_existing_thread_id(): # Now continue with the same thread ID in a new agent instance - async with ChatAgent( + async with Agent( chat_client=AzureOpenAIAssistantsClient(thread_id=existing_thread_id, credential=AzureCliCredential()), instructions="You are a helpful weather agent.", tools=[get_weather], @@ -508,9 +508,9 @@ async def test_azure_assistants_agent_existing_thread_id(): @pytest.mark.flaky @skip_if_azure_integration_tests_disabled async def test_azure_assistants_agent_code_interpreter(): - """Test ChatAgent with code interpreter through AzureOpenAIAssistantsClient.""" + """Test Agent with code interpreter through AzureOpenAIAssistantsClient.""" - async with ChatAgent( + async with Agent( chat_client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), instructions="You are a helpful assistant that can write and execute Python code.", tools=[HostedCodeInterpreterTool()], @@ -530,7 +530,7 @@ async def test_azure_assistants_agent_code_interpreter(): async def test_azure_assistants_client_agent_level_tool_persistence(): """Test that agent-level tools persist across multiple runs with Azure Assistants Client.""" - async with ChatAgent( + async with Agent( chat_client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), instructions="You are a helpful assistant that uses available tools.", tools=[get_weather], # 
Agent-level tool diff --git a/python/packages/core/tests/azure/test_azure_chat_client.py b/python/packages/core/tests/azure/test_azure_chat_client.py index f434b55fd1..37687e20e7 100644 --- a/python/packages/core/tests/azure/test_azure_chat_client.py +++ b/python/packages/core/tests/azure/test_azure_chat_client.py @@ -17,13 +17,13 @@ from openai.types.chat.chat_completion_message import ChatCompletionMessage from agent_framework import ( + Agent, AgentResponse, AgentResponseUpdate, - ChatAgent, - ChatClientProtocol, - ChatMessage, ChatResponse, ChatResponseUpdate, + Message, + SupportsChatGetResponse, tool, ) from agent_framework._telemetry import USER_AGENT_KEY @@ -52,7 +52,7 @@ def test_init(azure_openai_unit_test_env: dict[str, str]) -> None: assert azure_chat_client.client is not None assert isinstance(azure_chat_client.client, AsyncAzureOpenAI) assert azure_chat_client.model_id == azure_openai_unit_test_env["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"] - assert isinstance(azure_chat_client, ChatClientProtocol) + assert isinstance(azure_chat_client, SupportsChatGetResponse) def test_init_client(azure_openai_unit_test_env: dict[str, str]) -> None: @@ -75,7 +75,7 @@ def test_init_base_url(azure_openai_unit_test_env: dict[str, str]) -> None: assert azure_chat_client.client is not None assert isinstance(azure_chat_client.client, AsyncAzureOpenAI) assert azure_chat_client.model_id == azure_openai_unit_test_env["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"] - assert isinstance(azure_chat_client, ChatClientProtocol) + assert isinstance(azure_chat_client, SupportsChatGetResponse) for key, value in default_headers.items(): assert key in azure_chat_client.client.default_headers assert azure_chat_client.client.default_headers[key] == value @@ -88,7 +88,7 @@ def test_init_endpoint(azure_openai_unit_test_env: dict[str, str]) -> None: assert azure_chat_client.client is not None assert isinstance(azure_chat_client.client, AsyncAzureOpenAI) assert azure_chat_client.model_id == 
azure_openai_unit_test_env["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"] - assert isinstance(azure_chat_client, ChatClientProtocol) + assert isinstance(azure_chat_client, SupportsChatGetResponse) @pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"]], indirect=True) @@ -178,11 +178,11 @@ def mock_streaming_chat_completion_response() -> AsyncStream[ChatCompletionChunk async def test_cmc( mock_create: AsyncMock, azure_openai_unit_test_env: dict[str, str], - chat_history: list[ChatMessage], + chat_history: list[Message], mock_chat_completion_response: ChatCompletion, ) -> None: mock_create.return_value = mock_chat_completion_response - chat_history.append(ChatMessage(text="hello world", role="user")) + chat_history.append(Message(text="hello world", role="user")) azure_chat_client = AzureOpenAIChatClient() await azure_chat_client.get_response( @@ -199,12 +199,12 @@ async def test_cmc( async def test_cmc_with_logit_bias( mock_create: AsyncMock, azure_openai_unit_test_env: dict[str, str], - chat_history: list[ChatMessage], + chat_history: list[Message], mock_chat_completion_response: ChatCompletion, ) -> None: mock_create.return_value = mock_chat_completion_response prompt = "hello world" - chat_history.append(ChatMessage(text=prompt, role="user")) + chat_history.append(Message(text=prompt, role="user")) token_bias: dict[str | int, float] = {"1": -100} @@ -224,12 +224,12 @@ async def test_cmc_with_logit_bias( async def test_cmc_with_stop( mock_create: AsyncMock, azure_openai_unit_test_env: dict[str, str], - chat_history: list[ChatMessage], + chat_history: list[Message], mock_chat_completion_response: ChatCompletion, ) -> None: mock_create.return_value = mock_chat_completion_response prompt = "hello world" - chat_history.append(ChatMessage(text=prompt, role="user")) + chat_history.append(Message(text=prompt, role="user")) stop = ["!"] @@ -249,7 +249,7 @@ async def test_cmc_with_stop( async def test_azure_on_your_data( mock_create: AsyncMock, 
azure_openai_unit_test_env: dict[str, str], - chat_history: list[ChatMessage], + chat_history: list[Message], mock_chat_completion_response: ChatCompletion, ) -> None: mock_chat_completion_response.choices = [ @@ -277,9 +277,9 @@ async def test_azure_on_your_data( mock_create.return_value = mock_chat_completion_response prompt = "hello world" messages_in = chat_history - chat_history.append(ChatMessage(text=prompt, role="user")) - messages_out: list[ChatMessage] = [] - messages_out.append(ChatMessage(text=prompt, role="user")) + chat_history.append(Message(text=prompt, role="user")) + messages_out: list[Message] = [] + messages_out.append(Message(text=prompt, role="user")) expected_data_settings = { "data_sources": [ @@ -319,7 +319,7 @@ async def test_azure_on_your_data( async def test_azure_on_your_data_string( mock_create: AsyncMock, azure_openai_unit_test_env: dict[str, str], - chat_history: list[ChatMessage], + chat_history: list[Message], mock_chat_completion_response: ChatCompletion, ) -> None: mock_chat_completion_response.choices = [ @@ -347,9 +347,9 @@ async def test_azure_on_your_data_string( mock_create.return_value = mock_chat_completion_response prompt = "hello world" messages_in = chat_history - messages_in.append(ChatMessage(text=prompt, role="user")) - messages_out: list[ChatMessage] = [] - messages_out.append(ChatMessage(text=prompt, role="user")) + messages_in.append(Message(text=prompt, role="user")) + messages_out: list[Message] = [] + messages_out.append(Message(text=prompt, role="user")) expected_data_settings = { "data_sources": [ @@ -389,7 +389,7 @@ async def test_azure_on_your_data_string( async def test_azure_on_your_data_fail( mock_create: AsyncMock, azure_openai_unit_test_env: dict[str, str], - chat_history: list[ChatMessage], + chat_history: list[Message], mock_chat_completion_response: ChatCompletion, ) -> None: mock_chat_completion_response.choices = [ @@ -406,9 +406,9 @@ async def test_azure_on_your_data_fail( 
mock_create.return_value = mock_chat_completion_response prompt = "hello world" messages_in = chat_history - messages_in.append(ChatMessage(text=prompt, role="user")) - messages_out: list[ChatMessage] = [] - messages_out.append(ChatMessage(text=prompt, role="user")) + messages_in.append(Message(text=prompt, role="user")) + messages_out: list[Message] = [] + messages_out.append(Message(text=prompt, role="user")) expected_data_settings = { "data_sources": [ @@ -459,10 +459,10 @@ async def test_azure_on_your_data_fail( async def test_content_filtering_raises_correct_exception( mock_create: AsyncMock, azure_openai_unit_test_env: dict[str, str], - chat_history: list[ChatMessage], + chat_history: list[Message], ) -> None: prompt = "some prompt that would trigger the content filtering" - chat_history.append(ChatMessage(text=prompt, role="user")) + chat_history.append(Message(text=prompt, role="user")) test_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT") assert test_endpoint is not None @@ -504,10 +504,10 @@ async def test_content_filtering_raises_correct_exception( async def test_content_filtering_without_response_code_raises_with_default_code( mock_create: AsyncMock, azure_openai_unit_test_env: dict[str, str], - chat_history: list[ChatMessage], + chat_history: list[Message], ) -> None: prompt = "some prompt that would trigger the content filtering" - chat_history.append(ChatMessage(text=prompt, role="user")) + chat_history.append(Message(text=prompt, role="user")) test_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT") assert test_endpoint is not None @@ -543,10 +543,10 @@ async def test_content_filtering_without_response_code_raises_with_default_code( async def test_bad_request_non_content_filter( mock_create: AsyncMock, azure_openai_unit_test_env: dict[str, str], - chat_history: list[ChatMessage], + chat_history: list[Message], ) -> None: prompt = "some prompt that would trigger the content filtering" - chat_history.append(ChatMessage(text=prompt, role="user")) + 
chat_history.append(Message(text=prompt, role="user")) test_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT") assert test_endpoint is not None @@ -566,11 +566,11 @@ async def test_bad_request_non_content_filter( async def test_get_streaming( mock_create: AsyncMock, azure_openai_unit_test_env: dict[str, str], - chat_history: list[ChatMessage], + chat_history: list[Message], mock_streaming_chat_completion_response: AsyncStream[ChatCompletionChunk], ) -> None: mock_create.return_value = mock_streaming_chat_completion_response - chat_history.append(ChatMessage(text="hello world", role="user")) + chat_history.append(Message(text="hello world", role="user")) azure_chat_client = AzureOpenAIChatClient() async for msg in azure_chat_client.get_response( @@ -595,7 +595,7 @@ async def test_get_streaming( async def test_streaming_with_none_delta( mock_create: AsyncMock, azure_openai_unit_test_env: dict[str, str], - chat_history: list[ChatMessage], + chat_history: list[Message], ) -> None: """Test streaming handles None delta from async content filtering.""" # First chunk has None delta (simulates async filtering) @@ -619,7 +619,7 @@ async def test_streaming_with_none_delta( stream.__aiter__.return_value = [chunk_with_none_delta, chunk_with_content] mock_create.return_value = stream - chat_history.append(ChatMessage(text="hello world", role="user")) + chat_history.append(Message(text="hello world", role="user")) azure_chat_client = AzureOpenAIChatClient() results: list[ChatResponseUpdate] = [] @@ -653,11 +653,11 @@ def get_weather(location: str) -> str: async def test_azure_openai_chat_client_response() -> None: """Test Azure OpenAI chat completion responses.""" azure_chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) - assert isinstance(azure_chat_client, ChatClientProtocol) + assert isinstance(azure_chat_client, SupportsChatGetResponse) - messages: list[ChatMessage] = [] + messages: list[Message] = [] messages.append( - ChatMessage( + Message( role="user", 
text="Emily and David, two passionate scientists, met during a research expedition to Antarctica. " "Bonded by their love for the natural world and shared curiosity, they uncovered a " @@ -665,7 +665,7 @@ async def test_azure_openai_chat_client_response() -> None: "of climate change.", ) ) - messages.append(ChatMessage(role="user", text="who are Emily and David?")) + messages.append(Message(role="user", text="who are Emily and David?")) # Test that the client can be used to get a response response = await azure_chat_client.get_response(messages=messages) @@ -683,10 +683,10 @@ async def test_azure_openai_chat_client_response() -> None: async def test_azure_openai_chat_client_response_tools() -> None: """Test AzureOpenAI chat completion responses.""" azure_chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) - assert isinstance(azure_chat_client, ChatClientProtocol) + assert isinstance(azure_chat_client, SupportsChatGetResponse) - messages: list[ChatMessage] = [] - messages.append(ChatMessage(role="user", text="who are Emily and David?")) + messages: list[Message] = [] + messages.append(Message(role="user", text="who are Emily and David?")) # Test that the client can be used to get a response response = await azure_chat_client.get_response( @@ -704,11 +704,11 @@ async def test_azure_openai_chat_client_response_tools() -> None: async def test_azure_openai_chat_client_streaming() -> None: """Test Azure OpenAI chat completion responses.""" azure_chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) - assert isinstance(azure_chat_client, ChatClientProtocol) + assert isinstance(azure_chat_client, SupportsChatGetResponse) - messages: list[ChatMessage] = [] + messages: list[Message] = [] messages.append( - ChatMessage( + Message( role="user", text="Emily and David, two passionate scientists, met during a research expedition to Antarctica. 
" "Bonded by their love for the natural world and shared curiosity, they uncovered a " @@ -716,7 +716,7 @@ async def test_azure_openai_chat_client_streaming() -> None: "of climate change.", ) ) - messages.append(ChatMessage(role="user", text="who are Emily and David?")) + messages.append(Message(role="user", text="who are Emily and David?")) # Test that the client can be used to get a response response = azure_chat_client.get_response(messages=messages, stream=True) @@ -739,10 +739,10 @@ async def test_azure_openai_chat_client_streaming() -> None: async def test_azure_openai_chat_client_streaming_tools() -> None: """Test AzureOpenAI chat completion responses.""" azure_chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) - assert isinstance(azure_chat_client, ChatClientProtocol) + assert isinstance(azure_chat_client, SupportsChatGetResponse) - messages: list[ChatMessage] = [] - messages.append(ChatMessage(role="user", text="who are Emily and David?")) + messages: list[Message] = [] + messages.append(Message(role="user", text="who are Emily and David?")) # Test that the client can be used to get a response response = azure_chat_client.get_response( @@ -765,7 +765,7 @@ async def test_azure_openai_chat_client_streaming_tools() -> None: @skip_if_azure_integration_tests_disabled async def test_azure_openai_chat_client_agent_basic_run(): """Test Azure OpenAI chat client agent basic run functionality with AzureOpenAIChatClient.""" - async with ChatAgent( + async with Agent( chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), ) as agent: # Test basic run @@ -781,7 +781,7 @@ async def test_azure_openai_chat_client_agent_basic_run(): @skip_if_azure_integration_tests_disabled async def test_azure_openai_chat_client_agent_basic_run_streaming(): """Test Azure OpenAI chat client agent basic streaming functionality with AzureOpenAIChatClient.""" - async with ChatAgent( + async with Agent( 
chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), ) as agent: # Test streaming run @@ -799,7 +799,7 @@ async def test_azure_openai_chat_client_agent_basic_run_streaming(): @skip_if_azure_integration_tests_disabled async def test_azure_openai_chat_client_agent_thread_persistence(): """Test Azure OpenAI chat client agent thread persistence across runs with AzureOpenAIChatClient.""" - async with ChatAgent( + async with Agent( chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), instructions="You are a helpful assistant with good memory.", ) as agent: @@ -827,7 +827,7 @@ async def test_azure_openai_chat_client_agent_existing_thread(): # First conversation - capture the thread preserved_thread = None - async with ChatAgent( + async with Agent( chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), instructions="You are a helpful assistant with good memory.", ) as first_agent: @@ -843,7 +843,7 @@ async def test_azure_openai_chat_client_agent_existing_thread(): # Second conversation - reuse the thread in a new agent instance if preserved_thread: - async with ChatAgent( + async with Agent( chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), instructions="You are a helpful assistant with good memory.", ) as second_agent: @@ -860,7 +860,7 @@ async def test_azure_openai_chat_client_agent_existing_thread(): async def test_azure_chat_client_agent_level_tool_persistence(): """Test that agent-level tools persist across multiple runs with Azure Chat Client.""" - async with ChatAgent( + async with Agent( chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), instructions="You are a helpful assistant that uses available tools.", tools=[get_weather], # Agent-level tool diff --git a/python/packages/core/tests/azure/test_azure_responses_client.py b/python/packages/core/tests/azure/test_azure_responses_client.py index e8e9e9e089..3a847e8c96 100644 --- 
a/python/packages/core/tests/azure/test_azure_responses_client.py +++ b/python/packages/core/tests/azure/test_azure_responses_client.py @@ -10,16 +10,16 @@ from pytest import param from agent_framework import ( + Agent, AgentResponse, - ChatAgent, - ChatClientProtocol, - ChatMessage, ChatResponse, Content, HostedCodeInterpreterTool, HostedFileSearchTool, HostedMCPTool, HostedWebSearchTool, + Message, + SupportsChatGetResponse, tool, ) from agent_framework.azure import AzureOpenAIResponsesClient @@ -76,7 +76,7 @@ def test_init(azure_openai_unit_test_env: dict[str, str]) -> None: azure_responses_client = AzureOpenAIResponsesClient(credential=AzureCliCredential()) assert azure_responses_client.model_id == azure_openai_unit_test_env["AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME"] - assert isinstance(azure_responses_client, ChatClientProtocol) + assert isinstance(azure_responses_client, SupportsChatGetResponse) def test_init_validation_fail() -> None: @@ -91,7 +91,7 @@ def test_init_model_id_constructor(azure_openai_unit_test_env: dict[str, str]) - azure_responses_client = AzureOpenAIResponsesClient(deployment_name=model_id) assert azure_responses_client.model_id == model_id - assert isinstance(azure_responses_client, ChatClientProtocol) + assert isinstance(azure_responses_client, SupportsChatGetResponse) def test_init_with_default_header(azure_openai_unit_test_env: dict[str, str]) -> None: @@ -103,7 +103,7 @@ def test_init_with_default_header(azure_openai_unit_test_env: dict[str, str]) -> ) assert azure_responses_client.model_id == azure_openai_unit_test_env["AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME"] - assert isinstance(azure_responses_client, ChatClientProtocol) + assert isinstance(azure_responses_client, SupportsChatGetResponse) # Assert that the default header we added is present in the client's default headers for key, value in default_headers.items(): @@ -221,14 +221,14 @@ async def test_integration_options( # Prepare test message if option_name == "tools" or option_name 
== "tool_choice": # Use weather-related prompt for tool tests - messages = [ChatMessage(role="user", text="What is the weather in Seattle?")] + messages = [Message(role="user", text="What is the weather in Seattle?")] elif option_name == "response_format": # Use prompt that works well with structured output - messages = [ChatMessage(role="user", text="The weather in Seattle is sunny")] - messages.append(ChatMessage(role="user", text="What is the weather in Seattle?")) + messages = [Message(role="user", text="The weather in Seattle is sunny")] + messages.append(Message(role="user", text="What is the weather in Seattle?")) else: # Generic prompt for simple options - messages = [ChatMessage(role="user", text="Say 'Hello World' briefly.")] + messages = [Message(role="user", text="Say 'Hello World' briefly.")] # Build options dict options: dict[str, Any] = {option_name: option_value} @@ -336,7 +336,7 @@ async def test_integration_client_file_search() -> None: # Test that the client will use the file search tool response = await azure_responses_client.get_response( messages=[ - ChatMessage( + Message( role="user", text="What is the weather today? Do a file search to find the answer.", ) @@ -360,7 +360,7 @@ async def test_integration_client_file_search_streaming() -> None: try: response_stream = azure_responses_client.get_response( messages=[ - ChatMessage( + Message( role="user", text="What is the weather today? 
Do a file search to find the answer.", ) @@ -426,7 +426,7 @@ async def test_integration_client_agent_existing_thread(): # First conversation - capture the thread preserved_thread = None - async with ChatAgent( + async with Agent( chat_client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), instructions="You are a helpful assistant with good memory.", ) as first_agent: @@ -442,7 +442,7 @@ async def test_integration_client_agent_existing_thread(): # Second conversation - reuse the thread in a new agent instance if preserved_thread: - async with ChatAgent( + async with Agent( chat_client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), instructions="You are a helpful assistant with good memory.", ) as second_agent: diff --git a/python/packages/core/tests/core/conftest.py b/python/packages/core/tests/core/conftest.py index 7cb5e63549..f21433b8f8 100644 --- a/python/packages/core/tests/core/conftest.py +++ b/python/packages/core/tests/core/conftest.py @@ -16,12 +16,12 @@ AgentResponseUpdate, AgentThread, BaseChatClient, - ChatMessage, ChatMiddlewareLayer, ChatResponse, ChatResponseUpdate, Content, FunctionInvocationLayer, + Message, ResponseStream, SupportsAgentRun, ToolProtocol, @@ -40,7 +40,7 @@ @fixture(scope="function") -def chat_history() -> list[ChatMessage]: +def chat_history() -> list[Message]: return [] @@ -90,7 +90,7 @@ def __init__(self, **kwargs: Any) -> None: def get_response( self, - messages: str | ChatMessage | list[str] | list[ChatMessage], + messages: str | Message | list[str] | list[Message], *, stream: bool = False, options: dict[str, Any] | None = None, @@ -105,14 +105,14 @@ async def _get() -> ChatResponse: self.call_count += 1 if self.responses: return self.responses.pop(0) - return ChatResponse(messages=ChatMessage(role="assistant", text="test response")) + return ChatResponse(messages=Message(role="assistant", text="test response")) return _get() def _get_streaming_response( self, *, - messages: str | ChatMessage | 
list[str] | list[ChatMessage], + messages: str | Message | list[str] | list[Message], options: dict[str, Any], **kwargs: Any, ) -> ResponseStream[ChatResponseUpdate, ChatResponse]: @@ -153,7 +153,7 @@ def __init__(self, **kwargs: Any): def _inner_get_response( self, *, - messages: MutableSequence[ChatMessage], + messages: MutableSequence[Message], stream: bool, options: dict[str, Any], **kwargs: Any, @@ -180,7 +180,7 @@ async def _get() -> ChatResponse: async def _get_non_streaming_response( self, *, - messages: MutableSequence[ChatMessage], + messages: MutableSequence[Message], options: dict[str, Any], **kwargs: Any, ) -> ChatResponse: @@ -188,13 +188,13 @@ async def _get_non_streaming_response( logger.debug(f"Running base chat client inner, with: {messages=}, {options=}, {kwargs=}") self.call_count += 1 if not self.run_responses: - return ChatResponse(messages=ChatMessage(role="assistant", text=f"test response - {messages[-1].text}")) + return ChatResponse(messages=Message(role="assistant", text=f"test response - {messages[-1].text}")) response = self.run_responses.pop(0) if options.get("tool_choice") == "none": return ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", text="I broke out of the function invocation loop...", ), @@ -206,7 +206,7 @@ async def _get_non_streaming_response( def _get_streaming_response( self, *, - messages: MutableSequence[ChatMessage], + messages: MutableSequence[Message], options: dict[str, Any], **kwargs: Any, ) -> ResponseStream[ChatResponseUpdate, ChatResponse]: @@ -289,7 +289,7 @@ def description(self) -> str | None: def run( self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | Message | list[str] | list[Message] | None = None, *, thread: AgentThread | None = None, stream: bool = False, @@ -301,17 +301,17 @@ def run( async def _run_impl( self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | Message | list[str] | 
list[Message] | None = None, *, thread: AgentThread | None = None, **kwargs: Any, ) -> AgentResponse: logger.debug(f"Running mock agent, with: {messages=}, {thread=}, {kwargs=}") - return AgentResponse(messages=[ChatMessage(role="assistant", contents=[Content.from_text("Response")])]) + return AgentResponse(messages=[Message(role="assistant", contents=[Content.from_text("Response")])]) async def _run_stream_impl( self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | Message | list[str] | list[Message] | None = None, *, thread: AgentThread | None = None, **kwargs: Any, diff --git a/python/packages/core/tests/core/test_agents.py b/python/packages/core/tests/core/test_agents.py index cbd8ea0469..3e77bfc6f0 100644 --- a/python/packages/core/tests/core/test_agents.py +++ b/python/packages/core/tests/core/test_agents.py @@ -10,12 +10,10 @@ from pytest import raises from agent_framework import ( + Agent, AgentResponse, AgentResponseUpdate, AgentThread, - ChatAgent, - ChatClientProtocol, - ChatMessage, ChatMessageStore, ChatOptions, ChatResponse, @@ -23,7 +21,9 @@ Context, ContextProvider, HostedCodeInterpreterTool, + Message, SupportsAgentRun, + SupportsChatGetResponse, ToolProtocol, tool, ) @@ -55,60 +55,60 @@ async def collect_updates(updates: AsyncIterable[AgentResponseUpdate]) -> list[A assert updates[0].text == "Response" -def test_chat_client_agent_type(chat_client: ChatClientProtocol) -> None: - chat_client_agent = ChatAgent(chat_client=chat_client) +def test_chat_client_agent_type(chat_client: SupportsChatGetResponse) -> None: + chat_client_agent = Agent(chat_client=chat_client) assert isinstance(chat_client_agent, SupportsAgentRun) -async def test_chat_client_agent_init(chat_client: ChatClientProtocol) -> None: +async def test_chat_client_agent_init(chat_client: SupportsChatGetResponse) -> None: agent_id = str(uuid4()) - agent = ChatAgent(chat_client=chat_client, id=agent_id, description="Test") + agent = 
Agent(chat_client=chat_client, id=agent_id, description="Test") assert agent.id == agent_id assert agent.name is None assert agent.description == "Test" -async def test_chat_client_agent_init_with_name(chat_client: ChatClientProtocol) -> None: +async def test_chat_client_agent_init_with_name(chat_client: SupportsChatGetResponse) -> None: agent_id = str(uuid4()) - agent = ChatAgent(chat_client=chat_client, id=agent_id, name="Test Agent", description="Test") + agent = Agent(chat_client=chat_client, id=agent_id, name="Test Agent", description="Test") assert agent.id == agent_id assert agent.name == "Test Agent" assert agent.description == "Test" -async def test_chat_client_agent_run(chat_client: ChatClientProtocol) -> None: - agent = ChatAgent(chat_client=chat_client) +async def test_chat_client_agent_run(chat_client: SupportsChatGetResponse) -> None: + agent = Agent(chat_client=chat_client) result = await agent.run("Hello") assert result.text == "test response" -async def test_chat_client_agent_run_streaming(chat_client: ChatClientProtocol) -> None: - agent = ChatAgent(chat_client=chat_client) +async def test_chat_client_agent_run_streaming(chat_client: SupportsChatGetResponse) -> None: + agent = Agent(chat_client=chat_client) result = await AgentResponse.from_update_generator(agent.run("Hello", stream=True)) assert result.text == "test streaming response another update" -async def test_chat_client_agent_get_new_thread(chat_client: ChatClientProtocol) -> None: - agent = ChatAgent(chat_client=chat_client) +async def test_chat_client_agent_get_new_thread(chat_client: SupportsChatGetResponse) -> None: + agent = Agent(chat_client=chat_client) thread = agent.get_new_thread() assert isinstance(thread, AgentThread) -async def test_chat_client_agent_prepare_thread_and_messages(chat_client: ChatClientProtocol) -> None: - agent = ChatAgent(chat_client=chat_client) - message = ChatMessage(role="user", text="Hello") +async def 
test_chat_client_agent_prepare_thread_and_messages(chat_client: SupportsChatGetResponse) -> None: + agent = Agent(chat_client=chat_client) + message = Message(role="user", text="Hello") thread = AgentThread(message_store=ChatMessageStore(messages=[message])) _, _, result_messages = await agent._prepare_thread_and_messages( # type: ignore[reportPrivateUsage] thread=thread, - input_messages=[ChatMessage(role="user", text="Test")], + input_messages=[Message(role="user", text="Test")], ) assert len(result_messages) == 2 @@ -116,9 +116,9 @@ async def test_chat_client_agent_prepare_thread_and_messages(chat_client: ChatCl assert result_messages[1].text == "Test" -async def test_prepare_thread_does_not_mutate_agent_chat_options(chat_client: ChatClientProtocol) -> None: +async def test_prepare_thread_does_not_mutate_agent_chat_options(chat_client: SupportsChatGetResponse) -> None: tool = HostedCodeInterpreterTool() - agent = ChatAgent(chat_client=chat_client, tools=[tool]) + agent = Agent(chat_client=chat_client, tools=[tool]) assert agent.default_options.get("tools") is not None base_tools = agent.default_options["tools"] @@ -126,7 +126,7 @@ async def test_prepare_thread_does_not_mutate_agent_chat_options(chat_client: Ch _, prepared_chat_options, _ = await agent._prepare_thread_and_messages( # type: ignore[reportPrivateUsage] thread=thread, - input_messages=[ChatMessage(role="user", text="Test")], + input_messages=[Message(role="user", text="Test")], ) assert prepared_chat_options.get("tools") is not None @@ -136,13 +136,13 @@ async def test_prepare_thread_does_not_mutate_agent_chat_options(chat_client: Ch assert len(agent.default_options["tools"]) == 1 -async def test_chat_client_agent_update_thread_id(chat_client_base: ChatClientProtocol) -> None: +async def test_chat_client_agent_update_thread_id(chat_client_base: SupportsChatGetResponse) -> None: mock_response = ChatResponse( - messages=[ChatMessage(role="assistant", contents=[Content.from_text("test response")])], + 
messages=[Message(role="assistant", contents=[Content.from_text("test response")])], conversation_id="123", ) chat_client_base.run_responses = [mock_response] - agent = ChatAgent( + agent = Agent( chat_client=chat_client_base, tools=HostedCodeInterpreterTool(), ) @@ -154,8 +154,8 @@ async def test_chat_client_agent_update_thread_id(chat_client_base: ChatClientPr assert thread.service_thread_id == "123" -async def test_chat_client_agent_update_thread_messages(chat_client: ChatClientProtocol) -> None: - agent = ChatAgent(chat_client=chat_client) +async def test_chat_client_agent_update_thread_messages(chat_client: SupportsChatGetResponse) -> None: + agent = Agent(chat_client=chat_client) thread = agent.get_new_thread() result = await agent.run("Hello", thread=thread) @@ -164,7 +164,7 @@ async def test_chat_client_agent_update_thread_messages(chat_client: ChatClientP assert thread.service_thread_id is None assert thread.message_store is not None - chat_messages: list[ChatMessage] = await thread.message_store.list_messages() + chat_messages: list[Message] = await thread.message_store.list_messages() assert chat_messages is not None assert len(chat_messages) == 2 @@ -172,42 +172,42 @@ async def test_chat_client_agent_update_thread_messages(chat_client: ChatClientP assert chat_messages[1].text == "test response" -async def test_chat_client_agent_update_thread_conversation_id_missing(chat_client: ChatClientProtocol) -> None: - agent = ChatAgent(chat_client=chat_client) +async def test_chat_client_agent_update_thread_conversation_id_missing(chat_client: SupportsChatGetResponse) -> None: + agent = Agent(chat_client=chat_client) thread = AgentThread(service_thread_id="123") with raises(AgentExecutionException, match="Service did not return a valid conversation id"): await agent._update_thread_with_type_and_conversation_id(thread, None) # type: ignore[reportPrivateUsage] -async def test_chat_client_agent_default_author_name(chat_client: ChatClientProtocol) -> None: +async def 
test_chat_client_agent_default_author_name(chat_client: SupportsChatGetResponse) -> None: # Name is not specified here, so default name should be used - agent = ChatAgent(chat_client=chat_client) + agent = Agent(chat_client=chat_client) result = await agent.run("Hello") assert result.text == "test response" assert result.messages[0].author_name == "UnnamedAgent" -async def test_chat_client_agent_author_name_as_agent_name(chat_client: ChatClientProtocol) -> None: +async def test_chat_client_agent_author_name_as_agent_name(chat_client: SupportsChatGetResponse) -> None: # Name is specified here, so it should be used as author name - agent = ChatAgent(chat_client=chat_client, name="TestAgent") + agent = Agent(chat_client=chat_client, name="TestAgent") result = await agent.run("Hello") assert result.text == "test response" assert result.messages[0].author_name == "TestAgent" -async def test_chat_client_agent_author_name_is_used_from_response(chat_client_base: ChatClientProtocol) -> None: +async def test_chat_client_agent_author_name_is_used_from_response(chat_client_base: SupportsChatGetResponse) -> None: chat_client_base.run_responses = [ ChatResponse( messages=[ - ChatMessage(role="assistant", contents=[Content.from_text("test response")], author_name="TestAuthor") + Message(role="assistant", contents=[Content.from_text("test response")], author_name="TestAuthor") ] ) ] - agent = ChatAgent(chat_client=chat_client_base, tools=HostedCodeInterpreterTool()) + agent = Agent(chat_client=chat_client_base, tools=HostedCodeInterpreterTool()) result = await agent.run("Hello") assert result.text == "test response" @@ -216,14 +216,14 @@ async def test_chat_client_agent_author_name_is_used_from_response(chat_client_b # Mock context provider for testing class MockContextProvider(ContextProvider): - def __init__(self, messages: list[ChatMessage] | None = None) -> None: + def __init__(self, messages: list[Message] | None = None) -> None: self.context_messages = messages 
self.thread_created_called = False self.invoked_called = False self.invoking_called = False self.thread_created_thread_id = None self.invoked_thread_id = None - self.new_messages: list[ChatMessage] = [] + self.new_messages: list[Message] = [] async def thread_created(self, thread_id: str | None) -> None: self.thread_created_called = True @@ -231,47 +231,47 @@ async def thread_created(self, thread_id: str | None) -> None: async def invoked( self, - request_messages: ChatMessage | Sequence[ChatMessage], - response_messages: ChatMessage | Sequence[ChatMessage] | None = None, + request_messages: Message | Sequence[Message], + response_messages: Message | Sequence[Message] | None = None, invoke_exception: Any = None, **kwargs: Any, ) -> None: self.invoked_called = True - if isinstance(request_messages, ChatMessage): + if isinstance(request_messages, Message): self.new_messages.append(request_messages) else: self.new_messages.extend(request_messages) - if isinstance(response_messages, ChatMessage): + if isinstance(response_messages, Message): self.new_messages.append(response_messages) else: self.new_messages.extend(response_messages) - async def invoking(self, messages: ChatMessage | MutableSequence[ChatMessage], **kwargs: Any) -> Context: + async def invoking(self, messages: Message | MutableSequence[Message], **kwargs: Any) -> Context: self.invoking_called = True return Context(messages=self.context_messages) -async def test_chat_agent_context_providers_model_invoking(chat_client: ChatClientProtocol) -> None: +async def test_chat_agent_context_providers_model_invoking(chat_client: SupportsChatGetResponse) -> None: """Test that context providers' invoking is called during agent run.""" - mock_provider = MockContextProvider(messages=[ChatMessage(role="system", text="Test context instructions")]) - agent = ChatAgent(chat_client=chat_client, context_provider=mock_provider) + mock_provider = MockContextProvider(messages=[Message(role="system", text="Test context 
instructions")]) + agent = Agent(chat_client=chat_client, context_provider=mock_provider) await agent.run("Hello") assert mock_provider.invoking_called -async def test_chat_agent_context_providers_thread_created(chat_client_base: ChatClientProtocol) -> None: +async def test_chat_agent_context_providers_thread_created(chat_client_base: SupportsChatGetResponse) -> None: """Test that context providers' thread_created is called during agent run.""" mock_provider = MockContextProvider() chat_client_base.run_responses = [ ChatResponse( - messages=[ChatMessage(role="assistant", contents=[Content.from_text("test response")])], + messages=[Message(role="assistant", contents=[Content.from_text("test response")])], conversation_id="test-thread-id", ) ] - agent = ChatAgent(chat_client=chat_client_base, context_provider=mock_provider) + agent = Agent(chat_client=chat_client_base, context_provider=mock_provider) await agent.run("Hello") @@ -279,10 +279,10 @@ async def test_chat_agent_context_providers_thread_created(chat_client_base: Cha assert mock_provider.thread_created_thread_id == "test-thread-id" -async def test_chat_agent_context_providers_messages_adding(chat_client: ChatClientProtocol) -> None: +async def test_chat_agent_context_providers_messages_adding(chat_client: SupportsChatGetResponse) -> None: """Test that context providers' invoked is called during agent run.""" mock_provider = MockContextProvider() - agent = ChatAgent(chat_client=chat_client, context_provider=mock_provider) + agent = Agent(chat_client=chat_client, context_provider=mock_provider) await agent.run("Hello") @@ -291,14 +291,14 @@ async def test_chat_agent_context_providers_messages_adding(chat_client: ChatCli assert len(mock_provider.new_messages) >= 2 -async def test_chat_agent_context_instructions_in_messages(chat_client: ChatClientProtocol) -> None: +async def test_chat_agent_context_instructions_in_messages(chat_client: SupportsChatGetResponse) -> None: """Test that AI context instructions are 
included in messages.""" - mock_provider = MockContextProvider(messages=[ChatMessage(role="system", text="Context-specific instructions")]) - agent = ChatAgent(chat_client=chat_client, instructions="Agent instructions", context_provider=mock_provider) + mock_provider = MockContextProvider(messages=[Message(role="system", text="Context-specific instructions")]) + agent = Agent(chat_client=chat_client, instructions="Agent instructions", context_provider=mock_provider) # We need to test the _prepare_thread_and_messages method directly _, _, messages = await agent._prepare_thread_and_messages( # type: ignore[reportPrivateUsage] - thread=None, input_messages=[ChatMessage(role="user", text="Hello")] + thread=None, input_messages=[Message(role="user", text="Hello")] ) # Should have context instructions, and user message @@ -310,13 +310,13 @@ async def test_chat_agent_context_instructions_in_messages(chat_client: ChatClie # instructions system message is added by a chat_client -async def test_chat_agent_no_context_instructions(chat_client: ChatClientProtocol) -> None: +async def test_chat_agent_no_context_instructions(chat_client: SupportsChatGetResponse) -> None: """Test behavior when AI context has no instructions.""" mock_provider = MockContextProvider() - agent = ChatAgent(chat_client=chat_client, instructions="Agent instructions", context_provider=mock_provider) + agent = Agent(chat_client=chat_client, instructions="Agent instructions", context_provider=mock_provider) _, _, messages = await agent._prepare_thread_and_messages( # type: ignore[reportPrivateUsage] - thread=None, input_messages=[ChatMessage(role="user", text="Hello")] + thread=None, input_messages=[Message(role="user", text="Hello")] ) # Should have agent instructions and user message only @@ -325,10 +325,10 @@ async def test_chat_agent_no_context_instructions(chat_client: ChatClientProtoco assert messages[0].text == "Hello" -async def test_chat_agent_run_stream_context_providers(chat_client: 
ChatClientProtocol) -> None: +async def test_chat_agent_run_stream_context_providers(chat_client: SupportsChatGetResponse) -> None: """Test that context providers work with run method.""" - mock_provider = MockContextProvider(messages=[ChatMessage(role="system", text="Stream context instructions")]) - agent = ChatAgent(chat_client=chat_client, context_provider=mock_provider) + mock_provider = MockContextProvider(messages=[Message(role="system", text="Stream context instructions")]) + agent = Agent(chat_client=chat_client, context_provider=mock_provider) # Collect all stream updates and get final response stream = agent.run("Hello", stream=True) @@ -345,17 +345,17 @@ async def test_chat_agent_run_stream_context_providers(chat_client: ChatClientPr assert mock_provider.invoked_called -async def test_chat_agent_context_providers_with_thread_service_id(chat_client_base: ChatClientProtocol) -> None: +async def test_chat_agent_context_providers_with_thread_service_id(chat_client_base: SupportsChatGetResponse) -> None: """Test context providers with service-managed thread.""" mock_provider = MockContextProvider() chat_client_base.run_responses = [ ChatResponse( - messages=[ChatMessage(role="assistant", contents=[Content.from_text("test response")])], + messages=[Message(role="assistant", contents=[Content.from_text("test response")])], conversation_id="service-thread-123", ) ] - agent = ChatAgent(chat_client=chat_client_base, context_provider=mock_provider) + agent = Agent(chat_client=chat_client_base, context_provider=mock_provider) # Use existing service-managed thread thread = agent.get_new_thread(service_thread_id="existing-thread-id") @@ -366,9 +366,9 @@ async def test_chat_agent_context_providers_with_thread_service_id(chat_client_b # Tests for as_tool method -async def test_chat_agent_as_tool_basic(chat_client: ChatClientProtocol) -> None: +async def test_chat_agent_as_tool_basic(chat_client: SupportsChatGetResponse) -> None: """Test basic as_tool functionality.""" 
- agent = ChatAgent(chat_client=chat_client, name="TestAgent", description="Test agent for as_tool") + agent = Agent(chat_client=chat_client, name="TestAgent", description="Test agent for as_tool") tool = agent.as_tool() @@ -378,9 +378,9 @@ async def test_chat_agent_as_tool_basic(chat_client: ChatClientProtocol) -> None assert hasattr(tool, "input_model") -async def test_chat_agent_as_tool_custom_parameters(chat_client: ChatClientProtocol) -> None: +async def test_chat_agent_as_tool_custom_parameters(chat_client: SupportsChatGetResponse) -> None: """Test as_tool with custom parameters.""" - agent = ChatAgent(chat_client=chat_client, name="TestAgent", description="Original description") + agent = Agent(chat_client=chat_client, name="TestAgent", description="Original description") tool = agent.as_tool( name="CustomTool", @@ -398,9 +398,9 @@ async def test_chat_agent_as_tool_custom_parameters(chat_client: ChatClientProto assert schema["properties"]["query"]["description"] == "Custom input description" -async def test_chat_agent_as_tool_defaults(chat_client: ChatClientProtocol) -> None: +async def test_chat_agent_as_tool_defaults(chat_client: SupportsChatGetResponse) -> None: """Test as_tool with default parameters.""" - agent = ChatAgent( + agent = Agent( chat_client=chat_client, name="TestAgent", # No description provided @@ -417,18 +417,18 @@ async def test_chat_agent_as_tool_defaults(chat_client: ChatClientProtocol) -> N assert "Task for TestAgent" in schema["properties"]["task"]["description"] -async def test_chat_agent_as_tool_no_name(chat_client: ChatClientProtocol) -> None: +async def test_chat_agent_as_tool_no_name(chat_client: SupportsChatGetResponse) -> None: """Test as_tool when agent has no name (should raise ValueError).""" - agent = ChatAgent(chat_client=chat_client) # No name provided + agent = Agent(chat_client=chat_client) # No name provided # Should raise ValueError since agent has no name with raises(ValueError, match="Agent tool name cannot be 
None"): agent.as_tool() -async def test_chat_agent_as_tool_function_execution(chat_client: ChatClientProtocol) -> None: +async def test_chat_agent_as_tool_function_execution(chat_client: SupportsChatGetResponse) -> None: """Test that the generated FunctionTool can be executed.""" - agent = ChatAgent(chat_client=chat_client, name="TestAgent", description="Test agent") + agent = Agent(chat_client=chat_client, name="TestAgent", description="Test agent") tool = agent.as_tool() @@ -440,9 +440,9 @@ async def test_chat_agent_as_tool_function_execution(chat_client: ChatClientProt assert result == "test response" # From mock chat client -async def test_chat_agent_as_tool_with_stream_callback(chat_client: ChatClientProtocol) -> None: +async def test_chat_agent_as_tool_with_stream_callback(chat_client: SupportsChatGetResponse) -> None: """Test as_tool with stream callback functionality.""" - agent = ChatAgent(chat_client=chat_client, name="StreamingAgent") + agent = Agent(chat_client=chat_client, name="StreamingAgent") # Collect streaming updates collected_updates: list[AgentResponseUpdate] = [] @@ -463,9 +463,9 @@ def stream_callback(update: AgentResponseUpdate) -> None: assert result == expected_text -async def test_chat_agent_as_tool_with_custom_arg_name(chat_client: ChatClientProtocol) -> None: +async def test_chat_agent_as_tool_with_custom_arg_name(chat_client: SupportsChatGetResponse) -> None: """Test as_tool with custom argument name.""" - agent = ChatAgent(chat_client=chat_client, name="CustomArgAgent") + agent = Agent(chat_client=chat_client, name="CustomArgAgent") tool = agent.as_tool(arg_name="prompt", arg_description="Custom prompt input") @@ -474,9 +474,9 @@ async def test_chat_agent_as_tool_with_custom_arg_name(chat_client: ChatClientPr assert result == "test response" -async def test_chat_agent_as_tool_with_async_stream_callback(chat_client: ChatClientProtocol) -> None: +async def test_chat_agent_as_tool_with_async_stream_callback(chat_client: 
SupportsChatGetResponse) -> None: """Test as_tool with async stream callback functionality.""" - agent = ChatAgent(chat_client=chat_client, name="AsyncStreamingAgent") + agent = Agent(chat_client=chat_client, name="AsyncStreamingAgent") # Collect streaming updates using an async callback collected_updates: list[AgentResponseUpdate] = [] @@ -497,7 +497,7 @@ async def async_stream_callback(update: AgentResponseUpdate) -> None: assert result == expected_text -async def test_chat_agent_as_tool_name_sanitization(chat_client: ChatClientProtocol) -> None: +async def test_chat_agent_as_tool_name_sanitization(chat_client: SupportsChatGetResponse) -> None: """Test as_tool name sanitization.""" test_cases = [ ("Invoice & Billing Agent", "Invoice_Billing_Agent"), @@ -510,14 +510,14 @@ async def test_chat_agent_as_tool_name_sanitization(chat_client: ChatClientProto ] for agent_name, expected_tool_name in test_cases: - agent = ChatAgent(chat_client=chat_client, name=agent_name, description="Test agent") + agent = Agent(chat_client=chat_client, name=agent_name, description="Test agent") tool = agent.as_tool() assert tool.name == expected_tool_name, f"Expected {expected_tool_name}, got {tool.name} for input {agent_name}" -async def test_chat_agent_as_mcp_server_basic(chat_client: ChatClientProtocol) -> None: +async def test_chat_agent_as_mcp_server_basic(chat_client: SupportsChatGetResponse) -> None: """Test basic as_mcp_server functionality.""" - agent = ChatAgent(chat_client=chat_client, name="TestAgent", description="Test agent for MCP") + agent = Agent(chat_client=chat_client, name="TestAgent", description="Test agent for MCP") # Create MCP server with default parameters server = agent.as_mcp_server() @@ -528,9 +528,9 @@ async def test_chat_agent_as_mcp_server_basic(chat_client: ChatClientProtocol) - assert hasattr(server, "version") -async def test_chat_agent_run_with_mcp_tools(chat_client: ChatClientProtocol) -> None: +async def 
test_chat_agent_run_with_mcp_tools(chat_client: SupportsChatGetResponse) -> None: """Test run method with MCP tools to cover MCP tool handling code.""" - agent = ChatAgent(chat_client=chat_client, name="TestAgent", description="Test agent") + agent = Agent(chat_client=chat_client, name="TestAgent", description="Test agent") # Create a mock MCP tool mock_mcp_tool = MagicMock(spec=MCPTool) @@ -547,7 +547,7 @@ async def test_chat_agent_run_with_mcp_tools(chat_client: ChatClientProtocol) -> await agent.run(messages="Test message", tools=[mock_mcp_tool]) -async def test_chat_agent_with_local_mcp_tools(chat_client: ChatClientProtocol) -> None: +async def test_chat_agent_with_local_mcp_tools(chat_client: SupportsChatGetResponse) -> None: """Test agent initialization with local MCP tools.""" # Create a mock MCP tool mock_mcp_tool = MagicMock(spec=MCPTool) @@ -557,7 +557,7 @@ async def test_chat_agent_with_local_mcp_tools(chat_client: ChatClientProtocol) # Test agent with MCP tools in constructor with contextlib.suppress(Exception): - agent = ChatAgent(chat_client=chat_client, name="TestAgent", description="Test agent", tools=[mock_mcp_tool]) + agent = Agent(chat_client=chat_client, name="TestAgent", description="Test agent", tools=[mock_mcp_tool]) # Test async context manager with MCP tools async with agent: pass @@ -578,19 +578,17 @@ def echo_thread_info(text: str, **kwargs: Any) -> str: # type: ignore[reportUnk # Make the base client emit a function call for our tool chat_client_base.run_responses = [ ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ Content.from_function_call(call_id="1", name="echo_thread_info", arguments='{"text": "hello"}') ], ) ), - ChatResponse(messages=ChatMessage(role="assistant", text="done")), + ChatResponse(messages=Message(role="assistant", text="done")), ] - agent = ChatAgent( - chat_client=chat_client_base, tools=[echo_thread_info], chat_message_store_factory=ChatMessageStore - ) + agent = 
Agent(chat_client=chat_client_base, tools=[echo_thread_info], chat_message_store_factory=ChatMessageStore) thread = agent.get_new_thread() result = await agent.run("hello", thread=thread, options={"additional_function_arguments": {"thread": thread}}) @@ -609,7 +607,7 @@ async def test_chat_agent_tool_choice_run_level_overrides_agent_level(chat_clien original_inner = chat_client_base._inner_get_response async def capturing_inner( - *, messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any + *, messages: MutableSequence[Message], options: dict[str, Any], **kwargs: Any ) -> ChatResponse: captured_options.append(options) return await original_inner(messages=messages, options=options, **kwargs) @@ -617,7 +615,7 @@ async def capturing_inner( chat_client_base._inner_get_response = capturing_inner # Create agent with agent-level tool_choice="auto" and a tool (tools required for tool_choice to be meaningful) - agent = ChatAgent( + agent = Agent( chat_client=chat_client_base, tools=[tool_tool], options={"tool_choice": "auto"}, @@ -640,7 +638,7 @@ async def test_chat_agent_tool_choice_agent_level_used_when_run_level_not_specif original_inner = chat_client_base._inner_get_response async def capturing_inner( - *, messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any + *, messages: MutableSequence[Message], options: dict[str, Any], **kwargs: Any ) -> ChatResponse: captured_options.append(options) return await original_inner(messages=messages, options=options, **kwargs) @@ -648,7 +646,7 @@ async def capturing_inner( chat_client_base._inner_get_response = capturing_inner # Create agent with agent-level tool_choice="required" and a tool - agent = ChatAgent( + agent = Agent( chat_client=chat_client_base, tools=[tool_tool], default_options={"tool_choice": "required"}, @@ -671,7 +669,7 @@ async def test_chat_agent_tool_choice_none_at_run_preserves_agent_level(chat_cli original_inner = chat_client_base._inner_get_response async def 
capturing_inner( - *, messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any + *, messages: MutableSequence[Message], options: dict[str, Any], **kwargs: Any ) -> ChatResponse: captured_options.append(options) return await original_inner(messages=messages, options=options, **kwargs) @@ -679,7 +677,7 @@ async def capturing_inner( chat_client_base._inner_get_response = capturing_inner # Create agent with agent-level tool_choice="auto" and a tool - agent = ChatAgent( + agent = Agent( chat_client=chat_client_base, tools=[tool_tool], default_options={"tool_choice": "auto"}, @@ -808,9 +806,9 @@ def test_sanitize_agent_name_replaces_invalid_chars(): @pytest.mark.asyncio -async def test_agent_get_new_thread(chat_client_base: ChatClientProtocol, tool_tool: ToolProtocol): +async def test_agent_get_new_thread(chat_client_base: SupportsChatGetResponse, tool_tool: ToolProtocol): """Test that get_new_thread returns a new AgentThread.""" - agent = ChatAgent(chat_client=chat_client_base, tools=[tool_tool]) + agent = Agent(chat_client=chat_client_base, tools=[tool_tool]) thread = agent.get_new_thread() @@ -820,7 +818,7 @@ async def test_agent_get_new_thread(chat_client_base: ChatClientProtocol, tool_t @pytest.mark.asyncio async def test_agent_get_new_thread_with_context_provider( - chat_client_base: ChatClientProtocol, tool_tool: ToolProtocol + chat_client_base: SupportsChatGetResponse, tool_tool: ToolProtocol ): """Test that get_new_thread passes context_provider to the thread.""" @@ -829,7 +827,7 @@ async def invoking(self, messages, **kwargs): return Context() provider = TestContextProvider() - agent = ChatAgent(chat_client=chat_client_base, tools=[tool_tool], context_provider=provider) + agent = Agent(chat_client=chat_client_base, tools=[tool_tool], context_provider=provider) thread = agent.get_new_thread() @@ -839,10 +837,10 @@ async def invoking(self, messages, **kwargs): @pytest.mark.asyncio async def test_agent_get_new_thread_with_service_thread_id( - 
chat_client_base: ChatClientProtocol, tool_tool: ToolProtocol + chat_client_base: SupportsChatGetResponse, tool_tool: ToolProtocol ): """Test that get_new_thread passes kwargs like service_thread_id to the thread.""" - agent = ChatAgent(chat_client=chat_client_base, tools=[tool_tool]) + agent = Agent(chat_client=chat_client_base, tools=[tool_tool]) thread = agent.get_new_thread(service_thread_id="test-thread-123") @@ -851,9 +849,9 @@ async def test_agent_get_new_thread_with_service_thread_id( @pytest.mark.asyncio -async def test_agent_deserialize_thread(chat_client_base: ChatClientProtocol, tool_tool: ToolProtocol): +async def test_agent_deserialize_thread(chat_client_base: SupportsChatGetResponse, tool_tool: ToolProtocol): """Test deserialize_thread restores a thread from serialized state.""" - agent = ChatAgent(chat_client=chat_client_base, tools=[tool_tool]) + agent = Agent(chat_client=chat_client_base, tools=[tool_tool]) # Create serialized thread state with messages serialized_state = { @@ -876,17 +874,17 @@ async def test_agent_deserialize_thread(chat_client_base: ChatClientProtocol, to # endregion -# region Test ChatAgent initialization edge cases +# region Test Agent initialization edge cases @pytest.mark.asyncio async def test_chat_agent_raises_with_both_conversation_id_and_store(): - """Test ChatAgent raises error with both conversation_id and chat_message_store_factory.""" + """Test Agent raises error with both conversation_id and chat_message_store_factory.""" mock_client = MagicMock() mock_store_factory = MagicMock() with pytest.raises(AgentInitializationError, match="Cannot specify both"): - ChatAgent( + Agent( chat_client=mock_client, default_options={"conversation_id": "test_id"}, chat_message_store_factory=mock_store_factory, @@ -894,11 +892,11 @@ async def test_chat_agent_raises_with_both_conversation_id_and_store(): def test_chat_agent_calls_update_agent_name_on_client(): - """Test that ChatAgent calls _update_agent_name_and_description on client 
if available.""" + """Test that Agent calls _update_agent_name_and_description on client if available.""" mock_client = MagicMock() mock_client._update_agent_name_and_description = MagicMock() - ChatAgent( + Agent( chat_client=mock_client, name="TestAgent", description="Test description", @@ -909,7 +907,7 @@ def test_chat_agent_calls_update_agent_name_on_client(): @pytest.mark.asyncio -async def test_chat_agent_context_provider_adds_tools_when_agent_has_none(chat_client_base: ChatClientProtocol): +async def test_chat_agent_context_provider_adds_tools_when_agent_has_none(chat_client_base: SupportsChatGetResponse): """Test that context provider tools are used when agent has no default tools.""" @tool @@ -922,14 +920,14 @@ async def invoking(self, messages, **kwargs): return Context(tools=[context_tool]) provider = ToolContextProvider() - agent = ChatAgent(chat_client=chat_client_base, context_provider=provider) + agent = Agent(chat_client=chat_client_base, context_provider=provider) # Agent starts with empty tools list assert agent.default_options.get("tools") == [] # Run the agent and verify context tools are added _, options, _ = await agent._prepare_thread_and_messages( # type: ignore[reportPrivateUsage] - thread=None, input_messages=[ChatMessage(role="user", text="Hello")] + thread=None, input_messages=[Message(role="user", text="Hello")] ) # The context tools should now be in the options @@ -938,7 +936,9 @@ async def invoking(self, messages, **kwargs): @pytest.mark.asyncio -async def test_chat_agent_context_provider_adds_instructions_when_agent_has_none(chat_client_base: ChatClientProtocol): +async def test_chat_agent_context_provider_adds_instructions_when_agent_has_none( + chat_client_base: SupportsChatGetResponse, +): """Test that context provider instructions are used when agent has no default instructions.""" class InstructionContextProvider(ContextProvider): @@ -946,14 +946,14 @@ async def invoking(self, messages, **kwargs): return 
Context(instructions="Context-provided instructions") provider = InstructionContextProvider() - agent = ChatAgent(chat_client=chat_client_base, context_provider=provider) + agent = Agent(chat_client=chat_client_base, context_provider=provider) # Verify agent has no default instructions assert agent.default_options.get("instructions") is None # Run the agent and verify context instructions are available _, options, _ = await agent._prepare_thread_and_messages( # type: ignore[reportPrivateUsage] - thread=None, input_messages=[ChatMessage(role="user", text="Hello")] + thread=None, input_messages=[Message(role="user", text="Hello")] ) # The context instructions should now be in the options @@ -961,9 +961,9 @@ async def invoking(self, messages, **kwargs): @pytest.mark.asyncio -async def test_chat_agent_raises_on_conversation_id_mismatch(chat_client_base: ChatClientProtocol): - """Test that ChatAgent raises when thread and agent have different conversation IDs.""" - agent = ChatAgent( +async def test_chat_agent_raises_on_conversation_id_mismatch(chat_client_base: SupportsChatGetResponse): + """Test that Agent raises when thread and agent have different conversation IDs.""" + agent = Agent( chat_client=chat_client_base, default_options={"conversation_id": "agent-conversation-id"}, ) @@ -973,7 +973,7 @@ async def test_chat_agent_raises_on_conversation_id_mismatch(chat_client_base: C with pytest.raises(AgentExecutionException, match="conversation_id set on the agent is different"): await agent._prepare_thread_and_messages( # type: ignore[reportPrivateUsage] - thread=thread, input_messages=[ChatMessage(role="user", text="Hello")] + thread=thread, input_messages=[Message(role="user", text="Hello")] ) diff --git a/python/packages/core/tests/core/test_as_tool_kwargs_propagation.py b/python/packages/core/tests/core/test_as_tool_kwargs_propagation.py index 4672b10e77..b073badaef 100644 --- a/python/packages/core/tests/core/test_as_tool_kwargs_propagation.py +++ 
b/python/packages/core/tests/core/test_as_tool_kwargs_propagation.py @@ -5,7 +5,7 @@ from collections.abc import Awaitable, Callable from typing import Any -from agent_framework import ChatAgent, ChatMessage, ChatResponse, Content, agent_middleware +from agent_framework import Agent, ChatResponse, Content, Message, agent_middleware from agent_framework._middleware import AgentContext from .conftest import MockChatClient @@ -28,11 +28,11 @@ async def capture_middleware( # Setup mock response chat_client.responses = [ - ChatResponse(messages=[ChatMessage(role="assistant", text="Response from sub-agent")]), + ChatResponse(messages=[Message(role="assistant", text="Response from sub-agent")]), ] # Create sub-agent with middleware - sub_agent = ChatAgent( + sub_agent = Agent( chat_client=chat_client, name="sub_agent", middleware=[capture_middleware], @@ -70,10 +70,10 @@ async def capture_middleware( # Setup mock response chat_client.responses = [ - ChatResponse(messages=[ChatMessage(role="assistant", text="Response from sub-agent")]), + ChatResponse(messages=[Message(role="assistant", text="Response from sub-agent")]), ] - sub_agent = ChatAgent( + sub_agent = Agent( chat_client=chat_client, name="sub_agent", middleware=[capture_middleware], @@ -110,7 +110,7 @@ async def capture_middleware( chat_client.responses = [ ChatResponse( messages=[ - ChatMessage( + Message( role="assistant", contents=[ Content.from_function_call( @@ -122,19 +122,19 @@ async def capture_middleware( ) ] ), - ChatResponse(messages=[ChatMessage(role="assistant", text="Response from agent_c")]), - ChatResponse(messages=[ChatMessage(role="assistant", text="Response from agent_b")]), + ChatResponse(messages=[Message(role="assistant", text="Response from agent_c")]), + ChatResponse(messages=[Message(role="assistant", text="Response from agent_b")]), ] # Create agent C (bottom level) - agent_c = ChatAgent( + agent_c = Agent( chat_client=chat_client, name="agent_c", middleware=[capture_middleware], ) # 
Create agent B (middle level) - delegates to C - agent_b = ChatAgent( + agent_b = Agent( chat_client=chat_client, name="agent_b", tools=[agent_c.as_tool(name="call_c")], @@ -175,7 +175,7 @@ async def capture_middleware( [ChatResponseUpdate(contents=[Content.from_text(text="Streaming response")], role="assistant")], ] - sub_agent = ChatAgent( + sub_agent = Agent( chat_client=chat_client, name="sub_agent", middleware=[capture_middleware], @@ -203,10 +203,10 @@ async def test_as_tool_empty_kwargs_still_works(self, chat_client: MockChatClien """Test that as_tool works correctly when no extra kwargs are provided.""" # Setup mock response chat_client.responses = [ - ChatResponse(messages=[ChatMessage(role="assistant", text="Response from agent")]), + ChatResponse(messages=[Message(role="assistant", text="Response from agent")]), ] - sub_agent = ChatAgent( + sub_agent = Agent( chat_client=chat_client, name="sub_agent", ) @@ -232,10 +232,10 @@ async def capture_middleware( # Setup mock response chat_client.responses = [ - ChatResponse(messages=[ChatMessage(role="assistant", text="Response with options")]), + ChatResponse(messages=[Message(role="assistant", text="Response with options")]), ] - sub_agent = ChatAgent( + sub_agent = Agent( chat_client=chat_client, name="sub_agent", middleware=[capture_middleware], @@ -279,11 +279,11 @@ async def capture_middleware( # Setup mock responses for both calls chat_client.responses = [ - ChatResponse(messages=[ChatMessage(role="assistant", text="First response")]), - ChatResponse(messages=[ChatMessage(role="assistant", text="Second response")]), + ChatResponse(messages=[Message(role="assistant", text="First response")]), + ChatResponse(messages=[Message(role="assistant", text="Second response")]), ] - sub_agent = ChatAgent( + sub_agent = Agent( chat_client=chat_client, name="sub_agent", middleware=[capture_middleware], @@ -326,10 +326,10 @@ async def capture_middleware( # Setup mock response chat_client.responses = [ - 
ChatResponse(messages=[ChatMessage(role="assistant", text="Response from sub-agent")]), + ChatResponse(messages=[Message(role="assistant", text="Response from sub-agent")]), ] - sub_agent = ChatAgent( + sub_agent = Agent( chat_client=chat_client, name="sub_agent", middleware=[capture_middleware], diff --git a/python/packages/core/tests/core/test_clients.py b/python/packages/core/tests/core/test_clients.py index e0c3da64da..8fd8e3cb6b 100644 --- a/python/packages/core/tests/core/test_clients.py +++ b/python/packages/core/tests/core/test_clients.py @@ -5,49 +5,49 @@ from agent_framework import ( BaseChatClient, - ChatClientProtocol, - ChatMessage, ChatResponse, + Message, + SupportsChatGetResponse, ) -def test_chat_client_type(chat_client: ChatClientProtocol): - assert isinstance(chat_client, ChatClientProtocol) +def test_chat_client_type(chat_client: SupportsChatGetResponse): + assert isinstance(chat_client, SupportsChatGetResponse) -async def test_chat_client_get_response(chat_client: ChatClientProtocol): - response = await chat_client.get_response(ChatMessage(role="user", text="Hello")) +async def test_chat_client_get_response(chat_client: SupportsChatGetResponse): + response = await chat_client.get_response(Message(role="user", text="Hello")) assert response.text == "test response" assert response.messages[0].role == "assistant" -async def test_chat_client_get_response_streaming(chat_client: ChatClientProtocol): - async for update in chat_client.get_response(ChatMessage(role="user", text="Hello"), stream=True): +async def test_chat_client_get_response_streaming(chat_client: SupportsChatGetResponse): + async for update in chat_client.get_response(Message(role="user", text="Hello"), stream=True): assert update.text == "test streaming response " or update.text == "another update" assert update.role == "assistant" -def test_base_client(chat_client_base: ChatClientProtocol): +def test_base_client(chat_client_base: SupportsChatGetResponse): assert 
isinstance(chat_client_base, BaseChatClient) - assert isinstance(chat_client_base, ChatClientProtocol) + assert isinstance(chat_client_base, SupportsChatGetResponse) -async def test_base_client_get_response(chat_client_base: ChatClientProtocol): - response = await chat_client_base.get_response(ChatMessage(role="user", text="Hello")) +async def test_base_client_get_response(chat_client_base: SupportsChatGetResponse): + response = await chat_client_base.get_response(Message(role="user", text="Hello")) assert response.messages[0].role == "assistant" assert response.messages[0].text == "test response - Hello" -async def test_base_client_get_response_streaming(chat_client_base: ChatClientProtocol): - async for update in chat_client_base.get_response(ChatMessage(role="user", text="Hello"), stream=True): +async def test_base_client_get_response_streaming(chat_client_base: SupportsChatGetResponse): + async for update in chat_client_base.get_response(Message(role="user", text="Hello"), stream=True): assert update.text == "update - Hello" or update.text == "another update" -async def test_chat_client_instructions_handling(chat_client_base: ChatClientProtocol): +async def test_chat_client_instructions_handling(chat_client_base: SupportsChatGetResponse): instructions = "You are a helpful assistant." 
async def fake_inner_get_response(**kwargs): - return ChatResponse(messages=[ChatMessage(role="assistant", text="ok")]) + return ChatResponse(messages=[Message(role="assistant", text="ok")]) with patch.object( chat_client_base, @@ -65,7 +65,7 @@ async def fake_inner_get_response(**kwargs): from agent_framework._types import prepend_instructions_to_messages appended_messages = prepend_instructions_to_messages( - [ChatMessage(role="user", text="hello")], + [Message(role="user", text="hello")], instructions, ) assert len(appended_messages) == 2 diff --git a/python/packages/core/tests/core/test_function_invocation_logic.py b/python/packages/core/tests/core/test_function_invocation_logic.py index 946bb89724..ebb3171b19 100644 --- a/python/packages/core/tests/core/test_function_invocation_logic.py +++ b/python/packages/core/tests/core/test_function_invocation_logic.py @@ -7,18 +7,18 @@ import pytest from agent_framework import ( - ChatAgent, - ChatClientProtocol, - ChatMessage, + Agent, ChatResponse, ChatResponseUpdate, Content, + Message, + SupportsChatGetResponse, tool, ) from agent_framework._middleware import FunctionInvocationContext, FunctionMiddleware, MiddlewareTermination -async def test_base_client_with_function_calling(chat_client_base: ChatClientProtocol): +async def test_base_client_with_function_calling(chat_client_base: SupportsChatGetResponse): exec_counter = 0 @tool(name="test_function", approval_mode="never_require") @@ -29,14 +29,14 @@ def ai_func(arg1: str) -> str: chat_client_base.run_responses = [ ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ Content.from_function_call(call_id="1", name="test_function", arguments='{"arg1": "value1"}') ], ) ), - ChatResponse(messages=ChatMessage(role="assistant", text="done")), + ChatResponse(messages=Message(role="assistant", text="done")), ] response = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [ai_func]}) assert exec_counter == 1 
@@ -55,7 +55,7 @@ def ai_func(arg1: str) -> str: @pytest.mark.parametrize("max_iterations", [3]) -async def test_base_client_with_function_calling_resets(chat_client_base: ChatClientProtocol): +async def test_base_client_with_function_calling_resets(chat_client_base: SupportsChatGetResponse): exec_counter = 0 @tool(name="test_function", approval_mode="never_require") @@ -66,7 +66,7 @@ def ai_func(arg1: str) -> str: chat_client_base.run_responses = [ ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ Content.from_function_call(call_id="1", name="test_function", arguments='{"arg1": "value1"}') @@ -74,14 +74,14 @@ def ai_func(arg1: str) -> str: ) ), ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ Content.from_function_call(call_id="2", name="test_function", arguments='{"arg1": "value1"}') ], ) ), - ChatResponse(messages=ChatMessage(role="assistant", text="done")), + ChatResponse(messages=Message(role="assistant", text="done")), ] response = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [ai_func]}) assert exec_counter == 2 @@ -97,7 +97,7 @@ def ai_func(arg1: str) -> str: assert response.messages[3].contents[0].type == "function_result" -async def test_base_client_with_streaming_function_calling(chat_client_base: ChatClientProtocol): +async def test_base_client_with_streaming_function_calling(chat_client_base: SupportsChatGetResponse): exec_counter = 0 @tool(name="test_function", approval_mode="never_require") @@ -137,7 +137,7 @@ def ai_func(arg1: str) -> str: assert exec_counter == 1 -async def test_function_invocation_inside_aiohttp_server(chat_client_base: ChatClientProtocol): +async def test_function_invocation_inside_aiohttp_server(chat_client_base: SupportsChatGetResponse): import aiohttp from aiohttp import web @@ -151,7 +151,7 @@ def ai_func(user_query: str) -> str: chat_client_base.run_responses = [ ChatResponse( - messages=ChatMessage( + 
messages=Message( role="assistant", contents=[ Content.from_function_call( @@ -162,10 +162,10 @@ def ai_func(user_query: str) -> str: ], ) ), - ChatResponse(messages=ChatMessage(role="assistant", text="done")), + ChatResponse(messages=Message(role="assistant", text="done")), ] - agent = ChatAgent(chat_client=chat_client_base, tools=[ai_func]) + agent = Agent(chat_client=chat_client_base, tools=[ai_func]) async def handler(request: web.Request) -> web.Response: thread = agent.get_new_thread() @@ -190,7 +190,7 @@ async def handler(request: web.Request) -> web.Response: assert exec_counter == 1 -async def test_function_invocation_in_threaded_aiohttp_app(chat_client_base: ChatClientProtocol): +async def test_function_invocation_in_threaded_aiohttp_app(chat_client_base: SupportsChatGetResponse): import asyncio import threading from queue import Queue @@ -208,7 +208,7 @@ def ai_func(user_query: str) -> str: chat_client_base.run_responses = [ ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ Content.from_function_call( @@ -219,10 +219,10 @@ def ai_func(user_query: str) -> str: ], ) ), - ChatResponse(messages=ChatMessage(role="assistant", text="done")), + ChatResponse(messages=Message(role="assistant", text="done")), ] - agent = ChatAgent(chat_client=chat_client_base, tools=[ai_func]) + agent = Agent(chat_client=chat_client_base, tools=[ai_func]) ready_event = threading.Event() port_queue: Queue[int] = Queue() @@ -297,7 +297,7 @@ async def runner_main() -> None: ) @pytest.mark.parametrize("streaming", [False, True], ids=["non-streaming", "streaming"]) async def test_function_invocation_scenarios( - chat_client_base: ChatClientProtocol, + chat_client_base: SupportsChatGetResponse, streaming: bool, thread_type: str | None, approval_required: bool | str, @@ -339,11 +339,11 @@ def func_with_approval(arg1: str) -> str: # Single function call content func_call = Content.from_function_call(call_id="1", name=function_name, arguments='{"arg1": 
"value1"}') - completion = ChatMessage(role="assistant", text="done") + completion = Message(role="assistant", text="done") - chat_client_base.run_responses = [ - ChatResponse(messages=ChatMessage(role="assistant", contents=[func_call])) - ] + ([] if approval_required else [ChatResponse(messages=completion)]) + chat_client_base.run_responses = [ChatResponse(messages=Message(role="assistant", contents=[func_call]))] + ( + [] if approval_required else [ChatResponse(messages=completion)] + ) chat_client_base.streaming_responses = [ [ @@ -371,7 +371,7 @@ def func_with_approval(arg1: str) -> str: Content.from_function_call(call_id="2", name="approval_func", arguments='{"arg1": "value2"}'), ] - chat_client_base.run_responses = [ChatResponse(messages=ChatMessage(role="assistant", contents=func_calls))] + chat_client_base.run_responses = [ChatResponse(messages=Message(role="assistant", contents=func_calls))] chat_client_base.streaming_responses = [ [ @@ -468,7 +468,7 @@ def func_with_approval(arg1: str) -> str: assert exec_counter == 0 # Neither function executed yet -async def test_rejected_approval(chat_client_base: ChatClientProtocol): +async def test_rejected_approval(chat_client_base: SupportsChatGetResponse): """Test that rejecting an approval alongside an approved one is handled correctly.""" exec_counter_approved = 0 @@ -489,7 +489,7 @@ def func_rejected(arg1: str) -> str: # Setup: two function calls that require approval chat_client_base.run_responses = [ ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ Content.from_function_call(call_id="1", name="approved_func", arguments='{"arg1": "value1"}'), @@ -497,7 +497,7 @@ def func_rejected(arg1: str) -> str: ], ) ), - ChatResponse(messages=ChatMessage(role="assistant", text="done")), + ChatResponse(messages=Message(role="assistant", text="done")), ] # Get the response with approval requests @@ -527,7 +527,7 @@ def func_rejected(arg1: str) -> str: ) # Continue conversation with one 
approved and one rejected - all_messages = response.messages + [ChatMessage(role="user", contents=[approved_response, rejected_response])] + all_messages = response.messages + [Message(role="user", contents=[approved_response, rejected_response])] # Call get_response which will process the approvals await chat_client_base.get_response( @@ -564,7 +564,7 @@ def func_rejected(arg1: str) -> str: assert msg.role == "tool", f"Message with FunctionResultContent must have role='tool', got '{msg.role}'" -async def test_approval_requests_in_assistant_message(chat_client_base: ChatClientProtocol): +async def test_approval_requests_in_assistant_message(chat_client_base: SupportsChatGetResponse): """Approval requests should be added to the assistant message that contains the function call.""" exec_counter = 0 @@ -576,7 +576,7 @@ def func_with_approval(arg1: str) -> str: chat_client_base.run_responses = [ ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ Content.from_function_call(call_id="1", name="test_func", arguments='{"arg1": "value1"}'), @@ -598,7 +598,7 @@ def func_with_approval(arg1: str) -> str: assert exec_counter == 0 -async def test_persisted_approval_messages_replay_correctly(chat_client_base: ChatClientProtocol): +async def test_persisted_approval_messages_replay_correctly(chat_client_base: SupportsChatGetResponse): """Approval flow should work when messages are persisted and sent back (thread scenario).""" exec_counter = 0 @@ -611,14 +611,14 @@ def func_with_approval(arg1: str) -> str: chat_client_base.run_responses = [ ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ Content.from_function_call(call_id="1", name="test_func", arguments='{"arg1": "value1"}'), ], ) ), - ChatResponse(messages=ChatMessage(role="assistant", text="done")), + ChatResponse(messages=Message(role="assistant", text="done")), ] # Get approval request @@ -628,7 +628,7 @@ def func_with_approval(arg1: str) -> str: # Store 
messages (like a thread would) persisted_messages = [ - ChatMessage(role="user", text="hello"), + Message(role="user", text="hello"), *response1.messages, ] @@ -639,7 +639,7 @@ def func_with_approval(arg1: str) -> str: function_call=approval_req.function_call, approved=True, ) - persisted_messages.append(ChatMessage(role="user", contents=[approval_response])) + persisted_messages.append(Message(role="user", contents=[approval_response])) # Continue with all persisted messages response2 = await chat_client_base.get_response( @@ -651,7 +651,7 @@ def func_with_approval(arg1: str) -> str: assert exec_counter == 1 -async def test_no_duplicate_function_calls_after_approval_processing(chat_client_base: ChatClientProtocol): +async def test_no_duplicate_function_calls_after_approval_processing(chat_client_base: SupportsChatGetResponse): """Processing approval should not create duplicate function calls in messages.""" @tool(name="test_func", approval_mode="always_require") @@ -660,14 +660,14 @@ def func_with_approval(arg1: str) -> str: chat_client_base.run_responses = [ ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ Content.from_function_call(call_id="1", name="test_func", arguments='{"arg1": "value1"}'), ], ) ), - ChatResponse(messages=ChatMessage(role="assistant", text="done")), + ChatResponse(messages=Message(role="assistant", text="done")), ] response1 = await chat_client_base.get_response( @@ -681,7 +681,7 @@ def func_with_approval(arg1: str) -> str: approved=True, ) - all_messages = response1.messages + [ChatMessage(role="user", contents=[approval_response])] + all_messages = response1.messages + [Message(role="user", contents=[approval_response])] await chat_client_base.get_response(all_messages, options={"tool_choice": "auto", "tools": [func_with_approval]}) # Count function calls with the same call_id @@ -695,7 +695,7 @@ def func_with_approval(arg1: str) -> str: assert function_call_count == 1 -async def 
test_rejection_result_uses_function_call_id(chat_client_base: ChatClientProtocol): +async def test_rejection_result_uses_function_call_id(chat_client_base: SupportsChatGetResponse): """Rejection error result should use the function call's call_id, not the approval's id.""" @tool(name="test_func", approval_mode="always_require") @@ -704,14 +704,14 @@ def func_with_approval(arg1: str) -> str: chat_client_base.run_responses = [ ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ Content.from_function_call(call_id="call_123", name="test_func", arguments='{"arg1": "value1"}'), ], ) ), - ChatResponse(messages=ChatMessage(role="assistant", text="done")), + ChatResponse(messages=Message(role="assistant", text="done")), ] response1 = await chat_client_base.get_response( @@ -725,7 +725,7 @@ def func_with_approval(arg1: str) -> str: approved=False, ) - all_messages = response1.messages + [ChatMessage(role="user", contents=[rejection_response])] + all_messages = response1.messages + [Message(role="user", contents=[rejection_response])] await chat_client_base.get_response(all_messages, options={"tool_choice": "auto", "tools": [func_with_approval]}) # Find the rejection result @@ -741,7 +741,7 @@ def func_with_approval(arg1: str) -> str: @pytest.mark.skip(reason="Failsafe behavior with max_iterations needs investigation in unified API") @pytest.mark.skip(reason="Failsafe behavior with max_iterations needs investigation in unified API") -async def test_max_iterations_limit(chat_client_base: ChatClientProtocol): +async def test_max_iterations_limit(chat_client_base: SupportsChatGetResponse): """Test that MAX_ITERATIONS in additional_properties limits function call loops.""" exec_counter = 0 @@ -754,7 +754,7 @@ def ai_func(arg1: str) -> str: # Set up multiple function call responses to create a loop chat_client_base.run_responses = [ ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ 
Content.from_function_call(call_id="1", name="test_function", arguments='{"arg1": "value1"}') @@ -762,7 +762,7 @@ def ai_func(arg1: str) -> str: ) ), ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ Content.from_function_call(call_id="2", name="test_function", arguments='{"arg1": "value2"}') @@ -770,7 +770,7 @@ def ai_func(arg1: str) -> str: ) ), # Failsafe response when tool_choice is set to "none" - ChatResponse(messages=ChatMessage(role="assistant", text="giving up on tools")), + ChatResponse(messages=Message(role="assistant", text="giving up on tools")), ] # Set max_iterations to 1 in additional_properties @@ -786,7 +786,7 @@ def ai_func(arg1: str) -> str: assert response.messages[-1].text == "I broke out of the function invocation loop..." # Failsafe response -async def test_function_invocation_config_enabled_false(chat_client_base: ChatClientProtocol): +async def test_function_invocation_config_enabled_false(chat_client_base: SupportsChatGetResponse): """Test that setting enabled=False disables function invocation.""" exec_counter = 0 @@ -797,7 +797,7 @@ def ai_func(arg1: str) -> str: return f"Processed {arg1}" chat_client_base.run_responses = [ - ChatResponse(messages=ChatMessage(role="assistant", text="response without function calling")), + ChatResponse(messages=Message(role="assistant", text="response without function calling")), ] # Disable function invocation @@ -812,7 +812,7 @@ def ai_func(arg1: str) -> str: @pytest.mark.skip(reason="Error handling and failsafe behavior needs investigation in unified API") -async def test_function_invocation_config_max_consecutive_errors(chat_client_base: ChatClientProtocol): +async def test_function_invocation_config_max_consecutive_errors(chat_client_base: SupportsChatGetResponse): """Test that max_consecutive_errors_per_request limits error retries.""" @tool(name="error_function", approval_mode="never_require") @@ -822,7 +822,7 @@ def error_func(arg1: str) -> str: # Set up 
multiple function call responses that will all error chat_client_base.run_responses = [ ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ Content.from_function_call(call_id="1", name="error_function", arguments='{"arg1": "value1"}') @@ -830,7 +830,7 @@ def error_func(arg1: str) -> str: ) ), ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ Content.from_function_call(call_id="2", name="error_function", arguments='{"arg1": "value2"}') @@ -838,7 +838,7 @@ def error_func(arg1: str) -> str: ) ), ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ Content.from_function_call(call_id="3", name="error_function", arguments='{"arg1": "value3"}') @@ -846,14 +846,14 @@ def error_func(arg1: str) -> str: ) ), ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ Content.from_function_call(call_id="4", name="error_function", arguments='{"arg1": "value4"}') ], ) ), - ChatResponse(messages=ChatMessage(role="assistant", text="final response")), + ChatResponse(messages=Message(role="assistant", text="final response")), ] # Set max_consecutive_errors to 2 @@ -879,7 +879,7 @@ def error_func(arg1: str) -> str: assert len(function_calls) <= 2 -async def test_function_invocation_config_terminate_on_unknown_calls_false(chat_client_base: ChatClientProtocol): +async def test_function_invocation_config_terminate_on_unknown_calls_false(chat_client_base: SupportsChatGetResponse): """Test that terminate_on_unknown_calls=False returns error message for unknown functions.""" exec_counter = 0 @@ -891,14 +891,14 @@ def known_func(arg1: str) -> str: chat_client_base.run_responses = [ ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ Content.from_function_call(call_id="1", name="unknown_function", arguments='{"arg1": "value1"}') ], ) ), - ChatResponse(messages=ChatMessage(role="assistant", text="done")), + 
ChatResponse(messages=Message(role="assistant", text="done")), ] # Set terminate_on_unknown_calls to False (default) @@ -914,7 +914,7 @@ def known_func(arg1: str) -> str: assert exec_counter == 0 # Known function not executed -async def test_function_invocation_config_terminate_on_unknown_calls_true(chat_client_base: ChatClientProtocol): +async def test_function_invocation_config_terminate_on_unknown_calls_true(chat_client_base: SupportsChatGetResponse): """Test that terminate_on_unknown_calls=True stops execution on unknown functions.""" exec_counter = 0 @@ -926,7 +926,7 @@ def known_func(arg1: str) -> str: chat_client_base.run_responses = [ ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ Content.from_function_call(call_id="1", name="unknown_function", arguments='{"arg1": "value1"}') @@ -945,7 +945,7 @@ def known_func(arg1: str) -> str: assert exec_counter == 0 -async def test_function_invocation_config_additional_tools(chat_client_base: ChatClientProtocol): +async def test_function_invocation_config_additional_tools(chat_client_base: SupportsChatGetResponse): """Test that additional_tools are available but treated as declaration_only.""" exec_counter_visible = 0 exec_counter_hidden = 0 @@ -964,14 +964,14 @@ def hidden_func(arg1: str) -> str: chat_client_base.run_responses = [ ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ Content.from_function_call(call_id="1", name="hidden_function", arguments='{"arg1": "value1"}') ], ) ), - ChatResponse(messages=ChatMessage(role="assistant", text="done")), + ChatResponse(messages=Message(role="assistant", text="done")), ] # Add hidden_func to additional_tools @@ -994,7 +994,7 @@ def hidden_func(arg1: str) -> str: assert len(function_calls) >= 1 -async def test_function_invocation_config_include_detailed_errors_false(chat_client_base: ChatClientProtocol): +async def test_function_invocation_config_include_detailed_errors_false(chat_client_base: 
SupportsChatGetResponse): """Test that include_detailed_errors=False returns generic error messages.""" @tool(name="error_function", approval_mode="never_require") @@ -1003,14 +1003,14 @@ def error_func(arg1: str) -> str: chat_client_base.run_responses = [ ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ Content.from_function_call(call_id="1", name="error_function", arguments='{"arg1": "value1"}') ], ) ), - ChatResponse(messages=ChatMessage(role="assistant", text="done")), + ChatResponse(messages=Message(role="assistant", text="done")), ] # Set include_detailed_errors to False (default) @@ -1028,7 +1028,7 @@ def error_func(arg1: str) -> str: assert "Error:" in error_result.result # Generic error prefix -async def test_function_invocation_config_include_detailed_errors_true(chat_client_base: ChatClientProtocol): +async def test_function_invocation_config_include_detailed_errors_true(chat_client_base: SupportsChatGetResponse): """Test that include_detailed_errors=True returns detailed error information.""" @tool(name="error_function", approval_mode="never_require") @@ -1037,14 +1037,14 @@ def error_func(arg1: str) -> str: chat_client_base.run_responses = [ ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ Content.from_function_call(call_id="1", name="error_function", arguments='{"arg1": "value1"}') ], ) ), - ChatResponse(messages=ChatMessage(role="assistant", text="done")), + ChatResponse(messages=Message(role="assistant", text="done")), ] # Set include_detailed_errors to True @@ -1098,7 +1098,7 @@ async def test_function_invocation_config_validation_max_consecutive_errors(): normalize_function_invocation_configuration({"max_consecutive_errors_per_request": -1}) -async def test_argument_validation_error_with_detailed_errors(chat_client_base: ChatClientProtocol): +async def test_argument_validation_error_with_detailed_errors(chat_client_base: SupportsChatGetResponse): """Test that argument 
validation errors include details when include_detailed_errors=True.""" @tool(name="typed_function", approval_mode="never_require") @@ -1107,14 +1107,14 @@ def typed_func(arg1: int) -> str: # Expects int, not str chat_client_base.run_responses = [ ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ Content.from_function_call(call_id="1", name="typed_function", arguments='{"arg1": "not_an_int"}') ], ) ), - ChatResponse(messages=ChatMessage(role="assistant", text="done")), + ChatResponse(messages=Message(role="assistant", text="done")), ] # Set include_detailed_errors to True @@ -1132,7 +1132,7 @@ def typed_func(arg1: int) -> str: # Expects int, not str assert "Exception:" in error_result.result # Detailed error included -async def test_argument_validation_error_without_detailed_errors(chat_client_base: ChatClientProtocol): +async def test_argument_validation_error_without_detailed_errors(chat_client_base: SupportsChatGetResponse): """Test that argument validation errors are generic when include_detailed_errors=False.""" @tool(name="typed_function", approval_mode="never_require") @@ -1141,14 +1141,14 @@ def typed_func(arg1: int) -> str: # Expects int, not str chat_client_base.run_responses = [ ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ Content.from_function_call(call_id="1", name="typed_function", arguments='{"arg1": "not_an_int"}') ], ) ), - ChatResponse(messages=ChatMessage(role="assistant", text="done")), + ChatResponse(messages=Message(role="assistant", text="done")), ] # Set include_detailed_errors to False (default) @@ -1166,7 +1166,7 @@ def typed_func(arg1: int) -> str: # Expects int, not str assert "Exception:" not in error_result.result # No detailed error -async def test_hosted_tool_approval_response(chat_client_base: ChatClientProtocol): +async def test_hosted_tool_approval_response(chat_client_base: SupportsChatGetResponse): """Test handling of approval responses for hosted 
tools (tools not in tool_map).""" @tool(name="local_function") @@ -1184,12 +1184,12 @@ def local_func(arg1: str) -> str: ) chat_client_base.run_responses = [ - ChatResponse(messages=ChatMessage(role="assistant", text="done")), + ChatResponse(messages=Message(role="assistant", text="done")), ] # Send the approval response response = await chat_client_base.get_response( - [ChatMessage(role="user", contents=[approval_response])], + [Message(role="user", contents=[approval_response])], tool_choice="auto", tools=[local_func], ) @@ -1199,7 +1199,7 @@ def local_func(arg1: str) -> str: assert response is not None -async def test_unapproved_tool_execution_raises_exception(chat_client_base: ChatClientProtocol): +async def test_unapproved_tool_execution_raises_exception(chat_client_base: SupportsChatGetResponse): """Test that attempting to execute an unapproved tool raises ToolException.""" @tool(name="test_function", approval_mode="always_require") @@ -1208,14 +1208,14 @@ def test_func(arg1: str) -> str: chat_client_base.run_responses = [ ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ Content.from_function_call(call_id="1", name="test_function", arguments='{"arg1": "value1"}'), ], ) ), - ChatResponse(messages=ChatMessage(role="assistant", text="done")), + ChatResponse(messages=Message(role="assistant", text="done")), ] # Get approval request @@ -1231,7 +1231,7 @@ def test_func(arg1: str) -> str: ) # Continue conversation with rejection - all_messages = response1.messages + [ChatMessage(role="user", contents=[rejection_response])] + all_messages = response1.messages + [Message(role="user", contents=[rejection_response])] # This should handle the rejection gracefully (not raise ToolException to user) await chat_client_base.get_response(all_messages, options={"tool_choice": "auto", "tools": [test_func]}) @@ -1249,7 +1249,7 @@ def test_func(arg1: str) -> str: assert rejection_result is not None -async def 
test_approved_function_call_with_error_without_detailed_errors(chat_client_base: ChatClientProtocol): +async def test_approved_function_call_with_error_without_detailed_errors(chat_client_base: SupportsChatGetResponse): """Test that approved functions that raise errors return generic error messages. When include_detailed_errors=False. @@ -1265,12 +1265,12 @@ def error_func(arg1: str) -> str: chat_client_base.run_responses = [ ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[Content.from_function_call(call_id="1", name="error_func", arguments='{"arg1": "value1"}')], ) ), - ChatResponse(messages=ChatMessage(role="assistant", text="done")), + ChatResponse(messages=Message(role="assistant", text="done")), ] # Set include_detailed_errors to False (default) @@ -1288,7 +1288,7 @@ def error_func(arg1: str) -> str: approved=True, ) - all_messages = response1.messages + [ChatMessage(role="user", contents=[approval_response])] + all_messages = response1.messages + [Message(role="user", contents=[approval_response])] # Execute the approved function (which will error) await chat_client_base.get_response(all_messages, options={"tool_choice": "auto", "tools": [error_func]}) @@ -1312,7 +1312,7 @@ def error_func(arg1: str) -> str: assert "Specific error from approved function" not in error_result.result # Detail not included -async def test_approved_function_call_with_error_with_detailed_errors(chat_client_base: ChatClientProtocol): +async def test_approved_function_call_with_error_with_detailed_errors(chat_client_base: SupportsChatGetResponse): """Test that approved functions that raise errors return detailed error messages. When include_detailed_errors=True. 
@@ -1328,12 +1328,12 @@ def error_func(arg1: str) -> str: chat_client_base.run_responses = [ ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[Content.from_function_call(call_id="1", name="error_func", arguments='{"arg1": "value1"}')], ) ), - ChatResponse(messages=ChatMessage(role="assistant", text="done")), + ChatResponse(messages=Message(role="assistant", text="done")), ] # Set include_detailed_errors to True @@ -1351,7 +1351,7 @@ def error_func(arg1: str) -> str: approved=True, ) - all_messages = response1.messages + [ChatMessage(role="user", contents=[approval_response])] + all_messages = response1.messages + [Message(role="user", contents=[approval_response])] # Execute the approved function (which will error) await chat_client_base.get_response(all_messages, options={"tool_choice": "auto", "tools": [error_func]}) @@ -1376,7 +1376,7 @@ def error_func(arg1: str) -> str: assert "Specific error from approved function" in error_result.result # Detail included -async def test_approved_function_call_with_validation_error(chat_client_base: ChatClientProtocol): +async def test_approved_function_call_with_validation_error(chat_client_base: SupportsChatGetResponse): """Test that approved functions with validation errors are handled correctly.""" exec_counter = 0 @@ -1389,14 +1389,14 @@ def typed_func(arg1: int) -> str: # Expects int, not str chat_client_base.run_responses = [ ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ Content.from_function_call(call_id="1", name="typed_func", arguments='{"arg1": "not_an_int"}') ], ) ), - ChatResponse(messages=ChatMessage(role="assistant", text="done")), + ChatResponse(messages=Message(role="assistant", text="done")), ] # Set include_detailed_errors to True to see validation details @@ -1414,7 +1414,7 @@ def typed_func(arg1: int) -> str: # Expects int, not str approved=True, ) - all_messages = response1.messages + [ChatMessage(role="user", 
contents=[approval_response])] + all_messages = response1.messages + [Message(role="user", contents=[approval_response])] # Execute the approved function (which will fail validation) await chat_client_base.get_response(all_messages, options={"tool_choice": "auto", "tools": [typed_func]}) @@ -1437,7 +1437,7 @@ def typed_func(arg1: int) -> str: # Expects int, not str assert "Argument parsing failed" in error_result.result -async def test_approved_function_call_successful_execution(chat_client_base: ChatClientProtocol): +async def test_approved_function_call_successful_execution(chat_client_base: SupportsChatGetResponse): """Test that approved functions execute successfully when no errors occur.""" exec_counter = 0 @@ -1450,12 +1450,12 @@ def success_func(arg1: str) -> str: chat_client_base.run_responses = [ ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[Content.from_function_call(call_id="1", name="success_func", arguments='{"arg1": "value1"}')], ) ), - ChatResponse(messages=ChatMessage(role="assistant", text="done")), + ChatResponse(messages=Message(role="assistant", text="done")), ] # Get approval request @@ -1470,7 +1470,7 @@ def success_func(arg1: str) -> str: approved=True, ) - all_messages = response1.messages + [ChatMessage(role="user", contents=[approval_response])] + all_messages = response1.messages + [Message(role="user", contents=[approval_response])] # Execute the approved function await chat_client_base.get_response(all_messages, options={"tool_choice": "auto", "tools": [success_func]}) @@ -1492,7 +1492,7 @@ def success_func(arg1: str) -> str: assert success_result.result == "Success value1" -async def test_declaration_only_tool(chat_client_base: ChatClientProtocol): +async def test_declaration_only_tool(chat_client_base: SupportsChatGetResponse): """Test that declaration_only tools without implementation (func=None) are not executed.""" from agent_framework import FunctionTool @@ -1509,14 +1509,14 @@ async def 
test_declaration_only_tool(chat_client_base: ChatClientProtocol): chat_client_base.run_responses = [ ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ Content.from_function_call(call_id="1", name="declaration_func", arguments='{"arg1": "value1"}') ], ) ), - ChatResponse(messages=ChatMessage(role="assistant", text="done")), + ChatResponse(messages=Message(role="assistant", text="done")), ] response = await chat_client_base.get_response( @@ -1542,7 +1542,7 @@ async def test_declaration_only_tool(chat_client_base: ChatClientProtocol): assert len(function_results) == 0 -async def test_multiple_function_calls_parallel_execution(chat_client_base: ChatClientProtocol): +async def test_multiple_function_calls_parallel_execution(chat_client_base: SupportsChatGetResponse): """Test that multiple function calls are executed in parallel.""" import asyncio @@ -1564,7 +1564,7 @@ async def func2(arg1: str) -> str: chat_client_base.run_responses = [ ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ Content.from_function_call(call_id="1", name="func1", arguments='{"arg1": "value1"}'), @@ -1572,7 +1572,7 @@ async def func2(arg1: str) -> str: ], ) ), - ChatResponse(messages=ChatMessage(role="assistant", text="done")), + ChatResponse(messages=Message(role="assistant", text="done")), ] response = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [func1, func2]}) @@ -1588,7 +1588,7 @@ async def func2(arg1: str) -> str: assert len(results) == 2 -async def test_callable_function_converted_to_tool(chat_client_base: ChatClientProtocol): +async def test_callable_function_converted_to_tool(chat_client_base: SupportsChatGetResponse): """Test that plain callable functions are converted to FunctionTool.""" exec_counter = 0 @@ -1601,14 +1601,14 @@ def plain_function(arg1: str) -> str: chat_client_base.run_responses = [ ChatResponse( - messages=ChatMessage( + messages=Message( 
role="assistant", contents=[ Content.from_function_call(call_id="1", name="plain_function", arguments='{"arg1": "value1"}') ], ) ), - ChatResponse(messages=ChatMessage(role="assistant", text="done")), + ChatResponse(messages=Message(role="assistant", text="done")), ] # Pass plain function (will be auto-converted) @@ -1620,7 +1620,7 @@ def plain_function(arg1: str) -> str: assert result.result == "Plain value1" -async def test_conversation_id_handling(chat_client_base: ChatClientProtocol): +async def test_conversation_id_handling(chat_client_base: SupportsChatGetResponse): """Test that conversation_id is properly handled and messages are cleared.""" @tool(name="test_function", approval_mode="never_require") @@ -1630,7 +1630,7 @@ def test_func(arg1: str) -> str: # Return a response with a conversation_id chat_client_base.run_responses = [ ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ Content.from_function_call(call_id="1", name="test_function", arguments='{"arg1": "value1"}') @@ -1639,7 +1639,7 @@ def test_func(arg1: str) -> str: conversation_id="conv_123", # Simulate service-side thread ), ChatResponse( - messages=ChatMessage(role="assistant", text="done"), + messages=Message(role="assistant", text="done"), conversation_id="conv_123", ), ] @@ -1652,7 +1652,7 @@ def test_func(arg1: str) -> str: assert response.conversation_id == "conv_123" -async def test_function_result_appended_to_existing_assistant_message(chat_client_base: ChatClientProtocol): +async def test_function_result_appended_to_existing_assistant_message(chat_client_base: SupportsChatGetResponse): """Test that function results are appended to existing assistant message when appropriate.""" @tool(name="test_function", approval_mode="never_require") @@ -1661,14 +1661,14 @@ def test_func(arg1: str) -> str: chat_client_base.run_responses = [ ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ Content.from_function_call(call_id="1", 
name="test_function", arguments='{"arg1": "value1"}') ], ) ), - ChatResponse(messages=ChatMessage(role="assistant", text="done")), + ChatResponse(messages=Message(role="assistant", text="done")), ] response = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [test_func]}) @@ -1683,7 +1683,7 @@ def test_func(arg1: str) -> str: @pytest.mark.parametrize("max_iterations", [3]) -async def test_error_recovery_resets_counter(chat_client_base: ChatClientProtocol): +async def test_error_recovery_resets_counter(chat_client_base: SupportsChatGetResponse): """Test that error counter resets after a successful function call.""" call_count = 0 @@ -1698,7 +1698,7 @@ def sometimes_fails(arg1: str) -> str: chat_client_base.run_responses = [ ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ Content.from_function_call(call_id="1", name="sometimes_fails", arguments='{"arg1": "value1"}') @@ -1706,14 +1706,14 @@ def sometimes_fails(arg1: str) -> str: ) ), ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ Content.from_function_call(call_id="2", name="sometimes_fails", arguments='{"arg1": "value2"}') ], ) ), - ChatResponse(messages=ChatMessage(role="assistant", text="done")), + ChatResponse(messages=Message(role="assistant", text="done")), ] response = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [sometimes_fails]}) @@ -1740,7 +1740,7 @@ def sometimes_fails(arg1: str) -> str: # ==================== STREAMING SCENARIO TESTS ==================== -async def test_streaming_approval_request_generated(chat_client_base: ChatClientProtocol): +async def test_streaming_approval_request_generated(chat_client_base: SupportsChatGetResponse): """Test that approval requests are generated correctly in streaming mode.""" exec_counter = 0 @@ -1777,7 +1777,7 @@ def func_with_approval(arg1: str) -> str: @pytest.mark.skip(reason="Failsafe behavior with 
max_iterations needs investigation in unified API") -async def test_streaming_max_iterations_limit(chat_client_base: ChatClientProtocol): +async def test_streaming_max_iterations_limit(chat_client_base: SupportsChatGetResponse): """Test that MAX_ITERATIONS in streaming mode limits function call loops.""" exec_counter = 0 @@ -1829,7 +1829,7 @@ def ai_func(arg1: str) -> str: assert "I broke out of the function invocation loop..." in last_text -async def test_streaming_function_invocation_config_enabled_false(chat_client_base: ChatClientProtocol): +async def test_streaming_function_invocation_config_enabled_false(chat_client_base: SupportsChatGetResponse): """Test that setting enabled=False disables function invocation in streaming mode.""" exec_counter = 0 @@ -1858,7 +1858,7 @@ def ai_func(arg1: str) -> str: assert len(updates) > 0 -async def test_streaming_function_invocation_config_max_consecutive_errors(chat_client_base: ChatClientProtocol): +async def test_streaming_function_invocation_config_max_consecutive_errors(chat_client_base: SupportsChatGetResponse): """Test that max_consecutive_errors_per_request limits error retries in streaming mode.""" @tool(name="error_function", approval_mode="never_require") @@ -1919,7 +1919,7 @@ def error_func(arg1: str) -> str: async def test_streaming_function_invocation_config_terminate_on_unknown_calls_false( - chat_client_base: ChatClientProtocol, + chat_client_base: SupportsChatGetResponse, ): """Test that terminate_on_unknown_calls=False returns error message for unknown functions in streaming mode.""" exec_counter = 0 @@ -1963,7 +1963,7 @@ def known_func(arg1: str) -> str: @pytest.mark.skip(reason="Failsafe behavior needs investigation in unified API") async def test_streaming_function_invocation_config_terminate_on_unknown_calls_true( - chat_client_base: ChatClientProtocol, + chat_client_base: SupportsChatGetResponse, ): """Test that terminate_on_unknown_calls=True stops execution on unknown functions in streaming 
mode.""" exec_counter = 0 @@ -1996,7 +1996,9 @@ def known_func(arg1: str) -> str: assert exec_counter == 0 -async def test_streaming_function_invocation_config_include_detailed_errors_true(chat_client_base: ChatClientProtocol): +async def test_streaming_function_invocation_config_include_detailed_errors_true( + chat_client_base: SupportsChatGetResponse, +): """Test that include_detailed_errors=True returns detailed error information in streaming mode.""" @tool(name="error_function", approval_mode="never_require") @@ -2035,7 +2037,7 @@ def error_func(arg1: str) -> str: async def test_streaming_function_invocation_config_include_detailed_errors_false( - chat_client_base: ChatClientProtocol, + chat_client_base: SupportsChatGetResponse, ): """Test that include_detailed_errors=False returns generic error messages in streaming mode.""" @@ -2074,7 +2076,7 @@ def error_func(arg1: str) -> str: assert "Error:" in error_result.result # Generic error prefix -async def test_streaming_argument_validation_error_with_detailed_errors(chat_client_base: ChatClientProtocol): +async def test_streaming_argument_validation_error_with_detailed_errors(chat_client_base: SupportsChatGetResponse): """Test that argument validation errors include details when include_detailed_errors=True in streaming mode.""" @tool(name="typed_function", approval_mode="never_require") @@ -2112,7 +2114,7 @@ def typed_func(arg1: int) -> str: # Expects int, not str assert "Exception:" in error_result.result # Detailed error included -async def test_streaming_argument_validation_error_without_detailed_errors(chat_client_base: ChatClientProtocol): +async def test_streaming_argument_validation_error_without_detailed_errors(chat_client_base: SupportsChatGetResponse): """Test that argument validation errors are generic when include_detailed_errors=False in streaming mode.""" @tool(name="typed_function", approval_mode="never_require") @@ -2150,7 +2152,7 @@ def typed_func(arg1: int) -> str: # Expects int, not str assert 
"Exception:" not in error_result.result # No detailed error -async def test_streaming_multiple_function_calls_parallel_execution(chat_client_base: ChatClientProtocol): +async def test_streaming_multiple_function_calls_parallel_execution(chat_client_base: SupportsChatGetResponse): """Test that multiple function calls are executed in parallel in streaming mode.""" exec_order = [] @@ -2200,7 +2202,7 @@ async def func2(arg1: str) -> str: assert len(results) == 2 -async def test_streaming_approval_requests_in_assistant_message(chat_client_base: ChatClientProtocol): +async def test_streaming_approval_requests_in_assistant_message(chat_client_base: SupportsChatGetResponse): """Approval requests should be added to assistant updates in streaming mode.""" exec_counter = 0 @@ -2235,7 +2237,7 @@ def func_with_approval(arg1: str) -> str: assert exec_counter == 0 -async def test_streaming_error_recovery_resets_counter(chat_client_base: ChatClientProtocol): +async def test_streaming_error_recovery_resets_counter(chat_client_base: SupportsChatGetResponse): """Test that error counter resets after a successful function call in streaming mode.""" call_count = 0 @@ -2304,7 +2306,7 @@ async def process( raise MiddlewareTermination -async def test_terminate_loop_single_function_call(chat_client_base: ChatClientProtocol): +async def test_terminate_loop_single_function_call(chat_client_base: SupportsChatGetResponse): """Test that terminate_loop=True exits the function calling loop after single function call.""" exec_counter = 0 @@ -2318,14 +2320,14 @@ def ai_func(arg1: str) -> str: # If terminate_loop works, only the first response should be consumed chat_client_base.run_responses = [ ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ Content.from_function_call(call_id="1", name="test_function", arguments='{"arg1": "value1"}') ], ) ), - ChatResponse(messages=ChatMessage(role="assistant", text="done")), + ChatResponse(messages=Message(role="assistant", 
text="done")), ] response = await chat_client_base.get_response( @@ -2363,7 +2365,7 @@ async def process( await next_handler(context) -async def test_terminate_loop_multiple_function_calls_one_terminates(chat_client_base: ChatClientProtocol): +async def test_terminate_loop_multiple_function_calls_one_terminates(chat_client_base: SupportsChatGetResponse): """Test that any(terminate_loop=True) exits loop even with multiple function calls.""" normal_call_count = 0 terminating_call_count = 0 @@ -2383,7 +2385,7 @@ def terminating_func(arg1: str) -> str: # Queue up two responses: parallel function calls, then final text chat_client_base.run_responses = [ ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[ Content.from_function_call(call_id="1", name="normal_function", arguments='{"arg1": "value1"}'), @@ -2393,7 +2395,7 @@ def terminating_func(arg1: str) -> str: ], ) ), - ChatResponse(messages=ChatMessage(role="assistant", text="done")), + ChatResponse(messages=Message(role="assistant", text="done")), ] response = await chat_client_base.get_response( @@ -2420,7 +2422,7 @@ def terminating_func(arg1: str) -> str: assert len(chat_client_base.run_responses) == 1 -async def test_terminate_loop_streaming_single_function_call(chat_client_base: ChatClientProtocol): +async def test_terminate_loop_streaming_single_function_call(chat_client_base: SupportsChatGetResponse): """Test that terminate_loop=True exits the streaming function calling loop.""" exec_counter = 0 @@ -2482,10 +2484,10 @@ async def test_conversation_id_updated_in_options_between_tool_iterations(): from agent_framework import ( BaseChatClient, - ChatMessage, ChatResponse, ChatResponseUpdate, Content, + Message, ResponseStream, tool, ) @@ -2509,7 +2511,7 @@ def __init__(self) -> None: def _inner_get_response( self, *, - messages: MutableSequence[ChatMessage], + messages: MutableSequence[Message], stream: bool, options: dict[str, Any], **kwargs: Any, @@ -2523,7 +2525,7 @@ def 
_inner_get_response( async def _get() -> ChatResponse: self.call_count += 1 if not self.run_responses: - return ChatResponse(messages=ChatMessage(role="assistant", text="done")) + return ChatResponse(messages=Message(role="assistant", text="done")) return self.run_responses.pop(0) return _get() @@ -2531,7 +2533,7 @@ async def _get() -> ChatResponse: def _get_streaming_response( self, *, - messages: MutableSequence[ChatMessage], + messages: MutableSequence[Message], options: dict[str, Any], **kwargs: Any, ) -> ResponseStream[ChatResponseUpdate, ChatResponse]: @@ -2563,14 +2565,14 @@ def test_func(arg1: str) -> str: # Second response (after tool execution) should receive the updated conversation_id client.run_responses = [ ChatResponse( - messages=ChatMessage( + messages=Message( role="assistant", contents=[Content.from_function_call(call_id="call_1", name="test_func", arguments='{"arg1": "v1"}')], ), conversation_id="conv_after_first_call", ), ChatResponse( - messages=ChatMessage(role="assistant", text="done"), + messages=Message(role="assistant", text="done"), conversation_id="conv_after_second_call", ), ] diff --git a/python/packages/core/tests/core/test_kwargs_propagation_to_ai_function.py b/python/packages/core/tests/core/test_kwargs_propagation_to_ai_function.py index cbbd4b69f7..cecd466d86 100644 --- a/python/packages/core/tests/core/test_kwargs_propagation_to_ai_function.py +++ b/python/packages/core/tests/core/test_kwargs_propagation_to_ai_function.py @@ -7,12 +7,12 @@ from agent_framework import ( BaseChatClient, - ChatMessage, ChatMiddlewareLayer, ChatResponse, ChatResponseUpdate, Content, FunctionInvocationLayer, + Message, ResponseStream, tool, ) @@ -31,7 +31,7 @@ def __init__(self) -> None: def _inner_get_response( self, *, - messages: MutableSequence[ChatMessage], + messages: MutableSequence[Message], stream: bool, options: dict[str, Any], **kwargs: Any, @@ -47,19 +47,19 @@ async def _get() -> ChatResponse: async def _get_non_streaming_response( self, 
*, - messages: MutableSequence[ChatMessage], + messages: MutableSequence[Message], options: dict[str, Any], **kwargs: Any, ) -> ChatResponse: self.call_count += 1 if self.run_responses: return self.run_responses.pop(0) - return ChatResponse(messages=ChatMessage(role="assistant", text="default response")) + return ChatResponse(messages=Message(role="assistant", text="default response")) def _get_streaming_response( self, *, - messages: MutableSequence[ChatMessage], + messages: MutableSequence[Message], options: dict[str, Any], **kwargs: Any, ) -> ResponseStream[ChatResponseUpdate, ChatResponse]: @@ -110,7 +110,7 @@ def capture_kwargs_tool(x: int, **kwargs: Any) -> str: # First response: function call ChatResponse( messages=[ - ChatMessage( + Message( role="assistant", contents=[ Content.from_function_call( @@ -121,11 +121,11 @@ def capture_kwargs_tool(x: int, **kwargs: Any) -> str: ] ), # Second response: final answer - ChatResponse(messages=[ChatMessage(role="assistant", text="Done!")]), + ChatResponse(messages=[Message(role="assistant", text="Done!")]), ] result = await client.get_response( - messages=[ChatMessage(role="user", text="Test")], + messages=[Message(role="user", text="Test")], stream=False, options={ "tools": [capture_kwargs_tool], @@ -159,7 +159,7 @@ def simple_tool(x: int) -> str: client.run_responses = [ ChatResponse( messages=[ - ChatMessage( + Message( role="assistant", contents=[ Content.from_function_call(call_id="call_1", name="simple_tool", arguments='{"x": 99}') @@ -167,12 +167,12 @@ def simple_tool(x: int) -> str: ) ] ), - ChatResponse(messages=[ChatMessage(role="assistant", text="Completed!")]), + ChatResponse(messages=[Message(role="assistant", text="Completed!")]), ] # Call with additional_function_arguments - the tool should work but not receive them result = await client.get_response( - messages=[ChatMessage(role="user", text="Test")], + messages=[Message(role="user", text="Test")], stream=False, options={ "tools": [simple_tool], @@ 
-198,7 +198,7 @@ def tracking_tool(name: str, **kwargs: Any) -> str: # Two function calls in one response ChatResponse( messages=[ - ChatMessage( + Message( role="assistant", contents=[ Content.from_function_call( @@ -211,11 +211,11 @@ def tracking_tool(name: str, **kwargs: Any) -> str: ) ] ), - ChatResponse(messages=[ChatMessage(role="assistant", text="All done!")]), + ChatResponse(messages=[Message(role="assistant", text="All done!")]), ] result = await client.get_response( - messages=[ChatMessage(role="user", text="Test")], + messages=[Message(role="user", text="Test")], stream=False, options={ "tools": [tracking_tool], @@ -270,7 +270,7 @@ def streaming_capture_tool(value: str, **kwargs: Any) -> str: # Collect streaming updates updates: list[ChatResponseUpdate] = [] stream = client.get_response( - messages=[ChatMessage(role="user", text="Test")], + messages=[Message(role="user", text="Test")], stream=True, options={ "tools": [streaming_capture_tool], diff --git a/python/packages/core/tests/core/test_mcp.py b/python/packages/core/tests/core/test_mcp.py index 7695affb5a..a0309f8b73 100644 --- a/python/packages/core/tests/core/test_mcp.py +++ b/python/packages/core/tests/core/test_mcp.py @@ -13,11 +13,11 @@ from pydantic import AnyUrl, BaseModel, ValidationError from agent_framework import ( - ChatMessage, Content, MCPStdioTool, MCPStreamableHTTPTool, MCPWebsocketTool, + Message, ToolProtocol, ) from agent_framework._mcp import ( @@ -61,7 +61,7 @@ def test_mcp_prompt_message_to_ai_content(): mcp_message = types.PromptMessage(role="user", content=types.TextContent(type="text", text="Hello, world!")) ai_content = _parse_message_from_mcp(mcp_message) - assert isinstance(ai_content, ChatMessage) + assert isinstance(ai_content, Message) assert ai_content.role == "user" assert len(ai_content.contents) == 1 assert ai_content.contents[0].type == "text" @@ -349,7 +349,7 @@ def test_ai_content_to_mcp_content_types_uri(): def test_prepare_message_for_mcp(): - message = 
ChatMessage( + message = Message( role="user", contents=[ Content.from_text(text="test"), @@ -1054,7 +1054,7 @@ def get_mcp_client(self) -> _AsyncGeneratorContextManager[Any, None]: result = await prompt.invoke(arg="test_value") assert len(result) == 1 - assert isinstance(result[0], ChatMessage) + assert isinstance(result[0], Message) assert result[0].role == "user" assert len(result[0].contents) == 1 assert result[0].contents[0].text == "Test message" @@ -1413,7 +1413,7 @@ async def test_mcp_tool_sampling_callback_chat_client_exception(): async def test_mcp_tool_sampling_callback_no_valid_content(): """Test sampling callback when response has no valid content types.""" - from agent_framework import ChatMessage + from agent_framework import Message tool = MCPStdioTool(name="test_tool", command="python") @@ -1421,7 +1421,7 @@ async def test_mcp_tool_sampling_callback_no_valid_content(): mock_chat_client = AsyncMock() mock_response = Mock() mock_response.messages = [ - ChatMessage( + Message( role="assistant", contents=[ Content.from_uri( diff --git a/python/packages/core/tests/core/test_memory.py b/python/packages/core/tests/core/test_memory.py index ca28a01e8c..bd83933e54 100644 --- a/python/packages/core/tests/core/test_memory.py +++ b/python/packages/core/tests/core/test_memory.py @@ -4,14 +4,14 @@ from collections.abc import MutableSequence from typing import Any -from agent_framework import ChatMessage +from agent_framework import Message from agent_framework._memory import Context, ContextProvider class MockContextProvider(ContextProvider): """Mock ContextProvider for testing.""" - def __init__(self, messages: list[ChatMessage] | None = None) -> None: + def __init__(self, messages: list[Message] | None = None) -> None: self.context_messages = messages self.thread_created_called = False self.invoked_called = False @@ -36,7 +36,7 @@ async def invoked( self.invoked_called = True self.new_messages = request_messages - async def invoking(self, messages: ChatMessage 
| MutableSequence[ChatMessage], **kwargs: Any) -> Context: + async def invoking(self, messages: Message | MutableSequence[Message], **kwargs: Any) -> Context: """Track invoking calls and return context.""" self.invoking_called = True self.model_invoking_messages = messages @@ -52,7 +52,7 @@ class MinimalContextProvider(ContextProvider): invoked, __aenter__, and __aexit__. """ - async def invoking(self, messages: ChatMessage | MutableSequence[ChatMessage], **kwargs: Any) -> Context: + async def invoking(self, messages: Message | MutableSequence[Message], **kwargs: Any) -> Context: """Return empty context.""" return Context() @@ -69,7 +69,7 @@ def test_context_default_values(self) -> None: def test_context_with_values(self) -> None: """Test Context can be initialized with values.""" - messages = [ChatMessage(role="user", text="Test message")] + messages = [Message(role="user", text="Test message")] context = Context(instructions="Test instructions", messages=messages) assert context.instructions == "Test instructions" assert len(context.messages) == 1 @@ -89,15 +89,15 @@ async def test_thread_created(self) -> None: async def test_invoked(self) -> None: """Test invoked is called.""" provider = MockContextProvider() - message = ChatMessage(role="user", text="Test message") + message = Message(role="user", text="Test message") await provider.invoked(message) assert provider.invoked_called assert provider.new_messages == message async def test_invoking(self) -> None: """Test invoking is called and returns context.""" - provider = MockContextProvider(messages=[ChatMessage(role="user", text="Context message")]) - message = ChatMessage(role="user", text="Test message") + provider = MockContextProvider(messages=[Message(role="user", text="Context message")]) + message = Message(role="user", text="Test message") context = await provider.invoking(message) assert provider.invoking_called assert provider.model_invoking_messages == message @@ -114,7 +114,7 @@ async def 
test_base_thread_created_does_nothing(self) -> None: async def test_base_invoked_does_nothing(self) -> None: """Test that base ContextProvider.invoked does nothing by default.""" provider = MinimalContextProvider() - message = ChatMessage(role="user", text="Test") + message = Message(role="user", text="Test") await provider.invoked(message) await provider.invoked(message, response_messages=message) await provider.invoked(message, invoke_exception=Exception("test")) diff --git a/python/packages/core/tests/core/test_middleware.py b/python/packages/core/tests/core/test_middleware.py index ae84541df4..3dd10ac871 100644 --- a/python/packages/core/tests/core/test_middleware.py +++ b/python/packages/core/tests/core/test_middleware.py @@ -10,10 +10,10 @@ from agent_framework import ( AgentResponse, AgentResponseUpdate, - ChatMessage, ChatResponse, ChatResponseUpdate, Content, + Message, ResponseStream, SupportsAgentRun, ) @@ -37,7 +37,7 @@ class TestAgentContext: def test_init_with_defaults(self, mock_agent: SupportsAgentRun) -> None: """Test AgentContext initialization with default values.""" - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] context = AgentContext(agent=mock_agent, messages=messages) assert context.agent is mock_agent @@ -47,7 +47,7 @@ def test_init_with_defaults(self, mock_agent: SupportsAgentRun) -> None: def test_init_with_custom_values(self, mock_agent: SupportsAgentRun) -> None: """Test AgentContext initialization with custom values.""" - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] metadata = {"key": "value"} context = AgentContext(agent=mock_agent, messages=messages, stream=True, metadata=metadata) @@ -60,7 +60,7 @@ def test_init_with_thread(self, mock_agent: SupportsAgentRun) -> None: """Test AgentContext initialization with thread parameter.""" from agent_framework import AgentThread - messages = [ChatMessage(role="user", text="test")] + 
messages = [Message(role="user", text="test")] thread = AgentThread() context = AgentContext(agent=mock_agent, messages=messages, thread=thread) @@ -99,7 +99,7 @@ class TestChatContext: def test_init_with_defaults(self, mock_chat_client: Any) -> None: """Test ChatContext initialization with default values.""" - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {} context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) @@ -112,7 +112,7 @@ def test_init_with_defaults(self, mock_chat_client: Any) -> None: def test_init_with_custom_values(self, mock_chat_client: Any) -> None: """Test ChatContext initialization with custom values.""" - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {"temperature": 0.5} metadata = {"key": "value"} @@ -166,10 +166,10 @@ async def test_middleware(context: AgentContext, call_next: Callable[[AgentConte async def test_execute_no_middleware(self, mock_agent: SupportsAgentRun) -> None: """Test pipeline execution with no middleware.""" pipeline = AgentMiddlewarePipeline() - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] context = AgentContext(agent=mock_agent, messages=messages) - expected_response = AgentResponse(messages=[ChatMessage(role="assistant", text="response")]) + expected_response = AgentResponse(messages=[Message(role="assistant", text="response")]) async def final_handler(ctx: AgentContext) -> AgentResponse: return expected_response @@ -194,10 +194,10 @@ async def process( middleware = OrderTrackingMiddleware("test") pipeline = AgentMiddlewarePipeline(middleware) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] context = AgentContext(agent=mock_agent, messages=messages) - expected_response = 
AgentResponse(messages=[ChatMessage(role="assistant", text="response")]) + expected_response = AgentResponse(messages=[Message(role="assistant", text="response")]) async def final_handler(ctx: AgentContext) -> AgentResponse: execution_order.append("handler") @@ -210,7 +210,7 @@ async def final_handler(ctx: AgentContext) -> AgentResponse: async def test_execute_stream_no_middleware(self, mock_agent: SupportsAgentRun) -> None: """Test pipeline streaming execution with no middleware.""" pipeline = AgentMiddlewarePipeline() - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] context = AgentContext(agent=mock_agent, messages=messages, stream=True) async def final_handler(ctx: AgentContext) -> ResponseStream[AgentResponseUpdate, AgentResponse]: @@ -247,7 +247,7 @@ async def process( middleware = StreamOrderTrackingMiddleware("test") pipeline = AgentMiddlewarePipeline(middleware) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] context = AgentContext(agent=mock_agent, messages=messages, stream=True) async def final_handler(ctx: AgentContext) -> ResponseStream[AgentResponseUpdate, AgentResponse]: @@ -273,14 +273,14 @@ async def test_execute_with_pre_next_termination(self, mock_agent: SupportsAgent """Test pipeline execution with termination before next().""" middleware = self.PreNextTerminateMiddleware() pipeline = AgentMiddlewarePipeline(middleware) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] context = AgentContext(agent=mock_agent, messages=messages) execution_order: list[str] = [] async def final_handler(ctx: AgentContext) -> AgentResponse: # Handler should not be executed when terminated before next() execution_order.append("handler") - return AgentResponse(messages=[ChatMessage(role="assistant", text="response")]) + return AgentResponse(messages=[Message(role="assistant", text="response")]) response = await 
pipeline.execute(context, final_handler) assert response is None @@ -291,13 +291,13 @@ async def test_execute_with_post_next_termination(self, mock_agent: SupportsAgen """Test pipeline execution with termination after next().""" middleware = self.PostNextTerminateMiddleware() pipeline = AgentMiddlewarePipeline(middleware) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] context = AgentContext(agent=mock_agent, messages=messages) execution_order: list[str] = [] async def final_handler(ctx: AgentContext) -> AgentResponse: execution_order.append("handler") - return AgentResponse(messages=[ChatMessage(role="assistant", text="response")]) + return AgentResponse(messages=[Message(role="assistant", text="response")]) response = await pipeline.execute(context, final_handler) assert response is not None @@ -309,7 +309,7 @@ async def test_execute_stream_with_pre_next_termination(self, mock_agent: Suppor """Test pipeline streaming execution with termination before next().""" middleware = self.PreNextTerminateMiddleware() pipeline = AgentMiddlewarePipeline(middleware) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] context = AgentContext(agent=mock_agent, messages=messages, stream=True) execution_order: list[str] = [] @@ -337,7 +337,7 @@ async def test_execute_stream_with_post_next_termination(self, mock_agent: Suppo """Test pipeline streaming execution with termination after next().""" middleware = self.PostNextTerminateMiddleware() pipeline = AgentMiddlewarePipeline(middleware) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] context = AgentContext(agent=mock_agent, messages=messages, stream=True) execution_order: list[str] = [] @@ -376,11 +376,11 @@ async def process( middleware = ThreadCapturingMiddleware() pipeline = AgentMiddlewarePipeline(middleware) - messages = [ChatMessage(role="user", text="test")] + messages 
= [Message(role="user", text="test")] thread = AgentThread() context = AgentContext(agent=mock_agent, messages=messages, thread=thread) - expected_response = AgentResponse(messages=[ChatMessage(role="assistant", text="response")]) + expected_response = AgentResponse(messages=[Message(role="assistant", text="response")]) async def final_handler(ctx: AgentContext) -> AgentResponse: return expected_response @@ -403,10 +403,10 @@ async def process( middleware = ThreadCapturingMiddleware() pipeline = AgentMiddlewarePipeline(middleware) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] context = AgentContext(agent=mock_agent, messages=messages, thread=None) - expected_response = AgentResponse(messages=[ChatMessage(role="assistant", text="response")]) + expected_response = AgentResponse(messages=[Message(role="assistant", text="response")]) async def final_handler(ctx: AgentContext) -> AgentResponse: return expected_response @@ -572,11 +572,11 @@ async def test_middleware(context: ChatContext, call_next: Callable[[ChatContext async def test_execute_no_middleware(self, mock_chat_client: Any) -> None: """Test pipeline execution with no middleware.""" pipeline = ChatMiddlewarePipeline() - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {} context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) - expected_response = ChatResponse(messages=[ChatMessage(role="assistant", text="response")]) + expected_response = ChatResponse(messages=[Message(role="assistant", text="response")]) async def final_handler(ctx: ChatContext) -> ChatResponse: return expected_response @@ -599,11 +599,11 @@ async def process(self, context: ChatContext, call_next: Callable[[ChatContext], middleware = OrderTrackingChatMiddleware("test") pipeline = ChatMiddlewarePipeline(middleware) - messages = [ChatMessage(role="user", text="test")] + 
messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {} context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) - expected_response = ChatResponse(messages=[ChatMessage(role="assistant", text="response")]) + expected_response = ChatResponse(messages=[Message(role="assistant", text="response")]) async def final_handler(ctx: ChatContext) -> ChatResponse: execution_order.append("handler") @@ -616,7 +616,7 @@ async def final_handler(ctx: ChatContext) -> ChatResponse: async def test_execute_stream_no_middleware(self, mock_chat_client: Any) -> None: """Test pipeline streaming execution with no middleware.""" pipeline = ChatMiddlewarePipeline() - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {} context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options, stream=True) @@ -651,7 +651,7 @@ async def process(self, context: ChatContext, call_next: Callable[[ChatContext], middleware = StreamOrderTrackingChatMiddleware("test") pipeline = ChatMiddlewarePipeline(middleware) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {} context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options, stream=True) @@ -678,7 +678,7 @@ async def test_execute_with_pre_next_termination(self, mock_chat_client: Any) -> """Test pipeline execution with termination before next().""" middleware = self.PreNextTerminateChatMiddleware() pipeline = ChatMiddlewarePipeline(middleware) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {} context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) execution_order: list[str] = [] @@ -686,7 +686,7 @@ async def test_execute_with_pre_next_termination(self, 
mock_chat_client: Any) -> async def final_handler(ctx: ChatContext) -> ChatResponse: # Handler should not be executed when terminated before next() execution_order.append("handler") - return ChatResponse(messages=[ChatMessage(role="assistant", text="response")]) + return ChatResponse(messages=[Message(role="assistant", text="response")]) response = await pipeline.execute(context, final_handler) assert response is None @@ -697,14 +697,14 @@ async def test_execute_with_post_next_termination(self, mock_chat_client: Any) - """Test pipeline execution with termination after next().""" middleware = self.PostNextTerminateChatMiddleware() pipeline = ChatMiddlewarePipeline(middleware) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {} context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) execution_order: list[str] = [] async def final_handler(ctx: ChatContext) -> ChatResponse: execution_order.append("handler") - return ChatResponse(messages=[ChatMessage(role="assistant", text="response")]) + return ChatResponse(messages=[Message(role="assistant", text="response")]) response = await pipeline.execute(context, final_handler) assert response is not None @@ -716,7 +716,7 @@ async def test_execute_stream_with_pre_next_termination(self, mock_chat_client: """Test pipeline streaming execution with termination before next().""" middleware = self.PreNextTerminateChatMiddleware() pipeline = ChatMiddlewarePipeline(middleware) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {} context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options, stream=True) execution_order: list[str] = [] @@ -741,7 +741,7 @@ async def test_execute_stream_with_post_next_termination(self, mock_chat_client: """Test pipeline streaming execution with termination after next().""" 
middleware = self.PostNextTerminateChatMiddleware() pipeline = ChatMiddlewarePipeline(middleware) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {} context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options, stream=True) execution_order: list[str] = [] @@ -785,12 +785,12 @@ async def process( middleware = MetadataAgentMiddleware() pipeline = AgentMiddlewarePipeline(middleware) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] context = AgentContext(agent=mock_agent, messages=messages) async def final_handler(ctx: AgentContext) -> AgentResponse: metadata_updates.append("handler") - return AgentResponse(messages=[ChatMessage(role="assistant", text="response")]) + return AgentResponse(messages=[Message(role="assistant", text="response")]) result = await pipeline.execute(context, final_handler) @@ -848,12 +848,12 @@ async def test_agent_middleware( execution_order.append("function_after") pipeline = AgentMiddlewarePipeline(test_agent_middleware) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] context = AgentContext(agent=mock_agent, messages=messages) async def final_handler(ctx: AgentContext) -> AgentResponse: execution_order.append("handler") - return AgentResponse(messages=[ChatMessage(role="assistant", text="response")]) + return AgentResponse(messages=[Message(role="assistant", text="response")]) result = await pipeline.execute(context, final_handler) @@ -911,12 +911,12 @@ async def function_middleware( execution_order.append("function_after") pipeline = AgentMiddlewarePipeline(ClassMiddleware(), function_middleware) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] context = AgentContext(agent=mock_agent, messages=messages) async def final_handler(ctx: AgentContext) -> AgentResponse: 
execution_order.append("handler") - return AgentResponse(messages=[ChatMessage(role="assistant", text="response")]) + return AgentResponse(messages=[Message(role="assistant", text="response")]) result = await pipeline.execute(context, final_handler) @@ -975,13 +975,13 @@ async def function_chat_middleware( execution_order.append("function_after") pipeline = ChatMiddlewarePipeline(ClassChatMiddleware(), function_chat_middleware) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {} context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) async def final_handler(ctx: ChatContext) -> ChatResponse: execution_order.append("handler") - return ChatResponse(messages=[ChatMessage(role="assistant", text="response")]) + return ChatResponse(messages=[Message(role="assistant", text="response")]) result = await pipeline.execute(context, final_handler) @@ -1022,12 +1022,12 @@ async def process( middleware = [FirstMiddleware(), SecondMiddleware(), ThirdMiddleware()] pipeline = AgentMiddlewarePipeline(*middleware) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] context = AgentContext(agent=mock_agent, messages=messages) async def final_handler(ctx: AgentContext) -> AgentResponse: execution_order.append("handler") - return AgentResponse(messages=[ChatMessage(role="assistant", text="response")]) + return AgentResponse(messages=[Message(role="assistant", text="response")]) result = await pipeline.execute(context, final_handler) @@ -1106,13 +1106,13 @@ async def process(self, context: ChatContext, call_next: Callable[[ChatContext], middleware = [FirstChatMiddleware(), SecondChatMiddleware(), ThirdChatMiddleware()] pipeline = ChatMiddlewarePipeline(*middleware) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {} context = 
ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) async def final_handler(ctx: ChatContext) -> ChatResponse: execution_order.append("handler") - return ChatResponse(messages=[ChatMessage(role="assistant", text="response")]) + return ChatResponse(messages=[Message(role="assistant", text="response")]) result = await pipeline.execute(context, final_handler) @@ -1160,13 +1160,13 @@ async def process( middleware = ContextValidationMiddleware() pipeline = AgentMiddlewarePipeline(middleware) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] context = AgentContext(agent=mock_agent, messages=messages) async def final_handler(ctx: AgentContext) -> AgentResponse: # Verify metadata was set by middleware assert ctx.metadata.get("validated") is True - return AgentResponse(messages=[ChatMessage(role="assistant", text="response")]) + return AgentResponse(messages=[Message(role="assistant", text="response")]) result = await pipeline.execute(context, final_handler) assert result is not None @@ -1239,14 +1239,14 @@ async def process(self, context: ChatContext, call_next: Callable[[ChatContext], middleware = ChatContextValidationMiddleware() pipeline = ChatMiddlewarePipeline(middleware) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {"temperature": 0.5} context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) async def final_handler(ctx: ChatContext) -> ChatResponse: # Verify metadata was set by middleware assert ctx.metadata.get("validated") is True - return ChatResponse(messages=[ChatMessage(role="assistant", text="response")]) + return ChatResponse(messages=[Message(role="assistant", text="response")]) result = await pipeline.execute(context, final_handler) assert result is not None @@ -1268,14 +1268,14 @@ async def process( middleware = StreamingFlagMiddleware() pipeline = 
AgentMiddlewarePipeline(middleware) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] # Test non-streaming context = AgentContext(agent=mock_agent, messages=messages) async def final_handler(ctx: AgentContext) -> AgentResponse: streaming_flags.append(ctx.stream) - return AgentResponse(messages=[ChatMessage(role="assistant", text="response")]) + return AgentResponse(messages=[Message(role="assistant", text="response")]) await pipeline.execute(context, final_handler) @@ -1311,7 +1311,7 @@ async def process( middleware = StreamProcessingMiddleware() pipeline = AgentMiddlewarePipeline(middleware) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] context = AgentContext(agent=mock_agent, messages=messages, stream=True) async def final_stream_handler(ctx: AgentContext) -> ResponseStream[AgentResponseUpdate, AgentResponse]: @@ -1351,7 +1351,7 @@ async def process(self, context: ChatContext, call_next: Callable[[ChatContext], middleware = ChatStreamingFlagMiddleware() pipeline = ChatMiddlewarePipeline(middleware) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {} # Test non-streaming @@ -1359,7 +1359,7 @@ async def process(self, context: ChatContext, call_next: Callable[[ChatContext], async def final_handler(ctx: ChatContext) -> ChatResponse: streaming_flags.append(ctx.stream) - return ChatResponse(messages=[ChatMessage(role="assistant", text="response")]) + return ChatResponse(messages=[Message(role="assistant", text="response")]) await pipeline.execute(context, final_handler) @@ -1393,7 +1393,7 @@ async def process(self, context: ChatContext, call_next: Callable[[ChatContext], middleware = ChatStreamProcessingMiddleware() pipeline = ChatMiddlewarePipeline(middleware) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] chat_options: 
dict[str, Any] = {} context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options, stream=True) @@ -1477,7 +1477,7 @@ async def process( middleware = NoNextMiddleware() pipeline = AgentMiddlewarePipeline(middleware) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] context = AgentContext(agent=mock_agent, messages=messages) handler_called = False @@ -1485,7 +1485,7 @@ async def process( async def final_handler(ctx: AgentContext) -> AgentResponse: nonlocal handler_called handler_called = True - return AgentResponse(messages=[ChatMessage(role="assistant", text="should not execute")]) + return AgentResponse(messages=[Message(role="assistant", text="should not execute")]) result = await pipeline.execute(context, final_handler) @@ -1506,7 +1506,7 @@ async def process( middleware = NoNextStreamingMiddleware() pipeline = AgentMiddlewarePipeline(middleware) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] context = AgentContext(agent=mock_agent, messages=messages, stream=True) handler_called = False @@ -1580,7 +1580,7 @@ async def process( await call_next(context) pipeline = AgentMiddlewarePipeline(FirstMiddleware(), SecondMiddleware()) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] context = AgentContext(agent=mock_agent, messages=messages) handler_called = False @@ -1588,7 +1588,7 @@ async def process( async def final_handler(ctx: AgentContext) -> AgentResponse: nonlocal handler_called handler_called = True - return AgentResponse(messages=[ChatMessage(role="assistant", text="should not execute")]) + return AgentResponse(messages=[Message(role="assistant", text="should not execute")]) result = await pipeline.execute(context, final_handler) @@ -1607,7 +1607,7 @@ async def process(self, context: ChatContext, call_next: Callable[[ChatContext], middleware = NoNextChatMiddleware() pipeline 
= ChatMiddlewarePipeline(middleware) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {} context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) @@ -1616,7 +1616,7 @@ async def process(self, context: ChatContext, call_next: Callable[[ChatContext], async def final_handler(ctx: ChatContext) -> ChatResponse: nonlocal handler_called handler_called = True - return ChatResponse(messages=[ChatMessage(role="assistant", text="should not execute")]) + return ChatResponse(messages=[Message(role="assistant", text="should not execute")]) result = await pipeline.execute(context, final_handler) @@ -1635,7 +1635,7 @@ async def process(self, context: ChatContext, call_next: Callable[[ChatContext], middleware = NoNextStreamingChatMiddleware() pipeline = ChatMiddlewarePipeline(middleware) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {} context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options, stream=True) @@ -1680,7 +1680,7 @@ async def process(self, context: ChatContext, call_next: Callable[[ChatContext], await call_next(context) pipeline = ChatMiddlewarePipeline(FirstChatMiddleware(), SecondChatMiddleware()) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {} context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) @@ -1689,7 +1689,7 @@ async def process(self, context: ChatContext, call_next: Callable[[ChatContext], async def final_handler(ctx: ChatContext) -> ChatResponse: nonlocal handler_called handler_called = True - return ChatResponse(messages=[ChatMessage(role="assistant", text="should not execute")]) + return ChatResponse(messages=[Message(role="assistant", text="should not execute")]) result = await 
pipeline.execute(context, final_handler) @@ -1718,8 +1718,8 @@ def mock_function() -> FunctionTool[Any, Any]: @pytest.fixture def mock_chat_client() -> Any: """Mock chat client for testing.""" - from agent_framework import ChatClientProtocol + from agent_framework import SupportsChatGetResponse - client = MagicMock(spec=ChatClientProtocol) + client = MagicMock(spec=SupportsChatGetResponse) client.service_url = MagicMock(return_value="mock://test") return client diff --git a/python/packages/core/tests/core/test_middleware_context_result.py b/python/packages/core/tests/core/test_middleware_context_result.py index abdea790df..74f35f7e2c 100644 --- a/python/packages/core/tests/core/test_middleware_context_result.py +++ b/python/packages/core/tests/core/test_middleware_context_result.py @@ -8,11 +8,11 @@ from pydantic import BaseModel, Field from agent_framework import ( + Agent, AgentResponse, AgentResponseUpdate, - ChatAgent, - ChatMessage, Content, + Message, ResponseStream, SupportsAgentRun, ) @@ -40,7 +40,7 @@ class TestResultOverrideMiddleware: async def test_agent_middleware_response_override_non_streaming(self, mock_agent: SupportsAgentRun) -> None: """Test that agent middleware can override response for non-streaming execution.""" - override_response = AgentResponse(messages=[ChatMessage(role="assistant", text="overridden response")]) + override_response = AgentResponse(messages=[Message(role="assistant", text="overridden response")]) class ResponseOverrideMiddleware(AgentMiddleware): async def process( @@ -52,7 +52,7 @@ async def process( middleware = ResponseOverrideMiddleware() pipeline = AgentMiddlewarePipeline(middleware) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] context = AgentContext(agent=mock_agent, messages=messages) handler_called = False @@ -60,7 +60,7 @@ async def process( async def final_handler(ctx: AgentContext) -> AgentResponse: nonlocal handler_called handler_called = True - return 
AgentResponse(messages=[ChatMessage(role="assistant", text="original response")]) + return AgentResponse(messages=[Message(role="assistant", text="original response")]) result = await pipeline.execute(context, final_handler) @@ -88,7 +88,7 @@ async def process( middleware = StreamResponseOverrideMiddleware() pipeline = AgentMiddlewarePipeline(middleware) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] context = AgentContext(agent=mock_agent, messages=messages, stream=True) async def final_handler(ctx: AgentContext) -> ResponseStream[AgentResponseUpdate, AgentResponse]: @@ -141,7 +141,7 @@ async def final_handler(ctx: FunctionInvocationContext) -> str: assert handler_called async def test_chat_agent_middleware_response_override(self) -> None: - """Test result override functionality with ChatAgent integration.""" + """Test result override functionality with Agent integration.""" mock_chat_client = MockChatClient() class ChatAgentResponseOverrideMiddleware(AgentMiddleware): @@ -153,29 +153,29 @@ async def process( # Then conditionally override based on content if any("special" in msg.text for msg in context.messages if msg.text): context.result = AgentResponse( - messages=[ChatMessage(role="assistant", text="Special response from middleware!")] + messages=[Message(role="assistant", text="Special response from middleware!")] ) - # Create ChatAgent with override middleware + # Create Agent with override middleware middleware = ChatAgentResponseOverrideMiddleware() - agent = ChatAgent(chat_client=mock_chat_client, middleware=[middleware]) + agent = Agent(chat_client=mock_chat_client, middleware=[middleware]) # Test override case - override_messages = [ChatMessage(role="user", text="Give me a special response")] + override_messages = [Message(role="user", text="Give me a special response")] override_response = await agent.run(override_messages) assert override_response.messages[0].text == "Special response from 
middleware!" # Verify chat client was called since middleware called next() assert mock_chat_client.call_count == 1 # Test normal case - normal_messages = [ChatMessage(role="user", text="Normal request")] + normal_messages = [Message(role="user", text="Normal request")] normal_response = await agent.run(normal_messages) assert normal_response.messages[0].text == "test response" # Verify chat client was called for normal case assert mock_chat_client.call_count == 2 async def test_chat_agent_middleware_streaming_override(self) -> None: - """Test streaming result override functionality with ChatAgent integration.""" + """Test streaming result override functionality with Agent integration.""" mock_chat_client = MockChatClient() async def custom_stream() -> AsyncIterable[AgentResponseUpdate]: @@ -194,12 +194,12 @@ async def process( # Normal case - let the agent handle it await call_next(context) - # Create ChatAgent with override middleware + # Create Agent with override middleware middleware = ChatAgentStreamOverrideMiddleware() - agent = ChatAgent(chat_client=mock_chat_client, middleware=[middleware]) + agent = Agent(chat_client=mock_chat_client, middleware=[middleware]) # Test streaming override case - override_messages = [ChatMessage(role="user", text="Give me a custom stream")] + override_messages = [Message(role="user", text="Give me a custom stream")] override_updates: list[AgentResponseUpdate] = [] async for update in agent.run(override_messages, stream=True): override_updates.append(update) @@ -210,7 +210,7 @@ async def process( assert override_updates[2].text == " response!" 
# Test normal streaming case - normal_messages = [ChatMessage(role="user", text="Normal streaming request")] + normal_messages = [Message(role="user", text="Normal streaming request")] normal_updates: list[AgentResponseUpdate] = [] async for update in agent.run(normal_messages, stream=True): normal_updates.append(update) @@ -239,10 +239,10 @@ async def process( async def final_handler(ctx: AgentContext) -> AgentResponse: nonlocal handler_called handler_called = True - return AgentResponse(messages=[ChatMessage(role="assistant", text="executed response")]) + return AgentResponse(messages=[Message(role="assistant", text="executed response")]) # Test case where next() is NOT called - no_execute_messages = [ChatMessage(role="user", text="Don't run this")] + no_execute_messages = [Message(role="user", text="Don't run this")] no_execute_context = AgentContext(agent=mock_agent, messages=no_execute_messages, stream=False) no_execute_result = await pipeline.execute(no_execute_context, final_handler) @@ -254,7 +254,7 @@ async def final_handler(ctx: AgentContext) -> AgentResponse: handler_called = False # Test case where next() IS called - execute_messages = [ChatMessage(role="user", text="Please execute this")] + execute_messages = [Message(role="user", text="Please execute this")] execute_context = AgentContext(agent=mock_agent, messages=execute_messages, stream=False) execute_result = await pipeline.execute(execute_context, final_handler) @@ -334,11 +334,11 @@ async def process( middleware = ObservabilityMiddleware() pipeline = AgentMiddlewarePipeline(middleware) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] context = AgentContext(agent=mock_agent, messages=messages, stream=False) async def final_handler(ctx: AgentContext) -> AgentResponse: - return AgentResponse(messages=[ChatMessage(role="assistant", text="executed response")]) + return AgentResponse(messages=[Message(role="assistant", text="executed response")]) 
result = await pipeline.execute(context, final_handler) @@ -399,16 +399,16 @@ async def process( if "modify" in context.result.messages[0].text: # Override after observing context.result = AgentResponse( - messages=[ChatMessage(role="assistant", text="modified after execution")] + messages=[Message(role="assistant", text="modified after execution")] ) middleware = PostExecutionOverrideMiddleware() pipeline = AgentMiddlewarePipeline(middleware) - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] context = AgentContext(agent=mock_agent, messages=messages, stream=False) async def final_handler(ctx: AgentContext) -> AgentResponse: - return AgentResponse(messages=[ChatMessage(role="assistant", text="response to modify")]) + return AgentResponse(messages=[Message(role="assistant", text="response to modify")]) result = await pipeline.execute(context, final_handler) diff --git a/python/packages/core/tests/core/test_middleware_with_agent.py b/python/packages/core/tests/core/test_middleware_with_agent.py index 10cc8b3011..f296e6a7b1 100644 --- a/python/packages/core/tests/core/test_middleware_with_agent.py +++ b/python/packages/core/tests/core/test_middleware_with_agent.py @@ -6,13 +6,11 @@ import pytest from agent_framework import ( + Agent, AgentContext, AgentMiddleware, AgentResponseUpdate, - ChatAgent, - ChatClientProtocol, ChatContext, - ChatMessage, ChatMiddleware, ChatResponse, ChatResponseUpdate, @@ -20,9 +18,11 @@ FunctionInvocationContext, FunctionMiddleware, FunctionTool, + Message, MiddlewareException, MiddlewareTermination, MiddlewareType, + SupportsChatGetResponse, agent_middleware, chat_middleware, function_middleware, @@ -30,14 +30,14 @@ from .conftest import MockBaseChatClient, MockChatClient -# region ChatAgent Tests +# region Agent Tests class TestChatAgentClassBasedMiddleware: - """Test cases for class-based middleware integration with ChatAgent.""" + """Test cases for class-based middleware integration 
with Agent.""" - async def test_class_based_agent_middleware_with_chat_agent(self, chat_client: ChatClientProtocol) -> None: - """Test class-based agent middleware with ChatAgent.""" + async def test_class_based_agent_middleware_with_chat_agent(self, chat_client: SupportsChatGetResponse) -> None: + """Test class-based agent middleware with Agent.""" execution_order: list[str] = [] class TrackingAgentMiddleware(AgentMiddleware): @@ -51,12 +51,12 @@ async def process( await call_next(context) execution_order.append(f"{self.name}_after") - # Create ChatAgent with middleware + # Create Agent with middleware middleware = TrackingAgentMiddleware("agent_middleware") - agent = ChatAgent(chat_client=chat_client, middleware=[middleware]) + agent = Agent(chat_client=chat_client, middleware=[middleware]) # Execute the agent - messages = [ChatMessage(role="user", text="test message")] + messages = [Message(role="user", text="test message")] response = await agent.run(messages) # Verify response @@ -70,7 +70,7 @@ async def process( assert execution_order == ["agent_middleware_before", "agent_middleware_after"] async def test_class_based_function_middleware_with_chat_agent(self, chat_client: "MockChatClient") -> None: - """Test class-based function middleware with ChatAgent.""" + """Test class-based function middleware with Agent.""" class TrackingFunctionMiddleware(FunctionMiddleware): async def process( @@ -81,12 +81,12 @@ async def process( await call_next(context) middleware = TrackingFunctionMiddleware() - ChatAgent(chat_client=chat_client, middleware=[middleware]) + Agent(chat_client=chat_client, middleware=[middleware]) async def test_class_based_function_middleware_with_chat_agent_supported_client( self, chat_client_base: "MockBaseChatClient" ) -> None: - """Test class-based function middleware with ChatAgent using a full chat client.""" + """Test class-based function middleware with Agent using a full chat client.""" execution_order: list[str] = [] class 
TrackingFunctionMiddleware(FunctionMiddleware): @@ -103,9 +103,9 @@ async def process( execution_order.append(f"{self.name}_after") middleware = TrackingFunctionMiddleware("function_middleware") - agent = ChatAgent(chat_client=chat_client_base, middleware=[middleware]) + agent = Agent(chat_client=chat_client_base, middleware=[middleware]) - messages = [ChatMessage(role="user", text="test message")] + messages = [Message(role="user", text="test message")] response = await agent.run(messages) assert response is not None @@ -115,7 +115,7 @@ async def process( class TestChatAgentFunctionBasedMiddleware: - """Test cases for function-based middleware integration with ChatAgent.""" + """Test cases for function-based middleware integration with Agent.""" async def test_agent_middleware_with_pre_termination(self, chat_client: "MockChatClient") -> None: """Test that agent middleware can terminate execution before calling next().""" @@ -131,14 +131,14 @@ async def process( await call_next(context) execution_order.append("middleware_after") - # Create ChatAgent with terminating middleware + # Create Agent with terminating middleware middleware = PreTerminationMiddleware() - agent = ChatAgent(chat_client=chat_client, middleware=[middleware]) + agent = Agent(chat_client=chat_client, middleware=[middleware]) # Execute the agent with multiple messages messages = [ - ChatMessage(role="user", text="message1"), - ChatMessage(role="user", text="message2"), # This should not be processed due to termination + Message(role="user", text="message1"), + Message(role="user", text="message2"), # This should not be processed due to termination ] response = await agent.run(messages) @@ -161,14 +161,14 @@ async def process( execution_order.append("middleware_after") context.terminate = True - # Create ChatAgent with terminating middleware + # Create Agent with terminating middleware middleware = PostTerminationMiddleware() - agent = ChatAgent(chat_client=chat_client, middleware=[middleware]) + 
agent = Agent(chat_client=chat_client, middleware=[middleware]) # Execute the agent with multiple messages messages = [ - ChatMessage(role="user", text="message1"), - ChatMessage(role="user", text="message2"), + Message(role="user", text="message1"), + Message(role="user", text="message2"), ] response = await agent.run(messages) @@ -201,7 +201,7 @@ async def process( await call_next(context) execution_order.append("middleware_after") - ChatAgent(chat_client=chat_client, middleware=[PreTerminationFunctionMiddleware()], tools=[]) + Agent(chat_client=chat_client, middleware=[PreTerminationFunctionMiddleware()], tools=[]) async def test_function_middleware_with_post_termination(self, chat_client: "MockChatClient") -> None: """Test that function middleware can terminate execution after calling next().""" @@ -218,10 +218,10 @@ async def process( execution_order.append("middleware_after") context.terminate = True - ChatAgent(chat_client=chat_client, middleware=[PostTerminationFunctionMiddleware()], tools=[]) + Agent(chat_client=chat_client, middleware=[PostTerminationFunctionMiddleware()], tools=[]) async def test_function_based_agent_middleware_with_chat_agent(self, chat_client: "MockChatClient") -> None: - """Test function-based agent middleware with ChatAgent.""" + """Test function-based agent middleware with Agent.""" execution_order: list[str] = [] async def tracking_agent_middleware( @@ -231,11 +231,11 @@ async def tracking_agent_middleware( await call_next(context) execution_order.append("agent_function_after") - # Create ChatAgent with function middleware - agent = ChatAgent(chat_client=chat_client, middleware=[tracking_agent_middleware]) + # Create Agent with function middleware + agent = Agent(chat_client=chat_client, middleware=[tracking_agent_middleware]) # Execute the agent - messages = [ChatMessage(role="user", text="test message")] + messages = [Message(role="user", text="test message")] response = await agent.run(messages) # Verify response @@ -249,19 
+249,19 @@ async def tracking_agent_middleware( assert execution_order == ["agent_function_before", "agent_function_after"] async def test_function_based_function_middleware_with_chat_agent(self, chat_client: "MockChatClient") -> None: - """Test function-based function middleware with ChatAgent.""" + """Test function-based function middleware with Agent.""" async def tracking_function_middleware( context: FunctionInvocationContext, call_next: Callable[[FunctionInvocationContext], Awaitable[None]] ) -> None: await call_next(context) - ChatAgent(chat_client=chat_client, middleware=[tracking_function_middleware]) + Agent(chat_client=chat_client, middleware=[tracking_function_middleware]) async def test_function_based_function_middleware_with_supported_client( self, chat_client_base: "MockBaseChatClient" ) -> None: - """Test function-based function middleware with ChatAgent using a full chat client.""" + """Test function-based function middleware with Agent using a full chat client.""" execution_order: list[str] = [] async def tracking_function_middleware( @@ -271,8 +271,8 @@ async def tracking_function_middleware( await call_next(context) execution_order.append("function_function_after") - agent = ChatAgent(chat_client=chat_client_base, middleware=[tracking_function_middleware]) - messages = [ChatMessage(role="user", text="test message")] + agent = Agent(chat_client=chat_client_base, middleware=[tracking_function_middleware]) + messages = [Message(role="user", text="test message")] response = await agent.run(messages) assert response is not None @@ -282,10 +282,10 @@ async def tracking_function_middleware( class TestChatAgentStreamingMiddleware: - """Test cases for streaming middleware integration with ChatAgent.""" + """Test cases for streaming middleware integration with Agent.""" async def test_agent_middleware_with_streaming(self, chat_client: "MockChatClient") -> None: - """Test agent middleware with streaming ChatAgent responses.""" + """Test agent middleware 
with streaming Agent responses.""" execution_order: list[str] = [] streaming_flags: list[bool] = [] @@ -298,9 +298,9 @@ async def process( await call_next(context) execution_order.append("middleware_after") - # Create ChatAgent with middleware + # Create Agent with middleware middleware = StreamingTrackingMiddleware() - agent = ChatAgent(chat_client=chat_client, middleware=[middleware]) + agent = Agent(chat_client=chat_client, middleware=[middleware]) # Set up mock streaming responses chat_client.streaming_responses = [ @@ -311,7 +311,7 @@ async def process( ] # Execute streaming - messages = [ChatMessage(role="user", text="test message")] + messages = [Message(role="user", text="test message")] updates: list[AgentResponseUpdate] = [] async for update in agent.run(messages, stream=True): updates.append(update) @@ -340,10 +340,10 @@ async def process( streaming_flags.append(context.stream) await call_next(context) - # Create ChatAgent with middleware + # Create Agent with middleware middleware = FlagTrackingMiddleware() - agent = ChatAgent(chat_client=chat_client, middleware=[middleware]) - messages = [ChatMessage(role="user", text="test message")] + agent = Agent(chat_client=chat_client, middleware=[middleware]) + messages = [Message(role="user", text="test message")] # Test non-streaming execution response = await agent.run(messages) @@ -358,10 +358,10 @@ async def process( class TestChatAgentMultipleMiddlewareOrdering: - """Test cases for multiple middleware execution order with ChatAgent.""" + """Test cases for multiple middleware execution order with Agent.""" async def test_multiple_agent_middleware_execution_order(self, chat_client: "MockChatClient") -> None: - """Test that multiple agent middleware execute in correct order with ChatAgent.""" + """Test that multiple agent middleware execute in correct order with Agent.""" execution_order: list[str] = [] class OrderedMiddleware(AgentMiddleware): @@ -380,11 +380,11 @@ async def process( middleware2 = 
OrderedMiddleware("second") middleware3 = OrderedMiddleware("third") - # Create ChatAgent with multiple middleware - agent = ChatAgent(chat_client=chat_client, middleware=[middleware1, middleware2, middleware3]) + # Create Agent with multiple middleware + agent = Agent(chat_client=chat_client, middleware=[middleware1, middleware2, middleware3]) # Execute the agent - messages = [ChatMessage(role="user", text="test message")] + messages = [Message(role="user", text="test message")] response = await agent.run(messages) # Verify response @@ -396,7 +396,7 @@ async def process( assert execution_order == expected_order async def test_mixed_middleware_types_with_chat_agent(self, chat_client_base: "MockBaseChatClient") -> None: - """Test mixed class and function-based middleware with ChatAgent.""" + """Test mixed class and function-based middleware with Agent.""" execution_order: list[str] = [] class ClassAgentMiddleware(AgentMiddleware): @@ -431,7 +431,7 @@ async def function_function_middleware( await call_next(context) execution_order.append("function_function_after") - agent = ChatAgent( + agent = Agent( chat_client=chat_client_base, middleware=[ ClassAgentMiddleware(), @@ -440,7 +440,7 @@ async def function_function_middleware( function_function_middleware, ], ) - await agent.run([ChatMessage(role="user", text="test")]) + await agent.run([Message(role="user", text="test")]) async def test_mixed_middleware_types_with_supported_client(self, chat_client_base: "MockBaseChatClient") -> None: """Test mixed class and function-based middleware with a full chat client.""" @@ -468,7 +468,7 @@ async def function_function_middleware( await call_next(context) execution_order.append("function_function_after") - agent = ChatAgent( + agent = Agent( chat_client=chat_client_base, middleware=[ ClassAgentMiddleware(), @@ -477,7 +477,7 @@ async def function_function_middleware( ], ) - messages = [ChatMessage(role="user", text="test message")] + messages = [Message(role="user", text="test 
message")] response = await agent.run(messages) assert response is not None @@ -502,16 +502,16 @@ def _sample_tool_function_impl(location: str) -> str: ) -# region ChatAgent Function MiddlewareTypes Tests with Tools +# region Agent Function MiddlewareTypes Tests with Tools class TestChatAgentFunctionMiddlewareWithTools: - """Test cases for function middleware integration with ChatAgent when tools are used.""" + """Test cases for function middleware integration with Agent when tools are used.""" async def test_class_based_function_middleware_with_tool_calls( self, chat_client_base: "MockBaseChatClient" ) -> None: - """Test class-based function middleware with ChatAgent when function calls are made.""" + """Test class-based function middleware with Agent when function calls are made.""" execution_order: list[str] = [] class TrackingFunctionMiddleware(FunctionMiddleware): @@ -530,7 +530,7 @@ async def process( # Set up mock to return a function call first, then a regular response function_call_response = ChatResponse( messages=[ - ChatMessage( + Message( role="assistant", contents=[ Content.from_function_call( @@ -542,20 +542,20 @@ async def process( ) ] ) - final_response = ChatResponse(messages=[ChatMessage(role="assistant", text="Final response")]) + final_response = ChatResponse(messages=[Message(role="assistant", text="Final response")]) chat_client_base.run_responses = [function_call_response, final_response] - # Create ChatAgent with function middleware and tools + # Create Agent with function middleware and tools middleware = TrackingFunctionMiddleware("function_middleware") - agent = ChatAgent( + agent = Agent( chat_client=chat_client_base, middleware=[middleware], tools=[sample_tool_function], ) # Execute the agent - messages = [ChatMessage(role="user", text="Get weather for Seattle")] + messages = [Message(role="user", text="Get weather for Seattle")] response = await agent.run(messages) # Verify response @@ -579,7 +579,7 @@ async def process( async def 
test_function_based_function_middleware_with_tool_calls( self, chat_client_base: "MockBaseChatClient" ) -> None: - """Test function-based function middleware with ChatAgent when function calls are made.""" + """Test function-based function middleware with Agent when function calls are made.""" execution_order: list[str] = [] async def tracking_function_middleware( @@ -592,7 +592,7 @@ async def tracking_function_middleware( # Set up mock to return a function call first, then a regular response function_call_response = ChatResponse( messages=[ - ChatMessage( + Message( role="assistant", contents=[ Content.from_function_call( @@ -604,19 +604,19 @@ async def tracking_function_middleware( ) ] ) - final_response = ChatResponse(messages=[ChatMessage(role="assistant", text="Final response")]) + final_response = ChatResponse(messages=[Message(role="assistant", text="Final response")]) chat_client_base.run_responses = [function_call_response, final_response] - # Create ChatAgent with function middleware and tools - agent = ChatAgent( + # Create Agent with function middleware and tools + agent = Agent( chat_client=chat_client_base, middleware=[tracking_function_middleware], tools=[sample_tool_function], ) # Execute the agent - messages = [ChatMessage(role="user", text="Get weather for San Francisco")] + messages = [Message(role="user", text="Get weather for San Francisco")] response = await agent.run(messages) # Verify response @@ -640,7 +640,7 @@ async def tracking_function_middleware( async def test_mixed_agent_and_function_middleware_with_tool_calls( self, chat_client_base: "MockBaseChatClient" ) -> None: - """Test both agent and function middleware with ChatAgent when function calls are made.""" + """Test both agent and function middleware with Agent when function calls are made.""" execution_order: list[str] = [] class TrackingAgentMiddleware(AgentMiddleware): @@ -666,7 +666,7 @@ async def process( # Set up mock to return a function call first, then a regular response 
function_call_response = ChatResponse( messages=[ - ChatMessage( + Message( role="assistant", contents=[ Content.from_function_call( @@ -678,19 +678,19 @@ async def process( ) ] ) - final_response = ChatResponse(messages=[ChatMessage(role="assistant", text="Final response")]) + final_response = ChatResponse(messages=[Message(role="assistant", text="Final response")]) chat_client_base.run_responses = [function_call_response, final_response] - # Create ChatAgent with both agent and function middleware and tools - agent = ChatAgent( + # Create Agent with both agent and function middleware and tools + agent = Agent( chat_client=chat_client_base, middleware=[TrackingAgentMiddleware(), TrackingFunctionMiddleware()], tools=[sample_tool_function], ) # Execute the agent - messages = [ChatMessage(role="user", text="Get weather for New York")] + messages = [Message(role="user", text="Get weather for New York")] response = await agent.run(messages) # Verify response @@ -753,7 +753,7 @@ async def kwargs_middleware( chat_client_base.run_responses = [ ChatResponse( messages=[ - ChatMessage( + Message( role="assistant", contents=[ Content.from_function_call( @@ -763,14 +763,14 @@ async def kwargs_middleware( ) ] ), - ChatResponse(messages=[ChatMessage(role="assistant", contents=[Content.from_text("Function completed")])]), + ChatResponse(messages=[Message(role="assistant", contents=[Content.from_text("Function completed")])]), ] - # Create ChatAgent with function middleware - agent = ChatAgent(chat_client=chat_client_base, middleware=[kwargs_middleware], tools=[sample_tool_function]) + # Create Agent with function middleware + agent = Agent(chat_client=chat_client_base, middleware=[kwargs_middleware], tools=[sample_tool_function]) # Execute the agent with custom parameters passed as kwargs - messages = [ChatMessage(role="user", text="test message")] + messages = [Message(role="user", text="test message")] response = await agent.run(messages, 
options={"additional_function_arguments": {"custom_param": "test_value"}}) # Verify response @@ -792,7 +792,7 @@ async def kwargs_middleware( class TestMiddlewareDynamicRebuild: - """Test cases for dynamic middleware pipeline rebuilding with ChatAgent.""" + """Test cases for dynamic middleware pipeline rebuilding with Agent.""" class TrackingAgentMiddleware(AgentMiddleware): """Test middleware that tracks execution.""" @@ -812,7 +812,7 @@ async def test_middleware_dynamic_rebuild_non_streaming(self, chat_client: "Mock # Create agent with initial middleware middleware1 = self.TrackingAgentMiddleware("middleware1", execution_log) - agent = ChatAgent(chat_client=chat_client, middleware=[middleware1]) + agent = Agent(chat_client=chat_client, middleware=[middleware1]) # First execution - should use middleware1 await agent.run("Test message 1") @@ -862,7 +862,7 @@ async def test_middleware_dynamic_rebuild_streaming(self, chat_client: "MockChat # Create agent with initial middleware middleware1 = self.TrackingAgentMiddleware("stream_middleware1", execution_log) - agent = ChatAgent(chat_client=chat_client, middleware=[middleware1]) + agent = Agent(chat_client=chat_client, middleware=[middleware1]) # First streaming execution updates: list[AgentResponseUpdate] = [] @@ -897,7 +897,7 @@ async def test_middleware_order_change_detection(self, chat_client: "MockChatCli middleware2 = self.TrackingAgentMiddleware("second", execution_log) # Create agent with middleware in order [first, second] - agent = ChatAgent(chat_client=chat_client, middleware=[middleware1, middleware2]) + agent = Agent(chat_client=chat_client, middleware=[middleware1, middleware2]) # First execution await agent.run("Test message 1") @@ -934,7 +934,7 @@ async def test_run_level_middleware_isolation(self, chat_client: "MockChatClient execution_log: list[str] = [] # Create agent without any agent-level middleware - agent = ChatAgent(chat_client=chat_client) + agent = Agent(chat_client=chat_client) # Create 
run-level middleware run_middleware1 = self.TrackingAgentMiddleware("run1", execution_log) @@ -1003,7 +1003,7 @@ async def process( # Create agent with agent-level middleware agent_middleware = MetadataAgentMiddleware("agent") - agent = ChatAgent(chat_client=chat_client, middleware=[agent_middleware]) + agent = Agent(chat_client=chat_client, middleware=[agent_middleware]) # Create run-level middleware run_middleware = MetadataRunMiddleware("run") @@ -1023,7 +1023,7 @@ async def test_run_level_middleware_non_streaming(self, chat_client: "MockChatCl execution_log: list[str] = [] # Create agent without agent-level middleware - agent = ChatAgent(chat_client=chat_client) + agent = Agent(chat_client=chat_client) # Create run-level middleware run_middleware = self.TrackingAgentMiddleware("run_nonstream", execution_log) @@ -1058,7 +1058,7 @@ async def process( execution_log.append(f"{self.name}_end") # Create agent without agent-level middleware - agent = ChatAgent(chat_client=chat_client) + agent = Agent(chat_client=chat_client) # Set up mock streaming responses chat_client.streaming_responses = [ @@ -1149,7 +1149,7 @@ def custom_tool(message: str) -> str: # Set up mock to return a function call first, then a regular response function_call_response = ChatResponse( messages=[ - ChatMessage( + Message( role="assistant", contents=[ Content.from_function_call( @@ -1161,11 +1161,11 @@ def custom_tool(message: str) -> str: ) ] ) - final_response = ChatResponse(messages=[ChatMessage(role="assistant", text="Final response")]) + final_response = ChatResponse(messages=[Message(role="assistant", text="Final response")]) chat_client_base.run_responses = [function_call_response, final_response] # Create agent with agent-level middleware - agent = ChatAgent( + agent = Agent( chat_client=chat_client_base, middleware=[AgentLevelAgentMiddleware(), AgentLevelFunctionMiddleware()], tools=[custom_tool_wrapped], @@ -1242,7 +1242,7 @@ def custom_tool(message: str) -> str: # Set up mock to 
return a function call first, then a regular response function_call_response = ChatResponse( messages=[ - ChatMessage( + Message( role="assistant", contents=[ Content.from_function_call( @@ -1254,17 +1254,17 @@ def custom_tool(message: str) -> str: ) ] ) - final_response = ChatResponse(messages=[ChatMessage(role="assistant", text="Final response")]) + final_response = ChatResponse(messages=[Message(role="assistant", text="Final response")]) chat_client_base.responses = [function_call_response, final_response] # Should work without errors - agent = ChatAgent( + agent = Agent( chat_client=chat_client_base, middleware=[matching_agent_middleware, matching_function_middleware], tools=[custom_tool_wrapped], ) - response = await agent.run([ChatMessage(role="user", text="test")]) + response = await agent.run([Message(role="user", text="test")]) assert response is not None assert "decorator_type_match_agent" in execution_order @@ -1284,8 +1284,8 @@ async def mismatched_middleware( ) -> None: await call_next(context) - agent = ChatAgent(chat_client=chat_client, middleware=[mismatched_middleware]) - await agent.run([ChatMessage(role="user", text="test")]) + agent = Agent(chat_client=chat_client, middleware=[mismatched_middleware]) + await agent.run([Message(role="user", text="test")]) async def test_only_decorator_specified(self, chat_client_base: "MockBaseChatClient") -> None: """Only decorator specified - rely on decorator.""" @@ -1313,7 +1313,7 @@ def custom_tool(message: str) -> str: # Set up mock to return a function call first, then a regular response function_call_response = ChatResponse( messages=[ - ChatMessage( + Message( role="assistant", contents=[ Content.from_function_call( @@ -1325,17 +1325,17 @@ def custom_tool(message: str) -> str: ) ] ) - final_response = ChatResponse(messages=[ChatMessage(role="assistant", text="Final response")]) + final_response = ChatResponse(messages=[Message(role="assistant", text="Final response")]) chat_client_base.responses = 
[function_call_response, final_response] # Should work - relies on decorator - agent = ChatAgent( + agent = Agent( chat_client=chat_client_base, middleware=[decorator_only_agent, decorator_only_function], tools=[custom_tool_wrapped], ) - response = await agent.run([ChatMessage(role="user", text="test")]) + response = await agent.run([Message(role="user", text="test")]) assert response is not None assert "decorator_only_agent" in execution_order @@ -1369,7 +1369,7 @@ def custom_tool(message: str) -> str: # Set up mock to return a function call first, then a regular response function_call_response = ChatResponse( messages=[ - ChatMessage( + Message( role="assistant", contents=[ Content.from_function_call( @@ -1381,15 +1381,15 @@ def custom_tool(message: str) -> str: ) ] ) - final_response = ChatResponse(messages=[ChatMessage(role="assistant", text="Final response")]) + final_response = ChatResponse(messages=[Message(role="assistant", text="Final response")]) chat_client_base.responses = [function_call_response, final_response] # Should work - relies on type annotations - agent = ChatAgent( + agent = Agent( chat_client=chat_client_base, middleware=[type_only_agent, type_only_function], tools=[custom_tool_wrapped] ) - response = await agent.run([ChatMessage(role="user", text="test")]) + response = await agent.run([Message(role="user", text="test")]) assert response is not None assert "type_only_agent" in execution_order @@ -1403,12 +1403,12 @@ async def no_info_middleware(context: Any, call_next: Any) -> None: # No decora # Should raise MiddlewareException with pytest.raises(MiddlewareException, match="Cannot determine middleware type"): - agent = ChatAgent(chat_client=chat_client, middleware=[no_info_middleware]) - await agent.run([ChatMessage(role="user", text="test")]) + agent = Agent(chat_client=chat_client, middleware=[no_info_middleware]) + await agent.run([Message(role="user", text="test")]) async def test_insufficient_parameters_error(self, chat_client: Any) -> 
None: """Test that middleware with insufficient parameters raises an error.""" - from agent_framework import ChatAgent, agent_middleware + from agent_framework import Agent, agent_middleware # Should raise MiddlewareException about insufficient parameters with pytest.raises(MiddlewareException, match="must have at least 2 parameters"): @@ -1417,8 +1417,8 @@ async def test_insufficient_parameters_error(self, chat_client: Any) -> None: async def insufficient_params_middleware(context: Any) -> None: # Missing 'next' parameter pass - agent = ChatAgent(chat_client=chat_client, middleware=[insufficient_params_middleware]) - await agent.run([ChatMessage(role="user", text="test")]) + agent = Agent(chat_client=chat_client, middleware=[insufficient_params_middleware]) + await agent.run([Message(role="user", text="test")]) async def test_decorator_markers_preserved(self) -> None: """Test that decorator markers are properly set on functions.""" @@ -1483,15 +1483,15 @@ async def process( # Import the ChatMessageStore to configure the agent with a message store factory from agent_framework import ChatMessageStore - # Create ChatAgent with thread tracking middleware and a message store factory + # Create Agent with thread tracking middleware and a message store factory middleware = ThreadTrackingMiddleware() - agent = ChatAgent(chat_client=chat_client, middleware=[middleware], chat_message_store_factory=ChatMessageStore) + agent = Agent(chat_client=chat_client, middleware=[middleware], chat_message_store_factory=ChatMessageStore) # Create a thread that will persist messages between runs thread = agent.get_new_thread() # First run - first_messages = [ChatMessage(role="user", text="first message")] + first_messages = [Message(role="user", text="first message")] first_response = await agent.run(first_messages, thread=thread) # Verify first response @@ -1499,7 +1499,7 @@ async def process( assert len(first_response.messages) > 0 # Second run - use the same thread - second_messages = 
[ChatMessage(role="user", text="second message")] + second_messages = [Message(role="user", text="second message")] second_response = await agent.run(second_messages, thread=thread) # Verify second response @@ -1553,10 +1553,10 @@ async def process( class TestChatAgentChatMiddleware: - """Test cases for chat middleware integration with ChatAgent.""" + """Test cases for chat middleware integration with Agent.""" async def test_class_based_chat_middleware_with_chat_agent(self) -> None: - """Test class-based chat middleware with ChatAgent.""" + """Test class-based chat middleware with Agent.""" execution_order: list[str] = [] class TrackingChatMiddleware(ChatMiddleware): @@ -1565,13 +1565,13 @@ async def process(self, context: ChatContext, call_next: Callable[[ChatContext], await call_next(context) execution_order.append("chat_middleware_after") - # Create ChatAgent with chat middleware + # Create Agent with chat middleware chat_client = MockBaseChatClient() middleware = TrackingChatMiddleware() - agent = ChatAgent(chat_client=chat_client, middleware=[middleware]) + agent = Agent(chat_client=chat_client, middleware=[middleware]) # Execute the agent - messages = [ChatMessage(role="user", text="test message")] + messages = [Message(role="user", text="test message")] response = await agent.run(messages) # Verify response @@ -1585,7 +1585,7 @@ async def process(self, context: ChatContext, call_next: Callable[[ChatContext], ] async def test_function_based_chat_middleware_with_chat_agent(self) -> None: - """Test function-based chat middleware with ChatAgent.""" + """Test function-based chat middleware with Agent.""" execution_order: list[str] = [] async def tracking_chat_middleware( @@ -1595,12 +1595,12 @@ async def tracking_chat_middleware( await call_next(context) execution_order.append("chat_middleware_after") - # Create ChatAgent with function-based chat middleware + # Create Agent with function-based chat middleware chat_client = MockBaseChatClient() - agent = 
ChatAgent(chat_client=chat_client, middleware=[tracking_chat_middleware]) + agent = Agent(chat_client=chat_client, middleware=[tracking_chat_middleware]) # Execute the agent - messages = [ChatMessage(role="user", text="test message")] + messages = [Message(role="user", text="test message")] response = await agent.run(messages) # Verify response @@ -1626,16 +1626,16 @@ async def message_modifier_middleware( if msg.role == "system": continue original_text = msg.text or "" - context.messages[idx] = ChatMessage(role=msg.role, text=f"MODIFIED: {original_text}") + context.messages[idx] = Message(role=msg.role, text=f"MODIFIED: {original_text}") break await call_next(context) - # Create ChatAgent with message-modifying middleware + # Create Agent with message-modifying middleware chat_client = MockBaseChatClient() - agent = ChatAgent(chat_client=chat_client, middleware=[message_modifier_middleware]) + agent = Agent(chat_client=chat_client, middleware=[message_modifier_middleware]) # Execute the agent - messages = [ChatMessage(role="user", text="test message")] + messages = [Message(role="user", text="test message")] response = await agent.run(messages) # Verify that the message was modified (MockBaseChatClient echoes back the input) @@ -1651,17 +1651,17 @@ async def response_override_middleware( ) -> None: # Override the response without calling next() context.result = ChatResponse( - messages=[ChatMessage(role="assistant", text="MiddlewareTypes overridden response")], + messages=[Message(role="assistant", text="MiddlewareTypes overridden response")], response_id="middleware-response-123", ) context.terminate = True - # Create ChatAgent with response-overriding middleware + # Create Agent with response-overriding middleware chat_client = MockBaseChatClient() - agent = ChatAgent(chat_client=chat_client, middleware=[response_override_middleware]) + agent = Agent(chat_client=chat_client, middleware=[response_override_middleware]) # Execute the agent - messages = 
[ChatMessage(role="user", text="test message")] + messages = [Message(role="user", text="test message")] response = await agent.run(messages) # Verify that the response was overridden @@ -1686,12 +1686,12 @@ async def second_middleware(context: ChatContext, call_next: Callable[[ChatConte await call_next(context) execution_order.append("second_after") - # Create ChatAgent with multiple chat middleware + # Create Agent with multiple chat middleware chat_client = MockBaseChatClient() - agent = ChatAgent(chat_client=chat_client, middleware=[first_middleware, second_middleware]) + agent = Agent(chat_client=chat_client, middleware=[first_middleware, second_middleware]) # Execute the agent - messages = [ChatMessage(role="user", text="test message")] + messages = [Message(role="user", text="test message")] response = await agent.run(messages) # Verify response @@ -1715,9 +1715,9 @@ async def process(self, context: ChatContext, call_next: Callable[[ChatContext], await call_next(context) execution_order.append("streaming_chat_after") - # Create ChatAgent with chat middleware + # Create Agent with chat middleware chat_client = MockBaseChatClient() - agent = ChatAgent(chat_client=chat_client, middleware=[StreamingTrackingChatMiddleware()]) + agent = Agent(chat_client=chat_client, middleware=[StreamingTrackingChatMiddleware()]) # Set up mock streaming responses # TODO: refactor to return a ResponseStream object @@ -1729,7 +1729,7 @@ async def process(self, context: ChatContext, call_next: Callable[[ChatContext], ] # Execute streaming - messages = [ChatMessage(role="user", text="test message")] + messages = [Message(role="user", text="test message")] updates: list[AgentResponseUpdate] = [] async for update in agent.run(messages, stream=True): updates.append(update) @@ -1752,18 +1752,18 @@ class PreTerminationChatMiddleware(ChatMiddleware): async def process(self, context: ChatContext, call_next: Callable[[ChatContext], Awaitable[None]]) -> None: 
execution_order.append("middleware_before") # Set a custom response since we're terminating - context.result = ChatResponse(messages=[ChatMessage(role="assistant", text="Terminated by middleware")]) + context.result = ChatResponse(messages=[Message(role="assistant", text="Terminated by middleware")]) raise MiddlewareTermination # We call next() but since terminate=True, execution should stop await call_next(context) execution_order.append("middleware_after") - # Create ChatAgent with terminating middleware + # Create Agent with terminating middleware chat_client = MockBaseChatClient() - agent = ChatAgent(chat_client=chat_client, middleware=[PreTerminationChatMiddleware()]) + agent = Agent(chat_client=chat_client, middleware=[PreTerminationChatMiddleware()]) # Execute the agent - messages = [ChatMessage(role="user", text="test message")] + messages = [Message(role="user", text="test message")] response = await agent.run(messages) # Verify response was from middleware @@ -1783,12 +1783,12 @@ async def process(self, context: ChatContext, call_next: Callable[[ChatContext], execution_order.append("middleware_after") context.terminate = True - # Create ChatAgent with terminating middleware + # Create Agent with terminating middleware chat_client = MockBaseChatClient() - agent = ChatAgent(chat_client=chat_client, middleware=[PostTerminationChatMiddleware()]) + agent = Agent(chat_client=chat_client, middleware=[PostTerminationChatMiddleware()]) # Execute the agent - messages = [ChatMessage(role="user", text="test message")] + messages = [Message(role="user", text="test message")] response = await agent.run(messages) # Verify response is from actual execution @@ -1801,7 +1801,7 @@ async def process(self, context: ChatContext, call_next: Callable[[ChatContext], ] async def test_combined_middleware(self) -> None: - """Test ChatAgent with combined middleware types.""" + """Test Agent with combined middleware types.""" execution_order: list[str] = [] async def 
agent_middleware(context: AgentContext, call_next: Callable[[AgentContext], Awaitable[None]]) -> None: @@ -1821,13 +1821,13 @@ async def function_middleware( await call_next(context) execution_order.append("function_middleware_after") - # Create ChatAgent with function middleware and tools - agent = ChatAgent( + # Create Agent with function middleware and tools + agent = Agent( chat_client=MockBaseChatClient(), middleware=[chat_middleware, function_middleware, agent_middleware], tools=[sample_tool_function], ) - await agent.run([ChatMessage(role="user", text="test")]) + await agent.run([Message(role="user", text="test")]) assert execution_order == [ "agent_middleware_before", @@ -1858,12 +1858,12 @@ async def kwargs_middleware( await call_next(context) - # Create ChatAgent with agent middleware + # Create Agent with agent middleware chat_client = MockBaseChatClient() - agent = ChatAgent(chat_client=chat_client, middleware=[kwargs_middleware]) + agent = Agent(chat_client=chat_client, middleware=[kwargs_middleware]) # Execute the agent with custom parameters - messages = [ChatMessage(role="user", text="test message")] + messages = [Message(role="user", text="test message")] response = await agent.run(messages, temperature=0.7, max_tokens=100, custom_param="test_value") # Verify response @@ -1920,7 +1920,7 @@ async def kwargs_middleware( # yield AgentResponseUpdate() # return _stream() -# return AgentResponse(messages=[ChatMessage(role="assistant", text="response")]) +# return AgentResponse(messages=[Message(role="assistant", text="response")]) # def get_new_thread(self, **kwargs): # return None diff --git a/python/packages/core/tests/core/test_middleware_with_chat.py b/python/packages/core/tests/core/test_middleware_with_chat.py index 15621f759f..76883ffca3 100644 --- a/python/packages/core/tests/core/test_middleware_with_chat.py +++ b/python/packages/core/tests/core/test_middleware_with_chat.py @@ -4,16 +4,16 @@ from typing import Any from agent_framework import ( - 
ChatAgent, - ChatClientProtocol, + Agent, ChatContext, - ChatMessage, ChatMiddleware, ChatResponse, ChatResponseUpdate, Content, FunctionInvocationContext, FunctionTool, + Message, + SupportsChatGetResponse, chat_middleware, function_middleware, ) @@ -24,7 +24,7 @@ class TestChatMiddleware: """Test cases for chat middleware functionality.""" - async def test_class_based_chat_middleware(self, chat_client_base: ChatClientProtocol) -> None: + async def test_class_based_chat_middleware(self, chat_client_base: SupportsChatGetResponse) -> None: """Test class-based chat middleware with ChatClient.""" execution_order: list[str] = [] @@ -42,7 +42,7 @@ async def process( chat_client_base.chat_middleware = [LoggingChatMiddleware()] # Execute chat client directly - messages = [ChatMessage(role="user", text="test message")] + messages = [Message(role="user", text="test message")] response = await chat_client_base.get_response(messages) # Verify response @@ -69,7 +69,7 @@ async def logging_chat_middleware( chat_client_base.chat_middleware = [logging_chat_middleware] # Execute chat client directly - messages = [ChatMessage(role="user", text="test message")] + messages = [Message(role="user", text="test message")] response = await chat_client_base.get_response(messages) # Verify response @@ -90,14 +90,14 @@ async def message_modifier_middleware( # Modify the first message by adding a prefix if context.messages and len(context.messages) > 0: original_text = context.messages[0].text or "" - context.messages[0] = ChatMessage(role=context.messages[0].role, text=f"MODIFIED: {original_text}") + context.messages[0] = Message(role=context.messages[0].role, text=f"MODIFIED: {original_text}") await call_next(context) # Add middleware to chat client chat_client_base.chat_middleware = [message_modifier_middleware] # Execute chat client - messages = [ChatMessage(role="user", text="test message")] + messages = [Message(role="user", text="test message")] response = await 
chat_client_base.get_response(messages) # Verify that the message was modified (MockChatClient echoes back the input) @@ -115,7 +115,7 @@ async def response_override_middleware( ) -> None: # Override the response without calling next() context.result = ChatResponse( - messages=[ChatMessage(role="assistant", text="MiddlewareTypes overridden response")], + messages=[Message(role="assistant", text="MiddlewareTypes overridden response")], response_id="middleware-response-123", ) context.terminate = True @@ -124,7 +124,7 @@ async def response_override_middleware( chat_client_base.chat_middleware = [response_override_middleware] # Execute chat client - messages = [ChatMessage(role="user", text="test message")] + messages = [Message(role="user", text="test message")] response = await chat_client_base.get_response(messages) # Verify that the response was overridden @@ -153,7 +153,7 @@ async def second_middleware(context: ChatContext, call_next: Callable[[ChatConte chat_client_base.chat_middleware = [first_middleware, second_middleware] # Execute chat client - messages = [ChatMessage(role="user", text="test message")] + messages = [Message(role="user", text="test message")] response = await chat_client_base.get_response(messages) # Verify response @@ -169,7 +169,7 @@ async def second_middleware(context: ChatContext, call_next: Callable[[ChatConte assert execution_order == expected_order async def test_chat_agent_with_chat_middleware(self) -> None: - """Test ChatAgent with chat middleware specified at agent level.""" + """Test Agent with chat middleware specified at agent level.""" execution_order: list[str] = [] @chat_middleware @@ -182,11 +182,11 @@ async def agent_level_chat_middleware( chat_client = MockBaseChatClient() - # Create ChatAgent with chat middleware - agent = ChatAgent(chat_client=chat_client, middleware=[agent_level_chat_middleware]) + # Create Agent with chat middleware + agent = Agent(chat_client=chat_client, middleware=[agent_level_chat_middleware]) # 
Execute the agent - messages = [ChatMessage(role="user", text="test message")] + messages = [Message(role="user", text="test message")] response = await agent.run(messages) # Verify response @@ -201,7 +201,7 @@ async def agent_level_chat_middleware( ] async def test_chat_agent_with_multiple_chat_middleware(self, chat_client_base: "MockBaseChatClient") -> None: - """Test that ChatAgent can have multiple chat middleware.""" + """Test that Agent can have multiple chat middleware.""" execution_order: list[str] = [] @chat_middleware @@ -216,11 +216,11 @@ async def second_middleware(context: ChatContext, call_next: Callable[[ChatConte await call_next(context) execution_order.append("second_after") - # Create ChatAgent with multiple chat middleware - agent = ChatAgent(chat_client=chat_client_base, middleware=[first_middleware, second_middleware]) + # Create Agent with multiple chat middleware + agent = Agent(chat_client=chat_client_base, middleware=[first_middleware, second_middleware]) # Execute the agent - messages = [ChatMessage(role="user", text="test message")] + messages = [Message(role="user", text="test message")] response = await agent.run(messages) # Verify response @@ -261,7 +261,7 @@ def upper_case_update(update: ChatResponseUpdate) -> ChatResponseUpdate: chat_client_base.chat_middleware = [streaming_middleware] # Execute streaming response - messages = [ChatMessage(role="user", text="test message")] + messages = [Message(role="user", text="test message")] updates: list[object] = [] async for update in chat_client_base.get_response(messages, stream=True): updates.append(update) @@ -285,19 +285,19 @@ async def counting_middleware( await call_next(context) # First call with run-level middleware - messages = [ChatMessage(role="user", text="first message")] + messages = [Message(role="user", text="first message")] response1 = await chat_client_base.get_response(messages, middleware=[counting_middleware]) assert response1 is not None assert execution_count["count"] 
== 1 # Second call WITHOUT run-level middleware - should not execute the middleware - messages = [ChatMessage(role="user", text="second message")] + messages = [Message(role="user", text="second message")] response2 = await chat_client_base.get_response(messages) assert response2 is not None assert execution_count["count"] == 1 # Should still be 1, not 2 # Third call with run-level middleware again - should execute - messages = [ChatMessage(role="user", text="third message")] + messages = [Message(role="user", text="third message")] response3 = await chat_client_base.get_response(messages, middleware=[counting_middleware]) assert response3 is not None assert execution_count["count"] == 2 # Should be 2 now @@ -328,7 +328,7 @@ async def kwargs_middleware(context: ChatContext, call_next: Callable[[ChatConte chat_client_base.chat_middleware = [kwargs_middleware] # Execute chat client with custom parameters - messages = [ChatMessage(role="user", text="test message")] + messages = [Message(role="user", text="test message")] response = await chat_client_base.get_response( messages, temperature=0.7, max_tokens=100, custom_param="test_value" ) @@ -383,7 +383,7 @@ def sample_tool(location: str) -> str: # Prepare responses that will trigger function invocation function_call_response = ChatResponse( messages=[ - ChatMessage( + Message( role="assistant", contents=[ Content.from_function_call( @@ -396,12 +396,12 @@ def sample_tool(location: str) -> str: ] ) final_response = ChatResponse( - messages=[ChatMessage(role="assistant", text="Based on the weather data, it's sunny!")] + messages=[Message(role="assistant", text="Based on the weather data, it's sunny!")] ) chat_client.run_responses = [function_call_response, final_response] # Execute the chat client directly with tools - this should trigger function invocation and middleware - messages = [ChatMessage(role="user", text="What's the weather in San Francisco?")] + messages = [Message(role="user", text="What's the weather in 
San Francisco?")] response = await chat_client.get_response(messages, options={"tools": [sample_tool_wrapped]}) # Verify response @@ -445,7 +445,7 @@ def sample_tool(location: str) -> str: # Prepare responses that will trigger function invocation function_call_response = ChatResponse( messages=[ - ChatMessage( + Message( role="assistant", contents=[ Content.from_function_call( @@ -460,7 +460,7 @@ def sample_tool(location: str) -> str: chat_client.run_responses = [function_call_response] # Execute the chat client directly with run-level middleware and tools - messages = [ChatMessage(role="user", text="What's the weather in New York?")] + messages = [Message(role="user", text="What's the weather in New York?")] response = await chat_client.get_response( messages, options={"tools": [sample_tool_wrapped]}, middleware=[run_level_function_middleware] ) diff --git a/python/packages/core/tests/core/test_observability.py b/python/packages/core/tests/core/test_observability.py index a85f851957..b2c35c386c 100644 --- a/python/packages/core/tests/core/test_observability.py +++ b/python/packages/core/tests/core/test_observability.py @@ -14,10 +14,10 @@ AGENT_FRAMEWORK_USER_AGENT, AgentResponse, BaseChatClient, - ChatMessage, ChatResponse, ChatResponseUpdate, Content, + Message, ResponseStream, SupportsAgentRun, UsageDetails, @@ -162,7 +162,7 @@ def service_url(self): return "https://test.example.com" def _inner_get_response( - self, *, messages: MutableSequence[ChatMessage], stream: bool, options: dict[str, Any], **kwargs: Any + self, *, messages: MutableSequence[Message], stream: bool, options: dict[str, Any], **kwargs: Any ) -> Awaitable[ChatResponse] | ResponseStream[ChatResponseUpdate, ChatResponse]: if stream: return self._get_streaming_response(messages=messages, options=options, **kwargs) @@ -173,16 +173,16 @@ async def _get() -> ChatResponse: return _get() async def _get_non_streaming_response( - self, *, messages: MutableSequence[ChatMessage], options: dict[str, Any], 
**kwargs: Any + self, *, messages: MutableSequence[Message], options: dict[str, Any], **kwargs: Any ) -> ChatResponse: return ChatResponse( - messages=[ChatMessage("assistant", ["Test response"])], + messages=[Message("assistant", ["Test response"])], usage_details=UsageDetails(input_token_count=10, output_token_count=20), finish_reason=None, ) def _get_streaming_response( - self, *, messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any + self, *, messages: MutableSequence[Message], options: dict[str, Any], **kwargs: Any ) -> ResponseStream[ChatResponseUpdate, ChatResponse]: async def _stream() -> AsyncIterable[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[Content.from_text("Hello")], role="assistant") @@ -203,7 +203,7 @@ async def test_chat_client_observability(mock_chat_client, span_exporter: InMemo """Test that when diagnostics are enabled, telemetry is applied.""" client = mock_chat_client() - messages = [ChatMessage(role="user", text="Test message")] + messages = [Message(role="user", text="Test message")] span_exporter.clear() response = await client.get_response(messages=messages, model_id="Test") assert response is not None @@ -226,7 +226,7 @@ async def test_chat_client_streaming_observability( ): """Test streaming telemetry through the chat telemetry mixin.""" client = mock_chat_client() - messages = [ChatMessage(role="user", text="Test")] + messages = [Message(role="user", text="Test")] span_exporter.clear() # Collect all yielded updates updates = [] @@ -257,7 +257,7 @@ async def test_chat_client_observability_with_instructions( client = mock_chat_client() - messages = [ChatMessage(role="user", text="Test message")] + messages = [Message(role="user", text="Test message")] options = {"model_id": "Test", "instructions": "You are a helpful assistant."} span_exporter.clear() response = await client.get_response(messages=messages, options=options) @@ -286,7 +286,7 @@ async def 
test_chat_client_streaming_observability_with_instructions( import json client = mock_chat_client() - messages = [ChatMessage(role="user", text="Test")] + messages = [Message(role="user", text="Test")] options = {"model_id": "Test", "instructions": "You are a helpful assistant."} span_exporter.clear() @@ -315,7 +315,7 @@ async def test_chat_client_observability_without_instructions( """Test that system_instructions attribute is not set when instructions are not provided.""" client = mock_chat_client() - messages = [ChatMessage(role="user", text="Test message")] + messages = [Message(role="user", text="Test message")] options = {"model_id": "Test"} # No instructions span_exporter.clear() response = await client.get_response(messages=messages, options=options) @@ -336,7 +336,7 @@ async def test_chat_client_observability_with_empty_instructions( """Test that system_instructions attribute is not set when instructions is an empty string.""" client = mock_chat_client() - messages = [ChatMessage(role="user", text="Test message")] + messages = [Message(role="user", text="Test message")] options = {"model_id": "Test", "instructions": ""} # Empty string span_exporter.clear() response = await client.get_response(messages=messages, options=options) @@ -359,7 +359,7 @@ async def test_chat_client_observability_with_list_instructions( client = mock_chat_client() - messages = [ChatMessage(role="user", text="Test message")] + messages = [Message(role="user", text="Test message")] options = {"model_id": "Test", "instructions": ["Instruction 1", "Instruction 2"]} span_exporter.clear() response = await client.get_response(messages=messages, options=options) @@ -380,7 +380,7 @@ async def test_chat_client_observability_with_list_instructions( async def test_chat_client_without_model_id_observability(mock_chat_client, span_exporter: InMemorySpanExporter): """Test telemetry shouldn't fail when the model_id is not provided for unknown reason.""" client = mock_chat_client() - messages = 
[ChatMessage(role="user", text="Test")] + messages = [Message(role="user", text="Test")] span_exporter.clear() response = await client.get_response(messages=messages) @@ -399,7 +399,7 @@ async def test_chat_client_streaming_without_model_id_observability( ): """Test streaming telemetry shouldn't fail when the model_id is not provided for unknown reason.""" client = mock_chat_client() - messages = [ChatMessage(role="user", text="Test")] + messages = [Message(role="user", text="Test")] span_exporter.clear() # Collect all yielded updates updates = [] @@ -448,7 +448,7 @@ def run(self, messages=None, *, thread=None, stream=False, **kwargs): async def _run_impl(self, messages=None, *, thread=None, **kwargs): return AgentResponse( - messages=[ChatMessage("assistant", ["Agent response"])], + messages=[Message("assistant", ["Agent response"])], usage_details=UsageDetails(input_token_count=15, output_token_count=25), response_id="test_response_id", ) @@ -1261,7 +1261,7 @@ async def _inner_get_response(self, *, messages, options, **kwargs): raise ValueError("Test error") client = FailingChatClient() - messages = [ChatMessage(role="user", text="Test")] + messages = [Message(role="user", text="Test")] span_exporter.clear() with pytest.raises(ValueError, match="Test error"): @@ -1291,7 +1291,7 @@ async def _stream(): return ResponseStream(_stream(), finalizer=ChatResponse.from_updates) client = FailingStreamingChatClient() - messages = [ChatMessage(role="user", text="Test")] + messages = [Message(role="user", text="Test")] span_exporter.clear() with pytest.raises(ValueError, match="Streaming error"): @@ -1580,7 +1580,7 @@ async def run( self._run_stream(messages=messages, thread=thread), finalizer=lambda x: AgentResponse.from_updates(x), ) - return AgentResponse(messages=[ChatMessage("assistant", ["Test response"])]) + return AgentResponse(messages=[Message("assistant", ["Test response"])]) async def _run_stream( self, @@ -1691,7 +1691,7 @@ def run(self, messages=None, *, 
stream=False, thread=None, **kwargs): return self._run_impl(messages=messages, **kwargs) async def _run_impl(self, messages=None, *, thread=None, **kwargs): - return AgentResponse(messages=[ChatMessage("assistant", ["Test"])]) + return AgentResponse(messages=[Message("assistant", ["Test"])]) def _run_stream_impl(self, messages=None, *, thread=None, **kwargs): async def _stream(): @@ -1767,13 +1767,13 @@ async def test_capture_messages_with_finish_reason(mock_chat_client, span_export class ClientWithFinishReason(mock_chat_client): async def _inner_get_response(self, *, messages, options, **kwargs): return ChatResponse( - messages=[ChatMessage(role="assistant", text="Done")], + messages=[Message(role="assistant", text="Done")], usage_details=UsageDetails(input_token_count=5, output_token_count=10), finish_reason="stop", ) client = ClientWithFinishReason() - messages = [ChatMessage(role="user", text="Test")] + messages = [Message(role="user", text="Test")] span_exporter.clear() response = await client.get_response(messages=messages, model_id="Test") @@ -1863,7 +1863,7 @@ class FailingStreamingAgent(AgentTelemetryLayer, _FailingStreamingAgent): async def test_chat_client_when_disabled(mock_chat_client, span_exporter: InMemorySpanExporter): """Test that no spans are created when instrumentation is disabled.""" client = mock_chat_client() - messages = [ChatMessage(role="user", text="Test")] + messages = [Message(role="user", text="Test")] span_exporter.clear() response = await client.get_response(messages=messages, model_id="Test") @@ -1878,7 +1878,7 @@ async def test_chat_client_when_disabled(mock_chat_client, span_exporter: InMemo async def test_chat_client_streaming_when_disabled(mock_chat_client, span_exporter: InMemorySpanExporter): """Test streaming creates no spans when instrumentation is disabled.""" client = mock_chat_client() - messages = [ChatMessage(role="user", text="Test")] + messages = [Message(role="user", text="Test")] span_exporter.clear() updates = [] 
@@ -2208,14 +2208,14 @@ def service_url(self): return "https://test.example.com" def _inner_get_response( - self, *, messages: MutableSequence[ChatMessage], stream: bool, options: dict[str, Any], **kwargs: Any + self, *, messages: MutableSequence[Message], stream: bool, options: dict[str, Any], **kwargs: Any ) -> Awaitable[ChatResponse] | ResponseStream[ChatResponseUpdate, ChatResponse]: async def _get() -> ChatResponse: self.call_count += 1 if self.call_count == 1: return ChatResponse( messages=[ - ChatMessage( + Message( role="assistant", contents=[ Content.from_function_call( @@ -2228,7 +2228,7 @@ async def _get() -> ChatResponse: ], ) return ChatResponse( - messages=[ChatMessage(role="assistant", text="The weather in Seattle is sunny!")], + messages=[Message(role="assistant", text="The weather in Seattle is sunny!")], ) return _get() @@ -2237,7 +2237,7 @@ async def _get() -> ChatResponse: span_exporter.clear() response = await client.get_response( - messages=[ChatMessage(role="user", text="What's the weather in Seattle?")], + messages=[Message(role="user", text="What's the weather in Seattle?")], options={"tools": [get_weather], "tool_choice": "auto"}, ) diff --git a/python/packages/core/tests/core/test_threads.py b/python/packages/core/tests/core/test_threads.py index a891f6b440..5b3fc5ffd1 100644 --- a/python/packages/core/tests/core/test_threads.py +++ b/python/packages/core/tests/core/test_threads.py @@ -5,7 +5,7 @@ import pytest -from agent_framework import AgentThread, ChatMessage, ChatMessageStore +from agent_framework import AgentThread, ChatMessageStore, Message from agent_framework._threads import AgentThreadState, ChatMessageStoreState from agent_framework.exceptions import AgentThreadException @@ -13,15 +13,15 @@ class MockChatMessageStore: """Mock implementation of ChatMessageStoreProtocol for testing.""" - def __init__(self, messages: list[ChatMessage] | None = None) -> None: + def __init__(self, messages: list[Message] | None = None) -> None: 
self._messages = messages or [] self._serialize_calls = 0 self._deserialize_calls = 0 - async def list_messages(self) -> list[ChatMessage]: + async def list_messages(self) -> list[Message]: return self._messages - async def add_messages(self, messages: Sequence[ChatMessage]) -> None: + async def add_messages(self, messages: Sequence[Message]) -> None: self._messages.extend(messages) async def serialize(self, **kwargs: Any) -> Any: @@ -41,19 +41,19 @@ async def deserialize(cls, serialized_store_state: Any, **kwargs: Any) -> "MockC @pytest.fixture -def sample_messages() -> list[ChatMessage]: +def sample_messages() -> list[Message]: """Fixture providing sample chat messages for testing.""" return [ - ChatMessage(role="user", text="Hello", message_id="msg1"), - ChatMessage(role="assistant", text="Hi there!", message_id="msg2"), - ChatMessage(role="user", text="How are you?", message_id="msg3"), + Message(role="user", text="Hello", message_id="msg1"), + Message(role="assistant", text="Hi there!", message_id="msg2"), + Message(role="user", text="How are you?", message_id="msg3"), ] @pytest.fixture -def sample_message() -> ChatMessage: +def sample_message() -> Message: """Fixture providing a single sample chat message for testing.""" - return ChatMessage(role="user", text="Test message", message_id="test1") + return Message(role="user", text="Test message", message_id="test1") class TestAgentThread: @@ -124,14 +124,14 @@ def test_message_store_setter_with_none_values(self) -> None: thread.message_store = None # Should not raise error assert thread.message_store is None - async def test_get_messages_with_message_store(self, sample_messages: list[ChatMessage]) -> None: + async def test_get_messages_with_message_store(self, sample_messages: list[Message]) -> None: """Test get_messages when message_store is set.""" store = ChatMessageStore(sample_messages) thread = AgentThread(message_store=store) assert thread.message_store is not None - messages: list[ChatMessage] = await 
thread.message_store.list_messages() + messages: list[Message] = await thread.message_store.list_messages() assert messages is not None assert len(messages) == 3 @@ -145,7 +145,7 @@ async def test_get_messages_with_no_message_store(self) -> None: assert thread.message_store is None - async def test_on_new_messages_with_service_thread_id(self, sample_message: ChatMessage) -> None: + async def test_on_new_messages_with_service_thread_id(self, sample_message: Message) -> None: """Test _on_new_messages when service_thread_id is set (should do nothing).""" thread = AgentThread(service_thread_id="test-conv") @@ -154,7 +154,7 @@ async def test_on_new_messages_with_service_thread_id(self, sample_message: Chat # Should not create a message store assert thread.message_store is None - async def test_on_new_messages_single_message_creates_store(self, sample_message: ChatMessage) -> None: + async def test_on_new_messages_single_message_creates_store(self, sample_message: Message) -> None: """Test _on_new_messages with single message creates ChatMessageStore.""" thread = AgentThread() @@ -166,7 +166,7 @@ async def test_on_new_messages_single_message_creates_store(self, sample_message assert len(messages) == 1 assert messages[0].text == "Test message" - async def test_on_new_messages_multiple_messages(self, sample_messages: list[ChatMessage]) -> None: + async def test_on_new_messages_multiple_messages(self, sample_messages: list[Message]) -> None: """Test _on_new_messages with multiple messages.""" thread = AgentThread() @@ -176,9 +176,9 @@ async def test_on_new_messages_multiple_messages(self, sample_messages: list[Cha messages = await thread.message_store.list_messages() assert len(messages) == 3 - async def test_on_new_messages_with_existing_store(self, sample_message: ChatMessage) -> None: + async def test_on_new_messages_with_existing_store(self, sample_message: Message) -> None: """Test _on_new_messages adds to existing message store.""" - initial_messages = 
[ChatMessage(role="user", text="Initial", message_id="init1")] + initial_messages = [Message(role="user", text="Initial", message_id="init1")] store = ChatMessageStore(initial_messages) thread = AgentThread(message_store=store) @@ -199,7 +199,7 @@ async def test_deserialize_with_service_thread_id(self) -> None: assert thread.service_thread_id == "test-conv-123" assert thread.message_store is None - async def test_deserialize_with_store_state(self, sample_messages: list[ChatMessage]) -> None: + async def test_deserialize_with_store_state(self, sample_messages: list[Message]) -> None: """Test _deserialize with chat_message_store_state.""" store_state = {"messages": sample_messages} serialized_data = {"service_thread_id": None, "chat_message_store_state": store_state} @@ -226,7 +226,7 @@ async def test_deserialize_with_existing_store(self) -> None: thread = AgentThread(message_store=store) serialized_data: dict[str, Any] = { "service_thread_id": None, - "chat_message_store_state": {"messages": [ChatMessage(role="user", text="test")]}, + "chat_message_store_state": {"messages": [Message(role="user", text="test")]}, } await thread.update_from_thread_state(serialized_data) @@ -272,7 +272,7 @@ async def test_serialize_with_kwargs(self) -> None: assert store._serialize_calls == 1 # pyright: ignore[reportPrivateUsage] - async def test_serialize_round_trip_messages(self, sample_messages: list[ChatMessage]) -> None: + async def test_serialize_round_trip_messages(self, sample_messages: list[Message]) -> None: """Test a roundtrip of the serialization.""" store = ChatMessageStore(sample_messages) thread = AgentThread(message_store=store) @@ -298,12 +298,12 @@ def test_init_empty(self) -> None: store = ChatMessageStore() assert len(store.messages) == 0 - def test_init_with_messages(self, sample_messages: list[ChatMessage]) -> None: + def test_init_with_messages(self, sample_messages: list[Message]) -> None: """Test ChatMessageStore initialization with messages.""" store = 
ChatMessageStore(sample_messages) assert len(store.messages) == 3 - async def test_add_messages(self, sample_messages: list[ChatMessage]) -> None: + async def test_add_messages(self, sample_messages: list[Message]) -> None: """Test adding messages to the store.""" store = ChatMessageStore() @@ -313,7 +313,7 @@ async def test_add_messages(self, sample_messages: list[ChatMessage]) -> None: messages = await store.list_messages() assert messages[0].text == "Hello" - async def test_get_messages(self, sample_messages: list[ChatMessage]) -> None: + async def test_get_messages(self, sample_messages: list[Message]) -> None: """Test getting messages from the store.""" store = ChatMessageStore(sample_messages) @@ -322,7 +322,7 @@ async def test_get_messages(self, sample_messages: list[ChatMessage]) -> None: assert len(messages) == 3 assert messages[0].message_id == "msg1" - async def test_serialize_state(self, sample_messages: list[ChatMessage]) -> None: + async def test_serialize_state(self, sample_messages: list[Message]) -> None: """Test serializing store state.""" store = ChatMessageStore(sample_messages) @@ -340,7 +340,7 @@ async def test_serialize_state_empty(self) -> None: assert "messages" in result assert len(result["messages"]) == 0 - async def test_deserialize_state(self, sample_messages: list[ChatMessage]) -> None: + async def test_deserialize_state(self, sample_messages: list[Message]) -> None: """Test deserializing store state.""" store = ChatMessageStore() state_data = {"messages": sample_messages} @@ -371,7 +371,7 @@ async def test_deserialize_state_empty(self) -> None: class TestStoreState: """Test cases for ChatMessageStoreState class.""" - def test_init(self, sample_messages: list[ChatMessage]) -> None: + def test_init(self, sample_messages: list[Message]) -> None: """Test ChatMessageStoreState initialization.""" state = ChatMessageStoreState(messages=sample_messages) @@ -449,7 +449,7 @@ def test_init_with_chat_message_store_state_no_messages(self) -> None: 
def test_init_with_chat_message_store_state_object(self) -> None: """Test AgentThreadState initialization with ChatMessageStoreState object.""" - store_state = ChatMessageStoreState(messages=[ChatMessage(role="user", text="test")]) + store_state = ChatMessageStoreState(messages=[Message(role="user", text="test")]) state = AgentThreadState(chat_message_store_state=store_state) assert state.service_thread_id is None @@ -479,7 +479,7 @@ def test_init_with_dict_messages(self) -> None: state = ChatMessageStoreState(messages=messages) assert len(state.messages) == 2 - assert isinstance(state.messages[0], ChatMessage) + assert isinstance(state.messages[0], Message) assert state.messages[0].text == "Hello" @@ -551,7 +551,7 @@ async def test_deserialize_with_failing_message_store_raises(self) -> None: """Test deserialize raises AgentThreadException when message store fails.""" class FailingStore: - async def add_messages(self, messages: Sequence[ChatMessage], **kwargs: Any) -> None: + async def add_messages(self, messages: Sequence[Message], **kwargs: Any) -> None: raise RuntimeError("Store failed") serialized_data = { diff --git a/python/packages/core/tests/core/test_types.py b/python/packages/core/tests/core/test_types.py index 3fe9a1cf88..8bb503df95 100644 --- a/python/packages/core/tests/core/test_types.py +++ b/python/packages/core/tests/core/test_types.py @@ -14,11 +14,11 @@ AgentResponse, AgentResponseUpdate, Annotation, - ChatMessage, ChatOptions, ChatResponse, ChatResponseUpdate, Content, + Message, ResponseStream, TextSpanRegion, ToolMode, @@ -566,13 +566,13 @@ def test_ai_content_serialization(args: dict): assert content == deserialized -# region ChatMessage +# region Message def test_chat_message_text(): - """Test the ChatMessage class to ensure it initializes correctly with text content.""" - # Create a ChatMessage with a role and text content - message = ChatMessage(role="user", text="Hello, how are you?") + """Test the Message class to ensure it initializes 
correctly with text content.""" + # Create a Message with a role and text content + message = Message(role="user", text="Hello, how are you?") # Check the type and content assert message.role == "user" @@ -586,11 +586,11 @@ def test_chat_message_text(): def test_chat_message_contents(): - """Test the ChatMessage class to ensure it initializes correctly with contents.""" - # Create a ChatMessage with a role and multiple contents + """Test the Message class to ensure it initializes correctly with contents.""" + # Create a Message with a role and multiple contents content1 = Content.from_text("Hello, how are you?") content2 = Content.from_text("I'm fine, thank you!") - message = ChatMessage(role="user", contents=[content1, content2]) + message = Message(role="user", contents=[content1, content2]) # Check the type and content assert message.role == "user" @@ -603,7 +603,7 @@ def test_chat_message_contents(): def test_chat_message_with_chatrole_instance(): - m = ChatMessage(role="user", text="hi") + m = Message(role="user", text="hi") assert m.role == "user" assert m.text == "hi" @@ -613,8 +613,8 @@ def test_chat_message_with_chatrole_instance(): def test_chat_response(): """Test the ChatResponse class to ensure it initializes correctly with a message.""" - # Create a ChatMessage - message = ChatMessage(role="assistant", text="I'm doing well, thank you!") + # Create a Message + message = Message(role="assistant", text="I'm doing well, thank you!") # Create a ChatResponse with the message response = ChatResponse(messages=message) @@ -622,7 +622,7 @@ def test_chat_response(): # Check the type and content assert response.messages[0].role == "assistant" assert response.messages[0].text == "I'm doing well, thank you!" 
- assert isinstance(response.messages[0], ChatMessage) + assert isinstance(response.messages[0], Message) # __str__ returns text assert str(response) == response.text @@ -633,8 +633,8 @@ class OutputModel(BaseModel): def test_chat_response_with_format(): """Test the ChatResponse class to ensure it initializes correctly with a message.""" - # Create a ChatMessage - message = ChatMessage(role="assistant", text='{"response": "Hello"}') + # Create a Message + message = Message(role="assistant", text='{"response": "Hello"}') # Create a ChatResponse with the message response = ChatResponse(messages=message, response_format=OutputModel) @@ -642,7 +642,7 @@ def test_chat_response_with_format(): # Check the type and content assert response.messages[0].role == "assistant" assert response.messages[0].text == '{"response": "Hello"}' - assert isinstance(response.messages[0], ChatMessage) + assert isinstance(response.messages[0], Message) assert response.text == '{"response": "Hello"}' assert response.value is not None assert response.value.response == "Hello" @@ -650,8 +650,8 @@ def test_chat_response_with_format(): def test_chat_response_with_format_init(): """Test the ChatResponse class to ensure it initializes correctly with a message.""" - # Create a ChatMessage - message = ChatMessage(role="assistant", text='{"response": "Hello"}') + # Create a Message + message = Message(role="assistant", text='{"response": "Hello"}') # Create a ChatResponse with the message response = ChatResponse(messages=message, response_format=OutputModel) @@ -659,7 +659,7 @@ def test_chat_response_with_format_init(): # Check the type and content assert response.messages[0].role == "assistant" assert response.messages[0].text == '{"response": "Hello"}' - assert isinstance(response.messages[0], ChatMessage) + assert isinstance(response.messages[0], Message) assert response.text == '{"response": "Hello"}' assert response.value is not None assert response.value.response == "Hello" @@ -673,7 +673,7 @@ 
class StrictSchema(BaseModel): name: str = Field(min_length=10) score: int = Field(gt=0, le=100) - message = ChatMessage(role="assistant", text='{"id": 1, "name": "test", "score": -5}') + message = Message(role="assistant", text='{"id": 1, "name": "test", "score": -5}') response = ChatResponse(messages=message, response_format=StrictSchema) with raises(ValidationError) as exc_info: @@ -694,7 +694,7 @@ class StrictSchema(BaseModel): name: str = Field(min_length=10) score: int = Field(gt=0, le=100) - message = ChatMessage(role="assistant", text='{"id": 1, "name": "test", "score": -5}') + message = Message(role="assistant", text='{"id": 1, "name": "test", "score": -5}') response = AgentResponse(messages=message, response_format=StrictSchema) with raises(ValidationError) as exc_info: @@ -712,7 +712,7 @@ class StrictSchema(BaseModel): def test_chat_response_update(): """Test the ChatResponseUpdate class to ensure it initializes correctly with a message.""" - # Create a ChatMessage + # Create a Message message = Content.from_text(text="I'm doing well, thank you!") # Create a ChatResponseUpdate with the message @@ -726,7 +726,7 @@ def test_chat_response_update(): def test_chat_response_updates_to_chat_response_one(): """Test converting ChatResponseUpdate to ChatResponse.""" - # Create a ChatMessage + # Create a Message message1 = Content.from_text("I'm doing well, ") message2 = Content.from_text("thank you!") @@ -742,14 +742,14 @@ def test_chat_response_updates_to_chat_response_one(): # Check the type and content assert len(chat_response.messages) == 1 assert chat_response.text == "I'm doing well, thank you!" 
- assert isinstance(chat_response.messages[0], ChatMessage) + assert isinstance(chat_response.messages[0], Message) assert len(chat_response.messages[0].contents) == 1 assert chat_response.messages[0].message_id == "1" def test_chat_response_updates_to_chat_response_two(): """Test converting ChatResponseUpdate to ChatResponse.""" - # Create a ChatMessage + # Create a Message message1 = Content.from_text("I'm doing well, ") message2 = Content.from_text("thank you!") @@ -765,15 +765,15 @@ def test_chat_response_updates_to_chat_response_two(): # Check the type and content assert len(chat_response.messages) == 2 assert chat_response.text == "I'm doing well, \nthank you!" - assert isinstance(chat_response.messages[0], ChatMessage) + assert isinstance(chat_response.messages[0], Message) assert chat_response.messages[0].message_id == "1" - assert isinstance(chat_response.messages[1], ChatMessage) + assert isinstance(chat_response.messages[1], Message) assert chat_response.messages[1].message_id == "2" def test_chat_response_updates_to_chat_response_multiple(): """Test converting ChatResponseUpdate to ChatResponse.""" - # Create a ChatMessage + # Create a Message message1 = Content.from_text("I'm doing well, ") message2 = Content.from_text("thank you!") @@ -790,14 +790,14 @@ def test_chat_response_updates_to_chat_response_multiple(): # Check the type and content assert len(chat_response.messages) == 1 assert chat_response.text == "I'm doing well, thank you!" 
- assert isinstance(chat_response.messages[0], ChatMessage) + assert isinstance(chat_response.messages[0], Message) assert len(chat_response.messages[0].contents) == 3 assert chat_response.messages[0].message_id == "1" def test_chat_response_updates_to_chat_response_multiple_multiple(): """Test converting ChatResponseUpdate to ChatResponse.""" - # Create a ChatMessage + # Create a Message message1 = Content.from_text("I'm doing well, ", raw_representation="I'm doing well, ") message2 = Content.from_text("thank you!") @@ -815,7 +815,7 @@ def test_chat_response_updates_to_chat_response_multiple_multiple(): # Check the type and content assert len(chat_response.messages) == 1 - assert isinstance(chat_response.messages[0], ChatMessage) + assert isinstance(chat_response.messages[0], Message) assert chat_response.messages[0].message_id == "1" assert chat_response.messages[0].contents[0].raw_representation is not None @@ -1012,8 +1012,8 @@ def test_chat_options_and_tool_choice_required_specific_function() -> None: @fixture -def chat_message() -> ChatMessage: - return ChatMessage(role="user", text="Hello") +def chat_message() -> Message: + return Message(role="user", text="Hello") @fixture @@ -1022,7 +1022,7 @@ def text_content() -> Content: @fixture -def agent_response(chat_message: ChatMessage) -> AgentResponse: +def agent_response(chat_message: Message) -> AgentResponse: return AgentResponse(messages=chat_message) @@ -1034,12 +1034,12 @@ def agent_response_update(text_content: Content) -> AgentResponseUpdate: # region AgentResponse -def test_agent_run_response_init_single_message(chat_message: ChatMessage) -> None: +def test_agent_run_response_init_single_message(chat_message: Message) -> None: response = AgentResponse(messages=chat_message) assert response.messages == [chat_message] -def test_agent_run_response_init_list_messages(chat_message: ChatMessage) -> None: +def test_agent_run_response_init_list_messages(chat_message: Message) -> None: response = 
AgentResponse(messages=[chat_message, chat_message]) assert len(response.messages) == 2 assert response.messages[0] == chat_message @@ -1050,7 +1050,7 @@ def test_agent_run_response_init_none_messages() -> None: assert response.messages == [] -def test_agent_run_response_text_property(chat_message: ChatMessage) -> None: +def test_agent_run_response_text_property(chat_message: Message) -> None: response = AgentResponse(messages=[chat_message, chat_message]) assert response.text == "HelloHello" @@ -1067,7 +1067,7 @@ def test_agent_run_response_from_updates(agent_response_update: AgentResponseUpd assert response.text == "Test contentTest content" -def test_agent_run_response_str_method(chat_message: ChatMessage) -> None: +def test_agent_run_response_str_method(chat_message: Message) -> None: response = AgentResponse(messages=chat_message) assert str(response) == "Hello" @@ -1130,7 +1130,7 @@ def test_agent_run_response_created_at() -> None: # Test with a properly formatted UTC timestamp utc_timestamp = "2024-12-01T00:31:30.000000Z" response = AgentResponse( - messages=[ChatMessage(role="assistant", text="Hello")], + messages=[Message(role="assistant", text="Hello")], created_at=utc_timestamp, ) assert response.created_at == utc_timestamp @@ -1140,7 +1140,7 @@ def test_agent_run_response_created_at() -> None: now_utc = datetime.now(tz=timezone.utc) formatted_utc = now_utc.strftime("%Y-%m-%dT%H:%M:%S.%fZ") response_with_now = AgentResponse( - messages=[ChatMessage(role="assistant", text="Hello")], + messages=[Message(role="assistant", text="Hello")], created_at=formatted_utc, ) assert response_with_now.created_at == formatted_utc @@ -1294,7 +1294,7 @@ def test_chat_tool_mode_eq_with_string(): @fixture def agent_run_response_async() -> AgentResponse: - return AgentResponse(messages=[ChatMessage(role="user", text="Hello")]) + return AgentResponse(messages=[Message(role="user", text="Hello")]) async def test_agent_run_response_from_async_generator(): @@ -1444,7 +1444,7 @@ 
def test_usage_details_iadd_edge_cases(): def test_chat_message_from_dict_with_mixed_content(): - """Test ChatMessage from_dict with mixed content types for better coverage.""" + """Test Message from_dict with mixed content types for better coverage.""" message_data = { "role": "assistant", @@ -1455,7 +1455,7 @@ def test_chat_message_from_dict_with_mixed_content(): ], } - message = ChatMessage.from_dict(message_data) + message = Message.from_dict(message_data) assert len(message.contents) == 3 # Unknown type is ignored assert message.contents[0].type == "text" assert message.contents[1].type == "function_call" @@ -1513,7 +1513,7 @@ def test_comprehensive_serialization_methods(): def test_chat_message_complex_content_serialization(): - """Test ChatMessage serialization with various content types.""" + """Test Message serialization with various content types.""" # Create a message with multiple content types contents = [ @@ -1522,7 +1522,7 @@ def test_chat_message_complex_content_serialization(): Content.from_function_result(call_id="call1", result="success"), ] - message = ChatMessage(role="assistant", contents=contents) + message = Message(role="assistant", contents=contents) # Test to_dict message_dict = message.to_dict() @@ -1532,7 +1532,7 @@ def test_chat_message_complex_content_serialization(): assert message_dict["contents"][2]["type"] == "function_result" # Test from_dict round-trip - reconstructed = ChatMessage.from_dict(message_dict) + reconstructed = Message.from_dict(message_dict) assert len(reconstructed.contents) == 3 assert reconstructed.contents[0].type == "text" assert reconstructed.contents[1].type == "function_call" @@ -1610,7 +1610,7 @@ def test_chat_response_complex_serialization(): response = ChatResponse.from_dict(response_data) assert len(response.messages) == 2 - assert isinstance(response.messages[0], ChatMessage) + assert isinstance(response.messages[0], Message) assert isinstance(response.finish_reason, str) # FinishReason is now a NewType 
of str assert isinstance(response.usage_details, dict) assert response.model_id == "gpt-4" # Should be stored as model_id @@ -1687,7 +1687,7 @@ def test_agent_run_response_complex_serialization(): response = AgentResponse.from_dict(response_data) assert len(response.messages) == 2 - assert isinstance(response.messages[0], ChatMessage) + assert isinstance(response.messages[0], Message) assert isinstance(response.usage_details, dict) # Test to_dict @@ -1869,7 +1869,7 @@ def test_agent_run_response_update_all_content_types(): id="function_approval_response", ), pytest.param( - ChatMessage, + Message, { "role": "\1", "contents": [ @@ -1887,12 +1887,12 @@ def test_agent_run_response_update_all_content_types(): "type": "chat_response", "messages": [ { - "type": "chat_message", + "type": "message", "role": "\1", "contents": [{"type": "text", "text": "Hello"}], }, { - "type": "chat_message", + "type": "message", "role": "\1", "contents": [{"type": "text", "text": "Hi there"}], }, @@ -2761,7 +2761,7 @@ async def test_result_hook_can_transform_result(self) -> None: """Result hook can transform the final result.""" def wrap_text(response: ChatResponse) -> ChatResponse: - return ChatResponse(messages=ChatMessage("assistant", [f"[{response.text}]"])) + return ChatResponse(messages=Message("assistant", [f"[{response.text}]"])) stream = ResponseStream( _generate_updates(2), @@ -2777,10 +2777,10 @@ async def test_multiple_result_hooks_chained(self) -> None: """Multiple result hooks are called in order.""" def add_prefix(response: ChatResponse) -> ChatResponse: - return ChatResponse(messages=ChatMessage("assistant", [f"prefix_{response.text}"])) + return ChatResponse(messages=Message("assistant", [f"prefix_{response.text}"])) def add_suffix(response: ChatResponse) -> ChatResponse: - return ChatResponse(messages=ChatMessage("assistant", [f"{response.text}_suffix"])) + return ChatResponse(messages=Message("assistant", [f"{response.text}_suffix"])) stream = ResponseStream( 
_generate_updates(1), @@ -2828,7 +2828,7 @@ async def test_async_result_hook(self) -> None: """Async result hooks are awaited.""" async def async_hook(response: ChatResponse) -> ChatResponse: - return ChatResponse(messages=ChatMessage("assistant", [f"async_{response.text}"])) + return ChatResponse(messages=Message("assistant", [f"async_{response.text}"])) stream = ResponseStream( _generate_updates(2), @@ -2850,7 +2850,7 @@ async def test_finalizer_receives_all_updates(self) -> None: def capturing_finalizer(updates: list[ChatResponseUpdate]) -> ChatResponse: received_updates.extend(updates) - return ChatResponse(messages=ChatMessage("assistant", ["done"])) + return ChatResponse(messages=Message("assistant", ["done"])) stream = ResponseStream(_generate_updates(3), finalizer=capturing_finalizer) @@ -2875,7 +2875,7 @@ async def test_async_finalizer(self) -> None: async def async_finalizer(updates: list[ChatResponseUpdate]) -> ChatResponse: text = "".join(u.text or "" for u in updates) - return ChatResponse(messages=ChatMessage("assistant", [f"async_{text}"])) + return ChatResponse(messages=Message("assistant", [f"async_{text}"])) stream = ResponseStream(_generate_updates(2), finalizer=async_finalizer) @@ -2889,7 +2889,7 @@ async def test_finalized_only_once(self) -> None: def counting_finalizer(updates: list[ChatResponseUpdate]) -> ChatResponse: call_count["value"] += 1 - return ChatResponse(messages=ChatMessage("assistant", ["done"])) + return ChatResponse(messages=Message("assistant", ["done"])) stream = ResponseStream(_generate_updates(2), finalizer=counting_finalizer) @@ -2949,7 +2949,7 @@ async def test_map_calls_inner_result_hooks(self) -> None: def inner_result_hook(response: ChatResponse) -> ChatResponse: inner_result_hook_called["value"] = True - return ChatResponse(messages=ChatMessage("assistant", [f"hooked_{response.text}"])) + return ChatResponse(messages=Message("assistant", [f"hooked_{response.text}"])) inner = ResponseStream( _generate_updates(2), @@ 
-2969,7 +2969,7 @@ async def test_with_finalizer_calls_inner_finalizer(self) -> None: def inner_finalizer(updates: Sequence[ChatResponseUpdate]) -> ChatResponse: inner_finalizer_called["value"] = True - return ChatResponse(messages=ChatMessage("assistant", ["inner_result"])) + return ChatResponse(messages=Message("assistant", ["inner_result"])) inner = ResponseStream( _generate_updates(2), @@ -2989,7 +2989,7 @@ async def test_with_finalizer_plus_result_hooks(self) -> None: inner = ResponseStream(_generate_updates(2), finalizer=_combine_updates) def outer_hook(response: ChatResponse) -> ChatResponse: - return ChatResponse(messages=ChatMessage("assistant", [f"outer_{response.text}"])) + return ChatResponse(messages=Message("assistant", [f"outer_{response.text}"])) outer = inner.with_finalizer(_combine_updates).with_result_hook(outer_hook) @@ -3114,7 +3114,7 @@ def cleanup_hook() -> None: def finalizer(updates: list[ChatResponseUpdate]) -> ChatResponse: order.append("finalizer") - return ChatResponse(messages=ChatMessage("assistant", ["done"])) + return ChatResponse(messages=Message("assistant", ["done"])) def result_hook(response: ChatResponse) -> ChatResponse: order.append("result") @@ -3149,7 +3149,7 @@ def cleanup_hook() -> None: def finalizer(updates: list[ChatResponseUpdate]) -> ChatResponse: order.append("finalizer") - return ChatResponse(messages=ChatMessage("assistant", ["done"])) + return ChatResponse(messages=Message("assistant", ["done"])) stream = ResponseStream( _generate_updates(2), @@ -3269,7 +3269,7 @@ def cleanup() -> None: def finalizer(updates: list[ChatResponseUpdate]) -> ChatResponse: events.append("finalizer") - return ChatResponse(messages=ChatMessage("assistant", ["done"])) + return ChatResponse(messages=Message("assistant", ["done"])) def result(r: ChatResponse) -> ChatResponse: events.append("result") diff --git a/python/packages/core/tests/openai/test_assistant_provider.py b/python/packages/core/tests/openai/test_assistant_provider.py index 
90b077c941..b500caf583 100644 --- a/python/packages/core/tests/openai/test_assistant_provider.py +++ b/python/packages/core/tests/openai/test_assistant_provider.py @@ -8,7 +8,7 @@ from openai.types.beta.assistant import Assistant from pydantic import BaseModel, Field -from agent_framework import ChatAgent, HostedCodeInterpreterTool, HostedFileSearchTool, normalize_tools, tool +from agent_framework import Agent, HostedCodeInterpreterTool, HostedFileSearchTool, normalize_tools, tool from agent_framework.exceptions import ServiceInitializationError from agent_framework.openai import OpenAIAssistantProvider from agent_framework.openai._shared import from_assistant_tools, to_assistant_tools @@ -202,7 +202,7 @@ async def test_create_agent_basic(self, mock_async_openai: MagicMock) -> None: instructions="You are helpful.", ) - assert isinstance(agent, ChatAgent) + assert isinstance(agent, Agent) assert agent.name == "CreatedAssistant" mock_async_openai.beta.assistants.create.assert_called_once() @@ -235,7 +235,7 @@ async def test_create_agent_with_function_tools(self, mock_async_openai: MagicMo tools=[get_weather], ) - assert isinstance(agent, ChatAgent) + assert isinstance(agent, Agent) # Verify tools were passed to create call_kwargs = mock_async_openai.beta.assistants.create.call_args.kwargs @@ -343,7 +343,7 @@ async def test_create_agent_with_response_format_pydantic(self, mock_async_opena assert call_kwargs["response_format"]["json_schema"]["name"] == "WeatherResponse" async def test_create_agent_returns_chat_agent(self, mock_async_openai: MagicMock) -> None: - """Test that create_agent returns a ChatAgent instance.""" + """Test that create_agent returns an Agent instance.""" provider = OpenAIAssistantProvider(mock_async_openai) agent = await provider.create_agent( @@ -351,7 +351,7 @@ async def test_create_agent_returns_chat_agent(self, mock_async_openai: MagicMoc model="gpt-4", ) - assert isinstance(agent, ChatAgent) + assert isinstance(agent, Agent) # endregion @@
-369,7 +369,7 @@ async def test_get_agent_basic(self, mock_async_openai: MagicMock) -> None: agent = await provider.get_agent(assistant_id="asst_123") - assert isinstance(agent, ChatAgent) + assert isinstance(agent, Agent) mock_async_openai.beta.assistants.retrieve.assert_called_once_with("asst_123") async def test_get_agent_with_instructions_override(self, mock_async_openai: MagicMock) -> None: @@ -382,7 +382,7 @@ async def test_get_agent_with_instructions_override(self, mock_async_openai: Mag ) # Agent should be created successfully with the custom instructions - assert isinstance(agent, ChatAgent) + assert isinstance(agent, Agent) assert agent.id == "asst_retrieved123" async def test_get_agent_with_function_tools(self, mock_async_openai: MagicMock) -> None: @@ -398,7 +398,7 @@ async def test_get_agent_with_function_tools(self, mock_async_openai: MagicMock) tools=[get_weather], ) - assert isinstance(agent, ChatAgent) + assert isinstance(agent, Agent) async def test_get_agent_validates_missing_function_tools(self, mock_async_openai: MagicMock) -> None: """Test that missing function tools raise ValueError.""" @@ -439,7 +439,7 @@ async def test_get_agent_merges_hosted_tools(self, mock_async_openai: MagicMock) agent = await provider.get_agent(assistant_id="asst_123") # Hosted tools should be merged automatically - assert isinstance(agent, ChatAgent) + assert isinstance(agent, Agent) # endregion @@ -458,7 +458,7 @@ def test_as_agent_no_http_call(self, mock_async_openai: MagicMock) -> None: agent = provider.as_agent(assistant) - assert isinstance(agent, ChatAgent) + assert isinstance(agent, Agent) # Verify no HTTP calls were made mock_async_openai.beta.assistants.create.assert_not_called() mock_async_openai.beta.assistants.retrieve.assert_not_called() @@ -477,7 +477,7 @@ def test_as_agent_wraps_assistant(self, mock_async_openai: MagicMock) -> None: assert agent.id == "asst_wrap123" assert agent.name == "WrappedAssistant" # Instructions are passed to ChatOptions, not 
exposed as attribute - assert isinstance(agent, ChatAgent) + assert isinstance(agent, Agent) def test_as_agent_with_instructions_override(self, mock_async_openai: MagicMock) -> None: """Test as_agent with instruction override.""" @@ -487,7 +487,7 @@ def test_as_agent_with_instructions_override(self, mock_async_openai: MagicMock) agent = provider.as_agent(assistant, instructions="Override") # Agent should be created successfully with override instructions - assert isinstance(agent, ChatAgent) + assert isinstance(agent, Agent) def test_as_agent_validates_function_tools(self, mock_async_openai: MagicMock) -> None: """Test that missing function tools raise ValueError.""" @@ -506,7 +506,7 @@ def test_as_agent_with_function_tools_provided(self, mock_async_openai: MagicMoc agent = provider.as_agent(assistant, tools=[get_weather]) - assert isinstance(agent, ChatAgent) + assert isinstance(agent, Agent) def test_as_agent_merges_hosted_tools(self, mock_async_openai: MagicMock) -> None: """Test that hosted tools are merged automatically.""" @@ -515,7 +515,7 @@ def test_as_agent_merges_hosted_tools(self, mock_async_openai: MagicMock) -> Non agent = provider.as_agent(assistant) - assert isinstance(agent, ChatAgent) + assert isinstance(agent, Agent) def test_as_agent_hosted_tools_not_required(self, mock_async_openai: MagicMock) -> None: """Test that hosted tools don't require user implementations.""" @@ -525,7 +525,7 @@ def test_as_agent_hosted_tools_not_required(self, mock_async_openai: MagicMock) # Should not raise - hosted tools don't need implementations agent = provider.as_agent(assistant) - assert isinstance(agent, ChatAgent) + assert isinstance(agent, Agent) # endregion diff --git a/python/packages/core/tests/openai/test_openai_assistants_client.py b/python/packages/core/tests/openai/test_openai_assistants_client.py index 2cefc5ad54..4d0eb9ce7a 100644 --- a/python/packages/core/tests/openai/test_openai_assistants_client.py +++ 
b/python/packages/core/tests/openai/test_openai_assistants_client.py @@ -11,17 +11,17 @@ from pydantic import Field from agent_framework import ( + Agent, AgentResponse, AgentResponseUpdate, AgentThread, - ChatAgent, - ChatClientProtocol, - ChatMessage, ChatResponse, ChatResponseUpdate, Content, HostedCodeInterpreterTool, HostedFileSearchTool, + Message, + SupportsChatGetResponse, tool, ) from agent_framework.exceptions import ServiceInitializationError @@ -122,7 +122,7 @@ def test_init_with_client(mock_async_openai: MagicMock) -> None: assert chat_client.assistant_id == "existing-assistant-id" assert chat_client.thread_id == "test-thread-id" assert not chat_client._should_delete_assistant # type: ignore - assert isinstance(chat_client, ChatClientProtocol) + assert isinstance(chat_client, SupportsChatGetResponse) def test_init_auto_create_client( @@ -179,7 +179,7 @@ def test_init_with_default_headers(openai_unit_test_env: dict[str, str]) -> None ) assert chat_client.model_id == "gpt-4" - assert isinstance(chat_client, ChatClientProtocol) + assert isinstance(chat_client, SupportsChatGetResponse) # Assert that the default header we added is present in the client's default headers for key, value in default_headers.items(): @@ -695,7 +695,7 @@ def test_prepare_options_basic(mock_async_openai: MagicMock) -> None: "top_p": 0.9, } - messages = [ChatMessage(role="user", text="Hello")] + messages = [Message(role="user", text="Hello")] # Call the method run_options, tool_results = chat_client._prepare_options(messages, options) # type: ignore @@ -724,7 +724,7 @@ def test_function(query: str) -> str: "tool_choice": "auto", } - messages = [ChatMessage(role="user", text="Hello")] + messages = [Message(role="user", text="Hello")] # Call the method run_options, tool_results = chat_client._prepare_options(messages, options) # type: ignore @@ -749,7 +749,7 @@ def test_prepare_options_with_code_interpreter(mock_async_openai: MagicMock) -> "tool_choice": "auto", } - messages = 
[ChatMessage(role="user", text="Calculate something")] + messages = [Message(role="user", text="Calculate something")] # Call the method run_options, tool_results = chat_client._prepare_options(messages, options) # type: ignore @@ -769,7 +769,7 @@ def test_prepare_options_tool_choice_none(mock_async_openai: MagicMock) -> None: "tool_choice": "none", } - messages = [ChatMessage(role="user", text="Hello")] + messages = [Message(role="user", text="Hello")] # Call the method run_options, tool_results = chat_client._prepare_options(messages, options) # type: ignore @@ -797,7 +797,7 @@ def test_func(arg: str) -> str: "tools": [test_func], } - messages = [ChatMessage(role="user", text="Hello")] + messages = [Message(role="user", text="Hello")] # Call the method run_options, tool_results = chat_client._prepare_options(messages, options) # type: ignore @@ -819,7 +819,7 @@ def test_prepare_options_required_function(mock_async_openai: MagicMock) -> None "tool_choice": tool_choice, } - messages = [ChatMessage(role="user", text="Hello")] + messages = [Message(role="user", text="Hello")] # Call the method run_options, tool_results = chat_client._prepare_options(messages, options) # type: ignore @@ -845,7 +845,7 @@ def test_prepare_options_with_file_search_tool(mock_async_openai: MagicMock) -> "tool_choice": "auto", } - messages = [ChatMessage(role="user", text="Search for information")] + messages = [Message(role="user", text="Search for information")] # Call the method run_options, tool_results = chat_client._prepare_options(messages, options) # type: ignore @@ -870,7 +870,7 @@ def test_prepare_options_with_mapping_tool(mock_async_openai: MagicMock) -> None "tool_choice": "auto", } - messages = [ChatMessage(role="user", text="Use custom tool")] + messages = [Message(role="user", text="Use custom tool")] # Call the method run_options, tool_results = chat_client._prepare_options(messages, options) # type: ignore @@ -892,7 +892,7 @@ class TestResponse(BaseModel): model_config = 
ConfigDict(extra="forbid") chat_client = create_test_openai_assistants_client(mock_async_openai) - messages = [ChatMessage(role="user", text="Test")] + messages = [Message(role="user", text="Test")] options = {"response_format": TestResponse} run_options, _ = chat_client._prepare_options(messages, options) # type: ignore @@ -908,8 +908,8 @@ def test_prepare_options_with_system_message(mock_async_openai: MagicMock) -> No chat_client = create_test_openai_assistants_client(mock_async_openai) messages = [ - ChatMessage(role="system", text="You are a helpful assistant."), - ChatMessage(role="user", text="Hello"), + Message(role="system", text="You are a helpful assistant."), + Message(role="user", text="Hello"), ] # Call the method @@ -929,7 +929,7 @@ def test_prepare_options_with_image_content(mock_async_openai: MagicMock) -> Non # Create message with image content image_content = Content.from_uri(uri="https://example.com/image.jpg", media_type="image/jpeg") - messages = [ChatMessage(role="user", contents=[image_content])] + messages = [Message(role="user", contents=[image_content])] # Call the method run_options, tool_results = chat_client._prepare_options(messages, {}) # type: ignore @@ -1039,17 +1039,17 @@ def get_weather( async def test_get_response() -> None: """Test OpenAI Assistants Client response.""" async with OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL) as openai_assistants_client: - assert isinstance(openai_assistants_client, ChatClientProtocol) + assert isinstance(openai_assistants_client, SupportsChatGetResponse) - messages: list[ChatMessage] = [] + messages: list[Message] = [] messages.append( - ChatMessage( + Message( role="user", text="The weather in Seattle is currently sunny with a high of 25°C. 
" "It's a beautiful day for outdoor activities.", ) ) - messages.append(ChatMessage(role="user", text="What's the weather like today?")) + messages.append(Message(role="user", text="What's the weather like today?")) # Test that the client can be used to get a response response = await openai_assistants_client.get_response(messages=messages) @@ -1064,10 +1064,10 @@ async def test_get_response() -> None: async def test_get_response_tools() -> None: """Test OpenAI Assistants Client response with tools.""" async with OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL) as openai_assistants_client: - assert isinstance(openai_assistants_client, ChatClientProtocol) + assert isinstance(openai_assistants_client, SupportsChatGetResponse) - messages: list[ChatMessage] = [] - messages.append(ChatMessage(role="user", text="What's the weather like in Seattle?")) + messages: list[Message] = [] + messages.append(Message(role="user", text="What's the weather like in Seattle?")) # Test that the client can be used to get a response response = await openai_assistants_client.get_response( @@ -1085,17 +1085,17 @@ async def test_get_response_tools() -> None: async def test_streaming() -> None: """Test OpenAI Assistants Client streaming response.""" async with OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL) as openai_assistants_client: - assert isinstance(openai_assistants_client, ChatClientProtocol) + assert isinstance(openai_assistants_client, SupportsChatGetResponse) - messages: list[ChatMessage] = [] + messages: list[Message] = [] messages.append( - ChatMessage( + Message( role="user", text="The weather in Seattle is currently sunny with a high of 25°C. 
" "It's a beautiful day for outdoor activities.", ) ) - messages.append(ChatMessage(role="user", text="What's the weather like today?")) + messages.append(Message(role="user", text="What's the weather like today?")) # Test that the client can be used to get a response response = openai_assistants_client.get_response(stream=True, messages=messages) @@ -1116,10 +1116,10 @@ async def test_streaming() -> None: async def test_streaming_tools() -> None: """Test OpenAI Assistants Client streaming response with tools.""" async with OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL) as openai_assistants_client: - assert isinstance(openai_assistants_client, ChatClientProtocol) + assert isinstance(openai_assistants_client, SupportsChatGetResponse) - messages: list[ChatMessage] = [] - messages.append(ChatMessage(role="user", text="What's the weather like in Seattle?")) + messages: list[Message] = [] + messages.append(Message(role="user", text="What's the weather like in Seattle?")) # Test that the client can be used to get a response response = openai_assistants_client.get_response( @@ -1148,7 +1148,7 @@ async def test_with_existing_assistant() -> None: # First create an assistant to use in the test async with OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL) as temp_client: # Get the assistant ID by triggering assistant creation - messages = [ChatMessage(role="user", text="Hello")] + messages = [Message(role="user", text="Hello")] await temp_client.get_response(messages=messages) assistant_id = temp_client.assistant_id @@ -1156,10 +1156,10 @@ async def test_with_existing_assistant() -> None: async with OpenAIAssistantsClient( model_id=INTEGRATION_TEST_MODEL, assistant_id=assistant_id ) as openai_assistants_client: - assert isinstance(openai_assistants_client, ChatClientProtocol) + assert isinstance(openai_assistants_client, SupportsChatGetResponse) assert openai_assistants_client.assistant_id == assistant_id - messages = [ChatMessage(role="user", text="What can you 
do?")] + messages = [Message(role="user", text="What can you do?")] # Test that the client can be used to get a response response = await openai_assistants_client.get_response(messages=messages) @@ -1175,10 +1175,10 @@ async def test_with_existing_assistant() -> None: async def test_file_search() -> None: """Test OpenAI Assistants Client response.""" async with OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL) as openai_assistants_client: - assert isinstance(openai_assistants_client, ChatClientProtocol) + assert isinstance(openai_assistants_client, SupportsChatGetResponse) - messages: list[ChatMessage] = [] - messages.append(ChatMessage(role="user", text="What's the weather like today?")) + messages: list[Message] = [] + messages.append(Message(role="user", text="What's the weather like today?")) file_id, vector_store = await create_vector_store(openai_assistants_client) response = await openai_assistants_client.get_response( @@ -1201,10 +1201,10 @@ async def test_file_search() -> None: async def test_file_search_streaming() -> None: """Test OpenAI Assistants Client response.""" async with OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL) as openai_assistants_client: - assert isinstance(openai_assistants_client, ChatClientProtocol) + assert isinstance(openai_assistants_client, SupportsChatGetResponse) - messages: list[ChatMessage] = [] - messages.append(ChatMessage(role="user", text="What's the weather like today?")) + messages: list[Message] = [] + messages.append(Message(role="user", text="What's the weather like today?")) file_id, vector_store = await create_vector_store(openai_assistants_client) response = openai_assistants_client.get_response( @@ -1232,8 +1232,8 @@ async def test_file_search_streaming() -> None: @pytest.mark.flaky @skip_if_openai_integration_tests_disabled async def test_openai_assistants_agent_basic_run(): - """Test ChatAgent basic run functionality with OpenAIAssistantsClient.""" - async with ChatAgent( + """Test Agent basic run 
functionality with OpenAIAssistantsClient.""" + async with Agent( chat_client=OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL), ) as agent: # Run a simple query @@ -1249,8 +1249,8 @@ async def test_openai_assistants_agent_basic_run(): @pytest.mark.flaky @skip_if_openai_integration_tests_disabled async def test_openai_assistants_agent_basic_run_streaming(): - """Test ChatAgent basic streaming functionality with OpenAIAssistantsClient.""" - async with ChatAgent( + """Test Agent basic streaming functionality with OpenAIAssistantsClient.""" + async with Agent( chat_client=OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL), ) as agent: # Run streaming query @@ -1269,8 +1269,8 @@ async def test_openai_assistants_agent_basic_run_streaming(): @pytest.mark.flaky @skip_if_openai_integration_tests_disabled async def test_openai_assistants_agent_thread_persistence(): - """Test ChatAgent thread persistence across runs with OpenAIAssistantsClient.""" - async with ChatAgent( + """Test Agent thread persistence across runs with OpenAIAssistantsClient.""" + async with Agent( chat_client=OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL), instructions="You are a helpful assistant with good memory.", ) as agent: @@ -1298,11 +1298,11 @@ async def test_openai_assistants_agent_thread_persistence(): @pytest.mark.flaky @skip_if_openai_integration_tests_disabled async def test_openai_assistants_agent_existing_thread_id(): - """Test ChatAgent with existing thread ID to continue conversations across agent instances.""" + """Test Agent with existing thread ID to continue conversations across agent instances.""" # First, create a conversation and capture the thread ID existing_thread_id = None - async with ChatAgent( + async with Agent( chat_client=OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL), instructions="You are a helpful weather agent.", tools=[get_weather], @@ -1322,7 +1322,7 @@ async def test_openai_assistants_agent_existing_thread_id(): # Now continue with the 
same thread ID in a new agent instance - async with ChatAgent( + async with Agent( chat_client=OpenAIAssistantsClient(thread_id=existing_thread_id), instructions="You are a helpful weather agent.", tools=[get_weather], @@ -1343,9 +1343,9 @@ async def test_openai_assistants_agent_existing_thread_id(): @pytest.mark.flaky @skip_if_openai_integration_tests_disabled async def test_openai_assistants_agent_code_interpreter(): - """Test ChatAgent with code interpreter through OpenAIAssistantsClient.""" + """Test Agent with code interpreter through OpenAIAssistantsClient.""" - async with ChatAgent( + async with Agent( chat_client=OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL), instructions="You are a helpful assistant that can write and execute Python code.", tools=[HostedCodeInterpreterTool()], @@ -1365,7 +1365,7 @@ async def test_openai_assistants_agent_code_interpreter(): async def test_agent_level_tool_persistence(): """Test that agent-level tools persist across multiple runs with OpenAI Assistants Client.""" - async with ChatAgent( + async with Agent( chat_client=OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL), instructions="You are a helpful assistant that uses available tools.", tools=[get_weather], # Agent-level tool diff --git a/python/packages/core/tests/openai/test_openai_chat_client.py b/python/packages/core/tests/openai/test_openai_chat_client.py index 7b5f0cde13..db80487616 100644 --- a/python/packages/core/tests/openai/test_openai_chat_client.py +++ b/python/packages/core/tests/openai/test_openai_chat_client.py @@ -13,11 +13,11 @@ from pytest import param from agent_framework import ( - ChatClientProtocol, - ChatMessage, ChatResponse, Content, HostedWebSearchTool, + Message, + SupportsChatGetResponse, ToolProtocol, prepare_function_call_results, tool, @@ -40,7 +40,7 @@ def test_init(openai_unit_test_env: dict[str, str]) -> None: open_ai_chat_completion = OpenAIChatClient() assert open_ai_chat_completion.model_id == 
openai_unit_test_env["OPENAI_CHAT_MODEL_ID"] - assert isinstance(open_ai_chat_completion, ChatClientProtocol) + assert isinstance(open_ai_chat_completion, SupportsChatGetResponse) def test_init_validation_fail() -> None: @@ -55,7 +55,7 @@ def test_init_model_id_constructor(openai_unit_test_env: dict[str, str]) -> None open_ai_chat_completion = OpenAIChatClient(model_id=model_id) assert open_ai_chat_completion.model_id == model_id - assert isinstance(open_ai_chat_completion, ChatClientProtocol) + assert isinstance(open_ai_chat_completion, SupportsChatGetResponse) def test_init_with_default_header(openai_unit_test_env: dict[str, str]) -> None: @@ -67,7 +67,7 @@ def test_init_with_default_header(openai_unit_test_env: dict[str, str]) -> None: ) assert open_ai_chat_completion.model_id == openai_unit_test_env["OPENAI_CHAT_MODEL_ID"] - assert isinstance(open_ai_chat_completion, ChatClientProtocol) + assert isinstance(open_ai_chat_completion, SupportsChatGetResponse) # Assert that the default header we added is present in the client's default headers for key, value in default_headers.items(): @@ -154,7 +154,7 @@ def test_serialize_with_org_id(openai_unit_test_env: dict[str, str]) -> None: async def test_content_filter_exception_handling(openai_unit_test_env: dict[str, str]) -> None: """Test that content filter errors are properly handled.""" client = OpenAIChatClient() - messages = [ChatMessage(role="user", text="test message")] + messages = [Message(role="user", text="test message")] # Create a mock BadRequestError with content_filter code mock_response = MagicMock() @@ -209,7 +209,7 @@ def get_weather(location: str) -> str: async def test_exception_message_includes_original_error_details() -> None: """Test that exception messages include original error details in the new format.""" client = OpenAIChatClient(model_id="test-model", api_key="test-key") - messages = [ChatMessage(role="user", text="test message")] + messages = [Message(role="user", text="test message")] 
mock_response = MagicMock() original_error_message = "Invalid API request format" @@ -283,7 +283,7 @@ def test_function_result_falsy_values_handling(openai_unit_test_env: dict[str, s client = OpenAIChatClient() # Test with empty list (falsy but not None) - message_with_empty_list = ChatMessage( + message_with_empty_list = Message( role="tool", contents=[Content.from_function_result(call_id="call-123", result=[])] ) @@ -292,7 +292,7 @@ def test_function_result_falsy_values_handling(openai_unit_test_env: dict[str, s assert openai_messages[0]["content"] == "[]" # Empty list should be JSON serialized # Test with empty string (falsy but not None) - message_with_empty_string = ChatMessage( + message_with_empty_string = Message( role="tool", contents=[Content.from_function_result(call_id="call-456", result="")] ) @@ -301,9 +301,7 @@ def test_function_result_falsy_values_handling(openai_unit_test_env: dict[str, s assert openai_messages[0]["content"] == "" # Empty string should be preserved # Test with False (falsy but not None) - message_with_false = ChatMessage( - role="tool", contents=[Content.from_function_result(call_id="call-789", result=False)] - ) + message_with_false = Message(role="tool", contents=[Content.from_function_result(call_id="call-789", result=False)]) openai_messages = client._prepare_message_for_openai(message_with_false) assert len(openai_messages) == 1 @@ -319,7 +317,7 @@ def test_function_result_exception_handling(openai_unit_test_env: dict[str, str] # Test with exception (no result) test_exception = ValueError("Test error message") - message_with_exception = ChatMessage( + message_with_exception = Message( role="tool", contents=[ Content.from_function_result(call_id="call-123", result="Error: Function failed.", exception=test_exception) @@ -609,7 +607,7 @@ def test_prepare_message_with_text_reasoning_content(openai_unit_test_env: dict[ reasoning_content = Content.from_text_reasoning(text=None, protected_data=json.dumps(mock_reasoning_data)) # 
Message must have other content first for reasoning to attach to - message = ChatMessage( + message = Message( role="assistant", contents=[ Content.from_text(text="The answer is 42."), @@ -652,17 +650,17 @@ def test_function_approval_content_is_skipped_in_preparation(openai_unit_test_en ) # Test that approval request is skipped - message_with_request = ChatMessage(role="assistant", contents=[approval_request]) + message_with_request = Message(role="assistant", contents=[approval_request]) prepared_request = client._prepare_message_for_openai(message_with_request) assert len(prepared_request) == 0 # Should be empty - approval content is skipped # Test that approval response is skipped - message_with_response = ChatMessage(role="user", contents=[approval_response]) + message_with_response = Message(role="user", contents=[approval_response]) prepared_response = client._prepare_message_for_openai(message_with_response) assert len(prepared_response) == 0 # Should be empty - approval content is skipped # Test with mixed content - approval should be skipped, text should remain - mixed_message = ChatMessage( + mixed_message = Message( role="assistant", contents=[ Content.from_text(text="I need approval for this action."), @@ -752,7 +750,7 @@ def test_prepare_options_without_model_id(openai_unit_test_env: dict[str, str]) client = OpenAIChatClient() client.model_id = None # Remove model_id - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] with pytest.raises(ValueError, match="model_id must be a non-empty string"): client._prepare_options(messages, {}) @@ -786,7 +784,7 @@ def test_prepare_options_with_instructions(openai_unit_test_env: dict[str, str]) """Test that instructions are prepended as system message.""" client = OpenAIChatClient() - messages = [ChatMessage(role="user", text="Hello")] + messages = [Message(role="user", text="Hello")] options = {"instructions": "You are a helpful assistant."} prepared_options = 
client._prepare_options(messages, options) @@ -802,7 +800,7 @@ def test_prepare_message_with_author_name(openai_unit_test_env: dict[str, str]) """Test that author_name is included in prepared message.""" client = OpenAIChatClient() - message = ChatMessage( + message = Message( role="user", author_name="TestUser", contents=[Content.from_text(text="Hello")], @@ -819,7 +817,7 @@ def test_prepare_message_with_tool_result_author_name(openai_unit_test_env: dict client = OpenAIChatClient() # Tool messages should not have 'name' field (it's for function name instead) - message = ChatMessage( + message = Message( role="tool", author_name="ShouldNotAppear", contents=[Content.from_function_result(call_id="call_123", result="result")], @@ -836,7 +834,7 @@ def test_tool_choice_required_with_function_name(openai_unit_test_env: dict[str, """Test that tool_choice with required mode and function name is correctly prepared.""" client = OpenAIChatClient() - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] options = { "tools": [get_weather], "tool_choice": {"mode": "required", "required_function_name": "get_weather"}, @@ -854,7 +852,7 @@ def test_response_format_dict_passthrough(openai_unit_test_env: dict[str, str]) """Test that response_format as dict is passed through directly.""" client = OpenAIChatClient() - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] custom_format = { "type": "json_schema", "json_schema": {"name": "Test", "schema": {"type": "object"}}, @@ -872,7 +870,7 @@ def test_multiple_function_calls_in_single_message(openai_unit_test_env: dict[st client = OpenAIChatClient() # Create message with multiple function calls - message = ChatMessage( + message = Message( role="assistant", contents=[ Content.from_function_call(call_id="call_1", name="func_1", arguments='{"a": 1}'), @@ -894,7 +892,7 @@ def 
test_prepare_options_removes_parallel_tool_calls_when_no_tools(openai_unit_t """Test that parallel_tool_calls is removed when no tools are present.""" client = OpenAIChatClient() - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] options = {"allow_multiple_tool_calls": True} prepared_options = client._prepare_options(messages, options) @@ -906,7 +904,7 @@ def test_prepare_options_removes_parallel_tool_calls_when_no_tools(openai_unit_t async def test_streaming_exception_handling(openai_unit_test_env: dict[str, str]) -> None: """Test that streaming errors are properly handled.""" client = OpenAIChatClient() - messages = [ChatMessage(role="user", text="test")] + messages = [Message(role="user", text="test")] # Create a mock error during streaming mock_error = Exception("Streaming error") @@ -1004,14 +1002,14 @@ async def test_integration_options( # Prepare test message if option_name.startswith("tools") or option_name.startswith("tool_choice"): # Use weather-related prompt for tool tests - messages = [ChatMessage(role="user", text="What is the weather in Seattle?")] + messages = [Message(role="user", text="What is the weather in Seattle?")] elif option_name.startswith("response_format"): # Use prompt that works well with structured output - messages = [ChatMessage(role="user", text="The weather in Seattle is sunny")] - messages.append(ChatMessage(role="user", text="What is the weather in Seattle?")) + messages = [Message(role="user", text="The weather in Seattle is sunny")] + messages.append(Message(role="user", text="What is the weather in Seattle?")) else: # Generic prompt for simple options - messages = [ChatMessage(role="user", text="Say 'Hello World' briefly.")] + messages = [Message(role="user", text="Say 'Hello World' briefly.")] # Build options dict options: dict[str, Any] = {option_name: option_value} diff --git a/python/packages/core/tests/openai/test_openai_chat_client_base.py 
b/python/packages/core/tests/openai/test_openai_chat_client_base.py index 51a7ae0bc3..4c31394fb6 100644 --- a/python/packages/core/tests/openai/test_openai_chat_client_base.py +++ b/python/packages/core/tests/openai/test_openai_chat_client_base.py @@ -14,7 +14,7 @@ from openai.types.chat.chat_completion_message import ChatCompletionMessage from pydantic import BaseModel -from agent_framework import ChatMessage, ChatResponseUpdate +from agent_framework import ChatResponseUpdate, Message from agent_framework.exceptions import ( ServiceResponseException, ) @@ -27,7 +27,7 @@ async def mock_async_process_chat_stream_response(_): @pytest.fixture(scope="function") -def chat_history() -> list[ChatMessage]: +def chat_history() -> list[Message]: return [] @@ -64,12 +64,12 @@ def mock_streaming_chat_completion_response() -> AsyncStream[ChatCompletionChunk @patch.object(AsyncChatCompletions, "create", new_callable=AsyncMock) async def test_cmc( mock_create: AsyncMock, - chat_history: list[ChatMessage], + chat_history: list[Message], mock_chat_completion_response: ChatCompletion, openai_unit_test_env: dict[str, str], ): mock_create.return_value = mock_chat_completion_response - chat_history.append(ChatMessage(role="user", text="hello world")) + chat_history.append(Message(role="user", text="hello world")) openai_chat_completion = OpenAIChatClient() await openai_chat_completion.get_response(messages=chat_history) @@ -83,12 +83,12 @@ async def test_cmc( @patch.object(AsyncChatCompletions, "create", new_callable=AsyncMock) async def test_cmc_chat_options( mock_create: AsyncMock, - chat_history: list[ChatMessage], + chat_history: list[Message], mock_chat_completion_response: ChatCompletion, openai_unit_test_env: dict[str, str], ): mock_create.return_value = mock_chat_completion_response - chat_history.append(ChatMessage(role="user", text="hello world")) + chat_history.append(Message(role="user", text="hello world")) openai_chat_completion = OpenAIChatClient() await 
openai_chat_completion.get_response( @@ -104,12 +104,12 @@ async def test_cmc_chat_options( @patch.object(AsyncChatCompletions, "create", new_callable=AsyncMock) async def test_cmc_no_fcc_in_response( mock_create: AsyncMock, - chat_history: list[ChatMessage], + chat_history: list[Message], mock_chat_completion_response: ChatCompletion, openai_unit_test_env: dict[str, str], ): mock_create.return_value = mock_chat_completion_response - chat_history.append(ChatMessage(role="user", text="hello world")) + chat_history.append(Message(role="user", text="hello world")) orig_chat_history = deepcopy(chat_history) openai_chat_completion = OpenAIChatClient() @@ -126,12 +126,12 @@ async def test_cmc_no_fcc_in_response( @patch.object(AsyncChatCompletions, "create", new_callable=AsyncMock) async def test_cmc_structured_output_no_fcc( mock_create: AsyncMock, - chat_history: list[ChatMessage], + chat_history: list[Message], mock_chat_completion_response: ChatCompletion, openai_unit_test_env: dict[str, str], ): mock_create.return_value = mock_chat_completion_response - chat_history.append(ChatMessage(role="user", text="hello world")) + chat_history.append(Message(role="user", text="hello world")) # Define a mock response format class Test(BaseModel): @@ -148,12 +148,12 @@ class Test(BaseModel): @patch.object(AsyncChatCompletions, "create", new_callable=AsyncMock) async def test_scmc_chat_options( mock_create: AsyncMock, - chat_history: list[ChatMessage], + chat_history: list[Message], mock_streaming_chat_completion_response: AsyncStream[ChatCompletionChunk], openai_unit_test_env: dict[str, str], ): mock_create.return_value = mock_streaming_chat_completion_response - chat_history.append(ChatMessage(role="user", text="hello world")) + chat_history.append(Message(role="user", text="hello world")) openai_chat_completion = OpenAIChatClient() async for msg in openai_chat_completion.get_response( @@ -174,12 +174,12 @@ async def test_scmc_chat_options( @patch.object(AsyncChatCompletions, 
"create", new_callable=AsyncMock, side_effect=Exception) async def test_cmc_general_exception( mock_create: AsyncMock, - chat_history: list[ChatMessage], + chat_history: list[Message], mock_chat_completion_response: ChatCompletion, openai_unit_test_env: dict[str, str], ): mock_create.return_value = mock_chat_completion_response - chat_history.append(ChatMessage(role="user", text="hello world")) + chat_history.append(Message(role="user", text="hello world")) openai_chat_completion = OpenAIChatClient() with pytest.raises(ServiceResponseException): @@ -191,12 +191,12 @@ async def test_cmc_general_exception( @patch.object(AsyncChatCompletions, "create", new_callable=AsyncMock) async def test_cmc_additional_properties( mock_create: AsyncMock, - chat_history: list[ChatMessage], + chat_history: list[Message], mock_chat_completion_response: ChatCompletion, openai_unit_test_env: dict[str, str], ): mock_create.return_value = mock_chat_completion_response - chat_history.append(ChatMessage(role="user", text="hello world")) + chat_history.append(Message(role="user", text="hello world")) openai_chat_completion = OpenAIChatClient() await openai_chat_completion.get_response(messages=chat_history, options={"reasoning_effort": "low"}) @@ -214,7 +214,7 @@ async def test_cmc_additional_properties( @patch.object(AsyncChatCompletions, "create", new_callable=AsyncMock) async def test_get_streaming( mock_create: AsyncMock, - chat_history: list[ChatMessage], + chat_history: list[Message], openai_unit_test_env: dict[str, str], ): content1 = ChatCompletionChunk( @@ -234,7 +234,7 @@ async def test_get_streaming( stream = MagicMock(spec=AsyncStream) stream.__aiter__.return_value = [content1, content2] mock_create.return_value = stream - chat_history.append(ChatMessage(role="user", text="hello world")) + chat_history.append(Message(role="user", text="hello world")) orig_chat_history = deepcopy(chat_history) openai_chat_completion = OpenAIChatClient() @@ -254,7 +254,7 @@ async def 
test_get_streaming( @patch.object(AsyncChatCompletions, "create", new_callable=AsyncMock) async def test_get_streaming_singular( mock_create: AsyncMock, - chat_history: list[ChatMessage], + chat_history: list[Message], openai_unit_test_env: dict[str, str], ): content1 = ChatCompletionChunk( @@ -274,7 +274,7 @@ async def test_get_streaming_singular( stream = MagicMock(spec=AsyncStream) stream.__aiter__.return_value = [content1, content2] mock_create.return_value = stream - chat_history.append(ChatMessage(role="user", text="hello world")) + chat_history.append(Message(role="user", text="hello world")) orig_chat_history = deepcopy(chat_history) openai_chat_completion = OpenAIChatClient() @@ -294,7 +294,7 @@ async def test_get_streaming_singular( @patch.object(AsyncChatCompletions, "create", new_callable=AsyncMock) async def test_get_streaming_structured_output_no_fcc( mock_create: AsyncMock, - chat_history: list[ChatMessage], + chat_history: list[Message], openai_unit_test_env: dict[str, str], ): content1 = ChatCompletionChunk( @@ -314,7 +314,7 @@ async def test_get_streaming_structured_output_no_fcc( stream = MagicMock(spec=AsyncStream) stream.__aiter__.return_value = [content1, content2] mock_create.return_value = stream - chat_history.append(ChatMessage(role="user", text="hello world")) + chat_history.append(Message(role="user", text="hello world")) # Define a mock response format class Test(BaseModel): @@ -333,12 +333,12 @@ class Test(BaseModel): @patch.object(AsyncChatCompletions, "create", new_callable=AsyncMock) async def test_get_streaming_no_fcc_in_response( mock_create: AsyncMock, - chat_history: list[ChatMessage], + chat_history: list[Message], mock_streaming_chat_completion_response: ChatCompletion, openai_unit_test_env: dict[str, str], ): mock_create.return_value = mock_streaming_chat_completion_response - chat_history.append(ChatMessage(role="user", text="hello world")) + chat_history.append(Message(role="user", text="hello world")) orig_chat_history = 
deepcopy(chat_history) openai_chat_completion = OpenAIChatClient() diff --git a/python/packages/core/tests/openai/test_openai_responses_client.py b/python/packages/core/tests/openai/test_openai_responses_client.py index c0f1d0b9f0..6bc07bbd0a 100644 --- a/python/packages/core/tests/openai/test_openai_responses_client.py +++ b/python/packages/core/tests/openai/test_openai_responses_client.py @@ -27,8 +27,6 @@ from pytest import param from agent_framework import ( - ChatClientProtocol, - ChatMessage, ChatOptions, ChatResponse, ChatResponseUpdate, @@ -38,6 +36,8 @@ HostedImageGenerationTool, HostedMCPTool, HostedWebSearchTool, + Message, + SupportsChatGetResponse, tool, ) from agent_framework.exceptions import ( @@ -106,7 +106,7 @@ def test_init(openai_unit_test_env: dict[str, str]) -> None: openai_responses_client = OpenAIResponsesClient() assert openai_responses_client.model_id == openai_unit_test_env["OPENAI_RESPONSES_MODEL_ID"] - assert isinstance(openai_responses_client, ChatClientProtocol) + assert isinstance(openai_responses_client, SupportsChatGetResponse) def test_init_validation_fail() -> None: @@ -121,7 +121,7 @@ def test_init_model_id_constructor(openai_unit_test_env: dict[str, str]) -> None openai_responses_client = OpenAIResponsesClient(model_id=model_id) assert openai_responses_client.model_id == model_id - assert isinstance(openai_responses_client, ChatClientProtocol) + assert isinstance(openai_responses_client, SupportsChatGetResponse) def test_init_with_default_header(openai_unit_test_env: dict[str, str]) -> None: @@ -133,7 +133,7 @@ def test_init_with_default_header(openai_unit_test_env: dict[str, str]) -> None: ) assert openai_responses_client.model_id == openai_unit_test_env["OPENAI_RESPONSES_MODEL_ID"] - assert isinstance(openai_responses_client, ChatClientProtocol) + assert isinstance(openai_responses_client, SupportsChatGetResponse) # Assert that the default header we added is present in the client's default headers for key, value in 
default_headers.items(): @@ -211,7 +211,7 @@ async def test_get_response_with_all_parameters() -> None: # Test with comprehensive parameter set - should fail due to invalid API key with pytest.raises(ServiceResponseException): await client.get_response( - messages=[ChatMessage(role="user", text="Test message")], + messages=[Message(role="user", text="Test message")], options={ "include": ["message.output_text.logprobs"], "instructions": "You are a helpful assistant", @@ -255,7 +255,7 @@ async def test_web_search_tool_with_location() -> None: # Should raise an authentication error due to invalid API key with pytest.raises(ServiceResponseException): await client.get_response( - messages=[ChatMessage(role="user", text="What's the weather?")], + messages=[Message(role="user", text="What's the weather?")], options={"tools": [web_search_tool], "tool_choice": "auto"}, ) @@ -270,7 +270,7 @@ async def test_file_search_tool_with_invalid_inputs() -> None: # Should raise an error due to invalid inputs with pytest.raises(ValueError, match="HostedFileSearchTool requires inputs to be of type"): await client.get_response( - messages=[ChatMessage(role="user", text="Search files")], + messages=[Message(role="user", text="Search files")], options={"tools": [file_search_tool]}, ) @@ -284,7 +284,7 @@ async def test_code_interpreter_tool_variations() -> None: with pytest.raises(ServiceResponseException): await client.get_response( - messages=[ChatMessage(role="user", text="Run some code")], + messages=[Message(role="user", text="Run some code")], options={"tools": [code_tool_empty]}, ) @@ -295,7 +295,7 @@ async def test_code_interpreter_tool_variations() -> None: with pytest.raises(ServiceResponseException): await client.get_response( - messages=[ChatMessage(role="user", text="Process these files")], + messages=[Message(role="user", text="Process these files")], options={"tools": [code_tool_with_files]}, ) @@ -314,7 +314,7 @@ async def test_content_filter_exception() -> None: with 
patch.object(client.client.responses, "create", side_effect=mock_error): with pytest.raises(OpenAIContentFilterException) as exc_info: - await client.get_response(messages=[ChatMessage(role="user", text="Test message")]) + await client.get_response(messages=[Message(role="user", text="Test message")]) assert "content error" in str(exc_info.value) @@ -329,7 +329,7 @@ async def test_hosted_file_search_tool_validation() -> None: with pytest.raises((ValueError, ServiceInvalidRequestError)): await client.get_response( - messages=[ChatMessage(role="user", text="Test")], + messages=[Message(role="user", text="Test")], options={"tools": [empty_file_search_tool]}, ) @@ -349,9 +349,9 @@ async def test_chat_message_parsing_with_function_calls() -> None: function_result = Content.from_function_result(call_id="test-call-id", result="Function executed successfully") messages = [ - ChatMessage(role="user", text="Call a function"), - ChatMessage(role="assistant", contents=[function_call]), - ChatMessage(role="tool", contents=[function_result]), + Message(role="user", text="Call a function"), + Message(role="assistant", contents=[function_call]), + Message(role="tool", contents=[function_result]), ] # This should exercise the message parsing logic - will fail due to invalid API key @@ -377,7 +377,7 @@ async def test_response_format_parse_path() -> None: with patch.object(client.client.responses, "parse", return_value=mock_parsed_response): response = await client.get_response( - messages=[ChatMessage(role="user", text="Test message")], + messages=[Message(role="user", text="Test message")], options={"response_format": OutputStruct, "store": True}, ) assert response.response_id == "parsed_response_123" @@ -404,7 +404,7 @@ async def test_response_format_parse_path_with_conversation_id() -> None: with patch.object(client.client.responses, "parse", return_value=mock_parsed_response): response = await client.get_response( - messages=[ChatMessage(role="user", text="Test message")], + 
messages=[Message(role="user", text="Test message")], options={"response_format": OutputStruct, "store": True}, ) assert response.response_id == "parsed_response_123" @@ -427,7 +427,7 @@ async def test_bad_request_error_non_content_filter() -> None: with patch.object(client.client.responses, "parse", side_effect=mock_error): with pytest.raises(ServiceResponseException) as exc_info: await client.get_response( - messages=[ChatMessage(role="user", text="Test message")], + messages=[Message(role="user", text="Test message")], options={"response_format": OutputStruct}, ) @@ -448,7 +448,7 @@ async def test_streaming_content_filter_exception_handling() -> None: mock_create.side_effect.code = "content_filter" with pytest.raises(OpenAIContentFilterException, match="service encountered a content error"): - response_stream = client.get_response(stream=True, messages=[ChatMessage(role="user", text="Test")]) + response_stream = client.get_response(stream=True, messages=[Message(role="user", text="Test")]) async for _ in response_stream: break @@ -792,7 +792,7 @@ def test_prepare_message_for_openai_with_function_approval_response() -> None: function_call=function_call, ) - message = ChatMessage(role="user", contents=[approval_response]) + message = Message(role="user", contents=[approval_response]) call_id_to_id: dict[str, str] = {} result = client._prepare_message_for_openai(message, call_id_to_id) @@ -814,7 +814,7 @@ def test_chat_message_with_error_content() -> None: error_code="TEST_ERR", ) - message = ChatMessage(role="assistant", contents=[error_content]) + message = Message(role="assistant", contents=[error_content]) call_id_to_id: dict[str, str] = {} result = client._prepare_message_for_openai(message, call_id_to_id) @@ -839,7 +839,7 @@ def test_chat_message_with_usage_content() -> None: } ) - message = ChatMessage(role="assistant", contents=[usage_content]) + message = Message(role="assistant", contents=[usage_content]) call_id_to_id: dict[str, str] = {} result = 
client._prepare_message_for_openai(message, call_id_to_id) @@ -1343,14 +1343,14 @@ async def test_end_to_end_mcp_approval_flow(span_exporter) -> None: # Patch the create call to return the two mocked responses in sequence with patch.object(client.client.responses, "create", side_effect=[mock_response1, mock_response2]) as mock_create: # First call: get the approval request - response = await client.get_response(messages=[ChatMessage(role="user", text="Trigger approval")]) + response = await client.get_response(messages=[Message(role="user", text="Trigger approval")]) assert response.messages[0].contents[0].type == "function_approval_request" req = response.messages[0].contents[0] assert req.id == "approval-1" # Build a user approval and send it (include required function_call) approval = Content.from_function_approval_response(approved=True, id=req.id, function_call=req.function_call) - approval_message = ChatMessage(role="user", contents=[approval]) + approval_message = Message(role="user", contents=[approval]) _ = await client.get_response(messages=[approval_message]) # After approval is processed, the model is called again to get the final response @@ -1595,7 +1595,7 @@ def test_streaming_annotation_added_with_unknown_type() -> None: async def test_service_response_exception_includes_original_error_details() -> None: """Test that ServiceResponseException messages include original error details in the new format.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") - messages = [ChatMessage(role="user", text="test message")] + messages = [Message(role="user", text="test message")] mock_response = MagicMock() original_error_message = "Request rate limit exceeded" @@ -1620,7 +1620,7 @@ async def test_service_response_exception_includes_original_error_details() -> N async def test_get_response_streaming_with_response_format() -> None: """Test get_response streaming with response_format.""" client = OpenAIResponsesClient(model_id="test-model", 
api_key="test-key") - messages = [ChatMessage(role="user", text="Test streaming with format")] + messages = [Message(role="user", text="Test streaming with format")] # It will fail due to invalid API key, but exercises the code path with pytest.raises(ServiceResponseException): @@ -2126,7 +2126,7 @@ def test_parse_response_from_openai_image_generation_fallback(): async def test_prepare_options_store_parameter_handling() -> None: client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") - messages = [ChatMessage(role="user", text="Test message")] + messages = [Message(role="user", text="Test message")] test_conversation_id = "test-conversation-123" chat_options = ChatOptions(store=True, conversation_id=test_conversation_id) @@ -2152,7 +2152,7 @@ async def test_prepare_options_store_parameter_handling() -> None: async def test_conversation_id_precedence_kwargs_over_options() -> None: """When both kwargs and options contain conversation_id, kwargs wins.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") - messages = [ChatMessage(role="user", text="Hello")] + messages = [Message(role="user", text="Hello")] # options has a stale response id, kwargs carries the freshest one opts = {"conversation_id": "resp_old_123"} @@ -2259,14 +2259,14 @@ async def test_integration_options( # Prepare test message if option_name.startswith("tools") or option_name.startswith("tool_choice"): # Use weather-related prompt for tool tests - messages = [ChatMessage(role="user", text="What is the weather in Seattle?")] + messages = [Message(role="user", text="What is the weather in Seattle?")] elif option_name.startswith("response_format"): # Use prompt that works well with structured output - messages = [ChatMessage(role="user", text="The weather in Seattle is sunny")] - messages.append(ChatMessage(role="user", text="What is the weather in Seattle?")) + messages = [Message(role="user", text="The weather in Seattle is sunny")] + 
messages.append(Message(role="user", text="What is the weather in Seattle?")) else: # Generic prompt for simple options - messages = [ChatMessage(role="user", text="Say 'Hello World' briefly.")] + messages = [Message(role="user", text="Say 'Hello World' briefly.")] # Build options dict options: dict[str, Any] = {option_name: option_value} @@ -2372,13 +2372,13 @@ async def test_integration_web_search() -> None: async def test_integration_file_search() -> None: openai_responses_client = OpenAIResponsesClient() - assert isinstance(openai_responses_client, ChatClientProtocol) + assert isinstance(openai_responses_client, SupportsChatGetResponse) file_id, vector_store = await create_vector_store(openai_responses_client) # Test that the client will use the web search tool response = await openai_responses_client.get_response( messages=[ - ChatMessage( + Message( role="user", text="What is the weather today? Do a file search to find the answer.", ) @@ -2403,14 +2403,14 @@ async def test_integration_file_search() -> None: async def test_integration_streaming_file_search() -> None: openai_responses_client = OpenAIResponsesClient() - assert isinstance(openai_responses_client, ChatClientProtocol) + assert isinstance(openai_responses_client, SupportsChatGetResponse) file_id, vector_store = await create_vector_store(openai_responses_client) # Test that the client will use the web search tool response = openai_responses_client.get_response( stream=True, messages=[ - ChatMessage( + Message( role="user", text="What is the weather today? 
Do a file search to find the answer.", ) diff --git a/python/packages/core/tests/workflow/test_agent_executor.py b/python/packages/core/tests/workflow/test_agent_executor.py index 841ef84b85..d3cef6f1fa 100644 --- a/python/packages/core/tests/workflow/test_agent_executor.py +++ b/python/packages/core/tests/workflow/test_agent_executor.py @@ -9,9 +9,9 @@ AgentResponseUpdate, AgentThread, BaseAgent, - ChatMessage, ChatMessageStore, Content, + Message, ResponseStream, WorkflowRunState, ) @@ -29,7 +29,7 @@ def __init__(self, **kwargs: Any): def run( self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | Message | list[str] | list[Message] | None = None, *, stream: bool = False, thread: AgentThread | None = None, @@ -46,7 +46,7 @@ async def _stream() -> AsyncIterable[AgentResponseUpdate]: return ResponseStream(_stream(), finalizer=AgentResponse.from_updates) async def _run() -> AgentResponse: - return AgentResponse(messages=[ChatMessage("assistant", [f"Response #{self.call_count}: {self.name}"])]) + return AgentResponse(messages=[Message("assistant", [f"Response #{self.call_count}: {self.name}"])]) return _run() @@ -61,8 +61,8 @@ async def test_agent_executor_checkpoint_stores_and_restores_state() -> None: # Add some initial messages to the thread to verify thread state persistence initial_messages = [ - ChatMessage(role="user", text="Initial message 1"), - ChatMessage(role="assistant", text="Initial response 1"), + Message(role="user", text="Initial message 1"), + Message(role="assistant", text="Initial response 1"), ] await initial_thread.on_new_messages(initial_messages) @@ -165,9 +165,9 @@ async def test_agent_executor_save_and_restore_state_directly() -> None: # Add messages to thread thread_messages = [ - ChatMessage(role="user", text="Message in thread 1"), - ChatMessage(role="assistant", text="Thread response 1"), - ChatMessage(role="user", text="Message in thread 2"), + Message(role="user", text="Message in thread 
1"), + Message(role="assistant", text="Thread response 1"), + Message(role="user", text="Message in thread 2"), ] await thread.on_new_messages(thread_messages) @@ -175,8 +175,8 @@ async def test_agent_executor_save_and_restore_state_directly() -> None: # Add messages to executor cache cache_messages = [ - ChatMessage(role="user", text="Cached user message"), - ChatMessage(role="assistant", text="Cached assistant response"), + Message(role="user", text="Cached user message"), + Message(role="assistant", text="Cached assistant response"), ] executor._cache = list(cache_messages) # type: ignore[reportPrivateUsage] diff --git a/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py b/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py index 2d4e3ecf39..0e93196bbd 100644 --- a/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py +++ b/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py @@ -8,18 +8,18 @@ from typing_extensions import Never from agent_framework import ( + Agent, AgentExecutor, AgentExecutorResponse, AgentResponse, AgentResponseUpdate, AgentThread, BaseAgent, - ChatAgent, - ChatMessage, ChatResponse, ChatResponseUpdate, Content, FunctionTool, + Message, ResponseStream, WorkflowBuilder, WorkflowContext, @@ -39,7 +39,7 @@ def __init__(self, **kwargs: Any) -> None: def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: bool = False, thread: AgentThread | None = None, @@ -49,7 +49,7 @@ def run( return ResponseStream(self._run_stream_impl(), finalizer=AgentResponse.from_updates) async def _run() -> AgentResponse: - return AgentResponse(messages=[ChatMessage("assistant", ["done"])]) + return AgentResponse(messages=[Message("assistant", ["done"])]) return _run() @@ -156,7 +156,7 @@ def __init__(self, parallel_request: bool = False) -> None: def _inner_get_response( self, *, - messages: 
Sequence[ChatMessage], + messages: Sequence[Message], stream: bool, options: Mapping[str, Any], **kwargs: Any, @@ -175,7 +175,7 @@ def _create_response(self) -> ChatResponse: if self._iteration == 0: if self._parallel_request: response = ChatResponse( - messages=ChatMessage( + messages=Message( "assistant", [ Content.from_function_call( @@ -189,7 +189,7 @@ def _create_response(self) -> ChatResponse: ) else: response = ChatResponse( - messages=ChatMessage( + messages=Message( "assistant", [ Content.from_function_call( @@ -199,7 +199,7 @@ def _create_response(self) -> ChatResponse: ) ) else: - response = ChatResponse(messages=ChatMessage("assistant", ["Tool executed successfully."])) + response = ChatResponse(messages=Message("assistant", ["Tool executed successfully."])) self._iteration += 1 return response @@ -243,7 +243,7 @@ async def test_executor(agent_executor_response: AgentExecutorResponse, ctx: Wor async def test_agent_executor_tool_call_with_approval() -> None: """Test that AgentExecutor handles tool calls requiring approval.""" # Arrange - agent = ChatAgent( + agent = Agent( chat_client=MockChatClient(), name="ApprovalAgent", tools=[mock_tool_requiring_approval], @@ -277,7 +277,7 @@ async def test_agent_executor_tool_call_with_approval() -> None: async def test_agent_executor_tool_call_with_approval_streaming() -> None: """Test that AgentExecutor handles tool calls requiring approval in streaming mode.""" # Arrange - agent = ChatAgent( + agent = Agent( chat_client=MockChatClient(), name="ApprovalAgent", tools=[mock_tool_requiring_approval], @@ -314,7 +314,7 @@ async def test_agent_executor_tool_call_with_approval_streaming() -> None: async def test_agent_executor_parallel_tool_call_with_approval() -> None: """Test that AgentExecutor handles parallel tool calls requiring approval.""" # Arrange - agent = ChatAgent( + agent = Agent( chat_client=MockChatClient(parallel_request=True), name="ApprovalAgent", tools=[mock_tool_requiring_approval], @@ -350,7 +350,7 
@@ async def test_agent_executor_parallel_tool_call_with_approval() -> None: async def test_agent_executor_parallel_tool_call_with_approval_streaming() -> None: """Test that AgentExecutor handles parallel tool calls requiring approval in streaming mode.""" # Arrange - agent = ChatAgent( + agent = Agent( chat_client=MockChatClient(parallel_request=True), name="ApprovalAgent", tools=[mock_tool_requiring_approval], @@ -409,7 +409,7 @@ def __init__(self, parallel_request: bool = False) -> None: def _inner_get_response( self, *, - messages: Sequence[ChatMessage], + messages: Sequence[Message], stream: bool, options: Mapping[str, Any], **kwargs: Any, @@ -426,7 +426,7 @@ def _create_response(self) -> ChatResponse: if self._iteration == 0: if self._parallel_request: response = ChatResponse( - messages=ChatMessage( + messages=Message( "assistant", [ Content.from_function_call( @@ -440,7 +440,7 @@ def _create_response(self) -> ChatResponse: ) else: response = ChatResponse( - messages=ChatMessage( + messages=Message( "assistant", [ Content.from_function_call( @@ -450,7 +450,7 @@ def _create_response(self) -> ChatResponse: ) ) else: - response = ChatResponse(messages=ChatMessage("assistant", ["Tool executed successfully."])) + response = ChatResponse(messages=Message("assistant", ["Tool executed successfully."])) self._iteration += 1 return response @@ -483,7 +483,7 @@ async def _stream_response(self) -> AsyncIterable[ChatResponseUpdate]: async def test_agent_executor_declaration_only_tool_emits_request_info() -> None: """Test that AgentExecutor emits request_info when agent calls a declaration-only tool.""" - agent = ChatAgent( + agent = Agent( chat_client=DeclarationOnlyMockChatClient(), name="DeclarationOnlyAgent", tools=[declaration_only_tool], @@ -519,7 +519,7 @@ async def test_agent_executor_declaration_only_tool_emits_request_info() -> None async def test_agent_executor_declaration_only_tool_emits_request_info_streaming() -> None: """Test that AgentExecutor emits 
request_info for declaration-only tools in streaming mode.""" - agent = ChatAgent( + agent = Agent( chat_client=DeclarationOnlyMockChatClient(), name="DeclarationOnlyAgent", tools=[declaration_only_tool], @@ -558,7 +558,7 @@ async def test_agent_executor_declaration_only_tool_emits_request_info_streaming async def test_agent_executor_parallel_declaration_only_tool_emits_request_info() -> None: """Test that AgentExecutor emits request_info for parallel declaration-only tool calls.""" - agent = ChatAgent( + agent = Agent( chat_client=DeclarationOnlyMockChatClient(parallel_request=True), name="DeclarationOnlyAgent", tools=[declaration_only_tool], diff --git a/python/packages/core/tests/workflow/test_agent_run_event_typing.py b/python/packages/core/tests/workflow/test_agent_run_event_typing.py index 410f57f962..ff8ef99893 100644 --- a/python/packages/core/tests/workflow/test_agent_run_event_typing.py +++ b/python/packages/core/tests/workflow/test_agent_run_event_typing.py @@ -2,13 +2,13 @@ """Tests for WorkflowEvent[T] generic type annotations.""" -from agent_framework import AgentResponse, AgentResponseUpdate, ChatMessage +from agent_framework import AgentResponse, AgentResponseUpdate, Message from agent_framework._workflows._events import WorkflowEvent def test_workflow_event_with_agent_response_data_type() -> None: """Verify WorkflowEvent[AgentResponse].data is typed as AgentResponse.""" - response = AgentResponse(messages=[ChatMessage(role="assistant", text="Hello")]) + response = AgentResponse(messages=[Message(role="assistant", text="Hello")]) event: WorkflowEvent[AgentResponse] = WorkflowEvent.emit(executor_id="test", data=response) # This assignment should pass type checking without a cast @@ -29,7 +29,7 @@ def test_workflow_event_with_agent_response_update_data_type() -> None: def test_workflow_event_repr() -> None: """Verify WorkflowEvent.__repr__ uses consistent format.""" - response = AgentResponse(messages=[ChatMessage(role="assistant", text="Hello")]) + 
response = AgentResponse(messages=[Message(role="assistant", text="Hello")]) event: WorkflowEvent[AgentResponse] = WorkflowEvent.emit(executor_id="test", data=response) repr_str = repr(event) diff --git a/python/packages/core/tests/workflow/test_agent_utils.py b/python/packages/core/tests/workflow/test_agent_utils.py index c26ecda04c..8a8beae5b1 100644 --- a/python/packages/core/tests/workflow/test_agent_utils.py +++ b/python/packages/core/tests/workflow/test_agent_utils.py @@ -3,7 +3,7 @@ from collections.abc import AsyncIterable from typing import Any -from agent_framework import AgentResponse, AgentResponseUpdate, AgentThread, ChatMessage +from agent_framework import AgentResponse, AgentResponseUpdate, AgentThread, Message from agent_framework._workflows._agent_utils import resolve_agent_id @@ -34,7 +34,7 @@ def description(self) -> str | None: def run( self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | Message | list[str] | list[Message] | None = None, *, stream: bool = False, thread: AgentThread | None = None, diff --git a/python/packages/core/tests/workflow/test_executor.py b/python/packages/core/tests/workflow/test_executor.py index 507b798e96..c089fb30f3 100644 --- a/python/packages/core/tests/workflow/test_executor.py +++ b/python/packages/core/tests/workflow/test_executor.py @@ -6,7 +6,6 @@ from typing_extensions import Never from agent_framework import ( - ChatMessage, Executor, Message, WorkflowBuilder, @@ -531,10 +530,10 @@ async def test_executor_invoked_event_data_not_mutated_by_handler(): """Test that executor_invoked event (type='executor_invoked').data captures original input, not mutated input.""" @executor(id="Mutator") - async def mutator(messages: list[ChatMessage], ctx: WorkflowContext[list[ChatMessage]]) -> None: + async def mutator(messages: list[Message], ctx: WorkflowContext[list[Message]]) -> None: # The handler mutates the input list by appending new messages original_len = len(messages) 
- messages.append(ChatMessage(role="assistant", text="Added by executor")) + messages.append(Message(role="assistant", text="Added by executor")) await ctx.send_message(messages) # Verify mutation happened assert len(messages) == original_len + 1 @@ -542,7 +541,7 @@ async def mutator(messages: list[ChatMessage], ctx: WorkflowContext[list[ChatMes workflow = WorkflowBuilder(start_executor=mutator).build() # Run with a single user message - input_messages = [ChatMessage(role="user", text="hello")] + input_messages = [Message(role="user", text="hello")] events = await workflow.run(input_messages) # Find the invoked event for the Mutator executor diff --git a/python/packages/core/tests/workflow/test_full_conversation.py b/python/packages/core/tests/workflow/test_full_conversation.py index c29dd61fe5..3eb47803fc 100644 --- a/python/packages/core/tests/workflow/test_full_conversation.py +++ b/python/packages/core/tests/workflow/test_full_conversation.py @@ -13,9 +13,9 @@ AgentResponseUpdate, AgentThread, BaseAgent, - ChatMessage, Content, Executor, + Message, ResponseStream, WorkflowBuilder, WorkflowContext, @@ -34,7 +34,7 @@ def __init__(self, *, reply_text: str, **kwargs: Any) -> None: def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: bool = False, thread: AgentThread | None = None, @@ -48,7 +48,7 @@ async def _stream() -> AsyncIterable[AgentResponseUpdate]: return ResponseStream(_stream(), finalizer=AgentResponse.from_updates) async def _run() -> AgentResponse: - return AgentResponse(messages=[ChatMessage("assistant", [self._reply_text])]) + return AgentResponse(messages=[Message("assistant", [self._reply_text])]) return _run() @@ -96,7 +96,7 @@ async def test_agent_executor_populates_full_conversation_non_streaming() -> Non class _CaptureAgent(BaseAgent): """Streaming-capable agent that records the messages it received.""" - _last_messages: 
list[ChatMessage] = PrivateAttr(default_factory=list) # type: ignore + _last_messages: list[Message] = PrivateAttr(default_factory=list) # type: ignore def __init__(self, *, reply_text: str, **kwargs: Any) -> None: super().__init__(**kwargs) @@ -104,20 +104,20 @@ def __init__(self, *, reply_text: str, **kwargs: Any) -> None: def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: bool = False, thread: AgentThread | None = None, **kwargs: Any, ) -> Awaitable[AgentResponse] | ResponseStream[AgentResponseUpdate, AgentResponse]: # Normalize and record messages for verification - norm: list[ChatMessage] = [] + norm: list[Message] = [] if messages: for m in messages: # type: ignore[iteration-over-optional] - if isinstance(m, ChatMessage): + if isinstance(m, Message): norm.append(m) elif isinstance(m, str): - norm.append(ChatMessage("user", [m])) + norm.append(Message("user", [m])) self._last_messages = norm if stream: @@ -128,7 +128,7 @@ async def _stream() -> AsyncIterable[AgentResponseUpdate]: return ResponseStream(_stream(), finalizer=AgentResponse.from_updates) async def _run() -> AgentResponse: - return AgentResponse(messages=[ChatMessage("assistant", [self._reply_text])]) + return AgentResponse(messages=[Message("assistant", [self._reply_text])]) return _run() diff --git a/python/packages/core/tests/workflow/test_typing_utils.py b/python/packages/core/tests/workflow/test_typing_utils.py index ab483e05e9..4dc8d8c917 100644 --- a/python/packages/core/tests/workflow/test_typing_utils.py +++ b/python/packages/core/tests/workflow/test_typing_utils.py @@ -378,12 +378,12 @@ def test_type_compatibility_collections() -> None: # List compatibility - key use case @dataclass - class ChatMessage: + class Message: text: str - assert is_type_compatible(list[ChatMessage], list[Union[str, ChatMessage]]) - assert is_type_compatible(list[str], list[Union[str, ChatMessage]]) 
- assert not is_type_compatible(list[Union[str, ChatMessage]], list[ChatMessage]) + assert is_type_compatible(list[Message], list[Union[str, Message]]) + assert is_type_compatible(list[str], list[Union[str, Message]]) + assert not is_type_compatible(list[Union[str, Message]], list[Message]) # Dict compatibility assert is_type_compatible(dict[str, int], dict[str, Union[int, float]]) diff --git a/python/packages/core/tests/workflow/test_workflow.py b/python/packages/core/tests/workflow/test_workflow.py index 1e98ff08c5..c8923f4774 100644 --- a/python/packages/core/tests/workflow/test_workflow.py +++ b/python/packages/core/tests/workflow/test_workflow.py @@ -15,7 +15,6 @@ AgentResponseUpdate, AgentThread, BaseAgent, - ChatMessage, Content, Executor, FileCheckpointStorage, @@ -833,7 +832,7 @@ def __init__(self, *, reply_text: str, **kwargs: Any) -> None: def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: bool = False, thread: AgentThread | None = None, @@ -849,7 +848,7 @@ async def _stream() -> AsyncIterable[AgentResponseUpdate]: return ResponseStream(_stream(), finalizer=AgentResponse.from_updates) async def _run() -> AgentResponse: - return AgentResponse(messages=[ChatMessage("assistant", [self._reply_text])]) + return AgentResponse(messages=[Message("assistant", [self._reply_text])]) return _run() diff --git a/python/packages/core/tests/workflow/test_workflow_agent.py b/python/packages/core/tests/workflow/test_workflow_agent.py index 2ab919e6b5..2013cd77e2 100644 --- a/python/packages/core/tests/workflow/test_workflow_agent.py +++ b/python/packages/core/tests/workflow/test_workflow_agent.py @@ -12,10 +12,10 @@ AgentResponse, AgentResponseUpdate, AgentThread, - ChatMessage, - ChatMessageStore, Content, Executor, + Message, + MessageStore, ResponseStream, SupportsAgentRun, UsageDetails, @@ -39,14 +39,14 @@ def __init__(self, id: str, response_text: str, 
streaming: bool = False): @handler async def handle_message( self, - message: list[ChatMessage], - ctx: WorkflowContext[list[ChatMessage], AgentResponseUpdate | AgentResponse], + message: list[Message], + ctx: WorkflowContext[list[Message], AgentResponseUpdate | AgentResponse], ) -> None: input_text = message[0].contents[0].text if message and message[0].contents[0].type == "text" else "no input" response_text = f"{self.response_text}: {input_text}" # Create response message for both streaming and non-streaming cases - response_message = ChatMessage(role="assistant", contents=[Content.from_text(text=response_text)]) + response_message = Message(role="assistant", contents=[Content.from_text(text=response_text)]) if self.streaming: # Emit update event. @@ -70,7 +70,7 @@ def __init__(self, id: str, streaming: bool = False): self.streaming = streaming @handler - async def handle_message(self, _: list[ChatMessage], ctx: WorkflowContext) -> None: + async def handle_message(self, _: list[Message], ctx: WorkflowContext) -> None: # Send a RequestInfoMessage to trigger the request info process await ctx.request_info("Mock request data", str) @@ -79,7 +79,7 @@ async def handle_request_response( self, original_request: str, response: str, - ctx: WorkflowContext[ChatMessage, AgentResponseUpdate | AgentResponse], + ctx: WorkflowContext[Message, AgentResponseUpdate | AgentResponse], ) -> None: # Handle the response and emit completion response content = Content.from_text(text=f"Request completed with response: {response}") @@ -96,7 +96,7 @@ async def handle_request_response( await ctx.yield_output( AgentResponse( messages=[ - ChatMessage( + Message( role="assistant", contents=[content], ) @@ -110,14 +110,14 @@ class ConversationHistoryCapturingExecutor(Executor): def __init__(self, id: str, streaming: bool = False): super().__init__(id=id) - self.received_messages: list[ChatMessage] = [] + self.received_messages: list[Message] = [] self.streaming = streaming @handler async def 
handle_message( self, - messages: list[ChatMessage], - ctx: WorkflowContext[list[ChatMessage], AgentResponseUpdate | AgentResponse], + messages: list[Message], + ctx: WorkflowContext[list[Message], AgentResponseUpdate | AgentResponse], ) -> None: # Capture all received messages self.received_messages = list(messages) @@ -126,7 +126,7 @@ async def handle_message( message_count = len(messages) response_text = f"Received {message_count} messages" - response_message = ChatMessage(role="assistant", contents=[Content.from_text(text=response_text)]) + response_message = Message(role="assistant", contents=[Content.from_text(text=response_text)]) if self.streaming: # Emit streaming update @@ -162,8 +162,8 @@ async def test_end_to_end_basic_workflow(self): assert len(result.messages) >= 2, f"Expected at least 2 messages, got {len(result.messages)}" # Find messages from each executor - step1_messages: list[ChatMessage] = [] - step2_messages: list[ChatMessage] = [] + step1_messages: list[Message] = [] + step2_messages: list[Message] = [] for message in result.messages: first_content = message.contents[0] @@ -281,7 +281,7 @@ async def test_end_to_end_request_info_handling(self): ), ) - response_message = ChatMessage(role="user", contents=[approval_response]) + response_message = Message(role="user", contents=[approval_response]) # Continue the workflow with the response continuation_result = await agent.run(response_message) @@ -325,7 +325,7 @@ async def handle_bool(self, message: bool, context: WorkflowContext[Any]) -> Non workflow = WorkflowBuilder(start_executor=executor).build() # Try to create an agent with unsupported input types - with pytest.raises(ValueError, match="Workflow's start executor cannot handle list\\[ChatMessage\\]"): + with pytest.raises(ValueError, match="Workflow's start executor cannot handle list\\[Message\\]"): workflow.as_agent() async def test_workflow_as_agent_yield_output_surfaces_as_agent_response(self) -> None: @@ -336,7 +336,7 @@ async def 
test_workflow_as_agent_yield_output_surfaces_as_agent_response(self) - """ @executor - async def yielding_executor(messages: list[ChatMessage], ctx: WorkflowContext[Never, str]) -> None: + async def yielding_executor(messages: list[Message], ctx: WorkflowContext[Never, str]) -> None: # Extract text from input for demonstration input_text = messages[0].text if messages else "no input" await ctx.yield_output(f"processed: {input_text}") @@ -344,7 +344,7 @@ async def yielding_executor(messages: list[ChatMessage], ctx: WorkflowContext[Ne workflow = WorkflowBuilder(start_executor=yielding_executor).build() # Run directly - should return output event (type='output') in result - direct_result = await workflow.run([ChatMessage(role="user", text="hello")]) + direct_result = await workflow.run([Message(role="user", text="hello")]) direct_outputs = direct_result.get_outputs() assert len(direct_outputs) == 1 assert direct_outputs[0] == "processed: hello" @@ -361,7 +361,7 @@ async def test_workflow_as_agent_yield_output_surfaces_in_run_stream(self) -> No """Test that ctx.yield_output() surfaces as AgentResponseUpdate when streaming.""" @executor - async def yielding_executor(messages: list[ChatMessage], ctx: WorkflowContext[Never, str]) -> None: + async def yielding_executor(messages: list[Message], ctx: WorkflowContext[Never, str]) -> None: await ctx.yield_output("first output") await ctx.yield_output("second output") @@ -381,7 +381,7 @@ async def test_workflow_as_agent_yield_output_with_content_types(self) -> None: """Test that yield_output preserves different content types (Content, Content, etc.).""" @executor - async def content_yielding_executor(messages: list[ChatMessage], ctx: WorkflowContext[Never, Content]) -> None: + async def content_yielding_executor(messages: list[Message], ctx: WorkflowContext[Never, Content]) -> None: # Yield different content types await ctx.yield_output(Content.from_text(text="text content")) await 
ctx.yield_output(Content.from_data(data=b"binary data", media_type="application/octet-stream")) @@ -406,11 +406,11 @@ async def content_yielding_executor(messages: list[ChatMessage], ctx: WorkflowCo assert result.messages[2].contents[0].uri == "https://example.com/image.png" async def test_workflow_as_agent_yield_output_with_chat_message(self) -> None: - """Test that yield_output with ChatMessage preserves the message structure.""" + """Test that yield_output with Message preserves the message structure.""" @executor - async def chat_message_executor(messages: list[ChatMessage], ctx: WorkflowContext[Never, ChatMessage]) -> None: - msg = ChatMessage( + async def chat_message_executor(messages: list[Message], ctx: WorkflowContext[Never, Message]) -> None: + msg = Message( role="assistant", contents=[Content.from_text(text="response text")], author_name="custom-author", @@ -440,7 +440,7 @@ def __str__(self) -> str: @executor async def raw_yielding_executor( - messages: list[ChatMessage], ctx: WorkflowContext[Never, Content | CustomData | str] + messages: list[Message], ctx: WorkflowContext[Never, Content | CustomData | str] ) -> None: # Yield different types of data await ctx.yield_output("simple string") @@ -469,21 +469,19 @@ async def raw_yielding_executor( assert updates[2].raw_representation.value == 42 async def test_workflow_as_agent_yield_output_with_list_of_chat_messages(self) -> None: - """Test that yield_output with list[ChatMessage] extracts contents from all messages. + """Test that yield_output with list[Message] extracts contents from all messages. Note: Content items are coalesced by _finalize_response, so multiple text contents become a single merged Content in the final response. 
""" @executor - async def list_yielding_executor( - messages: list[ChatMessage], ctx: WorkflowContext[Never, list[ChatMessage]] - ) -> None: - # Yield a list of ChatMessages (as SequentialBuilder does) + async def list_yielding_executor(messages: list[Message], ctx: WorkflowContext[Never, list[Message]]) -> None: + # Yield a list of Messages (as SequentialBuilder does) msg_list = [ - ChatMessage(role="user", text="first message"), - ChatMessage(role="assistant", text="second message"), - ChatMessage( + Message(role="user", text="first message"), + Message(role="assistant", text="second message"), + Message( role="assistant", contents=[Content.from_text(text="third"), Content.from_text(text="fourth")], ), @@ -526,10 +524,10 @@ async def test_thread_conversation_history_included_in_workflow_run(self) -> Non # Create a thread with existing conversation history history_messages = [ - ChatMessage(role="user", text="Previous user message"), - ChatMessage(role="assistant", text="Previous assistant response"), + Message(role="user", text="Previous user message"), + Message(role="assistant", text="Previous assistant response"), ] - message_store = ChatMessageStore(messages=history_messages) + message_store = MessageStore(messages=history_messages) thread = AgentThread(message_store=message_store) # Run the agent with the thread and a new message @@ -556,11 +554,11 @@ async def test_thread_conversation_history_included_in_workflow_stream(self) -> # Create a thread with existing conversation history history_messages = [ - ChatMessage(role="system", text="You are a helpful assistant"), - ChatMessage(role="user", text="Hello"), - ChatMessage("assistant", ["Hi there!"]), + Message(role="system", text="You are a helpful assistant"), + Message(role="user", text="Hello"), + Message("assistant", ["Hi there!"]), ] - message_store = ChatMessageStore(messages=history_messages) + message_store = MessageStore(messages=history_messages) thread = AgentThread(message_store=message_store) # 
Stream from the agent with the thread and a new message @@ -629,7 +627,7 @@ def get_new_thread(self, **kwargs: Any) -> AgentThread: def run( self, - messages: str | Content | ChatMessage | Sequence[str | Content | ChatMessage] | None = None, + messages: str | Content | Message | Sequence[str | Content | Message] | None = None, *, stream: bool = False, thread: AgentThread | None = None, @@ -641,7 +639,7 @@ def run( async def _run( self, - messages: str | Content | ChatMessage | Sequence[str | Content | ChatMessage] | None = None, + messages: str | Content | Message | Sequence[str | Content | Message] | None = None, *, stream: bool = False, thread: AgentThread | None = None, @@ -649,12 +647,12 @@ async def _run( ) -> AgentResponse: return AgentResponse( - messages=[ChatMessage("assistant", [self._response_text])], + messages=[Message("assistant", [self._response_text])], ) def _run_stream( self, - messages: str | Content | ChatMessage | Sequence[str | Content | ChatMessage] | None = None, + messages: str | Content | Message | Sequence[str | Content | Message] | None = None, *, thread: AgentThread | None = None, **kwargs: Any, @@ -670,7 +668,7 @@ async def _iter(): return ResponseStream(_iter(), finalizer=AgentResponse.from_updates) @executor - async def start_exec(messages: list[ChatMessage], ctx: WorkflowContext[AgentExecutorRequest, str]) -> None: + async def start_exec(messages: list[Message], ctx: WorkflowContext[AgentExecutorRequest, str]) -> None: await ctx.yield_output("Start output") await ctx.send_message(AgentExecutorRequest(messages=messages, should_respond=True)) @@ -717,7 +715,7 @@ def get_new_thread(self, **kwargs: Any) -> AgentThread: def run( self, - messages: str | Content | ChatMessage | Sequence[str | Content | ChatMessage] | None = None, + messages: str | Content | Message | Sequence[str | Content | Message] | None = None, *, stream: bool = False, thread: AgentThread | None = None, @@ -729,7 +727,7 @@ def run( async def _run( self, - messages: str 
| Content | ChatMessage | Sequence[str | Content | ChatMessage] | None = None, + messages: str | Content | Message | Sequence[str | Content | Message] | None = None, *, stream: bool = False, thread: AgentThread | None = None, @@ -737,12 +735,12 @@ async def _run( ) -> AgentResponse: return AgentResponse( - messages=[ChatMessage("assistant", [self._response_text])], + messages=[Message("assistant", [self._response_text])], ) def _run_stream( self, - messages: str | Content | ChatMessage | Sequence[str | Content | ChatMessage] | None = None, + messages: str | Content | Message | Sequence[str | Content | Message] | None = None, *, thread: AgentThread | None = None, **kwargs: Any, @@ -758,7 +756,7 @@ async def _iter(): return ResponseStream(_iter(), finalizer=AgentResponse.from_updates) @executor - async def start_exec(messages: list[ChatMessage], ctx: WorkflowContext[AgentExecutorRequest]) -> None: + async def start_exec(messages: list[Message], ctx: WorkflowContext[AgentExecutorRequest]) -> None: await ctx.send_message(AgentExecutorRequest(messages=messages, should_respond=True)) mock_agent = MockAgent("agent", "Unique response text") @@ -810,8 +808,8 @@ class AuthorNameExecutor(Executor): @handler async def handle_message( self, - message: list[ChatMessage], - ctx: WorkflowContext[list[ChatMessage], AgentResponseUpdate], + message: list[Message], + ctx: WorkflowContext[list[Message], AgentResponseUpdate], ) -> None: # Emit update with explicit author_name update = AgentResponseUpdate( diff --git a/python/packages/core/tests/workflow/test_workflow_builder.py b/python/packages/core/tests/workflow/test_workflow_builder.py index 22c8645dae..fd0d74586a 100644 --- a/python/packages/core/tests/workflow/test_workflow_builder.py +++ b/python/packages/core/tests/workflow/test_workflow_builder.py @@ -11,8 +11,8 @@ AgentResponseUpdate, AgentThread, BaseAgent, - ChatMessage, Executor, + Message, WorkflowBuilder, WorkflowContext, WorkflowValidationError, @@ -27,13 +27,13 @@ def 
run(self, messages=None, *, stream: bool = False, thread: AgentThread | None return self._run_impl(messages) async def _run_impl(self, messages=None) -> AgentResponse: - norm: list[ChatMessage] = [] + norm: list[Message] = [] if messages: for m in messages: # type: ignore[iteration-over-optional] - if isinstance(m, ChatMessage): + if isinstance(m, Message): norm.append(m) elif isinstance(m, str): - norm.append(ChatMessage(role="user", text=m)) + norm.append(Message(role="user", text=m)) return AgentResponse(messages=norm) async def _run_stream_impl(self): # type: ignore[override] diff --git a/python/packages/core/tests/workflow/test_workflow_kwargs.py b/python/packages/core/tests/workflow/test_workflow_kwargs.py index 2e46454601..83e13975db 100644 --- a/python/packages/core/tests/workflow/test_workflow_kwargs.py +++ b/python/packages/core/tests/workflow/test_workflow_kwargs.py @@ -10,8 +10,8 @@ AgentResponseUpdate, AgentThread, BaseAgent, - ChatMessage, Content, + Message, ResponseStream, WorkflowRunState, tool, @@ -52,7 +52,7 @@ def __init__(self, name: str = "test_agent") -> None: def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: bool = False, thread: AgentThread | None = None, @@ -67,7 +67,7 @@ async def _stream() -> AsyncIterable[AgentResponseUpdate]: return ResponseStream(_stream(), finalizer=AgentResponse.from_updates) async def _run() -> AgentResponse: - return AgentResponse(messages=[ChatMessage("assistant", [f"{self.name} response"])]) + return AgentResponse(messages=[Message("assistant", [f"{self.name} response"])]) return _run() @@ -222,7 +222,7 @@ async def test_kwargs_stored_in_state() -> None: class _StateInspector(Executor): @handler - async def inspect(self, msgs: list[ChatMessage], ctx: WorkflowContext[list[ChatMessage]]) -> None: + async def inspect(self, msgs: list[Message], ctx: WorkflowContext[list[Message]]) -> None: nonlocal 
stored_kwargs stored_kwargs = ctx.get_state(WORKFLOW_RUN_KWARGS_KEY) await ctx.send_message(msgs) @@ -247,7 +247,7 @@ async def test_empty_kwargs_stored_as_empty_dict() -> None: class _StateChecker(Executor): @handler - async def check(self, msgs: list[ChatMessage], ctx: WorkflowContext[list[ChatMessage]]) -> None: + async def check(self, msgs: list[Message], ctx: WorkflowContext[list[Message]]) -> None: nonlocal stored_kwargs stored_kwargs = ctx.get_state(WORKFLOW_RUN_KWARGS_KEY) await ctx.send_message(msgs) @@ -388,11 +388,11 @@ def __init__(self) -> None: super().__init__(max_stall_count=3, max_reset_count=None, max_round_count=2) self.task_ledger = None - async def plan(self, magentic_context: MagenticContext) -> ChatMessage: - return ChatMessage(role="assistant", text="Plan: Test task", author_name="manager") + async def plan(self, magentic_context: MagenticContext) -> Message: + return Message(role="assistant", text="Plan: Test task", author_name="manager") - async def replan(self, magentic_context: MagenticContext) -> ChatMessage: - return ChatMessage(role="assistant", text="Replan: Test task", author_name="manager") + async def replan(self, magentic_context: MagenticContext) -> Message: + return Message(role="assistant", text="Replan: Test task", author_name="manager") async def create_progress_ledger(self, magentic_context: MagenticContext) -> MagenticProgressLedger: # Return completed on first call @@ -404,8 +404,8 @@ async def create_progress_ledger(self, magentic_context: MagenticContext) -> Mag next_speaker=MagenticProgressLedgerItem(answer="agent1", reason="First"), ) - async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatMessage: - return ChatMessage(role="assistant", text="Final answer", author_name="manager") + async def prepare_final_answer(self, magentic_context: MagenticContext) -> Message: + return Message(role="assistant", text="Final answer", author_name="manager") agent = _KwargsCapturingAgent(name="agent1") manager 
= _MockManager() @@ -439,11 +439,11 @@ def __init__(self) -> None: super().__init__(max_stall_count=3, max_reset_count=None, max_round_count=1) self.task_ledger = None - async def plan(self, magentic_context: MagenticContext) -> ChatMessage: - return ChatMessage(role="assistant", text="Plan", author_name="manager") + async def plan(self, magentic_context: MagenticContext) -> Message: + return Message(role="assistant", text="Plan", author_name="manager") - async def replan(self, magentic_context: MagenticContext) -> ChatMessage: - return ChatMessage(role="assistant", text="Replan", author_name="manager") + async def replan(self, magentic_context: MagenticContext) -> Message: + return Message(role="assistant", text="Replan", author_name="manager") async def create_progress_ledger(self, magentic_context: MagenticContext) -> MagenticProgressLedger: return MagenticProgressLedger( @@ -454,8 +454,8 @@ async def create_progress_ledger(self, magentic_context: MagenticContext) -> Mag next_speaker=MagenticProgressLedgerItem(answer="agent1", reason="First"), ) - async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatMessage: - return ChatMessage(role="assistant", text="Final", author_name="manager") + async def prepare_final_answer(self, magentic_context: MagenticContext) -> Message: + return Message(role="assistant", text="Final", author_name="manager") agent = _KwargsCapturingAgent(name="agent1") manager = _MockManager() @@ -660,7 +660,7 @@ class _StateReader(Executor): """Executor that reads kwargs from State for verification.""" @handler - async def read_kwargs(self, msgs: list[ChatMessage], ctx: WorkflowContext[list[ChatMessage]]) -> None: + async def read_kwargs(self, msgs: list[Message], ctx: WorkflowContext[list[Message]]) -> None: kwargs_from_state = ctx.get_state(WORKFLOW_RUN_KWARGS_KEY) captured_kwargs_from_state.append(kwargs_from_state or {}) await ctx.send_message(msgs) diff --git 
a/python/packages/declarative/agent_framework_declarative/_loader.py b/python/packages/declarative/agent_framework_declarative/_loader.py index 493787350e..b868e7d7bb 100644 --- a/python/packages/declarative/agent_framework_declarative/_loader.py +++ b/python/packages/declarative/agent_framework_declarative/_loader.py @@ -9,14 +9,14 @@ import yaml from agent_framework import ( - ChatAgent, - ChatClientProtocol, + Agent, Content, HostedCodeInterpreterTool, HostedFileSearchTool, HostedMCPSpecificApproval, HostedMCPTool, HostedWebSearchTool, + SupportsChatGetResponse, ToolProtocol, ) from agent_framework import ( @@ -124,10 +124,10 @@ class ProviderLookupError(DeclarativeLoaderError): class AgentFactory: - """Factory for creating ChatAgent instances from declarative YAML definitions. + """Factory for creating Agent instances from declarative YAML definitions. AgentFactory parses YAML agent definitions (PromptAgent kind) and creates - configured ChatAgent instances with the appropriate chat client, tools, + configured Agent instances with the appropriate chat client, tools, and response format. Examples: @@ -174,7 +174,7 @@ class AgentFactory: def __init__( self, *, - chat_client: ChatClientProtocol | None = None, + chat_client: SupportsChatGetResponse | None = None, bindings: Mapping[str, Any] | None = None, connections: Mapping[str, Any] | None = None, client_kwargs: Mapping[str, Any] | None = None, @@ -187,8 +187,8 @@ def __init__( """Create the agent factory. Args: - chat_client: An optional ChatClientProtocol instance to use as a dependency. - This will be passed to the ChatAgent that gets created. + chat_client: An optional SupportsChatGetResponse instance to use as a dependency. + This will be passed to the Agent that gets created. If you need to create multiple agents with different chat clients, do not pass this and instead provide the chat client in the YAML definition. bindings: An optional dictionary of bindings to use when creating agents. 
@@ -210,9 +210,9 @@ def __init__( Here, "Provider.ApiType" is the lookup key used when both provider and apiType are specified in the model, "Provider" is also allowed. - Package refers to which model needs to be imported, Name is the class name of the ChatClientProtocol - implementation, and model_id_field is the name of the field in the constructor - that accepts the model.id value. + Package refers to which model needs to be imported, Name is the class name of the + SupportsChatGetResponse implementation, and model_id_field is the name of the field in the + constructor that accepts the model.id value. default_provider: The default provider used when model.provider is not specified, default is "AzureAIClient". safe_mode: Whether to run in safe mode, default is True. @@ -269,8 +269,8 @@ def __init__( self.safe_mode = safe_mode load_dotenv(dotenv_path=env_file_path, encoding=env_file_encoding) - def create_agent_from_yaml_path(self, yaml_path: str | Path) -> ChatAgent: - """Create a ChatAgent from a YAML file path. + def create_agent_from_yaml_path(self, yaml_path: str | Path) -> Agent: + """Create an Agent from a YAML file path. This method does the following things: @@ -278,13 +278,13 @@ def create_agent_from_yaml_path(self, yaml_path: str | Path) -> ChatAgent: 2. Validates that the loaded object is a PromptAgent. 3. Creates the appropriate ChatClient based on the model provider and apiType. 4. Parses the tools, options, and response format from the PromptAgent. - 5. Creates and returns a ChatAgent instance with the configured properties. + 5. Creates and returns an Agent instance with the configured properties. Args: yaml_path: Path to the YAML file representation of a PromptAgent. Returns: - The ``ChatAgent`` instance created from the YAML file. + The ``Agent`` instance created from the YAML file. Raises: DeclarativeLoaderError: If the YAML does not represent a PromptAgent. 
@@ -323,8 +323,8 @@ def create_agent_from_yaml_path(self, yaml_path: str | Path) -> ChatAgent: yaml_str = f.read() return self.create_agent_from_yaml(yaml_str) - def create_agent_from_yaml(self, yaml_str: str) -> ChatAgent: - """Create a ChatAgent from a YAML string. + def create_agent_from_yaml(self, yaml_str: str) -> Agent: + """Create an Agent from a YAML string. This method does the following things: @@ -332,13 +332,13 @@ def create_agent_from_yaml(self, yaml_str: str) -> ChatAgent: 2. Validates that the loaded object is a PromptAgent. 3. Creates the appropriate ChatClient based on the model provider and apiType. 4. Parses the tools, options, and response format from the PromptAgent. - 5. Creates and returns a ChatAgent instance with the configured properties. + 5. Creates and returns an Agent instance with the configured properties. Args: yaml_str: YAML string representation of a PromptAgent. Returns: - The ``ChatAgent`` instance created from the YAML string. + The ``Agent`` instance created from the YAML string. Raises: DeclarativeLoaderError: If the YAML does not represent a PromptAgent. @@ -396,8 +396,8 @@ def create_agent_from_yaml(self, yaml_str: str) -> ChatAgent: """ return self.create_agent_from_dict(yaml.safe_load(yaml_str)) - def create_agent_from_dict(self, agent_def: dict[str, Any]) -> ChatAgent: - """Create a ChatAgent from a dictionary definition. + def create_agent_from_dict(self, agent_def: dict[str, Any]) -> Agent: + """Create an Agent from a dictionary definition. This method does the following things: @@ -405,13 +405,13 @@ def create_agent_from_dict(self, agent_def: dict[str, Any]) -> ChatAgent: 2. Validates that the loaded object is a PromptAgent. 3. Creates the appropriate ChatClient based on the model provider and apiType. 4. Parses the tools, options, and response format from the PromptAgent. - 5. Creates and returns a ChatAgent instance with the configured properties. + 5. 
Creates and returns an Agent instance with the configured properties. Args: agent_def: Dictionary representation of a PromptAgent. Returns: - The `ChatAgent` instance created from the dictionary. + The `Agent` instance created from the dictionary. Raises: DeclarativeLoaderError: If the dictionary does not represent a PromptAgent. @@ -454,7 +454,7 @@ def create_agent_from_dict(self, agent_def: dict[str, Any]) -> ChatAgent: if output_schema := prompt_agent.outputSchema: chat_options["response_format"] = _create_model_from_json_schema("agent", output_schema.to_json_schema()) # Step 3: Create the agent instance - return ChatAgent( + return Agent( chat_client=client, name=prompt_agent.name, description=prompt_agent.description, @@ -462,8 +462,8 @@ def create_agent_from_dict(self, agent_def: dict[str, Any]) -> ChatAgent: **chat_options, ) - async def create_agent_from_yaml_path_async(self, yaml_path: str | Path) -> ChatAgent: - """Async version: Create a ChatAgent from a YAML file path. + async def create_agent_from_yaml_path_async(self, yaml_path: str | Path) -> Agent: + """Async version: Create an Agent from a YAML file path. Use this method when the provider requires async initialization, such as AzureAI.ProjectProvider which creates agents on the Azure AI Agent Service. @@ -472,7 +472,7 @@ async def create_agent_from_yaml_path_async(self, yaml_path: str | Path) -> Chat yaml_path: Path to the YAML file representation of a PromptAgent. Returns: - The ``ChatAgent`` instance created from the YAML file. + The ``Agent`` instance created from the YAML file. Examples: .. code-block:: python @@ -492,8 +492,8 @@ async def create_agent_from_yaml_path_async(self, yaml_path: str | Path) -> Chat yaml_str = yaml_path.read_text() return await self.create_agent_from_yaml_async(yaml_str) - async def create_agent_from_yaml_async(self, yaml_str: str) -> ChatAgent: - """Async version: Create a ChatAgent from a YAML string. 
+ async def create_agent_from_yaml_async(self, yaml_str: str) -> Agent: + """Async version: Create an Agent from a YAML string. Use this method when the provider requires async initialization, such as AzureAI.ProjectProvider which creates agents on the Azure AI Agent Service. @@ -502,7 +502,7 @@ async def create_agent_from_yaml_async(self, yaml_str: str) -> ChatAgent: yaml_str: YAML string representation of a PromptAgent. Returns: - The ``ChatAgent`` instance created from the YAML string. + The ``Agent`` instance created from the YAML string. Examples: .. code-block:: python @@ -523,8 +523,8 @@ async def create_agent_from_yaml_async(self, yaml_str: str) -> ChatAgent: """ return await self.create_agent_from_dict_async(yaml.safe_load(yaml_str)) - async def create_agent_from_dict_async(self, agent_def: dict[str, Any]) -> ChatAgent: - """Async version: Create a ChatAgent from a dictionary definition. + async def create_agent_from_dict_async(self, agent_def: dict[str, Any]) -> Agent: + """Async version: Create an Agent from a dictionary definition. Use this method when the provider requires async initialization, such as AzureAI.ProjectProvider which creates agents on the Azure AI Agent Service. @@ -533,7 +533,7 @@ async def create_agent_from_dict_async(self, agent_def: dict[str, Any]) -> ChatA agent_def: Dictionary representation of a PromptAgent. Returns: - The ``ChatAgent`` instance created from the dictionary. + The ``Agent`` instance created from the dictionary. Examples: .. 
code-block:: python @@ -571,7 +571,7 @@ async def create_agent_from_dict_async(self, agent_def: dict[str, Any]) -> ChatA chat_options["tools"] = tools if output_schema := prompt_agent.outputSchema: chat_options["response_format"] = _create_model_from_json_schema("agent", output_schema.to_json_schema()) - return ChatAgent( + return Agent( chat_client=client, name=prompt_agent.name, description=prompt_agent.description, @@ -579,12 +579,12 @@ async def create_agent_from_dict_async(self, agent_def: dict[str, Any]) -> ChatA **chat_options, ) - async def _create_agent_with_provider(self, prompt_agent: PromptAgent, mapping: ProviderTypeMapping) -> ChatAgent: - """Create a ChatAgent using AzureAIProjectAgentProvider. + async def _create_agent_with_provider(self, prompt_agent: PromptAgent, mapping: ProviderTypeMapping) -> Agent: + """Create an Agent using AzureAIProjectAgentProvider. This method handles the special case where we use a provider that creates agents on a remote service (like Azure AI Agent Service) and returns - ChatAgent instances directly. + Agent instances directly. 
""" # Import the provider class module_name = mapping["package"] @@ -618,9 +618,9 @@ async def _create_agent_with_provider(self, prompt_agent: PromptAgent, mapping: response_format = _create_model_from_json_schema("agent", prompt_agent.outputSchema.to_json_schema()) # Create the agent using the provider - # The provider's create_agent returns a ChatAgent directly + # The provider's create_agent returns a Agent directly return cast( - ChatAgent, + Agent, await provider.create_agent( name=prompt_agent.name, model=prompt_agent.model.id if prompt_agent.model else None, @@ -631,8 +631,8 @@ async def _create_agent_with_provider(self, prompt_agent: PromptAgent, mapping: ), ) - def _get_client(self, prompt_agent: PromptAgent) -> ChatClientProtocol: - """Create the ChatClientProtocol instance based on the PromptAgent model.""" + def _get_client(self, prompt_agent: PromptAgent) -> SupportsChatGetResponse: + """Create the SupportsChatGetResponse instance based on the PromptAgent model.""" if not prompt_agent.model: # if no model is defined, use the supplied chat_client if self.chat_client: diff --git a/python/packages/declarative/agent_framework_declarative/_workflows/_actions_agents.py b/python/packages/declarative/agent_framework_declarative/_workflows/_actions_agents.py index 1a49f9b89d..b7c05b8607 100644 --- a/python/packages/declarative/agent_framework_declarative/_workflows/_actions_agents.py +++ b/python/packages/declarative/agent_framework_declarative/_workflows/_actions_agents.py @@ -14,7 +14,7 @@ from typing import Any, cast from agent_framework import get_logger -from agent_framework._types import AgentResponse, ChatMessage +from agent_framework._types import AgentResponse, Message from ._handlers import ( ActionContext, @@ -162,7 +162,7 @@ def _extract_json_from_response(text: str) -> Any: raise json.JSONDecodeError("No valid JSON found in response", text, 0) -def _build_messages_from_state(ctx: ActionContext) -> list[ChatMessage]: +def 
_build_messages_from_state(ctx: ActionContext) -> list[Message]: """Build the message list to send to an agent. This collects messages from: @@ -174,9 +174,9 @@ def _build_messages_from_state(ctx: ActionContext) -> list[ChatMessage]: ctx: The action context Returns: - List of ChatMessage objects to send to the agent + List of Message objects to send to the agent """ - messages: list[ChatMessage] = [] + messages: list[Message] = [] # Get conversation history history = ctx.state.get("conversation.messages", []) @@ -287,23 +287,23 @@ async def handle_invoke_azure_agent(ctx: ActionContext) -> AsyncGenerator[Workfl evaluated_input = ctx.state.eval_if_expression(input_messages) if evaluated_input: if isinstance(evaluated_input, str): - messages.append(ChatMessage(role="user", text=evaluated_input)) + messages.append(Message(role="user", text=evaluated_input)) elif isinstance(evaluated_input, list): for msg_item in evaluated_input: # type: ignore if isinstance(msg_item, str): - messages.append(ChatMessage(role="user", text=msg_item)) - elif isinstance(msg_item, ChatMessage): + messages.append(Message(role="user", text=msg_item)) + elif isinstance(msg_item, Message): messages.append(msg_item) elif isinstance(msg_item, dict) and "content" in msg_item: item_dict = cast(dict[str, Any], msg_item) role: str = str(item_dict.get("role", "user")) content: str = str(item_dict.get("content", "")) if role == "user": - messages.append(ChatMessage(role="user", text=content)) + messages.append(Message(role="user", text=content)) elif role == "assistant": - messages.append(ChatMessage(role="assistant", text=content)) + messages.append(Message(role="assistant", text=content)) elif role == "system": - messages.append(ChatMessage(role="system", text=content)) + messages.append(Message(role="system", text=content)) # Evaluate and include input arguments evaluated_args: dict[str, Any] = {} @@ -365,7 +365,7 @@ async def handle_invoke_azure_agent(ctx: ActionContext) -> AsyncGenerator[Workfl # 
Add to conversation history if text: - ctx.state.add_conversation_message(ChatMessage(role="assistant", text=text)) + ctx.state.add_conversation_message(Message(role="assistant", text=text)) # Store in output variables (.NET style) if output_messages_var: @@ -418,7 +418,7 @@ async def handle_invoke_azure_agent(ctx: ActionContext) -> AsyncGenerator[Workfl # Add to conversation history if text: - ctx.state.add_conversation_message(ChatMessage(role="assistant", text=text)) + ctx.state.add_conversation_message(Message(role="assistant", text=text)) # Store in output variables (.NET style) if output_messages_var: @@ -564,8 +564,8 @@ async def handle_invoke_prompt_agent(ctx: ActionContext) -> AsyncGenerator[Workf # Add input as user message if provided if input_value: if isinstance(input_value, str): - messages.append(ChatMessage(role="user", text=input_value)) - elif isinstance(input_value, ChatMessage): + messages.append(Message(role="user", text=input_value)) + elif isinstance(input_value, Message): messages.append(input_value) logger.debug(f"InvokePromptAgent: calling '{agent_name}' with {len(messages)} messages") @@ -594,7 +594,7 @@ async def handle_invoke_prompt_agent(ctx: ActionContext) -> AsyncGenerator[Workf ctx.state.set_agent_result(text=text, messages=response_messages) if text: - ctx.state.add_conversation_message(ChatMessage(role="assistant", text=text)) + ctx.state.add_conversation_message(Message(role="assistant", text=text)) if output_path: ctx.state.set(output_path, text) @@ -614,7 +614,7 @@ async def handle_invoke_prompt_agent(ctx: ActionContext) -> AsyncGenerator[Workf ctx.state.set_agent_result(text=text, messages=response_messages) if text: - ctx.state.add_conversation_message(ChatMessage(role="assistant", text=text)) + ctx.state.add_conversation_message(Message(role="assistant", text=text)) if output_path: ctx.state.set(output_path, text) diff --git a/python/packages/declarative/agent_framework_declarative/_workflows/_declarative_base.py 
b/python/packages/declarative/agent_framework_declarative/_workflows/_declarative_base.py index 11b6868ad1..9bb868135b 100644 --- a/python/packages/declarative/agent_framework_declarative/_workflows/_declarative_base.py +++ b/python/packages/declarative/agent_framework_declarative/_workflows/_declarative_base.py @@ -102,7 +102,7 @@ def _make_powerfx_safe(value: Any) -> Any: """Convert a value to a PowerFx-serializable form. PowerFx can only serialize primitive types, dicts, and lists. - Custom objects (like ChatMessage) must be converted to dicts or excluded. + Custom objects (like Message) must be converted to dicts or excluded. Args: value: Any Python value @@ -558,8 +558,8 @@ def _eval_and_replace_message_text(self, inner_expr: str) -> str: # Try "text" key first (simple dict format) if "text" in last_msg: return str(last_msg["text"]) - # Try extracting from "contents" (ChatMessage dict format) - # ChatMessage.text concatenates text from all TextContent items + # Try extracting from "contents" (Message dict format) + # Message.text concatenates text from all TextContent items contents = last_msg.get("contents", []) if isinstance(contents, list): text_parts = [] diff --git a/python/packages/declarative/agent_framework_declarative/_workflows/_executors_agents.py b/python/packages/declarative/agent_framework_declarative/_workflows/_executors_agents.py index d4300a9909..f28d283e60 100644 --- a/python/packages/declarative/agent_framework_declarative/_workflows/_executors_agents.py +++ b/python/packages/declarative/agent_framework_declarative/_workflows/_executors_agents.py @@ -20,8 +20,8 @@ from typing import Any, cast from agent_framework import ( - ChatMessage, Content, + Message, WorkflowContext, handler, response_handler, @@ -170,7 +170,7 @@ def _extract_json_from_response(text: str) -> Any: raise json.JSONDecodeError("No valid JSON found in response", text, 0) -def _validate_conversation_history(messages: list[ChatMessage], agent_name: str) -> None: +def 
_validate_conversation_history(messages: list[Message], agent_name: str) -> None: """Validate that conversation history has matching tool calls and results. This helps catch issues where tool call messages are stored without their @@ -263,7 +263,7 @@ class AgentResult: success: bool response: str agent_name: str - messages: list[ChatMessage] = field(default_factory=lambda: cast(list[ChatMessage], [])) + messages: list[Message] = field(default_factory=lambda: cast(list[Message], [])) tool_calls: list[Content] = field(default_factory=lambda: cast(list[Content], [])) error: str | None = None @@ -309,7 +309,7 @@ async def on_request(request: AgentExternalInputRequest) -> ExternalInputRespons agent_name: str agent_response: str iteration: int = 0 - messages: list[ChatMessage] = field(default_factory=lambda: cast(list[ChatMessage], [])) + messages: list[Message] = field(default_factory=lambda: cast(list[Message], [])) function_calls: list[Content] = field(default_factory=lambda: cast(list[Content], [])) @@ -340,7 +340,7 @@ class AgentExternalInputResponse: """ user_input: str - messages: list[ChatMessage] = field(default_factory=lambda: cast(list[ChatMessage], [])) + messages: list[Message] = field(default_factory=lambda: cast(list[Message], [])) function_results: dict[str, Content] = field(default_factory=lambda: cast(dict[str, Content], {})) @@ -637,20 +637,20 @@ async def _invoke_agent_and_store_results( Tuple of (accumulated_response, all_messages, tool_calls) """ accumulated_response = "" - all_messages: list[ChatMessage] = [] + all_messages: list[Message] = [] tool_calls: list[Content] = [] # Add user input to conversation history first (via state.append only) if input_text: - user_message = ChatMessage(role="user", text=input_text) + user_message = Message(role="user", text=input_text) state.append(messages_path, user_message) # Get conversation history from state AFTER adding user message # Note: We get a fresh copy to avoid mutation issues - 
conversation_history: list[ChatMessage] = state.get(messages_path) or [] + conversation_history: list[Message] = state.get(messages_path) or [] # Build messages list for agent (use history if available, otherwise just input) - messages_for_agent: list[ChatMessage] | str = conversation_history if conversation_history else input_text + messages_for_agent: list[Message] | str = conversation_history if conversation_history else input_text # Validate conversation history before invoking agent if isinstance(messages_for_agent, list) and messages_for_agent: @@ -672,7 +672,7 @@ async def _invoke_agent_and_store_results( if not isinstance(result, str): result_messages: Any = getattr(result, "messages", None) if result_messages is not None: - all_messages = list(cast(list[ChatMessage], result_messages)) + all_messages = list(cast(list[Message], result_messages)) result_tool_calls: Any = getattr(result, "tool_calls", None) if result_tool_calls is not None: tool_calls = list(cast(list[Content], result_tool_calls)) @@ -707,7 +707,7 @@ async def _invoke_agent_and_store_results( "Agent '%s': No messages in response, creating simple assistant message", agent_name, ) - assistant_message = ChatMessage(role="assistant", text=accumulated_response) + assistant_message = Message(role="assistant", text=accumulated_response) state.append(messages_path, assistant_message) # Store results in state - support both schema formats: diff --git a/python/packages/declarative/agent_framework_declarative/_workflows/_factory.py b/python/packages/declarative/agent_framework_declarative/_workflows/_factory.py index c3cfff1d21..2633f82f99 100644 --- a/python/packages/declarative/agent_framework_declarative/_workflows/_factory.py +++ b/python/packages/declarative/agent_framework_declarative/_workflows/_factory.py @@ -517,7 +517,7 @@ def register_agent(self, name: str, agent: SupportsAgentRun | AgentExecutor) -> Args: name: The name to register the agent under. 
Must match the agent name referenced in InvokeAzureAgent actions. - agent: The agent instance (typically a ChatAgent or similar). + agent: The agent instance (typically an Agent or similar). Returns: Self for method chaining. diff --git a/python/packages/declarative/agent_framework_declarative/_workflows/_state.py b/python/packages/declarative/agent_framework_declarative/_workflows/_state.py index 7d1f9e4945..31ad4124da 100644 --- a/python/packages/declarative/agent_framework_declarative/_workflows/_state.py +++ b/python/packages/declarative/agent_framework_declarative/_workflows/_state.py @@ -314,7 +314,7 @@ def add_conversation_message(self, message: Any) -> None: """Add a message to the conversation history. Args: - message: The message to add (typically a ChatMessage or similar) + message: The message to add (typically a Message or similar) """ self._conversation["messages"].append(message) self._conversation["history"].append(message) diff --git a/python/packages/declarative/tests/test_graph_coverage.py b/python/packages/declarative/tests/test_graph_coverage.py index fb88dae87e..cf622f6467 100644 --- a/python/packages/declarative/tests/test_graph_coverage.py +++ b/python/packages/declarative/tests/test_graph_coverage.py @@ -1835,8 +1835,8 @@ async def test_agent_executor_with_external_loop(self, mock_context, mock_state) } executor = InvokeAzureAgentExecutor(action_def, agents={"TestAgent": mock_agent}) - # Mock the internal method to avoid storing ChatMessage objects in state - # (PowerFx cannot serialize ChatMessage) + # Mock the internal method to avoid storing Message objects in state + # (PowerFx cannot serialize Message) with patch.object( executor, "_invoke_agent_and_store_results", diff --git a/python/packages/devui/AGENTS.md b/python/packages/devui/AGENTS.md index c478c11e2d..5213095244 100644 --- a/python/packages/devui/AGENTS.md +++ b/python/packages/devui/AGENTS.md @@ -20,7 +20,7 @@ Interactive developer UI for testing and debugging agents and 
workflows. ```python from agent_framework.devui import serve -agent = ChatAgent(...) +agent = Agent(...) serve(entities=[agent], port=8080, auto_open=True) ``` diff --git a/python/packages/devui/README.md b/python/packages/devui/README.md index f984c56799..50c2758ca3 100644 --- a/python/packages/devui/README.md +++ b/python/packages/devui/README.md @@ -17,7 +17,7 @@ pip install agent-framework-devui --pre You can also launch it programmatically ```python -from agent_framework import ChatAgent +from agent_framework import Agent from agent_framework.openai import OpenAIChatClient from agent_framework.devui import serve @@ -26,7 +26,7 @@ def get_weather(location: str) -> str: return f"Weather in {location}: 72°F and sunny" # Create your agent -agent = ChatAgent( +agent = Agent( name="WeatherAgent", chat_client=OpenAIChatClient(), tools=[get_weather] @@ -56,7 +56,7 @@ When DevUI starts with no discovered entities, it displays a **sample entity gal ```python # ✅ Correct - DevUI handles cleanup automatically mcp_tool = MCPStreamableHTTPTool(url="http://localhost:8011/mcp", chat_client=chat_client) -agent = ChatAgent(tools=mcp_tool) +agent = Agent(tools=mcp_tool) serve(entities=[agent]) ``` @@ -68,13 +68,13 @@ Register cleanup hooks to properly close credentials and resources on shutdown: ```python from azure.identity.aio import DefaultAzureCredential -from agent_framework import ChatAgent +from agent_framework import Agent from agent_framework.azure import AzureOpenAIChatClient from agent_framework_devui import register_cleanup, serve credential = DefaultAzureCredential() client = AzureOpenAIChatClient() -agent = ChatAgent(name="MyAgent", chat_client=client) +agent = Agent(name="MyAgent", chat_client=client) # Register cleanup hook - credential will be closed on shutdown register_cleanup(agent, credential.close) @@ -92,7 +92,7 @@ For your agents to be discovered by the DevUI, they must be organized in a direc ``` agents/ ├── weather_agent/ -│ ├── __init__.py # Must 
export: agent = ChatAgent(...) +│ ├── __init__.py # Must export: agent = Agent(...) │ ├── agent.py │ └── .env # Optional: API keys, config vars ├── my_workflow/ diff --git a/python/packages/devui/agent_framework_devui/__init__.py b/python/packages/devui/agent_framework_devui/__init__.py index 50010cd9cd..f703e85a63 100644 --- a/python/packages/devui/agent_framework_devui/__init__.py +++ b/python/packages/devui/agent_framework_devui/__init__.py @@ -41,7 +41,7 @@ def register_cleanup(entity: Any, *hooks: Callable[[], Any]) -> None: Single cleanup hook: >>> from agent_framework.devui import serve, register_cleanup >>> credential = DefaultAzureCredential() - >>> agent = ChatAgent(...) + >>> agent = Agent(...) >>> register_cleanup(agent, credential.close) >>> serve(entities=[agent]) @@ -52,7 +52,7 @@ def register_cleanup(entity: Any, *hooks: Callable[[], Any]) -> None: >>> # In agents/my_agent/agent.py >>> from agent_framework.devui import register_cleanup >>> credential = DefaultAzureCredential() - >>> agent = ChatAgent(...) + >>> agent = Agent(...) 
>>> register_cleanup(agent, credential.close) >>> # Run: devui ./agents """ diff --git a/python/packages/devui/agent_framework_devui/_conversations.py b/python/packages/devui/agent_framework_devui/_conversations.py index 6b271ddff5..2ea28f6e6a 100644 --- a/python/packages/devui/agent_framework_devui/_conversations.py +++ b/python/packages/devui/agent_framework_devui/_conversations.py @@ -13,11 +13,11 @@ from abc import ABC, abstractmethod from typing import Any, Literal, cast -from agent_framework import AgentThread, ChatMessage +from agent_framework import AgentThread, Message from agent_framework._workflows._checkpoint import InMemoryCheckpointStorage from openai.types.conversations import Conversation, ConversationDeletedResource from openai.types.conversations.conversation_item import ConversationItem -from openai.types.conversations.message import Message +from openai.types.conversations.message import Message as OpenAIMessage from openai.types.conversations.text_content import TextContent from openai.types.responses import ( ResponseFunctionToolCallItem, @@ -305,7 +305,7 @@ async def add_items(self, conversation_id: str, items: list[dict[str, Any]]) -> content = item.get("content", []) text = content[0].get("text", "") if content else "" - chat_msg = ChatMessage(role=role, text=text) # type: ignore[arg-type] + chat_msg = Message(role=role, text=text) # type: ignore[arg-type] chat_messages.append(chat_msg) # Add messages to AgentThread @@ -320,7 +320,7 @@ async def add_items(self, conversation_id: str, items: list[dict[str, Any]]) -> role_str = msg.role if hasattr(msg.role, "value") else str(msg.role) role = cast(MessageRole, role_str) # Safe: Agent Framework roles match OpenAI roles - # Convert ChatMessage contents to OpenAI TextContent format + # Convert Message contents to OpenAI TextContent format message_content = [] for content_item in msg.contents: if content_item.type == "text": @@ -329,7 +329,7 @@ async def add_items(self, conversation_id: str, items: 
list[dict[str, Any]]) -> message_content.append(TextContent(type="text", text=text_value)) # Create Message object (concrete type from ConversationItem union) - message = Message( + message = OpenAIMessage( id=item_id, type="message", # Required discriminator for union role=role, @@ -372,14 +372,14 @@ async def list_items( if thread.message_store: af_messages = await thread.message_store.list_messages() - # Convert each AgentFramework ChatMessage to appropriate ConversationItem type(s) + # Convert each AgentFramework Message to appropriate ConversationItem type(s) for i, msg in enumerate(af_messages): item_id = f"item_{i}" role_str = msg.role if hasattr(msg.role, "value") else str(msg.role) role = cast(MessageRole, role_str) # Safe: Agent Framework roles match OpenAI roles # Process each content item in the message - # A single ChatMessage may produce multiple ConversationItems + # A single Message may produce multiple ConversationItems # (e.g., a message with both text and a function call) message_contents: list[TextContent | ResponseInputImage | ResponseInputFile] = [] function_calls = [] @@ -464,7 +464,7 @@ async def list_items( # Create ConversationItems based on what we found # If message has text/images/files, create a Message item if message_contents: - message = Message( + message = OpenAIMessage( id=item_id, type="message", role=role, # type: ignore diff --git a/python/packages/devui/agent_framework_devui/_discovery.py b/python/packages/devui/agent_framework_devui/_discovery.py index 8058d31083..83bfde898e 100644 --- a/python/packages/devui/agent_framework_devui/_discovery.py +++ b/python/packages/devui/agent_framework_devui/_discovery.py @@ -541,8 +541,8 @@ def _has_entity_exports(self, file_path: Path) -> bool: """Check if a Python file has entity exports (agent or workflow) using AST parsing. This safely checks for module-level assignments like: - - agent = ChatAgent(...) - - workflow = WorkflowBuilder(start_executor=...)... + - agent = Agent(...) 
+ - workflow = WorkflowBuilder()... Args: file_path: Python file to check diff --git a/python/packages/devui/agent_framework_devui/_executor.py b/python/packages/devui/agent_framework_devui/_executor.py index c70c123983..b55a57cf44 100644 --- a/python/packages/devui/agent_framework_devui/_executor.py +++ b/python/packages/devui/agent_framework_devui/_executor.py @@ -305,7 +305,7 @@ async def _execute_agent( yield AgentStartedEvent() - # Convert input to proper ChatMessage or string + # Convert input to proper Message or string user_message = self._convert_input_to_chat_message(request.input) # Get thread from conversation parameter (OpenAI standard!) @@ -321,7 +321,7 @@ async def _execute_agent( if isinstance(user_message, str): logger.debug(f"Executing agent with text input: {user_message[:100]}...") else: - logger.debug(f"Executing agent with multimodal ChatMessage: {type(user_message)}") + logger.debug(f"Executing agent with multimodal Message: {type(user_message)}") # Workaround for MCP tool stale connection bug (GitHub issue pending) # When HTTP streaming ends, GeneratorExit can close MCP stdio streams @@ -534,7 +534,7 @@ async def _execute_workflow( yield {"type": "error", "message": f"Workflow execution error: {e!s}"} def _convert_input_to_chat_message(self, input_data: Any) -> Any: - """Convert OpenAI Responses API input to Agent Framework ChatMessage or string. + """Convert OpenAI Responses API input to Agent Framework Message or string. Handles various input formats including text, images, files, and multimodal content. Falls back to string extraction for simple cases. 
@@ -543,11 +543,11 @@ def _convert_input_to_chat_message(self, input_data: Any) -> Any: input_data: OpenAI ResponseInputParam (List[ResponseInputItemParam]) Returns: - ChatMessage for multimodal content, or string for simple text + Message for multimodal content, or string for simple text """ # Import Agent Framework types try: - from agent_framework import ChatMessage, Role + from agent_framework import Message, Role except ImportError: # Fallback to string extraction if Agent Framework not available return self._extract_user_message_fallback(input_data) @@ -558,24 +558,24 @@ def _convert_input_to_chat_message(self, input_data: Any) -> Any: # Handle OpenAI ResponseInputParam (List[ResponseInputItemParam]) if isinstance(input_data, list): - return self._convert_openai_input_to_chat_message(input_data, ChatMessage, Role) + return self._convert_openai_input_to_chat_message(input_data, Message, Role) # Fallback for other formats return self._extract_user_message_fallback(input_data) - def _convert_openai_input_to_chat_message(self, input_items: list[Any], ChatMessage: Any, Role: Any) -> Any: - """Convert OpenAI ResponseInputParam to Agent Framework ChatMessage. + def _convert_openai_input_to_chat_message(self, input_items: list[Any], Message: Any, Role: Any) -> Any: + """Convert OpenAI ResponseInputParam to Agent Framework Message. Processes text, images, files, and other content types from OpenAI format - to Agent Framework ChatMessage with appropriate content objects. + to Agent Framework Message with appropriate content objects. 
Args: input_items: List of OpenAI ResponseInputItemParam objects (dicts or objects) - ChatMessage: ChatMessage class for creating chat messages + Message: Message class for creating chat messages Role: Role enum for message roles Returns: - ChatMessage with converted content + Message with converted content """ contents: list[Content] = [] @@ -705,9 +705,9 @@ def _convert_openai_input_to_chat_message(self, input_items: list[Any], ChatMess if not contents: contents.append(Content.from_text(text="")) - chat_message = ChatMessage(role="user", contents=contents) + chat_message = Message(role="user", contents=contents) - logger.info(f"Created ChatMessage with {len(contents)} contents:") + logger.info(f"Created Message with {len(contents)} contents:") for idx, content in enumerate(contents): content_type = content.__class__.__name__ if hasattr(content, "media_type"): @@ -772,9 +772,9 @@ async def _parse_workflow_input(self, workflow: Any, raw_input: Any) -> Any: pass # Check for OpenAI multimodal format (list with type: "message") - # This handles ChatMessage inputs with images, files, etc. + # This handles Message inputs with images, files, etc. 
if self._is_openai_multimodal_format(raw_input): - logger.debug("Detected OpenAI multimodal format, converting to ChatMessage") + logger.debug("Detected OpenAI multimodal format, converting to Message") return self._convert_input_to_chat_message(raw_input) # Handle structured input (dict) diff --git a/python/packages/devui/agent_framework_devui/_mapper.py b/python/packages/devui/agent_framework_devui/_mapper.py index cb2ecacdd0..bcb99634cb 100644 --- a/python/packages/devui/agent_framework_devui/_mapper.py +++ b/python/packages/devui/agent_framework_devui/_mapper.py @@ -14,7 +14,7 @@ from typing import Any, Union from uuid import uuid4 -from agent_framework import ChatMessage, Content +from agent_framework import Content, Message from openai.types.responses import ( Response, ResponseContentPartAddedEvent, @@ -453,7 +453,7 @@ def _serialize_value(self, value: Any) -> Any: Handles: - Primitives (str, int, float, bool, None) - Collections (list, tuple, set, dict) - - SerializationMixin objects (ChatMessage, etc.) - calls to_dict() + - SerializationMixin objects (Message, etc.) - calls to_dict() - Pydantic models - calls model_dump() - Dataclasses - recursively serializes with asdict() - Enums - extracts value @@ -502,7 +502,7 @@ def _serialize_value(self, value: Any) -> Any: if isinstance(value, dict): return {k: self._serialize_value(v) for k, v in value.items()} - # Handle SerializationMixin (like ChatMessage) - call to_dict() + # Handle SerializationMixin (like Message) - call to_dict() if hasattr(value, "to_dict") and callable(getattr(value, "to_dict", None)): try: return value.to_dict() # type: ignore[attr-defined, no-any-return] @@ -536,7 +536,7 @@ def _serialize_value(self, value: Any) -> Any: def _serialize_request_data(self, request_data: Any) -> dict[str, Any]: """Serialize RequestInfoMessage to dict for JSON transmission. - Handles nested SerializationMixin objects (like ChatMessage) within dataclasses. 
+ Handles nested SerializationMixin objects (like Message) within dataclasses. Args: request_data: The RequestInfoMessage instance @@ -554,7 +554,7 @@ def _serialize_request_data(self, request_data: Any) -> dict[str, Any]: return {k: self._serialize_value(v) for k, v in request_data.items()} # Handle dataclasses with nested SerializationMixin objects - # We can't use asdict() directly because it doesn't handle ChatMessage + # We can't use asdict() directly because it doesn't handle Message if is_dataclass(request_data) and not isinstance(request_data, type): try: # Manually serialize each field to handle nested SerializationMixin @@ -892,17 +892,17 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> # Extract text from output data based on type text = None - if isinstance(output_data, ChatMessage): - # Handle ChatMessage (from Magentic and AgentExecutor with output_response=True) + if isinstance(output_data, Message): + # Handle Message (from Magentic and AgentExecutor with output_response=True) text = getattr(output_data, "text", None) if not text: # Fallback to string representation text = str(output_data) elif isinstance(output_data, list): - # Handle list of ChatMessage objects (from Magentic yield_output([final_answer])) + # Handle list of Message objects (from Magentic yield_output([final_answer])) text_parts = [] for item in output_data: - if isinstance(item, ChatMessage): + if isinstance(item, Message): item_text = getattr(item, "text", None) if item_text: text_parts.append(item_text) @@ -1047,7 +1047,7 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> # Create ExecutorActionItem with completed status # executor_completed event (type='executor_completed') uses 'data' field, not 'result' # Serialize the result data to ensure it's JSON-serializable - # (AgentExecutorResponse contains AgentResponse/ChatMessage which are SerializationMixin) + # (AgentExecutorResponse contains AgentResponse/Message 
which are SerializationMixin) raw_result = getattr(event, "data", None) serialized_result = self._serialize_value(raw_result) if raw_result is not None else None executor_item = ExecutorActionItem( diff --git a/python/packages/devui/agent_framework_devui/_utils.py b/python/packages/devui/agent_framework_devui/_utils.py index b715263075..3ddcbd1588 100644 --- a/python/packages/devui/agent_framework_devui/_utils.py +++ b/python/packages/devui/agent_framework_devui/_utils.py @@ -9,7 +9,7 @@ from types import UnionType from typing import Any, Union, get_args, get_origin, get_type_hints -from agent_framework import ChatMessage +from agent_framework import Message logger = logging.getLogger(__name__) @@ -124,8 +124,8 @@ def extract_executor_message_types(executor: Any) -> list[Any]: def _contains_chat_message(type_hint: Any) -> bool: - """Check whether the provided type hint directly or indirectly references ChatMessage.""" - if type_hint is ChatMessage: + """Check whether the provided type hint directly or indirectly references Message.""" + if type_hint is Message: return True origin = get_origin(type_hint) @@ -141,7 +141,7 @@ def _contains_chat_message(type_hint: Any) -> bool: def select_primary_input_type(message_types: list[Any]) -> Any | None: """Choose the most user-friendly input type for workflow inputs. - Prefers ChatMessage (or containers thereof) and then falls back to primitives. + Prefers Message (or containers thereof) and then falls back to primitives. Args: message_types: List of possible message types @@ -154,7 +154,7 @@ def select_primary_input_type(message_types: list[Any]) -> Any | None: for message_type in message_types: if _contains_chat_message(message_type): - return ChatMessage + return Message preferred = (str, dict) @@ -427,7 +427,7 @@ def generate_input_schema(input_type: type) -> dict[str, Any]: if hasattr(input_type, "model_json_schema"): return input_type.model_json_schema() # type: ignore - # 3. 
SerializationMixin classes (ChatMessage, etc.) + # 3. SerializationMixin classes (Message, etc.) if is_serialization_mixin(input_type): return generate_schema_from_serialization_mixin(input_type) @@ -521,7 +521,7 @@ def _parse_string_input(input_str: str, target_type: type) -> Any: except Exception as e: logger.debug(f"Failed to parse string as Pydantic model: {e}") - # SerializationMixin (like ChatMessage) + # SerializationMixin (like Message) if is_serialization_mixin(target_type): try: # Try parsing as JSON dict first @@ -531,7 +531,7 @@ def _parse_string_input(input_str: str, target_type: type) -> Any: return target_type.from_dict(data) # type: ignore return target_type(**data) # type: ignore - # For ChatMessage specifically: create from text + # For Message specifically: create from text # Try common field patterns common_fields = ["text", "message", "content"] sig = inspect.signature(target_type) diff --git a/python/packages/devui/agent_framework_devui/ui/assets/index.js b/python/packages/devui/agent_framework_devui/ui/assets/index.js index 276af33633..c75e2f635b 100644 --- a/python/packages/devui/agent_framework_devui/ui/assets/index.js +++ b/python/packages/devui/agent_framework_devui/ui/assets/index.js @@ -453,7 +453,7 @@ Error generating stack: `+i.message+` and value of this only key should be a state object. 
Example: { "type": "__setState", "state": { "abc123Store": { "foo": "bar" } } } `);const A=_.state[f];if(A==null)return;JSON.stringify(l.getState())!==JSON.stringify(A)&&b(A);return}l.dispatchFromDevtools&&typeof l.dispatch=="function"&&l.dispatch(_)});case"DISPATCH":switch(N.payload.type){case"RESET":return b(j),f===void 0?g?.init(l.getState()):g?.init(au(m.name));case"COMMIT":if(f===void 0){g?.init(l.getState());return}return g?.init(au(m.name));case"ROLLBACK":return bh(N.state,_=>{if(f===void 0){b(_),g?.init(l.getState());return}b(_[f]),g?.init(au(m.name))});case"JUMP_TO_STATE":case"JUMP_TO_ACTION":return bh(N.state,_=>{if(f===void 0){b(_);return}JSON.stringify(l.getState())!==JSON.stringify(_[f])&&b(_[f])});case"IMPORT_STATE":{const{nextLiftedState:_}=N.payload,A=(S=_.computedStates.slice(-1)[0])==null?void 0:S.state;if(!A)return;b(f===void 0?A:A[f]),g?.send(null,_);return}case"PAUSE_RECORDING":return y=!y}return}}),j},m5=f5,bh=(e,n)=>{let r;try{r=JSON.parse(e)}catch(a){console.error("[zustand devtools middleware] Could not parse the received json",a)}r!==void 0&&n(r)};function h5(e,n){let r;try{r=e()}catch{return}return{getItem:l=>{var c;const d=m=>m===null?null:JSON.parse(m,void 0),f=(c=r.getItem(l))!=null?c:null;return f instanceof Promise?f.then(d):d(f)},setItem:(l,c)=>r.setItem(l,JSON.stringify(c,void 0)),removeItem:l=>r.removeItem(l)}}const ep=e=>n=>{try{const r=e(n);return r instanceof Promise?r:{then(a){return ep(a)(r)},catch(a){return this}}}catch(r){return{then(a){return this},catch(a){return ep(a)(r)}}}},p5=(e,n)=>(r,a,l)=>{let c={storage:h5(()=>localStorage),partialize:N=>N,version:0,merge:(N,S)=>({...S,...N}),...n},d=!1;const f=new Set,m=new Set;let h=c.storage;if(!h)return e((...N)=>{console.warn(`[zustand persist middleware] Unable to update item '${c.name}', the given storage is currently unavailable.`),r(...N)},a,l);const g=()=>{const N=c.partialize({...a()});return 
h.setItem(c.name,{state:N,version:c.version})},x=l.setState;l.setState=(N,S)=>(x(N,S),g());const y=e((...N)=>(r(...N),g()),a,l);l.getInitialState=()=>y;let b;const j=()=>{var N,S;if(!h)return;d=!1,f.forEach(A=>{var E;return A((E=a())!=null?E:y)});const _=((S=c.onRehydrateStorage)==null?void 0:S.call(c,(N=a())!=null?N:y))||void 0;return ep(h.getItem.bind(h))(c.name).then(A=>{if(A)if(typeof A.version=="number"&&A.version!==c.version){if(c.migrate){const E=c.migrate(A.state,A.version);return E instanceof Promise?E.then(M=>[!0,M]):[!0,E]}console.error("State loaded from storage couldn't be migrated since no migrate function was provided")}else return[!1,A.state];return[!1,void 0]}).then(A=>{var E;const[M,T]=A;if(b=c.merge(T,(E=a())!=null?E:y),r(b,!0),M)return g()}).then(()=>{_?.(b,void 0),b=a(),d=!0,m.forEach(A=>A(b))}).catch(A=>{_?.(void 0,A)})};return l.persist={setOptions:N=>{c={...c,...N},N.storage&&(h=N.storage)},clearStorage:()=>{h?.removeItem(c.name)},getOptions:()=>c,rehydrate:()=>j(),hasHydrated:()=>d,onHydrate:N=>(f.add(N),()=>{f.delete(N)}),onFinishHydration:N=>(m.add(N),()=>{m.delete(N)})},c.skipHydration||j(),b||y},g5=p5,le=l5()(m5(g5(e=>({agents:[],workflows:[],entities:[],selectedAgent:void 0,isLoadingEntities:!0,entityError:null,currentConversation:void 0,availableConversations:[],chatItems:[],isStreaming:!1,isSubmitting:!1,loadingConversations:!1,inputValue:"",attachments:[],conversationUsage:{total_tokens:0,message_count:0},pendingApprovals:[],currentSession:void 
0,availableSessions:[],sessionCheckpoints:[],loadingSessions:!1,loadingCheckpoints:!1,showDebugPanel:!0,debugPanelMinimized:!1,debugPanelWidth:320,debugEvents:[],isResizing:!1,showToolCalls:!0,streamingEnabled:!0,debugPanelTab:"events",debugTraceSubTab:"spans",contextInspectorViewMode:"tokens",contextInspectorCumulative:!1,showAboutModal:!1,showGallery:!1,showDeployModal:!1,showEntityNotFoundToast:!1,toasts:[],oaiMode:{enabled:!1,model:"gpt-4o-mini"},uiMode:"developer",runtime:"python",serverCapabilities:{instrumentation:!1,openai_proxy:!1,deployment:!1},authRequired:!1,serverVersion:null,isDeploying:!1,deploymentLogs:[],lastDeployment:null,azureDeploymentEnabled:!1,setAgents:n=>e({agents:n}),setWorkflows:n=>e({workflows:n}),setEntities:n=>e({entities:n}),setSelectedAgent:n=>e({selectedAgent:n}),addAgent:n=>e(r=>({agents:[...r.agents,n]})),addWorkflow:n=>e(r=>({workflows:[...r.workflows,n]})),updateAgent:n=>e(r=>({agents:r.agents.map(a=>a.id===n.id?n:a),selectedAgent:r.selectedAgent?.id===n.id&&r.selectedAgent.type==="agent"?n:r.selectedAgent})),updateWorkflow:n=>e(r=>({workflows:r.workflows.map(a=>a.id===n.id?n:a),selectedAgent:r.selectedAgent?.id===n.id&&r.selectedAgent.type==="workflow"?n:r.selectedAgent})),removeEntity:n=>e(r=>({agents:r.agents.filter(a=>a.id!==n),workflows:r.workflows.filter(a=>a.id!==n),selectedAgent:r.selectedAgent?.id===n?void 
0:r.selectedAgent})),setEntityError:n=>e({entityError:n}),setIsLoadingEntities:n=>e({isLoadingEntities:n}),setCurrentConversation:n=>e({currentConversation:n}),setAvailableConversations:n=>e({availableConversations:n}),setChatItems:n=>e({chatItems:n}),setIsStreaming:n=>e({isStreaming:n}),setIsSubmitting:n=>e({isSubmitting:n}),setLoadingConversations:n=>e({loadingConversations:n}),setInputValue:n=>e({inputValue:n}),setAttachments:n=>e({attachments:n}),updateConversationUsage:n=>e(r=>({conversationUsage:{total_tokens:r.conversationUsage.total_tokens+n,message_count:r.conversationUsage.message_count+1}})),setPendingApprovals:n=>e({pendingApprovals:n}),setCurrentSession:n=>e({currentSession:n}),setAvailableSessions:n=>e({availableSessions:n}),setSessionCheckpoints:n=>e({sessionCheckpoints:n}),setLoadingSessions:n=>e({loadingSessions:n}),setLoadingCheckpoints:n=>e({loadingCheckpoints:n}),addSession:n=>e(r=>({availableSessions:[n,...r.availableSessions]})),removeSession:n=>e(r=>({availableSessions:r.availableSessions.filter(a=>a.conversation_id!==n),currentSession:r.currentSession?.conversation_id===n?void 0:r.currentSession,sessionCheckpoints:r.currentSession?.conversation_id===n?[]:r.sessionCheckpoints})),setShowDebugPanel:n=>e({showDebugPanel:n}),setDebugPanelMinimized:n=>e({debugPanelMinimized:n}),setDebugPanelWidth:n=>e({debugPanelWidth:n}),setShowToolCalls:n=>e({showToolCalls:n}),setStreamingEnabled:n=>e({streamingEnabled:n}),addDebugEvent:n=>e(r=>{const a=Math.floor(Date.now()/1e3),c=(r.debugEvents.length>0?r.debugEvents[r.debugEvents.length-1]:null)?._uiTimestamp??0,d=Math.max(a,c+1);return{debugEvents:[...r.debugEvents,{...n,_uiTimestamp:"created_at"in 
n&&n.created_at?n.created_at:d}]}}),clearDebugEvents:()=>e({debugEvents:[]}),setIsResizing:n=>e({isResizing:n}),setDebugPanelTab:n=>e({debugPanelTab:n}),setDebugTraceSubTab:n=>e({debugTraceSubTab:n}),setContextInspectorViewMode:n=>e({contextInspectorViewMode:n}),setContextInspectorCumulative:n=>e({contextInspectorCumulative:n}),setShowAboutModal:n=>e({showAboutModal:n}),setShowGallery:n=>e({showGallery:n}),setShowDeployModal:n=>e({showDeployModal:n}),setShowEntityNotFoundToast:n=>e({showEntityNotFoundToast:n}),addToast:n=>e(r=>({toasts:[...r.toasts,{id:`toast-${Date.now()}-${Math.random().toString(36).substr(2,9)}`,type:n.type||"info",duration:n.duration||4e3,...n}]})),removeToast:n=>e(r=>({toasts:r.toasts.filter(a=>a.id!==n)})),setOAIMode:n=>e(r=>n.enabled&&!r.oaiMode.enabled?(Object.keys(localStorage).forEach(a=>{a.startsWith("devui_convs_")&&localStorage.removeItem(a)}),{oaiMode:n,currentConversation:void 0,availableConversations:[],chatItems:[],inputValue:"",attachments:[],conversationUsage:{total_tokens:0,message_count:0},isStreaming:!1,isSubmitting:!1,pendingApprovals:[],debugEvents:[]}):!n.enabled&&r.oaiMode.enabled?(Object.keys(localStorage).forEach(a=>{a.startsWith("devui_convs_")&&localStorage.removeItem(a)}),{oaiMode:n,currentConversation:void 0,availableConversations:[],chatItems:[],inputValue:"",attachments:[],conversationUsage:{total_tokens:0,message_count:0},isStreaming:!1,isSubmitting:!1,pendingApprovals:[],debugEvents:[]}):{oaiMode:n}),toggleOAIMode:()=>e(n=>{const r=!n.oaiMode.enabled;return{oaiMode:{...n.oaiMode,enabled:r},currentConversation:void 
0,availableConversations:[],chatItems:[],inputValue:"",attachments:[],conversationUsage:{total_tokens:0,message_count:0},isStreaming:!1,isSubmitting:!1,pendingApprovals:[],debugEvents:[]}}),setServerMeta:n=>e({uiMode:n.uiMode,runtime:n.runtime,serverCapabilities:n.capabilities,authRequired:n.authRequired,serverVersion:n.version||null}),startDeployment:()=>e({isDeploying:!0,deploymentLogs:[],lastDeployment:null}),addDeploymentLog:n=>e(r=>({deploymentLogs:[...r.deploymentLogs,n]})),setDeploymentResult:n=>e({isDeploying:!1,lastDeployment:n}),stopDeployment:()=>e({isDeploying:!1}),clearDeploymentState:()=>e({isDeploying:!1,deploymentLogs:[],lastDeployment:null}),setAzureDeploymentEnabled:n=>e({azureDeploymentEnabled:n}),selectEntity:n=>{e({selectedAgent:n,currentConversation:void 0,availableConversations:[],chatItems:[],inputValue:"",attachments:[],conversationUsage:{total_tokens:0,message_count:0},isStreaming:!1,isSubmitting:!1,pendingApprovals:[],currentSession:void 0,availableSessions:[],sessionCheckpoints:[],debugEvents:[]});const r=new URL(window.location.href);r.searchParams.set("entity_id",n.id),window.history.pushState({},"",r)}}),{name:"devui-storage",partialize:e=>({showDebugPanel:e.showDebugPanel,debugPanelMinimized:e.debugPanelMinimized,debugPanelWidth:e.debugPanelWidth,showToolCalls:e.showToolCalls,streamingEnabled:e.streamingEnabled,oaiMode:e.oaiMode,azureDeploymentEnabled:e.azureDeploymentEnabled,debugPanelTab:e.debugPanelTab,debugTraceSubTab:e.debugTraceSubTab,contextInspectorViewMode:e.contextInspectorViewMode,contextInspectorCumulative:e.contextInspectorCumulative})}),{name:"DevUI Store"})),wu=Object.freeze(Object.defineProperty({__proto__:null,useDevUIStore:le},Symbol.toStringTag,{value:"Module"}));function ab({agents:e,workflows:n,entities:r,selectedItem:a,onSelect:l,onBrowseGallery:c,isLoading:d=!1,onSettingsClick:f}){const{oaiMode:m,serverVersion:h}=le();return o.jsxs("header",{className:"flex h-14 items-center gap-4 border-b 
px-4",children:[o.jsxs("div",{className:"flex items-center gap-2 font-semibold",children:[o.jsxs("svg",{width:"24",height:"24",viewBox:"0 0 805 805",fill:"none",xmlns:"http://www.w3.org/2000/svg",className:"flex-shrink-0",children:[o.jsx("path",{d:"M402.488 119.713C439.197 119.713 468.955 149.472 468.955 186.18C468.955 192.086 471.708 197.849 476.915 200.635L546.702 237.977C555.862 242.879 566.95 240.96 576.092 236.023C585.476 230.955 596.218 228.078 607.632 228.078C644.341 228.078 674.098 257.836 674.099 294.545C674.099 316.95 663.013 336.765 646.028 348.806C637.861 354.595 631.412 363.24 631.412 373.251V430.818C631.412 440.83 637.861 449.475 646.028 455.264C663.013 467.305 674.099 487.121 674.099 509.526C674.099 546.235 644.341 575.994 607.632 575.994C598.598 575.994 589.985 574.191 582.133 570.926C573.644 567.397 563.91 566.393 555.804 570.731L469.581 616.867C469.193 617.074 468.955 617.479 468.955 617.919C468.955 654.628 439.197 684.386 402.488 684.386C365.779 684.386 336.021 654.628 336.021 617.919C336.021 616.802 335.423 615.765 334.439 615.238L249.895 570C241.61 565.567 231.646 566.713 223.034 570.472C214.898 574.024 205.914 575.994 196.47 575.994C159.761 575.994 130.002 546.235 130.002 509.526C130.002 486.66 141.549 466.49 159.13 454.531C167.604 448.766 174.349 439.975 174.349 429.726V372.538C174.349 362.289 167.604 353.498 159.13 347.734C141.549 335.774 130.002 315.604 130.002 292.738C130.002 256.029 159.761 226.271 196.47 226.271C208.223 226.271 219.263 229.322 228.843 234.674C238.065 239.827 249.351 241.894 258.666 236.91L328.655 199.459C333.448 196.895 336.021 191.616 336.021 186.18C336.021 149.471 365.779 119.713 402.488 119.713ZM475.716 394.444C471.337 396.787 468.955 401.586 468.955 406.552C468.955 429.68 457.142 450.048 439.221 461.954C430.571 467.7 423.653 476.574 423.653 486.959V537.511C423.653 547.896 430.746 556.851 439.379 562.622C449 569.053 461.434 572.052 471.637 566.592L527.264 536.826C536.887 531.677 541.164 520.44 541.164 509.526C541.164 
485.968 553.42 465.272 571.904 453.468C580.846 447.757 588.054 438.749 588.054 428.139V371.427C588.054 363.494 582.671 356.676 575.716 352.862C569.342 349.366 561.663 348.454 555.253 351.884L475.716 394.444ZM247.992 349.841C241.997 346.633 234.806 347.465 228.873 350.785C222.524 354.337 217.706 360.639 217.706 367.915V429.162C217.706 439.537 224.611 448.404 233.248 454.152C251.144 466.062 262.937 486.417 262.937 509.526C262.937 519.654 267.026 529.991 275.955 534.769L334.852 566.284C344.582 571.49 356.362 568.81 365.528 562.667C373.735 557.166 380.296 548.643 380.296 538.764V486.305C380.296 476.067 373.564 467.282 365.103 461.516C347.548 449.552 336.021 429.398 336.021 406.552C336.021 400.967 333.389 395.536 328.465 392.902L247.992 349.841ZM270.019 280.008C265.421 282.469 262.936 287.522 262.937 292.738C262.937 293.308 262.929 293.876 262.915 294.443C262.615 306.354 266.961 318.871 277.466 324.492L334.017 354.751C344.13 360.163 356.442 357.269 366.027 350.969C376.495 344.088 389.024 340.085 402.488 340.085C416.203 340.085 428.947 344.239 439.532 351.357C449.163 357.834 461.63 360.861 471.864 355.385L526.625 326.083C537.106 320.474 541.458 307.999 541.182 296.115C541.17 295.593 541.164 295.069 541.164 294.545C541.164 288.551 538.376 282.696 533.091 279.868L463.562 242.664C454.384 237.753 443.274 239.688 434.123 244.65C424.716 249.75 413.941 252.647 402.488 252.647C390.83 252.647 379.873 249.646 370.348 244.373C361.148 239.281 349.917 237.256 340.646 242.217L270.019 280.008Z",fill:"url(#paint0_linear_510_1294)"}),o.jsx("defs",{children:o.jsxs("linearGradient",{id:"paint0_linear_510_1294",x1:"255.628",y1:"-34.3245",x2:"618.483",y2:"632.032",gradientUnits:"userSpaceOnUse",children:[o.jsx("stop",{stopColor:"#D59FFF"}),o.jsx("stop",{offset:"1",stopColor:"#8562C5"})]})})]}),"Dev UI",h&&o.jsxs("span",{className:"text-xs text-muted-foreground ml-1",children:["v",h]}),m.enabled&&o.jsxs(ut,{variant:"secondary",className:"gap-1 ml-2",children:[o.jsx(og,{className:"h-3 
w-3"}),"OpenAI: ",m.model]})]}),!m.enabled&&o.jsx(YM,{agents:e,workflows:n,entities:r,selectedItem:a,onSelect:l,onBrowseGallery:c,isLoading:d}),o.jsx("div",{className:"flex-1"}),o.jsxs("div",{className:"flex items-center gap-2 ml-auto",children:[o.jsx(s5,{}),o.jsx(Le,{variant:"ghost",size:"sm",onClick:g=>{g.stopPropagation(),f?.()},children:o.jsx(Jh,{className:"h-4 w-4"})})]})]})}function tp(e,[n,r]){return Math.min(r,Math.max(n,e))}function x5(e,n){return w.useReducer((r,a)=>n[r][a]??r,e)}var ig="ScrollArea",[dN,W7]=Kn(ig),[y5,$n]=dN(ig),fN=w.forwardRef((e,n)=>{const{__scopeScrollArea:r,type:a="hover",dir:l,scrollHideDelay:c=600,...d}=e,[f,m]=w.useState(null),[h,g]=w.useState(null),[x,y]=w.useState(null),[b,j]=w.useState(null),[N,S]=w.useState(null),[_,A]=w.useState(0),[E,M]=w.useState(0),[T,D]=w.useState(!1),[z,H]=w.useState(!1),q=rt(n,W=>m(W)),X=jl(l);return o.jsx(y5,{scope:r,type:a,dir:X,scrollHideDelay:c,scrollArea:f,viewport:h,onViewportChange:g,content:x,onContentChange:y,scrollbarX:b,onScrollbarXChange:j,scrollbarXEnabled:T,onScrollbarXEnabledChange:D,scrollbarY:N,onScrollbarYChange:S,scrollbarYEnabled:z,onScrollbarYEnabledChange:H,onCornerWidthChange:A,onCornerHeightChange:M,children:o.jsx(Ye.div,{dir:X,...d,ref:q,style:{position:"relative","--radix-scroll-area-corner-width":_+"px","--radix-scroll-area-corner-height":E+"px",...e.style}})})});fN.displayName=ig;var mN="ScrollAreaViewport",hN=w.forwardRef((e,n)=>{const{__scopeScrollArea:r,children:a,nonce:l,...c}=e,d=$n(mN,r),f=w.useRef(null),m=rt(n,f,d.onViewportChange);return 
o.jsxs(o.Fragment,{children:[o.jsx("style",{dangerouslySetInnerHTML:{__html:"[data-radix-scroll-area-viewport]{scrollbar-width:none;-ms-overflow-style:none;-webkit-overflow-scrolling:touch;}[data-radix-scroll-area-viewport]::-webkit-scrollbar{display:none}"},nonce:l}),o.jsx(Ye.div,{"data-radix-scroll-area-viewport":"",...c,ref:m,style:{overflowX:d.scrollbarXEnabled?"scroll":"hidden",overflowY:d.scrollbarYEnabled?"scroll":"hidden",...e.style},children:o.jsx("div",{ref:d.onContentChange,style:{minWidth:"100%",display:"table"},children:a})})]})});hN.displayName=mN;var xs="ScrollAreaScrollbar",lg=w.forwardRef((e,n)=>{const{forceMount:r,...a}=e,l=$n(xs,e.__scopeScrollArea),{onScrollbarXEnabledChange:c,onScrollbarYEnabledChange:d}=l,f=e.orientation==="horizontal";return w.useEffect(()=>(f?c(!0):d(!0),()=>{f?c(!1):d(!1)}),[f,c,d]),l.type==="hover"?o.jsx(v5,{...a,ref:n,forceMount:r}):l.type==="scroll"?o.jsx(b5,{...a,ref:n,forceMount:r}):l.type==="auto"?o.jsx(pN,{...a,ref:n,forceMount:r}):l.type==="always"?o.jsx(cg,{...a,ref:n}):null});lg.displayName=xs;var v5=w.forwardRef((e,n)=>{const{forceMount:r,...a}=e,l=$n(xs,e.__scopeScrollArea),[c,d]=w.useState(!1);return w.useEffect(()=>{const f=l.scrollArea;let m=0;if(f){const h=()=>{window.clearTimeout(m),d(!0)},g=()=>{m=window.setTimeout(()=>d(!1),l.scrollHideDelay)};return 
f.addEventListener("pointerenter",h),f.addEventListener("pointerleave",g),()=>{window.clearTimeout(m),f.removeEventListener("pointerenter",h),f.removeEventListener("pointerleave",g)}}},[l.scrollArea,l.scrollHideDelay]),o.jsx(Cn,{present:r||c,children:o.jsx(pN,{"data-state":c?"visible":"hidden",...a,ref:n})})}),b5=w.forwardRef((e,n)=>{const{forceMount:r,...a}=e,l=$n(xs,e.__scopeScrollArea),c=e.orientation==="horizontal",d=Sd(()=>m("SCROLL_END"),100),[f,m]=x5("hidden",{hidden:{SCROLL:"scrolling"},scrolling:{SCROLL_END:"idle",POINTER_ENTER:"interacting"},interacting:{SCROLL:"interacting",POINTER_LEAVE:"idle"},idle:{HIDE:"hidden",SCROLL:"scrolling",POINTER_ENTER:"interacting"}});return w.useEffect(()=>{if(f==="idle"){const h=window.setTimeout(()=>m("HIDE"),l.scrollHideDelay);return()=>window.clearTimeout(h)}},[f,l.scrollHideDelay,m]),w.useEffect(()=>{const h=l.viewport,g=c?"scrollLeft":"scrollTop";if(h){let x=h[g];const y=()=>{const b=h[g];x!==b&&(m("SCROLL"),d()),x=b};return h.addEventListener("scroll",y),()=>h.removeEventListener("scroll",y)}},[l.viewport,c,m,d]),o.jsx(Cn,{present:r||f!=="hidden",children:o.jsx(cg,{"data-state":f==="hidden"?"hidden":"visible",...a,ref:n,onPointerEnter:ke(e.onPointerEnter,()=>m("POINTER_ENTER")),onPointerLeave:ke(e.onPointerLeave,()=>m("POINTER_LEAVE"))})})}),pN=w.forwardRef((e,n)=>{const r=$n(xs,e.__scopeScrollArea),{forceMount:a,...l}=e,[c,d]=w.useState(!1),f=e.orientation==="horizontal",m=Sd(()=>{if(r.viewport){const h=r.viewport.offsetWidth{const{orientation:r="vertical",...a}=e,l=$n(xs,e.__scopeScrollArea),c=w.useRef(null),d=w.useRef(0),[f,m]=w.useState({content:0,viewport:0,scrollbar:{size:0,paddingStart:0,paddingEnd:0}}),h=bN(f.viewport,f.content),g={...a,sizes:f,onSizesChange:m,hasThumb:h>0&&h<1,onThumbChange:y=>c.current=y,onThumbPointerUp:()=>d.current=0,onThumbPointerDown:y=>d.current=y};function x(y,b){return E5(y,d.current,f,b)}return 
r==="horizontal"?o.jsx(w5,{...g,ref:n,onThumbPositionChange:()=>{if(l.viewport&&c.current){const y=l.viewport.scrollLeft,b=ib(y,f,l.dir);c.current.style.transform=`translate3d(${b}px, 0, 0)`}},onWheelScroll:y=>{l.viewport&&(l.viewport.scrollLeft=y)},onDragScroll:y=>{l.viewport&&(l.viewport.scrollLeft=x(y,l.dir))}}):r==="vertical"?o.jsx(N5,{...g,ref:n,onThumbPositionChange:()=>{if(l.viewport&&c.current){const y=l.viewport.scrollTop,b=ib(y,f);c.current.style.transform=`translate3d(0, ${b}px, 0)`}},onWheelScroll:y=>{l.viewport&&(l.viewport.scrollTop=y)},onDragScroll:y=>{l.viewport&&(l.viewport.scrollTop=x(y))}}):null}),w5=w.forwardRef((e,n)=>{const{sizes:r,onSizesChange:a,...l}=e,c=$n(xs,e.__scopeScrollArea),[d,f]=w.useState(),m=w.useRef(null),h=rt(n,m,c.onScrollbarXChange);return w.useEffect(()=>{m.current&&f(getComputedStyle(m.current))},[m]),o.jsx(xN,{"data-orientation":"horizontal",...l,ref:h,sizes:r,style:{bottom:0,left:c.dir==="rtl"?"var(--radix-scroll-area-corner-width)":0,right:c.dir==="ltr"?"var(--radix-scroll-area-corner-width)":0,"--radix-scroll-area-thumb-width":jd(r)+"px",...e.style},onThumbPointerDown:g=>e.onThumbPointerDown(g.x),onDragScroll:g=>e.onDragScroll(g.x),onWheelScroll:(g,x)=>{if(c.viewport){const y=c.viewport.scrollLeft+g.deltaX;e.onWheelScroll(y),NN(y,x)&&g.preventDefault()}},onResize:()=>{m.current&&c.viewport&&d&&a({content:c.viewport.scrollWidth,viewport:c.viewport.offsetWidth,scrollbar:{size:m.current.clientWidth,paddingStart:Fu(d.paddingLeft),paddingEnd:Fu(d.paddingRight)}})}})}),N5=w.forwardRef((e,n)=>{const{sizes:r,onSizesChange:a,...l}=e,c=$n(xs,e.__scopeScrollArea),[d,f]=w.useState(),m=w.useRef(null),h=rt(n,m,c.onScrollbarYChange);return w.useEffect(()=>{m.current&&f(getComputedStyle(m.current))},[m]),o.jsx(xN,{"data-orientation":"vertical",...l,ref:h,sizes:r,style:{top:0,right:c.dir==="ltr"?0:void 0,left:c.dir==="rtl"?0:void 
0,bottom:"var(--radix-scroll-area-corner-height)","--radix-scroll-area-thumb-height":jd(r)+"px",...e.style},onThumbPointerDown:g=>e.onThumbPointerDown(g.y),onDragScroll:g=>e.onDragScroll(g.y),onWheelScroll:(g,x)=>{if(c.viewport){const y=c.viewport.scrollTop+g.deltaY;e.onWheelScroll(y),NN(y,x)&&g.preventDefault()}},onResize:()=>{m.current&&c.viewport&&d&&a({content:c.viewport.scrollHeight,viewport:c.viewport.offsetHeight,scrollbar:{size:m.current.clientHeight,paddingStart:Fu(d.paddingTop),paddingEnd:Fu(d.paddingBottom)}})}})}),[j5,gN]=dN(xs),xN=w.forwardRef((e,n)=>{const{__scopeScrollArea:r,sizes:a,hasThumb:l,onThumbChange:c,onThumbPointerUp:d,onThumbPointerDown:f,onThumbPositionChange:m,onDragScroll:h,onWheelScroll:g,onResize:x,...y}=e,b=$n(xs,r),[j,N]=w.useState(null),S=rt(n,q=>N(q)),_=w.useRef(null),A=w.useRef(""),E=b.viewport,M=a.content-a.viewport,T=Zt(g),D=Zt(m),z=Sd(x,10);function H(q){if(_.current){const X=q.clientX-_.current.left,W=q.clientY-_.current.top;h({x:X,y:W})}}return w.useEffect(()=>{const q=X=>{const W=X.target;j?.contains(W)&&T(X,M)};return document.addEventListener("wheel",q,{passive:!1}),()=>document.removeEventListener("wheel",q,{passive:!1})},[E,j,M,T]),w.useEffect(D,[a,D]),Ca(j,z),Ca(b.content,z),o.jsx(j5,{scope:r,scrollbar:j,hasThumb:l,onThumbChange:Zt(c),onThumbPointerUp:Zt(d),onThumbPositionChange:D,onThumbPointerDown:Zt(f),children:o.jsx(Ye.div,{...y,ref:S,style:{position:"absolute",...y.style},onPointerDown:ke(e.onPointerDown,q=>{q.button===0&&(q.target.setPointerCapture(q.pointerId),_.current=j.getBoundingClientRect(),A.current=document.body.style.webkitUserSelect,document.body.style.webkitUserSelect="none",b.viewport&&(b.viewport.style.scrollBehavior="auto"),H(q))}),onPointerMove:ke(e.onPointerMove,H),onPointerUp:ke(e.onPointerUp,q=>{const 
X=q.target;X.hasPointerCapture(q.pointerId)&&X.releasePointerCapture(q.pointerId),document.body.style.webkitUserSelect=A.current,b.viewport&&(b.viewport.style.scrollBehavior=""),_.current=null})})})}),qu="ScrollAreaThumb",yN=w.forwardRef((e,n)=>{const{forceMount:r,...a}=e,l=gN(qu,e.__scopeScrollArea);return o.jsx(Cn,{present:r||l.hasThumb,children:o.jsx(S5,{ref:n,...a})})}),S5=w.forwardRef((e,n)=>{const{__scopeScrollArea:r,style:a,...l}=e,c=$n(qu,r),d=gN(qu,r),{onThumbPositionChange:f}=d,m=rt(n,x=>d.onThumbChange(x)),h=w.useRef(void 0),g=Sd(()=>{h.current&&(h.current(),h.current=void 0)},100);return w.useEffect(()=>{const x=c.viewport;if(x){const y=()=>{if(g(),!h.current){const b=C5(x,f);h.current=b,f()}};return f(),x.addEventListener("scroll",y),()=>x.removeEventListener("scroll",y)}},[c.viewport,g,f]),o.jsx(Ye.div,{"data-state":d.hasThumb?"visible":"hidden",...l,ref:m,style:{width:"var(--radix-scroll-area-thumb-width)",height:"var(--radix-scroll-area-thumb-height)",...a},onPointerDownCapture:ke(e.onPointerDownCapture,x=>{const b=x.target.getBoundingClientRect(),j=x.clientX-b.left,N=x.clientY-b.top;d.onThumbPointerDown({x:j,y:N})}),onPointerUp:ke(e.onPointerUp,d.onThumbPointerUp)})});yN.displayName=qu;var ug="ScrollAreaCorner",vN=w.forwardRef((e,n)=>{const r=$n(ug,e.__scopeScrollArea),a=!!(r.scrollbarX&&r.scrollbarY);return r.type!=="scroll"&&a?o.jsx(_5,{...e,ref:n}):null});vN.displayName=ug;var _5=w.forwardRef((e,n)=>{const{__scopeScrollArea:r,...a}=e,l=$n(ug,r),[c,d]=w.useState(0),[f,m]=w.useState(0),h=!!(c&&f);return Ca(l.scrollbarX,()=>{const g=l.scrollbarX?.offsetHeight||0;l.onCornerHeightChange(g),m(g)}),Ca(l.scrollbarY,()=>{const g=l.scrollbarY?.offsetWidth||0;l.onCornerWidthChange(g),d(g)}),h?o.jsx(Ye.div,{...a,ref:n,style:{width:c,height:f,position:"absolute",right:l.dir==="ltr"?0:void 0,left:l.dir==="rtl"?0:void 0,bottom:0,...e.style}}):null});function Fu(e){return e?parseInt(e,10):0}function bN(e,n){const r=e/n;return isNaN(r)?0:r}function jd(e){const 
n=bN(e.viewport,e.content),r=e.scrollbar.paddingStart+e.scrollbar.paddingEnd,a=(e.scrollbar.size-r)*n;return Math.max(a,18)}function E5(e,n,r,a="ltr"){const l=jd(r),c=l/2,d=n||c,f=l-d,m=r.scrollbar.paddingStart+d,h=r.scrollbar.size-r.scrollbar.paddingEnd-f,g=r.content-r.viewport,x=a==="ltr"?[0,g]:[g*-1,0];return wN([m,h],x)(e)}function ib(e,n,r="ltr"){const a=jd(n),l=n.scrollbar.paddingStart+n.scrollbar.paddingEnd,c=n.scrollbar.size-l,d=n.content-n.viewport,f=c-a,m=r==="ltr"?[0,d]:[d*-1,0],h=tp(e,m);return wN([0,d],[0,f])(h)}function wN(e,n){return r=>{if(e[0]===e[1]||n[0]===n[1])return n[0];const a=(n[1]-n[0])/(e[1]-e[0]);return n[0]+a*(r-e[0])}}function NN(e,n){return e>0&&e{})=>{let r={left:e.scrollLeft,top:e.scrollTop},a=0;return(function l(){const c={left:e.scrollLeft,top:e.scrollTop},d=r.left!==c.left,f=r.top!==c.top;(d||f)&&n(),r=c,a=window.requestAnimationFrame(l)})(),()=>window.cancelAnimationFrame(a)};function Sd(e,n){const r=Zt(e),a=w.useRef(0);return w.useEffect(()=>()=>window.clearTimeout(a.current),[]),w.useCallback(()=>{window.clearTimeout(a.current),a.current=window.setTimeout(r,n)},[r,n])}function Ca(e,n){const r=Zt(n);Wt(()=>{let a=0;if(e){const l=new ResizeObserver(()=>{cancelAnimationFrame(a),a=window.requestAnimationFrame(r)});return l.observe(e),()=>{window.cancelAnimationFrame(a),l.unobserve(e)}}},[e,r])}var jN=fN,k5=hN,T5=vN;const Wn=w.forwardRef(({className:e,children:n,...r},a)=>o.jsxs(jN,{ref:a,className:We("relative overflow-hidden",e),...r,children:[o.jsx(k5,{className:"h-full w-full rounded-[inherit]",children:n}),o.jsx(SN,{}),o.jsx(T5,{})]}));Wn.displayName=jN.displayName;const SN=w.forwardRef(({className:e,orientation:n="vertical",...r},a)=>o.jsx(lg,{ref:a,orientation:n,className:We("flex touch-none select-none transition-colors",n==="vertical"&&"h-full w-2.5 border-l border-l-transparent p-[1px]",n==="horizontal"&&"h-2.5 flex-col border-t border-t-transparent p-[1px]",e),...r,children:o.jsx(yN,{className:"relative flex-1 
rounded-full bg-border"})}));SN.displayName=lg.displayName;var _d="Tabs",[A5,K7]=Kn(_d,[md]),_N=md(),[M5,dg]=A5(_d),EN=w.forwardRef((e,n)=>{const{__scopeTabs:r,value:a,onValueChange:l,defaultValue:c,orientation:d="horizontal",dir:f,activationMode:m="automatic",...h}=e,g=jl(f),[x,y]=Ar({prop:a,onChange:l,defaultProp:c??"",caller:_d});return o.jsx(M5,{scope:r,baseId:Mr(),value:x,onValueChange:y,orientation:d,dir:g,activationMode:m,children:o.jsx(Ye.div,{dir:g,"data-orientation":d,...h,ref:n})})});EN.displayName=_d;var CN="TabsList",kN=w.forwardRef((e,n)=>{const{__scopeTabs:r,loop:a=!0,...l}=e,c=dg(CN,r),d=_N(r);return o.jsx(d1,{asChild:!0,...d,orientation:c.orientation,dir:c.dir,loop:a,children:o.jsx(Ye.div,{role:"tablist","aria-orientation":c.orientation,...l,ref:n})})});kN.displayName=CN;var TN="TabsTrigger",AN=w.forwardRef((e,n)=>{const{__scopeTabs:r,value:a,disabled:l=!1,...c}=e,d=dg(TN,r),f=_N(r),m=DN(d.baseId,a),h=ON(d.baseId,a),g=a===d.value;return o.jsx(f1,{asChild:!0,...f,focusable:!l,active:g,children:o.jsx(Ye.button,{type:"button",role:"tab","aria-selected":g,"aria-controls":h,"data-state":g?"active":"inactive","data-disabled":l?"":void 0,disabled:l,id:m,...c,ref:n,onMouseDown:ke(e.onMouseDown,x=>{!l&&x.button===0&&x.ctrlKey===!1?d.onValueChange(a):x.preventDefault()}),onKeyDown:ke(e.onKeyDown,x=>{[" ","Enter"].includes(x.key)&&d.onValueChange(a)}),onFocus:ke(e.onFocus,()=>{const x=d.activationMode!=="manual";!g&&!l&&x&&d.onValueChange(a)})})})});AN.displayName=TN;var MN="TabsContent",RN=w.forwardRef((e,n)=>{const{__scopeTabs:r,value:a,forceMount:l,children:c,...d}=e,f=dg(MN,r),m=DN(f.baseId,a),h=ON(f.baseId,a),g=a===f.value,x=w.useRef(g);return w.useEffect(()=>{const 
y=requestAnimationFrame(()=>x.current=!1);return()=>cancelAnimationFrame(y)},[]),o.jsx(Cn,{present:l||g,children:({present:y})=>o.jsx(Ye.div,{"data-state":g?"active":"inactive","data-orientation":f.orientation,role:"tabpanel","aria-labelledby":m,hidden:!y,id:h,tabIndex:0,...d,ref:n,style:{...e.style,animationDuration:x.current?"0s":void 0},children:y&&c})})});RN.displayName=MN;function DN(e,n){return`${e}-trigger-${n}`}function ON(e,n){return`${e}-content-${n}`}var R5=EN,zN=kN,IN=AN,LN=RN;const D5=R5,$N=w.forwardRef(({className:e,...n},r)=>o.jsx(zN,{ref:r,className:We("inline-flex h-9 items-center justify-center rounded-lg bg-muted p-1 text-muted-foreground",e),...n}));$N.displayName=zN.displayName;const Nu=w.forwardRef(({className:e,...n},r)=>o.jsx(IN,{ref:r,className:We("inline-flex items-center justify-center whitespace-nowrap rounded-md px-3 py-1 text-sm font-medium ring-offset-background transition-all focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:pointer-events-none disabled:opacity-50 data-[state=active]:bg-background data-[state=active]:text-foreground data-[state=active]:shadow",e),...n}));Nu.displayName=IN.displayName;const ju=w.forwardRef(({className:e,...n},r)=>o.jsx(LN,{ref:r,className:We("mt-2 ring-offset-background focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2",e),...n}));ju.displayName=LN.displayName;function fg(e){const n=w.useRef({value:e,previous:e});return w.useMemo(()=>(n.current.value!==e&&(n.current.previous=n.current.value,n.current.value=e),n.current.previous),[e])}var Ed="Checkbox",[O5,Q7]=Kn(Ed),[z5,mg]=O5(Ed);function 
I5(e){const{__scopeCheckbox:n,checked:r,children:a,defaultChecked:l,disabled:c,form:d,name:f,onCheckedChange:m,required:h,value:g="on",internal_do_not_use_render:x}=e,[y,b]=Ar({prop:r,defaultProp:l??!1,onChange:m,caller:Ed}),[j,N]=w.useState(null),[S,_]=w.useState(null),A=w.useRef(!1),E=j?!!d||!!j.closest("form"):!0,M={checked:y,disabled:c,setChecked:b,control:j,setControl:N,name:f,form:d,value:g,hasConsumerStoppedPropagationRef:A,required:h,defaultChecked:Tr(l)?!1:l,isFormControl:E,bubbleInput:S,setBubbleInput:_};return o.jsx(z5,{scope:n,...M,children:L5(x)?x(M):a})}var PN="CheckboxTrigger",HN=w.forwardRef(({__scopeCheckbox:e,onKeyDown:n,onClick:r,...a},l)=>{const{control:c,value:d,disabled:f,checked:m,required:h,setControl:g,setChecked:x,hasConsumerStoppedPropagationRef:y,isFormControl:b,bubbleInput:j}=mg(PN,e),N=rt(l,g),S=w.useRef(m);return w.useEffect(()=>{const _=c?.form;if(_){const A=()=>x(S.current);return _.addEventListener("reset",A),()=>_.removeEventListener("reset",A)}},[c,x]),o.jsx(Ye.button,{type:"button",role:"checkbox","aria-checked":Tr(m)?"mixed":m,"aria-required":h,"data-state":YN(m),"data-disabled":f?"":void 0,disabled:f,value:d,...a,ref:N,onKeyDown:ke(n,_=>{_.key==="Enter"&&_.preventDefault()}),onClick:ke(r,_=>{x(A=>Tr(A)?!0:!A),j&&b&&(y.current=_.isPropagationStopped(),y.current||_.stopPropagation())})})});HN.displayName=PN;var UN=w.forwardRef((e,n)=>{const{__scopeCheckbox:r,name:a,checked:l,defaultChecked:c,required:d,disabled:f,value:m,onCheckedChange:h,form:g,...x}=e;return o.jsx(I5,{__scopeCheckbox:r,checked:l,defaultChecked:c,disabled:f,required:d,onCheckedChange:h,name:a,form:g,value:m,internal_do_not_use_render:({isFormControl:y})=>o.jsxs(o.Fragment,{children:[o.jsx(HN,{...x,ref:n,__scopeCheckbox:r}),y&&o.jsx(FN,{__scopeCheckbox:r})]})})});UN.displayName=Ed;var BN="CheckboxIndicator",VN=w.forwardRef((e,n)=>{const{__scopeCheckbox:r,forceMount:a,...l}=e,c=mg(BN,r);return 
o.jsx(Cn,{present:a||Tr(c.checked)||c.checked===!0,children:o.jsx(Ye.span,{"data-state":YN(c.checked),"data-disabled":c.disabled?"":void 0,...l,ref:n,style:{pointerEvents:"none",...e.style}})})});VN.displayName=BN;var qN="CheckboxBubbleInput",FN=w.forwardRef(({__scopeCheckbox:e,...n},r)=>{const{control:a,hasConsumerStoppedPropagationRef:l,checked:c,defaultChecked:d,required:f,disabled:m,name:h,value:g,form:x,bubbleInput:y,setBubbleInput:b}=mg(qN,e),j=rt(r,b),N=fg(c),S=Lp(a);w.useEffect(()=>{const A=y;if(!A)return;const E=window.HTMLInputElement.prototype,T=Object.getOwnPropertyDescriptor(E,"checked").set,D=!l.current;if(N!==c&&T){const z=new Event("click",{bubbles:D});A.indeterminate=Tr(c),T.call(A,Tr(c)?!1:c),A.dispatchEvent(z)}},[y,N,c,l]);const _=w.useRef(Tr(c)?!1:c);return o.jsx(Ye.input,{type:"checkbox","aria-hidden":!0,defaultChecked:d??_.current,required:f,disabled:m,name:h,value:g,form:x,...n,tabIndex:-1,ref:j,style:{...n.style,...S,position:"absolute",pointerEvents:"none",opacity:0,margin:0,transform:"translateX(-100%)"}})});FN.displayName=qN;function L5(e){return typeof e=="function"}function Tr(e){return e==="indeterminate"}function YN(e){return Tr(e)?"indeterminate":e?"checked":"unchecked"}function co({className:e,...n}){return o.jsx(UN,{"data-slot":"checkbox",className:We("peer border-input dark:bg-input/30 data-[state=checked]:bg-primary data-[state=checked]:text-primary-foreground dark:data-[state=checked]:bg-primary data-[state=checked]:border-primary focus-visible:border-ring focus-visible:ring-ring/50 aria-invalid:ring-destructive/20 dark:aria-invalid:ring-destructive/40 aria-invalid:border-destructive size-4 shrink-0 rounded-[4px] border shadow-xs transition-shadow outline-none focus-visible:ring-[3px] disabled:cursor-not-allowed disabled:opacity-50",e),...n,children:o.jsx(VN,{"data-slot":"checkbox-indicator",className:"flex items-center justify-center text-current transition-none",children:o.jsx(jo,{className:"size-3.5"})})})}var 
GN=Object.freeze({position:"absolute",border:0,width:1,height:1,padding:0,margin:-1,overflow:"hidden",clip:"rect(0, 0, 0, 0)",whiteSpace:"nowrap",wordWrap:"normal"}),$5="VisuallyHidden",XN=w.forwardRef((e,n)=>o.jsx(Ye.span,{...e,ref:n,style:{...GN,...e.style}}));XN.displayName=$5;var P5=XN,[Cd,J7]=Kn("Tooltip",[Ua]),kd=Ua(),ZN="TooltipProvider",H5=700,np="tooltip.open",[U5,hg]=Cd(ZN),WN=e=>{const{__scopeTooltip:n,delayDuration:r=H5,skipDelayDuration:a=300,disableHoverableContent:l=!1,children:c}=e,d=w.useRef(!0),f=w.useRef(!1),m=w.useRef(0);return w.useEffect(()=>{const h=m.current;return()=>window.clearTimeout(h)},[]),o.jsx(U5,{scope:n,isOpenDelayedRef:d,delayDuration:r,onOpen:w.useCallback(()=>{window.clearTimeout(m.current),d.current=!1},[]),onClose:w.useCallback(()=>{window.clearTimeout(m.current),m.current=window.setTimeout(()=>d.current=!0,a)},[a]),isPointerInTransitRef:f,onPointerInTransitChange:w.useCallback(h=>{f.current=h},[]),disableHoverableContent:l,children:c})};WN.displayName=ZN;var ul="Tooltip",[B5,Tl]=Cd(ul),KN=e=>{const{__scopeTooltip:n,children:r,open:a,defaultOpen:l,onOpenChange:c,disableHoverableContent:d,delayDuration:f}=e,m=hg(ul,e.__scopeTooltip),h=kd(n),[g,x]=w.useState(null),y=Mr(),b=w.useRef(0),j=d??m.disableHoverableContent,N=f??m.delayDuration,S=w.useRef(!1),[_,A]=Ar({prop:a,defaultProp:l??!1,onChange:z=>{z?(m.onOpen(),document.dispatchEvent(new CustomEvent(np))):m.onClose(),c?.(z)},caller:ul}),E=w.useMemo(()=>_?S.current?"delayed-open":"instant-open":"closed",[_]),M=w.useCallback(()=>{window.clearTimeout(b.current),b.current=0,S.current=!1,A(!0)},[A]),T=w.useCallback(()=>{window.clearTimeout(b.current),b.current=0,A(!1)},[A]),D=w.useCallback(()=>{window.clearTimeout(b.current),b.current=window.setTimeout(()=>{S.current=!0,A(!0),b.current=0},N)},[N,A]);return 
w.useEffect(()=>()=>{b.current&&(window.clearTimeout(b.current),b.current=0)},[]),o.jsx(Hp,{...h,children:o.jsx(B5,{scope:n,contentId:y,open:_,stateAttribute:E,trigger:g,onTriggerChange:x,onTriggerEnter:w.useCallback(()=>{m.isOpenDelayedRef.current?D():M()},[m.isOpenDelayedRef,D,M]),onTriggerLeave:w.useCallback(()=>{j?T():(window.clearTimeout(b.current),b.current=0)},[T,j]),onOpen:M,onClose:T,disableHoverableContent:j,children:r})})};KN.displayName=ul;var sp="TooltipTrigger",QN=w.forwardRef((e,n)=>{const{__scopeTooltip:r,...a}=e,l=Tl(sp,r),c=hg(sp,r),d=kd(r),f=w.useRef(null),m=rt(n,f,l.onTriggerChange),h=w.useRef(!1),g=w.useRef(!1),x=w.useCallback(()=>h.current=!1,[]);return w.useEffect(()=>()=>document.removeEventListener("pointerup",x),[x]),o.jsx(Up,{asChild:!0,...d,children:o.jsx(Ye.button,{"aria-describedby":l.open?l.contentId:void 0,"data-state":l.stateAttribute,...a,ref:m,onPointerMove:ke(e.onPointerMove,y=>{y.pointerType!=="touch"&&!g.current&&!c.isPointerInTransitRef.current&&(l.onTriggerEnter(),g.current=!0)}),onPointerLeave:ke(e.onPointerLeave,()=>{l.onTriggerLeave(),g.current=!1}),onPointerDown:ke(e.onPointerDown,()=>{l.open&&l.onClose(),h.current=!0,document.addEventListener("pointerup",x,{once:!0})}),onFocus:ke(e.onFocus,()=>{h.current||l.onOpen()}),onBlur:ke(e.onBlur,l.onClose),onClick:ke(e.onClick,l.onClose)})})});QN.displayName=sp;var pg="TooltipPortal",[V5,q5]=Cd(pg,{forceMount:void 0}),JN=e=>{const{__scopeTooltip:n,forceMount:r,children:a,container:l}=e,c=Tl(pg,n);return o.jsx(V5,{scope:n,forceMount:r,children:o.jsx(Cn,{present:r||c.open,children:o.jsx(fd,{asChild:!0,container:l,children:a})})})};JN.displayName=pg;var ka="TooltipContent",e2=w.forwardRef((e,n)=>{const r=q5(ka,e.__scopeTooltip),{forceMount:a=r.forceMount,side:l="top",...c}=e,d=Tl(ka,e.__scopeTooltip);return o.jsx(Cn,{present:a||d.open,children:d.disableHoverableContent?o.jsx(t2,{side:l,...c,ref:n}):o.jsx(F5,{side:l,...c,ref:n})})}),F5=w.forwardRef((e,n)=>{const 
r=Tl(ka,e.__scopeTooltip),a=hg(ka,e.__scopeTooltip),l=w.useRef(null),c=rt(n,l),[d,f]=w.useState(null),{trigger:m,onClose:h}=r,g=l.current,{onPointerInTransitChange:x}=a,y=w.useCallback(()=>{f(null),x(!1)},[x]),b=w.useCallback((j,N)=>{const S=j.currentTarget,_={x:j.clientX,y:j.clientY},A=W5(_,S.getBoundingClientRect()),E=K5(_,A),M=Q5(N.getBoundingClientRect()),T=eR([...E,...M]);f(T),x(!0)},[x]);return w.useEffect(()=>()=>y(),[y]),w.useEffect(()=>{if(m&&g){const j=S=>b(S,g),N=S=>b(S,m);return m.addEventListener("pointerleave",j),g.addEventListener("pointerleave",N),()=>{m.removeEventListener("pointerleave",j),g.removeEventListener("pointerleave",N)}}},[m,g,b,y]),w.useEffect(()=>{if(d){const j=N=>{const S=N.target,_={x:N.clientX,y:N.clientY},A=m?.contains(S)||g?.contains(S),E=!J5(_,d);A?y():E&&(y(),h())};return document.addEventListener("pointermove",j),()=>document.removeEventListener("pointermove",j)}},[m,g,d,h,y]),o.jsx(t2,{...e,ref:c})}),[Y5,G5]=Cd(ul,{isInside:!1}),X5=cC("TooltipContent"),t2=w.forwardRef((e,n)=>{const{__scopeTooltip:r,children:a,"aria-label":l,onEscapeKeyDown:c,onPointerDownOutside:d,...f}=e,m=Tl(ka,r),h=kd(r),{onClose:g}=m;return w.useEffect(()=>(document.addEventListener(np,g),()=>document.removeEventListener(np,g)),[g]),w.useEffect(()=>{if(m.trigger){const x=y=>{y.target?.contains(m.trigger)&&g()};return 
window.addEventListener("scroll",x,{capture:!0}),()=>window.removeEventListener("scroll",x,{capture:!0})}},[m.trigger,g]),o.jsx(id,{asChild:!0,disableOutsidePointerEvents:!1,onEscapeKeyDown:c,onPointerDownOutside:d,onFocusOutside:x=>x.preventDefault(),onDismiss:g,children:o.jsxs(Bp,{"data-state":m.stateAttribute,...h,...f,ref:n,style:{...f.style,"--radix-tooltip-content-transform-origin":"var(--radix-popper-transform-origin)","--radix-tooltip-content-available-width":"var(--radix-popper-available-width)","--radix-tooltip-content-available-height":"var(--radix-popper-available-height)","--radix-tooltip-trigger-width":"var(--radix-popper-anchor-width)","--radix-tooltip-trigger-height":"var(--radix-popper-anchor-height)"},children:[o.jsx(X5,{children:a}),o.jsx(Y5,{scope:r,isInside:!0,children:o.jsx(P5,{id:m.contentId,role:"tooltip",children:l||a})})]})})});e2.displayName=ka;var n2="TooltipArrow",Z5=w.forwardRef((e,n)=>{const{__scopeTooltip:r,...a}=e,l=kd(r);return G5(n2,r).isInside?null:o.jsx(Vp,{...l,...a,ref:n})});Z5.displayName=n2;function W5(e,n){const r=Math.abs(n.top-e.y),a=Math.abs(n.bottom-e.y),l=Math.abs(n.right-e.x),c=Math.abs(n.left-e.x);switch(Math.min(r,a,l,c)){case c:return"left";case l:return"right";case r:return"top";case a:return"bottom";default:throw new Error("unreachable")}}function K5(e,n,r=5){const a=[];switch(n){case"top":a.push({x:e.x-r,y:e.y+r},{x:e.x+r,y:e.y+r});break;case"bottom":a.push({x:e.x-r,y:e.y-r},{x:e.x+r,y:e.y-r});break;case"left":a.push({x:e.x+r,y:e.y-r},{x:e.x+r,y:e.y+r});break;case"right":a.push({x:e.x-r,y:e.y-r},{x:e.x-r,y:e.y+r});break}return a}function Q5(e){const{top:n,right:r,bottom:a,left:l}=e;return[{x:l,y:n},{x:r,y:n},{x:r,y:a},{x:l,y:a}]}function J5(e,n){const{x:r,y:a}=e;let l=!1;for(let c=0,d=n.length-1;ca!=y>a&&r<(x-h)*(a-g)/(y-g)+h&&(l=!l)}return l}function eR(e){const n=e.slice();return n.sort((r,a)=>r.xa.x?1:r.ya.y?1:0),tR(n)}function tR(e){if(e.length<=1)return e.slice();const n=[];for(let a=0;a=2;){const 
c=n[n.length-1],d=n[n.length-2];if((c.x-d.x)*(l.y-d.y)>=(c.y-d.y)*(l.x-d.x))n.pop();else break}n.push(l)}n.pop();const r=[];for(let a=e.length-1;a>=0;a--){const l=e[a];for(;r.length>=2;){const c=r[r.length-1],d=r[r.length-2];if((c.x-d.x)*(l.y-d.y)>=(c.y-d.y)*(l.x-d.x))r.pop();else break}r.push(l)}return r.pop(),n.length===1&&r.length===1&&n[0].x===r[0].x&&n[0].y===r[0].y?n:n.concat(r)}var nR=WN,sR=KN,rR=QN,oR=JN,s2=e2;const aR=nR,iR=sR,lR=rR,r2=w.forwardRef(({className:e,sideOffset:n=4,...r},a)=>o.jsx(oR,{children:o.jsx(s2,{ref:a,sideOffset:n,className:We("z-50 overflow-hidden rounded-md bg-primary px-3 py-1.5 text-xs text-primary-foreground animate-in fade-in-0 zoom-in-95 data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=closed]:zoom-out-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2",e),...r})}));r2.displayName=s2.displayName;const fa={MODEL:"gen_ai.request.model",INPUT_TOKENS:"gen_ai.usage.input_tokens",OUTPUT_TOKENS:"gen_ai.usage.output_tokens",INPUT_MESSAGES:"gen_ai.input.messages",SYSTEM_INSTRUCTIONS:"gen_ai.system_instructions"};function cR(e){return e.type==="text"}function uR(e){return e.type==="tool_call"||e.type==="function_call"}function dR(e){return e.type==="tool_result"||e.type==="function_result"||e.type==="tool_call_response"}function fR(e){if(!e)return[];try{return JSON.parse(e)}catch{return[]}}function mR(e){const n={system:0,user:0,assistant:0,toolCalls:0,toolResults:0,total:0};try{let r;if(typeof e=="string")r=fR(e);else if(Array.isArray(e))r=e;else return n;for(const a of r){if(!a||typeof a!="object")continue;const l=a.role,c=a.parts;let d=0;if(Array.isArray(c)){for(const f of c)if(!(!f||typeof f!="object")){if(cR(f)){const m=f.content||f.text||"";d+=m.length}else if(uR(f)){const m=f.name||"",h=f.arguments||"";n.toolCalls+=m.length+h.length}else if(dR(f)){const 
m=f.result||f.response||"";n.toolResults+=m.length}}}l==="system"?n.system+=d:l==="user"?n.user+=d:l==="assistant"?n.assistant+=d:l==="tool"&&(n.toolResults+=d)}n.total=n.system+n.user+n.assistant+n.toolCalls+n.toolResults}catch{}return n}function hR(e){const n=e.filter(l=>l.type==="response.trace.completed"),r=new Map;for(const l of n){if(!("data"in l))continue;const c=l.data,d=c.response_id||"unknown";r.has(d)||r.set(d,[]),r.get(d).push(c)}const a=[];for(const[l,c]of r){let d=0,f=0,m,h=Date.now()/1e3,g,x=0,y={system:0,user:0,assistant:0,toolCalls:0,toolResults:0,total:0};for(const b of c){const j=b.attributes||{},N=j[fa.INPUT_TOKENS],S=j[fa.OUTPUT_TOKENS];N!==void 0&&(d+=Number(N)),S!==void 0&&(f+=Number(S)),j[fa.MODEL]&&(m=String(j[fa.MODEL])),b.start_time&&b.start_time0||f>0)&&a.push({response_id:l,timestamp:h,input_tokens:d,output_tokens:f,total_tokens:d+f,model:m,entity_id:g,duration_ms:x,composition:y})}return a.sort((l,c)=>l.timestamp-c.timestamp),a}function pR(e){if(e.length===0)return{totalInput:0,totalOutput:0,totalTokens:0,avgInput:0,avgOutput:0,avgTotal:0,peakInput:0,peakOutput:0,peakTotal:0,turnCount:0};const n=e.reduce((f,m)=>f+m.input_tokens,0),r=e.reduce((f,m)=>f+m.output_tokens,0),a=n+r,l=Math.max(...e.map(f=>f.input_tokens)),c=Math.max(...e.map(f=>f.output_tokens)),d=Math.max(...e.map(f=>f.total_tokens));return{totalInput:n,totalOutput:r,totalTokens:a,avgInput:Math.round(n/e.length),avgOutput:Math.round(r/e.length),avgTotal:Math.round(a/e.length),peakInput:l,peakOutput:c,peakTotal:d,turnCount:e.length}}function gR(e){return e.reduce((n,r)=>({system:n.system+r.composition.system,user:n.user+r.composition.user,assistant:n.assistant+r.composition.assistant,toolCalls:n.toolCalls+r.composition.toolCalls,toolResults:n.toolResults+r.composition.toolResults,total:n.total+r.composition.total}),{system:0,user:0,assistant:0,toolCalls:0,toolResults:0,total:0})}function In(e){return e>=1e3?`${(e/1e3).toFixed(1)}k`:String(e)}const Pt={input:"bg-blue-500 
dark:bg-blue-600",output:"bg-emerald-500 dark:bg-emerald-600",system:"bg-purple-500 dark:bg-purple-600",user:"bg-blue-500 dark:bg-blue-600",assistant:"bg-emerald-500 dark:bg-emerald-600",toolCalls:"bg-amber-500 dark:bg-amber-600",toolResults:"bg-orange-500 dark:bg-orange-600"};function lb({segments:e,maxValue:n,height:r=20,renderLabel:a}){const l=e.reduce((f,m)=>f+m.value,0);if(l===0)return o.jsx("div",{className:"flex items-center gap-2 w-full",children:o.jsx("div",{className:"rounded bg-muted/30 flex-1",style:{height:`${r}px`}})});const c=n>0?l/n*100:100,d=e.filter(f=>f.value>0).map(f=>({...f,percent:Math.round(f.value/l*100)}));return o.jsxs("div",{className:"flex items-center gap-2 w-full",children:[o.jsx("div",{className:"relative rounded overflow-hidden bg-muted/30 flex-1",style:{height:`${r}px`},children:o.jsx(aR,{delayDuration:150,children:o.jsx("div",{className:"h-full flex transition-all duration-300",style:{width:`${c}%`},children:d.map(f=>o.jsxs(iR,{children:[o.jsx(lR,{asChild:!0,children:o.jsx("div",{className:`h-full ${f.color} transition-all duration-150 hover:brightness-110 hover:scale-y-[1.15] origin-bottom cursor-default`,style:{width:`${f.value/l*100}%`}})}),o.jsx(r2,{side:"top",className:"text-xs",children:o.jsxs("div",{className:"flex items-center gap-1.5",children:[o.jsx("div",{className:`w-2 h-2 rounded-sm ${f.color} flex-shrink-0`}),o.jsx("span",{className:"font-medium",children:f.label}),o.jsxs("span",{className:"opacity-80",children:[In(f.value)," (",f.percent,"%)"]})]})})]},f.key))})})}),a?.(l,e)]})}function xR(e,n){return[{key:"input",value:e,color:Pt.input,label:"Input"},{key:"output",value:n,color:Pt.output,label:"Output"}]}function yR(e){return[{key:"system",value:e.system,color:Pt.system,label:"System"},{key:"user",value:e.user,color:Pt.user,label:"User"},{key:"assistant",value:e.assistant,color:Pt.assistant,label:"Assistant"},{key:"toolCalls",value:e.toolCalls,color:Pt.toolCalls,label:"Tool 
Calls"},{key:"toolResults",value:e.toolResults,color:Pt.toolResults,label:"Tool Results"}]}function o2({composition:e,className:n=""}){const{system:r,user:a,assistant:l,toolCalls:c,toolResults:d,total:f}=e;if(f===0)return o.jsx("div",{className:`text-xs text-muted-foreground ${n}`,children:"No composition data available"});const m=[{label:"System",value:r,color:Pt.system},{label:"User",value:a,color:Pt.user},{label:"Assistant",value:l,color:Pt.assistant},{label:"Tool Calls",value:c,color:Pt.toolCalls},{label:"Tool Results",value:d,color:Pt.toolResults}].filter(h=>h.value>0);return o.jsx("div",{className:`space-y-1.5 ${n}`,children:m.map(h=>{const g=Math.round(h.value/f*100);return o.jsxs("div",{className:"flex items-center gap-2 text-xs",children:[o.jsx("div",{className:`w-2 h-2 rounded-sm ${h.color}`}),o.jsx("span",{className:"text-muted-foreground w-20",children:h.label}),o.jsx("div",{className:"flex-1 h-3 bg-muted/30 rounded overflow-hidden",children:o.jsx("div",{className:`h-full ${h.color} transition-all duration-300`,style:{width:`${g}%`}})}),o.jsxs("span",{className:"font-mono w-10 text-right text-muted-foreground",children:[g,"%"]})]},h.label)})})}function vR({turn:e,index:n,maxValue:r,maxCompositionValue:a,cumulativeInput:l,cumulativeOutput:c,cumulativeComposition:d,showCumulative:f,viewMode:m}){const[h,g]=w.useState(!1),x=f?l:e.input_tokens,y=f?c:e.output_tokens,b=f?d:e.composition,j=new Date(e.timestamp*1e3).toLocaleTimeString([],{hour:"2-digit",minute:"2-digit",second:"2-digit"});return o.jsxs("div",{className:"border-b border-muted/50 last:border-0",children:[o.jsxs("div",{className:"flex items-center gap-3 py-2 px-2 hover:bg-muted/30 cursor-pointer transition-colors",onClick:()=>g(!h),children:[o.jsx("div",{className:"w-6 h-6 rounded-full bg-muted flex items-center justify-center text-xs font-medium flex-shrink-0",children:n+1}),o.jsx("div",{className:"flex-1 
min-w-0",children:m==="tokens"?o.jsx(lb,{segments:xR(x,y),maxValue:r,height:20,renderLabel:(N,S)=>o.jsxs("div",{className:"flex items-center gap-1 text-xs font-mono text-muted-foreground min-w-[80px] justify-end",children:[o.jsxs("span",{className:"text-blue-600 dark:text-blue-400",children:["↑",In(S[0]?.value||0)]}),o.jsx("span",{children:"/"}),o.jsxs("span",{className:"text-emerald-600 dark:text-emerald-400",children:["↓",In(S[1]?.value||0)]})]})}):o.jsx(lb,{segments:yR(b),maxValue:a,height:20,renderLabel:N=>o.jsxs("div",{className:"text-xs font-mono text-muted-foreground min-w-[50px] text-right",children:[In(Math.round(N/4)),"~"]})})}),o.jsx("div",{className:"text-muted-foreground flex-shrink-0",children:h?o.jsx(Rt,{className:"h-4 w-4"}):o.jsx(en,{className:"h-4 w-4"})})]}),h&&o.jsx("div",{className:"pb-3",children:o.jsxs("div",{className:"flex items-start gap-3 px-2",children:[o.jsx("div",{className:"w-6 flex justify-center flex-shrink-0",children:o.jsx("div",{className:"w-px h-full bg-muted"})}),o.jsx("div",{className:"flex-1 min-w-0",children:o.jsxs("div",{className:"flex items-start gap-2",children:[o.jsx("div",{className:"text-muted-foreground text-xs mt-1",children:"└─"}),o.jsxs("div",{className:"flex-1 space-y-3",children:[o.jsxs("div",{className:"grid grid-cols-2 gap-x-4 gap-y-1 text-xs text-muted-foreground",children:[o.jsxs("div",{children:["Time: ",o.jsx("span",{className:"font-mono text-foreground",children:j})]}),o.jsxs("div",{children:["Duration: ",o.jsxs("span",{className:"font-mono text-foreground",children:[e.duration_ms.toFixed(0),"ms"]})]}),e.model&&o.jsxs("div",{children:["Model: ",o.jsx("span",{className:"font-mono text-foreground",children:e.model})]}),e.entity_id&&o.jsxs("div",{children:["Entity: ",o.jsx("span",{className:"font-mono text-foreground",children:e.entity_id})]})]}),m==="tokens"&&o.jsxs("div",{className:"flex gap-4 text-xs",children:[o.jsxs("div",{children:[o.jsx("span",{className:"text-blue-600 
dark:text-blue-400",children:"Input:"})," ",o.jsx("span",{className:"font-mono",children:e.input_tokens.toLocaleString()})]}),o.jsxs("div",{children:[o.jsx("span",{className:"text-emerald-600 dark:text-emerald-400",children:"Output:"})," ",o.jsx("span",{className:"font-mono",children:e.output_tokens.toLocaleString()})]}),o.jsxs("div",{children:[o.jsx("span",{className:"text-muted-foreground",children:"Total:"})," ",o.jsx("span",{className:"font-mono",children:e.total_tokens.toLocaleString()})]})]}),m==="composition"&&e.composition.total>0&&o.jsxs("div",{children:[o.jsxs("div",{className:"text-xs text-muted-foreground mb-2 flex items-center gap-1",children:[o.jsx(Fs,{className:"h-3 w-3"}),"Context Composition (estimated from ~",In(Math.round(e.composition.total/4))," tokens)"]}),o.jsx(o2,{composition:e.composition})]})]})]})})]})})]})}function wh({label:e,value:n,icon:r,color:a="default"}){const l={default:"text-muted-foreground",blue:"text-blue-600 dark:text-blue-400",green:"text-emerald-600 dark:text-emerald-400"}[a];return o.jsxs("div",{className:"flex items-center gap-2 p-2 bg-muted/30 rounded",children:[o.jsx(r,{className:`h-4 w-4 ${l}`}),o.jsxs("div",{className:"flex-1 min-w-0",children:[o.jsx("div",{className:"text-xs text-muted-foreground truncate",children:e}),o.jsx("div",{className:"font-mono text-sm font-medium",children:n})]})]})}function bR({events:e}){const n=le(x=>x.contextInspectorViewMode),r=le(x=>x.setContextInspectorViewMode),a=le(x=>x.contextInspectorCumulative),l=le(x=>x.setContextInspectorCumulative),c=w.useMemo(()=>hR(e),[e]),d=w.useMemo(()=>pR(c),[c]),f=w.useMemo(()=>gR(c),[c]),m=w.useMemo(()=>c.length===0?0:a?d.totalTokens:0,[c,a,d.totalTokens]),h=w.useMemo(()=>c.length===0?0:a?f.total:0,[c,a,f.total]),g=w.useMemo(()=>{let x=0,y=0,b={system:0,user:0,assistant:0,toolCalls:0,toolResults:0,total:0};return 
c.map(j=>(x+=j.input_tokens,y+=j.output_tokens,b={system:b.system+j.composition.system,user:b.user+j.composition.user,assistant:b.assistant+j.composition.assistant,toolCalls:b.toolCalls+j.composition.toolCalls,toolResults:b.toolResults+j.composition.toolResults,total:b.total+j.composition.total},{input:x,output:y,composition:{...b}}))},[c]);return c.length===0?o.jsxs("div",{className:"flex flex-col items-center text-center p-6 pt-9",children:[o.jsx(ha,{className:"h-8 w-8 text-muted-foreground mb-3"}),o.jsx("div",{className:"text-sm font-medium mb-1",children:"No Data"}),o.jsxs("div",{className:"text-xs text-muted-foreground max-w-[200px]",children:["Run"," ",o.jsx("span",{className:"font-mono bg-accent/10 px-1 rounded",children:"devui --instrumentation"})," ","and start a conversation."]})]}):o.jsxs("div",{className:"h-full flex flex-col",children:[o.jsxs("div",{className:"p-3 border-b flex-shrink-0 space-y-2",children:[o.jsxs("div",{className:"flex items-center justify-between gap-2",children:[o.jsxs("div",{className:"flex items-center gap-2",children:[o.jsx(ha,{className:"h-4 w-4"}),o.jsx("span",{className:"font-medium text-sm",children:"Context Inspector"}),o.jsxs(ut,{variant:"outline",className:"text-xs",children:[c.length," turn",c.length!==1?"s":""]})]}),o.jsxs("label",{className:"flex items-center gap-1.5 text-xs text-muted-foreground cursor-pointer",children:[o.jsx(co,{checked:a,onCheckedChange:x=>l(x===!0),className:"h-3.5 w-3.5"}),o.jsx("span",{children:"Cumulative"})]})]}),o.jsxs("div",{className:"flex items-center bg-muted rounded-md p-1",children:[o.jsx("button",{onClick:()=>r("tokens"),className:`flex-1 px-3 py-1.5 text-xs rounded transition-colors ${n==="tokens"?"bg-background shadow-sm font-medium":"text-muted-foreground hover:text-foreground"}`,children:"Tokens"}),o.jsx("button",{onClick:()=>r("composition"),className:`flex-1 px-3 py-1.5 text-xs rounded transition-colors ${n==="composition"?"bg-background shadow-sm 
font-medium":"text-muted-foreground hover:text-foreground"}`,children:"Composition"})]}),o.jsx("div",{className:"text-xs text-muted-foreground",children:n==="tokens"?"Token usage per turn":"Context breakdown by message type (chars)"})]}),o.jsx(Wn,{className:"flex-1",children:o.jsxs("div",{className:"p-3 space-y-4",children:[o.jsxs("div",{className:"flex items-center gap-4 text-xs px-1 flex-wrap",children:[n==="tokens"?o.jsxs(o.Fragment,{children:[o.jsxs("div",{className:"flex items-center gap-1.5",children:[o.jsx("div",{className:`w-3 h-3 rounded ${Pt.input}`}),o.jsx("span",{className:"text-muted-foreground",children:"Input (↑)"})]}),o.jsxs("div",{className:"flex items-center gap-1.5",children:[o.jsx("div",{className:`w-3 h-3 rounded ${Pt.output}`}),o.jsx("span",{className:"text-muted-foreground",children:"Output (↓)"})]})]}):o.jsxs(o.Fragment,{children:[o.jsxs("div",{className:"flex items-center gap-1.5",children:[o.jsx("div",{className:`w-2.5 h-2.5 rounded-sm ${Pt.system}`}),o.jsx("span",{className:"text-muted-foreground",children:"System"})]}),o.jsxs("div",{className:"flex items-center gap-1.5",children:[o.jsx("div",{className:`w-2.5 h-2.5 rounded-sm ${Pt.user}`}),o.jsx("span",{className:"text-muted-foreground",children:"User"})]}),o.jsxs("div",{className:"flex items-center gap-1.5",children:[o.jsx("div",{className:`w-2.5 h-2.5 rounded-sm ${Pt.assistant}`}),o.jsx("span",{className:"text-muted-foreground",children:"Assistant"})]}),o.jsxs("div",{className:"flex items-center gap-1.5",children:[o.jsx("div",{className:`w-2.5 h-2.5 rounded-sm ${Pt.toolCalls}`}),o.jsx("span",{className:"text-muted-foreground",children:"Tools"})]}),o.jsxs("div",{className:"flex items-center gap-1.5",children:[o.jsx("div",{className:`w-2.5 h-2.5 rounded-sm ${Pt.toolResults}`}),o.jsx("span",{className:"text-muted-foreground",children:"Results"})]})]}),o.jsx("div",{className:"flex-1"}),o.jsxs("div",{className:"flex items-center gap-1 
text-muted-foreground",children:[o.jsx(Fs,{className:"h-3 w-3"}),o.jsx("span",{children:"Click for details"})]})]}),o.jsx("div",{className:"border rounded-lg overflow-hidden",children:c.map((x,y)=>o.jsx(vR,{turn:x,index:y,maxValue:m,maxCompositionValue:h,cumulativeInput:g[y]?.input||0,cumulativeOutput:g[y]?.output||0,cumulativeComposition:g[y]?.composition||x.composition,showCumulative:a,viewMode:n},x.response_id))}),o.jsxs("div",{className:"border rounded-lg overflow-hidden",children:[o.jsx("div",{className:"p-3 bg-muted/30 border-b",children:o.jsx("span",{className:"text-xs font-medium",children:"Session Summary"})}),o.jsxs("div",{className:"p-3 space-y-3",children:[o.jsxs("div",{className:"grid grid-cols-3 gap-2",children:[o.jsx(wh,{label:"Total Tokens",value:In(d.totalTokens),icon:JA}),o.jsx(wh,{label:"Input",value:In(d.totalInput),icon:ha,color:"blue"}),o.jsx(wh,{label:"Output",value:In(d.totalOutput),icon:ha,color:"green"})]}),c.length>1&&o.jsxs("div",{className:"grid grid-cols-2 gap-x-4 gap-y-1 text-xs pt-2 border-t border-muted/50",children:[o.jsxs("div",{className:"flex justify-between",children:[o.jsx("span",{className:"text-muted-foreground",children:"Avg per turn:"}),o.jsx("span",{className:"font-mono",children:In(d.avgTotal)})]}),o.jsxs("div",{className:"flex justify-between",children:[o.jsx("span",{className:"text-muted-foreground",children:"Peak turn:"}),o.jsx("span",{className:"font-mono",children:In(d.peakTotal)})]}),o.jsxs("div",{className:"flex justify-between",children:[o.jsx("span",{className:"text-muted-foreground",children:"Avg input:"}),o.jsx("span",{className:"font-mono text-blue-600 dark:text-blue-400",children:In(d.avgInput)})]}),o.jsxs("div",{className:"flex justify-between",children:[o.jsx("span",{className:"text-muted-foreground",children:"Avg output:"}),o.jsx("span",{className:"font-mono text-emerald-600 dark:text-emerald-400",children:In(d.avgOutput)})]})]}),f.total>0&&o.jsx("div",{className:"pt-3 border-t 
border-muted/50",children:o.jsxs("div",{className:"flex items-start gap-2",children:[o.jsx("div",{className:"text-muted-foreground text-xs mt-0.5",children:"└─"}),o.jsxs("div",{className:"flex-1",children:[o.jsxs("div",{className:"text-xs text-muted-foreground mb-2 flex items-center gap-1",children:[o.jsx(Fs,{className:"h-3 w-3"}),"Total Composition (all turns)"]}),o.jsx(o2,{composition:f})]})]})})]})]})]})})]})}function a2(){return o.jsx("div",{className:"flex items-center gap-2 py-3 px-2",children:o.jsx("div",{className:"flex-1 border-t border-border/50"})})}function i2(e){const n=[];let r=!1;for(let a=0;a100&&(l.includes(` -`)||l.trim().match(/[.!?]\s*$/))&&(n.push({type:"response.output_text.delta",delta:l.trim()}),l="");continue}c.type!=="response.usage.complete"&&n.push(c)}for(const[,c]of r)if(c.arguments.trim()&&c.arguments.trim().length>2){const d=a.get(c.callId)||c.name||"unknown";n.push({type:"response.function_call.complete",data:{name:d,arguments:c.arguments,call_id:c.callId}})}return l.trim()&&n.push({type:"response.output_text.delta",delta:l.trim()}),n}function wR(e){switch(e.type){case"response.output_text.delta":if("delta"in e){const n=e.delta||"";return n.length>60?`${n.slice(0,60)}...`:n}return"Text output";case"response.function_call.complete":if("data"in e&&e.data){const n=e.data;let r=n.name||"unknown";(!r||r==="unknown")&&(r="function_call");const a=n.arguments?typeof n.arguments=="string"?n.arguments.slice(0,30):JSON.stringify(n.arguments).slice(0,30):"";return`Calling ${r}(${a}${a.length>=30?"...":""})`}return"Function call";case"response.function_call_arguments.delta":return"delta"in e&&e.delta?`Function arg delta: ${e.delta.slice(0,30)}${e.delta.length>30?"...":""}`:"Function arguments...";case"response.function_result.complete":{const r=e.output.slice(0,40);return`Function result: ${r}${r.length>=40?"...":""}`}case"response.output_item.added":{const n=e;return n.item.type==="function_call"?`Tool call: ${n.item.name}`:"Output item 
added"}case"response.workflow_event.completed":return"data"in e&&e.data?`Executor: ${e.data.executor_id||"unknown"}`:"Workflow event";case"response.trace.completed":return"data"in e&&e.data?`Trace: ${e.data.operation_name||"unknown"}`:"Trace event";case"response.completed":if("response"in e&&e.response&&"usage"in e.response){const r=e.response.usage;if(r)return`Response complete (${r.total_tokens} tokens)`}return"Response complete";case"response.done":return"Response complete";case"error":return"message"in e&&typeof e.message=="string"?e.message:"Error occurred";default:return`${e.type}`}}function NR(e){switch(e){case"response.output_text.delta":return eg;case"response.function_call.complete":case"response.function_call.delta":case"response.function_call_arguments.delta":return _a;case"response.function_result.complete":return nn;case"response.output_item.added":return nn;case"response.workflow_event.completed":return Qp;case"response.trace.completed":return Bu;case"response.completed":return nn;case"response.done":return nn;case"error":return kl;default:return hs}}function jR(e){switch(e){case"response.output_text.delta":return"text-gray-600 dark:text-gray-400";case"response.function_call.complete":case"response.function_call.delta":case"response.function_call_arguments.delta":return"text-blue-600 dark:text-blue-400";case"response.function_result.complete":return"text-green-600 dark:text-green-400";case"response.output_item.added":return"text-green-600 dark:text-green-400";case"response.workflow_event.completed":return"text-purple-600 dark:text-purple-400";case"response.trace.completed":return"text-orange-600 dark:text-orange-400";case"response.completed":return"text-green-600 dark:text-green-400";case"response.done":return"text-green-600 dark:text-green-400";case"error":return"text-red-600 dark:text-red-400";default:return"text-gray-600 dark:text-gray-400"}}function SR({event:e}){const[n,r]=w.useState(!1),a=e.type||"unknown",l=NR(a),c=jR(a),d="_uiTimestamp"in 
e&&typeof e._uiTimestamp=="number"?new Date(e._uiTimestamp*1e3).toLocaleTimeString():new Date().toLocaleTimeString(),f=wR(e),m=e.type==="response.function_call.complete"&&"data"in e&&e.data||e.type==="response.function_result.complete"||e.type==="response.output_item.added"&&zr(e)!==null||e.type==="response.workflow_event.completed"&&"data"in e&&e.data||e.type==="response.trace.completed"&&"data"in e&&e.data||e.type==="response.trace.completed"&&"data"in e&&e.data||e.type==="response.output_text.delta"&&"delta"in e&&e.delta&&e.delta.length>100||e.type==="response.completed"&&"response"in e&&e.response||e.type==="error";return o.jsxs("div",{className:"border-l-2 border-muted pl-3 py-2 hover:bg-muted/50 transition-colors",children:[o.jsxs("div",{className:"flex items-center gap-2 text-xs text-muted-foreground mb-1",children:[o.jsx(l,{className:`h-3 w-3 ${c}`}),o.jsx("span",{className:"font-mono",children:d}),o.jsx(ut,{variant:"outline",className:"text-xs py-0",children:e.type?e.type.replace("response.",""):"unknown"})]}),o.jsxs("div",{className:"text-sm",children:[o.jsxs("div",{className:`flex items-center gap-2 ${m?"cursor-pointer":""}`,onClick:()=>m&&r(!n),children:[m&&o.jsx("div",{className:"text-muted-foreground",children:n?o.jsx(Rt,{className:"h-3 w-3"}):o.jsx(en,{className:"h-3 w-3"})}),o.jsx("div",{className:"text-muted-foreground flex-1",children:m&&f.length>80?`${f.slice(0,80)}...`:f})]}),n&&m&&o.jsx("div",{className:"mt-2 ml-5 p-3 bg-muted/30 rounded border",children:o.jsx(_R,{event:e})})]})]})}function _R({event:e}){if(e.type==="error"){const n=e;return o.jsxs("div",{className:"space-y-2",children:[o.jsxs("div",{className:"flex items-center gap-2",children:[o.jsx(kl,{className:"h-4 w-4 text-red-500"}),o.jsx("span",{className:"font-semibold text-sm",children:"Error Details"})]}),o.jsxs("div",{className:"text-xs",children:[n.message&&o.jsxs("div",{className:"mb-2",children:[o.jsx("span",{className:"font-medium 
text-muted-foreground",children:"Message:"}),o.jsx("div",{className:"mt-1",children:o.jsx("pre",{className:"text-xs bg-destructive/10 border border-destructive/30 rounded p-2 text-destructive whitespace-pre-wrap break-all",children:n.message})})]}),n.code&&o.jsxs("div",{className:"mb-2",children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Code:"}),o.jsx("span",{className:"ml-2 font-mono text-xs",children:n.code})]}),n.param&&o.jsxs("div",{className:"mb-2",children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Parameter:"}),o.jsx("span",{className:"ml-2 font-mono text-xs",children:n.param})]}),o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Raw Event:"}),o.jsx("div",{className:"mt-1",children:o.jsx("pre",{className:"text-xs bg-background border rounded p-2 whitespace-pre-wrap break-all max-h-32 overflow-auto",children:JSON.stringify(e,null,2)})})]})]})]})}switch(e.type){case"response.function_call.complete":if("data"in e&&e.data){const n=e.data;return o.jsxs("div",{className:"space-y-2",children:[o.jsxs("div",{className:"flex items-center gap-2",children:[o.jsx(_a,{className:"h-4 w-4 text-blue-500"}),o.jsx("span",{className:"font-semibold text-sm",children:"Function Call"})]}),o.jsxs("div",{className:"grid grid-cols-1 gap-2 text-xs",children:[o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Function:"}),o.jsx("span",{className:"ml-2 font-mono bg-blue-100 dark:bg-blue-900 px-2 py-1 rounded",children:n.name||"unknown"})]}),n.call_id&&o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Call ID:"}),o.jsx("span",{className:"ml-2 font-mono text-xs",children:n.call_id})]}),n.arguments&&o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Arguments:"}),o.jsx("div",{className:"mt-1 max-h-32 overflow-auto",children:o.jsx("pre",{className:"text-xs 
bg-background border rounded p-2 whitespace-pre-wrap max-w-full break-all",children:typeof n.arguments=="string"?n.arguments:JSON.stringify(n.arguments,null,1)})})]})]})]})}break;case"response.function_result.complete":{const n=e;return o.jsxs("div",{className:"space-y-2",children:[o.jsxs("div",{className:"flex items-center gap-2",children:[o.jsx(nn,{className:"h-4 w-4 text-green-500"}),o.jsx("span",{className:"font-semibold text-sm",children:"Function Result"})]}),o.jsxs("div",{className:"grid grid-cols-1 gap-2 text-xs",children:[o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Call ID:"}),o.jsx("span",{className:"ml-2 font-mono text-xs",children:n.call_id})]}),o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Status:"}),o.jsx("span",{className:`ml-2 px-2 py-1 rounded text-xs font-medium ${n.status==="completed"?"bg-green-100 dark:bg-green-900 text-green-800 dark:text-green-200":"bg-red-100 dark:bg-red-900 text-red-800 dark:text-red-200"}`,children:n.status})]}),o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Output:"}),o.jsx("div",{className:"mt-1 max-h-32 overflow-auto",children:o.jsx("pre",{className:"text-xs bg-background border rounded p-2 whitespace-pre-wrap max-w-full break-all",children:n.output})})]})]})]})}case"response.output_item.added":{const n=zr(e);if(n)return o.jsxs("div",{className:"space-y-2",children:[o.jsxs("div",{className:"flex items-center gap-2",children:[o.jsx(nn,{className:"h-4 w-4 text-green-500"}),o.jsx("span",{className:"font-semibold text-sm",children:"Function Result"})]}),o.jsxs("div",{className:"grid grid-cols-1 gap-2 text-xs",children:[o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Call ID:"}),o.jsx("span",{className:"ml-2 font-mono text-xs",children:n.call_id})]}),o.jsxs("div",{children:[o.jsx("span",{className:"font-medium 
text-muted-foreground",children:"Status:"}),o.jsx("span",{className:`ml-2 px-2 py-1 rounded text-xs font-medium ${n.status==="completed"?"bg-green-100 dark:bg-green-900 text-green-800 dark:text-green-200":"bg-red-100 dark:bg-red-900 text-red-800 dark:text-red-200"}`,children:n.status})]}),o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Output:"}),o.jsx("div",{className:"mt-1 max-h-32 overflow-auto",children:o.jsx("pre",{className:"text-xs bg-background border rounded p-2 whitespace-pre-wrap max-w-full break-all",children:n.output})})]})]})]});break}case"response.workflow_event.completed":if("data"in e&&e.data){const n=e.data;return o.jsxs("div",{className:"space-y-2",children:[o.jsxs("div",{className:"flex items-center gap-2",children:[o.jsx(Qp,{className:"h-4 w-4 text-purple-500"}),o.jsx("span",{className:"font-semibold text-sm",children:"Workflow Event"})]}),o.jsxs("div",{className:"grid grid-cols-1 gap-2 text-xs",children:[o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Event Type:"}),o.jsx("span",{className:"ml-2 font-mono bg-purple-100 dark:bg-purple-900 px-2 py-1 rounded",children:n.event_type||"unknown"})]}),n.executor_id&&o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Executor:"}),o.jsx("span",{className:"ml-2 font-mono",children:n.executor_id})]}),n.timestamp&&o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Timestamp:"}),o.jsx("span",{className:"ml-2 font-mono text-xs",children:n.timestamp})]}),n.data&&o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Data:"}),o.jsx("div",{className:"mt-1 max-h-32 overflow-auto",children:o.jsx("pre",{className:"text-xs bg-background border rounded p-2 whitespace-pre-wrap max-w-full break-all",children:typeof 
n.data=="string"?n.data:JSON.stringify(n.data,null,1)})})]})]})]})}break;case"response.trace.completed":if("data"in e&&e.data){const n=e.data;return o.jsxs("div",{className:"space-y-2",children:[o.jsxs("div",{className:"flex items-center gap-2",children:[o.jsx(Bu,{className:"h-4 w-4 text-orange-500"}),o.jsx("span",{className:"font-semibold text-sm",children:"Trace Event"})]}),o.jsxs("div",{className:"grid grid-cols-1 gap-2 text-xs",children:[o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Operation:"}),o.jsx("span",{className:"ml-2 font-mono bg-orange-100 dark:bg-orange-900 px-2 py-1 rounded",children:n.operation_name||"unknown"})]}),n.span_id&&o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Span ID:"}),o.jsx("span",{className:"ml-2 font-mono text-xs",children:n.span_id})]}),n.trace_id&&o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Trace ID:"}),o.jsx("span",{className:"ml-2 font-mono text-xs",children:n.trace_id})]}),n.duration_ms&&o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Duration:"}),o.jsxs("span",{className:"ml-2 font-mono text-xs",children:[Number(n.duration_ms).toFixed(2),"ms"]})]}),n.status&&o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Status:"}),o.jsx("span",{className:`ml-2 px-2 py-1 rounded text-xs font-medium ${n.status==="StatusCode.UNSET"||n.status==="OK"?"bg-green-100 dark:bg-green-900 text-green-800 dark:text-green-200":"bg-red-100 dark:bg-red-900 text-red-800 dark:text-red-200"}`,children:n.status||"unknown"})]}),n.entity_id&&o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Entity:"}),o.jsx("span",{className:"ml-2 font-mono text-xs",children:n.entity_id})]}),n.attributes&&Object.keys(n.attributes).length>0&&o.jsxs("div",{children:[o.jsx("span",{className:"font-medium 
text-muted-foreground",children:"Attributes:"}),o.jsx("div",{className:"mt-1 max-h-32 overflow-auto",children:o.jsx("pre",{className:"text-xs bg-background border rounded p-2 whitespace-pre-wrap break-all",children:l2(n.attributes)})})]})]})]})}break;case"response.output_text.delta":if("delta"in e&&e.delta)return o.jsxs("div",{className:"space-y-2",children:[o.jsxs("div",{className:"flex items-center gap-2",children:[o.jsx(eg,{className:"h-4 w-4 text-gray-500"}),o.jsx("span",{className:"font-semibold text-sm",children:"Text Output"})]}),o.jsx("div",{className:"max-h-32 overflow-auto",children:o.jsx("pre",{className:"text-xs bg-background border rounded p-2 whitespace-pre-wrap max-w-full break-all",children:e.delta})})]});break;case"response.completed":if("response"in e&&e.response){const r=e.response;return o.jsx("div",{className:"space-y-2",children:o.jsxs("div",{className:"grid grid-cols-1 gap-2 text-xs",children:[r.usage&&o.jsxs(o.Fragment,{children:[o.jsx("div",{children:o.jsx("span",{className:"font-medium text-muted-foreground",children:"Usage:"})}),o.jsxs("div",{className:"ml-4 space-y-1",children:[o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Input tokens:"}),o.jsx("span",{className:"ml-2 font-mono",children:r.usage.input_tokens})]}),o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Output tokens:"}),o.jsx("span",{className:"ml-2 font-mono",children:r.usage.output_tokens})]}),o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Total tokens:"}),o.jsx("span",{className:"ml-2 font-mono bg-green-100 dark:bg-green-900 px-2 py-1 rounded",children:r.usage.total_tokens})]})]})]}),r.id&&o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Response ID:"}),o.jsx("span",{className:"ml-2 font-mono text-xs 
break-all",children:r.id})]}),r.model&&o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Model:"}),o.jsx("span",{className:"ml-2 font-mono text-xs break-all",children:r.model})]})]})})}break;default:return o.jsx("div",{className:"text-xs text-muted-foreground",children:o.jsx("pre",{className:"bg-background border rounded p-2 overflow-auto max-h-32",children:JSON.stringify(e,null,2)})})}return null}function ER({events:e,isStreaming:n}){const r=w.useRef(null),a=gg(e),c=[...i2(a)].reverse();return o.jsxs("div",{className:"h-full flex flex-col",children:[o.jsxs("div",{className:"flex items-center justify-between p-3 border-b",children:[o.jsxs("div",{className:"flex items-center gap-2",children:[o.jsx(Qp,{className:"h-4 w-4"}),o.jsx("span",{className:"font-medium",children:"Events"}),o.jsxs(ut,{variant:"outline",children:[a.length,e.length>a.length?` (${e.length} raw)`:""]})]}),n&&o.jsxs("div",{className:"flex items-center gap-1 text-xs text-muted-foreground",children:[o.jsx("div",{className:"h-2 w-2 animate-pulse rounded-full bg-green-500 dark:bg-green-400"}),"Streaming"]})]}),o.jsx(Wn,{ref:r,className:"flex-1",children:o.jsx("div",{className:"p-3",children:a.length===0?o.jsx("div",{className:"text-center text-muted-foreground text-sm py-8",children:e.length===0?"No events yet. Start a conversation to see real-time events.":"Processing events... 
Accumulated events will appear here."}):o.jsx("div",{className:"space-y-2",children:c.map((d,f)=>"type"in d&&d.type==="separator"?o.jsx(a2,{},d.id):o.jsx(SR,{event:d},`${d.type}-${f}`))})})})]})}function CR(e){const n=new Map;for(const a of e){if(!("data"in a))continue;const c=a.data.response_id||"unknown";n.has(c)||n.set(c,[]),n.get(c).push(a)}const r=[];for(const[a,l]of n){const c=new Map,d=[];for(const y of l){if(!("data"in y))continue;const b=y.data,j=b.span_id||`span_${Math.random()}`;c.set(j,{event:y,data:b,children:[]})}for(const y of l){if(!("data"in y))continue;const b=y.data,j=b.span_id||"",N=b.parent_span_id,S=c.get(j);S&&(N&&c.has(N)?c.get(N).children.push(S):d.push(S))}d.sort((y,b)=>(y.data.start_time||0)-(b.data.start_time||0));const f=y=>{y.children.sort((b,j)=>(b.data.start_time||0)-(j.data.start_time||0)),y.children.forEach(f)};d.forEach(f);const m=l[0],h=m&&"data"in m?m.data:null,g=Math.min(...l.map(y=>("data"in y?y.data:null)?.start_time||Date.now()/1e3)),x=l.reduce((y,b)=>{const j="data"in b?b.data:null;return y+(j?.duration_ms||0)},0);r.push({response_id:a,timestamp:g,traces:d,totalDuration:x,entity_id:h?.entity_id})}return r.sort((a,l)=>l.timestamp-a.timestamp),r}function Su(e){if(typeof e=="string"){const n=e.trim();if(n.startsWith("[")||n.startsWith("{"))try{const r=JSON.parse(e);return Su(r)}catch{return e}return e}if(Array.isArray(e))return e.map(Su);if(e!==null&&typeof e=="object"){const n={};for(const[r,a]of Object.entries(e))n[r]=Su(a);return n}return e}function l2(e){try{const n=Su(e);return JSON.stringify(n,null,2)}catch{return JSON.stringify(e,null,2)}}function kR(e){return e.includes("invoke_agent")||e.includes("Agent")?"bg-purple-100 dark:bg-purple-900 text-purple-800 dark:text-purple-200":e.includes("chat")||e.includes("Chat")?"bg-blue-100 dark:bg-blue-900 text-blue-800 dark:text-blue-200":e.includes("tool")||e.includes("execute")?"bg-green-100 dark:bg-green-900 text-green-800 dark:text-green-200":"bg-orange-100 dark:bg-orange-900 
text-orange-800 dark:text-orange-200"}function c2({node:e,depth:n=0}){const[r,a]=w.useState(n<2),[l,c]=w.useState(!1),{data:d}=e,f=d.operation_name||"Unknown",m=d.duration_ms?`${Number(d.duration_ms).toFixed(1)}ms`:"",h=e.children.length>0,g=d.attributes?.["gen_ai.usage.input_tokens"],x=d.attributes?.["gen_ai.usage.output_tokens"],y=g!==void 0||x!==void 0;return o.jsxs("div",{className:"relative",children:[n>0&&o.jsx("div",{className:"absolute left-0 top-0 bottom-0 border-l-2 border-muted",style:{marginLeft:`${(n-1)*16+8}px`}}),o.jsxs("div",{className:"flex items-center gap-2 py-1.5 hover:bg-muted/50 rounded transition-colors",style:{paddingLeft:`${n*16}px`},children:[o.jsx("button",{onClick:()=>h?a(!r):c(!l),className:"w-4 h-4 flex items-center justify-center text-muted-foreground hover:text-foreground",children:h?r?o.jsx(Rt,{className:"h-3 w-3"}):o.jsx(en,{className:"h-3 w-3"}):l?o.jsx(Rt,{className:"h-3 w-3"}):o.jsx(en,{className:"h-3 w-3"})}),o.jsx("span",{className:`text-xs px-1.5 py-0.5 rounded font-medium ${kR(f)}`,children:f.replace("ChatAgent.","").replace("invoke_agent ","")}),m&&o.jsx("span",{className:"text-xs text-muted-foreground font-mono",children:m}),y&&o.jsxs("span",{className:"text-xs text-muted-foreground font-mono",children:[g!==void 0&&o.jsxs("span",{children:["↑",String(g)]}),g!==void 0&&x!==void 0&&o.jsx("span",{className:"mx-0.5",children:"/"}),x!==void 0&&o.jsxs("span",{children:["↓",String(x)]})]})]}),l&&!h&&o.jsx("div",{className:"ml-4 mt-1 mb-2 p-2 bg-muted/30 rounded border text-xs",style:{marginLeft:`${n*16+20}px`},children:o.jsxs("div",{className:"space-y-1",children:[d.span_id&&o.jsxs("div",{className:"flex gap-2",children:[o.jsx("span",{className:"text-muted-foreground w-20",children:"Span ID:"}),o.jsx("span",{className:"font-mono text-xs break-all",children:d.span_id})]}),d.trace_id&&o.jsxs("div",{className:"flex gap-2",children:[o.jsx("span",{className:"text-muted-foreground w-20",children:"Trace 
ID:"}),o.jsx("span",{className:"font-mono text-xs break-all",children:d.trace_id})]}),d.status&&o.jsxs("div",{className:"flex gap-2",children:[o.jsx("span",{className:"text-muted-foreground w-20",children:"Status:"}),o.jsx("span",{className:`px-1.5 py-0.5 rounded text-xs ${d.status==="StatusCode.UNSET"||d.status==="OK"?"bg-green-100 dark:bg-green-900 text-green-800 dark:text-green-200":"bg-red-100 dark:bg-red-900 text-red-800 dark:text-red-200"}`,children:d.status})]}),d.attributes&&Object.keys(d.attributes).length>0&&o.jsxs("div",{className:"mt-2",children:[o.jsx("span",{className:"text-muted-foreground block mb-1",children:"Attributes:"}),o.jsx("pre",{className:"text-xs bg-background border rounded p-2 overflow-auto max-h-32 whitespace-pre-wrap break-all",children:l2(d.attributes)})]})]})}),h&&r&&o.jsx("div",{children:e.children.map((b,j)=>o.jsx(c2,{node:b,depth:n+1},b.data.span_id||j))})]})}function TR({group:e}){const[n,r]=w.useState(!0),a=new Date(e.timestamp*1e3).toLocaleTimeString(),l=e.totalDuration>0?`${e.totalDuration.toFixed(0)}ms`:"",c=e.traces.reduce((d,f)=>{const m=h=>1+h.children.reduce((g,x)=>g+m(x),0);return d+m(f)},0);return o.jsxs("div",{className:"border rounded-lg overflow-hidden",children:[o.jsxs("div",{className:"flex items-center gap-2 p-2 bg-muted/50 cursor-pointer hover:bg-muted/70 transition-colors",onClick:()=>r(!n),children:[o.jsx("div",{className:"text-muted-foreground",children:n?o.jsx(Rt,{className:"h-4 w-4"}):o.jsx(en,{className:"h-4 w-4"})}),o.jsx("span",{className:"font-mono text-xs text-muted-foreground",children:a}),e.entity_id&&o.jsx(ut,{variant:"outline",className:"text-xs py-0",children:e.entity_id.replace("agent_","").replace("workflow_","")}),o.jsx("div",{className:"flex-1"}),l&&o.jsx(ut,{variant:"secondary",className:"text-xs py-0",children:l}),o.jsxs("span",{className:"text-xs text-muted-foreground",children:[c," span",c!==1?"s":""]})]}),n&&o.jsx("div",{className:"p-2 
border-t",children:e.traces.map((d,f)=>o.jsx(c2,{node:d,depth:0},d.data.span_id||f))})]})}function AR({events:e}){const n=le(c=>c.debugTraceSubTab),r=le(c=>c.setDebugTraceSubTab),a=e.filter(c=>c.type==="response.trace.completed"),l=CR(a);return o.jsxs("div",{className:"h-full flex flex-col",children:[o.jsxs("div",{className:"flex items-center gap-2 p-3 border-b",children:[o.jsx(Bu,{className:"h-4 w-4"}),o.jsx("span",{className:"font-medium",children:"Traces"}),o.jsx(ut,{variant:"outline",children:a.length}),o.jsx("div",{className:"flex-1"}),o.jsxs("div",{className:"flex items-center bg-muted rounded-md p-1 min-w-0",children:[o.jsx("button",{onClick:()=>r("spans"),className:`px-3 py-1.5 text-xs rounded transition-colors truncate ${n==="spans"?"bg-background shadow-sm font-medium":"text-muted-foreground hover:text-foreground"}`,children:"OTel Spans"}),o.jsxs("button",{onClick:()=>r("context"),className:`px-3 py-1.5 text-xs rounded transition-colors flex items-center gap-1.5 min-w-0 ${n==="context"?"bg-background shadow-sm font-medium":"text-muted-foreground hover:text-foreground"}`,children:[o.jsx(ha,{className:"h-3.5 w-3.5 flex-shrink-0"}),o.jsx("span",{className:"truncate",children:"Context Inspector"})]})]})]}),n==="spans"?o.jsxs("div",{className:"flex-1 flex flex-col min-h-0",children:[a.length>0&&o.jsx("div",{className:"p-3 border-b flex-shrink-0",children:o.jsxs("div",{className:"flex items-center gap-2",children:[o.jsx(Bu,{className:"h-4 w-4"}),o.jsx("span",{className:"font-medium text-sm",children:"OTel Spans"}),o.jsxs(ut,{variant:"outline",className:"text-xs",children:[l.length," turn",l.length!==1?"s":""]})]})}),a.length===0?o.jsxs("div",{className:"flex flex-col items-center text-center p-6 pt-9",children:[o.jsx(ha,{className:"h-8 w-8 text-muted-foreground mb-3"}),o.jsx("div",{className:"text-sm font-medium mb-1",children:"No Data"}),o.jsxs("div",{className:"text-xs text-muted-foreground max-w-[200px]",children:["Run"," ",o.jsx("span",{className:"font-mono 
bg-accent/10 px-1 rounded",children:"devui --instrumentation"})," ","and start a conversation."]})]}):o.jsx(Wn,{className:"flex-1",children:o.jsx("div",{className:"p-3",children:o.jsx("div",{className:"space-y-3",children:l.map(c=>o.jsx(TR,{group:c},c.response_id))})})})]}):o.jsx(bR,{events:e})]})}function MR({events:e}){const n=gg(e),r=[],a=n.filter(m=>m.type==="response.function_call.complete"),l=e.filter(m=>zr(m)!==null),c=new Map;l.forEach(m=>{const h=zr(m);h&&c.set(h.call_id,m)}),a.forEach(m=>{if(r.push(m),"data"in m&&m.data&&m.data.call_id){const h=String(m.data.call_id),g=c.get(h);g&&(r.push(g),c.delete(h))}}),c.forEach(m=>{r.push(m)});const f=[...i2(r)].reverse();return o.jsxs("div",{className:"h-full flex flex-col",children:[o.jsxs("div",{className:"flex items-center gap-2 p-3 border-b",children:[o.jsx(_a,{className:"h-4 w-4"}),o.jsx("span",{className:"font-medium",children:"Tools"}),o.jsx(ut,{variant:"outline",children:r.length})]}),o.jsx(Wn,{className:"flex-1",children:o.jsx("div",{className:"p-3",children:r.length===0?o.jsx("div",{className:"text-center text-muted-foreground text-sm py-8",children:"No tool executions yet. 
Tool calls will appear here during conversations."}):o.jsx("div",{className:"space-y-3",children:f.map((m,h)=>"type"in m&&m.type==="separator"?o.jsx(a2,{},m.id):o.jsx(RR,{event:m},h))})})})]})}function RR({event:e}){const n="_uiTimestamp"in e&&typeof e._uiTimestamp=="number"?new Date(e._uiTimestamp*1e3).toLocaleTimeString():new Date().toLocaleTimeString(),r=e.type==="response.function_call.complete",a=zr(e),l=a!==null;if(!r&&!l)return null;const c=r&&"data"in e?e.data:null;return o.jsxs("div",{className:"border rounded p-3",children:[o.jsxs("div",{className:"flex items-center justify-between mb-2",children:[o.jsxs("div",{className:"flex items-center gap-2",children:[o.jsx(og,{className:"h-4 w-4 text-yellow-600 dark:text-yellow-400"}),o.jsx("span",{className:"font-medium text-sm",children:r?"Tool Call":"Tool Result"}),r&&c&&c.name!==void 0&&o.jsxs("span",{className:"text-xs text-muted-foreground",children:["(",String(c.name),")"]})]}),o.jsx("span",{className:"text-xs text-muted-foreground font-mono",children:n})]}),r&&c&&o.jsxs("div",{className:"p-2 bg-blue-50 dark:bg-blue-950/50 border border-blue-200 dark:border-blue-800 rounded",children:[o.jsxs("div",{className:"flex items-center gap-2 mb-2",children:[o.jsx(_a,{className:"h-3 w-3 text-blue-600 dark:text-blue-400"}),o.jsx("span",{className:"text-xs font-mono bg-blue-100 dark:bg-blue-900 text-blue-800 dark:text-blue-200 px-2 py-1 rounded",children:"CALL"}),o.jsx("span",{className:"font-medium text-sm",children:String(c.name||"unknown")})]}),c.arguments!==void 0&&o.jsxs("div",{className:"text-xs",children:[o.jsx("span",{className:"text-muted-foreground mb-1 block",children:"Arguments:"}),o.jsx("pre",{className:"p-2 bg-background border rounded text-xs overflow-auto max-h-32 max-w-full break-all whitespace-pre-wrap",children:typeof c.arguments=="string"?c.arguments:JSON.stringify(c.arguments,null,1)})]})]}),l&&a&&o.jsxs("div",{className:"p-2 bg-green-50 dark:bg-green-950/50 border border-green-200 
dark:border-green-800 rounded",children:[o.jsxs("div",{className:"flex items-center gap-2 mb-2",children:[o.jsx(nn,{className:"h-3 w-3 text-green-600 dark:text-green-400"}),o.jsx("span",{className:"text-xs font-mono bg-green-100 dark:bg-green-900 text-green-800 dark:text-green-200 px-2 py-1 rounded",children:"RESULT"}),a.status!=="completed"&&o.jsx("span",{className:"ml-auto px-2 py-1 rounded text-xs font-medium bg-red-100 dark:bg-red-900 text-red-800 dark:text-red-200",children:a.status})]}),o.jsxs("div",{className:"text-xs space-y-1",children:[o.jsxs("div",{className:"flex items-center gap-2",children:[o.jsx("span",{className:"text-muted-foreground",children:"Call ID:"}),o.jsx("span",{className:"font-mono text-xs break-all",children:a.call_id})]}),o.jsxs("div",{children:[o.jsx("span",{className:"text-muted-foreground block mb-1",children:"Output:"}),o.jsx("pre",{className:"p-2 bg-background border rounded text-xs overflow-auto max-h-32 break-all whitespace-pre-wrap",children:a.output})]})]})]})]})}function DR({events:e,isStreaming:n=!1,onMinimize:r}){const a=le(d=>d.debugPanelTab),l=le(d=>d.setDebugPanelTab),c=w.useMemo(()=>{const d=gg(e),f=d.length,m=e.filter(g=>g.type==="response.trace.completed").length,h=d.filter(g=>g.type==="response.function_call.complete").length+e.filter(g=>zr(g)!==null).length;return{eventsCount:f,tracesCount:m,toolsCount:h}},[e]);return o.jsx("div",{className:"flex-1 border-l flex flex-col min-h-0",children:o.jsxs(D5,{value:a,onValueChange:d=>l(d),className:"flex-1 flex flex-col min-h-0",children:[o.jsxs("div",{className:"px-3 pt-3 flex items-center gap-2 flex-shrink-0",children:[o.jsxs($N,{className:"flex-1",children:[o.jsxs(Nu,{value:"events",className:"flex-1 gap-1.5",children:["Events",c.eventsCount>0&&o.jsx("span",{className:"text-[10px] bg-muted-foreground/20 text-muted-foreground px-1.5 py-0.5 rounded-full min-w-[1.25rem] text-center",children:c.eventsCount})]}),o.jsxs(Nu,{value:"traces",className:"flex-1 
gap-1.5",children:["Traces",c.tracesCount>0&&o.jsx("span",{className:"text-[10px] bg-muted-foreground/20 text-muted-foreground px-1.5 py-0.5 rounded-full min-w-[1.25rem] text-center",children:c.tracesCount})]}),o.jsxs(Nu,{value:"tools",className:"flex-1 gap-1.5",children:["Tools",c.toolsCount>0&&o.jsx("span",{className:"text-[10px] bg-muted-foreground/20 text-muted-foreground px-1.5 py-0.5 rounded-full min-w-[1.25rem] text-center",children:c.toolsCount})]})]}),r&&o.jsx(Le,{variant:"ghost",size:"sm",onClick:r,className:"h-8 w-8 p-0 flex-shrink-0",title:"Minimize debug panel",children:o.jsx(en,{className:"h-4 w-4"})})]}),o.jsx(ju,{value:"events",className:"flex-1 mt-0 overflow-hidden",children:o.jsx(ER,{events:e,isStreaming:n})}),o.jsx(ju,{value:"traces",className:"flex-1 mt-0 overflow-hidden",children:o.jsx(AR,{events:e})}),o.jsx(ju,{value:"tools",className:"flex-1 mt-0 overflow-hidden",children:o.jsx(MR,{events:e})})]})})}function Ir({open:e,onOpenChange:n,children:r}){if(!e)return null;const a=()=>{n(!1)},l=d=>{d.stopPropagation()},c=d=>{d.stopPropagation()};return o.jsxs("div",{className:"fixed inset-0 z-50 flex items-center justify-center",children:[o.jsx("div",{className:"absolute inset-0 bg-black/50",onClick:a}),o.jsx("div",{className:"relative z-10",onClick:l,onMouseDown:c,onMouseUp:d=>d.stopPropagation(),children:r})]})}function Lr({children:e,className:n=""}){const a=n.includes("w-[")||n.includes("w-full")||n.includes("max-w-")?"":"max-w-lg w-full";return o.jsx("div",{className:`relative bg-background border rounded-lg shadow-lg max-h-[90vh] overflow-hidden ${a} ${n}`,children:e})}function $r({children:e,className:n=""}){return o.jsx("div",{className:`space-y-2 ${n}`,children:e})}function Pr({children:e,className:n=""}){return o.jsx("h2",{className:`text-lg font-semibold ${n}`,children:e})}function OR({children:e,className:n=""}){return o.jsx("p",{className:`text-sm text-muted-foreground ${n}`,children:e})}function So({onClose:e}){return 
o.jsx(Le,{variant:"ghost",size:"sm",onClick:e,className:"absolute top-4 right-4 h-8 w-8 p-0 rounded-sm opacity-70 hover:opacity-100",children:o.jsx(Ea,{className:"h-4 w-4"})})}function zR({children:e}){return o.jsx("div",{className:"flex justify-end gap-2 p-4 border-t bg-muted/50",children:e})}function as({className:e,type:n,...r}){return o.jsx("input",{type:n,"data-slot":"input",className:We("file:text-foreground placeholder:text-muted-foreground selection:bg-primary selection:text-primary-foreground dark:bg-input/30 border-input flex h-9 w-full min-w-0 rounded-md border bg-transparent px-3 py-1 text-base shadow-xs transition-[color,box-shadow] outline-none file:inline-flex file:h-7 file:border-0 file:bg-transparent file:text-sm file:font-medium disabled:pointer-events-none disabled:cursor-not-allowed disabled:opacity-50 md:text-sm","focus-visible:border-ring focus-visible:ring-ring/50 focus-visible:ring-[3px]","aria-invalid:ring-destructive/20 dark:aria-invalid:ring-destructive/40 aria-invalid:border-destructive",e),...r})}var IR="Label",u2=w.forwardRef((e,n)=>o.jsx(Ye.label,{...e,ref:n,onMouseDown:r=>{r.target.closest("button, input, select, textarea")||(e.onMouseDown?.(r),!r.defaultPrevented&&r.detail>1&&r.preventDefault())}}));u2.displayName=IR;var LR=u2;function kt({className:e,...n}){return o.jsx(LR,{"data-slot":"label",className:We("flex items-center gap-2 text-sm leading-none font-medium select-none group-data-[disabled=true]:pointer-events-none group-data-[disabled=true]:opacity-50 peer-disabled:cursor-not-allowed peer-disabled:opacity-50",e),...n})}var Td="Switch",[$R,e$]=Kn(Td),[PR,HR]=$R(Td),d2=w.forwardRef((e,n)=>{const{__scopeSwitch:r,name:a,checked:l,defaultChecked:c,required:d,disabled:f,value:m="on",onCheckedChange:h,form:g,...x}=e,[y,b]=w.useState(null),j=rt(n,E=>b(E)),N=w.useRef(!1),S=y?g||!!y.closest("form"):!0,[_,A]=Ar({prop:l,defaultProp:c??!1,onChange:h,caller:Td});return 
o.jsxs(PR,{scope:r,checked:_,disabled:f,children:[o.jsx(Ye.button,{type:"button",role:"switch","aria-checked":_,"aria-required":d,"data-state":p2(_),"data-disabled":f?"":void 0,disabled:f,value:m,...x,ref:j,onClick:ke(e.onClick,E=>{A(M=>!M),S&&(N.current=E.isPropagationStopped(),N.current||E.stopPropagation())})}),S&&o.jsx(h2,{control:y,bubbles:!N.current,name:a,value:m,checked:_,required:d,disabled:f,form:g,style:{transform:"translateX(-100%)"}})]})});d2.displayName=Td;var f2="SwitchThumb",m2=w.forwardRef((e,n)=>{const{__scopeSwitch:r,...a}=e,l=HR(f2,r);return o.jsx(Ye.span,{"data-state":p2(l.checked),"data-disabled":l.disabled?"":void 0,...a,ref:n})});m2.displayName=f2;var UR="SwitchBubbleInput",h2=w.forwardRef(({__scopeSwitch:e,control:n,checked:r,bubbles:a=!0,...l},c)=>{const d=w.useRef(null),f=rt(d,c),m=fg(r),h=Lp(n);return w.useEffect(()=>{const g=d.current;if(!g)return;const x=window.HTMLInputElement.prototype,b=Object.getOwnPropertyDescriptor(x,"checked").set;if(m!==r&&b){const j=new Event("click",{bubbles:a});b.call(g,r),g.dispatchEvent(j)}},[m,r,a]),o.jsx("input",{type:"checkbox","aria-hidden":!0,defaultChecked:r,...l,tabIndex:-1,ref:f,style:{...l.style,...h,position:"absolute",pointerEvents:"none",opacity:0,margin:0}})});h2.displayName=UR;function p2(e){return e?"checked":"unchecked"}var g2=d2,BR=m2;const Wi=w.forwardRef(({className:e,...n},r)=>o.jsx(g2,{className:We("peer inline-flex h-5 w-9 shrink-0 cursor-pointer items-center rounded-full border-2 border-transparent shadow-sm transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 focus-visible:ring-offset-background disabled:cursor-not-allowed disabled:opacity-50 data-[state=checked]:bg-primary data-[state=unchecked]:bg-input",e),...n,ref:r,children:o.jsx(BR,{className:We("pointer-events-none block h-4 w-4 rounded-full bg-background shadow-lg ring-0 transition-transform data-[state=checked]:translate-x-4 
data-[state=unchecked]:translate-x-0")})}));Wi.displayName=g2.displayName;const VR=["gpt-4.1","gpt-4.1-mini","o1","o1-mini","o3-mini"];function cb({open:e,onOpenChange:n,onBackendUrlChange:r}){const[a,l]=w.useState("general"),{oaiMode:c,setOAIMode:d,azureDeploymentEnabled:f,setAzureDeploymentEnabled:m,authRequired:h,serverCapabilities:g,serverVersion:x,runtime:y,uiMode:b,streamingEnabled:j,setStreamingEnabled:N}=le(),S="",[_,A]=w.useState(()=>localStorage.getItem("devui_backend_url")||S),[E,M]=w.useState(_),[T,D]=w.useState(!!localStorage.getItem("devui_auth_token")),[z,H]=w.useState(""),q=()=>{try{new URL(E),localStorage.setItem("devui_backend_url",E),A(E),r?.(E),n(!1),window.location.reload()}catch{alert("Please enter a valid URL (e.g., http://localhost:8080)")}},X=()=>{localStorage.removeItem("devui_backend_url"),M(S),A(S),r?.(S),window.location.reload()},W=()=>{z.trim()&&(localStorage.setItem("devui_auth_token",z.trim()),D(!0),H(""),window.location.reload())},G=()=>{localStorage.removeItem("devui_auth_token"),D(!1),H(""),window.location.reload()},ne=E!==_,B=!localStorage.getItem("devui_backend_url");return o.jsx(Ir,{open:e,onOpenChange:n,children:o.jsxs(Lr,{className:"w-[600px] max-w-[90vw] flex flex-col max-h-[85vh]",children:[o.jsx($r,{className:"p-6 pb-2 flex-shrink-0",children:o.jsx(Pr,{children:"Settings"})}),o.jsx(So,{onClose:()=>n(!1)}),o.jsxs("div",{className:"flex border-b px-6 flex-shrink-0",children:[o.jsxs("button",{onClick:()=>l("general"),className:`px-4 py-2 text-sm font-medium transition-colors relative ${a==="general"?"text-foreground":"text-muted-foreground hover:text-foreground"}`,children:["General",a==="general"&&o.jsx("div",{className:"absolute bottom-0 left-0 right-0 h-0.5 bg-primary"})]}),g.openai_proxy&&o.jsxs("button",{onClick:()=>l("proxy"),className:`px-4 py-2 text-sm font-medium transition-colors relative ${a==="proxy"?"text-foreground":"text-muted-foreground hover:text-foreground"}`,children:["OpenAI 
Proxy",a==="proxy"&&o.jsx("div",{className:"absolute bottom-0 left-0 right-0 h-0.5 bg-primary"})]}),o.jsxs("button",{onClick:()=>l("about"),className:`px-4 py-2 text-sm font-medium transition-colors relative ${a==="about"?"text-foreground":"text-muted-foreground hover:text-foreground"}`,children:["About",a==="about"&&o.jsx("div",{className:"absolute bottom-0 left-0 right-0 h-0.5 bg-primary"})]})]}),o.jsxs("div",{className:"px-6 pb-6 overflow-y-auto flex-1 min-h-[400px]",children:[a==="general"&&o.jsxs("div",{className:"space-y-6 pt-4",children:[o.jsxs("div",{className:"space-y-3",children:[o.jsxs("div",{className:"flex items-center justify-between",children:[o.jsx(kt,{htmlFor:"backend-url",className:"text-sm font-medium",children:"Backend URL"}),!B&&o.jsxs(Le,{variant:"ghost",size:"sm",onClick:X,className:"h-7 text-xs",title:"Reset to default",children:[o.jsx(sg,{className:"h-3 w-3 mr-1"}),"Reset"]})]}),o.jsx(as,{id:"backend-url",type:"url",value:E,onChange:U=>M(U.target.value),placeholder:"http://localhost:8080",className:"font-mono text-sm"}),o.jsxs("p",{className:"text-xs text-muted-foreground",children:["Default: ",o.jsx("span",{className:"font-mono",children:S})]}),o.jsx("div",{className:"flex gap-2 pt-2 min-h-[36px]",children:ne&&o.jsxs(o.Fragment,{children:[o.jsx(Le,{onClick:q,size:"sm",className:"flex-1",children:"Apply & Reload"}),o.jsx(Le,{onClick:()=>M(_),variant:"outline",size:"sm",className:"flex-1",children:"Cancel"})]})})]}),(h||T)&&o.jsxs("div",{className:"space-y-3 border-t pt-6",children:[o.jsxs("div",{className:"flex items-center justify-between",children:[o.jsx(kt,{className:"text-sm font-medium",children:"Authentication Token"}),!h&&T&&o.jsx("span",{className:"text-xs text-muted-foreground",children:"(Not required by current backend)"})]}),T?o.jsxs("div",{className:"space-y-3",children:[o.jsxs("div",{className:"flex items-center gap-2",children:[o.jsx(as,{type:"password",value:"••••••••••••••••••••",disabled:!0,className:"font-mono text-sm 
flex-1"}),o.jsx(Le,{variant:"destructive",size:"sm",onClick:G,className:"flex-shrink-0",children:"Clear"})]}),o.jsx("p",{className:"text-xs text-green-600 dark:text-green-400",children:"✓ Token configured and stored locally"})]}):o.jsxs("div",{className:"space-y-3",children:[o.jsx(as,{type:"password",value:z,onChange:U=>H(U.target.value),placeholder:"Enter bearer token",className:"font-mono text-sm",onKeyDown:U=>{U.key==="Enter"&&z.trim()&&W()}}),o.jsx(Le,{onClick:W,size:"sm",disabled:!z.trim(),className:"w-full",children:"Save & Reload"}),o.jsx("p",{className:"text-xs text-muted-foreground",children:h?"Required by backend (started with --auth flag)":"Not required by current backend"})]})]}),g.deployment&&o.jsxs("div",{className:"space-y-3 border-t pt-6",children:[o.jsxs("div",{className:"flex items-center justify-between",children:[o.jsxs("div",{className:"space-y-0.5",children:[o.jsx(kt,{className:"text-sm font-medium",children:"Azure Deployment"}),o.jsx("p",{className:"text-xs text-muted-foreground",children:"Enable one-click deployment to Azure Container Apps"})]}),o.jsx(Wi,{checked:f,onCheckedChange:m})]}),o.jsxs("details",{className:"group",children:[o.jsxs("summary",{className:"cursor-pointer text-xs text-muted-foreground hover:text-foreground transition-colors flex items-center gap-1",children:[o.jsx(en,{className:"h-3 w-3 transition-transform group-open:rotate-90"}),"Learn more about Azure deployment"]}),o.jsxs("div",{className:"mt-3 space-y-3 pl-4",children:[o.jsx("p",{className:"text-xs text-muted-foreground leading-relaxed",children:'When enabled, agents that support deployment will show a "Deploy to Azure" button. 
This allows you to deploy your agent to Azure Container Apps directly from DevUI.'}),o.jsxs("div",{className:"space-y-1.5",children:[o.jsx("p",{className:"text-xs font-medium",children:"When enabled:"}),o.jsxs("ul",{className:"text-xs text-muted-foreground space-y-0.5 list-disc list-inside",children:[o.jsx("li",{children:'Shows "Deploy to Azure" for supported agents'}),o.jsx("li",{children:"Requires Azure CLI and proper authentication"}),o.jsx("li",{children:"Backend must have deployment capabilities enabled"})]})]}),o.jsxs("div",{className:"space-y-1.5",children:[o.jsx("p",{className:"text-xs font-medium",children:"When disabled:"}),o.jsxs("ul",{className:"text-xs text-muted-foreground space-y-0.5 list-disc list-inside",children:[o.jsx("li",{children:'Shows "Deployment Guide" for all agents'}),o.jsx("li",{children:"Provides Docker templates and manual deployment instructions"}),o.jsx("li",{children:"No backend deployment capabilities required"})]})]})]})]})]}),o.jsx("div",{className:"space-y-3 border-t pt-6",children:o.jsxs("div",{className:"flex items-center justify-between",children:[o.jsxs("div",{className:"space-y-0.5",children:[o.jsx(kt,{className:"text-sm font-medium",children:"Show Tool Calls"}),o.jsx("p",{className:"text-xs text-muted-foreground",children:"Display function/tool calls and results in chat messages"})]}),o.jsx(Wi,{checked:le.getState().showToolCalls,onCheckedChange:U=>le.getState().setShowToolCalls(U)})]})}),o.jsxs("div",{className:"space-y-3 border-t pt-6",children:[o.jsxs("div",{className:"flex items-center justify-between",children:[o.jsxs("div",{className:"space-y-0.5",children:[o.jsx(kt,{className:"text-sm font-medium",children:"Streaming Mode"}),o.jsx("p",{className:"text-xs text-muted-foreground",children:"Stream responses token-by-token as they're generated"})]}),o.jsx(Wi,{checked:j,onCheckedChange:N})]}),!j&&o.jsxs("div",{className:"flex items-start gap-2 text-xs text-amber-600 dark:text-amber-400 bg-amber-500/10 p-3 
rounded",children:[o.jsx(Fs,{className:"h-3.5 w-3.5 flex-shrink-0 mt-0.5"}),o.jsxs("div",{children:[o.jsx("p",{className:"font-medium",children:"Non-streaming mode limitations:"}),o.jsxs("ul",{className:"mt-1 space-y-0.5 list-disc list-inside text-amber-600/80 dark:text-amber-400/80",children:[o.jsx("li",{children:"Tool calls won't display in real-time"}),o.jsx("li",{children:"No typing indicator during generation"}),o.jsx("li",{children:"Response appears all at once when complete"})]})]})]})]})]}),a==="proxy"&&g.openai_proxy&&o.jsxs("div",{className:"space-y-6 pt-4",children:[o.jsxs("div",{className:"space-y-4",children:[o.jsxs("div",{className:"flex items-center justify-between",children:[o.jsxs("div",{className:"space-y-0.5",children:[o.jsx(kt,{className:"text-base font-medium",children:"OpenAI Proxy Mode"}),o.jsx("p",{className:"text-xs text-muted-foreground",children:"Route requests through DevUI backend to OpenAI API"})]}),o.jsx(Wi,{checked:c.enabled,onCheckedChange:U=>d({...c,enabled:U})})]}),!c.enabled&&o.jsx("div",{className:"bordder border-muted bg-muted/30 rounded-lg p-4 space-y-3",children:o.jsxs("div",{className:"flex items-start gap-2",children:[o.jsx(Fs,{className:"h-4 w-4 flex-shrink-0 mt-0.5 text-blue-600 dark:text-blue-400"}),o.jsxs("div",{className:"space-y-2",children:[o.jsx("p",{className:"text-sm font-medium",children:"About OpenAI Proxy Mode"}),o.jsxs("p",{className:"text-xs text-muted-foreground leading-relaxed",children:["When enabled, your chat requests are sent to your DevUI backend"," ",o.jsxs("span",{className:"font-mono font-semibold",children:["(",_,")"]}),", which then forwards them to OpenAI's API. 
This keeps your"," ",o.jsx("span",{className:"font-mono font-semibold",children:"OPENAI_API_KEY"})," ","secure on the server instead of exposing it in the browser."]}),o.jsxs("div",{className:"space-y-1.5 pt-1",children:[o.jsx("p",{className:"text-xs font-medium",children:"Requirements:"}),o.jsxs("ul",{className:"text-xs text-muted-foreground space-y-0.5 list-disc list-inside",children:[o.jsxs("li",{children:["Backend must have"," ",o.jsx("span",{className:"font-mono",children:"OPENAI_API_KEY"})," ","configured"]}),o.jsx("li",{children:"Backend must support OpenAI Responses API proxying (DevUI does)"})]})]}),o.jsxs("div",{className:"space-y-1.5 pt-1",children:[o.jsx("p",{className:"text-xs font-medium",children:"Why use this?"}),o.jsx("p",{className:"text-xs text-muted-foreground",children:"Quickly test and compare OpenAI models directly through the DevUI interface without creating custom agents or exposing API keys in the browser."})]})]})]})}),c.enabled&&o.jsxs("div",{className:"space-y-4 pl-4 border-l-2 border-muted",children:[o.jsxs("div",{className:"space-y-2",children:[o.jsx(kt,{className:"text-sm font-medium",children:"Model"}),o.jsx(as,{type:"text",value:c.model,onChange:U=>d({...c,model:U.target.value}),placeholder:"gpt-4.1-mini",className:"font-mono text-sm"}),o.jsx("p",{className:"text-xs text-muted-foreground",children:"Enter any OpenAI model ID (e.g., gpt-4.1, o1, o3-mini)"})]}),o.jsxs("div",{className:"space-y-2",children:[o.jsx(kt,{className:"text-xs text-muted-foreground",children:"Common presets"}),o.jsx("div",{className:"flex flex-wrap gap-2",children:VR.map(U=>o.jsx(Le,{variant:c.model===U?"default":"outline",size:"sm",onClick:()=>d({...c,model:U}),className:"text-xs h-7",children:U},U))})]}),o.jsxs("details",{className:"group",children:[o.jsxs("summary",{className:"cursor-pointer text-sm font-medium text-muted-foreground hover:text-foreground transition-colors flex items-center gap-1",children:[o.jsx(en,{className:"h-3 w-3 transition-transform 
group-open:rotate-90"}),"Advanced Parameters (optional)"]}),o.jsxs("div",{className:"space-y-3 mt-3 pl-4",children:[o.jsxs("div",{className:"space-y-1",children:[o.jsx(kt,{className:"text-xs",children:"Temperature"}),o.jsx(as,{type:"number",step:"0.1",min:"0",max:"2",value:c.temperature??"",onChange:U=>d({...c,temperature:U.target.value?parseFloat(U.target.value):void 0}),placeholder:"1.0 (default)",className:"text-sm"}),o.jsx("p",{className:"text-xs text-muted-foreground",children:"Controls randomness (0-2)"})]}),o.jsxs("div",{className:"space-y-1",children:[o.jsx(kt,{className:"text-xs",children:"Max Output Tokens"}),o.jsx(as,{type:"number",min:"1",value:c.max_output_tokens??"",onChange:U=>d({...c,max_output_tokens:U.target.value?parseInt(U.target.value):void 0}),placeholder:"Auto",className:"text-sm"}),o.jsx("p",{className:"text-xs text-muted-foreground",children:"Maximum tokens in response"})]}),o.jsxs("div",{className:"space-y-1",children:[o.jsx(kt,{className:"text-xs",children:"Top P"}),o.jsx(as,{type:"number",step:"0.1",min:"0",max:"1",value:c.top_p??"",onChange:U=>d({...c,top_p:U.target.value?parseFloat(U.target.value):void 0}),placeholder:"1.0 (default)",className:"text-sm"}),o.jsx("p",{className:"text-xs text-muted-foreground",children:"Nucleus sampling (0-1)"})]}),o.jsxs("div",{className:"space-y-1",children:[o.jsx(kt,{className:"text-xs",children:"Reasoning Effort (o-series models)"}),o.jsxs("select",{value:c.reasoning_effort??"",onChange:U=>d({...c,reasoning_effort:U.target.value?U.target.value:void 0}),className:"flex h-9 w-full rounded-md border border-input bg-transparent px-3 py-1 text-sm shadow-sm transition-colors focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-ring",children:[o.jsx("option",{value:"",children:"Auto 
(default)"}),o.jsx("option",{value:"minimal",children:"Minimal"}),o.jsx("option",{value:"low",children:"Low"}),o.jsx("option",{value:"medium",children:"Medium"}),o.jsx("option",{value:"high",children:"High"})]}),o.jsx("p",{className:"text-xs text-muted-foreground",children:"Constrains reasoning effort (faster/cheaper vs thorough)"})]})]})]})]})]}),c.enabled&&o.jsxs("div",{className:"flex items-start gap-2 text-xs text-muted-foreground bg-muted/50 p-3 rounded",children:[o.jsx(Fs,{className:"h-3.5 w-3.5 flex-shrink-0 mt-0.5"}),o.jsx("div",{className:"space-y-1",children:o.jsxs("p",{children:["Requests route through"," ",o.jsx("span",{className:"font-mono font-semibold",children:_})," ","to OpenAI API. Server must have"," ",o.jsx("span",{className:"font-mono font-semibold",children:"OPENAI_API_KEY"})," ","configured."]})})]})]}),a==="about"&&o.jsxs("div",{className:"space-y-4 pt-4",children:[o.jsx("p",{className:"text-sm text-muted-foreground",children:"DevUI is a sample app for getting started with Agent Framework."}),o.jsxs("div",{className:"space-y-2 text-sm",children:[o.jsxs("div",{className:"flex justify-between",children:[o.jsx("span",{className:"text-muted-foreground",children:"Version:"}),o.jsx("span",{className:"font-mono",children:x||"Unknown"})]}),o.jsxs("div",{className:"flex justify-between",children:[o.jsx("span",{className:"text-muted-foreground",children:"Runtime:"}),o.jsx("span",{className:"font-mono capitalize",children:y||"Unknown"})]}),o.jsxs("div",{className:"flex justify-between",children:[o.jsx("span",{className:"text-muted-foreground",children:"UI Mode:"}),o.jsx("span",{className:"font-mono capitalize",children:b||"Unknown"})]})]}),(g||h!==void 0)&&o.jsxs("div",{className:"space-y-2 pt-2",children:[o.jsx("p",{className:"text-xs font-medium text-muted-foreground uppercase tracking-wide",children:"Capabilities"}),o.jsxs("div",{className:"space-y-1 text-sm",children:[g?.instrumentation!==void 0&&o.jsxs("div",{className:"flex justify-between 
items-center",children:[o.jsx("span",{className:"text-muted-foreground",children:"Instrumentation:"}),o.jsx("span",{className:`text-xs px-2 py-0.5 rounded-full ${g.instrumentation?"bg-green-500/10 text-green-600 dark:text-green-400":"bg-muted text-muted-foreground"}`,children:g.instrumentation?"Enabled":"Disabled"})]}),g?.openai_proxy!==void 0&&o.jsxs("div",{className:"flex justify-between items-center",children:[o.jsx("span",{className:"text-muted-foreground",children:"OpenAI Proxy:"}),o.jsx("span",{className:`text-xs px-2 py-0.5 rounded-full ${g.openai_proxy?"bg-green-500/10 text-green-600 dark:text-green-400":"bg-muted text-muted-foreground"}`,children:g.openai_proxy?"Available":"Not Configured"})]}),g?.deployment!==void 0&&o.jsxs("div",{className:"flex justify-between items-center",children:[o.jsx("span",{className:"text-muted-foreground",children:"Deployment:"}),o.jsx("span",{className:`text-xs px-2 py-0.5 rounded-full ${g.deployment?"bg-green-500/10 text-green-600 dark:text-green-400":"bg-muted text-muted-foreground"}`,children:g.deployment?"Available":"Disabled"})]}),h!==void 0&&o.jsxs("div",{className:"flex justify-between items-center",children:[o.jsx("span",{className:"text-muted-foreground",children:"Authentication:"}),o.jsx("span",{className:`text-xs px-2 py-0.5 rounded-full ${h?"bg-blue-500/10 text-blue-600 dark:text-blue-400":"bg-muted text-muted-foreground"}`,children:h?"Required":"Not Required"})]})]})]}),o.jsx("div",{className:"flex justify-center pt-2",children:o.jsxs(Le,{variant:"outline",size:"sm",onClick:()=>window.open("https://github.com/microsoft/agent-framework","_blank"),className:"text-xs",children:[o.jsx(Hu,{className:"h-3 w-3 mr-1"}),"Learn More about Agent Framework"]})})]})]})]})})}const qR="modulepreload",FR=function(e,n){return new URL(e,n).href},ub={},_u=function(n,r,a){let l=Promise.resolve();if(r&&r.length>0){let h=function(g){return 
Promise.all(g.map(x=>Promise.resolve(x).then(y=>({status:"fulfilled",value:y}),y=>({status:"rejected",reason:y}))))};const d=document.getElementsByTagName("link"),f=document.querySelector("meta[property=csp-nonce]"),m=f?.nonce||f?.getAttribute("nonce");l=h(r.map(g=>{if(g=FR(g,a),g in ub)return;ub[g]=!0;const x=g.endsWith(".css"),y=x?'[rel="stylesheet"]':"";if(a)for(let j=d.length-1;j>=0;j--){const N=d[j];if(N.href===g&&(!x||N.rel==="stylesheet"))return}else if(document.querySelector(`link[href="${g}"]${y}`))return;const b=document.createElement("link");if(b.rel=x?"stylesheet":qR,x||(b.as="script"),b.crossOrigin="",b.href=g,m&&b.setAttribute("nonce",m),document.head.appendChild(b),x)return new Promise((j,N)=>{b.addEventListener("load",j),b.addEventListener("error",()=>N(new Error(`Unable to preload CSS for ${g}`)))})}))}function c(d){const f=new Event("vite:preloadError",{cancelable:!0});if(f.payload=d,window.dispatchEvent(f),!f.defaultPrevented)throw d}return l.then(d=>{for(const f of d||[])f.status==="rejected"&&c(f.reason);return n().catch(c)})},x2="devui_streaming_state_",y2=1440*60*1e3;function Yu(e){return`${x2}${e}`}function YR(e){let n="";for(const r of e)r.type==="response.output_text.delta"&&"delta"in r&&(n+=r.delta);return n}function v2(e){try{const n=Yu(e.conversationId),r=JSON.stringify(e);localStorage.setItem(n,r)}catch(n){console.error("Failed to save streaming state:",n);try{b2();const r=Yu(e.conversationId),a=JSON.stringify(e);localStorage.setItem(r,a)}catch{console.error("Failed to save streaming state even after cleanup")}}}function ba(e){try{const n=Yu(e),r=localStorage.getItem(n);if(!r)return null;const a=JSON.parse(r);return Date.now()-a.timestamp>y2?(Eu(e),null):a.completed?null:a}catch(n){return console.error("Failed to load streaming state:",n),null}}function Nh(e,n,r,a){try{const l=ba(e),c="sequence_number"in n?n.sequence_number:void 
0,d=l?[...l.events,n]:[n],f={conversationId:e,responseId:r,lastMessageId:a,lastSequenceNumber:c??l?.lastSequenceNumber??-1,events:d,timestamp:Date.now(),completed:n.type==="response.completed"||n.type==="response.failed",accumulatedText:YR(d)};v2(f)}catch(l){console.error("Failed to update streaming state:",l)}}function jh(e){try{const n=ba(e);n&&(n.completed=!0,n.timestamp=Date.now(),v2(n))}catch(n){console.error("Failed to mark streaming as completed:",n)}}function Eu(e){try{const n=Yu(e);localStorage.removeItem(n)}catch(n){console.error("Failed to clear streaming state:",n)}}function b2(){try{const e=Object.keys(localStorage),n=Date.now();for(const r of e)if(r.startsWith(x2))try{const a=localStorage.getItem(r);if(a){const l=JSON.parse(a);(n-l.timestamp>y2||l.completed)&&localStorage.removeItem(r)}}catch{localStorage.removeItem(r)}}catch(e){console.error("Failed to clear expired streaming states:",e)}}function GR(){b2()}function w2(){const[e,n]=w.useState(!1),r=w.useRef(null),a=w.useCallback(()=>(r.current=new AbortController,n(!1),r.current.signal),[]),l=w.useCallback(()=>{r.current&&(n(!0),r.current.abort(),r.current=null)},[]),c=w.useCallback(()=>{n(!1)},[]),d=w.useCallback(()=>{r.current&&(r.current.abort(),r.current=null)},[]);return{isCancelling:e,createAbortSignal:a,handleCancel:l,resetCancelling:c,cleanup:d}}function Gu(e){return e instanceof DOMException&&e.name==="AbortError"}function XR(e={}){const{onDrop:n,disabled:r=!1}=e,[a,l]=w.useState(!1),[c,d]=w.useState([]),f=w.useRef(0),m=w.useCallback(b=>{b.preventDefault(),b.stopPropagation(),!r&&(f.current++,b.dataTransfer.items&&b.dataTransfer.items.length>0&&l(!0))},[r]),h=w.useCallback(b=>{b.preventDefault(),b.stopPropagation(),!r&&(f.current--,f.current===0&&l(!1))},[r]),g=w.useCallback(b=>{b.preventDefault(),b.stopPropagation()},[]),x=w.useCallback(b=>{if(b.preventDefault(),b.stopPropagation(),l(!1),f.current=0,r)return;const 
j=Array.from(b.dataTransfer.files);j.length>0&&(d(j),n?.(j))},[r,n]),y=w.useCallback(()=>{d([])},[]);return{isDragOver:a,droppedFiles:c,clearDroppedFiles:y,dragHandlers:{onDragEnter:m,onDragLeave:h,onDragOver:g,onDrop:x}}}const ZR="",WR=1e3,Sh=10;function KR(){const e=localStorage.getItem("devui_backend_url");return e||ZR}function QR(e){return new Promise(n=>setTimeout(n,e))}class JR{baseUrl;authToken=null;constructor(n){this.baseUrl=n||KR(),this.authToken=localStorage.getItem("devui_auth_token")}setBaseUrl(n){this.baseUrl=n}getBaseUrl(){return this.baseUrl}setAuthToken(n){this.authToken=n,n?localStorage.setItem("devui_auth_token",n):localStorage.removeItem("devui_auth_token")}getAuthToken(){return this.authToken}clearAuthToken(){this.setAuthToken(null)}async request(n,r={}){const a=`${this.baseUrl}${n}`,l={"Content-Type":"application/json",...r.headers};this.authToken&&(l.Authorization=`Bearer ${this.authToken}`);const c=await fetch(a,{...r,headers:l});if(!c.ok){if(c.status===401)throw this.clearAuthToken(),new Error("UNAUTHORIZED");let d=`API request failed: ${c.status} ${c.statusText}`;try{const f=await c.json();f.detail?typeof f.detail=="string"?d=f.detail:typeof f.detail=="object"&&f.detail.error?.message&&(d=f.detail.error.message):f.error?.message&&(d=f.error.message)}catch{}throw new Error(d)}return c.json()}async getHealth(){return this.request("/health")}async getMeta(){return this.request("/meta")}async getEntities(){const r=(await this.request("/v1/entities")).entities.map(c=>{if(c.type==="agent")return{id:c.id,name:c.name,description:c.description,type:"agent",source:c.source||"directory",tools:(c.tools||[]).map(d=>typeof d=="string"?d:JSON.stringify(d)),has_env:!!(c.required_env_vars&&c.required_env_vars.length>0),module_path:typeof c.metadata?.module_path=="string"?c.metadata.module_path:void 
0,required_env_vars:c.required_env_vars,metadata:c.metadata,deployment_supported:c.deployment_supported,deployment_reason:c.deployment_reason,instructions:c.instructions,model_id:c.model_id,chat_client_type:c.chat_client_type,context_providers:c.context_providers,middleware:c.middleware};{const d=c.executors||c.tools||[];let f=c.start_executor_id||"";if(!f&&d.length>0){const m=d[0];typeof m=="string"&&(f=m)}return{id:c.id,name:c.name,description:c.description,type:"workflow",source:c.source||"directory",executors:d.map(m=>typeof m=="string"?m:JSON.stringify(m)),has_env:!!(c.required_env_vars&&c.required_env_vars.length>0),module_path:typeof c.metadata?.module_path=="string"?c.metadata.module_path:void 0,required_env_vars:c.required_env_vars,metadata:c.metadata,deployment_supported:c.deployment_supported,deployment_reason:c.deployment_reason,input_schema:c.input_schema||{type:"string"},input_type_name:c.input_type_name||"Input",start_executor_id:f,tools:[]}}}),a=r.filter(c=>c.type==="agent"),l=r.filter(c=>c.type==="workflow");return{entities:r,agents:a,workflows:l}}async getAgents(){const{agents:n}=await this.getEntities();return n}async getWorkflows(){const{workflows:n}=await this.getEntities();return n}async getAgentInfo(n){return this.request(`/v1/entities/${n}/info?type=agent`)}async getWorkflowInfo(n){return this.request(`/v1/entities/${n}/info?type=workflow`)}async reloadEntity(n){return this.request(`/v1/entities/${n}/reload`,{method:"POST"})}async createConversation(n){const{oaiMode:r}=await _u(()=>Promise.resolve().then(()=>wu),void 0,import.meta.url).then(c=>({oaiMode:c.useDevUIStore.getState().oaiMode})),a={};r.enabled&&(a["X-Proxy-Backend"]="openai");const l=await this.request("/v1/conversations",{method:"POST",headers:a,body:JSON.stringify({metadata:n})});return{id:l.id,object:"conversation",created_at:l.created_at,metadata:l.metadata}}async listConversations(n){const r=n?`/v1/conversations?agent_id=${encodeURIComponent(n)}`:"/v1/conversations",a=await 
this.request(r);return{data:a.data.map(l=>({id:l.id,object:"conversation",created_at:l.created_at,metadata:l.metadata})),has_more:a.has_more}}async getConversation(n){const r=await this.request(`/v1/conversations/${n}`);return{id:r.id,object:"conversation",created_at:r.created_at,metadata:r.metadata}}async deleteConversation(n){try{return await this.request(`/v1/conversations/${n}`,{method:"DELETE"}),Eu(n),!0}catch{return!1}}async listConversationItems(n,r){const a=new URLSearchParams;r?.limit&&a.set("limit",r.limit.toString()),r?.after&&a.set("after",r.after),r?.order&&a.set("order",r.order);const l=a.toString(),c=`/v1/conversations/${n}/items${l?`?${l}`:""}`;return this.request(c)}async getConversationItem(n,r){const a=`/v1/conversations/${n}/items/${r}`;return this.request(a)}async deleteConversationItem(n,r){const a=await fetch(`${this.baseUrl}/v1/conversations/${n}/items/${r}`,{method:"DELETE"});if(!a.ok)throw new Error(`Failed to delete item: ${a.statusText}`)}async*streamOpenAIResponse(n,r,a,l){const{oaiMode:c}=await _u(()=>Promise.resolve().then(()=>wu),void 0,import.meta.url).then(x=>({oaiMode:x.useDevUIStore.getState().oaiMode}));c.enabled&&(n.model=c.model,c.temperature!==void 0&&(n.temperature=c.temperature),c.max_output_tokens!==void 0&&(n.max_output_tokens=c.max_output_tokens),c.top_p!==void 0&&(n.top_p=c.top_p),c.instructions!==void 0&&(n.instructions=c.instructions),c.reasoning_effort!==void 0&&(n.reasoning={effort:c.reasoning_effort}));let d=-1,f=0,m=!1,h=l,g;if(r){const x=ba(r);if(x)if(l||(h=x.responseId),d=x.lastSequenceNumber,g=x.lastMessageId,l)m=x.events.length>0;else for(const y of x.events)m=!0,yield y}for(;f<=Sh;)try{let x;if(h){const N=new URLSearchParams;N.set("stream","true"),d>=0&&N.set("starting_after",d.toString());const S=`${this.baseUrl}/v1/responses/${h}?${N.toString()}`,_={Accept:"text/event-stream"};this.authToken&&(_.Authorization=`Bearer ${this.authToken}`),x=await fetch(S,{method:"GET",headers:_,signal:a})}else{const 
N=`${this.baseUrl}/v1/responses`,S={"Content-Type":"application/json",Accept:"text/event-stream"};c.enabled&&(S["X-Proxy-Backend"]="openai"),this.authToken&&(S.Authorization=`Bearer ${this.authToken}`),x=await fetch(N,{method:"POST",headers:S,body:JSON.stringify(n),signal:a})}if(!x.ok){if(x.status===401)throw this.clearAuthToken(),new Error("UNAUTHORIZED");if(x.status>=400&&x.status<500){let S=`Client error ${x.status}`;try{const _=await x.json();_.error&&_.error.message?S=_.error.message:_.detail&&(S=_.detail)}catch{}throw new Error(`CLIENT_ERROR: ${S}`)}let N=`Request failed with status ${x.status}`;try{const S=await x.json();S.error&&S.error.message?N=S.error.message:S.detail&&(N=S.detail)}catch{}throw new Error(N)}const y=x.body?.getReader();if(!y)throw new Error("Response body is not readable");const b=new TextDecoder;let j="";try{for(;;){if(a?.aborted)throw new DOMException("Request aborted","AbortError");const{done:N,value:S}=await y.read();if(N){r&&jh(r);return}const _=b.decode(S,{stream:!0});j+=_;const A=j.split(` +`)||l.trim().match(/[.!?]\s*$/))&&(n.push({type:"response.output_text.delta",delta:l.trim()}),l="");continue}c.type!=="response.usage.complete"&&n.push(c)}for(const[,c]of r)if(c.arguments.trim()&&c.arguments.trim().length>2){const d=a.get(c.callId)||c.name||"unknown";n.push({type:"response.function_call.complete",data:{name:d,arguments:c.arguments,call_id:c.callId}})}return l.trim()&&n.push({type:"response.output_text.delta",delta:l.trim()}),n}function wR(e){switch(e.type){case"response.output_text.delta":if("delta"in e){const n=e.delta||"";return n.length>60?`${n.slice(0,60)}...`:n}return"Text output";case"response.function_call.complete":if("data"in e&&e.data){const n=e.data;let r=n.name||"unknown";(!r||r==="unknown")&&(r="function_call");const a=n.arguments?typeof n.arguments=="string"?n.arguments.slice(0,30):JSON.stringify(n.arguments).slice(0,30):"";return`Calling ${r}(${a}${a.length>=30?"...":""})`}return"Function 
call";case"response.function_call_arguments.delta":return"delta"in e&&e.delta?`Function arg delta: ${e.delta.slice(0,30)}${e.delta.length>30?"...":""}`:"Function arguments...";case"response.function_result.complete":{const r=e.output.slice(0,40);return`Function result: ${r}${r.length>=40?"...":""}`}case"response.output_item.added":{const n=e;return n.item.type==="function_call"?`Tool call: ${n.item.name}`:"Output item added"}case"response.workflow_event.completed":return"data"in e&&e.data?`Executor: ${e.data.executor_id||"unknown"}`:"Workflow event";case"response.trace.completed":return"data"in e&&e.data?`Trace: ${e.data.operation_name||"unknown"}`:"Trace event";case"response.completed":if("response"in e&&e.response&&"usage"in e.response){const r=e.response.usage;if(r)return`Response complete (${r.total_tokens} tokens)`}return"Response complete";case"response.done":return"Response complete";case"error":return"message"in e&&typeof e.message=="string"?e.message:"Error occurred";default:return`${e.type}`}}function NR(e){switch(e){case"response.output_text.delta":return eg;case"response.function_call.complete":case"response.function_call.delta":case"response.function_call_arguments.delta":return _a;case"response.function_result.complete":return nn;case"response.output_item.added":return nn;case"response.workflow_event.completed":return Qp;case"response.trace.completed":return Bu;case"response.completed":return nn;case"response.done":return nn;case"error":return kl;default:return hs}}function jR(e){switch(e){case"response.output_text.delta":return"text-gray-600 dark:text-gray-400";case"response.function_call.complete":case"response.function_call.delta":case"response.function_call_arguments.delta":return"text-blue-600 dark:text-blue-400";case"response.function_result.complete":return"text-green-600 dark:text-green-400";case"response.output_item.added":return"text-green-600 dark:text-green-400";case"response.workflow_event.completed":return"text-purple-600 
dark:text-purple-400";case"response.trace.completed":return"text-orange-600 dark:text-orange-400";case"response.completed":return"text-green-600 dark:text-green-400";case"response.done":return"text-green-600 dark:text-green-400";case"error":return"text-red-600 dark:text-red-400";default:return"text-gray-600 dark:text-gray-400"}}function SR({event:e}){const[n,r]=w.useState(!1),a=e.type||"unknown",l=NR(a),c=jR(a),d="_uiTimestamp"in e&&typeof e._uiTimestamp=="number"?new Date(e._uiTimestamp*1e3).toLocaleTimeString():new Date().toLocaleTimeString(),f=wR(e),m=e.type==="response.function_call.complete"&&"data"in e&&e.data||e.type==="response.function_result.complete"||e.type==="response.output_item.added"&&zr(e)!==null||e.type==="response.workflow_event.completed"&&"data"in e&&e.data||e.type==="response.trace.completed"&&"data"in e&&e.data||e.type==="response.trace.completed"&&"data"in e&&e.data||e.type==="response.output_text.delta"&&"delta"in e&&e.delta&&e.delta.length>100||e.type==="response.completed"&&"response"in e&&e.response||e.type==="error";return o.jsxs("div",{className:"border-l-2 border-muted pl-3 py-2 hover:bg-muted/50 transition-colors",children:[o.jsxs("div",{className:"flex items-center gap-2 text-xs text-muted-foreground mb-1",children:[o.jsx(l,{className:`h-3 w-3 ${c}`}),o.jsx("span",{className:"font-mono",children:d}),o.jsx(ut,{variant:"outline",className:"text-xs py-0",children:e.type?e.type.replace("response.",""):"unknown"})]}),o.jsxs("div",{className:"text-sm",children:[o.jsxs("div",{className:`flex items-center gap-2 ${m?"cursor-pointer":""}`,onClick:()=>m&&r(!n),children:[m&&o.jsx("div",{className:"text-muted-foreground",children:n?o.jsx(Rt,{className:"h-3 w-3"}):o.jsx(en,{className:"h-3 w-3"})}),o.jsx("div",{className:"text-muted-foreground flex-1",children:m&&f.length>80?`${f.slice(0,80)}...`:f})]}),n&&m&&o.jsx("div",{className:"mt-2 ml-5 p-3 bg-muted/30 rounded border",children:o.jsx(_R,{event:e})})]})]})}function 
_R({event:e}){if(e.type==="error"){const n=e;return o.jsxs("div",{className:"space-y-2",children:[o.jsxs("div",{className:"flex items-center gap-2",children:[o.jsx(kl,{className:"h-4 w-4 text-red-500"}),o.jsx("span",{className:"font-semibold text-sm",children:"Error Details"})]}),o.jsxs("div",{className:"text-xs",children:[n.message&&o.jsxs("div",{className:"mb-2",children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Message:"}),o.jsx("div",{className:"mt-1",children:o.jsx("pre",{className:"text-xs bg-destructive/10 border border-destructive/30 rounded p-2 text-destructive whitespace-pre-wrap break-all",children:n.message})})]}),n.code&&o.jsxs("div",{className:"mb-2",children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Code:"}),o.jsx("span",{className:"ml-2 font-mono text-xs",children:n.code})]}),n.param&&o.jsxs("div",{className:"mb-2",children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Parameter:"}),o.jsx("span",{className:"ml-2 font-mono text-xs",children:n.param})]}),o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Raw Event:"}),o.jsx("div",{className:"mt-1",children:o.jsx("pre",{className:"text-xs bg-background border rounded p-2 whitespace-pre-wrap break-all max-h-32 overflow-auto",children:JSON.stringify(e,null,2)})})]})]})]})}switch(e.type){case"response.function_call.complete":if("data"in e&&e.data){const n=e.data;return o.jsxs("div",{className:"space-y-2",children:[o.jsxs("div",{className:"flex items-center gap-2",children:[o.jsx(_a,{className:"h-4 w-4 text-blue-500"}),o.jsx("span",{className:"font-semibold text-sm",children:"Function Call"})]}),o.jsxs("div",{className:"grid grid-cols-1 gap-2 text-xs",children:[o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Function:"}),o.jsx("span",{className:"ml-2 font-mono bg-blue-100 dark:bg-blue-900 px-2 py-1 
rounded",children:n.name||"unknown"})]}),n.call_id&&o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Call ID:"}),o.jsx("span",{className:"ml-2 font-mono text-xs",children:n.call_id})]}),n.arguments&&o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Arguments:"}),o.jsx("div",{className:"mt-1 max-h-32 overflow-auto",children:o.jsx("pre",{className:"text-xs bg-background border rounded p-2 whitespace-pre-wrap max-w-full break-all",children:typeof n.arguments=="string"?n.arguments:JSON.stringify(n.arguments,null,1)})})]})]})]})}break;case"response.function_result.complete":{const n=e;return o.jsxs("div",{className:"space-y-2",children:[o.jsxs("div",{className:"flex items-center gap-2",children:[o.jsx(nn,{className:"h-4 w-4 text-green-500"}),o.jsx("span",{className:"font-semibold text-sm",children:"Function Result"})]}),o.jsxs("div",{className:"grid grid-cols-1 gap-2 text-xs",children:[o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Call ID:"}),o.jsx("span",{className:"ml-2 font-mono text-xs",children:n.call_id})]}),o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Status:"}),o.jsx("span",{className:`ml-2 px-2 py-1 rounded text-xs font-medium ${n.status==="completed"?"bg-green-100 dark:bg-green-900 text-green-800 dark:text-green-200":"bg-red-100 dark:bg-red-900 text-red-800 dark:text-red-200"}`,children:n.status})]}),o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Output:"}),o.jsx("div",{className:"mt-1 max-h-32 overflow-auto",children:o.jsx("pre",{className:"text-xs bg-background border rounded p-2 whitespace-pre-wrap max-w-full break-all",children:n.output})})]})]})]})}case"response.output_item.added":{const n=zr(e);if(n)return o.jsxs("div",{className:"space-y-2",children:[o.jsxs("div",{className:"flex items-center 
gap-2",children:[o.jsx(nn,{className:"h-4 w-4 text-green-500"}),o.jsx("span",{className:"font-semibold text-sm",children:"Function Result"})]}),o.jsxs("div",{className:"grid grid-cols-1 gap-2 text-xs",children:[o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Call ID:"}),o.jsx("span",{className:"ml-2 font-mono text-xs",children:n.call_id})]}),o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Status:"}),o.jsx("span",{className:`ml-2 px-2 py-1 rounded text-xs font-medium ${n.status==="completed"?"bg-green-100 dark:bg-green-900 text-green-800 dark:text-green-200":"bg-red-100 dark:bg-red-900 text-red-800 dark:text-red-200"}`,children:n.status})]}),o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Output:"}),o.jsx("div",{className:"mt-1 max-h-32 overflow-auto",children:o.jsx("pre",{className:"text-xs bg-background border rounded p-2 whitespace-pre-wrap max-w-full break-all",children:n.output})})]})]})]});break}case"response.workflow_event.completed":if("data"in e&&e.data){const n=e.data;return o.jsxs("div",{className:"space-y-2",children:[o.jsxs("div",{className:"flex items-center gap-2",children:[o.jsx(Qp,{className:"h-4 w-4 text-purple-500"}),o.jsx("span",{className:"font-semibold text-sm",children:"Workflow Event"})]}),o.jsxs("div",{className:"grid grid-cols-1 gap-2 text-xs",children:[o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Event Type:"}),o.jsx("span",{className:"ml-2 font-mono bg-purple-100 dark:bg-purple-900 px-2 py-1 rounded",children:n.event_type||"unknown"})]}),n.executor_id&&o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Executor:"}),o.jsx("span",{className:"ml-2 font-mono",children:n.executor_id})]}),n.timestamp&&o.jsxs("div",{children:[o.jsx("span",{className:"font-medium 
text-muted-foreground",children:"Timestamp:"}),o.jsx("span",{className:"ml-2 font-mono text-xs",children:n.timestamp})]}),n.data&&o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Data:"}),o.jsx("div",{className:"mt-1 max-h-32 overflow-auto",children:o.jsx("pre",{className:"text-xs bg-background border rounded p-2 whitespace-pre-wrap max-w-full break-all",children:typeof n.data=="string"?n.data:JSON.stringify(n.data,null,1)})})]})]})]})}break;case"response.trace.completed":if("data"in e&&e.data){const n=e.data;return o.jsxs("div",{className:"space-y-2",children:[o.jsxs("div",{className:"flex items-center gap-2",children:[o.jsx(Bu,{className:"h-4 w-4 text-orange-500"}),o.jsx("span",{className:"font-semibold text-sm",children:"Trace Event"})]}),o.jsxs("div",{className:"grid grid-cols-1 gap-2 text-xs",children:[o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Operation:"}),o.jsx("span",{className:"ml-2 font-mono bg-orange-100 dark:bg-orange-900 px-2 py-1 rounded",children:n.operation_name||"unknown"})]}),n.span_id&&o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Span ID:"}),o.jsx("span",{className:"ml-2 font-mono text-xs",children:n.span_id})]}),n.trace_id&&o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Trace ID:"}),o.jsx("span",{className:"ml-2 font-mono text-xs",children:n.trace_id})]}),n.duration_ms&&o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Duration:"}),o.jsxs("span",{className:"ml-2 font-mono text-xs",children:[Number(n.duration_ms).toFixed(2),"ms"]})]}),n.status&&o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Status:"}),o.jsx("span",{className:`ml-2 px-2 py-1 rounded text-xs font-medium ${n.status==="StatusCode.UNSET"||n.status==="OK"?"bg-green-100 dark:bg-green-900 text-green-800 
dark:text-green-200":"bg-red-100 dark:bg-red-900 text-red-800 dark:text-red-200"}`,children:n.status||"unknown"})]}),n.entity_id&&o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Entity:"}),o.jsx("span",{className:"ml-2 font-mono text-xs",children:n.entity_id})]}),n.attributes&&Object.keys(n.attributes).length>0&&o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Attributes:"}),o.jsx("div",{className:"mt-1 max-h-32 overflow-auto",children:o.jsx("pre",{className:"text-xs bg-background border rounded p-2 whitespace-pre-wrap break-all",children:l2(n.attributes)})})]})]})]})}break;case"response.output_text.delta":if("delta"in e&&e.delta)return o.jsxs("div",{className:"space-y-2",children:[o.jsxs("div",{className:"flex items-center gap-2",children:[o.jsx(eg,{className:"h-4 w-4 text-gray-500"}),o.jsx("span",{className:"font-semibold text-sm",children:"Text Output"})]}),o.jsx("div",{className:"max-h-32 overflow-auto",children:o.jsx("pre",{className:"text-xs bg-background border rounded p-2 whitespace-pre-wrap max-w-full break-all",children:e.delta})})]});break;case"response.completed":if("response"in e&&e.response){const r=e.response;return o.jsx("div",{className:"space-y-2",children:o.jsxs("div",{className:"grid grid-cols-1 gap-2 text-xs",children:[r.usage&&o.jsxs(o.Fragment,{children:[o.jsx("div",{children:o.jsx("span",{className:"font-medium text-muted-foreground",children:"Usage:"})}),o.jsxs("div",{className:"ml-4 space-y-1",children:[o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Input tokens:"}),o.jsx("span",{className:"ml-2 font-mono",children:r.usage.input_tokens})]}),o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Output tokens:"}),o.jsx("span",{className:"ml-2 font-mono",children:r.usage.output_tokens})]}),o.jsxs("div",{children:[o.jsx("span",{className:"font-medium 
text-muted-foreground",children:"Total tokens:"}),o.jsx("span",{className:"ml-2 font-mono bg-green-100 dark:bg-green-900 px-2 py-1 rounded",children:r.usage.total_tokens})]})]})]}),r.id&&o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Response ID:"}),o.jsx("span",{className:"ml-2 font-mono text-xs break-all",children:r.id})]}),r.model&&o.jsxs("div",{children:[o.jsx("span",{className:"font-medium text-muted-foreground",children:"Model:"}),o.jsx("span",{className:"ml-2 font-mono text-xs break-all",children:r.model})]})]})})}break;default:return o.jsx("div",{className:"text-xs text-muted-foreground",children:o.jsx("pre",{className:"bg-background border rounded p-2 overflow-auto max-h-32",children:JSON.stringify(e,null,2)})})}return null}function ER({events:e,isStreaming:n}){const r=w.useRef(null),a=gg(e),c=[...i2(a)].reverse();return o.jsxs("div",{className:"h-full flex flex-col",children:[o.jsxs("div",{className:"flex items-center justify-between p-3 border-b",children:[o.jsxs("div",{className:"flex items-center gap-2",children:[o.jsx(Qp,{className:"h-4 w-4"}),o.jsx("span",{className:"font-medium",children:"Events"}),o.jsxs(ut,{variant:"outline",children:[a.length,e.length>a.length?` (${e.length} raw)`:""]})]}),n&&o.jsxs("div",{className:"flex items-center gap-1 text-xs text-muted-foreground",children:[o.jsx("div",{className:"h-2 w-2 animate-pulse rounded-full bg-green-500 dark:bg-green-400"}),"Streaming"]})]}),o.jsx(Wn,{ref:r,className:"flex-1",children:o.jsx("div",{className:"p-3",children:a.length===0?o.jsx("div",{className:"text-center text-muted-foreground text-sm py-8",children:e.length===0?"No events yet. Start a conversation to see real-time events.":"Processing events... 
Accumulated events will appear here."}):o.jsx("div",{className:"space-y-2",children:c.map((d,f)=>"type"in d&&d.type==="separator"?o.jsx(a2,{},d.id):o.jsx(SR,{event:d},`${d.type}-${f}`))})})})]})}function CR(e){const n=new Map;for(const a of e){if(!("data"in a))continue;const c=a.data.response_id||"unknown";n.has(c)||n.set(c,[]),n.get(c).push(a)}const r=[];for(const[a,l]of n){const c=new Map,d=[];for(const y of l){if(!("data"in y))continue;const b=y.data,j=b.span_id||`span_${Math.random()}`;c.set(j,{event:y,data:b,children:[]})}for(const y of l){if(!("data"in y))continue;const b=y.data,j=b.span_id||"",N=b.parent_span_id,S=c.get(j);S&&(N&&c.has(N)?c.get(N).children.push(S):d.push(S))}d.sort((y,b)=>(y.data.start_time||0)-(b.data.start_time||0));const f=y=>{y.children.sort((b,j)=>(b.data.start_time||0)-(j.data.start_time||0)),y.children.forEach(f)};d.forEach(f);const m=l[0],h=m&&"data"in m?m.data:null,g=Math.min(...l.map(y=>("data"in y?y.data:null)?.start_time||Date.now()/1e3)),x=l.reduce((y,b)=>{const j="data"in b?b.data:null;return y+(j?.duration_ms||0)},0);r.push({response_id:a,timestamp:g,traces:d,totalDuration:x,entity_id:h?.entity_id})}return r.sort((a,l)=>l.timestamp-a.timestamp),r}function Su(e){if(typeof e=="string"){const n=e.trim();if(n.startsWith("[")||n.startsWith("{"))try{const r=JSON.parse(e);return Su(r)}catch{return e}return e}if(Array.isArray(e))return e.map(Su);if(e!==null&&typeof e=="object"){const n={};for(const[r,a]of Object.entries(e))n[r]=Su(a);return n}return e}function l2(e){try{const n=Su(e);return JSON.stringify(n,null,2)}catch{return JSON.stringify(e,null,2)}}function kR(e){return e.includes("invoke_agent")||e.includes("Agent")?"bg-purple-100 dark:bg-purple-900 text-purple-800 dark:text-purple-200":e.includes("chat")||e.includes("Chat")?"bg-blue-100 dark:bg-blue-900 text-blue-800 dark:text-blue-200":e.includes("tool")||e.includes("execute")?"bg-green-100 dark:bg-green-900 text-green-800 dark:text-green-200":"bg-orange-100 dark:bg-orange-900 
text-orange-800 dark:text-orange-200"}function c2({node:e,depth:n=0}){const[r,a]=w.useState(n<2),[l,c]=w.useState(!1),{data:d}=e,f=d.operation_name||"Unknown",m=d.duration_ms?`${Number(d.duration_ms).toFixed(1)}ms`:"",h=e.children.length>0,g=d.attributes?.["gen_ai.usage.input_tokens"],x=d.attributes?.["gen_ai.usage.output_tokens"],y=g!==void 0||x!==void 0;return o.jsxs("div",{className:"relative",children:[n>0&&o.jsx("div",{className:"absolute left-0 top-0 bottom-0 border-l-2 border-muted",style:{marginLeft:`${(n-1)*16+8}px`}}),o.jsxs("div",{className:"flex items-center gap-2 py-1.5 hover:bg-muted/50 rounded transition-colors",style:{paddingLeft:`${n*16}px`},children:[o.jsx("button",{onClick:()=>h?a(!r):c(!l),className:"w-4 h-4 flex items-center justify-center text-muted-foreground hover:text-foreground",children:h?r?o.jsx(Rt,{className:"h-3 w-3"}):o.jsx(en,{className:"h-3 w-3"}):l?o.jsx(Rt,{className:"h-3 w-3"}):o.jsx(en,{className:"h-3 w-3"})}),o.jsx("span",{className:`text-xs px-1.5 py-0.5 rounded font-medium ${kR(f)}`,children:f.replace("Agent.","").replace("invoke_agent ","")}),m&&o.jsx("span",{className:"text-xs text-muted-foreground font-mono",children:m}),y&&o.jsxs("span",{className:"text-xs text-muted-foreground font-mono",children:[g!==void 0&&o.jsxs("span",{children:["↑",String(g)]}),g!==void 0&&x!==void 0&&o.jsx("span",{className:"mx-0.5",children:"/"}),x!==void 0&&o.jsxs("span",{children:["↓",String(x)]})]})]}),l&&!h&&o.jsx("div",{className:"ml-4 mt-1 mb-2 p-2 bg-muted/30 rounded border text-xs",style:{marginLeft:`${n*16+20}px`},children:o.jsxs("div",{className:"space-y-1",children:[d.span_id&&o.jsxs("div",{className:"flex gap-2",children:[o.jsx("span",{className:"text-muted-foreground w-20",children:"Span ID:"}),o.jsx("span",{className:"font-mono text-xs break-all",children:d.span_id})]}),d.trace_id&&o.jsxs("div",{className:"flex gap-2",children:[o.jsx("span",{className:"text-muted-foreground w-20",children:"Trace 
ID:"}),o.jsx("span",{className:"font-mono text-xs break-all",children:d.trace_id})]}),d.status&&o.jsxs("div",{className:"flex gap-2",children:[o.jsx("span",{className:"text-muted-foreground w-20",children:"Status:"}),o.jsx("span",{className:`px-1.5 py-0.5 rounded text-xs ${d.status==="StatusCode.UNSET"||d.status==="OK"?"bg-green-100 dark:bg-green-900 text-green-800 dark:text-green-200":"bg-red-100 dark:bg-red-900 text-red-800 dark:text-red-200"}`,children:d.status})]}),d.attributes&&Object.keys(d.attributes).length>0&&o.jsxs("div",{className:"mt-2",children:[o.jsx("span",{className:"text-muted-foreground block mb-1",children:"Attributes:"}),o.jsx("pre",{className:"text-xs bg-background border rounded p-2 overflow-auto max-h-32 whitespace-pre-wrap break-all",children:l2(d.attributes)})]})]})}),h&&r&&o.jsx("div",{children:e.children.map((b,j)=>o.jsx(c2,{node:b,depth:n+1},b.data.span_id||j))})]})}function TR({group:e}){const[n,r]=w.useState(!0),a=new Date(e.timestamp*1e3).toLocaleTimeString(),l=e.totalDuration>0?`${e.totalDuration.toFixed(0)}ms`:"",c=e.traces.reduce((d,f)=>{const m=h=>1+h.children.reduce((g,x)=>g+m(x),0);return d+m(f)},0);return o.jsxs("div",{className:"border rounded-lg overflow-hidden",children:[o.jsxs("div",{className:"flex items-center gap-2 p-2 bg-muted/50 cursor-pointer hover:bg-muted/70 transition-colors",onClick:()=>r(!n),children:[o.jsx("div",{className:"text-muted-foreground",children:n?o.jsx(Rt,{className:"h-4 w-4"}):o.jsx(en,{className:"h-4 w-4"})}),o.jsx("span",{className:"font-mono text-xs text-muted-foreground",children:a}),e.entity_id&&o.jsx(ut,{variant:"outline",className:"text-xs py-0",children:e.entity_id.replace("agent_","").replace("workflow_","")}),o.jsx("div",{className:"flex-1"}),l&&o.jsx(ut,{variant:"secondary",className:"text-xs py-0",children:l}),o.jsxs("span",{className:"text-xs text-muted-foreground",children:[c," span",c!==1?"s":""]})]}),n&&o.jsx("div",{className:"p-2 
border-t",children:e.traces.map((d,f)=>o.jsx(c2,{node:d,depth:0},d.data.span_id||f))})]})}function AR({events:e}){const n=le(c=>c.debugTraceSubTab),r=le(c=>c.setDebugTraceSubTab),a=e.filter(c=>c.type==="response.trace.completed"),l=CR(a);return o.jsxs("div",{className:"h-full flex flex-col",children:[o.jsxs("div",{className:"flex items-center gap-2 p-3 border-b",children:[o.jsx(Bu,{className:"h-4 w-4"}),o.jsx("span",{className:"font-medium",children:"Traces"}),o.jsx(ut,{variant:"outline",children:a.length}),o.jsx("div",{className:"flex-1"}),o.jsxs("div",{className:"flex items-center bg-muted rounded-md p-1 min-w-0",children:[o.jsx("button",{onClick:()=>r("spans"),className:`px-3 py-1.5 text-xs rounded transition-colors truncate ${n==="spans"?"bg-background shadow-sm font-medium":"text-muted-foreground hover:text-foreground"}`,children:"OTel Spans"}),o.jsxs("button",{onClick:()=>r("context"),className:`px-3 py-1.5 text-xs rounded transition-colors flex items-center gap-1.5 min-w-0 ${n==="context"?"bg-background shadow-sm font-medium":"text-muted-foreground hover:text-foreground"}`,children:[o.jsx(ha,{className:"h-3.5 w-3.5 flex-shrink-0"}),o.jsx("span",{className:"truncate",children:"Context Inspector"})]})]})]}),n==="spans"?o.jsxs("div",{className:"flex-1 flex flex-col min-h-0",children:[a.length>0&&o.jsx("div",{className:"p-3 border-b flex-shrink-0",children:o.jsxs("div",{className:"flex items-center gap-2",children:[o.jsx(Bu,{className:"h-4 w-4"}),o.jsx("span",{className:"font-medium text-sm",children:"OTel Spans"}),o.jsxs(ut,{variant:"outline",className:"text-xs",children:[l.length," turn",l.length!==1?"s":""]})]})}),a.length===0?o.jsxs("div",{className:"flex flex-col items-center text-center p-6 pt-9",children:[o.jsx(ha,{className:"h-8 w-8 text-muted-foreground mb-3"}),o.jsx("div",{className:"text-sm font-medium mb-1",children:"No Data"}),o.jsxs("div",{className:"text-xs text-muted-foreground max-w-[200px]",children:["Run"," ",o.jsx("span",{className:"font-mono 
bg-accent/10 px-1 rounded",children:"devui --instrumentation"})," ","and start a conversation."]})]}):o.jsx(Wn,{className:"flex-1",children:o.jsx("div",{className:"p-3",children:o.jsx("div",{className:"space-y-3",children:l.map(c=>o.jsx(TR,{group:c},c.response_id))})})})]}):o.jsx(bR,{events:e})]})}function MR({events:e}){const n=gg(e),r=[],a=n.filter(m=>m.type==="response.function_call.complete"),l=e.filter(m=>zr(m)!==null),c=new Map;l.forEach(m=>{const h=zr(m);h&&c.set(h.call_id,m)}),a.forEach(m=>{if(r.push(m),"data"in m&&m.data&&m.data.call_id){const h=String(m.data.call_id),g=c.get(h);g&&(r.push(g),c.delete(h))}}),c.forEach(m=>{r.push(m)});const f=[...i2(r)].reverse();return o.jsxs("div",{className:"h-full flex flex-col",children:[o.jsxs("div",{className:"flex items-center gap-2 p-3 border-b",children:[o.jsx(_a,{className:"h-4 w-4"}),o.jsx("span",{className:"font-medium",children:"Tools"}),o.jsx(ut,{variant:"outline",children:r.length})]}),o.jsx(Wn,{className:"flex-1",children:o.jsx("div",{className:"p-3",children:r.length===0?o.jsx("div",{className:"text-center text-muted-foreground text-sm py-8",children:"No tool executions yet. 
Tool calls will appear here during conversations."}):o.jsx("div",{className:"space-y-3",children:f.map((m,h)=>"type"in m&&m.type==="separator"?o.jsx(a2,{},m.id):o.jsx(RR,{event:m},h))})})})]})}function RR({event:e}){const n="_uiTimestamp"in e&&typeof e._uiTimestamp=="number"?new Date(e._uiTimestamp*1e3).toLocaleTimeString():new Date().toLocaleTimeString(),r=e.type==="response.function_call.complete",a=zr(e),l=a!==null;if(!r&&!l)return null;const c=r&&"data"in e?e.data:null;return o.jsxs("div",{className:"border rounded p-3",children:[o.jsxs("div",{className:"flex items-center justify-between mb-2",children:[o.jsxs("div",{className:"flex items-center gap-2",children:[o.jsx(og,{className:"h-4 w-4 text-yellow-600 dark:text-yellow-400"}),o.jsx("span",{className:"font-medium text-sm",children:r?"Tool Call":"Tool Result"}),r&&c&&c.name!==void 0&&o.jsxs("span",{className:"text-xs text-muted-foreground",children:["(",String(c.name),")"]})]}),o.jsx("span",{className:"text-xs text-muted-foreground font-mono",children:n})]}),r&&c&&o.jsxs("div",{className:"p-2 bg-blue-50 dark:bg-blue-950/50 border border-blue-200 dark:border-blue-800 rounded",children:[o.jsxs("div",{className:"flex items-center gap-2 mb-2",children:[o.jsx(_a,{className:"h-3 w-3 text-blue-600 dark:text-blue-400"}),o.jsx("span",{className:"text-xs font-mono bg-blue-100 dark:bg-blue-900 text-blue-800 dark:text-blue-200 px-2 py-1 rounded",children:"CALL"}),o.jsx("span",{className:"font-medium text-sm",children:String(c.name||"unknown")})]}),c.arguments!==void 0&&o.jsxs("div",{className:"text-xs",children:[o.jsx("span",{className:"text-muted-foreground mb-1 block",children:"Arguments:"}),o.jsx("pre",{className:"p-2 bg-background border rounded text-xs overflow-auto max-h-32 max-w-full break-all whitespace-pre-wrap",children:typeof c.arguments=="string"?c.arguments:JSON.stringify(c.arguments,null,1)})]})]}),l&&a&&o.jsxs("div",{className:"p-2 bg-green-50 dark:bg-green-950/50 border border-green-200 
dark:border-green-800 rounded",children:[o.jsxs("div",{className:"flex items-center gap-2 mb-2",children:[o.jsx(nn,{className:"h-3 w-3 text-green-600 dark:text-green-400"}),o.jsx("span",{className:"text-xs font-mono bg-green-100 dark:bg-green-900 text-green-800 dark:text-green-200 px-2 py-1 rounded",children:"RESULT"}),a.status!=="completed"&&o.jsx("span",{className:"ml-auto px-2 py-1 rounded text-xs font-medium bg-red-100 dark:bg-red-900 text-red-800 dark:text-red-200",children:a.status})]}),o.jsxs("div",{className:"text-xs space-y-1",children:[o.jsxs("div",{className:"flex items-center gap-2",children:[o.jsx("span",{className:"text-muted-foreground",children:"Call ID:"}),o.jsx("span",{className:"font-mono text-xs break-all",children:a.call_id})]}),o.jsxs("div",{children:[o.jsx("span",{className:"text-muted-foreground block mb-1",children:"Output:"}),o.jsx("pre",{className:"p-2 bg-background border rounded text-xs overflow-auto max-h-32 break-all whitespace-pre-wrap",children:a.output})]})]})]})]})}function DR({events:e,isStreaming:n=!1,onMinimize:r}){const a=le(d=>d.debugPanelTab),l=le(d=>d.setDebugPanelTab),c=w.useMemo(()=>{const d=gg(e),f=d.length,m=e.filter(g=>g.type==="response.trace.completed").length,h=d.filter(g=>g.type==="response.function_call.complete").length+e.filter(g=>zr(g)!==null).length;return{eventsCount:f,tracesCount:m,toolsCount:h}},[e]);return o.jsx("div",{className:"flex-1 border-l flex flex-col min-h-0",children:o.jsxs(D5,{value:a,onValueChange:d=>l(d),className:"flex-1 flex flex-col min-h-0",children:[o.jsxs("div",{className:"px-3 pt-3 flex items-center gap-2 flex-shrink-0",children:[o.jsxs($N,{className:"flex-1",children:[o.jsxs(Nu,{value:"events",className:"flex-1 gap-1.5",children:["Events",c.eventsCount>0&&o.jsx("span",{className:"text-[10px] bg-muted-foreground/20 text-muted-foreground px-1.5 py-0.5 rounded-full min-w-[1.25rem] text-center",children:c.eventsCount})]}),o.jsxs(Nu,{value:"traces",className:"flex-1 
gap-1.5",children:["Traces",c.tracesCount>0&&o.jsx("span",{className:"text-[10px] bg-muted-foreground/20 text-muted-foreground px-1.5 py-0.5 rounded-full min-w-[1.25rem] text-center",children:c.tracesCount})]}),o.jsxs(Nu,{value:"tools",className:"flex-1 gap-1.5",children:["Tools",c.toolsCount>0&&o.jsx("span",{className:"text-[10px] bg-muted-foreground/20 text-muted-foreground px-1.5 py-0.5 rounded-full min-w-[1.25rem] text-center",children:c.toolsCount})]})]}),r&&o.jsx(Le,{variant:"ghost",size:"sm",onClick:r,className:"h-8 w-8 p-0 flex-shrink-0",title:"Minimize debug panel",children:o.jsx(en,{className:"h-4 w-4"})})]}),o.jsx(ju,{value:"events",className:"flex-1 mt-0 overflow-hidden",children:o.jsx(ER,{events:e,isStreaming:n})}),o.jsx(ju,{value:"traces",className:"flex-1 mt-0 overflow-hidden",children:o.jsx(AR,{events:e})}),o.jsx(ju,{value:"tools",className:"flex-1 mt-0 overflow-hidden",children:o.jsx(MR,{events:e})})]})})}function Ir({open:e,onOpenChange:n,children:r}){if(!e)return null;const a=()=>{n(!1)},l=d=>{d.stopPropagation()},c=d=>{d.stopPropagation()};return o.jsxs("div",{className:"fixed inset-0 z-50 flex items-center justify-center",children:[o.jsx("div",{className:"absolute inset-0 bg-black/50",onClick:a}),o.jsx("div",{className:"relative z-10",onClick:l,onMouseDown:c,onMouseUp:d=>d.stopPropagation(),children:r})]})}function Lr({children:e,className:n=""}){const a=n.includes("w-[")||n.includes("w-full")||n.includes("max-w-")?"":"max-w-lg w-full";return o.jsx("div",{className:`relative bg-background border rounded-lg shadow-lg max-h-[90vh] overflow-hidden ${a} ${n}`,children:e})}function $r({children:e,className:n=""}){return o.jsx("div",{className:`space-y-2 ${n}`,children:e})}function Pr({children:e,className:n=""}){return o.jsx("h2",{className:`text-lg font-semibold ${n}`,children:e})}function OR({children:e,className:n=""}){return o.jsx("p",{className:`text-sm text-muted-foreground ${n}`,children:e})}function So({onClose:e}){return 
o.jsx(Le,{variant:"ghost",size:"sm",onClick:e,className:"absolute top-4 right-4 h-8 w-8 p-0 rounded-sm opacity-70 hover:opacity-100",children:o.jsx(Ea,{className:"h-4 w-4"})})}function zR({children:e}){return o.jsx("div",{className:"flex justify-end gap-2 p-4 border-t bg-muted/50",children:e})}function as({className:e,type:n,...r}){return o.jsx("input",{type:n,"data-slot":"input",className:We("file:text-foreground placeholder:text-muted-foreground selection:bg-primary selection:text-primary-foreground dark:bg-input/30 border-input flex h-9 w-full min-w-0 rounded-md border bg-transparent px-3 py-1 text-base shadow-xs transition-[color,box-shadow] outline-none file:inline-flex file:h-7 file:border-0 file:bg-transparent file:text-sm file:font-medium disabled:pointer-events-none disabled:cursor-not-allowed disabled:opacity-50 md:text-sm","focus-visible:border-ring focus-visible:ring-ring/50 focus-visible:ring-[3px]","aria-invalid:ring-destructive/20 dark:aria-invalid:ring-destructive/40 aria-invalid:border-destructive",e),...r})}var IR="Label",u2=w.forwardRef((e,n)=>o.jsx(Ye.label,{...e,ref:n,onMouseDown:r=>{r.target.closest("button, input, select, textarea")||(e.onMouseDown?.(r),!r.defaultPrevented&&r.detail>1&&r.preventDefault())}}));u2.displayName=IR;var LR=u2;function kt({className:e,...n}){return o.jsx(LR,{"data-slot":"label",className:We("flex items-center gap-2 text-sm leading-none font-medium select-none group-data-[disabled=true]:pointer-events-none group-data-[disabled=true]:opacity-50 peer-disabled:cursor-not-allowed peer-disabled:opacity-50",e),...n})}var Td="Switch",[$R,e$]=Kn(Td),[PR,HR]=$R(Td),d2=w.forwardRef((e,n)=>{const{__scopeSwitch:r,name:a,checked:l,defaultChecked:c,required:d,disabled:f,value:m="on",onCheckedChange:h,form:g,...x}=e,[y,b]=w.useState(null),j=rt(n,E=>b(E)),N=w.useRef(!1),S=y?g||!!y.closest("form"):!0,[_,A]=Ar({prop:l,defaultProp:c??!1,onChange:h,caller:Td});return 
o.jsxs(PR,{scope:r,checked:_,disabled:f,children:[o.jsx(Ye.button,{type:"button",role:"switch","aria-checked":_,"aria-required":d,"data-state":p2(_),"data-disabled":f?"":void 0,disabled:f,value:m,...x,ref:j,onClick:ke(e.onClick,E=>{A(M=>!M),S&&(N.current=E.isPropagationStopped(),N.current||E.stopPropagation())})}),S&&o.jsx(h2,{control:y,bubbles:!N.current,name:a,value:m,checked:_,required:d,disabled:f,form:g,style:{transform:"translateX(-100%)"}})]})});d2.displayName=Td;var f2="SwitchThumb",m2=w.forwardRef((e,n)=>{const{__scopeSwitch:r,...a}=e,l=HR(f2,r);return o.jsx(Ye.span,{"data-state":p2(l.checked),"data-disabled":l.disabled?"":void 0,...a,ref:n})});m2.displayName=f2;var UR="SwitchBubbleInput",h2=w.forwardRef(({__scopeSwitch:e,control:n,checked:r,bubbles:a=!0,...l},c)=>{const d=w.useRef(null),f=rt(d,c),m=fg(r),h=Lp(n);return w.useEffect(()=>{const g=d.current;if(!g)return;const x=window.HTMLInputElement.prototype,b=Object.getOwnPropertyDescriptor(x,"checked").set;if(m!==r&&b){const j=new Event("click",{bubbles:a});b.call(g,r),g.dispatchEvent(j)}},[m,r,a]),o.jsx("input",{type:"checkbox","aria-hidden":!0,defaultChecked:r,...l,tabIndex:-1,ref:f,style:{...l.style,...h,position:"absolute",pointerEvents:"none",opacity:0,margin:0}})});h2.displayName=UR;function p2(e){return e?"checked":"unchecked"}var g2=d2,BR=m2;const Wi=w.forwardRef(({className:e,...n},r)=>o.jsx(g2,{className:We("peer inline-flex h-5 w-9 shrink-0 cursor-pointer items-center rounded-full border-2 border-transparent shadow-sm transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 focus-visible:ring-offset-background disabled:cursor-not-allowed disabled:opacity-50 data-[state=checked]:bg-primary data-[state=unchecked]:bg-input",e),...n,ref:r,children:o.jsx(BR,{className:We("pointer-events-none block h-4 w-4 rounded-full bg-background shadow-lg ring-0 transition-transform data-[state=checked]:translate-x-4 
data-[state=unchecked]:translate-x-0")})}));Wi.displayName=g2.displayName;const VR=["gpt-4.1","gpt-4.1-mini","o1","o1-mini","o3-mini"];function cb({open:e,onOpenChange:n,onBackendUrlChange:r}){const[a,l]=w.useState("general"),{oaiMode:c,setOAIMode:d,azureDeploymentEnabled:f,setAzureDeploymentEnabled:m,authRequired:h,serverCapabilities:g,serverVersion:x,runtime:y,uiMode:b,streamingEnabled:j,setStreamingEnabled:N}=le(),S="",[_,A]=w.useState(()=>localStorage.getItem("devui_backend_url")||S),[E,M]=w.useState(_),[T,D]=w.useState(!!localStorage.getItem("devui_auth_token")),[z,H]=w.useState(""),q=()=>{try{new URL(E),localStorage.setItem("devui_backend_url",E),A(E),r?.(E),n(!1),window.location.reload()}catch{alert("Please enter a valid URL (e.g., http://localhost:8080)")}},X=()=>{localStorage.removeItem("devui_backend_url"),M(S),A(S),r?.(S),window.location.reload()},W=()=>{z.trim()&&(localStorage.setItem("devui_auth_token",z.trim()),D(!0),H(""),window.location.reload())},G=()=>{localStorage.removeItem("devui_auth_token"),D(!1),H(""),window.location.reload()},ne=E!==_,B=!localStorage.getItem("devui_backend_url");return o.jsx(Ir,{open:e,onOpenChange:n,children:o.jsxs(Lr,{className:"w-[600px] max-w-[90vw] flex flex-col max-h-[85vh]",children:[o.jsx($r,{className:"p-6 pb-2 flex-shrink-0",children:o.jsx(Pr,{children:"Settings"})}),o.jsx(So,{onClose:()=>n(!1)}),o.jsxs("div",{className:"flex border-b px-6 flex-shrink-0",children:[o.jsxs("button",{onClick:()=>l("general"),className:`px-4 py-2 text-sm font-medium transition-colors relative ${a==="general"?"text-foreground":"text-muted-foreground hover:text-foreground"}`,children:["General",a==="general"&&o.jsx("div",{className:"absolute bottom-0 left-0 right-0 h-0.5 bg-primary"})]}),g.openai_proxy&&o.jsxs("button",{onClick:()=>l("proxy"),className:`px-4 py-2 text-sm font-medium transition-colors relative ${a==="proxy"?"text-foreground":"text-muted-foreground hover:text-foreground"}`,children:["OpenAI 
Proxy",a==="proxy"&&o.jsx("div",{className:"absolute bottom-0 left-0 right-0 h-0.5 bg-primary"})]}),o.jsxs("button",{onClick:()=>l("about"),className:`px-4 py-2 text-sm font-medium transition-colors relative ${a==="about"?"text-foreground":"text-muted-foreground hover:text-foreground"}`,children:["About",a==="about"&&o.jsx("div",{className:"absolute bottom-0 left-0 right-0 h-0.5 bg-primary"})]})]}),o.jsxs("div",{className:"px-6 pb-6 overflow-y-auto flex-1 min-h-[400px]",children:[a==="general"&&o.jsxs("div",{className:"space-y-6 pt-4",children:[o.jsxs("div",{className:"space-y-3",children:[o.jsxs("div",{className:"flex items-center justify-between",children:[o.jsx(kt,{htmlFor:"backend-url",className:"text-sm font-medium",children:"Backend URL"}),!B&&o.jsxs(Le,{variant:"ghost",size:"sm",onClick:X,className:"h-7 text-xs",title:"Reset to default",children:[o.jsx(sg,{className:"h-3 w-3 mr-1"}),"Reset"]})]}),o.jsx(as,{id:"backend-url",type:"url",value:E,onChange:U=>M(U.target.value),placeholder:"http://localhost:8080",className:"font-mono text-sm"}),o.jsxs("p",{className:"text-xs text-muted-foreground",children:["Default: ",o.jsx("span",{className:"font-mono",children:S})]}),o.jsx("div",{className:"flex gap-2 pt-2 min-h-[36px]",children:ne&&o.jsxs(o.Fragment,{children:[o.jsx(Le,{onClick:q,size:"sm",className:"flex-1",children:"Apply & Reload"}),o.jsx(Le,{onClick:()=>M(_),variant:"outline",size:"sm",className:"flex-1",children:"Cancel"})]})})]}),(h||T)&&o.jsxs("div",{className:"space-y-3 border-t pt-6",children:[o.jsxs("div",{className:"flex items-center justify-between",children:[o.jsx(kt,{className:"text-sm font-medium",children:"Authentication Token"}),!h&&T&&o.jsx("span",{className:"text-xs text-muted-foreground",children:"(Not required by current backend)"})]}),T?o.jsxs("div",{className:"space-y-3",children:[o.jsxs("div",{className:"flex items-center gap-2",children:[o.jsx(as,{type:"password",value:"••••••••••••••••••••",disabled:!0,className:"font-mono text-sm 
flex-1"}),o.jsx(Le,{variant:"destructive",size:"sm",onClick:G,className:"flex-shrink-0",children:"Clear"})]}),o.jsx("p",{className:"text-xs text-green-600 dark:text-green-400",children:"✓ Token configured and stored locally"})]}):o.jsxs("div",{className:"space-y-3",children:[o.jsx(as,{type:"password",value:z,onChange:U=>H(U.target.value),placeholder:"Enter bearer token",className:"font-mono text-sm",onKeyDown:U=>{U.key==="Enter"&&z.trim()&&W()}}),o.jsx(Le,{onClick:W,size:"sm",disabled:!z.trim(),className:"w-full",children:"Save & Reload"}),o.jsx("p",{className:"text-xs text-muted-foreground",children:h?"Required by backend (started with --auth flag)":"Not required by current backend"})]})]}),g.deployment&&o.jsxs("div",{className:"space-y-3 border-t pt-6",children:[o.jsxs("div",{className:"flex items-center justify-between",children:[o.jsxs("div",{className:"space-y-0.5",children:[o.jsx(kt,{className:"text-sm font-medium",children:"Azure Deployment"}),o.jsx("p",{className:"text-xs text-muted-foreground",children:"Enable one-click deployment to Azure Container Apps"})]}),o.jsx(Wi,{checked:f,onCheckedChange:m})]}),o.jsxs("details",{className:"group",children:[o.jsxs("summary",{className:"cursor-pointer text-xs text-muted-foreground hover:text-foreground transition-colors flex items-center gap-1",children:[o.jsx(en,{className:"h-3 w-3 transition-transform group-open:rotate-90"}),"Learn more about Azure deployment"]}),o.jsxs("div",{className:"mt-3 space-y-3 pl-4",children:[o.jsx("p",{className:"text-xs text-muted-foreground leading-relaxed",children:'When enabled, agents that support deployment will show a "Deploy to Azure" button. 
This allows you to deploy your agent to Azure Container Apps directly from DevUI.'}),o.jsxs("div",{className:"space-y-1.5",children:[o.jsx("p",{className:"text-xs font-medium",children:"When enabled:"}),o.jsxs("ul",{className:"text-xs text-muted-foreground space-y-0.5 list-disc list-inside",children:[o.jsx("li",{children:'Shows "Deploy to Azure" for supported agents'}),o.jsx("li",{children:"Requires Azure CLI and proper authentication"}),o.jsx("li",{children:"Backend must have deployment capabilities enabled"})]})]}),o.jsxs("div",{className:"space-y-1.5",children:[o.jsx("p",{className:"text-xs font-medium",children:"When disabled:"}),o.jsxs("ul",{className:"text-xs text-muted-foreground space-y-0.5 list-disc list-inside",children:[o.jsx("li",{children:'Shows "Deployment Guide" for all agents'}),o.jsx("li",{children:"Provides Docker templates and manual deployment instructions"}),o.jsx("li",{children:"No backend deployment capabilities required"})]})]})]})]})]}),o.jsx("div",{className:"space-y-3 border-t pt-6",children:o.jsxs("div",{className:"flex items-center justify-between",children:[o.jsxs("div",{className:"space-y-0.5",children:[o.jsx(kt,{className:"text-sm font-medium",children:"Show Tool Calls"}),o.jsx("p",{className:"text-xs text-muted-foreground",children:"Display function/tool calls and results in chat messages"})]}),o.jsx(Wi,{checked:le.getState().showToolCalls,onCheckedChange:U=>le.getState().setShowToolCalls(U)})]})}),o.jsxs("div",{className:"space-y-3 border-t pt-6",children:[o.jsxs("div",{className:"flex items-center justify-between",children:[o.jsxs("div",{className:"space-y-0.5",children:[o.jsx(kt,{className:"text-sm font-medium",children:"Streaming Mode"}),o.jsx("p",{className:"text-xs text-muted-foreground",children:"Stream responses token-by-token as they're generated"})]}),o.jsx(Wi,{checked:j,onCheckedChange:N})]}),!j&&o.jsxs("div",{className:"flex items-start gap-2 text-xs text-amber-600 dark:text-amber-400 bg-amber-500/10 p-3 
rounded",children:[o.jsx(Fs,{className:"h-3.5 w-3.5 flex-shrink-0 mt-0.5"}),o.jsxs("div",{children:[o.jsx("p",{className:"font-medium",children:"Non-streaming mode limitations:"}),o.jsxs("ul",{className:"mt-1 space-y-0.5 list-disc list-inside text-amber-600/80 dark:text-amber-400/80",children:[o.jsx("li",{children:"Tool calls won't display in real-time"}),o.jsx("li",{children:"No typing indicator during generation"}),o.jsx("li",{children:"Response appears all at once when complete"})]})]})]})]})]}),a==="proxy"&&g.openai_proxy&&o.jsxs("div",{className:"space-y-6 pt-4",children:[o.jsxs("div",{className:"space-y-4",children:[o.jsxs("div",{className:"flex items-center justify-between",children:[o.jsxs("div",{className:"space-y-0.5",children:[o.jsx(kt,{className:"text-base font-medium",children:"OpenAI Proxy Mode"}),o.jsx("p",{className:"text-xs text-muted-foreground",children:"Route requests through DevUI backend to OpenAI API"})]}),o.jsx(Wi,{checked:c.enabled,onCheckedChange:U=>d({...c,enabled:U})})]}),!c.enabled&&o.jsx("div",{className:"bordder border-muted bg-muted/30 rounded-lg p-4 space-y-3",children:o.jsxs("div",{className:"flex items-start gap-2",children:[o.jsx(Fs,{className:"h-4 w-4 flex-shrink-0 mt-0.5 text-blue-600 dark:text-blue-400"}),o.jsxs("div",{className:"space-y-2",children:[o.jsx("p",{className:"text-sm font-medium",children:"About OpenAI Proxy Mode"}),o.jsxs("p",{className:"text-xs text-muted-foreground leading-relaxed",children:["When enabled, your chat requests are sent to your DevUI backend"," ",o.jsxs("span",{className:"font-mono font-semibold",children:["(",_,")"]}),", which then forwards them to OpenAI's API. 
This keeps your"," ",o.jsx("span",{className:"font-mono font-semibold",children:"OPENAI_API_KEY"})," ","secure on the server instead of exposing it in the browser."]}),o.jsxs("div",{className:"space-y-1.5 pt-1",children:[o.jsx("p",{className:"text-xs font-medium",children:"Requirements:"}),o.jsxs("ul",{className:"text-xs text-muted-foreground space-y-0.5 list-disc list-inside",children:[o.jsxs("li",{children:["Backend must have"," ",o.jsx("span",{className:"font-mono",children:"OPENAI_API_KEY"})," ","configured"]}),o.jsx("li",{children:"Backend must support OpenAI Responses API proxying (DevUI does)"})]})]}),o.jsxs("div",{className:"space-y-1.5 pt-1",children:[o.jsx("p",{className:"text-xs font-medium",children:"Why use this?"}),o.jsx("p",{className:"text-xs text-muted-foreground",children:"Quickly test and compare OpenAI models directly through the DevUI interface without creating custom agents or exposing API keys in the browser."})]})]})]})}),c.enabled&&o.jsxs("div",{className:"space-y-4 pl-4 border-l-2 border-muted",children:[o.jsxs("div",{className:"space-y-2",children:[o.jsx(kt,{className:"text-sm font-medium",children:"Model"}),o.jsx(as,{type:"text",value:c.model,onChange:U=>d({...c,model:U.target.value}),placeholder:"gpt-4.1-mini",className:"font-mono text-sm"}),o.jsx("p",{className:"text-xs text-muted-foreground",children:"Enter any OpenAI model ID (e.g., gpt-4.1, o1, o3-mini)"})]}),o.jsxs("div",{className:"space-y-2",children:[o.jsx(kt,{className:"text-xs text-muted-foreground",children:"Common presets"}),o.jsx("div",{className:"flex flex-wrap gap-2",children:VR.map(U=>o.jsx(Le,{variant:c.model===U?"default":"outline",size:"sm",onClick:()=>d({...c,model:U}),className:"text-xs h-7",children:U},U))})]}),o.jsxs("details",{className:"group",children:[o.jsxs("summary",{className:"cursor-pointer text-sm font-medium text-muted-foreground hover:text-foreground transition-colors flex items-center gap-1",children:[o.jsx(en,{className:"h-3 w-3 transition-transform 
group-open:rotate-90"}),"Advanced Parameters (optional)"]}),o.jsxs("div",{className:"space-y-3 mt-3 pl-4",children:[o.jsxs("div",{className:"space-y-1",children:[o.jsx(kt,{className:"text-xs",children:"Temperature"}),o.jsx(as,{type:"number",step:"0.1",min:"0",max:"2",value:c.temperature??"",onChange:U=>d({...c,temperature:U.target.value?parseFloat(U.target.value):void 0}),placeholder:"1.0 (default)",className:"text-sm"}),o.jsx("p",{className:"text-xs text-muted-foreground",children:"Controls randomness (0-2)"})]}),o.jsxs("div",{className:"space-y-1",children:[o.jsx(kt,{className:"text-xs",children:"Max Output Tokens"}),o.jsx(as,{type:"number",min:"1",value:c.max_output_tokens??"",onChange:U=>d({...c,max_output_tokens:U.target.value?parseInt(U.target.value):void 0}),placeholder:"Auto",className:"text-sm"}),o.jsx("p",{className:"text-xs text-muted-foreground",children:"Maximum tokens in response"})]}),o.jsxs("div",{className:"space-y-1",children:[o.jsx(kt,{className:"text-xs",children:"Top P"}),o.jsx(as,{type:"number",step:"0.1",min:"0",max:"1",value:c.top_p??"",onChange:U=>d({...c,top_p:U.target.value?parseFloat(U.target.value):void 0}),placeholder:"1.0 (default)",className:"text-sm"}),o.jsx("p",{className:"text-xs text-muted-foreground",children:"Nucleus sampling (0-1)"})]}),o.jsxs("div",{className:"space-y-1",children:[o.jsx(kt,{className:"text-xs",children:"Reasoning Effort (o-series models)"}),o.jsxs("select",{value:c.reasoning_effort??"",onChange:U=>d({...c,reasoning_effort:U.target.value?U.target.value:void 0}),className:"flex h-9 w-full rounded-md border border-input bg-transparent px-3 py-1 text-sm shadow-sm transition-colors focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-ring",children:[o.jsx("option",{value:"",children:"Auto 
(default)"}),o.jsx("option",{value:"minimal",children:"Minimal"}),o.jsx("option",{value:"low",children:"Low"}),o.jsx("option",{value:"medium",children:"Medium"}),o.jsx("option",{value:"high",children:"High"})]}),o.jsx("p",{className:"text-xs text-muted-foreground",children:"Constrains reasoning effort (faster/cheaper vs thorough)"})]})]})]})]})]}),c.enabled&&o.jsxs("div",{className:"flex items-start gap-2 text-xs text-muted-foreground bg-muted/50 p-3 rounded",children:[o.jsx(Fs,{className:"h-3.5 w-3.5 flex-shrink-0 mt-0.5"}),o.jsx("div",{className:"space-y-1",children:o.jsxs("p",{children:["Requests route through"," ",o.jsx("span",{className:"font-mono font-semibold",children:_})," ","to OpenAI API. Server must have"," ",o.jsx("span",{className:"font-mono font-semibold",children:"OPENAI_API_KEY"})," ","configured."]})})]})]}),a==="about"&&o.jsxs("div",{className:"space-y-4 pt-4",children:[o.jsx("p",{className:"text-sm text-muted-foreground",children:"DevUI is a sample app for getting started with Agent Framework."}),o.jsxs("div",{className:"space-y-2 text-sm",children:[o.jsxs("div",{className:"flex justify-between",children:[o.jsx("span",{className:"text-muted-foreground",children:"Version:"}),o.jsx("span",{className:"font-mono",children:x||"Unknown"})]}),o.jsxs("div",{className:"flex justify-between",children:[o.jsx("span",{className:"text-muted-foreground",children:"Runtime:"}),o.jsx("span",{className:"font-mono capitalize",children:y||"Unknown"})]}),o.jsxs("div",{className:"flex justify-between",children:[o.jsx("span",{className:"text-muted-foreground",children:"UI Mode:"}),o.jsx("span",{className:"font-mono capitalize",children:b||"Unknown"})]})]}),(g||h!==void 0)&&o.jsxs("div",{className:"space-y-2 pt-2",children:[o.jsx("p",{className:"text-xs font-medium text-muted-foreground uppercase tracking-wide",children:"Capabilities"}),o.jsxs("div",{className:"space-y-1 text-sm",children:[g?.instrumentation!==void 0&&o.jsxs("div",{className:"flex justify-between 
items-center",children:[o.jsx("span",{className:"text-muted-foreground",children:"Instrumentation:"}),o.jsx("span",{className:`text-xs px-2 py-0.5 rounded-full ${g.instrumentation?"bg-green-500/10 text-green-600 dark:text-green-400":"bg-muted text-muted-foreground"}`,children:g.instrumentation?"Enabled":"Disabled"})]}),g?.openai_proxy!==void 0&&o.jsxs("div",{className:"flex justify-between items-center",children:[o.jsx("span",{className:"text-muted-foreground",children:"OpenAI Proxy:"}),o.jsx("span",{className:`text-xs px-2 py-0.5 rounded-full ${g.openai_proxy?"bg-green-500/10 text-green-600 dark:text-green-400":"bg-muted text-muted-foreground"}`,children:g.openai_proxy?"Available":"Not Configured"})]}),g?.deployment!==void 0&&o.jsxs("div",{className:"flex justify-between items-center",children:[o.jsx("span",{className:"text-muted-foreground",children:"Deployment:"}),o.jsx("span",{className:`text-xs px-2 py-0.5 rounded-full ${g.deployment?"bg-green-500/10 text-green-600 dark:text-green-400":"bg-muted text-muted-foreground"}`,children:g.deployment?"Available":"Disabled"})]}),h!==void 0&&o.jsxs("div",{className:"flex justify-between items-center",children:[o.jsx("span",{className:"text-muted-foreground",children:"Authentication:"}),o.jsx("span",{className:`text-xs px-2 py-0.5 rounded-full ${h?"bg-blue-500/10 text-blue-600 dark:text-blue-400":"bg-muted text-muted-foreground"}`,children:h?"Required":"Not Required"})]})]})]}),o.jsx("div",{className:"flex justify-center pt-2",children:o.jsxs(Le,{variant:"outline",size:"sm",onClick:()=>window.open("https://github.com/microsoft/agent-framework","_blank"),className:"text-xs",children:[o.jsx(Hu,{className:"h-3 w-3 mr-1"}),"Learn More about Agent Framework"]})})]})]})]})})}const qR="modulepreload",FR=function(e,n){return new URL(e,n).href},ub={},_u=function(n,r,a){let l=Promise.resolve();if(r&&r.length>0){let h=function(g){return 
Promise.all(g.map(x=>Promise.resolve(x).then(y=>({status:"fulfilled",value:y}),y=>({status:"rejected",reason:y}))))};const d=document.getElementsByTagName("link"),f=document.querySelector("meta[property=csp-nonce]"),m=f?.nonce||f?.getAttribute("nonce");l=h(r.map(g=>{if(g=FR(g,a),g in ub)return;ub[g]=!0;const x=g.endsWith(".css"),y=x?'[rel="stylesheet"]':"";if(a)for(let j=d.length-1;j>=0;j--){const N=d[j];if(N.href===g&&(!x||N.rel==="stylesheet"))return}else if(document.querySelector(`link[href="${g}"]${y}`))return;const b=document.createElement("link");if(b.rel=x?"stylesheet":qR,x||(b.as="script"),b.crossOrigin="",b.href=g,m&&b.setAttribute("nonce",m),document.head.appendChild(b),x)return new Promise((j,N)=>{b.addEventListener("load",j),b.addEventListener("error",()=>N(new Error(`Unable to preload CSS for ${g}`)))})}))}function c(d){const f=new Event("vite:preloadError",{cancelable:!0});if(f.payload=d,window.dispatchEvent(f),!f.defaultPrevented)throw d}return l.then(d=>{for(const f of d||[])f.status==="rejected"&&c(f.reason);return n().catch(c)})},x2="devui_streaming_state_",y2=1440*60*1e3;function Yu(e){return`${x2}${e}`}function YR(e){let n="";for(const r of e)r.type==="response.output_text.delta"&&"delta"in r&&(n+=r.delta);return n}function v2(e){try{const n=Yu(e.conversationId),r=JSON.stringify(e);localStorage.setItem(n,r)}catch(n){console.error("Failed to save streaming state:",n);try{b2();const r=Yu(e.conversationId),a=JSON.stringify(e);localStorage.setItem(r,a)}catch{console.error("Failed to save streaming state even after cleanup")}}}function ba(e){try{const n=Yu(e),r=localStorage.getItem(n);if(!r)return null;const a=JSON.parse(r);return Date.now()-a.timestamp>y2?(Eu(e),null):a.completed?null:a}catch(n){return console.error("Failed to load streaming state:",n),null}}function Nh(e,n,r,a){try{const l=ba(e),c="sequence_number"in n?n.sequence_number:void 
0,d=l?[...l.events,n]:[n],f={conversationId:e,responseId:r,lastMessageId:a,lastSequenceNumber:c??l?.lastSequenceNumber??-1,events:d,timestamp:Date.now(),completed:n.type==="response.completed"||n.type==="response.failed",accumulatedText:YR(d)};v2(f)}catch(l){console.error("Failed to update streaming state:",l)}}function jh(e){try{const n=ba(e);n&&(n.completed=!0,n.timestamp=Date.now(),v2(n))}catch(n){console.error("Failed to mark streaming as completed:",n)}}function Eu(e){try{const n=Yu(e);localStorage.removeItem(n)}catch(n){console.error("Failed to clear streaming state:",n)}}function b2(){try{const e=Object.keys(localStorage),n=Date.now();for(const r of e)if(r.startsWith(x2))try{const a=localStorage.getItem(r);if(a){const l=JSON.parse(a);(n-l.timestamp>y2||l.completed)&&localStorage.removeItem(r)}}catch{localStorage.removeItem(r)}}catch(e){console.error("Failed to clear expired streaming states:",e)}}function GR(){b2()}function w2(){const[e,n]=w.useState(!1),r=w.useRef(null),a=w.useCallback(()=>(r.current=new AbortController,n(!1),r.current.signal),[]),l=w.useCallback(()=>{r.current&&(n(!0),r.current.abort(),r.current=null)},[]),c=w.useCallback(()=>{n(!1)},[]),d=w.useCallback(()=>{r.current&&(r.current.abort(),r.current=null)},[]);return{isCancelling:e,createAbortSignal:a,handleCancel:l,resetCancelling:c,cleanup:d}}function Gu(e){return e instanceof DOMException&&e.name==="AbortError"}function XR(e={}){const{onDrop:n,disabled:r=!1}=e,[a,l]=w.useState(!1),[c,d]=w.useState([]),f=w.useRef(0),m=w.useCallback(b=>{b.preventDefault(),b.stopPropagation(),!r&&(f.current++,b.dataTransfer.items&&b.dataTransfer.items.length>0&&l(!0))},[r]),h=w.useCallback(b=>{b.preventDefault(),b.stopPropagation(),!r&&(f.current--,f.current===0&&l(!1))},[r]),g=w.useCallback(b=>{b.preventDefault(),b.stopPropagation()},[]),x=w.useCallback(b=>{if(b.preventDefault(),b.stopPropagation(),l(!1),f.current=0,r)return;const 
j=Array.from(b.dataTransfer.files);j.length>0&&(d(j),n?.(j))},[r,n]),y=w.useCallback(()=>{d([])},[]);return{isDragOver:a,droppedFiles:c,clearDroppedFiles:y,dragHandlers:{onDragEnter:m,onDragLeave:h,onDragOver:g,onDrop:x}}}const ZR="",WR=1e3,Sh=10;function KR(){const e=localStorage.getItem("devui_backend_url");return e||ZR}function QR(e){return new Promise(n=>setTimeout(n,e))}class JR{baseUrl;authToken=null;constructor(n){this.baseUrl=n||KR(),this.authToken=localStorage.getItem("devui_auth_token")}setBaseUrl(n){this.baseUrl=n}getBaseUrl(){return this.baseUrl}setAuthToken(n){this.authToken=n,n?localStorage.setItem("devui_auth_token",n):localStorage.removeItem("devui_auth_token")}getAuthToken(){return this.authToken}clearAuthToken(){this.setAuthToken(null)}async request(n,r={}){const a=`${this.baseUrl}${n}`,l={"Content-Type":"application/json",...r.headers};this.authToken&&(l.Authorization=`Bearer ${this.authToken}`);const c=await fetch(a,{...r,headers:l});if(!c.ok){if(c.status===401)throw this.clearAuthToken(),new Error("UNAUTHORIZED");let d=`API request failed: ${c.status} ${c.statusText}`;try{const f=await c.json();f.detail?typeof f.detail=="string"?d=f.detail:typeof f.detail=="object"&&f.detail.error?.message&&(d=f.detail.error.message):f.error?.message&&(d=f.error.message)}catch{}throw new Error(d)}return c.json()}async getHealth(){return this.request("/health")}async getMeta(){return this.request("/meta")}async getEntities(){const r=(await this.request("/v1/entities")).entities.map(c=>{if(c.type==="agent")return{id:c.id,name:c.name,description:c.description,type:"agent",source:c.source||"directory",tools:(c.tools||[]).map(d=>typeof d=="string"?d:JSON.stringify(d)),has_env:!!(c.required_env_vars&&c.required_env_vars.length>0),module_path:typeof c.metadata?.module_path=="string"?c.metadata.module_path:void 
0,required_env_vars:c.required_env_vars,metadata:c.metadata,deployment_supported:c.deployment_supported,deployment_reason:c.deployment_reason,instructions:c.instructions,model_id:c.model_id,chat_client_type:c.chat_client_type,context_providers:c.context_providers,middleware:c.middleware};{const d=c.executors||c.tools||[];let f=c.start_executor_id||"";if(!f&&d.length>0){const m=d[0];typeof m=="string"&&(f=m)}return{id:c.id,name:c.name,description:c.description,type:"workflow",source:c.source||"directory",executors:d.map(m=>typeof m=="string"?m:JSON.stringify(m)),has_env:!!(c.required_env_vars&&c.required_env_vars.length>0),module_path:typeof c.metadata?.module_path=="string"?c.metadata.module_path:void 0,required_env_vars:c.required_env_vars,metadata:c.metadata,deployment_supported:c.deployment_supported,deployment_reason:c.deployment_reason,input_schema:c.input_schema||{type:"string"},input_type_name:c.input_type_name||"Input",start_executor_id:f,tools:[]}}}),a=r.filter(c=>c.type==="agent"),l=r.filter(c=>c.type==="workflow");return{entities:r,agents:a,workflows:l}}async getAgents(){const{agents:n}=await this.getEntities();return n}async getWorkflows(){const{workflows:n}=await this.getEntities();return n}async getAgentInfo(n){return this.request(`/v1/entities/${n}/info?type=agent`)}async getWorkflowInfo(n){return this.request(`/v1/entities/${n}/info?type=workflow`)}async reloadEntity(n){return this.request(`/v1/entities/${n}/reload`,{method:"POST"})}async createConversation(n){const{oaiMode:r}=await _u(()=>Promise.resolve().then(()=>wu),void 0,import.meta.url).then(c=>({oaiMode:c.useDevUIStore.getState().oaiMode})),a={};r.enabled&&(a["X-Proxy-Backend"]="openai");const l=await this.request("/v1/conversations",{method:"POST",headers:a,body:JSON.stringify({metadata:n})});return{id:l.id,object:"conversation",created_at:l.created_at,metadata:l.metadata}}async listConversations(n){const r=n?`/v1/conversations?agent_id=${encodeURIComponent(n)}`:"/v1/conversations",a=await 
this.request(r);return{data:a.data.map(l=>({id:l.id,object:"conversation",created_at:l.created_at,metadata:l.metadata})),has_more:a.has_more}}async getConversation(n){const r=await this.request(`/v1/conversations/${n}`);return{id:r.id,object:"conversation",created_at:r.created_at,metadata:r.metadata}}async deleteConversation(n){try{return await this.request(`/v1/conversations/${n}`,{method:"DELETE"}),Eu(n),!0}catch{return!1}}async listConversationItems(n,r){const a=new URLSearchParams;r?.limit&&a.set("limit",r.limit.toString()),r?.after&&a.set("after",r.after),r?.order&&a.set("order",r.order);const l=a.toString(),c=`/v1/conversations/${n}/items${l?`?${l}`:""}`;return this.request(c)}async getConversationItem(n,r){const a=`/v1/conversations/${n}/items/${r}`;return this.request(a)}async deleteConversationItem(n,r){const a=await fetch(`${this.baseUrl}/v1/conversations/${n}/items/${r}`,{method:"DELETE"});if(!a.ok)throw new Error(`Failed to delete item: ${a.statusText}`)}async*streamOpenAIResponse(n,r,a,l){const{oaiMode:c}=await _u(()=>Promise.resolve().then(()=>wu),void 0,import.meta.url).then(x=>({oaiMode:x.useDevUIStore.getState().oaiMode}));c.enabled&&(n.model=c.model,c.temperature!==void 0&&(n.temperature=c.temperature),c.max_output_tokens!==void 0&&(n.max_output_tokens=c.max_output_tokens),c.top_p!==void 0&&(n.top_p=c.top_p),c.instructions!==void 0&&(n.instructions=c.instructions),c.reasoning_effort!==void 0&&(n.reasoning={effort:c.reasoning_effort}));let d=-1,f=0,m=!1,h=l,g;if(r){const x=ba(r);if(x)if(l||(h=x.responseId),d=x.lastSequenceNumber,g=x.lastMessageId,l)m=x.events.length>0;else for(const y of x.events)m=!0,yield y}for(;f<=Sh;)try{let x;if(h){const N=new URLSearchParams;N.set("stream","true"),d>=0&&N.set("starting_after",d.toString());const S=`${this.baseUrl}/v1/responses/${h}?${N.toString()}`,_={Accept:"text/event-stream"};this.authToken&&(_.Authorization=`Bearer ${this.authToken}`),x=await fetch(S,{method:"GET",headers:_,signal:a})}else{const 
N=`${this.baseUrl}/v1/responses`,S={"Content-Type":"application/json",Accept:"text/event-stream"};c.enabled&&(S["X-Proxy-Backend"]="openai"),this.authToken&&(S.Authorization=`Bearer ${this.authToken}`),x=await fetch(N,{method:"POST",headers:S,body:JSON.stringify(n),signal:a})}if(!x.ok){if(x.status===401)throw this.clearAuthToken(),new Error("UNAUTHORIZED");if(x.status>=400&&x.status<500){let S=`Client error ${x.status}`;try{const _=await x.json();_.error&&_.error.message?S=_.error.message:_.detail&&(S=_.detail)}catch{}throw new Error(`CLIENT_ERROR: ${S}`)}let N=`Request failed with status ${x.status}`;try{const S=await x.json();S.error&&S.error.message?N=S.error.message:S.detail&&(N=S.detail)}catch{}throw new Error(N)}const y=x.body?.getReader();if(!y)throw new Error("Response body is not readable");const b=new TextDecoder;let j="";try{for(;;){if(a?.aborted)throw new DOMException("Request aborted","AbortError");const{done:N,value:S}=await y.read();if(N){r&&jh(r);return}const _=b.decode(S,{stream:!0});j+=_;const A=j.split(` `);j=A.pop()||"";for(const E of A)if(E.startsWith("data: ")){const M=E.slice(6);if(M==="[DONE]"){r&&jh(r);return}try{const T=JSON.parse(M);if("response"in T&&T.response&&typeof T.response=="object"&&"id"in T.response){const z=T.response.id;(!h||h!==z)&&(h=z)}else if("id"in T&&typeof T.id=="string"&&T.id.startsWith("resp_")){const z=T.id;(!h||h!==z)&&(h=z)}"item_id"in T&&T.item_id&&(g=T.item_id);const D="sequence_number"in T?T.sequence_number:void 0;if(D!==void 0)if(m&&D<=1&&d>1)r&&Eu(r),yield{type:"error",message:"Connection lost - previous response failed. 
Starting new response."},d=D,m=!0,r&&h&&Nh(r,T,h,g),yield T;else{if(D<=d)continue;d=D,m=!0,r&&h&&Nh(r,T,h,g),yield T}else m=!0,r&&h&&Nh(r,T,h,g),yield T}catch(T){console.error("Failed to parse OpenAI SSE event:",T)}}}}finally{y.releaseLock()}}catch(x){const y=x instanceof Error?x.message:String(x);if(Gu(x))throw r&&jh(r),x;if(y==="UNAUTHORIZED"||y.startsWith("CLIENT_ERROR:"))throw x;if(f++,f>Sh)throw new Error(`Connection failed after ${Sh} retry attempts: ${y}`);const b=Math.min(WR*Math.pow(2,f-1),3e4);await QR(b)}}async*streamAgentExecutionOpenAI(n,r,a,l){const c={metadata:{entity_id:n},input:r.input,stream:!0,conversation:r.conversation_id};return yield*this.streamAgentExecutionOpenAIDirect(n,c,r.conversation_id,a,l)}async*streamAgentExecutionOpenAIDirect(n,r,a,l,c){yield*this.streamOpenAIResponse(r,a,l,c)}async*streamWorkflowExecutionOpenAI(n,r,a){const l={metadata:{entity_id:n},input:JSON.stringify(r.input_data||{}),stream:!0,conversation:r.conversation_id,extra_body:r.checkpoint_id?{entity_id:n,checkpoint_id:r.checkpoint_id}:void 0};yield*this.streamOpenAIResponse(l,r.conversation_id,a)}async runAgentSync(n,r){const{oaiMode:a}=await _u(()=>Promise.resolve().then(()=>wu),void 0,import.meta.url).then(d=>({oaiMode:d.useDevUIStore.getState().oaiMode})),l={metadata:{entity_id:n},input:r.input,stream:!1,conversation:r.conversation_id};a.enabled&&(l.model=a.model,a.temperature!==void 0&&(l.temperature=a.temperature),a.max_output_tokens!==void 0&&(l.max_output_tokens=a.max_output_tokens));const c={};return a.enabled&&(c["X-Proxy-Backend"]="openai"),this.request("/v1/responses",{method:"POST",headers:c,body:JSON.stringify(l)})}async runWorkflowSync(n,r){const a={metadata:{entity_id:n},input:JSON.stringify(r.input_data||{}),stream:!1,conversation:r.conversation_id,extra_body:r.checkpoint_id?{entity_id:n,checkpoint_id:r.checkpoint_id}:void 0};return 
this.request("/v1/responses",{method:"POST",body:JSON.stringify(a)})}clearStreamingState(n){Eu(n)}async*streamDeployment(n){const r=await fetch(`${this.baseUrl}/v1/deployments`,{method:"POST",headers:{"Content-Type":"application/json"},body:JSON.stringify({...n,stream:!0})});if(!r.ok)throw new Error(`Deployment failed: ${r.statusText}`);const a=r.body?.getReader();if(!a)throw new Error("No response body");const l=new TextDecoder;let c="";try{for(;;){const{done:d,value:f}=await a.read();if(d)break;c+=l.decode(f,{stream:!0});const m=c.split(` `);c=m.pop()||"";for(const h of m)if(h.startsWith("data: ")){const g=h.slice(6);if(g==="[DONE]")return;try{yield JSON.parse(g)}catch(x){yield{type:"deploy.error",message:`Failed to parse deployment event: ${x instanceof Error?x.message:"Unknown error"}`}}}}}catch(d){throw yield{type:"deploy.failed",message:`Stream interrupted: ${d instanceof Error?d.message:"Unknown error"}`},d}finally{a.releaseLock()}}async listWorkflowSessions(n){const r=`/v1/conversations?entity_id=${encodeURIComponent(n)}&type=workflow_session`;return{data:(await this.request(r)).data.map(c=>({conversation_id:c.id,entity_id:c.metadata?.entity_id||n,created_at:c.created_at,metadata:{name:c.metadata?.name||`Session ${new Date(c.created_at*1e3).toLocaleString()}`,description:c.metadata?.description,type:"workflow_session",checkpoint_summary:c.metadata?.checkpoint_summary}}))}}async createWorkflowSession(n,r){const a={entity_id:n,type:"workflow_session",name:r?.name||`Session ${new Date().toLocaleString()}`,...r?.description&&{description:r.description}},l=await this.createConversation(a);return{conversation_id:l.id,entity_id:n,created_at:l.created_at,metadata:{name:a.name,description:a.description,type:"workflow_session"}}}async deleteWorkflowSession(n,r){if(!await this.deleteConversation(r))throw new Error("Failed to delete workflow session")}}const Ze=new JR;function eD({open:e,onClose:n,agentName:r="Agent",entity:a}){const 
c=le(C=>C.azureDeploymentEnabled)&&(a?.deployment_supported??!1),[d,f]=w.useState(c?"azure":"docker"),[m,h]=w.useState(null),g=w.useRef(null),x=w.useRef(null),y=le(C=>C.isDeploying),b=le(C=>C.deploymentLogs),j=le(C=>C.lastDeployment),N=le(C=>C.startDeployment),S=le(C=>C.addDeploymentLog),_=le(C=>C.setDeploymentResult),A=le(C=>C.stopDeployment),E=le(C=>C.clearDeploymentState),M=C=>{const $=C.toLowerCase().replace(/[_\s]+/g,"-").replace(/[^a-z0-9-]/g,"").replace(/--+/g,"-").replace(/^[^a-z]+/,"").replace(/-$/,"");return($.match(/^[a-z]/)?$:`app-${$}`).substring(0,31)},T=a?M(a.id):"",[D,z]=w.useState("my-test-rg"),[H,q]=w.useState(T),[X,W]=w.useState("eastus"),[G,ne]=w.useState(null);w.useEffect(()=>{if(a){const C=M(a.id);q(C);const $=B(C);ne($)}},[a?.id]),w.useEffect(()=>{x.current&&b.length>0&&(x.current.scrollTop=x.current.scrollHeight)},[b]);const B=C=>C?C.length>=32?"App name must be less than 32 characters":/^[a-z0-9-]+$/.test(C)?/^[a-z]/.test(C)?/[a-z0-9]$/.test(C)?C.includes("--")?"App name cannot contain consecutive hyphens (--)":null:"App name must end with a letter or number":"App name must start with a lowercase letter":"App name must contain only lowercase letters, numbers, and hyphens (no underscores or uppercase)":null;w.useEffect(()=>()=>{g.current&&clearTimeout(g.current)},[]);const U=async()=>{if(!a?.id||!D||!H)return;const C=D.trim(),$=H.trim(),Y=B($);if(Y){ne(Y);return}try{N();for await(const V of Ze.streamDeployment({entity_id:a.id,resource_group:C,app_name:$,region:X,ui_mode:"user"}))S(V.message),V.type==="deploy.completed"&&V.url&&V.auth_token?_({url:V.url,authToken:V.auth_token}):V.type==="deploy.failed"&&A()}catch(V){S(`Error: ${V instanceof Error?V.message:"Deployment failed"}`),A()}},R=async(C,$)=>{try{await navigator.clipboard.writeText(C),h($),g.current&&clearTimeout(g.current),g.current=setTimeout(()=>{h(null),g.current=null},2e3)}catch{h(null)}},L=`# Dockerfile for ${r} FROM python:3.11-slim diff --git 
a/python/packages/devui/frontend/src/components/features/workflow/run-workflow-button.tsx b/python/packages/devui/frontend/src/components/features/workflow/run-workflow-button.tsx index d4d1bc4887..9cddab5212 100644 --- a/python/packages/devui/frontend/src/components/features/workflow/run-workflow-button.tsx +++ b/python/packages/devui/frontend/src/components/features/workflow/run-workflow-button.tsx @@ -76,7 +76,7 @@ export function RunWorkflowButton({ // Analyze input requirements const inputAnalysis = useMemo(() => { - // Check if this is a ChatMessage schema (for AgentExecutor workflows) + // Check if this is a Message schema (for AgentExecutor workflows) const isChatMessage = isChatMessageSchema(inputSchema); if (!inputSchema) diff --git a/python/packages/devui/frontend/src/components/features/workflow/schema-form-renderer.tsx b/python/packages/devui/frontend/src/components/features/workflow/schema-form-renderer.tsx index 9abf6a1074..b37761ba5d 100644 --- a/python/packages/devui/frontend/src/components/features/workflow/schema-form-renderer.tsx +++ b/python/packages/devui/frontend/src/components/features/workflow/schema-form-renderer.tsx @@ -115,7 +115,7 @@ export function getFieldColumnSpan( } // ============================================================================ -// ChatMessage Pattern Detection (exported for reuse) +// Message Pattern Detection (exported for reuse) // ============================================================================ export function detectChatMessagePattern( @@ -436,7 +436,7 @@ export function SchemaFormRenderer({ (name) => !hideFields.includes(name) ); - // Detect ChatMessage pattern + // Detect Message pattern const isChatMessageLike = detectChatMessagePattern(schema, requiredFields); // Separate required and optional fields @@ -449,7 +449,7 @@ export function SchemaFormRenderer({ (name) => !requiredFields.includes(name) ); - // For ChatMessage: prioritize text/message/content + // For Message: prioritize 
text/message/content const sortedOptionalFields = isChatMessageLike ? [...optionalFieldNames].sort((a, b) => { const priority = (name: string) => diff --git a/python/packages/devui/frontend/src/components/features/workflow/workflow-input-form.tsx b/python/packages/devui/frontend/src/components/features/workflow/workflow-input-form.tsx index 367e1d5deb..bcb0a940e9 100644 --- a/python/packages/devui/frontend/src/components/features/workflow/workflow-input-form.tsx +++ b/python/packages/devui/frontend/src/components/features/workflow/workflow-input-form.tsx @@ -48,7 +48,7 @@ export function WorkflowInputForm({ const requiredFields = inputSchema.required || []; const isSimpleInput = inputSchema.type === "string" && !inputSchema.enum; - // Detect ChatMessage-like pattern for auto-filling role + // Detect Message-like pattern for auto-filling role const isChatMessageLike = detectChatMessagePattern(inputSchema, requiredFields); // Validation: check if required fields are filled @@ -82,7 +82,7 @@ export function WorkflowInputForm({ } }); - // Auto-fill role="user" for ChatMessage-like inputs + // Auto-fill role="user" for Message-like inputs if (isChatMessageLike && !initialData["role"]) { initialData["role"] = "user"; } diff --git a/python/packages/devui/frontend/src/components/layout/debug-panel.tsx b/python/packages/devui/frontend/src/components/layout/debug-panel.tsx index 828e03666f..19797ac74b 100644 --- a/python/packages/devui/frontend/src/components/layout/debug-panel.tsx +++ b/python/packages/devui/frontend/src/components/layout/debug-panel.tsx @@ -1340,7 +1340,7 @@ function TraceTreeNode({ node, depth = 0 }: { node: TraceNode; depth?: number }) {/* Operation badge */} - {operationName.replace("ChatAgent.", "").replace("invoke_agent ", "")} + {operationName.replace("Agent.", "").replace("invoke_agent ", "")} {/* Duration */} diff --git a/python/packages/devui/frontend/src/types/agent-framework.ts b/python/packages/devui/frontend/src/types/agent-framework.ts index 
5e26580d5f..5a63b8d914 100644 --- a/python/packages/devui/frontend/src/types/agent-framework.ts +++ b/python/packages/devui/frontend/src/types/agent-framework.ts @@ -223,7 +223,7 @@ export interface AgentResponseUpdate { // Agent run response (final) export interface AgentResponse { - messages: ChatMessage[]; + messages: Message[]; response_id?: string; created_at?: CreatedAtT; usage_details?: UsageDetails; @@ -232,7 +232,7 @@ export interface AgentResponse { } // Chat message -export interface ChatMessage { +export interface Message { contents: Content[]; role?: Role; author_name?: string; diff --git a/python/packages/devui/frontend/src/types/index.ts b/python/packages/devui/frontend/src/types/index.ts index 7d6e9a8f73..dc79cc43d4 100644 --- a/python/packages/devui/frontend/src/types/index.ts +++ b/python/packages/devui/frontend/src/types/index.ts @@ -185,7 +185,7 @@ export interface MetaResponse { } // Chat message types matching Agent Framework -export interface ChatMessage { +export interface Message { id: string; role: "user" | "assistant" | "system" | "tool"; contents: import("./agent-framework").Content[]; @@ -212,7 +212,7 @@ export interface AppState { } export interface ChatState { - messages: ChatMessage[]; + messages: Message[]; isStreaming: boolean; // streamEvents removed - use OpenAI events directly instead } diff --git a/python/packages/devui/frontend/src/utils/workflow-utils.ts b/python/packages/devui/frontend/src/utils/workflow-utils.ts index adda06e2d2..6d8c45c019 100644 --- a/python/packages/devui/frontend/src/utils/workflow-utils.ts +++ b/python/packages/devui/frontend/src/utils/workflow-utils.ts @@ -15,8 +15,8 @@ import type { Workflow } from "@/types/workflow"; import { getTypedWorkflow } from "@/types/workflow"; /** - * Detects if a JSON schema represents a ChatMessage input type. - * ChatMessage schemas typically have: + * Detects if a JSON schema represents a Message input type. 
+ * Message schemas typically have: * - type: "object" * - properties with "text" (required string) and "role" (optional string) * @@ -24,7 +24,7 @@ import { getTypedWorkflow } from "@/types/workflow"; * component for workflows that start with an AgentExecutor. * * @param schema - The JSON schema to check - * @returns true if the schema represents a ChatMessage-like input + * @returns true if the schema represents a Message-like input */ export function isChatMessageSchema(schema: JSONSchemaProperty | undefined): boolean { if (!schema) return false; @@ -37,13 +37,13 @@ export function isChatMessageSchema(schema: JSONSchemaProperty | undefined): boo const props = schema.properties; - // ChatMessage has "text" property (the main content) + // Message has "text" property (the main content) const hasText = "text" in props && props.text?.type === "string"; - // ChatMessage has "role" property (user, assistant, system) + // Message has "role" property (user, assistant, system) const hasRole = "role" in props && props.role?.type === "string"; - // If it has both text and role, it's likely a ChatMessage + // If it has both text and role, it's likely a Message if (hasText && hasRole) { return true; } diff --git a/python/packages/devui/tests/devui/conftest.py b/python/packages/devui/tests/devui/conftest.py index 4d6f818795..6902cef2ba 100644 --- a/python/packages/devui/tests/devui/conftest.py +++ b/python/packages/devui/tests/devui/conftest.py @@ -17,16 +17,16 @@ import pytest import pytest_asyncio from agent_framework import ( + Agent, AgentResponse, AgentResponseUpdate, AgentThread, BaseAgent, BaseChatClient, - ChatAgent, - ChatMessage, ChatResponse, ChatResponseUpdate, Content, + Message, ResponseStream, ) from agent_framework._clients import OptionsCoT @@ -67,17 +67,17 @@ def __init__(self) -> None: async def get_response( self, - messages: str | ChatMessage | list[str] | list[ChatMessage], + messages: str | Message | list[str] | list[Message], **kwargs: Any, ) -> 
ChatResponse: self.call_count += 1 if self.responses: return self.responses.pop(0) - return ChatResponse(messages=ChatMessage("assistant", ["test response"])) + return ChatResponse(messages=Message("assistant", ["test response"])) async def get_streaming_response( self, - messages: str | ChatMessage | list[str] | list[ChatMessage], + messages: str | Message | list[str] | list[Message], **kwargs: Any, ) -> AsyncIterable[ChatResponseUpdate]: self.call_count += 1 @@ -101,13 +101,13 @@ def __init__(self, **kwargs: Any): self.run_responses: list[ChatResponse] = [] self.streaming_responses: list[list[ChatResponseUpdate]] = [] self.call_count: int = 0 - self.received_messages: list[list[ChatMessage]] = [] + self.received_messages: list[list[Message]] = [] @override def _inner_get_response( self, *, - messages: Sequence[ChatMessage], + messages: Sequence[Message], stream: bool, options: Mapping[str, Any], **kwargs: Any, @@ -120,11 +120,11 @@ async def _get() -> ChatResponse: self.received_messages.append(list(messages)) if self.run_responses: return self.run_responses.pop(0) - return ChatResponse(messages=ChatMessage("assistant", ["Mock response from ChatAgent"])) + return ChatResponse(messages=Message("assistant", ["Mock response from Agent"])) return _get() - async def _stream_impl(self, messages: Sequence[ChatMessage]) -> AsyncIterable[ChatResponseUpdate]: + async def _stream_impl(self, messages: Sequence[Message]) -> AsyncIterable[ChatResponseUpdate]: self.call_count += 1 self.received_messages.append(list(messages)) if self.streaming_responses: @@ -135,7 +135,7 @@ async def _stream_impl(self, messages: Sequence[ChatMessage]) -> AsyncIterable[C yield ChatResponseUpdate(contents=[Content.from_text(text="Mock ")], role="assistant") yield ChatResponseUpdate(contents=[Content.from_text(text="streaming ")], role="assistant") yield ChatResponseUpdate(contents=[Content.from_text(text="response ")], role="assistant") - yield 
ChatResponseUpdate(contents=[Content.from_text(text="from ChatAgent")], role="assistant") + yield ChatResponseUpdate(contents=[Content.from_text(text="from Agent")], role="assistant") # ============================================================================= @@ -159,7 +159,7 @@ def __init__( def run( self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | Message | list[str] | list[Message] | None = None, *, stream: bool = False, thread: AgentThread | None = None, @@ -172,17 +172,17 @@ def run( async def _run( self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | Message | list[str] | list[Message] | None = None, *, thread: AgentThread | None = None, **kwargs: Any, ) -> AgentResponse: self.call_count += 1 - return AgentResponse(messages=[ChatMessage("assistant", [Content.from_text(text=self.response_text)])]) + return AgentResponse(messages=[Message("assistant", [Content.from_text(text=self.response_text)])]) def _run_stream( self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | Message | list[str] | list[Message] | None = None, *, thread: AgentThread | None = None, **kwargs: Any, @@ -205,7 +205,7 @@ def __init__(self, **kwargs: Any): def run( self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | Message | list[str] | list[Message] | None = None, *, stream: bool = False, thread: AgentThread | None = None, @@ -218,16 +218,16 @@ def run( async def _run( self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | Message | list[str] | list[Message] | None = None, *, thread: AgentThread | None = None, **kwargs: Any, ) -> AgentResponse: - return AgentResponse(messages=[ChatMessage("assistant", ["done"])]) + return AgentResponse(messages=[Message("assistant", ["done"])]) def _run_stream( self, - messages: str | ChatMessage | list[str] | 
list[ChatMessage] | None = None, + messages: str | Message | list[str] | list[Message] | None = None, *, thread: AgentThread | None = None, **kwargs: Any, @@ -275,7 +275,7 @@ async def _iter() -> AsyncIterable[AgentResponseUpdate]: def _create_agent_run_response(text: str = "Test response") -> AgentResponse: """Create an AgentResponse with the given text.""" - return AgentResponse(messages=[ChatMessage("assistant", [Content.from_text(text=text)])]) + return AgentResponse(messages=[Message("assistant", [Content.from_text(text=text)])]) def _create_agent_executor_response( @@ -289,8 +289,8 @@ def _create_agent_executor_response( executor_id=executor_id, agent_response=agent_response, full_conversation=[ - ChatMessage("user", [Content.from_text(text="User input")]), - ChatMessage("assistant", [Content.from_text(text=response_text)]), + Message("user", [Content.from_text(text="User input")]), + Message("assistant", [Content.from_text(text=response_text)]), ], ) @@ -318,7 +318,7 @@ def create_executor_completed_event( This creates the exact data structure that caused the serialization bug: WorkflowEvent.data contains AgentExecutorResponse which contains - AgentResponse and ChatMessage objects (SerializationMixin, not Pydantic). + AgentResponse and Message objects (SerializationMixin, not Pydantic). """ data = _create_agent_executor_response(executor_id) if with_agent_response else {"simple": "dict"} return WorkflowEvent.executor_completed(executor_id=executor_id, data=data) @@ -390,7 +390,7 @@ def executor_completed_event() -> WorkflowEvent[Any]: This creates the exact data structure that caused the serialization bug: executor_completed event (type='executor_completed').data contains AgentExecutorResponse which contains - AgentResponse and ChatMessage objects (SerializationMixin, not Pydantic). + AgentResponse and Message objects (SerializationMixin, not Pydantic). 
""" data = _create_agent_executor_response("test_executor") return WorkflowEvent.executor_completed(executor_id="test_executor", data=data) @@ -425,10 +425,10 @@ def test_entities_dir() -> str: @pytest_asyncio.fixture async def executor_with_real_agent() -> tuple[AgentFrameworkExecutor, str, MockBaseChatClient]: - """Create an executor with a REAL ChatAgent using mock chat client. + """Create an executor with a REAL Agent using mock chat client. This tests the full execution pipeline: - - Real ChatAgent class + - Real Agent class - Real message handling and normalization - Real middleware pipeline - Only the LLM call is mocked @@ -440,11 +440,11 @@ async def executor_with_real_agent() -> tuple[AgentFrameworkExecutor, str, MockB mapper = MessageMapper() executor = AgentFrameworkExecutor(discovery, mapper) - # Create a REAL ChatAgent with mock client - agent = ChatAgent( + # Create a REAL Agent with mock client + agent = Agent( id="test_chat_agent", name="Test Chat Agent", - description="A real ChatAgent for testing execution flow", + description="A real Agent for testing execution flow", chat_client=mock_client, system_message="You are a helpful test assistant.", ) @@ -469,18 +469,18 @@ async def sequential_workflow() -> tuple[AgentFrameworkExecutor, str, MockBaseCh """ mock_client = MockBaseChatClient() mock_client.run_responses = [ - ChatResponse(messages=ChatMessage("assistant", ["Here's the draft content about the topic."])), - ChatResponse(messages=ChatMessage("assistant", ["Review: Content is clear and well-structured."])), + ChatResponse(messages=Message("assistant", ["Here's the draft content about the topic."])), + ChatResponse(messages=Message("assistant", ["Review: Content is clear and well-structured."])), ] - writer = ChatAgent( + writer = Agent( id="writer", name="Writer", description="Content writer agent", chat_client=mock_client, system_message="You are a content writer. 
Create clear, engaging content.", ) - reviewer = ChatAgent( + reviewer = Agent( id="reviewer", name="Reviewer", description="Content reviewer agent", @@ -513,26 +513,26 @@ async def concurrent_workflow() -> tuple[AgentFrameworkExecutor, str, MockBaseCh """ mock_client = MockBaseChatClient() mock_client.run_responses = [ - ChatResponse(messages=ChatMessage("assistant", ["Research findings: Key data points identified."])), - ChatResponse(messages=ChatMessage("assistant", ["Analysis: Trends indicate positive growth."])), - ChatResponse(messages=ChatMessage("assistant", ["Summary: Overall outlook is favorable."])), + ChatResponse(messages=Message("assistant", ["Research findings: Key data points identified."])), + ChatResponse(messages=Message("assistant", ["Analysis: Trends indicate positive growth."])), + ChatResponse(messages=Message("assistant", ["Summary: Overall outlook is favorable."])), ] - researcher = ChatAgent( + researcher = Agent( id="researcher", name="Researcher", description="Research agent", chat_client=mock_client, system_message="You are a researcher. Find key data and insights.", ) - analyst = ChatAgent( + analyst = Agent( id="analyst", name="Analyst", description="Analysis agent", chat_client=mock_client, system_message="You are an analyst. 
Identify trends and patterns.", ) - summarizer = ChatAgent( + summarizer = Agent( id="summarizer", name="Summarizer", description="Summary agent", diff --git a/python/packages/devui/tests/devui/test_cleanup_hooks.py b/python/packages/devui/tests/devui/test_cleanup_hooks.py index f8bdf5c867..8d02bfaf27 100644 --- a/python/packages/devui/tests/devui/test_cleanup_hooks.py +++ b/python/packages/devui/tests/devui/test_cleanup_hooks.py @@ -7,7 +7,7 @@ from pathlib import Path import pytest -from agent_framework import AgentResponse, ChatMessage, Content +from agent_framework import AgentResponse, Content, Message from agent_framework_devui import register_cleanup from agent_framework_devui._discovery import EntityDiscovery @@ -39,12 +39,12 @@ async def run(self, messages=None, *, stream: bool = False, thread=None, **kwarg async def _stream(): yield AgentResponse( - messages=[ChatMessage(role="assistant", contents=[Content.from_text(text="Test response")])], + messages=[Message(role="assistant", contents=[Content.from_text(text="Test response")])], ) return _stream() return AgentResponse( - messages=[ChatMessage(role="assistant", contents=[Content.from_text(text="Test response")])], + messages=[Message(role="assistant", contents=[Content.from_text(text="Test response")])], ) @@ -267,7 +267,7 @@ async def test_cleanup_with_file_based_discovery(): # Write agent module with cleanup registration agent_file = agent_dir / "__init__.py" agent_file.write_text(""" -from agent_framework import AgentResponse, ChatMessage, Role, Content +from agent_framework import AgentResponse, Message, Role, Content from agent_framework_devui import register_cleanup class MockCredential: @@ -289,12 +289,12 @@ async def run(self, messages=None, *, stream: bool = False, thread=None, **kwarg if stream: async def _stream(): yield AgentResponse( - messages=[ChatMessage(role="assistant", content=[Content.from_text(text="Test")])], + messages=[Message(role="assistant", 
content=[Content.from_text(text="Test")])], inner_messages=[], ) return _stream() return AgentResponse( - messages=[ChatMessage(role="assistant", content=[Content.from_text(text="Test")])], + messages=[Message(role="assistant", content=[Content.from_text(text="Test")])], inner_messages=[], ) diff --git a/python/packages/devui/tests/devui/test_conversations.py b/python/packages/devui/tests/devui/test_conversations.py index dbc2e4ddb2..ccaea3524c 100644 --- a/python/packages/devui/tests/devui/test_conversations.py +++ b/python/packages/devui/tests/devui/test_conversations.py @@ -199,7 +199,7 @@ async def test_list_items_pagination(): @pytest.mark.asyncio async def test_list_items_converts_function_calls(): """Test that list_items properly converts function calls to ResponseFunctionToolCallItem.""" - from agent_framework import ChatMessage, ChatMessageStore + from agent_framework import ChatMessageStore, Message store = InMemoryConversationStore() @@ -216,8 +216,8 @@ async def test_list_items_converts_function_calls(): # Simulate messages from agent execution with function calls messages = [ - ChatMessage(role="user", contents=[{"type": "text", "text": "What's the weather in SF?"}]), - ChatMessage( + Message(role="user", contents=[{"type": "text", "text": "What's the weather in SF?"}]), + Message( role="assistant", contents=[ { @@ -228,7 +228,7 @@ async def test_list_items_converts_function_calls(): } ], ), - ChatMessage( + Message( role="tool", contents=[ { @@ -238,7 +238,7 @@ async def test_list_items_converts_function_calls(): } ], ), - ChatMessage(role="assistant", contents=[{"type": "text", "text": "The weather is sunny, 65°F"}]), + Message(role="assistant", contents=[{"type": "text", "text": "The weather is sunny, 65°F"}]), ] # Add messages to thread @@ -284,7 +284,7 @@ async def test_list_items_converts_function_calls(): @pytest.mark.asyncio async def test_list_items_handles_images_and_files(): """Test that list_items properly converts data content 
(images/files) to OpenAI types.""" - from agent_framework import ChatMessage, ChatMessageStore + from agent_framework import ChatMessageStore, Message store = InMemoryConversationStore() @@ -300,7 +300,7 @@ async def test_list_items_handles_images_and_files(): # Simulate message with image and file messages = [ - ChatMessage( + Message( role="user", contents=[ {"type": "text", "text": "Check this image and PDF"}, diff --git a/python/packages/devui/tests/devui/test_discovery.py b/python/packages/devui/tests/devui/test_discovery.py index c5e92b4645..d1f68c302f 100644 --- a/python/packages/devui/tests/devui/test_discovery.py +++ b/python/packages/devui/tests/devui/test_discovery.py @@ -74,7 +74,7 @@ async def test_discovery_accepts_agents_with_only_run(): init_file = agent_dir / "__init__.py" init_file.write_text(""" -from agent_framework import AgentResponse, AgentThread, ChatMessage, Role, Content +from agent_framework import AgentResponse, AgentThread, Message, Role, Content class NonStreamingAgent: id = "non_streaming" @@ -83,7 +83,7 @@ class NonStreamingAgent: async def run(self, messages=None, *, thread=None, **kwargs): return AgentResponse( - messages=[ChatMessage( + messages=[Message( role="assistant", contents=[Content.from_text(text="response")] )], @@ -188,14 +188,14 @@ def test_func(input: str) -> str: agent_dir = temp_path / "my_agent" agent_dir.mkdir() (agent_dir / "agent.py").write_text(""" -from agent_framework import AgentResponse, AgentThread, ChatMessage, Role, TextContent +from agent_framework import AgentResponse, AgentThread, Message, Role, TextContent class TestAgent: name = "Test Agent" async def run(self, messages=None, *, thread=None, **kwargs): return AgentResponse( - messages=[ChatMessage(role="assistant", contents=[Content.from_text(text="test")])], + messages=[Message(role="assistant", contents=[Content.from_text(text="test")])], response_id="test" ) diff --git a/python/packages/devui/tests/devui/test_execution.py 
b/python/packages/devui/tests/devui/test_execution.py index 3dd417cbf6..1c9d544fdd 100644 --- a/python/packages/devui/tests/devui/test_execution.py +++ b/python/packages/devui/tests/devui/test_execution.py @@ -4,7 +4,7 @@ Tests include: - Entity discovery and info retrieval -- Agent execution (sync and streaming) using real ChatAgent with mock LLM +- Agent execution (sync and streaming) using real Agent with mock LLM - Workflow execution using real WorkflowBuilder with FunctionExecutor - Edge cases like non-streaming agents """ @@ -15,7 +15,7 @@ from typing import Any import pytest -from agent_framework import AgentExecutor, ChatAgent, FunctionExecutor, WorkflowBuilder +from agent_framework import Agent, AgentExecutor, FunctionExecutor, WorkflowBuilder # Import mock classes from conftest for direct use in some tests from conftest import MockBaseChatClient @@ -77,15 +77,15 @@ async def test_executor_get_entity_info(executor): # ============================================================================= -# Agent Execution Tests (using real ChatAgent with mock LLM) +# Agent Execution Tests (using real Agent with mock LLM) # ============================================================================= async def test_agent_sync_execution(executor_with_real_agent): - """Test synchronous agent execution with REAL ChatAgent (mock LLM). + """Test synchronous agent execution with REAL Agent (mock LLM). This tests the full execution pipeline without needing an API key: - - Real ChatAgent class with middleware + - Real Agent class with middleware - Real message normalization - Mock chat client for LLM calls """ @@ -130,7 +130,7 @@ async def test_agent_sync_execution_respects_model_field(executor_with_real_agen async def test_chat_client_receives_correct_messages(executor_with_real_agent): """Verify the mock chat client receives properly formatted messages. 
- This tests that the REAL ChatAgent properly: + This tests that the REAL Agent properly: - Normalizes input messages - Formats messages for the chat client """ @@ -297,14 +297,14 @@ async def test_full_pipeline_workflow_events_are_json_serializable(): This is particularly important for workflows with AgentExecutor because: - AgentExecutor produces executor_completed event (type='executor_completed') with AgentExecutorResponse - - AgentExecutorResponse contains AgentResponse and ChatMessage objects + - AgentExecutorResponse contains AgentResponse and Message objects - These are SerializationMixin objects, not Pydantic, which caused the original bug This test ensures the ENTIRE streaming pipeline works end-to-end. """ # Create a workflow with AgentExecutor (the problematic case) mock_client = MockBaseChatClient() - agent = ChatAgent( + agent = Agent( id="serialization_test_agent", name="Serialization Test Agent", description="Agent for testing serialization", @@ -466,15 +466,15 @@ async def process(self, text: str, ctx: WorkflowContext[Any, Any]) -> None: @pytest.mark.asyncio async def test_executor_parse_converts_to_chat_message_for_sequential_workflow(sequential_workflow): - """Sequential workflows convert string input to ChatMessage.""" - from agent_framework import ChatMessage + """Sequential workflows convert string input to Message.""" + from agent_framework import Message executor, _entity_id, _mock_client, workflow = sequential_workflow - # Sequential workflows expect ChatMessage, so raw string becomes ChatMessage + # Sequential workflows expect Message, so raw string becomes Message parsed = executor._parse_raw_workflow_input(workflow, "hello") - assert isinstance(parsed, ChatMessage) + assert isinstance(parsed, Message) assert parsed.text == "hello" @@ -538,7 +538,7 @@ def test_extract_workflow_hil_responses_handles_stringified_json(): async def test_executor_handles_streaming_agent(): """Test executor handles agents with run(stream=True) method.""" - from 
agent_framework import AgentResponse, AgentResponseUpdate, AgentThread, ChatMessage, Content + from agent_framework import AgentResponse, AgentResponseUpdate, AgentThread, Content, Message class StreamingAgent: """Agent with run() method supporting stream parameter.""" @@ -556,7 +556,7 @@ def run(self, messages=None, *, stream=False, thread=None, **kwargs): async def _run_impl(self, messages): return AgentResponse( - messages=[ChatMessage(role="assistant", contents=[Content.from_text(text=f"Processed: {messages}")])], + messages=[Message(role="assistant", contents=[Content.from_text(text=f"Processed: {messages}")])], response_id="test_123", ) diff --git a/python/packages/devui/tests/devui/test_mapper.py b/python/packages/devui/tests/devui/test_mapper.py index 3609cd774b..bab2130a99 100644 --- a/python/packages/devui/tests/devui/test_mapper.py +++ b/python/packages/devui/tests/devui/test_mapper.py @@ -304,7 +304,7 @@ async def test_executor_completed_event_with_agent_response( This is a REGRESSION TEST for the serialization bug where WorkflowEvent.data contained AgentExecutorResponse with nested - AgentResponse and ChatMessage objects (SerializationMixin) that + AgentResponse and Message objects (SerializationMixin) that Pydantic couldn't serialize. 
""" # Create event with realistic nested data - the exact structure that caused the bug @@ -579,13 +579,13 @@ async def test_workflow_output_event(mapper: MessageMapper, test_request: AgentF async def test_workflow_output_event_with_list_data(mapper: MessageMapper, test_request: AgentFrameworkRequest) -> None: """Test output event (type='output') with list data (common for sequential/concurrent workflows).""" - from agent_framework import ChatMessage + from agent_framework import Message from agent_framework._workflows._events import WorkflowEvent - # Sequential/Concurrent workflows often output list[ChatMessage] + # Sequential/Concurrent workflows often output list[Message] messages = [ - ChatMessage(role="user", contents=[Content.from_text(text="Hello")]), - ChatMessage(role="assistant", contents=[Content.from_text(text="World")]), + Message(role="user", contents=[Content.from_text(text="Hello")]), + Message(role="assistant", contents=[Content.from_text(text="World")]), ] event = WorkflowEvent.output(executor_id="complete", data=messages) events = await mapper.convert_event(event, test_request) diff --git a/python/packages/devui/tests/devui/test_multimodal_workflow.py b/python/packages/devui/tests/devui/test_multimodal_workflow.py index 1124c9afce..7af7f3f308 100644 --- a/python/packages/devui/tests/devui/test_multimodal_workflow.py +++ b/python/packages/devui/tests/devui/test_multimodal_workflow.py @@ -48,8 +48,8 @@ def test_is_openai_multimodal_format_detects_message_format(self): assert executor._is_openai_multimodal_format([{"foo": "bar"}]) is False # no type field def test_convert_openai_input_to_chat_message_with_image(self): - """Test that OpenAI format with image is converted to ChatMessage with DataContent.""" - from agent_framework import ChatMessage + """Test that OpenAI format with image is converted to Message with DataContent.""" + from agent_framework import Message discovery = MagicMock(spec=EntityDiscovery) mapper = MagicMock(spec=MessageMapper) 
@@ -67,11 +67,11 @@ def test_convert_openai_input_to_chat_message_with_image(self): } ] - # Convert to ChatMessage + # Convert to Message result = executor._convert_input_to_chat_message(openai_input) - # Verify result is ChatMessage - assert isinstance(result, ChatMessage), f"Expected ChatMessage, got {type(result)}" + # Verify result is Message + assert isinstance(result, Message), f"Expected Message, got {type(result)}" assert result.role == "user" # Verify contents @@ -89,7 +89,7 @@ def test_convert_openai_input_to_chat_message_with_image(self): async def test_parse_workflow_input_handles_json_string_with_multimodal(self): """Test that _parse_workflow_input correctly handles JSON string with multimodal content.""" - from agent_framework import ChatMessage + from agent_framework import Message discovery = MagicMock(spec=EntityDiscovery) mapper = MagicMock(spec=MessageMapper) @@ -114,8 +114,8 @@ async def test_parse_workflow_input_handles_json_string_with_multimodal(self): # Parse the input result = await executor._parse_workflow_input(mock_workflow, json_string_input) - # Verify result is ChatMessage with multimodal content - assert isinstance(result, ChatMessage), f"Expected ChatMessage, got {type(result)}" + # Verify result is Message with multimodal content + assert isinstance(result, Message), f"Expected Message, got {type(result)}" assert len(result.contents) == 2 # Verify text content @@ -129,7 +129,7 @@ async def test_parse_workflow_input_handles_json_string_with_multimodal(self): async def test_parse_workflow_input_still_handles_simple_dict(self): """Test that simple dict input still works (backward compatibility).""" - from agent_framework import ChatMessage + from agent_framework import Message discovery = MagicMock(spec=EntityDiscovery) mapper = MagicMock(spec=MessageMapper) @@ -139,14 +139,14 @@ async def test_parse_workflow_input_still_handles_simple_dict(self): simple_input = {"text": "Hello world", "role": "user"} json_string_input = 
json.dumps(simple_input) - # Mock workflow with ChatMessage input type + # Mock workflow with Message input type mock_workflow = MagicMock() mock_executor = MagicMock() - mock_executor.input_types = [ChatMessage] + mock_executor.input_types = [Message] mock_workflow.get_start_executor.return_value = mock_executor # Parse the input result = await executor._parse_workflow_input(mock_workflow, json_string_input) - # Result should be ChatMessage (from _parse_structured_workflow_input) - assert isinstance(result, ChatMessage), f"Expected ChatMessage, got {type(result)}" + # Result should be Message (from _parse_structured_workflow_input) + assert isinstance(result, Message), f"Expected Message, got {type(result)}" diff --git a/python/packages/devui/tests/devui/test_schema_generation.py b/python/packages/devui/tests/devui/test_schema_generation.py index ddc8b401a6..a5e6c47ba6 100644 --- a/python/packages/devui/tests/devui/test_schema_generation.py +++ b/python/packages/devui/tests/devui/test_schema_generation.py @@ -67,16 +67,16 @@ def test_dataclass_schema_generation(): def test_chat_message_schema_generation(): - """Test schema generation for ChatMessage (SerializationMixin).""" + """Test schema generation for Message (SerializationMixin).""" try: - from agent_framework import ChatMessage + from agent_framework import Message - schema = generate_input_schema(ChatMessage) + schema = generate_input_schema(Message) assert schema is not None assert isinstance(schema, dict) except ImportError: - pytest.skip("ChatMessage not available - agent_framework not installed") + pytest.skip("Message not available - agent_framework not installed") def test_pydantic_model_schema_generation(): diff --git a/python/packages/devui/tests/devui/test_server.py b/python/packages/devui/tests/devui/test_server.py index 1489142914..766c03e8bf 100644 --- a/python/packages/devui/tests/devui/test_server.py +++ b/python/packages/devui/tests/devui/test_server.py @@ -142,7 +142,7 @@ async def 
test_credential_cleanup() -> None: """Test that async credentials are properly closed during server cleanup.""" from unittest.mock import AsyncMock, Mock - from agent_framework import ChatAgent + from agent_framework import Agent # Create mock credential with async close mock_credential = AsyncMock() @@ -155,7 +155,7 @@ async def test_credential_cleanup() -> None: mock_client.function_invocation_configuration = None # Create agent with mock client - agent = ChatAgent(name="TestAgent", chat_client=mock_client, instructions="Test agent") + agent = Agent(name="TestAgent", chat_client=mock_client, instructions="Test agent") # Create DevUI server with agent server = DevServer() @@ -175,7 +175,7 @@ async def test_credential_cleanup_error_handling() -> None: """Test that credential cleanup errors are handled gracefully.""" from unittest.mock import AsyncMock, Mock - from agent_framework import ChatAgent + from agent_framework import Agent # Create mock credential that raises error on close mock_credential = AsyncMock() @@ -188,7 +188,7 @@ async def test_credential_cleanup_error_handling() -> None: mock_client.function_invocation_configuration = None # Create agent with mock client - agent = ChatAgent(name="TestAgent", chat_client=mock_client, instructions="Test agent") + agent = Agent(name="TestAgent", chat_client=mock_client, instructions="Test agent") # Create DevUI server with agent server = DevServer() @@ -207,7 +207,7 @@ async def test_multiple_credential_attributes() -> None: """Test that we check all common credential attribute names.""" from unittest.mock import AsyncMock, Mock - from agent_framework import ChatAgent + from agent_framework import Agent # Create mock credentials mock_cred1 = Mock() @@ -223,7 +223,7 @@ async def test_multiple_credential_attributes() -> None: mock_client.function_invocation_configuration = None # Create agent with mock client - agent = ChatAgent(name="TestAgent", chat_client=mock_client, instructions="Test agent") + agent = 
Agent(name="TestAgent", chat_client=mock_client, instructions="Test agent") # Create DevUI server with agent server = DevServer() diff --git a/python/packages/durabletask/agent_framework_durabletask/_durable_agent_state.py b/python/packages/durabletask/agent_framework_durabletask/_durable_agent_state.py index c6e6eaad08..4fd59df051 100644 --- a/python/packages/durabletask/agent_framework_durabletask/_durable_agent_state.py +++ b/python/packages/durabletask/agent_framework_durabletask/_durable_agent_state.py @@ -37,8 +37,8 @@ from agent_framework import ( AgentResponse, - ChatMessage, Content, + Message, UsageDetails, get_logger, ) @@ -803,11 +803,11 @@ def from_run_request(request: RunRequest) -> DurableAgentStateMessage: ) @staticmethod - def from_chat_message(chat_message: ChatMessage) -> DurableAgentStateMessage: + def from_chat_message(chat_message: Message) -> DurableAgentStateMessage: """Converts an Agent Framework chat message to a durable state message. Args: - chat_message: ChatMessage object with role, contents, and metadata to convert + chat_message: Message object with role, contents, and metadata to convert Returns: DurableAgentStateMessage with converted content items and metadata @@ -824,15 +824,15 @@ def from_chat_message(chat_message: ChatMessage) -> DurableAgentStateMessage: ) def to_chat_message(self) -> Any: - """Converts this DurableAgentStateMessage back to an agent framework ChatMessage. + """Converts this DurableAgentStateMessage back to an agent framework Message. 
Returns: - ChatMessage object with role, contents, and metadata converted back to agent framework types + Message object with role, contents, and metadata converted back to agent framework types """ # Convert DurableAgentStateContent objects back to agent_framework content objects ai_contents = [c.to_ai_content() for c in self.contents] - # Build kwargs for ChatMessage + # Build kwargs for Message kwargs: dict[str, Any] = { "role": self.role, "contents": ai_contents, @@ -844,7 +844,7 @@ def to_chat_message(self) -> Any: if self.extension_data is not None: kwargs["additional_properties"] = self.extension_data - return ChatMessage(**kwargs) + return Message(**kwargs) class DurableAgentStateDataContent(DurableAgentStateContent): diff --git a/python/packages/durabletask/agent_framework_durabletask/_entities.py b/python/packages/durabletask/agent_framework_durabletask/_entities.py index c39359dc72..186561e3f4 100644 --- a/python/packages/durabletask/agent_framework_durabletask/_entities.py +++ b/python/packages/durabletask/agent_framework_durabletask/_entities.py @@ -11,8 +11,8 @@ from agent_framework import ( AgentResponse, AgentResponseUpdate, - ChatMessage, Content, + Message, ResponseStream, SupportsAgentRun, get_logger, @@ -150,7 +150,7 @@ async def run( self.state.data.conversation_history.append(state_request) try: - chat_messages: list[ChatMessage] = [ + chat_messages: list[Message] = [ m.to_chat_message() for entry in self.state.data.conversation_history if not self._is_error_response(entry) @@ -175,7 +175,7 @@ async def run( except Exception as exc: logger.exception("[AgentEntity.run] Agent execution failed.") - error_message = ChatMessage( + error_message = Message( role="assistant", contents=[Content.from_error(message=str(exc), error_code=type(exc).__name__)] ) error_response = AgentResponse( diff --git a/python/packages/durabletask/agent_framework_durabletask/_executors.py b/python/packages/durabletask/agent_framework_durabletask/_executors.py index 
226d9dff6c..2193f94e16 100644 --- a/python/packages/durabletask/agent_framework_durabletask/_executors.py +++ b/python/packages/durabletask/agent_framework_durabletask/_executors.py @@ -16,7 +16,7 @@ from datetime import datetime, timezone from typing import Any, Generic, TypeVar -from agent_framework import AgentResponse, AgentThread, ChatMessage, Content, get_logger +from agent_framework import AgentResponse, AgentThread, Content, Message, get_logger from durabletask.client import TaskHubGrpcClient from durabletask.entities import EntityInstanceId from durabletask.task import CompletableTask, CompositeTask, OrchestrationContext, Task @@ -179,7 +179,7 @@ def _create_acceptance_response(self, correlation_id: str) -> AgentResponse: Returns: AgentResponse: Acceptance response with correlation ID """ - acceptance_message = ChatMessage( + acceptance_message = Message( role="system", contents=[ Content.from_text( @@ -360,7 +360,7 @@ def _handle_agent_response( "[ClientAgentExecutor] Error converting response for correlation: %s", correlation_id, ) - error_message = ChatMessage( + error_message = Message( role="system", contents=[ Content.from_error( @@ -375,7 +375,7 @@ def _handle_agent_response( self.max_poll_retries, correlation_id, ) - error_message = ChatMessage( + error_message = Message( role="system", contents=[ Content.from_error( diff --git a/python/packages/durabletask/agent_framework_durabletask/_shim.py b/python/packages/durabletask/agent_framework_durabletask/_shim.py index 00f606ffe4..19ea8a496f 100644 --- a/python/packages/durabletask/agent_framework_durabletask/_shim.py +++ b/python/packages/durabletask/agent_framework_durabletask/_shim.py @@ -12,7 +12,7 @@ from abc import ABC, abstractmethod from typing import Any, Generic, Literal, TypeVar -from agent_framework import AgentThread, ChatMessage, SupportsAgentRun +from agent_framework import AgentThread, Message, SupportsAgentRun from ._executors import DurableAgentExecutor from ._models import 
DurableAgentThread @@ -86,7 +86,7 @@ def __init__(self, executor: DurableAgentExecutor[TaskT], name: str, *, agent_id def run( # type: ignore[override] self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | Message | list[str] | list[Message] | None = None, *, stream: Literal[False] = False, thread: AgentThread | None = None, @@ -136,7 +136,7 @@ def get_new_thread(self, **kwargs: Any) -> DurableAgentThread: """Create a new agent thread via the provider.""" return self._executor.get_new_thread(self.name, **kwargs) - def _normalize_messages(self, messages: str | ChatMessage | list[str] | list[ChatMessage] | None) -> str: + def _normalize_messages(self, messages: str | Message | list[str] | list[Message] | None) -> str: """Convert supported message inputs to a single string. Args: @@ -149,7 +149,7 @@ def _normalize_messages(self, messages: str | ChatMessage | list[str] | list[Cha return "" if isinstance(messages, str): return messages - if isinstance(messages, ChatMessage): + if isinstance(messages, Message): return messages.text or "" if isinstance(messages, list): if not messages: @@ -157,6 +157,6 @@ def _normalize_messages(self, messages: str | ChatMessage | list[str] | list[Cha first_item = messages[0] if isinstance(first_item, str): return "\n".join(messages) # type: ignore[arg-type] - # List of ChatMessage + # List of Message return "\n".join([msg.text or "" for msg in messages]) # type: ignore[union-attr] return "" diff --git a/python/packages/durabletask/agent_framework_durabletask/_worker.py b/python/packages/durabletask/agent_framework_durabletask/_worker.py index ce6dc9d70e..781c8fc953 100644 --- a/python/packages/durabletask/agent_framework_durabletask/_worker.py +++ b/python/packages/durabletask/agent_framework_durabletask/_worker.py @@ -29,7 +29,7 @@ class DurableAIAgentWorker: Example: ```python from durabletask import TaskHubGrpcWorker - from agent_framework import ChatAgent + from agent_framework import 
Agent from agent_framework.azure import DurableAIAgentWorker # Create the underlying worker @@ -39,7 +39,7 @@ class DurableAIAgentWorker: agent_worker = DurableAIAgentWorker(worker) # Register agents - my_agent = ChatAgent(chat_client=client, name="assistant") + my_agent = Agent(chat_client=client, name="assistant") agent_worker.add_agent(my_agent) # Start the worker diff --git a/python/packages/durabletask/tests/test_durable_entities.py b/python/packages/durabletask/tests/test_durable_entities.py index 03e26784cc..a11e9718ef 100644 --- a/python/packages/durabletask/tests/test_durable_entities.py +++ b/python/packages/durabletask/tests/test_durable_entities.py @@ -11,7 +11,7 @@ from unittest.mock import AsyncMock, Mock import pytest -from agent_framework import AgentResponse, AgentResponseUpdate, ChatMessage, Content, ResponseStream +from agent_framework import AgentResponse, AgentResponseUpdate, Content, Message, ResponseStream from pydantic import BaseModel from agent_framework_durabletask import ( @@ -71,7 +71,7 @@ def _make_entity(agent: Any, callback: Any = None, *, thread_id: str = "test-thr def _role_value(chat_message: DurableAgentStateMessage) -> str: - """Helper to extract the string role from a ChatMessage.""" + """Helper to extract the string role from a Message.""" role = getattr(chat_message, "role", None) role_value = getattr(role, "value", role) if role_value is None: @@ -81,7 +81,7 @@ def _role_value(chat_message: DurableAgentStateMessage) -> str: def _agent_response(text: str | None) -> AgentResponse: """Create an AgentResponse with a single assistant message.""" - message = ChatMessage(role="assistant", text=text) if text is not None else ChatMessage(role="assistant", text="") + message = Message(role="assistant", text=text) if text is not None else Message(role="assistant", text="") return AgentResponse(messages=[message], created_at="2024-01-01T00:00:00Z") diff --git a/python/packages/durabletask/tests/test_shim.py 
b/python/packages/durabletask/tests/test_shim.py index 6efb027628..9f2fefc406 100644 --- a/python/packages/durabletask/tests/test_shim.py +++ b/python/packages/durabletask/tests/test_shim.py @@ -10,7 +10,7 @@ from unittest.mock import Mock import pytest -from agent_framework import ChatMessage, SupportsAgentRun +from agent_framework import Message, SupportsAgentRun from pydantic import BaseModel from agent_framework_durabletask import DurableAgentThread @@ -76,8 +76,8 @@ def test_run_accepts_string_message(self, test_agent: DurableAIAgent[Any], mock_ assert kwargs["run_request"].message == "Hello, world!" def test_run_accepts_chat_message(self, test_agent: DurableAIAgent[Any], mock_executor: Mock) -> None: - """Verify run accepts and normalizes ChatMessage objects.""" - chat_msg = ChatMessage(role="user", text="Test message") + """Verify run accepts and normalizes Message objects.""" + chat_msg = Message(role="user", text="Test message") test_agent.run(chat_msg) mock_executor.run_durable_agent.assert_called_once() @@ -93,10 +93,10 @@ def test_run_accepts_list_of_strings(self, test_agent: DurableAIAgent[Any], mock assert kwargs["run_request"].message == "First message\nSecond message" def test_run_accepts_list_of_chat_messages(self, test_agent: DurableAIAgent[Any], mock_executor: Mock) -> None: - """Verify run accepts and joins list of ChatMessage objects.""" + """Verify run accepts and joins list of Message objects.""" messages = [ - ChatMessage(role="user", text="Message 1"), - ChatMessage(role="assistant", text="Message 2"), + Message(role="user", text="Message 1"), + Message(role="assistant", text="Message 2"), ] test_agent.run(messages) diff --git a/python/packages/foundry_local/samples/foundry_local_agent.py b/python/packages/foundry_local/samples/foundry_local_agent.py index 9e81d2b33d..bca1d469d9 100644 --- a/python/packages/foundry_local/samples/foundry_local_agent.py +++ b/python/packages/foundry_local/samples/foundry_local_agent.py @@ -10,7 +10,7 @@ from 
agent_framework_foundry_local import FoundryLocalClient if TYPE_CHECKING: - from agent_framework import ChatAgent + from agent_framework import Agent """ This sample demonstrates basic usage of the FoundryLocalClient. @@ -33,7 +33,7 @@ def get_weather( return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C." -async def non_streaming_example(agent: ChatAgent) -> None: +async def non_streaming_example(agent: Agent) -> None: """Example of non-streaming response (get the complete result at once).""" print("=== Non-streaming Response Example ===") @@ -43,7 +43,7 @@ async def non_streaming_example(agent: ChatAgent) -> None: print(f"Agent: {result}\n") -async def streaming_example(agent: ChatAgent) -> None: +async def streaming_example(agent: Agent) -> None: """Example of streaming response (get results as they are generated).""" print("=== Streaming Response Example ===") diff --git a/python/packages/foundry_local/tests/test_foundry_local_client.py b/python/packages/foundry_local/tests/test_foundry_local_client.py index 324c94630e..031461d926 100644 --- a/python/packages/foundry_local/tests/test_foundry_local_client.py +++ b/python/packages/foundry_local/tests/test_foundry_local_client.py @@ -3,7 +3,7 @@ from unittest.mock import MagicMock, patch import pytest -from agent_framework import ChatClientProtocol +from agent_framework import SupportsChatGetResponse from agent_framework.exceptions import ServiceInitializationError from pydantic import ValidationError @@ -55,7 +55,7 @@ def test_foundry_local_client_init(mock_foundry_local_manager: MagicMock) -> Non assert client.model_id == "test-model-id" assert client.manager is mock_foundry_local_manager - assert isinstance(client, ChatClientProtocol) + assert isinstance(client, SupportsChatGetResponse) def test_foundry_local_client_init_with_bootstrap_false(mock_foundry_local_manager: MagicMock) -> None: diff --git 
a/python/packages/github_copilot/agent_framework_github_copilot/_agent.py b/python/packages/github_copilot/agent_framework_github_copilot/_agent.py index 06fad5d126..e5e8614fd6 100644 --- a/python/packages/github_copilot/agent_framework_github_copilot/_agent.py +++ b/python/packages/github_copilot/agent_framework_github_copilot/_agent.py @@ -15,9 +15,9 @@ AgentResponseUpdate, AgentThread, BaseAgent, - ChatMessage, Content, ContextProvider, + Message, ResponseStream, normalize_messages, ) @@ -278,7 +278,7 @@ async def stop(self) -> None: @overload def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: Literal[False] = False, thread: AgentThread | None = None, @@ -289,7 +289,7 @@ def run( @overload def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: Literal[True], thread: AgentThread | None = None, @@ -299,7 +299,7 @@ def run( def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: bool = False, thread: AgentThread | None = None, @@ -341,7 +341,7 @@ def _finalize(updates: Sequence[AgentResponseUpdate]) -> AgentResponse: async def _run_impl( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, thread: AgentThread | None = None, options: OptionsT | None = None, @@ -366,7 +366,7 @@ async def _run_impl( except Exception as ex: raise ServiceException(f"GitHub Copilot request failed: {ex}") from ex - response_messages: list[ChatMessage] = [] + response_messages: list[Message] = [] response_id: str | None = None # send_and_wait returns only the final ASSISTANT_MESSAGE event; @@ -376,7 +376,7 @@ async def _run_impl( if 
response_event.data.content: response_messages.append( - ChatMessage( + Message( role="assistant", contents=[Content.from_text(response_event.data.content)], message_id=message_id, @@ -389,7 +389,7 @@ async def _run_impl( async def _stream_updates( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, thread: AgentThread | None = None, options: OptionsT | None = None, diff --git a/python/packages/github_copilot/tests/test_github_copilot_agent.py b/python/packages/github_copilot/tests/test_github_copilot_agent.py index ed302b5bb6..b2b7b2ebee 100644 --- a/python/packages/github_copilot/tests/test_github_copilot_agent.py +++ b/python/packages/github_copilot/tests/test_github_copilot_agent.py @@ -11,8 +11,8 @@ AgentResponse, AgentResponseUpdate, AgentThread, - ChatMessage, Content, + Message, ) from agent_framework.exceptions import ServiceException from copilot.generated.session_events import Data, SessionEvent, SessionEventType @@ -290,11 +290,11 @@ async def test_run_chat_message( mock_session: MagicMock, assistant_message_event: SessionEvent, ) -> None: - """Test run method with ChatMessage.""" + """Test run method with Message.""" mock_session.send_and_wait.return_value = assistant_message_event agent = GitHubCopilotAgent(client=mock_client) - chat_message = ChatMessage(role="user", contents=[Content.from_text("Hello")]) + chat_message = Message(role="user", contents=[Content.from_text("Hello")]) response = await agent.run(chat_message) assert isinstance(response, AgentResponse) diff --git a/python/packages/lab/gaia/samples/azure_ai_agent.py b/python/packages/lab/gaia/samples/azure_ai_agent.py index 3f64e3a684..f83680603e 100644 --- a/python/packages/lab/gaia/samples/azure_ai_agent.py +++ b/python/packages/lab/gaia/samples/azure_ai_agent.py @@ -26,13 +26,13 @@ from collections.abc import AsyncIterator from contextlib import asynccontextmanager -from agent_framework 
import ChatAgent, HostedCodeInterpreterTool, HostedWebSearchTool +from agent_framework import Agent, HostedCodeInterpreterTool, HostedWebSearchTool from agent_framework.azure import AzureAIAgentClient from azure.identity.aio import AzureCliCredential @asynccontextmanager -async def create_gaia_agent() -> AsyncIterator[ChatAgent]: +async def create_gaia_agent() -> AsyncIterator[Agent]: """Create an Azure AI agent configured for GAIA benchmark tasks. The agent is configured with: @@ -40,7 +40,7 @@ async def create_gaia_agent() -> AsyncIterator[ChatAgent]: - Code Interpreter tool for calculations and data analysis Yields: - ChatAgent: A configured agent ready to run GAIA tasks. + Agent: A configured agent ready to run GAIA tasks. Example: async with create_gaia_agent() as agent: diff --git a/python/packages/lab/gaia/samples/openai_agent.py b/python/packages/lab/gaia/samples/openai_agent.py index 333c8d0931..5380866dba 100644 --- a/python/packages/lab/gaia/samples/openai_agent.py +++ b/python/packages/lab/gaia/samples/openai_agent.py @@ -25,12 +25,12 @@ from collections.abc import AsyncIterator from contextlib import asynccontextmanager -from agent_framework import ChatAgent, HostedCodeInterpreterTool, HostedWebSearchTool +from agent_framework import Agent, HostedCodeInterpreterTool, HostedWebSearchTool from agent_framework.openai import OpenAIResponsesClient @asynccontextmanager -async def create_gaia_agent() -> AsyncIterator[ChatAgent]: +async def create_gaia_agent() -> AsyncIterator[Agent]: """Create an OpenAI agent configured for GAIA benchmark tasks. Uses OpenAI Responses API for enhanced capabilities. @@ -40,7 +40,7 @@ async def create_gaia_agent() -> AsyncIterator[ChatAgent]: - Code Interpreter tool for calculations and data analysis Yields: - ChatAgent: A configured agent ready to run GAIA tasks. + Agent: A configured agent ready to run GAIA tasks. 
Example: async with create_gaia_agent() as agent: diff --git a/python/packages/lab/lightning/README.md b/python/packages/lab/lightning/README.md index 05d3691d74..4219713b77 100644 --- a/python/packages/lab/lightning/README.md +++ b/python/packages/lab/lightning/README.md @@ -49,7 +49,7 @@ async def math_agent(task: TaskType, llm: LLM) -> float: """A function that solves a math problem and returns the evaluation score.""" async with ( MCPStdioTool(name="calculator", command="uvx", args=["mcp-server-calculator"]) as mcp_server, - ChatAgent( + Agent( chat_client=OpenAIChatClient( model_id=llm.model, api_key="your-api-key", diff --git a/python/packages/lab/lightning/samples/train_math_agent.py b/python/packages/lab/lightning/samples/train_math_agent.py index 0cb771e856..d9164adf42 100644 --- a/python/packages/lab/lightning/samples/train_math_agent.py +++ b/python/packages/lab/lightning/samples/train_math_agent.py @@ -20,7 +20,7 @@ from typing import TypedDict, cast import sympy # type: ignore[import-untyped,reportMissingImports] -from agent_framework import AgentResponse, ChatAgent, MCPStdioTool +from agent_framework import Agent, AgentResponse, MCPStdioTool from agent_framework.lab.lightning import AgentFrameworkTracer from agent_framework.openai import OpenAIChatClient from agentlightning import LLM, Dataset, Trainer, rollout @@ -166,7 +166,7 @@ async def math_agent(task: MathProblem, llm: LLM) -> float: # MCPStdioTool provides calculator functionality via MCP protocol async with ( MCPStdioTool(name="calculator", command="uvx", args=["mcp-server-calculator"]) as mcp_server, - ChatAgent( + Agent( chat_client=OpenAIChatClient( model_id=llm.model, # This is the model being trained api_key=os.getenv("OPENAI_API_KEY") or "dummy", # Can be dummy when connecting to training LLM diff --git a/python/packages/lab/lightning/tests/test_lightning.py b/python/packages/lab/lightning/tests/test_lightning.py index c528bd8d78..d302d71353 100644 --- 
a/python/packages/lab/lightning/tests/test_lightning.py +++ b/python/packages/lab/lightning/tests/test_lightning.py @@ -9,7 +9,7 @@ agentlightning = pytest.importorskip("agentlightning") -from agent_framework import AgentExecutor, AgentResponse, ChatAgent, WorkflowBuilder, Workflow +from agent_framework import AgentExecutor, AgentResponse, Agent, WorkflowBuilder, Workflow from agent_framework_lab_lightning import AgentFrameworkTracer from agent_framework.openai import OpenAIChatClient from agentlightning import TracerTraceToTriplet @@ -80,13 +80,13 @@ def workflow_two_agents(): ), ): # Create the two agents - analyzer_agent = ChatAgent( + analyzer_agent = Agent( chat_client=first_chat_client, name="DataAnalyzer", instructions="You are a data analyst. Analyze the given data and provide insights.", ) - advisor_agent = ChatAgent( + advisor_agent = Agent( chat_client=second_chat_client, name="InvestmentAdvisor", instructions="You are an investment advisor. Based on analysis results, provide recommendations.", diff --git a/python/packages/lab/tau2/README.md b/python/packages/lab/tau2/README.md index a0b587ea3c..66da215b47 100644 --- a/python/packages/lab/tau2/README.md +++ b/python/packages/lab/tau2/README.md @@ -138,12 +138,12 @@ export OPENAI_BASE_URL="https://your-custom-endpoint.com/v1" ```python from agent_framework.lab.tau2 import TaskRunner -from agent_framework import ChatAgent +from agent_framework import Agent class CustomTaskRunner(TaskRunner): def assistant_agent(self, assistant_chat_client): # Override to customize the assistant agent - return ChatAgent( + return Agent( chat_client=assistant_chat_client, instructions="Your custom system prompt here", # Add custom tools, temperature, etc. 
@@ -151,7 +151,7 @@ class CustomTaskRunner(TaskRunner): def user_simulator(self, user_chat_client, task): # Override to customize the user simulator - return ChatAgent( + return Agent( chat_client=user_chat_client, instructions="Custom user simulator prompt", ) diff --git a/python/packages/lab/tau2/agent_framework_lab_tau2/_message_utils.py b/python/packages/lab/tau2/agent_framework_lab_tau2/_message_utils.py index dccf6e2882..bd8d521e28 100644 --- a/python/packages/lab/tau2/agent_framework_lab_tau2/_message_utils.py +++ b/python/packages/lab/tau2/agent_framework_lab_tau2/_message_utils.py @@ -2,7 +2,7 @@ from typing import Any -from agent_framework._types import ChatMessage, Content +from agent_framework._types import Content, Message from loguru import logger @@ -11,7 +11,7 @@ def _get_role_value(role: Any) -> str: return role.value if hasattr(role, "value") else str(role) -def flip_messages(messages: list[ChatMessage]) -> list[ChatMessage]: +def flip_messages(messages: list[Message]) -> list[Message]: """Flip message roles between assistant and user for role-playing scenarios. 
Used in agent simulations where the assistant's messages become user inputs @@ -30,7 +30,7 @@ def filter_out_function_calls(messages: list[Content]) -> list[Content]: # Flip assistant to user contents = filter_out_function_calls(msg.contents) if contents: - flipped_msg = ChatMessage( + flipped_msg = Message( role="user", # The function calls will cause 400 when role is user contents=contents, @@ -40,7 +40,7 @@ def filter_out_function_calls(messages: list[Content]) -> list[Content]: flipped_messages.append(flipped_msg) elif role_value == "user": # Flip user to assistant - flipped_msg = ChatMessage( + flipped_msg = Message( role="assistant", contents=msg.contents, author_name=msg.author_name, message_id=msg.message_id ) flipped_messages.append(flipped_msg) @@ -53,7 +53,7 @@ def filter_out_function_calls(messages: list[Content]) -> list[Content]: return flipped_messages -def log_messages(messages: list[ChatMessage]) -> None: +def log_messages(messages: list[Message]) -> None: """Log messages with colored output based on role and content type. 
Provides visual debugging by color-coding different message roles and diff --git a/python/packages/lab/tau2/agent_framework_lab_tau2/_sliding_window.py b/python/packages/lab/tau2/agent_framework_lab_tau2/_sliding_window.py index 20a3a2fe27..ad4328ff21 100644 --- a/python/packages/lab/tau2/agent_framework_lab_tau2/_sliding_window.py +++ b/python/packages/lab/tau2/agent_framework_lab_tau2/_sliding_window.py @@ -5,7 +5,7 @@ from typing import Any import tiktoken -from agent_framework import ChatMessage, ChatMessageStore +from agent_framework import ChatMessageStore, Message from loguru import logger @@ -19,7 +19,7 @@ class SlidingWindowChatMessageStore(ChatMessageStore): def __init__( self, - messages: Sequence[ChatMessage] | None = None, + messages: Sequence[Message] | None = None, max_tokens: int = 3800, system_message: str | None = None, tool_definitions: Any | None = None, @@ -32,17 +32,17 @@ def __init__( # An estimation based on a commonly used vocab table self.encoding = tiktoken.get_encoding("o200k_base") - async def add_messages(self, messages: Sequence[ChatMessage]) -> None: + async def add_messages(self, messages: Sequence[Message]) -> None: await super().add_messages(messages) self.truncated_messages = self.messages.copy() self.truncate_messages() - async def list_messages(self) -> list[ChatMessage]: + async def list_messages(self) -> list[Message]: """Get the current list of messages, which may be truncated.""" return self.truncated_messages - async def list_all_messages(self) -> list[ChatMessage]: + async def list_all_messages(self) -> list[Message]: """Get all messages from the store including the truncated ones.""" return self.messages diff --git a/python/packages/lab/tau2/agent_framework_lab_tau2/_tau2_utils.py b/python/packages/lab/tau2/agent_framework_lab_tau2/_tau2_utils.py index 647dd8884a..b785eae6d7 100644 --- a/python/packages/lab/tau2/agent_framework_lab_tau2/_tau2_utils.py +++ b/python/packages/lab/tau2/agent_framework_lab_tau2/_tau2_utils.py 
@@ -7,17 +7,19 @@ import numpy as np from agent_framework._tools import FunctionTool -from agent_framework._types import ChatMessage +from agent_framework._types import Message from loguru import logger from pydantic import BaseModel from tau2.data_model.message import ( # type: ignore[import-untyped] AssistantMessage, - Message, SystemMessage, ToolCall, ToolMessage, UserMessage, ) +from tau2.data_model.message import ( + Message as Tau2Message, +) from tau2.data_model.tasks import EnvFunctionCall, InitializationData # type: ignore[import-untyped] from tau2.environment.environment import Environment # type: ignore[import-untyped] from tau2.environment.tool import Tool # type: ignore[import-untyped] @@ -45,7 +47,7 @@ def wrapped_func(**kwargs: Any) -> Any: ) -def convert_agent_framework_messages_to_tau2_messages(messages: list[ChatMessage]) -> list[Message]: +def convert_agent_framework_messages_to_tau2_messages(messages: list[Message]) -> list[Tau2Message]: """Convert agent framework ChatMessages to tau2 Message objects. Handles role mapping, text extraction, function calls, and function results. 
@@ -119,13 +121,13 @@ def set_state( self: Any, initialization_data: InitializationData | None, initialization_actions: list[EnvFunctionCall] | None, - message_history: list[Message], + message_history: list[Tau2Message], ) -> None: if self.solo_mode and any(isinstance(message, UserMessage) for message in message_history): raise ValueError("User messages are not allowed in solo mode") def get_actions_from_messages( - messages: list[Message], + messages: list[Tau2Message], ) -> list[tuple[ToolCall, ToolMessage]]: """Get the actions from the messages.""" messages = deepcopy(messages)[::-1] diff --git a/python/packages/lab/tau2/agent_framework_lab_tau2/runner.py b/python/packages/lab/tau2/agent_framework_lab_tau2/runner.py index 326aaf0748..7cb161ca66 100644 --- a/python/packages/lab/tau2/agent_framework_lab_tau2/runner.py +++ b/python/packages/lab/tau2/agent_framework_lab_tau2/runner.py @@ -6,14 +6,14 @@ from typing import cast from agent_framework import ( + Agent, AgentExecutor, AgentExecutorRequest, AgentExecutorResponse, AgentResponse, - ChatAgent, - ChatClientProtocol, - ChatMessage, FunctionExecutor, + Message, + SupportsChatGetResponse, Workflow, WorkflowBuilder, WorkflowContext, @@ -67,10 +67,10 @@ class TaskRunner: # State tracking step_count: int - full_conversation: list[ChatMessage] + full_conversation: list[Message] termination_reason: TerminationReason | None full_reward_info: RewardInfo | None - _final_user_message: list[ChatMessage] | None + _final_user_message: list[Message] | None _assistant_executor: AgentExecutor | None _user_executor: AgentExecutor | None @@ -159,7 +159,7 @@ def _is_user_stop(self, text: str) -> bool: """Check if user wants to stop the conversation.""" return STOP in text or TRANSFER in text or OUT_OF_SCOPE in text - def assistant_agent(self, assistant_chat_client: ChatClientProtocol) -> ChatAgent: + def assistant_agent(self, assistant_chat_client: SupportsChatGetResponse) -> Agent: """Create an assistant agent. 
Users can override this method to provide a custom assistant agent. @@ -196,7 +196,7 @@ def assistant_agent(self, assistant_chat_client: ChatClientProtocol) -> ChatAgen # - Access to all domain tools (booking, cancellation, etc.) # - Sliding window memory to handle long conversations within token limits # - Temperature-controlled response generation - return ChatAgent( + return Agent( chat_client=assistant_chat_client, instructions=assistant_system_prompt, tools=tools, @@ -208,7 +208,7 @@ def assistant_agent(self, assistant_chat_client: ChatClientProtocol) -> ChatAgen ), ) - def user_simulator(self, user_simuator_chat_client: ChatClientProtocol, task: Task) -> ChatAgent: + def user_simulator(self, user_simuator_chat_client: SupportsChatGetResponse, task: Task) -> Agent: """Create a user simulator agent. Users can override this method to provide a custom user simulator agent. @@ -230,7 +230,7 @@ def user_simulator(self, user_simuator_chat_client: ChatClientProtocol, task: Ta {task.user_scenario.instructions} """ - return ChatAgent( + return Agent( chat_client=user_simuator_chat_client, instructions=user_sim_system_prompt, temperature=0.0, @@ -268,7 +268,7 @@ async def conversation_orchestrator( target_id=USER_SIMULATOR_ID if is_from_agent else ASSISTANT_AGENT_ID, ) - def build_conversation_workflow(self, assistant_agent: ChatAgent, user_simulator_agent: ChatAgent) -> Workflow: + def build_conversation_workflow(self, assistant_agent: Agent, user_simulator_agent: Agent) -> Workflow: """Build the conversation workflow. Users can override this method to provide a custom conversation workflow. 
@@ -304,9 +304,9 @@ def build_conversation_workflow(self, assistant_agent: ChatAgent, user_simulator async def run( self, task: Task, - assistant_chat_client: ChatClientProtocol, - user_simulator_chat_client: ChatClientProtocol, - ) -> list[ChatMessage]: + assistant_chat_client: SupportsChatGetResponse, + user_simulator_chat_client: SupportsChatGetResponse, + ) -> list[Message]: """Run a tau2 task using workflow-based agent orchestration. This method orchestrates a complex multi-agent simulation: @@ -323,7 +323,7 @@ async def run( user_simulator_chat_client: LLM client for the user simulator Returns: - Complete conversation history as ChatMessage list for evaluation + Complete conversation history as Message list for evaluation """ logger.info(f"Starting workflow agent for task {task.id}: {task.description.purpose}") # type: ignore[unused-ignore] logger.info(f"Assistant chat client: {assistant_chat_client}") @@ -340,11 +340,11 @@ async def run( # Matches tau2's expected conversation start pattern logger.info(f"Starting workflow with hardcoded greeting: '{DEFAULT_FIRST_AGENT_MESSAGE}'") - first_message = ChatMessage(role="assistant", text=DEFAULT_FIRST_AGENT_MESSAGE) + first_message = Message(role="assistant", text=DEFAULT_FIRST_AGENT_MESSAGE) initial_greeting = AgentExecutorResponse( executor_id=ASSISTANT_AGENT_ID, agent_response=AgentResponse(messages=[first_message]), - full_conversation=[ChatMessage(role="assistant", text=DEFAULT_FIRST_AGENT_MESSAGE)], + full_conversation=[Message(role="assistant", text=DEFAULT_FIRST_AGENT_MESSAGE)], ) # STEP 4: Execute the workflow and collect results @@ -371,7 +371,7 @@ async def run( return full_conversation def evaluate( - self, task_input: Task, conversation: list[ChatMessage], termination_reason: TerminationReason | None + self, task_input: Task, conversation: list[Message], termination_reason: TerminationReason | None ) -> float: """Evaluate agent performance using tau2's comprehensive evaluation system. 
diff --git a/python/packages/lab/tau2/tests/test_message_utils.py b/python/packages/lab/tau2/tests/test_message_utils.py index 7bee8bc9be..8908140f94 100644 --- a/python/packages/lab/tau2/tests/test_message_utils.py +++ b/python/packages/lab/tau2/tests/test_message_utils.py @@ -2,14 +2,14 @@ from unittest.mock import patch -from agent_framework._types import ChatMessage, Content +from agent_framework._types import Content, Message from agent_framework_lab_tau2._message_utils import flip_messages, log_messages def test_flip_messages_user_to_assistant(): """Test flipping user message to assistant.""" messages = [ - ChatMessage( + Message( role="user", contents=[Content.from_text(text="Hello assistant")], author_name="User1", @@ -29,7 +29,7 @@ def test_flip_messages_user_to_assistant(): def test_flip_messages_assistant_to_user(): """Test flipping assistant message to user.""" messages = [ - ChatMessage( + Message( role="assistant", contents=[Content.from_text(text="Hello user")], author_name="Assistant1", @@ -51,7 +51,7 @@ def test_flip_messages_assistant_with_function_calls_filtered(): function_call = Content.from_function_call(call_id="call_123", name="test_function", arguments={"param": "value"}) messages = [ - ChatMessage( + Message( role="assistant", contents=[ Content.from_text(text="I'll call a function"), @@ -78,7 +78,7 @@ def test_flip_messages_assistant_with_only_function_calls_skipped(): function_call = Content.from_function_call(call_id="call_456", name="another_function", arguments={"key": "value"}) messages = [ - ChatMessage(role="assistant", contents=[function_call], message_id="msg_004") # Only function call, no text + Message(role="assistant", contents=[function_call], message_id="msg_004") # Only function call, no text ] flipped = flip_messages(messages) @@ -91,7 +91,7 @@ def test_flip_messages_tool_messages_skipped(): """Test that tool messages are skipped.""" function_result = Content.from_function_result(call_id="call_789", result={"success": 
True}) - messages = [ChatMessage(role="tool", contents=[function_result])] + messages = [Message(role="tool", contents=[function_result])] flipped = flip_messages(messages) @@ -101,9 +101,7 @@ def test_flip_messages_tool_messages_skipped(): def test_flip_messages_system_messages_preserved(): """Test that system messages are preserved as-is.""" - messages = [ - ChatMessage(role="system", contents=[Content.from_text(text="System instruction")], message_id="sys_001") - ] + messages = [Message(role="system", contents=[Content.from_text(text="System instruction")], message_id="sys_001")] flipped = flip_messages(messages) @@ -120,11 +118,11 @@ def test_flip_messages_mixed_conversation(): function_result = Content.from_function_result(call_id="call_mixed", result="function result") messages = [ - ChatMessage(role="system", contents=[Content.from_text(text="System prompt")]), - ChatMessage(role="user", contents=[Content.from_text(text="User question")]), - ChatMessage(role="assistant", contents=[Content.from_text(text="Assistant response"), function_call]), - ChatMessage(role="tool", contents=[function_result]), - ChatMessage(role="assistant", contents=[Content.from_text(text="Final response")]), + Message(role="system", contents=[Content.from_text(text="System prompt")]), + Message(role="user", contents=[Content.from_text(text="User question")]), + Message(role="assistant", contents=[Content.from_text(text="Assistant response"), function_call]), + Message(role="tool", contents=[function_result]), + Message(role="assistant", contents=[Content.from_text(text="Final response")]), ] flipped = flip_messages(messages) @@ -159,7 +157,7 @@ def test_flip_messages_empty_list(): def test_flip_messages_preserves_metadata(): """Test that message metadata is preserved during flipping.""" messages = [ - ChatMessage( + Message( role="user", contents=[Content.from_text(text="Test message")], author_name="TestUser", @@ -178,8 +176,8 @@ def test_flip_messages_preserves_metadata(): def 
test_log_messages_text_content(mock_logger): """Test logging messages with text content.""" messages = [ - ChatMessage(role="user", contents=[Content.from_text(text="Hello")]), - ChatMessage(role="assistant", contents=[Content.from_text(text="Hi there!")]), + Message(role="user", contents=[Content.from_text(text="Hello")]), + Message(role="assistant", contents=[Content.from_text(text="Hi there!")]), ] log_messages(messages) @@ -193,7 +191,7 @@ def test_log_messages_function_call(mock_logger): """Test logging messages with function calls.""" function_call = Content.from_function_call(call_id="call_log", name="log_function", arguments={"param": "value"}) - messages = [ChatMessage(role="assistant", contents=[function_call])] + messages = [Message(role="assistant", contents=[function_call])] log_messages(messages) @@ -209,7 +207,7 @@ def test_log_messages_function_result(mock_logger): """Test logging messages with function results.""" function_result = Content.from_function_result(call_id="call_result", result="success") - messages = [ChatMessage(role="tool", contents=[function_result])] + messages = [Message(role="tool", contents=[function_result])] log_messages(messages) @@ -223,10 +221,10 @@ def test_log_messages_function_result(mock_logger): def test_log_messages_different_roles(mock_logger): """Test logging messages with different roles get different colors.""" messages = [ - ChatMessage(role="system", contents=[Content.from_text(text="System")]), - ChatMessage(role="user", contents=[Content.from_text(text="User")]), - ChatMessage(role="assistant", contents=[Content.from_text(text="Assistant")]), - ChatMessage(role="tool", contents=[Content.from_text(text="Tool")]), + Message(role="system", contents=[Content.from_text(text="System")]), + Message(role="user", contents=[Content.from_text(text="User")]), + Message(role="assistant", contents=[Content.from_text(text="Assistant")]), + Message(role="tool", contents=[Content.from_text(text="Tool")]), ] 
log_messages(messages) @@ -250,7 +248,7 @@ def test_log_messages_different_roles(mock_logger): @patch("agent_framework_lab_tau2._message_utils.logger") def test_log_messages_escapes_html(mock_logger): """Test that HTML-like characters are properly escaped in log output.""" - messages = [ChatMessage(role="user", contents=[Content.from_text(text="Message with content")])] + messages = [Message(role="user", contents=[Content.from_text(text="Message with content")])] log_messages(messages) @@ -266,7 +264,7 @@ def test_log_messages_mixed_content_types(mock_logger): function_call = Content.from_function_call(call_id="mixed_call", name="mixed_function", arguments={"key": "value"}) messages = [ - ChatMessage( + Message( role="assistant", contents=[Content.from_text(text="I'll call a function"), function_call, Content.from_text(text="Done!")], ) diff --git a/python/packages/lab/tau2/tests/test_sliding_window.py b/python/packages/lab/tau2/tests/test_sliding_window.py index 706bbf75c9..c991f5b568 100644 --- a/python/packages/lab/tau2/tests/test_sliding_window.py +++ b/python/packages/lab/tau2/tests/test_sliding_window.py @@ -4,7 +4,7 @@ from unittest.mock import patch -from agent_framework._types import ChatMessage, Content +from agent_framework._types import Content, Message from agent_framework_lab_tau2._sliding_window import SlidingWindowChatMessageStore @@ -36,8 +36,8 @@ def test_initialization_with_parameters(): def test_initialization_with_messages(): """Test initializing with existing messages.""" messages = [ - ChatMessage(role="user", contents=[Content.from_text(text="Hello")]), - ChatMessage(role="assistant", contents=[Content.from_text(text="Hi there!")]), + Message(role="user", contents=[Content.from_text(text="Hello")]), + Message(role="assistant", contents=[Content.from_text(text="Hi there!")]), ] sliding_window = SlidingWindowChatMessageStore(messages=messages, max_tokens=1000) @@ -51,8 +51,8 @@ async def test_add_messages_simple(): sliding_window = 
SlidingWindowChatMessageStore(max_tokens=10000) # Large limit new_messages = [ - ChatMessage(role="user", contents=[Content.from_text(text="What's the weather?")]), - ChatMessage(role="assistant", contents=[Content.from_text(text="I can help with that.")]), + Message(role="user", contents=[Content.from_text(text="What's the weather?")]), + Message(role="assistant", contents=[Content.from_text(text="I can help with that.")]), ] await sliding_window.add_messages(new_messages) @@ -69,7 +69,7 @@ async def test_list_all_messages_vs_list_messages(): # Add many messages to trigger truncation messages = [ - ChatMessage(role="user", contents=[Content.from_text(text=f"Message {i} with some content")]) for i in range(10) + Message(role="user", contents=[Content.from_text(text=f"Message {i} with some content")]) for i in range(10) ] await sliding_window.add_messages(messages) @@ -87,7 +87,7 @@ async def test_list_all_messages_vs_list_messages(): def test_get_token_count_basic(): """Test basic token counting.""" sliding_window = SlidingWindowChatMessageStore(max_tokens=1000) - sliding_window.truncated_messages = [ChatMessage(role="user", contents=[Content.from_text(text="Hello")])] + sliding_window.truncated_messages = [Message(role="user", contents=[Content.from_text(text="Hello")])] token_count = sliding_window.get_token_count() @@ -104,7 +104,7 @@ def test_get_token_count_with_system_message(): token_count_empty = sliding_window.get_token_count() # Add a message - sliding_window.truncated_messages = [ChatMessage(role="user", contents=[Content.from_text(text="Hello")])] + sliding_window.truncated_messages = [Message(role="user", contents=[Content.from_text(text="Hello")])] token_count_with_message = sliding_window.get_token_count() # With message should be more tokens @@ -117,7 +117,7 @@ def test_get_token_count_function_call(): function_call = Content.from_function_call(call_id="call_123", name="test_function", arguments={"param": "value"}) sliding_window = 
SlidingWindowChatMessageStore(max_tokens=1000) - sliding_window.truncated_messages = [ChatMessage(role="assistant", contents=[function_call])] + sliding_window.truncated_messages = [Message(role="assistant", contents=[function_call])] token_count = sliding_window.get_token_count() assert token_count > 0 @@ -128,7 +128,7 @@ def test_get_token_count_function_result(): function_result = Content.from_function_result(call_id="call_123", result={"success": True, "data": "result"}) sliding_window = SlidingWindowChatMessageStore(max_tokens=1000) - sliding_window.truncated_messages = [ChatMessage(role="tool", contents=[function_result])] + sliding_window.truncated_messages = [Message(role="tool", contents=[function_result])] token_count = sliding_window.get_token_count() assert token_count > 0 @@ -141,17 +141,17 @@ def test_truncate_messages_removes_old_messages(mock_logger): # Create messages that will exceed the limit messages = [ - ChatMessage( + Message( role="user", contents=[Content.from_text(text="This is a very long message that should exceed the token limit")], ), - ChatMessage( + Message( role="assistant", contents=[ Content.from_text(text="This is another very long message that should also exceed the token limit") ], ), - ChatMessage(role="user", contents=[Content.from_text(text="Short msg")]), + Message(role="user", contents=[Content.from_text(text="Short msg")]), ] sliding_window.truncated_messages = messages.copy() @@ -170,10 +170,8 @@ def test_truncate_messages_removes_leading_tool_messages(mock_logger): sliding_window = SlidingWindowChatMessageStore(max_tokens=10000) # Large limit # Create messages starting with tool message - tool_message = ChatMessage( - role="tool", contents=[Content.from_function_result(call_id="call_123", result="result")] - ) - user_message = ChatMessage(role="user", contents=[Content.from_text(text="Hello")]) + tool_message = Message(role="tool", contents=[Content.from_function_result(call_id="call_123", result="result")]) + 
user_message = Message(role="user", contents=[Content.from_text(text="Hello")]) sliding_window.truncated_messages = [tool_message, user_message] sliding_window.truncate_messages() @@ -231,13 +229,13 @@ async def test_real_world_scenario(): # Simulate a conversation conversation = [ - ChatMessage(role="user", contents=[Content.from_text(text="Hello, how are you?")]), - ChatMessage( + Message(role="user", contents=[Content.from_text(text="Hello, how are you?")]), + Message( role="assistant", contents=[Content.from_text(text="I'm doing well, thank you! How can I help you today?")], ), - ChatMessage(role="user", contents=[Content.from_text(text="Can you tell me about the weather?")]), - ChatMessage( + Message(role="user", contents=[Content.from_text(text="Can you tell me about the weather?")]), + Message( role="assistant", contents=[ Content.from_text( @@ -246,8 +244,8 @@ async def test_real_world_scenario(): ) ], ), - ChatMessage(role="user", contents=[Content.from_text(text="What about telling me a joke instead?")]), - ChatMessage( + Message(role="user", contents=[Content.from_text(text="What about telling me a joke instead?")]), + Message( role="assistant", contents=[ Content.from_text(text="Sure! Why don't scientists trust atoms? 
Because they make up everything!") diff --git a/python/packages/lab/tau2/tests/test_tau2_utils.py b/python/packages/lab/tau2/tests/test_tau2_utils.py index dff8a56e5c..f463c13ec8 100644 --- a/python/packages/lab/tau2/tests/test_tau2_utils.py +++ b/python/packages/lab/tau2/tests/test_tau2_utils.py @@ -6,7 +6,7 @@ from pathlib import Path import pytest -from agent_framework import ChatMessage, Content, FunctionTool +from agent_framework import Content, FunctionTool, Message from agent_framework_lab_tau2._tau2_utils import ( convert_agent_framework_messages_to_tau2_messages, convert_tau2_tool_to_function_tool, @@ -91,7 +91,7 @@ def test_convert_tau2_tool_to_function_tool_multiple_tools(tau2_airline_environm def test_convert_agent_framework_messages_to_tau2_messages_system(): """Test converting system message.""" - messages = [ChatMessage(role="system", contents=[Content.from_text(text="System instruction")])] + messages = [Message(role="system", contents=[Content.from_text(text="System instruction")])] tau2_messages = convert_agent_framework_messages_to_tau2_messages(messages) @@ -103,7 +103,7 @@ def test_convert_agent_framework_messages_to_tau2_messages_system(): def test_convert_agent_framework_messages_to_tau2_messages_user(): """Test converting user message.""" - messages = [ChatMessage(role="user", contents=[Content.from_text(text="Hello assistant")])] + messages = [Message(role="user", contents=[Content.from_text(text="Hello assistant")])] tau2_messages = convert_agent_framework_messages_to_tau2_messages(messages) @@ -116,7 +116,7 @@ def test_convert_agent_framework_messages_to_tau2_messages_user(): def test_convert_agent_framework_messages_to_tau2_messages_assistant(): """Test converting assistant message.""" - messages = [ChatMessage(role="assistant", contents=[Content.from_text(text="Hello user")])] + messages = [Message(role="assistant", contents=[Content.from_text(text="Hello user")])] tau2_messages = 
convert_agent_framework_messages_to_tau2_messages(messages) @@ -131,7 +131,7 @@ def test_convert_agent_framework_messages_to_tau2_messages_with_function_call(): """Test converting message with function call.""" function_call = Content.from_function_call(call_id="call_123", name="test_function", arguments={"param": "value"}) - messages = [ChatMessage(role="assistant", contents=[Content.from_text(text="I'll call a function"), function_call])] + messages = [Message(role="assistant", contents=[Content.from_text(text="I'll call a function"), function_call])] tau2_messages = convert_agent_framework_messages_to_tau2_messages(messages) @@ -153,7 +153,7 @@ def test_convert_agent_framework_messages_to_tau2_messages_with_function_result( """Test converting message with function result.""" function_result = Content.from_function_result(call_id="call_123", result={"success": True, "data": "result data"}) - messages = [ChatMessage(role="tool", contents=[function_result])] + messages = [Message(role="tool", contents=[function_result])] tau2_messages = convert_agent_framework_messages_to_tau2_messages(messages) @@ -173,7 +173,7 @@ def test_convert_agent_framework_messages_to_tau2_messages_with_error(): call_id="call_456", result="Error occurred", exception=Exception("Test error") ) - messages = [ChatMessage(role="tool", contents=[function_result])] + messages = [Message(role="tool", contents=[function_result])] tau2_messages = convert_agent_framework_messages_to_tau2_messages(messages) @@ -185,7 +185,7 @@ def test_convert_agent_framework_messages_to_tau2_messages_with_error(): def test_convert_agent_framework_messages_to_tau2_messages_multiple_text_contents(): """Test converting message with multiple text contents.""" messages = [ - ChatMessage(role="user", contents=[Content.from_text(text="First part"), Content.from_text(text="Second part")]) + Message(role="user", contents=[Content.from_text(text="First part"), Content.from_text(text="Second part")]) ] tau2_messages = 
convert_agent_framework_messages_to_tau2_messages(messages) @@ -202,11 +202,11 @@ def test_convert_agent_framework_messages_to_tau2_messages_complex_scenario(): function_result = Content.from_function_result(call_id="call_789", result={"output": "tool result"}) messages = [ - ChatMessage(role="system", contents=[Content.from_text(text="System prompt")]), - ChatMessage(role="user", contents=[Content.from_text(text="User request")]), - ChatMessage(role="assistant", contents=[Content.from_text(text="I'll help you"), function_call]), - ChatMessage(role="tool", contents=[function_result]), - ChatMessage(role="assistant", contents=[Content.from_text(text="Based on the result...")]), + Message(role="system", contents=[Content.from_text(text="System prompt")]), + Message(role="user", contents=[Content.from_text(text="User request")]), + Message(role="assistant", contents=[Content.from_text(text="I'll help you"), function_call]), + Message(role="tool", contents=[function_result]), + Message(role="assistant", contents=[Content.from_text(text="Based on the result...")]), ] tau2_messages = convert_agent_framework_messages_to_tau2_messages(messages) diff --git a/python/packages/mem0/AGENTS.md b/python/packages/mem0/AGENTS.md index 7c4ebaba2a..3a17e7b137 100644 --- a/python/packages/mem0/AGENTS.md +++ b/python/packages/mem0/AGENTS.md @@ -12,7 +12,7 @@ Integration with Mem0 for agent memory management. 
from agent_framework.mem0 import Mem0Provider provider = Mem0Provider(api_key="your-key") -agent = ChatAgent(..., context_provider=provider) +agent = Agent(..., context_provider=provider) ``` ## Import Path diff --git a/python/packages/mem0/agent_framework_mem0/_provider.py b/python/packages/mem0/agent_framework_mem0/_provider.py index 0dbad13134..d2ba0e7832 100644 --- a/python/packages/mem0/agent_framework_mem0/_provider.py +++ b/python/packages/mem0/agent_framework_mem0/_provider.py @@ -7,7 +7,7 @@ from contextlib import AbstractAsyncContextManager from typing import Any -from agent_framework import ChatMessage, Context, ContextProvider +from agent_framework import Context, ContextProvider, Message from agent_framework.exceptions import ServiceInitializationError from mem0 import AsyncMemory, AsyncMemoryClient @@ -103,19 +103,17 @@ async def thread_created(self, thread_id: str | None = None) -> None: @override async def invoked( self, - request_messages: ChatMessage | Sequence[ChatMessage], - response_messages: ChatMessage | Sequence[ChatMessage] | None = None, + request_messages: Message | Sequence[Message], + response_messages: Message | Sequence[Message] | None = None, invoke_exception: Exception | None = None, **kwargs: Any, ) -> None: self._validate_filters() - request_messages_list = ( - [request_messages] if isinstance(request_messages, ChatMessage) else list(request_messages) - ) + request_messages_list = [request_messages] if isinstance(request_messages, Message) else list(request_messages) response_messages_list = ( [response_messages] - if isinstance(response_messages, ChatMessage) + if isinstance(response_messages, Message) else list(response_messages) if response_messages else [] @@ -142,7 +140,7 @@ def get_role_value(role: Any) -> str: ) @override - async def invoking(self, messages: ChatMessage | MutableSequence[ChatMessage], **kwargs: Any) -> Context: + async def invoking(self, messages: Message | MutableSequence[Message], **kwargs: Any) -> 
Context: """Called before invoking the AI model to provide context. Args: @@ -155,7 +153,7 @@ async def invoking(self, messages: ChatMessage | MutableSequence[ChatMessage], * Context: Context object containing instructions with memories. """ self._validate_filters() - messages_list = [messages] if isinstance(messages, ChatMessage) else list(messages) + messages_list = [messages] if isinstance(messages, Message) else list(messages) input_text = "\n".join(msg.text for msg in messages_list if msg and msg.text and msg.text.strip()) # Validate input text is not empty before searching (possible for function approval responses) @@ -182,7 +180,7 @@ async def invoking(self, messages: ChatMessage | MutableSequence[ChatMessage], * line_separated_memories = "\n".join(memory.get("memory", "") for memory in memories) return Context( - messages=[ChatMessage(role="user", text=f"{self.context_prompt}\n{line_separated_memories}")] + messages=[Message(role="user", text=f"{self.context_prompt}\n{line_separated_memories}")] if line_separated_memories else None ) diff --git a/python/packages/mem0/tests/test_mem0_context_provider.py b/python/packages/mem0/tests/test_mem0_context_provider.py index 432468fe3f..f01d12053a 100644 --- a/python/packages/mem0/tests/test_mem0_context_provider.py +++ b/python/packages/mem0/tests/test_mem0_context_provider.py @@ -7,7 +7,7 @@ from unittest.mock import AsyncMock import pytest -from agent_framework import ChatMessage, Content, Context +from agent_framework import Content, Context, Message from agent_framework.exceptions import ServiceInitializationError from agent_framework.mem0 import Mem0Provider @@ -33,12 +33,12 @@ def mock_mem0_client() -> AsyncMock: @pytest.fixture -def sample_messages() -> list[ChatMessage]: +def sample_messages() -> list[Message]: """Create sample chat messages for testing.""" return [ - ChatMessage(role="user", text="Hello, how are you?"), - ChatMessage(role="assistant", text="I'm doing well, thank you!"), - 
ChatMessage(role="system", text="You are a helpful assistant"), + Message(role="user", text="Hello, how are you?"), + Message(role="assistant", text="I'm doing well, thank you!"), + Message(role="system", text="You are a helpful assistant"), ] @@ -157,7 +157,7 @@ class TestMem0ProviderMessagesAdding: async def test_messages_adding_fails_without_filters(self, mock_mem0_client: AsyncMock) -> None: """Test that invoked fails when no filters are provided.""" provider = Mem0Provider(mem0_client=mock_mem0_client) - message = ChatMessage(role="user", text="Hello!") + message = Message(role="user", text="Hello!") with pytest.raises(ServiceInitializationError) as exc_info: await provider.invoked(message) @@ -167,7 +167,7 @@ async def test_messages_adding_fails_without_filters(self, mock_mem0_client: Asy async def test_messages_adding_single_message(self, mock_mem0_client: AsyncMock) -> None: """Test adding a single message.""" provider = Mem0Provider(user_id="user123", mem0_client=mock_mem0_client) - message = ChatMessage(role="user", text="Hello!") + message = Message(role="user", text="Hello!") await provider.invoked(message) @@ -177,7 +177,7 @@ async def test_messages_adding_single_message(self, mock_mem0_client: AsyncMock) assert call_args.kwargs["user_id"] == "user123" async def test_messages_adding_multiple_messages( - self, mock_mem0_client: AsyncMock, sample_messages: list[ChatMessage] + self, mock_mem0_client: AsyncMock, sample_messages: list[Message] ) -> None: """Test adding multiple messages.""" provider = Mem0Provider(user_id="user123", mem0_client=mock_mem0_client) @@ -194,7 +194,7 @@ async def test_messages_adding_multiple_messages( assert call_args.kwargs["messages"] == expected_messages async def test_messages_adding_with_agent_id( - self, mock_mem0_client: AsyncMock, sample_messages: list[ChatMessage] + self, mock_mem0_client: AsyncMock, sample_messages: list[Message] ) -> None: """Test adding messages with agent_id.""" provider = 
Mem0Provider(agent_id="agent123", mem0_client=mock_mem0_client) @@ -206,7 +206,7 @@ async def test_messages_adding_with_agent_id( assert call_args.kwargs["user_id"] is None async def test_messages_adding_with_application_id( - self, mock_mem0_client: AsyncMock, sample_messages: list[ChatMessage] + self, mock_mem0_client: AsyncMock, sample_messages: list[Message] ) -> None: """Test adding messages with application_id in metadata.""" provider = Mem0Provider(user_id="user123", application_id="app123", mem0_client=mock_mem0_client) @@ -217,7 +217,7 @@ async def test_messages_adding_with_application_id( assert call_args.kwargs["metadata"] == {"application_id": "app123"} async def test_messages_adding_with_scope_to_per_operation_thread_id( - self, mock_mem0_client: AsyncMock, sample_messages: list[ChatMessage] + self, mock_mem0_client: AsyncMock, sample_messages: list[Message] ) -> None: """Test adding messages with scope_to_per_operation_thread_id enabled.""" provider = Mem0Provider( @@ -235,7 +235,7 @@ async def test_messages_adding_with_scope_to_per_operation_thread_id( assert call_args.kwargs["run_id"] == "operation_thread" async def test_messages_adding_without_scope_uses_base_thread_id( - self, mock_mem0_client: AsyncMock, sample_messages: list[ChatMessage] + self, mock_mem0_client: AsyncMock, sample_messages: list[Message] ) -> None: """Test adding messages without scope uses base thread_id.""" provider = Mem0Provider( @@ -254,9 +254,9 @@ async def test_messages_adding_filters_empty_messages(self, mock_mem0_client: As """Test that empty or invalid messages are filtered out.""" provider = Mem0Provider(user_id="user123", mem0_client=mock_mem0_client) messages = [ - ChatMessage(role="user", text=""), # Empty text - ChatMessage(role="user", text=" "), # Whitespace only - ChatMessage(role="user", text="Valid message"), + Message(role="user", text=""), # Empty text + Message(role="user", text=" "), # Whitespace only + Message(role="user", text="Valid message"), ] await 
provider.invoked(messages) @@ -269,8 +269,8 @@ async def test_messages_adding_skips_when_no_valid_messages(self, mock_mem0_clie """Test that mem0 client is not called when no valid messages exist.""" provider = Mem0Provider(user_id="user123", mem0_client=mock_mem0_client) messages = [ - ChatMessage(role="user", text=""), - ChatMessage(role="user", text=" "), + Message(role="user", text=""), + Message(role="user", text=" "), ] await provider.invoked(messages) @@ -284,7 +284,7 @@ class TestMem0ProviderModelInvoking: async def test_model_invoking_fails_without_filters(self, mock_mem0_client: AsyncMock) -> None: """Test that invoking fails when no filters are provided.""" provider = Mem0Provider(mem0_client=mock_mem0_client) - message = ChatMessage(role="user", text="What's the weather?") + message = Message(role="user", text="What's the weather?") with pytest.raises(ServiceInitializationError) as exc_info: await provider.invoking(message) @@ -294,7 +294,7 @@ async def test_model_invoking_fails_without_filters(self, mock_mem0_client: Asyn async def test_model_invoking_single_message(self, mock_mem0_client: AsyncMock) -> None: """Test invoking with a single message.""" provider = Mem0Provider(user_id="user123", mem0_client=mock_mem0_client) - message = ChatMessage(role="user", text="What's the weather?") + message = Message(role="user", text="What's the weather?") # Mock search results mock_mem0_client.search.return_value = [ @@ -319,7 +319,7 @@ async def test_model_invoking_single_message(self, mock_mem0_client: AsyncMock) assert context.messages[0].text == expected_instructions async def test_model_invoking_multiple_messages( - self, mock_mem0_client: AsyncMock, sample_messages: list[ChatMessage] + self, mock_mem0_client: AsyncMock, sample_messages: list[Message] ) -> None: """Test invoking with multiple messages.""" provider = Mem0Provider(user_id="user123", mem0_client=mock_mem0_client) @@ -335,7 +335,7 @@ async def test_model_invoking_multiple_messages( async def 
test_model_invoking_with_agent_id(self, mock_mem0_client: AsyncMock) -> None: """Test invoking with agent_id.""" provider = Mem0Provider(agent_id="agent123", mem0_client=mock_mem0_client) - message = ChatMessage(role="user", text="Hello") + message = Message(role="user", text="Hello") mock_mem0_client.search.return_value = [] @@ -353,7 +353,7 @@ async def test_model_invoking_with_scope_to_per_operation_thread_id(self, mock_m mem0_client=mock_mem0_client, ) provider._per_operation_thread_id = "operation_thread" - message = ChatMessage(role="user", text="Hello") + message = Message(role="user", text="Hello") mock_mem0_client.search.return_value = [] @@ -365,7 +365,7 @@ async def test_model_invoking_with_scope_to_per_operation_thread_id(self, mock_m async def test_model_invoking_no_memories_returns_none_instructions(self, mock_mem0_client: AsyncMock) -> None: """Test that no memories returns context with None instructions.""" provider = Mem0Provider(user_id="user123", mem0_client=mock_mem0_client) - message = ChatMessage(role="user", text="Hello") + message = Message(role="user", text="Hello") mock_mem0_client.search.return_value = [] @@ -381,7 +381,7 @@ async def test_model_invoking_function_approval_response_returns_none_instructio provider = Mem0Provider(user_id="user123", mem0_client=mock_mem0_client) function_call = Content.from_function_call(call_id="1", name="test_func", arguments='{"arg1": "value1"}') - message = ChatMessage( + message = Message( role="user", contents=[ Content.from_function_approval_response( @@ -403,9 +403,9 @@ async def test_model_invoking_filters_empty_message_text(self, mock_mem0_client: """Test that empty message text is filtered out from query.""" provider = Mem0Provider(user_id="user123", mem0_client=mock_mem0_client) messages = [ - ChatMessage(role="user", text=""), - ChatMessage(role="user", text="Valid message"), - ChatMessage(role="user", text=" "), + Message(role="user", text=""), + Message(role="user", text="Valid message"), + 
Message(role="user", text=" "), ] mock_mem0_client.search.return_value = [] @@ -423,7 +423,7 @@ async def test_model_invoking_custom_context_prompt(self, mock_mem0_client: Asyn context_prompt=custom_prompt, mem0_client=mock_mem0_client, ) - message = ChatMessage(role="user", text="Hello") + message = Message(role="user", text="Hello") mock_mem0_client.search.return_value = [{"memory": "Test memory"}] diff --git a/python/packages/ollama/agent_framework_ollama/_chat_client.py b/python/packages/ollama/agent_framework_ollama/_chat_client.py index 8ffba3be3e..d86897f1c6 100644 --- a/python/packages/ollama/agent_framework_ollama/_chat_client.py +++ b/python/packages/ollama/agent_framework_ollama/_chat_client.py @@ -18,7 +18,6 @@ from agent_framework import ( BaseChatClient, ChatAndFunctionMiddlewareTypes, - ChatMessage, ChatMiddlewareLayer, ChatOptions, ChatResponse, @@ -28,6 +27,7 @@ FunctionInvocationLayer, FunctionTool, HostedWebSearchTool, + Message, ResponseStream, ToolProtocol, UsageDetails, @@ -356,7 +356,7 @@ def __init__( def _inner_get_response( self, *, - messages: Sequence[ChatMessage], + messages: Sequence[Message], options: Mapping[str, Any], stream: bool = False, **kwargs: Any, @@ -397,7 +397,7 @@ async def _get_response() -> ChatResponse: return _get_response() - def _prepare_options(self, messages: Sequence[ChatMessage], options: Mapping[str, Any]) -> dict[str, Any]: + def _prepare_options(self, messages: Sequence[Message], options: Mapping[str, Any]) -> dict[str, Any]: # Handle instructions by prepending to messages as system message instructions = options.get("instructions") if instructions: @@ -448,13 +448,13 @@ def _prepare_options(self, messages: Sequence[ChatMessage], options: Mapping[str return run_options - def _prepare_messages_for_ollama(self, messages: Sequence[ChatMessage]) -> list[OllamaMessage]: + def _prepare_messages_for_ollama(self, messages: Sequence[Message]) -> list[OllamaMessage]: ollama_messages = 
[self._prepare_message_for_ollama(msg) for msg in messages] # Flatten the list of lists into a single list return list(chain.from_iterable(ollama_messages)) - def _prepare_message_for_ollama(self, message: ChatMessage) -> list[OllamaMessage]: - message_converters: dict[str, Callable[[ChatMessage], list[OllamaMessage]]] = { + def _prepare_message_for_ollama(self, message: Message) -> list[OllamaMessage]: + message_converters: dict[str, Callable[[Message], list[OllamaMessage]]] = { "system": self._format_system_message, "user": self._format_user_message, "assistant": self._format_assistant_message, @@ -462,10 +462,10 @@ def _prepare_message_for_ollama(self, message: ChatMessage) -> list[OllamaMessag } return message_converters[message.role](message) - def _format_system_message(self, message: ChatMessage) -> list[OllamaMessage]: + def _format_system_message(self, message: Message) -> list[OllamaMessage]: return [OllamaMessage(role="system", content=message.text)] - def _format_user_message(self, message: ChatMessage) -> list[OllamaMessage]: + def _format_user_message(self, message: Message) -> list[OllamaMessage]: if not any(c.type in {"text", "data"} for c in message.contents) and not message.text: raise ServiceInvalidRequestError( "Ollama connector currently only supports user messages with TextContent or DataContent." @@ -483,7 +483,7 @@ def _format_user_message(self, message: ChatMessage) -> list[OllamaMessage]: user_message["images"] = [c.uri.split(",")[1] for c in data_contents if c.uri] return [user_message] - def _format_assistant_message(self, message: ChatMessage) -> list[OllamaMessage]: + def _format_assistant_message(self, message: Message) -> list[OllamaMessage]: text_content = message.text # Ollama shouldn't have encrypted reasoning, so we just process text. 
reasoning_contents = "".join((c.text or "") for c in message.contents if c.type == "text_reasoning") @@ -506,7 +506,7 @@ def _format_assistant_message(self, message: ChatMessage) -> list[OllamaMessage] ] return [assistant_message] - def _format_tool_message(self, message: ChatMessage) -> list[OllamaMessage]: + def _format_tool_message(self, message: Message) -> list[OllamaMessage]: # Ollama does not support multiple tool results in a single message, so we create a separate return [ OllamaMessage(role="tool", content=str(item.result), tool_name=item.call_id) @@ -538,7 +538,7 @@ def _parse_response_from_ollama(self, response: OllamaChatResponse) -> ChatRespo contents = self._parse_contents_from_ollama(response) return ChatResponse( - messages=[ChatMessage(role="assistant", contents=contents)], + messages=[Message(role="assistant", contents=contents)], model_id=response.model, created_at=response.created_at, usage_details=UsageDetails( diff --git a/python/packages/ollama/tests/test_ollama_chat_client.py b/python/packages/ollama/tests/test_ollama_chat_client.py index 3d1f51e4c8..807d5b8eb8 100644 --- a/python/packages/ollama/tests/test_ollama_chat_client.py +++ b/python/packages/ollama/tests/test_ollama_chat_client.py @@ -8,10 +8,10 @@ import pytest from agent_framework import ( BaseChatClient, - ChatMessage, ChatResponseUpdate, Content, HostedWebSearchTool, + Message, chat_middleware, tool, ) @@ -77,7 +77,7 @@ def ollama_unit_test_env(monkeypatch, exclude_list, override_env_param_dict): # @fixture -def chat_history() -> list[ChatMessage]: +def chat_history() -> list[Message]: return [] @@ -244,12 +244,12 @@ async def test_empty_messages() -> None: async def test_cmc( mock_chat: AsyncMock, ollama_unit_test_env: dict[str, str], - chat_history: list[ChatMessage], + chat_history: list[Message], mock_chat_completion_response: AsyncStream[OllamaChatResponse], ) -> None: mock_chat.return_value = mock_chat_completion_response - chat_history.append(ChatMessage(text="hello 
world", role="system")) - chat_history.append(ChatMessage(text="hello world", role="user")) + chat_history.append(Message(text="hello world", role="system")) + chat_history.append(Message(text="hello world", role="user")) ollama_client = OllamaChatClient() result = await ollama_client.get_response(messages=chat_history) @@ -261,11 +261,11 @@ async def test_cmc( async def test_cmc_reasoning( mock_chat: AsyncMock, ollama_unit_test_env: dict[str, str], - chat_history: list[ChatMessage], + chat_history: list[Message], mock_chat_completion_response_reasoning: AsyncStream[OllamaChatResponse], ) -> None: mock_chat.return_value = mock_chat_completion_response_reasoning - chat_history.append(ChatMessage(text="hello world", role="user")) + chat_history.append(Message(text="hello world", role="user")) ollama_client = OllamaChatClient() result = await ollama_client.get_response(messages=chat_history) @@ -278,11 +278,11 @@ async def test_cmc_reasoning( async def test_cmc_chat_failure( mock_chat: AsyncMock, ollama_unit_test_env: dict[str, str], - chat_history: list[ChatMessage], + chat_history: list[Message], ) -> None: # Simulate a failure in the Ollama client mock_chat.side_effect = Exception("Connection error") - chat_history.append(ChatMessage(text="hello world", role="user")) + chat_history.append(Message(text="hello world", role="user")) ollama_client = OllamaChatClient() @@ -297,12 +297,12 @@ async def test_cmc_chat_failure( async def test_cmc_streaming( mock_chat: AsyncMock, ollama_unit_test_env: dict[str, str], - chat_history: list[ChatMessage], + chat_history: list[Message], mock_streaming_chat_completion_response: AsyncStream[OllamaChatResponse], ) -> None: mock_chat.return_value = mock_streaming_chat_completion_response - chat_history.append(ChatMessage(text="hello world", role="system")) - chat_history.append(ChatMessage(text="hello world", role="user")) + chat_history.append(Message(text="hello world", role="system")) + chat_history.append(Message(text="hello 
world", role="user")) ollama_client = OllamaChatClient() result = ollama_client.get_response(messages=chat_history, stream=True) @@ -315,11 +315,11 @@ async def test_cmc_streaming( async def test_cmc_streaming_reasoning( mock_chat: AsyncMock, ollama_unit_test_env: dict[str, str], - chat_history: list[ChatMessage], + chat_history: list[Message], mock_streaming_chat_completion_response_reasoning: AsyncStream[OllamaChatResponse], ) -> None: mock_chat.return_value = mock_streaming_chat_completion_response_reasoning - chat_history.append(ChatMessage(text="hello world", role="user")) + chat_history.append(Message(text="hello world", role="user")) ollama_client = OllamaChatClient() result = ollama_client.get_response(messages=chat_history, stream=True) @@ -333,11 +333,11 @@ async def test_cmc_streaming_reasoning( async def test_cmc_streaming_chat_failure( mock_chat: AsyncMock, ollama_unit_test_env: dict[str, str], - chat_history: list[ChatMessage], + chat_history: list[Message], ) -> None: # Simulate a failure in the Ollama client for streaming mock_chat.side_effect = Exception("Streaming connection error") - chat_history.append(ChatMessage(text="hello world", role="user")) + chat_history.append(Message(text="hello world", role="user")) ollama_client = OllamaChatClient() @@ -353,7 +353,7 @@ async def test_cmc_streaming_chat_failure( async def test_cmc_streaming_with_tool_call( mock_chat: AsyncMock, ollama_unit_test_env: dict[str, str], - chat_history: list[ChatMessage], + chat_history: list[Message], mock_streaming_chat_completion_response: AsyncStream[OllamaChatResponse], mock_streaming_chat_completion_tool_call: AsyncStream[OllamaChatResponse], ) -> None: @@ -362,7 +362,7 @@ async def test_cmc_streaming_with_tool_call( mock_streaming_chat_completion_response, ] - chat_history.append(ChatMessage(text="hello world", role="user")) + chat_history.append(Message(text="hello world", role="user")) ollama_client = OllamaChatClient() result = 
ollama_client.get_response(messages=chat_history, stream=True, options={"tools": [hello_world]}) @@ -386,7 +386,7 @@ async def test_cmc_streaming_with_tool_call( async def test_cmc_with_hosted_tool_call( ollama_unit_test_env: dict[str, str], - chat_history: list[ChatMessage], + chat_history: list[Message], ) -> None: with pytest.raises(ServiceInvalidRequestError): additional_properties = { @@ -396,7 +396,7 @@ async def test_cmc_with_hosted_tool_call( } } - chat_history.append(ChatMessage(text="hello world", role="user")) + chat_history.append(Message(text="hello world", role="user")) ollama_client = OllamaChatClient() await ollama_client.get_response( @@ -411,12 +411,12 @@ async def test_cmc_with_hosted_tool_call( async def test_cmc_with_data_content_type( mock_chat: AsyncMock, ollama_unit_test_env: dict[str, str], - chat_history: list[ChatMessage], + chat_history: list[Message], mock_chat_completion_response: OllamaChatResponse, ) -> None: mock_chat.return_value = mock_chat_completion_response chat_history.append( - ChatMessage( + Message( contents=[Content.from_uri(uri="data:image/png;base64,xyz", media_type="image/png")], role="user", ) @@ -432,14 +432,14 @@ async def test_cmc_with_data_content_type( async def test_cmc_with_invalid_data_content_media_type( mock_chat: AsyncMock, ollama_unit_test_env: dict[str, str], - chat_history: list[ChatMessage], + chat_history: list[Message], mock_streaming_chat_completion_response: AsyncStream[OllamaChatResponse], ) -> None: with pytest.raises(ServiceInvalidRequestError): mock_chat.return_value = mock_streaming_chat_completion_response # Remote Uris are not supported by Ollama client chat_history.append( - ChatMessage( + Message( contents=[Content.from_uri(uri="data:audio/mp3;base64,xyz", media_type="audio/mp3")], role="user", ) @@ -455,14 +455,14 @@ async def test_cmc_with_invalid_data_content_media_type( async def test_cmc_with_invalid_content_type( mock_chat: AsyncMock, ollama_unit_test_env: dict[str, str], - 
chat_history: list[ChatMessage], + chat_history: list[Message], mock_chat_completion_response: AsyncStream[OllamaChatResponse], ) -> None: with pytest.raises(ServiceInvalidRequestError): mock_chat.return_value = mock_chat_completion_response # Remote Uris are not supported by Ollama client chat_history.append( - ChatMessage( + Message( contents=[Content.from_uri(uri="http://example.com/image.png", media_type="image/png")], role="user", ) @@ -475,9 +475,9 @@ async def test_cmc_with_invalid_content_type( @skip_if_azure_integration_tests_disabled async def test_cmc_integration_with_tool_call( - chat_history: list[ChatMessage], + chat_history: list[Message], ) -> None: - chat_history.append(ChatMessage(text="Call the hello world function and repeat what it says", role="user")) + chat_history.append(Message(text="Call the hello world function and repeat what it says", role="user")) ollama_client = OllamaChatClient() result = await ollama_client.get_response(messages=chat_history, options={"tools": [hello_world]}) @@ -490,9 +490,9 @@ async def test_cmc_integration_with_tool_call( @skip_if_azure_integration_tests_disabled async def test_cmc_integration_with_chat_completion( - chat_history: list[ChatMessage], + chat_history: list[Message], ) -> None: - chat_history.append(ChatMessage(text="Say Hello World", role="user")) + chat_history.append(Message(text="Say Hello World", role="user")) ollama_client = OllamaChatClient() result = await ollama_client.get_response(messages=chat_history) @@ -502,9 +502,9 @@ async def test_cmc_integration_with_chat_completion( @skip_if_azure_integration_tests_disabled async def test_cmc_streaming_integration_with_tool_call( - chat_history: list[ChatMessage], + chat_history: list[Message], ) -> None: - chat_history.append(ChatMessage(text="Call the hello world function and repeat what it says", role="user")) + chat_history.append(Message(text="Call the hello world function and repeat what it says", role="user")) ollama_client = 
OllamaChatClient() result: AsyncIterable[ChatResponseUpdate] = ollama_client.get_response( @@ -527,9 +527,9 @@ async def test_cmc_streaming_integration_with_tool_call( @skip_if_azure_integration_tests_disabled async def test_cmc_streaming_integration_with_chat_completion( - chat_history: list[ChatMessage], + chat_history: list[Message], ) -> None: - chat_history.append(ChatMessage(text="Say Hello World", role="user")) + chat_history.append(Message(text="Say Hello World", role="user")) ollama_client = OllamaChatClient() result: AsyncIterable[ChatResponseUpdate] = ollama_client.get_response(messages=chat_history, stream=True) diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_base_group_chat_orchestrator.py b/python/packages/orchestrations/agent_framework_orchestrations/_base_group_chat_orchestrator.py index 4d93a3e69b..f01f3700f7 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_base_group_chat_orchestrator.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_base_group_chat_orchestrator.py @@ -12,7 +12,7 @@ from dataclasses import dataclass from typing import Any, ClassVar, TypeAlias -from agent_framework._types import ChatMessage +from agent_framework._types import Message from agent_framework._workflows._agent_executor import AgentExecutor, AgentExecutorRequest, AgentExecutorResponse from agent_framework._workflows._events import WorkflowEvent from agent_framework._workflows._executor import Executor, handler @@ -46,17 +46,17 @@ class GroupChatParticipantMessage: to other participants in the group chat to keep them synchronized. 
""" - messages: list[ChatMessage] + messages: list[Message] @dataclass class GroupChatResponseMessage: """Response envelope emitted by participants back to the orchestrator.""" - message: ChatMessage + message: Message -TerminationCondition: TypeAlias = Callable[[list[ChatMessage]], bool | Awaitable[bool]] +TerminationCondition: TypeAlias = Callable[[list[Message]], bool | Awaitable[bool]] GroupChatWorkflowContextOutT: TypeAlias = AgentExecutorRequest | GroupChatRequestMessage | GroupChatParticipantMessage @@ -167,7 +167,7 @@ def __init__( self._round_index: int = 0 self._participant_registry = participant_registry # Shared conversation state management - self._full_conversation: list[ChatMessage] = [] + self._full_conversation: list[Message] = [] # region Handlers @@ -175,11 +175,11 @@ def __init__( async def handle_str( self, task: str, - ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[ChatMessage]], + ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[Message]], ) -> None: """Handler for string input as workflow entry point. - Wraps the string in a USER role ChatMessage and delegates to _handle_task_message. + Wraps the string in a USER role Message and delegates to _handle_task_message. Args: task: Plain text task description from user @@ -188,32 +188,32 @@ async def handle_str( Usage: workflow.run("Write a blog post about AI agents") """ - await self._handle_messages([ChatMessage(role="user", text=task)], ctx) + await self._handle_messages([Message(role="user", text=task)], ctx) @handler async def handle_message( self, - task: ChatMessage, - ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[ChatMessage]], + task: Message, + ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[Message]], ) -> None: - """Handler for single ChatMessage input as workflow entry point. + """Handler for single Message input as workflow entry point. Wraps the message in a list and delegates to _handle_task_message. 
Args: - task: ChatMessage from user + task: Message from user ctx: Workflow context Usage: - workflow.run(ChatMessage(role="user", text="Write a blog post about AI agents")) + workflow.run(Message(role="user", text="Write a blog post about AI agents")) """ await self._handle_messages([task], ctx) @handler async def handle_messages( self, - task: list[ChatMessage], - ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[ChatMessage]], + task: list[Message], + ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[Message]], ) -> None: """Handler for list of ChatMessages as workflow entry point. @@ -224,19 +224,19 @@ async def handle_messages( ctx: Workflow context Usage: workflow.run([ - ChatMessage(role="user", text="Write a blog post about AI agents"), - ChatMessage(role="user", text="Make it engaging and informative.") + Message(role="user", text="Write a blog post about AI agents"), + Message(role="user", text="Make it engaging and informative.") ]) """ if not task: - raise ValueError("At least one ChatMessage is required to start the group chat workflow.") + raise ValueError("At least one Message is required to start the group chat workflow.") await self._handle_messages(task, ctx) @handler async def handle_participant_response( self, response: AgentExecutorResponse | GroupChatResponseMessage, - ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[ChatMessage]], + ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[Message]], ) -> None: """Handler for participant responses. @@ -263,8 +263,8 @@ async def handle_participant_response( async def _handle_messages( self, - messages: list[ChatMessage], - ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[ChatMessage]], + messages: list[Message], + ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[Message]], ) -> None: """Handle task messages from users as workflow entry point. 
@@ -279,7 +279,7 @@ async def _handle_messages( async def _handle_response( self, response: AgentExecutorResponse | GroupChatResponseMessage, - ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[ChatMessage]], + ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[Message]], ) -> None: """Handle a participant response. @@ -295,7 +295,7 @@ async def _handle_response( # Conversation state management (shared across all patterns) - def _append_messages(self, messages: Sequence[ChatMessage]) -> None: + def _append_messages(self, messages: Sequence[Message]) -> None: """Append messages to the conversation history. Args: @@ -303,7 +303,7 @@ def _append_messages(self, messages: Sequence[ChatMessage]) -> None: """ self._full_conversation.extend(messages) - def _get_conversation(self) -> list[ChatMessage]: + def _get_conversation(self) -> list[Message]: """Get a copy of the current conversation. Returns: @@ -313,8 +313,8 @@ def _get_conversation(self) -> list[ChatMessage]: def _process_participant_response( self, response: AgentExecutorResponse | GroupChatResponseMessage - ) -> list[ChatMessage]: - """Extract ChatMessage from participant response. + ) -> list[Message]: + """Extract Message from participant response. Args: response: Response from participant @@ -351,7 +351,7 @@ async def _check_termination(self) -> bool: result = await result return result - async def _check_terminate_and_yield(self, ctx: WorkflowContext[Never, list[ChatMessage]]) -> bool: + async def _check_terminate_and_yield(self, ctx: WorkflowContext[Never, list[Message]]) -> bool: """Check termination conditions and yield completion if met. Args: @@ -368,22 +368,22 @@ async def _check_terminate_and_yield(self, ctx: WorkflowContext[Never, list[Chat return False - def _create_completion_message(self, message: str) -> ChatMessage: + def _create_completion_message(self, message: str) -> Message: """Create a standardized completion message. 
Args: message: Completion text Returns: - ChatMessage with completion content + Message with completion content """ - return ChatMessage(role="assistant", text=message, author_name=self._name) + return Message(role="assistant", text=message, author_name=self._name) # Participant routing (shared across all patterns) async def _broadcast_messages_to_participants( self, - messages: list[ChatMessage], + messages: list[Message], ctx: WorkflowContext[AgentExecutorRequest | GroupChatParticipantMessage], participants: Sequence[str] | None = None, ) -> None: @@ -439,9 +439,9 @@ async def _send_request_to_participant( """ if self._participant_registry.is_agent(target): # AgentExecutors receive simple message list - messages: list[ChatMessage] = [] + messages: list[Message] = [] if additional_instruction: - messages.append(ChatMessage(role="user", text=additional_instruction)) + messages.append(Message(role="user", text=additional_instruction)) request = AgentExecutorRequest(messages=messages, should_respond=True) await ctx.send_message(request, target_id=target) await ctx.add_event( @@ -490,7 +490,7 @@ def _check_round_limit(self) -> bool: return False - async def _check_round_limit_and_yield(self, ctx: WorkflowContext[Never, list[ChatMessage]]) -> bool: + async def _check_round_limit_and_yield(self, ctx: WorkflowContext[Never, list[Message]]) -> bool: """Check round limit and yield completion if reached. 
Args: diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_concurrent.py b/python/packages/orchestrations/agent_framework_orchestrations/_concurrent.py index 7f7031e05b..9b3cf84d35 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_concurrent.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_concurrent.py @@ -6,7 +6,7 @@ from collections.abc import Callable, Sequence from typing import Any -from agent_framework import ChatMessage, SupportsAgentRun +from agent_framework import Message, SupportsAgentRun from agent_framework._workflows._agent_executor import AgentExecutor, AgentExecutorRequest, AgentExecutorResponse from agent_framework._workflows._agent_utils import resolve_agent_id from agent_framework._workflows._checkpoint import CheckpointStorage @@ -56,14 +56,14 @@ async def from_str(self, prompt: str, ctx: WorkflowContext[AgentExecutorRequest] await ctx.send_message(request) @handler - async def from_message(self, message: ChatMessage, ctx: WorkflowContext[AgentExecutorRequest]) -> None: + async def from_message(self, message: Message, ctx: WorkflowContext[AgentExecutorRequest]) -> None: request = AgentExecutorRequest(messages=normalize_messages_input(message), should_respond=True) await ctx.send_message(request) @handler async def from_messages( self, - messages: list[str | ChatMessage], + messages: list[str | Message], ctx: WorkflowContext[AgentExecutorRequest], ) -> None: request = AgentExecutorRequest(messages=normalize_messages_input(messages), should_respond=True) @@ -73,7 +73,7 @@ async def from_messages( class _AggregateAgentConversations(Executor): """Aggregates agent responses and completes with combined ChatMessages. - Emits a list[ChatMessage] shaped as: + Emits a list[Message] shaped as: [ single_user_prompt?, agent1_final_assistant, agent2_final_assistant, ... ] - Extracts a single user prompt (first user message seen across results). 
@@ -82,9 +82,7 @@ class _AggregateAgentConversations(Executor): """ @handler - async def aggregate( - self, results: list[AgentExecutorResponse], ctx: WorkflowContext[Never, list[ChatMessage]] - ) -> None: + async def aggregate(self, results: list[AgentExecutorResponse], ctx: WorkflowContext[Never, list[Message]]) -> None: if not results: logger.error("Concurrent aggregator received empty results list") raise ValueError("Aggregation failed: no results provided") @@ -98,8 +96,8 @@ def _is_role(msg: Any, role: str) -> bool: role_str = str(role).lower() return r_str == role_str - prompt_message: ChatMessage | None = None - assistant_replies: list[ChatMessage] = [] + prompt_message: Message | None = None + assistant_replies: list[Message] = [] for r in results: resp_messages = list(getattr(r.agent_response, "messages", []) or []) @@ -132,7 +130,7 @@ def _is_role(msg: Any, role: str) -> bool: logger.error(f"Aggregation failed: no assistant replies found across {len(results)} results") raise RuntimeError("Aggregation failed: no assistant replies found") - output: list[ChatMessage] = [] + output: list[Message] = [] if prompt_message is not None: output.append(prompt_message) else: @@ -195,8 +193,8 @@ class ConcurrentBuilder: from agent_framework_orchestrations import ConcurrentBuilder - # Minimal: use default aggregator (returns list[ChatMessage]) - workflow = ConcurrentBuilder(participants=[agent1, agent2, agent3]).build() + # Minimal: use default aggregator (returns list[Message]) + workflow = ConcurrentBuilder().participants([agent1, agent2, agent3]).build() # Custom aggregator via callback (sync or async). 
The callback receives @@ -205,14 +203,14 @@ def summarize(results: list[AgentExecutorResponse]) -> str: return " | ".join(r.agent_response.messages[-1].text for r in results) - workflow = ConcurrentBuilder(participants=[agent1, agent2, agent3]).with_aggregator(summarize).build() + workflow = ConcurrentBuilder().participants([agent1, agent2, agent3]).with_aggregator(summarize).build() # Enable checkpoint persistence so runs can resume - workflow = ConcurrentBuilder(participants=[agent1, agent2, agent3], checkpoint_storage=storage).build() + workflow = ConcurrentBuilder().participants([agent1, agent2, agent3]).with_checkpointing(storage).build() # Enable request info before aggregation - workflow = ConcurrentBuilder(participants=[agent1, agent2]).with_request_info().build() + workflow = ConcurrentBuilder().participants([agent1, agent2]).with_request_info().build() """ def __init__( @@ -235,14 +233,14 @@ def __init__( self._checkpoint_storage: CheckpointStorage | None = checkpoint_storage self._request_info_enabled: bool = False self._request_info_filter: set[str] | None = None - self._intermediate_outputs: bool = intermediate_outputs + self._intermediate_outputs: bool = False self._set_participants(participants) def _set_participants(self, participants: Sequence[SupportsAgentRun | Executor]) -> None: """Set participants (internal).""" if self._participants: - raise ValueError("participants already set.") + raise ValueError("participants() has already been called on this builder instance.") if not participants: raise ValueError("participants cannot be empty") @@ -264,6 +262,7 @@ def _set_participants(self, participants: Sequence[SupportsAgentRun | Executor]) raise TypeError(f"participants must be SupportsAgentRun or Executor instances; got {type(p).__name__}") self._participants = list(participants) + return self def with_aggregator( self, @@ -292,7 +291,7 @@ async def aggregate(self, results: list[AgentExecutorResponse], ctx: WorkflowCon await ctx.yield_output(" | 
".join(r.agent_response.messages[-1].text for r in results)) - wf = ConcurrentBuilder(participants=[a1, a2, a3]).with_aggregator(CustomAggregator()).build() + wf = ConcurrentBuilder().participants([a1, a2, a3]).with_aggregator(CustomAggregator()).build() # Callback-based aggregator (string result) @@ -300,7 +299,7 @@ async def summarize(results: list[AgentExecutorResponse]) -> str: return " | ".join(r.agent_response.messages[-1].text for r in results) - wf = ConcurrentBuilder(participants=[a1, a2, a3]).with_aggregator(summarize).build() + wf = ConcurrentBuilder().participants([a1, a2, a3]).with_aggregator(summarize).build() # Callback-based aggregator (yield result) @@ -308,7 +307,7 @@ async def summarize(results: list[AgentExecutorResponse], ctx: WorkflowContext[N await ctx.yield_output(" | ".join(r.agent_response.messages[-1].text for r in results)) - wf = ConcurrentBuilder(participants=[a1, a2, a3]).with_aggregator(summarize).build() + wf = ConcurrentBuilder().participants([a1, a2, a3]).with_aggregator(summarize).build() """ if self._aggregator is not None: raise ValueError("with_aggregator() has already been called on this builder instance.") @@ -322,6 +321,15 @@ async def summarize(results: list[AgentExecutorResponse], ctx: WorkflowContext[N return self + def with_checkpointing(self, checkpoint_storage: CheckpointStorage) -> "ConcurrentBuilder": + """Enable checkpoint persistence using the provided storage backend. + + Args: + checkpoint_storage: CheckpointStorage instance for persisting workflow state + """ + self._checkpoint_storage = checkpoint_storage + return self + def with_request_info( self, *, @@ -355,6 +363,19 @@ def with_request_info( return self + def with_intermediate_outputs(self) -> "ConcurrentBuilder": + """Enable intermediate outputs from agent participants before aggregation. + + When enabled, the workflow returns each agent participant's response or yields + streaming updates as they become available. 
The output of the aggregator will + always be available as the final output of the workflow. + + Returns: + Self for fluent chaining + """ + self._intermediate_outputs = True + return self + def _resolve_participants(self) -> list[Executor]: """Resolve participant instances into Executor objects.""" if not self._participants: @@ -388,7 +409,7 @@ def build(self) -> Workflow: - If request info is enabled, the orchestration emits a request info event with outputs from all participants before sending the outputs to the aggregator - Aggregator yields output and the workflow becomes idle. The output is either: - - list[ChatMessage] (default aggregator: one user + one assistant per agent) + - list[Message] (default aggregator: one user + one assistant per agent) - custom payload from the provided aggregator Returns: @@ -401,7 +422,7 @@ def build(self) -> Workflow: .. code-block:: python - workflow = ConcurrentBuilder(participants=[agent1, agent2]).build() + workflow = ConcurrentBuilder().participants([agent1, agent2]).build() """ # Internal nodes dispatcher = _DispatchToAllParticipants(id="dispatcher") @@ -410,14 +431,18 @@ def build(self) -> Workflow: # Resolve participants and participant factories to executors participants: list[Executor] = self._resolve_participants() - builder = WorkflowBuilder( - start_executor=dispatcher, - checkpoint_storage=self._checkpoint_storage, - output_executors=[aggregator] if not self._intermediate_outputs else None, - ) + builder = WorkflowBuilder() + builder.set_start_executor(dispatcher) # Fan-out for parallel execution builder.add_fan_out_edges(dispatcher, participants) # Direct fan-in to aggregator builder.add_fan_in_edges(participants, aggregator) + if not self._intermediate_outputs: + # Constrain output to aggregator only + builder = builder.with_output_from([aggregator]) + + if self._checkpoint_storage is not None: + builder = builder.with_checkpointing(self._checkpoint_storage) + return builder.build() diff --git 
a/python/packages/orchestrations/agent_framework_orchestrations/_group_chat.py b/python/packages/orchestrations/agent_framework_orchestrations/_group_chat.py index 085ed84148..f53628c74e 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_group_chat.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_group_chat.py @@ -26,11 +26,11 @@ from collections import OrderedDict from collections.abc import Awaitable, Callable, Sequence from dataclasses import dataclass -from typing import Any, ClassVar, cast +from typing import Any, ClassVar, cast, overload -from agent_framework import ChatAgent, SupportsAgentRun +from agent_framework import Agent, SupportsAgentRun from agent_framework._threads import AgentThread -from agent_framework._types import ChatMessage +from agent_framework._types import Message from agent_framework._workflows._agent_executor import AgentExecutor, AgentExecutorRequest, AgentExecutorResponse from agent_framework._workflows._agent_utils import resolve_agent_id from agent_framework._workflows._checkpoint import CheckpointStorage @@ -69,7 +69,7 @@ class GroupChatState: Attributes: current_round: The current round index of the group chat, starting from 0. participants: A mapping of participant names to their descriptions in the group chat. - conversation: The full conversation history up to this point as a list of ChatMessage. + conversation: The full conversation history up to this point as a list of Message. 
""" # Round index, starting from 0 @@ -77,7 +77,7 @@ class GroupChatState: # participant name to description mapping as a ordered dict participants: OrderedDict[str, str] # Full conversation history up to this point - conversation: list[ChatMessage] + conversation: list[Message] # region Default orchestrator @@ -165,13 +165,13 @@ async def round_robin_selector(state: GroupChatState) -> str: @override async def _handle_messages( self, - messages: list[ChatMessage], - ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[ChatMessage]], + messages: list[Message], + ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[Message]], ) -> None: """Initialize orchestrator state and start the conversation loop.""" self._append_messages(messages) # Termination condition will also be applied to the input messages - if await self._check_terminate_and_yield(cast(WorkflowContext[Never, list[ChatMessage]], ctx)): + if await self._check_terminate_and_yield(cast(WorkflowContext[Never, list[Message]], ctx)): return next_speaker = await self._get_next_speaker() @@ -192,7 +192,7 @@ async def _handle_messages( async def _handle_response( self, response: AgentExecutorResponse | GroupChatResponseMessage, - ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[ChatMessage]], + ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[Message]], ) -> None: """Handle a participant response.""" messages = self._process_participant_response(response) @@ -200,9 +200,9 @@ async def _handle_response( messages = clean_conversation_for_handoff(messages) self._append_messages(messages) - if await self._check_terminate_and_yield(cast(WorkflowContext[Never, list[ChatMessage]], ctx)): + if await self._check_terminate_and_yield(cast(WorkflowContext[Never, list[Message]], ctx)): return - if await self._check_round_limit_and_yield(cast(WorkflowContext[Never, list[ChatMessage]], ctx)): + if await self._check_round_limit_and_yield(cast(WorkflowContext[Never, list[Message]], ctx)): return next_speaker = 
await self._get_next_speaker() @@ -287,7 +287,7 @@ class AgentBasedGroupChatOrchestrator(BaseGroupChatOrchestrator): def __init__( self, - agent: ChatAgent, + agent: Agent, participant_registry: ParticipantRegistry, *, max_rounds: int | None = None, @@ -318,29 +318,29 @@ def __init__( self._thread = thread or agent.get_new_thread() # Cache for messages since last agent invocation # This is different from the full conversation history maintained by the base orchestrator - self._cache: list[ChatMessage] = [] + self._cache: list[Message] = [] @override - def _append_messages(self, messages: Sequence[ChatMessage]) -> None: + def _append_messages(self, messages: Sequence[Message]) -> None: self._cache.extend(messages) return super()._append_messages(messages) @override async def _handle_messages( self, - messages: list[ChatMessage], - ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[ChatMessage]], + messages: list[Message], + ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[Message]], ) -> None: """Initialize orchestrator state and start the conversation loop.""" self._append_messages(messages) # Termination condition will also be applied to the input messages - if await self._check_terminate_and_yield(cast(WorkflowContext[Never, list[ChatMessage]], ctx)): + if await self._check_terminate_and_yield(cast(WorkflowContext[Never, list[Message]], ctx)): return agent_orchestration_output = await self._invoke_agent() if await self._check_agent_terminate_and_yield( agent_orchestration_output, - cast(WorkflowContext[Never, list[ChatMessage]], ctx), + cast(WorkflowContext[Never, list[Message]], ctx), ): return @@ -361,22 +361,22 @@ async def _handle_messages( async def _handle_response( self, response: AgentExecutorResponse | GroupChatResponseMessage, - ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[ChatMessage]], + ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[Message]], ) -> None: """Handle a participant response.""" messages = 
self._process_participant_response(response) # Remove tool-related content to prevent API errors from empty messages messages = clean_conversation_for_handoff(messages) self._append_messages(messages) - if await self._check_terminate_and_yield(cast(WorkflowContext[Never, list[ChatMessage]], ctx)): + if await self._check_terminate_and_yield(cast(WorkflowContext[Never, list[Message]], ctx)): return - if await self._check_round_limit_and_yield(cast(WorkflowContext[Never, list[ChatMessage]], ctx)): + if await self._check_round_limit_and_yield(cast(WorkflowContext[Never, list[Message]], ctx)): return agent_orchestration_output = await self._invoke_agent() if await self._check_agent_terminate_and_yield( agent_orchestration_output, - cast(WorkflowContext[Never, list[ChatMessage]], ctx), + cast(WorkflowContext[Never, list[Message]], ctx), ): return @@ -399,7 +399,7 @@ async def _handle_response( async def _invoke_agent(self) -> AgentOrchestrationOutput: """Invoke the orchestrator agent to determine the next speaker and termination.""" - async def _invoke_agent_helper(conversation: list[ChatMessage]) -> AgentOrchestrationOutput: + async def _invoke_agent_helper(conversation: list[Message]) -> AgentOrchestrationOutput: # Run the agent in non-streaming mode for simplicity agent_response = await self._agent.run( messages=conversation, @@ -431,7 +431,7 @@ async def _invoke_agent_helper(conversation: list[ChatMessage]) -> AgentOrchestr ]) ) # Prepend instruction as system message - current_conversation.append(ChatMessage(role="user", text=instruction)) + current_conversation.append(Message(role="user", text=instruction)) retry_attempts = self._retry_attempts while True: @@ -445,7 +445,7 @@ async def _invoke_agent_helper(conversation: list[ChatMessage]) -> AgentOrchestr logger.debug(f"Retrying agent orchestration invocation, attempts left: {retry_attempts}") # We don't need the full conversation since the thread should maintain history current_conversation = [ - ChatMessage( + 
Message( role="user", text=f"Your input could not be parsed due to an error: {ex}. Please try again.", ) @@ -454,7 +454,7 @@ async def _invoke_agent_helper(conversation: list[ChatMessage]) -> AgentOrchestr async def _check_agent_terminate_and_yield( self, agent_orchestration_output: AgentOrchestrationOutput, - ctx: WorkflowContext[Never, list[ChatMessage]], + ctx: WorkflowContext[Never, list[Message]], ) -> bool: """Check if the agent requested termination and yield completion if so. @@ -518,7 +518,7 @@ class GroupChatBuilder: into a complete workflow graph that can be executed. Outputs: - The final conversation history as a list of ChatMessage once the group chat completes. + The final conversation history as a list of Message once the group chat completes. """ DEFAULT_ORCHESTRATOR_ID: ClassVar[str] = "group_chat_orchestrator" @@ -528,7 +528,7 @@ def __init__( *, participants: Sequence[SupportsAgentRun | Executor], # Orchestrator config (exactly one required) - orchestrator_agent: ChatAgent | Callable[[], ChatAgent] | None = None, + orchestrator_agent: Agent | Callable[[], Agent] | None = None, orchestrator: BaseGroupChatOrchestrator | Callable[[], BaseGroupChatOrchestrator] | None = None, selection_func: GroupChatSelectionFunction | None = None, orchestrator_name: str | None = None, @@ -558,46 +558,101 @@ def __init__( # Orchestrator related members self._orchestrator: BaseGroupChatOrchestrator | None = None - self._orchestrator_factory: Callable[[], ChatAgent | BaseGroupChatOrchestrator] | None = None + self._orchestrator_factory: Callable[[], Agent | BaseGroupChatOrchestrator] | None = None self._selection_func: GroupChatSelectionFunction | None = None - self._agent_orchestrator: ChatAgent | None = None - self._termination_condition: TerminationCondition | None = termination_condition - self._max_rounds: int | None = max_rounds + self._agent_orchestrator: Agent | None = None + self._termination_condition: TerminationCondition | None = None + self._max_rounds: 
int | None = None self._orchestrator_name: str | None = None # Checkpoint related members - self._checkpoint_storage: CheckpointStorage | None = checkpoint_storage + self._checkpoint_storage: CheckpointStorage | None = None # Request info related members self._request_info_enabled: bool = False self._request_info_filter: set[str] = set() # Intermediate outputs - self._intermediate_outputs = intermediate_outputs + self._intermediate_outputs = False self._set_participants(participants) - # Set orchestrator if provided - if any(x is not None for x in [orchestrator_agent, orchestrator, selection_func]): - self._set_orchestrator( - orchestrator_agent=orchestrator_agent, - orchestrator=orchestrator, - selection_func=selection_func, - orchestrator_name=orchestrator_name, - ) + @overload + def with_orchestrator(self, *, agent: Agent | Callable[[], Agent]) -> GroupChatBuilder: + """Set the orchestrator for this group chat workflow using a Agent. + + Args: + agent: An instance of Agent or a callable that produces one to manage the group chat. + + Returns: + Self for fluent chaining. + """ + ... + + @overload + def with_orchestrator( + self, *, orchestrator: BaseGroupChatOrchestrator | Callable[[], BaseGroupChatOrchestrator] + ) -> GroupChatBuilder: + """Set the orchestrator for this group chat workflow using a custom orchestrator. + + Args: + orchestrator: An instance of BaseGroupChatOrchestrator or a callable that produces one to + manage the group chat. + + Returns: + Self for fluent chaining. + + Note: + When using a custom orchestrator that implements `BaseGroupChatOrchestrator`, setting + `termination_condition` and `max_rounds` on the builder will have no effect since the + orchestrator is already fully defined. + """ + ... + + @overload + def with_orchestrator( + self, + *, + selection_func: GroupChatSelectionFunction, + orchestrator_name: str | None = None, + ) -> GroupChatBuilder: + """Set the orchestrator for this group chat workflow using a selection function. 
+ + Args: + selection_func: Callable that receives the current GroupChatState and returns + the name of the next participant to speak, or None to finish. + orchestrator_name: Optional display name for the orchestrator in the workflow. + If not provided, defaults to `GroupChatBuilder.DEFAULT_ORCHESTRATOR_ID`. - def _set_orchestrator( + Returns: + Self for fluent chaining. + """ + ... + + def with_orchestrator( self, *, - orchestrator_agent: ChatAgent | Callable[[], ChatAgent] | None = None, + agent: Agent | Callable[[], Agent] | None = None, orchestrator: BaseGroupChatOrchestrator | Callable[[], BaseGroupChatOrchestrator] | None = None, selection_func: GroupChatSelectionFunction | None = None, orchestrator_name: str | None = None, - ) -> None: - """Set the orchestrator for this group chat workflow (internal). + ) -> GroupChatBuilder: + """Set the orchestrator for this group chat workflow. + + An group chat orchestrator is responsible for managing the flow of conversation, making + sure all participants are synced and picking the next speaker according to the defined logic + until the termination conditions are met. + + There are a few ways to configure the orchestrator: + 1. Provide a Agent instance or a factory function that produces one to use an agent-based orchestrator + 2. Provide a BaseGroupChatOrchestrator instance or a factory function that produces one to use a custom + orchestrator + 3. Provide a selection function to use that picks the next speaker based on the function logic + + You can only use one of the above methods to configure the orchestrator. Args: - orchestrator_agent: An instance of ChatAgent or a callable that produces one to manage the group chat. + agent: An instance of Agent or a callable that produces one to manage the group chat. orchestrator: An instance of BaseGroupChatOrchestrator or a callable that produces one to manage the group chat. 
selection_func: Callable that receives the current GroupChatState and returns @@ -607,39 +662,61 @@ def _set_orchestrator( `GroupChatBuilder.DEFAULT_ORCHESTRATOR_ID`. This parameter is ignored if using an agent or custom orchestrator. + Returns: + Self for fluent chaining. + Raises: ValueError: If an orchestrator has already been set or if none or multiple of the parameters are provided. + + Note: + When using a custom orchestrator that implements `BaseGroupChatOrchestrator`, either + via the `orchestrator` or `orchestrator_factory` parameters, setting `termination_condition` + and `max_rounds` on the builder will have no effect since the orchestrator is already + fully defined. + + Example: + .. code-block:: python + + from agent_framework_orchestrations import GroupChatBuilder + + + orchestrator = CustomGroupChatOrchestrator(...) + workflow = GroupChatBuilder().with_orchestrator(orchestrator).participants([agent1, agent2]).build() """ if self._agent_orchestrator is not None: - raise ValueError("An agent orchestrator has already been configured. Set orchestrator config once only.") + raise ValueError( + "An agent orchestrator has already been configured. Call with_orchestrator(...) once only." + ) if self._orchestrator is not None: - raise ValueError("An orchestrator has already been configured. Set orchestrator config once only.") + raise ValueError("An orchestrator has already been configured. Call with_orchestrator(...) once only.") if self._orchestrator_factory is not None: - raise ValueError("A factory has already been configured. Set orchestrator config once only.") + raise ValueError("A factory has already been configured. Call with_orchestrator(...) once only.") if self._selection_func is not None: - raise ValueError("A selection function has already been configured. Set orchestrator config once only.") + raise ValueError("A selection function has already been configured. Call with_orchestrator(...) 
once only.") - if sum(x is not None for x in [orchestrator_agent, orchestrator, selection_func]) != 1: - raise ValueError("Exactly one of orchestrator_agent, orchestrator, or selection_func must be provided.") + if sum(x is not None for x in [agent, orchestrator, selection_func]) != 1: + raise ValueError("Exactly one of agent, orchestrator, or selection_func must be provided.") - if orchestrator_agent is not None and isinstance(orchestrator_agent, ChatAgent): - self._agent_orchestrator = orchestrator_agent + if agent is not None and isinstance(agent, Agent): + self._agent_orchestrator = agent elif orchestrator is not None and isinstance(orchestrator, BaseGroupChatOrchestrator): self._orchestrator = orchestrator elif selection_func is not None: self._selection_func = selection_func self._orchestrator_name = orchestrator_name else: - self._orchestrator_factory = orchestrator_agent or orchestrator + self._orchestrator_factory = agent or orchestrator + + return self def _set_participants(self, participants: Sequence[SupportsAgentRun | Executor]) -> None: """Set participants (internal).""" if self._participants: - raise ValueError("participants already set.") + raise ValueError("participants have already been set. Call participants() at most once.") if not participants: raise ValueError("participants cannot be empty.") @@ -679,21 +756,20 @@ def with_termination_condition(self, termination_condition: TerminationCondition .. code-block:: python - from agent_framework import ChatMessage + from agent_framework import Message from agent_framework_orchestrations import GroupChatBuilder - def stop_after_two_calls(conversation: list[ChatMessage]) -> bool: + def stop_after_two_calls(conversation: list[Message]) -> bool: calls = sum(1 for msg in conversation if msg.role == "assistant" and msg.author_name == "specialist") return calls >= 2 specialist_agent = ... 
workflow = ( - GroupChatBuilder( - participants=[agent1, specialist_agent], - selection_func=my_selection_function, - ) + GroupChatBuilder() + .with_orchestrator(selection_func=my_selection_function) + .participants([agent1, specialist_agent]) .with_termination_condition(stop_after_two_calls) .build() ) @@ -745,10 +821,9 @@ def with_checkpointing(self, checkpoint_storage: CheckpointStorage) -> GroupChat storage = MemoryCheckpointStorage() workflow = ( - GroupChatBuilder( - participants=[agent1, agent2], - selection_func=my_selection_function, - ) + GroupChatBuilder() + .with_orchestrator(selection_func=my_selection_function) + .participants([agent1, agent2]) .with_checkpointing(storage) .build() ) @@ -785,6 +860,19 @@ def with_request_info(self, *, agents: Sequence[str | SupportsAgentRun] | None = return self + def with_intermediate_outputs(self) -> GroupChatBuilder: + """Enable intermediate outputs from agent participants. + + When enabled, the workflow returns each agent participant's response or yields + streaming updates as they become available. The output of the orchestrator will + always be available as the final output of the workflow. + + Returns: + Self for fluent chaining + """ + self._intermediate_outputs = True + return self + def _resolve_orchestrator(self, participants: Sequence[Executor]) -> Executor: """Determine the orchestrator to use for the workflow. @@ -795,11 +883,8 @@ def _resolve_orchestrator(self, participants: Sequence[Executor]) -> Executor: x is None for x in [self._agent_orchestrator, self._selection_func, self._orchestrator, self._orchestrator_factory] ): - raise ValueError( - "No orchestrator has been configured. " - "Pass orchestrator_agent, orchestrator, or selection_func to the constructor." - ) - # We don't need to check if multiple are set since that is handled in _set_orchestrator() + raise ValueError("No orchestrator has been configured. 
Call with_orchestrator() to set one.") + # We don't need to check if multiple are set since that is handled in with_orchestrator() if self._agent_orchestrator: return AgentBasedGroupChatOrchestrator( @@ -824,7 +909,7 @@ def _resolve_orchestrator(self, participants: Sequence[Executor]) -> Executor: if self._orchestrator_factory: orchestrator_instance = self._orchestrator_factory() - if isinstance(orchestrator_instance, ChatAgent): + if isinstance(orchestrator_instance, Agent): return AgentBasedGroupChatOrchestrator( agent=orchestrator_instance, participant_registry=ParticipantRegistry(participants), @@ -834,15 +919,12 @@ def _resolve_orchestrator(self, participants: Sequence[Executor]) -> Executor: if isinstance(orchestrator_instance, BaseGroupChatOrchestrator): return orchestrator_instance raise TypeError( - f"Orchestrator factory must return ChatAgent or BaseGroupChatOrchestrator instance. " + f"Orchestrator factory must return Agent or BaseGroupChatOrchestrator instance. " f"Got {type(orchestrator_instance).__name__}." ) # This should never be reached due to the checks above - raise RuntimeError( - "Orchestrator could not be resolved. " - "Pass orchestrator_agent, orchestrator, or selection_func to the constructor." - ) + raise RuntimeError("Orchestrator could not be resolved. 
Please provide one via with_orchestrator()") def _resolve_participants(self) -> list[Executor]: """Resolve participant instances into Executor objects.""" @@ -885,16 +967,19 @@ def build(self) -> Workflow: orchestrator: Executor = self._resolve_orchestrator(participants) # Build workflow graph - workflow_builder = WorkflowBuilder( - start_executor=orchestrator, - checkpoint_storage=self._checkpoint_storage, - output_executors=[orchestrator] if not self._intermediate_outputs else None, - ) + workflow_builder = WorkflowBuilder().set_start_executor(orchestrator) for participant in participants: # Orchestrator and participant bi-directional edges workflow_builder = workflow_builder.add_edge(orchestrator, participant) workflow_builder = workflow_builder.add_edge(participant, orchestrator) + if not self._intermediate_outputs: + # Constrain output to orchestrator only + workflow_builder = workflow_builder.with_output_from([orchestrator]) + + if self._checkpoint_storage is not None: + workflow_builder = workflow_builder.with_checkpointing(self._checkpoint_storage) + return workflow_builder.build() diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py b/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py index f2f3d74f4d..86830ec95e 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py @@ -36,11 +36,11 @@ from dataclasses import dataclass from typing import Any, cast -from agent_framework import ChatAgent, SupportsAgentRun +from agent_framework import Agent, SupportsAgentRun from agent_framework._middleware import FunctionInvocationContext, FunctionMiddleware from agent_framework._threads import AgentThread from agent_framework._tools import FunctionTool, tool -from agent_framework._types import AgentResponse, AgentResponseUpdate, ChatMessage +from agent_framework._types import AgentResponse, 
AgentResponseUpdate, Message from agent_framework._workflows._agent_executor import AgentExecutor, AgentExecutorRequest, AgentExecutorResponse from agent_framework._workflows._agent_utils import resolve_agent_id from agent_framework._workflows._checkpoint import CheckpointStorage @@ -154,28 +154,28 @@ class HandoffAgentUserRequest: agent_response: AgentResponse @staticmethod - def create_response(response: str | list[str] | ChatMessage | list[ChatMessage]) -> list[ChatMessage]: + def create_response(response: str | list[str] | Message | list[Message]) -> list[Message]: """Create a HandoffAgentUserRequest from a simple text response.""" - messages: list[ChatMessage] = [] + messages: list[Message] = [] if isinstance(response, str): - messages.append(ChatMessage(role="user", text=response)) - elif isinstance(response, ChatMessage): + messages.append(Message(role="user", text=response)) + elif isinstance(response, Message): messages.append(response) elif isinstance(response, list): for item in response: - if isinstance(item, ChatMessage): + if isinstance(item, Message): messages.append(item) elif isinstance(item, str): - messages.append(ChatMessage(role="user", text=item)) + messages.append(Message(role="user", text=item)) else: - raise TypeError("List items must be either str or ChatMessage instances") + raise TypeError("List items must be either str or Message instances") else: - raise TypeError("Response must be str, list of str, ChatMessage, or list of ChatMessage") + raise TypeError("Response must be str, list of str, Message, or list of Message") return messages @staticmethod - def terminate() -> list[ChatMessage]: + def terminate() -> list[Message]: """Create a termination response for the handoff workflow.""" return [] @@ -248,10 +248,8 @@ def _prepare_agent_with_handoffs( Returns: A new AgentExecutor instance with handoff tools added """ - if not isinstance(agent, ChatAgent): - raise TypeError( - "Handoff can only be applied to ChatAgent. 
Please ensure the agent is a ChatAgent instance."
-            )
+        if not isinstance(agent, Agent):
+            raise TypeError("Handoff can only be applied to Agent. Please ensure the agent is an Agent instance.")
 
         # Clone the agent to avoid mutating the original
         cloned_agent = self._clone_chat_agent(agent)  # type: ignore
@@ -265,13 +263,13 @@ def _prepare_agent_with_handoffs(
 
         return cloned_agent
 
-    def _clone_chat_agent(self, agent: ChatAgent) -> ChatAgent:
-        """Produce a deep copy of the ChatAgent while preserving runtime configuration."""
+    def _clone_chat_agent(self, agent: Agent) -> Agent:
+        """Produce a deep copy of the Agent while preserving runtime configuration."""
         options = agent.default_options
         middleware = list(agent.middleware or [])
 
         # Reconstruct the original tools list by combining regular tools with MCP tools.
-        # ChatAgent.__init__ separates MCP tools during initialization,
+        # Agent.__init__ separates MCP tools during initialization,
         # so we need to recombine them here to pass the complete tools list to the constructor.
         # This makes sure MCP tools are preserved when cloning agents for handoff workflows.
         tools_from_options = options.get("tools")
@@ -303,7 +301,7 @@ def _clone_chat_agent(self, agent: ChatAgent) -> ChatAgent:
             "user": options.get("user"),
         }
 
-        return ChatAgent(
+        return Agent(
             chat_client=agent.chat_client,
             id=agent.id,
             name=agent.name,
@@ -314,13 +312,13 @@ def _clone_chat_agent(self, agent: ChatAgent) -> ChatAgent:
             default_options=cloned_options,  # type: ignore[arg-type]
         )
 
-    def _apply_auto_tools(self, agent: ChatAgent, targets: Sequence[HandoffConfiguration]) -> None:
+    def _apply_auto_tools(self, agent: Agent, targets: Sequence[HandoffConfiguration]) -> None:
        """Attach synthetic handoff tools to a chat agent and return the target lookup table.
 
         Creates handoff tools for each specialist agent that this agent can route to. 
Args: - agent: The ChatAgent to add handoff tools to + agent: The Agent to add handoff tools to targets: Sequence of handoff configurations defining target agents """ default_options = agent.default_options @@ -375,7 +373,7 @@ async def _run_agent_and_emit( self._full_conversation.extend(self._cache) # Check termination condition before running the agent - if await self._check_terminate_and_yield(cast(WorkflowContext[Never, list[ChatMessage]], ctx)): + if await self._check_terminate_and_yield(cast(WorkflowContext[Never, list[Message]], ctx)): return # Run the agent @@ -427,19 +425,19 @@ async def _run_agent_and_emit( # or a termination condition is met. # This allows the agent to perform long-running tasks without returning control # to the coordinator or user prematurely. - self._cache.extend([ChatMessage(role="user", text=self._autonomous_mode_prompt)]) + self._cache.extend([Message(role="user", text=self._autonomous_mode_prompt)]) self._autonomous_mode_turns += 1 await self._run_agent_and_emit(ctx) else: # The response is handled via `handle_response` self._autonomous_mode_turns = 0 # Reset autonomous mode turn counter on handoff - await ctx.request_info(HandoffAgentUserRequest(response), list[ChatMessage]) + await ctx.request_info(HandoffAgentUserRequest(response), list[Message]) @response_handler async def handle_response( self, original_request: HandoffAgentUserRequest, - response: list[ChatMessage], + response: list[Message], ctx: WorkflowContext[AgentExecutorResponse, AgentResponse], ) -> None: """Handle user response for a request that is issued after agent runs. @@ -458,7 +456,7 @@ async def handle_response( If the response is empty, it indicates termination of the handoff workflow. 
""" if not response: - await cast(WorkflowContext[Never, list[ChatMessage]], ctx).yield_output(self._full_conversation) + await cast(WorkflowContext[Never, list[Message]], ctx).yield_output(self._full_conversation) return # Broadcast the user response to all other agents @@ -472,7 +470,7 @@ async def handle_response( async def _broadcast_messages( self, - messages: list[ChatMessage], + messages: list[Message], ctx: WorkflowContext[AgentExecutorRequest], ) -> None: """Broadcast the workflow cache to the agent before running.""" @@ -506,7 +504,7 @@ def _is_handoff_requested(self, response: AgentResponse) -> str | None: return None - async def _check_terminate_and_yield(self, ctx: WorkflowContext[Never, list[ChatMessage]]) -> bool: + async def _check_terminate_and_yield(self, ctx: WorkflowContext[Never, list[Message]]) -> bool: """Check termination conditions and yield completion if met. Args: @@ -561,10 +559,10 @@ class HandoffBuilder: Participants must be agents. Support for custom executors is not available in handoff workflows. Outputs: - The final conversation history as a list of ChatMessage once the group chat completes. + The final conversation history as a list of Message once the group chat completes. Note: - 1. Agents in handoff workflows must be ChatAgent instances and support local tool calls. + 1. Agents in handoff workflows must be Agent instances and support local tool calls. 2. Handoff doesn't support intermediate outputs from agents. All outputs are returned as they become available. This is because agents in handoff workflows are not considered sub-agents of a central orchestrator, thus all outputs are directly emitted. 
@@ -576,8 +574,6 @@ def __init__( name: str | None = None, participants: Sequence[SupportsAgentRun] | None = None, description: str | None = None, - checkpoint_storage: CheckpointStorage | None = None, - termination_condition: TerminationCondition | None = None, ) -> None: r"""Initialize a HandoffBuilder for creating conversational handoff workflows. @@ -596,9 +592,6 @@ def __init__( unique identifier (`.name` is preferred if set, otherwise `.id` is used). description: Optional human-readable description explaining the workflow's purpose. Useful for documentation and observability. - checkpoint_storage: Optional checkpoint storage for enabling workflow state persistence. - termination_condition: Optional callable that receives the full conversation and returns True - (or awaitable True) if the workflow should terminate. """ self._name = name self._description = description @@ -614,7 +607,7 @@ def __init__( self._handoff_config: dict[str, set[HandoffConfiguration]] = {} # Checkpoint related members - self._checkpoint_storage: CheckpointStorage | None = checkpoint_storage + self._checkpoint_storage: CheckpointStorage | None = None # Autonomous mode related self._autonomous_mode: bool = False @@ -623,9 +616,7 @@ def __init__( self._autonomous_mode_enabled_agents: list[str] = [] # Termination related members - self._termination_condition: Callable[[list[ChatMessage]], bool | Awaitable[bool]] | None = ( - termination_condition - ) + self._termination_condition: Callable[[list[Message]], bool | Awaitable[bool]] | None = None def participants(self, participants: Sequence[SupportsAgentRun]) -> "HandoffBuilder": """Register the agents that will participate in the handoff workflow. 
@@ -888,7 +879,7 @@ def with_termination_condition(self, termination_condition: TerminationCondition # Asynchronous condition - async def check_termination(conv: list[ChatMessage]) -> bool: + async def check_termination(conv: list[Message]) -> bool: # Can perform async operations return len(conv) > 20 @@ -929,9 +920,7 @@ def build(self) -> Workflow: builder = WorkflowBuilder( name=self._name, description=self._description, - start_executor=start_executor, - checkpoint_storage=self._checkpoint_storage, - ) + ).set_start_executor(start_executor) # Add the appropriate edges # In handoff workflows, all executors are connected, making a fully connected graph. @@ -947,6 +936,10 @@ def build(self) -> Workflow: elif len(targets) == 1: builder = builder.add_edge(executor, targets[0]) + # Configure checkpointing if enabled + if self._checkpoint_storage: + builder.with_checkpointing(self._checkpoint_storage) + return builder.build() # region Internal Helper Methods diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_magentic.py b/python/packages/orchestrations/agent_framework_orchestrations/_magentic.py index 9c38f34c3f..4ba5e0ebff 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_magentic.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_magentic.py @@ -10,11 +10,11 @@ from collections.abc import Callable, Sequence from dataclasses import dataclass, field from enum import Enum -from typing import Any, ClassVar, TypeVar, cast +from typing import Any, ClassVar, TypeVar, cast, overload from agent_framework import ( AgentResponse, - ChatMessage, + Message, SupportsAgentRun, ) from agent_framework._workflows._agent_executor import AgentExecutor, AgentExecutorRequest, AgentExecutorResponse @@ -41,6 +41,10 @@ from typing import override # type: ignore # pragma: no cover else: from typing_extensions import override # type: ignore # pragma: no cover +if sys.version_info >= (3, 11): + from typing import Self # 
type: ignore # pragma: no cover +else: + from typing_extensions import Self # type: ignore # pragma: no cover logger = logging.getLogger(__name__) @@ -56,7 +60,7 @@ ORCH_MSG_KIND_NOTICE = "notice" -def _message_to_payload(message: ChatMessage) -> Any: +def _message_to_payload(message: Message) -> Any: if hasattr(message, "to_dict") and callable(getattr(message, "to_dict", None)): with contextlib.suppress(Exception): return message.to_dict() # type: ignore[attr-defined] @@ -72,24 +76,24 @@ def _message_to_payload(message: ChatMessage) -> Any: return message -def _message_from_payload(payload: Any) -> ChatMessage: - if isinstance(payload, ChatMessage): +def _message_from_payload(payload: Any) -> Message: + if isinstance(payload, Message): return payload - if hasattr(ChatMessage, "from_dict") and isinstance(payload, dict): + if hasattr(Message, "from_dict") and isinstance(payload, dict): with contextlib.suppress(Exception): - return ChatMessage.from_dict(payload) # type: ignore[attr-defined,no-any-return] - if hasattr(ChatMessage, "from_json") and isinstance(payload, str): + return Message.from_dict(payload) # type: ignore[attr-defined,no-any-return] + if hasattr(Message, "from_json") and isinstance(payload, str): with contextlib.suppress(Exception): - return ChatMessage.from_json(payload) # type: ignore[attr-defined,no-any-return] + return Message.from_json(payload) # type: ignore[attr-defined,no-any-return] if isinstance(payload, dict): with contextlib.suppress(Exception): - return ChatMessage(**payload) # type: ignore[arg-type] + return Message(**payload) # type: ignore[arg-type] if isinstance(payload, str): with contextlib.suppress(Exception): decoded = json.loads(payload) if isinstance(decoded, dict): return _message_from_payload(decoded) - raise TypeError("Unable to reconstruct ChatMessage from payload") + raise TypeError("Unable to reconstruct Message from payload") # region Magentic One Prompts @@ -247,7 +251,7 @@ def _message_from_payload(payload: Any) -> 
ChatMessage: # region Messages and Types -def _new_chat_history() -> list[ChatMessage]: +def _new_chat_history() -> list[Message]: """Typed default factory for chat history list to satisfy type checkers.""" return [] @@ -261,8 +265,8 @@ def _new_participant_descriptions() -> dict[str, str]: class _MagenticTaskLedger(DictConvertible): """Internal: Task ledger for the Standard Magentic manager.""" - facts: ChatMessage - plan: ChatMessage + facts: Message + plan: Message def to_dict(self) -> dict[str, Any]: return {"facts": _message_to_payload(self.facts), "plan": _message_to_payload(self.plan)} @@ -328,7 +332,7 @@ class MagenticContext(DictConvertible): """Context for the Magentic manager.""" task: str - chat_history: list[ChatMessage] = field(default_factory=_new_chat_history) + chat_history: list[Message] = field(default_factory=_new_chat_history) participant_descriptions: dict[str, str] = field(default_factory=_new_participant_descriptions) round_count: int = 0 stall_count: int = 0 @@ -353,7 +357,7 @@ def from_dict(cls, data: dict[str, Any]) -> "MagenticContext": raise ValueError("MagenticContext requires a 'task' string field.") # `chat_history` is required chat_history_payload = data.get("chat_history", []) - history: list[ChatMessage] = [] + history: list[Message] = [] for item in chat_history_payload: history.append(_message_from_payload(item)) # `participant_descriptions` is required @@ -396,7 +400,7 @@ def _team_block(participants: dict[str, str]) -> str: def _extract_json(text: str) -> dict[str, Any]: """Potentially temp helper method. - Note: this method is required right now because the ChatClientProtocol, when calling + Note: this method is required right now because the SupportsChatGetResponse, when calling response.text, returns duplicate JSON payloads - need to figure out why. The `text` method is concatenating multiple text contents from diff msgs into a single string. 
@@ -472,12 +476,12 @@ def __init__(
         self.task_ledger_full_prompt: str = ORCHESTRATOR_TASK_LEDGER_FULL_PROMPT
 
     @abstractmethod
-    async def plan(self, magentic_context: MagenticContext) -> ChatMessage:
+    async def plan(self, magentic_context: MagenticContext) -> Message:
         """Create a plan for the task."""
         ...
 
     @abstractmethod
-    async def replan(self, magentic_context: MagenticContext) -> ChatMessage:
+    async def replan(self, magentic_context: MagenticContext) -> Message:
         """Replan for the task."""
         ...
 
@@ -487,7 +491,7 @@ async def create_progress_ledger(self, magentic_context: MagenticContext) -> Mag
         ...
 
     @abstractmethod
-    async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatMessage:
+    async def prepare_final_answer(self, magentic_context: MagenticContext) -> Message:
         """Prepare the final answer."""
         ...
 
@@ -501,7 +505,7 @@ def on_checkpoint_restore(self, state: dict[str, Any]) -> None:
 
 
 class StandardMagenticManager(MagenticManagerBase):
-    """Standard Magentic manager that performs real LLM calls via a ChatAgent.
+    """Standard Magentic manager that performs real LLM calls via an Agent.
 
     The manager constructs prompts that mirror the original Magentic One orchestration:
     - Facts gathering
@@ -580,8 +584,8 @@ def __init__(
 
     async def _complete(
         self,
-        messages: list[ChatMessage],
-    ) -> ChatMessage:
+        messages: list[Message],
+    ) -> Message:
         """Call the underlying agent and return the last assistant message. 
The agent's run method is called which applies the agent's configured options @@ -595,19 +599,19 @@ async def _complete( return response.messages[-1] - async def plan(self, magentic_context: MagenticContext) -> ChatMessage: + async def plan(self, magentic_context: MagenticContext) -> Message: """Create facts and plan using the model, then render a combined task ledger as a single assistant message.""" team_text = _team_block(magentic_context.participant_descriptions) # Gather facts - facts_user = ChatMessage( + facts_user = Message( role="user", text=self.task_ledger_facts_prompt.format(task=magentic_context.task), ) facts_msg = await self._complete([*magentic_context.chat_history, facts_user]) # Create plan - plan_user = ChatMessage( + plan_user = Message( role="user", text=self.task_ledger_plan_prompt.format(team=team_text), ) @@ -626,9 +630,9 @@ async def plan(self, magentic_context: MagenticContext) -> ChatMessage: facts=facts_msg.text, plan=plan_msg.text, ) - return ChatMessage(role="assistant", text=combined, author_name=MAGENTIC_MANAGER_NAME) + return Message(role="assistant", text=combined, author_name=MAGENTIC_MANAGER_NAME) - async def replan(self, magentic_context: MagenticContext) -> ChatMessage: + async def replan(self, magentic_context: MagenticContext) -> Message: """Update facts and plan when stalling or looping has been detected.""" if self.task_ledger is None: raise RuntimeError("replan() called before plan(); call plan() once before requesting a replan.") @@ -636,7 +640,7 @@ async def replan(self, magentic_context: MagenticContext) -> ChatMessage: team_text = _team_block(magentic_context.participant_descriptions) # Update facts - facts_update_user = ChatMessage( + facts_update_user = Message( role="user", text=self.task_ledger_facts_update_prompt.format( task=magentic_context.task, old_facts=self.task_ledger.facts.text @@ -645,7 +649,7 @@ async def replan(self, magentic_context: MagenticContext) -> ChatMessage: updated_facts = await 
self._complete([*magentic_context.chat_history, facts_update_user]) # Update plan - plan_update_user = ChatMessage( + plan_update_user = Message( role="user", text=self.task_ledger_plan_update_prompt.format(team=team_text), ) @@ -669,7 +673,7 @@ async def replan(self, magentic_context: MagenticContext) -> ChatMessage: facts=updated_facts.text, plan=updated_plan.text, ) - return ChatMessage(role="assistant", text=combined, author_name=MAGENTIC_MANAGER_NAME) + return Message(role="assistant", text=combined, author_name=MAGENTIC_MANAGER_NAME) async def create_progress_ledger(self, magentic_context: MagenticContext) -> MagenticProgressLedger: """Use the model to produce a JSON progress ledger based on the conversation so far. @@ -689,7 +693,7 @@ async def create_progress_ledger(self, magentic_context: MagenticContext) -> Mag team=team_text, names=names_csv, ) - user_message = ChatMessage(role="user", text=prompt) + user_message = Message(role="user", text=prompt) # Include full context to help the model decide current stage, with small retry loop attempts = 0 @@ -713,13 +717,13 @@ async def create_progress_ledger(self, magentic_context: MagenticContext) -> Mag f"Progress ledger parse failed after {self.progress_ledger_retry_count} attempt(s): {last_error}" ) - async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatMessage: + async def prepare_final_answer(self, magentic_context: MagenticContext) -> Message: """Ask the model to produce the final answer addressed to the user.""" prompt = self.final_answer_prompt.format(task=magentic_context.task) - user_message = ChatMessage(role="user", text=prompt) + user_message = Message(role="user", text=prompt) response = await self._complete([*magentic_context.chat_history, user_message]) # Ensure role is assistant - return ChatMessage( + return Message( role="assistant", text=response.text, author_name=response.author_name or MAGENTIC_MANAGER_NAME, @@ -771,7 +775,7 @@ class MagenticOrchestratorEvent: 
"""Data payload for magentic_orchestrator events.""" event_type: MagenticOrchestratorEventType - content: ChatMessage | MagenticProgressLedger + content: Message | MagenticProgressLedger # region Request info related types @@ -786,7 +790,7 @@ class MagenticPlanReviewResponse: the plan is considered approved. """ - review: list[ChatMessage] + review: list[Message] @staticmethod def approve() -> "MagenticPlanReviewResponse": @@ -794,14 +798,14 @@ def approve() -> "MagenticPlanReviewResponse": return MagenticPlanReviewResponse(review=[]) @staticmethod - def revise(feedback: str | list[str] | ChatMessage | list[ChatMessage]) -> "MagenticPlanReviewResponse": + def revise(feedback: str | list[str] | Message | list[Message]) -> "MagenticPlanReviewResponse": """Create a revision response with feedback.""" if isinstance(feedback, str): - feedback = [ChatMessage(role="user", text=feedback)] - elif isinstance(feedback, ChatMessage): + feedback = [Message(role="user", text=feedback)] + elif isinstance(feedback, Message): feedback = [feedback] elif isinstance(feedback, list): - feedback = [ChatMessage(role="user", text=item) if isinstance(item, str) else item for item in feedback] + feedback = [Message(role="user", text=item) if isinstance(item, str) else item for item in feedback] return MagenticPlanReviewResponse(review=feedback) @@ -820,7 +824,7 @@ class MagenticPlanReviewRequest: is_stalled: Whether the workflow is currently stalled. 
""" - plan: ChatMessage + plan: Message current_progress: MagenticProgressLedger | None is_stalled: bool @@ -828,7 +832,7 @@ def approve(self) -> MagenticPlanReviewResponse: """Create an approval response.""" return MagenticPlanReviewResponse.approve() - def revise(self, feedback: str | list[str] | ChatMessage | list[ChatMessage]) -> MagenticPlanReviewResponse: + def revise(self, feedback: str | list[str] | Message | list[Message]) -> MagenticPlanReviewResponse: """Create a revision response with feedback.""" return MagenticPlanReviewResponse.revise(feedback) @@ -877,7 +881,7 @@ def __init__( # Task related state self._magentic_context: MagenticContext | None = None - self._task_ledger: ChatMessage | None = None + self._task_ledger: Message | None = None self._progress_ledger: MagenticProgressLedger | None = None # Termination related state @@ -887,8 +891,8 @@ def __init__( @override async def _handle_messages( self, - messages: list[ChatMessage], - ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[ChatMessage]], + messages: list[Message], + ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[Message]], ) -> None: """Handle the initial task messages to start the workflow.""" if self._terminated: @@ -942,7 +946,7 @@ async def _handle_messages( async def _handle_response( self, response: AgentExecutorResponse | GroupChatResponseMessage, - ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[ChatMessage]], + ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[Message]], ) -> None: """Handle a response message from a participant.""" if self._magentic_context is None or self._task_ledger is None: @@ -968,7 +972,7 @@ async def handle_plan_review_response( self, original_request: MagenticPlanReviewRequest, response: MagenticPlanReviewResponse, - ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[ChatMessage]], + ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[Message]], ) -> None: """Handle the human response to the plan review request. 
@@ -1029,7 +1033,7 @@ async def _send_plan_review_request(self, ctx: WorkflowContext, is_stalled: bool async def _run_inner_loop( self, - ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[ChatMessage]], + ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[Message]], ) -> None: """Run the inner orchestration loop. Coordination phase. Serialized with a lock.""" if self._magentic_context is None or self._task_ledger is None: @@ -1039,16 +1043,14 @@ async def _run_inner_loop( async def _run_inner_loop_helper( self, - ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[ChatMessage]], + ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[Message]], ) -> None: """Run inner loop with exclusive access.""" # Narrow optional context for the remainder of this method if self._magentic_context is None: raise RuntimeError("Context not initialized") # Check limits first - within_limits = await self._check_within_limits_or_complete( - cast(WorkflowContext[Never, list[ChatMessage]], ctx) - ) + within_limits = await self._check_within_limits_or_complete(cast(WorkflowContext[Never, list[Message]], ctx)) if not within_limits: return @@ -1083,7 +1085,7 @@ async def _run_inner_loop_helper( # Check for task completion if self._progress_ledger.is_request_satisfied.answer: logger.info("Magentic Orchestrator: Task completed") - await self._prepare_final_answer(cast(WorkflowContext[Never, list[ChatMessage]], ctx)) + await self._prepare_final_answer(cast(WorkflowContext[Never, list[Message]], ctx)) return # Check for stalling or looping @@ -1107,11 +1109,11 @@ async def _run_inner_loop_helper( if next_speaker not in self._participant_registry.participants: logger.warning(f"Invalid next speaker: {next_speaker}") - await self._prepare_final_answer(cast(WorkflowContext[Never, list[ChatMessage]], ctx)) + await self._prepare_final_answer(cast(WorkflowContext[Never, list[Message]], ctx)) return # Add instruction to conversation (assistant guidance) - instruction_msg = ChatMessage( 
+ instruction_msg = Message( role="assistant", text=str(instruction), author_name=MAGENTIC_MANAGER_NAME, @@ -1128,7 +1130,7 @@ async def _run_inner_loop_helper( async def _reset_and_replan( self, - ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[ChatMessage]], + ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[Message]], ) -> None: """Reset context and replan.""" if self._magentic_context is None: @@ -1166,7 +1168,7 @@ async def _reset_and_replan( async def _run_outer_loop( self, - ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[ChatMessage]], + ctx: WorkflowContext[GroupChatWorkflowContextOutT, list[Message]], ) -> None: """Run the outer orchestration loop - planning phase.""" if self._magentic_context is None: @@ -1183,7 +1185,7 @@ async def _run_outer_loop( # Start inner loop await self._run_inner_loop(ctx) - async def _prepare_final_answer(self, ctx: WorkflowContext[Never, list[ChatMessage]]) -> None: + async def _prepare_final_answer(self, ctx: WorkflowContext[Never, list[Message]]) -> None: """Prepare the final answer using the manager.""" if self._magentic_context is None: raise RuntimeError("Context not initialized") @@ -1196,7 +1198,7 @@ async def _prepare_final_answer(self, ctx: WorkflowContext[Never, list[ChatMessa self._terminated = True - async def _check_within_limits_or_complete(self, ctx: WorkflowContext[Never, list[ChatMessage]]) -> bool: + async def _check_within_limits_or_complete(self, ctx: WorkflowContext[Never, list[Message]]) -> bool: """Check if orchestrator is within operational limits. If limits are exceeded, yield a termination message and mark the workflow as terminated. 
@@ -1223,7 +1225,7 @@ async def _check_within_limits_or_complete(self, ctx: WorkflowContext[Never, lis # Yield the full conversation with an indication of termination due to limits await ctx.yield_output([ *self._magentic_context.chat_history, - ChatMessage( + Message( role="assistant", text=f"Workflow terminated due to reaching maximum {limit_type} count.", author_name=MAGENTIC_MANAGER_NAME, @@ -1362,7 +1364,7 @@ class MagenticBuilder: Human-in-the-loop Support: Magentic provides specialized HITL mechanisms via: - - `enable_plan_review=True` - Review and approve/revise plans before execution + - `.with_plan_review()` - Review and approve/revise plans before execution - `.with_human_input_on_stall()` - Intervene when workflow stalls - Tool approval via `function_approval_request` - Approve individual tool calls @@ -1427,12 +1429,12 @@ def __init__( self._manager_factory: Callable[[], MagenticManagerBase] | None = None self._manager_agent_factory: Callable[[], SupportsAgentRun] | None = None self._standard_manager_options: dict[str, Any] = {} - self._enable_plan_review: bool = enable_plan_review + self._enable_plan_review: bool = False - self._checkpoint_storage: CheckpointStorage | None = checkpoint_storage + self._checkpoint_storage: CheckpointStorage | None = None # Intermediate outputs - self._intermediate_outputs = intermediate_outputs + self._intermediate_outputs = False self._set_participants(participants) @@ -1459,7 +1461,7 @@ def __init__( def _set_participants(self, participants: Sequence[SupportsAgentRun | Executor]) -> None: """Set participants (internal).""" if self._participants: - raise ValueError("participants already set.") + raise ValueError("participants have already been set. Call participants(...) 
at most once.") if not participants: raise ValueError("participants cannot be empty.") @@ -1485,6 +1487,8 @@ def _set_participants(self, participants: Sequence[SupportsAgentRun | Executor]) self._participants = named + return self + def with_plan_review(self, enable: bool = True) -> "MagenticBuilder": """Enable or disable human-in-the-loop plan review before task execution. @@ -1510,7 +1514,9 @@ def with_plan_review(self, enable: bool = True) -> "MagenticBuilder": .. code-block:: python workflow = ( - MagenticBuilder(participants=[agent1], manager_agent=manager_agent) + MagenticBuilder() + .participants(agent1=agent1) + .with_manager(agent=manager_agent) .with_plan_review(enable=True) .build() ) @@ -1555,7 +1561,11 @@ def with_checkpointing(self, checkpoint_storage: CheckpointStorage) -> "Magentic storage = InMemoryCheckpointStorage() workflow = ( - MagenticBuilder(participants=[agent1], manager_agent=manager_agent).with_checkpointing(storage).build() + MagenticBuilder() + .participants([agent1]) + .with_manager(agent=manager_agent) + .with_checkpointing(storage) + .build() ) # First run @@ -1575,14 +1585,144 @@ def with_checkpointing(self, checkpoint_storage: CheckpointStorage) -> "Magentic self._checkpoint_storage = checkpoint_storage return self - def _set_manager( + @overload + def with_manager(self, *, manager: MagenticManagerBase) -> Self: + """Configure the workflow with a pre-defined Magentic manager instance. + + Args: + manager: A custom manager instance (subclass of MagenticManagerBase) + + Returns: + Self for method chaining + """ + ... + + @overload + def with_manager(self, *, manager_factory: Callable[[], MagenticManagerBase]) -> Self: + """Configure the workflow with a factory for creating custom Magentic manager instances. + + Args: + manager_factory: Callable that returns a new MagenticManagerBase instance + + Returns: + Self for method chaining + """ + ... 
+ + @overload + def with_manager( + self, + *, + agent: SupportsAgentRun, + task_ledger: _MagenticTaskLedger | None = None, + # Prompt overrides + task_ledger_facts_prompt: str | None = None, + task_ledger_plan_prompt: str | None = None, + task_ledger_full_prompt: str | None = None, + task_ledger_facts_update_prompt: str | None = None, + task_ledger_plan_update_prompt: str | None = None, + progress_ledger_prompt: str | None = None, + final_answer_prompt: str | None = None, + # Limits + max_stall_count: int = 3, + max_reset_count: int | None = None, + max_round_count: int | None = None, + ) -> Self: + """Configure the workflow with an agent for creating a standard manager. + + This will create a StandardMagenticManager using the provided agent. + + Args: + agent: SupportsAgentRun instance for the standard magentic manager + (`StandardMagenticManager`) + task_ledger: Optional custom task ledger implementation for specialized + prompting or structured output requirements + task_ledger_facts_prompt: Custom prompt template for extracting facts from + task description + task_ledger_plan_prompt: Custom prompt template for generating initial plan + task_ledger_full_prompt: Custom prompt template for complete task ledger + (facts + plan combined) + task_ledger_facts_update_prompt: Custom prompt template for updating facts + based on agent progress + task_ledger_plan_update_prompt: Custom prompt template for replanning when + needed + progress_ledger_prompt: Custom prompt template for assessing progress and + determining next actions + final_answer_prompt: Custom prompt template for synthesizing final response + when task is complete + max_stall_count: Maximum consecutive rounds without progress before triggering + replan (default 3). Set to 0 to disable stall detection. + max_reset_count: Maximum number of complete resets allowed before failing. + None means unlimited resets. + max_round_count: Maximum total coordination rounds before stopping with + partial result. 
None means unlimited rounds. + + Returns: + Self for method chaining + """ + ... + + @overload + def with_manager( + self, + *, + agent_factory: Callable[[], SupportsAgentRun], + task_ledger: _MagenticTaskLedger | None = None, + # Prompt overrides + task_ledger_facts_prompt: str | None = None, + task_ledger_plan_prompt: str | None = None, + task_ledger_full_prompt: str | None = None, + task_ledger_facts_update_prompt: str | None = None, + task_ledger_plan_update_prompt: str | None = None, + progress_ledger_prompt: str | None = None, + final_answer_prompt: str | None = None, + # Limits + max_stall_count: int = 3, + max_reset_count: int | None = None, + max_round_count: int | None = None, + ) -> Self: + """Configure the workflow with a factory for creating the manager agent. + + This will create a StandardMagenticManager using the provided agent factory. + + Args: + agent_factory: Callable that returns a new SupportsAgentRun instance for the standard + magentic manager (`StandardMagenticManager`) + task_ledger: Optional custom task ledger implementation for specialized + prompting or structured output requirements + task_ledger_facts_prompt: Custom prompt template for extracting facts from + task description + task_ledger_plan_prompt: Custom prompt template for generating initial plan + task_ledger_full_prompt: Custom prompt template for complete task ledger + (facts + plan combined) + task_ledger_facts_update_prompt: Custom prompt template for updating facts + based on agent progress + task_ledger_plan_update_prompt: Custom prompt template for replanning when + needed + progress_ledger_prompt: Custom prompt template for assessing progress and + determining next actions + final_answer_prompt: Custom prompt template for synthesizing final response + when task is complete + max_stall_count: Maximum consecutive rounds without progress before triggering + replan (default 3). Set to 0 to disable stall detection. 
+ max_reset_count: Maximum number of complete resets allowed before failing. + None means unlimited resets. + max_round_count: Maximum total coordination rounds before stopping with + partial result. None means unlimited rounds. + + Returns: + Self for method chaining + """ + ... + + def with_manager( self, *, manager: MagenticManagerBase | None = None, manager_factory: Callable[[], MagenticManagerBase] | None = None, - manager_agent: SupportsAgentRun | None = None, - manager_agent_factory: Callable[[], SupportsAgentRun] | None = None, + agent_factory: Callable[[], SupportsAgentRun] | None = None, # Constructor args for StandardMagenticManager when manager is not provided + agent: SupportsAgentRun | None = None, task_ledger: _MagenticTaskLedger | None = None, # Prompt overrides task_ledger_facts_prompt: str | None = None, @@ -1596,37 +1736,123 @@ def _set_manager( max_stall_count: int = 3, max_reset_count: int | None = None, max_round_count: int | None = None, - ) -> None: - """Configure the workflow manager for task planning and agent coordination (internal). + ) -> Self: + """Configure the workflow manager for task planning and agent coordination. + + The manager is responsible for creating plans, selecting agents, tracking progress, + and deciding when to replan or complete. This method supports four usage patterns: + + 1. **Provide existing manager**: Pass a pre-configured manager instance (custom + or standard) for full control over behavior + 2. **Factory for custom manager**: Pass a callable that returns a new manager + instance for more advanced scenarios so that the builder can be reused + 3. **Factory for agent**: Pass a callable that returns a new agent instance to + automatically create a `StandardMagenticManager` + 4. **Auto-create with agent**: Pass an agent to automatically create a `StandardMagenticManager` Args: - manager: Pre-configured manager instance. 
+ manager: Pre-configured manager instance (`StandardMagenticManager` or custom + `MagenticManagerBase` subclass). If provided, all other arguments are ignored. manager_factory: Callable that returns a new manager instance. - manager_agent: Agent instance for creating a StandardMagenticManager. - manager_agent_factory: Callable that returns a new agent instance for creating a StandardMagenticManager. - task_ledger: Optional custom task ledger implementation. - task_ledger_facts_prompt: Custom prompt for extracting facts. - task_ledger_plan_prompt: Custom prompt for generating initial plan. - task_ledger_full_prompt: Custom prompt for complete task ledger. - task_ledger_facts_update_prompt: Custom prompt for updating facts. - task_ledger_plan_update_prompt: Custom prompt for replanning. - progress_ledger_prompt: Custom prompt for assessing progress. - final_answer_prompt: Custom prompt for synthesizing final response. - max_stall_count: Max consecutive rounds without progress before replan (default 3). - max_reset_count: Max number of resets allowed. None means unlimited. - max_round_count: Max total coordination rounds. None means unlimited. + agent_factory: Callable that returns a new agent instance. + agent: Agent instance for generating plans and decisions. The agent's + configured instructions and options (temperature, seed, etc.) will be + applied. 
+ task_ledger: Optional custom task ledger implementation for specialized + prompting or structured output requirements + task_ledger_facts_prompt: Custom prompt template for extracting facts from + task description + task_ledger_plan_prompt: Custom prompt template for generating initial plan + task_ledger_full_prompt: Custom prompt template for complete task ledger + (facts + plan combined) + task_ledger_facts_update_prompt: Custom prompt template for updating facts + based on agent progress + task_ledger_plan_update_prompt: Custom prompt template for replanning when + needed + progress_ledger_prompt: Custom prompt template for assessing progress and + determining next actions + final_answer_prompt: Custom prompt template for synthesizing final response + when task is complete + max_stall_count: Maximum consecutive rounds without progress before triggering + replan (default 3). Set to 0 to disable stall detection. + max_reset_count: Maximum number of complete resets allowed before failing. + None means unlimited resets. + max_round_count: Maximum total coordination rounds before stopping with + partial result. None means unlimited rounds. + + Returns: + Self for method chaining Raises: - ValueError: If a manager has already been set or if none or multiple - of the primary parameters are provided. + ValueError: If the manager has already been configured, or if not exactly one + of manager, agent, manager_factory, or agent_factory is provided. + + Usage with agent (recommended): + + ..
code-block:: python + + from agent_framework import Agent, ChatOptions + from agent_framework.openai import OpenAIChatClient + + # Configure manager agent with specific options and instructions + manager_agent = Agent( + name="Coordinator", + chat_client=OpenAIChatClient(model_id="gpt-4o"), + options=ChatOptions(temperature=0.3, seed=42), + instructions="Be concise and focus on accuracy", + ) + + workflow = ( + MagenticBuilder() + .participants(agent1=agent1, agent2=agent2) + .with_manager( + agent=manager_agent, + max_round_count=20, + max_stall_count=3, + ) + .build() + ) + + Usage with custom manager: + + .. code-block:: python + + class MyManager(MagenticManagerBase): + async def plan(self, context: MagenticContext) -> Message: + # Custom planning logic + return Message(role="assistant", text="...") + + + manager = MyManager() + workflow = MagenticBuilder().participants(agent1=agent1).with_manager(manager=manager).build() + + Usage with prompt customization: + + .. code-block:: python + + workflow = ( + MagenticBuilder() + .participants(coder=coder_agent, reviewer=reviewer_agent) + .with_manager( + agent=manager_agent, + task_ledger_plan_prompt="Create a detailed step-by-step plan...", + progress_ledger_prompt="Assess progress and decide next action...", + max_stall_count=2, + ) + .build() + ) + + Notes: + - StandardMagenticManager uses structured LLM calls for all decisions + - Custom managers can implement alternative selection strategies + - Prompt templates support Jinja2-style variable substitution + - Stall detection helps prevent infinite loops in stuck scenarios + - The agent's instructions are used as system instructions for all manager prompts """ if any([self._manager, self._manager_factory, self._manager_agent_factory]): - raise ValueError("Manager has already been configured.
Set manager config once only.") + raise ValueError("with_manager() has already been called on this builder instance.") - if sum(x is not None for x in [manager, manager_agent, manager_factory, manager_agent_factory]) != 1: - raise ValueError( - "Exactly one of manager, manager_agent, manager_factory, or manager_agent_factory must be provided." - ) + if sum(x is not None for x in [manager, agent, manager_factory, agent_factory]) != 1: + raise ValueError("Exactly one of manager, agent, manager_factory, or agent_factory must be provided.") def _log_warning_if_constructor_args_provided() -> None: if any( @@ -1645,14 +1871,14 @@ def _log_warning_if_constructor_args_provided() -> None: max_round_count, ] ): - logger.warning("Custom manager provided; all other manager arguments will be ignored.") + logger.warning("Custom manager provided; all other with_manager() arguments will be ignored.") if manager is not None: self._manager = manager _log_warning_if_constructor_args_provided() - elif manager_agent is not None: + elif agent is not None: self._manager = StandardMagenticManager( - agent=manager_agent, + agent=agent, task_ledger=task_ledger, task_ledger_facts_prompt=task_ledger_facts_prompt, task_ledger_plan_prompt=task_ledger_plan_prompt, @@ -1668,8 +1894,8 @@ def _log_warning_if_constructor_args_provided() -> None: elif manager_factory is not None: self._manager_factory = manager_factory _log_warning_if_constructor_args_provided() - elif manager_agent_factory is not None: - self._manager_agent_factory = manager_agent_factory + elif agent_factory is not None: + self._manager_agent_factory = agent_factory self._standard_manager_options = { "task_ledger": task_ledger, "task_ledger_facts_prompt": task_ledger_facts_prompt, @@ -1684,6 +1910,21 @@ def _log_warning_if_constructor_args_provided() -> None: "max_round_count": max_round_count, } + return self + + def with_intermediate_outputs(self) -> Self: + """Enable intermediate outputs from agent participants before 
aggregation. + + When enabled, the workflow returns each agent participant's response or yields + streaming updates as they become available. The output of the orchestrator will + always be available as the final output of the workflow. + + Returns: + Self for fluent chaining + """ + self._intermediate_outputs = True + return self + def _resolve_orchestrator(self, participants: Sequence[Executor]) -> Executor: """Determine the orchestrator to use for the workflow. @@ -1691,11 +1932,8 @@ def _resolve_orchestrator(self, participants: Sequence[Executor]) -> Executor: participants: List of resolved participant executors """ if all(x is None for x in [self._manager, self._manager_factory, self._manager_agent_factory]): - raise ValueError( - "No manager configured. " - "Pass manager, manager_factory, manager_agent, or manager_agent_factory to the constructor." - ) + raise ValueError("No manager configured. Call with_manager(...) before building the orchestrator.") + # We don't need to check if multiple are set since that is handled in with_manager() if self._manager: manager = self._manager @@ -1709,10 +1947,7 @@ def _resolve_orchestrator(self, participants: Sequence[Executor]) -> Executor: ) else: # This should never be reached due to the checks above - raise RuntimeError( - "Manager could not be resolved. " - "Pass manager, manager_factory, manager_agent, or manager_agent_factory to the constructor." - ) + raise RuntimeError("Manager could not be resolved. 
Please set the manager properly with with_manager().") return MagenticOrchestrator( manager=manager, @@ -1748,15 +1983,17 @@ def build(self) -> Workflow: orchestrator: Executor = self._resolve_orchestrator(participants) # Build workflow graph - workflow_builder = WorkflowBuilder( - start_executor=orchestrator, - checkpoint_storage=self._checkpoint_storage, - output_executors=[orchestrator] if not self._intermediate_outputs else None, - ) + workflow_builder = WorkflowBuilder().set_start_executor(orchestrator) for participant in participants: # Orchestrator and participant bi-directional edges workflow_builder = workflow_builder.add_edge(orchestrator, participant) workflow_builder = workflow_builder.add_edge(participant, orchestrator) + if self._checkpoint_storage is not None: + workflow_builder = workflow_builder.with_checkpointing(self._checkpoint_storage) + + if not self._intermediate_outputs: + # Constrain output to orchestrator only + workflow_builder = workflow_builder.with_output_from([orchestrator]) return workflow_builder.build() diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_orchestration_request_info.py b/python/packages/orchestrations/agent_framework_orchestrations/_orchestration_request_info.py index 51f4e27898..5e4a5d6a28 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_orchestration_request_info.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_orchestration_request_info.py @@ -3,7 +3,7 @@ from dataclasses import dataclass from agent_framework._agents import SupportsAgentRun -from agent_framework._types import ChatMessage +from agent_framework._types import Message from agent_framework._workflows._agent_executor import AgentExecutor, AgentExecutorRequest, AgentExecutorResponse from agent_framework._workflows._agent_utils import resolve_agent_id from agent_framework._workflows._executor import Executor, handler @@ -44,18 +44,18 @@ class AgentRequestInfoResponse: """Response 
containing additional information requested from users for agents. Attributes: - messages: list[ChatMessage]: Additional messages provided by users. If empty, + messages: list[Message]: Additional messages provided by users. If empty, the agent response is approved as-is. """ - messages: list[ChatMessage] + messages: list[Message] @staticmethod - def from_messages(messages: list[ChatMessage]) -> "AgentRequestInfoResponse": + def from_messages(messages: list[Message]) -> "AgentRequestInfoResponse": """Create an AgentRequestInfoResponse from a list of ChatMessages. Args: - messages: List of ChatMessage instances provided by users. + messages: List of Message instances provided by users. Returns: AgentRequestInfoResponse instance. @@ -72,7 +72,7 @@ def from_strings(texts: list[str]) -> "AgentRequestInfoResponse": Returns: AgentRequestInfoResponse instance. """ - return AgentRequestInfoResponse(messages=[ChatMessage(role="user", text=text) for text in texts]) + return AgentRequestInfoResponse(messages=[Message(role="user", text=text) for text in texts]) @staticmethod def approve() -> "AgentRequestInfoResponse": diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_orchestration_state.py b/python/packages/orchestrations/agent_framework_orchestrations/_orchestration_state.py index fe8ba64126..0f23f96dc0 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_orchestration_state.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_orchestration_state.py @@ -11,11 +11,11 @@ from dataclasses import dataclass, field from typing import Any -from agent_framework._types import ChatMessage +from agent_framework._types import Message -def _new_chat_message_list() -> list[ChatMessage]: - """Factory function for typed empty ChatMessage list. +def _new_chat_message_list() -> list[Message]: + """Factory function for typed empty Message list. Satisfies the type checker. 
""" @@ -47,11 +47,11 @@ class OrchestrationState: task: Optional primary task/question being orchestrated """ - conversation: list[ChatMessage] = field(default_factory=_new_chat_message_list) + conversation: list[Message] = field(default_factory=_new_chat_message_list) round_index: int = 0 orchestrator_name: str = "" metadata: dict[str, Any] = field(default_factory=_new_metadata_dict) - task: ChatMessage | None = None + task: Message | None = None def to_dict(self) -> dict[str, Any]: """Serialize to dict for checkpointing. diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_orchestrator_helpers.py b/python/packages/orchestrations/agent_framework_orchestrations/_orchestrator_helpers.py index c48af3c6de..757e77f095 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_orchestrator_helpers.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_orchestrator_helpers.py @@ -8,12 +8,12 @@ import logging -from agent_framework._types import ChatMessage +from agent_framework._types import Message logger = logging.getLogger(__name__) -def clean_conversation_for_handoff(conversation: list[ChatMessage]) -> list[ChatMessage]: +def clean_conversation_for_handoff(conversation: list[Message]) -> list[Message]: """Remove tool-related content from conversation for clean handoffs. 
During handoffs, tool calls can cause API errors because: @@ -37,7 +37,7 @@ def clean_conversation_for_handoff(conversation: list[ChatMessage]) -> list[Chat Returns: Cleaned conversation safe for handoff routing """ - cleaned: list[ChatMessage] = [] + cleaned: list[Message] = [] for msg in conversation: # Skip tool response messages entirely if msg.role == "tool": @@ -58,7 +58,7 @@ def clean_conversation_for_handoff(conversation: list[ChatMessage]) -> list[Chat # Has tool content - only keep if it also has text if msg.text and msg.text.strip(): # Create fresh text-only message while preserving additional_properties - msg_copy = ChatMessage( + msg_copy = Message( role=msg.role, text=msg.text, author_name=msg.author_name, @@ -74,7 +74,7 @@ def create_completion_message( text: str | None = None, author_name: str, reason: str = "completed", -) -> ChatMessage: +) -> Message: """Create a standardized completion message. Simple helper to avoid duplicating completion message creation. @@ -85,10 +85,10 @@ def create_completion_message( reason: Reason for completion (for default text generation) Returns: - ChatMessage with assistant role + Message with assistant role """ message_text = text or f"Conversation {reason}." 
- return ChatMessage( + return Message( role="assistant", text=message_text, author_name=author_name, diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_sequential.py b/python/packages/orchestrations/agent_framework_orchestrations/_sequential.py index ee9b0f2928..2779242592 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_sequential.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_sequential.py @@ -5,7 +5,7 @@ This module provides a high-level, agent-focused API to assemble a sequential workflow where: - Participants are provided as SupportsAgentRun or Executor instances via `participants=[...]` -- A shared conversation context (list[ChatMessage]) is passed along the chain +- A shared conversation context (list[Message]) is passed along the chain - Agents append their assistant messages to the context - Custom executors can transform or summarize and return a refined context - The workflow finishes with the final context produced by the last participant @@ -16,16 +16,16 @@ Notes: - Participants can mix SupportsAgentRun and Executor objects - Agents are auto-wrapped by WorkflowBuilder as AgentExecutor (unless already wrapped) -- AgentExecutor produces AgentExecutorResponse; _ResponseToConversation converts this to list[ChatMessage] -- Non-agent executors must define a handler that consumes `list[ChatMessage]` and sends back - the updated `list[ChatMessage]` via their workflow context +- AgentExecutor produces AgentExecutorResponse; _ResponseToConversation converts this to list[Message] +- Non-agent executors must define a handler that consumes `list[Message]` and sends back + the updated `list[Message]` via their workflow context Why include the small internal adapter executors? 
- Input normalization ("input-conversation"): ensures the workflow always starts with a - `list[ChatMessage]` regardless of whether callers pass a `str`, a single `ChatMessage`, + `list[Message]` regardless of whether callers pass a `str`, a single `Message`, or a list. This keeps the first hop strongly typed and avoids boilerplate in participants. - Agent response adaptation ("to-conversation:"): agents (via AgentExecutor) - emit `AgentExecutorResponse`. The adapter converts that to a `list[ChatMessage]` + emit `AgentExecutorResponse`. The adapter converts that to a `list[Message]` using `full_conversation` so original prompts aren't lost when chaining. - Result output ("end"): yields the final conversation list and the workflow becomes idle giving a consistent terminal payload shape for both agents and custom executors. @@ -40,7 +40,7 @@ from collections.abc import Sequence from typing import Any -from agent_framework import ChatMessage, SupportsAgentRun +from agent_framework import Message, SupportsAgentRun from agent_framework._workflows._agent_executor import ( AgentExecutor, AgentExecutorResponse, @@ -62,18 +62,18 @@ class _InputToConversation(Executor): - """Normalizes initial input into a list[ChatMessage] conversation.""" + """Normalizes initial input into a list[Message] conversation.""" @handler - async def from_str(self, prompt: str, ctx: WorkflowContext[list[ChatMessage]]) -> None: + async def from_str(self, prompt: str, ctx: WorkflowContext[list[Message]]) -> None: await ctx.send_message(normalize_messages_input(prompt)) @handler - async def from_message(self, message: ChatMessage, ctx: WorkflowContext[list[ChatMessage]]) -> None: + async def from_message(self, message: Message, ctx: WorkflowContext[list[Message]]) -> None: await ctx.send_message(normalize_messages_input(message)) @handler - async def from_messages(self, messages: list[str | ChatMessage], ctx: WorkflowContext[list[ChatMessage]]) -> None: + async def from_messages(self, messages: 
list[str | Message], ctx: WorkflowContext[list[Message]]) -> None: await ctx.send_message(normalize_messages_input(messages)) @@ -83,10 +83,10 @@ class _EndWithConversation(Executor): @handler async def end_with_messages( self, - conversation: list[ChatMessage], - ctx: WorkflowContext[Any, list[ChatMessage]], + conversation: list[Message], + ctx: WorkflowContext[Any, list[Message]], ) -> None: - """Handler for ending with a list of ChatMessage. + """Handler for ending with a list of Message. This is used when the last participant is a custom executor. """ @@ -96,7 +96,7 @@ async def end_with_messages( async def end_with_agent_executor_response( self, response: AgentExecutorResponse, - ctx: WorkflowContext[Any, list[ChatMessage] | None], + ctx: WorkflowContext[Any, list[Message] | None], ) -> None: """Handle case where last participant is an agent. @@ -109,10 +109,10 @@ class SequentialBuilder: r"""High-level builder for sequential agent/executor workflows with shared context. - `participants=[...]` accepts a list of SupportsAgentRun (recommended) or Executor instances - - Executors must define a handler that consumes list[ChatMessage] and sends out a list[ChatMessage] - - The workflow wires participants in order, passing a list[ChatMessage] down the chain + - Executors must define a handler that consumes list[Message] and sends out a list[Message] + - The workflow wires participants in order, passing a list[Message] down the chain - Agents append their assistant messages to the conversation - - Custom executors can transform/summarize and return a list[ChatMessage] + - Custom executors can transform/summarize and return a list[Message] - The final output is the conversation produced by the last participant Usage: @@ -122,17 +122,18 @@ class SequentialBuilder: from agent_framework_orchestrations import SequentialBuilder # With agent instances - workflow = SequentialBuilder(participants=[agent1, agent2, summarizer_exec]).build() + workflow = 
SequentialBuilder().participants([agent1, agent2, summarizer_exec]).build() # Enable checkpoint persistence - workflow = SequentialBuilder(participants=[agent1, agent2], checkpoint_storage=storage).build() + workflow = SequentialBuilder().participants([agent1, agent2]).with_checkpointing(storage).build() # Enable request info for mid-workflow feedback (pauses before each agent) - workflow = SequentialBuilder(participants=[agent1, agent2]).with_request_info().build() + workflow = SequentialBuilder().participants([agent1, agent2]).with_request_info().build() # Enable request info only for specific agents workflow = ( - SequentialBuilder(participants=[agent1, agent2, agent3]) + SequentialBuilder() + .participants([agent1, agent2, agent3]) .with_request_info(agents=[agent2]) # Only pause before agent2 .build() ) @@ -156,14 +157,14 @@ def __init__( self._checkpoint_storage: CheckpointStorage | None = checkpoint_storage self._request_info_enabled: bool = False self._request_info_filter: set[str] | None = None - self._intermediate_outputs: bool = intermediate_outputs + self._intermediate_outputs: bool = False self._set_participants(participants) def _set_participants(self, participants: Sequence[SupportsAgentRun | Executor]) -> None: """Set participants (internal).""" if self._participants: - raise ValueError("participants already set.") + raise ValueError("participants() has already been called on this builder instance.") if not participants: raise ValueError("participants cannot be empty") @@ -184,6 +185,12 @@ def _set_participants(self, participants: Sequence[SupportsAgentRun | Executor]) seen_agent_ids.add(pid) self._participants = list(participants) + return self + + def with_checkpointing(self, checkpoint_storage: CheckpointStorage) -> "SequentialBuilder": + """Enable checkpointing for the built workflow using the provided storage.""" + self._checkpoint_storage = checkpoint_storage + return self def with_request_info( self, @@ -218,6 +225,19 @@ def 
with_request_info( return self + def with_intermediate_outputs(self) -> "SequentialBuilder": + """Enable intermediate outputs from agent participants. + + When enabled, the workflow returns each agent participant's response or yields + streaming updates as they become available. The output of the last participant + will always be available as the final output of the workflow. + + Returns: + Self for fluent chaining + """ + self._intermediate_outputs = True + return self + def _resolve_participants(self) -> list[Executor]: """Resolve participant instances into Executor objects.""" if not self._participants: @@ -246,7 +266,7 @@ def build(self) -> Workflow: """Build and validate the sequential workflow. Wiring pattern: - - _InputToConversation normalizes the initial input into list[ChatMessage] + - _InputToConversation normalizes the initial input into list[Message] - For each participant in order: - If Agent (or AgentExecutor): pass conversation to the agent, then optionally route through a request info interceptor, then convert response to conversation @@ -261,11 +281,8 @@ def build(self) -> Workflow: # Resolve participants and participant factories to executors participants: list[Executor] = self._resolve_participants() - builder = WorkflowBuilder( - start_executor=input_conv, - checkpoint_storage=self._checkpoint_storage, - output_executors=[end] if not self._intermediate_outputs else None, - ) + builder = WorkflowBuilder() + builder.set_start_executor(input_conv) # Start of the chain is the input normalizer prior: Executor | SupportsAgentRun = input_conv @@ -275,4 +292,11 @@ def build(self) -> Workflow: # Terminate with the final conversation builder.add_edge(prior, end) + if not self._intermediate_outputs: + # Constrain output to end only + builder = builder.with_output_from([end]) + + if self._checkpoint_storage is not None: + builder = builder.with_checkpointing(self._checkpoint_storage) + return builder.build() diff --git 
a/python/packages/orchestrations/tests/test_concurrent.py b/python/packages/orchestrations/tests/test_concurrent.py index cca8187fcd..55100af4c3 100644 --- a/python/packages/orchestrations/tests/test_concurrent.py +++ b/python/packages/orchestrations/tests/test_concurrent.py @@ -7,8 +7,8 @@ AgentExecutorRequest, AgentExecutorResponse, AgentResponse, - ChatMessage, Executor, + Message, WorkflowContext, WorkflowRunState, handler, @@ -32,7 +32,7 @@ def __init__(self, id: str, reply_text: str) -> None: @handler async def run(self, request: AgentExecutorRequest, ctx: WorkflowContext[AgentExecutorResponse]) -> None: - response = AgentResponse(messages=ChatMessage(role="assistant", text=self._reply_text)) + response = AgentResponse(messages=Message(role="assistant", text=self._reply_text)) full_conversation = list(request.messages) + list(response.messages) await ctx.send_message(AgentExecutorResponse(self.id, response, full_conversation=full_conversation)) @@ -58,18 +58,18 @@ async def test_concurrent_default_aggregator_emits_single_user_and_assistants() wf = ConcurrentBuilder(participants=[e1, e2, e3]).build() completed = False - output: list[ChatMessage] | None = None + output: list[Message] | None = None async for ev in wf.run("prompt: hello world", stream=True): if ev.type == "status" and ev.state == WorkflowRunState.IDLE: completed = True elif ev.type == "output": - output = cast(list[ChatMessage], ev.data) + output = cast(list[Message], ev.data) if completed and output is not None: break assert completed assert output is not None - messages: list[ChatMessage] = output + messages: list[Message] = output # Expect one user message + one assistant message per participant assert len(messages) == 1 + 3 @@ -89,7 +89,7 @@ async def test_concurrent_custom_aggregator_callback_is_used() -> None: async def summarize(results: list[AgentExecutorResponse]) -> str: texts: list[str] = [] for r in results: - msgs: list[ChatMessage] = r.agent_response.messages + msgs: list[Message] = 
r.agent_response.messages texts.append(msgs[-1].text if msgs else "") return " | ".join(sorted(texts)) @@ -120,7 +120,7 @@ async def test_concurrent_custom_aggregator_sync_callback_is_used() -> None: def summarize_sync(results: list[AgentExecutorResponse], _ctx: WorkflowContext[Any]) -> str: # type: ignore[unused-argument] texts: list[str] = [] for r in results: - msgs: list[ChatMessage] = r.agent_response.messages + msgs: list[Message] = r.agent_response.messages texts.append(msgs[-1].text if msgs else "") return " | ".join(sorted(texts)) @@ -164,7 +164,7 @@ class CustomAggregator(Executor): async def aggregate(self, results: list[AgentExecutorResponse], ctx: WorkflowContext[Never, str]) -> None: texts: list[str] = [] for r in results: - msgs: list[ChatMessage] = r.agent_response.messages + msgs: list[Message] = r.agent_response.messages texts.append(msgs[-1].text if msgs else "") await ctx.yield_output(" & ".join(sorted(texts))) @@ -215,7 +215,7 @@ async def test_concurrent_checkpoint_resume_round_trip() -> None: wf = ConcurrentBuilder(participants=list(participants), checkpoint_storage=storage).build() - baseline_output: list[ChatMessage] | None = None + baseline_output: list[Message] | None = None async for ev in wf.run("checkpoint concurrent", stream=True): if ev.type == "output": baseline_output = ev.data # type: ignore[assignment] @@ -239,7 +239,7 @@ async def test_concurrent_checkpoint_resume_round_trip() -> None: ) wf_resume = ConcurrentBuilder(participants=list(resumed_participants), checkpoint_storage=storage).build() - resumed_output: list[ChatMessage] | None = None + resumed_output: list[Message] | None = None async for ev in wf_resume.run(checkpoint_id=resume_checkpoint.checkpoint_id, stream=True): if ev.type == "output": resumed_output = ev.data # type: ignore[assignment] @@ -261,7 +261,7 @@ async def test_concurrent_checkpoint_runtime_only() -> None: agents = [_FakeAgentExec(id="agent1", reply_text="A1"), _FakeAgentExec(id="agent2", 
reply_text="A2")] wf = ConcurrentBuilder(participants=agents).build() - baseline_output: list[ChatMessage] | None = None + baseline_output: list[Message] | None = None async for ev in wf.run("runtime checkpoint test", checkpoint_storage=storage, stream=True): if ev.type == "output": baseline_output = ev.data # type: ignore[assignment] @@ -282,7 +282,7 @@ async def test_concurrent_checkpoint_runtime_only() -> None: resumed_agents = [_FakeAgentExec(id="agent1", reply_text="A1"), _FakeAgentExec(id="agent2", reply_text="A2")] wf_resume = ConcurrentBuilder(participants=resumed_agents).build() - resumed_output: list[ChatMessage] | None = None + resumed_output: list[Message] | None = None async for ev in wf_resume.run( checkpoint_id=resume_checkpoint.checkpoint_id, checkpoint_storage=storage, stream=True ): @@ -311,7 +311,7 @@ async def test_concurrent_checkpoint_runtime_overrides_buildtime() -> None: agents = [_FakeAgentExec(id="agent1", reply_text="A1"), _FakeAgentExec(id="agent2", reply_text="A2")] wf = ConcurrentBuilder(participants=agents, checkpoint_storage=buildtime_storage).build() - baseline_output: list[ChatMessage] | None = None + baseline_output: list[Message] | None = None async for ev in wf.run("override test", checkpoint_storage=runtime_storage, stream=True): if ev.type == "output": baseline_output = ev.data # type: ignore[assignment] diff --git a/python/packages/orchestrations/tests/test_group_chat.py b/python/packages/orchestrations/tests/test_group_chat.py index bbd7df2c55..174e7364b4 100644 --- a/python/packages/orchestrations/tests/test_group_chat.py +++ b/python/packages/orchestrations/tests/test_group_chat.py @@ -5,16 +5,16 @@ import pytest from agent_framework import ( + Agent, AgentExecutorResponse, AgentResponse, AgentResponseUpdate, AgentThread, BaseAgent, - ChatAgent, - ChatMessage, ChatResponse, ChatResponseUpdate, Content, + Message, WorkflowEvent, WorkflowRunState, ) @@ -38,7 +38,7 @@ def __init__(self, agent_name: str, reply_text: str, 
**kwargs: Any) -> None: def run( # type: ignore[override] self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: bool = False, thread: AgentThread | None = None, @@ -49,7 +49,7 @@ def run( # type: ignore[override] return self._run_impl() async def _run_impl(self) -> AgentResponse: - response = ChatMessage(role="assistant", text=self._reply_text, author_name=self.name) + response = Message(role="assistant", text=self._reply_text, author_name=self.name) return AgentResponse(messages=[response]) async def _run_stream_impl(self) -> AsyncIterable[AgentResponseUpdate]: @@ -69,14 +69,14 @@ async def get_response( raise NotImplementedError -class StubManagerAgent(ChatAgent): +class StubManagerAgent(Agent): def __init__(self) -> None: super().__init__(chat_client=MockChatClient(), name="manager_agent", description="Stub manager") self._call_count = 0 async def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, thread: AgentThread | None = None, **kwargs: Any, @@ -87,7 +87,7 @@ async def run( payload = {"terminate": False, "reason": "Selecting agent", "next_speaker": "agent", "final_message": None} return AgentResponse( messages=[ - ChatMessage( + Message( role="assistant", text=( '{"terminate": false, "reason": "Selecting agent", ' @@ -108,7 +108,7 @@ async def run( } return AgentResponse( messages=[ - ChatMessage( + Message( role="assistant", text=( '{"terminate": true, "reason": "Task complete", ' @@ -143,10 +143,10 @@ def __init__(self) -> None: super().__init__(max_stall_count=3, max_round_count=5) self._round = 0 - async def plan(self, magentic_context: MagenticContext) -> ChatMessage: - return ChatMessage(role="assistant", text="plan", author_name="magentic_manager") + async def plan(self, magentic_context: MagenticContext) -> Message: + return 
Message(role="assistant", text="plan", author_name="magentic_manager") - async def replan(self, magentic_context: MagenticContext) -> ChatMessage: + async def replan(self, magentic_context: MagenticContext) -> Message: return await self.plan(magentic_context) async def create_progress_ledger(self, magentic_context: MagenticContext) -> MagenticProgressLedger: @@ -169,8 +169,8 @@ async def create_progress_ledger(self, magentic_context: MagenticContext) -> Mag instruction_or_question=MagenticProgressLedgerItem(reason="", answer=""), ) - async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatMessage: - return ChatMessage(role="assistant", text="final", author_name="magentic_manager") + async def prepare_final_answer(self, magentic_context: MagenticContext) -> Message: + return Message(role="assistant", text="final", author_name="magentic_manager") async def test_group_chat_builder_basic_flow() -> None: @@ -178,19 +178,20 @@ async def test_group_chat_builder_basic_flow() -> None: alpha = StubAgent("alpha", "ack from alpha") beta = StubAgent("beta", "ack from beta") - workflow = GroupChatBuilder( - participants=[alpha, beta], - max_rounds=2, # Limit rounds to prevent infinite loop - selection_func=selector, - orchestrator_name="manager", - ).build() + workflow = ( + GroupChatBuilder() + .with_orchestrator(selection_func=selector, orchestrator_name="manager") + .participants([alpha, beta]) + .with_max_rounds(2) # Limit rounds to prevent infinite loop + .build() + ) - outputs: list[list[ChatMessage]] = [] + outputs: list[list[Message]] = [] async for event in workflow.run("coordinate task", stream=True): if event.type == "output": data = event.data if isinstance(data, list): - outputs.append(cast(list[ChatMessage], data)) + outputs.append(cast(list[Message], data)) assert len(outputs) == 1 assert len(outputs[0]) >= 1 @@ -204,17 +205,18 @@ async def test_group_chat_as_agent_accepts_conversation() -> None: alpha = StubAgent("alpha", "ack from alpha") 
beta = StubAgent("beta", "ack from beta") - workflow = GroupChatBuilder( - participants=[alpha, beta], - max_rounds=2, # Limit rounds to prevent infinite loop - selection_func=selector, - orchestrator_name="manager", - ).build() + workflow = ( + GroupChatBuilder() + .with_orchestrator(selection_func=selector, orchestrator_name="manager") + .participants([alpha, beta]) + .with_max_rounds(2) # Limit rounds to prevent infinite loop + .build() + ) agent = workflow.as_agent(name="group-chat-agent") conversation = [ - ChatMessage(role="user", text="kickoff", author_name="user"), - ChatMessage(role="assistant", text="noted", author_name="alpha"), + Message(role="user", text="kickoff", author_name="user"), + Message(role="assistant", text="noted", author_name="alpha"), ] response = await agent.run(conversation) @@ -231,11 +233,10 @@ def test_build_without_manager_raises_error(self) -> None: """Test that building without a manager raises ValueError.""" agent = StubAgent("test", "response") - builder = GroupChatBuilder(participants=[agent]) + builder = GroupChatBuilder().participants([agent]) with pytest.raises( - ValueError, - match=r"No orchestrator has been configured\.", + ValueError, match=r"No orchestrator has been configured\. Call with_orchestrator\(\) to set one\." ): builder.build() @@ -251,24 +252,51 @@ def test_duplicate_manager_configuration_raises_error(self) -> None: def selector(state: GroupChatState) -> str: return "agent" + builder = GroupChatBuilder().with_orchestrator(selection_func=selector) + + with pytest.raises( + ValueError, + match=r"No participants provided\. 
Call \.participants\(\) or \.register_participants\(\) first\.", + ): + builder.build() + + def test_duplicate_manager_configuration_raises_error(self) -> None: + """Test that configuring multiple managers raises ValueError.""" + + def selector(state: GroupChatState) -> str: + return "agent" + + builder = GroupChatBuilder().with_orchestrator(selection_func=selector) + with pytest.raises( ValueError, - match=r"Exactly one of", + match=r"A selection function has already been configured\. Call with_orchestrator\(\.\.\.\) once only\.", ): - GroupChatBuilder(participants=[agent], selection_func=selector, orchestrator_agent=StubManagerAgent()) + builder.with_orchestrator(selection_func=selector) def test_empty_participants_raises_error(self) -> None: """Test that empty participants list raises ValueError.""" + + def selector(state: GroupChatState) -> str: + return "agent" + + builder = GroupChatBuilder().with_orchestrator(selection_func=selector) + with pytest.raises(ValueError, match="participants cannot be empty"): - GroupChatBuilder(participants=[]) + builder.participants([]) def test_duplicate_participant_names_raises_error(self) -> None: """Test that duplicate participant names raise ValueError.""" agent1 = StubAgent("test", "response1") agent2 = StubAgent("test", "response2") + def selector(state: GroupChatState) -> str: + return "agent" + + builder = GroupChatBuilder().with_orchestrator(selection_func=selector) + with pytest.raises(ValueError, match="Duplicate participant name 'test'"): - GroupChatBuilder(participants=[agent1, agent2]) + builder.participants([agent1, agent2]) def test_agent_without_name_raises_error(self) -> None: """Test that agent without name attribute raises ValueError.""" @@ -293,15 +321,25 @@ async def _run_impl(self) -> AgentResponse: agent = AgentWithoutName() + def selector(state: GroupChatState) -> str: + return "agent" + + builder = GroupChatBuilder().with_orchestrator(selection_func=selector) + with pytest.raises(ValueError, 
match="SupportsAgentRun participants must have a non-empty name"): - GroupChatBuilder(participants=[agent]) + builder.participants([agent]) def test_empty_participant_name_raises_error(self) -> None: """Test that empty participant name raises ValueError.""" agent = StubAgent("", "response") # Agent with empty name + def selector(state: GroupChatState) -> str: + return "agent" + + builder = GroupChatBuilder().with_orchestrator(selection_func=selector) + with pytest.raises(ValueError, match="SupportsAgentRun participants must have a non-empty name"): - GroupChatBuilder(participants=[agent]) + builder.participants([agent]) class TestGroupChatWorkflow: @@ -318,18 +356,20 @@ def selector(state: GroupChatState) -> str: agent = StubAgent("agent", "response") - workflow = GroupChatBuilder( - participants=[agent], - max_rounds=2, # Limit to 2 rounds - selection_func=selector, - ).build() + workflow = ( + GroupChatBuilder() + .with_orchestrator(selection_func=selector) + .participants([agent]) + .with_max_rounds(2) # Limit to 2 rounds + .build() + ) - outputs: list[list[ChatMessage]] = [] + outputs: list[list[Message]] = [] async for event in workflow.run("test task", stream=True): if event.type == "output": data = event.data if isinstance(data, list): - outputs.append(cast(list[ChatMessage], data)) + outputs.append(cast(list[Message], data)) # Should have terminated due to max_rounds, expect at least one output assert len(outputs) >= 1 @@ -345,24 +385,26 @@ async def test_termination_condition_halts_conversation(self) -> None: def selector(state: GroupChatState) -> str: return "agent" - def termination_condition(conversation: list[ChatMessage]) -> bool: + def termination_condition(conversation: list[Message]) -> bool: replies = [msg for msg in conversation if msg.role == "assistant" and msg.author_name == "agent"] return len(replies) >= 2 agent = StubAgent("agent", "response") - workflow = GroupChatBuilder( - participants=[agent], - 
termination_condition=termination_condition, - selection_func=selector, - ).build() + workflow = ( + GroupChatBuilder() + .with_orchestrator(selection_func=selector) + .participants([agent]) + .with_termination_condition(termination_condition) + .build() + ) - outputs: list[list[ChatMessage]] = [] + outputs: list[list[Message]] = [] async for event in workflow.run("test task", stream=True): if event.type == "output": data = event.data if isinstance(data, list): - outputs.append(cast(list[ChatMessage], data)) + outputs.append(cast(list[Message], data)) assert outputs, "Expected termination to yield output" conversation = outputs[-1] @@ -377,18 +419,20 @@ async def test_termination_condition_agent_manager_finalizes(self) -> None: manager = StubManagerAgent() worker = StubAgent("agent", "response") - workflow = GroupChatBuilder( - participants=[worker], - termination_condition=lambda conv: any(msg.author_name == "agent" for msg in conv), - orchestrator_agent=manager, - ).build() + workflow = ( + GroupChatBuilder() + .with_orchestrator(agent=manager) + .participants([worker]) + .with_termination_condition(lambda conv: any(msg.author_name == "agent" for msg in conv)) + .build() + ) - outputs: list[list[ChatMessage]] = [] + outputs: list[list[Message]] = [] async for event in workflow.run("test task", stream=True): if event.type == "output": data = event.data if isinstance(data, list): - outputs.append(cast(list[ChatMessage], data)) + outputs.append(cast(list[Message], data)) assert outputs, "Expected termination to yield output" conversation = outputs[-1] @@ -403,7 +447,7 @@ def selector(state: GroupChatState) -> str: agent = StubAgent("agent", "response") - workflow = GroupChatBuilder(participants=[agent], selection_func=selector).build() + workflow = GroupChatBuilder().with_orchestrator(selection_func=selector).participants([agent]).build() with pytest.raises(RuntimeError, match="Selection function returned unknown participant 'unknown_agent'"): async for _ in 
workflow.run("test task", stream=True): @@ -422,19 +466,21 @@ def selector(state: GroupChatState) -> str: agent = StubAgent("agent", "response") storage = InMemoryCheckpointStorage() - workflow = GroupChatBuilder( - participants=[agent], - max_rounds=1, - checkpoint_storage=storage, - selection_func=selector, - ).build() + workflow = ( + GroupChatBuilder() + .with_orchestrator(selection_func=selector) + .participants([agent]) + .with_max_rounds(1) + .with_checkpointing(storage) + .build() + ) - outputs: list[list[ChatMessage]] = [] + outputs: list[list[Message]] = [] async for event in workflow.run("test task", stream=True): if event.type == "output": data = event.data if isinstance(data, list): - outputs.append(cast(list[ChatMessage], data)) + outputs.append(cast(list[Message], data)) assert len(outputs) == 1 # Should complete normally @@ -450,14 +496,20 @@ def selector(state: GroupChatState) -> str: agent = StubAgent("agent", "response") - workflow = GroupChatBuilder(participants=[agent], max_rounds=1, selection_func=selector).build() + workflow = ( + GroupChatBuilder() + .with_orchestrator(selection_func=selector) + .participants([agent]) + .with_max_rounds(1) + .build() + ) - with pytest.raises(ValueError, match="At least one ChatMessage is required to start the group chat workflow."): + with pytest.raises(ValueError, match="At least one Message is required to start the group chat workflow."): async for _ in workflow.run([], stream=True): pass async def test_handle_string_input(self) -> None: - """Test handling string input creates proper ChatMessage.""" + """Test handling string input creates proper Message.""" def selector(state: GroupChatState) -> str: # Verify the conversation has the user message @@ -468,20 +520,26 @@ def selector(state: GroupChatState) -> str: agent = StubAgent("agent", "response") - workflow = GroupChatBuilder(participants=[agent], max_rounds=1, selection_func=selector).build() + workflow = ( + GroupChatBuilder() + 
.with_orchestrator(selection_func=selector) + .participants([agent]) + .with_max_rounds(1) + .build() + ) - outputs: list[list[ChatMessage]] = [] + outputs: list[list[Message]] = [] async for event in workflow.run("test string", stream=True): if event.type == "output": data = event.data if isinstance(data, list): - outputs.append(cast(list[ChatMessage], data)) + outputs.append(cast(list[Message], data)) assert len(outputs) == 1 async def test_handle_chat_message_input(self) -> None: - """Test handling ChatMessage input directly.""" - task_message = ChatMessage(role="user", text="test message") + """Test handling Message input directly.""" + task_message = Message(role="user", text="test message") def selector(state: GroupChatState) -> str: # Verify the task message was preserved in conversation @@ -491,22 +549,28 @@ def selector(state: GroupChatState) -> str: agent = StubAgent("agent", "response") - workflow = GroupChatBuilder(participants=[agent], max_rounds=1, selection_func=selector).build() + workflow = ( + GroupChatBuilder() + .with_orchestrator(selection_func=selector) + .participants([agent]) + .with_max_rounds(1) + .build() + ) - outputs: list[list[ChatMessage]] = [] + outputs: list[list[Message]] = [] async for event in workflow.run(task_message, stream=True): if event.type == "output": data = event.data if isinstance(data, list): - outputs.append(cast(list[ChatMessage], data)) + outputs.append(cast(list[Message], data)) assert len(outputs) == 1 async def test_handle_conversation_list_input(self) -> None: """Test handling conversation list preserves context.""" conversation = [ - ChatMessage(role="system", text="system message"), - ChatMessage(role="user", text="user message"), + Message(role="system", text="system message"), + Message(role="user", text="user message"), ] def selector(state: GroupChatState) -> str: @@ -517,14 +581,20 @@ def selector(state: GroupChatState) -> str: agent = StubAgent("agent", "response") - workflow = 
GroupChatBuilder(participants=[agent], max_rounds=1, selection_func=selector).build() + workflow = ( + GroupChatBuilder() + .with_orchestrator(selection_func=selector) + .participants([agent]) + .with_max_rounds(1) + .build() + ) - outputs: list[list[ChatMessage]] = [] + outputs: list[list[Message]] = [] async for event in workflow.run(conversation, stream=True): if event.type == "output": data = event.data if isinstance(data, list): - outputs.append(cast(list[ChatMessage], data)) + outputs.append(cast(list[Message], data)) assert len(outputs) == 1 @@ -543,18 +613,20 @@ def selector(state: GroupChatState) -> str: agent = StubAgent("agent", "response") - workflow = GroupChatBuilder( - participants=[agent], - max_rounds=1, # Very low limit - selection_func=selector, - ).build() + workflow = ( + GroupChatBuilder() + .with_orchestrator(selection_func=selector) + .participants([agent]) + .with_max_rounds(1) # Very low limit + .build() + ) - outputs: list[list[ChatMessage]] = [] + outputs: list[list[Message]] = [] async for event in workflow.run("test", stream=True): if event.type == "output": data = event.data if isinstance(data, list): - outputs.append(cast(list[ChatMessage], data)) + outputs.append(cast(list[Message], data)) # Should have at least one output (the round limit message) assert len(outputs) >= 1 @@ -576,18 +648,20 @@ def selector(state: GroupChatState) -> str: agent = StubAgent("agent", "response from agent") - workflow = GroupChatBuilder( - participants=[agent], - max_rounds=1, # Hit limit after first response - selection_func=selector, - ).build() + workflow = ( + GroupChatBuilder() + .with_orchestrator(selection_func=selector) + .participants([agent]) + .with_max_rounds(1) # Hit limit after first response + .build() + ) - outputs: list[list[ChatMessage]] = [] + outputs: list[list[Message]] = [] async for event in workflow.run("test", stream=True): if event.type == "output": data = event.data if isinstance(data, list): - 
outputs.append(cast(list[ChatMessage], data)) + outputs.append(cast(list[Message], data)) # Should have at least one output (the round limit message) assert len(outputs) >= 1 @@ -606,12 +680,18 @@ async def test_group_chat_checkpoint_runtime_only() -> None: agent_b = StubAgent("agentB", "Reply from B") selector = make_sequence_selector() - wf = GroupChatBuilder(participants=[agent_a, agent_b], max_rounds=2, selection_func=selector).build() + wf = ( + GroupChatBuilder() + .participants([agent_a, agent_b]) + .with_orchestrator(selection_func=selector) + .with_max_rounds(2) + .build() + ) - baseline_output: list[ChatMessage] | None = None + baseline_output: list[Message] | None = None async for ev in wf.run("runtime checkpoint test", checkpoint_storage=storage, stream=True): if ev.type == "output": - baseline_output = cast(list[ChatMessage], ev.data) if isinstance(ev.data, list) else None # type: ignore + baseline_output = cast(list[Message], ev.data) if isinstance(ev.data, list) else None # type: ignore if ev.type == "status" and ev.state in ( WorkflowRunState.IDLE, WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, @@ -638,16 +718,18 @@ async def test_group_chat_checkpoint_runtime_overrides_buildtime() -> None: agent_b = StubAgent("agentB", "Reply from B") selector = make_sequence_selector() - wf = GroupChatBuilder( - participants=[agent_a, agent_b], - max_rounds=2, - checkpoint_storage=buildtime_storage, - selection_func=selector, - ).build() - baseline_output: list[ChatMessage] | None = None + wf = ( + GroupChatBuilder() + .participants([agent_a, agent_b]) + .with_orchestrator(selection_func=selector) + .with_max_rounds(2) + .with_checkpointing(buildtime_storage) + .build() + ) + baseline_output: list[Message] | None = None async for ev in wf.run("override test", checkpoint_storage=runtime_storage, stream=True): if ev.type == "output": - baseline_output = cast(list[ChatMessage], ev.data) if isinstance(ev.data, list) else None # type: ignore + baseline_output = 
cast(list[Message], ev.data) if isinstance(ev.data, list) else None # type: ignore if ev.type == "status" and ev.state in ( WorkflowRunState.IDLE, WorkflowRunState.IDLE_WITH_PENDING_REQUESTS, @@ -683,12 +765,10 @@ async def selector(state: GroupChatState) -> str: return "alpha" workflow = ( - GroupChatBuilder( - participants=[alpha, beta], - max_rounds=2, - selection_func=selector, - orchestrator_name="manager", - ) + GroupChatBuilder() + .with_orchestrator(selection_func=selector, orchestrator_name="manager") + .participants([alpha, beta]) + .with_max_rounds(2) .with_request_info(agents=["beta"]) # Only pause before beta runs .build() ) @@ -737,12 +817,10 @@ async def selector(state: GroupChatState) -> str: return "alpha" workflow = ( - GroupChatBuilder( - participants=[alpha], - max_rounds=1, - selection_func=selector, - orchestrator_name="manager", - ) + GroupChatBuilder() + .with_orchestrator(selection_func=selector, orchestrator_name="manager") + .participants([alpha]) + .with_max_rounds(1) .with_request_info() # No filter - pause for all .build() ) @@ -761,13 +839,12 @@ async def selector(state: GroupChatState) -> str: def test_group_chat_builder_with_request_info_returns_self(): """Test that with_request_info() returns self for method chaining.""" - agent = StubAgent("test", "response") - builder = GroupChatBuilder(participants=[agent]) + builder = GroupChatBuilder() result = builder.with_request_info() assert result is builder # Also test with agents parameter - builder2 = GroupChatBuilder(participants=[agent]) + builder2 = GroupChatBuilder() result2 = builder2.with_request_info(agents=["test"]) assert result2 is builder2 @@ -781,18 +858,19 @@ def test_group_chat_builder_rejects_multiple_orchestrator_configurations(): def selector(state: GroupChatState) -> str: return list(state.participants.keys())[0] - def agent_factory() -> ChatAgent: - return cast(ChatAgent, StubManagerAgent()) + def agent_factory() -> Agent: + return cast(Agent, StubManagerAgent()) - 
agent = StubAgent("test", "response") + builder = GroupChatBuilder().with_orchestrator(selection_func=selector) - # Both selection_func and orchestrator_agent provided simultaneously - should fail - with pytest.raises(ValueError, match=r"Exactly one of"): - GroupChatBuilder(participants=[agent], selection_func=selector, orchestrator_agent=StubManagerAgent()) + # Already has a selection_func, should fail on second call + with pytest.raises(ValueError, match=r"A selection function has already been configured"): + builder.with_orchestrator(selection_func=selector) - # Test with agent_factory - already has factory, should fail with second config - with pytest.raises(ValueError, match=r"Exactly one of"): - GroupChatBuilder(participants=[agent], orchestrator_agent=agent_factory, selection_func=selector) + # Test with agent_factory + builder2 = GroupChatBuilder().with_orchestrator(agent=agent_factory) + with pytest.raises(ValueError, match=r"A factory has already been configured"): + builder2.with_orchestrator(agent=agent_factory) def test_group_chat_builder_requires_exactly_one_orchestrator_option(): @@ -801,25 +879,23 @@ def test_group_chat_builder_requires_exactly_one_orchestrator_option(): def selector(state: GroupChatState) -> str: return list(state.participants.keys())[0] - def agent_factory() -> ChatAgent: - return cast(ChatAgent, StubManagerAgent()) + def agent_factory() -> Agent: + return cast(Agent, StubManagerAgent()) - agent = StubAgent("test", "response") - - # No orchestrator options provided - only fails at build() time - with pytest.raises(ValueError, match="No orchestrator has been configured"): - GroupChatBuilder(participants=[agent]).build() + # No options provided + with pytest.raises(ValueError, match="Exactly one of"): + GroupChatBuilder().with_orchestrator() # type: ignore # Multiple options provided with pytest.raises(ValueError, match="Exactly one of"): - GroupChatBuilder(participants=[agent], selection_func=selector, 
orchestrator_agent=agent_factory) + GroupChatBuilder().with_orchestrator(selection_func=selector, agent=agent_factory) # type: ignore async def test_group_chat_with_orchestrator_factory_returning_chat_agent(): - """Test workflow creation using orchestrator_factory that returns ChatAgent.""" + """Test workflow creation using orchestrator_factory that returns Agent.""" factory_call_count = 0 - class DynamicManagerAgent(ChatAgent): + class DynamicManagerAgent(Agent): """Manager agent that dynamically selects from available participants.""" def __init__(self) -> None: @@ -828,7 +904,7 @@ def __init__(self) -> None: async def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, thread: AgentThread | None = None, **kwargs: Any, @@ -843,7 +919,7 @@ async def run( } return AgentResponse( messages=[ - ChatMessage( + Message( role="assistant", text=( '{"terminate": false, "reason": "Selecting alpha", ' @@ -863,7 +939,7 @@ async def run( } return AgentResponse( messages=[ - ChatMessage( + Message( role="assistant", text=( '{"terminate": true, "reason": "Task complete", ' @@ -875,15 +951,15 @@ async def run( value=payload, ) - def agent_factory() -> ChatAgent: + def agent_factory() -> Agent: nonlocal factory_call_count factory_call_count += 1 - return cast(ChatAgent, DynamicManagerAgent()) + return cast(Agent, DynamicManagerAgent()) alpha = StubAgent("alpha", "reply from alpha") beta = StubAgent("beta", "reply from beta") - workflow = GroupChatBuilder(participants=[alpha, beta], orchestrator_agent=agent_factory).build() + workflow = GroupChatBuilder().participants([alpha, beta]).with_orchestrator(agent=agent_factory).build() # Factory should be called during build assert factory_call_count == 1 @@ -899,7 +975,7 @@ def agent_factory() -> ChatAgent: assert isinstance(final_messages, list) assert any( msg.text == "dynamic manager final" - for msg in cast(list[ChatMessage], 
final_messages) + for msg in cast(list[Message], final_messages) if msg.author_name == "dynamic_manager" ) @@ -927,7 +1003,7 @@ def orchestrator_factory() -> BaseGroupChatOrchestrator: alpha = StubAgent("alpha", "reply from alpha") - workflow = GroupChatBuilder(participants=[alpha], orchestrator=orchestrator_factory).build() + workflow = GroupChatBuilder().participants([alpha]).with_orchestrator(orchestrator=orchestrator_factory).build() # Factory should be called during build assert factory_call_count == 1 @@ -939,15 +1015,15 @@ async def test_group_chat_orchestrator_factory_reusable_builder(): """Test that the builder can be reused to build multiple workflows with orchestrator factory.""" factory_call_count = 0 - def agent_factory() -> ChatAgent: + def agent_factory() -> Agent: nonlocal factory_call_count factory_call_count += 1 - return cast(ChatAgent, StubManagerAgent()) + return cast(Agent, StubManagerAgent()) alpha = StubAgent("alpha", "reply from alpha") beta = StubAgent("beta", "reply from beta") - builder = GroupChatBuilder(participants=[alpha, beta], orchestrator_agent=agent_factory) + builder = GroupChatBuilder().participants([alpha, beta]).with_orchestrator(agent=agent_factory) # Build first workflow wf1 = builder.build() @@ -971,15 +1047,15 @@ def invalid_factory() -> Any: with pytest.raises( TypeError, - match=r"Orchestrator factory must return ChatAgent or BaseGroupChatOrchestrator instance", + match=r"Orchestrator factory must return Agent or BaseGroupChatOrchestrator instance", ): - GroupChatBuilder(participants=[alpha], orchestrator=invalid_factory).build() + (GroupChatBuilder().participants([alpha]).with_orchestrator(orchestrator=invalid_factory).build()) with pytest.raises( TypeError, - match=r"Orchestrator factory must return ChatAgent or BaseGroupChatOrchestrator instance", + match=r"Orchestrator factory must return Agent or BaseGroupChatOrchestrator instance", ): - GroupChatBuilder(participants=[alpha], 
orchestrator_agent=invalid_factory).build() + (GroupChatBuilder().participants([alpha]).with_orchestrator(agent=invalid_factory).build()) # endregion diff --git a/python/packages/orchestrations/tests/test_handoff.py b/python/packages/orchestrations/tests/test_handoff.py index efed85c9ee..2df03fff2d 100644 --- a/python/packages/orchestrations/tests/test_handoff.py +++ b/python/packages/orchestrations/tests/test_handoff.py @@ -6,13 +6,13 @@ import pytest from agent_framework import ( - ChatAgent, - ChatMessage, + Agent, ChatResponse, ChatResponseUpdate, Content, Context, ContextProvider, + Message, ResponseStream, WorkflowEvent, resolve_agent_id, @@ -50,7 +50,7 @@ def __init__( def _inner_get_response( self, *, - messages: Sequence[ChatMessage], + messages: Sequence[Message], stream: bool, options: Mapping[str, Any], **kwargs: Any, @@ -60,7 +60,7 @@ def _inner_get_response( async def _get() -> ChatResponse: contents = _build_reply_contents(self._name, self._handoff_to, self._next_call_id()) - reply = ChatMessage( + reply = Message( role="assistant", contents=contents, ) @@ -105,7 +105,7 @@ def _build_reply_contents( return contents -class MockHandoffAgent(ChatAgent): +class MockHandoffAgent(Agent): """Mock agent that can hand off to another agent.""" def __init__( @@ -196,7 +196,7 @@ async def test_autonomous_mode_yields_output_without_user_request(): final_conversation = outputs[-1].data assert isinstance(final_conversation, list) - conversation_list = cast(list[ChatMessage], final_conversation) + conversation_list = cast(list[Message], final_conversation) assert any(msg.role == "assistant" and (msg.text or "").startswith("specialist reply") for msg in conversation_list) @@ -237,7 +237,7 @@ async def test_handoff_async_termination_condition() -> None: """Test that async termination conditions work correctly.""" termination_call_count = 0 - async def async_termination(conv: list[ChatMessage]) -> bool: + async def async_termination(conv: list[Message]) -> bool: 
nonlocal termination_call_count termination_call_count += 1 user_count = sum(1 for msg in conv if msg.role == "user") @@ -258,7 +258,7 @@ async def async_termination(conv: list[ChatMessage]) -> bool: events = await _drain( workflow.run( - stream=True, responses={requests[-1].request_id: [ChatMessage(role="user", text="Second user message")]} + stream=True, responses={requests[-1].request_id: [Message(role="user", text="Second user message")]} ) ) outputs = [ev for ev in events if ev.type == "output"] @@ -266,7 +266,7 @@ async def async_termination(conv: list[ChatMessage]) -> bool: final_conversation = outputs[0].data assert isinstance(final_conversation, list) - final_conv_list = cast(list[ChatMessage], final_conversation) + final_conv_list = cast(list[Message], final_conversation) user_messages = [msg for msg in final_conv_list if msg.role == "user"] assert len(user_messages) == 2 assert termination_call_count > 0 @@ -281,7 +281,7 @@ async def mock_get_response(messages: Any, options: dict[str, Any] | None = None if options: recorded_tool_choices.append(options.get("tool_choice")) return ChatResponse( - messages=[ChatMessage(role="assistant", text="Response")], + messages=[Message(role="assistant", text="Response")], response_id="test_response", ) @@ -289,7 +289,7 @@ async def mock_get_response(messages: Any, options: dict[str, Any] | None = None mock_client.get_response = AsyncMock(side_effect=mock_get_response) # Create agent with specific tool_choice configuration via default_options - agent = ChatAgent( + agent = Agent( chat_client=mock_client, name="test_agent", default_options={"tool_choice": {"mode": "required"}}, # type: ignore @@ -313,7 +313,7 @@ async def test_context_provider_preserved_during_handoff(): class TestContextProvider(ContextProvider): """A test context provider that tracks its invocations.""" - async def invoking(self, messages: Sequence[ChatMessage], **kwargs: Any) -> Context: + async def invoking(self, messages: Sequence[Message], 
**kwargs: Any) -> Context: provider_calls.append("invoking") return Context(instructions="Test context from provider.") @@ -324,7 +324,7 @@ async def invoking(self, messages: Sequence[ChatMessage], **kwargs: Any) -> Cont mock_client = MockChatClient(name="test_agent") # Create agent with context provider using proper constructor - agent = ChatAgent( + agent = Agent( chat_client=mock_client, name="test_agent", id="test_agent", diff --git a/python/packages/orchestrations/tests/test_magentic.py b/python/packages/orchestrations/tests/test_magentic.py index c3f7be6d4a..62305702d9 100644 --- a/python/packages/orchestrations/tests/test_magentic.py +++ b/python/packages/orchestrations/tests/test_magentic.py @@ -11,9 +11,9 @@ AgentResponseUpdate, AgentThread, BaseAgent, - ChatMessage, Content, Executor, + Message, SupportsAgentRun, Workflow, WorkflowCheckpoint, @@ -48,7 +48,7 @@ def test_magentic_context_reset_behavior(): participant_descriptions={"Alice": "Researcher"}, ) # seed context state - ctx.chat_history.append(ChatMessage("assistant", ["draft"])) + ctx.chat_history.append(Message("assistant", ["draft"])) ctx.stall_count = 2 prev_reset = ctx.reset_count @@ -61,8 +61,8 @@ def test_magentic_context_reset_behavior(): @dataclass class _SimpleLedger: - facts: ChatMessage - plan: ChatMessage + facts: Message + plan: Message class FakeManager(MagenticManagerBase): @@ -108,25 +108,25 @@ def on_checkpoint_restore(self, state: dict[str, Any]) -> None: plan_payload = cast(dict[str, Any] | None, ledger_dict.get("plan")) if facts_payload is not None and plan_payload is not None: try: - facts = ChatMessage.from_dict(facts_payload) - plan = ChatMessage.from_dict(plan_payload) + facts = Message.from_dict(facts_payload) + plan = Message.from_dict(plan_payload) self.task_ledger = _SimpleLedger(facts=facts, plan=plan) except Exception: # pragma: no cover - defensive pass - async def plan(self, magentic_context: MagenticContext) -> ChatMessage: - facts = ChatMessage("assistant", 
["GIVEN OR VERIFIED FACTS\n- A\n"]) - plan = ChatMessage("assistant", ["- Do X\n- Do Y\n"]) + async def plan(self, magentic_context: MagenticContext) -> Message: + facts = Message("assistant", ["GIVEN OR VERIFIED FACTS\n- A\n"]) + plan = Message("assistant", ["- Do X\n- Do Y\n"]) self.task_ledger = _SimpleLedger(facts=facts, plan=plan) combined = f"Task: {magentic_context.task}\n\nFacts:\n{facts.text}\n\nPlan:\n{plan.text}" - return ChatMessage("assistant", [combined], author_name=self.name) + return Message("assistant", [combined], author_name=self.name) - async def replan(self, magentic_context: MagenticContext) -> ChatMessage: - facts = ChatMessage("assistant", ["GIVEN OR VERIFIED FACTS\n- A2\n"]) - plan = ChatMessage("assistant", ["- Do Z\n"]) + async def replan(self, magentic_context: MagenticContext) -> Message: + facts = Message("assistant", ["GIVEN OR VERIFIED FACTS\n- A2\n"]) + plan = Message("assistant", ["- Do Z\n"]) self.task_ledger = _SimpleLedger(facts=facts, plan=plan) combined = f"Task: {magentic_context.task}\n\nFacts:\n{facts.text}\n\nPlan:\n{plan.text}" - return ChatMessage("assistant", [combined], author_name=self.name) + return Message("assistant", [combined], author_name=self.name) async def create_progress_ledger(self, magentic_context: MagenticContext) -> MagenticProgressLedger: # At least two messages in chat history means request is satisfied for testing @@ -139,8 +139,8 @@ async def create_progress_ledger(self, magentic_context: MagenticContext) -> Mag instruction_or_question=MagenticProgressLedgerItem(reason="test", answer=self.instruction_text), ) - async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatMessage: - return ChatMessage("assistant", [self.FINAL_ANSWER], author_name=self.name) + async def prepare_final_answer(self, magentic_context: MagenticContext) -> Message: + return Message("assistant", [self.FINAL_ANSWER], author_name=self.name) class StubAgent(BaseAgent): @@ -150,7 +150,7 @@ def __init__(self, 
agent_name: str, reply_text: str, **kwargs: Any) -> None: def run( # type: ignore[override] self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: bool = False, thread: AgentThread | None = None, @@ -160,7 +160,7 @@ def run( # type: ignore[override] return self._run_stream() async def _run() -> AgentResponse: - response = ChatMessage("assistant", [self._reply_text], author_name=self.name) + response = Message("assistant", [self._reply_text], author_name=self.name) return AgentResponse(messages=[response]) return _run() @@ -177,7 +177,7 @@ def __init__(self, name: str) -> None: @handler async def _noop( - self, message: GroupChatRequestMessage, ctx: WorkflowContext[ChatMessage] + self, message: GroupChatRequestMessage, ctx: WorkflowContext[Message] ) -> None: # pragma: no cover - not called pass @@ -190,13 +190,13 @@ async def test_magentic_builder_returns_workflow_and_runs() -> None: assert isinstance(workflow, Workflow) - outputs: list[ChatMessage] = [] + outputs: list[Message] = [] orchestrator_event_count = 0 async for event in workflow.run("compose summary", stream=True): if event.type == "output": msg = event.data if isinstance(msg, list): - outputs.extend(cast(list[ChatMessage], msg)) + outputs.extend(cast(list[Message], msg)) elif event.type == "magentic_orchestrator": orchestrator_event_count += 1 @@ -216,8 +216,8 @@ async def test_magentic_as_agent_does_not_accept_conversation() -> None: agent = workflow.as_agent(name="magentic-agent") conversation = [ - ChatMessage("system", ["Guidelines"], author_name="system"), - ChatMessage("user", ["Summarize the findings"], author_name="requester"), + Message("system", ["Guidelines"], author_name="system"), + Message("user", ["Summarize the findings"], author_name="requester"), ] with pytest.raises(ValueError, match="Magentic only support a single task message to start the workflow."): await agent.run(conversation) 
@@ -250,7 +250,7 @@ async def test_magentic_workflow_plan_review_approval_to_completion(): assert isinstance(req_event.data, MagenticPlanReviewRequest) completed = False - output: list[ChatMessage] | None = None + output: list[Message] | None = None async for ev in wf.run(stream=True, responses={req_event.request_id: req_event.data.approve()}): if ev.type == "status" and ev.state == WorkflowRunState.IDLE: completed = True @@ -262,7 +262,7 @@ async def test_magentic_workflow_plan_review_approval_to_completion(): assert completed assert output is not None assert isinstance(output, list) - assert all(isinstance(msg, ChatMessage) for msg in output) + assert all(isinstance(msg, Message) for msg in output) async def test_magentic_plan_review_with_revise(): @@ -273,7 +273,7 @@ class CountingManager(FakeManager): def __init__(self, *args, **kwargs) -> None: # type: ignore[no-untyped-def] super().__init__(*args, **kwargs) - async def replan(self, magentic_context: MagenticContext) -> ChatMessage: # type: ignore[override] + async def replan(self, magentic_context: MagenticContext) -> Message: # type: ignore[override] self.replan_count += 1 return await super().replan(magentic_context) @@ -340,7 +340,7 @@ async def test_magentic_orchestrator_round_limit_produces_partial_result(): assert isinstance(data, list) assert len(data) > 0 # type: ignore assert data[-1].role == "assistant" # type: ignore - assert all(isinstance(msg, ChatMessage) for msg in data) # type: ignore + assert all(isinstance(msg, Message) for msg in data) # type: ignore async def test_magentic_checkpoint_resume_round_trip(): @@ -406,7 +406,7 @@ class StubManagerAgent(BaseAgent): def run( self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, + messages: str | Message | Sequence[str | Message] | None = None, *, stream: bool = False, thread: Any = None, @@ -416,22 +416,22 @@ def run( return self._run_stream() async def _run() -> AgentResponse: - return 
AgentResponse(messages=[ChatMessage("assistant", ["ok"])]) + return AgentResponse(messages=[Message("assistant", ["ok"])]) return _run() async def _run_stream(self) -> AsyncIterable[AgentResponseUpdate]: - yield AgentResponseUpdate(message_deltas=[ChatMessage("assistant", ["ok"])]) + yield AgentResponseUpdate(message_deltas=[Message("assistant", ["ok"])]) async def test_standard_manager_plan_and_replan_via_complete_monkeypatch(): mgr = StandardMagenticManager(StubManagerAgent()) - async def fake_complete_plan(messages: list[ChatMessage], **kwargs: Any) -> ChatMessage: + async def fake_complete_plan(messages: list[Message], **kwargs: Any) -> Message: # Return a different response depending on call order length if any("FACTS" in (m.text or "") for m in messages): - return ChatMessage("assistant", ["- step A\n- step B"]) - return ChatMessage("assistant", ["GIVEN OR VERIFIED FACTS\n- fact1"]) + return Message("assistant", ["- step A\n- step B"]) + return Message("assistant", ["GIVEN OR VERIFIED FACTS\n- fact1"]) # First, patch to produce facts then plan mgr._complete = fake_complete_plan # type: ignore[attr-defined] @@ -444,10 +444,10 @@ async def fake_complete_plan(messages: list[ChatMessage], **kwargs: Any) -> Chat assert any(t in combined.text for t in ("- step A", "- step B", "- step")) # Now replan with new outputs - async def fake_complete_replan(messages: list[ChatMessage], **kwargs: Any) -> ChatMessage: + async def fake_complete_replan(messages: list[Message], **kwargs: Any) -> Message: if any("Please briefly explain" in (m.text or "") for m in messages): - return ChatMessage("assistant", ["- new step"]) - return ChatMessage("assistant", ["GIVEN OR VERIFIED FACTS\n- updated"]) + return Message("assistant", ["- new step"]) + return Message("assistant", ["GIVEN OR VERIFIED FACTS\n- updated"]) mgr._complete = fake_complete_replan # type: ignore[attr-defined] combined2 = await mgr.replan(ctx.clone()) @@ -459,7 +459,7 @@ async def 
test_standard_manager_progress_ledger_success_and_error(): ctx = MagenticContext(task="task", participant_descriptions={"alice": "desc"}) # Success path: valid JSON - async def fake_complete_ok(messages: list[ChatMessage], **kwargs: Any) -> ChatMessage: + async def fake_complete_ok(messages: list[Message], **kwargs: Any) -> Message: json_text = ( '{"is_request_satisfied": {"reason": "r", "answer": false}, ' '"is_in_loop": {"reason": "r", "answer": false}, ' @@ -467,15 +467,15 @@ async def fake_complete_ok(messages: list[ChatMessage], **kwargs: Any) -> ChatMe '"next_speaker": {"reason": "r", "answer": "alice"}, ' '"instruction_or_question": {"reason": "r", "answer": "do"}}' ) - return ChatMessage("assistant", [json_text]) + return Message("assistant", [json_text]) mgr._complete = fake_complete_ok # type: ignore[attr-defined] ledger = await mgr.create_progress_ledger(ctx.clone()) assert ledger.next_speaker.answer == "alice" # Error path: invalid JSON now raises to avoid emitting planner-oriented instructions to agents - async def fake_complete_bad(messages: list[ChatMessage], **kwargs: Any) -> ChatMessage: - return ChatMessage("assistant", ["not-json"]) + async def fake_complete_bad(messages: list[Message], **kwargs: Any) -> Message: + return Message("assistant", ["not-json"]) mgr._complete = fake_complete_bad # type: ignore[attr-defined] with pytest.raises(RuntimeError): @@ -487,11 +487,11 @@ def __init__(self) -> None: super().__init__(max_round_count=5, max_stall_count=3, max_reset_count=2) self._invoked = False - async def plan(self, magentic_context: MagenticContext) -> ChatMessage: - return ChatMessage("assistant", ["ledger"]) + async def plan(self, magentic_context: MagenticContext) -> Message: + return Message("assistant", ["ledger"]) - async def replan(self, magentic_context: MagenticContext) -> ChatMessage: - return ChatMessage("assistant", ["re-ledger"]) + async def replan(self, magentic_context: MagenticContext) -> Message: + return Message("assistant", 
["re-ledger"]) async def create_progress_ledger(self, magentic_context: MagenticContext) -> MagenticProgressLedger: if not self._invoked: @@ -513,8 +513,8 @@ async def create_progress_ledger(self, magentic_context: MagenticContext) -> Mag instruction_or_question=MagenticProgressLedgerItem(reason="r", answer="done"), ) - async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatMessage: - return ChatMessage("assistant", ["final"]) + async def prepare_final_answer(self, magentic_context: MagenticContext) -> Message: + return Message("assistant", ["final"]) class StubThreadAgent(BaseAgent): @@ -526,7 +526,7 @@ def run(self, messages=None, *, stream: bool = False, thread=None, **kwargs): # return self._run_stream() async def _run(): - return AgentResponse(messages=[ChatMessage("assistant", ["thread-ok"], author_name=self.name)]) + return AgentResponse(messages=[Message("assistant", ["thread-ok"], author_name=self.name)]) return _run() @@ -554,7 +554,7 @@ def run(self, messages=None, *, stream: bool = False, thread=None, **kwargs): # return self._run_stream() async def _run(): - return AgentResponse(messages=[ChatMessage("assistant", ["assistants-ok"], author_name=self.name)]) + return AgentResponse(messages=[Message("assistant", ["assistants-ok"], author_name=self.name)]) return _run() @@ -566,8 +566,8 @@ async def _run_stream(self): ) -async def _collect_agent_responses_setup(participant: SupportsAgentRun) -> list[ChatMessage]: - captured: list[ChatMessage] = [] +async def _collect_agent_responses_setup(participant: SupportsAgentRun) -> list[Message]: + captured: list[Message] = [] wf = MagenticBuilder(participants=[participant], intermediate_outputs=True, manager=InvokeOnceManager()).build() @@ -578,7 +578,7 @@ async def _collect_agent_responses_setup(participant: SupportsAgentRun) -> list[ # Capture streaming updates (type="output" with AgentResponseUpdate data) if ev.type == "output" and isinstance(ev.data, AgentResponseUpdate): 
captured.append( - ChatMessage( + Message( role=ev.data.role or "assistant", text=ev.data.text or "", author_name=ev.data.author_name, @@ -711,11 +711,11 @@ class NotProgressingManager(MagenticManagerBase): A manager that never marks progress being made, to test stall/reset limits. """ - async def plan(self, magentic_context: MagenticContext) -> ChatMessage: - return ChatMessage("assistant", ["ledger"]) + async def plan(self, magentic_context: MagenticContext) -> Message: + return Message("assistant", ["ledger"]) - async def replan(self, magentic_context: MagenticContext) -> ChatMessage: - return ChatMessage("assistant", ["re-ledger"]) + async def replan(self, magentic_context: MagenticContext) -> Message: + return Message("assistant", ["re-ledger"]) async def create_progress_ledger(self, magentic_context: MagenticContext) -> MagenticProgressLedger: return MagenticProgressLedger( @@ -726,8 +726,8 @@ async def create_progress_ledger(self, magentic_context: MagenticContext) -> Mag instruction_or_question=MagenticProgressLedgerItem(reason="r", answer="done"), ) - async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatMessage: - return ChatMessage("assistant", ["final"]) + async def prepare_final_answer(self, magentic_context: MagenticContext) -> Message: + return Message("assistant", ["final"]) async def test_magentic_stall_and_reset_reach_limits(): @@ -747,7 +747,7 @@ async def test_magentic_stall_and_reset_reach_limits(): output_event = next((e for e in events if e.type == "output"), None) assert output_event is not None assert isinstance(output_event.data, list) - assert all(isinstance(msg, ChatMessage) for msg in output_event.data) # type: ignore + assert all(isinstance(msg, Message) for msg in output_event.data) # type: ignore assert len(output_event.data) > 0 # type: ignore assert output_event.data[-1].text is not None # type: ignore assert output_event.data[-1].text == "Workflow terminated due to reaching maximum reset count." 
# type: ignore @@ -760,7 +760,7 @@ async def test_magentic_checkpoint_runtime_only() -> None: manager = FakeManager(max_round_count=10) wf = MagenticBuilder(participants=[DummyExec("agentA")], manager=manager).build() - baseline_output: ChatMessage | None = None + baseline_output: Message | None = None async for ev in wf.run("runtime checkpoint test", checkpoint_storage=storage, stream=True): if ev.type == "output": baseline_output = ev.data # type: ignore[assignment] @@ -794,7 +794,7 @@ async def test_magentic_checkpoint_runtime_overrides_buildtime() -> None: participants=[DummyExec("agentA")], checkpoint_storage=buildtime_storage, manager=manager ).build() - baseline_output: ChatMessage | None = None + baseline_output: Message | None = None async for ev in wf.run("override test", checkpoint_storage=runtime_storage, stream=True): if ev.type == "output": baseline_output = ev.data # type: ignore[assignment] @@ -821,8 +821,8 @@ async def test_magentic_context_no_duplicate_on_reset(): ctx = MagenticContext(task="task", participant_descriptions={"Alice": "Researcher"}) # Add some history - ctx.chat_history.append(ChatMessage("assistant", ["response1"])) - ctx.chat_history.append(ChatMessage("assistant", ["response2"])) + ctx.chat_history.append(Message("assistant", ["response1"])) + ctx.chat_history.append(Message("assistant", ["response2"])) assert len(ctx.chat_history) == 2 # Reset @@ -832,7 +832,7 @@ async def test_magentic_context_no_duplicate_on_reset(): assert len(ctx.chat_history) == 0, "chat_history should be empty after reset" # Add new history - ctx.chat_history.append(ChatMessage("assistant", ["new_response"])) + ctx.chat_history.append(Message("assistant", ["new_response"])) assert len(ctx.chat_history) == 1, "Should have exactly 1 message after adding to reset context" @@ -844,8 +844,8 @@ async def test_magentic_checkpoint_restore_no_duplicate_history(): wf = MagenticBuilder(participants=[DummyExec("agentA")], checkpoint_storage=storage, 
manager=manager).build() # Run with conversation history to create initial checkpoint - conversation: list[ChatMessage] = [ - ChatMessage("user", ["task_msg"]), + conversation: list[Message] = [ + Message("user", ["task_msg"]), ] async for event in wf.run(conversation, stream=True): @@ -1022,8 +1022,8 @@ def agent_factory() -> SupportsAgentRun: from agent_framework_orchestrations._magentic import _MagenticTaskLedger # type: ignore custom_task_ledger = _MagenticTaskLedger( - facts=ChatMessage("assistant", ["Custom facts"]), - plan=ChatMessage("assistant", ["Custom plan"]), + facts=Message("assistant", ["Custom facts"]), + plan=Message("assistant", ["Custom plan"]), ) participant = StubAgent("agentA", "reply from agentA") diff --git a/python/packages/orchestrations/tests/test_orchestration_request_info.py b/python/packages/orchestrations/tests/test_orchestration_request_info.py index 88fcdf757e..1e2b8a4af6 100644 --- a/python/packages/orchestrations/tests/test_orchestration_request_info.py +++ b/python/packages/orchestrations/tests/test_orchestration_request_info.py @@ -11,7 +11,7 @@ AgentResponse, AgentResponseUpdate, AgentThread, - ChatMessage, + Message, SupportsAgentRun, ) from agent_framework._workflows._agent_executor import AgentExecutorRequest, AgentExecutorResponse @@ -72,16 +72,16 @@ class TestAgentRequestInfoResponse: def test_create_response_with_messages(self): """Test creating an AgentRequestInfoResponse with messages.""" - messages = [ChatMessage(role="user", text="Additional info")] + messages = [Message(role="user", text="Additional info")] response = AgentRequestInfoResponse(messages=messages) assert response.messages == messages def test_from_messages_factory(self): - """Test creating response from ChatMessage list.""" + """Test creating response from Message list.""" messages = [ - ChatMessage(role="user", text="Message 1"), - ChatMessage(role="user", text="Message 2"), + Message(role="user", text="Message 1"), + Message(role="user", text="Message 
2"), ] response = AgentRequestInfoResponse.from_messages(messages) @@ -113,7 +113,7 @@ async def test_request_info_handler(self): """Test that request_info handler calls ctx.request_info.""" executor = AgentRequestInfoExecutor(id="test_executor") - agent_response = AgentResponse(messages=[ChatMessage(role="assistant", text="Agent response")]) + agent_response = AgentResponse(messages=[Message(role="assistant", text="Agent response")]) agent_response = AgentExecutorResponse( executor_id="test_agent", agent_response=agent_response, @@ -131,7 +131,7 @@ async def test_handle_request_info_response_with_messages(self): """Test response handler when user provides additional messages.""" executor = AgentRequestInfoExecutor(id="test_executor") - agent_response = AgentResponse(messages=[ChatMessage(role="assistant", text="Original")]) + agent_response = AgentResponse(messages=[Message(role="assistant", text="Original")]) original_request = AgentExecutorResponse( executor_id="test_agent", agent_response=agent_response, @@ -157,7 +157,7 @@ async def test_handle_request_info_response_approval(self): """Test response handler when user approves (no additional messages).""" executor = AgentRequestInfoExecutor(id="test_executor") - agent_response = AgentResponse(messages=[ChatMessage(role="assistant", text="Original")]) + agent_response = AgentResponse(messages=[Message(role="assistant", text="Original")]) original_request = AgentExecutorResponse( executor_id="test_agent", agent_response=agent_response, @@ -200,7 +200,7 @@ def description(self) -> str | None: async def run( self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | Message | list[str] | list[Message] | None = None, *, stream: bool = False, thread: AgentThread | None = None, @@ -209,10 +209,10 @@ async def run( """Dummy run method.""" if stream: return self._run_stream_impl() - return AgentResponse(messages=[ChatMessage(role="assistant", text="Test response")]) + return 
AgentResponse(messages=[Message(role="assistant", text="Test response")]) async def _run_stream_impl(self) -> AsyncIterable[AgentResponseUpdate]: - yield AgentResponseUpdate(messages=[ChatMessage(role="assistant", text="Test response stream")]) + yield AgentResponseUpdate(messages=[Message(role="assistant", text="Test response stream")]) def get_new_thread(self, **kwargs: Any) -> AgentThread: """Creates a new conversation thread for the agent.""" diff --git a/python/packages/orchestrations/tests/test_sequential.py b/python/packages/orchestrations/tests/test_sequential.py index e450bebefe..880e33761d 100644 --- a/python/packages/orchestrations/tests/test_sequential.py +++ b/python/packages/orchestrations/tests/test_sequential.py @@ -10,9 +10,9 @@ AgentResponseUpdate, AgentThread, BaseAgent, - ChatMessage, Content, Executor, + Message, TypeCompatibilityError, WorkflowContext, WorkflowRunState, @@ -27,7 +27,7 @@ class _EchoAgent(BaseAgent): def run( # type: ignore[override] self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | Message | list[str] | list[Message] | None = None, *, stream: bool = False, thread: AgentThread | None = None, @@ -37,7 +37,7 @@ def run( # type: ignore[override] return self._run_stream() async def _run() -> AgentResponse: - return AgentResponse(messages=[ChatMessage("assistant", [f"{self.name} reply"])]) + return AgentResponse(messages=[Message("assistant", [f"{self.name} reply"])]) return _run() @@ -50,11 +50,11 @@ class _SummarizerExec(Executor): """Custom executor that summarizes by appending a short assistant message.""" @handler - async def summarize(self, agent_response: AgentExecutorResponse, ctx: WorkflowContext[list[ChatMessage]]) -> None: + async def summarize(self, agent_response: AgentExecutorResponse, ctx: WorkflowContext[list[Message]]) -> None: conversation = agent_response.full_conversation or [] user_texts = [m.text for m in conversation if m.role == "user"] agents = 
[m.author_name or m.role for m in conversation if m.role == "assistant"] - summary = ChatMessage("assistant", [f"Summary of users:{len(user_texts)} agents:{len(agents)}"]) + summary = Message("assistant", [f"Summary of users:{len(user_texts)} agents:{len(agents)}"]) await ctx.send_message(list(conversation) + [summary]) @@ -62,7 +62,7 @@ class _InvalidExecutor(Executor): """Invalid executor that does not have a handler that accepts a list of chat messages""" @handler - async def summarize(self, conversation: list[str], ctx: WorkflowContext[list[ChatMessage]]) -> None: + async def summarize(self, conversation: list[str], ctx: WorkflowContext[list[Message]]) -> None: pass @@ -84,7 +84,7 @@ async def test_sequential_agents_append_to_context() -> None: wf = SequentialBuilder(participants=[a1, a2]).build() completed = False - output: list[ChatMessage] | None = None + output: list[Message] | None = None async for ev in wf.run("hello sequential", stream=True): if ev.type == "status" and ev.state == WorkflowRunState.IDLE: completed = True @@ -96,7 +96,7 @@ async def test_sequential_agents_append_to_context() -> None: assert completed assert output is not None assert isinstance(output, list) - msgs: list[ChatMessage] = output + msgs: list[Message] = output assert len(msgs) == 3 assert msgs[0].role == "user" and "hello sequential" in msgs[0].text assert msgs[1].role == "assistant" and (msgs[1].author_name == "A1" or True) @@ -112,7 +112,7 @@ async def test_sequential_with_custom_executor_summary() -> None: wf = SequentialBuilder(participants=[a1, summarizer]).build() completed = False - output: list[ChatMessage] | None = None + output: list[Message] | None = None async for ev in wf.run("topic X", stream=True): if ev.type == "status" and ev.state == WorkflowRunState.IDLE: completed = True @@ -123,7 +123,7 @@ async def test_sequential_with_custom_executor_summary() -> None: assert completed assert output is not None - msgs: list[ChatMessage] = output + msgs: list[Message] = 
output # Expect: [user, A1 reply, summary] assert len(msgs) == 3 assert msgs[0].role == "user" @@ -137,7 +137,7 @@ async def test_sequential_checkpoint_resume_round_trip() -> None: initial_agents = (_EchoAgent(id="agent1", name="A1"), _EchoAgent(id="agent2", name="A2")) wf = SequentialBuilder(participants=list(initial_agents), checkpoint_storage=storage).build() - baseline_output: list[ChatMessage] | None = None + baseline_output: list[Message] | None = None async for ev in wf.run("checkpoint sequential", stream=True): if ev.type == "output": baseline_output = ev.data # type: ignore[assignment] @@ -158,7 +158,7 @@ async def test_sequential_checkpoint_resume_round_trip() -> None: resumed_agents = (_EchoAgent(id="agent1", name="A1"), _EchoAgent(id="agent2", name="A2")) wf_resume = SequentialBuilder(participants=list(resumed_agents), checkpoint_storage=storage).build() - resumed_output: list[ChatMessage] | None = None + resumed_output: list[Message] | None = None async for ev in wf_resume.run(checkpoint_id=resume_checkpoint.checkpoint_id, stream=True): if ev.type == "output": resumed_output = ev.data # type: ignore[assignment] @@ -180,7 +180,7 @@ async def test_sequential_checkpoint_runtime_only() -> None: agents = (_EchoAgent(id="agent1", name="A1"), _EchoAgent(id="agent2", name="A2")) wf = SequentialBuilder(participants=list(agents)).build() - baseline_output: list[ChatMessage] | None = None + baseline_output: list[Message] | None = None async for ev in wf.run("runtime checkpoint test", checkpoint_storage=storage, stream=True): if ev.type == "output": baseline_output = ev.data # type: ignore[assignment] @@ -201,7 +201,7 @@ async def test_sequential_checkpoint_runtime_only() -> None: resumed_agents = (_EchoAgent(id="agent1", name="A1"), _EchoAgent(id="agent2", name="A2")) wf_resume = SequentialBuilder(participants=list(resumed_agents)).build() - resumed_output: list[ChatMessage] | None = None + resumed_output: list[Message] | None = None async for ev in 
wf_resume.run( checkpoint_id=resume_checkpoint.checkpoint_id, checkpoint_storage=storage, stream=True ): @@ -231,7 +231,7 @@ async def test_sequential_checkpoint_runtime_overrides_buildtime() -> None: agents = (_EchoAgent(id="agent1", name="A1"), _EchoAgent(id="agent2", name="A2")) wf = SequentialBuilder(participants=list(agents), checkpoint_storage=buildtime_storage).build() - baseline_output: list[ChatMessage] | None = None + baseline_output: list[Message] | None = None async for ev in wf.run("override test", checkpoint_storage=runtime_storage, stream=True): if ev.type == "output": baseline_output = ev.data # type: ignore[assignment] diff --git a/python/packages/purview/AGENTS.md b/python/packages/purview/AGENTS.md index 3d09982e70..30be4ffbf7 100644 --- a/python/packages/purview/AGENTS.md +++ b/python/packages/purview/AGENTS.md @@ -32,7 +32,7 @@ from agent_framework.microsoft import PurviewPolicyMiddleware, PurviewSettings settings = PurviewSettings(...) middleware = PurviewPolicyMiddleware(settings=settings) -agent = ChatAgent(..., middleware=[middleware]) +agent = Agent(..., middleware=[middleware]) ``` ## Import Path diff --git a/python/packages/purview/README.md b/python/packages/purview/README.md index b016f00c8b..650cbd2d7f 100644 --- a/python/packages/purview/README.md +++ b/python/packages/purview/README.md @@ -8,7 +8,7 @@ - Middleware-based policy enforcement (agent-level and chat-client level) - Blocks or allows content at both ingress (prompt) and egress (response) -- Works with any `ChatAgent` / agent orchestration using the standard Agent Framework middleware pipeline +- Works with any `Agent` / agent orchestration using the standard Agent Framework middleware pipeline - Supports both synchronous `TokenCredential` and `AsyncTokenCredential` from `azure-identity` - Configuration via `PurviewSettings` / `PurviewAppLocation` - Built-in caching with configurable TTL and size limits for protection scopes in `PurviewSettings` @@ -53,7 +53,7 @@ Add Purview 
when you need to: ```python import asyncio -from agent_framework import ChatAgent, ChatMessage, Role +from agent_framework import Agent, Message, Role from agent_framework.azure import AzureOpenAIChatClient from agent_framework.microsoft import PurviewPolicyMiddleware, PurviewSettings from azure.identity import InteractiveBrowserCredential @@ -66,13 +66,13 @@ async def main(): settings=PurviewSettings(app_name="My Sample App") ) - agent = ChatAgent( + agent = Agent( chat_client=chat_client, instructions="You are a helpful assistant.", middleware=[purview_middleware] ) - response = await agent.run(ChatMessage("user", ["Summarize zero trust in one sentence."])) + response = await agent.run(Message("user", ["Summarize zero trust in one sentence."])) print(response) asyncio.run(main()) @@ -218,7 +218,7 @@ settings = PurviewSettings( Use the agent middleware when you already have / want the full agent pipeline: ```python -from agent_framework import ChatAgent +from agent_framework import Agent from agent_framework.azure import AzureOpenAIChatClient from agent_framework.microsoft import PurviewPolicyMiddleware, PurviewSettings from azure.identity import DefaultAzureCredential @@ -226,7 +226,7 @@ from azure.identity import DefaultAzureCredential credential = DefaultAzureCredential() client = AzureOpenAIChatClient() -agent = ChatAgent( +agent = Agent( chat_client=client, instructions="You are helpful.", middleware=[PurviewPolicyMiddleware(credential, PurviewSettings(app_name="My App"))] @@ -237,7 +237,7 @@ Use the chat middleware when you attach directly to a chat client (e.g. 
minimal ```python import os -from agent_framework import ChatAgent +from agent_framework import Agent from agent_framework.azure import AzureOpenAIChatClient from agent_framework.microsoft import PurviewChatPolicyMiddleware, PurviewSettings from azure.identity import DefaultAzureCredential @@ -253,7 +253,7 @@ chat_client = AzureOpenAIChatClient( ], ) -agent = ChatAgent(chat_client=chat_client, instructions="You are helpful.") +agent = Agent(chat_client=chat_client, instructions="You are helpful.") ``` The policy logic is identical; the difference is only the hook point in the pipeline. @@ -272,7 +272,7 @@ The policy logic is identical; the difference is only the hook point in the pipe 3. **After successful agent execution** (`response phase`): the produced messages are evaluated using the same user_id from the prompt phase. 4. **If blocked**: result messages are replaced with a blocking notice. -The user identifier is discovered from `ChatMessage.additional_properties['user_id']` during the prompt phase and reused for the response phase, ensuring both evaluations map consistently to the same user. If no user_id is present, policy evaluation is skipped entirely. +The user identifier is discovered from `Message.additional_properties['user_id']` during the prompt phase and reused for the response phase, ensuring both evaluations map consistently to the same user. If no user_id is present, policy evaluation is skipped entirely. You can customize the blocking messages using the `blocked_prompt_message` and `blocked_response_message` fields in `PurviewSettings`. For more advanced scenarios, you can wrap the middleware or post-process `context.result` in later middleware. @@ -315,7 +315,7 @@ except (PurviewAuthenticationError, PurviewRateLimitError, PurviewRequestError, --- ## Notes -- **User Identification**: Provide a `user_id` per request (e.g. in `ChatMessage(..., additional_properties={"user_id": ""})`) for per-user policy scoping. 
If no user_id is provided, policy evaluation is skipped entirely. +- **User Identification**: Provide a `user_id` per request (e.g. in `Message(..., additional_properties={"user_id": ""})`) for per-user policy scoping. If no user_id is provided, policy evaluation is skipped entirely. - **Blocking Messages**: Can be customized via `blocked_prompt_message` and `blocked_response_message` in `PurviewSettings`. By default, they are "Prompt blocked by policy" and "Response blocked by policy" respectively. - **Streaming Responses**: Post-response policy evaluation presently applies only to non-streaming chat responses. - **Error Handling**: Use `ignore_exceptions` and `ignore_payment_required` settings for graceful degradation. When enabled, errors are logged but don't fail the request. diff --git a/python/packages/purview/agent_framework_purview/_middleware.py b/python/packages/purview/agent_framework_purview/_middleware.py index 52a74ffc10..e50793f79d 100644 --- a/python/packages/purview/agent_framework_purview/_middleware.py +++ b/python/packages/purview/agent_framework_purview/_middleware.py @@ -26,11 +26,11 @@ class PurviewPolicyMiddleware(AgentMiddleware): .. code-block:: python from agent_framework.microsoft import PurviewPolicyMiddleware, PurviewSettings - from agent_framework import ChatAgent + from agent_framework import Agent credential = ... 
# TokenCredential or AsyncTokenCredential settings = PurviewSettings(app_name="My App") - agent = ChatAgent( + agent = Agent( chat_client=client, instructions="...", middleware=[PurviewPolicyMiddleware(credential, settings)] ) """ @@ -77,10 +77,10 @@ async def process( context.messages, Activity.UPLOAD_TEXT, session_id=session_id ) if should_block_prompt: - from agent_framework import AgentResponse, ChatMessage + from agent_framework import AgentResponse, Message context.result = AgentResponse( - messages=[ChatMessage(role="system", text=self._settings.blocked_prompt_message)] + messages=[Message(role="system", text=self._settings.blocked_prompt_message)] ) raise MiddlewareTermination except MiddlewareTermination: @@ -110,10 +110,10 @@ async def process( user_id=resolved_user_id, ) if should_block_response: - from agent_framework import AgentResponse, ChatMessage + from agent_framework import AgentResponse, Message context.result = AgentResponse( - messages=[ChatMessage(role="system", text=self._settings.blocked_response_message)] + messages=[Message(role="system", text=self._settings.blocked_response_message)] ) else: # Streaming responses are not supported for post-checks @@ -173,9 +173,9 @@ async def process( context.messages, Activity.UPLOAD_TEXT, session_id=session_id ) if should_block_prompt: - from agent_framework import ChatMessage, ChatResponse + from agent_framework import ChatResponse, Message - blocked_message = ChatMessage(role="system", text=self._settings.blocked_prompt_message) + blocked_message = Message(role="system", text=self._settings.blocked_prompt_message) context.result = ChatResponse(messages=[blocked_message]) raise MiddlewareTermination except MiddlewareTermination: @@ -205,9 +205,9 @@ async def process( messages, Activity.UPLOAD_TEXT, session_id=session_id_response, user_id=resolved_user_id ) if should_block_response: - from agent_framework import ChatMessage, ChatResponse + from agent_framework import ChatResponse, Message - 
blocked_message = ChatMessage(role="system", text=self._settings.blocked_response_message) + blocked_message = Message(role="system", text=self._settings.blocked_response_message) context.result = ChatResponse(messages=[blocked_message]) else: logger.debug("Streaming responses are not supported for Purview policy post-checks") diff --git a/python/packages/purview/agent_framework_purview/_processor.py b/python/packages/purview/agent_framework_purview/_processor.py index e2206a781b..5525897a9e 100644 --- a/python/packages/purview/agent_framework_purview/_processor.py +++ b/python/packages/purview/agent_framework_purview/_processor.py @@ -6,7 +6,7 @@ from collections.abc import Iterable, MutableMapping from typing import Any -from agent_framework import ChatMessage +from agent_framework import Message from agent_framework._logging import get_logger from ._cache import CacheProvider, InMemoryCacheProvider, create_protection_scopes_cache_key @@ -64,7 +64,7 @@ def __init__(self, client: PurviewClient, settings: PurviewSettings, cache_provi async def process_messages( self, - messages: Iterable[ChatMessage], + messages: Iterable[Message], activity: Activity, session_id: str | None = None, user_id: str | None = None, @@ -97,7 +97,7 @@ async def process_messages( async def _map_messages( self, - messages: Iterable[ChatMessage], + messages: Iterable[Message], activity: Activity, session_id: str | None = None, provided_user_id: str | None = None, diff --git a/python/packages/purview/tests/test_chat_middleware.py b/python/packages/purview/tests/test_chat_middleware.py index 41ed8e0e4e..dac88ace57 100644 --- a/python/packages/purview/tests/test_chat_middleware.py +++ b/python/packages/purview/tests/test_chat_middleware.py @@ -5,7 +5,7 @@ from unittest.mock import AsyncMock, MagicMock, patch import pytest -from agent_framework import ChatContext, ChatMessage, MiddlewareTermination +from agent_framework import ChatContext, Message, MiddlewareTermination from 
azure.core.credentials import AccessToken from agent_framework_purview import PurviewChatPolicyMiddleware, PurviewSettings @@ -37,9 +37,7 @@ def chat_context(self) -> ChatContext: chat_client = DummyChatClient() chat_options = MagicMock() chat_options.model = "test-model" - return ChatContext( - chat_client=chat_client, messages=[ChatMessage(role="user", text="Hello")], options=chat_options - ) + return ChatContext(chat_client=chat_client, messages=[Message(role="user", text="Hello")], options=chat_options) async def test_initialization(self, middleware: PurviewChatPolicyMiddleware) -> None: assert middleware._client is not None @@ -57,7 +55,7 @@ async def mock_next(ctx: ChatContext) -> None: class Result: def __init__(self): - self.messages = [ChatMessage(role="assistant", text="Hi there")] + self.messages = [Message(role="assistant", text="Hi there")] ctx.result = Result() @@ -93,7 +91,7 @@ async def side_effect(messages, activity, session_id=None, user_id=None): async def mock_next(ctx: ChatContext) -> None: class Result: def __init__(self): - self.messages = [ChatMessage(role="assistant", text="Sensitive output")] # pragma: no cover + self.messages = [Message(role="assistant", text="Sensitive output")] # pragma: no cover ctx.result = Result() @@ -110,7 +108,7 @@ async def test_streaming_skips_post_check(self, middleware: PurviewChatPolicyMid chat_options.model = "test-model" streaming_context = ChatContext( chat_client=chat_client, - messages=[ChatMessage(role="user", text="Hello")], + messages=[Message(role="user", text="Hello")], options=chat_options, stream=True, ) @@ -142,7 +140,7 @@ async def mock_process_messages(*args, **kwargs): async def mock_next(ctx: ChatContext) -> None: result = MagicMock() - result.messages = [ChatMessage(role="assistant", text="Response")] + result.messages = [Message(role="assistant", text="Response")] ctx.result = result await middleware.process(chat_context, mock_next) @@ -166,7 +164,7 @@ async def 
mock_process_messages(messages, activity, session_id=None, user_id=Non async def mock_next(ctx: ChatContext) -> None: result = MagicMock() - result.messages = [ChatMessage(role="assistant", text="Response")] + result.messages = [Message(role="assistant", text="Response")] ctx.result = result await middleware.process(chat_context, mock_next) @@ -190,7 +188,7 @@ async def test_chat_middleware_handles_payment_required_pre_check(self, mock_cre chat_options = MagicMock() chat_options.model = "test-model" context = ChatContext( - chat_client=chat_client, messages=[ChatMessage(role="user", text="Hello")], options=chat_options + chat_client=chat_client, messages=[Message(role="user", text="Hello")], options=chat_options ) async def mock_process_messages(*args, **kwargs): @@ -216,7 +214,7 @@ async def test_chat_middleware_handles_payment_required_post_check(self, mock_cr chat_options = MagicMock() chat_options.model = "test-model" context = ChatContext( - chat_client=chat_client, messages=[ChatMessage(role="user", text="Hello")], options=chat_options + chat_client=chat_client, messages=[Message(role="user", text="Hello")], options=chat_options ) call_count = 0 @@ -232,7 +230,7 @@ async def side_effect(*args, **kwargs): async def mock_next(ctx: ChatContext) -> None: result = MagicMock() - result.messages = [ChatMessage(role="assistant", text="OK")] + result.messages = [Message(role="assistant", text="OK")] ctx.result = result with pytest.raises(PurviewPaymentRequiredError): @@ -249,7 +247,7 @@ async def test_chat_middleware_ignores_payment_required_when_configured(self, mo chat_options = MagicMock() chat_options.model = "test-model" context = ChatContext( - chat_client=chat_client, messages=[ChatMessage(role="user", text="Hello")], options=chat_options + chat_client=chat_client, messages=[Message(role="user", text="Hello")], options=chat_options ) async def mock_process_messages(*args, **kwargs): @@ -259,7 +257,7 @@ async def mock_process_messages(*args, **kwargs): async def 
mock_next(ctx: ChatContext) -> None: result = MagicMock() - result.messages = [ChatMessage(role="assistant", text="Response")] + result.messages = [Message(role="assistant", text="Response")] context.result = result # Should not raise, just log @@ -291,7 +289,7 @@ async def test_chat_middleware_with_ignore_exceptions(self, mock_credential: Asy chat_options = MagicMock() chat_options.model = "test-model" context = ChatContext( - chat_client=chat_client, messages=[ChatMessage(role="user", text="Hello")], options=chat_options + chat_client=chat_client, messages=[Message(role="user", text="Hello")], options=chat_options ) async def mock_process_messages(*args, **kwargs): @@ -301,7 +299,7 @@ async def mock_process_messages(*args, **kwargs): async def mock_next(ctx: ChatContext) -> None: result = MagicMock() - result.messages = [ChatMessage(role="assistant", text="Response")] + result.messages = [Message(role="assistant", text="Response")] context.result = result # Should not raise, just log @@ -320,7 +318,7 @@ async def test_chat_middleware_raises_on_pre_check_exception_when_ignore_excepti chat_options = MagicMock() chat_options.model = "test-model" context = ChatContext( - chat_client=chat_client, messages=[ChatMessage(role="user", text="Hello")], options=chat_options + chat_client=chat_client, messages=[Message(role="user", text="Hello")], options=chat_options ) with patch.object(middleware._processor, "process_messages", side_effect=ValueError("boom")): @@ -342,7 +340,7 @@ async def test_chat_middleware_raises_on_post_check_exception_when_ignore_except chat_options = MagicMock() chat_options.model = "test-model" context = ChatContext( - chat_client=chat_client, messages=[ChatMessage(role="user", text="Hello")], options=chat_options + chat_client=chat_client, messages=[Message(role="user", text="Hello")], options=chat_options ) call_count = 0 @@ -358,7 +356,7 @@ async def side_effect(*args, **kwargs): async def mock_next(ctx: ChatContext) -> None: result = MagicMock() 
- result.messages = [ChatMessage(role="assistant", text="OK")] + result.messages = [Message(role="assistant", text="OK")] ctx.result = result with pytest.raises(ValueError, match="post"): diff --git a/python/packages/purview/tests/test_middleware.py b/python/packages/purview/tests/test_middleware.py index 71eaa93056..ff77331155 100644 --- a/python/packages/purview/tests/test_middleware.py +++ b/python/packages/purview/tests/test_middleware.py @@ -5,7 +5,7 @@ from unittest.mock import AsyncMock, MagicMock, patch import pytest -from agent_framework import AgentContext, AgentResponse, AgentThread, ChatMessage, MiddlewareTermination +from agent_framework import AgentContext, AgentResponse, AgentThread, Message, MiddlewareTermination from azure.core.credentials import AccessToken from agent_framework_purview import PurviewPolicyMiddleware, PurviewSettings @@ -50,7 +50,7 @@ async def test_middleware_allows_clean_prompt( self, middleware: PurviewPolicyMiddleware, mock_agent: MagicMock ) -> None: """Test middleware allows prompt that passes policy check.""" - context = AgentContext(agent=mock_agent, messages=[ChatMessage(role="user", text="Hello, how are you?")]) + context = AgentContext(agent=mock_agent, messages=[Message(role="user", text="Hello, how are you?")]) with patch.object(middleware._processor, "process_messages", return_value=(False, "user-123")): next_called = False @@ -58,7 +58,7 @@ async def test_middleware_allows_clean_prompt( async def mock_next(ctx: AgentContext) -> None: nonlocal next_called next_called = True - ctx.result = AgentResponse(messages=[ChatMessage(role="assistant", text="I'm good, thanks!")]) + ctx.result = AgentResponse(messages=[Message(role="assistant", text="I'm good, thanks!")]) await middleware.process(context, mock_next) @@ -69,7 +69,7 @@ async def test_middleware_blocks_prompt_on_policy_violation( self, middleware: PurviewPolicyMiddleware, mock_agent: MagicMock ) -> None: """Test middleware blocks prompt that violates policy.""" - 
context = AgentContext(agent=mock_agent, messages=[ChatMessage(role="user", text="Sensitive information")]) + context = AgentContext(agent=mock_agent, messages=[Message(role="user", text="Sensitive information")]) with patch.object(middleware._processor, "process_messages", return_value=(True, "user-123")): next_called = False @@ -89,7 +89,7 @@ async def mock_next(ctx: AgentContext) -> None: async def test_middleware_checks_response(self, middleware: PurviewPolicyMiddleware, mock_agent: MagicMock) -> None: """Test middleware checks agent response for policy violations.""" - context = AgentContext(agent=mock_agent, messages=[ChatMessage(role="user", text="Hello")]) + context = AgentContext(agent=mock_agent, messages=[Message(role="user", text="Hello")]) call_count = 0 @@ -103,7 +103,7 @@ async def mock_process_messages(messages, activity, session_id=None, user_id=Non async def mock_next(ctx: AgentContext) -> None: ctx.result = AgentResponse( - messages=[ChatMessage(role="assistant", text="Here's some sensitive information")] + messages=[Message(role="assistant", text="Here's some sensitive information")] ) await middleware.process(context, mock_next) @@ -121,7 +121,7 @@ async def test_middleware_handles_result_without_messages( # Set ignore_exceptions to True so AttributeError is caught and logged middleware._settings.ignore_exceptions = True - context = AgentContext(agent=mock_agent, messages=[ChatMessage(role="user", text="Hello")]) + context = AgentContext(agent=mock_agent, messages=[Message(role="user", text="Hello")]) with patch.object(middleware._processor, "process_messages", return_value=(False, "user-123")): @@ -138,12 +138,12 @@ async def test_middleware_processor_receives_correct_activity( """Test middleware passes correct activity type to processor.""" from agent_framework_purview._models import Activity - context = AgentContext(agent=mock_agent, messages=[ChatMessage(role="user", text="Test")]) + context = AgentContext(agent=mock_agent, 
messages=[Message(role="user", text="Test")]) with patch.object(middleware._processor, "process_messages", return_value=(False, "user-123")) as mock_process: async def mock_next(ctx: AgentContext) -> None: - ctx.result = AgentResponse(messages=[ChatMessage(role="assistant", text="Response")]) + ctx.result = AgentResponse(messages=[Message(role="assistant", text="Response")]) await middleware.process(context, mock_next) @@ -155,13 +155,13 @@ async def test_middleware_streaming_skips_post_check( self, middleware: PurviewPolicyMiddleware, mock_agent: MagicMock ) -> None: """Test that streaming results skip post-check evaluation.""" - context = AgentContext(agent=mock_agent, messages=[ChatMessage(role="user", text="Hello")]) + context = AgentContext(agent=mock_agent, messages=[Message(role="user", text="Hello")]) context.stream = True with patch.object(middleware._processor, "process_messages", return_value=(False, "user-123")) as mock_proc: async def mock_next(ctx: AgentContext) -> None: - ctx.result = AgentResponse(messages=[ChatMessage(role="assistant", text="streaming")]) + ctx.result = AgentResponse(messages=[Message(role="assistant", text="streaming")]) await middleware.process(context, mock_next) @@ -173,7 +173,7 @@ async def test_middleware_payment_required_in_pre_check_raises_by_default( """Test that 402 in pre-check is raised when ignore_payment_required=False.""" from agent_framework_purview._exceptions import PurviewPaymentRequiredError - context = AgentContext(agent=mock_agent, messages=[ChatMessage(role="user", text="Hello")]) + context = AgentContext(agent=mock_agent, messages=[Message(role="user", text="Hello")]) with patch.object( middleware._processor, @@ -193,7 +193,7 @@ async def test_middleware_payment_required_in_post_check_raises_by_default( """Test that 402 in post-check is raised when ignore_payment_required=False.""" from agent_framework_purview._exceptions import PurviewPaymentRequiredError - context = AgentContext(agent=mock_agent, 
messages=[ChatMessage(role="user", text="Hello")]) + context = AgentContext(agent=mock_agent, messages=[Message(role="user", text="Hello")]) call_count = 0 @@ -207,7 +207,7 @@ async def side_effect(*args, **kwargs): with patch.object(middleware._processor, "process_messages", side_effect=side_effect): async def mock_next(ctx: AgentContext) -> None: - ctx.result = AgentResponse(messages=[ChatMessage(role="assistant", text="OK")]) + ctx.result = AgentResponse(messages=[Message(role="assistant", text="OK")]) with pytest.raises(PurviewPaymentRequiredError): await middleware.process(context, mock_next) @@ -218,7 +218,7 @@ async def test_middleware_post_check_exception_raises_when_ignore_exceptions_fal """Test that post-check exceptions are propagated when ignore_exceptions=False.""" middleware._settings.ignore_exceptions = False - context = AgentContext(agent=mock_agent, messages=[ChatMessage(role="user", text="Hello")]) + context = AgentContext(agent=mock_agent, messages=[Message(role="user", text="Hello")]) call_count = 0 @@ -232,7 +232,7 @@ async def side_effect(*args, **kwargs): with patch.object(middleware._processor, "process_messages", side_effect=side_effect): async def mock_next(ctx: AgentContext) -> None: - ctx.result = AgentResponse(messages=[ChatMessage(role="assistant", text="OK")]) + ctx.result = AgentResponse(messages=[Message(role="assistant", text="OK")]) with pytest.raises(ValueError, match="Post-check blew up"): await middleware.process(context, mock_next) @@ -244,14 +244,14 @@ async def test_middleware_handles_pre_check_exception( # Set ignore_exceptions to True middleware._settings.ignore_exceptions = True - context = AgentContext(agent=mock_agent, messages=[ChatMessage(role="user", text="Test")]) + context = AgentContext(agent=mock_agent, messages=[Message(role="user", text="Test")]) with patch.object( middleware._processor, "process_messages", side_effect=Exception("Pre-check error") ) as mock_process: async def mock_next(ctx: AgentContext) -> 
None: - ctx.result = AgentResponse(messages=[ChatMessage(role="assistant", text="Response")]) + ctx.result = AgentResponse(messages=[Message(role="assistant", text="Response")]) await middleware.process(context, mock_next) @@ -267,7 +267,7 @@ async def test_middleware_handles_post_check_exception( # Set ignore_exceptions to True middleware._settings.ignore_exceptions = True - context = AgentContext(agent=mock_agent, messages=[ChatMessage(role="user", text="Test")]) + context = AgentContext(agent=mock_agent, messages=[Message(role="user", text="Test")]) call_count = 0 @@ -281,7 +281,7 @@ async def mock_process_messages(*args, **kwargs): with patch.object(middleware._processor, "process_messages", side_effect=mock_process_messages): async def mock_next(ctx: AgentContext) -> None: - ctx.result = AgentResponse(messages=[ChatMessage(role="assistant", text="Response")]) + ctx.result = AgentResponse(messages=[Message(role="assistant", text="Response")]) await middleware.process(context, mock_next) @@ -298,7 +298,7 @@ async def test_middleware_with_ignore_exceptions_true(self, mock_credential: Asy mock_agent = MagicMock() mock_agent.name = "test-agent" - context = AgentContext(agent=mock_agent, messages=[ChatMessage(role="user", text="Test")]) + context = AgentContext(agent=mock_agent, messages=[Message(role="user", text="Test")]) # Mock processor to raise an exception async def mock_process_messages(*args, **kwargs): @@ -307,7 +307,7 @@ async def mock_process_messages(*args, **kwargs): with patch.object(middleware._processor, "process_messages", side_effect=mock_process_messages): async def mock_next(ctx): - ctx.result = AgentResponse(messages=[ChatMessage(role="assistant", text="Response")]) + ctx.result = AgentResponse(messages=[Message(role="assistant", text="Response")]) # Should not raise, just log await middleware.process(context, mock_next) @@ -322,7 +322,7 @@ async def test_middleware_with_ignore_exceptions_false(self, mock_credential: As mock_agent = MagicMock() 
mock_agent.name = "test-agent" - context = AgentContext(agent=mock_agent, messages=[ChatMessage(role="user", text="Test")]) + context = AgentContext(agent=mock_agent, messages=[Message(role="user", text="Test")]) # Mock processor to raise an exception async def mock_process_messages(*args, **kwargs): @@ -342,12 +342,12 @@ async def test_middleware_uses_thread_service_thread_id_as_session_id( ) -> None: """Test that session_id is extracted from thread.service_thread_id.""" thread = AgentThread(service_thread_id="thread-123") - context = AgentContext(agent=mock_agent, messages=[ChatMessage(role="user", text="Hello")], thread=thread) + context = AgentContext(agent=mock_agent, messages=[Message(role="user", text="Hello")], thread=thread) with patch.object(middleware._processor, "process_messages", return_value=(False, "user-123")) as mock_proc: async def mock_next(ctx: AgentContext) -> None: - ctx.result = AgentResponse(messages=[ChatMessage(role="assistant", text="Hi")]) + ctx.result = AgentResponse(messages=[Message(role="assistant", text="Hi")]) await middleware.process(context, mock_next) @@ -359,13 +359,13 @@ async def test_middleware_uses_message_conversation_id_as_session_id( self, middleware: PurviewPolicyMiddleware, mock_agent: MagicMock ) -> None: """Test that session_id is extracted from message.additional_properties['conversation_id'].""" - messages = [ChatMessage(role="user", text="Hello", additional_properties={"conversation_id": "conv-456"})] + messages = [Message(role="user", text="Hello", additional_properties={"conversation_id": "conv-456"})] context = AgentContext(agent=mock_agent, messages=messages) with patch.object(middleware._processor, "process_messages", return_value=(False, "user-123")) as mock_proc: async def mock_next(ctx: AgentContext) -> None: - ctx.result = AgentResponse(messages=[ChatMessage(role="assistant", text="Hi")]) + ctx.result = AgentResponse(messages=[Message(role="assistant", text="Hi")]) await middleware.process(context, 
mock_next) @@ -378,13 +378,13 @@ async def test_middleware_thread_id_takes_precedence_over_message_conversation_i ) -> None: """Test that thread.service_thread_id takes precedence over message conversation_id.""" thread = AgentThread(service_thread_id="thread-789") - messages = [ChatMessage(role="user", text="Hello", additional_properties={"conversation_id": "conv-456"})] + messages = [Message(role="user", text="Hello", additional_properties={"conversation_id": "conv-456"})] context = AgentContext(agent=mock_agent, messages=messages, thread=thread) with patch.object(middleware._processor, "process_messages", return_value=(False, "user-123")) as mock_proc: async def mock_next(ctx: AgentContext) -> None: - ctx.result = AgentResponse(messages=[ChatMessage(role="assistant", text="Hi")]) + ctx.result = AgentResponse(messages=[Message(role="assistant", text="Hi")]) await middleware.process(context, mock_next) @@ -395,12 +395,12 @@ async def test_middleware_passes_none_session_id_when_not_available( self, middleware: PurviewPolicyMiddleware, mock_agent: MagicMock ) -> None: """Test that session_id is None when no thread or conversation_id is available.""" - context = AgentContext(agent=mock_agent, messages=[ChatMessage(role="user", text="Hello")]) + context = AgentContext(agent=mock_agent, messages=[Message(role="user", text="Hello")]) with patch.object(middleware._processor, "process_messages", return_value=(False, "user-123")) as mock_proc: async def mock_next(ctx: AgentContext) -> None: - ctx.result = AgentResponse(messages=[ChatMessage(role="assistant", text="Hi")]) + ctx.result = AgentResponse(messages=[Message(role="assistant", text="Hi")]) await middleware.process(context, mock_next) @@ -412,12 +412,12 @@ async def test_middleware_session_id_used_in_post_check( ) -> None: """Test that session_id is passed to post-check process_messages call.""" thread = AgentThread(service_thread_id="thread-999") - context = AgentContext(agent=mock_agent, 
messages=[ChatMessage(role="user", text="Hello")], thread=thread) + context = AgentContext(agent=mock_agent, messages=[Message(role="user", text="Hello")], thread=thread) with patch.object(middleware._processor, "process_messages", return_value=(False, "user-123")) as mock_proc: async def mock_next(ctx: AgentContext) -> None: - ctx.result = AgentResponse(messages=[ChatMessage(role="assistant", text="Response")]) + ctx.result = AgentResponse(messages=[Message(role="assistant", text="Response")]) await middleware.process(context, mock_next) diff --git a/python/packages/purview/tests/test_processor.py b/python/packages/purview/tests/test_processor.py index be4d4aca89..ab96999921 100644 --- a/python/packages/purview/tests/test_processor.py +++ b/python/packages/purview/tests/test_processor.py @@ -5,7 +5,7 @@ from unittest.mock import AsyncMock, MagicMock, patch import pytest -from agent_framework import ChatMessage +from agent_framework import Message from agent_framework_purview import PurviewAppLocation, PurviewLocationType, PurviewSettings from agent_framework_purview._models import ( @@ -83,8 +83,8 @@ async def test_processor_initialization( async def test_process_messages_with_defaults(self, processor: ScopedContentProcessor) -> None: """Test process_messages with settings that have defaults.""" messages = [ - ChatMessage(role="user", text="Hello"), - ChatMessage(role="assistant", text="Hi there"), + Message(role="user", text="Hello"), + Message(role="assistant", text="Hi there"), ] with patch.object(processor, "_map_messages", return_value=([], None)) as mock_map: @@ -98,7 +98,7 @@ async def test_process_messages_blocks_content( self, processor: ScopedContentProcessor, process_content_request_factory ) -> None: """Test process_messages returns True when content should be blocked.""" - messages = [ChatMessage(role="user", text="Sensitive content")] + messages = [Message(role="user", text="Sensitive content")] mock_request = 
process_content_request_factory("Sensitive content") @@ -120,7 +120,7 @@ async def test_map_messages_creates_requests( ) -> None: """Test _map_messages creates ProcessContentRequest objects.""" messages = [ - ChatMessage( + Message( role="user", text="Test message", message_id="msg-123", @@ -139,7 +139,7 @@ async def test_map_messages_without_defaults_gets_token_info(self, mock_client: """Test _map_messages gets token info when settings lack some defaults.""" settings = PurviewSettings(app_name="Test App", tenant_id="12345678-1234-1234-1234-123456789012") processor = ScopedContentProcessor(mock_client, settings) - messages = [ChatMessage(role="user", text="Test", message_id="msg-123")] + messages = [Message(role="user", text="Test", message_id="msg-123")] requests, user_id = await processor._map_messages(messages, Activity.UPLOAD_TEXT) @@ -156,7 +156,7 @@ async def test_map_messages_raises_on_missing_tenant_id(self, mock_client: Async return_value={"user_id": "test-user", "client_id": "test-client"} ) - messages = [ChatMessage(role="user", text="Test", message_id="msg-123")] + messages = [Message(role="user", text="Test", message_id="msg-123")] with pytest.raises(ValueError, match="Tenant id required"): await processor._map_messages(messages, Activity.UPLOAD_TEXT) @@ -331,7 +331,7 @@ async def test_map_messages_with_user_id_in_additional_properties(self, mock_cli processor = ScopedContentProcessor(mock_client, settings) messages = [ - ChatMessage( + Message( role="user", text="Test message", additional_properties={"user_id": "22345678-1234-1234-1234-123456789012"}, @@ -355,7 +355,7 @@ async def test_map_messages_with_provided_user_id_fallback(self, mock_client: As ) processor = ScopedContentProcessor(mock_client, settings) - messages = [ChatMessage(role="user", text="Test message")] + messages = [Message(role="user", text="Test message")] requests, user_id = await processor._map_messages( messages, Activity.UPLOAD_TEXT, 
provided_user_id="32345678-1234-1234-1234-123456789012" @@ -376,7 +376,7 @@ async def test_map_messages_returns_empty_when_no_user_id(self, mock_client: Asy ) processor = ScopedContentProcessor(mock_client, settings) - messages = [ChatMessage(role="user", text="Test message")] + messages = [Message(role="user", text="Test message")] requests, user_id = await processor._map_messages(messages, Activity.UPLOAD_TEXT) @@ -479,7 +479,7 @@ async def test_user_id_from_token_when_no_other_source(self, mock_client: AsyncM settings = PurviewSettings(app_name="Test App") # No tenant_id or app_location processor = ScopedContentProcessor(mock_client, settings) - messages = [ChatMessage(role="user", text="Test")] + messages = [Message(role="user", text="Test")] requests, user_id = await processor._map_messages(messages, Activity.UPLOAD_TEXT) @@ -493,7 +493,7 @@ async def test_user_id_from_additional_properties_takes_priority( processor = ScopedContentProcessor(mock_client, settings) messages = [ - ChatMessage( + Message( role="user", text="Test", additional_properties={"user_id": "22222222-2222-2222-2222-222222222222"}, @@ -513,7 +513,7 @@ async def test_user_id_from_author_name_as_fallback( processor = ScopedContentProcessor(mock_client, settings) messages = [ - ChatMessage( + Message( role="user", text="Test", author_name="33333333-3333-3333-3333-333333333333", @@ -531,7 +531,7 @@ async def test_author_name_ignored_if_not_valid_guid( processor = ScopedContentProcessor(mock_client, settings) messages = [ - ChatMessage( + Message( role="user", text="Test", author_name="John Doe", # Not a GUID @@ -550,7 +550,7 @@ async def test_provided_user_id_used_as_last_resort( """Test provided_user_id parameter is used as last resort.""" processor = ScopedContentProcessor(mock_client, settings) - messages = [ChatMessage(role="user", text="Test")] + messages = [Message(role="user", text="Test")] requests, user_id = await processor._map_messages( messages, Activity.UPLOAD_TEXT, 
provided_user_id="44444444-4444-4444-4444-444444444444" @@ -562,7 +562,7 @@ async def test_invalid_provided_user_id_ignored(self, mock_client: AsyncMock, se """Test invalid provided_user_id is ignored.""" processor = ScopedContentProcessor(mock_client, settings) - messages = [ChatMessage(role="user", text="Test")] + messages = [Message(role="user", text="Test")] requests, user_id = await processor._map_messages(messages, Activity.UPLOAD_TEXT, provided_user_id="not-a-guid") @@ -574,11 +574,11 @@ async def test_multiple_messages_same_user_id(self, mock_client: AsyncMock, sett processor = ScopedContentProcessor(mock_client, settings) messages = [ - ChatMessage( + Message( role="user", text="First", additional_properties={"user_id": "55555555-5555-5555-5555-555555555555"} ), - ChatMessage(role="assistant", text="Response"), - ChatMessage(role="user", text="Second"), + Message(role="assistant", text="Response"), + Message(role="user", text="Second"), ] requests, user_id = await processor._map_messages(messages, Activity.UPLOAD_TEXT) @@ -594,13 +594,13 @@ async def test_first_valid_user_id_in_messages_is_used( processor = ScopedContentProcessor(mock_client, settings) messages = [ - ChatMessage(role="user", text="First", author_name="Not a GUID"), - ChatMessage( + Message(role="user", text="First", author_name="Not a GUID"), + Message( role="assistant", text="Response", additional_properties={"user_id": "66666666-6666-6666-6666-666666666666"}, ), - ChatMessage( + Message( role="user", text="Third", additional_properties={"user_id": "77777777-7777-7777-7777-777777777777"} ), ] @@ -654,7 +654,7 @@ async def test_protection_scopes_cached_on_first_call( scope_identifier="scope-123", scopes=[] ) - messages = [ChatMessage(role="user", text="Test")] + messages = [Message(role="user", text="Test")] await processor.process_messages(messages, Activity.UPLOAD_TEXT, user_id="12345678-1234-1234-1234-123456789012") @@ -676,7 +676,7 @@ async def 
test_payment_required_exception_cached_at_tenant_level( mock_client.get_protection_scopes.side_effect = PurviewPaymentRequiredError("Payment required") - messages = [ChatMessage(role="user", text="Test")] + messages = [Message(role="user", text="Test")] with pytest.raises(PurviewPaymentRequiredError): await processor.process_messages( diff --git a/python/packages/redis/AGENTS.md b/python/packages/redis/AGENTS.md index 60acfc1f77..3b575e5029 100644 --- a/python/packages/redis/AGENTS.md +++ b/python/packages/redis/AGENTS.md @@ -13,7 +13,7 @@ Redis-based storage for agent threads and context. from agent_framework.redis import RedisChatMessageStore store = RedisChatMessageStore(redis_url="redis://localhost:6379") -agent = ChatAgent(..., chat_message_store_factory=lambda: store) +agent = Agent(..., chat_message_store_factory=lambda: store) ``` ## Import Path diff --git a/python/packages/redis/agent_framework_redis/_chat_message_store.py b/python/packages/redis/agent_framework_redis/_chat_message_store.py index 4b50c63571..5ace6c13af 100644 --- a/python/packages/redis/agent_framework_redis/_chat_message_store.py +++ b/python/packages/redis/agent_framework_redis/_chat_message_store.py @@ -7,7 +7,7 @@ from uuid import uuid4 import redis.asyncio as redis -from agent_framework import ChatMessage +from agent_framework import Message from agent_framework._serialization import SerializationMixin from redis.credentials import CredentialProvider @@ -64,7 +64,7 @@ def __init__( thread_id: str | None = None, key_prefix: str = "chat_messages", max_messages: int | None = None, - messages: Sequence[ChatMessage] | None = None, + messages: Sequence[Message] | None = None, ) -> None: """Initialize the Redis chat message store. 
@@ -186,14 +186,14 @@ async def _ensure_initial_messages_added(self) -> None: self._initial_messages_added = True self._initial_messages.clear() - async def _add_redis_messages(self, messages: Sequence[ChatMessage]) -> None: + async def _add_redis_messages(self, messages: Sequence[Message]) -> None: """Add multiple messages to Redis using atomic pipeline operation. This internal method efficiently adds multiple messages to the Redis list using a single atomic transaction to ensure consistency. Args: - messages: Sequence of ChatMessage objects to add to Redis. + messages: Sequence of Message objects to add to Redis. """ if not messages: return @@ -207,7 +207,7 @@ async def _add_redis_messages(self, messages: Sequence[ChatMessage]) -> None: await pipe.rpush(self.redis_key, serialized_message) # type: ignore[misc] await pipe.execute() - async def add_messages(self, messages: Sequence[ChatMessage]) -> None: + async def add_messages(self, messages: Sequence[Message]) -> None: """Add messages to the Redis store (ChatMessageStoreProtocol protocol method). This method implements the required ChatMessageStoreProtocol protocol for adding messages. @@ -215,7 +215,7 @@ async def add_messages(self, messages: Sequence[ChatMessage]) -> None: trimming if message limits are configured. Args: - messages: Sequence of ChatMessage objects to add to the store. + messages: Sequence of Message objects to add to the store. Can be empty (no-op) or contain multiple messages. Thread Safety: @@ -225,7 +225,7 @@ async def add_messages(self, messages: Sequence[ChatMessage]) -> None: Example: .. 
code-block:: python - messages = [ChatMessage(role="user", text="Hello"), ChatMessage(role="assistant", text="Hi there!")] + messages = [Message(role="user", text="Hello"), Message(role="assistant", text="Hi there!")] await store.add_messages(messages) """ if not messages: @@ -244,14 +244,14 @@ async def add_messages(self, messages: Sequence[ChatMessage]) -> None: # Keep only the most recent max_messages using LTRIM await self._redis_client.ltrim(self.redis_key, -self.max_messages, -1) # type: ignore[misc] - async def list_messages(self) -> list[ChatMessage]: + async def list_messages(self) -> list[Message]: """Get all messages from the store in chronological order (ChatMessageStoreProtocol protocol method). This method implements the required ChatMessageStoreProtocol protocol for retrieving messages. Returns all messages stored in Redis, ordered from oldest (index 0) to newest (index -1). Returns: - List of ChatMessage objects in chronological order (oldest first). + List of Message objects in chronological order (oldest first). Returns empty list if no messages exist or if Redis connection fails. Example: @@ -269,7 +269,7 @@ async def list_messages(self) -> list[ChatMessage]: if redis_messages: for serialized_message in redis_messages: - # Deserialize each JSON message back to ChatMessage + # Deserialize each JSON message back to Message message = self._deserialize_message(serialized_message) messages.append(message) @@ -390,11 +390,11 @@ async def clear(self) -> None: """ await self._redis_client.delete(self.redis_key) - def _serialize_message(self, message: ChatMessage) -> str: - """Serialize a ChatMessage to JSON string. + def _serialize_message(self, message: Message) -> str: + """Serialize a Message to JSON string. Args: - message: ChatMessage to serialize. + message: Message to serialize. Returns: JSON string representation of the message. 
@@ -402,17 +402,17 @@ def _serialize_message(self, message: ChatMessage) -> str: # Serialize to compact JSON (no extra whitespace for Redis efficiency) return message.to_json(separators=(",", ":")) - def _deserialize_message(self, serialized_message: str) -> ChatMessage: - """Deserialize a JSON string to ChatMessage. + def _deserialize_message(self, serialized_message: str) -> Message: + """Deserialize a JSON string to Message. Args: serialized_message: JSON string representation of a message. Returns: - ChatMessage object. + Message object. """ - # Reconstruct ChatMessage using custom deserialization - return ChatMessage.from_json(serialized_message) + # Reconstruct Message using custom deserialization + return Message.from_json(serialized_message) # ============================================================================ # List-like Convenience Methods (Redis-optimized async versions) @@ -446,14 +446,14 @@ async def __len__(self) -> int: await self._ensure_initial_messages_added() return await self._redis_client.llen(self.redis_key) # type: ignore[misc,no-any-return] - async def getitem(self, index: int) -> ChatMessage: + async def getitem(self, index: int) -> Message: """Get a message by index using Redis LINDEX. Args: index: The index of the message to retrieve. Returns: - The ChatMessage at the specified index. + The Message at the specified index. Raises: IndexError: If the index is out of range. @@ -467,12 +467,12 @@ async def getitem(self, index: int) -> ChatMessage: return self._deserialize_message(serialized_message) - async def setitem(self, index: int, item: ChatMessage) -> None: + async def setitem(self, index: int, item: Message) -> None: """Set a message at the specified index using Redis LSET. Args: index: The index at which to set the message. - item: The ChatMessage to set at the specified index. + item: The Message to set at the specified index. Raises: IndexError: If the index is out of range. 
@@ -490,11 +490,11 @@ async def setitem(self, index: int, item: ChatMessage) -> None: serialized_message = self._serialize_message(item) await self._redis_client.lset(self.redis_key, index, serialized_message) # type: ignore[misc] - async def append(self, item: ChatMessage) -> None: + async def append(self, item: Message) -> None: """Append a message to the end of the store. Args: - item: The ChatMessage to append. + item: The Message to append. """ await self.add_messages([item]) @@ -507,14 +507,14 @@ async def count(self) -> int: await self._ensure_initial_messages_added() return await self._redis_client.llen(self.redis_key) # type: ignore[misc,no-any-return] - async def index(self, item: ChatMessage) -> int: + async def index(self, item: Message) -> int: """Return the index of the first occurrence of the specified message. Uses Redis LINDEX to iterate through the list without loading all messages. Still O(N) but more memory efficient for large lists. Args: - item: The ChatMessage to find. + item: The Message to find. Returns: The index of the first occurrence of the message. @@ -533,16 +533,16 @@ async def index(self, item: ChatMessage) -> int: if redis_message == target_serialized: return i - raise ValueError("ChatMessage not found in store") + raise ValueError("Message not found in store") - async def remove(self, item: ChatMessage) -> None: + async def remove(self, item: Message) -> None: """Remove the first occurrence of the specified message from the store. Uses Redis LREM command for efficient removal by value. O(N) but performed natively in Redis without data transfer. Args: - item: The ChatMessage to remove. + item: The Message to remove. Raises: ValueError: If the message is not found in the store. 
@@ -556,13 +556,13 @@ async def remove(self, item: ChatMessage) -> None: removed_count = await self._redis_client.lrem(self.redis_key, 1, target_serialized) # type: ignore[misc] if removed_count == 0: - raise ValueError("ChatMessage not found in store") + raise ValueError("Message not found in store") - async def extend(self, items: Sequence[ChatMessage]) -> None: + async def extend(self, items: Sequence[Message]) -> None: """Extend the store by appending all messages from the iterable. Args: - items: Sequence of ChatMessage objects to append. + items: Sequence of Message objects to append. """ await self.add_messages(items) diff --git a/python/packages/redis/agent_framework_redis/_provider.py b/python/packages/redis/agent_framework_redis/_provider.py index f8449962b7..193ea444d3 100644 --- a/python/packages/redis/agent_framework_redis/_provider.py +++ b/python/packages/redis/agent_framework_redis/_provider.py @@ -10,7 +10,7 @@ from typing import Any, Literal, cast import numpy as np -from agent_framework import ChatMessage, Context, ContextProvider +from agent_framework import Context, ContextProvider, Message from agent_framework.exceptions import ( AgentException, ServiceInitializationError, @@ -484,19 +484,17 @@ async def thread_created(self, thread_id: str | None) -> None: @override async def invoked( self, - request_messages: ChatMessage | Sequence[ChatMessage], - response_messages: ChatMessage | Sequence[ChatMessage] | None = None, + request_messages: Message | Sequence[Message], + response_messages: Message | Sequence[Message] | None = None, invoke_exception: Exception | None = None, **kwargs: Any, ) -> None: self._validate_filters() - request_messages_list = ( - [request_messages] if isinstance(request_messages, ChatMessage) else list(request_messages) - ) + request_messages_list = [request_messages] if isinstance(request_messages, Message) else list(request_messages) response_messages_list = ( [response_messages] - if isinstance(response_messages, 
ChatMessage) + if isinstance(response_messages, Message) else list(response_messages) if response_messages else [] @@ -518,7 +516,7 @@ async def invoked( await self._add(data=messages) @override - async def invoking(self, messages: ChatMessage | MutableSequence[ChatMessage], **kwargs: Any) -> Context: + async def invoking(self, messages: Message | MutableSequence[Message], **kwargs: Any) -> Context: """Called before invoking the model to provide scoped context. Concatenates recent messages into a query, fetches matching memories from Redis. @@ -534,7 +532,7 @@ async def invoking(self, messages: ChatMessage | MutableSequence[ChatMessage], * Context: Context object containing instructions with memories. """ self._validate_filters() - messages_list = [messages] if isinstance(messages, ChatMessage) else list(messages) + messages_list = [messages] if isinstance(messages, Message) else list(messages) input_text = "\n".join(msg.text for msg in messages_list if msg and msg.text and msg.text.strip()) memories = await self._redis_search(text=input_text) @@ -543,7 +541,7 @@ async def invoking(self, messages: ChatMessage | MutableSequence[ChatMessage], * ) return Context( - messages=[ChatMessage(role="user", text=f"{self.context_prompt}\n{line_separated_memories}")] + messages=[Message(role="user", text=f"{self.context_prompt}\n{line_separated_memories}")] if line_separated_memories else None ) diff --git a/python/packages/redis/tests/test_redis_chat_message_store.py b/python/packages/redis/tests/test_redis_chat_message_store.py index 152d99fdf1..99a3038870 100644 --- a/python/packages/redis/tests/test_redis_chat_message_store.py +++ b/python/packages/redis/tests/test_redis_chat_message_store.py @@ -3,7 +3,7 @@ from unittest.mock import AsyncMock, MagicMock, patch import pytest -from agent_framework import ChatMessage, Content +from agent_framework import Content, Message from agent_framework_redis import RedisChatMessageStore @@ -19,9 +19,9 @@ class TestRedisChatMessageStore: 
def sample_messages(self): """Sample chat messages for testing.""" return [ - ChatMessage(role="user", text="Hello", message_id="msg1"), - ChatMessage(role="assistant", text="Hi there!", message_id="msg2"), - ChatMessage(role="user", text="How are you?", message_id="msg3"), + Message(role="user", text="Hello", message_id="msg1"), + Message(role="assistant", text="Hi there!", message_id="msg2"), + Message(role="user", text="How are you?", message_id="msg3"), ] @pytest.fixture @@ -250,7 +250,7 @@ async def test_add_messages_with_max_limit(self, mock_redis_client): store = RedisChatMessageStore(redis_url="redis://localhost:6379", thread_id="test123", max_messages=3) store._redis_client = mock_redis_client - message = ChatMessage(role="user", text="Test") + message = Message(role="user", text="Test") await store.add_messages([message]) # Should trim after adding to keep only last 3 messages @@ -269,8 +269,8 @@ async def test_list_messages_with_data(self, redis_store, mock_redis_client, sam """Test listing messages with data in Redis.""" # Create proper serialized messages using the actual serialization method test_messages = [ - ChatMessage(role="user", text="Hello", message_id="msg1"), - ChatMessage(role="assistant", text="Hi there!", message_id="msg2"), + Message(role="user", text="Hello", message_id="msg1"), + Message(role="assistant", text="Hi there!", message_id="msg2"), ] serialized_messages = [redis_store._serialize_message(msg) for msg in test_messages] mock_redis_client.lrange.return_value = serialized_messages @@ -411,7 +411,7 @@ async def test_message_serialization_with_complex_content(self): store = RedisChatMessageStore(redis_url="redis://localhost:6379", thread_id="test123") # Message with multiple content types - message = ChatMessage( + message = Message( role="assistant", contents=[Content.from_text(text="Hello"), Content.from_text(text="World")], author_name="TestBot", @@ -444,7 +444,7 @@ async def test_redis_connection_error_handling(self): store = 
RedisChatMessageStore(redis_url="redis://localhost:6379", thread_id="test123") store._redis_client = mock_client - message = ChatMessage(role="user", text="Test") + message = Message(role="user", text="Test") # Should propagate Redis connection errors with pytest.raises(Exception, match="Connection failed"): @@ -485,7 +485,7 @@ async def test_setitem(self, redis_store, mock_redis_client, sample_messages): mock_redis_client.llen.return_value = 2 mock_redis_client.lset = AsyncMock() - new_message = ChatMessage(role="user", text="Updated message") + new_message = Message(role="user", text="Updated message") await redis_store.setitem(0, new_message) mock_redis_client.lset.assert_called_once() @@ -497,13 +497,13 @@ async def test_setitem_index_error(self, redis_store, mock_redis_client): """Test setitem raises IndexError for invalid index.""" mock_redis_client.llen.return_value = 0 - new_message = ChatMessage(role="user", text="Test") + new_message = Message(role="user", text="Test") with pytest.raises(IndexError): await redis_store.setitem(0, new_message) async def test_append(self, redis_store, mock_redis_client): """Test append method delegates to add_messages.""" - message = ChatMessage(role="user", text="Appended message") + message = Message(role="user", text="Appended message") await redis_store.append(message) # Should call pipeline operations via add_messages @@ -572,7 +572,7 @@ async def test_index_not_found(self, redis_store, mock_redis_client, sample_mess mock_redis_client.llen.return_value = 1 mock_redis_client.lindex = AsyncMock(return_value="different_message") - with pytest.raises(ValueError, match="ChatMessage not found in store"): + with pytest.raises(ValueError, match="Message not found in store"): await redis_store.index(sample_messages[0]) async def test_remove(self, redis_store, mock_redis_client, sample_messages): @@ -589,7 +589,7 @@ async def test_remove_not_found(self, redis_store, mock_redis_client, sample_mes """Test remove method when message 
is not found.""" mock_redis_client.lrem = AsyncMock(return_value=0) # 0 elements removed - with pytest.raises(ValueError, match="ChatMessage not found in store"): + with pytest.raises(ValueError, match="Message not found in store"): await redis_store.remove(sample_messages[0]) async def test_extend(self, redis_store, mock_redis_client, sample_messages): diff --git a/python/packages/redis/tests/test_redis_provider.py b/python/packages/redis/tests/test_redis_provider.py index 41ce7b37b8..8e842b3de7 100644 --- a/python/packages/redis/tests/test_redis_provider.py +++ b/python/packages/redis/tests/test_redis_provider.py @@ -5,7 +5,7 @@ import numpy as np import pytest -from agent_framework import ChatMessage +from agent_framework import Message from agent_framework.exceptions import AgentException, ServiceInitializationError from redisvl.utils.vectorize import CustomTextVectorizer @@ -113,18 +113,18 @@ def test_schema_without_vector_field(self, patch_index_from_dict): class TestRedisProviderMessages: @pytest.fixture - def sample_messages(self) -> list[ChatMessage]: + def sample_messages(self) -> list[Message]: return [ - ChatMessage(role="user", text="Hello, how are you?"), - ChatMessage(role="assistant", text="I'm doing well, thank you!"), - ChatMessage(role="system", text="You are a helpful assistant"), + Message(role="user", text="Hello, how are you?"), + Message(role="assistant", text="I'm doing well, thank you!"), + Message(role="system", text="You are a helpful assistant"), ] # Writes require at least one scoping filter to avoid unbounded operations async def test_messages_adding_requires_filters(self, patch_index_from_dict): # noqa: ARG002 provider = RedisProvider() with pytest.raises(ServiceInitializationError): - await provider.invoked("thread123", ChatMessage(role="user", text="Hello")) + await provider.invoked("thread123", Message(role="user", text="Hello")) # Captures the per-operation thread id when provided async def 
test_thread_created_sets_per_operation_id(self, patch_index_from_dict): # noqa: ARG002 @@ -157,7 +157,7 @@ class TestRedisProviderModelInvoking: async def test_model_invoking_requires_filters(self, patch_index_from_dict): # noqa: ARG002 provider = RedisProvider() with pytest.raises(ServiceInitializationError): - await provider.invoking(ChatMessage(role="user", text="Hi")) + await provider.invoking(Message(role="user", text="Hi")) # Ensures text-only search path is used and context is composed from hits async def test_textquery_path_and_context_contents( @@ -168,7 +168,7 @@ async def test_textquery_path_and_context_contents( provider = RedisProvider(user_id="u1") # Act - ctx = await provider.invoking([ChatMessage(role="user", text="q1")]) + ctx = await provider.invoking([Message(role="user", text="q1")]) # Assert: TextQuery used (not HybridQuery), filter_expression included assert patch_queries["TextQuery"].call_count == 1 @@ -190,7 +190,7 @@ async def test_model_invoking_empty_results_returns_empty_context( ): # noqa: ARG002 mock_index.query = AsyncMock(return_value=[]) provider = RedisProvider(user_id="u1") - ctx = await provider.invoking([ChatMessage(role="user", text="any")]) + ctx = await provider.invoking([Message(role="user", text="any")]) assert ctx.messages == [] # Ensures hybrid vector-text search is used when a vectorizer and vector field are configured @@ -198,7 +198,7 @@ async def test_hybridquery_path_with_vectorizer(self, mock_index: AsyncMock, pat mock_index.query = AsyncMock(return_value=[{"content": "Hit"}]) provider = RedisProvider(user_id="u1", redis_vectorizer=CUSTOM_VECTORIZER, vector_field_name="vec") - ctx = await provider.invoking([ChatMessage(role="user", text="hello")]) + ctx = await provider.invoking([Message(role="user", text="hello")]) # Assert: HybridQuery used with vector and vector field assert patch_queries["HybridQuery"].call_count == 1 @@ -240,9 +240,9 @@ async def test_messages_adding_adds_partition_defaults_and_roles( ) msgs = [ 
- ChatMessage(role="user", text="u"), - ChatMessage(role="assistant", text="a"), - ChatMessage(role="system", text="s"), + Message(role="user", text="u"), + Message(role="assistant", text="a"), + Message(role="system", text="s"), ] await provider.invoked(msgs) @@ -265,8 +265,8 @@ async def test_messages_adding_ignores_blank_and_disallowed_roles( ): # noqa: ARG002 provider = RedisProvider(user_id="u1", scope_to_per_operation_thread_id=True) msgs = [ - ChatMessage(role="user", text=" "), - ChatMessage(role="tool", text="tool output"), + Message(role="user", text=" "), + Message(role="tool", text="tool output"), ] await provider.invoked(msgs) # No valid messages -> no load @@ -279,8 +279,8 @@ async def test_messages_adding_triggers_index_create_once_when_drop_true( self, mock_index: AsyncMock, patch_index_from_dict ): # noqa: ARG002 provider = RedisProvider(user_id="u1") - await provider.invoked(ChatMessage(role="user", text="m1")) - await provider.invoked(ChatMessage(role="user", text="m2")) + await provider.invoked(Message(role="user", text="m1")) + await provider.invoked(Message(role="user", text="m2")) # create only on first call assert mock_index.create.await_count == 1 @@ -291,7 +291,7 @@ async def test_model_invoking_triggers_create_when_drop_false_and_not_exists( mock_index.exists = AsyncMock(return_value=False) provider = RedisProvider(user_id="u1") mock_index.query = AsyncMock(return_value=[{"content": "C"}]) - await provider.invoking([ChatMessage(role="user", text="q")]) + await provider.invoking([Message(role="user", text="q")]) assert mock_index.create.await_count == 1 @@ -321,7 +321,7 @@ async def test_messages_adding_populates_vector_field_when_vectorizer_present( vector_field_name="vec", ) - await provider.invoked(ChatMessage(role="user", text="hello")) + await provider.invoked(Message(role="user", text="hello")) assert mock_index.load.await_count == 1 (loaded_args, _kwargs) = mock_index.load.call_args docs = loaded_args[0] diff --git 
a/python/samples/autogen-migration/README.md b/python/samples/autogen-migration/README.md index 509b518f8a..36010fa223 100644 --- a/python/samples/autogen-migration/README.md +++ b/python/samples/autogen-migration/README.md @@ -6,7 +6,7 @@ This gallery helps AutoGen developers move to the Microsoft Agent Framework (AF) ### Single-Agent Parity -- [01_basic_assistant_agent.py](single_agent/01_basic_assistant_agent.py) — Minimal AutoGen `AssistantAgent` and AF `ChatAgent` comparison. +- [01_basic_assistant_agent.py](single_agent/01_basic_assistant_agent.py) — Minimal AutoGen `AssistantAgent` and AF `Agent` comparison. - [02_assistant_agent_with_tool.py](single_agent/02_assistant_agent_with_tool.py) — Function tool integration in both SDKs. - [03_assistant_agent_thread_and_stream.py](single_agent/03_assistant_agent_thread_and_stream.py) — Thread management and streaming responses. - [04_agent_as_tool.py](single_agent/04_agent_as_tool.py) — Using agents as tools (hierarchical agent pattern) and streaming with tools. @@ -51,7 +51,7 @@ python samples/autogen-migration/orchestrations/04_magentic_one.py ## Tips for Migration -- **Default behavior differences**: AutoGen's `AssistantAgent` is single-turn by default (`max_tool_iterations=1`), while AF's `ChatAgent` is multi-turn and continues tool execution automatically. +- **Default behavior differences**: AutoGen's `AssistantAgent` is single-turn by default (`max_tool_iterations=1`), while AF's `Agent` is multi-turn and continues tool execution automatically. - **Thread management**: AF agents are stateless by default. Use `agent.get_new_thread()` and pass it to `run()` to maintain conversation state, similar to AutoGen's conversation context. - **Tools**: AutoGen uses `FunctionTool` wrappers; AF uses `@tool` decorators with automatic schema inference. 
- **Orchestration patterns**: diff --git a/python/samples/autogen-migration/orchestrations/04_magentic_one.py b/python/samples/autogen-migration/orchestrations/04_magentic_one.py index f14cee5a26..a83f7cc33e 100644 --- a/python/samples/autogen-migration/orchestrations/04_magentic_one.py +++ b/python/samples/autogen-migration/orchestrations/04_magentic_one.py @@ -21,7 +21,7 @@ from agent_framework import ( AgentResponseUpdate, - ChatMessage, + Message, WorkflowEvent, ) from agent_framework.orchestrations import MagenticProgressLedger @@ -129,7 +129,7 @@ async def run_agent_framework() -> None: elif event.type == "magentic_orchestrator": print(f"\n[Magentic Orchestrator Event] Type: {event.data.event_type.name}") - if isinstance(event.data.content, ChatMessage): + if isinstance(event.data.content, Message): print(f"Please review the plan:\n{event.data.content.text}") elif isinstance(event.data.content, MagenticProgressLedger): print(f"Please review progress ledger:\n{json.dumps(event.data.content.to_dict(), indent=2)}") @@ -150,7 +150,7 @@ async def run_agent_framework() -> None: print("Final Output:") # The output of the Magentic workflow is a list of ChatMessages with only one final message # generated by the orchestrator. - output_messages = cast(list[ChatMessage], output_event.data) + output_messages = cast(list[Message], output_event.data) if output_messages: output = output_messages[-1].text print(output) diff --git a/python/samples/autogen-migration/single_agent/01_basic_assistant_agent.py b/python/samples/autogen-migration/single_agent/01_basic_assistant_agent.py index 711bd648c8..9f7ae98d6e 100644 --- a/python/samples/autogen-migration/single_agent/01_basic_assistant_agent.py +++ b/python/samples/autogen-migration/single_agent/01_basic_assistant_agent.py @@ -9,7 +9,7 @@ # uv run samples/autogen-migration/single_agent/01_basic_assistant_agent.py # Copyright (c) Microsoft. All rights reserved. -"""Basic AutoGen AssistantAgent vs Agent Framework ChatAgent. 
+"""Basic AutoGen AssistantAgent vs Agent Framework Agent. Both samples expect OpenAI-compatible environment variables (OPENAI_API_KEY or Azure OpenAI configuration). Update the prompts or client wiring to match your @@ -38,10 +38,10 @@ async def run_autogen() -> None: async def run_agent_framework() -> None: - """Call Agent Framework's ChatAgent created from OpenAIChatClient.""" + """Call Agent Framework's Agent created from OpenAIChatClient.""" from agent_framework.openai import OpenAIChatClient - # AF constructs a lightweight ChatAgent backed by OpenAIChatClient + # AF constructs a lightweight Agent backed by OpenAIChatClient client = OpenAIChatClient(model_id="gpt-4.1-mini") agent = client.as_agent( name="assistant", diff --git a/python/samples/autogen-migration/single_agent/02_assistant_agent_with_tool.py b/python/samples/autogen-migration/single_agent/02_assistant_agent_with_tool.py index ff56e694a0..cb027d636c 100644 --- a/python/samples/autogen-migration/single_agent/02_assistant_agent_with_tool.py +++ b/python/samples/autogen-migration/single_agent/02_assistant_agent_with_tool.py @@ -10,7 +10,7 @@ # uv run samples/autogen-migration/single_agent/02_assistant_agent_with_tool.py # Copyright (c) Microsoft. All rights reserved. -"""AutoGen AssistantAgent vs Agent Framework ChatAgent with function tools. +"""AutoGen AssistantAgent vs Agent Framework Agent with function tools. Demonstrates how to create and attach tools to agents in both frameworks. 
""" diff --git a/python/samples/concepts/response_stream.py b/python/samples/concepts/response_stream.py index 6d99058062..5cedc6711a 100644 --- a/python/samples/concepts/response_stream.py +++ b/python/samples/concepts/response_stream.py @@ -94,9 +94,9 @@ === Chaining with .map() and .with_finalizer() === -When building a ChatAgent on top of a ChatClient, we face a challenge: +When building a Agent on top of a ChatClient, we face a challenge: - The ChatClient returns a ResponseStream[ChatResponseUpdate, ChatResponse] -- The ChatAgent needs to return a ResponseStream[AgentResponseUpdate, AgentResponse] +- The Agent needs to return a ResponseStream[AgentResponseUpdate, AgentResponse] - We can't iterate the ChatClient's stream twice! The `.map()` and `.with_finalizer()` methods solve this by creating new ResponseStreams that: @@ -123,7 +123,7 @@ stream is wrapped/mapped. ```python -# ChatAgent does something like this internally: +# Agent does something like this internally: chat_stream = chat_client.get_response(messages, stream=True) agent_stream = ( chat_stream @@ -135,7 +135,7 @@ This ensures: - The underlying ChatClient stream is only consumed once - The agent can add its own transform hooks, result hooks, and cleanup logic -- Each layer (ChatClient, ChatAgent, middleware) can add independent behavior +- Each layer (ChatClient, Agent, middleware) can add independent behavior - Inner stream post-processing (like context provider notification) still runs - Types flow naturally through the chain """ @@ -281,7 +281,7 @@ def wrap_in_quotes_hook(response: ChatResponse) -> ChatResponse: # Simulate what ChatClient returns inner_stream = ResponseStream(generate_updates(), finalizer=combine_updates) - # Simulate what ChatAgent does: wrap the inner stream + # Simulate what Agent does: wrap the inner stream def to_agent_format(update: ChatResponseUpdate) -> ChatResponseUpdate: """Map ChatResponseUpdate to agent format (simulated transformation).""" # In real code, this 
would convert to AgentResponseUpdate diff --git a/python/samples/concepts/tools/README.md b/python/samples/concepts/tools/README.md index 6643a42126..b8a7704abc 100644 --- a/python/samples/concepts/tools/README.md +++ b/python/samples/concepts/tools/README.md @@ -20,7 +20,7 @@ sequenceDiagram participant Agent as Agent.run() participant AML as AgentMiddlewareLayer participant AMP as AgentMiddlewarePipeline - participant RawAgent as RawChatAgent.run() + participant RawAgent as RawAgent.run() participant CML as ChatMiddlewareLayer participant CMP as ChatMiddlewarePipeline participant FIL as FunctionInvocationLayer @@ -132,7 +132,7 @@ sequenceDiagram | Field | Type | Description | |-------|------|-------------| | `agent` | `SupportsAgentRun` | The agent being invoked | -| `messages` | `list[ChatMessage]` | Input messages (mutable) | +| `messages` | `list[Message]` | Input messages (mutable) | | `thread` | `AgentThread \| None` | Conversation thread | | `options` | `Mapping[str, Any]` | Chat options dict | | `stream` | `bool` | Whether streaming is enabled | @@ -144,7 +144,7 @@ sequenceDiagram 1. `categorize_middleware()` separates middleware by type (agent, chat, function) 2. Chat and function middleware are forwarded to `chat_client` 3. `AgentMiddlewarePipeline.execute()` runs the agent middleware chain -4. Final handler calls `RawChatAgent.run()` +4. 
Final handler calls `RawAgent.run()` **What Can Be Modified:** - `context.messages` - Add, remove, or modify input messages @@ -160,8 +160,8 @@ sequenceDiagram | Field | Type | Description | |-------|------|-------------| -| `chat_client` | `ChatClientProtocol` | The chat client | -| `messages` | `Sequence[ChatMessage]` | Messages to send | +| `chat_client` | `SupportsChatGetResponse` | The chat client | +| `messages` | `Sequence[Message]` | Messages to send | | `options` | `Mapping[str, Any]` | Chat options | | `stream` | `bool` | Whether streaming | | `metadata` | `dict` | Shared data between middleware | @@ -275,7 +275,7 @@ class TerminatingMiddleware(FunctionMiddleware): ### Agent Layer → Chat Layer ```python -# RawChatAgent._prepare_run_context() builds: +# RawAgent._prepare_run_context() builds: { "thread": AgentThread, # Validated/created thread "input_messages": [...], # Normalized input messages diff --git a/python/samples/concepts/typed_options.py b/python/samples/concepts/typed_options.py index 533b214ebe..85ba00ac20 100644 --- a/python/samples/concepts/typed_options.py +++ b/python/samples/concepts/typed_options.py @@ -3,13 +3,13 @@ import asyncio from typing import Literal -from agent_framework import ChatAgent +from agent_framework import Agent from agent_framework.anthropic import AnthropicClient from agent_framework.openai import OpenAIChatClient, OpenAIChatOptions """TypedDict-based Chat Options. -In Agent Framework, we have made ChatClient and ChatAgent generic over a ChatOptions typeddict, this means that +In Agent Framework, we have made ChatClient and Agent generic over a ChatOptions typeddict, this means that you can override which options are available for a given client or agent by providing your own TypedDict subclass. And we include the most common options for all ChatClient providers out of the box. @@ -21,7 +21,7 @@ including overriding unsupported options. 
The sample shows usage with both OpenAI and Anthropic clients, demonstrating -how provider-specific options work for ChatClient and ChatAgent. But the same approach works for other providers too. +how provider-specific options work for ChatClient and Agent. But the same approach works for other providers too. """ @@ -49,13 +49,13 @@ async def demo_anthropic_chat_client() -> None: async def demo_anthropic_agent() -> None: - """Demonstrate ChatAgent with Anthropic client and typed options.""" - print("\n=== ChatAgent with Anthropic and Typed Options ===\n") + """Demonstrate Agent with Anthropic client and typed options.""" + print("\n=== Agent with Anthropic and Typed Options ===\n") client = AnthropicClient(model_id="claude-sonnet-4-5-20250929") # Create a typed agent for Anthropic - IDE knows Anthropic-specific options! - agent = ChatAgent( + agent = Agent( chat_client=client, name="claude-assistant", instructions="You are a helpful assistant powered by Claude. Be concise.", @@ -132,14 +132,14 @@ async def demo_openai_chat_client_reasoning_models() -> None: async def demo_openai_agent() -> None: - """Demonstrate ChatAgent with OpenAI client and typed options.""" - print("\n=== ChatAgent with OpenAI and Typed Options ===\n") + """Demonstrate Agent with OpenAI client and typed options.""" + print("\n=== Agent with OpenAI and Typed Options ===\n") # Create a typed agent - IDE will autocomplete options! # The type annotation can be done either on the agent like below, # or on the client when constructing the client instance: # client = OpenAIChatClient[OpenAIReasoningChatOptions]() - agent = ChatAgent[OpenAIReasoningChatOptions]( + agent = Agent[OpenAIReasoningChatOptions]( chat_client=OpenAIChatClient(), name="weather-assistant", instructions="You are a helpful assistant. 
Answer concisely.", diff --git a/python/samples/demos/chatkit-integration/README.md b/python/samples/demos/chatkit-integration/README.md index 9636c4b190..d688eb3a6c 100644 --- a/python/samples/demos/chatkit-integration/README.md +++ b/python/samples/demos/chatkit-integration/README.md @@ -38,7 +38,7 @@ graph TB subgraph Integration["Agent Framework Integration"] Converter[ThreadItemConverter] Streamer[stream_agent_response] - Agent[ChatAgent] + Agent[Agent] end Widgets[Widget Rendering
render_weather_widget
render_city_selector_widget] @@ -61,7 +61,7 @@ graph TB AttStore -.->|save files| Files AttStore -.->|save metadata| SQLite - Converter -->|ChatMessage array| Agent + Converter -->|Message array| Agent Agent -->|AgentResponseUpdate| Streamer Streamer -->|ThreadStreamEvent| ChatKit @@ -88,7 +88,7 @@ The sample implements a ChatKit server using the `ChatKitServer` base class from - **`WeatherChatKitServer`**: Custom ChatKit server implementation that: - Extends `ChatKitServer[dict[str, Any]]` - - Uses Agent Framework's `ChatAgent` with Azure OpenAI + - Uses Agent Framework's `Agent` with Azure OpenAI - Converts ChatKit messages to Agent Framework format using `ThreadItemConverter` - Streams responses back to ChatKit using `stream_agent_response` - Creates and streams interactive widgets after agent responses diff --git a/python/samples/demos/chatkit-integration/app.py b/python/samples/demos/chatkit-integration/app.py index 44a2e125f6..543b508339 100644 --- a/python/samples/demos/chatkit-integration/app.py +++ b/python/samples/demos/chatkit-integration/app.py @@ -28,7 +28,7 @@ import uvicorn # Agent Framework imports -from agent_framework import AgentResponseUpdate, ChatAgent, ChatMessage, tool +from agent_framework import AgentResponseUpdate, Agent, Message, FunctionResultContent, Role, tool from agent_framework.azure import AzureOpenAIChatClient # Agent Framework ChatKit integration @@ -217,7 +217,7 @@ def __init__(self, data_store: SQLiteStore, attachment_store: FileBasedAttachmen # Create Agent Framework agent with Azure OpenAI # For authentication, run `az login` command in terminal try: - self.weather_agent = ChatAgent( + self.weather_agent = Agent( chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), instructions=( "You are a helpful weather assistant with image analysis capabilities. 
" @@ -290,8 +290,8 @@ async def _update_thread_title( conversation_context = "\n".join(user_messages[:3]) title_prompt = [ - ChatMessage( - role="user", + Message( + role=Role.USER, text=( f"Generate a very short, concise title (max 40 characters) for a conversation " f"that starts with:\n\n{conversation_context}\n\n" @@ -342,6 +342,7 @@ async def respond( runs the agent, converts the response back to ChatKit events using stream_agent_response, and creates interactive weather widgets when weather data is queried. """ + from agent_framework import FunctionResultContent if input_user_message is None: logger.debug("Received None user message, skipping") @@ -384,7 +385,7 @@ async def intercept_stream() -> AsyncIterator[AgentResponseUpdate]: # Check for function results in the update if update.contents: for content in update.contents: - if content.type == "function_result": + if isinstance(content, FunctionResultContent): result = content.result # Check if it's a WeatherResponse (string subclass with weather_data attribute) @@ -467,7 +468,7 @@ async def action( weather_data: WeatherData | None = None # Create an agent message asking about the weather - agent_messages = [ChatMessage(role="user", text=f"What's the weather in {city_label}?")] + agent_messages = [Message(role=Role.USER, text=f"What's the weather in {city_label}?")] logger.debug(f"Processing weather query: {agent_messages[0].text}") @@ -481,7 +482,7 @@ async def intercept_stream() -> AsyncIterator[AgentResponseUpdate]: # Check for function results in the update if update.contents: for content in update.contents: - if content.type == "function_result": + if isinstance(content, FunctionResultContent): result = content.result # Check if it's a WeatherResponse (string subclass with weather_data attribute) @@ -572,7 +573,7 @@ async def chatkit_endpoint(request: Request): @app.post("/upload/{attachment_id}") -async def upload_file(attachment_id: str, file: Annotated[UploadFile, File()]): +async def 
upload_file(attachment_id: str, file: UploadFile = File(...)): """Handle file upload for two-phase upload. The client POSTs the file bytes here after creating the attachment @@ -594,7 +595,7 @@ async def upload_file(attachment_id: str, file: Annotated[UploadFile, File()]): attachment = await data_store.load_attachment(attachment_id, {"user_id": DEFAULT_USER_ID}) # Clear the upload_url since upload is complete - attachment.upload_url = None # type: ignore[union-attr] + attachment.upload_url = None # Save the updated attachment back to the store await data_store.save_attachment(attachment, {"user_id": DEFAULT_USER_ID}) diff --git a/python/samples/demos/hosted_agents/agent_with_text_search_rag/main.py b/python/samples/demos/hosted_agents/agent_with_text_search_rag/main.py index 0c0660ceb0..b777fbfd22 100644 --- a/python/samples/demos/hosted_agents/agent_with_text_search_rag/main.py +++ b/python/samples/demos/hosted_agents/agent_with_text_search_rag/main.py @@ -6,7 +6,7 @@ from dataclasses import dataclass from typing import Any -from agent_framework import ChatMessage, Context, ContextProvider +from agent_framework import Message, Context, ContextProvider from agent_framework.azure import AzureOpenAIChatClient from azure.ai.agentserver.agentframework import from_agent_framework # pyright: ignore[reportUnknownVariableType] from azure.identity import DefaultAzureCredential @@ -27,16 +27,16 @@ class TextSearchResult: class TextSearchContextProvider(ContextProvider): """A simple context provider that simulates text search results based on keywords in the user's message.""" - def _get_most_recent_message(self, messages: ChatMessage | MutableSequence[ChatMessage]) -> ChatMessage: + def _get_most_recent_message(self, messages: Message | MutableSequence[Message]) -> Message: """Helper method to extract the most recent message from the input.""" - if isinstance(messages, ChatMessage): + if isinstance(messages, Message): return messages if messages: return messages[-1] raise 
ValueError("No messages provided") @override - async def invoking(self, messages: ChatMessage | MutableSequence[ChatMessage], **kwargs: Any) -> Context: + async def invoking(self, messages: Message | MutableSequence[Message], **kwargs: Any) -> Context: message = self._get_most_recent_message(messages) query = message.text.lower() @@ -84,7 +84,7 @@ async def invoking(self, messages: ChatMessage | MutableSequence[ChatMessage], * return Context( messages=[ - ChatMessage( + Message( role="user", text="\n\n".join(json.dumps(result.__dict__, indent=2) for result in results) ) ] diff --git a/python/samples/demos/m365-agent/m365_agent_demo/app.py b/python/samples/demos/m365-agent/m365_agent_demo/app.py index 212941efa7..d4c6460652 100644 --- a/python/samples/demos/m365-agent/m365_agent_demo/app.py +++ b/python/samples/demos/m365-agent/m365_agent_demo/app.py @@ -18,7 +18,7 @@ from random import randint from typing import Annotated -from agent_framework import ChatAgent, tool +from agent_framework import Agent, tool from agent_framework.openai import OpenAIChatClient from aiohttp import web from aiohttp.web_middlewares import middleware @@ -95,7 +95,7 @@ def get_weather( return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C." 
-def build_agent() -> ChatAgent: +def build_agent() -> Agent: """Create and return the chat agent instance with weather tool registered.""" return OpenAIChatClient().as_agent( name="WeatherAgent", instructions="You are a helpful weather agent.", tools=get_weather diff --git a/python/samples/demos/workflow_evaluation/create_workflow.py b/python/samples/demos/workflow_evaluation/create_workflow.py index 2eb31d3492..b335ad274d 100644 --- a/python/samples/demos/workflow_evaluation/create_workflow.py +++ b/python/samples/demos/workflow_evaluation/create_workflow.py @@ -48,7 +48,7 @@ from agent_framework import ( AgentExecutorResponse, AgentResponseUpdate, - ChatMessage, + Message, Executor, WorkflowBuilder, WorkflowContext, @@ -65,9 +65,9 @@ @executor(id="start_executor") -async def start_executor(input: str, ctx: WorkflowContext[list[ChatMessage]]) -> None: +async def start_executor(input: str, ctx: WorkflowContext[list[Message]]) -> None: """Initiates the workflow by sending the user query to all specialized agents.""" - await ctx.send_message([ChatMessage("user", [input])]) + await ctx.send_message([Message("user", [input])]) class ResearchLead(Executor): @@ -102,11 +102,11 @@ async def fan_in_handle(self, responses: list[AgentExecutorResponse], ctx: Workf # Generate comprehensive travel plan summary messages = [ - ChatMessage( + Message( role="system", text="You are a travel planning coordinator. 
Summarize findings from multiple specialized travel agents and provide a clear, comprehensive travel plan based on the user's query.", ), - ChatMessage( + Message( role="user", text=f"Original query: {user_query}\n\nFindings from specialized travel agents:\n{summary_text}\n\nPlease provide a comprehensive travel plan based on these findings.", ), diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_provider_methods.py b/python/samples/getting_started/agents/azure_ai/azure_ai_provider_methods.py index b05ec92f80..2bc42ec97a 100644 --- a/python/samples/getting_started/agents/azure_ai/azure_ai_provider_methods.py +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_provider_methods.py @@ -23,7 +23,7 @@ It also shows how to use a single provider instance to spawn multiple agents with different configurations, which is efficient for multi-agent scenarios. -Each method returns a ChatAgent that can be used for conversations. +Each method returns an Agent that can be used for conversations. """ @@ -41,7 +41,7 @@ async def create_agent_example() -> None: """Example of using provider.create_agent() to create a new agent. This method creates a new agent version on the Azure AI service and returns - a ChatAgent. Use this when you want to create a fresh agent with + an Agent. Use this when you want to create a fresh agent with specific configuration. """ print("=== provider.create_agent() Example ===") @@ -199,7 +199,7 @@ async def multiple_agents_example() -> None: async def as_agent_example() -> None: """Example of using provider.as_agent() to wrap an SDK object without HTTP calls. - This method wraps an existing AgentVersionDetails into a ChatAgent without + This method wraps an existing AgentVersionDetails into an Agent without making additional HTTP calls. Use this when you already have the full AgentVersionDetails from a previous SDK operation.
""" diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_application_endpoint.py b/python/samples/getting_started/agents/azure_ai/azure_ai_with_application_endpoint.py index 89bb77af11..c26b521c46 100644 --- a/python/samples/getting_started/agents/azure_ai/azure_ai_with_application_endpoint.py +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_with_application_endpoint.py @@ -3,7 +3,7 @@ import asyncio import os -from agent_framework import ChatAgent +from agent_framework import Agent from agent_framework.azure import AzureAIClient from azure.ai.projects.aio import AIProjectClient from azure.identity.aio import AzureCliCredential @@ -23,7 +23,7 @@ async def main() -> None: # Endpoint here should be application endpoint with format: # /api/projects//applications//protocols AIProjectClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as project_client, - ChatAgent( + Agent( chat_client=AzureAIClient( project_client=project_client, ), diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_download.py b/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_download.py index ff0d9df4dc..7a9d7cdf40 100644 --- a/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_download.py +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_download.py @@ -6,10 +6,10 @@ from agent_framework import ( AgentResponseUpdate, - Annotation, - ChatAgent, - Content, + Agent, + CitationAnnotation, HostedCodeInterpreterTool, + HostedFileContent, ) from agent_framework.azure import AzureAIProjectAgentProvider from azure.identity.aio import AzureCliCredential @@ -33,19 +33,21 @@ ) -async def download_container_files(file_contents: list[Annotation | Content], agent: ChatAgent) -> list[Path]: +async def download_container_files( + file_contents: list[CitationAnnotation | HostedFileContent], agent: Agent +) 
-> list[Path]: """Download container files using the OpenAI containers API. Code interpreter generates files in containers, which require both file_id and container_id to download. The container_id is stored in additional_properties. - This function works for both streaming (Content with type="hosted_file") and non-streaming - (Annotation) responses. + This function works for both streaming (HostedFileContent) and non-streaming + (CitationAnnotation) responses. Args: - file_contents: List of Annotation or Content objects + file_contents: List of CitationAnnotation or HostedFileContent objects containing file_id and container_id. - agent: The ChatAgent instance with access to the AzureAIClient. + agent: The Agent instance with access to the AzureAIClient. Returns: List of Path objects for successfully downloaded files. @@ -61,36 +63,28 @@ async def download_container_files(file_contents: list[Annotation | Content], ag print(f"\nDownloading {len(file_contents)} container file(s) to {output_dir.absolute()}...") # Access the OpenAI client from AzureAIClient - openai_client = agent.chat_client.client # type: ignore[attr-defined] + openai_client = agent.chat_client.client downloaded_files: list[Path] = [] for content in file_contents: - # Handle both Annotation (TypedDict) and Content objects - if isinstance(content, dict): # Annotation TypedDict - file_id = content.get("file_id") - additional_props = content.get("additional_properties", {}) - url = content.get("url") - else: # Content object - file_id = content.file_id - additional_props = content.additional_properties or {} - url = content.uri + file_id = content.file_id # Extract container_id from additional_properties - if not additional_props or "container_id" not in additional_props: + if not content.additional_properties or "container_id" not in content.additional_properties: print(f" File {file_id}: ✗ Missing container_id") continue - container_id = additional_props["container_id"] + container_id = 
content.additional_properties["container_id"] # Extract filename based on content type - if isinstance(content, dict): # Annotation TypedDict - filename = url or f"{file_id}.txt" + if isinstance(content, CitationAnnotation): + filename = content.url or f"{file_id}.txt" # Extract filename from sandbox URL if present (e.g., sandbox:/mnt/data/sample.txt) if filename.startswith("sandbox:"): filename = filename.split("/")[-1] - else: # Content - filename = additional_props.get("filename") or f"{file_id}.txt" + else: # HostedFileContent + filename = content.additional_properties.get("filename") or f"{file_id}.txt" output_path = output_dir / filename @@ -138,18 +132,17 @@ async def non_streaming_example() -> None: print(f"Agent: {result.text}\n") # Check for annotations in the response - annotations_found: list[Annotation] = [] - # AgentResponse has messages property, which contains ChatMessage objects + annotations_found: list[CitationAnnotation] = [] + # AgentResponse has messages property, which contains Message objects for message in result.messages: for content in message.contents: if content.type == "text" and content.annotations: for annotation in content.annotations: - if annotation.get("file_id"): + if isinstance(annotation, CitationAnnotation) and annotation.file_id: annotations_found.append(annotation) - print(f"Found file annotation: file_id={annotation['file_id']}") - additional_props = annotation.get("additional_properties", {}) - if additional_props and "container_id" in additional_props: - print(f" container_id={additional_props['container_id']}") + print(f"Found file annotation: file_id={annotation.file_id}") + if annotation.additional_properties and "container_id" in annotation.additional_properties: + print(f" container_id={annotation.additional_properties['container_id']}") if annotations_found: print(f"SUCCESS: Found {len(annotations_found)} file annotation(s)") @@ -180,7 +173,7 @@ async def streaming_example() -> None: ) print(f"User: {QUERY}\n") - 
file_contents_found: list[Content] = [] + file_contents_found: list[HostedFileContent] = [] text_chunks: list[str] = [] async for update in agent.run(QUERY, stream=True): @@ -191,11 +184,11 @@ async def streaming_example() -> None: text_chunks.append(content.text) if content.annotations: for annotation in content.annotations: - if annotation.get("file_id"): - print(f"Found streaming annotation: file_id={annotation['file_id']}") - elif content.type == "hosted_file": + if isinstance(annotation, CitationAnnotation) and annotation.file_id: + print(f"Found streaming CitationAnnotation: file_id={annotation.file_id}") + elif isinstance(content, HostedFileContent): file_contents_found.append(content) - print(f"Found streaming hosted_file: file_id={content.file_id}") + print(f"Found streaming HostedFileContent: file_id={content.file_id}") if content.additional_properties and "container_id" in content.additional_properties: print(f" container_id={content.additional_properties['container_id']}") diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_generation.py b/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_generation.py index 9c9fc48feb..9761c279ac 100644 --- a/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_generation.py +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_generation.py @@ -44,7 +44,7 @@ async def non_streaming_example() -> None: # Check for annotations in the response annotations_found: list[str] = [] - # AgentResponse has messages property, which contains ChatMessage objects + # AgentResponse has messages property, which contains Message objects for message in result.messages: for content in message.contents: if content.type == "text" and content.annotations: diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_existing_agent.py 
b/python/samples/getting_started/agents/azure_ai/azure_ai_with_existing_agent.py index 7341068f10..0549c642c2 100644 --- a/python/samples/getting_started/agents/azure_ai/azure_ai_with_existing_agent.py +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_with_existing_agent.py @@ -36,7 +36,7 @@ async def using_provider_get_agent() -> None: ) try: - # Get newly created agent as ChatAgent by using provider.get_agent() + # Get newly created agent as Agent by using provider.get_agent() provider = AzureAIProjectAgentProvider(project_client=project_client) agent = await provider.get_agent(name=azure_ai_agent.name) diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_hosted_mcp.py b/python/samples/getting_started/agents/azure_ai/azure_ai_with_hosted_mcp.py index 7f0660a5e8..b9e24bd5d4 100644 --- a/python/samples/getting_started/agents/azure_ai/azure_ai_with_hosted_mcp.py +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_with_hosted_mcp.py @@ -3,7 +3,7 @@ import asyncio from typing import Any -from agent_framework import AgentResponse, AgentThread, ChatMessage, HostedMCPTool, SupportsAgentRun +from agent_framework import SupportsAgentRun, AgentResponse, AgentThread, Message, HostedMCPTool from agent_framework.azure import AzureAIProjectAgentProvider from azure.identity.aio import AzureCliCredential @@ -25,10 +25,10 @@ async def handle_approvals_without_thread(query: str, agent: "SupportsAgentRun") f"User Input Request for function from {agent.name}: {user_input_needed.function_call.name}" f" with arguments: {user_input_needed.function_call.arguments}" ) - new_inputs.append(ChatMessage("assistant", [user_input_needed])) + new_inputs.append(Message("assistant", [user_input_needed])) user_approval = input("Approve function call? 
(y/n): ") new_inputs.append( - ChatMessage("user", [user_input_needed.to_function_approval_response(user_approval.lower() == "y")]) + Message("user", [user_input_needed.to_function_approval_response(user_approval.lower() == "y")]) ) result = await agent.run(new_inputs, store=False) @@ -48,7 +48,7 @@ async def handle_approvals_with_thread(query: str, agent: "SupportsAgentRun", th ) user_approval = input("Approve function call? (y/n): ") new_input.append( - ChatMessage( + Message( role="user", contents=[user_input_needed.to_function_approval_response(user_approval.lower() == "y")], ) diff --git a/python/samples/getting_started/agents/azure_ai_agent/README.md b/python/samples/getting_started/agents/azure_ai_agent/README.md index 5440b2d3ba..02fa708102 100644 --- a/python/samples/getting_started/agents/azure_ai_agent/README.md +++ b/python/samples/getting_started/agents/azure_ai_agent/README.md @@ -8,7 +8,7 @@ All examples in this folder use the `AzureAIAgentsProvider` class which provides - **`create_agent()`** - Create a new agent on the Azure AI service - **`get_agent()`** - Retrieve an existing agent by ID or from a pre-fetched Agent object -- **`as_agent()`** - Wrap an SDK Agent object as a ChatAgent without HTTP calls +- **`as_agent()`** - Wrap an SDK Agent object as a Agent without HTTP calls ```python from agent_framework.azure import AzureAIAgentsProvider diff --git a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_hosted_mcp.py b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_hosted_mcp.py index 19de064106..261e209506 100644 --- a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_hosted_mcp.py +++ b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_hosted_mcp.py @@ -17,7 +17,7 @@ async def handle_approvals_with_thread(query: str, agent: "SupportsAgentRun", thread: "AgentThread") -> AgentResponse: """Here we let the thread deal with the previous responses, and we just rerun with the 
approval.""" - from agent_framework import ChatMessage + from agent_framework import Message result = await agent.run(query, thread=thread, store=True) while len(result.user_input_requests) > 0: @@ -29,7 +29,7 @@ async def handle_approvals_with_thread(query: str, agent: "SupportsAgentRun", th ) user_approval = input("Approve function call? (y/n): ") new_input.append( - ChatMessage( + Message( role="user", contents=[user_input_needed.to_function_approval_response(user_approval.lower() == "y")], ) diff --git a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_local_mcp.py b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_local_mcp.py index 0586ffb78e..8e26edfccc 100644 --- a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_local_mcp.py +++ b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_local_mcp.py @@ -51,7 +51,7 @@ async def mcp_tools_on_agent_level() -> None: print("=== Tools Defined on Agent Level ===") # Tools are provided when creating the agent - # The ChatAgent will connect to the MCP server through its context manager + # The Agent will connect to the MCP server through its context manager # and discover tools at runtime async with ( AzureCliCredential() as credential, diff --git a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_multiple_tools.py b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_multiple_tools.py index b7700dd6c2..5782b77a9a 100644 --- a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_multiple_tools.py +++ b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_multiple_tools.py @@ -45,7 +45,7 @@ def get_time() -> str: async def handle_approvals_with_thread(query: str, agent: "SupportsAgentRun", thread: "AgentThread"): """Here we let the thread deal with the previous responses, and we just rerun with the approval.""" - from agent_framework import ChatMessage + from agent_framework import Message 
result = await agent.run(query, thread=thread, store=True) while len(result.user_input_requests) > 0: @@ -57,7 +57,7 @@ async def handle_approvals_with_thread(query: str, agent: "SupportsAgentRun", th ) user_approval = input("Approve function call? (y/n): ") new_input.append( - ChatMessage( + Message( role="user", contents=[user_input_needed.to_function_approval_response(user_approval.lower() == "y")], ) diff --git a/python/samples/getting_started/agents/azure_openai/README.md b/python/samples/getting_started/agents/azure_openai/README.md index 466860de3e..78f4f9a6ac 100644 --- a/python/samples/getting_started/agents/azure_openai/README.md +++ b/python/samples/getting_started/agents/azure_openai/README.md @@ -6,17 +6,17 @@ This folder contains examples demonstrating different ways to create and use age | File | Description | |------|-------------| -| [`azure_assistants_basic.py`](azure_assistants_basic.py) | The simplest way to create an agent using `ChatAgent` with `AzureOpenAIAssistantsClient`. Shows both streaming and non-streaming responses with automatic assistant creation and cleanup. | +| [`azure_assistants_basic.py`](azure_assistants_basic.py) | The simplest way to create an agent using `Agent` with `AzureOpenAIAssistantsClient`. Shows both streaming and non-streaming responses with automatic assistant creation and cleanup. | | [`azure_assistants_with_code_interpreter.py`](azure_assistants_with_code_interpreter.py) | Shows how to use the HostedCodeInterpreterTool with Azure agents to write and execute Python code. Includes helper methods for accessing code interpreter data from response chunks. | | [`azure_assistants_with_existing_assistant.py`](azure_assistants_with_existing_assistant.py) | Shows how to work with a pre-existing assistant by providing the assistant ID to the Azure Assistants client. Demonstrates proper cleanup of manually created assistants. 
| | [`azure_assistants_with_explicit_settings.py`](azure_assistants_with_explicit_settings.py) | Shows how to initialize an agent with a specific assistants client, configuring settings explicitly including endpoint and deployment name. | | [`azure_assistants_with_function_tools.py`](azure_assistants_with_function_tools.py) | Demonstrates how to use function tools with agents. Shows both agent-level tools (defined when creating the agent) and query-level tools (provided with specific queries). | | [`azure_assistants_with_thread.py`](azure_assistants_with_thread.py) | Demonstrates thread management with Azure agents, including automatic thread creation for stateless conversations and explicit thread management for maintaining conversation context across multiple interactions. | -| [`azure_chat_client_basic.py`](azure_chat_client_basic.py) | The simplest way to create an agent using `ChatAgent` with `AzureOpenAIChatClient`. Shows both streaming and non-streaming responses for chat-based interactions with Azure OpenAI models. | +| [`azure_chat_client_basic.py`](azure_chat_client_basic.py) | The simplest way to create an agent using `Agent` with `AzureOpenAIChatClient`. Shows both streaming and non-streaming responses for chat-based interactions with Azure OpenAI models. | | [`azure_chat_client_with_explicit_settings.py`](azure_chat_client_with_explicit_settings.py) | Shows how to initialize an agent with a specific chat client, configuring settings explicitly including endpoint and deployment name. | | [`azure_chat_client_with_function_tools.py`](azure_chat_client_with_function_tools.py) | Demonstrates how to use function tools with agents. Shows both agent-level tools (defined when creating the agent) and query-level tools (provided with specific queries). 
| | [`azure_chat_client_with_thread.py`](azure_chat_client_with_thread.py) | Demonstrates thread management with Azure agents, including automatic thread creation for stateless conversations and explicit thread management for maintaining conversation context across multiple interactions. | -| [`azure_responses_client_basic.py`](azure_responses_client_basic.py) | The simplest way to create an agent using `ChatAgent` with `AzureOpenAIResponsesClient`. Shows both streaming and non-streaming responses for structured response generation with Azure OpenAI models. | +| [`azure_responses_client_basic.py`](azure_responses_client_basic.py) | The simplest way to create an agent using `Agent` with `AzureOpenAIResponsesClient`. Shows both streaming and non-streaming responses for structured response generation with Azure OpenAI models. | | [`azure_responses_client_code_interpreter_files.py`](azure_responses_client_code_interpreter_files.py) | Demonstrates using HostedCodeInterpreterTool with file uploads for data analysis. Shows how to create, upload, and analyze CSV files using Python code execution with Azure OpenAI Responses. | | [`azure_responses_client_image_analysis.py`](azure_responses_client_image_analysis.py) | Shows how to use Azure OpenAI Responses for image analysis and vision tasks. Demonstrates multi-modal messages combining text and image content using remote URLs. | | [`azure_responses_client_with_code_interpreter.py`](azure_responses_client_with_code_interpreter.py) | Shows how to use the HostedCodeInterpreterTool with Azure agents to write and execute Python code. Includes helper methods for accessing code interpreter data from response chunks. 
| diff --git a/python/samples/getting_started/agents/azure_openai/azure_assistants_with_code_interpreter.py b/python/samples/getting_started/agents/azure_openai/azure_assistants_with_code_interpreter.py index 3445bbcbc0..8f3a96e321 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_assistants_with_code_interpreter.py +++ b/python/samples/getting_started/agents/azure_openai/azure_assistants_with_code_interpreter.py @@ -2,7 +2,7 @@ import asyncio -from agent_framework import AgentResponseUpdate, ChatAgent, ChatResponseUpdate, HostedCodeInterpreterTool +from agent_framework import AgentResponseUpdate, Agent, ChatResponseUpdate, HostedCodeInterpreterTool from agent_framework.azure import AzureOpenAIAssistantsClient from azure.identity import AzureCliCredential from openai.types.beta.threads.runs import ( @@ -46,7 +46,7 @@ async def main() -> None: # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. 
- async with ChatAgent( + async with Agent( chat_client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), instructions="You are a helpful assistant that can write and execute Python code to solve problems.", tools=HostedCodeInterpreterTool(), diff --git a/python/samples/getting_started/agents/azure_openai/azure_assistants_with_existing_assistant.py b/python/samples/getting_started/agents/azure_openai/azure_assistants_with_existing_assistant.py index 7e373d4fad..b79c85258a 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_assistants_with_existing_assistant.py +++ b/python/samples/getting_started/agents/azure_openai/azure_assistants_with_existing_assistant.py @@ -5,7 +5,7 @@ from random import randint from typing import Annotated -from agent_framework import ChatAgent, tool +from agent_framework import Agent, tool from agent_framework.azure import AzureOpenAIAssistantsClient from azure.identity import AzureCliCredential, get_bearer_token_provider from openai import AsyncAzureOpenAI @@ -46,7 +46,7 @@ async def main() -> None: ) try: - async with ChatAgent( + async with Agent( chat_client=AzureOpenAIAssistantsClient(async_client=client, assistant_id=created_assistant.id), instructions="You are a helpful weather agent.", tools=get_weather, diff --git a/python/samples/getting_started/agents/azure_openai/azure_assistants_with_function_tools.py b/python/samples/getting_started/agents/azure_openai/azure_assistants_with_function_tools.py index 8333e7fdc8..dc6393684f 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_assistants_with_function_tools.py +++ b/python/samples/getting_started/agents/azure_openai/azure_assistants_with_function_tools.py @@ -5,7 +5,7 @@ from random import randint from typing import Annotated -from agent_framework import ChatAgent, tool +from agent_framework import Agent, tool from agent_framework.azure import AzureOpenAIAssistantsClient from azure.identity import AzureCliCredential from pydantic 
import Field @@ -43,7 +43,7 @@ async def tools_on_agent_level() -> None: # The agent can use these tools for any query during its lifetime # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. - async with ChatAgent( + async with Agent( chat_client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), instructions="You are a helpful assistant that can provide weather and time information.", tools=[get_weather, get_time], # Tools defined at agent creation @@ -74,7 +74,7 @@ async def tools_on_run_level() -> None: # Agent created without tools # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. - async with ChatAgent( + async with Agent( chat_client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), instructions="You are a helpful assistant.", # No tools defined here @@ -105,7 +105,7 @@ async def mixed_tools_example() -> None: # Agent created with some base tools # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. 
- async with ChatAgent( + async with Agent( chat_client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), instructions="You are a comprehensive assistant that can help with various information requests.", tools=[get_weather], # Base tool available for all queries diff --git a/python/samples/getting_started/agents/azure_openai/azure_assistants_with_thread.py b/python/samples/getting_started/agents/azure_openai/azure_assistants_with_thread.py index 793f8260c3..cdc8a17877 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_assistants_with_thread.py +++ b/python/samples/getting_started/agents/azure_openai/azure_assistants_with_thread.py @@ -4,7 +4,7 @@ from random import randint from typing import Annotated -from agent_framework import AgentThread, ChatAgent, tool +from agent_framework import AgentThread, Agent, tool from agent_framework.azure import AzureOpenAIAssistantsClient from azure.identity import AzureCliCredential from pydantic import Field @@ -33,7 +33,7 @@ async def example_with_automatic_thread_creation() -> None: # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. - async with ChatAgent( + async with Agent( chat_client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), instructions="You are a helpful weather agent.", tools=get_weather, @@ -59,7 +59,7 @@ async def example_with_thread_persistence() -> None: # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. - async with ChatAgent( + async with Agent( chat_client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), instructions="You are a helpful weather agent.", tools=get_weather, @@ -97,7 +97,7 @@ async def example_with_existing_thread_id() -> None: # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. 
- async with ChatAgent( + async with Agent( chat_client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), instructions="You are a helpful weather agent.", tools=get_weather, @@ -117,7 +117,7 @@ async def example_with_existing_thread_id() -> None: print("\n--- Continuing with the same thread ID in a new agent instance ---") # Create a new agent instance but use the existing thread ID - async with ChatAgent( + async with Agent( chat_client=AzureOpenAIAssistantsClient(thread_id=existing_thread_id, credential=AzureCliCredential()), instructions="You are a helpful weather agent.", tools=get_weather, diff --git a/python/samples/getting_started/agents/azure_openai/azure_chat_client_with_function_tools.py b/python/samples/getting_started/agents/azure_openai/azure_chat_client_with_function_tools.py index 777bcc51b1..90ff87e507 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_chat_client_with_function_tools.py +++ b/python/samples/getting_started/agents/azure_openai/azure_chat_client_with_function_tools.py @@ -5,7 +5,7 @@ from random import randint from typing import Annotated -from agent_framework import ChatAgent, tool +from agent_framework import Agent, tool from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential from pydantic import Field @@ -43,7 +43,7 @@ async def tools_on_agent_level() -> None: # The agent can use these tools for any query during its lifetime # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. 
- agent = ChatAgent( + agent = Agent( chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), instructions="You are a helpful assistant that can provide weather and time information.", tools=[get_weather, get_time], # Tools defined at agent creation @@ -75,7 +75,7 @@ async def tools_on_run_level() -> None: # Agent created without tools # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. - agent = ChatAgent( + agent = Agent( chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), instructions="You are a helpful assistant.", # No tools defined here @@ -107,7 +107,7 @@ async def mixed_tools_example() -> None: # Agent created with some base tools # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. - agent = ChatAgent( + agent = Agent( chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), instructions="You are a comprehensive assistant that can help with various information requests.", tools=[get_weather], # Base tool available for all queries diff --git a/python/samples/getting_started/agents/azure_openai/azure_chat_client_with_thread.py b/python/samples/getting_started/agents/azure_openai/azure_chat_client_with_thread.py index 08ada3ba97..ab34eb548d 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_chat_client_with_thread.py +++ b/python/samples/getting_started/agents/azure_openai/azure_chat_client_with_thread.py @@ -4,7 +4,7 @@ from random import randint from typing import Annotated -from agent_framework import AgentThread, ChatAgent, ChatMessageStore, tool +from agent_framework import AgentThread, Agent, ChatMessageStore, tool from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential from pydantic import Field @@ -33,7 +33,7 @@ async def example_with_automatic_thread_creation() -> None: # For authentication, run `az 
login` command in terminal or replace AzureCliCredential with preferred # authentication option. - agent = ChatAgent( + agent = Agent( chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), instructions="You are a helpful weather agent.", tools=get_weather, @@ -60,7 +60,7 @@ async def example_with_thread_persistence() -> None: # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. - agent = ChatAgent( + agent = Agent( chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), instructions="You are a helpful weather agent.", tools=get_weather, @@ -95,7 +95,7 @@ async def example_with_existing_thread_messages() -> None: # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. - agent = ChatAgent( + agent = Agent( chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), instructions="You are a helpful weather agent.", tools=get_weather, @@ -117,7 +117,7 @@ async def example_with_existing_thread_messages() -> None: print("\n--- Continuing with the same thread in a new agent instance ---") # Create a new agent instance but use the existing thread with its message history - new_agent = ChatAgent( + new_agent = Agent( chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), instructions="You are a helpful weather agent.", tools=get_weather, diff --git a/python/samples/getting_started/agents/azure_openai/azure_responses_client_code_interpreter_files.py b/python/samples/getting_started/agents/azure_openai/azure_responses_client_code_interpreter_files.py index 187e354264..e3a69d0792 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_responses_client_code_interpreter_files.py +++ b/python/samples/getting_started/agents/azure_openai/azure_responses_client_code_interpreter_files.py @@ -4,7 +4,7 @@ import os import tempfile -from agent_framework import ChatAgent, 
HostedCodeInterpreterTool +from agent_framework import Agent, HostedCodeInterpreterTool from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import AzureCliCredential from openai import AsyncAzureOpenAI @@ -76,7 +76,7 @@ async def get_token(): temp_file_path, file_id = await create_sample_file_and_upload(openai_client) # Create agent using Azure OpenAI Responses client - agent = ChatAgent( + agent = Agent( chat_client=AzureOpenAIResponsesClient(credential=credential), instructions="You are a helpful assistant that can analyze data files using Python code.", tools=HostedCodeInterpreterTool(inputs=[{"file_id": file_id}]), diff --git a/python/samples/getting_started/agents/azure_openai/azure_responses_client_image_analysis.py b/python/samples/getting_started/agents/azure_openai/azure_responses_client_image_analysis.py index 9bf05e32e0..6d91aca7a4 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_responses_client_image_analysis.py +++ b/python/samples/getting_started/agents/azure_openai/azure_responses_client_image_analysis.py @@ -2,7 +2,7 @@ import asyncio -from agent_framework import ChatMessage, Content +from agent_framework import Message, Content from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import AzureCliCredential @@ -24,7 +24,7 @@ async def main(): ) # 2. 
Create a simple message with both text and image content - user_message = ChatMessage( + user_message = Message( role="user", contents=[ Content.from_text(text="What do you see in this image?"), diff --git a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_code_interpreter.py b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_code_interpreter.py index 70c8fb832f..6e8d474198 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_code_interpreter.py +++ b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_code_interpreter.py @@ -2,7 +2,7 @@ import asyncio -from agent_framework import ChatAgent, ChatResponse, HostedCodeInterpreterTool +from agent_framework import Agent, ChatResponse, HostedCodeInterpreterTool from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import AzureCliCredential from openai.types.responses.response import Response as OpenAIResponse @@ -22,7 +22,7 @@ async def main() -> None: # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. 
- agent = ChatAgent( + agent = Agent( chat_client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), instructions="You are a helpful assistant that can write and execute Python code to solve problems.", tools=HostedCodeInterpreterTool(), diff --git a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_file_search.py b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_file_search.py index 08f35eb659..6423041b6e 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_file_search.py +++ b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_file_search.py @@ -2,7 +2,7 @@ import asyncio -from agent_framework import ChatAgent, Content, HostedFileSearchTool +from agent_framework import Agent, HostedFileSearchTool, HostedVectorStoreContent from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import AzureCliCredential @@ -22,7 +22,7 @@ # Helper functions -async def create_vector_store(client: AzureOpenAIResponsesClient) -> tuple[str, Content]: +async def create_vector_store(client: AzureOpenAIResponsesClient) -> tuple[str, HostedVectorStoreContent]: """Create a vector store with sample documents.""" file = await client.client.files.create( file=("todays_weather.txt", b"The weather today is sunny with a high of 75F."), purpose="assistants" @@ -35,7 +35,7 @@ async def create_vector_store(client: AzureOpenAIResponsesClient) -> tuple[str, if result.last_error is not None: raise Exception(f"Vector store file processing failed with status: {result.last_error.message}") - return file.id, Content.from_hosted_vector_store(vector_store_id=vector_store.id) + return file.id, HostedVectorStoreContent(vector_store_id=vector_store.id) async def delete_vector_store(client: AzureOpenAIResponsesClient, file_id: str, vector_store_id: str) -> None: @@ -53,7 +53,7 @@ async def main() -> None: file_id, vector_store = await 
create_vector_store(client) - agent = ChatAgent( + agent = Agent( chat_client=client, instructions="You are a helpful assistant that can search through files to find information.", tools=[HostedFileSearchTool(inputs=vector_store)], diff --git a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_function_tools.py b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_function_tools.py index a5d6d85aa6..e2bcef4250 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_function_tools.py +++ b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_function_tools.py @@ -5,7 +5,7 @@ from random import randint from typing import Annotated -from agent_framework import ChatAgent, tool +from agent_framework import Agent, tool from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import AzureCliCredential from pydantic import Field @@ -43,7 +43,7 @@ async def tools_on_agent_level() -> None: # The agent can use these tools for any query during its lifetime # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. - agent = ChatAgent( + agent = Agent( chat_client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), instructions="You are a helpful assistant that can provide weather and time information.", tools=[get_weather, get_time], # Tools defined at agent creation @@ -75,7 +75,7 @@ async def tools_on_run_level() -> None: # Agent created without tools # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. 
- agent = ChatAgent( + agent = Agent( chat_client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), instructions="You are a helpful assistant.", # No tools defined here @@ -107,7 +107,7 @@ async def mixed_tools_example() -> None: # Agent created with some base tools # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. - agent = ChatAgent( + agent = Agent( chat_client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), instructions="You are a comprehensive assistant that can help with various information requests.", tools=[get_weather], # Base tool available for all queries diff --git a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_hosted_mcp.py b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_hosted_mcp.py index eddc54d48c..ed735d2290 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_hosted_mcp.py +++ b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_hosted_mcp.py @@ -3,7 +3,7 @@ import asyncio from typing import TYPE_CHECKING, Any -from agent_framework import ChatAgent, HostedMCPTool +from agent_framework import Agent, HostedMCPTool from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import AzureCliCredential @@ -20,7 +20,7 @@ async def handle_approvals_without_thread(query: str, agent: "SupportsAgentRun"): """When we don't have a thread, we need to ensure we return with the input, approval request and approval.""" - from agent_framework import ChatMessage + from agent_framework import Message result = await agent.run(query) while len(result.user_input_requests) > 0: @@ -30,10 +30,10 @@ async def handle_approvals_without_thread(query: str, agent: "SupportsAgentRun") f"User Input Request for function from {agent.name}: {user_input_needed.function_call.name}" f" with arguments: 
{user_input_needed.function_call.arguments}" ) - new_inputs.append(ChatMessage(role="assistant", contents=[user_input_needed])) + new_inputs.append(Message(role="assistant", contents=[user_input_needed])) user_approval = input("Approve function call? (y/n): ") new_inputs.append( - ChatMessage(role="user", contents=[user_input_needed.to_function_approval_response(user_approval.lower() == "y")]) + Message(role="user", contents=[user_input_needed.to_function_approval_response(user_approval.lower() == "y")]) ) result = await agent.run(new_inputs) @@ -42,7 +42,7 @@ async def handle_approvals_without_thread(query: str, agent: "SupportsAgentRun") async def handle_approvals_with_thread(query: str, agent: "SupportsAgentRun", thread: "AgentThread"): """Here we let the thread deal with the previous responses, and we just rerun with the approval.""" - from agent_framework import ChatMessage + from agent_framework import Message result = await agent.run(query, thread=thread, store=True) while len(result.user_input_requests) > 0: @@ -54,7 +54,7 @@ async def handle_approvals_with_thread(query: str, agent: "SupportsAgentRun", th ) user_approval = input("Approve function call? 
(y/n): ") new_input.append( - ChatMessage( + Message( role="user", contents=[user_input_needed.to_function_approval_response(user_approval.lower() == "y")], ) @@ -65,13 +65,13 @@ async def handle_approvals_with_thread(query: str, agent: "SupportsAgentRun", th async def handle_approvals_with_thread_streaming(query: str, agent: "SupportsAgentRun", thread: "AgentThread"): """Here we let the thread deal with the previous responses, and we just rerun with the approval.""" - from agent_framework import ChatMessage + from agent_framework import Message - new_input: list[ChatMessage] = [] + new_input: list[Message] = [] new_input_added = True while new_input_added: new_input_added = False - new_input.append(ChatMessage(role="user", text=query)) + new_input.append(Message(role="user", text=query)) async for update in agent.run(new_input, thread=thread, options={"store": True}, stream=True): if update.user_input_requests: for user_input_needed in update.user_input_requests: @@ -81,7 +81,7 @@ async def handle_approvals_with_thread_streaming(query: str, agent: "SupportsAge ) user_approval = input("Approve function call? 
(y/n): ") new_input.append( - ChatMessage( + Message( role="user", contents=[user_input_needed.to_function_approval_response(user_approval.lower() == "y")] ) ) @@ -96,7 +96,7 @@ async def run_hosted_mcp_without_thread_and_specific_approval() -> None: credential = AzureCliCredential() # Tools are provided when creating the agent # The agent can use these tools for any query during its lifetime - async with ChatAgent( + async with Agent( chat_client=AzureOpenAIResponsesClient( credential=credential, ), @@ -129,7 +129,7 @@ async def run_hosted_mcp_without_approval() -> None: credential = AzureCliCredential() # Tools are provided when creating the agent # The agent can use these tools for any query during its lifetime - async with ChatAgent( + async with Agent( chat_client=AzureOpenAIResponsesClient( credential=credential, ), @@ -163,7 +163,7 @@ async def run_hosted_mcp_with_thread() -> None: credential = AzureCliCredential() # Tools are provided when creating the agent # The agent can use these tools for any query during its lifetime - async with ChatAgent( + async with Agent( chat_client=AzureOpenAIResponsesClient( credential=credential, ), @@ -196,7 +196,7 @@ async def run_hosted_mcp_with_thread_streaming() -> None: credential = AzureCliCredential() # Tools are provided when creating the agent # The agent can use these tools for any query during its lifetime - async with ChatAgent( + async with Agent( chat_client=AzureOpenAIResponsesClient( credential=credential, ), diff --git a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_local_mcp.py b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_local_mcp.py index 4958a64b44..bfd2785640 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_local_mcp.py +++ b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_local_mcp.py @@ -3,7 +3,7 @@ import asyncio import os -from agent_framework import 
ChatAgent, MCPStreamableHTTPTool +from agent_framework import Agent, MCPStreamableHTTPTool from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import AzureCliCredential @@ -37,7 +37,7 @@ async def main(): credential=credential, ) - agent: ChatAgent = responses_client.as_agent( + agent: Agent = responses_client.as_agent( name="DocsAgent", instructions=("You are a helpful assistant that can help with Microsoft documentation questions."), ) diff --git a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_thread.py b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_thread.py index 01ade8da6f..cfcc297d84 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_thread.py +++ b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_thread.py @@ -4,7 +4,7 @@ from random import randint from typing import Annotated -from agent_framework import AgentThread, ChatAgent, tool +from agent_framework import AgentThread, Agent, tool from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import AzureCliCredential from pydantic import Field @@ -33,7 +33,7 @@ async def example_with_automatic_thread_creation() -> None: # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. - agent = ChatAgent( + agent = Agent( chat_client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), instructions="You are a helpful weather agent.", tools=get_weather, @@ -62,7 +62,7 @@ async def example_with_thread_persistence_in_memory() -> None: # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. 
- agent = ChatAgent( + agent = Agent( chat_client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), instructions="You are a helpful weather agent.", tools=get_weather, @@ -103,7 +103,7 @@ async def example_with_existing_thread_id() -> None: # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. - agent = ChatAgent( + agent = Agent( chat_client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), instructions="You are a helpful weather agent.", tools=get_weather, @@ -125,7 +125,7 @@ async def example_with_existing_thread_id() -> None: if existing_thread_id: print("\n--- Continuing with the same thread ID in a new agent instance ---") - agent = ChatAgent( + agent = Agent( chat_client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), instructions="You are a helpful weather agent.", tools=get_weather, diff --git a/python/samples/getting_started/agents/custom/README.md b/python/samples/getting_started/agents/custom/README.md index eba87c4350..f8921b1f24 100644 --- a/python/samples/getting_started/agents/custom/README.md +++ b/python/samples/getting_started/agents/custom/README.md @@ -7,7 +7,7 @@ This folder contains examples demonstrating how to implement custom agents and c | File | Description | |------|-------------| | [`custom_agent.py`](custom_agent.py) | Shows how to create custom agents by extending the `BaseAgent` class. Demonstrates the `EchoAgent` implementation with both streaming and non-streaming responses, proper thread management, and message history handling. | -| [`custom_chat_client.py`](../../chat_client/custom_chat_client.py) | Demonstrates how to create custom chat clients by extending the `BaseChatClient` class. Shows a `EchoingChatClient` implementation and how to integrate it with `ChatAgent` using the `as_agent()` method. 
| +| [`custom_chat_client.py`](../../chat_client/custom_chat_client.py) | Demonstrates how to create custom chat clients by extending the `BaseChatClient` class. Shows an `EchoingChatClient` implementation and how to integrate it with `Agent` using the `as_agent()` method. | ## Key Takeaways @@ -20,7 +20,7 @@ This folder contains examples demonstrating how to implement custom agents and c ### Custom Chat Clients - Custom chat clients allow you to integrate any backend service or create new LLM providers - You must implement `_inner_get_response()` with a stream parameter to handle both streaming and non-streaming responses -- Custom chat clients can be used with `ChatAgent` to leverage all agent framework features +- Custom chat clients can be used with `Agent` to leverage all agent framework features - Use the `as_agent()` method to easily create agents from your custom chat clients Both approaches allow you to extend the framework for your specific use cases while maintaining compatibility with the broader Agent Framework ecosystem. 
diff --git a/python/samples/getting_started/agents/custom/custom_agent.py b/python/samples/getting_started/agents/custom/custom_agent.py index 7df37ba781..a4b2ed6c35 100644 --- a/python/samples/getting_started/agents/custom/custom_agent.py +++ b/python/samples/getting_started/agents/custom/custom_agent.py @@ -9,10 +9,9 @@ AgentResponseUpdate, AgentThread, BaseAgent, - ChatMessage, + Message, Content, Role, - normalize_messages, ) """ @@ -57,7 +56,7 @@ def __init__( def run( self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | Message | list[str] | list[Message] | None = None, *, stream: bool = False, thread: AgentThread | None = None, @@ -81,21 +80,19 @@ def run( async def _run( self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | Message | list[str] | list[Message] | None = None, *, thread: AgentThread | None = None, **kwargs: Any, ) -> AgentResponse: """Non-streaming implementation.""" # Normalize input messages to a list - normalized_messages = normalize_messages(messages) + normalized_messages = self._normalize_messages(messages) if not normalized_messages: - response_message = ChatMessage( + response_message = Message( role=Role.ASSISTANT, - contents=[ - Content.from_text(text="Hello! I'm a custom echo agent. Send me a message and I'll echo it back.") - ], + contents=[Content.from_text(text="Hello! I'm a custom echo agent. 
Send me a message and I'll echo it back.")], ) else: # For simplicity, echo the last user message @@ -105,7 +102,7 @@ async def _run( else: echo_text = f"{self.echo_prefix}[Non-text message received]" - response_message = ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text(text=echo_text)]) + response_message = Message(role=Role.ASSISTANT, contents=[Content.from_text(text=echo_text)]) # Notify the thread of new messages if provided if thread is not None: @@ -115,14 +112,14 @@ async def _run( async def _run_stream( self, - messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + messages: str | Message | list[str] | list[Message] | None = None, *, thread: AgentThread | None = None, **kwargs: Any, ) -> AsyncIterable[AgentResponseUpdate]: """Streaming implementation.""" # Normalize input messages to a list - normalized_messages = normalize_messages(messages) + normalized_messages = self._normalize_messages(messages) if not normalized_messages: response_text = "Hello! I'm a custom echo agent. Send me a message and I'll echo it back." 
@@ -150,7 +147,7 @@ async def _run_stream( # Notify the thread of the complete response if provided if thread is not None: - complete_response = ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text(text=response_text)]) + complete_response = Message(role=Role.ASSISTANT, contents=[Content.from_text(text=response_text)]) await self._notify_thread_of_new_messages(thread, normalized_messages, complete_response) diff --git a/python/samples/getting_started/agents/ollama/ollama_chat_multimodal.py b/python/samples/getting_started/agents/ollama/ollama_chat_multimodal.py index 3deb6f6e92..cbee158337 100644 --- a/python/samples/getting_started/agents/ollama/ollama_chat_multimodal.py +++ b/python/samples/getting_started/agents/ollama/ollama_chat_multimodal.py @@ -2,7 +2,7 @@ import asyncio -from agent_framework import ChatMessage, Content +from agent_framework import Message, Content from agent_framework.ollama import OllamaChatClient """ @@ -32,7 +32,7 @@ async def test_image() -> None: image_uri = create_sample_image() - message = ChatMessage( + message = Message( role="user", contents=[ Content.from_text(text="What's in this image?"), diff --git a/python/samples/getting_started/agents/openai/README.md b/python/samples/getting_started/agents/openai/README.md index 4feff05d22..0db2e5648f 100644 --- a/python/samples/getting_started/agents/openai/README.md +++ b/python/samples/getting_started/agents/openai/README.md @@ -15,14 +15,14 @@ This folder contains examples demonstrating different ways to create and use age | [`openai_assistants_with_function_tools.py`](openai_assistants_with_function_tools.py) | Function tools with `OpenAIAssistantProvider` at both agent-level and query-level. | | [`openai_assistants_with_response_format.py`](openai_assistants_with_response_format.py) | Structured outputs with `OpenAIAssistantProvider` using Pydantic models. 
| | [`openai_assistants_with_thread.py`](openai_assistants_with_thread.py) | Thread management with `OpenAIAssistantProvider` for conversation context persistence. | -| [`openai_chat_client_basic.py`](openai_chat_client_basic.py) | The simplest way to create an agent using `ChatAgent` with `OpenAIChatClient`. Shows both streaming and non-streaming responses for chat-based interactions with OpenAI models. | +| [`openai_chat_client_basic.py`](openai_chat_client_basic.py) | The simplest way to create an agent using `Agent` with `OpenAIChatClient`. Shows both streaming and non-streaming responses for chat-based interactions with OpenAI models. | | [`openai_chat_client_with_explicit_settings.py`](openai_chat_client_with_explicit_settings.py) | Shows how to initialize an agent with a specific chat client, configuring settings explicitly including API key and model ID. | | [`openai_chat_client_with_function_tools.py`](openai_chat_client_with_function_tools.py) | Demonstrates how to use function tools with agents. Shows both agent-level tools (defined when creating the agent) and query-level tools (provided with specific queries). | | [`openai_chat_client_with_local_mcp.py`](openai_chat_client_with_local_mcp.py) | Shows how to integrate OpenAI agents with local Model Context Protocol (MCP) servers for enhanced functionality and tool integration. | | [`openai_chat_client_with_thread.py`](openai_chat_client_with_thread.py) | Demonstrates thread management with OpenAI agents, including automatic thread creation for stateless conversations and explicit thread management for maintaining conversation context across multiple interactions. | | [`openai_chat_client_with_web_search.py`](openai_chat_client_with_web_search.py) | Shows how to use web search capabilities with OpenAI agents to retrieve and use information from the internet in responses. 
| | [`openai_chat_client_with_runtime_json_schema.py`](openai_chat_client_with_runtime_json_schema.py) | Shows how to supply a runtime JSON Schema via `additional_chat_options` for structured output without defining a Pydantic model. | -| [`openai_responses_client_basic.py`](openai_responses_client_basic.py) | The simplest way to create an agent using `ChatAgent` with `OpenAIResponsesClient`. Shows both streaming and non-streaming responses for structured response generation with OpenAI models. | +| [`openai_responses_client_basic.py`](openai_responses_client_basic.py) | The simplest way to create an agent using `Agent` with `OpenAIResponsesClient`. Shows both streaming and non-streaming responses for structured response generation with OpenAI models. | | [`openai_responses_client_image_analysis.py`](openai_responses_client_image_analysis.py) | Demonstrates how to use vision capabilities with agents to analyze images. | | [`openai_responses_client_image_generation.py`](openai_responses_client_image_generation.py) | Demonstrates how to use image generation capabilities with OpenAI agents to create images based on text descriptions. Requires PIL (Pillow) for image display. | | [`openai_responses_client_reasoning.py`](openai_responses_client_reasoning.py) | Demonstrates how to use reasoning capabilities with OpenAI agents, showing how the agent can provide detailed reasoning for its responses. 
| diff --git a/python/samples/getting_started/agents/openai/openai_chat_client_with_function_tools.py b/python/samples/getting_started/agents/openai/openai_chat_client_with_function_tools.py index 057989d228..3e43061654 100644 --- a/python/samples/getting_started/agents/openai/openai_chat_client_with_function_tools.py +++ b/python/samples/getting_started/agents/openai/openai_chat_client_with_function_tools.py @@ -5,7 +5,7 @@ from random import randint from typing import Annotated -from agent_framework import ChatAgent, tool +from agent_framework import Agent, tool from agent_framework.openai import OpenAIChatClient from pydantic import Field @@ -40,7 +40,7 @@ async def tools_on_agent_level() -> None: # Tools are provided when creating the agent # The agent can use these tools for any query during its lifetime - agent = ChatAgent( + agent = Agent( chat_client=OpenAIChatClient(), instructions="You are a helpful assistant that can provide weather and time information.", tools=[get_weather, get_time], # Tools defined at agent creation @@ -70,7 +70,7 @@ async def tools_on_run_level() -> None: print("=== Tools Passed to Run Method ===") # Agent created without tools - agent = ChatAgent( + agent = Agent( chat_client=OpenAIChatClient(), instructions="You are a helpful assistant.", # No tools defined here @@ -100,7 +100,7 @@ async def mixed_tools_example() -> None: print("=== Mixed Tools Example (Agent + Run Method) ===") # Agent created with some base tools - agent = ChatAgent( + agent = Agent( chat_client=OpenAIChatClient(), instructions="You are a comprehensive assistant that can help with various information requests.", tools=[get_weather], # Base tool available for all queries diff --git a/python/samples/getting_started/agents/openai/openai_chat_client_with_local_mcp.py b/python/samples/getting_started/agents/openai/openai_chat_client_with_local_mcp.py index e49304adcc..e7cb84ec25 100644 --- 
a/python/samples/getting_started/agents/openai/openai_chat_client_with_local_mcp.py +++ b/python/samples/getting_started/agents/openai/openai_chat_client_with_local_mcp.py @@ -2,7 +2,7 @@ import asyncio -from agent_framework import ChatAgent, MCPStreamableHTTPTool +from agent_framework import Agent, MCPStreamableHTTPTool from agent_framework.openai import OpenAIChatClient """ @@ -29,7 +29,7 @@ async def mcp_tools_on_run_level() -> None: name="Microsoft Learn MCP", url="https://learn.microsoft.com/api/mcp", ) as mcp_server, - ChatAgent( + Agent( chat_client=OpenAIChatClient(), name="DocsAgent", instructions="You are a helpful assistant that can help with microsoft documentation questions.", diff --git a/python/samples/getting_started/agents/openai/openai_chat_client_with_thread.py b/python/samples/getting_started/agents/openai/openai_chat_client_with_thread.py index f7a824c370..5103d580b7 100644 --- a/python/samples/getting_started/agents/openai/openai_chat_client_with_thread.py +++ b/python/samples/getting_started/agents/openai/openai_chat_client_with_thread.py @@ -4,7 +4,7 @@ from random import randint from typing import Annotated -from agent_framework import AgentThread, ChatAgent, ChatMessageStore, tool +from agent_framework import Agent, AgentThread, ChatMessageStore, tool from agent_framework.openai import OpenAIChatClient from pydantic import Field @@ -30,7 +30,7 @@ async def example_with_automatic_thread_creation() -> None: """Example showing automatic thread creation (service-managed thread).""" print("=== Automatic Thread Creation Example ===") - agent = ChatAgent( + agent = Agent( chat_client=OpenAIChatClient(), instructions="You are a helpful weather agent.", tools=get_weather, @@ -55,7 +55,7 @@ async def example_with_thread_persistence() -> None: print("=== Thread Persistence Example ===") print("Using the same thread across multiple conversations to maintain context.\n") - agent = ChatAgent( + agent = Agent( chat_client=OpenAIChatClient(), 
instructions="You are a helpful weather agent.", tools=get_weather, @@ -88,7 +88,7 @@ async def example_with_existing_thread_messages() -> None: """Example showing how to work with existing thread messages for OpenAI.""" print("=== Existing Thread Messages Example ===") - agent = ChatAgent( + agent = Agent( chat_client=OpenAIChatClient(), instructions="You are a helpful weather agent.", tools=get_weather, @@ -110,7 +110,7 @@ async def example_with_existing_thread_messages() -> None: print("\n--- Continuing with the same thread in a new agent instance ---") # Create a new agent instance but use the existing thread with its message history - new_agent = ChatAgent( + new_agent = Agent( chat_client=OpenAIChatClient(), instructions="You are a helpful weather agent.", tools=get_weather, diff --git a/python/samples/getting_started/agents/openai/openai_chat_client_with_web_search.py b/python/samples/getting_started/agents/openai/openai_chat_client_with_web_search.py index eb1072f945..244e66a92b 100644 --- a/python/samples/getting_started/agents/openai/openai_chat_client_with_web_search.py +++ b/python/samples/getting_started/agents/openai/openai_chat_client_with_web_search.py @@ -2,7 +2,7 @@ import asyncio -from agent_framework import ChatAgent, HostedWebSearchTool +from agent_framework import Agent, HostedWebSearchTool from agent_framework.openai import OpenAIChatClient """ @@ -22,7 +22,7 @@ async def main() -> None: } } - agent = ChatAgent( + agent = Agent( chat_client=OpenAIChatClient(model_id="gpt-4o-search-preview"), instructions="You are a helpful assistant that can search the web for current information.", tools=[HostedWebSearchTool(additional_properties=additional_properties)], diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_basic.py b/python/samples/getting_started/agents/openai/openai_responses_client_basic.py index b564f07d51..bdc907ba7a 100644 --- a/python/samples/getting_started/agents/openai/openai_responses_client_basic.py 
+++ b/python/samples/getting_started/agents/openai/openai_responses_client_basic.py @@ -5,15 +5,7 @@ from random import randint from typing import Annotated -from agent_framework import ( - ChatAgent, - ChatContext, - ChatMessage, - ChatResponse, - MiddlewareTermination, - chat_middleware, - tool, -) +from agent_framework import Agent, ChatContext, Message, ChatResponse, Role, chat_middleware, tool from agent_framework.openai import OpenAIResponsesClient from pydantic import Field @@ -46,8 +38,8 @@ async def security_and_override_middleware( # Override the response instead of calling AI context.result = ChatResponse( messages=[ - ChatMessage( - role="assistant", + Message( + role=Role.ASSISTANT, text="I cannot process requests containing sensitive information. " "Please rephrase your question without including passwords, secrets, or other " "sensitive data.", @@ -56,7 +48,8 @@ async def security_and_override_middleware( ) # Set terminate flag to stop execution - raise MiddlewareTermination + context.terminate = True + return # Continue to next middleware or AI execution await call_next(context) @@ -79,7 +72,7 @@ async def non_streaming_example() -> None: """Example of non-streaming response (get the complete result at once).""" print("=== Non-streaming Response Example ===") - agent = ChatAgent( + agent = Agent( chat_client=OpenAIResponsesClient(), instructions="You are a helpful weather agent.", tools=get_weather, @@ -95,7 +88,7 @@ async def streaming_example() -> None: """Example of streaming response (get results as they are generated).""" print("=== Streaming Response Example ===") - agent = ChatAgent( + agent = Agent( chat_client=OpenAIResponsesClient( middleware=[security_and_override_middleware], ), diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_image_analysis.py b/python/samples/getting_started/agents/openai/openai_responses_client_image_analysis.py index c9c56d5e48..3db8ddf54f 100644 --- 
a/python/samples/getting_started/agents/openai/openai_responses_client_image_analysis.py +++ b/python/samples/getting_started/agents/openai/openai_responses_client_image_analysis.py @@ -2,7 +2,7 @@ import asyncio -from agent_framework import ChatMessage, Content +from agent_framework import Message, Content from agent_framework.openai import OpenAIResponsesClient """ @@ -23,7 +23,7 @@ async def main(): ) # 2. Create a simple message with both text and image content - user_message = ChatMessage( + user_message = Message( role="user", contents=[ Content.from_text(text="What do you see in this image?"), diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_with_code_interpreter.py b/python/samples/getting_started/agents/openai/openai_responses_client_with_code_interpreter.py index 71d81d9ba8..e80e9bde0f 100644 --- a/python/samples/getting_started/agents/openai/openai_responses_client_with_code_interpreter.py +++ b/python/samples/getting_started/agents/openai/openai_responses_client_with_code_interpreter.py @@ -3,7 +3,7 @@ import asyncio from agent_framework import ( - ChatAgent, + Agent, HostedCodeInterpreterTool, ) from agent_framework.openai import OpenAIResponsesClient @@ -20,7 +20,7 @@ async def main() -> None: """Example showing how to use the HostedCodeInterpreterTool with OpenAI Responses.""" print("=== OpenAI Responses Agent with Code Interpreter Example ===") - agent = ChatAgent( + agent = Agent( chat_client=OpenAIResponsesClient(), instructions="You are a helpful assistant that can write and execute Python code to solve problems.", tools=HostedCodeInterpreterTool(), diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_with_code_interpreter_files.py b/python/samples/getting_started/agents/openai/openai_responses_client_with_code_interpreter_files.py index f3d311e307..3f9320db82 100644 --- a/python/samples/getting_started/agents/openai/openai_responses_client_with_code_interpreter_files.py +++ 
b/python/samples/getting_started/agents/openai/openai_responses_client_with_code_interpreter_files.py @@ -4,7 +4,7 @@ import os import tempfile -from agent_framework import ChatAgent, HostedCodeInterpreterTool +from agent_framework import Agent, HostedCodeInterpreterTool from agent_framework.openai import OpenAIResponsesClient from openai import AsyncOpenAI @@ -66,7 +66,7 @@ async def main() -> None: temp_file_path, file_id = await create_sample_file_and_upload(openai_client) # Create agent using OpenAI Responses client - agent = ChatAgent( + agent = Agent( chat_client=OpenAIResponsesClient(), instructions="You are a helpful assistant that can analyze data files using Python code.", tools=HostedCodeInterpreterTool(inputs=[{"file_id": file_id}]), diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_with_file_search.py b/python/samples/getting_started/agents/openai/openai_responses_client_with_file_search.py index 3784c5a715..431187f64b 100644 --- a/python/samples/getting_started/agents/openai/openai_responses_client_with_file_search.py +++ b/python/samples/getting_started/agents/openai/openai_responses_client_with_file_search.py @@ -2,7 +2,7 @@ import asyncio -from agent_framework import ChatAgent, Content, HostedFileSearchTool +from agent_framework import Agent, Content, HostedFileSearchTool from agent_framework.openai import OpenAIResponsesClient """ @@ -47,7 +47,7 @@ async def main() -> None: print(f"User: {message}") file_id, vector_store = await create_vector_store(client) - agent = ChatAgent( + agent = Agent( chat_client=client, instructions="You are a helpful assistant that can search through files to find information.", tools=[HostedFileSearchTool(inputs=vector_store)], diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_with_function_tools.py b/python/samples/getting_started/agents/openai/openai_responses_client_with_function_tools.py index 032a8b20d8..c173a5d0f7 100644 --- 
a/python/samples/getting_started/agents/openai/openai_responses_client_with_function_tools.py +++ b/python/samples/getting_started/agents/openai/openai_responses_client_with_function_tools.py @@ -5,7 +5,7 @@ from random import randint from typing import Annotated -from agent_framework import ChatAgent, tool +from agent_framework import Agent, tool from agent_framework.openai import OpenAIResponsesClient from pydantic import Field @@ -40,7 +40,7 @@ async def tools_on_agent_level() -> None: # Tools are provided when creating the agent # The agent can use these tools for any query during its lifetime - agent = ChatAgent( + agent = Agent( chat_client=OpenAIResponsesClient(), instructions="You are a helpful assistant that can provide weather and time information.", tools=[get_weather, get_time], # Tools defined at agent creation @@ -70,7 +70,7 @@ async def tools_on_run_level() -> None: print("=== Tools Passed to Run Method ===") # Agent created without tools - agent = ChatAgent( + agent = Agent( chat_client=OpenAIResponsesClient(), instructions="You are a helpful assistant.", # No tools defined here @@ -100,7 +100,7 @@ async def mixed_tools_example() -> None: print("=== Mixed Tools Example (Agent + Run Method) ===") # Agent created with some base tools - agent = ChatAgent( + agent = Agent( chat_client=OpenAIResponsesClient(), instructions="You are a comprehensive assistant that can help with various information requests.", tools=[get_weather], # Base tool available for all queries diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_with_hosted_mcp.py b/python/samples/getting_started/agents/openai/openai_responses_client_with_hosted_mcp.py index 526503f813..192a18ea49 100644 --- a/python/samples/getting_started/agents/openai/openai_responses_client_with_hosted_mcp.py +++ b/python/samples/getting_started/agents/openai/openai_responses_client_with_hosted_mcp.py @@ -3,7 +3,7 @@ import asyncio from typing import TYPE_CHECKING, Any -from 
agent_framework import ChatAgent, HostedMCPTool +from agent_framework import Agent, HostedMCPTool from agent_framework.openai import OpenAIResponsesClient """ @@ -19,7 +19,7 @@ async def handle_approvals_without_thread(query: str, agent: "SupportsAgentRun"): """When we don't have a thread, we need to ensure we return with the input, approval request and approval.""" - from agent_framework import ChatMessage + from agent_framework import Message result = await agent.run(query) while len(result.user_input_requests) > 0: @@ -29,10 +29,10 @@ async def handle_approvals_without_thread(query: str, agent: "SupportsAgentRun") f"User Input Request for function from {agent.name}: {user_input_needed.function_call.name}" f" with arguments: {user_input_needed.function_call.arguments}" ) - new_inputs.append(ChatMessage(role="assistant", contents=[user_input_needed])) + new_inputs.append(Message(role="assistant", contents=[user_input_needed])) user_approval = input("Approve function call? (y/n): ") new_inputs.append( - ChatMessage(role="user", contents=[user_input_needed.to_function_approval_response(user_approval.lower() == "y")]) + Message(role="user", contents=[user_input_needed.to_function_approval_response(user_approval.lower() == "y")]) ) result = await agent.run(new_inputs) @@ -41,7 +41,7 @@ async def handle_approvals_without_thread(query: str, agent: "SupportsAgentRun") async def handle_approvals_with_thread(query: str, agent: "SupportsAgentRun", thread: "AgentThread"): """Here we let the thread deal with the previous responses, and we just rerun with the approval.""" - from agent_framework import ChatMessage + from agent_framework import Message result = await agent.run(query, thread=thread, store=True) while len(result.user_input_requests) > 0: @@ -53,7 +53,7 @@ async def handle_approvals_with_thread(query: str, agent: "SupportsAgentRun", th ) user_approval = input("Approve function call? 
(y/n): ") new_input.append( - ChatMessage( + Message( role="user", contents=[user_input_needed.to_function_approval_response(user_approval.lower() == "y")], ) @@ -64,13 +64,13 @@ async def handle_approvals_with_thread(query: str, agent: "SupportsAgentRun", th async def handle_approvals_with_thread_streaming(query: str, agent: "SupportsAgentRun", thread: "AgentThread"): """Here we let the thread deal with the previous responses, and we just rerun with the approval.""" - from agent_framework import ChatMessage + from agent_framework import Message - new_input: list[ChatMessage] = [] + new_input: list[Message] = [] new_input_added = True while new_input_added: new_input_added = False - new_input.append(ChatMessage(role="user", text=query)) + new_input.append(Message(role="user", text=query)) async for update in agent.run(new_input, thread=thread, stream=True, options={"store": True}): if update.user_input_requests: for user_input_needed in update.user_input_requests: @@ -80,7 +80,7 @@ async def handle_approvals_with_thread_streaming(query: str, agent: "SupportsAge ) user_approval = input("Approve function call? 
(y/n): ") new_input.append( - ChatMessage( + Message( role="user", contents=[user_input_needed.to_function_approval_response(user_approval.lower() == "y")] ) ) @@ -95,7 +95,7 @@ async def run_hosted_mcp_without_thread_and_specific_approval() -> None: # Tools are provided when creating the agent # The agent can use these tools for any query during its lifetime - async with ChatAgent( + async with Agent( chat_client=OpenAIResponsesClient(), name="DocsAgent", instructions="You are a helpful assistant that can help with microsoft documentation questions.", @@ -126,7 +126,7 @@ async def run_hosted_mcp_without_approval() -> None: # Tools are provided when creating the agent # The agent can use these tools for any query during its lifetime - async with ChatAgent( + async with Agent( chat_client=OpenAIResponsesClient(), name="DocsAgent", instructions="You are a helpful assistant that can help with microsoft documentation questions.", @@ -158,7 +158,7 @@ async def run_hosted_mcp_with_thread() -> None: # Tools are provided when creating the agent # The agent can use these tools for any query during its lifetime - async with ChatAgent( + async with Agent( chat_client=OpenAIResponsesClient(), name="DocsAgent", instructions="You are a helpful assistant that can help with microsoft documentation questions.", @@ -189,7 +189,7 @@ async def run_hosted_mcp_with_thread_streaming() -> None: # Tools are provided when creating the agent # The agent can use these tools for any query during its lifetime - async with ChatAgent( + async with Agent( chat_client=OpenAIResponsesClient(), name="DocsAgent", instructions="You are a helpful assistant that can help with microsoft documentation questions.", diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_with_local_mcp.py b/python/samples/getting_started/agents/openai/openai_responses_client_with_local_mcp.py index 50ebcf9ad7..c6a620f2bc 100644 --- 
a/python/samples/getting_started/agents/openai/openai_responses_client_with_local_mcp.py +++ b/python/samples/getting_started/agents/openai/openai_responses_client_with_local_mcp.py @@ -2,7 +2,7 @@ import asyncio -from agent_framework import ChatAgent, MCPStreamableHTTPTool +from agent_framework import Agent, MCPStreamableHTTPTool from agent_framework.openai import OpenAIResponsesClient """ @@ -22,7 +22,7 @@ async def streaming_with_mcp(show_raw_stream: bool = False) -> None: print("=== Tools Defined on Agent Level ===") # Tools are provided when creating the agent # The agent can use these tools for any query during its lifetime - async with ChatAgent( + async with Agent( chat_client=OpenAIResponsesClient(), name="DocsAgent", instructions="You are a helpful assistant that can help with microsoft documentation questions.", @@ -60,7 +60,7 @@ async def run_with_mcp() -> None: # Tools are provided when creating the agent # The agent can use these tools for any query during its lifetime - async with ChatAgent( + async with Agent( chat_client=OpenAIResponsesClient(), name="DocsAgent", instructions="You are a helpful assistant that can help with microsoft documentation questions.", diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_with_thread.py b/python/samples/getting_started/agents/openai/openai_responses_client_with_thread.py index e17c2d2748..ce158f9e9a 100644 --- a/python/samples/getting_started/agents/openai/openai_responses_client_with_thread.py +++ b/python/samples/getting_started/agents/openai/openai_responses_client_with_thread.py @@ -4,7 +4,7 @@ from random import randint from typing import Annotated -from agent_framework import AgentThread, ChatAgent, tool +from agent_framework import Agent, AgentThread, tool from agent_framework.openai import OpenAIResponsesClient from pydantic import Field @@ -30,7 +30,7 @@ async def example_with_automatic_thread_creation() -> None: """Example showing automatic thread creation.""" print("=== 
Automatic Thread Creation Example ===") - agent = ChatAgent( + agent = Agent( chat_client=OpenAIResponsesClient(), instructions="You are a helpful weather agent.", tools=get_weather, @@ -57,7 +57,7 @@ async def example_with_thread_persistence_in_memory() -> None: """ print("=== Thread Persistence Example (In-Memory) ===") - agent = ChatAgent( + agent = Agent( chat_client=OpenAIResponsesClient(), instructions="You are a helpful weather agent.", tools=get_weather, @@ -96,7 +96,7 @@ async def example_with_existing_thread_id() -> None: # First, create a conversation and capture the thread ID existing_thread_id = None - agent = ChatAgent( + agent = Agent( chat_client=OpenAIResponsesClient(), instructions="You are a helpful weather agent.", tools=get_weather, @@ -117,7 +117,7 @@ async def example_with_existing_thread_id() -> None: if existing_thread_id: print("\n--- Continuing with the same thread ID in a new agent instance ---") - agent = ChatAgent( + agent = Agent( chat_client=OpenAIResponsesClient(), instructions="You are a helpful weather agent.", tools=get_weather, diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_with_web_search.py b/python/samples/getting_started/agents/openai/openai_responses_client_with_web_search.py index 24e0368512..1cc27737b0 100644 --- a/python/samples/getting_started/agents/openai/openai_responses_client_with_web_search.py +++ b/python/samples/getting_started/agents/openai/openai_responses_client_with_web_search.py @@ -2,7 +2,7 @@ import asyncio -from agent_framework import ChatAgent, HostedWebSearchTool +from agent_framework import Agent, HostedWebSearchTool from agent_framework.openai import OpenAIResponsesClient """ @@ -22,7 +22,7 @@ async def main() -> None: } } - agent = ChatAgent( + agent = Agent( chat_client=OpenAIResponsesClient(), instructions="You are a helpful assistant that can search the web for current information.", tools=[HostedWebSearchTool(additional_properties=additional_properties)], diff 
--git a/python/samples/getting_started/azure_functions/02_multi_agent/README.md b/python/samples/getting_started/azure_functions/02_multi_agent/README.md index 473d6bb236..e10b9d4d51 100644 --- a/python/samples/getting_started/azure_functions/02_multi_agent/README.md +++ b/python/samples/getting_started/azure_functions/02_multi_agent/README.md @@ -76,8 +76,8 @@ Expected response: { "status": "healthy", "agents": [ - {"name": "WeatherAgent", "type": "ChatAgent"}, - {"name": "MathAgent", "type": "ChatAgent"} + {"name": "WeatherAgent", "type": "Agent"}, + {"name": "MathAgent", "type": "Agent"} ], "agent_count": 2 } diff --git a/python/samples/getting_started/chat_client/README.md b/python/samples/getting_started/chat_client/README.md index 20060f691d..5bf9b471ad 100644 --- a/python/samples/getting_started/chat_client/README.md +++ b/python/samples/getting_started/chat_client/README.md @@ -14,7 +14,7 @@ This folder contains simple examples demonstrating direct usage of various chat | [`openai_assistants_client.py`](openai_assistants_client.py) | Direct usage of OpenAI Assistants Client for basic chat interactions with OpenAI assistants. | | [`openai_chat_client.py`](openai_chat_client.py) | Direct usage of OpenAI Chat Client for chat interactions with OpenAI models. | | [`openai_responses_client.py`](openai_responses_client.py) | Direct usage of OpenAI Responses Client for structured response generation with OpenAI models. | -| [`custom_chat_client.py`](custom_chat_client.py) | Demonstrates how to create custom chat clients by extending the `BaseChatClient` class. Shows a `EchoingChatClient` implementation and how to integrate it with `ChatAgent` using the `as_agent()` method. | +| [`custom_chat_client.py`](custom_chat_client.py) | Demonstrates how to create custom chat clients by extending the `BaseChatClient` class. Shows an `EchoingChatClient` implementation and how to integrate it with `Agent` using the `as_agent()` method. 
| ## Environment Variables diff --git a/python/samples/getting_started/chat_client/custom_chat_client.py b/python/samples/getting_started/chat_client/custom_chat_client.py index 149b7230e1..69228b68ab 100644 --- a/python/samples/getting_started/chat_client/custom_chat_client.py +++ b/python/samples/getting_started/chat_client/custom_chat_client.py @@ -8,12 +8,12 @@ from agent_framework import ( BaseChatClient, - ChatMessage, ChatMiddlewareLayer, ChatResponse, ChatResponseUpdate, Content, FunctionInvocationLayer, + Message, ResponseStream, Role, ) @@ -61,7 +61,7 @@ def __init__(self, *, prefix: str = "Echo:", **kwargs: Any) -> None: def _inner_get_response( self, *, - messages: Sequence[ChatMessage], + messages: Sequence[Message], stream: bool = False, options: Mapping[str, Any], **kwargs: Any, @@ -82,7 +82,7 @@ def _inner_get_response( else: response_text = f"{self.prefix} [No text message found]" - response_message = ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text(response_text)]) + response_message = Message(role=Role.ASSISTANT, contents=[Content.from_text(response_text)]) response = ChatResponse( messages=[response_message], @@ -124,7 +124,7 @@ class EchoingChatClientWithLayers( # type: ignore[misc,type-var] async def main() -> None: - """Demonstrates how to implement and use a custom chat client with ChatAgent.""" + """Demonstrates how to implement and use a custom chat client with Agent.""" print("=== Custom Chat Client Example ===\n") # Create the custom chat client diff --git a/python/samples/getting_started/context_providers/README.md b/python/samples/getting_started/context_providers/README.md index ddcc5ffe8d..70b2fdb8ff 100644 --- a/python/samples/getting_started/context_providers/README.md +++ b/python/samples/getting_started/context_providers/README.md @@ -139,14 +139,14 @@ Different agents with isolated or shared memory configurations. 
To create a custom context provider, implement the `ContextProvider` protocol: ```python -from agent_framework import ContextProvider, Context, ChatMessage +from agent_framework import ContextProvider, Context, Message from collections.abc import MutableSequence, Sequence from typing import Any class MyContextProvider(ContextProvider): async def invoking( self, - messages: ChatMessage | MutableSequence[ChatMessage], + messages: Message | MutableSequence[Message], **kwargs: Any ) -> Context: """Provide context before the agent processes the request.""" @@ -155,8 +155,8 @@ class MyContextProvider(ContextProvider): async def invoked( self, - request_messages: ChatMessage | Sequence[ChatMessage], - response_messages: ChatMessage | Sequence[ChatMessage] | None = None, + request_messages: Message | Sequence[Message], + response_messages: Message | Sequence[Message] | None = None, invoke_exception: Exception | None = None, **kwargs: Any, ) -> None: diff --git a/python/samples/getting_started/context_providers/aggregate_context_provider.py b/python/samples/getting_started/context_providers/aggregate_context_provider.py index 4d44c0766c..2d2be413c5 100644 --- a/python/samples/getting_started/context_providers/aggregate_context_provider.py +++ b/python/samples/getting_started/context_providers/aggregate_context_provider.py @@ -17,7 +17,7 @@ from types import TracebackType from typing import TYPE_CHECKING, Any, cast -from agent_framework import ChatAgent, ChatMessage, Context, ContextProvider +from agent_framework import Agent, Message, Context, ContextProvider from agent_framework.azure import AzureAIClient from azure.identity.aio import AzureCliCredential @@ -47,7 +47,7 @@ class AggregateContextProvider(ContextProvider): Examples: .. 
code-block:: python - from agent_framework import ChatAgent + from agent_framework import Agent # Create multiple context providers provider1 = CustomContextProvider1() @@ -58,7 +58,7 @@ class AggregateContextProvider(ContextProvider): aggregate = AggregateContextProvider([provider1, provider2, provider3]) # Pass the aggregate to the agent - agent = ChatAgent(chat_client=client, name="assistant", context_provider=aggregate) + agent = Agent(chat_client=client, name="assistant", context_provider=aggregate) # You can also add more providers later provider4 = CustomContextProvider4() @@ -90,10 +90,10 @@ async def thread_created(self, thread_id: str | None = None) -> None: await asyncio.gather(*[x.thread_created(thread_id) for x in self.providers]) @override - async def invoking(self, messages: ChatMessage | MutableSequence[ChatMessage], **kwargs: Any) -> Context: + async def invoking(self, messages: Message | MutableSequence[Message], **kwargs: Any) -> Context: contexts = await asyncio.gather(*[provider.invoking(messages, **kwargs) for provider in self.providers]) instructions: str = "" - return_messages: list[ChatMessage] = [] + return_messages: list[Message] = [] tools: list["ToolProtocol"] = [] for ctx in contexts: if ctx.instructions: @@ -107,8 +107,8 @@ async def invoking(self, messages: ChatMessage | MutableSequence[ChatMessage], * @override async def invoked( self, - request_messages: ChatMessage | Sequence[ChatMessage], - response_messages: ChatMessage | Sequence[ChatMessage] | None = None, + request_messages: Message | Sequence[Message], + response_messages: Message | Sequence[Message] | None = None, invoke_exception: Exception | None = None, **kwargs: Any, ) -> None: @@ -167,7 +167,7 @@ class TimeContextProvider(ContextProvider): """A simple context provider that adds time-related instructions.""" @override - async def invoking(self, messages: ChatMessage | MutableSequence[ChatMessage], **kwargs: Any) -> Context: + async def invoking(self, messages: Message | 
MutableSequence[Message], **kwargs: Any) -> Context: from datetime import datetime current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S") @@ -181,7 +181,7 @@ def __init__(self, persona: str): self.persona = persona @override - async def invoking(self, messages: ChatMessage | MutableSequence[ChatMessage], **kwargs: Any) -> Context: + async def invoking(self, messages: Message | MutableSequence[Message], **kwargs: Any) -> Context: return Context(instructions=f"Your persona: {self.persona}. ") @@ -192,7 +192,7 @@ def __init__(self): self.preferences: dict[str, str] = {} @override - async def invoking(self, messages: ChatMessage | MutableSequence[ChatMessage], **kwargs: Any) -> Context: + async def invoking(self, messages: Message | MutableSequence[Message], **kwargs: Any) -> Context: if not self.preferences: return Context() prefs_str = ", ".join(f"{k}: {v}" for k, v in self.preferences.items()) @@ -201,14 +201,14 @@ async def invoking(self, messages: ChatMessage | MutableSequence[ChatMessage], * @override async def invoked( self, - request_messages: ChatMessage | Sequence[ChatMessage], - response_messages: ChatMessage | Sequence[ChatMessage] | None = None, + request_messages: Message | Sequence[Message], + response_messages: Message | Sequence[Message] | None = None, invoke_exception: Exception | None = None, **kwargs: Any, ) -> None: # Simple example: extract and store preferences from user messages # In a real implementation, you might use structured extraction - msgs = [request_messages] if isinstance(request_messages, ChatMessage) else list(request_messages) + msgs = [request_messages] if isinstance(request_messages, Message) else list(request_messages) for msg in msgs: content = msg.text if hasattr(msg, "text") else "" @@ -245,7 +245,7 @@ async def main(): ]) # Create the agent with the aggregate provider - async with ChatAgent( + async with Agent( chat_client=chat_client, instructions="You are a helpful assistant.", context_provider=aggregate_provider, 
diff --git a/python/samples/getting_started/context_providers/azure_ai_search/README.md b/python/samples/getting_started/context_providers/azure_ai_search/README.md index fe7635e72f..71c48f2732 100644 --- a/python/samples/getting_started/context_providers/azure_ai_search/README.md +++ b/python/samples/getting_started/context_providers/azure_ai_search/README.md @@ -126,7 +126,7 @@ AZURE_OPENAI_RESOURCE_URL=https://myresource.openai.azure.com ### Semantic Mode ```python -from agent_framework import ChatAgent +from agent_framework import Agent from agent_framework.azure import AzureAIAgentClient, AzureAISearchContextProvider from azure.identity.aio import DefaultAzureCredential @@ -141,7 +141,7 @@ search_provider = AzureAISearchContextProvider( # Create agent with search context async with AzureAIAgentClient(credential=DefaultAzureCredential()) as client: - async with ChatAgent( + async with Agent( chat_client=client, model=model_deployment, context_provider=search_provider, @@ -166,7 +166,7 @@ search_provider = AzureAISearchContextProvider( ) # Use with agent (same as semantic mode) -async with ChatAgent( +async with Agent( chat_client=client, model=model_deployment, context_provider=search_provider, diff --git a/python/samples/getting_started/context_providers/azure_ai_search/azure_ai_with_search_context_agentic.py b/python/samples/getting_started/context_providers/azure_ai_search/azure_ai_with_search_context_agentic.py index 6e3e40a216..4574587330 100644 --- a/python/samples/getting_started/context_providers/azure_ai_search/azure_ai_with_search_context_agentic.py +++ b/python/samples/getting_started/context_providers/azure_ai_search/azure_ai_with_search_context_agentic.py @@ -3,7 +3,7 @@ import asyncio import os -from agent_framework import ChatAgent +from agent_framework import Agent from agent_framework.azure import AzureAIAgentClient, AzureAISearchContextProvider from azure.identity.aio import AzureCliCredential from dotenv import load_dotenv @@ -112,7 +112,7 @@ 
async def main() -> None: model_deployment_name=model_deployment, credential=AzureCliCredential(), ) as client, - ChatAgent( + Agent( chat_client=client, name="SearchAgent", instructions=( diff --git a/python/samples/getting_started/context_providers/azure_ai_search/azure_ai_with_search_context_semantic.py b/python/samples/getting_started/context_providers/azure_ai_search/azure_ai_with_search_context_semantic.py index 4fce526a1f..a44f3ba149 100644 --- a/python/samples/getting_started/context_providers/azure_ai_search/azure_ai_with_search_context_semantic.py +++ b/python/samples/getting_started/context_providers/azure_ai_search/azure_ai_with_search_context_semantic.py @@ -3,7 +3,7 @@ import asyncio import os -from agent_framework import ChatAgent +from agent_framework import Agent from agent_framework.azure import AzureAIAgentClient, AzureAISearchContextProvider from azure.identity.aio import AzureCliCredential from dotenv import load_dotenv @@ -69,7 +69,7 @@ async def main() -> None: model_deployment_name=model_deployment, credential=AzureCliCredential(), ) as client, - ChatAgent( + Agent( chat_client=client, name="SearchAgent", instructions=( diff --git a/python/samples/getting_started/context_providers/redis/redis_basics.py b/python/samples/getting_started/context_providers/redis/redis_basics.py index 9f5a654ea1..f984354df7 100644 --- a/python/samples/getting_started/context_providers/redis/redis_basics.py +++ b/python/samples/getting_started/context_providers/redis/redis_basics.py @@ -30,7 +30,7 @@ import asyncio import os -from agent_framework import ChatMessage, tool +from agent_framework import Message, tool from agent_framework.openai import OpenAIChatClient from agent_framework_redis._provider import RedisProvider from redisvl.extensions.cache.embeddings import EmbeddingsCache @@ -128,9 +128,9 @@ async def main() -> None: # Build sample chat messages to persist to Redis messages = [ - ChatMessage("user", ["runA CONVO: User Message"]), - 
ChatMessage("assistant", ["runA CONVO: Assistant Message"]), - ChatMessage("system", ["runA CONVO: System Message"]), + Message("user", ["runA CONVO: User Message"]), + Message("assistant", ["runA CONVO: Assistant Message"]), + Message("system", ["runA CONVO: System Message"]), ] # Declare/start a conversation/thread and write messages under 'runA'. @@ -142,7 +142,7 @@ async def main() -> None: # Retrieve relevant memories for a hypothetical model call. The provider uses # the current request messages as the retrieval query and returns context to # be injected into the model's instructions. - ctx = await provider.invoking([ChatMessage("system", ["B: Assistant Message"])]) + ctx = await provider.invoking([Message("system", ["B: Assistant Message"])]) # Inspect retrieved memories that would be injected into instructions # (Debug-only output so you can verify retrieval works as expected.) diff --git a/python/samples/getting_started/context_providers/simple_context_provider.py b/python/samples/getting_started/context_providers/simple_context_provider.py index e32266cb14..fdb35c48c2 100644 --- a/python/samples/getting_started/context_providers/simple_context_provider.py +++ b/python/samples/getting_started/context_providers/simple_context_provider.py @@ -4,7 +4,7 @@ from collections.abc import MutableSequence, Sequence from typing import Any -from agent_framework import ChatAgent, ChatClientProtocol, ChatMessage, Context, ContextProvider +from agent_framework import Agent, SupportsChatGetResponse, Message, Context, ContextProvider from agent_framework.azure import AzureAIClient from azure.identity.aio import AzureCliCredential from pydantic import BaseModel @@ -16,7 +16,7 @@ class UserInfo(BaseModel): class UserInfoMemory(ContextProvider): - def __init__(self, chat_client: ChatClientProtocol, user_info: UserInfo | None = None, **kwargs: Any): + def __init__(self, chat_client: SupportsChatGetResponse, user_info: UserInfo | None = None, **kwargs: Any): """Create the 
memory. If you pass in kwargs, they will be attempted to be used to create a UserInfo object. @@ -32,8 +32,8 @@ def __init__(self, chat_client: ChatClientProtocol, user_info: UserInfo | None = async def invoked( self, - request_messages: ChatMessage | Sequence[ChatMessage], - response_messages: ChatMessage | Sequence[ChatMessage] | None = None, + request_messages: Message | Sequence[Message], + response_messages: Message | Sequence[Message] | None = None, invoke_exception: Exception | None = None, **kwargs: Any, ) -> None: @@ -64,7 +64,7 @@ async def invoked( except Exception: pass # Failed to extract, continue without updating - async def invoking(self, messages: ChatMessage | MutableSequence[ChatMessage], **kwargs: Any) -> Context: + async def invoking(self, messages: Message | MutableSequence[Message], **kwargs: Any) -> Context: """Provide user information context before each agent call.""" instructions: list[str] = [] @@ -98,7 +98,7 @@ async def main(): memory_provider = UserInfoMemory(chat_client) # Create the agent with memory - async with ChatAgent( + async with Agent( chat_client=chat_client, instructions="You are a friendly assistant. Always address the user by their name.", context_provider=memory_provider, diff --git a/python/samples/getting_started/declarative/README.md b/python/samples/getting_started/declarative/README.md index 6241f632d4..35a75ef36b 100644 --- a/python/samples/getting_started/declarative/README.md +++ b/python/samples/getting_started/declarative/README.md @@ -175,7 +175,7 @@ agent = agent_factory.create_agent_from_yaml_path(Path("custom_provider.yaml")) This allows you to extend the declarative framework with custom chat client implementations. 
The mapping requires: - **package**: The Python package/module to import from -- **name**: The class name of your ChatClientProtocol implementation +- **name**: The class name of your SupportsChatGetResponse implementation - **model_id_field**: The constructor parameter name that accepts the value of the `model.id` field from the YAML You can reference your custom provider using either `Provider.ApiType` format or just `Provider` in your YAML configuration, as long as it matches the registered mapping. diff --git a/python/samples/getting_started/devui/README.md b/python/samples/getting_started/devui/README.md index bfbee3a70b..2a70f16cd9 100644 --- a/python/samples/getting_started/devui/README.md +++ b/python/samples/getting_started/devui/README.md @@ -44,7 +44,7 @@ Each agent/workflow follows a strict structure required by DevUI's discovery sys ``` agent_name/ -├── __init__.py # Must export: agent = ChatAgent(...) +├── __init__.py # Must export: agent = Agent(...) ├── agent.py # Agent implementation └── .env.example # Example environment variables ``` @@ -100,10 +100,10 @@ Example: ```python # my_agent/__init__.py -from agent_framework import ChatAgent +from agent_framework import Agent from agent_framework.openai import OpenAIChatClient -agent = ChatAgent( +agent = Agent( name="MyAgent", description="My custom agent", chat_client=OpenAIChatClient(), diff --git a/python/samples/getting_started/devui/azure_responses_agent/agent.py b/python/samples/getting_started/devui/azure_responses_agent/agent.py index b2fbe9c995..3405fce5c0 100644 --- a/python/samples/getting_started/devui/azure_responses_agent/agent.py +++ b/python/samples/getting_started/devui/azure_responses_agent/agent.py @@ -21,7 +21,7 @@ import os from typing import Annotated -from agent_framework import ChatAgent, tool +from agent_framework import Agent, tool from agent_framework.azure import AzureOpenAIResponsesClient logger = logging.getLogger(__name__) @@ -68,7 +68,7 @@ def extract_key_points( # Agent 
using Azure OpenAI Responses API (supports PDF uploads!) -agent = ChatAgent( +agent = Agent( name="AzureResponsesAgent", description="An agent that can analyze PDFs, images, and other documents using Azure OpenAI Responses API", instructions=""" diff --git a/python/samples/getting_started/devui/foundry_agent/agent.py b/python/samples/getting_started/devui/foundry_agent/agent.py index f2ce12058d..002eb1af02 100644 --- a/python/samples/getting_started/devui/foundry_agent/agent.py +++ b/python/samples/getting_started/devui/foundry_agent/agent.py @@ -8,7 +8,7 @@ import os from typing import Annotated -from agent_framework import ChatAgent, tool +from agent_framework import Agent, tool from agent_framework.azure import AzureAIAgentClient from azure.identity.aio import AzureCliCredential from pydantic import Field @@ -43,7 +43,7 @@ def get_forecast( # Agent instance following Agent Framework conventions -agent = ChatAgent( +agent = Agent( name="FoundryWeatherAgent", chat_client=AzureAIAgentClient( project_endpoint=os.environ.get("AZURE_AI_PROJECT_ENDPOINT"), diff --git a/python/samples/getting_started/devui/in_memory_mode.py b/python/samples/getting_started/devui/in_memory_mode.py index 9f98d9be50..0fce3976c5 100644 --- a/python/samples/getting_started/devui/in_memory_mode.py +++ b/python/samples/getting_started/devui/in_memory_mode.py @@ -10,7 +10,7 @@ import os from typing import Annotated -from agent_framework import ChatAgent, Executor, WorkflowBuilder, WorkflowContext, handler, tool +from agent_framework import Agent, Executor, WorkflowBuilder, WorkflowContext, handler, tool from agent_framework.azure import AzureOpenAIChatClient from agent_framework.devui import serve from typing_extensions import Never @@ -76,7 +76,7 @@ def main(): ) # Create agents - weather_agent = ChatAgent( + weather_agent = Agent( name="weather-assistant", description="Provides weather information and time", instructions=( @@ -87,7 +87,7 @@ def main(): tools=[get_weather, get_time], ) - 
simple_agent = ChatAgent( + simple_agent = Agent( name="general-assistant", description="A simple conversational agent", instructions="You are a helpful assistant.", diff --git a/python/samples/getting_started/devui/weather_agent_azure/agent.py b/python/samples/getting_started/devui/weather_agent_azure/agent.py index 0ebf985913..94b787c7e8 100644 --- a/python/samples/getting_started/devui/weather_agent_azure/agent.py +++ b/python/samples/getting_started/devui/weather_agent_azure/agent.py @@ -7,15 +7,14 @@ from typing import Annotated from agent_framework import ( - ChatAgent, + Agent, ChatContext, - ChatMessage, + Message, ChatResponse, ChatResponseUpdate, Content, FunctionInvocationContext, - MiddlewareTermination, - ResponseStream, + Role, chat_middleware, function_middleware, tool, @@ -44,7 +43,7 @@ async def security_filter_middleware( # Check only the last message (most recent user input) last_message = context.messages[-1] if context.messages else None - if last_message and last_message.role == "user" and last_message.text: + if last_message and last_message.role == Role.USER and last_message.text: message_lower = last_message.text.lower() for term in blocked_terms: if term in message_lower: @@ -56,25 +55,26 @@ async def security_filter_middleware( if context.stream: # Streaming mode: return async generator - async def blocked_stream(msg: str = error_message) -> AsyncIterable[ChatResponseUpdate]: + async def blocked_stream() -> AsyncIterable[ChatResponseUpdate]: yield ChatResponseUpdate( - contents=[Content.from_text(text=msg)], - role="assistant", + contents=[Content.from_text(text=error_message)], + role=Role.ASSISTANT, ) - context.result = ResponseStream(blocked_stream(), finalizer=ChatResponse.from_updates) + context.result = blocked_stream() else: # Non-streaming mode: return complete response context.result = ChatResponse( messages=[ - ChatMessage( - role="assistant", + Message( + role=Role.ASSISTANT, text=error_message, ) ] ) - raise 
MiddlewareTermination + context.terminate = True + return await call_next(context) @@ -92,7 +92,8 @@ async def atlantis_location_filter_middleware( "Blocked! Hold up right there!! Tell the user that " "'Atlantis is a special place, we must never ask about the weather there!!'" ) - raise MiddlewareTermination + context.terminate = True + return await call_next(context) @@ -136,7 +137,7 @@ def send_email( # Agent instance following Agent Framework conventions -agent = ChatAgent( +agent = Agent( name="AzureWeatherAgent", description="A helpful agent that provides weather information and forecasts", instructions=""" diff --git a/python/samples/getting_started/durabletask/01_single_agent/worker.py b/python/samples/getting_started/durabletask/01_single_agent/worker.py index 8afbbb3a44..64023113b4 100644 --- a/python/samples/getting_started/durabletask/01_single_agent/worker.py +++ b/python/samples/getting_started/durabletask/01_single_agent/worker.py @@ -15,7 +15,7 @@ import logging import os -from agent_framework import ChatAgent +from agent_framework import Agent from agent_framework.azure import AzureOpenAIChatClient, DurableAIAgentWorker from azure.identity import AzureCliCredential, DefaultAzureCredential from durabletask.azuremanaged.worker import DurableTaskSchedulerWorker @@ -25,11 +25,11 @@ logger = logging.getLogger(__name__) -def create_joker_agent() -> ChatAgent: +def create_joker_agent() -> Agent: """Create the Joker agent using Azure OpenAI. 
Returns: - ChatAgent: The configured Joker agent + Agent: The configured Joker agent """ return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( name="Joker", diff --git a/python/samples/getting_started/durabletask/02_multi_agent/worker.py b/python/samples/getting_started/durabletask/02_multi_agent/worker.py index 88d9c2949d..3a6db39b7a 100644 --- a/python/samples/getting_started/durabletask/02_multi_agent/worker.py +++ b/python/samples/getting_started/durabletask/02_multi_agent/worker.py @@ -65,7 +65,7 @@ def create_weather_agent(): """Create the Weather agent using Azure OpenAI. Returns: - ChatAgent: The configured Weather agent with weather tool + Agent: The configured Weather agent with weather tool """ return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( name=WEATHER_AGENT_NAME, @@ -78,7 +78,7 @@ def create_math_agent(): """Create the Math agent using Azure OpenAI. Returns: - ChatAgent: The configured Math agent with calculation tools + Agent: The configured Math agent with calculation tools """ return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( name=MATH_AGENT_NAME, diff --git a/python/samples/getting_started/durabletask/03_single_agent_streaming/worker.py b/python/samples/getting_started/durabletask/03_single_agent_streaming/worker.py index c2eb2e973b..769b24888c 100644 --- a/python/samples/getting_started/durabletask/03_single_agent_streaming/worker.py +++ b/python/samples/getting_started/durabletask/03_single_agent_streaming/worker.py @@ -18,7 +18,7 @@ from datetime import timedelta import redis.asyncio as aioredis -from agent_framework import AgentResponseUpdate, ChatAgent +from agent_framework import AgentResponseUpdate, Agent from agent_framework.azure import ( AgentCallbackContext, AgentResponseCallbackProtocol, @@ -143,11 +143,11 @@ async def on_agent_response(self, response: object, context: AgentCallbackContex logger.error(f"Error writing end-of-stream marker: {ex}", exc_info=True) -def 
create_travel_agent() -> "ChatAgent": +def create_travel_agent() -> "Agent": """Create the TravelPlanner agent using Azure OpenAI. Returns: - ChatAgent: The configured TravelPlanner agent with travel planning tools. + Agent: The configured TravelPlanner agent with travel planning tools. """ return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( name="TravelPlanner", diff --git a/python/samples/getting_started/durabletask/04_single_agent_orchestration_chaining/worker.py b/python/samples/getting_started/durabletask/04_single_agent_orchestration_chaining/worker.py index f10a35b61b..18a7e7fdc4 100644 --- a/python/samples/getting_started/durabletask/04_single_agent_orchestration_chaining/worker.py +++ b/python/samples/getting_started/durabletask/04_single_agent_orchestration_chaining/worker.py @@ -17,7 +17,7 @@ import os from collections.abc import Generator -from agent_framework import AgentResponse, ChatAgent +from agent_framework import AgentResponse, Agent from agent_framework.azure import AzureOpenAIChatClient, DurableAIAgentOrchestrationContext, DurableAIAgentWorker from azure.identity import AzureCliCredential, DefaultAzureCredential from durabletask.azuremanaged.worker import DurableTaskSchedulerWorker @@ -31,14 +31,14 @@ WRITER_AGENT_NAME = "WriterAgent" -def create_writer_agent() -> "ChatAgent": +def create_writer_agent() -> "Agent": """Create the Writer agent using Azure OpenAI. This agent refines short pieces of text, enhancing initial sentences and polishing improved versions further. Returns: - ChatAgent: The configured Writer agent + Agent: The configured Writer agent """ instructions = ( "You refine short pieces of text. 
When given an initial sentence you enhance it;\n" diff --git a/python/samples/getting_started/durabletask/05_multi_agent_orchestration_concurrency/worker.py b/python/samples/getting_started/durabletask/05_multi_agent_orchestration_concurrency/worker.py index 8f045805f0..76b7913770 100644 --- a/python/samples/getting_started/durabletask/05_multi_agent_orchestration_concurrency/worker.py +++ b/python/samples/getting_started/durabletask/05_multi_agent_orchestration_concurrency/worker.py @@ -18,7 +18,7 @@ from collections.abc import Generator from typing import Any -from agent_framework import AgentResponse, ChatAgent +from agent_framework import AgentResponse, Agent from agent_framework.azure import AzureOpenAIChatClient, DurableAIAgentOrchestrationContext, DurableAIAgentWorker from azure.identity import AzureCliCredential, DefaultAzureCredential from durabletask.azuremanaged.worker import DurableTaskSchedulerWorker @@ -33,11 +33,11 @@ CHEMIST_AGENT_NAME = "ChemistAgent" -def create_physicist_agent() -> "ChatAgent": +def create_physicist_agent() -> "Agent": """Create the Physicist agent using Azure OpenAI. Returns: - ChatAgent: The configured Physicist agent + Agent: The configured Physicist agent """ return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( name=PHYSICIST_AGENT_NAME, @@ -45,11 +45,11 @@ def create_physicist_agent() -> "ChatAgent": ) -def create_chemist_agent() -> "ChatAgent": +def create_chemist_agent() -> "Agent": """Create the Chemist agent using Azure OpenAI. 
Returns: - ChatAgent: The configured Chemist agent + Agent: The configured Chemist agent """ return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( name=CHEMIST_AGENT_NAME, diff --git a/python/samples/getting_started/durabletask/06_multi_agent_orchestration_conditionals/worker.py b/python/samples/getting_started/durabletask/06_multi_agent_orchestration_conditionals/worker.py index 92b689d5cf..6c7ef5c33b 100644 --- a/python/samples/getting_started/durabletask/06_multi_agent_orchestration_conditionals/worker.py +++ b/python/samples/getting_started/durabletask/06_multi_agent_orchestration_conditionals/worker.py @@ -18,7 +18,7 @@ from collections.abc import Generator from typing import Any, cast -from agent_framework import AgentResponse, ChatAgent +from agent_framework import AgentResponse, Agent from agent_framework.azure import AzureOpenAIChatClient, DurableAIAgentOrchestrationContext, DurableAIAgentWorker from azure.identity import AzureCliCredential, DefaultAzureCredential from durabletask.azuremanaged.worker import DurableTaskSchedulerWorker @@ -51,11 +51,11 @@ class EmailPayload(BaseModel): email_content: str -def create_spam_agent() -> "ChatAgent": +def create_spam_agent() -> "Agent": """Create the Spam Detection agent using Azure OpenAI. Returns: - ChatAgent: The configured Spam Detection agent + Agent: The configured Spam Detection agent """ return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( name=SPAM_AGENT_NAME, @@ -63,11 +63,11 @@ def create_spam_agent() -> "ChatAgent": ) -def create_email_agent() -> "ChatAgent": +def create_email_agent() -> "Agent": """Create the Email Assistant agent using Azure OpenAI. 
Returns: - ChatAgent: The configured Email Assistant agent + Agent: The configured Email Assistant agent """ return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( name=EMAIL_AGENT_NAME, diff --git a/python/samples/getting_started/durabletask/07_single_agent_orchestration_hitl/worker.py b/python/samples/getting_started/durabletask/07_single_agent_orchestration_hitl/worker.py index db9a47002f..aed1849bd7 100644 --- a/python/samples/getting_started/durabletask/07_single_agent_orchestration_hitl/worker.py +++ b/python/samples/getting_started/durabletask/07_single_agent_orchestration_hitl/worker.py @@ -19,7 +19,7 @@ from datetime import timedelta from typing import Any, cast -from agent_framework import AgentResponse, ChatAgent +from agent_framework import AgentResponse, Agent from agent_framework.azure import AzureOpenAIChatClient, DurableAIAgentOrchestrationContext, DurableAIAgentWorker from azure.identity import AzureCliCredential, DefaultAzureCredential from durabletask.azuremanaged.worker import DurableTaskSchedulerWorker @@ -54,11 +54,11 @@ class HumanApproval(BaseModel): feedback: str = "" -def create_writer_agent() -> "ChatAgent": +def create_writer_agent() -> "Agent": """Create the Writer agent using Azure OpenAI. Returns: - ChatAgent: The configured Writer agent + Agent: The configured Writer agent """ instructions = ( "You are a professional content writer who creates high-quality articles on various topics. 
" diff --git a/python/samples/getting_started/evaluation/self_reflection/self_reflection.py b/python/samples/getting_started/evaluation/self_reflection/self_reflection.py index 931d292dd1..54bfd37f44 100644 --- a/python/samples/getting_started/evaluation/self_reflection/self_reflection.py +++ b/python/samples/getting_started/evaluation/self_reflection/self_reflection.py @@ -17,7 +17,7 @@ import openai import pandas as pd -from agent_framework import ChatAgent, ChatMessage +from agent_framework import Agent, Message from agent_framework.azure import AzureOpenAIChatClient from azure.ai.projects import AIProjectClient from azure.identity import AzureCliCredential @@ -142,7 +142,7 @@ def run_eval( async def execute_query_with_self_reflection( *, client: openai.OpenAI, - agent: ChatAgent, + agent: Agent, eval_object: openai.types.EvalCreateResponse, full_user_query: str, context: str, @@ -152,7 +152,7 @@ async def execute_query_with_self_reflection( Execute a query with self-reflection loop. Args: - agent: ChatAgent instance to use for generating responses + agent: Agent instance to use for generating responses full_user_query: Complete prompt including system prompt, user request, and context context: Context document for groundedness evaluation evaluator: Groundedness evaluator function @@ -170,7 +170,7 @@ async def execute_query_with_self_reflection( - total_groundedness_eval_time: Time spent on evaluations (seconds) - total_end_to_end_time: Total execution time (seconds) """ - messages = [ChatMessage("user", [full_user_query])] + messages = [Message("user", [full_user_query])] best_score = 0 max_score = 5 @@ -223,14 +223,14 @@ async def execute_query_with_self_reflection( print(f" → No improvement (score: {score}/{max_score}). 
Trying again...") # Add to conversation history - messages.append(ChatMessage("assistant", [agent_response])) + messages.append(Message("assistant", [agent_response])) # Request improvement reflection_prompt = ( f"The groundedness score of your response is {score}/{max_score}. " f"Reflect on your answer and improve it to get the maximum score of {max_score} " ) - messages.append(ChatMessage("user", [reflection_prompt])) + messages.append(Message("user", [reflection_prompt])) end_time = time.time() latency = end_time - start_time diff --git a/python/samples/getting_started/mcp/mcp_api_key_auth.py b/python/samples/getting_started/mcp/mcp_api_key_auth.py index d80d92d4fa..d049e2a6cb 100644 --- a/python/samples/getting_started/mcp/mcp_api_key_auth.py +++ b/python/samples/getting_started/mcp/mcp_api_key_auth.py @@ -2,7 +2,7 @@ import os -from agent_framework import ChatAgent, MCPStreamableHTTPTool +from agent_framework import Agent, MCPStreamableHTTPTool from agent_framework.openai import OpenAIResponsesClient from httpx import AsyncClient @@ -43,7 +43,7 @@ async def api_key_auth_example() -> None: url=mcp_server_url, http_client=http_client, # Pass HTTP client with authentication headers ) as mcp_tool, - ChatAgent( + Agent( chat_client=OpenAIResponsesClient(), name="Agent", instructions="You are a helpful assistant.", diff --git a/python/samples/getting_started/mcp/mcp_github_pat.py b/python/samples/getting_started/mcp/mcp_github_pat.py index 3d9d8c4916..84fef2f032 100644 --- a/python/samples/getting_started/mcp/mcp_github_pat.py +++ b/python/samples/getting_started/mcp/mcp_github_pat.py @@ -3,7 +3,7 @@ import asyncio import os -from agent_framework import ChatAgent, HostedMCPTool +from agent_framework import Agent, HostedMCPTool from agent_framework.openai import OpenAIResponsesClient from dotenv import load_dotenv @@ -54,7 +54,7 @@ async def github_mcp_example() -> None: ) # 5. 
Create agent with the GitHub MCP tool - async with ChatAgent( + async with Agent( chat_client=OpenAIResponsesClient(), name="GitHubAgent", instructions=( diff --git a/python/samples/getting_started/middleware/chat_middleware.py b/python/samples/getting_started/middleware/chat_middleware.py index e35ba5981f..092350d6a4 100644 --- a/python/samples/getting_started/middleware/chat_middleware.py +++ b/python/samples/getting_started/middleware/chat_middleware.py @@ -7,7 +7,7 @@ from agent_framework import ( ChatContext, - ChatMessage, + Message, ChatMiddleware, ChatResponse, MiddlewareTermination, @@ -69,7 +69,7 @@ async def process( print(f"[InputObserverMiddleware] Total messages: {len(context.messages)}") # Modify user messages by creating new messages with enhanced text - modified_messages: list[ChatMessage] = [] + modified_messages: list[Message] = [] modified_count = 0 for message in context.messages: @@ -81,7 +81,7 @@ async def process( updated_text = self.replacement print(f"[InputObserverMiddleware] Updated: '{original_text}' -> '{updated_text}'") - modified_message = ChatMessage(message.role, [updated_text]) + modified_message = Message(message.role, [updated_text]) modified_messages.append(modified_message) modified_count += 1 else: @@ -118,7 +118,7 @@ async def security_and_override_middleware( # Override the response instead of calling AI context.result = ChatResponse( messages=[ - ChatMessage( + Message( role="assistant", text="I cannot process requests containing sensitive information. 
" "Please rephrase your question without including passwords, secrets, or other " diff --git a/python/samples/getting_started/middleware/class_based_middleware.py b/python/samples/getting_started/middleware/class_based_middleware.py index ab6bfd5ab4..858e031e97 100644 --- a/python/samples/getting_started/middleware/class_based_middleware.py +++ b/python/samples/getting_started/middleware/class_based_middleware.py @@ -10,7 +10,7 @@ AgentContext, AgentMiddleware, AgentResponse, - ChatMessage, + Message, FunctionInvocationContext, FunctionMiddleware, tool, @@ -61,7 +61,7 @@ async def process( print("[SecurityAgentMiddleware] Security Warning: Detected sensitive information, blocking request.") # Override the result with warning message context.result = AgentResponse( - messages=[ChatMessage("assistant", ["Detected sensitive information, the request is blocked."])] + messages=[Message("assistant", ["Detected sensitive information, the request is blocked."])] ) # Simply don't call call_next() to prevent execution return diff --git a/python/samples/getting_started/middleware/middleware_termination.py b/python/samples/getting_started/middleware/middleware_termination.py index 96c5917f58..4bab79ed99 100644 --- a/python/samples/getting_started/middleware/middleware_termination.py +++ b/python/samples/getting_started/middleware/middleware_termination.py @@ -9,8 +9,7 @@ AgentContext, AgentMiddleware, AgentResponse, - ChatMessage, - MiddlewareTermination, + Message, tool, ) from agent_framework.azure import AzureAIAgentClient @@ -62,7 +61,7 @@ async def process( # Set a custom response context.result = AgentResponse( messages=[ - ChatMessage( + Message( role="assistant", text=( f"Sorry, I cannot process requests containing '{blocked_word}'. 
" @@ -73,7 +72,8 @@ async def process( ) # Set terminate flag to prevent further processing - raise MiddlewareTermination + context.terminate = True + break await call_next(context) @@ -98,7 +98,7 @@ async def process( f"[PostTerminationMiddleware] Maximum responses ({self.max_responses}) reached. " "Terminating further processing." ) - raise MiddlewareTermination + context.terminate = True # Allow the agent to process normally await call_next(context) diff --git a/python/samples/getting_started/middleware/override_result_with_middleware.py b/python/samples/getting_started/middleware/override_result_with_middleware.py index 6f83c4bee2..9520b27a0d 100644 --- a/python/samples/getting_started/middleware/override_result_with_middleware.py +++ b/python/samples/getting_started/middleware/override_result_with_middleware.py @@ -11,10 +11,11 @@ AgentResponse, AgentResponseUpdate, ChatContext, - ChatMessage, + Message, ChatResponse, ChatResponseUpdate, ResponseStream, + Role, tool, ) from agent_framework.openai import OpenAIResponsesClient @@ -75,12 +76,12 @@ def _update_hook(update: ChatResponseUpdate) -> ChatResponseUpdate: index["value"] += 1 return update - context.result.with_transform_hook(_update_hook) + context.result.with_update_hook(_update_hook) else: # For non-streaming: just replace with a new message - current_text = context.result.text or "" # type: ignore + current_text = context.result.text or "" custom_message = f"Weather Advisory: [0] {''.join(chunks)} Original message was: {current_text}" - context.result = ChatResponse(messages=[ChatMessage(role="assistant", text=custom_message)]) + context.result = ChatResponse(messages=[Message(role=Role.ASSISTANT, text=custom_message)]) async def validate_weather_middleware(context: ChatContext, call_next: Callable[[ChatContext], Awaitable[None]]) -> None: @@ -95,12 +96,12 @@ async def validate_weather_middleware(context: ChatContext, call_next: Callable[ if context.stream and isinstance(context.result, 
ResponseStream): def _append_validation_note(response: ChatResponse) -> ChatResponse: - response.messages.append(ChatMessage(role="assistant", text=validation_note)) + response.messages.append(Message(role=Role.ASSISTANT, text=validation_note)) return response - context.result.with_result_hook(_append_validation_note) + context.result.with_finalizer(_append_validation_note) elif isinstance(context.result, ChatResponse): - context.result.messages.append(ChatMessage(role="assistant", text=validation_note)) + context.result.messages.append(Message(role=Role.ASSISTANT, text=validation_note)) async def agent_cleanup_middleware(context: AgentContext, call_next: Callable[[AgentContext], Awaitable[None]]) -> None: @@ -117,7 +118,7 @@ async def agent_cleanup_middleware(context: AgentContext, call_next: Callable[[A def _sanitize(response: AgentResponse) -> AgentResponse: found_prefix = state["found_prefix"] found_validation = False - cleaned_messages: list[ChatMessage] = [] + cleaned_messages: list[Message] = [] for message in response.messages: text = message.text @@ -138,7 +139,7 @@ def _sanitize(response: AgentResponse) -> AgentResponse: text = re.sub(r"\[\d+\]\s*", "", text) cleaned_messages.append( - ChatMessage( + Message( role=message.role, text=text.strip(), author_name=message.author_name, @@ -153,7 +154,7 @@ def _sanitize(response: AgentResponse) -> AgentResponse: if not found_validation: raise RuntimeError("Expected validation note not found in agent response.") - cleaned_messages.append(ChatMessage(role="assistant", text=" Agent: OK")) + cleaned_messages.append(Message(role=Role.ASSISTANT, text=" Agent: OK")) response.messages = cleaned_messages return response @@ -171,8 +172,8 @@ def _clean_update(update: AgentResponseUpdate) -> AgentResponseUpdate: content.text = text return update - context.result.with_transform_hook(_clean_update) - context.result.with_result_hook(_sanitize) + context.result.with_update_hook(_clean_update) + 
context.result.with_finalizer(_sanitize) elif isinstance(context.result, AgentResponse): context.result = _sanitize(context.result) @@ -191,19 +192,6 @@ async def main() -> None: tools=get_weather, middleware=[agent_cleanup_middleware], ) - # Streaming example - print("\n--- Streaming Example ---") - query = "What's the weather like in Portland?" - print(f"User: {query}") - print("Agent: ", end="", flush=True) - response = agent.run(query, stream=True) - # add the hooks to print what you want to see - response.with_transform_hook(lambda chunk: print(chunk.text, end="", flush=True)).with_result_hook( - lambda final: print(f"\nFinal streamed response: {final.text}", flush=True) - ) - # consume the stream to trigger the hooks - await response.get_final_response() - # Non-streaming example print("\n--- Non-streaming Example ---") query = "What's the weather like in Seattle?" @@ -211,6 +199,18 @@ async def main() -> None: result = await agent.run(query) print(f"Agent: {result}") + # Streaming example + print("\n--- Streaming Example ---") + query = "What's the weather like in Portland?" 
+ print(f"User: {query}") + print("Agent: ", end="", flush=True) + response = agent.run(query, stream=True) + async for chunk in response: + if chunk.text: + print(chunk.text, end="", flush=True) + print("\n") + print(f"Final Result: {(await response.get_final_response()).text}") + if __name__ == "__main__": asyncio.run(main()) diff --git a/python/samples/getting_started/multimodal_input/azure_chat_multimodal.py b/python/samples/getting_started/multimodal_input/azure_chat_multimodal.py index 826afcd28d..d7ff61b800 100644 --- a/python/samples/getting_started/multimodal_input/azure_chat_multimodal.py +++ b/python/samples/getting_started/multimodal_input/azure_chat_multimodal.py @@ -2,7 +2,7 @@ import asyncio -from agent_framework import ChatMessage, Content +from agent_framework import Message, Content from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential @@ -24,7 +24,7 @@ async def test_image() -> None: client = AzureOpenAIChatClient(credential=AzureCliCredential()) image_uri = create_sample_image() - message = ChatMessage( + message = Message( role="user", contents=[ Content.from_text(text="What's in this image?"), diff --git a/python/samples/getting_started/multimodal_input/azure_responses_multimodal.py b/python/samples/getting_started/multimodal_input/azure_responses_multimodal.py index af9bdb0f0a..cc22439872 100644 --- a/python/samples/getting_started/multimodal_input/azure_responses_multimodal.py +++ b/python/samples/getting_started/multimodal_input/azure_responses_multimodal.py @@ -3,7 +3,7 @@ import asyncio from pathlib import Path -from agent_framework import ChatMessage, Content +from agent_framework import Message, Content from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import AzureCliCredential @@ -33,7 +33,7 @@ async def test_image() -> None: client = AzureOpenAIResponsesClient(credential=AzureCliCredential()) image_uri = create_sample_image() - message = ChatMessage( + 
message = Message( role="user", contents=[ Content.from_text(text="What's in this image?"), @@ -50,7 +50,7 @@ async def test_pdf() -> None: client = AzureOpenAIResponsesClient(credential=AzureCliCredential()) pdf_bytes = load_sample_pdf() - message = ChatMessage( + message = Message( role="user", contents=[ Content.from_text(text="What information can you extract from this document?"), diff --git a/python/samples/getting_started/multimodal_input/openai_chat_multimodal.py b/python/samples/getting_started/multimodal_input/openai_chat_multimodal.py index 669b963609..3fcd01585f 100644 --- a/python/samples/getting_started/multimodal_input/openai_chat_multimodal.py +++ b/python/samples/getting_started/multimodal_input/openai_chat_multimodal.py @@ -5,7 +5,7 @@ import struct from pathlib import Path -from agent_framework import ChatMessage, Content +from agent_framework import Message, Content from agent_framework.openai import OpenAIChatClient ASSETS_DIR = Path(__file__).resolve().parent.parent / "sample_assets" @@ -45,7 +45,7 @@ async def test_image() -> None: client = OpenAIChatClient(model_id="gpt-4o") image_uri = create_sample_image() - message = ChatMessage( + message = Message( role="user", contents=[ Content.from_text(text="What's in this image?"), @@ -62,7 +62,7 @@ async def test_audio() -> None: client = OpenAIChatClient(model_id="gpt-4o-audio-preview") audio_uri = create_sample_audio() - message = ChatMessage( + message = Message( role="user", contents=[ Content.from_text(text="What do you hear in this audio?"), @@ -79,7 +79,7 @@ async def test_pdf() -> None: client = OpenAIChatClient(model_id="gpt-4o") pdf_bytes = load_sample_pdf() - message = ChatMessage( + message = Message( role="user", contents=[ Content.from_text(text="What information can you extract from this document?"), diff --git a/python/samples/getting_started/observability/advanced_zero_code.py b/python/samples/getting_started/observability/advanced_zero_code.py index 5ac0c70c22..ef4fe3b202 100644 
--- a/python/samples/getting_started/observability/advanced_zero_code.py +++ b/python/samples/getting_started/observability/advanced_zero_code.py @@ -12,7 +12,7 @@ from pydantic import Field if TYPE_CHECKING: - from agent_framework import ChatClientProtocol + from agent_framework import SupportsChatGetResponse """ @@ -51,7 +51,7 @@ async def get_weather( return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C." -async def run_chat_client(client: "ChatClientProtocol", stream: bool = False) -> None: +async def run_chat_client(client: "SupportsChatGetResponse", stream: bool = False) -> None: """Run an AI service. This function runs an AI service and prints the output. diff --git a/python/samples/getting_started/observability/agent_observability.py b/python/samples/getting_started/observability/agent_observability.py index 278b508de6..bd852b8107 100644 --- a/python/samples/getting_started/observability/agent_observability.py +++ b/python/samples/getting_started/observability/agent_observability.py @@ -4,7 +4,7 @@ from random import randint from typing import Annotated -from agent_framework import ChatAgent, tool +from agent_framework import Agent, tool from agent_framework.observability import configure_otel_providers, get_tracer from agent_framework.openai import OpenAIChatClient from opentelemetry.trace import SpanKind @@ -39,7 +39,7 @@ async def main(): with get_tracer().start_as_current_span("Scenario: Agent Chat", kind=SpanKind.CLIENT) as current_span: print(f"Trace ID: {format_trace_id(current_span.get_span_context().trace_id)}") - agent = ChatAgent( + agent = Agent( chat_client=OpenAIChatClient(), tools=get_weather, name="WeatherAgent", diff --git a/python/samples/getting_started/observability/agent_with_foundry_tracing.py b/python/samples/getting_started/observability/agent_with_foundry_tracing.py index 431c5b7868..1892fb85e5 100644 --- a/python/samples/getting_started/observability/agent_with_foundry_tracing.py +++ 
b/python/samples/getting_started/observability/agent_with_foundry_tracing.py @@ -16,7 +16,7 @@ from typing import Annotated import dotenv -from agent_framework import ChatAgent, tool +from agent_framework import Agent, tool from agent_framework.observability import create_resource, enable_instrumentation, get_tracer from agent_framework.openai import OpenAIResponsesClient from azure.ai.projects.aio import AIProjectClient @@ -30,7 +30,7 @@ This sample shows how you can set up telemetry in Microsoft Foundry for a custom agent. First ensure you have a Foundry workspace with Application Insights enabled. And use the Operate tab to Register an Agent. -Set the OpenTelemetry agent ID to the value used below in the ChatAgent creation: `weather-agent` (or change both). +Set the OpenTelemetry agent ID to the value used below in the Agent creation: `weather-agent` (or change both). The sample uses the Azure Monitor OpenTelemetry exporter to send traces to Application Insights. So ensure you have the `azure-monitor-opentelemetry` package installed.
""" @@ -85,7 +85,7 @@ async def main(): with get_tracer().start_as_current_span("Weather Agent Chat", kind=SpanKind.CLIENT) as current_span: print(f"Trace ID: {format_trace_id(current_span.get_span_context().trace_id)}") - agent = ChatAgent( + agent = Agent( chat_client=OpenAIResponsesClient(), tools=get_weather, name="WeatherAgent", diff --git a/python/samples/getting_started/observability/azure_ai_agent_observability.py b/python/samples/getting_started/observability/azure_ai_agent_observability.py index 08ac327913..fedd85d455 100644 --- a/python/samples/getting_started/observability/azure_ai_agent_observability.py +++ b/python/samples/getting_started/observability/azure_ai_agent_observability.py @@ -6,7 +6,7 @@ from typing import Annotated import dotenv -from agent_framework import ChatAgent, tool +from agent_framework import Agent, tool from agent_framework.azure import AzureAIClient from agent_framework.observability import get_tracer from azure.ai.projects.aio import AIProjectClient @@ -56,7 +56,7 @@ async def main(): with get_tracer().start_as_current_span("Single Agent Chat", kind=SpanKind.CLIENT) as current_span: print(f"Trace ID: {format_trace_id(current_span.get_span_context().trace_id)}") - agent = ChatAgent( + agent = Agent( chat_client=client, tools=get_weather, name="WeatherAgent", diff --git a/python/samples/getting_started/observability/configure_otel_providers_with_env_var.py b/python/samples/getting_started/observability/configure_otel_providers_with_env_var.py index 014f387033..b21bd5bb67 100644 --- a/python/samples/getting_started/observability/configure_otel_providers_with_env_var.py +++ b/python/samples/getting_started/observability/configure_otel_providers_with_env_var.py @@ -14,7 +14,7 @@ from pydantic import Field if TYPE_CHECKING: - from agent_framework import ChatClientProtocol + from agent_framework import SupportsChatGetResponse """ This sample shows how you can configure observability of an application via the @@ -42,7 +42,7 @@ async
def get_weather( return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C." -async def run_chat_client(client: "ChatClientProtocol", stream: bool = False) -> None: +async def run_chat_client(client: "SupportsChatGetResponse", stream: bool = False) -> None: """Run an AI service. This function runs an AI service and prints the output. diff --git a/python/samples/getting_started/observability/configure_otel_providers_with_parameters.py b/python/samples/getting_started/observability/configure_otel_providers_with_parameters.py index e82cbdb2be..252e836b82 100644 --- a/python/samples/getting_started/observability/configure_otel_providers_with_parameters.py +++ b/python/samples/getting_started/observability/configure_otel_providers_with_parameters.py @@ -14,7 +14,7 @@ from pydantic import Field if TYPE_CHECKING: - from agent_framework import ChatClientProtocol + from agent_framework import SupportsChatGetResponse """ This sample shows how you can configure observability with custom exporters passed directly @@ -42,7 +42,7 @@ async def get_weather( return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C." -async def run_chat_client(client: "ChatClientProtocol", stream: bool = False) -> None: +async def run_chat_client(client: "SupportsChatGetResponse", stream: bool = False) -> None: """Run an AI service. This function runs an AI service and prints the output. diff --git a/python/samples/getting_started/orchestrations/README.md b/python/samples/getting_started/orchestrations/README.md index 14f0be5fad..0965ff2178 100644 --- a/python/samples/getting_started/orchestrations/README.md +++ b/python/samples/getting_started/orchestrations/README.md @@ -52,10 +52,10 @@ from agent_framework.orchestrations import ( **Magentic checkpointing tip**: Treat `MagenticBuilder.participants` keys as stable identifiers. 
When resuming from a checkpoint, the rebuilt workflow must reuse the same participant names; otherwise the checkpoint cannot be applied and the run will fail fast. -**Handoff workflow tip**: Handoff workflows maintain the full conversation history including any `ChatMessage.additional_properties` emitted by your agents. This ensures routing metadata remains intact across all agent transitions. For specialist-to-specialist handoffs, use `.add_handoff(source, targets)` to configure which agents can route to which others with a fluent, type-safe API. +**Handoff workflow tip**: Handoff workflows maintain the full conversation history including any `Message.additional_properties` emitted by your agents. This ensures routing metadata remains intact across all agent transitions. For specialist-to-specialist handoffs, use `.add_handoff(source, targets)` to configure which agents can route to which others with a fluent, type-safe API. **Sequential orchestration note**: Sequential orchestration uses a few small adapter nodes for plumbing: -- `input-conversation` normalizes input to `list[ChatMessage]` +- `input-conversation` normalizes input to `list[Message]` - `to-conversation:` converts agent responses into the shared conversation - `complete` publishes the final output event (type='output') diff --git a/python/samples/getting_started/orchestrations/concurrent_agents.py b/python/samples/getting_started/orchestrations/concurrent_agents.py index 8333b91c89..19ff4ebed4 100644 --- a/python/samples/getting_started/orchestrations/concurrent_agents.py +++ b/python/samples/getting_started/orchestrations/concurrent_agents.py @@ -3,7 +3,7 @@ import asyncio from typing import Any -from agent_framework import ChatMessage +from agent_framework import Message from agent_framework.azure import AzureOpenAIChatClient from agent_framework.orchestrations import ConcurrentBuilder from azure.identity import AzureCliCredential @@ -14,7 +14,7 @@ Build a high-level concurrent workflow using 
ConcurrentBuilder and three domain agents. The default dispatcher fans out the same user prompt to all agents in parallel. The default aggregator fans in their results and yields output containing -a list[ChatMessage] representing the concatenated conversations from all agents. +a list[Message] representing the concatenated conversations from all agents. Demonstrates: - Minimal wiring with ConcurrentBuilder(participants=[...]).build() @@ -66,7 +66,7 @@ async def main() -> None: if outputs: print("===== Final Aggregated Conversation (messages) =====") for output in outputs: - messages: list[ChatMessage] | Any = output + messages: list[Message] | Any = output for i, msg in enumerate(messages, start=1): name = msg.author_name if msg.author_name else "user" print(f"{'-' * 60}\n\n{i:02d} [{name}]:\n{msg.text}") diff --git a/python/samples/getting_started/orchestrations/concurrent_custom_agent_executors.py b/python/samples/getting_started/orchestrations/concurrent_custom_agent_executors.py index 9463ba1915..8682f94117 100644 --- a/python/samples/getting_started/orchestrations/concurrent_custom_agent_executors.py +++ b/python/samples/getting_started/orchestrations/concurrent_custom_agent_executors.py @@ -6,8 +6,8 @@ from agent_framework import ( AgentExecutorRequest, AgentExecutorResponse, - ChatAgent, - ChatMessage, + Agent, + Message, Executor, WorkflowContext, handler, @@ -20,15 +20,15 @@ Sample: Concurrent Orchestration with Custom Agent Executors This sample shows a concurrent fan-out/fan-in pattern using child Executor classes -that each own their ChatAgent. The executors accept AgentExecutorRequest inputs +that each own their Agent. The executors accept AgentExecutorRequest inputs and emit AgentExecutorResponse outputs, which allows reuse of the high-level ConcurrentBuilder API and the default aggregator. 
Demonstrates: -- Executors that create their ChatAgent in __init__ (via AzureOpenAIChatClient) +- Executors that create their Agent in __init__ (via AzureOpenAIChatClient) - A @handler that converts AgentExecutorRequest -> AgentExecutorResponse -- ConcurrentBuilder(participants=[...]) to build fan-out/fan-in -- Default aggregator returning list[ChatMessage] (one user + one assistant per agent) +- ConcurrentBuilder().participants([...]) to build fan-out/fan-in +- Default aggregator returning list[Message] (one user + one assistant per agent) - Workflow completion when all participants become idle Prerequisites: @@ -37,7 +37,7 @@ class ResearcherExec(Executor): - agent: ChatAgent + agent: Agent def __init__(self, chat_client: AzureOpenAIChatClient, id: str = "researcher"): self.agent = chat_client.as_agent( @@ -57,7 +57,7 @@ async def run(self, request: AgentExecutorRequest, ctx: WorkflowContext[AgentExe class MarketerExec(Executor): - agent: ChatAgent + agent: Agent def __init__(self, chat_client: AzureOpenAIChatClient, id: str = "marketer"): self.agent = chat_client.as_agent( @@ -77,7 +77,7 @@ async def run(self, request: AgentExecutorRequest, ctx: WorkflowContext[AgentExe class LegalExec(Executor): - agent: ChatAgent + agent: Agent def __init__(self, chat_client: AzureOpenAIChatClient, id: str = "legal"): self.agent = chat_client.as_agent( @@ -103,14 +103,14 @@ async def main() -> None: marketer = MarketerExec(chat_client) legal = LegalExec(chat_client) - workflow = ConcurrentBuilder(participants=[researcher, marketer, legal]).build() + workflow = ConcurrentBuilder().participants([researcher, marketer, legal]).build() events = await workflow.run("We are launching a new budget-friendly electric bike for urban commuters.") outputs = events.get_outputs() if outputs: print("===== Final Aggregated Conversation (messages) =====") - messages: list[ChatMessage] | Any = outputs[0] # Get the first (and typically only) output + messages: list[Message] | Any = outputs[0] # 
Get the first (and typically only) output for i, msg in enumerate(messages, start=1): name = msg.author_name if msg.author_name else "user" print(f"{'-' * 60}\n\n{i:02d} [{name}]:\n{msg.text}") diff --git a/python/samples/getting_started/orchestrations/concurrent_custom_aggregator.py b/python/samples/getting_started/orchestrations/concurrent_custom_aggregator.py index a15cae06fd..b11bcc7c29 100644 --- a/python/samples/getting_started/orchestrations/concurrent_custom_aggregator.py +++ b/python/samples/getting_started/orchestrations/concurrent_custom_aggregator.py @@ -3,7 +3,7 @@ import asyncio from typing import Any -from agent_framework import ChatMessage +from agent_framework import Message from agent_framework.azure import AzureOpenAIChatClient from agent_framework.orchestrations import ConcurrentBuilder from azure.identity import AzureCliCredential @@ -66,14 +66,14 @@ async def summarize_results(results: list[Any]) -> str: expert_sections.append(f"{getattr(r, 'executor_id', 'expert')}: (error: {type(e).__name__}: {e})") # Ask the model to synthesize a concise summary of the experts' outputs - system_msg = ChatMessage( + system_msg = Message( "system", text=( "You are a helpful assistant that consolidates multiple domain expert outputs " "into one cohesive, concise summary with clear takeaways. Keep it under 200 words." ), ) - user_msg = ChatMessage("user", text="\n\n".join(expert_sections)) + user_msg = Message("user", text="\n\n".join(expert_sections)) response = await chat_client.get_response([system_msg, user_msg]) # Return the model's final assistant text as the completion result @@ -83,7 +83,7 @@ async def summarize_results(results: list[Any]) -> str: # - participants([...]) accepts SupportsAgentRun (agents) or Executor instances. # Each participant becomes a parallel branch (fan-out) from an internal dispatcher. # - with_aggregator(...) 
overrides the default aggregator: - # • Default aggregator -> returns list[ChatMessage] (one user + one assistant per agent) + # • Default aggregator -> returns list[Message] (one user + one assistant per agent) # • Custom callback -> return value becomes workflow output (string here) # The callback can be sync or async; it receives list[AgentExecutorResponse]. workflow = ( diff --git a/python/samples/getting_started/orchestrations/group_chat_agent_manager.py b/python/samples/getting_started/orchestrations/group_chat_agent_manager.py index 33d62d98da..07b0bb3f54 100644 --- a/python/samples/getting_started/orchestrations/group_chat_agent_manager.py +++ b/python/samples/getting_started/orchestrations/group_chat_agent_manager.py @@ -5,8 +5,8 @@ from agent_framework import ( AgentResponseUpdate, - ChatAgent, - ChatMessage, + Agent, + Message, ) from agent_framework.azure import AzureOpenAIChatClient from agent_framework.orchestrations import GroupChatBuilder @@ -17,7 +17,7 @@ What it does: - Demonstrates the new set_manager() API for agent-based coordination -- Manager is a full ChatAgent with access to tools, context, and observability +- Manager is a full Agent with access to tools, context, and observability - Coordinates a researcher and writer agent to solve tasks collaboratively Prerequisites: @@ -42,7 +42,7 @@ async def main() -> None: # Note: This agent (and the underlying chat client) must support structured outputs. # The group chat workflow relies on this to parse the orchestrator's decisions. # `response_format` is set internally by the GroupChat workflow when the agent is invoked. 
- orchestrator_agent = ChatAgent( + orchestrator_agent = Agent( name="Orchestrator", description="Coordinates multi-agent collaboration by selecting speakers", instructions=ORCHESTRATOR_AGENT_INSTRUCTIONS, @@ -50,14 +50,14 @@ async def main() -> None: ) # Participant agents - researcher = ChatAgent( + researcher = Agent( name="Researcher", description="Collects relevant background information", instructions="Gather concise facts that help a teammate answer the question.", chat_client=chat_client, ) - writer = ChatAgent( + writer = Agent( name="Writer", description="Synthesizes polished answers from gathered information", instructions="Compose clear and structured answers using any notes provided.", @@ -103,7 +103,7 @@ async def main() -> None: print(data.text, end="", flush=True) elif event.type == "output": # The output of the group chat workflow is a collection of chat messages from all participants - outputs = cast(list[ChatMessage], event.data) + outputs = cast(list[Message], event.data) print("\n" + "=" * 80) print("\nFinal Conversation Transcript:\n") for message in outputs: diff --git a/python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py b/python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py index be2579f496..e26e9ad8cf 100644 --- a/python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py +++ b/python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py @@ -6,8 +6,8 @@ from agent_framework import ( AgentResponseUpdate, - ChatAgent, - ChatMessage, + Agent, + Message, ) from agent_framework.azure import AzureOpenAIChatClient from agent_framework.orchestrations import GroupChatBuilder @@ -48,7 +48,7 @@ def _get_chat_client() -> AzureOpenAIChatClient: async def main() -> None: # Create debate moderator with structured output for speaker selection # Note: Participant names and descriptions are automatically injected by the orchestrator - moderator = ChatAgent( + 
moderator = Agent( name="Moderator", description="Guides philosophical discussion by selecting next speaker", instructions=""" @@ -78,7 +78,7 @@ async def main() -> None: chat_client=_get_chat_client(), ) - farmer = ChatAgent( + farmer = Agent( name="Farmer", description="A rural farmer from Southeast Asia", instructions=""" @@ -94,7 +94,7 @@ async def main() -> None: chat_client=_get_chat_client(), ) - developer = ChatAgent( + developer = Agent( name="Developer", description="An urban software developer from the United States", instructions=""" @@ -110,7 +110,7 @@ async def main() -> None: chat_client=_get_chat_client(), ) - teacher = ChatAgent( + teacher = Agent( name="Teacher", description="A retired history teacher from Eastern Europe", instructions=""" @@ -127,7 +127,7 @@ async def main() -> None: chat_client=_get_chat_client(), ) - activist = ChatAgent( + activist = Agent( name="Activist", description="A young activist from South America", instructions=""" @@ -143,7 +143,7 @@ async def main() -> None: chat_client=_get_chat_client(), ) - spiritual_leader = ChatAgent( + spiritual_leader = Agent( name="SpiritualLeader", description="A spiritual leader from the Middle East", instructions=""" @@ -159,7 +159,7 @@ async def main() -> None: chat_client=_get_chat_client(), ) - artist = ChatAgent( + artist = Agent( name="Artist", description="An artist from Africa", instructions=""" @@ -175,7 +175,7 @@ async def main() -> None: chat_client=_get_chat_client(), ) - immigrant = ChatAgent( + immigrant = Agent( name="Immigrant", description="An immigrant entrepreneur from Asia living in Canada", instructions=""" @@ -191,7 +191,7 @@ async def main() -> None: chat_client=_get_chat_client(), ) - doctor = ChatAgent( + doctor = Agent( name="Doctor", description="A doctor from Scandinavia", instructions=""" @@ -255,7 +255,7 @@ async def main() -> None: print(data.text, end="", flush=True) elif event.type == "output": # The output of the group chat workflow is a collection of chat 
messages from all participants - outputs = cast(list[ChatMessage], event.data) + outputs = cast(list[Message], event.data) print("\n" + "=" * 80) print("\nFinal Conversation Transcript:\n") for message in outputs: diff --git a/python/samples/getting_started/orchestrations/group_chat_simple_selector.py b/python/samples/getting_started/orchestrations/group_chat_simple_selector.py index bb76e97de1..b9f22237df 100644 --- a/python/samples/getting_started/orchestrations/group_chat_simple_selector.py +++ b/python/samples/getting_started/orchestrations/group_chat_simple_selector.py @@ -5,8 +5,8 @@ from agent_framework import ( AgentResponseUpdate, - ChatAgent, - ChatMessage, + Agent, + Message, ) from agent_framework.azure import AzureOpenAIChatClient from agent_framework.orchestrations import GroupChatBuilder, GroupChatState @@ -36,7 +36,7 @@ async def main() -> None: chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) # Participant agents - expert = ChatAgent( + expert = Agent( name="PythonExpert", instructions=( "You are an expert in Python in a workgroup. " @@ -46,7 +46,7 @@ async def main() -> None: chat_client=chat_client, ) - verifier = ChatAgent( + verifier = Agent( name="AnswerVerifier", instructions=( "You are a programming expert in a workgroup. " @@ -57,7 +57,7 @@ async def main() -> None: chat_client=chat_client, ) - clarifier = ChatAgent( + clarifier = Agent( name="AnswerClarifier", instructions=( "You are an accessibility expert in a workgroup. " @@ -68,7 +68,7 @@ async def main() -> None: chat_client=chat_client, ) - skeptic = ChatAgent( + skeptic = Agent( name="Skeptic", instructions=( "You are a devil's advocate in a workgroup. 
" @@ -124,7 +124,7 @@ async def main() -> None: print(data.text, end="", flush=True) elif event.type == "output": # The output of the group chat workflow is a collection of chat messages from all participants - outputs = cast(list[ChatMessage], event.data) + outputs = cast(list[Message], event.data) print("\n" + "=" * 80) print("\nFinal Conversation Transcript:\n") for message in outputs: diff --git a/python/samples/getting_started/orchestrations/handoff_autonomous.py b/python/samples/getting_started/orchestrations/handoff_autonomous.py index 9b151b656a..54eae77526 100644 --- a/python/samples/getting_started/orchestrations/handoff_autonomous.py +++ b/python/samples/getting_started/orchestrations/handoff_autonomous.py @@ -6,8 +6,8 @@ from agent_framework import ( AgentResponseUpdate, - ChatAgent, - ChatMessage, + Agent, + Message, resolve_agent_id, ) from agent_framework.azure import AzureOpenAIChatClient @@ -38,7 +38,7 @@ def create_agents( chat_client: AzureOpenAIChatClient, -) -> tuple[ChatAgent, ChatAgent, ChatAgent]: +) -> tuple[Agent, Agent, Agent]: """Create coordinator and specialists for autonomous iteration.""" coordinator = chat_client.as_agent( instructions=( @@ -129,7 +129,7 @@ async def main() -> None: print(data.text, end="", flush=True) elif event.type == "output": # The output of the handoff workflow is a collection of chat messages from all participants - outputs = cast(list[ChatMessage], event.data) + outputs = cast(list[Message], event.data) print("\n" + "=" * 80) print("\nFinal Conversation Transcript:\n") for message in outputs: diff --git a/python/samples/getting_started/orchestrations/handoff_simple.py b/python/samples/getting_started/orchestrations/handoff_simple.py index 53e6bbcd60..d79819436a 100644 --- a/python/samples/getting_started/orchestrations/handoff_simple.py +++ b/python/samples/getting_started/orchestrations/handoff_simple.py @@ -5,8 +5,8 @@ from agent_framework import ( AgentResponse, - ChatAgent, - ChatMessage, + Agent, + 
Message, WorkflowEvent, WorkflowRunState, tool, @@ -54,7 +54,7 @@ def process_return(order_number: Annotated[str, "Order number to process return return f"Return initiated successfully for order {order_number}. You will receive return instructions via email." -def create_agents(chat_client: AzureOpenAIChatClient) -> tuple[ChatAgent, ChatAgent, ChatAgent, ChatAgent]: +def create_agents(chat_client: AzureOpenAIChatClient) -> tuple[Agent, Agent, Agent, Agent]: """Create and configure the triage and specialist agents. Args: @@ -138,7 +138,7 @@ def _handle_events(events: list[WorkflowEvent]) -> list[WorkflowEvent[HandoffAge print(f"- {speaker}: {message.text}") elif event.type == "output": # The output of the handoff workflow is a collection of chat messages from all participants - conversation = cast(list[ChatMessage], event.data) + conversation = cast(list[Message], event.data) if isinstance(conversation, list): print("\n=== Final Conversation Snapshot ===") for message in conversation: diff --git a/python/samples/getting_started/orchestrations/handoff_with_code_interpreter_file.py b/python/samples/getting_started/orchestrations/handoff_with_code_interpreter_file.py index 159105d54c..223c7f946c 100644 --- a/python/samples/getting_started/orchestrations/handoff_with_code_interpreter_file.py +++ b/python/samples/getting_started/orchestrations/handoff_with_code_interpreter_file.py @@ -32,8 +32,8 @@ from agent_framework import ( AgentResponseUpdate, - ChatAgent, - ChatMessage, + Agent, + Message, HostedCodeInterpreterTool, WorkflowEvent, WorkflowRunState, @@ -83,7 +83,7 @@ def _handle_events(events: list[WorkflowEvent]) -> tuple[list[WorkflowEvent[Hand file_ids.append(file_id) print(f"[Found file annotation: file_id={file_id}]") elif event.type == "output": - conversation = cast(list[ChatMessage], event.data) + conversation = cast(list[Message], event.data) if isinstance(conversation, list): print("\n=== Final Conversation Snapshot ===") for message in conversation: @@ 
-95,7 +95,7 @@ def _handle_events(events: list[WorkflowEvent]) -> tuple[list[WorkflowEvent[Hand @asynccontextmanager -async def create_agents_v1(credential: AzureCliCredential) -> AsyncIterator[tuple[ChatAgent, ChatAgent]]: +async def create_agents_v1(credential: AzureCliCredential) -> AsyncIterator[tuple[Agent, Agent]]: """Create agents using V1 AzureAIAgentClient.""" from agent_framework.azure import AzureAIAgentClient @@ -122,7 +122,7 @@ async def create_agents_v1(credential: AzureCliCredential) -> AsyncIterator[tupl @asynccontextmanager -async def create_agents_v2(credential: AzureCliCredential) -> AsyncIterator[tuple[ChatAgent, ChatAgent]]: +async def create_agents_v2(credential: AzureCliCredential) -> AsyncIterator[tuple[Agent, Agent]]: """Create agents using V2 AzureAIClient. Each agent needs its own client instance because the V2 client binds diff --git a/python/samples/getting_started/orchestrations/magentic.py b/python/samples/getting_started/orchestrations/magentic.py index d0e4f13703..4fac876111 100644 --- a/python/samples/getting_started/orchestrations/magentic.py +++ b/python/samples/getting_started/orchestrations/magentic.py @@ -7,8 +7,8 @@ from agent_framework import ( AgentResponseUpdate, - ChatAgent, - ChatMessage, + Agent, + Message, HostedCodeInterpreterTool, WorkflowEvent, ) @@ -24,9 +24,9 @@ What it does: - Orchestrates multiple agents using `MagenticBuilder` with streaming callbacks. -- ResearcherAgent (ChatAgent backed by an OpenAI chat client) for +- ResearcherAgent (Agent backed by an OpenAI chat client) for finding information. -- CoderAgent (ChatAgent backed by OpenAI Assistants with the hosted +- CoderAgent (Agent backed by OpenAI Assistants with the hosted code interpreter tool) for analysis and computation. 
The workflow is configured with: @@ -44,7 +44,7 @@ async def main() -> None: - researcher_agent = ChatAgent( + researcher_agent = Agent( name="ResearcherAgent", description="Specialist in research and information gathering", instructions=( @@ -54,7 +54,7 @@ async def main() -> None: chat_client=OpenAIChatClient(model_id="gpt-4o-search-preview"), ) - coder_agent = ChatAgent( + coder_agent = Agent( name="CoderAgent", description="A helpful assistant that writes and executes code to process and analyze data.", instructions="You solve questions using code. Please provide detailed analysis and computation process.", @@ -63,7 +63,7 @@ async def main() -> None: ) # Create a manager agent for orchestration - manager_agent = ChatAgent( + manager_agent = Agent( name="MagenticManager", description="Orchestrator that coordinates the research and coding workflow", instructions="You coordinate a team to complete complex tasks efficiently.", @@ -110,7 +110,7 @@ async def main() -> None: elif event.type == "magentic_orchestrator": print(f"\n[Magentic Orchestrator Event] Type: {event.data.event_type.name}") - if isinstance(event.data.content, ChatMessage): + if isinstance(event.data.content, Message): print(f"Please review the plan:\n{event.data.content.text}") elif isinstance(event.data.content, MagenticProgressLedger): print(f"Please review progress ledger:\n{json.dumps(event.data.content.to_dict(), indent=2)}") @@ -130,7 +130,7 @@ async def main() -> None: if output_event: # The output of the magentic workflow is a collection of chat messages from all participants - outputs = cast(list[ChatMessage], output_event.data) + outputs = cast(list[Message], output_event.data) print("\n" + "=" * 80) print("\nFinal Conversation Transcript:\n") for message in outputs: diff --git a/python/samples/getting_started/orchestrations/magentic_checkpoint.py b/python/samples/getting_started/orchestrations/magentic_checkpoint.py index 08e26909e0..410687a0fb 100644 --- 
a/python/samples/getting_started/orchestrations/magentic_checkpoint.py +++ b/python/samples/getting_started/orchestrations/magentic_checkpoint.py @@ -6,8 +6,8 @@ from typing import cast from agent_framework import ( - ChatAgent, - ChatMessage, + Agent, + Message, FileCheckpointStorage, WorkflowCheckpoint, WorkflowEvent, @@ -52,14 +52,14 @@ def build_workflow(checkpoint_storage: FileCheckpointStorage): # Two vanilla ChatAgents act as participants in the orchestration. They do not need # extra state handling because their inputs/outputs are fully described by chat messages. - researcher = ChatAgent( + researcher = Agent( name="ResearcherAgent", description="Collects background facts and references for the project.", instructions=("You are the research lead. Gather crisp bullet points the team should know."), chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), ) - writer = ChatAgent( + writer = Agent( name="WriterAgent", description="Synthesizes the final brief for stakeholders.", instructions=("You convert the research notes into a structured brief with milestones and risks."), @@ -67,7 +67,7 @@ def build_workflow(checkpoint_storage: FileCheckpointStorage): ) # Create a manager agent for orchestration - manager_agent = ChatAgent( + manager_agent = Agent( name="MagenticManager", description="Orchestrator that coordinates the research and writing workflow", instructions="You coordinate a team to complete complex tasks efficiently.", @@ -167,7 +167,7 @@ async def main() -> None: if not result: print("No result data from workflow.") return - output_messages = cast(list[ChatMessage], result) + output_messages = cast(list[Message], result) print("\n=== Final Answer ===") # The output of the Magentic workflow is a list of ChatMessages with only one final message # generated by the orchestrator. 
@@ -234,7 +234,7 @@ def _pending_message_count(cp: WorkflowCheckpoint) -> int: print("No result data from post-plan resume.") return - output_messages = cast(list[ChatMessage], post_result) + output_messages = cast(list[Message], post_result) print("\n=== Final Answer (post-plan resume) ===") # The output of the Magentic workflow is a list of ChatMessages with only one final message # generated by the orchestrator. diff --git a/python/samples/getting_started/orchestrations/magentic_human_plan_review.py b/python/samples/getting_started/orchestrations/magentic_human_plan_review.py index 24757a1692..2ed71d29bf 100644 --- a/python/samples/getting_started/orchestrations/magentic_human_plan_review.py +++ b/python/samples/getting_started/orchestrations/magentic_human_plan_review.py @@ -7,8 +7,8 @@ from agent_framework import ( AgentResponseUpdate, - ChatAgent, - ChatMessage, + Agent, + Message, WorkflowEvent, ) from agent_framework.openai import OpenAIChatClient @@ -64,7 +64,7 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str print("=" * 60) print("Final discussion summary:") # To make the type checker happy, we cast event.data to the expected type - outputs = cast(list[ChatMessage], event.data) + outputs = cast(list[Message], event.data) for msg in outputs: speaker = msg.author_name or msg.role print(f"[{speaker}]: {msg.text}") @@ -92,21 +92,21 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str async def main() -> None: - researcher_agent = ChatAgent( + researcher_agent = Agent( name="ResearcherAgent", description="Specialist in research and information gathering", instructions="You are a Researcher. You find information and gather facts.", chat_client=OpenAIChatClient(model_id="gpt-4o"), ) - analyst_agent = ChatAgent( + analyst_agent = Agent( name="AnalystAgent", description="Data analyst who processes and summarizes research findings", instructions="You are an Analyst. 
You analyze findings and create summaries.", chat_client=OpenAIChatClient(model_id="gpt-4o"), ) - manager_agent = ChatAgent( + manager_agent = Agent( name="MagenticManager", description="Orchestrator that coordinates the workflow", instructions="You coordinate a team to complete tasks efficiently.", diff --git a/python/samples/getting_started/orchestrations/sequential_agents.py b/python/samples/getting_started/orchestrations/sequential_agents.py index 37c9afe975..7de09651cc 100644 --- a/python/samples/getting_started/orchestrations/sequential_agents.py +++ b/python/samples/getting_started/orchestrations/sequential_agents.py @@ -3,7 +3,7 @@ import asyncio from typing import cast -from agent_framework import ChatMessage +from agent_framework import Message from agent_framework.azure import AzureOpenAIChatClient from agent_framework.orchestrations import SequentialBuilder from azure.identity import AzureCliCredential @@ -12,7 +12,7 @@ Sample: Sequential workflow (agent-focused API) with shared conversation context Build a high-level sequential workflow using SequentialBuilder and two domain agents. -The shared conversation (list[ChatMessage]) flows through each participant. Each agent +The shared conversation (list[Message]) flows through each participant. Each agent appends its assistant message to the context. The workflow outputs the final conversation list when complete. 
@@ -46,10 +46,10 @@ async def main() -> None: workflow = SequentialBuilder(participants=[writer, reviewer]).build() # 3) Run and collect outputs - outputs: list[list[ChatMessage]] = [] + outputs: list[list[Message]] = [] async for event in workflow.run("Write a tagline for a budget-friendly eBike.", stream=True): if event.type == "output": - outputs.append(cast(list[ChatMessage], event.data)) + outputs.append(cast(list[Message], event.data)) if outputs: print("===== Final Conversation =====") diff --git a/python/samples/getting_started/orchestrations/sequential_custom_executors.py b/python/samples/getting_started/orchestrations/sequential_custom_executors.py index d421e85f1c..06cbd89764 100644 --- a/python/samples/getting_started/orchestrations/sequential_custom_executors.py +++ b/python/samples/getting_started/orchestrations/sequential_custom_executors.py @@ -5,7 +5,7 @@ from agent_framework import ( AgentExecutorResponse, - ChatMessage, + Message, Executor, WorkflowContext, handler, @@ -18,13 +18,13 @@ Sample: Sequential workflow mixing agents and a custom summarizer executor This demonstrates how SequentialBuilder chains participants with a shared -conversation context (list[ChatMessage]). An agent produces content; a custom +conversation context (list[Message]). An agent produces content; a custom executor appends a compact summary to the conversation. The workflow completes after all participants have executed in sequence, and the final output contains the complete conversation. 
Custom executor contract: -- Provide at least one @handler accepting AgentExecutorResponse and a WorkflowContext[list[ChatMessage]] +- Provide at least one @handler accepting AgentExecutorResponse and a WorkflowContext[list[Message]] - Emit the updated conversation via ctx.send_message([...]) Prerequisites: @@ -36,22 +36,22 @@ class Summarizer(Executor): """Simple summarizer: consumes full conversation and appends an assistant summary.""" @handler - async def summarize(self, agent_response: AgentExecutorResponse, ctx: WorkflowContext[list[ChatMessage]]) -> None: + async def summarize(self, agent_response: AgentExecutorResponse, ctx: WorkflowContext[list[Message]]) -> None: """Append a summary message to a copy of the full conversation. Note: A custom executor must be able to handle the message type from the prior participant, and produce the message type expected by the next participant. In this case, the prior participant is an agent thus the input is AgentExecutorResponse (an agent will be wrapped in an AgentExecutor, which produces `AgentExecutorResponse`). If the next participant is also an agent or this is the final participant, - the output must be `list[ChatMessage]`. + the output must be `list[Message]`. 
""" if not agent_response.full_conversation: - await ctx.send_message([ChatMessage("assistant", ["No conversation to summarize."])]) + await ctx.send_message([Message("assistant", ["No conversation to summarize."])]) return users = sum(1 for m in agent_response.full_conversation if m.role == "user") assistants = sum(1 for m in agent_response.full_conversation if m.role == "assistant") - summary = ChatMessage("assistant", [f"Summary -> users:{users} assistants:{assistants}"]) + summary = Message("assistant", [f"Summary -> users:{users} assistants:{assistants}"]) final_conversation = list(agent_response.full_conversation) + [summary] await ctx.send_message(final_conversation) @@ -74,7 +74,7 @@ async def main() -> None: if outputs: print("===== Final Conversation =====") - messages: list[ChatMessage] | Any = outputs[0] + messages: list[Message] | Any = outputs[0] for i, msg in enumerate(messages, start=1): name = msg.author_name or ("assistant" if msg.role == "assistant" else "user") print(f"{'-' * 60}\n{i:02d} [{name}]\n{msg.text}") diff --git a/python/samples/getting_started/purview_agent/README.md b/python/samples/getting_started/purview_agent/README.md index 8982a68830..1e8e671644 100644 --- a/python/samples/getting_started/purview_agent/README.md +++ b/python/samples/getting_started/purview_agent/README.md @@ -1,6 +1,6 @@ ## Purview Policy Enforcement Sample (Python) -This getting-started sample shows how to attach Microsoft Purview policy evaluation to an Agent Framework `ChatAgent` using the **middleware** approach. +This getting-started sample shows how to attach Microsoft Purview policy evaluation to an Agent Framework `Agent` using the **middleware** approach. **What this sample demonstrates:** 1. 
Configure an Azure OpenAI chat client @@ -99,7 +99,7 @@ Prompt blocks set a system-level message: `Prompt blocked by policy` and termina ### Agent Middleware Injection ```python -agent = ChatAgent( +agent = Agent( chat_client=chat_client, instructions="You are good at telling jokes.", name="Joker", diff --git a/python/samples/getting_started/purview_agent/sample_purview_agent.py b/python/samples/getting_started/purview_agent/sample_purview_agent.py index b5231c2a5f..7d759a49b5 100644 --- a/python/samples/getting_started/purview_agent/sample_purview_agent.py +++ b/python/samples/getting_started/purview_agent/sample_purview_agent.py @@ -25,7 +25,7 @@ import os from typing import Any -from agent_framework import AgentResponse, ChatAgent, ChatMessage +from agent_framework import AgentResponse, Agent, Message from agent_framework.azure import AzureOpenAIChatClient from agent_framework.microsoft import ( PurviewChatPolicyMiddleware, @@ -150,7 +150,7 @@ async def run_with_agent_middleware() -> None: ), ) - agent = ChatAgent( + agent = Agent( chat_client=chat_client, instructions=JOKER_INSTRUCTIONS, name=JOKER_NAME, @@ -159,12 +159,12 @@ async def run_with_agent_middleware() -> None: print("-- Agent MiddlewareTypes Path --") first: AgentResponse = await agent.run( - ChatMessage("user", ["Tell me a joke about a pirate."], additional_properties={"user_id": user_id}) + Message("user", ["Tell me a joke about a pirate."], additional_properties={"user_id": user_id}) ) print("First response (agent middleware):\n", first) second: AgentResponse = await agent.run( - ChatMessage( + Message( role="user", text="That was funny. 
Tell me another one.", additional_properties={"user_id": user_id} ) ) @@ -194,7 +194,7 @@ async def run_with_chat_middleware() -> None: ], ) - agent = ChatAgent( + agent = Agent( chat_client=chat_client, instructions=JOKER_INSTRUCTIONS, name=JOKER_NAME, @@ -202,7 +202,7 @@ async def run_with_chat_middleware() -> None: print("-- Chat MiddlewareTypes Path --") first: AgentResponse = await agent.run( - ChatMessage( + Message( role="user", text="Give me a short clean joke.", additional_properties={"user_id": user_id}, @@ -211,7 +211,7 @@ async def run_with_chat_middleware() -> None: print("First response (chat middleware):\n", first) second: AgentResponse = await agent.run( - ChatMessage( + Message( role="user", text="One more please.", additional_properties={"user_id": user_id}, @@ -241,7 +241,7 @@ async def run_with_custom_cache_provider() -> None: cache_provider=custom_cache, ) - agent = ChatAgent( + agent = Agent( chat_client=chat_client, instructions=JOKER_INSTRUCTIONS, name=JOKER_NAME, @@ -252,14 +252,14 @@ async def run_with_custom_cache_provider() -> None: print("Using SimpleDictCacheProvider") first: AgentResponse = await agent.run( - ChatMessage( + Message( role="user", text="Tell me a joke about a programmer.", additional_properties={"user_id": user_id} ) ) print("First response (custom provider):\n", first) second: AgentResponse = await agent.run( - ChatMessage("user", ["That's hilarious! One more?"], additional_properties={"user_id": user_id}) + Message("user", ["That's hilarious! 
One more?"], additional_properties={"user_id": user_id}) ) print("Second response (custom provider):\n", second) @@ -283,7 +283,7 @@ async def run_with_custom_cache_provider() -> None: ), ) - agent = ChatAgent( + agent = Agent( chat_client=chat_client, instructions=JOKER_INSTRUCTIONS, name=JOKER_NAME, @@ -294,12 +294,12 @@ async def run_with_custom_cache_provider() -> None: print("Using default InMemoryCacheProvider with settings-based configuration") first: AgentResponse = await agent.run( - ChatMessage("user", ["Tell me a joke about AI."], additional_properties={"user_id": user_id}) + Message("user", ["Tell me a joke about AI."], additional_properties={"user_id": user_id}) ) print("First response (default cache):\n", first) second: AgentResponse = await agent.run( - ChatMessage("user", ["Nice! Another AI joke please."], additional_properties={"user_id": user_id}) + Message("user", ["Nice! Another AI joke please."], additional_properties={"user_id": user_id}) ) print("Second response (default cache):\n", second) diff --git a/python/samples/getting_started/threads/custom_chat_message_store_thread.py b/python/samples/getting_started/threads/custom_chat_message_store_thread.py index 709f9d45de..96b49cf0ec 100644 --- a/python/samples/getting_started/threads/custom_chat_message_store_thread.py +++ b/python/samples/getting_started/threads/custom_chat_message_store_thread.py @@ -4,7 +4,7 @@ from collections.abc import Collection from typing import Any -from agent_framework import ChatMessage, ChatMessageStoreProtocol +from agent_framework import Message, ChatMessageStoreProtocol from agent_framework._threads import ChatMessageStoreState from agent_framework.openai import OpenAIChatClient @@ -21,15 +21,15 @@ class CustomChatMessageStore(ChatMessageStoreProtocol): """Implementation of custom chat message store. 
In real applications, this can be an implementation of relational database or vector store.""" - def __init__(self, messages: Collection[ChatMessage] | None = None) -> None: - self._messages: list[ChatMessage] = [] + def __init__(self, messages: Collection[Message] | None = None) -> None: + self._messages: list[Message] = [] if messages: self._messages.extend(messages) - async def add_messages(self, messages: Collection[ChatMessage]) -> None: + async def add_messages(self, messages: Collection[Message]) -> None: self._messages.extend(messages) - async def list_messages(self) -> list[ChatMessage]: + async def list_messages(self) -> list[Message]: return self._messages @classmethod diff --git a/python/samples/getting_started/tools/function_tool_with_approval.py b/python/samples/getting_started/tools/function_tool_with_approval.py index 4a76c631e6..855ee20719 100644 --- a/python/samples/getting_started/tools/function_tool_with_approval.py +++ b/python/samples/getting_started/tools/function_tool_with_approval.py @@ -4,7 +4,7 @@ from random import randrange from typing import TYPE_CHECKING, Annotated, Any -from agent_framework import AgentResponse, ChatAgent, ChatMessage, tool +from agent_framework import AgentResponse, Agent, Message, tool from agent_framework.openai import OpenAIResponsesClient if TYPE_CHECKING: @@ -59,14 +59,14 @@ async def handle_approvals(query: str, agent: "SupportsAgentRun") -> AgentRespon ) # Add the assistant message with the approval request - new_inputs.append(ChatMessage("assistant", [user_input_needed])) + new_inputs.append(Message("assistant", [user_input_needed])) # Get user approval user_approval = await asyncio.to_thread(input, "\nApprove function call? 
(y/n): ") # Add the user's approval response new_inputs.append( - ChatMessage("user", [user_input_needed.to_function_approval_response(user_approval.lower() == "y")]) + Message("user", [user_input_needed.to_function_approval_response(user_approval.lower() == "y")]) ) # Run again with all the context @@ -109,14 +109,14 @@ async def handle_approvals_streaming(query: str, agent: "SupportsAgentRun") -> N ) # Add the assistant message with the approval request - new_inputs.append(ChatMessage("assistant", [user_input_needed])) + new_inputs.append(Message("assistant", [user_input_needed])) # Get user approval user_approval = await asyncio.to_thread(input, "\nApprove function call? (y/n): ") # Add the user's approval response new_inputs.append( - ChatMessage("user", [user_input_needed.to_function_approval_response(user_approval.lower() == "y")]) + Message("user", [user_input_needed.to_function_approval_response(user_approval.lower() == "y")]) ) # Update input with all the context for next iteration @@ -127,7 +127,7 @@ async def run_weather_agent_with_approval(stream: bool) -> None: """Example showing AI function with approval requirement.""" print(f"\n=== Weather Agent with Approval Required ({'Streaming' if stream else 'Non-Streaming'}) ===\n") - async with ChatAgent( + async with Agent( chat_client=OpenAIResponsesClient(), name="WeatherAgent", instructions=("You are a helpful weather assistant. 
Use the get_weather tool to provide weather information."), diff --git a/python/samples/getting_started/tools/function_tool_with_approval_and_threads.py b/python/samples/getting_started/tools/function_tool_with_approval_and_threads.py index de1da05991..614c71e936 100644 --- a/python/samples/getting_started/tools/function_tool_with_approval_and_threads.py +++ b/python/samples/getting_started/tools/function_tool_with_approval_and_threads.py @@ -3,7 +3,7 @@ import asyncio from typing import Annotated -from agent_framework import ChatAgent, ChatMessage, tool +from agent_framework import Agent, Message, tool from agent_framework.azure import AzureOpenAIChatClient """ @@ -28,7 +28,7 @@ async def approval_example() -> None: """Example showing approval with threads.""" print("=== Tool Approval with Thread ===\n") - agent = ChatAgent( + agent = Agent( chat_client=AzureOpenAIChatClient(), name="CalendarAgent", instructions="You are a helpful calendar assistant.", @@ -55,7 +55,7 @@ async def approval_example() -> None: # Step 2: Send approval response approval_response = request.to_function_approval_response(approved=approved) - result = await agent.run(ChatMessage("user", [approval_response]), thread=thread) + result = await agent.run(Message("user", [approval_response]), thread=thread) print(f"Agent: {result}\n") @@ -64,7 +64,7 @@ async def rejection_example() -> None: """Example showing rejection with threads.""" print("=== Tool Rejection with Thread ===\n") - agent = ChatAgent( + agent = Agent( chat_client=AzureOpenAIChatClient(), name="CalendarAgent", instructions="You are a helpful calendar assistant.", @@ -88,7 +88,7 @@ async def rejection_example() -> None: # Send rejection response rejection_response = request.to_function_approval_response(approved=False) - result = await agent.run(ChatMessage("user", [rejection_response]), thread=thread) + result = await agent.run(Message("user", [rejection_response]), thread=thread) print(f"Agent: {result}\n") diff --git 
a/python/samples/getting_started/workflows/README.md b/python/samples/getting_started/workflows/README.md index 7b368335a3..ce4aee4172 100644 --- a/python/samples/getting_started/workflows/README.md +++ b/python/samples/getting_started/workflows/README.md @@ -161,7 +161,7 @@ Notes Sequential orchestration uses a few small adapter nodes for plumbing: -- "input-conversation" normalizes input to `list[ChatMessage]` +- "input-conversation" normalizes input to `list[Message]` - "to-conversation:" converts agent responses into the shared conversation - "complete" publishes the final output event (type='output') These may appear in event streams (executor_invoked/executor_completed). They're analogous to diff --git a/python/samples/getting_started/workflows/_start-here/step3_streaming.py b/python/samples/getting_started/workflows/_start-here/step3_streaming.py index c9cfa6843d..c69f569756 100644 --- a/python/samples/getting_started/workflows/_start-here/step3_streaming.py +++ b/python/samples/getting_started/workflows/_start-here/step3_streaming.py @@ -2,7 +2,7 @@ import asyncio -from agent_framework import AgentResponseUpdate, ChatMessage, WorkflowBuilder +from agent_framework import AgentResponseUpdate, Message, WorkflowBuilder from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential @@ -52,7 +52,7 @@ async def main(): # Run the workflow with the user's initial message and stream events as they occur. async for event in workflow.run( - ChatMessage("user", ["Create a slogan for a new electric SUV that is affordable and fun to drive."]), + Message("user", ["Create a slogan for a new electric SUV that is affordable and fun to drive."]), stream=True, ): # The outputs of the workflow are whatever the agents produce. 
So the events are expected to diff --git a/python/samples/getting_started/workflows/agents/azure_chat_agents_and_executor.py b/python/samples/getting_started/workflows/agents/azure_chat_agents_and_executor.py index 3e3751fd86..8de5b71b73 100644 --- a/python/samples/getting_started/workflows/agents/azure_chat_agents_and_executor.py +++ b/python/samples/getting_started/workflows/agents/azure_chat_agents_and_executor.py @@ -7,7 +7,7 @@ AgentExecutorRequest, AgentExecutorResponse, AgentResponseUpdate, - ChatMessage, + Message, WorkflowBuilder, WorkflowContext, executor, @@ -84,7 +84,7 @@ async def enrich_with_references( f"{external_note}\n\n" "Please update the prior assistant answer so it weaves this note into the guidance." ) - conversation.append(ChatMessage("user", [follow_up])) + conversation.append(Message("user", [follow_up])) # Output a new AgentExecutorRequest for the next agent in the workflow. # Agents in workflows handle this type and will generate a response based on the request. 
diff --git a/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py b/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py index 2c22edb017..1e7bde49c6 100644 --- a/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py +++ b/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py @@ -11,8 +11,8 @@ AgentExecutorResponse, AgentResponse, AgentResponseUpdate, - ChatAgent, - ChatMessage, + Agent, + Message, Executor, WorkflowBuilder, WorkflowContext, @@ -90,7 +90,7 @@ class DraftFeedbackRequest: prompt: str = "" draft_text: str = "" - conversation: list[ChatMessage] = field(default_factory=list) # type: ignore[reportUnknownVariableType] + conversation: list[Message] = field(default_factory=list) # type: ignore[reportUnknownVariableType] class Coordinator(Executor): @@ -116,7 +116,7 @@ async def on_writer_response( # Writer agent response; request human feedback. # Preserve the full conversation so the final editor # can see tool traces and the initial prompt. - conversation: list[ChatMessage] + conversation: list[Message] if draft.full_conversation is not None: conversation = list(draft.full_conversation) else: @@ -147,7 +147,7 @@ async def on_human_feedback( # Human approved the draft as-is; forward it unchanged. await ctx.send_message( AgentExecutorRequest( - messages=original_request.conversation + [ChatMessage("user", text="The draft is approved as-is.")], + messages=original_request.conversation + [Message("user", text="The draft is approved as-is.")], should_respond=True, ), target_id=self.final_editor_id, @@ -155,20 +155,20 @@ async def on_human_feedback( return # Human provided feedback; prompt the writer to revise. 
- conversation: list[ChatMessage] = list(original_request.conversation) + conversation: list[Message] = list(original_request.conversation) instruction = ( "A human reviewer shared the following guidance:\n" f"{note or 'No specific guidance provided.'}\n\n" "Rewrite the draft from the previous assistant message into a polished final version. " "Keep the response under 120 words and reflect any requested tone adjustments." ) - conversation.append(ChatMessage("user", text=instruction)) + conversation.append(Message("user", text=instruction)) await ctx.send_message( AgentExecutorRequest(messages=conversation, should_respond=True), target_id=self.writer_id ) -def create_writer_agent() -> ChatAgent: +def create_writer_agent() -> Agent: """Creates a writer agent with tools.""" return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( name="writer_agent", @@ -182,7 +182,7 @@ def create_writer_agent() -> ChatAgent: ) -def create_final_editor_agent() -> ChatAgent: +def create_final_editor_agent() -> Agent: """Creates a final editor agent.""" return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( name="final_editor_agent", diff --git a/python/samples/getting_started/workflows/agents/custom_agent_executors.py b/python/samples/getting_started/workflows/agents/custom_agent_executors.py index c193e7368d..ee597fc5a0 100644 --- a/python/samples/getting_started/workflows/agents/custom_agent_executors.py +++ b/python/samples/getting_started/workflows/agents/custom_agent_executors.py @@ -3,8 +3,8 @@ import asyncio from agent_framework import ( - ChatAgent, - ChatMessage, + Agent, + Message, Executor, WorkflowBuilder, WorkflowContext, @@ -37,11 +37,11 @@ class Writer(Executor): """Custom executor that owns a domain specific agent responsible for generating content. This class demonstrates: - - Attaching a ChatAgent to an Executor so it participates as a node in a workflow. + - Attaching an Agent to an Executor so it participates as a node in a workflow.
- Using a @handler method to accept a typed input and forward a typed output via ctx.send_message. """ - agent: ChatAgent + agent: Agent def __init__(self, id: str = "writer"): # Create a domain specific agent using your configured AzureOpenAIChatClient. @@ -54,12 +54,12 @@ def __init__(self, id: str = "writer"): super().__init__(id=id) @handler - async def handle(self, message: ChatMessage, ctx: WorkflowContext[list[ChatMessage], str]) -> None: + async def handle(self, message: Message, ctx: WorkflowContext[list[Message], str]) -> None: """Generate content using the agent and forward the updated conversation. Contract for this handler: - - message is the inbound user ChatMessage. - - ctx is a WorkflowContext that expects a list[ChatMessage] to be sent downstream. + - message is the inbound user Message. + - ctx is a WorkflowContext that expects a list[Message] to be sent downstream. Pattern shown here: 1) Seed the conversation with the inbound message. @@ -67,7 +67,7 @@ async def handle(self, message: ChatMessage, ctx: WorkflowContext[list[ChatMessa 3) Forward the cumulative messages to the next executor with ctx.send_message. """ # Start the conversation with the incoming user message. - messages: list[ChatMessage] = [message] + messages: list[Message] = [message] # Run the agent and extend the conversation with the agent's messages. response = await self.agent.run(messages) messages.extend(response.messages) @@ -83,7 +83,7 @@ class Reviewer(Executor): - Yielding the final text outcome to complete the workflow. """ - agent: ChatAgent + agent: Agent def __init__(self, id: str = "reviewer"): # Create a domain specific agent that evaluates and refines content. 
@@ -95,7 +95,7 @@ def __init__(self, id: str = "reviewer"): super().__init__(id=id) @handler - async def handle(self, messages: list[ChatMessage], ctx: WorkflowContext[list[ChatMessage], str]) -> None: + async def handle(self, messages: list[Message], ctx: WorkflowContext[list[Message], str]) -> None: """Review the full conversation transcript and complete with a final string. This node consumes all messages so far. It uses its agent to produce the final text, @@ -118,7 +118,7 @@ async def main(): # Run the workflow with the user's initial message. # For foundational clarity, use run (non streaming) and print the workflow output. events = await workflow.run( - ChatMessage("user", ["Create a slogan for a new electric SUV that is affordable and fun to drive."]) + Message("user", ["Create a slogan for a new electric SUV that is affordable and fun to drive."]) ) # The terminal node yields output; print its contents. outputs = events.get_outputs() diff --git a/python/samples/getting_started/workflows/agents/group_chat_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/group_chat_workflow_as_agent.py index 1693aeb642..b259865824 100644 --- a/python/samples/getting_started/workflows/agents/group_chat_workflow_as_agent.py +++ b/python/samples/getting_started/workflows/agents/group_chat_workflow_as_agent.py @@ -2,7 +2,7 @@ import asyncio -from agent_framework import ChatAgent +from agent_framework import Agent from agent_framework.openai import OpenAIChatClient, OpenAIResponsesClient from agent_framework.orchestrations import GroupChatBuilder @@ -19,14 +19,14 @@ async def main() -> None: - researcher = ChatAgent( + researcher = Agent( name="Researcher", description="Collects relevant background information.", instructions="Gather concise facts that help a teammate answer the question.", chat_client=OpenAIChatClient(model_id="gpt-4o-mini"), ) - writer = ChatAgent( + writer = Agent( name="Writer", description="Synthesizes a polished answer using the 
gathered notes.", instructions="Compose clear and structured answers using any notes provided.", diff --git a/python/samples/getting_started/workflows/agents/handoff_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/handoff_workflow_as_agent.py index f3dcefab7a..15ac42987b 100644 --- a/python/samples/getting_started/workflows/agents/handoff_workflow_as_agent.py +++ b/python/samples/getting_started/workflows/agents/handoff_workflow_as_agent.py @@ -5,8 +5,8 @@ from agent_framework import ( AgentResponse, - ChatAgent, - ChatMessage, + Agent, + Message, Content, WorkflowAgent, tool, @@ -57,7 +57,7 @@ def process_return(order_number: Annotated[str, "Order number to process return return f"Return initiated successfully for order {order_number}. You will receive return instructions via email." -def create_agents(chat_client: AzureOpenAIChatClient) -> tuple[ChatAgent, ChatAgent, ChatAgent, ChatAgent]: +def create_agents(chat_client: AzureOpenAIChatClient) -> tuple[Agent, Agent, Agent, Agent]: """Create and configure the triage and specialist agents. 
Args: @@ -213,7 +213,7 @@ async def main() -> None: function_results = [ Content.from_function_result(call_id=req_id, result=response) for req_id, response in responses.items() ] - response = await agent.run(ChatMessage("tool", function_results)) + response = await agent.run(Message("tool", function_results)) pending_requests = handle_response_and_requests(response) diff --git a/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py index 4d687514c1..f6078cd494 100644 --- a/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py +++ b/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py @@ -3,7 +3,7 @@ import asyncio from agent_framework import ( - ChatAgent, + Agent, HostedCodeInterpreterTool, ) from agent_framework.openai import OpenAIChatClient, OpenAIResponsesClient @@ -22,7 +22,7 @@ async def main() -> None: - researcher_agent = ChatAgent( + researcher_agent = Agent( name="ResearcherAgent", description="Specialist in research and information gathering", instructions=( @@ -32,7 +32,7 @@ async def main() -> None: chat_client=OpenAIChatClient(model_id="gpt-4o-search-preview"), ) - coder_agent = ChatAgent( + coder_agent = Agent( name="CoderAgent", description="A helpful assistant that writes and executes code to process and analyze data.", instructions="You solve questions using code. 
Please provide detailed analysis and computation process.", @@ -41,7 +41,7 @@ async def main() -> None: ) # Create a manager agent for orchestration - manager_agent = ChatAgent( + manager_agent = Agent( name="MagenticManager", description="Orchestrator that coordinates the research and coding workflow", instructions="You coordinate a team to complete complex tasks efficiently.", diff --git a/python/samples/getting_started/workflows/agents/workflow_as_agent_human_in_the_loop.py b/python/samples/getting_started/workflows/agents/workflow_as_agent_human_in_the_loop.py index 1c32b4ed5c..d0cf835037 100644 --- a/python/samples/getting_started/workflows/agents/workflow_as_agent_human_in_the_loop.py +++ b/python/samples/getting_started/workflows/agents/workflow_as_agent_human_in_the_loop.py @@ -16,7 +16,7 @@ sys.path.insert(0, str(_SAMPLES_ROOT)) from agent_framework import ( # noqa: E402 - ChatMessage, + Message, Content, Executor, WorkflowAgent, @@ -159,7 +159,7 @@ async def main() -> None: result=human_response, ) # Send the human review result back to the agent. 
- response = await agent.run(ChatMessage("tool", [human_review_function_result])) + response = await agent.run(Message("tool", [human_review_function_result])) print(f"📤 Agent Response: {response.messages[-1].text}") print("=" * 50) diff --git a/python/samples/getting_started/workflows/agents/workflow_as_agent_reflection_pattern.py b/python/samples/getting_started/workflows/agents/workflow_as_agent_reflection_pattern.py index 2547bcbe20..bd8baabcae 100644 --- a/python/samples/getting_started/workflows/agents/workflow_as_agent_reflection_pattern.py +++ b/python/samples/getting_started/workflows/agents/workflow_as_agent_reflection_pattern.py @@ -6,8 +6,8 @@ from agent_framework import ( AgentResponse, - ChatClientProtocol, - ChatMessage, + SupportsChatGetResponse, + Message, Executor, WorkflowBuilder, WorkflowContext, @@ -44,8 +44,8 @@ class ReviewRequest: """Structured request passed from Worker to Reviewer for evaluation.""" request_id: str - user_messages: list[ChatMessage] - agent_messages: list[ChatMessage] + user_messages: list[Message] + agent_messages: list[Message] @dataclass @@ -60,7 +60,7 @@ class ReviewResponse: class Reviewer(Executor): """Executor that reviews agent responses and provides structured feedback.""" - def __init__(self, id: str, chat_client: ChatClientProtocol) -> None: + def __init__(self, id: str, chat_client: SupportsChatGetResponse) -> None: super().__init__(id=id) self._chat_client = chat_client @@ -75,7 +75,7 @@ class _Response(BaseModel): # Construct review instructions and context. messages = [ - ChatMessage( + Message( role="system", text=( "You are a reviewer for an AI agent. Provide feedback on the " @@ -93,7 +93,7 @@ class _Response(BaseModel): messages.extend(request.agent_messages) # Add explicit review instruction. 
- messages.append(ChatMessage("user", ["Please review the agent's responses."])) + messages.append(Message("user", ["Please review the agent's responses."])) print("Reviewer: Sending review request to LLM...") response = await self._chat_client.get_response(messages=messages, options={"response_format": _Response}) @@ -112,17 +112,17 @@ class _Response(BaseModel): class Worker(Executor): """Executor that generates responses and incorporates feedback when necessary.""" - def __init__(self, id: str, chat_client: ChatClientProtocol) -> None: + def __init__(self, id: str, chat_client: SupportsChatGetResponse) -> None: super().__init__(id=id) self._chat_client = chat_client - self._pending_requests: dict[str, tuple[ReviewRequest, list[ChatMessage]]] = {} + self._pending_requests: dict[str, tuple[ReviewRequest, list[Message]]] = {} @handler - async def handle_user_messages(self, user_messages: list[ChatMessage], ctx: WorkflowContext[ReviewRequest]) -> None: + async def handle_user_messages(self, user_messages: list[Message], ctx: WorkflowContext[ReviewRequest]) -> None: print("Worker: Received user messages, generating response...") # Initialize chat with system prompt. - messages = [ChatMessage("system", ["You are a helpful assistant."])] + messages = [Message("system", ["You are a helpful assistant."])] messages.extend(user_messages) print("Worker: Calling LLM to generate response...") @@ -161,8 +161,8 @@ async def handle_review_response( print("Worker: Regenerating response with feedback...") # Incorporate review feedback. - messages.append(ChatMessage("system", [review.feedback])) - messages.append(ChatMessage("system", ["Please incorporate the feedback and regenerate the response."])) + messages.append(Message("system", [review.feedback])) + messages.append(Message("system", ["Please incorporate the feedback and regenerate the response."])) messages.extend(request.user_messages) # Retry with updated prompt. 
diff --git a/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py b/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py index ab68dc44aa..b6794dba59 100644 --- a/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py +++ b/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py @@ -20,7 +20,7 @@ AgentExecutor, AgentExecutorRequest, AgentExecutorResponse, - ChatMessage, + Message, Executor, FileCheckpointStorage, Workflow, @@ -97,7 +97,7 @@ async def prepare(self, brief: str, ctx: WorkflowContext[AgentExecutorRequest, s # Hand the prompt to the writer agent. We always route through the # workflow context so the runtime can capture messages for checkpointing. await ctx.send_message( - AgentExecutorRequest(messages=[ChatMessage("user", text=prompt)], should_respond=True), + AgentExecutorRequest(messages=[Message("user", text=prompt)], should_respond=True), target_id=self._agent_id, ) @@ -159,7 +159,7 @@ async def on_human_feedback( f"Human guidance: {reply}" ) await ctx.send_message( - AgentExecutorRequest(messages=[ChatMessage("user", text=prompt)], should_respond=True), + AgentExecutorRequest(messages=[Message("user", text=prompt)], should_respond=True), target_id=self._writer_id, ) diff --git a/python/samples/getting_started/workflows/checkpoint/handoff_with_tool_approval_checkpoint_resume.py b/python/samples/getting_started/workflows/checkpoint/handoff_with_tool_approval_checkpoint_resume.py index 99875c94c6..b9c89e54f9 100644 --- a/python/samples/getting_started/workflows/checkpoint/handoff_with_tool_approval_checkpoint_resume.py +++ b/python/samples/getting_started/workflows/checkpoint/handoff_with_tool_approval_checkpoint_resume.py @@ -8,8 +8,8 @@ from agent_framework import ( AgentResponse, - ChatAgent, - ChatMessage, + Agent, + Message, Content, FileCheckpointStorage, Workflow, @@ -57,7 +57,7 @@ def 
submit_refund(refund_description: str, amount: str, order_id: str) -> str: return f"refund recorded for order {order_id} (amount: {amount}) with details: {refund_description}" -def create_agents(client: AzureOpenAIChatClient) -> tuple[ChatAgent, ChatAgent, ChatAgent]: +def create_agents(client: AzureOpenAIChatClient) -> tuple[Agent, Agent, Agent]: """Create a simple handoff scenario: triage, refund, and order specialists.""" triage = client.as_agent( @@ -91,7 +91,7 @@ def create_agents(client: AzureOpenAIChatClient) -> tuple[ChatAgent, ChatAgent, return triage, refund, order -def create_workflow(checkpoint_storage: FileCheckpointStorage) -> tuple[Workflow, ChatAgent, ChatAgent, ChatAgent]: +def create_workflow(checkpoint_storage: FileCheckpointStorage) -> tuple[Workflow, Agent, Agent, Agent]: """Build the handoff workflow with checkpointing enabled.""" client = AzureOpenAIChatClient(credential=AzureCliCredential()) @@ -284,9 +284,9 @@ async def resume_with_responses( elif event.type == "output": print("\n[Workflow Output Event - Conversation Update]") - if event.data and isinstance(event.data, list) and all(isinstance(msg, ChatMessage) for msg in event.data): # type: ignore - # Now safe to cast event.data to list[ChatMessage] - conversation = cast(list[ChatMessage], event.data) # type: ignore + if event.data and isinstance(event.data, list) and all(isinstance(msg, Message) for msg in event.data): # type: ignore + # Now safe to cast event.data to list[Message] + conversation = cast(list[Message], event.data) # type: ignore for msg in conversation[-3:]: # Show last 3 messages author = msg.author_name or msg.role text = msg.text[:100] + "..." 
if len(msg.text) > 100 else msg.text diff --git a/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py b/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py index 5d74ec42d3..879d4c3222 100644 --- a/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py +++ b/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py @@ -5,7 +5,7 @@ from typing import Annotated, Any from agent_framework import ( - ChatMessage, + Message, WorkflowExecutor, tool, ) @@ -134,7 +134,7 @@ async def main() -> None: output_data = event.data if isinstance(output_data, list): for item in output_data: # type: ignore - if isinstance(item, ChatMessage) and item.text: + if isinstance(item, Message) and item.text: print(f"\n[Final Answer]: {item.text}") print("\n" + "=" * 70) diff --git a/python/samples/getting_started/workflows/control-flow/edge_condition.py b/python/samples/getting_started/workflows/control-flow/edge_condition.py index 03a438a8de..ee91fbf69e 100644 --- a/python/samples/getting_started/workflows/control-flow/edge_condition.py +++ b/python/samples/getting_started/workflows/control-flow/edge_condition.py @@ -8,8 +8,8 @@ AgentExecutor, AgentExecutorRequest, # Input message bundle for an AgentExecutor AgentExecutorResponse, - ChatAgent, # Output from an AgentExecutor - ChatMessage, + Agent, + Message, WorkflowBuilder, # Fluent builder for wiring executors and edges WorkflowContext, # Per-run context and event bus executor, # Decorator to declare a Python function as a workflow executor @@ -122,13 +122,13 @@ async def to_email_assistant_request( Extracts DetectionResult.email_content and forwards it as a user message. """ - # Bridge executor. Converts a structured DetectionResult into a ChatMessage and forwards it as a new request. + # Bridge executor. Converts a structured DetectionResult into a Message and forwards it as a new request. 
detection = DetectionResult.model_validate_json(response.agent_response.text) - user_msg = ChatMessage("user", text=detection.email_content) + user_msg = Message("user", text=detection.email_content) await ctx.send_message(AgentExecutorRequest(messages=[user_msg], should_respond=True)) -def create_spam_detector_agent() -> ChatAgent: +def create_spam_detector_agent() -> Agent: """Helper to create a spam detection agent.""" # AzureCliCredential uses your current az login. This avoids embedding secrets in code. return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( @@ -142,7 +142,7 @@ def create_spam_detector_agent() -> ChatAgent: ) -def create_email_assistant_agent() -> ChatAgent: +def create_email_assistant_agent() -> Agent: """Helper to create an email assistant agent.""" # AzureCliCredential uses your current az login. This avoids embedding secrets in code. return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( @@ -185,7 +185,7 @@ async def main() -> None: # Execute the workflow. Since the start is an AgentExecutor, pass an AgentExecutorRequest. # The workflow completes when it becomes idle (no more work to do). 
- request = AgentExecutorRequest(messages=[ChatMessage("user", text=email)], should_respond=True) + request = AgentExecutorRequest(messages=[Message("user", text=email)], should_respond=True) events = await workflow.run(request) outputs = events.get_outputs() if outputs: diff --git a/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py b/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py index 68f14c1eac..0cf8c560e3 100644 --- a/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py +++ b/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py @@ -12,8 +12,8 @@ AgentExecutor, AgentExecutorRequest, AgentExecutorResponse, - ChatAgent, - ChatMessage, + Agent, + Message, WorkflowBuilder, WorkflowContext, WorkflowEvent, @@ -91,7 +91,7 @@ async def store_email(email_text: str, ctx: WorkflowContext[AgentExecutorRequest ctx.set_state(CURRENT_EMAIL_ID_KEY, new_email.email_id) await ctx.send_message( - AgentExecutorRequest(messages=[ChatMessage("user", text=new_email.email_content)], should_respond=True) + AgentExecutorRequest(messages=[Message("user", text=new_email.email_content)], should_respond=True) ) @@ -118,7 +118,7 @@ async def submit_to_email_assistant(analysis: AnalysisResult, ctx: WorkflowConte email: Email = ctx.get_state(f"{EMAIL_STATE_PREFIX}{analysis.email_id}") await ctx.send_message( - AgentExecutorRequest(messages=[ChatMessage("user", text=email.email_content)], should_respond=True) + AgentExecutorRequest(messages=[Message("user", text=email.email_content)], should_respond=True) ) @@ -133,7 +133,7 @@ async def summarize_email(analysis: AnalysisResult, ctx: WorkflowContext[AgentEx # Only called for long NotSpam emails by selection_func email: Email = ctx.get_state(f"{EMAIL_STATE_PREFIX}{analysis.email_id}") await ctx.send_message( - AgentExecutorRequest(messages=[ChatMessage("user", text=email.email_content)], should_respond=True) + 
AgentExecutorRequest(messages=[Message("user", text=email.email_content)], should_respond=True) ) @@ -180,7 +180,7 @@ async def database_access(analysis: AnalysisResult, ctx: WorkflowContext[Never, await ctx.add_event(DatabaseEvent(f"Email {analysis.email_id} saved to database.")) -def create_email_analysis_agent() -> ChatAgent: +def create_email_analysis_agent() -> Agent: """Creates the email analysis agent.""" return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( instructions=( @@ -193,7 +193,7 @@ def create_email_analysis_agent() -> ChatAgent: ) -def create_email_assistant_agent() -> ChatAgent: +def create_email_assistant_agent() -> Agent: """Creates the email assistant agent.""" return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( instructions=("You are an email assistant that helps users draft responses to emails with professionalism."), @@ -202,7 +202,7 @@ def create_email_assistant_agent() -> ChatAgent: ) -def create_email_summary_agent() -> ChatAgent: +def create_email_summary_agent() -> Agent: """Creates the email summary agent.""" return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( instructions=("You are an assistant that helps users summarize emails."), diff --git a/python/samples/getting_started/workflows/control-flow/simple_loop.py b/python/samples/getting_started/workflows/control-flow/simple_loop.py index 1361d4be2b..c26ac48942 100644 --- a/python/samples/getting_started/workflows/control-flow/simple_loop.py +++ b/python/samples/getting_started/workflows/control-flow/simple_loop.py @@ -7,8 +7,8 @@ AgentExecutor, AgentExecutorRequest, AgentExecutorResponse, - ChatAgent, - ChatMessage, + Agent, + Message, Executor, WorkflowBuilder, WorkflowContext, @@ -95,7 +95,7 @@ async def submit(self, guess: int, ctx: WorkflowContext[AgentExecutorRequest]) - f"Target: {self._target}\nGuess: {guess}\nResponse:" ) await ctx.send_message( - AgentExecutorRequest(messages=[ChatMessage("user", text=prompt)], 
should_respond=True), + AgentExecutorRequest(messages=[Message("user", text=prompt)], should_respond=True), target_id=self._judge_agent_id, ) @@ -114,7 +114,7 @@ async def parse(self, response: AgentExecutorResponse, ctx: WorkflowContext[Numb await ctx.send_message(NumberSignal.BELOW) -def create_judge_agent() -> ChatAgent: +def create_judge_agent() -> Agent: """Create a judge agent that evaluates guesses.""" return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( instructions=("You strictly respond with one of: MATCHED, ABOVE, BELOW based on the given target and guess."), diff --git a/python/samples/getting_started/workflows/control-flow/switch_case_edge_group.py b/python/samples/getting_started/workflows/control-flow/switch_case_edge_group.py index 28378ccbd2..78fb3c5c80 100644 --- a/python/samples/getting_started/workflows/control-flow/switch_case_edge_group.py +++ b/python/samples/getting_started/workflows/control-flow/switch_case_edge_group.py @@ -11,8 +11,8 @@ AgentExecutorRequest, # Message bundle sent to an AgentExecutor AgentExecutorResponse, # Result returned by an AgentExecutor Case, - ChatAgent, # Case entry for a switch-case edge group - ChatMessage, + Agent, + Message, Default, # Default branch when no cases match WorkflowBuilder, # Fluent builder for assembling the graph WorkflowContext, # Per-run context and event bus @@ -99,7 +99,7 @@ async def store_email(email_text: str, ctx: WorkflowContext[AgentExecutorRequest # Kick off the detector by forwarding the email as a user message to the spam_detection_agent. 
await ctx.send_message( - AgentExecutorRequest(messages=[ChatMessage("user", text=new_email.email_content)], should_respond=True) + AgentExecutorRequest(messages=[Message("user", text=new_email.email_content)], should_respond=True) ) @@ -120,7 +120,7 @@ async def submit_to_email_assistant(detection: DetectionResult, ctx: WorkflowCon # Load the original content from workflow state using the id carried in DetectionResult. email: Email = ctx.get_state(f"{EMAIL_STATE_PREFIX}{detection.email_id}") await ctx.send_message( - AgentExecutorRequest(messages=[ChatMessage("user", text=email.email_content)], should_respond=True) + AgentExecutorRequest(messages=[Message("user", text=email.email_content)], should_respond=True) ) @@ -152,7 +152,7 @@ async def handle_uncertain(detection: DetectionResult, ctx: WorkflowContext[Neve raise RuntimeError("This executor should only handle Uncertain messages.") -def create_spam_detection_agent() -> ChatAgent: +def create_spam_detection_agent() -> Agent: """Create and return the spam detection agent.""" return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( instructions=( @@ -166,7 +166,7 @@ def create_spam_detection_agent() -> ChatAgent: ) -def create_email_assistant_agent() -> ChatAgent: +def create_email_assistant_agent() -> Agent: """Create and return the email assistant agent.""" return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( instructions=("You are an email assistant that helps users draft responses to emails with professionalism."), diff --git a/python/samples/getting_started/workflows/human-in-the-loop/agents_with_HITL.py b/python/samples/getting_started/workflows/human-in-the-loop/agents_with_HITL.py index 7923bced7a..56430de9bd 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/agents_with_HITL.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/agents_with_HITL.py @@ -9,7 +9,7 @@ AgentExecutorResponse, AgentResponse, AgentResponseUpdate, - ChatMessage, 
+ Message, Executor, WorkflowBuilder, WorkflowContext, @@ -47,7 +47,7 @@ class DraftFeedbackRequest: """Payload sent for human review.""" prompt: str = "" - conversation: list[ChatMessage] = field(default_factory=lambda: []) + conversation: list[Message] = field(default_factory=lambda: []) class Coordinator(Executor): @@ -71,7 +71,7 @@ async def on_writer_response( # Writer agent response; request human feedback. # Preserve the full conversation so that the final editor has context. - conversation: list[ChatMessage] + conversation: list[Message] if draft.full_conversation is not None: conversation = list(draft.full_conversation) else: @@ -100,7 +100,7 @@ async def on_human_feedback( # Human approved the draft as-is; forward it unchanged. await ctx.send_message( AgentExecutorRequest( - messages=original_request.conversation + [ChatMessage("user", text="The draft is approved as-is.")], + messages=original_request.conversation + [Message("user", text="The draft is approved as-is.")], should_respond=True, ), target_id=self.final_editor_name, @@ -108,14 +108,14 @@ async def on_human_feedback( return # Human provided feedback; prompt the writer to revise. - conversation: list[ChatMessage] = list(original_request.conversation) + conversation: list[Message] = list(original_request.conversation) instruction = ( "A human reviewer shared the following guidance:\n" f"{note or 'No specific guidance provided.'}\n\n" "Rewrite the draft from the previous assistant message into a polished final version. " "Keep the response under 120 words and reflect any requested tone adjustments." 
) - conversation.append(ChatMessage("user", text=instruction)) + conversation.append(Message("user", text=instruction)) await ctx.send_message( AgentExecutorRequest(messages=conversation, should_respond=True), target_id=self.writer_name ) diff --git a/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py b/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py index fbc996038c..56b3a49a99 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py @@ -27,7 +27,7 @@ from agent_framework import ( AgentExecutorResponse, - ChatMessage, + Message, WorkflowEvent, ) from agent_framework.azure import AzureOpenAIChatClient @@ -76,7 +76,7 @@ async def aggregate_with_synthesis(results: list[AgentExecutorResponse]) -> Any: # Build prompt with human guidance if provided guidance_text = f"\n\nHuman guidance: {human_guidance}" if human_guidance else "" - system_msg = ChatMessage( + system_msg = Message( "system", text=( "You are a synthesis expert. Consolidate the following analyst perspectives " @@ -84,7 +84,7 @@ async def aggregate_with_synthesis(results: list[AgentExecutorResponse]) -> Any: "prioritize aspects as directed." 
), ) - user_msg = ChatMessage("user", text="\n\n".join(expert_sections) + guidance_text) + user_msg = Message("user", text="\n\n".join(expert_sections) + guidance_text) response = await _chat_client.get_response([system_msg, user_msg]) return response.messages[-1].text if response.messages else "" diff --git a/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py b/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py index 6a400a5bab..5a61d8cf4e 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py @@ -28,7 +28,7 @@ from agent_framework import ( AgentExecutorResponse, - ChatMessage, + Message, WorkflowEvent, ) from agent_framework.azure import AzureOpenAIChatClient @@ -51,7 +51,7 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str print("=" * 60) print("Final discussion summary:") # To make the type checker happy, we cast event.data to the expected type - outputs = cast(list[ChatMessage], event.data) + outputs = cast(list[Message], event.data) for msg in outputs: speaker = msg.author_name or msg.role print(f"[{speaker}]: {msg.text}") diff --git a/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py b/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py index fcadfe1575..babd9d54e9 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py @@ -8,7 +8,7 @@ AgentExecutorRequest, AgentExecutorResponse, AgentResponseUpdate, - ChatMessage, + Message, Executor, WorkflowBuilder, WorkflowContext, @@ -84,7 +84,7 @@ async def start(self, _: str, ctx: WorkflowContext[AgentExecutorRequest]) -> Non - Input is a simple starter 
token (ignored here). - Output is an AgentExecutorRequest that triggers the agent to produce a guess. """ - user = ChatMessage("user", text="Start by making your first guess.") + user = Message("user", text="Start by making your first guess.") await ctx.send_message(AgentExecutorRequest(messages=[user], should_respond=True)) @handler @@ -136,7 +136,7 @@ async def on_human_feedback( f"Feedback: {reply}. Your last guess was {last_guess}. " f"Use this feedback to adjust and make your next guess (1-10)." ) - user_msg = ChatMessage("user", text=feedback_text) + user_msg = Message("user", text=feedback_text) await ctx.send_message(AgentExecutorRequest(messages=[user_msg], should_respond=True)) diff --git a/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py b/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py index 503f016a71..f42c476086 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py @@ -27,7 +27,7 @@ from agent_framework import ( AgentExecutorResponse, - ChatMessage, + Message, WorkflowEvent, ) from agent_framework.azure import AzureOpenAIChatClient @@ -49,7 +49,7 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str print("WORKFLOW COMPLETE") print("=" * 60) print("Final output:") - outputs = cast(list[ChatMessage], event.data) + outputs = cast(list[Message], event.data) for message in outputs: print(f"[{message.author_name or message.role}]: {message.text}") diff --git a/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py b/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py index b136be2b72..1dd78a1d76 100644 --- a/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py +++ b/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py @@ -7,8 
+7,8 @@ AgentExecutor, # Wraps a ChatAgent as an Executor for use in workflows AgentExecutorRequest, # The message bundle sent to an AgentExecutor AgentExecutorResponse, # The structured result returned by an AgentExecutor - ChatMessage, # Chat message structure Executor, # Base class for custom Python executors + Message, # Chat message structure WorkflowBuilder, # Fluent builder for wiring the workflow graph WorkflowContext, # Per run context and event bus handler, # Decorator to mark an Executor method as invokable @@ -41,7 +41,7 @@ class DispatchToExperts(Executor): @handler async def dispatch(self, prompt: str, ctx: WorkflowContext[AgentExecutorRequest]) -> None: # Wrap the incoming prompt as a user message for each expert and request a response. - initial_message = ChatMessage("user", text=prompt) + initial_message = Message("user", text=prompt) await ctx.send_message(AgentExecutorRequest(messages=[initial_message], should_respond=True)) diff --git a/python/samples/getting_started/workflows/state-management/state_with_agents.py b/python/samples/getting_started/workflows/state-management/state_with_agents.py index 8567a8386a..068ac70eb4 100644 --- a/python/samples/getting_started/workflows/state-management/state_with_agents.py +++ b/python/samples/getting_started/workflows/state-management/state_with_agents.py @@ -9,8 +9,8 @@ from agent_framework import ( AgentExecutorRequest, AgentExecutorResponse, - ChatAgent, - ChatMessage, + Agent, + Message, WorkflowBuilder, WorkflowContext, executor, @@ -103,7 +103,7 @@ async def store_email(email_text: str, ctx: WorkflowContext[AgentExecutorRequest ctx.set_state(CURRENT_EMAIL_ID_KEY, new_email.email_id) await ctx.send_message( - AgentExecutorRequest(messages=[ChatMessage("user", text=new_email.email_content)], should_respond=True) + AgentExecutorRequest(messages=[Message("user", text=new_email.email_content)], should_respond=True) ) @@ -134,7 +134,7 @@ async def submit_to_email_assistant(detection: DetectionResult, ctx: 
WorkflowCon # Load the original content by id from workflow state and forward it to the assistant. email: Email = ctx.get_state(f"{EMAIL_STATE_PREFIX}{detection.email_id}") await ctx.send_message( - AgentExecutorRequest(messages=[ChatMessage("user", text=email.email_content)], should_respond=True) + AgentExecutorRequest(messages=[Message("user", text=email.email_content)], should_respond=True) ) @@ -154,7 +154,7 @@ async def handle_spam(detection: DetectionResult, ctx: WorkflowContext[Never, st raise RuntimeError("This executor should only handle spam messages.") -def create_spam_detection_agent() -> ChatAgent: +def create_spam_detection_agent() -> Agent: """Creates a spam detection agent.""" return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( instructions=( @@ -167,7 +167,7 @@ def create_spam_detection_agent() -> ChatAgent: ) -def create_email_assistant_agent() -> ChatAgent: +def create_email_assistant_agent() -> Agent: """Creates an email assistant agent.""" return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( instructions=( diff --git a/python/samples/getting_started/workflows/state-management/workflow_kwargs.py b/python/samples/getting_started/workflows/state-management/workflow_kwargs.py index d89115463f..d574c85dfb 100644 --- a/python/samples/getting_started/workflows/state-management/workflow_kwargs.py +++ b/python/samples/getting_started/workflows/state-management/workflow_kwargs.py @@ -4,7 +4,7 @@ import json from typing import Annotated, Any, cast -from agent_framework import ChatMessage, tool +from agent_framework import Message, tool from agent_framework.openai import OpenAIChatClient from agent_framework.orchestrations import SequentialBuilder from pydantic import Field @@ -121,10 +121,10 @@ async def main() -> None: stream=True, ): if event.type == "output": - output_data = cast(list[ChatMessage], event.data) + output_data = cast(list[Message], event.data) if isinstance(output_data, list): for item in 
output_data: - if isinstance(item, ChatMessage) and item.text: + if isinstance(item, Message) and item.text: print(f"\n[Final Answer]: {item.text}") print("\n" + "=" * 70) diff --git a/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py index 6eb6e2bc6a..a8da54f17e 100644 --- a/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py +++ b/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py @@ -5,7 +5,7 @@ from typing import Annotated from agent_framework import ( - ChatMessage, + Message, Content, WorkflowEvent, tool, @@ -91,10 +91,10 @@ def _print_output(event: WorkflowEvent) -> None: if not event.data: raise ValueError("WorkflowEvent has no data") - if not isinstance(event.data, list) and not all(isinstance(msg, ChatMessage) for msg in event.data): - raise ValueError("WorkflowEvent data is not a list of ChatMessage") + if not isinstance(event.data, list) and not all(isinstance(msg, Message) for msg in event.data): + raise ValueError("WorkflowEvent data is not a list of Message") - messages: list[ChatMessage] = event.data # type: ignore + messages: list[Message] = event.data # type: ignore print("\n" + "-" * 60) print("Workflow completed. 
Aggregated results from both agents:") diff --git a/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py index ebabfc508f..ef8a900cda 100644 --- a/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py +++ b/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py @@ -5,7 +5,7 @@ from typing import Annotated, cast from agent_framework import ( - ChatMessage, + Message, Content, WorkflowEvent, tool, @@ -105,7 +105,7 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str # The output of the workflow comes from the orchestrator and it's a list of messages print("\n" + "=" * 60) print("Workflow summary:") - outputs = cast(list[ChatMessage], event.data) + outputs = cast(list[Message], event.data) for msg in outputs: speaker = msg.author_name or msg.role print(f"[{speaker}]: {msg.text}") diff --git a/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py index c203ecc084..359fc4bb00 100644 --- a/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py +++ b/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py @@ -5,7 +5,7 @@ from typing import Annotated, cast from agent_framework import ( - ChatMessage, + Message, Content, WorkflowEvent, tool, @@ -78,7 +78,7 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str # The output of the workflow comes from the orchestrator and it's a list of messages print("\n" + "=" * 60) print("Workflow summary:") - outputs = cast(list[ChatMessage], event.data) + outputs = cast(list[Message], event.data) for msg in outputs: speaker = msg.author_name or msg.role print(f"[{speaker}]: 
{msg.text}") diff --git a/python/samples/getting_started/workflows/visualization/concurrent_with_visualization.py b/python/samples/getting_started/workflows/visualization/concurrent_with_visualization.py index 803730e9fd..e9e042020d 100644 --- a/python/samples/getting_started/workflows/visualization/concurrent_with_visualization.py +++ b/python/samples/getting_started/workflows/visualization/concurrent_with_visualization.py @@ -7,8 +7,8 @@ AgentExecutor, AgentExecutorRequest, AgentExecutorResponse, - ChatMessage, Executor, + Message, WorkflowBuilder, WorkflowContext, WorkflowViz, @@ -39,7 +39,7 @@ class DispatchToExperts(Executor): @handler async def dispatch(self, prompt: str, ctx: WorkflowContext[AgentExecutorRequest]) -> None: # Wrap the incoming prompt as a user message for each expert and request a response. - initial_message = ChatMessage("user", text=prompt) + initial_message = Message("user", text=prompt) await ctx.send_message(AgentExecutorRequest(messages=[initial_message], should_respond=True)) diff --git a/python/samples/semantic-kernel-migration/README.md b/python/samples/semantic-kernel-migration/README.md index c1fa894a4c..d04239a00d 100644 --- a/python/samples/semantic-kernel-migration/README.md +++ b/python/samples/semantic-kernel-migration/README.md @@ -7,7 +7,7 @@ This gallery helps Semantic Kernel (SK) developers move to the Microsoft Agent F ## What’s Included ### Chat completion parity -- [01_basic_chat_completion.py](chat_completion/01_basic_chat_completion.py) — Minimal SK `ChatCompletionAgent` and AF `ChatAgent` conversation. +- [01_basic_chat_completion.py](chat_completion/01_basic_chat_completion.py) — Minimal SK `ChatCompletionAgent` and AF `Agent` conversation. - [02_chat_completion_with_tool.py](chat_completion/02_chat_completion_with_tool.py) — Adds a simple tool/function call in both SDKs. 
- [03_chat_completion_thread_and_stream.py](chat_completion/03_chat_completion_thread_and_stream.py) — Demonstrates thread reuse and streaming prompts. diff --git a/python/samples/semantic-kernel-migration/chat_completion/01_basic_chat_completion.py b/python/samples/semantic-kernel-migration/chat_completion/01_basic_chat_completion.py index 74ecd1ecf5..63db51fb43 100644 --- a/python/samples/semantic-kernel-migration/chat_completion/01_basic_chat_completion.py +++ b/python/samples/semantic-kernel-migration/chat_completion/01_basic_chat_completion.py @@ -8,7 +8,7 @@ # uv run samples/semantic-kernel-migration/chat_completion/01_basic_chat_completion.py # Copyright (c) Microsoft. All rights reserved. -"""Basic SK ChatCompletionAgent vs Agent Framework ChatAgent. +"""Basic SK ChatCompletionAgent vs Agent Framework Agent. Both samples expect OpenAI-compatible environment variables (OPENAI_API_KEY or Azure OpenAI configuration). Update the prompts or client wiring to match your @@ -34,10 +34,10 @@ async def run_semantic_kernel() -> None: async def run_agent_framework() -> None: - """Call Agent Framework's ChatAgent created from OpenAIChatClient.""" + """Call Agent Framework's Agent created from OpenAIChatClient.""" from agent_framework.openai import OpenAIChatClient - # AF constructs a lightweight ChatAgent backed by OpenAIChatClient. + # AF constructs a lightweight Agent backed by OpenAIChatClient. 
chat_agent = OpenAIChatClient().as_agent( name="Support", instructions="Answer in one sentence.", diff --git a/python/samples/semantic-kernel-migration/openai_responses/01_basic_responses_agent.py b/python/samples/semantic-kernel-migration/openai_responses/01_basic_responses_agent.py index 3402a2e1e3..2cea118570 100644 --- a/python/samples/semantic-kernel-migration/openai_responses/01_basic_responses_agent.py +++ b/python/samples/semantic-kernel-migration/openai_responses/01_basic_responses_agent.py @@ -35,11 +35,11 @@ async def run_semantic_kernel() -> None: async def run_agent_framework() -> None: - from agent_framework import ChatAgent + from agent_framework import Agent from agent_framework.openai import OpenAIResponsesClient - # AF ChatAgent can swap in an OpenAIResponsesClient directly. - chat_agent = ChatAgent( + # AF Agent can swap in an OpenAIResponsesClient directly. + chat_agent = Agent( chat_client=OpenAIResponsesClient(), instructions="Answer in one concise sentence.", name="Expert", diff --git a/python/samples/semantic-kernel-migration/openai_responses/02_responses_agent_with_tool.py b/python/samples/semantic-kernel-migration/openai_responses/02_responses_agent_with_tool.py index c770763bce..b2e57f073b 100644 --- a/python/samples/semantic-kernel-migration/openai_responses/02_responses_agent_with_tool.py +++ b/python/samples/semantic-kernel-migration/openai_responses/02_responses_agent_with_tool.py @@ -42,7 +42,7 @@ def add(self, a: float, b: float) -> float: async def run_agent_framework() -> None: - from agent_framework import ChatAgent + from agent_framework import Agent from agent_framework._tools import tool from agent_framework.openai import OpenAIResponsesClient @@ -50,7 +50,7 @@ async def run_agent_framework() -> None: async def add(a: float, b: float) -> float: return a + b - chat_agent = ChatAgent( + chat_agent = Agent( chat_client=OpenAIResponsesClient(), instructions="Use the add tool when math is required.", name="MathExpert", diff --git 
a/python/samples/semantic-kernel-migration/openai_responses/03_responses_agent_structured_output.py b/python/samples/semantic-kernel-migration/openai_responses/03_responses_agent_structured_output.py index bd37c3b33c..4526657ffe 100644 --- a/python/samples/semantic-kernel-migration/openai_responses/03_responses_agent_structured_output.py +++ b/python/samples/semantic-kernel-migration/openai_responses/03_responses_agent_structured_output.py @@ -47,10 +47,10 @@ async def run_semantic_kernel() -> None: async def run_agent_framework() -> None: - from agent_framework import ChatAgent + from agent_framework import Agent from agent_framework.openai import OpenAIResponsesClient - chat_agent = ChatAgent( + chat_agent = Agent( chat_client=OpenAIResponsesClient(), instructions="Return launch briefs as structured JSON.", name="ProductMarketer", diff --git a/python/samples/semantic-kernel-migration/orchestrations/concurrent_basic.py b/python/samples/semantic-kernel-migration/orchestrations/concurrent_basic.py index 72f0c24252..e98ac967e9 100644 --- a/python/samples/semantic-kernel-migration/orchestrations/concurrent_basic.py +++ b/python/samples/semantic-kernel-migration/orchestrations/concurrent_basic.py @@ -15,10 +15,12 @@ from collections.abc import Sequence from typing import cast -from agent_framework import ChatMessage +from agent_framework import Message from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.orchestrations import ConcurrentBuilder from azure.identity import AzureCliCredential -from semantic_kernel.agents import Agent, ChatCompletionAgent, ConcurrentOrchestration +from semantic_kernel.agents import Agent as SKAgent +from semantic_kernel.agents import ChatCompletionAgent, ConcurrentOrchestration from semantic_kernel.agents.runtime import InProcessRuntime from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion from semantic_kernel.contents import ChatMessageContent @@ -31,7 +33,7 @@ 
###################################################################### -def build_semantic_kernel_agents() -> list[Agent]: +def build_semantic_kernel_agents() -> list[SKAgent]: credential = AzureCliCredential() physics_agent = ChatCompletionAgent( @@ -83,7 +85,7 @@ def _print_semantic_kernel_outputs(outputs: Sequence[ChatMessageContent]) -> Non ###################################################################### -async def run_agent_framework_example(prompt: str) -> Sequence[list[ChatMessage]]: +async def run_agent_framework_example(prompt: str) -> Sequence[list[Message]]: chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) physics = chat_client.as_agent( @@ -98,15 +100,15 @@ async def run_agent_framework_example(prompt: str) -> Sequence[list[ChatMessage] workflow = ConcurrentBuilder(participants=[physics, chemistry]).build() - outputs: list[list[ChatMessage]] = [] + outputs: list[list[Message]] = [] async for event in workflow.run(prompt, stream=True): if event.type == "output": - outputs.append(cast(list[ChatMessage], event.data)) + outputs.append(cast(list[Message], event.data)) return outputs -def _print_agent_framework_outputs(conversations: Sequence[Sequence[ChatMessage]]) -> None: +def _print_agent_framework_outputs(conversations: Sequence[Sequence[Message]]) -> None: if not conversations: print("No Agent Framework output.") return diff --git a/python/samples/semantic-kernel-migration/orchestrations/group_chat.py b/python/samples/semantic-kernel-migration/orchestrations/group_chat.py index 539041a537..235f21e61a 100644 --- a/python/samples/semantic-kernel-migration/orchestrations/group_chat.py +++ b/python/samples/semantic-kernel-migration/orchestrations/group_chat.py @@ -16,11 +16,10 @@ from collections.abc import Sequence from typing import Any, cast -from agent_framework import ChatAgent, ChatMessage +from agent_framework import Agent, GroupChatBuilder, Message from agent_framework.azure import AzureOpenAIChatClient, 
AzureOpenAIResponsesClient -from agent_framework.orchestrations import GroupChatBuilder from azure.identity import AzureCliCredential -from semantic_kernel.agents import Agent, ChatCompletionAgent, GroupChatOrchestration +from semantic_kernel.agents import ChatCompletionAgent, GroupChatOrchestration from semantic_kernel.agents.orchestration.group_chat import ( BooleanResult, GroupChatManager, @@ -224,7 +223,7 @@ async def run_semantic_kernel_example(task: str) -> str: async def run_agent_framework_example(task: str) -> str: credential = AzureCliCredential() - researcher = ChatAgent( + researcher = Agent( name="Researcher", description="Collects background information and potential resources.", instructions=( @@ -234,17 +233,19 @@ async def run_agent_framework_example(task: str) -> str: chat_client=AzureOpenAIChatClient(credential=credential), ) - planner = ChatAgent( + planner = Agent( name="Planner", description="Turns the collected notes into a concrete action plan.", instructions=("Propose a structured action plan that accounts for logistics, roles, and timeline."), chat_client=AzureOpenAIResponsesClient(credential=credential), ) - workflow = GroupChatBuilder( - participants=[researcher, planner], - orchestrator_agent=AzureOpenAIChatClient(credential=credential).as_agent(), - ).build() + workflow = ( + GroupChatBuilder() + .with_orchestrator(agent=AzureOpenAIChatClient(credential=credential).as_agent()) + .participants([researcher, planner]) + .build() + ) final_response = "" async for event in workflow.run(task, stream=True): @@ -253,7 +254,7 @@ async def run_agent_framework_example(task: str) -> str: if isinstance(data, list) and len(data) > 0: # Get the final message from the conversation final_message = data[-1] - final_response = final_message.text or "" if isinstance(final_message, ChatMessage) else str(data) + final_response = final_message.text or "" if isinstance(final_message, Message) else str(data) else: final_response = str(data) return 
final_response
diff --git a/python/samples/semantic-kernel-migration/orchestrations/handoff.py b/python/samples/semantic-kernel-migration/orchestrations/handoff.py
index 3fe024a9f4..125413cfc9 100644
--- a/python/samples/semantic-kernel-migration/orchestrations/handoff.py
+++ b/python/samples/semantic-kernel-migration/orchestrations/handoff.py
@@ -16,7 +16,7 @@
 from typing import cast
 
 from agent_framework import (
-    ChatMessage,
+    Message,
     WorkflowEvent,
 )
 from agent_framework.orchestrations import HandoffBuilder, HandoffUserInputRequest
@@ -228,10 +228,10 @@ def _collect_handoff_requests(events: list[WorkflowEvent]) -> list[WorkflowEvent
     return requests
 
 
-def _extract_final_conversation(events: list[WorkflowEvent]) -> list[ChatMessage]:
+def _extract_final_conversation(events: list[WorkflowEvent]) -> list[Message]:
     for event in events:
         if event.type == "output":
-            data = cast(list[ChatMessage], event.data)
+            data = cast(list[Message], event.data)
             return data
     return []
 
diff --git a/python/samples/semantic-kernel-migration/orchestrations/magentic.py b/python/samples/semantic-kernel-migration/orchestrations/magentic.py
index d6509fb4d7..263b1a1b5d 100644
--- a/python/samples/semantic-kernel-migration/orchestrations/magentic.py
+++ b/python/samples/semantic-kernel-migration/orchestrations/magentic.py
@@ -15,11 +15,9 @@
 from collections.abc import Sequence
 from typing import cast
 
-from agent_framework import ChatAgent, HostedCodeInterpreterTool
+from agent_framework import Agent, HostedCodeInterpreterTool, MagenticBuilder
 from agent_framework.openai import OpenAIChatClient, OpenAIResponsesClient
-from agent_framework.orchestrations import MagenticBuilder
 from semantic_kernel.agents import (
-    Agent,
     ChatCompletionAgent,
     MagenticOrchestration,
     OpenAIAssistantAgent,
@@ -129,7 +127,7 @@ def _print_semantic_kernel_outputs(outputs: Sequence[ChatMessageContent]) -> Non
 
 
 async def run_agent_framework_example(prompt: str) -> str | None:
-    researcher = ChatAgent(
+    researcher = Agent(
name="ResearcherAgent", description="Specialist in research and information gathering", instructions=( @@ -138,7 +136,7 @@ async def run_agent_framework_example(prompt: str) -> str | None: chat_client=OpenAIChatClient(ai_model_id="gpt-4o-search-preview"), ) - coder = ChatAgent( + coder = Agent( name="CoderAgent", description="A helpful assistant that writes and executes code to process and analyze data.", instructions="You solve questions using code. Please provide detailed analysis and computation process.", @@ -147,14 +145,14 @@ async def run_agent_framework_example(prompt: str) -> str | None: ) # Create a manager agent for orchestration - manager_agent = ChatAgent( + manager_agent = Agent( name="MagenticManager", description="Orchestrator that coordinates the research and coding workflow", instructions="You coordinate a team to complete complex tasks efficiently.", chat_client=OpenAIChatClient(), ) - workflow = MagenticBuilder(participants=[researcher, coder], manager_agent=manager_agent).build() + workflow = MagenticBuilder().participants([researcher, coder]).with_manager(agent=manager_agent).build() final_text: str | None = None async for event in workflow.run(prompt, stream=True): diff --git a/python/samples/semantic-kernel-migration/orchestrations/sequential.py b/python/samples/semantic-kernel-migration/orchestrations/sequential.py index 13bfdf82a0..d0d21343cd 100644 --- a/python/samples/semantic-kernel-migration/orchestrations/sequential.py +++ b/python/samples/semantic-kernel-migration/orchestrations/sequential.py @@ -15,7 +15,7 @@ from collections.abc import Sequence from typing import cast -from agent_framework import ChatMessage +from agent_framework import Message from agent_framework.azure import AzureOpenAIChatClient from agent_framework.orchestrations import SequentialBuilder from azure.identity import AzureCliCredential @@ -70,7 +70,7 @@ async def sk_agent_response_callback( ###################################################################### 
-async def run_agent_framework_example(prompt: str) -> list[ChatMessage]: +async def run_agent_framework_example(prompt: str) -> list[Message]: chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) writer = chat_client.as_agent( @@ -85,10 +85,10 @@ async def run_agent_framework_example(prompt: str) -> list[ChatMessage]: workflow = SequentialBuilder(participants=[writer, reviewer]).build() - conversation_outputs: list[list[ChatMessage]] = [] + conversation_outputs: list[list[Message]] = [] async for event in workflow.run(prompt, stream=True): if event.type == "output": - conversation_outputs.append(cast(list[ChatMessage], event.data)) + conversation_outputs.append(cast(list[Message], event.data)) return conversation_outputs[-1] if conversation_outputs else [] @@ -112,7 +112,7 @@ async def run_semantic_kernel_example(prompt: str) -> str: await runtime.stop_when_idle() -def _format_conversation(conversation: list[ChatMessage]) -> None: +def _format_conversation(conversation: list[Message]) -> None: if not conversation: print("No Agent Framework output.") return From 84e4d1b0d1836b9ef1bee6f1105dac44104f7184 Mon Sep 17 00:00:00 2001 From: Eduard van Valkenburg Date: Sat, 7 Feb 2026 16:21:23 +0100 Subject: [PATCH 02/16] [BREAKING] Rename Agent chat_client parameter to client --- python/CODING_STANDARD.md | 4 +- python/DEV_SETUP.md | 2 +- python/README.md | 12 +- python/packages/ag-ui/README.md | 4 +- .../ag-ui/agent_framework_ag_ui/_client.py | 8 +- .../_orchestration/_tooling.py | 10 +- .../ag-ui/agent_framework_ag_ui/_run.py | 8 +- .../agent_framework_ag_ui_examples/README.md | 50 +-- .../agents/document_writer_agent.py | 6 +- .../agents/human_in_the_loop_agent.py | 6 +- .../agents/recipe_agent.py | 6 +- .../agents/research_assistant_agent.py | 6 +- .../agents/simple_agent.py | 6 +- .../agents/task_planner_agent.py | 6 +- .../agents/task_steps_agent.py | 16 +- .../agents/ui_generator_agent.py | 6 +- .../agents/weather_agent.py | 6 +- 
.../server/api/backend_tool_rendering.py | 4 +- .../server/main.py | 16 +- .../packages/ag-ui/getting_started/README.md | 4 +- .../getting_started/client_with_agent.py | 4 +- .../packages/ag-ui/getting_started/server.py | 2 +- python/packages/ag-ui/tests/ag_ui/conftest.py | 4 +- .../ag_ui/test_agent_wrapper_comprehensive.py | 54 +-- .../ag-ui/tests/ag_ui/test_endpoint.py | 46 +- .../tests/ag_ui/test_structured_output.py | 14 +- .../ag-ui/tests/ag_ui/test_tooling.py | 8 +- .../anthropic/tests/test_anthropic_client.py | 146 +++--- .../_agent_provider.py | 2 +- .../_project_provider.py | 2 +- .../tests/test_azure_ai_agent_client.py | 422 +++++++++--------- .../azure-ai/tests/test_azure_ai_client.py | 8 +- .../bedrock/samples/bedrock_sample.py | 2 +- python/packages/chatkit/README.md | 2 +- python/packages/core/AGENTS.md | 2 +- python/packages/core/README.md | 12 +- .../packages/core/agent_framework/_agents.py | 16 +- .../packages/core/agent_framework/_clients.py | 2 +- python/packages/core/agent_framework/_mcp.py | 32 +- .../packages/core/agent_framework/_memory.py | 2 +- .../core/agent_framework/_middleware.py | 24 +- .../core/agent_framework/_serialization.py | 2 +- .../_workflows/_agent_executor.py | 4 +- .../openai/_assistant_provider.py | 4 +- .../azure/test_azure_assistants_client.py | 82 ++-- .../tests/azure/test_azure_chat_client.py | 12 +- .../azure/test_azure_responses_client.py | 4 +- python/packages/core/tests/core/conftest.py | 8 +- .../packages/core/tests/core/test_agents.py | 152 +++---- .../core/test_as_tool_kwargs_propagation.py | 50 +-- .../packages/core/tests/core/test_clients.py | 12 +- .../core/test_function_invocation_logic.py | 4 +- python/packages/core/tests/core/test_mcp.py | 4 +- .../core/tests/core/test_middleware.py | 46 +- .../core/test_middleware_context_result.py | 4 +- .../tests/core/test_middleware_with_agent.py | 166 +++---- .../tests/core/test_middleware_with_chat.py | 24 +- .../openai/test_openai_assistants_client.py | 204 ++++----- 
.../test_agent_executor_tool_calls.py | 8 +- .../agent_framework_declarative/_loader.py | 26 +- .../_workflows/_factory.py | 4 +- .../tests/test_declarative_loader.py | 22 +- python/packages/devui/README.md | 6 +- .../devui/agent_framework_devui/_server.py | 4 +- .../devui/agent_framework_devui/_utils.py | 12 +- python/packages/devui/tests/devui/conftest.py | 12 +- .../devui/tests/devui/test_execution.py | 2 +- .../packages/devui/tests/devui/test_server.py | 6 +- .../agent_framework_durabletask/_worker.py | 2 +- .../packages/lab/gaia/samples/openai_agent.py | 4 +- python/packages/lab/lightning/README.md | 2 +- .../lab/lightning/samples/train_math_agent.py | 2 +- .../lab/lightning/tests/test_lightning.py | 4 +- python/packages/lab/tau2/README.md | 4 +- .../tau2/agent_framework_lab_tau2/runner.py | 4 +- .../_handoff.py | 2 +- .../_magentic.py | 2 +- .../orchestrations/tests/test_group_chat.py | 4 +- .../orchestrations/tests/test_handoff.py | 6 +- .../orchestrations/tests/test_magentic.py | 4 +- python/packages/purview/README.md | 10 +- .../agent_framework_purview/_middleware.py | 2 +- .../purview/tests/test_chat_middleware.py | 32 +- python/samples/README.md | 18 +- python/samples/concepts/response_stream.py | 2 +- python/samples/concepts/tools/README.md | 14 +- python/samples/concepts/typed_options.py | 4 +- .../samples/demos/chatkit-integration/app.py | 4 +- .../workflow_evaluation/create_workflow.py | 18 +- .../azure_ai_with_application_endpoint.py | 2 +- ..._ai_with_code_interpreter_file_download.py | 2 +- .../azure_assistants_with_code_interpreter.py | 2 +- ...zure_assistants_with_existing_assistant.py | 2 +- .../azure_assistants_with_function_tools.py | 6 +- .../azure_assistants_with_thread.py | 8 +- .../azure_chat_client_with_function_tools.py | 6 +- .../azure_chat_client_with_thread.py | 8 +- ...responses_client_code_interpreter_files.py | 2 +- ..._responses_client_with_code_interpreter.py | 2 +- ...azure_responses_client_with_file_search.py | 2 +- 
...re_responses_client_with_function_tools.py | 6 +- .../azure_responses_client_with_hosted_mcp.py | 8 +- .../azure_responses_client_with_thread.py | 8 +- .../getting_started/agents/custom/README.md | 2 +- .../openai_chat_client_with_function_tools.py | 6 +- .../openai_chat_client_with_local_mcp.py | 2 +- .../openai/openai_chat_client_with_thread.py | 8 +- .../openai_chat_client_with_web_search.py | 2 +- .../openai/openai_responses_client_basic.py | 4 +- ..._responses_client_with_code_interpreter.py | 2 +- ...nses_client_with_code_interpreter_files.py | 2 +- ...penai_responses_client_with_file_search.py | 2 +- ...ai_responses_client_with_function_tools.py | 6 +- ...openai_responses_client_with_hosted_mcp.py | 8 +- .../openai_responses_client_with_local_mcp.py | 4 +- .../openai_responses_client_with_thread.py | 8 +- ...openai_responses_client_with_web_search.py | 2 +- .../02_multi_agent/function_app.py | 6 +- .../function_app.py | 6 +- .../function_app.py | 6 +- .../azure_functions/08_mcp_server/README.md | 8 +- .../08_mcp_server/function_app.py | 8 +- .../chat_client/chat_response_cancellation.py | 4 +- .../aggregate_context_provider.py | 6 +- .../azure_ai_search/README.md | 4 +- .../azure_ai_with_search_context_agentic.py | 2 +- .../azure_ai_with_search_context_semantic.py | 2 +- .../simple_context_provider.py | 10 +- .../declarative/get_weather_agent.py | 2 +- .../samples/getting_started/devui/README.md | 2 +- .../devui/azure_responses_agent/agent.py | 2 +- .../devui/foundry_agent/agent.py | 2 +- .../getting_started/devui/in_memory_mode.py | 6 +- .../devui/weather_agent_azure/agent.py | 2 +- .../devui/workflow_agents/workflow.py | 12 +- .../getting_started/mcp/mcp_api_key_auth.py | 2 +- .../getting_started/mcp/mcp_github_pat.py | 2 +- .../observability/agent_observability.py | 2 +- .../agent_with_foundry_tracing.py | 2 +- .../azure_ai_agent_observability.py | 2 +- .../configure_otel_providers_with_env_var.py | 8 +- ...onfigure_otel_providers_with_parameters.py | 
8 +- .../getting_started/orchestrations/README.md | 2 +- .../orchestrations/concurrent_agents.py | 8 +- .../concurrent_custom_agent_executors.py | 20 +- .../concurrent_custom_aggregator.py | 12 +- .../group_chat_agent_manager.py | 8 +- .../group_chat_philosophical_debate.py | 18 +- .../group_chat_simple_selector.py | 10 +- .../orchestrations/handoff_autonomous.py | 12 +- .../orchestrations/handoff_simple.py | 16 +- .../orchestrations/magentic.py | 6 +- .../orchestrations/magentic_checkpoint.py | 6 +- .../magentic_human_plan_review.py | 6 +- .../orchestrations/sequential_agents.py | 6 +- .../sequential_custom_executors.py | 4 +- .../getting_started/purview_agent/README.md | 2 +- .../purview_agent/sample_purview_agent.py | 16 +- .../tools/function_tool_with_approval.py | 2 +- ...function_tool_with_approval_and_threads.py | 4 +- .../getting_started/workflows/README.md | 2 +- .../_start-here/step2_agents_in_a_workflow.py | 6 +- .../workflows/_start-here/step3_streaming.py | 6 +- .../agents/concurrent_workflow_as_agent.py | 8 +- .../agents/group_chat_workflow_as_agent.py | 4 +- .../agents/handoff_workflow_as_agent.py | 16 +- .../agents/magentic_workflow_as_agent.py | 6 +- .../agents/sequential_workflow_as_agent.py | 6 +- .../workflow_as_agent_human_in_the_loop.py | 2 +- .../agents/workflow_as_agent_kwargs.py | 4 +- .../workflow_as_agent_reflection_pattern.py | 12 +- .../agents/workflow_as_agent_with_thread.py | 4 +- .../workflow_as_agent_checkpoint.py | 14 +- .../composition/sub_workflow_kwargs.py | 4 +- .../declarative/customer_support/main.py | 14 +- .../declarative/deep_research/main.py | 16 +- .../declarative/function_tools/README.md | 4 +- .../declarative/function_tools/main.py | 4 +- .../workflows/declarative/marketing/main.py | 8 +- .../declarative/student_teacher/main.py | 6 +- .../group_chat_request_info.py | 10 +- .../sequential_request_info.py | 8 +- .../state-management/workflow_kwargs.py | 4 +- .../concurrent_builder_tool_approval.py | 6 +- 
.../group_chat_builder_tool_approval.py | 6 +- .../sequential_builder_tool_approval.py | 4 +- .../01_basic_responses_agent.py | 2 +- .../02_responses_agent_with_tool.py | 2 +- .../03_responses_agent_structured_output.py | 2 +- .../orchestrations/concurrent_basic.py | 6 +- .../orchestrations/group_chat.py | 4 +- .../orchestrations/magentic.py | 6 +- .../orchestrations/sequential.py | 6 +- .../test_chat_client_samples.py | 16 +- 194 files changed, 1322 insertions(+), 1322 deletions(-) diff --git a/python/CODING_STANDARD.md b/python/CODING_STANDARD.md index c05f6c2e18..e052b60669 100644 --- a/python/CODING_STANDARD.md +++ b/python/CODING_STANDARD.md @@ -381,12 +381,12 @@ def create_client( Use Google-style docstrings for all public APIs: ```python -def create_agent(name: str, chat_client: SupportsChatGetResponse) -> Agent: +def create_agent(name: str, client: SupportsChatGetResponse) -> Agent: """Create a new agent with the specified configuration. Args: name: The name of the agent. - chat_client: The chat client to use for communication. + client: The chat client to use for communication. Returns: True if the strings are the same, False otherwise. 
diff --git a/python/DEV_SETUP.md b/python/DEV_SETUP.md index f189031468..025e3ce36e 100644 --- a/python/DEV_SETUP.md +++ b/python/DEV_SETUP.md @@ -116,7 +116,7 @@ You will then configure the ChatClient class with the keyword argument `env_file ```python from agent_framework.openai import OpenAIChatClient -chat_client = OpenAIChatClient(env_file_path="openai.env") +client = OpenAIChatClient(env_file_path="openai.env") ``` ## Tests diff --git a/python/README.md b/python/README.md index 4e7ac202f1..4c3d68bba1 100644 --- a/python/README.md +++ b/python/README.md @@ -62,7 +62,7 @@ You can also override environment variables by explicitly passing configuration ```python from agent_framework.azure import AzureOpenAIChatClient -chat_client = AzureOpenAIChatClient( +client = AzureOpenAIChatClient( api_key='', endpoint='', deployment_name='', @@ -83,7 +83,7 @@ from agent_framework.openai import OpenAIChatClient async def main(): agent = Agent( - chat_client=OpenAIChatClient(), + client=OpenAIChatClient(), instructions=""" 1) A robot may not injure a human being... 2) A robot must obey orders given it by human beings... @@ -163,7 +163,7 @@ def get_menu_specials() -> str: async def main(): agent = Agent( - chat_client=OpenAIChatClient(), + client=OpenAIChatClient(), instructions="You are a helpful assistant that can provide weather and restaurant information.", tools=[get_weather, get_menu_specials] ) @@ -196,13 +196,13 @@ from agent_framework.openai import OpenAIChatClient async def main(): # Create specialized agents writer = Agent( - chat_client=OpenAIChatClient(), + client=OpenAIChatClient(), name="Writer", instructions="You are a creative content writer. Generate and refine slogans based on feedback." ) reviewer = Agent( - chat_client=OpenAIChatClient(), + client=OpenAIChatClient(), name="Reviewer", instructions="You are a critical reviewer. Provide detailed feedback on proposed slogans." 
) @@ -238,7 +238,7 @@ For more advanced orchestration patterns including Sequential, Concurrent, Group ## More Examples & Samples - [Getting Started with Agents](https://github.com/microsoft/agent-framework/tree/main/python/samples/getting_started/agents): Basic agent creation and tool usage -- [Chat Client Examples](https://github.com/microsoft/agent-framework/tree/main/python/samples/getting_started/chat_client): Direct chat client usage patterns +- [Chat Client Examples](https://github.com/microsoft/agent-framework/tree/main/python/samples/getting_started/client): Direct chat client usage patterns - [Azure AI Integration](https://github.com/microsoft/agent-framework/tree/main/python/packages/azure-ai): Azure AI integration - [Workflow Samples](https://github.com/microsoft/agent-framework/tree/main/python/samples/getting_started/workflows): Advanced multi-agent patterns diff --git a/python/packages/ag-ui/README.md b/python/packages/ag-ui/README.md index 7ac367611c..3488d9c8bf 100644 --- a/python/packages/ag-ui/README.md +++ b/python/packages/ag-ui/README.md @@ -22,7 +22,7 @@ from agent_framework.ag_ui import add_agent_framework_fastapi_endpoint agent = Agent( name="my_agent", instructions="You are a helpful assistant.", - chat_client=AzureOpenAIChatClient( + client=AzureOpenAIChatClient( endpoint="https://your-resource.openai.azure.com/", deployment_name="gpt-4o-mini", api_key="your-api-key", @@ -104,7 +104,7 @@ async def verify_api_key(api_key: str | None = Security(API_KEY_HEADER)) -> None raise HTTPException(status_code=401, detail="Invalid or missing API key") # Create agent and app -agent = Agent(name="my_agent", instructions="...", chat_client=...) +agent = Agent(name="my_agent", instructions="...", client=...) 
app = FastAPI() # Register endpoint WITH authentication diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_client.py b/python/packages/ag-ui/agent_framework_ag_ui/_client.py index 717d8cdefa..1df1ba84e2 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_client.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_client.py @@ -69,10 +69,10 @@ def _unwrap_server_function_call_contents(contents: MutableSequence[Content | di ) -def _apply_server_function_call_unwrap(chat_client: BaseChatClientT) -> BaseChatClientT: +def _apply_server_function_call_unwrap(client: BaseChatClientT) -> BaseChatClientT: """Class decorator that unwraps server-side function calls after tool handling.""" - original_get_response = chat_client.get_response + original_get_response = client.get_response @wraps(original_get_response) def response_wrapper( @@ -105,8 +105,8 @@ def _map_update(update: ChatResponseUpdate) -> ChatResponseUpdate: _unwrap_server_function_call_contents(cast(MutableSequence[Content | dict[str, Any]], update.contents)) return update - chat_client.get_response = response_wrapper # type: ignore[assignment] - return chat_client + client.get_response = response_wrapper # type: ignore[assignment] + return client @_apply_server_function_call_unwrap diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_orchestration/_tooling.py b/python/packages/ag-ui/agent_framework_ag_ui/_orchestration/_tooling.py index ede8a49985..442138649a 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_orchestration/_tooling.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_orchestration/_tooling.py @@ -71,18 +71,18 @@ def register_additional_client_tools(agent: SupportsAgentRun, client_tools: list Args: agent: Agent instance to register tools on. Works with Agent - or any agent with a chat_client attribute. + or any agent with a client attribute. client_tools: List of client tools to register. 
""" if not client_tools: return - chat_client = getattr(agent, "chat_client", None) - if chat_client is None: + client = getattr(agent, "client", None) + if client is None: return - if isinstance(chat_client, BaseChatClient) and chat_client.function_invocation_configuration is not None: # type: ignore[attr-defined] - chat_client.function_invocation_configuration["additional_tools"] = client_tools # type: ignore[attr-defined] + if isinstance(client, BaseChatClient) and client.function_invocation_configuration is not None: # type: ignore[attr-defined] + client.function_invocation_configuration["additional_tools"] = client_tools # type: ignore[attr-defined] logger.debug(f"[TOOLS] Registered {len(client_tools)} client tools as additional_tools (declaration-only)") diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_run.py b/python/packages/ag-ui/agent_framework_ag_ui/_run.py index eda625f0ef..54233dc78b 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_run.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_run.py @@ -592,7 +592,7 @@ async def _resolve_approval_responses( Args: messages: List of messages (will be modified in place) tools: List of available tools - agent: The agent instance (to get chat_client and config) + agent: The agent instance (to get client and config) run_kwargs: Kwargs for tool execution """ fcc_todo = _collect_approval_responses(messages) @@ -605,12 +605,12 @@ async def _resolve_approval_responses( # Execute approved tool calls if approved_responses and tools: - chat_client = getattr(agent, "chat_client", None) + client = getattr(agent, "client", None) config = normalize_function_invocation_configuration( - getattr(chat_client, "function_invocation_configuration", None) + getattr(client, "function_invocation_configuration", None) ) middleware_pipeline = FunctionMiddlewarePipeline( - *getattr(chat_client, "function_middleware", ()), + *getattr(client, "function_middleware", ()), *run_kwargs.get("middleware", ()), ) # Filter out 
AG-UI-specific kwargs that should not be passed to tool execution diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/README.md b/python/packages/ag-ui/agent_framework_ag_ui_examples/README.md index 97d6a3b911..e11a05d863 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/README.md +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/README.md @@ -46,7 +46,7 @@ from agent_framework.ag_ui import add_agent_framework_fastapi_endpoint agent = Agent( name="my_agent", instructions="You are a helpful assistant.", - chat_client=AzureOpenAIChatClient(model_id="gpt-4o"), + client=AzureOpenAIChatClient(model_id="gpt-4o"), ) # Create FastAPI app and add AG-UI endpoint @@ -76,15 +76,15 @@ All example agents are implemented as **factory functions** that accept any chat Complete examples for all AG-UI features are available: -- `simple_agent(chat_client)` - Basic agentic chat (Feature 1) -- `weather_agent(chat_client)` - Backend tool rendering (Feature 2) -- `human_in_the_loop_agent(chat_client)` - Human-in-the-loop with step customization (Feature 3) -- `task_steps_agent_wrapped(chat_client)` - Agentic generative UI with step execution (Feature 4) -- `ui_generator_agent(chat_client)` - Tool-based generative UI (Feature 5) -- `recipe_agent(chat_client)` - Shared state management (Feature 6) -- `document_writer_agent(chat_client)` - Predictive state updates (Feature 7) -- `research_assistant_agent(chat_client)` - Research with progress events -- `task_planner_agent(chat_client)` - Task planning with approvals +- `simple_agent(client)` - Basic agentic chat (Feature 1) +- `weather_agent(client)` - Backend tool rendering (Feature 2) +- `human_in_the_loop_agent(client)` - Human-in-the-loop with step customization (Feature 3) +- `task_steps_agent_wrapped(client)` - Agentic generative UI with step execution (Feature 4) +- `ui_generator_agent(client)` - Tool-based generative UI (Feature 5) +- `recipe_agent(client)` - Shared state management (Feature 6) 
+- `document_writer_agent(client)` - Predictive state updates (Feature 7) +- `research_assistant_agent(client)` - Research with progress events +- `task_planner_agent(client)` - Task planning with approvals ### Using Example Agents @@ -150,16 +150,16 @@ from agent_framework_ag_ui_examples.agents import ( app = FastAPI(title="AG-UI Examples") # Create a chat client (shared across all agents, or create individual ones) -chat_client = AzureOpenAIChatClient(model_id="gpt-4") +client = AzureOpenAIChatClient(model_id="gpt-4") # Add all example endpoints -add_agent_framework_fastapi_endpoint(app, simple_agent(chat_client), "/agentic_chat") -add_agent_framework_fastapi_endpoint(app, weather_agent(chat_client), "/backend_tool_rendering") -add_agent_framework_fastapi_endpoint(app, human_in_the_loop_agent(chat_client), "/human_in_the_loop") -add_agent_framework_fastapi_endpoint(app, task_steps_agent_wrapped(chat_client), "/agentic_generative_ui") # type: ignore[arg-type] -add_agent_framework_fastapi_endpoint(app, ui_generator_agent(chat_client), "/tool_based_generative_ui") -add_agent_framework_fastapi_endpoint(app, recipe_agent(chat_client), "/shared_state") -add_agent_framework_fastapi_endpoint(app, document_writer_agent(chat_client), "/predictive_state_updates") +add_agent_framework_fastapi_endpoint(app, simple_agent(client), "/agentic_chat") +add_agent_framework_fastapi_endpoint(app, weather_agent(client), "/backend_tool_rendering") +add_agent_framework_fastapi_endpoint(app, human_in_the_loop_agent(client), "/human_in_the_loop") +add_agent_framework_fastapi_endpoint(app, task_steps_agent_wrapped(client), "/agentic_generative_ui") # type: ignore[arg-type] +add_agent_framework_fastapi_endpoint(app, ui_generator_agent(client), "/tool_based_generative_ui") +add_agent_framework_fastapi_endpoint(app, recipe_agent(client), "/shared_state") +add_agent_framework_fastapi_endpoint(app, document_writer_agent(client), "/predictive_state_updates") ``` ## Architecture @@ -196,11 +196,11 
@@ def my_tool(param: str) -> str: """My custom tool.""" return f"Result: {param}" -def my_custom_agent(chat_client: SupportsChatGetResponse) -> AgentFrameworkAgent: +def my_custom_agent(client: SupportsChatGetResponse) -> AgentFrameworkAgent: """Create a custom agent with the specified chat client. Args: - chat_client: The chat client to use for the agent + client: The chat client to use for the agent Returns: A configured AgentFrameworkAgent instance @@ -208,7 +208,7 @@ def my_custom_agent(chat_client: SupportsChatGetResponse) -> AgentFrameworkAgent agent = Agent( name="my_custom_agent", instructions="Custom instructions here", - chat_client=chat_client, + client=client, tools=[my_tool], ) @@ -220,8 +220,8 @@ def my_custom_agent(chat_client: SupportsChatGetResponse) -> AgentFrameworkAgent # Use it from agent_framework.azure import AzureOpenAIChatClient -chat_client = AzureOpenAIChatClient() -agent = my_custom_agent(chat_client) +client = AzureOpenAIChatClient() +agent = my_custom_agent(client) ``` ### Shared State @@ -236,7 +236,7 @@ from agent_framework.ag_ui import AgentFrameworkAgent # Create your agent agent = Agent( name="recipe_agent", - chat_client=AzureOpenAIChatClient(model_id="gpt-4o"), + client=AzureOpenAIChatClient(model_id="gpt-4o"), ) state_schema = { @@ -273,7 +273,7 @@ from agent_framework.ag_ui import AgentFrameworkAgent # Create your agent agent = Agent( name="document_writer", - chat_client=AzureOpenAIChatClient(model_id="gpt-4o"), + client=AzureOpenAIChatClient(model_id="gpt-4o"), ) predict_state_config = { diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/document_writer_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/document_writer_agent.py index 80c109b6bd..427583a589 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/document_writer_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/document_writer_agent.py @@ -40,11 +40,11 @@ def 
write_document(document: str) -> str: ) -def document_writer_agent(chat_client: SupportsChatGetResponse) -> AgentFrameworkAgent: +def document_writer_agent(client: SupportsChatGetResponse) -> AgentFrameworkAgent: """Create a document writer agent with predictive state updates. Args: - chat_client: The chat client to use for the agent + client: The chat client to use for the agent Returns: A configured AgentFrameworkAgent instance with document writing capabilities @@ -52,7 +52,7 @@ def document_writer_agent(chat_client: SupportsChatGetResponse) -> AgentFramewor agent = Agent( name="document_writer", instructions=_DOCUMENT_WRITER_INSTRUCTIONS, - chat_client=chat_client, + client=client, tools=[write_document], ) diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/human_in_the_loop_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/human_in_the_loop_agent.py index 34a5ee952b..b04b6619e4 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/human_in_the_loop_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/human_in_the_loop_agent.py @@ -43,11 +43,11 @@ def generate_task_steps(steps: list[TaskStep]) -> str: return f"Generated {len(steps)} execution steps for the task." -def human_in_the_loop_agent(chat_client: SupportsChatGetResponse[Any]) -> Agent[Any]: +def human_in_the_loop_agent(client: SupportsChatGetResponse[Any]) -> Agent[Any]: """Create a human-in-the-loop agent using tool-based approach for predictive state. Args: - chat_client: The chat client to use for the agent + client: The chat client to use for the agent Returns: A configured Agent instance with human-in-the-loop capabilities @@ -81,6 +81,6 @@ def human_in_the_loop_agent(chat_client: SupportsChatGetResponse[Any]) -> Agent[ After the user approves and the function executes, THEN provide a brief acknowledgment like: "The plan has been created with X steps selected." 
""", - chat_client=chat_client, + client=client, tools=[generate_task_steps], ) diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/recipe_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/recipe_agent.py index 10c4969bdd..f2d1aecdff 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/recipe_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/recipe_agent.py @@ -104,11 +104,11 @@ def update_recipe(recipe: Recipe) -> str: """ -def recipe_agent(chat_client: SupportsChatGetResponse[Any]) -> AgentFrameworkAgent: +def recipe_agent(client: SupportsChatGetResponse[Any]) -> AgentFrameworkAgent: """Create a recipe agent with streaming state updates. Args: - chat_client: The chat client to use for the agent + client: The chat client to use for the agent Returns: A configured AgentFrameworkAgent instance with recipe management @@ -116,7 +116,7 @@ def recipe_agent(chat_client: SupportsChatGetResponse[Any]) -> AgentFrameworkAge agent = Agent( name="recipe_agent", instructions=_RECIPE_INSTRUCTIONS, - chat_client=chat_client, + client=client, tools=[update_recipe], ) diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/research_assistant_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/research_assistant_agent.py index 8847cf0aab..fbd6a0b89c 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/research_assistant_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/research_assistant_agent.py @@ -88,11 +88,11 @@ async def analyze_data(dataset: str) -> str: ) -def research_assistant_agent(chat_client: SupportsChatGetResponse[Any]) -> AgentFrameworkAgent: +def research_assistant_agent(client: SupportsChatGetResponse[Any]) -> AgentFrameworkAgent: """Create a research assistant agent. 
Args: - chat_client: The chat client to use for the agent + client: The chat client to use for the agent Returns: A configured AgentFrameworkAgent instance with research capabilities @@ -100,7 +100,7 @@ def research_assistant_agent(chat_client: SupportsChatGetResponse[Any]) -> Agent agent = Agent( name="research_assistant", instructions=_RESEARCH_ASSISTANT_INSTRUCTIONS, - chat_client=chat_client, + client=client, tools=[research_topic, create_presentation, analyze_data], ) diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/simple_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/simple_agent.py index 99cb67db31..5be88cbfd3 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/simple_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/simple_agent.py @@ -7,11 +7,11 @@ from agent_framework import Agent, SupportsChatGetResponse -def simple_agent(chat_client: SupportsChatGetResponse[Any]) -> Agent[Any]: +def simple_agent(client: SupportsChatGetResponse[Any]) -> Agent[Any]: """Create a simple chat agent. Args: - chat_client: The chat client to use for the agent + client: The chat client to use for the agent Returns: A configured Agent instance @@ -19,5 +19,5 @@ def simple_agent(chat_client: SupportsChatGetResponse[Any]) -> Agent[Any]: return Agent[Any]( name="simple_chat_agent", instructions="You are a helpful assistant. 
Be concise and friendly.", - chat_client=chat_client, + client=client, ) diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_planner_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_planner_agent.py index 085e6ff1c4..18065dd15f 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_planner_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_planner_agent.py @@ -61,11 +61,11 @@ def book_meeting_room(room_name: str, date: str, start_time: str, end_time: str) ) -def task_planner_agent(chat_client: SupportsChatGetResponse[Any]) -> AgentFrameworkAgent: +def task_planner_agent(client: SupportsChatGetResponse[Any]) -> AgentFrameworkAgent: """Create a task planner agent with user approval for actions. Args: - chat_client: The chat client to use for the agent + client: The chat client to use for the agent Returns: A configured AgentFrameworkAgent instance with task planning capabilities @@ -73,7 +73,7 @@ def task_planner_agent(chat_client: SupportsChatGetResponse[Any]) -> AgentFramew agent = Agent( name="task_planner", instructions=_TASK_PLANNER_INSTRUCTIONS, - chat_client=chat_client, + client=client, tools=[create_calendar_event, send_email, book_meeting_room], ) diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_steps_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_steps_agent.py index 8bc31a55a4..be2da28a9d 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_steps_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_steps_agent.py @@ -54,11 +54,11 @@ def generate_task_steps(steps: list[TaskStep]) -> str: return "Steps generated." 
-def _create_task_steps_agent(chat_client: SupportsChatGetResponse[Any]) -> AgentFrameworkAgent: +def _create_task_steps_agent(client: SupportsChatGetResponse[Any]) -> AgentFrameworkAgent: """Create the task steps agent using tool-based approach for streaming. Args: - chat_client: The chat client to use for the agent + client: The chat client to use for the agent Returns: A configured AgentFrameworkAgent instance @@ -83,7 +83,7 @@ def _create_task_steps_agent(chat_client: SupportsChatGetResponse[Any]) -> Agent - "Installing platform" - "Adding finishing touches" """, - chat_client=chat_client, + client=client, tools=[generate_task_steps], ) @@ -220,7 +220,7 @@ async def run_agent(self, input_data: dict[str, Any]) -> AsyncGenerator[Any]: # Get the underlying chat agent and client chat_agent = self._base_agent.agent # type: ignore - chat_client = chat_agent.chat_client # type: ignore + client = chat_agent.client # type: ignore # Build messages for summary call @@ -270,7 +270,7 @@ async def run_agent(self, input_data: dict[str, Any]) -> AsyncGenerator[Any]: # Stream completion accumulated_text = "" - async for chunk in chat_client.get_response(messages=messages, stream=True): + async for chunk in client.get_response(messages=messages, stream=True): # chunk is ChatResponseUpdate if hasattr(chunk, "text") and chunk.text: accumulated_text += chunk.text @@ -332,14 +332,14 @@ async def run_agent(self, input_data: dict[str, Any]) -> AsyncGenerator[Any]: yield run_finished_event -def task_steps_agent_wrapped(chat_client: SupportsChatGetResponse[Any]) -> TaskStepsAgentWithExecution: +def task_steps_agent_wrapped(client: SupportsChatGetResponse[Any]) -> TaskStepsAgentWithExecution: """Create a task steps agent with execution simulation. 
Args: - chat_client: The chat client to use for the agent + client: The chat client to use for the agent Returns: A wrapped agent instance with step execution simulation """ - base_agent = _create_task_steps_agent(chat_client) + base_agent = _create_task_steps_agent(client) return TaskStepsAgentWithExecution(base_agent) diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/ui_generator_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/ui_generator_agent.py index e4fb275494..20cf76a891 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/ui_generator_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/ui_generator_agent.py @@ -168,11 +168,11 @@ OptionsT = TypeVar("OptionsT", bound=TypedDict, default="ChatOptions") # type: ignore[valid-type] -def ui_generator_agent(chat_client: SupportsChatGetResponse[TOptions]) -> AgentFrameworkAgent: +def ui_generator_agent(client: SupportsChatGetResponse[TOptions]) -> AgentFrameworkAgent: """Create a UI generator agent with custom React component rendering. 
Args: - chat_client: The chat client to use for the agent + client: The chat client to use for the agent Returns: A configured AgentFrameworkAgent instance with UI generation capabilities @@ -180,7 +180,7 @@ def ui_generator_agent(chat_client: SupportsChatGetResponse[TOptions]) -> AgentF agent = Agent( name="ui_generator", instructions=_UI_GENERATOR_INSTRUCTIONS, - chat_client=chat_client, + client=client, tools=[generate_haiku, create_chart, display_timeline, show_comparison_table], # Force tool usage - the LLM MUST call a tool, cannot respond with plain text default_options={"tool_choice": "required"}, # type: ignore diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/weather_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/weather_agent.py index 23616af7a5..7e80fccfd7 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/weather_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/weather_agent.py @@ -59,11 +59,11 @@ def get_forecast(location: str, days: int = 3) -> str: return f"{days}-day forecast for {location}:\n" + "\n".join(forecast) -def weather_agent(chat_client: SupportsChatGetResponse[Any]) -> Agent[Any]: +def weather_agent(client: SupportsChatGetResponse[Any]) -> Agent[Any]: """Create a weather agent with get_weather and get_forecast tools. Args: - chat_client: The chat client to use for the agent + client: The chat client to use for the agent Returns: A configured Agent instance with weather tools @@ -76,6 +76,6 @@ def weather_agent(chat_client: SupportsChatGetResponse[Any]) -> Agent[Any]: "Always provide friendly and informative responses. " "First return the weather result, and then return details about the forecast." 
), - chat_client=chat_client, + client=client, tools=[get_weather, get_forecast], ) diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/server/api/backend_tool_rendering.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/server/api/backend_tool_rendering.py index 44c30290c2..b18fc103e8 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/server/api/backend_tool_rendering.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/server/api/backend_tool_rendering.py @@ -19,10 +19,10 @@ def register_backend_tool_rendering(app: FastAPI) -> None: app: The FastAPI application. """ # Create a chat client and call the factory function - chat_client = cast(SupportsChatGetResponse[Any], AzureOpenAIChatClient()) + client = cast(SupportsChatGetResponse[Any], AzureOpenAIChatClient()) add_agent_framework_fastapi_endpoint( app, - weather_agent(chat_client), + weather_agent(client), "/backend_tool_rendering", ) diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/server/main.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/server/main.py index 53b494c4de..f45b30816f 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/server/main.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/server/main.py @@ -67,7 +67,7 @@ # Create a shared chat client for all agents # You can use different chat clients for different agents if needed # Set CHAT_CLIENT=anthropic to use Anthropic, defaults to Azure OpenAI -chat_client: SupportsChatGetResponse[ChatOptions] = cast( +client: SupportsChatGetResponse[ChatOptions] = cast( SupportsChatGetResponse[ChatOptions], AnthropicClient() if os.getenv("CHAT_CLIENT", "").lower() == "anthropic" else AzureOpenAIChatClient(), ) @@ -75,35 +75,35 @@ # Agentic Chat - basic chat agent add_agent_framework_fastapi_endpoint( app=app, - agent=simple_agent(chat_client), + agent=simple_agent(client), path="/agentic_chat", ) # Backend Tool Rendering - agent with tools 
add_agent_framework_fastapi_endpoint( app=app, - agent=weather_agent(chat_client), + agent=weather_agent(client), path="/backend_tool_rendering", ) # Shared State - recipe agent with structured output add_agent_framework_fastapi_endpoint( app=app, - agent=recipe_agent(chat_client), + agent=recipe_agent(client), path="/shared_state", ) # Predictive State Updates - document writer with predictive state add_agent_framework_fastapi_endpoint( app=app, - agent=document_writer_agent(chat_client), + agent=document_writer_agent(client), path="/predictive_state_updates", ) # Human in the Loop - human-in-the-loop agent with step customization add_agent_framework_fastapi_endpoint( app=app, - agent=human_in_the_loop_agent(chat_client), + agent=human_in_the_loop_agent(client), path="/human_in_the_loop", state_schema={"steps": {"type": "array"}}, predict_state_config={"steps": {"tool": "generate_task_steps", "tool_argument": "steps"}}, @@ -112,14 +112,14 @@ # Agentic Generative UI - task steps agent with streaming state updates add_agent_framework_fastapi_endpoint( app=app, - agent=task_steps_agent_wrapped(chat_client), # type: ignore[arg-type] + agent=task_steps_agent_wrapped(client), # type: ignore[arg-type] path="/agentic_generative_ui", ) # Tool-based Generative UI - UI generator with frontend-rendered tools add_agent_framework_fastapi_endpoint( app=app, - agent=ui_generator_agent(chat_client), + agent=ui_generator_agent(client), path="/tool_based_generative_ui", ) diff --git a/python/packages/ag-ui/getting_started/README.md b/python/packages/ag-ui/getting_started/README.md index 5422f8621e..d3d14694a5 100644 --- a/python/packages/ag-ui/getting_started/README.md +++ b/python/packages/ag-ui/getting_started/README.md @@ -205,7 +205,7 @@ if not api_key: agent = Agent( name="AGUIAssistant", instructions="You are a helpful assistant.", - chat_client=AzureOpenAIChatClient( + client=AzureOpenAIChatClient( endpoint=endpoint, deployment_name=deployment_name, api_key=api_key, @@ -239,7 
+239,7 @@ if __name__ == "__main__": agent = Agent( name="AGUIAssistant", instructions="You are a helpful assistant.", - chat_client=AzureOpenAIChatClient(), # Reads from environment automatically + client=AzureOpenAIChatClient(), # Reads from environment automatically ) ``` diff --git a/python/packages/ag-ui/getting_started/client_with_agent.py b/python/packages/ag-ui/getting_started/client_with_agent.py index 01e4a8563b..f0d7630294 100644 --- a/python/packages/ag-ui/getting_started/client_with_agent.py +++ b/python/packages/ag-ui/getting_started/client_with_agent.py @@ -63,7 +63,7 @@ async def main(): - RunStreamingAsync(messages, thread) Python equivalent: - - agent = Agent(chat_client=AGUIChatClient(...), tools=[...]) + - agent = Agent(client=AGUIChatClient(...), tools=[...]) - thread = agent.get_new_thread() # Creates thread with message_store - agent.run(message, stream=True, thread=thread) # Thread accumulates history """ @@ -86,7 +86,7 @@ async def main(): agent = Agent( name="remote_assistant", instructions="You are a helpful assistant. Remember user information across the conversation.", - chat_client=remote_client, + client=remote_client, tools=[get_weather], ) diff --git a/python/packages/ag-ui/getting_started/server.py b/python/packages/ag-ui/getting_started/server.py index 4d83832051..8d32009fb1 100644 --- a/python/packages/ag-ui/getting_started/server.py +++ b/python/packages/ag-ui/getting_started/server.py @@ -119,7 +119,7 @@ def get_time_zone(location: str) -> str: agent = Agent( name="AGUIAssistant", instructions="You are a helpful assistant. 
Use get_weather for weather and get_time_zone for time zones.", - chat_client=AzureOpenAIChatClient( + client=AzureOpenAIChatClient( endpoint=endpoint, deployment_name=deployment_name, ), diff --git a/python/packages/ag-ui/tests/ag_ui/conftest.py b/python/packages/ag-ui/tests/ag_ui/conftest.py index a86b0f0b67..82f6267863 100644 --- a/python/packages/ag-ui/tests/ag_ui/conftest.py +++ b/python/packages/ag-ui/tests/ag_ui/conftest.py @@ -159,7 +159,7 @@ def __init__( agent_id: str = "stub-agent", agent_name: str | None = "stub-agent", default_options: Any | None = None, - chat_client: Any | None = None, + client: Any | None = None, ) -> None: self.id = agent_id self.name = agent_name @@ -168,7 +168,7 @@ def __init__( self.default_options: dict[str, Any] = ( default_options if isinstance(default_options, dict) else {"tools": None, "response_format": None} ) - self.chat_client = chat_client or SimpleNamespace(function_invocation_configuration=None) + self.client = client or SimpleNamespace(function_invocation_configuration=None) self.messages_received: list[Any] = [] self.tools_received: list[Any] | None = None diff --git a/python/packages/ag-ui/tests/ag_ui/test_agent_wrapper_comprehensive.py b/python/packages/ag-ui/tests/ag_ui/test_agent_wrapper_comprehensive.py index f57f2809d3..f597c081f4 100644 --- a/python/packages/ag-ui/tests/ag_ui/test_agent_wrapper_comprehensive.py +++ b/python/packages/ag-ui/tests/ag_ui/test_agent_wrapper_comprehensive.py @@ -21,7 +21,7 @@ async def stream_fn( yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) agent = Agent[ChatOptions]( - chat_client=streaming_chat_client_stub(stream_fn), + client=streaming_chat_client_stub(stream_fn), name="test_agent", instructions="Test", ) @@ -42,7 +42,7 @@ async def stream_fn( ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) - agent = Agent(name="test_agent", instructions="Test", 
chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", client=streaming_chat_client_stub(stream_fn)) state_schema: dict[str, dict[str, Any]] = {"document": {"type": "string"}} wrapper = AgentFrameworkAgent(agent=agent, state_schema=state_schema) @@ -58,7 +58,7 @@ async def stream_fn( ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) - agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", client=streaming_chat_client_stub(stream_fn)) predict_config = {"document": {"tool": "write_doc", "tool_argument": "content"}} wrapper = AgentFrameworkAgent(agent=agent, predict_state_config=predict_config) @@ -78,7 +78,7 @@ class MyState(BaseModel): document: str tags: list[str] = [] - agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", client=streaming_chat_client_stub(stream_fn)) wrapper_class_schema = AgentFrameworkAgent(agent=agent, state_schema=MyState) wrapper_instance_schema = AgentFrameworkAgent(agent=agent, state_schema=MyState(document="hi")) @@ -97,7 +97,7 @@ async def stream_fn( ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) - agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", client=streaming_chat_client_stub(stream_fn)) wrapper = AgentFrameworkAgent(agent=agent) input_data = {"messages": [{"role": "user", "content": "Hi"}]} @@ -121,7 +121,7 @@ async def stream_fn( ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) - agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + 
agent = Agent(name="test_agent", instructions="Test", client=streaming_chat_client_stub(stream_fn)) predict_config = { "document": {"tool": "write_doc", "tool_argument": "content"}, "summary": {"tool": "summarize", "tool_argument": "text"}, @@ -153,7 +153,7 @@ async def stream_fn( ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) - agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", client=streaming_chat_client_stub(stream_fn)) state_schema = {"document": {"type": "string"}} wrapper = AgentFrameworkAgent(agent=agent, state_schema=state_schema) @@ -183,7 +183,7 @@ async def stream_fn( ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) - agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", client=streaming_chat_client_stub(stream_fn)) state_schema: dict[str, dict[str, Any]] = {"recipe": {"type": "object", "properties": {}}} wrapper = AgentFrameworkAgent(agent=agent, state_schema=state_schema) @@ -210,7 +210,7 @@ async def stream_fn( ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) - agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", client=streaming_chat_client_stub(stream_fn)) state_schema: dict[str, dict[str, Any]] = {"steps": {"type": "array", "items": {}}} wrapper = AgentFrameworkAgent(agent=agent, state_schema=state_schema) @@ -237,7 +237,7 @@ async def stream_fn( ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) - agent = Agent(name="test_agent", instructions="Test", 
chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", client=streaming_chat_client_stub(stream_fn)) wrapper = AgentFrameworkAgent(agent=agent) input_data = {"messages": [{"role": "user", "content": "Hi"}]} @@ -259,7 +259,7 @@ async def stream_fn( ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[Content.from_text(text="Document updated")]) - agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", client=streaming_chat_client_stub(stream_fn)) wrapper = AgentFrameworkAgent( agent=agent, state_schema={"document": {"type": "string"}}, @@ -306,7 +306,7 @@ async def stream_fn( ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[Content.from_text(text="OK")]) - agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", client=streaming_chat_client_stub(stream_fn)) wrapper = AgentFrameworkAgent(agent=agent) # Simulate tool result message with rejection @@ -340,7 +340,7 @@ async def stream_fn( ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[Content.from_text(text="OK")]) - agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", client=streaming_chat_client_stub(stream_fn)) wrapper = AgentFrameworkAgent(agent=agent) # Simulate tool result with multiple steps @@ -386,7 +386,7 @@ async def stream_fn( ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[Content.from_text(text="OK")]) - agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", client=streaming_chat_client_stub(stream_fn)) wrapper = 
AgentFrameworkAgent(agent=agent) # Simulate tool result rejection with steps @@ -431,7 +431,7 @@ async def stream_fn( captured_options.update(options) yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) - agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", client=streaming_chat_client_stub(stream_fn)) wrapper = AgentFrameworkAgent(agent=agent) input_data = { @@ -445,7 +445,7 @@ async def stream_fn( events.append(event) # AG-UI internal metadata should be stored in thread.metadata - thread = agent.chat_client.last_thread + thread = agent.client.last_thread thread_metadata = thread.metadata if thread and hasattr(thread, "metadata") else {} assert thread_metadata.get("ag_ui_thread_id") == "test_thread_123" assert thread_metadata.get("ag_ui_run_id") == "test_run_456" @@ -473,7 +473,7 @@ async def stream_fn( captured_options.update(options) yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) - agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", client=streaming_chat_client_stub(stream_fn)) wrapper = AgentFrameworkAgent( agent=agent, state_schema={"document": {"type": "string"}}, @@ -489,7 +489,7 @@ async def stream_fn( events.append(event) # Current state should be stored in thread.metadata - thread = agent.chat_client.last_thread + thread = agent.client.last_thread thread_metadata = thread.metadata if thread and hasattr(thread, "metadata") else {} current_state = thread_metadata.get("current_state") if isinstance(current_state, str): @@ -510,7 +510,7 @@ async def stream_fn( ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) - agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", 
instructions="Test", client=streaming_chat_client_stub(stream_fn)) wrapper = AgentFrameworkAgent(agent=agent) input_data: dict[str, Any] = {"messages": []} @@ -534,7 +534,7 @@ async def stream_fn( ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[Content.from_text(text="Hello world")]) - agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", client=streaming_chat_client_stub(stream_fn)) wrapper = AgentFrameworkAgent(agent=agent) input_data: dict[str, Any] = {"messages": [{"role": "user", "content": "Hi"}]} @@ -564,7 +564,7 @@ async def stream_fn( yield ChatResponseUpdate(contents=[]) raise RuntimeError("Simulated failure") - agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", client=streaming_chat_client_stub(stream_fn)) wrapper = AgentFrameworkAgent(agent=agent) input_data: dict[str, Any] = {"messages": [{"role": "user", "content": "Hi"}]} @@ -585,7 +585,7 @@ async def stream_fn( yield ChatResponseUpdate(contents=[]) raise AssertionError("ChatClient should not be called with orphaned tool result") - agent = Agent(name="test_agent", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test_agent", instructions="Test", client=streaming_chat_client_stub(stream_fn)) wrapper = AgentFrameworkAgent(agent=agent) # Send invalid JSON as tool result without preceding tool call @@ -624,7 +624,7 @@ async def stream_fn( contents=[Content.from_text(text="Response")], response_id="resp_67890", conversation_id="conv_12345" ) - agent = Agent(chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(client=streaming_chat_client_stub(stream_fn)) wrapper = AgentFrameworkAgent(agent=agent, use_service_thread=False) input_data = {"messages": [{"role": "user", "content": "Hi"}], "thread_id": 
"conv_123456"} @@ -651,7 +651,7 @@ async def stream_fn( contents=[Content.from_text(text="Response")], response_id="resp_67890", conversation_id="conv_12345" ) - agent = Agent(chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(client=streaming_chat_client_stub(stream_fn)) wrapper = AgentFrameworkAgent(agent=agent, use_service_thread=True) input_data = {"messages": [{"role": "user", "content": "Hi"}], "thread_id": "conv_123456"} @@ -659,7 +659,7 @@ async def stream_fn( events: list[Any] = [] async for event in wrapper.run_agent(input_data): events.append(event) - request_service_thread_id = agent.chat_client.last_service_thread_id + request_service_thread_id = agent.client.last_service_thread_id assert request_service_thread_id == "conv_123456" # type: ignore[attr-defined] (service_thread_id should be set) @@ -687,7 +687,7 @@ async def stream_fn( yield ChatResponseUpdate(contents=[Content.from_text(text="Processing completed")]) agent = Agent( - chat_client=streaming_chat_client_stub(stream_fn), + client=streaming_chat_client_stub(stream_fn), name="test_agent", instructions="Test", tools=[get_datetime], @@ -780,7 +780,7 @@ async def stream_fn( agent = Agent( name="test_agent", instructions="Test", - chat_client=streaming_chat_client_stub(stream_fn), + client=streaming_chat_client_stub(stream_fn), tools=[delete_all_data], ) wrapper = AgentFrameworkAgent(agent=agent) diff --git a/python/packages/ag-ui/tests/ag_ui/test_endpoint.py b/python/packages/ag-ui/tests/ag_ui/test_endpoint.py index 5ebfc471f6..6d80fff588 100644 --- a/python/packages/ag-ui/tests/ag_ui/test_endpoint.py +++ b/python/packages/ag-ui/tests/ag_ui/test_endpoint.py @@ -28,7 +28,7 @@ def _build(response_text: str = "Test response"): async def test_add_endpoint_with_agent_protocol(build_chat_client): """Test adding endpoint with raw SupportsAgentRun.""" app = FastAPI() - agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", 
instructions="Test agent", client=build_chat_client()) add_agent_framework_fastapi_endpoint(app, agent, path="/test-agent") @@ -42,7 +42,7 @@ async def test_add_endpoint_with_agent_protocol(build_chat_client): async def test_add_endpoint_with_wrapped_agent(build_chat_client): """Test adding endpoint with pre-wrapped AgentFrameworkAgent.""" app = FastAPI() - agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", client=build_chat_client()) wrapped_agent = AgentFrameworkAgent(agent=agent, name="wrapped") add_agent_framework_fastapi_endpoint(app, wrapped_agent, path="/wrapped-agent") @@ -57,7 +57,7 @@ async def test_add_endpoint_with_wrapped_agent(build_chat_client): async def test_endpoint_with_state_schema(build_chat_client): """Test endpoint with state_schema parameter.""" app = FastAPI() - agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", client=build_chat_client()) state_schema = {"document": {"type": "string"}} add_agent_framework_fastapi_endpoint(app, agent, path="/stateful", state_schema=state_schema) @@ -73,7 +73,7 @@ async def test_endpoint_with_state_schema(build_chat_client): async def test_endpoint_with_default_state_seed(build_chat_client): """Test endpoint seeds default state when client omits it.""" app = FastAPI() - agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", client=build_chat_client()) state_schema = {"proverbs": {"type": "array"}} default_state = {"proverbs": ["Keep the original."]} @@ -100,7 +100,7 @@ async def test_endpoint_with_default_state_seed(build_chat_client): async def test_endpoint_with_predict_state_config(build_chat_client): """Test endpoint with predict_state_config parameter.""" app = FastAPI() - agent = Agent(name="test", instructions="Test agent", 
chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", client=build_chat_client()) predict_config = {"document": {"tool": "write_doc", "tool_argument": "content"}} add_agent_framework_fastapi_endpoint(app, agent, path="/predictive", predict_state_config=predict_config) @@ -114,7 +114,7 @@ async def test_endpoint_with_predict_state_config(build_chat_client): async def test_endpoint_request_logging(build_chat_client): """Test that endpoint logs request details.""" app = FastAPI() - agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", client=build_chat_client()) add_agent_framework_fastapi_endpoint(app, agent, path="/logged") @@ -134,7 +134,7 @@ async def test_endpoint_request_logging(build_chat_client): async def test_endpoint_event_streaming(build_chat_client): """Test that endpoint streams events correctly.""" app = FastAPI() - agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client("Streamed response")) + agent = Agent(name="test", instructions="Test agent", client=build_chat_client("Streamed response")) add_agent_framework_fastapi_endpoint(app, agent, path="/stream") @@ -168,7 +168,7 @@ async def test_endpoint_event_streaming(build_chat_client): async def test_endpoint_error_handling(build_chat_client): """Test endpoint error handling during request parsing.""" app = FastAPI() - agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", client=build_chat_client()) add_agent_framework_fastapi_endpoint(app, agent, path="/failing") @@ -184,8 +184,8 @@ async def test_endpoint_error_handling(build_chat_client): async def test_endpoint_multiple_paths(build_chat_client): """Test adding multiple endpoints with different paths.""" app = FastAPI() - agent1 = Agent(name="agent1", instructions="First agent", 
chat_client=build_chat_client("Response 1")) - agent2 = Agent(name="agent2", instructions="Second agent", chat_client=build_chat_client("Response 2")) + agent1 = Agent(name="agent1", instructions="First agent", client=build_chat_client("Response 1")) + agent2 = Agent(name="agent2", instructions="Second agent", client=build_chat_client("Response 2")) add_agent_framework_fastapi_endpoint(app, agent1, path="/agent1") add_agent_framework_fastapi_endpoint(app, agent2, path="/agent2") @@ -202,7 +202,7 @@ async def test_endpoint_multiple_paths(build_chat_client): async def test_endpoint_default_path(build_chat_client): """Test endpoint with default path.""" app = FastAPI() - agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", client=build_chat_client()) add_agent_framework_fastapi_endpoint(app, agent) @@ -215,7 +215,7 @@ async def test_endpoint_default_path(build_chat_client): async def test_endpoint_response_headers(build_chat_client): """Test that endpoint sets correct response headers.""" app = FastAPI() - agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", client=build_chat_client()) add_agent_framework_fastapi_endpoint(app, agent, path="/headers") @@ -231,7 +231,7 @@ async def test_endpoint_response_headers(build_chat_client): async def test_endpoint_empty_messages(build_chat_client): """Test endpoint with empty messages list.""" app = FastAPI() - agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", client=build_chat_client()) add_agent_framework_fastapi_endpoint(app, agent, path="/empty") @@ -244,7 +244,7 @@ async def test_endpoint_empty_messages(build_chat_client): async def test_endpoint_complex_input(build_chat_client): """Test endpoint with complex input data.""" app = FastAPI() - agent = 
Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", client=build_chat_client()) add_agent_framework_fastapi_endpoint(app, agent, path="/complex") @@ -269,7 +269,7 @@ async def test_endpoint_complex_input(build_chat_client): async def test_endpoint_openapi_schema(build_chat_client): """Test that endpoint generates proper OpenAPI schema with request model.""" app = FastAPI() - agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", client=build_chat_client()) add_agent_framework_fastapi_endpoint(app, agent, path="/schema-test") @@ -313,7 +313,7 @@ async def test_endpoint_openapi_schema(build_chat_client): async def test_endpoint_default_tags(build_chat_client): """Test that endpoint uses default 'AG-UI' tag.""" app = FastAPI() - agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", client=build_chat_client()) add_agent_framework_fastapi_endpoint(app, agent, path="/default-tags") @@ -331,7 +331,7 @@ async def test_endpoint_default_tags(build_chat_client): async def test_endpoint_custom_tags(build_chat_client): """Test that endpoint accepts custom tags.""" app = FastAPI() - agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", client=build_chat_client()) add_agent_framework_fastapi_endpoint(app, agent, path="/custom-tags", tags=["Custom", "Agent"]) @@ -349,7 +349,7 @@ async def test_endpoint_custom_tags(build_chat_client): async def test_endpoint_missing_required_field(build_chat_client): """Test that endpoint validates required fields with Pydantic.""" app = FastAPI() - agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", 
client=build_chat_client()) add_agent_framework_fastapi_endpoint(app, agent, path="/validation") @@ -368,7 +368,7 @@ async def test_endpoint_internal_error_handling(build_chat_client): from unittest.mock import patch app = FastAPI() - agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", client=build_chat_client()) # Use default_state to trigger the code path that can raise an exception add_agent_framework_fastapi_endpoint(app, agent, path="/error-test", default_state={"key": "value"}) @@ -387,7 +387,7 @@ async def test_endpoint_internal_error_handling(build_chat_client): async def test_endpoint_with_dependencies_blocks_unauthorized(build_chat_client): """Test that endpoint blocks requests when authentication dependency fails.""" app = FastAPI() - agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", client=build_chat_client()) async def require_api_key(x_api_key: str | None = Header(None)): if x_api_key != "secret-key": @@ -406,7 +406,7 @@ async def require_api_key(x_api_key: str | None = Header(None)): async def test_endpoint_with_dependencies_allows_authorized(build_chat_client): """Test that endpoint allows requests when authentication dependency passes.""" app = FastAPI() - agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", client=build_chat_client()) async def require_api_key(x_api_key: str | None = Header(None)): if x_api_key != "secret-key": @@ -429,7 +429,7 @@ async def require_api_key(x_api_key: str | None = Header(None)): async def test_endpoint_with_multiple_dependencies(build_chat_client): """Test that endpoint supports multiple dependencies.""" app = FastAPI() - agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", 
instructions="Test agent", client=build_chat_client()) execution_order: list[str] = [] @@ -457,7 +457,7 @@ async def second_dependency(): async def test_endpoint_without_dependencies_is_accessible(build_chat_client): """Test that endpoint without dependencies remains accessible (backward compatibility).""" app = FastAPI() - agent = Agent(name="test", instructions="Test agent", chat_client=build_chat_client()) + agent = Agent(name="test", instructions="Test agent", client=build_chat_client()) # No dependencies parameter - should be accessible without auth add_agent_framework_fastapi_endpoint(app, agent, path="/open") diff --git a/python/packages/ag-ui/tests/ag_ui/test_structured_output.py b/python/packages/ag-ui/tests/ag_ui/test_structured_output.py index cd690fcfb7..a8d9404a42 100644 --- a/python/packages/ag-ui/tests/ag_ui/test_structured_output.py +++ b/python/packages/ag-ui/tests/ag_ui/test_structured_output.py @@ -41,7 +41,7 @@ async def stream_fn( contents=[Content.from_text(text='{"recipe": {"name": "Pasta"}, "message": "Here is your recipe"}')] ) - agent = Agent(name="test", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test", instructions="Test", client=streaming_chat_client_stub(stream_fn)) agent.default_options = ChatOptions(response_format=RecipeOutput) wrapper = AgentFrameworkAgent( @@ -83,7 +83,7 @@ async def stream_fn( } yield ChatResponseUpdate(contents=[Content.from_text(text=json.dumps(steps_data))]) - agent = Agent(name="test", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test", instructions="Test", client=streaming_chat_client_stub(stream_fn)) agent.default_options = ChatOptions(response_format=StepsOutput) wrapper = AgentFrameworkAgent( @@ -117,7 +117,7 @@ async def test_structured_output_with_no_schema_match(streaming_chat_client_stub ] agent = Agent( - name="test", instructions="Test", 
chat_client=streaming_chat_client_stub(stream_from_updates_fixture(updates)) + name="test", instructions="Test", client=streaming_chat_client_stub(stream_from_updates_fixture(updates)) ) agent.default_options = ChatOptions(response_format=GenericOutput) @@ -153,7 +153,7 @@ async def stream_fn( ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate(contents=[Content.from_text(text='{"data": {"key": "value"}, "info": "processed"}')]) - agent = Agent(name="test", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test", instructions="Test", client=streaming_chat_client_stub(stream_fn)) agent.default_options = ChatOptions(response_format=DataOutput) wrapper = AgentFrameworkAgent( @@ -185,7 +185,7 @@ async def test_no_structured_output_when_no_response_format(streaming_chat_clien agent = Agent( name="test", instructions="Test", - chat_client=streaming_chat_client_stub(stream_from_updates_fixture(updates)), + client=streaming_chat_client_stub(stream_from_updates_fixture(updates)), ) # No response_format set @@ -213,7 +213,7 @@ async def stream_fn( output_data = {"recipe": {"name": "Salad"}, "message": "Fresh salad recipe ready"} yield ChatResponseUpdate(contents=[Content.from_text(text=json.dumps(output_data))]) - agent = Agent(name="test", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test", instructions="Test", client=streaming_chat_client_stub(stream_fn)) agent.default_options = ChatOptions(response_format=RecipeOutput) wrapper = AgentFrameworkAgent( @@ -248,7 +248,7 @@ async def stream_fn( if False: yield ChatResponseUpdate(contents=[]) - agent = Agent(name="test", instructions="Test", chat_client=streaming_chat_client_stub(stream_fn)) + agent = Agent(name="test", instructions="Test", client=streaming_chat_client_stub(stream_fn)) agent.default_options = ChatOptions(response_format=RecipeOutput) wrapper = AgentFrameworkAgent(agent=agent) diff --git 
a/python/packages/ag-ui/tests/ag_ui/test_tooling.py b/python/packages/ag-ui/tests/ag_ui/test_tooling.py index f174897087..e8567a586d 100644 --- a/python/packages/ag-ui/tests/ag_ui/test_tooling.py +++ b/python/packages/ag-ui/tests/ag_ui/test_tooling.py @@ -38,7 +38,7 @@ def _create_chat_agent_with_tool(tool_name: str = "regular_tool") -> Agent: will always be named 'regular_tool' since tool uses the function name. """ mock_chat_client = MagicMock() - return Agent(chat_client=mock_chat_client, tools=[regular_tool]) + return Agent(client=mock_chat_client, tools=[regular_tool]) def test_merge_tools_filters_duplicates() -> None: @@ -59,7 +59,7 @@ def test_register_additional_client_tools_assigns_when_configured() -> None: mock_chat_client = MagicMock(spec=BaseChatClient) mock_chat_client.function_invocation_configuration = normalize_function_invocation_configuration(None) - agent = Agent(chat_client=mock_chat_client) + agent = Agent(client=mock_chat_client) tools = [DummyTool("x")] register_additional_client_tools(agent, tools) @@ -148,14 +148,14 @@ class MockAgent: def test_register_additional_client_tools_no_tools() -> None: """register_additional_client_tools does nothing with None tools.""" mock_chat_client = MagicMock() - agent = Agent(chat_client=mock_chat_client) + agent = Agent(client=mock_chat_client) # Should not raise register_additional_client_tools(agent, None) def test_register_additional_client_tools_no_chat_client() -> None: - """register_additional_client_tools does nothing when agent has no chat_client.""" + """register_additional_client_tools does nothing when agent has no client.""" from agent_framework_ag_ui._orchestration._tooling import register_additional_client_tools class MockAgent: diff --git a/python/packages/anthropic/tests/test_anthropic_client.py b/python/packages/anthropic/tests/test_anthropic_client.py index c3749d247e..21e23de0e8 100644 --- a/python/packages/anthropic/tests/test_anthropic_client.py +++ 
b/python/packages/anthropic/tests/test_anthropic_client.py @@ -98,11 +98,11 @@ def test_anthropic_settings_missing_api_key(anthropic_unit_test_env: dict[str, s def test_anthropic_client_init_with_client(mock_anthropic_client: MagicMock) -> None: """Test AnthropicClient initialization with existing anthropic_client.""" - chat_client = create_test_anthropic_client(mock_anthropic_client, model_id="claude-3-5-sonnet-20241022") + client = create_test_anthropic_client(mock_anthropic_client, model_id="claude-3-5-sonnet-20241022") - assert chat_client.anthropic_client is mock_anthropic_client - assert chat_client.model_id == "claude-3-5-sonnet-20241022" - assert isinstance(chat_client, SupportsChatGetResponse) + assert client.anthropic_client is mock_anthropic_client + assert client.model_id == "claude-3-5-sonnet-20241022" + assert isinstance(client, SupportsChatGetResponse) def test_anthropic_client_init_auto_create_client(anthropic_unit_test_env: dict[str, str]) -> None: @@ -138,8 +138,8 @@ def test_anthropic_client_init_validation_error() -> None: def test_anthropic_client_service_url(mock_anthropic_client: MagicMock) -> None: """Test service_url method.""" - chat_client = create_test_anthropic_client(mock_anthropic_client) - assert chat_client.service_url() == "https://api.anthropic.com" + client = create_test_anthropic_client(mock_anthropic_client) + assert client.service_url() == "https://api.anthropic.com" # Message Conversion Tests @@ -147,10 +147,10 @@ def test_anthropic_client_service_url(mock_anthropic_client: MagicMock) -> None: def test_prepare_message_for_anthropic_text(mock_anthropic_client: MagicMock) -> None: """Test converting text message to Anthropic format.""" - chat_client = create_test_anthropic_client(mock_anthropic_client) + client = create_test_anthropic_client(mock_anthropic_client) message = Message(role="user", text="Hello, world!") - result = chat_client._prepare_message_for_anthropic(message) + result = 
client._prepare_message_for_anthropic(message) assert result["role"] == "user" assert len(result["content"]) == 1 @@ -160,7 +160,7 @@ def test_prepare_message_for_anthropic_text(mock_anthropic_client: MagicMock) -> def test_prepare_message_for_anthropic_function_call(mock_anthropic_client: MagicMock) -> None: """Test converting function call message to Anthropic format.""" - chat_client = create_test_anthropic_client(mock_anthropic_client) + client = create_test_anthropic_client(mock_anthropic_client) message = Message( role="assistant", contents=[ @@ -172,7 +172,7 @@ def test_prepare_message_for_anthropic_function_call(mock_anthropic_client: Magi ], ) - result = chat_client._prepare_message_for_anthropic(message) + result = client._prepare_message_for_anthropic(message) assert result["role"] == "assistant" assert len(result["content"]) == 1 @@ -184,7 +184,7 @@ def test_prepare_message_for_anthropic_function_call(mock_anthropic_client: Magi def test_prepare_message_for_anthropic_function_result(mock_anthropic_client: MagicMock) -> None: """Test converting function result message to Anthropic format.""" - chat_client = create_test_anthropic_client(mock_anthropic_client) + client = create_test_anthropic_client(mock_anthropic_client) message = Message( role="tool", contents=[ @@ -195,7 +195,7 @@ def test_prepare_message_for_anthropic_function_result(mock_anthropic_client: Ma ], ) - result = chat_client._prepare_message_for_anthropic(message) + result = client._prepare_message_for_anthropic(message) assert result["role"] == "user" assert len(result["content"]) == 1 @@ -209,13 +209,13 @@ def test_prepare_message_for_anthropic_function_result(mock_anthropic_client: Ma def test_prepare_message_for_anthropic_text_reasoning(mock_anthropic_client: MagicMock) -> None: """Test converting text reasoning message to Anthropic format.""" - chat_client = create_test_anthropic_client(mock_anthropic_client) + client = create_test_anthropic_client(mock_anthropic_client) message = 
Message( role="assistant", contents=[Content.from_text_reasoning(text="Let me think about this...")], ) - result = chat_client._prepare_message_for_anthropic(message) + result = client._prepare_message_for_anthropic(message) assert result["role"] == "assistant" assert len(result["content"]) == 1 @@ -225,13 +225,13 @@ def test_prepare_message_for_anthropic_text_reasoning(mock_anthropic_client: Mag def test_prepare_messages_for_anthropic_with_system(mock_anthropic_client: MagicMock) -> None: """Test converting messages list with system message.""" - chat_client = create_test_anthropic_client(mock_anthropic_client) + client = create_test_anthropic_client(mock_anthropic_client) messages = [ Message(role="system", text="You are a helpful assistant."), Message(role="user", text="Hello!"), ] - result = chat_client._prepare_messages_for_anthropic(messages) + result = client._prepare_messages_for_anthropic(messages) # System message should be skipped assert len(result) == 1 @@ -241,13 +241,13 @@ def test_prepare_messages_for_anthropic_with_system(mock_anthropic_client: Magic def test_prepare_messages_for_anthropic_without_system(mock_anthropic_client: MagicMock) -> None: """Test converting messages list without system message.""" - chat_client = create_test_anthropic_client(mock_anthropic_client) + client = create_test_anthropic_client(mock_anthropic_client) messages = [ Message(role="user", text="Hello!"), Message(role="assistant", text="Hi there!"), ] - result = chat_client._prepare_messages_for_anthropic(messages) + result = client._prepare_messages_for_anthropic(messages) assert len(result) == 2 assert result[0]["role"] == "user" @@ -259,7 +259,7 @@ def test_prepare_messages_for_anthropic_without_system(mock_anthropic_client: Ma def test_prepare_tools_for_anthropic_tool(mock_anthropic_client: MagicMock) -> None: """Test converting FunctionTool to Anthropic format.""" - chat_client = create_test_anthropic_client(mock_anthropic_client) + client = 
create_test_anthropic_client(mock_anthropic_client) @tool(approval_mode="never_require") def get_weather(location: Annotated[str, Field(description="Location to get weather for")]) -> str: @@ -267,7 +267,7 @@ def get_weather(location: Annotated[str, Field(description="Location to get weat return f"Weather for {location}" chat_options = ChatOptions(tools=[get_weather]) - result = chat_client._prepare_tools_for_anthropic(chat_options) + result = client._prepare_tools_for_anthropic(chat_options) assert result is not None assert "tools" in result @@ -279,10 +279,10 @@ def get_weather(location: Annotated[str, Field(description="Location to get weat def test_prepare_tools_for_anthropic_web_search(mock_anthropic_client: MagicMock) -> None: """Test converting HostedWebSearchTool to Anthropic format.""" - chat_client = create_test_anthropic_client(mock_anthropic_client) + client = create_test_anthropic_client(mock_anthropic_client) chat_options = ChatOptions(tools=[HostedWebSearchTool()]) - result = chat_client._prepare_tools_for_anthropic(chat_options) + result = client._prepare_tools_for_anthropic(chat_options) assert result is not None assert "tools" in result @@ -293,10 +293,10 @@ def test_prepare_tools_for_anthropic_web_search(mock_anthropic_client: MagicMock def test_prepare_tools_for_anthropic_code_interpreter(mock_anthropic_client: MagicMock) -> None: """Test converting HostedCodeInterpreterTool to Anthropic format.""" - chat_client = create_test_anthropic_client(mock_anthropic_client) + client = create_test_anthropic_client(mock_anthropic_client) chat_options = ChatOptions(tools=[HostedCodeInterpreterTool()]) - result = chat_client._prepare_tools_for_anthropic(chat_options) + result = client._prepare_tools_for_anthropic(chat_options) assert result is not None assert "tools" in result @@ -307,10 +307,10 @@ def test_prepare_tools_for_anthropic_code_interpreter(mock_anthropic_client: Mag def test_prepare_tools_for_anthropic_mcp_tool(mock_anthropic_client: MagicMock) 
-> None: """Test converting HostedMCPTool to Anthropic format.""" - chat_client = create_test_anthropic_client(mock_anthropic_client) + client = create_test_anthropic_client(mock_anthropic_client) chat_options = ChatOptions(tools=[HostedMCPTool(name="test-mcp", url="https://example.com/mcp")]) - result = chat_client._prepare_tools_for_anthropic(chat_options) + result = client._prepare_tools_for_anthropic(chat_options) assert result is not None assert "mcp_servers" in result @@ -322,7 +322,7 @@ def test_prepare_tools_for_anthropic_mcp_tool(mock_anthropic_client: MagicMock) def test_prepare_tools_for_anthropic_mcp_with_auth(mock_anthropic_client: MagicMock) -> None: """Test converting HostedMCPTool with authorization headers.""" - chat_client = create_test_anthropic_client(mock_anthropic_client) + client = create_test_anthropic_client(mock_anthropic_client) chat_options = ChatOptions( tools=[ HostedMCPTool( @@ -333,7 +333,7 @@ def test_prepare_tools_for_anthropic_mcp_with_auth(mock_anthropic_client: MagicM ] ) - result = chat_client._prepare_tools_for_anthropic(chat_options) + result = client._prepare_tools_for_anthropic(chat_options) assert result is not None assert "mcp_servers" in result @@ -344,10 +344,10 @@ def test_prepare_tools_for_anthropic_mcp_with_auth(mock_anthropic_client: MagicM def test_prepare_tools_for_anthropic_dict_tool(mock_anthropic_client: MagicMock) -> None: """Test converting dict tool to Anthropic format.""" - chat_client = create_test_anthropic_client(mock_anthropic_client) + client = create_test_anthropic_client(mock_anthropic_client) chat_options = ChatOptions(tools=[{"type": "custom", "name": "custom_tool", "description": "A custom tool"}]) - result = chat_client._prepare_tools_for_anthropic(chat_options) + result = client._prepare_tools_for_anthropic(chat_options) assert result is not None assert "tools" in result @@ -357,10 +357,10 @@ def test_prepare_tools_for_anthropic_dict_tool(mock_anthropic_client: MagicMock) def 
test_prepare_tools_for_anthropic_none(mock_anthropic_client: MagicMock) -> None: """Test converting None tools.""" - chat_client = create_test_anthropic_client(mock_anthropic_client) + client = create_test_anthropic_client(mock_anthropic_client) chat_options = ChatOptions() - result = chat_client._prepare_tools_for_anthropic(chat_options) + result = client._prepare_tools_for_anthropic(chat_options) assert result is None @@ -370,14 +370,14 @@ def test_prepare_tools_for_anthropic_none(mock_anthropic_client: MagicMock) -> N async def test_prepare_options_basic(mock_anthropic_client: MagicMock) -> None: """Test _prepare_options with basic ChatOptions.""" - chat_client = create_test_anthropic_client(mock_anthropic_client) + client = create_test_anthropic_client(mock_anthropic_client) messages = [Message(role="user", text="Hello")] chat_options = ChatOptions(max_tokens=100, temperature=0.7) - run_options = chat_client._prepare_options(messages, chat_options) + run_options = client._prepare_options(messages, chat_options) - assert run_options["model"] == chat_client.model_id + assert run_options["model"] == client.model_id assert run_options["max_tokens"] == 100 assert run_options["temperature"] == 0.7 assert "messages" in run_options @@ -385,7 +385,7 @@ async def test_prepare_options_basic(mock_anthropic_client: MagicMock) -> None: async def test_prepare_options_with_system_message(mock_anthropic_client: MagicMock) -> None: """Test _prepare_options with system message.""" - chat_client = create_test_anthropic_client(mock_anthropic_client) + client = create_test_anthropic_client(mock_anthropic_client) messages = [ Message(role="system", text="You are helpful."), @@ -393,7 +393,7 @@ async def test_prepare_options_with_system_message(mock_anthropic_client: MagicM ] chat_options = ChatOptions() - run_options = chat_client._prepare_options(messages, chat_options) + run_options = client._prepare_options(messages, chat_options) assert run_options["system"] == "You are helpful." 
assert len(run_options["messages"]) == 1 # System message not in messages list @@ -401,25 +401,25 @@ async def test_prepare_options_with_system_message(mock_anthropic_client: MagicM async def test_prepare_options_with_tool_choice_auto(mock_anthropic_client: MagicMock) -> None: """Test _prepare_options with auto tool choice.""" - chat_client = create_test_anthropic_client(mock_anthropic_client) + client = create_test_anthropic_client(mock_anthropic_client) messages = [Message(role="user", text="Hello")] chat_options = ChatOptions(tool_choice="auto") - run_options = chat_client._prepare_options(messages, chat_options) + run_options = client._prepare_options(messages, chat_options) assert run_options["tool_choice"]["type"] == "auto" async def test_prepare_options_with_tool_choice_required(mock_anthropic_client: MagicMock) -> None: """Test _prepare_options with required tool choice.""" - chat_client = create_test_anthropic_client(mock_anthropic_client) + client = create_test_anthropic_client(mock_anthropic_client) messages = [Message(role="user", text="Hello")] # For required with specific function, need to pass as dict chat_options = ChatOptions(tool_choice={"mode": "required", "required_function_name": "get_weather"}) - run_options = chat_client._prepare_options(messages, chat_options) + run_options = client._prepare_options(messages, chat_options) assert run_options["tool_choice"]["type"] == "tool" assert run_options["tool_choice"]["name"] == "get_weather" @@ -427,19 +427,19 @@ async def test_prepare_options_with_tool_choice_required(mock_anthropic_client: async def test_prepare_options_with_tool_choice_none(mock_anthropic_client: MagicMock) -> None: """Test _prepare_options with none tool choice.""" - chat_client = create_test_anthropic_client(mock_anthropic_client) + client = create_test_anthropic_client(mock_anthropic_client) messages = [Message(role="user", text="Hello")] chat_options = ChatOptions(tool_choice="none") - run_options = 
chat_client._prepare_options(messages, chat_options) + run_options = client._prepare_options(messages, chat_options) assert run_options["tool_choice"]["type"] == "none" async def test_prepare_options_with_tools(mock_anthropic_client: MagicMock) -> None: """Test _prepare_options with tools.""" - chat_client = create_test_anthropic_client(mock_anthropic_client) + client = create_test_anthropic_client(mock_anthropic_client) @tool(approval_mode="never_require") def get_weather(location: str) -> str: @@ -449,7 +449,7 @@ def get_weather(location: str) -> str: messages = [Message(role="user", text="Hello")] chat_options = ChatOptions(tools=[get_weather]) - run_options = chat_client._prepare_options(messages, chat_options) + run_options = client._prepare_options(messages, chat_options) assert "tools" in run_options assert len(run_options["tools"]) == 1 @@ -457,24 +457,24 @@ def get_weather(location: str) -> str: async def test_prepare_options_with_stop_sequences(mock_anthropic_client: MagicMock) -> None: """Test _prepare_options with stop sequences.""" - chat_client = create_test_anthropic_client(mock_anthropic_client) + client = create_test_anthropic_client(mock_anthropic_client) messages = [Message(role="user", text="Hello")] chat_options = ChatOptions(stop=["STOP", "END"]) - run_options = chat_client._prepare_options(messages, chat_options) + run_options = client._prepare_options(messages, chat_options) assert run_options["stop_sequences"] == ["STOP", "END"] async def test_prepare_options_with_top_p(mock_anthropic_client: MagicMock) -> None: """Test _prepare_options with top_p.""" - chat_client = create_test_anthropic_client(mock_anthropic_client) + client = create_test_anthropic_client(mock_anthropic_client) messages = [Message(role="user", text="Hello")] chat_options = ChatOptions(top_p=0.9) - run_options = chat_client._prepare_options(messages, chat_options) + run_options = client._prepare_options(messages, chat_options) assert run_options["top_p"] == 0.9 @@ -485,7 
+485,7 @@ async def test_prepare_options_filters_internal_kwargs(mock_anthropic_client: Ma Internal kwargs like _function_middleware_pipeline, thread, and middleware should be filtered out before being passed to the Anthropic API. """ - chat_client = create_test_anthropic_client(mock_anthropic_client) + client = create_test_anthropic_client(mock_anthropic_client) messages = [Message(role="user", text="Hello")] chat_options: ChatOptions = {} @@ -499,7 +499,7 @@ async def test_prepare_options_filters_internal_kwargs(mock_anthropic_client: Ma "middleware": [object()], } - run_options = chat_client._prepare_options(messages, chat_options, **internal_kwargs) + run_options = client._prepare_options(messages, chat_options, **internal_kwargs) # Internal kwargs should be filtered out assert "_function_middleware_pipeline" not in run_options @@ -514,7 +514,7 @@ async def test_prepare_options_filters_internal_kwargs(mock_anthropic_client: Ma def test_process_message_basic(mock_anthropic_client: MagicMock) -> None: """Test _process_message with basic text response.""" - chat_client = create_test_anthropic_client(mock_anthropic_client) + client = create_test_anthropic_client(mock_anthropic_client) mock_message = MagicMock(spec=BetaMessage) mock_message.id = "msg_123" @@ -523,7 +523,7 @@ def test_process_message_basic(mock_anthropic_client: MagicMock) -> None: mock_message.usage = BetaUsage(input_tokens=10, output_tokens=5) mock_message.stop_reason = "end_turn" - response = chat_client._process_message(mock_message, {}) + response = client._process_message(mock_message, {}) assert response.response_id == "msg_123" assert response.model_id == "claude-3-5-sonnet-20241022" @@ -540,7 +540,7 @@ def test_process_message_basic(mock_anthropic_client: MagicMock) -> None: def test_process_message_with_tool_use(mock_anthropic_client: MagicMock) -> None: """Test _process_message with tool use.""" - chat_client = create_test_anthropic_client(mock_anthropic_client) + client = 
create_test_anthropic_client(mock_anthropic_client) mock_message = MagicMock(spec=BetaMessage) mock_message.id = "msg_123" @@ -556,7 +556,7 @@ def test_process_message_with_tool_use(mock_anthropic_client: MagicMock) -> None mock_message.usage = BetaUsage(input_tokens=10, output_tokens=5) mock_message.stop_reason = "tool_use" - response = chat_client._process_message(mock_message, {}) + response = client._process_message(mock_message, {}) assert len(response.messages[0].contents) == 1 assert response.messages[0].contents[0].type == "function_call" @@ -567,10 +567,10 @@ def test_process_message_with_tool_use(mock_anthropic_client: MagicMock) -> None def test_parse_usage_from_anthropic_basic(mock_anthropic_client: MagicMock) -> None: """Test _parse_usage_from_anthropic with basic usage.""" - chat_client = create_test_anthropic_client(mock_anthropic_client) + client = create_test_anthropic_client(mock_anthropic_client) usage = BetaUsage(input_tokens=10, output_tokens=5) - result = chat_client._parse_usage_from_anthropic(usage) + result = client._parse_usage_from_anthropic(usage) assert result is not None assert result["input_token_count"] == 10 @@ -579,19 +579,19 @@ def test_parse_usage_from_anthropic_basic(mock_anthropic_client: MagicMock) -> N def test_parse_usage_from_anthropic_none(mock_anthropic_client: MagicMock) -> None: """Test _parse_usage_from_anthropic with None usage.""" - chat_client = create_test_anthropic_client(mock_anthropic_client) + client = create_test_anthropic_client(mock_anthropic_client) - result = chat_client._parse_usage_from_anthropic(None) + result = client._parse_usage_from_anthropic(None) assert result is None def test_parse_contents_from_anthropic_text(mock_anthropic_client: MagicMock) -> None: """Test _parse_contents_from_anthropic with text content.""" - chat_client = create_test_anthropic_client(mock_anthropic_client) + client = create_test_anthropic_client(mock_anthropic_client) content = [BetaTextBlock(type="text", text="Hello!")] - 
result = chat_client._parse_contents_from_anthropic(content) + result = client._parse_contents_from_anthropic(content) assert len(result) == 1 assert result[0].type == "text" @@ -600,7 +600,7 @@ def test_parse_contents_from_anthropic_text(mock_anthropic_client: MagicMock) -> def test_parse_contents_from_anthropic_tool_use(mock_anthropic_client: MagicMock) -> None: """Test _parse_contents_from_anthropic with tool use.""" - chat_client = create_test_anthropic_client(mock_anthropic_client) + client = create_test_anthropic_client(mock_anthropic_client) content = [ BetaToolUseBlock( @@ -610,7 +610,7 @@ def test_parse_contents_from_anthropic_tool_use(mock_anthropic_client: MagicMock input={"location": "SF"}, ) ] - result = chat_client._parse_contents_from_anthropic(content) + result = client._parse_contents_from_anthropic(content) assert len(result) == 1 assert result[0].type == "function_call" @@ -625,7 +625,7 @@ def test_parse_contents_from_anthropic_input_json_delta_no_duplicate_name(mock_a and subsequent input_json_delta events should have name="" to prevent ag-ui from emitting duplicate ToolCallStartEvents. 
""" - chat_client = create_test_anthropic_client(mock_anthropic_client) + client = create_test_anthropic_client(mock_anthropic_client) # First, simulate a tool_use event that sets _last_call_id_name tool_use_content = MagicMock() @@ -634,7 +634,7 @@ def test_parse_contents_from_anthropic_input_json_delta_no_duplicate_name(mock_a tool_use_content.name = "get_weather" tool_use_content.input = {} - result = chat_client._parse_contents_from_anthropic([tool_use_content]) + result = client._parse_contents_from_anthropic([tool_use_content]) assert len(result) == 1 assert result[0].type == "function_call" assert result[0].call_id == "call_123" @@ -645,7 +645,7 @@ def test_parse_contents_from_anthropic_input_json_delta_no_duplicate_name(mock_a delta_content_1.type = "input_json_delta" delta_content_1.partial_json = '{"location":' - result = chat_client._parse_contents_from_anthropic([delta_content_1]) + result = client._parse_contents_from_anthropic([delta_content_1]) assert len(result) == 1 assert result[0].type == "function_call" assert result[0].call_id == "call_123" @@ -657,7 +657,7 @@ def test_parse_contents_from_anthropic_input_json_delta_no_duplicate_name(mock_a delta_content_2.type = "input_json_delta" delta_content_2.partial_json = '"San Francisco"}' - result = chat_client._parse_contents_from_anthropic([delta_content_2]) + result = client._parse_contents_from_anthropic([delta_content_2]) assert len(result) == 1 assert result[0].type == "function_call" assert result[0].call_id == "call_123" @@ -670,13 +670,13 @@ def test_parse_contents_from_anthropic_input_json_delta_no_duplicate_name(mock_a def test_process_stream_event_simple(mock_anthropic_client: MagicMock) -> None: """Test _process_stream_event with simple mock event.""" - chat_client = create_test_anthropic_client(mock_anthropic_client) + client = create_test_anthropic_client(mock_anthropic_client) # Test with a basic mock event - the actual implementation will handle real events mock_event = MagicMock() 
mock_event.type = "message_stop" - result = chat_client._process_stream_event(mock_event) + result = client._process_stream_event(mock_event) # message_stop events return None assert result is None @@ -684,7 +684,7 @@ def test_process_stream_event_simple(mock_anthropic_client: MagicMock) -> None: async def test_inner_get_response(mock_anthropic_client: MagicMock) -> None: """Test _inner_get_response method.""" - chat_client = create_test_anthropic_client(mock_anthropic_client) + client = create_test_anthropic_client(mock_anthropic_client) # Create a mock message response mock_message = MagicMock(spec=BetaMessage) @@ -699,7 +699,7 @@ async def test_inner_get_response(mock_anthropic_client: MagicMock) -> None: messages = [Message(role="user", text="Hi")] chat_options = ChatOptions(max_tokens=10) - response = await chat_client._inner_get_response( # type: ignore[attr-defined] + response = await client._inner_get_response( # type: ignore[attr-defined] messages=messages, options=chat_options ) @@ -710,7 +710,7 @@ async def test_inner_get_response(mock_anthropic_client: MagicMock) -> None: async def test_inner_get_response_streaming(mock_anthropic_client: MagicMock) -> None: """Test _inner_get_response method with streaming.""" - chat_client = create_test_anthropic_client(mock_anthropic_client) + client = create_test_anthropic_client(mock_anthropic_client) # Create mock streaming response async def mock_stream(): @@ -724,7 +724,7 @@ async def mock_stream(): chat_options = ChatOptions(max_tokens=10) chunks: list[ChatResponseUpdate] = [] - async for chunk in chat_client._inner_get_response( # type: ignore[attr-defined] + async for chunk in client._inner_get_response( # type: ignore[attr-defined] messages=messages, options=chat_options, stream=True ): if chunk: diff --git a/python/packages/azure-ai/agent_framework_azure_ai/_agent_provider.py b/python/packages/azure-ai/agent_framework_azure_ai/_agent_provider.py index c78f359331..b00e08d55c 100644 --- 
a/python/packages/azure-ai/agent_framework_azure_ai/_agent_provider.py +++ b/python/packages/azure-ai/agent_framework_azure_ai/_agent_provider.py @@ -407,7 +407,7 @@ def _to_chat_agent_from_agent( merged_tools = self._merge_tools(agent.tools, provided_tools) return Agent( # type: ignore[return-value] - chat_client=client, + client=client, id=agent.id, name=agent.name, description=agent.description, diff --git a/python/packages/azure-ai/agent_framework_azure_ai/_project_provider.py b/python/packages/azure-ai/agent_framework_azure_ai/_project_provider.py index e9beac3c7b..053527ee3b 100644 --- a/python/packages/azure-ai/agent_framework_azure_ai/_project_provider.py +++ b/python/packages/azure-ai/agent_framework_azure_ai/_project_provider.py @@ -401,7 +401,7 @@ def _to_chat_agent_from_details( merged_tools = self._merge_tools(details.definition.tools, provided_tools) return Agent( # type: ignore[return-value] - chat_client=client, + client=client, id=details.id, name=details.name, description=details.description, diff --git a/python/packages/azure-ai/tests/test_azure_ai_agent_client.py b/python/packages/azure-ai/tests/test_azure_ai_agent_client.py index 2eadc6cafc..6f14255613 100644 --- a/python/packages/azure-ai/tests/test_azure_ai_agent_client.py +++ b/python/packages/azure-ai/tests/test_azure_ai_agent_client.py @@ -127,14 +127,14 @@ def test_azure_ai_settings_init_with_explicit_values() -> None: def test_azure_ai_chat_client_init_with_client(mock_agents_client: MagicMock) -> None: """Test AzureAIAgentClient initialization with existing agents_client.""" - chat_client = create_test_azure_ai_chat_client( + client = create_test_azure_ai_chat_client( mock_agents_client, agent_id="existing-agent-id", thread_id="test-thread-id" ) - assert chat_client.agents_client is mock_agents_client - assert chat_client.agent_id == "existing-agent-id" - assert chat_client.thread_id == "test-thread-id" - assert isinstance(chat_client, SupportsChatGetResponse) + assert 
client.agents_client is mock_agents_client + assert client.agent_id == "existing-agent-id" + assert client.thread_id == "test-thread-id" + assert isinstance(client, SupportsChatGetResponse) def test_azure_ai_chat_client_init_auto_create_client( @@ -145,19 +145,19 @@ def test_azure_ai_chat_client_init_auto_create_client( azure_ai_settings = AzureAISettings(**azure_ai_unit_test_env) # type: ignore # Create client instance directly - chat_client = object.__new__(AzureAIAgentClient) - chat_client.agents_client = mock_agents_client - chat_client.agent_id = None - chat_client.thread_id = None - chat_client._should_close_client = False # type: ignore - chat_client.credential = None - chat_client.model_id = azure_ai_settings.model_deployment_name - chat_client.agent_name = None - chat_client.additional_properties = {} - chat_client.middleware = None + client = object.__new__(AzureAIAgentClient) + client.agents_client = mock_agents_client + client.agent_id = None + client.thread_id = None + client._should_close_client = False # type: ignore + client.credential = None + client.model_id = azure_ai_settings.model_deployment_name + client.agent_name = None + client.additional_properties = {} + client.middleware = None - assert chat_client.agents_client is mock_agents_client - assert chat_client.agent_id is None + assert client.agents_client is mock_agents_client + assert client.agent_id is None def test_azure_ai_chat_client_init_missing_project_endpoint() -> None: @@ -253,7 +253,7 @@ async def test_azure_ai_chat_client_get_agent_id_or_create_with_temperature_and_ ) -> None: """Test _get_agent_id_or_create with temperature and top_p in run_options.""" azure_ai_settings = AzureAISettings(model_deployment_name=azure_ai_unit_test_env["AZURE_AI_MODEL_DEPLOYMENT_NAME"]) - chat_client = create_test_azure_ai_chat_client(mock_agents_client, azure_ai_settings=azure_ai_settings) + client = create_test_azure_ai_chat_client(mock_agents_client, azure_ai_settings=azure_ai_settings) 
run_options = { "model": azure_ai_settings.model_deployment_name, @@ -261,7 +261,7 @@ async def test_azure_ai_chat_client_get_agent_id_or_create_with_temperature_and_ "top_p": 0.9, } - agent_id = await chat_client._get_agent_id_or_create(run_options) # type: ignore + agent_id = await client._get_agent_id_or_create(run_options) # type: ignore assert agent_id == "test-agent-id" # Verify create_agent was called with temperature and top_p parameters @@ -275,12 +275,12 @@ async def test_azure_ai_chat_client_get_agent_id_or_create_existing_agent( mock_agents_client: MagicMock, ) -> None: """Test _get_agent_id_or_create when agent_id is already provided.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="existing-agent-id") + client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="existing-agent-id") - agent_id = await chat_client._get_agent_id_or_create() # type: ignore + agent_id = await client._get_agent_id_or_create() # type: ignore assert agent_id == "existing-agent-id" - assert not chat_client._agent_created + assert not client._agent_created async def test_azure_ai_chat_client_get_agent_id_or_create_create_new( @@ -289,17 +289,17 @@ async def test_azure_ai_chat_client_get_agent_id_or_create_create_new( ) -> None: """Test _get_agent_id_or_create when creating a new agent.""" azure_ai_settings = AzureAISettings(model_deployment_name=azure_ai_unit_test_env["AZURE_AI_MODEL_DEPLOYMENT_NAME"]) - chat_client = create_test_azure_ai_chat_client(mock_agents_client, azure_ai_settings=azure_ai_settings) + client = create_test_azure_ai_chat_client(mock_agents_client, azure_ai_settings=azure_ai_settings) - agent_id = await chat_client._get_agent_id_or_create(run_options={"model": azure_ai_settings.model_deployment_name}) # type: ignore + agent_id = await client._get_agent_id_or_create(run_options={"model": azure_ai_settings.model_deployment_name}) # type: ignore assert agent_id == "test-agent-id" - assert chat_client._agent_created 
+ assert client._agent_created async def test_azure_ai_chat_client_thread_management_through_public_api(mock_agents_client: MagicMock) -> None: """Test thread creation and management through public API.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Mock get_agent to avoid the async error mock_agents_client.get_agent = AsyncMock(return_value=None) @@ -322,7 +322,7 @@ async def empty_async_iter(): messages = [Message(role="user", text="Hello")] # Call without existing thread - should create new one - response = chat_client.get_response(messages, stream=True) + response = client.get_response(messages, stream=True) # Consume the generator to trigger the method execution async for _ in response: pass @@ -336,20 +336,20 @@ async def test_azure_ai_chat_client_get_agent_id_or_create_missing_model( mock_agents_client: MagicMock, azure_ai_unit_test_env: dict[str, str] ) -> None: """Test _get_agent_id_or_create when model_deployment_name is missing.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client) + client = create_test_azure_ai_chat_client(mock_agents_client) with pytest.raises(ServiceInitializationError, match="Model deployment name is required"): - await chat_client._get_agent_id_or_create() # type: ignore + await client._get_agent_id_or_create() # type: ignore async def test_azure_ai_chat_client_prepare_options_basic(mock_agents_client: MagicMock) -> None: """Test _prepare_options with basic ChatOptions.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client) + client = create_test_azure_ai_chat_client(mock_agents_client) messages = [Message(role="user", text="Hello")] chat_options: ChatOptions = {"max_tokens": 100, "temperature": 0.7} - run_options, tool_results = await chat_client._prepare_options(messages, chat_options) # type: ignore + run_options, tool_results = await 
client._prepare_options(messages, chat_options) # type: ignore assert run_options is not None assert tool_results is None @@ -357,11 +357,11 @@ async def test_azure_ai_chat_client_prepare_options_basic(mock_agents_client: Ma async def test_azure_ai_chat_client_prepare_options_no_chat_options(mock_agents_client: MagicMock) -> None: """Test _prepare_options with default ChatOptions.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client) + client = create_test_azure_ai_chat_client(mock_agents_client) messages = [Message(role="user", text="Hello")] - run_options, tool_results = await chat_client._prepare_options(messages, {}) # type: ignore + run_options, tool_results = await client._prepare_options(messages, {}) # type: ignore assert run_options is not None assert tool_results is None @@ -370,7 +370,7 @@ async def test_azure_ai_chat_client_prepare_options_no_chat_options(mock_agents_ async def test_azure_ai_chat_client_prepare_options_with_image_content(mock_agents_client: MagicMock) -> None: """Test _prepare_options with image content.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Mock get_agent mock_agents_client.get_agent = AsyncMock(return_value=None) @@ -378,7 +378,7 @@ async def test_azure_ai_chat_client_prepare_options_with_image_content(mock_agen image_content = Content.from_uri(uri="https://example.com/image.jpg", media_type="image/jpeg") messages = [Message(role="user", contents=[image_content])] - run_options, _ = await chat_client._prepare_options(messages, {}) # type: ignore + run_options, _ = await client._prepare_options(messages, {}) # type: ignore assert "additional_messages" in run_options assert len(run_options["additional_messages"]) == 1 @@ -389,9 +389,9 @@ async def test_azure_ai_chat_client_prepare_options_with_image_content(mock_agen def 
test_azure_ai_chat_client_prepare_tool_outputs_for_azure_ai_none(mock_agents_client: MagicMock) -> None: """Test _prepare_tool_outputs_for_azure_ai with None input.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client) + client = create_test_azure_ai_chat_client(mock_agents_client) - run_id, tool_outputs, tool_approvals = chat_client._prepare_tool_outputs_for_azure_ai(None) # type: ignore + run_id, tool_outputs, tool_approvals = client._prepare_tool_outputs_for_azure_ai(None) # type: ignore assert run_id is None assert tool_outputs is None @@ -400,22 +400,22 @@ def test_azure_ai_chat_client_prepare_tool_outputs_for_azure_ai_none(mock_agents async def test_azure_ai_chat_client_close_client_when_should_close_true(mock_agents_client: MagicMock) -> None: """Test _close_client_if_needed closes agents_client when should_close_client is True.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client) - chat_client._should_close_client = True # type: ignore + client = create_test_azure_ai_chat_client(mock_agents_client) + client._should_close_client = True # type: ignore mock_agents_client.close = AsyncMock() - await chat_client._close_client_if_needed() # type: ignore + await client._close_client_if_needed() # type: ignore mock_agents_client.close.assert_called_once() async def test_azure_ai_chat_client_close_client_when_should_close_false(mock_agents_client: MagicMock) -> None: """Test _close_client_if_needed does not close agents_client when should_close_client is False.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client) - chat_client._should_close_client = False # type: ignore + client = create_test_azure_ai_chat_client(mock_agents_client) + client._should_close_client = False # type: ignore - await chat_client._close_client_if_needed() # type: ignore + await client._close_client_if_needed() # type: ignore mock_agents_client.close.assert_not_called() @@ -424,44 +424,44 @@ def 
test_azure_ai_chat_client_update_agent_name_and_description_when_current_is_ mock_agents_client: MagicMock, ) -> None: """Test _update_agent_name_and_description updates name when current agent_name is None.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client) - chat_client.agent_name = None # type: ignore + client = create_test_azure_ai_chat_client(mock_agents_client) + client.agent_name = None # type: ignore - chat_client._update_agent_name_and_description("NewAgentName", "description") # type: ignore + client._update_agent_name_and_description("NewAgentName", "description") # type: ignore - assert chat_client.agent_name == "NewAgentName" - assert chat_client.agent_description == "description" + assert client.agent_name == "NewAgentName" + assert client.agent_description == "description" def test_azure_ai_chat_client_update_agent_name_and_description_when_current_exists( mock_agents_client: MagicMock, ) -> None: """Test _update_agent_name_and_description does not update when current agent_name exists.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client) - chat_client.agent_name = "ExistingName" # type: ignore - chat_client.agent_description = "ExistingDescription" # type: ignore + client = create_test_azure_ai_chat_client(mock_agents_client) + client.agent_name = "ExistingName" # type: ignore + client.agent_description = "ExistingDescription" # type: ignore - chat_client._update_agent_name_and_description("NewAgentName", "description") # type: ignore + client._update_agent_name_and_description("NewAgentName", "description") # type: ignore - assert chat_client.agent_name == "ExistingName" - assert chat_client.agent_description == "ExistingDescription" + assert client.agent_name == "ExistingName" + assert client.agent_description == "ExistingDescription" def test_azure_ai_chat_client_update_agent_name_and_description_with_none_input(mock_agents_client: MagicMock) -> None: """Test _update_agent_name_and_description with None 
input.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client) - chat_client.agent_name = None # type: ignore - chat_client.agent_description = None # type: ignore + client = create_test_azure_ai_chat_client(mock_agents_client) + client.agent_name = None # type: ignore + client.agent_description = None # type: ignore - chat_client._update_agent_name_and_description(None, None) # type: ignore + client._update_agent_name_and_description(None, None) # type: ignore - assert chat_client.agent_name is None - assert chat_client.agent_description is None + assert client.agent_name is None + assert client.agent_description is None async def test_azure_ai_chat_client_prepare_options_with_messages(mock_agents_client: MagicMock) -> None: """Test _prepare_options with different message types.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client) + client = create_test_azure_ai_chat_client(mock_agents_client) # Test with system message (becomes instruction) messages = [ @@ -469,7 +469,7 @@ async def test_azure_ai_chat_client_prepare_options_with_messages(mock_agents_cl Message(role="user", text="Hello"), ] - run_options, _ = await chat_client._prepare_options(messages, {}) # type: ignore + run_options, _ = await client._prepare_options(messages, {}) # type: ignore assert "instructions" in run_options assert "You are a helpful assistant" in run_options["instructions"] @@ -485,7 +485,7 @@ async def test_azure_ai_chat_client_prepare_options_with_instructions_from_optio This verifies that agent instructions set via as_agent(instructions=...) are properly included in the API call. 
""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") mock_agents_client.get_agent = AsyncMock(return_value=None) messages = [Message(role="user", text="Hello")] @@ -493,7 +493,7 @@ async def test_azure_ai_chat_client_prepare_options_with_instructions_from_optio "instructions": "You are a thoughtful reviewer. Give brief feedback.", } - run_options, _ = await chat_client._prepare_options(messages, chat_options) # type: ignore + run_options, _ = await client._prepare_options(messages, chat_options) # type: ignore assert "instructions" in run_options assert "reviewer" in run_options["instructions"].lower() @@ -507,7 +507,7 @@ async def test_azure_ai_chat_client_prepare_options_merges_instructions_from_mes When instructions come from both system/developer messages AND from options, both should be included in the final instructions. """ - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") mock_agents_client.get_agent = AsyncMock(return_value=None) messages = [ @@ -518,7 +518,7 @@ async def test_azure_ai_chat_client_prepare_options_merges_instructions_from_mes "instructions": "Be concise and constructive in your feedback.", } - run_options, _ = await chat_client._prepare_options(messages, chat_options) # type: ignore + run_options, _ = await client._prepare_options(messages, chat_options) # type: ignore assert "instructions" in run_options instructions_text = run_options["instructions"] @@ -529,13 +529,13 @@ async def test_azure_ai_chat_client_prepare_options_merges_instructions_from_mes async def test_azure_ai_chat_client_inner_get_response(mock_agents_client: MagicMock) -> None: """Test _inner_get_response method.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + client = 
create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") async def mock_streaming_response(): yield ChatResponseUpdate(role="assistant", contents=[Content.from_text("Hello back")]) with ( - patch.object(chat_client, "_inner_get_response", return_value=mock_streaming_response()), + patch.object(client, "_inner_get_response", return_value=mock_streaming_response()), patch("agent_framework.ChatResponse.from_update_generator") as mock_from_generator, ): mock_response = ChatResponse(messages=[Message(role="assistant", text="Hello back")]) @@ -552,7 +552,7 @@ async def test_azure_ai_chat_client_get_agent_id_or_create_with_run_options( ) -> None: """Test _get_agent_id_or_create with run_options containing tools and instructions.""" azure_ai_settings = AzureAISettings(model_deployment_name=azure_ai_unit_test_env["AZURE_AI_MODEL_DEPLOYMENT_NAME"]) - chat_client = create_test_azure_ai_chat_client(mock_agents_client, azure_ai_settings=azure_ai_settings) + client = create_test_azure_ai_chat_client(mock_agents_client, azure_ai_settings=azure_ai_settings) run_options = { "tools": [{"type": "function", "function": {"name": "test_tool"}}], @@ -561,7 +561,7 @@ async def test_azure_ai_chat_client_get_agent_id_or_create_with_run_options( "model": azure_ai_settings.model_deployment_name, } - agent_id = await chat_client._get_agent_id_or_create(run_options) # type: ignore + agent_id = await client._get_agent_id_or_create(run_options) # type: ignore assert agent_id == "test-agent-id" # Verify create_agent was called with run_options parameters @@ -574,7 +574,7 @@ async def test_azure_ai_chat_client_get_agent_id_or_create_with_run_options( async def test_azure_ai_chat_client_prepare_thread_cancels_active_run(mock_agents_client: MagicMock) -> None: """Test _prepare_thread cancels active thread run when provided.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + client = create_test_azure_ai_chat_client(mock_agents_client, 
agent_id="test-agent") mock_thread_run = MagicMock() mock_thread_run.id = "run_123" @@ -582,7 +582,7 @@ async def test_azure_ai_chat_client_prepare_thread_cancels_active_run(mock_agent run_options = {"additional_messages": []} # type: ignore - result = await chat_client._prepare_thread("test-thread", mock_thread_run, run_options) # type: ignore + result = await client._prepare_thread("test-thread", mock_thread_run, run_options) # type: ignore assert result == "test-thread" mock_agents_client.runs.cancel.assert_called_once_with("test-thread", "run_123") @@ -590,7 +590,7 @@ async def test_azure_ai_chat_client_prepare_thread_cancels_active_run(mock_agent def test_azure_ai_chat_client_parse_function_calls_from_azure_ai_basic(mock_agents_client: MagicMock) -> None: """Test _parse_function_calls_from_azure_ai with basic function call.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client) + client = create_test_azure_ai_chat_client(mock_agents_client) mock_tool_call = MagicMock(spec=RequiredFunctionToolCall) mock_tool_call.id = "call_123" @@ -603,7 +603,7 @@ def test_azure_ai_chat_client_parse_function_calls_from_azure_ai_basic(mock_agen mock_event_data = MagicMock(spec=ThreadRun) mock_event_data.required_action = mock_submit_action - result = chat_client._parse_function_calls_from_azure_ai(mock_event_data, "response_123") # type: ignore + result = client._parse_function_calls_from_azure_ai(mock_event_data, "response_123") # type: ignore assert len(result) == 1 assert result[0].type == "function_call" @@ -615,12 +615,12 @@ def test_azure_ai_chat_client_parse_function_calls_from_azure_ai_no_submit_actio mock_agents_client: MagicMock, ) -> None: """Test _parse_function_calls_from_azure_ai when required_action is not SubmitToolOutputsAction.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client) + client = create_test_azure_ai_chat_client(mock_agents_client) mock_event_data = MagicMock(spec=ThreadRun) mock_event_data.required_action = 
MagicMock() - result = chat_client._parse_function_calls_from_azure_ai(mock_event_data, "response_123") # type: ignore + result = client._parse_function_calls_from_azure_ai(mock_event_data, "response_123") # type: ignore assert result == [] @@ -629,7 +629,7 @@ def test_azure_ai_chat_client_parse_function_calls_from_azure_ai_non_function_to mock_agents_client: MagicMock, ) -> None: """Test _parse_function_calls_from_azure_ai with non-function tool call.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client) + client = create_test_azure_ai_chat_client(mock_agents_client) mock_tool_call = MagicMock() @@ -639,7 +639,7 @@ def test_azure_ai_chat_client_parse_function_calls_from_azure_ai_non_function_to mock_event_data = MagicMock(spec=ThreadRun) mock_event_data.required_action = mock_submit_action - result = chat_client._parse_function_calls_from_azure_ai(mock_event_data, "response_123") # type: ignore + result = client._parse_function_calls_from_azure_ai(mock_event_data, "response_123") # type: ignore assert result == [] @@ -648,11 +648,11 @@ async def test_azure_ai_chat_client_prepare_options_with_none_tool_choice( mock_agents_client: MagicMock, ) -> None: """Test _prepare_options with tool_choice set to 'none'.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client) + client = create_test_azure_ai_chat_client(mock_agents_client) chat_options: ChatOptions = {"tool_choice": "none"} - run_options, _ = await chat_client._prepare_options([], chat_options) # type: ignore + run_options, _ = await client._prepare_options([], chat_options) # type: ignore assert run_options["tool_choice"] == AgentsToolChoiceOptionMode.NONE @@ -661,11 +661,11 @@ async def test_azure_ai_chat_client_prepare_options_with_auto_tool_choice( mock_agents_client: MagicMock, ) -> None: """Test _prepare_options with tool_choice set to 'auto'.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client) + client = 
create_test_azure_ai_chat_client(mock_agents_client) chat_options = {"tool_choice": "auto"} - run_options, _ = await chat_client._prepare_options([], chat_options) # type: ignore + run_options, _ = await client._prepare_options([], chat_options) # type: ignore assert run_options["tool_choice"] == AgentsToolChoiceOptionMode.AUTO @@ -674,7 +674,7 @@ async def test_azure_ai_chat_client_prepare_options_tool_choice_required_specifi mock_agents_client: MagicMock, ) -> None: """Test _prepare_options with required tool_choice specifying a specific function name.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client) + client = create_test_azure_ai_chat_client(mock_agents_client) required_tool_mode = {"mode": "required", "required_function_name": "specific_function_name"} @@ -683,7 +683,7 @@ async def test_azure_ai_chat_client_prepare_options_tool_choice_required_specifi chat_options = {"tools": [dict_tool], "tool_choice": required_tool_mode} messages = [Message(role="user", text="Hello")] - run_options, _ = await chat_client._prepare_options(messages, chat_options) # type: ignore + run_options, _ = await client._prepare_options(messages, chat_options) # type: ignore # Verify tool_choice is set to the specific named function assert "tool_choice" in run_options @@ -697,14 +697,14 @@ async def test_azure_ai_chat_client_prepare_options_with_response_format( mock_agents_client: MagicMock, ) -> None: """Test _prepare_options with response_format configured.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client) + client = create_test_azure_ai_chat_client(mock_agents_client) class TestResponseModel(BaseModel): name: str = Field(description="Test name") chat_options: ChatOptions = {"response_format": TestResponseModel} - run_options, _ = await chat_client._prepare_options([], chat_options) # type: ignore + run_options, _ = await client._prepare_options([], chat_options) # type: ignore assert "response_format" in run_options response_format = 
run_options["response_format"] @@ -714,15 +714,15 @@ class TestResponseModel(BaseModel): def test_azure_ai_chat_client_service_url_method(mock_agents_client: MagicMock) -> None: """Test service_url method returns endpoint.""" mock_agents_client._config.endpoint = "https://test-endpoint.com/" - chat_client = create_test_azure_ai_chat_client(mock_agents_client) + client = create_test_azure_ai_chat_client(mock_agents_client) - url = chat_client.service_url() + url = client.service_url() assert url == "https://test-endpoint.com/" async def test_azure_ai_chat_client_prepare_options_mcp_never_require(mock_agents_client: MagicMock) -> None: """Test _prepare_options with HostedMCPTool having never_require approval mode.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client) + client = create_test_azure_ai_chat_client(mock_agents_client) mcp_tool = HostedMCPTool(name="Test MCP Tool", url="https://example.com/mcp", approval_mode="never_require") @@ -734,7 +734,7 @@ async def test_azure_ai_chat_client_prepare_options_mcp_never_require(mock_agent mock_mcp_tool_instance.definitions = [{"type": "mcp", "name": "test_mcp"}] mock_mcp_tool_class.return_value = mock_mcp_tool_instance - run_options, _ = await chat_client._prepare_options(messages, chat_options) # type: ignore + run_options, _ = await client._prepare_options(messages, chat_options) # type: ignore # Verify tool_resources is created with correct MCP approval structure assert "tool_resources" in run_options, ( @@ -750,7 +750,7 @@ async def test_azure_ai_chat_client_prepare_options_mcp_never_require(mock_agent async def test_azure_ai_chat_client_prepare_options_mcp_with_headers(mock_agents_client: MagicMock) -> None: """Test _prepare_options with HostedMCPTool having headers.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client) + client = create_test_azure_ai_chat_client(mock_agents_client) # Test with headers headers = {"Authorization": "Bearer DUMMY_TOKEN", "X-API-Key": "DUMMY_KEY"} @@ 
-766,7 +766,7 @@ async def test_azure_ai_chat_client_prepare_options_mcp_with_headers(mock_agents mock_mcp_tool_instance.definitions = [{"type": "mcp", "name": "test_mcp"}] mock_mcp_tool_class.return_value = mock_mcp_tool_instance - run_options, _ = await chat_client._prepare_options(messages, chat_options) # type: ignore + run_options, _ = await client._prepare_options(messages, chat_options) # type: ignore # Verify tool_resources is created with headers assert "tool_resources" in run_options @@ -784,7 +784,7 @@ async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_web_search_bing_g ) -> None: """Test _prepare_tools_for_azure_ai with HostedWebSearchTool using Bing Grounding.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") web_search_tool = HostedWebSearchTool( additional_properties={ @@ -802,7 +802,7 @@ async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_web_search_bing_g mock_bing_tool.definitions = [{"type": "bing_grounding"}] mock_bing_grounding.return_value = mock_bing_tool - result = await chat_client._prepare_tools_for_azure_ai([web_search_tool]) # type: ignore + result = await client._prepare_tools_for_azure_ai([web_search_tool]) # type: ignore assert len(result) == 1 assert result[0] == {"type": "bing_grounding"} @@ -819,7 +819,7 @@ async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_web_search_bing_g ) -> None: """Test _prepare_tools_... 
with HostedWebSearchTool using Bing Grounding with connection_id (no HTTP call).""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") web_search_tool = HostedWebSearchTool( additional_properties={ @@ -834,7 +834,7 @@ async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_web_search_bing_g mock_bing_tool.definitions = [{"type": "bing_grounding"}] mock_bing_grounding.return_value = mock_bing_tool - result = await chat_client._prepare_tools_for_azure_ai([web_search_tool]) # type: ignore + result = await client._prepare_tools_for_azure_ai([web_search_tool]) # type: ignore assert len(result) == 1 assert result[0] == {"type": "bing_grounding"} @@ -846,7 +846,7 @@ async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_web_search_custom ) -> None: """Test _prepare_tools_for_azure_ai with HostedWebSearchTool using Custom Bing Search.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") web_search_tool = HostedWebSearchTool( additional_properties={ @@ -862,7 +862,7 @@ async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_web_search_custom mock_custom_tool.definitions = [{"type": "bing_custom_search"}] mock_custom_bing.return_value = mock_custom_tool - result = await chat_client._prepare_tools_for_azure_ai([web_search_tool]) # type: ignore + result = await client._prepare_tools_for_azure_ai([web_search_tool]) # type: ignore assert len(result) == 1 assert result[0] == {"type": "bing_custom_search"} @@ -873,7 +873,7 @@ async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_file_search_with_ ) -> None: """Test _prepare_tools_for_azure_ai with HostedFileSearchTool using vector stores.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + client = 
create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") vector_store_input = Content.from_hosted_vector_store(vector_store_id="vs-123") file_search_tool = HostedFileSearchTool(inputs=[vector_store_input]) @@ -886,7 +886,7 @@ async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_file_search_with_ mock_file_search.return_value = mock_file_tool run_options = {} - result = await chat_client._prepare_tools_for_azure_ai([file_search_tool], run_options) # type: ignore + result = await client._prepare_tools_for_azure_ai([file_search_tool], run_options) # type: ignore assert len(result) == 1 assert result[0] == {"type": "file_search"} @@ -898,13 +898,13 @@ async def test_azure_ai_chat_client_create_agent_stream_submit_tool_approvals( mock_agents_client: MagicMock, ) -> None: """Test _create_agent_stream with tool approvals submission path.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Mock active thread run that matches the tool run ID mock_thread_run = MagicMock() mock_thread_run.thread_id = "test-thread" mock_thread_run.id = "test-run-id" - chat_client._get_active_thread_run = AsyncMock(return_value=mock_thread_run) # type: ignore + client._get_active_thread_run = AsyncMock(return_value=mock_thread_run) # type: ignore # Mock required action results with approval response that matches run ID approval_response = Content.from_function_approval_response( @@ -920,7 +920,7 @@ async def test_azure_ai_chat_client_create_agent_stream_submit_tool_approvals( mock_agents_client.runs.submit_tool_outputs_stream = AsyncMock() with patch("azure.ai.agents.models.AsyncAgentEventHandler", return_value=mock_handler): - stream, final_thread_id = await chat_client._create_agent_stream( # type: ignore + stream, final_thread_id = await client._create_agent_stream( # type: ignore "test-agent", {"thread_id": "test-thread"}, 
[approval_response] ) @@ -938,7 +938,7 @@ async def test_azure_ai_chat_client_create_agent_stream_submit_tool_approvals( async def test_azure_ai_chat_client_get_active_thread_run_with_active_run(mock_agents_client: MagicMock) -> None: """Test _get_active_thread_run when there's an active run.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Mock an active run mock_run = MagicMock() @@ -949,7 +949,7 @@ async def mock_list_runs(*args, **kwargs): # type: ignore mock_agents_client.runs.list = mock_list_runs - result = await chat_client._get_active_thread_run("thread-123") # type: ignore + result = await client._get_active_thread_run("thread-123") # type: ignore assert result == mock_run @@ -957,7 +957,7 @@ async def mock_list_runs(*args, **kwargs): # type: ignore async def test_azure_ai_chat_client_get_active_thread_run_no_active_run(mock_agents_client: MagicMock) -> None: """Test _get_active_thread_run when there's no active run.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Mock a completed run (not active) mock_run = MagicMock() @@ -968,16 +968,16 @@ async def mock_list_runs(*args, **kwargs): # type: ignore mock_agents_client.runs.list = mock_list_runs - result = await chat_client._get_active_thread_run("thread-123") # type: ignore + result = await client._get_active_thread_run("thread-123") # type: ignore assert result is None async def test_azure_ai_chat_client_get_active_thread_run_no_thread(mock_agents_client: MagicMock) -> None: """Test _get_active_thread_run with None thread_id.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") - result = await 
chat_client._get_active_thread_run(None) # type: ignore + result = await client._get_active_thread_run(None) # type: ignore assert result is None # Should not call list since thread_id is None @@ -986,14 +986,14 @@ async def test_azure_ai_chat_client_get_active_thread_run_no_thread(mock_agents_ async def test_azure_ai_chat_client_service_url(mock_agents_client: MagicMock) -> None: """Test service_url method.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Mock the config endpoint mock_config = MagicMock() mock_config.endpoint = "https://test-endpoint.com/" mock_agents_client._config = mock_config - result = chat_client.service_url() + result = client.service_url() assert result == "https://test-endpoint.com/" @@ -1002,12 +1002,12 @@ async def test_azure_ai_chat_client_prepare_tool_outputs_for_azure_tool_result( mock_agents_client: MagicMock, ) -> None: """Test _prepare_tool_outputs_for_azure_ai with FunctionResultContent.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Test with simple result function_result = Content.from_function_result(call_id='["run_123", "call_456"]', result="Simple result") - run_id, tool_outputs, tool_approvals = chat_client._prepare_tool_outputs_for_azure_ai([function_result]) # type: ignore + run_id, tool_outputs, tool_approvals = client._prepare_tool_outputs_for_azure_ai([function_result]) # type: ignore assert run_id == "run_123" assert tool_approvals is None @@ -1020,25 +1020,25 @@ async def test_azure_ai_chat_client_prepare_tool_outputs_for_azure_tool_result( async def test_azure_ai_chat_client_convert_required_action_invalid_call_id(mock_agents_client: MagicMock) -> None: """Test _prepare_tool_outputs_for_azure_ai with invalid call_id format.""" - chat_client = 
create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Invalid call_id format - should raise JSONDecodeError function_result = Content.from_function_result(call_id="invalid_json", result="result") with pytest.raises(json.JSONDecodeError): - chat_client._prepare_tool_outputs_for_azure_ai([function_result]) # type: ignore + client._prepare_tool_outputs_for_azure_ai([function_result]) # type: ignore async def test_azure_ai_chat_client_convert_required_action_invalid_structure( mock_agents_client: MagicMock, ) -> None: """Test _prepare_tool_outputs_for_azure_ai with invalid call_id structure.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Valid JSON but invalid structure (missing second element) function_result = Content.from_function_result(call_id='["run_123"]', result="result") - run_id, tool_outputs, tool_approvals = chat_client._prepare_tool_outputs_for_azure_ai([function_result]) # type: ignore + run_id, tool_outputs, tool_approvals = client._prepare_tool_outputs_for_azure_ai([function_result]) # type: ignore # Should return None values when structure is invalid assert run_id is None @@ -1056,13 +1056,13 @@ def __init__(self, name: str, value: int): self.name = name self.value = value - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Test with BaseModel result mock_result = MockResult(name="test", value=42) function_result = Content.from_function_result(call_id='["run_123", "call_456"]', result=mock_result) - run_id, tool_outputs, tool_approvals = chat_client._prepare_tool_outputs_for_azure_ai([function_result]) # type: ignore + run_id, tool_outputs, tool_approvals = 
client._prepare_tool_outputs_for_azure_ai([function_result]) # type: ignore assert run_id == "run_123" assert tool_approvals is None @@ -1083,14 +1083,14 @@ class MockResult(SerializationMixin): def __init__(self, data: str): self.data = data - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Test with multiple results - mix of BaseModel and regular objects mock_basemodel = MockResult(data="model_data") results_list = [mock_basemodel, {"key": "value"}, "string_result"] function_result = Content.from_function_result(call_id='["run_123", "call_456"]', result=results_list) - run_id, tool_outputs, tool_approvals = chat_client._prepare_tool_outputs_for_azure_ai([function_result]) # type: ignore + run_id, tool_outputs, tool_approvals = client._prepare_tool_outputs_for_azure_ai([function_result]) # type: ignore assert run_id == "run_123" assert tool_outputs is not None @@ -1111,7 +1111,7 @@ async def test_azure_ai_chat_client_convert_required_action_approval_response( mock_agents_client: MagicMock, ) -> None: """Test _prepare_tool_outputs_for_azure_ai with FunctionApprovalResponseContent.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Test with approval response - need to provide required fields approval_response = Content.from_function_approval_response( @@ -1122,7 +1122,7 @@ async def test_azure_ai_chat_client_convert_required_action_approval_response( approved=True, ) - run_id, tool_outputs, tool_approvals = chat_client._prepare_tool_outputs_for_azure_ai([approval_response]) # type: ignore + run_id, tool_outputs, tool_approvals = client._prepare_tool_outputs_for_azure_ai([approval_response]) # type: ignore assert run_id == "run_123" assert tool_outputs is None @@ -1136,7 +1136,7 @@ async def 
test_azure_ai_chat_client_parse_function_calls_from_azure_ai_approval_ mock_agents_client: MagicMock, ) -> None: """Test _parse_function_calls_from_azure_ai with approval action.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Mock SubmitToolApprovalAction with RequiredMcpToolCall mock_tool_call = MagicMock(spec=RequiredMcpToolCall) @@ -1150,7 +1150,7 @@ async def test_azure_ai_chat_client_parse_function_calls_from_azure_ai_approval_ mock_event_data = MagicMock(spec=ThreadRun) mock_event_data.required_action = mock_approval_action - result = chat_client._parse_function_calls_from_azure_ai(mock_event_data, "response_123") # type: ignore + result = client._parse_function_calls_from_azure_ai(mock_event_data, "response_123") # type: ignore assert len(result) == 1 assert result[0].type == "function_approval_request" @@ -1164,12 +1164,12 @@ async def test_azure_ai_chat_client_get_agent_id_or_create_with_agent_name( ) -> None: """Test _get_agent_id_or_create uses default name when no agent_name set.""" azure_ai_settings = AzureAISettings(model_deployment_name=azure_ai_unit_test_env["AZURE_AI_MODEL_DEPLOYMENT_NAME"]) - chat_client = create_test_azure_ai_chat_client(mock_agents_client, azure_ai_settings=azure_ai_settings) + client = create_test_azure_ai_chat_client(mock_agents_client, azure_ai_settings=azure_ai_settings) # Ensure agent_name is None to test the default - chat_client.agent_name = None # type: ignore + client.agent_name = None # type: ignore - agent_id = await chat_client._get_agent_id_or_create(run_options={"model": azure_ai_settings.model_deployment_name}) # type: ignore + agent_id = await client._get_agent_id_or_create(run_options={"model": azure_ai_settings.model_deployment_name}) # type: ignore assert agent_id == "test-agent-id" # Verify create_agent was called with default "UnnamedAgent" @@ -1183,12 +1183,12 @@ async def 
test_azure_ai_chat_client_get_agent_id_or_create_with_response_format( ) -> None: """Test _get_agent_id_or_create with response_format in run_options.""" azure_ai_settings = AzureAISettings(model_deployment_name=azure_ai_unit_test_env["AZURE_AI_MODEL_DEPLOYMENT_NAME"]) - chat_client = create_test_azure_ai_chat_client(mock_agents_client, azure_ai_settings=azure_ai_settings) + client = create_test_azure_ai_chat_client(mock_agents_client, azure_ai_settings=azure_ai_settings) # Test with response_format in run_options run_options = {"response_format": {"type": "json_object"}, "model": azure_ai_settings.model_deployment_name} - agent_id = await chat_client._get_agent_id_or_create(run_options) # type: ignore + agent_id = await client._get_agent_id_or_create(run_options) # type: ignore assert agent_id == "test-agent-id" # Verify create_agent was called with response_format @@ -1202,7 +1202,7 @@ async def test_azure_ai_chat_client_get_agent_id_or_create_with_tool_resources( ) -> None: """Test _get_agent_id_or_create with tool_resources in run_options.""" azure_ai_settings = AzureAISettings(model_deployment_name=azure_ai_unit_test_env["AZURE_AI_MODEL_DEPLOYMENT_NAME"]) - chat_client = create_test_azure_ai_chat_client(mock_agents_client, azure_ai_settings=azure_ai_settings) + client = create_test_azure_ai_chat_client(mock_agents_client, azure_ai_settings=azure_ai_settings) # Test with tool_resources in run_options run_options = { @@ -1210,7 +1210,7 @@ async def test_azure_ai_chat_client_get_agent_id_or_create_with_tool_resources( "model": azure_ai_settings.model_deployment_name, } - agent_id = await chat_client._get_agent_id_or_create(run_options) # type: ignore + agent_id = await client._get_agent_id_or_create(run_options) # type: ignore assert agent_id == "test-agent-id" # Verify create_agent was called with tool_resources @@ -1223,13 +1223,13 @@ async def test_azure_ai_chat_client_create_agent_stream_submit_tool_outputs( mock_agents_client: MagicMock, ) -> None: """Test 
_create_agent_stream with tool outputs submission path.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Mock active thread run that matches the tool run ID mock_thread_run = MagicMock() mock_thread_run.thread_id = "test-thread" mock_thread_run.id = "test-run-id" - chat_client._get_active_thread_run = AsyncMock(return_value=mock_thread_run) # type: ignore + client._get_active_thread_run = AsyncMock(return_value=mock_thread_run) # type: ignore # Mock required action results with matching run ID function_result = Content.from_function_result(call_id='["test-run-id", "test-call-id"]', result="test result") @@ -1239,7 +1239,7 @@ async def test_azure_ai_chat_client_create_agent_stream_submit_tool_outputs( mock_agents_client.runs.submit_tool_outputs_stream = AsyncMock() with patch("azure.ai.agents.models.AsyncAgentEventHandler", return_value=mock_handler): - stream, final_thread_id = await chat_client._create_agent_stream( # type: ignore + stream, final_thread_id = await client._create_agent_stream( # type: ignore agent_id="test-agent", run_options={"thread_id": "test-thread"}, required_action_results=[function_result] ) @@ -1250,7 +1250,7 @@ async def test_azure_ai_chat_client_create_agent_stream_submit_tool_outputs( def test_azure_ai_chat_client_extract_url_citations_with_citations(mock_agents_client: MagicMock) -> None: """Test _extract_url_citations with MessageDeltaChunk containing URL citations.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Create mock URL citation annotation mock_url_citation = MagicMock() @@ -1278,7 +1278,7 @@ def test_azure_ai_chat_client_extract_url_citations_with_citations(mock_agents_c mock_chunk.delta = mock_delta # Call the method with empty azure_search_tool_calls - 
citations = chat_client._extract_url_citations(mock_chunk, []) # type: ignore + citations = client._extract_url_citations(mock_chunk, []) # type: ignore # Verify results assert len(citations) == 1 @@ -1296,7 +1296,7 @@ def test_azure_ai_chat_client_extract_file_path_contents_with_file_path_annotati mock_agents_client: MagicMock, ) -> None: """Test _extract_file_path_contents with MessageDeltaChunk containing file path annotation.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Create mock file_path annotation mock_file_path = MagicMock() @@ -1321,7 +1321,7 @@ def test_azure_ai_chat_client_extract_file_path_contents_with_file_path_annotati mock_chunk.delta = mock_delta # Call the method - file_contents = chat_client._extract_file_path_contents(mock_chunk) + file_contents = client._extract_file_path_contents(mock_chunk) # Verify results assert len(file_contents) == 1 @@ -1333,7 +1333,7 @@ def test_azure_ai_chat_client_extract_file_path_contents_with_file_citation_anno mock_agents_client: MagicMock, ) -> None: """Test _extract_file_path_contents with MessageDeltaChunk containing file citation annotation.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Create mock file_citation annotation mock_file_citation = MagicMock() @@ -1358,7 +1358,7 @@ def test_azure_ai_chat_client_extract_file_path_contents_with_file_citation_anno mock_chunk.delta = mock_delta # Call the method - file_contents = chat_client._extract_file_path_contents(mock_chunk) + file_contents = client._extract_file_path_contents(mock_chunk) # Verify results assert len(file_contents) == 1 @@ -1370,7 +1370,7 @@ def test_azure_ai_chat_client_extract_file_path_contents_empty_annotations( mock_agents_client: MagicMock, ) -> None: """Test 
_extract_file_path_contents with no annotations returns empty list.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Create mock text content with no annotations mock_text = MagicMock() @@ -1388,7 +1388,7 @@ def test_azure_ai_chat_client_extract_file_path_contents_empty_annotations( mock_chunk.delta = mock_delta # Call the method - file_contents = chat_client._extract_file_path_contents(mock_chunk) + file_contents = client._extract_file_path_contents(mock_chunk) # Verify results assert len(file_contents) == 0 @@ -1511,7 +1511,7 @@ async def test_azure_ai_chat_client_streaming_tools() -> None: async def test_azure_ai_chat_client_agent_basic_run() -> None: """Test Agent basic run functionality with AzureAIAgentClient.""" async with Agent( - chat_client=AzureAIAgentClient(credential=AzureCliCredential()), + client=AzureAIAgentClient(credential=AzureCliCredential()), ) as agent: # Run a simple query response = await agent.run("Hello! 
Please respond with 'Hello World' exactly.") @@ -1528,7 +1528,7 @@ async def test_azure_ai_chat_client_agent_basic_run() -> None: async def test_azure_ai_chat_client_agent_basic_run_streaming() -> None: """Test Agent basic streaming functionality with AzureAIAgentClient.""" async with Agent( - chat_client=AzureAIAgentClient(credential=AzureCliCredential()), + client=AzureAIAgentClient(credential=AzureCliCredential()), ) as agent: # Run streaming query full_message: str = "" @@ -1548,7 +1548,7 @@ async def test_azure_ai_chat_client_agent_basic_run_streaming() -> None: async def test_azure_ai_chat_client_agent_thread_persistence() -> None: """Test Agent thread persistence across runs with AzureAIAgentClient.""" async with Agent( - chat_client=AzureAIAgentClient(credential=AzureCliCredential()), + client=AzureAIAgentClient(credential=AzureCliCredential()), instructions="You are a helpful assistant with good memory.", ) as agent: # Create a new thread that will be reused @@ -1574,7 +1574,7 @@ async def test_azure_ai_chat_client_agent_thread_persistence() -> None: async def test_azure_ai_chat_client_agent_existing_thread_id() -> None: """Test Agent existing thread ID functionality with AzureAIAgentClient.""" async with Agent( - chat_client=AzureAIAgentClient(credential=AzureCliCredential()), + client=AzureAIAgentClient(credential=AzureCliCredential()), instructions="You are a helpful assistant with good memory.", ) as first_agent: # Start a conversation and get the thread ID @@ -1591,7 +1591,7 @@ async def test_azure_ai_chat_client_agent_existing_thread_id() -> None: # Now continue with the same thread ID in a new agent instance async with Agent( - chat_client=AzureAIAgentClient(thread_id=existing_thread_id, credential=AzureCliCredential()), + client=AzureAIAgentClient(thread_id=existing_thread_id, credential=AzureCliCredential()), instructions="You are a helpful assistant with good memory.", ) as second_agent: # Create a thread with the existing ID @@ -1613,7 +1613,7 
@@ async def test_azure_ai_chat_client_agent_code_interpreter(): """Test Agent with code interpreter through AzureAIAgentClient.""" async with Agent( - chat_client=AzureAIAgentClient(credential=AzureCliCredential()), + client=AzureAIAgentClient(credential=AzureCliCredential()), instructions="You are a helpful assistant that can write and execute Python code.", tools=[HostedCodeInterpreterTool()], ) as agent: @@ -1650,7 +1650,7 @@ async def test_azure_ai_chat_client_agent_file_search(): ) async with Agent( - chat_client=client, + client=client, instructions="You are a helpful assistant that can search through uploaded employee files.", tools=[file_search_tool], ) as agent: @@ -1689,7 +1689,7 @@ async def test_azure_ai_chat_client_agent_hosted_mcp_tool() -> None: ) async with Agent( - chat_client=AzureAIAgentClient(credential=AzureCliCredential()), + client=AzureAIAgentClient(credential=AzureCliCredential()), instructions="You are a helpful assistant that can help with microsoft documentation questions.", tools=[mcp_tool], ) as agent: @@ -1716,7 +1716,7 @@ async def test_azure_ai_chat_client_agent_hosted_mcp_tool() -> None: async def test_azure_ai_chat_client_agent_level_tool_persistence(): """Test that agent-level tools persist across multiple runs with AzureAIAgentClient.""" async with Agent( - chat_client=AzureAIAgentClient(credential=AzureCliCredential()), + client=AzureAIAgentClient(credential=AzureCliCredential()), instructions="You are a helpful assistant that uses available tools.", tools=[get_weather], ) as agent: @@ -1741,7 +1741,7 @@ async def test_azure_ai_chat_client_agent_level_tool_persistence(): async def test_azure_ai_chat_client_agent_chat_options_run_level() -> None: """Test ChatOptions parameter coverage at run level.""" async with Agent( - chat_client=AzureAIAgentClient(credential=AzureCliCredential()), + client=AzureAIAgentClient(credential=AzureCliCredential()), instructions="You are a helpful assistant.", ) as agent: response = await 
agent.run( @@ -1765,7 +1765,7 @@ async def test_azure_ai_chat_client_agent_chat_options_run_level() -> None: async def test_azure_ai_chat_client_agent_chat_options_agent_level() -> None: """Test ChatOptions parameter coverage agent level.""" async with Agent( - chat_client=AzureAIAgentClient(credential=AzureCliCredential()), + client=AzureAIAgentClient(credential=AzureCliCredential()), instructions="You are a helpful assistant.", tools=[get_weather], default_options={ @@ -1789,59 +1789,59 @@ async def test_azure_ai_chat_client_cleanup_agent_when_enabled_and_created( mock_agents_client: MagicMock, ) -> None: """Test that agent is cleaned up when should_cleanup_agent=True and agent was created by client.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id=None, should_cleanup_agent=True) + client = create_test_azure_ai_chat_client(mock_agents_client, agent_id=None, should_cleanup_agent=True) # Simulate agent creation - chat_client.agent_id = "created-agent-id" - chat_client._agent_created = True # type: ignore + client.agent_id = "created-agent-id" + client._agent_created = True # type: ignore - await chat_client._cleanup_agent_if_needed() # type: ignore + await client._cleanup_agent_if_needed() # type: ignore # Verify agent was deleted mock_agents_client.delete_agent.assert_called_once_with("created-agent-id") - assert chat_client.agent_id is None - assert chat_client._agent_created is False # type: ignore + assert client.agent_id is None + assert client._agent_created is False # type: ignore async def test_azure_ai_chat_client_no_cleanup_when_disabled( mock_agents_client: MagicMock, ) -> None: """Test that agent is not cleaned up when should_cleanup_agent=False.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id=None, should_cleanup_agent=False) + client = create_test_azure_ai_chat_client(mock_agents_client, agent_id=None, should_cleanup_agent=False) # Simulate agent creation - chat_client.agent_id = 
"created-agent-id" - chat_client._agent_created = True + client.agent_id = "created-agent-id" + client._agent_created = True - await chat_client._cleanup_agent_if_needed() # type: ignore + await client._cleanup_agent_if_needed() # type: ignore # Verify agent was NOT deleted mock_agents_client.delete_agent.assert_not_called() - assert chat_client.agent_id == "created-agent-id" - assert chat_client._agent_created is True + assert client.agent_id == "created-agent-id" + assert client._agent_created is True async def test_azure_ai_chat_client_no_cleanup_when_agent_not_created_by_client( mock_agents_client: MagicMock, ) -> None: """Test that agent is not cleaned up when it was not created by this client instance.""" - chat_client = create_test_azure_ai_chat_client( + client = create_test_azure_ai_chat_client( mock_agents_client, agent_id="existing-agent-id", should_cleanup_agent=True ) # Agent exists but was not created by this client (_agent_created = False) - assert chat_client._agent_created is False # type: ignore + assert client._agent_created is False # type: ignore - await chat_client._cleanup_agent_if_needed() # type: ignore + await client._cleanup_agent_if_needed() # type: ignore # Verify agent was NOT deleted mock_agents_client.delete_agent.assert_not_called() - assert chat_client.agent_id == "existing-agent-id" + assert client.agent_id == "existing-agent-id" def test_azure_ai_chat_client_capture_azure_search_tool_calls(mock_agents_client: MagicMock) -> None: """Test _capture_azure_search_tool_calls method.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client) + client = create_test_azure_ai_chat_client(mock_agents_client) # Mock Azure AI Search tool call mock_tool_call = MagicMock() @@ -1855,7 +1855,7 @@ def test_azure_ai_chat_client_capture_azure_search_tool_calls(mock_agents_client # Call the method with a list to capture tool calls azure_search_tool_calls: list[dict[str, Any]] = [] - 
chat_client._capture_azure_search_tool_calls(mock_step_data, azure_search_tool_calls) # type: ignore + client._capture_azure_search_tool_calls(mock_step_data, azure_search_tool_calls) # type: ignore # Verify tool call was captured assert len(azure_search_tool_calls) == 1 @@ -1869,10 +1869,10 @@ def test_azure_ai_chat_client_get_real_url_from_citation_reference_no_tool_calls mock_agents_client: MagicMock, ) -> None: """Test _get_real_url_from_citation_reference with no tool calls.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client) + client = create_test_azure_ai_chat_client(mock_agents_client) # No tool calls - pass empty list - result = chat_client._get_real_url_from_citation_reference("doc_1", []) # type: ignore + result = client._get_real_url_from_citation_reference("doc_1", []) # type: ignore assert result == "doc_1" @@ -1880,51 +1880,51 @@ def test_azure_ai_chat_client_get_real_url_from_citation_reference_invalid_outpu mock_agents_client: MagicMock, ) -> None: """Test _get_real_url_from_citation_reference with invalid output format.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client) + client = create_test_azure_ai_chat_client(mock_agents_client) # Tool call with invalid output format azure_search_tool_calls = [ {"id": "call_123", "type": "azure_ai_search", "azure_ai_search": {"output": "invalid_json_format"}} ] - result = chat_client._get_real_url_from_citation_reference("doc_1", azure_search_tool_calls) # type: ignore + result = client._get_real_url_from_citation_reference("doc_1", azure_search_tool_calls) # type: ignore assert result == "doc_1" async def test_azure_ai_chat_client_context_manager(mock_agents_client: MagicMock) -> None: """Test AzureAIAgentClient as async context manager.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client) + client = create_test_azure_ai_chat_client(mock_agents_client) # Mock close method to avoid actual cleanup - chat_client.close = AsyncMock() + client.close = 
AsyncMock()
 
-    async with chat_client as client:
-        assert client is chat_client
+    async with client as ctx_client:
+        assert ctx_client is client
 
     # Verify close was called on exit
-    chat_client.close.assert_called_once()
+    client.close.assert_called_once()
 
 
 async def test_azure_ai_chat_client_close_method(mock_agents_client: MagicMock) -> None:
     """Test AzureAIAgentClient close method."""
-    chat_client = create_test_azure_ai_chat_client(mock_agents_client)
+    client = create_test_azure_ai_chat_client(mock_agents_client)
 
     # Mock cleanup methods
-    chat_client._cleanup_agent_if_needed = AsyncMock()
-    chat_client._close_client_if_needed = AsyncMock()
+    client._cleanup_agent_if_needed = AsyncMock()
+    client._close_client_if_needed = AsyncMock()
 
-    await chat_client.close()
+    await client.close()
 
     # Verify cleanup methods were called
-    chat_client._cleanup_agent_if_needed.assert_called_once()
-    chat_client._close_client_if_needed.assert_called_once()
+    client._cleanup_agent_if_needed.assert_called_once()
+    client._close_client_if_needed.assert_called_once()
 
 
 def test_azure_ai_chat_client_extract_url_citations_with_azure_search_enhanced_url(
     mock_agents_client: MagicMock,
 ) -> None:
     """Test _extract_url_citations with Azure AI Search URL enhancement."""
-    chat_client = create_test_azure_ai_chat_client(mock_agents_client)
+    client = create_test_azure_ai_chat_client(mock_agents_client)
 
     # Add Azure Search tool calls for URL enhancement
     azure_search_tool_calls = [
@@ -1961,7 +1961,7 @@ def test_azure_ai_chat_client_extract_url_citations_with_azure_search_enhanced_u
     mock_chunk = MagicMock(spec=MessageDeltaChunk)
     mock_chunk.delta = mock_delta
 
-    citations = chat_client._extract_url_citations(mock_chunk, azure_search_tool_calls)  # type: ignore
+    citations = client._extract_url_citations(mock_chunk, azure_search_tool_calls)  # type: ignore
 
     # Verify real URL was used
     assert len(citations) == 1
@@ -2006,7 +2006,7 @@ async def test_azure_ai_chat_client_prepare_options_with_mapping_response_format
mock_agents_client: MagicMock, ) -> None: """Test _prepare_options with Mapping-based response_format (runtime JSON schema).""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client) + client = create_test_azure_ai_chat_client(mock_agents_client) # Runtime JSON schema dict response_format_dict = { @@ -2019,7 +2019,7 @@ async def test_azure_ai_chat_client_prepare_options_with_mapping_response_format chat_options: ChatOptions = {"response_format": response_format_dict} # type: ignore[typeddict-item] - run_options, _ = await chat_client._prepare_options([], chat_options) # type: ignore + run_options, _ = await client._prepare_options([], chat_options) # type: ignore assert "response_format" in run_options # Should pass through as-is for Mapping types @@ -2030,20 +2030,20 @@ async def test_azure_ai_chat_client_prepare_options_with_invalid_response_format mock_agents_client: MagicMock, ) -> None: """Test _prepare_options with invalid response_format raises error.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client) + client = create_test_azure_ai_chat_client(mock_agents_client) # Invalid response_format (not BaseModel or Mapping) chat_options: ChatOptions = {"response_format": "invalid_format"} # type: ignore[typeddict-item] with pytest.raises(ServiceInvalidRequestError, match="response_format must be a Pydantic BaseModel"): - await chat_client._prepare_options([], chat_options) # type: ignore + await client._prepare_options([], chat_options) # type: ignore async def test_azure_ai_chat_client_prepare_tool_definitions_with_agent_tool_resources( mock_agents_client: MagicMock, ) -> None: """Test _prepare_tool_definitions_and_resources copies tool_resources from agent definition.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Create mock agent definition with tool_resources mock_agent_definition = 
MagicMock() @@ -2053,7 +2053,7 @@ async def test_azure_ai_chat_client_prepare_tool_definitions_with_agent_tool_res run_options: dict[str, Any] = {} options: dict[str, Any] = {} - await chat_client._prepare_tool_definitions_and_resources(options, mock_agent_definition, run_options) # type: ignore + await client._prepare_tool_definitions_and_resources(options, mock_agent_definition, run_options) # type: ignore # Verify tool_resources was copied to run_options assert "tool_resources" in run_options @@ -2064,7 +2064,7 @@ def test_azure_ai_chat_client_prepare_mcp_resources_with_dict_approval_mode( mock_agents_client: MagicMock, ) -> None: """Test _prepare_mcp_resources with dict-based approval mode (always_require_approval).""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client) + client = create_test_azure_ai_chat_client(mock_agents_client) # MCP tool with dict-based approval mode mcp_tool = HostedMCPTool( @@ -2073,7 +2073,7 @@ def test_azure_ai_chat_client_prepare_mcp_resources_with_dict_approval_mode( approval_mode={"always_require_approval": {"tool1", "tool2"}}, ) - result = chat_client._prepare_mcp_resources([mcp_tool]) # type: ignore + result = client._prepare_mcp_resources([mcp_tool]) # type: ignore assert len(result) == 1 assert result[0]["server_label"] == "Test_MCP" @@ -2085,7 +2085,7 @@ def test_azure_ai_chat_client_prepare_mcp_resources_with_never_require_dict( mock_agents_client: MagicMock, ) -> None: """Test _prepare_mcp_resources with dict-based approval mode (never_require_approval).""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client) + client = create_test_azure_ai_chat_client(mock_agents_client) # MCP tool with never_require_approval dict mcp_tool = HostedMCPTool( @@ -2094,7 +2094,7 @@ def test_azure_ai_chat_client_prepare_mcp_resources_with_never_require_dict( approval_mode={"never_require_approval": {"safe_tool"}}, ) - result = chat_client._prepare_mcp_resources([mcp_tool]) # type: ignore + result = 
client._prepare_mcp_resources([mcp_tool]) # type: ignore assert len(result) == 1 assert result[0]["require_approval"] == {"never": {"safe_tool"}} @@ -2104,12 +2104,12 @@ def test_azure_ai_chat_client_prepare_messages_with_function_result( mock_agents_client: MagicMock, ) -> None: """Test _prepare_messages extracts function_result content.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client) + client = create_test_azure_ai_chat_client(mock_agents_client) function_result = Content.from_function_result(call_id='["run_123", "call_456"]', result="test result") messages = [Message(role="user", contents=[function_result])] - additional_messages, instructions, required_action_results = chat_client._prepare_messages(messages) # type: ignore + additional_messages, instructions, required_action_results = client._prepare_messages(messages) # type: ignore # function_result should be extracted, not added to additional_messages assert additional_messages is None @@ -2122,14 +2122,14 @@ def test_azure_ai_chat_client_prepare_messages_with_raw_content_block( mock_agents_client: MagicMock, ) -> None: """Test _prepare_messages handles raw MessageInputContentBlock in content.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client) + client = create_test_azure_ai_chat_client(mock_agents_client) # Create content with raw_representation that is a MessageInputContentBlock raw_block = MessageInputTextBlock(text="Raw block text") custom_content = Content(type="custom", raw_representation=raw_block) messages = [Message(role="user", contents=[custom_content])] - additional_messages, instructions, required_action_results = chat_client._prepare_messages(messages) # type: ignore + additional_messages, instructions, required_action_results = client._prepare_messages(messages) # type: ignore assert additional_messages is not None assert len(additional_messages) == 1 @@ -2141,7 +2141,7 @@ async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_mcp_tool( 
mock_agents_client: MagicMock, ) -> None: """Test _prepare_tools_for_azure_ai with HostedMCPTool.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") mcp_tool = HostedMCPTool( name="Test MCP Server", @@ -2149,7 +2149,7 @@ async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_mcp_tool( allowed_tools=["tool1", "tool2"], ) - tool_definitions = await chat_client._prepare_tools_for_azure_ai([mcp_tool]) # type: ignore + tool_definitions = await client._prepare_tools_for_azure_ai([mcp_tool]) # type: ignore assert len(tool_definitions) >= 1 # The McpTool.definitions property returns the tool definitions @@ -2162,12 +2162,12 @@ async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_tool_definition( mock_agents_client: MagicMock, ) -> None: """Test _prepare_tools_for_azure_ai with ToolDefinition passthrough.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Pass a ToolDefinition directly - should be passed through as-is tool_def = CodeInterpreterToolDefinition() - tool_definitions = await chat_client._prepare_tools_for_azure_ai([tool_def]) # type: ignore + tool_definitions = await client._prepare_tools_for_azure_ai([tool_def]) # type: ignore assert len(tool_definitions) == 1 assert tool_definitions[0] is tool_def @@ -2177,12 +2177,12 @@ async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_dict_passthrough( mock_agents_client: MagicMock, ) -> None: """Test _prepare_tools_for_azure_ai with dict passthrough.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Pass a dict tool definition - should be passed through as-is dict_tool = {"type": "function", "function": 
{"name": "test_func", "parameters": {}}} - tool_definitions = await chat_client._prepare_tools_for_azure_ai([dict_tool]) # type: ignore + tool_definitions = await client._prepare_tools_for_azure_ai([dict_tool]) # type: ignore assert len(tool_definitions) == 1 assert tool_definitions[0] is dict_tool @@ -2192,7 +2192,7 @@ async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_unsupported_type( mock_agents_client: MagicMock, ) -> None: """Test _prepare_tools_for_azure_ai raises error for unsupported tool type.""" - chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Pass an unsupported tool type class UnsupportedTool: @@ -2201,4 +2201,4 @@ class UnsupportedTool: unsupported_tool = UnsupportedTool() with pytest.raises(ServiceInitializationError, match="Unsupported tool type"): - await chat_client._prepare_tools_for_azure_ai([unsupported_tool]) # type: ignore + await client._prepare_tools_for_azure_ai([unsupported_tool]) # type: ignore diff --git a/python/packages/azure-ai/tests/test_azure_ai_client.py b/python/packages/azure-ai/tests/test_azure_ai_client.py index cc15a7bc09..230c6677c7 100644 --- a/python/packages/azure-ai/tests/test_azure_ai_client.py +++ b/python/packages/azure-ai/tests/test_azure_ai_client.py @@ -95,12 +95,12 @@ async def temporary_chat_client(agent_name: str) -> AsyncIterator[AzureAIClient] AzureCliCredential() as credential, AIProjectClient(endpoint=endpoint, credential=credential) as project_client, ): - chat_client = AzureAIClient( + client = AzureAIClient( project_client=project_client, agent_name=agent_name, ) try: - yield chat_client + yield client finally: await project_client.agents.delete(agent_name=agent_name) @@ -1622,7 +1622,7 @@ async def test_integration_agent_existing_thread(): async with ( temporary_chat_client(agent_name="af-int-test-existing-thread") as client, Agent( - chat_client=client, + 
client=client, instructions="You are a helpful assistant with good memory.", ) as first_agent, ): @@ -1641,7 +1641,7 @@ async def test_integration_agent_existing_thread(): async with ( temporary_chat_client(agent_name="af-int-test-existing-thread-2") as client, Agent( - chat_client=client, + client=client, instructions="You are a helpful assistant with good memory.", ) as second_agent, ): diff --git a/python/packages/bedrock/samples/bedrock_sample.py b/python/packages/bedrock/samples/bedrock_sample.py index b9ecc1101d..188e6bf1da 100644 --- a/python/packages/bedrock/samples/bedrock_sample.py +++ b/python/packages/bedrock/samples/bedrock_sample.py @@ -18,7 +18,7 @@ def get_weather(city: str) -> dict[str, str]: async def main() -> None: """Run the Bedrock sample agent, invoke the weather tool, and log the response.""" agent = Agent( - chat_client=BedrockChatClient(), + client=BedrockChatClient(), instructions="You are a concise travel assistant.", name="BedrockWeatherAgent", tool_choice="auto", diff --git a/python/packages/chatkit/README.md b/python/packages/chatkit/README.md index c901ce3f58..f52225c1d9 100644 --- a/python/packages/chatkit/README.md +++ b/python/packages/chatkit/README.md @@ -75,7 +75,7 @@ from your_store import YourStore # type: ignore[import-not-found] # Replace wi # Define your agent with tools agent = Agent( - chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), + client=AzureOpenAIChatClient(credential=AzureCliCredential()), instructions="You are a helpful assistant.", tools=[], # Add your tools here ) diff --git a/python/packages/core/AGENTS.md b/python/packages/core/AGENTS.md index ad2965200b..823b601b76 100644 --- a/python/packages/core/AGENTS.md +++ b/python/packages/core/AGENTS.md @@ -95,7 +95,7 @@ from agent_framework import Agent from agent_framework.openai import OpenAIChatClient agent = Agent( - chat_client=OpenAIChatClient(), + client=OpenAIChatClient(), instructions="You are helpful.", tools=[my_function], ) diff --git 
a/python/packages/core/README.md b/python/packages/core/README.md index a633a745cb..5ec3822c79 100644 --- a/python/packages/core/README.md +++ b/python/packages/core/README.md @@ -45,7 +45,7 @@ You can also override environment variables by explicitly passing configuration ```python from agent_framework.azure import AzureOpenAIChatClient -chat_client = AzureOpenAIChatClient( +client = AzureOpenAIChatClient( api_key="", endpoint="", deployment_name="", @@ -66,7 +66,7 @@ from agent_framework.openai import OpenAIChatClient async def main(): agent = Agent( - chat_client=OpenAIChatClient(), + client=OpenAIChatClient(), instructions=""" 1) A robot may not injure a human being... 2) A robot must obey orders given it by human beings... @@ -146,7 +146,7 @@ def get_menu_specials() -> str: async def main(): agent = Agent( - chat_client=OpenAIChatClient(), + client=OpenAIChatClient(), instructions="You are a helpful assistant that can provide weather and restaurant information.", tools=[get_weather, get_menu_specials] ) @@ -176,13 +176,13 @@ from agent_framework.openai import OpenAIChatClient async def main(): # Create specialized agents writer = Agent( - chat_client=OpenAIChatClient(), + client=OpenAIChatClient(), name="Writer", instructions="You are a creative content writer. Generate and refine slogans based on feedback." ) reviewer = Agent( - chat_client=OpenAIChatClient(), + client=OpenAIChatClient(), name="Reviewer", instructions="You are a critical reviewer. Provide detailed feedback on proposed slogans." 
) @@ -218,7 +218,7 @@ if __name__ == "__main__": ## More Examples & Samples - [Getting Started with Agents](https://github.com/microsoft/agent-framework/tree/main/python/samples/getting_started/agents): Basic agent creation and tool usage -- [Chat Client Examples](https://github.com/microsoft/agent-framework/tree/main/python/samples/getting_started/chat_client): Direct chat client usage patterns +- [Chat Client Examples](https://github.com/microsoft/agent-framework/tree/main/python/samples/getting_started/client): Direct chat client usage patterns - [Azure AI Integration](https://github.com/microsoft/agent-framework/tree/main/python/packages/azure-ai): Azure AI integration - [.NET Workflows Samples](https://github.com/microsoft/agent-framework/tree/main/dotnet/samples/GettingStarted/Workflows): Advanced multi-agent patterns (.NET) diff --git a/python/packages/core/agent_framework/_agents.py b/python/packages/core/agent_framework/_agents.py index 523528a5f2..8a194bede3 100644 --- a/python/packages/core/agent_framework/_agents.py +++ b/python/packages/core/agent_framework/_agents.py @@ -462,13 +462,13 @@ def as_tool( from agent_framework import Agent # Create an agent - agent = Agent(chat_client=client, name="research-agent", description="Performs research tasks") + agent = Agent(client=client, name="research-agent", description="Performs research tasks") # Convert the agent to a tool research_tool = agent.as_tool() # Use the tool with another agent - coordinator = Agent(chat_client=client, name="coordinator", tools=research_tool) + coordinator = Agent(client=client, name="coordinator", tools=research_tool) """ # Verify that self implements SupportsAgentRun if not isinstance(self, SupportsAgentRun): @@ -549,7 +549,7 @@ class RawAgent(BaseAgent, Generic[OptionsCoT]): # type: ignore[misc] # Create a basic chat agent client = OpenAIChatClient(model_id="gpt-4") - agent = Agent(chat_client=client, name="assistant", description="A helpful assistant") + agent = 
Agent(client=client, name="assistant", description="A helpful assistant") # Run the agent with a simple message response = await agent.run("Hello, how are you?") @@ -565,7 +565,7 @@ def get_weather(location: str) -> str: agent = Agent( - chat_client=client, + client=client, name="weather-agent", instructions="You are a weather assistant.", tools=get_weather, @@ -588,7 +588,7 @@ def get_weather(location: str) -> str: client = OpenAIChatClient(model_id="gpt-4o") agent: Agent[OpenAIChatOptions] = Agent( - chat_client=client, + client=client, name="reasoning-agent", instructions="You are a reasoning assistant.", options={ @@ -730,7 +730,7 @@ def __init__( async def __aenter__(self) -> Self: """Enter the async context manager. - If any of the chat_client or local_mcp_tools are context managers, + If any of the client or local_mcp_tools are context managers, they will be entered into the async exit stack to ensure proper cleanup. Note: @@ -1386,7 +1386,7 @@ class Agent( def __init__( self, - chat_client: SupportsChatGetResponse[OptionsCoT], + client: SupportsChatGetResponse[OptionsCoT], instructions: str | None = None, *, id: str | None = None, @@ -1405,7 +1405,7 @@ def __init__( ) -> None: """Initialize a Agent instance.""" super().__init__( - chat_client=chat_client, + client=client, instructions=instructions, id=id, name=name, diff --git a/python/packages/core/agent_framework/_clients.py b/python/packages/core/agent_framework/_clients.py index 44af994e26..6e5356b8d3 100644 --- a/python/packages/core/agent_framework/_clients.py +++ b/python/packages/core/agent_framework/_clients.py @@ -497,7 +497,7 @@ def as_agent( from ._agents import Agent return Agent( - chat_client=self, + client=self, id=id, name=name, description=description, diff --git a/python/packages/core/agent_framework/_mcp.py b/python/packages/core/agent_framework/_mcp.py index 9e960c56d6..116ffbb265 100644 --- a/python/packages/core/agent_framework/_mcp.py +++ 
b/python/packages/core/agent_framework/_mcp.py @@ -335,7 +335,7 @@ def __init__( parse_prompt_results: Literal[True] | Callable[[types.GetPromptResult], Any] | None = True, session: ClientSession | None = None, request_timeout: int | None = None, - chat_client: SupportsChatGetResponse | None = None, + client: SupportsChatGetResponse | None = None, additional_properties: dict[str, Any] | None = None, ) -> None: """Initialize the MCP Tool base. @@ -356,7 +356,7 @@ def __init__( self._exit_stack = AsyncExitStack() self.session = session self.request_timeout = request_timeout - self.chat_client = chat_client + self.client = client self._functions: list[FunctionTool[Any, Any]] = [] self.is_connected: bool = False self._tools_loaded: bool = False @@ -507,7 +507,7 @@ async def sampling_callback( Returns: Either a CreateMessageResult with the generated message or ErrorData if generation fails. """ - if not self.chat_client: + if not self.client: return types.ErrorData( code=types.INTERNAL_ERROR, message="No chat client available. 
Please set a chat client.", @@ -517,7 +517,7 @@ async def sampling_callback( for msg in params.messages: messages.append(_parse_message_from_mcp(msg)) try: - response = await self.chat_client.get_response( + response = await self.client.get_response( messages, temperature=params.temperature, max_tokens=params.maxTokens, @@ -921,7 +921,7 @@ class MCPStdioTool(MCPTool): # Use with a chat agent async with mcp_tool: - agent = Agent(chat_client=client, name="assistant", tools=mcp_tool) + agent = Agent(client=client, name="assistant", tools=mcp_tool) response = await agent.run("List files in the directory") """ @@ -942,7 +942,7 @@ def __init__( args: list[str] | None = None, env: dict[str, str] | None = None, encoding: str | None = None, - chat_client: SupportsChatGetResponse | None = None, + client: SupportsChatGetResponse | None = None, additional_properties: dict[str, Any] | None = None, **kwargs: Any, ) -> None: @@ -982,7 +982,7 @@ def __init__( args: The arguments to pass to the command. env: The environment variables to set for the command. encoding: The encoding to use for the command output. - chat_client: The chat client to use for sampling. + client: The chat client to use for sampling. kwargs: Any extra arguments to pass to the stdio client. 
""" super().__init__( @@ -992,7 +992,7 @@ def __init__( allowed_tools=allowed_tools, additional_properties=additional_properties, session=session, - chat_client=chat_client, + client=client, load_tools=load_tools, parse_tool_results=parse_tool_results, load_prompts=load_prompts, @@ -1042,7 +1042,7 @@ class MCPStreamableHTTPTool(MCPTool): # Use with a chat agent async with mcp_tool: - agent = Agent(chat_client=client, name="assistant", tools=mcp_tool) + agent = Agent(client=client, name="assistant", tools=mcp_tool) response = await agent.run("Fetch data from the API") """ @@ -1061,7 +1061,7 @@ def __init__( approval_mode: (Literal["always_require", "never_require"] | HostedMCPSpecificApproval | None) = None, allowed_tools: Collection[str] | None = None, terminate_on_close: bool | None = None, - chat_client: SupportsChatGetResponse | None = None, + client: SupportsChatGetResponse | None = None, additional_properties: dict[str, Any] | None = None, http_client: httpx.AsyncClient | None = None, **kwargs: Any, @@ -1101,7 +1101,7 @@ def __init__( allowed_tools: A list of tools that are allowed to use this tool. additional_properties: Additional properties. terminate_on_close: Close the transport when the MCP client is terminated. - chat_client: The chat client to use for sampling. + client: The chat client to use for sampling. http_client: Optional httpx.AsyncClient to use. If not provided, the ``streamable_http_client`` API will create and manage a default client. 
To configure headers, timeouts, or other HTTP client settings, create @@ -1115,7 +1115,7 @@ def __init__( allowed_tools=allowed_tools, additional_properties=additional_properties, session=session, - chat_client=chat_client, + client=client, load_tools=load_tools, parse_tool_results=parse_tool_results, load_prompts=load_prompts, @@ -1157,7 +1157,7 @@ class MCPWebsocketTool(MCPTool): # Use with a chat agent async with mcp_tool: - agent = Agent(chat_client=client, name="assistant", tools=mcp_tool) + agent = Agent(client=client, name="assistant", tools=mcp_tool) response = await agent.run("Connect to the real-time service") """ @@ -1175,7 +1175,7 @@ def __init__( description: str | None = None, approval_mode: (Literal["always_require", "never_require"] | HostedMCPSpecificApproval | None) = None, allowed_tools: Collection[str] | None = None, - chat_client: SupportsChatGetResponse | None = None, + client: SupportsChatGetResponse | None = None, additional_properties: dict[str, Any] | None = None, **kwargs: Any, ) -> None: @@ -1213,7 +1213,7 @@ def __init__( A tool should not be listed in both, if so, it will require approval. allowed_tools: A list of tools that are allowed to use this tool. additional_properties: Additional properties. - chat_client: The chat client to use for sampling. + client: The chat client to use for sampling. kwargs: Any extra arguments to pass to the WebSocket client. 
""" super().__init__( @@ -1223,7 +1223,7 @@ def __init__( allowed_tools=allowed_tools, additional_properties=additional_properties, session=session, - chat_client=chat_client, + client=client, load_tools=load_tools, parse_tool_results=parse_tool_results, load_prompts=load_prompts, diff --git a/python/packages/core/agent_framework/_memory.py b/python/packages/core/agent_framework/_memory.py index f84b614ae0..cab5294b55 100644 --- a/python/packages/core/agent_framework/_memory.py +++ b/python/packages/core/agent_framework/_memory.py @@ -96,7 +96,7 @@ async def invoking(self, messages, **kwargs): # Use with a chat agent async with CustomContextProvider() as provider: - agent = Agent(chat_client=client, name="assistant", context_provider=provider) + agent = Agent(client=client, name="assistant", context_provider=provider) """ # Default prompt to be used by all context providers when assembling memories/instructions diff --git a/python/packages/core/agent_framework/_middleware.py b/python/packages/core/agent_framework/_middleware.py index 915b6a4f8f..ac6630a03f 100644 --- a/python/packages/core/agent_framework/_middleware.py +++ b/python/packages/core/agent_framework/_middleware.py @@ -263,7 +263,7 @@ class ChatContext: about the chat request. Attributes: - chat_client: The chat client being invoked. + client: The chat client being invoked. messages: The messages being sent to the chat client. options: The options for the chat request as a dict. stream: Whether this is a streaming invocation. @@ -302,7 +302,7 @@ async def process(self, context: ChatContext, call_next): def __init__( self, - chat_client: SupportsChatGetResponse, + client: SupportsChatGetResponse, messages: Sequence[Message], options: Mapping[str, Any] | None, stream: bool = False, @@ -319,7 +319,7 @@ def __init__( """Initialize the ChatContext. Args: - chat_client: The chat client being invoked. + client: The chat client being invoked. messages: The messages being sent to the chat client. 
options: The options for the chat request as a dict. stream: Whether this is a streaming invocation. @@ -330,7 +330,7 @@ def __init__( stream_result_hooks: Result hooks to apply to the finalized streaming response. stream_cleanup_hooks: Cleanup hooks to run after streaming completes. """ - self.chat_client = chat_client + self.client = client self.messages = messages self.options = options self.stream = stream @@ -372,7 +372,7 @@ async def process(self, context: AgentContext, call_next): # Use with an agent - agent = Agent(chat_client=client, name="assistant", middleware=[RetryMiddleware()]) + agent = Agent(client=client, name="assistant", middleware=[RetryMiddleware()]) """ @abstractmethod @@ -439,7 +439,7 @@ async def process(self, context: FunctionInvocationContext, call_next): # Use with an agent - agent = Agent(chat_client=client, name="assistant", middleware=[CachingMiddleware()]) + agent = Agent(client=client, name="assistant", middleware=[CachingMiddleware()]) """ @abstractmethod @@ -498,7 +498,7 @@ async def process(self, context: ChatContext, call_next): # Use with an agent agent = Agent( - chat_client=client, + client=client, name="assistant", middleware=[SystemPromptMiddleware("You are a helpful assistant.")], ) @@ -583,7 +583,7 @@ async def logging_middleware(context: AgentContext, call_next): # Use with an agent - agent = Agent(chat_client=client, name="assistant", middleware=[logging_middleware]) + agent = Agent(client=client, name="assistant", middleware=[logging_middleware]) """ # Add marker attribute to identify this as agent middleware func._middleware_type: MiddlewareType = MiddlewareType.AGENT # type: ignore @@ -616,7 +616,7 @@ async def logging_middleware(context: FunctionInvocationContext, call_next): # Use with an agent - agent = Agent(chat_client=client, name="assistant", middleware=[logging_middleware]) + agent = Agent(client=client, name="assistant", middleware=[logging_middleware]) """ # Add marker attribute to identify this as function 
middleware func._middleware_type: MiddlewareType = MiddlewareType.FUNCTION # type: ignore @@ -649,7 +649,7 @@ async def logging_middleware(context: ChatContext, call_next): # Use with an agent - agent = Agent(chat_client=client, name="assistant", middleware=[logging_middleware]) + agent = Agent(client=client, name="assistant", middleware=[logging_middleware]) """ # Add marker attribute to identify this as chat middleware func._middleware_type: MiddlewareType = MiddlewareType.CHAT # type: ignore @@ -1035,7 +1035,7 @@ def get_response( ) context = ChatContext( - chat_client=self, # type: ignore[arg-type] + client=self, # type: ignore[arg-type] messages=prepare_messages(messages), options=options, stream=stream, @@ -1090,7 +1090,7 @@ def __init__( self.agent_middleware = middleware_list["agent"] # Pass middleware to super so BaseAgent can store it for dynamic rebuild super().__init__(*args, middleware=middleware, **kwargs) # type: ignore[call-arg] - # Note: We intentionally don't extend chat_client's middleware lists here. + # Note: We intentionally don't extend client's middleware lists here. # Chat and function middleware is passed to the chat client at runtime via kwargs # in AgentMiddlewareLayer.run(), where it's properly combined with run-level middleware. 
diff --git a/python/packages/core/agent_framework/_serialization.py b/python/packages/core/agent_framework/_serialization.py index ed367ffbce..8588e0be5a 100644 --- a/python/packages/core/agent_framework/_serialization.py +++ b/python/packages/core/agent_framework/_serialization.py @@ -443,7 +443,7 @@ def from_dict( dependencies = {"open_ai_chat_client": {"client": openai_client}} # The chat client is reconstructed with the OpenAI client injected - chat_client = OpenAIChatClient.from_dict(client_data, dependencies=dependencies) + client = OpenAIChatClient.from_dict(client_data, dependencies=dependencies) # Now ready to make API calls with the injected client **Function Injection for Tools** - FunctionTool runtime dependency: diff --git a/python/packages/core/agent_framework/_workflows/_agent_executor.py b/python/packages/core/agent_framework/_workflows/_agent_executor.py index 0024f2afe0..0923e5c93c 100644 --- a/python/packages/core/agent_framework/_workflows/_agent_executor.py +++ b/python/packages/core/agent_framework/_workflows/_agent_executor.py @@ -216,8 +216,8 @@ async def on_checkpoint_save(self) -> dict[str, Any]: """ # Check if using AzureAIAgentClient with server-side thread and warn about checkpointing limitations if is_chat_agent(self._agent) and self._agent_thread.service_thread_id is not None: - client_class_name = self._agent.chat_client.__class__.__name__ - client_module = self._agent.chat_client.__class__.__module__ + client_class_name = self._agent.client.__class__.__name__ + client_module = self._agent.client.__class__.__module__ if client_class_name == "AzureAIAgentClient" and "azure_ai" in client_module: logger.warning( diff --git a/python/packages/core/agent_framework/openai/_assistant_provider.py b/python/packages/core/agent_framework/openai/_assistant_provider.py index 7ac165ed09..818b8b482e 100644 --- a/python/packages/core/agent_framework/openai/_assistant_provider.py +++ b/python/packages/core/agent_framework/openai/_assistant_provider.py 
@@ -542,7 +542,7 @@ def _create_chat_agent_from_assistant( A configured Agent instance. """ # Create the chat client with the assistant - chat_client = OpenAIAssistantsClient( + client = OpenAIAssistantsClient( model_id=assistant.model, assistant_id=assistant.id, assistant_name=assistant.name, @@ -555,7 +555,7 @@ def _create_chat_agent_from_assistant( # Create and return Agent return Agent( - chat_client=chat_client, + client=client, id=assistant.id, name=assistant.name, description=assistant.description, diff --git a/python/packages/core/tests/azure/test_azure_assistants_client.py b/python/packages/core/tests/azure/test_azure_assistants_client.py index e89b38ae5a..93cd02c6f3 100644 --- a/python/packages/core/tests/azure/test_azure_assistants_client.py +++ b/python/packages/core/tests/azure/test_azure_assistants_client.py @@ -83,19 +83,19 @@ def mock_async_azure_openai() -> MagicMock: def test_azure_assistants_client_init_with_client(mock_async_azure_openai: MagicMock) -> None: """Test AzureOpenAIAssistantsClient initialization with existing client.""" - chat_client = create_test_azure_assistants_client( + client = create_test_azure_assistants_client( mock_async_azure_openai, deployment_name="test_chat_deployment", assistant_id="existing-assistant-id", thread_id="test-thread-id", ) - assert chat_client.client is mock_async_azure_openai - assert chat_client.model_id == "test_chat_deployment" - assert chat_client.assistant_id == "existing-assistant-id" - assert chat_client.thread_id == "test-thread-id" - assert not chat_client._should_delete_assistant # type: ignore - assert isinstance(chat_client, SupportsChatGetResponse) + assert client.client is mock_async_azure_openai + assert client.model_id == "test_chat_deployment" + assert client.assistant_id == "existing-assistant-id" + assert client.thread_id == "test-thread-id" + assert not client._should_delete_assistant # type: ignore + assert isinstance(client, SupportsChatGetResponse) def 
test_azure_assistants_client_init_auto_create_client( @@ -103,7 +103,7 @@ def test_azure_assistants_client_init_auto_create_client( mock_async_azure_openai: MagicMock, ) -> None: """Test AzureOpenAIAssistantsClient initialization with auto-created client.""" - chat_client = AzureOpenAIAssistantsClient( + client = AzureOpenAIAssistantsClient( deployment_name=azure_openai_unit_test_env["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"], assistant_name="TestAssistant", api_key=azure_openai_unit_test_env["AZURE_OPENAI_API_KEY"], @@ -111,11 +111,11 @@ def test_azure_assistants_client_init_auto_create_client( async_client=mock_async_azure_openai, ) - assert chat_client.client is mock_async_azure_openai - assert chat_client.model_id == azure_openai_unit_test_env["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"] - assert chat_client.assistant_id is None - assert chat_client.assistant_name == "TestAssistant" - assert not chat_client._should_delete_assistant # type: ignore + assert client.client is mock_async_azure_openai + assert client.model_id == azure_openai_unit_test_env["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"] + assert client.assistant_id is None + assert client.assistant_name == "TestAssistant" + assert not client._should_delete_assistant # type: ignore def test_azure_assistants_client_init_validation_fail() -> None: @@ -138,32 +138,32 @@ def test_azure_assistants_client_init_with_default_headers(azure_openai_unit_tes """Test AzureOpenAIAssistantsClient initialization with default headers.""" default_headers = {"X-Unit-Test": "test-guid"} - chat_client = AzureOpenAIAssistantsClient( + client = AzureOpenAIAssistantsClient( deployment_name="test_chat_deployment", api_key=azure_openai_unit_test_env["AZURE_OPENAI_API_KEY"], endpoint=azure_openai_unit_test_env["AZURE_OPENAI_ENDPOINT"], default_headers=default_headers, ) - assert chat_client.model_id == "test_chat_deployment" - assert isinstance(chat_client, SupportsChatGetResponse) + assert client.model_id == "test_chat_deployment" + assert 
isinstance(client, SupportsChatGetResponse) # Assert that the default header we added is present in the client's default headers for key, value in default_headers.items(): - assert key in chat_client.client.default_headers - assert chat_client.client.default_headers[key] == value + assert key in client.client.default_headers + assert client.client.default_headers[key] == value async def test_azure_assistants_client_get_assistant_id_or_create_existing_assistant( mock_async_azure_openai: MagicMock, ) -> None: """Test _get_assistant_id_or_create when assistant_id is already provided.""" - chat_client = create_test_azure_assistants_client(mock_async_azure_openai, assistant_id="existing-assistant-id") + client = create_test_azure_assistants_client(mock_async_azure_openai, assistant_id="existing-assistant-id") - assistant_id = await chat_client._get_assistant_id_or_create() # type: ignore + assistant_id = await client._get_assistant_id_or_create() # type: ignore assert assistant_id == "existing-assistant-id" - assert not chat_client._should_delete_assistant # type: ignore + assert not client._should_delete_assistant # type: ignore mock_async_azure_openai.beta.assistants.create.assert_not_called() @@ -171,14 +171,14 @@ async def test_azure_assistants_client_get_assistant_id_or_create_create_new( mock_async_azure_openai: MagicMock, ) -> None: """Test _get_assistant_id_or_create when creating a new assistant.""" - chat_client = create_test_azure_assistants_client( + client = create_test_azure_assistants_client( mock_async_azure_openai, deployment_name="test_chat_deployment", assistant_name="TestAssistant" ) - assistant_id = await chat_client._get_assistant_id_or_create() # type: ignore + assistant_id = await client._get_assistant_id_or_create() # type: ignore assert assistant_id == "test-assistant-id" - assert chat_client._should_delete_assistant # type: ignore + assert client._should_delete_assistant # type: ignore 
mock_async_azure_openai.beta.assistants.create.assert_called_once() @@ -186,38 +186,38 @@ async def test_azure_assistants_client_aclose_should_not_delete( mock_async_azure_openai: MagicMock, ) -> None: """Test close when assistant should not be deleted.""" - chat_client = create_test_azure_assistants_client( + client = create_test_azure_assistants_client( mock_async_azure_openai, assistant_id="assistant-to-keep", should_delete_assistant=False ) - await chat_client.close() # type: ignore + await client.close() # type: ignore # Verify assistant deletion was not called mock_async_azure_openai.beta.assistants.delete.assert_not_called() - assert not chat_client._should_delete_assistant # type: ignore + assert not client._should_delete_assistant # type: ignore async def test_azure_assistants_client_aclose_should_delete(mock_async_azure_openai: MagicMock) -> None: """Test close method calls cleanup.""" - chat_client = create_test_azure_assistants_client( + client = create_test_azure_assistants_client( mock_async_azure_openai, assistant_id="assistant-to-delete", should_delete_assistant=True ) - await chat_client.close() + await client.close() # Verify assistant deletion was called mock_async_azure_openai.beta.assistants.delete.assert_called_once_with("assistant-to-delete") - assert not chat_client._should_delete_assistant # type: ignore + assert not client._should_delete_assistant # type: ignore async def test_azure_assistants_client_async_context_manager(mock_async_azure_openai: MagicMock) -> None: """Test async context manager functionality.""" - chat_client = create_test_azure_assistants_client( + client = create_test_azure_assistants_client( mock_async_azure_openai, assistant_id="assistant-to-delete", should_delete_assistant=True ) # Test context manager - async with chat_client: + async with client: pass # Just test that we can enter and exit # Verify cleanup was called on exit @@ -229,7 +229,7 @@ def test_azure_assistants_client_serialize(azure_openai_unit_test_env: 
dict[str, default_headers = {"X-Unit-Test": "test-guid"} # Test basic initialization and to_dict - chat_client = AzureOpenAIAssistantsClient( + client = AzureOpenAIAssistantsClient( deployment_name="test_chat_deployment", assistant_id="test-assistant-id", assistant_name="TestAssistant", @@ -239,7 +239,7 @@ def test_azure_assistants_client_serialize(azure_openai_unit_test_env: dict[str, default_headers=default_headers, ) - dumped_settings = chat_client.to_dict() + dumped_settings = client.to_dict() assert dumped_settings["model_id"] == "test_chat_deployment" assert dumped_settings["assistant_id"] == "test-assistant-id" @@ -399,7 +399,7 @@ async def test_azure_assistants_client_with_existing_assistant() -> None: async def test_azure_assistants_agent_basic_run(): """Test Agent basic run functionality with AzureOpenAIAssistantsClient.""" async with Agent( - chat_client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), + client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), ) as agent: # Run a simple query response = await agent.run("Hello! 
Please respond with 'Hello World' exactly.") @@ -416,7 +416,7 @@ async def test_azure_assistants_agent_basic_run(): async def test_azure_assistants_agent_basic_run_streaming(): """Test Agent basic streaming functionality with AzureOpenAIAssistantsClient.""" async with Agent( - chat_client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), + client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), ) as agent: # Run streaming query full_message: str = "" @@ -436,7 +436,7 @@ async def test_azure_assistants_agent_basic_run_streaming(): async def test_azure_assistants_agent_thread_persistence(): """Test Agent thread persistence across runs with AzureOpenAIAssistantsClient.""" async with Agent( - chat_client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), + client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), instructions="You are a helpful assistant with good memory.", ) as agent: # Create a new thread that will be reused @@ -468,7 +468,7 @@ async def test_azure_assistants_agent_existing_thread_id(): existing_thread_id = None async with Agent( - chat_client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), + client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), instructions="You are a helpful weather agent.", tools=[get_weather], ) as agent: @@ -488,7 +488,7 @@ async def test_azure_assistants_agent_existing_thread_id(): # Now continue with the same thread ID in a new agent instance async with Agent( - chat_client=AzureOpenAIAssistantsClient(thread_id=existing_thread_id, credential=AzureCliCredential()), + client=AzureOpenAIAssistantsClient(thread_id=existing_thread_id, credential=AzureCliCredential()), instructions="You are a helpful weather agent.", tools=[get_weather], ) as agent: @@ -511,7 +511,7 @@ async def test_azure_assistants_agent_code_interpreter(): """Test Agent with code interpreter through AzureOpenAIAssistantsClient.""" async with Agent( - 
chat_client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), + client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), instructions="You are a helpful assistant that can write and execute Python code.", tools=[HostedCodeInterpreterTool()], ) as agent: @@ -531,7 +531,7 @@ async def test_azure_assistants_client_agent_level_tool_persistence(): """Test that agent-level tools persist across multiple runs with Azure Assistants Client.""" async with Agent( - chat_client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), + client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), instructions="You are a helpful assistant that uses available tools.", tools=[get_weather], # Agent-level tool ) as agent: diff --git a/python/packages/core/tests/azure/test_azure_chat_client.py b/python/packages/core/tests/azure/test_azure_chat_client.py index 37687e20e7..f0b34cc13d 100644 --- a/python/packages/core/tests/azure/test_azure_chat_client.py +++ b/python/packages/core/tests/azure/test_azure_chat_client.py @@ -766,7 +766,7 @@ async def test_azure_openai_chat_client_streaming_tools() -> None: async def test_azure_openai_chat_client_agent_basic_run(): """Test Azure OpenAI chat client agent basic run functionality with AzureOpenAIChatClient.""" async with Agent( - chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), + client=AzureOpenAIChatClient(credential=AzureCliCredential()), ) as agent: # Test basic run response = await agent.run("Please respond with exactly: 'This is a response test.'") @@ -782,7 +782,7 @@ async def test_azure_openai_chat_client_agent_basic_run(): async def test_azure_openai_chat_client_agent_basic_run_streaming(): """Test Azure OpenAI chat client agent basic streaming functionality with AzureOpenAIChatClient.""" async with Agent( - chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), + client=AzureOpenAIChatClient(credential=AzureCliCredential()), ) as agent: # Test streaming run 
full_text = "" @@ -800,7 +800,7 @@ async def test_azure_openai_chat_client_agent_basic_run_streaming(): async def test_azure_openai_chat_client_agent_thread_persistence(): """Test Azure OpenAI chat client agent thread persistence across runs with AzureOpenAIChatClient.""" async with Agent( - chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), + client=AzureOpenAIChatClient(credential=AzureCliCredential()), instructions="You are a helpful assistant with good memory.", ) as agent: # Create a new thread that will be reused @@ -828,7 +828,7 @@ async def test_azure_openai_chat_client_agent_existing_thread(): preserved_thread = None async with Agent( - chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), + client=AzureOpenAIChatClient(credential=AzureCliCredential()), instructions="You are a helpful assistant with good memory.", ) as first_agent: # Start a conversation and capture the thread @@ -844,7 +844,7 @@ async def test_azure_openai_chat_client_agent_existing_thread(): # Second conversation - reuse the thread in a new agent instance if preserved_thread: async with Agent( - chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), + client=AzureOpenAIChatClient(credential=AzureCliCredential()), instructions="You are a helpful assistant with good memory.", ) as second_agent: # Reuse the preserved thread @@ -861,7 +861,7 @@ async def test_azure_chat_client_agent_level_tool_persistence(): """Test that agent-level tools persist across multiple runs with Azure Chat Client.""" async with Agent( - chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), + client=AzureOpenAIChatClient(credential=AzureCliCredential()), instructions="You are a helpful assistant that uses available tools.", tools=[get_weather], # Agent-level tool ) as agent: diff --git a/python/packages/core/tests/azure/test_azure_responses_client.py b/python/packages/core/tests/azure/test_azure_responses_client.py index 3a847e8c96..186b9ba57f 100644 --- 
a/python/packages/core/tests/azure/test_azure_responses_client.py +++ b/python/packages/core/tests/azure/test_azure_responses_client.py @@ -427,7 +427,7 @@ async def test_integration_client_agent_existing_thread(): preserved_thread = None async with Agent( - chat_client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), + client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), instructions="You are a helpful assistant with good memory.", ) as first_agent: # Start a conversation and capture the thread @@ -443,7 +443,7 @@ async def test_integration_client_agent_existing_thread(): # Second conversation - reuse the thread in a new agent instance if preserved_thread: async with Agent( - chat_client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), + client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), instructions="You are a helpful assistant with good memory.", ) as second_agent: # Reuse the preserved thread diff --git a/python/packages/core/tests/core/conftest.py b/python/packages/core/tests/core/conftest.py index f21433b8f8..f3f56afd9e 100644 --- a/python/packages/core/tests/core/conftest.py +++ b/python/packages/core/tests/core/conftest.py @@ -251,7 +251,7 @@ def max_iterations(request: Any) -> int: @fixture -def chat_client(enable_function_calling: bool, max_iterations: int) -> MockChatClient: +def client(enable_function_calling: bool, max_iterations: int) -> MockChatClient: if enable_function_calling: with patch("agent_framework._tools.DEFAULT_MAX_ITERATIONS", max_iterations): return type("FunctionInvokingMockChatClient", (FunctionInvocationLayer, MockChatClient), {})() @@ -261,10 +261,10 @@ def chat_client(enable_function_calling: bool, max_iterations: int) -> MockChatC @fixture def chat_client_base(enable_function_calling: bool, max_iterations: int) -> MockBaseChatClient: with patch("agent_framework._tools.DEFAULT_MAX_ITERATIONS", max_iterations): - chat_client = MockBaseChatClient() + client = MockBaseChatClient() 
if not enable_function_calling: - chat_client.function_invocation_configuration["enabled"] = False - return chat_client + client.function_invocation_configuration["enabled"] = False + return client # region Agents diff --git a/python/packages/core/tests/core/test_agents.py b/python/packages/core/tests/core/test_agents.py index 3e77bfc6f0..e722632b77 100644 --- a/python/packages/core/tests/core/test_agents.py +++ b/python/packages/core/tests/core/test_agents.py @@ -55,54 +55,54 @@ async def collect_updates(updates: AsyncIterable[AgentResponseUpdate]) -> list[A assert updates[0].text == "Response" -def test_chat_client_agent_type(chat_client: SupportsChatGetResponse) -> None: - chat_client_agent = Agent(chat_client=chat_client) +def test_chat_client_agent_type(client: SupportsChatGetResponse) -> None: + chat_client_agent = Agent(client=client) assert isinstance(chat_client_agent, SupportsAgentRun) -async def test_chat_client_agent_init(chat_client: SupportsChatGetResponse) -> None: +async def test_chat_client_agent_init(client: SupportsChatGetResponse) -> None: agent_id = str(uuid4()) - agent = Agent(chat_client=chat_client, id=agent_id, description="Test") + agent = Agent(client=client, id=agent_id, description="Test") assert agent.id == agent_id assert agent.name is None assert agent.description == "Test" -async def test_chat_client_agent_init_with_name(chat_client: SupportsChatGetResponse) -> None: +async def test_chat_client_agent_init_with_name(client: SupportsChatGetResponse) -> None: agent_id = str(uuid4()) - agent = Agent(chat_client=chat_client, id=agent_id, name="Test Agent", description="Test") + agent = Agent(client=client, id=agent_id, name="Test Agent", description="Test") assert agent.id == agent_id assert agent.name == "Test Agent" assert agent.description == "Test" -async def test_chat_client_agent_run(chat_client: SupportsChatGetResponse) -> None: - agent = Agent(chat_client=chat_client) +async def test_chat_client_agent_run(client: 
SupportsChatGetResponse) -> None: + agent = Agent(client=client) result = await agent.run("Hello") assert result.text == "test response" -async def test_chat_client_agent_run_streaming(chat_client: SupportsChatGetResponse) -> None: - agent = Agent(chat_client=chat_client) +async def test_chat_client_agent_run_streaming(client: SupportsChatGetResponse) -> None: + agent = Agent(client=client) result = await AgentResponse.from_update_generator(agent.run("Hello", stream=True)) assert result.text == "test streaming response another update" -async def test_chat_client_agent_get_new_thread(chat_client: SupportsChatGetResponse) -> None: - agent = Agent(chat_client=chat_client) +async def test_chat_client_agent_get_new_thread(client: SupportsChatGetResponse) -> None: + agent = Agent(client=client) thread = agent.get_new_thread() assert isinstance(thread, AgentThread) -async def test_chat_client_agent_prepare_thread_and_messages(chat_client: SupportsChatGetResponse) -> None: - agent = Agent(chat_client=chat_client) +async def test_chat_client_agent_prepare_thread_and_messages(client: SupportsChatGetResponse) -> None: + agent = Agent(client=client) message = Message(role="user", text="Hello") thread = AgentThread(message_store=ChatMessageStore(messages=[message])) @@ -116,9 +116,9 @@ async def test_chat_client_agent_prepare_thread_and_messages(chat_client: Suppor assert result_messages[1].text == "Test" -async def test_prepare_thread_does_not_mutate_agent_chat_options(chat_client: SupportsChatGetResponse) -> None: +async def test_prepare_thread_does_not_mutate_agent_chat_options(client: SupportsChatGetResponse) -> None: tool = HostedCodeInterpreterTool() - agent = Agent(chat_client=chat_client, tools=[tool]) + agent = Agent(client=client, tools=[tool]) assert agent.default_options.get("tools") is not None base_tools = agent.default_options["tools"] @@ -143,7 +143,7 @@ async def test_chat_client_agent_update_thread_id(chat_client_base: SupportsChat ) 
chat_client_base.run_responses = [mock_response] agent = Agent( - chat_client=chat_client_base, + client=chat_client_base, tools=HostedCodeInterpreterTool(), ) thread = agent.get_new_thread() @@ -154,8 +154,8 @@ async def test_chat_client_agent_update_thread_id(chat_client_base: SupportsChat assert thread.service_thread_id == "123" -async def test_chat_client_agent_update_thread_messages(chat_client: SupportsChatGetResponse) -> None: - agent = Agent(chat_client=chat_client) +async def test_chat_client_agent_update_thread_messages(client: SupportsChatGetResponse) -> None: + agent = Agent(client=client) thread = agent.get_new_thread() result = await agent.run("Hello", thread=thread) @@ -172,26 +172,26 @@ async def test_chat_client_agent_update_thread_messages(chat_client: SupportsCha assert chat_messages[1].text == "test response" -async def test_chat_client_agent_update_thread_conversation_id_missing(chat_client: SupportsChatGetResponse) -> None: - agent = Agent(chat_client=chat_client) +async def test_chat_client_agent_update_thread_conversation_id_missing(client: SupportsChatGetResponse) -> None: + agent = Agent(client=client) thread = AgentThread(service_thread_id="123") with raises(AgentExecutionException, match="Service did not return a valid conversation id"): await agent._update_thread_with_type_and_conversation_id(thread, None) # type: ignore[reportPrivateUsage] -async def test_chat_client_agent_default_author_name(chat_client: SupportsChatGetResponse) -> None: +async def test_chat_client_agent_default_author_name(client: SupportsChatGetResponse) -> None: # Name is not specified here, so default name should be used - agent = Agent(chat_client=chat_client) + agent = Agent(client=client) result = await agent.run("Hello") assert result.text == "test response" assert result.messages[0].author_name == "UnnamedAgent" -async def test_chat_client_agent_author_name_as_agent_name(chat_client: SupportsChatGetResponse) -> None: +async def 
test_chat_client_agent_author_name_as_agent_name(client: SupportsChatGetResponse) -> None: # Name is specified here, so it should be used as author name - agent = Agent(chat_client=chat_client, name="TestAgent") + agent = Agent(client=client, name="TestAgent") result = await agent.run("Hello") assert result.text == "test response" @@ -207,7 +207,7 @@ async def test_chat_client_agent_author_name_is_used_from_response(chat_client_b ) ] - agent = Agent(chat_client=chat_client_base, tools=HostedCodeInterpreterTool()) + agent = Agent(client=chat_client_base, tools=HostedCodeInterpreterTool()) result = await agent.run("Hello") assert result.text == "test response" @@ -251,10 +251,10 @@ async def invoking(self, messages: Message | MutableSequence[Message], **kwargs: return Context(messages=self.context_messages) -async def test_chat_agent_context_providers_model_invoking(chat_client: SupportsChatGetResponse) -> None: +async def test_chat_agent_context_providers_model_invoking(client: SupportsChatGetResponse) -> None: """Test that context providers' invoking is called during agent run.""" mock_provider = MockContextProvider(messages=[Message(role="system", text="Test context instructions")]) - agent = Agent(chat_client=chat_client, context_provider=mock_provider) + agent = Agent(client=client, context_provider=mock_provider) await agent.run("Hello") @@ -271,7 +271,7 @@ async def test_chat_agent_context_providers_thread_created(chat_client_base: Sup ) ] - agent = Agent(chat_client=chat_client_base, context_provider=mock_provider) + agent = Agent(client=chat_client_base, context_provider=mock_provider) await agent.run("Hello") @@ -279,10 +279,10 @@ async def test_chat_agent_context_providers_thread_created(chat_client_base: Sup assert mock_provider.thread_created_thread_id == "test-thread-id" -async def test_chat_agent_context_providers_messages_adding(chat_client: SupportsChatGetResponse) -> None: +async def test_chat_agent_context_providers_messages_adding(client: 
SupportsChatGetResponse) -> None: """Test that context providers' invoked is called during agent run.""" mock_provider = MockContextProvider() - agent = Agent(chat_client=chat_client, context_provider=mock_provider) + agent = Agent(client=client, context_provider=mock_provider) await agent.run("Hello") @@ -291,10 +291,10 @@ async def test_chat_agent_context_providers_messages_adding(chat_client: Support assert len(mock_provider.new_messages) >= 2 -async def test_chat_agent_context_instructions_in_messages(chat_client: SupportsChatGetResponse) -> None: +async def test_chat_agent_context_instructions_in_messages(client: SupportsChatGetResponse) -> None: """Test that AI context instructions are included in messages.""" mock_provider = MockContextProvider(messages=[Message(role="system", text="Context-specific instructions")]) - agent = Agent(chat_client=chat_client, instructions="Agent instructions", context_provider=mock_provider) + agent = Agent(client=client, instructions="Agent instructions", context_provider=mock_provider) # We need to test the _prepare_thread_and_messages method directly _, _, messages = await agent._prepare_thread_and_messages( # type: ignore[reportPrivateUsage] @@ -307,13 +307,13 @@ async def test_chat_agent_context_instructions_in_messages(chat_client: Supports assert messages[0].text == "Context-specific instructions" assert messages[1].role == "user" assert messages[1].text == "Hello" - # instructions system message is added by a chat_client + # instructions system message is added by a client -async def test_chat_agent_no_context_instructions(chat_client: SupportsChatGetResponse) -> None: +async def test_chat_agent_no_context_instructions(client: SupportsChatGetResponse) -> None: """Test behavior when AI context has no instructions.""" mock_provider = MockContextProvider() - agent = Agent(chat_client=chat_client, instructions="Agent instructions", context_provider=mock_provider) + agent = Agent(client=client, instructions="Agent 
instructions", context_provider=mock_provider) _, _, messages = await agent._prepare_thread_and_messages( # type: ignore[reportPrivateUsage] thread=None, input_messages=[Message(role="user", text="Hello")] @@ -325,10 +325,10 @@ async def test_chat_agent_no_context_instructions(chat_client: SupportsChatGetRe assert messages[0].text == "Hello" -async def test_chat_agent_run_stream_context_providers(chat_client: SupportsChatGetResponse) -> None: +async def test_chat_agent_run_stream_context_providers(client: SupportsChatGetResponse) -> None: """Test that context providers work with run method.""" mock_provider = MockContextProvider(messages=[Message(role="system", text="Stream context instructions")]) - agent = Agent(chat_client=chat_client, context_provider=mock_provider) + agent = Agent(client=client, context_provider=mock_provider) # Collect all stream updates and get final response stream = agent.run("Hello", stream=True) @@ -355,7 +355,7 @@ async def test_chat_agent_context_providers_with_thread_service_id(chat_client_b ) ] - agent = Agent(chat_client=chat_client_base, context_provider=mock_provider) + agent = Agent(client=chat_client_base, context_provider=mock_provider) # Use existing service-managed thread thread = agent.get_new_thread(service_thread_id="existing-thread-id") @@ -366,9 +366,9 @@ async def test_chat_agent_context_providers_with_thread_service_id(chat_client_b # Tests for as_tool method -async def test_chat_agent_as_tool_basic(chat_client: SupportsChatGetResponse) -> None: +async def test_chat_agent_as_tool_basic(client: SupportsChatGetResponse) -> None: """Test basic as_tool functionality.""" - agent = Agent(chat_client=chat_client, name="TestAgent", description="Test agent for as_tool") + agent = Agent(client=client, name="TestAgent", description="Test agent for as_tool") tool = agent.as_tool() @@ -378,9 +378,9 @@ async def test_chat_agent_as_tool_basic(chat_client: SupportsChatGetResponse) -> assert hasattr(tool, "input_model") -async def 
test_chat_agent_as_tool_custom_parameters(chat_client: SupportsChatGetResponse) -> None: +async def test_chat_agent_as_tool_custom_parameters(client: SupportsChatGetResponse) -> None: """Test as_tool with custom parameters.""" - agent = Agent(chat_client=chat_client, name="TestAgent", description="Original description") + agent = Agent(client=client, name="TestAgent", description="Original description") tool = agent.as_tool( name="CustomTool", @@ -398,10 +398,10 @@ async def test_chat_agent_as_tool_custom_parameters(chat_client: SupportsChatGet assert schema["properties"]["query"]["description"] == "Custom input description" -async def test_chat_agent_as_tool_defaults(chat_client: SupportsChatGetResponse) -> None: +async def test_chat_agent_as_tool_defaults(client: SupportsChatGetResponse) -> None: """Test as_tool with default parameters.""" agent = Agent( - chat_client=chat_client, + client=client, name="TestAgent", # No description provided ) @@ -417,18 +417,18 @@ async def test_chat_agent_as_tool_defaults(chat_client: SupportsChatGetResponse) assert "Task for TestAgent" in schema["properties"]["task"]["description"] -async def test_chat_agent_as_tool_no_name(chat_client: SupportsChatGetResponse) -> None: +async def test_chat_agent_as_tool_no_name(client: SupportsChatGetResponse) -> None: """Test as_tool when agent has no name (should raise ValueError).""" - agent = Agent(chat_client=chat_client) # No name provided + agent = Agent(client=client) # No name provided # Should raise ValueError since agent has no name with raises(ValueError, match="Agent tool name cannot be None"): agent.as_tool() -async def test_chat_agent_as_tool_function_execution(chat_client: SupportsChatGetResponse) -> None: +async def test_chat_agent_as_tool_function_execution(client: SupportsChatGetResponse) -> None: """Test that the generated FunctionTool can be executed.""" - agent = Agent(chat_client=chat_client, name="TestAgent", description="Test agent") + agent = Agent(client=client, 
name="TestAgent", description="Test agent") tool = agent.as_tool() @@ -440,9 +440,9 @@ async def test_chat_agent_as_tool_function_execution(chat_client: SupportsChatGe assert result == "test response" # From mock chat client -async def test_chat_agent_as_tool_with_stream_callback(chat_client: SupportsChatGetResponse) -> None: +async def test_chat_agent_as_tool_with_stream_callback(client: SupportsChatGetResponse) -> None: """Test as_tool with stream callback functionality.""" - agent = Agent(chat_client=chat_client, name="StreamingAgent") + agent = Agent(client=client, name="StreamingAgent") # Collect streaming updates collected_updates: list[AgentResponseUpdate] = [] @@ -463,9 +463,9 @@ def stream_callback(update: AgentResponseUpdate) -> None: assert result == expected_text -async def test_chat_agent_as_tool_with_custom_arg_name(chat_client: SupportsChatGetResponse) -> None: +async def test_chat_agent_as_tool_with_custom_arg_name(client: SupportsChatGetResponse) -> None: """Test as_tool with custom argument name.""" - agent = Agent(chat_client=chat_client, name="CustomArgAgent") + agent = Agent(client=client, name="CustomArgAgent") tool = agent.as_tool(arg_name="prompt", arg_description="Custom prompt input") @@ -474,9 +474,9 @@ async def test_chat_agent_as_tool_with_custom_arg_name(chat_client: SupportsChat assert result == "test response" -async def test_chat_agent_as_tool_with_async_stream_callback(chat_client: SupportsChatGetResponse) -> None: +async def test_chat_agent_as_tool_with_async_stream_callback(client: SupportsChatGetResponse) -> None: """Test as_tool with async stream callback functionality.""" - agent = Agent(chat_client=chat_client, name="AsyncStreamingAgent") + agent = Agent(client=client, name="AsyncStreamingAgent") # Collect streaming updates using an async callback collected_updates: list[AgentResponseUpdate] = [] @@ -497,7 +497,7 @@ async def async_stream_callback(update: AgentResponseUpdate) -> None: assert result == expected_text -async def 
test_chat_agent_as_tool_name_sanitization(chat_client: SupportsChatGetResponse) -> None: +async def test_chat_agent_as_tool_name_sanitization(client: SupportsChatGetResponse) -> None: """Test as_tool name sanitization.""" test_cases = [ ("Invoice & Billing Agent", "Invoice_Billing_Agent"), @@ -510,14 +510,14 @@ async def test_chat_agent_as_tool_name_sanitization(chat_client: SupportsChatGet ] for agent_name, expected_tool_name in test_cases: - agent = Agent(chat_client=chat_client, name=agent_name, description="Test agent") + agent = Agent(client=client, name=agent_name, description="Test agent") tool = agent.as_tool() assert tool.name == expected_tool_name, f"Expected {expected_tool_name}, got {tool.name} for input {agent_name}" -async def test_chat_agent_as_mcp_server_basic(chat_client: SupportsChatGetResponse) -> None: +async def test_chat_agent_as_mcp_server_basic(client: SupportsChatGetResponse) -> None: """Test basic as_mcp_server functionality.""" - agent = Agent(chat_client=chat_client, name="TestAgent", description="Test agent for MCP") + agent = Agent(client=client, name="TestAgent", description="Test agent for MCP") # Create MCP server with default parameters server = agent.as_mcp_server() @@ -528,9 +528,9 @@ async def test_chat_agent_as_mcp_server_basic(chat_client: SupportsChatGetRespon assert hasattr(server, "version") -async def test_chat_agent_run_with_mcp_tools(chat_client: SupportsChatGetResponse) -> None: +async def test_chat_agent_run_with_mcp_tools(client: SupportsChatGetResponse) -> None: """Test run method with MCP tools to cover MCP tool handling code.""" - agent = Agent(chat_client=chat_client, name="TestAgent", description="Test agent") + agent = Agent(client=client, name="TestAgent", description="Test agent") # Create a mock MCP tool mock_mcp_tool = MagicMock(spec=MCPTool) @@ -547,7 +547,7 @@ async def test_chat_agent_run_with_mcp_tools(chat_client: SupportsChatGetRespons await agent.run(messages="Test message", tools=[mock_mcp_tool]) 
-async def test_chat_agent_with_local_mcp_tools(chat_client: SupportsChatGetResponse) -> None: +async def test_chat_agent_with_local_mcp_tools(client: SupportsChatGetResponse) -> None: """Test agent initialization with local MCP tools.""" # Create a mock MCP tool mock_mcp_tool = MagicMock(spec=MCPTool) @@ -557,7 +557,7 @@ async def test_chat_agent_with_local_mcp_tools(chat_client: SupportsChatGetRespo # Test agent with MCP tools in constructor with contextlib.suppress(Exception): - agent = Agent(chat_client=chat_client, name="TestAgent", description="Test agent", tools=[mock_mcp_tool]) + agent = Agent(client=client, name="TestAgent", description="Test agent", tools=[mock_mcp_tool]) # Test async context manager with MCP tools async with agent: pass @@ -588,7 +588,7 @@ def echo_thread_info(text: str, **kwargs: Any) -> str: # type: ignore[reportUnk ChatResponse(messages=Message(role="assistant", text="done")), ] - agent = Agent(chat_client=chat_client_base, tools=[echo_thread_info], chat_message_store_factory=ChatMessageStore) + agent = Agent(client=chat_client_base, tools=[echo_thread_info], chat_message_store_factory=ChatMessageStore) thread = agent.get_new_thread() result = await agent.run("hello", thread=thread, options={"additional_function_arguments": {"thread": thread}}) @@ -616,7 +616,7 @@ async def capturing_inner( # Create agent with agent-level tool_choice="auto" and a tool (tools required for tool_choice to be meaningful) agent = Agent( - chat_client=chat_client_base, + client=chat_client_base, tools=[tool_tool], options={"tool_choice": "auto"}, ) @@ -647,7 +647,7 @@ async def capturing_inner( # Create agent with agent-level tool_choice="required" and a tool agent = Agent( - chat_client=chat_client_base, + client=chat_client_base, tools=[tool_tool], default_options={"tool_choice": "required"}, ) @@ -678,7 +678,7 @@ async def capturing_inner( # Create agent with agent-level tool_choice="auto" and a tool agent = Agent( - chat_client=chat_client_base, + 
client=chat_client_base, tools=[tool_tool], default_options={"tool_choice": "auto"}, ) @@ -808,7 +808,7 @@ def test_sanitize_agent_name_replaces_invalid_chars(): @pytest.mark.asyncio async def test_agent_get_new_thread(chat_client_base: SupportsChatGetResponse, tool_tool: ToolProtocol): """Test that get_new_thread returns a new AgentThread.""" - agent = Agent(chat_client=chat_client_base, tools=[tool_tool]) + agent = Agent(client=chat_client_base, tools=[tool_tool]) thread = agent.get_new_thread() @@ -827,7 +827,7 @@ async def invoking(self, messages, **kwargs): return Context() provider = TestContextProvider() - agent = Agent(chat_client=chat_client_base, tools=[tool_tool], context_provider=provider) + agent = Agent(client=chat_client_base, tools=[tool_tool], context_provider=provider) thread = agent.get_new_thread() @@ -840,7 +840,7 @@ async def test_agent_get_new_thread_with_service_thread_id( chat_client_base: SupportsChatGetResponse, tool_tool: ToolProtocol ): """Test that get_new_thread passes kwargs like service_thread_id to the thread.""" - agent = Agent(chat_client=chat_client_base, tools=[tool_tool]) + agent = Agent(client=chat_client_base, tools=[tool_tool]) thread = agent.get_new_thread(service_thread_id="test-thread-123") @@ -851,7 +851,7 @@ async def test_agent_get_new_thread_with_service_thread_id( @pytest.mark.asyncio async def test_agent_deserialize_thread(chat_client_base: SupportsChatGetResponse, tool_tool: ToolProtocol): """Test deserialize_thread restores a thread from serialized state.""" - agent = Agent(chat_client=chat_client_base, tools=[tool_tool]) + agent = Agent(client=chat_client_base, tools=[tool_tool]) # Create serialized thread state with messages serialized_state = { @@ -885,7 +885,7 @@ async def test_chat_agent_raises_with_both_conversation_id_and_store(): with pytest.raises(AgentInitializationError, match="Cannot specify both"): Agent( - chat_client=mock_client, + client=mock_client, default_options={"conversation_id": "test_id"}, 
chat_message_store_factory=mock_store_factory, ) @@ -897,7 +897,7 @@ def test_chat_agent_calls_update_agent_name_on_client(): mock_client._update_agent_name_and_description = MagicMock() Agent( - chat_client=mock_client, + client=mock_client, name="TestAgent", description="Test description", ) @@ -920,7 +920,7 @@ async def invoking(self, messages, **kwargs): return Context(tools=[context_tool]) provider = ToolContextProvider() - agent = Agent(chat_client=chat_client_base, context_provider=provider) + agent = Agent(client=chat_client_base, context_provider=provider) # Agent starts with empty tools list assert agent.default_options.get("tools") == [] @@ -946,7 +946,7 @@ async def invoking(self, messages, **kwargs): return Context(instructions="Context-provided instructions") provider = InstructionContextProvider() - agent = Agent(chat_client=chat_client_base, context_provider=provider) + agent = Agent(client=chat_client_base, context_provider=provider) # Verify agent has no default instructions assert agent.default_options.get("instructions") is None @@ -964,7 +964,7 @@ async def invoking(self, messages, **kwargs): async def test_chat_agent_raises_on_conversation_id_mismatch(chat_client_base: SupportsChatGetResponse): """Test that Agent raises when thread and agent have different conversation IDs.""" agent = Agent( - chat_client=chat_client_base, + client=chat_client_base, default_options={"conversation_id": "agent-conversation-id"}, ) diff --git a/python/packages/core/tests/core/test_as_tool_kwargs_propagation.py b/python/packages/core/tests/core/test_as_tool_kwargs_propagation.py index b073badaef..b34164b86b 100644 --- a/python/packages/core/tests/core/test_as_tool_kwargs_propagation.py +++ b/python/packages/core/tests/core/test_as_tool_kwargs_propagation.py @@ -14,7 +14,7 @@ class TestAsToolKwargsPropagation: """Test cases for kwargs propagation through as_tool() delegation.""" - async def test_as_tool_forwards_runtime_kwargs(self, chat_client: MockChatClient) -> 
None: + async def test_as_tool_forwards_runtime_kwargs(self, client: MockChatClient) -> None: """Test that runtime kwargs are forwarded through as_tool() to sub-agent.""" captured_kwargs: dict[str, Any] = {} @@ -27,13 +27,13 @@ async def capture_middleware( await call_next(context) # Setup mock response - chat_client.responses = [ + client.responses = [ ChatResponse(messages=[Message(role="assistant", text="Response from sub-agent")]), ] # Create sub-agent with middleware sub_agent = Agent( - chat_client=chat_client, + client=client, name="sub_agent", middleware=[capture_middleware], ) @@ -57,7 +57,7 @@ async def capture_middleware( assert "session_id" in captured_kwargs assert captured_kwargs["session_id"] == "session-789" - async def test_as_tool_excludes_arg_name_from_forwarded_kwargs(self, chat_client: MockChatClient) -> None: + async def test_as_tool_excludes_arg_name_from_forwarded_kwargs(self, client: MockChatClient) -> None: """Test that the arg_name parameter is not forwarded as a kwarg.""" captured_kwargs: dict[str, Any] = {} @@ -69,12 +69,12 @@ async def capture_middleware( await call_next(context) # Setup mock response - chat_client.responses = [ + client.responses = [ ChatResponse(messages=[Message(role="assistant", text="Response from sub-agent")]), ] sub_agent = Agent( - chat_client=chat_client, + client=client, name="sub_agent", middleware=[capture_middleware], ) @@ -94,7 +94,7 @@ async def capture_middleware( assert "api_token" in captured_kwargs assert captured_kwargs["api_token"] == "token-123" - async def test_as_tool_nested_delegation_propagates_kwargs(self, chat_client: MockChatClient) -> None: + async def test_as_tool_nested_delegation_propagates_kwargs(self, client: MockChatClient) -> None: """Test that kwargs propagate through multiple levels of delegation (A → B → C).""" captured_kwargs_list: list[dict[str, Any]] = [] @@ -107,7 +107,7 @@ async def capture_middleware( await call_next(context) # Setup mock responses to trigger nested tool 
invocation: B calls tool C, then completes. - chat_client.responses = [ + client.responses = [ ChatResponse( messages=[ Message( @@ -128,14 +128,14 @@ async def capture_middleware( # Create agent C (bottom level) agent_c = Agent( - chat_client=chat_client, + client=client, name="agent_c", middleware=[capture_middleware], ) # Create agent B (middle level) - delegates to C agent_b = Agent( - chat_client=chat_client, + client=client, name="agent_b", tools=[agent_c.as_tool(name="call_c")], middleware=[capture_middleware], @@ -157,7 +157,7 @@ async def capture_middleware( assert captured_kwargs_list[0].get("trace_id") == "trace-abc-123" assert captured_kwargs_list[0].get("tenant_id") == "tenant-xyz" - async def test_as_tool_streaming_mode_forwards_kwargs(self, chat_client: MockChatClient) -> None: + async def test_as_tool_streaming_mode_forwards_kwargs(self, client: MockChatClient) -> None: """Test that kwargs are forwarded in streaming mode.""" captured_kwargs: dict[str, Any] = {} @@ -171,12 +171,12 @@ async def capture_middleware( # Setup mock streaming responses from agent_framework import ChatResponseUpdate - chat_client.streaming_responses = [ + client.streaming_responses = [ [ChatResponseUpdate(contents=[Content.from_text(text="Streaming response")], role="assistant")], ] sub_agent = Agent( - chat_client=chat_client, + client=client, name="sub_agent", middleware=[capture_middleware], ) @@ -199,15 +199,15 @@ async def stream_callback(update: Any) -> None: assert captured_kwargs["api_key"] == "streaming-key-999" assert len(captured_updates) == 1 - async def test_as_tool_empty_kwargs_still_works(self, chat_client: MockChatClient) -> None: + async def test_as_tool_empty_kwargs_still_works(self, client: MockChatClient) -> None: """Test that as_tool works correctly when no extra kwargs are provided.""" # Setup mock response - chat_client.responses = [ + client.responses = [ ChatResponse(messages=[Message(role="assistant", text="Response from agent")]), ] sub_agent = 
Agent( - chat_client=chat_client, + client=client, name="sub_agent", ) @@ -219,7 +219,7 @@ async def test_as_tool_empty_kwargs_still_works(self, chat_client: MockChatClien # Verify tool executed successfully assert result is not None - async def test_as_tool_kwargs_with_chat_options(self, chat_client: MockChatClient) -> None: + async def test_as_tool_kwargs_with_chat_options(self, client: MockChatClient) -> None: """Test that kwargs including chat_options are properly forwarded.""" captured_kwargs: dict[str, Any] = {} @@ -231,12 +231,12 @@ async def capture_middleware( await call_next(context) # Setup mock response - chat_client.responses = [ + client.responses = [ ChatResponse(messages=[Message(role="assistant", text="Response with options")]), ] sub_agent = Agent( - chat_client=chat_client, + client=client, name="sub_agent", middleware=[capture_middleware], ) @@ -259,7 +259,7 @@ async def capture_middleware( assert "custom_param" in captured_kwargs assert captured_kwargs["custom_param"] == "custom_value" - async def test_as_tool_kwargs_isolated_per_invocation(self, chat_client: MockChatClient) -> None: + async def test_as_tool_kwargs_isolated_per_invocation(self, client: MockChatClient) -> None: """Test that kwargs are isolated per invocation and don't leak between calls.""" first_call_kwargs: dict[str, Any] = {} second_call_kwargs: dict[str, Any] = {} @@ -278,13 +278,13 @@ async def capture_middleware( await call_next(context) # Setup mock responses for both calls - chat_client.responses = [ + client.responses = [ ChatResponse(messages=[Message(role="assistant", text="First response")]), ChatResponse(messages=[Message(role="assistant", text="Second response")]), ] sub_agent = Agent( - chat_client=chat_client, + client=client, name="sub_agent", middleware=[capture_middleware], ) @@ -313,7 +313,7 @@ async def capture_middleware( assert second_call_kwargs.get("session_id") == "session-2" assert second_call_kwargs.get("api_token") == "token-2" - async def 
test_as_tool_excludes_conversation_id_from_forwarded_kwargs(self, chat_client: MockChatClient) -> None: + async def test_as_tool_excludes_conversation_id_from_forwarded_kwargs(self, client: MockChatClient) -> None: """Test that conversation_id is not forwarded to sub-agent.""" captured_kwargs: dict[str, Any] = {} @@ -325,12 +325,12 @@ async def capture_middleware( await call_next(context) # Setup mock response - chat_client.responses = [ + client.responses = [ ChatResponse(messages=[Message(role="assistant", text="Response from sub-agent")]), ] sub_agent = Agent( - chat_client=chat_client, + client=client, name="sub_agent", middleware=[capture_middleware], ) diff --git a/python/packages/core/tests/core/test_clients.py b/python/packages/core/tests/core/test_clients.py index 8fd8e3cb6b..8306df575b 100644 --- a/python/packages/core/tests/core/test_clients.py +++ b/python/packages/core/tests/core/test_clients.py @@ -11,18 +11,18 @@ ) -def test_chat_client_type(chat_client: SupportsChatGetResponse): - assert isinstance(chat_client, SupportsChatGetResponse) +def test_chat_client_type(client: SupportsChatGetResponse): + assert isinstance(client, SupportsChatGetResponse) -async def test_chat_client_get_response(chat_client: SupportsChatGetResponse): - response = await chat_client.get_response(Message(role="user", text="Hello")) +async def test_chat_client_get_response(client: SupportsChatGetResponse): + response = await client.get_response(Message(role="user", text="Hello")) assert response.text == "test response" assert response.messages[0].role == "assistant" -async def test_chat_client_get_response_streaming(chat_client: SupportsChatGetResponse): - async for update in chat_client.get_response(Message(role="user", text="Hello"), stream=True): +async def test_chat_client_get_response_streaming(client: SupportsChatGetResponse): + async for update in client.get_response(Message(role="user", text="Hello"), stream=True): assert update.text == "test streaming response " or 
update.text == "another update" assert update.role == "assistant" diff --git a/python/packages/core/tests/core/test_function_invocation_logic.py b/python/packages/core/tests/core/test_function_invocation_logic.py index ebb3171b19..dcc28958f5 100644 --- a/python/packages/core/tests/core/test_function_invocation_logic.py +++ b/python/packages/core/tests/core/test_function_invocation_logic.py @@ -165,7 +165,7 @@ def ai_func(user_query: str) -> str: ChatResponse(messages=Message(role="assistant", text="done")), ] - agent = Agent(chat_client=chat_client_base, tools=[ai_func]) + agent = Agent(client=chat_client_base, tools=[ai_func]) async def handler(request: web.Request) -> web.Response: thread = agent.get_new_thread() @@ -222,7 +222,7 @@ def ai_func(user_query: str) -> str: ChatResponse(messages=Message(role="assistant", text="done")), ] - agent = Agent(chat_client=chat_client_base, tools=[ai_func]) + agent = Agent(client=chat_client_base, tools=[ai_func]) ready_event = threading.Event() port_queue: Queue[int] = Queue() diff --git a/python/packages/core/tests/core/test_mcp.py b/python/packages/core/tests/core/test_mcp.py index a0309f8b73..b81426e12f 100644 --- a/python/packages/core/tests/core/test_mcp.py +++ b/python/packages/core/tests/core/test_mcp.py @@ -1391,7 +1391,7 @@ async def test_mcp_tool_sampling_callback_chat_client_exception(): mock_chat_client = AsyncMock() mock_chat_client.get_response.side_effect = RuntimeError("Chat client error") - tool.chat_client = mock_chat_client + tool.client = mock_chat_client # Create mock params params = Mock() @@ -1434,7 +1434,7 @@ async def test_mcp_tool_sampling_callback_no_valid_content(): mock_response.model_id = "test-model" mock_chat_client.get_response.return_value = mock_response - tool.chat_client = mock_chat_client + tool.client = mock_chat_client # Create mock params params = Mock() diff --git a/python/packages/core/tests/core/test_middleware.py b/python/packages/core/tests/core/test_middleware.py index 
3dd10ac871..41c15b2c70 100644 --- a/python/packages/core/tests/core/test_middleware.py +++ b/python/packages/core/tests/core/test_middleware.py @@ -101,9 +101,9 @@ def test_init_with_defaults(self, mock_chat_client: Any) -> None: """Test ChatContext initialization with default values.""" messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {} - context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) + context = ChatContext(client=mock_chat_client, messages=messages, options=chat_options) - assert context.chat_client is mock_chat_client + assert context.client is mock_chat_client assert context.messages == messages assert context.options is chat_options assert context.stream is False @@ -117,14 +117,14 @@ def test_init_with_custom_values(self, mock_chat_client: Any) -> None: metadata = {"key": "value"} context = ChatContext( - chat_client=mock_chat_client, + client=mock_chat_client, messages=messages, options=chat_options, stream=True, metadata=metadata, ) - assert context.chat_client is mock_chat_client + assert context.client is mock_chat_client assert context.messages == messages assert context.options is chat_options assert context.stream is True @@ -574,7 +574,7 @@ async def test_execute_no_middleware(self, mock_chat_client: Any) -> None: pipeline = ChatMiddlewarePipeline() messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {} - context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) + context = ChatContext(client=mock_chat_client, messages=messages, options=chat_options) expected_response = ChatResponse(messages=[Message(role="assistant", text="response")]) @@ -601,7 +601,7 @@ async def process(self, context: ChatContext, call_next: Callable[[ChatContext], pipeline = ChatMiddlewarePipeline(middleware) messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {} - context = ChatContext(chat_client=mock_chat_client, 
messages=messages, options=chat_options) + context = ChatContext(client=mock_chat_client, messages=messages, options=chat_options) expected_response = ChatResponse(messages=[Message(role="assistant", text="response")]) @@ -618,7 +618,7 @@ async def test_execute_stream_no_middleware(self, mock_chat_client: Any) -> None pipeline = ChatMiddlewarePipeline() messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {} - context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options, stream=True) + context = ChatContext(client=mock_chat_client, messages=messages, options=chat_options, stream=True) def final_handler(ctx: ChatContext) -> ResponseStream[ChatResponseUpdate, ChatResponse]: async def _stream() -> AsyncIterable[ChatResponseUpdate]: @@ -653,7 +653,7 @@ async def process(self, context: ChatContext, call_next: Callable[[ChatContext], pipeline = ChatMiddlewarePipeline(middleware) messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {} - context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options, stream=True) + context = ChatContext(client=mock_chat_client, messages=messages, options=chat_options, stream=True) def final_handler(ctx: ChatContext) -> ResponseStream[ChatResponseUpdate, ChatResponse]: async def _stream() -> AsyncIterable[ChatResponseUpdate]: @@ -680,7 +680,7 @@ async def test_execute_with_pre_next_termination(self, mock_chat_client: Any) -> pipeline = ChatMiddlewarePipeline(middleware) messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {} - context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) + context = ChatContext(client=mock_chat_client, messages=messages, options=chat_options) execution_order: list[str] = [] async def final_handler(ctx: ChatContext) -> ChatResponse: @@ -699,7 +699,7 @@ async def test_execute_with_post_next_termination(self, mock_chat_client: Any) - pipeline = 
ChatMiddlewarePipeline(middleware) messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {} - context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) + context = ChatContext(client=mock_chat_client, messages=messages, options=chat_options) execution_order: list[str] = [] async def final_handler(ctx: ChatContext) -> ChatResponse: @@ -718,7 +718,7 @@ async def test_execute_stream_with_pre_next_termination(self, mock_chat_client: pipeline = ChatMiddlewarePipeline(middleware) messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {} - context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options, stream=True) + context = ChatContext(client=mock_chat_client, messages=messages, options=chat_options, stream=True) execution_order: list[str] = [] def final_handler(ctx: ChatContext) -> ResponseStream[ChatResponseUpdate, ChatResponse]: @@ -743,7 +743,7 @@ async def test_execute_stream_with_post_next_termination(self, mock_chat_client: pipeline = ChatMiddlewarePipeline(middleware) messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {} - context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options, stream=True) + context = ChatContext(client=mock_chat_client, messages=messages, options=chat_options, stream=True) execution_order: list[str] = [] def final_handler(ctx: ChatContext) -> ResponseStream[ChatResponseUpdate, ChatResponse]: @@ -977,7 +977,7 @@ async def function_chat_middleware( pipeline = ChatMiddlewarePipeline(ClassChatMiddleware(), function_chat_middleware) messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {} - context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) + context = ChatContext(client=mock_chat_client, messages=messages, options=chat_options) async def final_handler(ctx: ChatContext) -> ChatResponse: 
execution_order.append("handler") @@ -1108,7 +1108,7 @@ async def process(self, context: ChatContext, call_next: Callable[[ChatContext], pipeline = ChatMiddlewarePipeline(*middleware) messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {} - context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) + context = ChatContext(client=mock_chat_client, messages=messages, options=chat_options) async def final_handler(ctx: ChatContext) -> ChatResponse: execution_order.append("handler") @@ -1215,7 +1215,7 @@ async def test_chat_context_validation(self, mock_chat_client: Any) -> None: class ChatContextValidationMiddleware(ChatMiddleware): async def process(self, context: ChatContext, call_next: Callable[[ChatContext], Awaitable[None]]) -> None: # Verify context has all expected attributes - assert hasattr(context, "chat_client") + assert hasattr(context, "client") assert hasattr(context, "messages") assert hasattr(context, "options") assert hasattr(context, "stream") @@ -1223,7 +1223,7 @@ async def process(self, context: ChatContext, call_next: Callable[[ChatContext], assert hasattr(context, "result") # Verify context content - assert context.chat_client is mock_chat_client + assert context.client is mock_chat_client assert len(context.messages) == 1 assert context.messages[0].role == "user" assert context.messages[0].text == "test" @@ -1241,7 +1241,7 @@ async def process(self, context: ChatContext, call_next: Callable[[ChatContext], pipeline = ChatMiddlewarePipeline(middleware) messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {"temperature": 0.5} - context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) + context = ChatContext(client=mock_chat_client, messages=messages, options=chat_options) async def final_handler(ctx: ChatContext) -> ChatResponse: # Verify metadata was set by middleware @@ -1355,7 +1355,7 @@ async def process(self, context: 
ChatContext, call_next: Callable[[ChatContext], chat_options: dict[str, Any] = {} # Test non-streaming - context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) + context = ChatContext(client=mock_chat_client, messages=messages, options=chat_options) async def final_handler(ctx: ChatContext) -> ChatResponse: streaming_flags.append(ctx.stream) @@ -1364,7 +1364,7 @@ async def final_handler(ctx: ChatContext) -> ChatResponse: await pipeline.execute(context, final_handler) # Test streaming - context_stream = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options, stream=True) + context_stream = ChatContext(client=mock_chat_client, messages=messages, options=chat_options, stream=True) def final_stream_handler(ctx: ChatContext) -> ResponseStream[ChatResponseUpdate, ChatResponse]: async def _stream() -> AsyncIterable[ChatResponseUpdate]: @@ -1395,7 +1395,7 @@ async def process(self, context: ChatContext, call_next: Callable[[ChatContext], pipeline = ChatMiddlewarePipeline(middleware) messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {} - context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options, stream=True) + context = ChatContext(client=mock_chat_client, messages=messages, options=chat_options, stream=True) def final_stream_handler(ctx: ChatContext) -> ResponseStream[ChatResponseUpdate, ChatResponse]: async def _stream() -> AsyncIterable[ChatResponseUpdate]: @@ -1609,7 +1609,7 @@ async def process(self, context: ChatContext, call_next: Callable[[ChatContext], pipeline = ChatMiddlewarePipeline(middleware) messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {} - context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) + context = ChatContext(client=mock_chat_client, messages=messages, options=chat_options) handler_called = False @@ -1637,7 +1637,7 @@ async def process(self, context: 
ChatContext, call_next: Callable[[ChatContext], pipeline = ChatMiddlewarePipeline(middleware) messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {} - context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options, stream=True) + context = ChatContext(client=mock_chat_client, messages=messages, options=chat_options, stream=True) handler_called = False @@ -1682,7 +1682,7 @@ async def process(self, context: ChatContext, call_next: Callable[[ChatContext], pipeline = ChatMiddlewarePipeline(FirstChatMiddleware(), SecondChatMiddleware()) messages = [Message(role="user", text="test")] chat_options: dict[str, Any] = {} - context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) + context = ChatContext(client=mock_chat_client, messages=messages, options=chat_options) handler_called = False diff --git a/python/packages/core/tests/core/test_middleware_context_result.py b/python/packages/core/tests/core/test_middleware_context_result.py index 74f35f7e2c..d17e99a85e 100644 --- a/python/packages/core/tests/core/test_middleware_context_result.py +++ b/python/packages/core/tests/core/test_middleware_context_result.py @@ -158,7 +158,7 @@ async def process( # Create Agent with override middleware middleware = ChatAgentResponseOverrideMiddleware() - agent = Agent(chat_client=mock_chat_client, middleware=[middleware]) + agent = Agent(client=mock_chat_client, middleware=[middleware]) # Test override case override_messages = [Message(role="user", text="Give me a special response")] @@ -196,7 +196,7 @@ async def process( # Create Agent with override middleware middleware = ChatAgentStreamOverrideMiddleware() - agent = Agent(chat_client=mock_chat_client, middleware=[middleware]) + agent = Agent(client=mock_chat_client, middleware=[middleware]) # Test streaming override case override_messages = [Message(role="user", text="Give me a custom stream")] diff --git 
a/python/packages/core/tests/core/test_middleware_with_agent.py b/python/packages/core/tests/core/test_middleware_with_agent.py index f296e6a7b1..17f0faf4f0 100644 --- a/python/packages/core/tests/core/test_middleware_with_agent.py +++ b/python/packages/core/tests/core/test_middleware_with_agent.py @@ -36,7 +36,7 @@ class TestChatAgentClassBasedMiddleware: """Test cases for class-based middleware integration with Agent.""" - async def test_class_based_agent_middleware_with_chat_agent(self, chat_client: SupportsChatGetResponse) -> None: + async def test_class_based_agent_middleware_with_chat_agent(self, client: SupportsChatGetResponse) -> None: """Test class-based agent middleware with Agent.""" execution_order: list[str] = [] @@ -53,7 +53,7 @@ async def process( # Create Agent with middleware middleware = TrackingAgentMiddleware("agent_middleware") - agent = Agent(chat_client=chat_client, middleware=[middleware]) + agent = Agent(client=client, middleware=[middleware]) # Execute the agent messages = [Message(role="user", text="test message")] @@ -69,7 +69,7 @@ async def process( # Verify middleware execution order assert execution_order == ["agent_middleware_before", "agent_middleware_after"] - async def test_class_based_function_middleware_with_chat_agent(self, chat_client: "MockChatClient") -> None: + async def test_class_based_function_middleware_with_chat_agent(self, client: "MockChatClient") -> None: """Test class-based function middleware with Agent.""" class TrackingFunctionMiddleware(FunctionMiddleware): @@ -81,7 +81,7 @@ async def process( await call_next(context) middleware = TrackingFunctionMiddleware() - Agent(chat_client=chat_client, middleware=[middleware]) + Agent(client=client, middleware=[middleware]) async def test_class_based_function_middleware_with_chat_agent_supported_client( self, chat_client_base: "MockBaseChatClient" @@ -103,7 +103,7 @@ async def process( execution_order.append(f"{self.name}_after") middleware = 
TrackingFunctionMiddleware("function_middleware") - agent = Agent(chat_client=chat_client_base, middleware=[middleware]) + agent = Agent(client=chat_client_base, middleware=[middleware]) messages = [Message(role="user", text="test message")] response = await agent.run(messages) @@ -117,7 +117,7 @@ async def process( class TestChatAgentFunctionBasedMiddleware: """Test cases for function-based middleware integration with Agent.""" - async def test_agent_middleware_with_pre_termination(self, chat_client: "MockChatClient") -> None: + async def test_agent_middleware_with_pre_termination(self, client: "MockChatClient") -> None: """Test that agent middleware can terminate execution before calling next().""" execution_order: list[str] = [] @@ -133,7 +133,7 @@ async def process( # Create Agent with terminating middleware middleware = PreTerminationMiddleware() - agent = Agent(chat_client=chat_client, middleware=[middleware]) + agent = Agent(client=client, middleware=[middleware]) # Execute the agent with multiple messages messages = [ @@ -146,9 +146,9 @@ async def process( assert response is None # Only middleware_before runs - middleware_after is unreachable after raise assert execution_order == ["middleware_before"] - assert chat_client.call_count == 0 # No calls should be made due to termination + assert client.call_count == 0 # No calls should be made due to termination - async def test_agent_middleware_with_post_termination(self, chat_client: "MockChatClient") -> None: + async def test_agent_middleware_with_post_termination(self, client: "MockChatClient") -> None: """Test that agent middleware can terminate execution after calling next().""" execution_order: list[str] = [] @@ -163,7 +163,7 @@ async def process( # Create Agent with terminating middleware middleware = PostTerminationMiddleware() - agent = Agent(chat_client=chat_client, middleware=[middleware]) + agent = Agent(client=client, middleware=[middleware]) # Execute the agent with multiple messages messages = [ 
@@ -183,9 +183,9 @@ async def process( "middleware_before", "middleware_after", ] - assert chat_client.call_count == 1 + assert client.call_count == 1 - async def test_function_middleware_with_pre_termination(self, chat_client: "MockChatClient") -> None: + async def test_function_middleware_with_pre_termination(self, client: "MockChatClient") -> None: """Test that function middleware can terminate execution before calling next().""" execution_order: list[str] = [] @@ -201,9 +201,9 @@ async def process( await call_next(context) execution_order.append("middleware_after") - Agent(chat_client=chat_client, middleware=[PreTerminationFunctionMiddleware()], tools=[]) + Agent(client=client, middleware=[PreTerminationFunctionMiddleware()], tools=[]) - async def test_function_middleware_with_post_termination(self, chat_client: "MockChatClient") -> None: + async def test_function_middleware_with_post_termination(self, client: "MockChatClient") -> None: """Test that function middleware can terminate execution after calling next().""" execution_order: list[str] = [] @@ -218,9 +218,9 @@ async def process( execution_order.append("middleware_after") context.terminate = True - Agent(chat_client=chat_client, middleware=[PostTerminationFunctionMiddleware()], tools=[]) + Agent(client=client, middleware=[PostTerminationFunctionMiddleware()], tools=[]) - async def test_function_based_agent_middleware_with_chat_agent(self, chat_client: "MockChatClient") -> None: + async def test_function_based_agent_middleware_with_chat_agent(self, client: "MockChatClient") -> None: """Test function-based agent middleware with Agent.""" execution_order: list[str] = [] @@ -232,7 +232,7 @@ async def tracking_agent_middleware( execution_order.append("agent_function_after") # Create Agent with function middleware - agent = Agent(chat_client=chat_client, middleware=[tracking_agent_middleware]) + agent = Agent(client=client, middleware=[tracking_agent_middleware]) # Execute the agent messages = 
[Message(role="user", text="test message")] @@ -243,12 +243,12 @@ async def tracking_agent_middleware( assert len(response.messages) > 0 assert response.messages[0].role == "assistant" assert response.messages[0].text == "test response" - assert chat_client.call_count == 1 + assert client.call_count == 1 # Verify middleware execution order assert execution_order == ["agent_function_before", "agent_function_after"] - async def test_function_based_function_middleware_with_chat_agent(self, chat_client: "MockChatClient") -> None: + async def test_function_based_function_middleware_with_chat_agent(self, client: "MockChatClient") -> None: """Test function-based function middleware with Agent.""" async def tracking_function_middleware( @@ -256,7 +256,7 @@ async def tracking_function_middleware( ) -> None: await call_next(context) - Agent(chat_client=chat_client, middleware=[tracking_function_middleware]) + Agent(client=client, middleware=[tracking_function_middleware]) async def test_function_based_function_middleware_with_supported_client( self, chat_client_base: "MockBaseChatClient" @@ -271,7 +271,7 @@ async def tracking_function_middleware( await call_next(context) execution_order.append("function_function_after") - agent = Agent(chat_client=chat_client_base, middleware=[tracking_function_middleware]) + agent = Agent(client=chat_client_base, middleware=[tracking_function_middleware]) messages = [Message(role="user", text="test message")] response = await agent.run(messages) @@ -284,7 +284,7 @@ async def tracking_function_middleware( class TestChatAgentStreamingMiddleware: """Test cases for streaming middleware integration with Agent.""" - async def test_agent_middleware_with_streaming(self, chat_client: "MockChatClient") -> None: + async def test_agent_middleware_with_streaming(self, client: "MockChatClient") -> None: """Test agent middleware with streaming Agent responses.""" execution_order: list[str] = [] streaming_flags: list[bool] = [] @@ -300,10 +300,10 @@ async 
def process( # Create Agent with middleware middleware = StreamingTrackingMiddleware() - agent = Agent(chat_client=chat_client, middleware=[middleware]) + agent = Agent(client=client, middleware=[middleware]) # Set up mock streaming responses - chat_client.streaming_responses = [ + client.streaming_responses = [ [ ChatResponseUpdate(contents=[Content.from_text(text="Streaming")], role="assistant"), ChatResponseUpdate(contents=[Content.from_text(text=" response")], role="assistant"), @@ -320,7 +320,7 @@ async def process( assert len(updates) == 2 assert updates[0].text == "Streaming" assert updates[1].text == " response" - assert chat_client.call_count == 1 + assert client.call_count == 1 # Verify middleware was called and streaming flag was set correctly assert execution_order == [ @@ -329,7 +329,7 @@ async def process( ] assert streaming_flags == [True] # Context should indicate streaming - async def test_non_streaming_vs_streaming_flag_validation(self, chat_client: "MockChatClient") -> None: + async def test_non_streaming_vs_streaming_flag_validation(self, client: "MockChatClient") -> None: """Test that stream flag is correctly set for different execution modes.""" streaming_flags: list[bool] = [] @@ -342,7 +342,7 @@ async def process( # Create Agent with middleware middleware = FlagTrackingMiddleware() - agent = Agent(chat_client=chat_client, middleware=[middleware]) + agent = Agent(client=client, middleware=[middleware]) messages = [Message(role="user", text="test message")] # Test non-streaming execution @@ -360,7 +360,7 @@ async def process( class TestChatAgentMultipleMiddlewareOrdering: """Test cases for multiple middleware execution order with Agent.""" - async def test_multiple_agent_middleware_execution_order(self, chat_client: "MockChatClient") -> None: + async def test_multiple_agent_middleware_execution_order(self, client: "MockChatClient") -> None: """Test that multiple agent middleware execute in correct order with Agent.""" execution_order: 
list[str] = [] @@ -381,7 +381,7 @@ async def process( middleware3 = OrderedMiddleware("third") # Create Agent with multiple middleware - agent = Agent(chat_client=chat_client, middleware=[middleware1, middleware2, middleware3]) + agent = Agent(client=client, middleware=[middleware1, middleware2, middleware3]) # Execute the agent messages = [Message(role="user", text="test message")] @@ -389,7 +389,7 @@ async def process( # Verify response assert response is not None - assert chat_client.call_count == 1 + assert client.call_count == 1 # Verify execution order (should be nested: first wraps second wraps third) expected_order = ["first_before", "second_before", "third_before", "third_after", "second_after", "first_after"] @@ -432,7 +432,7 @@ async def function_function_middleware( execution_order.append("function_function_after") agent = Agent( - chat_client=chat_client_base, + client=chat_client_base, middleware=[ ClassAgentMiddleware(), function_agent_middleware, @@ -469,7 +469,7 @@ async def function_function_middleware( execution_order.append("function_function_after") agent = Agent( - chat_client=chat_client_base, + client=chat_client_base, middleware=[ ClassAgentMiddleware(), function_agent_middleware, @@ -549,7 +549,7 @@ async def process( # Create Agent with function middleware and tools middleware = TrackingFunctionMiddleware("function_middleware") agent = Agent( - chat_client=chat_client_base, + client=chat_client_base, middleware=[middleware], tools=[sample_tool_function], ) @@ -610,7 +610,7 @@ async def tracking_function_middleware( # Create Agent with function middleware and tools agent = Agent( - chat_client=chat_client_base, + client=chat_client_base, middleware=[tracking_function_middleware], tools=[sample_tool_function], ) @@ -684,7 +684,7 @@ async def process( # Create Agent with both agent and function middleware and tools agent = Agent( - chat_client=chat_client_base, + client=chat_client_base, middleware=[TrackingAgentMiddleware(), 
TrackingFunctionMiddleware()], tools=[sample_tool_function], ) @@ -767,7 +767,7 @@ async def kwargs_middleware( ] # Create Agent with function middleware - agent = Agent(chat_client=chat_client_base, middleware=[kwargs_middleware], tools=[sample_tool_function]) + agent = Agent(client=chat_client_base, middleware=[kwargs_middleware], tools=[sample_tool_function]) # Execute the agent with custom parameters passed as kwargs messages = [Message(role="user", text="test message")] @@ -806,13 +806,13 @@ async def process(self, context: AgentContext, call_next: Callable[[AgentContext await call_next(context) self.execution_log.append(f"{self.name}_end") - async def test_middleware_dynamic_rebuild_non_streaming(self, chat_client: "MockChatClient") -> None: + async def test_middleware_dynamic_rebuild_non_streaming(self, client: "MockChatClient") -> None: """Test that middleware pipeline is rebuilt when agent.middleware collection is modified for non-streaming.""" execution_log: list[str] = [] # Create agent with initial middleware middleware1 = self.TrackingAgentMiddleware("middleware1", execution_log) - agent = Agent(chat_client=chat_client, middleware=[middleware1]) + agent = Agent(client=client, middleware=[middleware1]) # First execution - should use middleware1 await agent.run("Test message 1") @@ -856,13 +856,13 @@ async def test_middleware_dynamic_rebuild_non_streaming(self, chat_client: "Mock await agent.run("Test message 4") assert len(execution_log) == 0 - async def test_middleware_dynamic_rebuild_streaming(self, chat_client: "MockChatClient") -> None: + async def test_middleware_dynamic_rebuild_streaming(self, client: "MockChatClient") -> None: """Test that middleware pipeline is rebuilt for streaming when agent.middleware collection is modified.""" execution_log: list[str] = [] # Create agent with initial middleware middleware1 = self.TrackingAgentMiddleware("stream_middleware1", execution_log) - agent = Agent(chat_client=chat_client, middleware=[middleware1]) + 
agent = Agent(client=client, middleware=[middleware1]) # First streaming execution updates: list[AgentResponseUpdate] = [] @@ -889,7 +889,7 @@ async def test_middleware_dynamic_rebuild_streaming(self, chat_client: "MockChat assert "stream_middleware2_start" in execution_log assert "stream_middleware2_end" in execution_log - async def test_middleware_order_change_detection(self, chat_client: "MockChatClient") -> None: + async def test_middleware_order_change_detection(self, client: "MockChatClient") -> None: """Test that changing the order of middleware is detected and applied.""" execution_log: list[str] = [] @@ -897,7 +897,7 @@ async def test_middleware_order_change_detection(self, chat_client: "MockChatCli middleware2 = self.TrackingAgentMiddleware("second", execution_log) # Create agent with middleware in order [first, second] - agent = Agent(chat_client=chat_client, middleware=[middleware1, middleware2]) + agent = Agent(client=client, middleware=[middleware1, middleware2]) # First execution await agent.run("Test message 1") @@ -929,12 +929,12 @@ async def process(self, context: AgentContext, call_next: Callable[[AgentContext await call_next(context) self.execution_log.append(f"{self.name}_end") - async def test_run_level_middleware_isolation(self, chat_client: "MockChatClient") -> None: + async def test_run_level_middleware_isolation(self, client: "MockChatClient") -> None: """Test that run-level middleware is isolated between multiple runs.""" execution_log: list[str] = [] # Create agent without any agent-level middleware - agent = Agent(chat_client=chat_client) + agent = Agent(client=client) # Create run-level middleware run_middleware1 = self.TrackingAgentMiddleware("run1", execution_log) @@ -967,7 +967,7 @@ async def test_run_level_middleware_isolation(self, chat_client: "MockChatClient await agent.run("Test message 4", middleware=[run_middleware1, run_middleware2]) assert execution_log == ["run1_start", "run2_start", "run2_end", "run1_end"] - async def 
test_agent_plus_run_middleware_execution_order(self, chat_client: "MockChatClient") -> None: + async def test_agent_plus_run_middleware_execution_order(self, client: "MockChatClient") -> None: """Test that agent middleware executes first, followed by run middleware.""" execution_log: list[str] = [] metadata_log: list[str] = [] @@ -1003,7 +1003,7 @@ async def process( # Create agent with agent-level middleware agent_middleware = MetadataAgentMiddleware("agent") - agent = Agent(chat_client=chat_client, middleware=[agent_middleware]) + agent = Agent(client=client, middleware=[agent_middleware]) # Create run-level middleware run_middleware = MetadataRunMiddleware("run") @@ -1018,12 +1018,12 @@ async def process( # Verify that run middleware can read agent middleware metadata assert "run_reads_agent_key:agent_value" in metadata_log - async def test_run_level_middleware_non_streaming(self, chat_client: "MockChatClient") -> None: + async def test_run_level_middleware_non_streaming(self, client: "MockChatClient") -> None: """Test run-level middleware with non-streaming execution.""" execution_log: list[str] = [] # Create agent without agent-level middleware - agent = Agent(chat_client=chat_client) + agent = Agent(client=client) # Create run-level middleware run_middleware = self.TrackingAgentMiddleware("run_nonstream", execution_log) @@ -1040,7 +1040,7 @@ async def test_run_level_middleware_non_streaming(self, chat_client: "MockChatCl # Verify middleware was executed assert execution_log == ["run_nonstream_start", "run_nonstream_end"] - async def test_run_level_middleware_streaming(self, chat_client: "MockChatClient") -> None: + async def test_run_level_middleware_streaming(self, client: "MockChatClient") -> None: """Test run-level middleware with streaming execution.""" execution_log: list[str] = [] streaming_flags: list[bool] = [] @@ -1058,10 +1058,10 @@ async def process( execution_log.append(f"{self.name}_end") # Create agent without agent-level middleware - agent = 
Agent(chat_client=chat_client) + agent = Agent(client=client) # Set up mock streaming responses - chat_client.streaming_responses = [ + client.streaming_responses = [ [ ChatResponseUpdate(contents=[Content.from_text(text="Stream")], role="assistant"), ChatResponseUpdate(contents=[Content.from_text(text=" response")], role="assistant"), @@ -1166,7 +1166,7 @@ def custom_tool(message: str) -> str: # Create agent with agent-level middleware agent = Agent( - chat_client=chat_client_base, + client=chat_client_base, middleware=[AgentLevelAgentMiddleware(), AgentLevelFunctionMiddleware()], tools=[custom_tool_wrapped], ) @@ -1259,7 +1259,7 @@ def custom_tool(message: str) -> str: # Should work without errors agent = Agent( - chat_client=chat_client_base, + client=chat_client_base, middleware=[matching_agent_middleware, matching_function_middleware], tools=[custom_tool_wrapped], ) @@ -1270,7 +1270,7 @@ def custom_tool(message: str) -> str: assert "decorator_type_match_agent" in execution_order assert "decorator_type_match_function" not in execution_order - async def test_decorator_and_type_mismatch(self, chat_client: MockChatClient) -> None: + async def test_decorator_and_type_mismatch(self, client: MockChatClient) -> None: """Both decorator and parameter type specified but don't match.""" # This will cause a type error at decoration time, so we need to test differently @@ -1284,7 +1284,7 @@ async def mismatched_middleware( ) -> None: await call_next(context) - agent = Agent(chat_client=chat_client, middleware=[mismatched_middleware]) + agent = Agent(client=client, middleware=[mismatched_middleware]) await agent.run([Message(role="user", text="test")]) async def test_only_decorator_specified(self, chat_client_base: "MockBaseChatClient") -> None: @@ -1330,7 +1330,7 @@ def custom_tool(message: str) -> str: # Should work - relies on decorator agent = Agent( - chat_client=chat_client_base, + client=chat_client_base, middleware=[decorator_only_agent, decorator_only_function], 
tools=[custom_tool_wrapped], ) @@ -1386,7 +1386,7 @@ def custom_tool(message: str) -> str: # Should work - relies on type annotations agent = Agent( - chat_client=chat_client_base, middleware=[type_only_agent, type_only_function], tools=[custom_tool_wrapped] + client=chat_client_base, middleware=[type_only_agent, type_only_function], tools=[custom_tool_wrapped] ) response = await agent.run([Message(role="user", text="test")]) @@ -1395,7 +1395,7 @@ def custom_tool(message: str) -> str: assert "type_only_agent" in execution_order assert "type_only_function" not in execution_order - async def test_neither_decorator_nor_type(self, chat_client: Any) -> None: + async def test_neither_decorator_nor_type(self, client: Any) -> None: """Neither decorator nor parameter type specified - should throw exception.""" async def no_info_middleware(context: Any, call_next: Any) -> None: # No decorator, no type @@ -1403,10 +1403,10 @@ async def no_info_middleware(context: Any, call_next: Any) -> None: # No decora # Should raise MiddlewareException with pytest.raises(MiddlewareException, match="Cannot determine middleware type"): - agent = Agent(chat_client=chat_client, middleware=[no_info_middleware]) + agent = Agent(client=client, middleware=[no_info_middleware]) await agent.run([Message(role="user", text="test")]) - async def test_insufficient_parameters_error(self, chat_client: Any) -> None: + async def test_insufficient_parameters_error(self, client: Any) -> None: """Test that middleware with insufficient parameters raises an error.""" from agent_framework import Agent, agent_middleware @@ -1417,7 +1417,7 @@ async def test_insufficient_parameters_error(self, chat_client: Any) -> None: async def insufficient_params_middleware(context: Any) -> None: # Missing 'next' parameter pass - agent = Agent(chat_client=chat_client, middleware=[insufficient_params_middleware]) + agent = Agent(client=client, middleware=[insufficient_params_middleware]) await agent.run([Message(role="user", 
text="test")]) async def test_decorator_markers_preserved(self) -> None: @@ -1442,7 +1442,7 @@ async def test_function_middleware(context: Any, call_next: Any) -> None: class TestChatAgentThreadBehavior: """Test cases for thread behavior in AgentContext across multiple runs.""" - async def test_agent_context_thread_behavior_across_multiple_runs(self, chat_client: "MockChatClient") -> None: + async def test_agent_context_thread_behavior_across_multiple_runs(self, client: "MockChatClient") -> None: """Test that AgentContext.thread property behaves correctly across multiple agent runs.""" thread_states: list[dict[str, Any]] = [] @@ -1485,7 +1485,7 @@ async def process( # Create Agent with thread tracking middleware and a message store factory middleware = ThreadTrackingMiddleware() - agent = Agent(chat_client=chat_client, middleware=[middleware], chat_message_store_factory=ChatMessageStore) + agent = Agent(client=client, middleware=[middleware], chat_message_store_factory=ChatMessageStore) # Create a thread that will persist messages between runs thread = agent.get_new_thread() @@ -1566,9 +1566,9 @@ async def process(self, context: ChatContext, call_next: Callable[[ChatContext], execution_order.append("chat_middleware_after") # Create Agent with chat middleware - chat_client = MockBaseChatClient() + client = MockBaseChatClient() middleware = TrackingChatMiddleware() - agent = Agent(chat_client=chat_client, middleware=[middleware]) + agent = Agent(client=client, middleware=[middleware]) # Execute the agent messages = [Message(role="user", text="test message")] @@ -1596,8 +1596,8 @@ async def tracking_chat_middleware( execution_order.append("chat_middleware_after") # Create Agent with function-based chat middleware - chat_client = MockBaseChatClient() - agent = Agent(chat_client=chat_client, middleware=[tracking_chat_middleware]) + client = MockBaseChatClient() + agent = Agent(client=client, middleware=[tracking_chat_middleware]) # Execute the agent messages = 
[Message(role="user", text="test message")] @@ -1631,8 +1631,8 @@ async def message_modifier_middleware( await call_next(context) # Create Agent with message-modifying middleware - chat_client = MockBaseChatClient() - agent = Agent(chat_client=chat_client, middleware=[message_modifier_middleware]) + client = MockBaseChatClient() + agent = Agent(client=client, middleware=[message_modifier_middleware]) # Execute the agent messages = [Message(role="user", text="test message")] @@ -1657,8 +1657,8 @@ async def response_override_middleware( context.terminate = True # Create Agent with response-overriding middleware - chat_client = MockBaseChatClient() - agent = Agent(chat_client=chat_client, middleware=[response_override_middleware]) + client = MockBaseChatClient() + agent = Agent(client=client, middleware=[response_override_middleware]) # Execute the agent messages = [Message(role="user", text="test message")] @@ -1687,8 +1687,8 @@ async def second_middleware(context: ChatContext, call_next: Callable[[ChatConte execution_order.append("second_after") # Create Agent with multiple chat middleware - chat_client = MockBaseChatClient() - agent = Agent(chat_client=chat_client, middleware=[first_middleware, second_middleware]) + client = MockBaseChatClient() + agent = Agent(client=client, middleware=[first_middleware, second_middleware]) # Execute the agent messages = [Message(role="user", text="test message")] @@ -1716,12 +1716,12 @@ async def process(self, context: ChatContext, call_next: Callable[[ChatContext], execution_order.append("streaming_chat_after") # Create Agent with chat middleware - chat_client = MockBaseChatClient() - agent = Agent(chat_client=chat_client, middleware=[StreamingTrackingChatMiddleware()]) + client = MockBaseChatClient() + agent = Agent(client=client, middleware=[StreamingTrackingChatMiddleware()]) # Set up mock streaming responses # TODO: refactor to return a ResponseStream object - chat_client.streaming_responses = [ + client.streaming_responses 
= [ [ ChatResponseUpdate(contents=[Content.from_text(text="Stream")], role="assistant"), ChatResponseUpdate(contents=[Content.from_text(text=" response")], role="assistant"), @@ -1759,8 +1759,8 @@ async def process(self, context: ChatContext, call_next: Callable[[ChatContext], execution_order.append("middleware_after") # Create Agent with terminating middleware - chat_client = MockBaseChatClient() - agent = Agent(chat_client=chat_client, middleware=[PreTerminationChatMiddleware()]) + client = MockBaseChatClient() + agent = Agent(client=client, middleware=[PreTerminationChatMiddleware()]) # Execute the agent messages = [Message(role="user", text="test message")] @@ -1784,8 +1784,8 @@ async def process(self, context: ChatContext, call_next: Callable[[ChatContext], context.terminate = True # Create Agent with terminating middleware - chat_client = MockBaseChatClient() - agent = Agent(chat_client=chat_client, middleware=[PostTerminationChatMiddleware()]) + client = MockBaseChatClient() + agent = Agent(client=client, middleware=[PostTerminationChatMiddleware()]) # Execute the agent messages = [Message(role="user", text="test message")] @@ -1823,7 +1823,7 @@ async def function_middleware( # Create Agent with function middleware and tools agent = Agent( - chat_client=MockBaseChatClient(), + client=MockBaseChatClient(), middleware=[chat_middleware, function_middleware, agent_middleware], tools=[sample_tool_function], ) @@ -1859,8 +1859,8 @@ async def kwargs_middleware( await call_next(context) # Create Agent with agent middleware - chat_client = MockBaseChatClient() - agent = Agent(chat_client=chat_client, middleware=[kwargs_middleware]) + client = MockBaseChatClient() + agent = Agent(client=client, middleware=[kwargs_middleware]) # Execute the agent with custom parameters messages = [Message(role="user", text="test message")] diff --git a/python/packages/core/tests/core/test_middleware_with_chat.py b/python/packages/core/tests/core/test_middleware_with_chat.py index 
76883ffca3..3c9d0246c7 100644 --- a/python/packages/core/tests/core/test_middleware_with_chat.py +++ b/python/packages/core/tests/core/test_middleware_with_chat.py @@ -180,10 +180,10 @@ async def agent_level_chat_middleware( await call_next(context) execution_order.append("agent_chat_middleware_after") - chat_client = MockBaseChatClient() + client = MockBaseChatClient() # Create Agent with chat middleware - agent = Agent(chat_client=chat_client, middleware=[agent_level_chat_middleware]) + agent = Agent(client=client, middleware=[agent_level_chat_middleware]) # Execute the agent messages = [Message(role="user", text="test message")] @@ -217,7 +217,7 @@ async def second_middleware(context: ChatContext, call_next: Callable[[ChatConte execution_order.append("second_after") # Create Agent with multiple chat middleware - agent = Agent(chat_client=chat_client_base, middleware=[first_middleware, second_middleware]) + agent = Agent(client=chat_client_base, middleware=[first_middleware, second_middleware]) # Execute the agent messages = [Message(role="user", text="test message")] @@ -375,10 +375,10 @@ def sample_tool(location: str) -> str: ) # Create function-invocation enabled chat client (MockBaseChatClient already includes FunctionInvocationLayer) - chat_client = MockBaseChatClient() + client = MockBaseChatClient() # Set function middleware directly on the chat client - chat_client.function_middleware = [test_function_middleware] + client.function_middleware = [test_function_middleware] # Prepare responses that will trigger function invocation function_call_response = ChatResponse( @@ -399,15 +399,15 @@ def sample_tool(location: str) -> str: messages=[Message(role="assistant", text="Based on the weather data, it's sunny!")] ) - chat_client.run_responses = [function_call_response, final_response] + client.run_responses = [function_call_response, final_response] # Execute the chat client directly with tools - this should trigger function invocation and middleware messages = 
[Message(role="user", text="What's the weather in San Francisco?")] - response = await chat_client.get_response(messages, options={"tools": [sample_tool_wrapped]}) + response = await client.get_response(messages, options={"tools": [sample_tool_wrapped]}) # Verify response assert response is not None assert len(response.messages) > 0 - assert chat_client.call_count == 2 # Two calls: function call + final response + assert client.call_count == 2 # Two calls: function call + final response # Verify function middleware was executed assert execution_order == [ @@ -440,7 +440,7 @@ def sample_tool(location: str) -> str: ) # Create function-invocation enabled chat client (MockBaseChatClient already includes FunctionInvocationLayer) - chat_client = MockBaseChatClient() + client = MockBaseChatClient() # Prepare responses that will trigger function invocation function_call_response = ChatResponse( @@ -457,18 +457,18 @@ def sample_tool(location: str) -> str: ) ] ) - chat_client.run_responses = [function_call_response] + client.run_responses = [function_call_response] # Execute the chat client directly with run-level middleware and tools messages = [Message(role="user", text="What's the weather in New York?")] - response = await chat_client.get_response( + response = await client.get_response( messages, options={"tools": [sample_tool_wrapped]}, middleware=[run_level_function_middleware] ) # Verify response assert response is not None assert len(response.messages) > 0 - assert chat_client.call_count == 2 # Two calls: function call + final response + assert client.call_count == 2 # Two calls: function call + final response # Verify run-level function middleware was executed once (during function invocation) assert execution_order == [ diff --git a/python/packages/core/tests/openai/test_openai_assistants_client.py b/python/packages/core/tests/openai/test_openai_assistants_client.py index 4d0eb9ce7a..d7967b18b3 100644 --- 
a/python/packages/core/tests/openai/test_openai_assistants_client.py +++ b/python/packages/core/tests/openai/test_openai_assistants_client.py @@ -113,16 +113,16 @@ def mock_async_openai() -> MagicMock: def test_init_with_client(mock_async_openai: MagicMock) -> None: """Test OpenAIAssistantsClient initialization with existing client.""" - chat_client = create_test_openai_assistants_client( + client = create_test_openai_assistants_client( mock_async_openai, model_id="gpt-4", assistant_id="existing-assistant-id", thread_id="test-thread-id" ) - assert chat_client.client is mock_async_openai - assert chat_client.model_id == "gpt-4" - assert chat_client.assistant_id == "existing-assistant-id" - assert chat_client.thread_id == "test-thread-id" - assert not chat_client._should_delete_assistant # type: ignore - assert isinstance(chat_client, SupportsChatGetResponse) + assert client.client is mock_async_openai + assert client.model_id == "gpt-4" + assert client.assistant_id == "existing-assistant-id" + assert client.thread_id == "test-thread-id" + assert not client._should_delete_assistant # type: ignore + assert isinstance(client, SupportsChatGetResponse) def test_init_auto_create_client( @@ -130,7 +130,7 @@ def test_init_auto_create_client( mock_async_openai: MagicMock, ) -> None: """Test OpenAIAssistantsClient initialization with auto-created client.""" - chat_client = OpenAIAssistantsClient( + client = OpenAIAssistantsClient( model_id=openai_unit_test_env["OPENAI_CHAT_MODEL_ID"], assistant_name="TestAssistant", api_key=openai_unit_test_env["OPENAI_API_KEY"], @@ -138,11 +138,11 @@ def test_init_auto_create_client( async_client=mock_async_openai, ) - assert chat_client.client is mock_async_openai - assert chat_client.model_id == openai_unit_test_env["OPENAI_CHAT_MODEL_ID"] - assert chat_client.assistant_id is None - assert chat_client.assistant_name == "TestAssistant" - assert not chat_client._should_delete_assistant # type: ignore + assert client.client is 
mock_async_openai + assert client.model_id == openai_unit_test_env["OPENAI_CHAT_MODEL_ID"] + assert client.assistant_id is None + assert client.assistant_name == "TestAssistant" + assert not client._should_delete_assistant # type: ignore def test_init_validation_fail() -> None: @@ -172,31 +172,31 @@ def test_init_with_default_headers(openai_unit_test_env: dict[str, str]) -> None """Test OpenAIAssistantsClient initialization with default headers.""" default_headers = {"X-Unit-Test": "test-guid"} - chat_client = OpenAIAssistantsClient( + client = OpenAIAssistantsClient( model_id="gpt-4", api_key=openai_unit_test_env["OPENAI_API_KEY"], default_headers=default_headers, ) - assert chat_client.model_id == "gpt-4" - assert isinstance(chat_client, SupportsChatGetResponse) + assert client.model_id == "gpt-4" + assert isinstance(client, SupportsChatGetResponse) # Assert that the default header we added is present in the client's default headers for key, value in default_headers.items(): - assert key in chat_client.client.default_headers - assert chat_client.client.default_headers[key] == value + assert key in client.client.default_headers + assert client.client.default_headers[key] == value async def test_get_assistant_id_or_create_existing_assistant( mock_async_openai: MagicMock, ) -> None: """Test _get_assistant_id_or_create when assistant_id is already provided.""" - chat_client = create_test_openai_assistants_client(mock_async_openai, assistant_id="existing-assistant-id") + client = create_test_openai_assistants_client(mock_async_openai, assistant_id="existing-assistant-id") - assistant_id = await chat_client._get_assistant_id_or_create() # type: ignore + assistant_id = await client._get_assistant_id_or_create() # type: ignore assert assistant_id == "existing-assistant-id" - assert not chat_client._should_delete_assistant # type: ignore + assert not client._should_delete_assistant # type: ignore mock_async_openai.beta.assistants.create.assert_not_called() @@ -204,14 
+204,14 @@ async def test_get_assistant_id_or_create_create_new( mock_async_openai: MagicMock, ) -> None: """Test _get_assistant_id_or_create when creating a new assistant.""" - chat_client = create_test_openai_assistants_client( + client = create_test_openai_assistants_client( mock_async_openai, model_id="gpt-4", assistant_name="TestAssistant" ) - assistant_id = await chat_client._get_assistant_id_or_create() # type: ignore + assistant_id = await client._get_assistant_id_or_create() # type: ignore assert assistant_id == "test-assistant-id" - assert chat_client._should_delete_assistant # type: ignore + assert client._should_delete_assistant # type: ignore mock_async_openai.beta.assistants.create.assert_called_once() @@ -219,38 +219,38 @@ async def test_aclose_should_not_delete( mock_async_openai: MagicMock, ) -> None: """Test close when assistant should not be deleted.""" - chat_client = create_test_openai_assistants_client( + client = create_test_openai_assistants_client( mock_async_openai, assistant_id="assistant-to-keep", should_delete_assistant=False ) - await chat_client.close() # type: ignore + await client.close() # type: ignore # Verify assistant deletion was not called mock_async_openai.beta.assistants.delete.assert_not_called() - assert not chat_client._should_delete_assistant # type: ignore + assert not client._should_delete_assistant # type: ignore async def test_aclose_should_delete(mock_async_openai: MagicMock) -> None: """Test close method calls cleanup.""" - chat_client = create_test_openai_assistants_client( + client = create_test_openai_assistants_client( mock_async_openai, assistant_id="assistant-to-delete", should_delete_assistant=True ) - await chat_client.close() + await client.close() # Verify assistant deletion was called mock_async_openai.beta.assistants.delete.assert_called_once_with("assistant-to-delete") - assert not chat_client._should_delete_assistant # type: ignore + assert not client._should_delete_assistant # type: ignore async def 
test_async_context_manager(mock_async_openai: MagicMock) -> None: """Test async context manager functionality.""" - chat_client = create_test_openai_assistants_client( + client = create_test_openai_assistants_client( mock_async_openai, assistant_id="assistant-to-delete", should_delete_assistant=True ) # Test context manager - async with chat_client: + async with client: pass # Just test that we can enter and exit # Verify cleanup was called on exit @@ -262,7 +262,7 @@ def test_serialize(openai_unit_test_env: dict[str, str]) -> None: default_headers = {"X-Unit-Test": "test-guid"} # Test basic initialization and to_dict - chat_client = OpenAIAssistantsClient( + client = OpenAIAssistantsClient( model_id="gpt-4", assistant_id="test-assistant-id", assistant_name="TestAssistant", @@ -272,7 +272,7 @@ def test_serialize(openai_unit_test_env: dict[str, str]) -> None: default_headers=default_headers, ) - dumped_settings = chat_client.to_dict() + dumped_settings = client.to_dict() assert dumped_settings["model_id"] == "gpt-4" assert dumped_settings["assistant_id"] == "test-assistant-id" @@ -290,9 +290,9 @@ def test_serialize(openai_unit_test_env: dict[str, str]) -> None: async def test_get_active_thread_run_none_thread_id(mock_async_openai: MagicMock) -> None: """Test _get_active_thread_run with None thread_id returns None.""" - chat_client = create_test_openai_assistants_client(mock_async_openai) + client = create_test_openai_assistants_client(mock_async_openai) - result = await chat_client._get_active_thread_run(None) # type: ignore + result = await client._get_active_thread_run(None) # type: ignore assert result is None # Should not call the API when thread_id is None @@ -302,7 +302,7 @@ async def test_get_active_thread_run_none_thread_id(mock_async_openai: MagicMock async def test_get_active_thread_run_with_active_run(mock_async_openai: MagicMock) -> None: """Test _get_active_thread_run finds an active run.""" - chat_client = 
create_test_openai_assistants_client(mock_async_openai) + client = create_test_openai_assistants_client(mock_async_openai) # Mock an active run (status not in completed states) mock_run = MagicMock() @@ -314,7 +314,7 @@ async def mock_runs_list(*args: Any, **kwargs: Any) -> Any: mock_async_openai.beta.threads.runs.list.return_value.__aiter__ = mock_runs_list - result = await chat_client._get_active_thread_run("thread-123") # type: ignore + result = await client._get_active_thread_run("thread-123") # type: ignore assert result == mock_run mock_async_openai.beta.threads.runs.list.assert_called_once_with(thread_id="thread-123", limit=1, order="desc") @@ -322,7 +322,7 @@ async def mock_runs_list(*args: Any, **kwargs: Any) -> Any: async def test_prepare_thread_create_new(mock_async_openai: MagicMock) -> None: """Test _prepare_thread creates new thread when thread_id is None.""" - chat_client = create_test_openai_assistants_client(mock_async_openai) + client = create_test_openai_assistants_client(mock_async_openai) # Mock thread creation mock_thread = MagicMock() @@ -336,7 +336,7 @@ async def test_prepare_thread_create_new(mock_async_openai: MagicMock) -> None: "metadata": {"test": "true"}, } - result = await chat_client._prepare_thread(None, None, run_options) # type: ignore + result = await client._prepare_thread(None, None, run_options) # type: ignore assert result == "new-thread-123" assert run_options["additional_messages"] == [] # Should be cleared @@ -349,7 +349,7 @@ async def test_prepare_thread_create_new(mock_async_openai: MagicMock) -> None: async def test_prepare_thread_cancel_existing_run(mock_async_openai: MagicMock) -> None: """Test _prepare_thread cancels existing run when provided.""" - chat_client = create_test_openai_assistants_client(mock_async_openai) + client = create_test_openai_assistants_client(mock_async_openai) # Mock an existing thread run mock_thread_run = MagicMock() @@ -357,7 +357,7 @@ async def 
test_prepare_thread_cancel_existing_run(mock_async_openai: MagicMock) run_options: dict[str, Any] = {"additional_messages": []} - result = await chat_client._prepare_thread("thread-123", mock_thread_run, run_options) # type: ignore + result = await client._prepare_thread("thread-123", mock_thread_run, run_options) # type: ignore assert result == "thread-123" mock_async_openai.beta.threads.runs.cancel.assert_called_once_with(run_id="run-456", thread_id="thread-123") @@ -365,11 +365,11 @@ async def test_prepare_thread_cancel_existing_run(mock_async_openai: MagicMock) async def test_prepare_thread_existing_no_run(mock_async_openai: MagicMock) -> None: """Test _prepare_thread with existing thread_id but no active run.""" - chat_client = create_test_openai_assistants_client(mock_async_openai) + client = create_test_openai_assistants_client(mock_async_openai) run_options: dict[str, list[dict[str, str]]] = {"additional_messages": []} - result = await chat_client._prepare_thread("thread-123", None, run_options) # type: ignore + result = await client._prepare_thread("thread-123", None, run_options) # type: ignore assert result == "thread-123" # Should not call cancel since no thread_run provided @@ -378,7 +378,7 @@ async def test_prepare_thread_existing_no_run(mock_async_openai: MagicMock) -> N async def test_process_stream_events_thread_run_created(mock_async_openai: MagicMock) -> None: """Test _process_stream_events with thread.run.created event.""" - chat_client = create_test_openai_assistants_client(mock_async_openai) + client = create_test_openai_assistants_client(mock_async_openai) # Create a mock stream response for thread.run.created mock_response = MagicMock() @@ -396,7 +396,7 @@ async def async_iterator() -> Any: thread_id = "thread-123" updates: list[ChatResponseUpdate] = [] - async for update in chat_client._process_stream_events(mock_stream, thread_id): # type: ignore + async for update in client._process_stream_events(mock_stream, thread_id): # type: ignore 
updates.append(update) # Should yield one ChatResponseUpdate for thread.run.created @@ -411,7 +411,7 @@ async def async_iterator() -> Any: async def test_process_stream_events_message_delta_text(mock_async_openai: MagicMock) -> None: """Test _process_stream_events with thread.message.delta event containing text.""" - chat_client = create_test_openai_assistants_client(mock_async_openai) + client = create_test_openai_assistants_client(mock_async_openai) # Create a mock TextDeltaBlock with proper spec mock_delta_block = MagicMock(spec=TextDeltaBlock) @@ -440,7 +440,7 @@ async def async_iterator() -> Any: thread_id = "thread-456" updates: list[ChatResponseUpdate] = [] - async for update in chat_client._process_stream_events(mock_stream, thread_id): # type: ignore + async for update in client._process_stream_events(mock_stream, thread_id): # type: ignore updates.append(update) # Should yield one text update @@ -455,11 +455,11 @@ async def async_iterator() -> Any: async def test_process_stream_events_requires_action(mock_async_openai: MagicMock) -> None: """Test _process_stream_events with thread.run.requires_action event.""" - chat_client = create_test_openai_assistants_client(mock_async_openai) + client = create_test_openai_assistants_client(mock_async_openai) # Mock the _parse_function_calls_from_assistants method to return test content test_function_content = Content.from_function_call(call_id="call-123", name="test_func", arguments={"arg": "value"}) - chat_client._parse_function_calls_from_assistants = MagicMock(return_value=[test_function_content]) # type: ignore + client._parse_function_calls_from_assistants = MagicMock(return_value=[test_function_content]) # type: ignore # Create a mock Run object mock_run = MagicMock(spec=Run) @@ -479,7 +479,7 @@ async def async_iterator() -> Any: thread_id = "thread-789" updates: list[ChatResponseUpdate] = [] - async for update in chat_client._process_stream_events(mock_stream, thread_id): # type: ignore + async for update in 
client._process_stream_events(mock_stream, thread_id): # type: ignore updates.append(update) # Should yield one function call update @@ -493,13 +493,13 @@ async def async_iterator() -> Any: assert update.raw_representation == mock_run # Verify _parse_function_calls_from_assistants was called correctly - chat_client._parse_function_calls_from_assistants.assert_called_once_with(mock_run, None) # type: ignore + client._parse_function_calls_from_assistants.assert_called_once_with(mock_run, None) # type: ignore async def test_process_stream_events_run_step_created(mock_async_openai: MagicMock) -> None: """Test _process_stream_events with thread.run.step.created event.""" - chat_client = create_test_openai_assistants_client(mock_async_openai) + client = create_test_openai_assistants_client(mock_async_openai) # Create a mock RunStep object mock_run_step = MagicMock(spec=RunStep) @@ -520,7 +520,7 @@ async def async_iterator() -> Any: thread_id = "thread-789" updates: list[ChatResponseUpdate] = [] - async for update in chat_client._process_stream_events(mock_stream, thread_id): # type: ignore + async for update in client._process_stream_events(mock_stream, thread_id): # type: ignore updates.append(update) # The run step creation itself doesn't yield an update, @@ -533,7 +533,7 @@ async def test_process_stream_events_run_completed_with_usage( ) -> None: """Test _process_stream_events with thread.run.completed event containing usage.""" - chat_client = create_test_openai_assistants_client(mock_async_openai) + client = create_test_openai_assistants_client(mock_async_openai) # Create a mock Run object with usage information mock_usage = MagicMock() @@ -559,7 +559,7 @@ async def async_iterator() -> Any: thread_id = "thread-999" updates: list[ChatResponseUpdate] = [] - async for update in chat_client._process_stream_events(mock_stream, thread_id): # type: ignore + async for update in client._process_stream_events(mock_stream, thread_id): # type: ignore updates.append(update) # 
Should yield one usage update @@ -582,7 +582,7 @@ async def async_iterator() -> Any: def test_parse_function_calls_from_assistants_basic(mock_async_openai: MagicMock) -> None: """Test _parse_function_calls_from_assistants with a simple function call.""" - chat_client = create_test_openai_assistants_client(mock_async_openai) + client = create_test_openai_assistants_client(mock_async_openai) # Create a mock Run event that requires action mock_run = MagicMock() @@ -599,7 +599,7 @@ def test_parse_function_calls_from_assistants_basic(mock_async_openai: MagicMock # Call the method response_id = "response_456" - contents = chat_client._parse_function_calls_from_assistants(mock_run, response_id) # type: ignore + contents = client._parse_function_calls_from_assistants(mock_run, response_id) # type: ignore # Test that one function call content was created assert len(contents) == 1 @@ -685,7 +685,7 @@ def test_parse_run_step_with_mcp_tool_call(mock_async_openai: MagicMock) -> None def test_prepare_options_basic(mock_async_openai: MagicMock) -> None: """Test _prepare_options with basic chat options.""" - chat_client = create_test_openai_assistants_client(mock_async_openai) + client = create_test_openai_assistants_client(mock_async_openai) # Create basic chat options as a dict options = { @@ -698,7 +698,7 @@ def test_prepare_options_basic(mock_async_openai: MagicMock) -> None: messages = [Message(role="user", text="Hello")] # Call the method - run_options, tool_results = chat_client._prepare_options(messages, options) # type: ignore + run_options, tool_results = client._prepare_options(messages, options) # type: ignore # Check basic options were set assert run_options["max_completion_tokens"] == 100 @@ -711,7 +711,7 @@ def test_prepare_options_basic(mock_async_openai: MagicMock) -> None: def test_prepare_options_with_tool_tool(mock_async_openai: MagicMock) -> None: """Test _prepare_options with a FunctionTool.""" - chat_client = 
create_test_openai_assistants_client(mock_async_openai) + client = create_test_openai_assistants_client(mock_async_openai) # Create a simple function for testing and decorate it @tool(approval_mode="never_require") @@ -727,7 +727,7 @@ def test_function(query: str) -> str: messages = [Message(role="user", text="Hello")] # Call the method - run_options, tool_results = chat_client._prepare_options(messages, options) # type: ignore + run_options, tool_results = client._prepare_options(messages, options) # type: ignore # Check tools were set correctly assert "tools" in run_options @@ -739,7 +739,7 @@ def test_function(query: str) -> str: def test_prepare_options_with_code_interpreter(mock_async_openai: MagicMock) -> None: """Test _prepare_options with HostedCodeInterpreterTool.""" - chat_client = create_test_openai_assistants_client(mock_async_openai) + client = create_test_openai_assistants_client(mock_async_openai) # Create a real HostedCodeInterpreterTool code_tool = HostedCodeInterpreterTool() @@ -752,7 +752,7 @@ def test_prepare_options_with_code_interpreter(mock_async_openai: MagicMock) -> messages = [Message(role="user", text="Calculate something")] # Call the method - run_options, tool_results = chat_client._prepare_options(messages, options) # type: ignore + run_options, tool_results = client._prepare_options(messages, options) # type: ignore # Check code interpreter tool was set correctly assert "tools" in run_options @@ -763,7 +763,7 @@ def test_prepare_options_with_code_interpreter(mock_async_openai: MagicMock) -> def test_prepare_options_tool_choice_none(mock_async_openai: MagicMock) -> None: """Test _prepare_options with tool_choice set to 'none' and no tools.""" - chat_client = create_test_openai_assistants_client(mock_async_openai) + client = create_test_openai_assistants_client(mock_async_openai) options = { "tool_choice": "none", @@ -772,7 +772,7 @@ def test_prepare_options_tool_choice_none(mock_async_openai: MagicMock) -> None: messages = 
[Message(role="user", text="Hello")] # Call the method - run_options, tool_results = chat_client._prepare_options(messages, options) # type: ignore + run_options, tool_results = client._prepare_options(messages, options) # type: ignore # Should set tool_choice to none - no tools because none were provided assert run_options["tool_choice"] == "none" @@ -785,7 +785,7 @@ def test_prepare_options_tool_choice_none_with_tools(mock_async_openai: MagicMoc When tool_choice='none', the model won't call tools, but tools should still be sent to the API so they're available for future turns in the conversation. """ - chat_client = create_test_openai_assistants_client(mock_async_openai) + client = create_test_openai_assistants_client(mock_async_openai) # Create a function tool @tool(approval_mode="never_require") @@ -800,7 +800,7 @@ def test_func(arg: str) -> str: messages = [Message(role="user", text="Hello")] # Call the method - run_options, tool_results = chat_client._prepare_options(messages, options) # type: ignore + run_options, tool_results = client._prepare_options(messages, options) # type: ignore # Should set tool_choice to none BUT still include tools assert run_options["tool_choice"] == "none" @@ -810,7 +810,7 @@ def test_func(arg: str) -> str: def test_prepare_options_required_function(mock_async_openai: MagicMock) -> None: """Test _prepare_options with required function tool choice.""" - chat_client = create_test_openai_assistants_client(mock_async_openai) + client = create_test_openai_assistants_client(mock_async_openai) # Create a required function tool choice as dict tool_choice = {"mode": "required", "required_function_name": "specific_function"} @@ -822,7 +822,7 @@ def test_prepare_options_required_function(mock_async_openai: MagicMock) -> None messages = [Message(role="user", text="Hello")] # Call the method - run_options, tool_results = chat_client._prepare_options(messages, options) # type: ignore + run_options, tool_results = 
client._prepare_options(messages, options) # type: ignore # Check required function tool choice was set correctly expected_tool_choice = { @@ -835,7 +835,7 @@ def test_prepare_options_required_function(mock_async_openai: MagicMock) -> None def test_prepare_options_with_file_search_tool(mock_async_openai: MagicMock) -> None: """Test _prepare_options with HostedFileSearchTool.""" - chat_client = create_test_openai_assistants_client(mock_async_openai) + client = create_test_openai_assistants_client(mock_async_openai) # Create a HostedFileSearchTool with max_results file_search_tool = HostedFileSearchTool(max_results=10) @@ -848,7 +848,7 @@ def test_prepare_options_with_file_search_tool(mock_async_openai: MagicMock) -> messages = [Message(role="user", text="Search for information")] # Call the method - run_options, tool_results = chat_client._prepare_options(messages, options) # type: ignore + run_options, tool_results = client._prepare_options(messages, options) # type: ignore # Check file search tool was set correctly assert "tools" in run_options @@ -860,7 +860,7 @@ def test_prepare_options_with_file_search_tool(mock_async_openai: MagicMock) -> def test_prepare_options_with_mapping_tool(mock_async_openai: MagicMock) -> None: """Test _prepare_options with MutableMapping tool.""" - chat_client = create_test_openai_assistants_client(mock_async_openai) + client = create_test_openai_assistants_client(mock_async_openai) # Create a tool as a MutableMapping (dict) mapping_tool = {"type": "custom_tool", "parameters": {"setting": "value"}} @@ -873,7 +873,7 @@ def test_prepare_options_with_mapping_tool(mock_async_openai: MagicMock) -> None messages = [Message(role="user", text="Use custom tool")] # Call the method - run_options, tool_results = chat_client._prepare_options(messages, options) # type: ignore + run_options, tool_results = client._prepare_options(messages, options) # type: ignore # Check mapping tool was set correctly assert "tools" in run_options @@ -891,11 
+891,11 @@ class TestResponse(BaseModel): value: int model_config = ConfigDict(extra="forbid") - chat_client = create_test_openai_assistants_client(mock_async_openai) + client = create_test_openai_assistants_client(mock_async_openai) messages = [Message(role="user", text="Test")] options = {"response_format": TestResponse} - run_options, _ = chat_client._prepare_options(messages, options) # type: ignore + run_options, _ = client._prepare_options(messages, options) # type: ignore assert "response_format" in run_options assert run_options["response_format"]["type"] == "json_schema" @@ -905,7 +905,7 @@ class TestResponse(BaseModel): def test_prepare_options_with_system_message(mock_async_openai: MagicMock) -> None: """Test _prepare_options with system message converted to instructions.""" - chat_client = create_test_openai_assistants_client(mock_async_openai) + client = create_test_openai_assistants_client(mock_async_openai) messages = [ Message(role="system", text="You are a helpful assistant."), @@ -913,7 +913,7 @@ def test_prepare_options_with_system_message(mock_async_openai: MagicMock) -> No ] # Call the method - run_options, tool_results = chat_client._prepare_options(messages, {}) # type: ignore + run_options, tool_results = client._prepare_options(messages, {}) # type: ignore # Check that additional_messages only contains the user message # System message should be converted to instructions (though this is handled internally) @@ -925,14 +925,14 @@ def test_prepare_options_with_system_message(mock_async_openai: MagicMock) -> No def test_prepare_options_with_image_content(mock_async_openai: MagicMock) -> None: """Test _prepare_options with image content.""" - chat_client = create_test_openai_assistants_client(mock_async_openai) + client = create_test_openai_assistants_client(mock_async_openai) # Create message with image content image_content = Content.from_uri(uri="https://example.com/image.jpg", media_type="image/jpeg") messages = [Message(role="user", 
contents=[image_content])] # Call the method - run_options, tool_results = chat_client._prepare_options(messages, {}) # type: ignore + run_options, tool_results = client._prepare_options(messages, {}) # type: ignore # Check that image content was processed assert "additional_messages" in run_options @@ -946,9 +946,9 @@ def test_prepare_options_with_image_content(mock_async_openai: MagicMock) -> Non def test_prepare_tool_outputs_for_assistants_empty(mock_async_openai: MagicMock) -> None: """Test _prepare_tool_outputs_for_assistants with empty list.""" - chat_client = create_test_openai_assistants_client(mock_async_openai) + client = create_test_openai_assistants_client(mock_async_openai) - run_id, tool_outputs = chat_client._prepare_tool_outputs_for_assistants([]) # type: ignore + run_id, tool_outputs = client._prepare_tool_outputs_for_assistants([]) # type: ignore assert run_id is None assert tool_outputs is None @@ -956,12 +956,12 @@ def test_prepare_tool_outputs_for_assistants_empty(mock_async_openai: MagicMock) def test_prepare_tool_outputs_for_assistants_valid(mock_async_openai: MagicMock) -> None: """Test _prepare_tool_outputs_for_assistants with valid function results.""" - chat_client = create_test_openai_assistants_client(mock_async_openai) + client = create_test_openai_assistants_client(mock_async_openai) call_id = json.dumps(["run-123", "call-456"]) function_result = Content.from_function_result(call_id=call_id, result="Function executed successfully") - run_id, tool_outputs = chat_client._prepare_tool_outputs_for_assistants([function_result]) # type: ignore + run_id, tool_outputs = client._prepare_tool_outputs_for_assistants([function_result]) # type: ignore assert run_id == "run-123" assert tool_outputs is not None @@ -974,7 +974,7 @@ def test_prepare_tool_outputs_for_assistants_mismatched_run_ids( mock_async_openai: MagicMock, ) -> None: """Test _prepare_tool_outputs_for_assistants with mismatched run IDs.""" - chat_client = 
create_test_openai_assistants_client(mock_async_openai) + client = create_test_openai_assistants_client(mock_async_openai) # Create function results with different run IDs call_id1 = json.dumps(["run-123", "call-456"]) @@ -982,7 +982,7 @@ def test_prepare_tool_outputs_for_assistants_mismatched_run_ids( function_result1 = Content.from_function_result(call_id=call_id1, result="Result 1") function_result2 = Content.from_function_result(call_id=call_id2, result="Result 2") - run_id, tool_outputs = chat_client._prepare_tool_outputs_for_assistants([function_result1, function_result2]) # type: ignore + run_id, tool_outputs = client._prepare_tool_outputs_for_assistants([function_result1, function_result2]) # type: ignore # Should only process the first one since run IDs don't match assert run_id == "run-123" @@ -994,36 +994,36 @@ def test_prepare_tool_outputs_for_assistants_mismatched_run_ids( def test_update_agent_name_and_description(mock_async_openai: MagicMock) -> None: """Test _update_agent_name_and_description method updates assistant_name when not already set.""" # Test updating agent name when assistant_name is None - chat_client = create_test_openai_assistants_client(mock_async_openai, assistant_name=None) + client = create_test_openai_assistants_client(mock_async_openai, assistant_name=None) # Call the private method to update agent name - chat_client._update_agent_name_and_description("New Assistant Name") # type: ignore + client._update_agent_name_and_description("New Assistant Name") # type: ignore - assert chat_client.assistant_name == "New Assistant Name" + assert client.assistant_name == "New Assistant Name" def test_update_agent_name_and_description_existing(mock_async_openai: MagicMock) -> None: """Test _update_agent_name_and_description method doesn't override existing assistant_name.""" # Test that existing assistant_name is not overridden - chat_client = create_test_openai_assistants_client(mock_async_openai, assistant_name="Existing Assistant") + 
client = create_test_openai_assistants_client(mock_async_openai, assistant_name="Existing Assistant") # Call the private method to update agent name - chat_client._update_agent_name_and_description("New Assistant Name") # type: ignore + client._update_agent_name_and_description("New Assistant Name") # type: ignore # Should keep the existing name - assert chat_client.assistant_name == "Existing Assistant" + assert client.assistant_name == "Existing Assistant" def test_update_agent_name_and_description_none(mock_async_openai: MagicMock) -> None: """Test _update_agent_name_and_description method with None agent_name parameter.""" # Test that None agent_name doesn't change anything - chat_client = create_test_openai_assistants_client(mock_async_openai, assistant_name=None) + client = create_test_openai_assistants_client(mock_async_openai, assistant_name=None) # Call the private method with None - chat_client._update_agent_name_and_description(None) # type: ignore + client._update_agent_name_and_description(None) # type: ignore # Should remain None - assert chat_client.assistant_name is None + assert client.assistant_name is None @tool(approval_mode="never_require") @@ -1234,7 +1234,7 @@ async def test_file_search_streaming() -> None: async def test_openai_assistants_agent_basic_run(): """Test Agent basic run functionality with OpenAIAssistantsClient.""" async with Agent( - chat_client=OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL), + client=OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL), ) as agent: # Run a simple query response = await agent.run("Hello! 
Please respond with 'Hello World' exactly.") @@ -1251,7 +1251,7 @@ async def test_openai_assistants_agent_basic_run(): async def test_openai_assistants_agent_basic_run_streaming(): """Test Agent basic streaming functionality with OpenAIAssistantsClient.""" async with Agent( - chat_client=OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL), + client=OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL), ) as agent: # Run streaming query full_message: str = "" @@ -1271,7 +1271,7 @@ async def test_openai_assistants_agent_basic_run_streaming(): async def test_openai_assistants_agent_thread_persistence(): """Test Agent thread persistence across runs with OpenAIAssistantsClient.""" async with Agent( - chat_client=OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL), + client=OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL), instructions="You are a helpful assistant with good memory.", ) as agent: # Create a new thread that will be reused @@ -1303,7 +1303,7 @@ async def test_openai_assistants_agent_existing_thread_id(): existing_thread_id = None async with Agent( - chat_client=OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL), + client=OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL), instructions="You are a helpful weather agent.", tools=[get_weather], ) as agent: @@ -1323,7 +1323,7 @@ async def test_openai_assistants_agent_existing_thread_id(): # Now continue with the same thread ID in a new agent instance async with Agent( - chat_client=OpenAIAssistantsClient(thread_id=existing_thread_id), + client=OpenAIAssistantsClient(thread_id=existing_thread_id), instructions="You are a helpful weather agent.", tools=[get_weather], ) as agent: @@ -1346,7 +1346,7 @@ async def test_openai_assistants_agent_code_interpreter(): """Test Agent with code interpreter through OpenAIAssistantsClient.""" async with Agent( - chat_client=OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL), + client=OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL), 
instructions="You are a helpful assistant that can write and execute Python code.", tools=[HostedCodeInterpreterTool()], ) as agent: @@ -1366,7 +1366,7 @@ async def test_agent_level_tool_persistence(): """Test that agent-level tools persist across multiple runs with OpenAI Assistants Client.""" async with Agent( - chat_client=OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL), + client=OpenAIAssistantsClient(model_id=INTEGRATION_TEST_MODEL), instructions="You are a helpful assistant that uses available tools.", tools=[get_weather], # Agent-level tool ) as agent: diff --git a/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py b/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py index 0e93196bbd..41faca0c9a 100644 --- a/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py +++ b/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py @@ -244,7 +244,7 @@ async def test_agent_executor_tool_call_with_approval() -> None: """Test that AgentExecutor handles tool calls requiring approval.""" # Arrange agent = Agent( - chat_client=MockChatClient(), + client=MockChatClient(), name="ApprovalAgent", tools=[mock_tool_requiring_approval], ) @@ -278,7 +278,7 @@ async def test_agent_executor_tool_call_with_approval_streaming() -> None: """Test that AgentExecutor handles tool calls requiring approval in streaming mode.""" # Arrange agent = Agent( - chat_client=MockChatClient(), + client=MockChatClient(), name="ApprovalAgent", tools=[mock_tool_requiring_approval], ) @@ -315,7 +315,7 @@ async def test_agent_executor_parallel_tool_call_with_approval() -> None: """Test that AgentExecutor handles parallel tool calls requiring approval.""" # Arrange agent = Agent( - chat_client=MockChatClient(parallel_request=True), + client=MockChatClient(parallel_request=True), name="ApprovalAgent", tools=[mock_tool_requiring_approval], ) @@ -351,7 +351,7 @@ async def test_agent_executor_parallel_tool_call_with_approval_streaming() 
-> No """Test that AgentExecutor handles parallel tool calls requiring approval in streaming mode.""" # Arrange agent = Agent( - chat_client=MockChatClient(parallel_request=True), + client=MockChatClient(parallel_request=True), name="ApprovalAgent", tools=[mock_tool_requiring_approval], ) diff --git a/python/packages/declarative/agent_framework_declarative/_loader.py b/python/packages/declarative/agent_framework_declarative/_loader.py index b868e7d7bb..e6eb768c8c 100644 --- a/python/packages/declarative/agent_framework_declarative/_loader.py +++ b/python/packages/declarative/agent_framework_declarative/_loader.py @@ -150,7 +150,7 @@ class AgentFactory: # With pre-configured chat client client = AzureOpenAIChatClient() - factory = AgentFactory(chat_client=client) + factory = AgentFactory(client=client) agent = factory.create_agent_from_yaml_path("agent.yaml") .. code-block:: python @@ -174,7 +174,7 @@ class AgentFactory: def __init__( self, *, - chat_client: SupportsChatGetResponse | None = None, + client: SupportsChatGetResponse | None = None, bindings: Mapping[str, Any] | None = None, connections: Mapping[str, Any] | None = None, client_kwargs: Mapping[str, Any] | None = None, @@ -187,7 +187,7 @@ def __init__( """Create the agent factory. Args: - chat_client: An optional SupportsChatGetResponse instance to use as a dependency. + client: An optional SupportsChatGetResponse instance to use as a dependency. This will be passed to the Agent that gets created. If you need to create multiple agents with different chat clients, do not pass this and instead provide the chat client in the YAML definition. 
@@ -241,7 +241,7 @@ def __init__( # With shared chat client client = AzureOpenAIChatClient() factory = AgentFactory( - chat_client=client, + client=client, env_file_path=".env", ) @@ -260,7 +260,7 @@ def __init__( }, ) """ - self.chat_client = chat_client + self.client = client self.bindings = bindings self.connections = connections self.client_kwargs = client_kwargs or {} @@ -455,7 +455,7 @@ def create_agent_from_dict(self, agent_def: dict[str, Any]) -> Agent: chat_options["response_format"] = _create_model_from_json_schema("agent", output_schema.to_json_schema()) # Step 3: Create the agent instance return Agent( - chat_client=client, + client=client, name=prompt_agent.name, description=prompt_agent.description, instructions=prompt_agent.instructions, @@ -572,7 +572,7 @@ async def create_agent_from_dict_async(self, agent_def: dict[str, Any]) -> Agent if output_schema := prompt_agent.outputSchema: chat_options["response_format"] = _create_model_from_json_schema("agent", output_schema.to_json_schema()) return Agent( - chat_client=client, + client=client, name=prompt_agent.name, description=prompt_agent.description, instructions=prompt_agent.instructions, @@ -634,9 +634,9 @@ async def _create_agent_with_provider(self, prompt_agent: PromptAgent, mapping: def _get_client(self, prompt_agent: PromptAgent) -> SupportsChatGetResponse: """Create the SupportsChatGetResponse instance based on the PromptAgent model.""" if not prompt_agent.model: - # if no model is defined, use the supplied chat_client - if self.chat_client: - return self.chat_client + # if no model is defined, use the supplied client + if self.client: + return self.client raise DeclarativeLoaderError( "ChatClient must be provided to create agent from PromptAgent, " "alternatively define a model in the PromptAgent." 
@@ -670,9 +670,9 @@ def _get_client(self, prompt_agent: PromptAgent) -> SupportsChatGetResponse: # Any client we create, needs a model.id if not prompt_agent.model.id: - # if prompt_agent.model is defined, but no id, use the supplied chat_client - if self.chat_client: - return self.chat_client + # if prompt_agent.model is defined, but no id, use the supplied client + if self.client: + return self.client # or raise, since we cannot create a client without model id raise DeclarativeLoaderError( "ChatClient must be provided to create agent from PromptAgent, or define model.id in the PromptAgent." diff --git a/python/packages/declarative/agent_framework_declarative/_workflows/_factory.py b/python/packages/declarative/agent_framework_declarative/_workflows/_factory.py index 2633f82f99..576ef73ac6 100644 --- a/python/packages/declarative/agent_framework_declarative/_workflows/_factory.py +++ b/python/packages/declarative/agent_framework_declarative/_workflows/_factory.py @@ -73,8 +73,8 @@ class WorkflowFactory: from agent_framework.declarative import WorkflowFactory # Pre-register agents for InvokeAzureAgent actions - chat_client = AzureOpenAIChatClient() - agent = chat_client.as_agent(name="MyAgent", instructions="You are helpful.") + client = AzureOpenAIChatClient() + agent = client.as_agent(name="MyAgent", instructions="You are helpful.") factory = WorkflowFactory(agents={"MyAgent": agent}) workflow = factory.create_workflow_from_yaml_path("workflow.yaml") diff --git a/python/packages/declarative/tests/test_declarative_loader.py b/python/packages/declarative/tests/test_declarative_loader.py index 2d31a66d58..93497b7343 100644 --- a/python/packages/declarative/tests/test_declarative_loader.py +++ b/python/packages/declarative/tests/test_declarative_loader.py @@ -483,7 +483,7 @@ def test_create_agent_from_dict_parses_prompt_agent(self): mock_client = MagicMock() mock_client.create_agent.return_value = MagicMock() - factory = AgentFactory(chat_client=mock_client) + 
factory = AgentFactory(client=mock_client) agent = factory.create_agent_from_dict(agent_def) assert agent is not None @@ -512,7 +512,7 @@ def test_create_agent_from_dict_matches_yaml(self): mock_client = MagicMock() mock_client.create_agent.return_value = MagicMock() - factory = AgentFactory(chat_client=mock_client) + factory = AgentFactory(client=mock_client) # Create from YAML string agent_from_yaml = factory.create_agent_from_yaml(yaml_content) @@ -540,7 +540,7 @@ def test_create_agent_from_dict_invalid_kind_raises(self): factory.create_agent_from_dict(agent_def) def test_create_agent_from_dict_without_model_or_client_raises(self): - """Test that missing both model and chat_client raises DeclarativeLoaderError.""" + """Test that missing both model and client raises DeclarativeLoaderError.""" from agent_framework_declarative import AgentFactory from agent_framework_declarative._loader import DeclarativeLoaderError @@ -592,7 +592,7 @@ def test_agent_factory_safe_mode_blocks_env_in_yaml(self, monkeypatch): monkeypatch.setenv("TEST_DESCRIPTION", "Description from env") # With safe_mode=True (default), Env access should fail and return original value - factory = AgentFactory(chat_client=mock_client, safe_mode=True) + factory = AgentFactory(client=mock_client, safe_mode=True) agent = factory.create_agent_from_yaml(yaml_content) # The description should NOT be resolved from env (PowerFx fails, returns original) @@ -618,7 +618,7 @@ def test_agent_factory_safe_mode_false_allows_env_in_yaml(self, monkeypatch): """ # With safe_mode=False, Env access should work - factory = AgentFactory(chat_client=mock_client, safe_mode=False) + factory = AgentFactory(client=mock_client, safe_mode=False) agent = factory.create_agent_from_yaml(yaml_content) # The description should be resolved from env @@ -726,7 +726,7 @@ def test_mcp_tool_with_api_key_connection_sets_headers(self): mock_client = MagicMock() mock_client.create_agent.return_value = MagicMock() - factory = 
AgentFactory(chat_client=mock_client) + factory = AgentFactory(client=mock_client) agent = factory.create_agent_from_yaml(yaml_content) # Find the MCP tool in the agent's tools @@ -761,7 +761,7 @@ def test_mcp_tool_with_remote_connection_sets_additional_properties(self): mock_client = MagicMock() mock_client.create_agent.return_value = MagicMock() - factory = AgentFactory(chat_client=mock_client) + factory = AgentFactory(client=mock_client) agent = factory.create_agent_from_yaml(yaml_content) # Find the MCP tool in the agent's tools @@ -800,7 +800,7 @@ def test_mcp_tool_with_reference_connection_sets_additional_properties(self): mock_client = MagicMock() mock_client.create_agent.return_value = MagicMock() - factory = AgentFactory(chat_client=mock_client) + factory = AgentFactory(client=mock_client) agent = factory.create_agent_from_yaml(yaml_content) # Find the MCP tool in the agent's tools @@ -836,7 +836,7 @@ def test_mcp_tool_with_anonymous_connection_no_headers_or_properties(self): mock_client = MagicMock() mock_client.create_agent.return_value = MagicMock() - factory = AgentFactory(chat_client=mock_client) + factory = AgentFactory(client=mock_client) agent = factory.create_agent_from_yaml(yaml_content) # Find the MCP tool in the agent's tools @@ -868,7 +868,7 @@ def test_mcp_tool_without_connection_preserves_existing_behavior(self): mock_client = MagicMock() mock_client.create_agent.return_value = MagicMock() - factory = AgentFactory(chat_client=mock_client) + factory = AgentFactory(client=mock_client) agent = factory.create_agent_from_yaml(yaml_content) # Find the MCP tool in the agent's tools @@ -907,7 +907,7 @@ def test_mcp_tool_with_remote_connection_with_endpoint(self): mock_client = MagicMock() mock_client.create_agent.return_value = MagicMock() - factory = AgentFactory(chat_client=mock_client) + factory = AgentFactory(client=mock_client) agent = factory.create_agent_from_yaml(yaml_content) # Find the MCP tool in the agent's tools diff --git 
a/python/packages/devui/README.md b/python/packages/devui/README.md index 50c2758ca3..6c3745236a 100644 --- a/python/packages/devui/README.md +++ b/python/packages/devui/README.md @@ -28,7 +28,7 @@ def get_weather(location: str) -> str: # Create your agent agent = Agent( name="WeatherAgent", - chat_client=OpenAIChatClient(), + client=OpenAIChatClient(), tools=[get_weather] ) @@ -55,7 +55,7 @@ When DevUI starts with no discovered entities, it displays a **sample entity gal ```python # ✅ Correct - DevUI handles cleanup automatically -mcp_tool = MCPStreamableHTTPTool(url="http://localhost:8011/mcp", chat_client=chat_client) +mcp_tool = MCPStreamableHTTPTool(url="http://localhost:8011/mcp", client=client) agent = Agent(tools=mcp_tool) serve(entities=[agent]) ``` @@ -74,7 +74,7 @@ from agent_framework_devui import register_cleanup, serve credential = DefaultAzureCredential() client = AzureOpenAIChatClient() -agent = Agent(name="MyAgent", chat_client=client) +agent = Agent(name="MyAgent", client=client) # Register cleanup hook - credential will be closed on shutdown register_cleanup(agent, credential.close) diff --git a/python/packages/devui/agent_framework_devui/_server.py b/python/packages/devui/agent_framework_devui/_server.py index 1045c82923..374bab9962 100644 --- a/python/packages/devui/agent_framework_devui/_server.py +++ b/python/packages/devui/agent_framework_devui/_server.py @@ -222,8 +222,8 @@ async def _cleanup_entities(self) -> None: # Step 2: Close chat clients and their credentials (EXISTING) entity_obj = self.executor.entity_discovery.get_entity_object(entity_id) - if entity_obj and hasattr(entity_obj, "chat_client"): - client = entity_obj.chat_client + if entity_obj and hasattr(entity_obj, "client"): + client = entity_obj.client # Close the chat client itself if hasattr(client, "close") and callable(client.close): diff --git a/python/packages/devui/agent_framework_devui/_utils.py b/python/packages/devui/agent_framework_devui/_utils.py index 
3ddcbd1588..a6a89e5cac 100644 --- a/python/packages/devui/agent_framework_devui/_utils.py +++ b/python/packages/devui/agent_framework_devui/_utils.py @@ -45,7 +45,7 @@ def extract_agent_metadata(entity_object: Any) -> dict[str, Any]: elif hasattr(chat_opts, "instructions"): metadata["instructions"] = chat_opts.instructions - # Try to get model - check both default_options and chat_client + # Try to get model - check both default_options and client if hasattr(entity_object, "default_options"): chat_opts = entity_object.default_options if isinstance(chat_opts, dict): @@ -55,14 +55,14 @@ def extract_agent_metadata(entity_object: Any) -> dict[str, Any]: metadata["model"] = chat_opts.model_id if ( metadata["model"] is None - and hasattr(entity_object, "chat_client") - and hasattr(entity_object.chat_client, "model_id") + and hasattr(entity_object, "client") + and hasattr(entity_object.client, "model_id") ): - metadata["model"] = entity_object.chat_client.model_id + metadata["model"] = entity_object.client.model_id # Try to get chat client type - if hasattr(entity_object, "chat_client"): - metadata["chat_client_type"] = entity_object.chat_client.__class__.__name__ + if hasattr(entity_object, "client"): + metadata["chat_client_type"] = entity_object.client.__class__.__name__ # Try to get context providers if ( diff --git a/python/packages/devui/tests/devui/conftest.py b/python/packages/devui/tests/devui/conftest.py index 6902cef2ba..1b1fd0610f 100644 --- a/python/packages/devui/tests/devui/conftest.py +++ b/python/packages/devui/tests/devui/conftest.py @@ -445,7 +445,7 @@ async def executor_with_real_agent() -> tuple[AgentFrameworkExecutor, str, MockB id="test_chat_agent", name="Test Chat Agent", description="A real Agent for testing execution flow", - chat_client=mock_client, + client=mock_client, system_message="You are a helpful test assistant.", ) @@ -477,14 +477,14 @@ async def sequential_workflow() -> tuple[AgentFrameworkExecutor, str, MockBaseCh id="writer", 
name="Writer", description="Content writer agent", - chat_client=mock_client, + client=mock_client, system_message="You are a content writer. Create clear, engaging content.", ) reviewer = Agent( id="reviewer", name="Reviewer", description="Content reviewer agent", - chat_client=mock_client, + client=mock_client, system_message="You are a reviewer. Provide constructive feedback.", ) @@ -522,21 +522,21 @@ async def concurrent_workflow() -> tuple[AgentFrameworkExecutor, str, MockBaseCh id="researcher", name="Researcher", description="Research agent", - chat_client=mock_client, + client=mock_client, system_message="You are a researcher. Find key data and insights.", ) analyst = Agent( id="analyst", name="Analyst", description="Analysis agent", - chat_client=mock_client, + client=mock_client, system_message="You are an analyst. Identify trends and patterns.", ) summarizer = Agent( id="summarizer", name="Summarizer", description="Summary agent", - chat_client=mock_client, + client=mock_client, system_message="You are a summarizer. 
Provide concise summaries.", ) diff --git a/python/packages/devui/tests/devui/test_execution.py b/python/packages/devui/tests/devui/test_execution.py index 1c9d544fdd..3fff11ad79 100644 --- a/python/packages/devui/tests/devui/test_execution.py +++ b/python/packages/devui/tests/devui/test_execution.py @@ -308,7 +308,7 @@ async def test_full_pipeline_workflow_events_are_json_serializable(): id="serialization_test_agent", name="Serialization Test Agent", description="Agent for testing serialization", - chat_client=mock_client, + client=mock_client, system_message="You are a test assistant.", ) diff --git a/python/packages/devui/tests/devui/test_server.py b/python/packages/devui/tests/devui/test_server.py index 766c03e8bf..b6215ddab5 100644 --- a/python/packages/devui/tests/devui/test_server.py +++ b/python/packages/devui/tests/devui/test_server.py @@ -155,7 +155,7 @@ async def test_credential_cleanup() -> None: mock_client.function_invocation_configuration = None # Create agent with mock client - agent = Agent(name="TestAgent", chat_client=mock_client, instructions="Test agent") + agent = Agent(name="TestAgent", client=mock_client, instructions="Test agent") # Create DevUI server with agent server = DevServer() @@ -188,7 +188,7 @@ async def test_credential_cleanup_error_handling() -> None: mock_client.function_invocation_configuration = None # Create agent with mock client - agent = Agent(name="TestAgent", chat_client=mock_client, instructions="Test agent") + agent = Agent(name="TestAgent", client=mock_client, instructions="Test agent") # Create DevUI server with agent server = DevServer() @@ -223,7 +223,7 @@ async def test_multiple_credential_attributes() -> None: mock_client.function_invocation_configuration = None # Create agent with mock client - agent = Agent(name="TestAgent", chat_client=mock_client, instructions="Test agent") + agent = Agent(name="TestAgent", client=mock_client, instructions="Test agent") # Create DevUI server with agent server = DevServer() 
diff --git a/python/packages/durabletask/agent_framework_durabletask/_worker.py b/python/packages/durabletask/agent_framework_durabletask/_worker.py index 781c8fc953..636dadff2a 100644 --- a/python/packages/durabletask/agent_framework_durabletask/_worker.py +++ b/python/packages/durabletask/agent_framework_durabletask/_worker.py @@ -39,7 +39,7 @@ class DurableAIAgentWorker: agent_worker = DurableAIAgentWorker(worker) # Register agents - my_agent = Agent(chat_client=client, name="assistant") + my_agent = Agent(client=client, name="assistant") agent_worker.add_agent(my_agent) # Start the worker diff --git a/python/packages/lab/gaia/samples/openai_agent.py b/python/packages/lab/gaia/samples/openai_agent.py index 5380866dba..5e413be9b6 100644 --- a/python/packages/lab/gaia/samples/openai_agent.py +++ b/python/packages/lab/gaia/samples/openai_agent.py @@ -47,9 +47,9 @@ async def create_gaia_agent() -> AsyncIterator[Agent]: result = await agent.run("What is the capital of France?") print(result.text) """ - chat_client = OpenAIResponsesClient() + client = OpenAIResponsesClient() - async with chat_client.as_agent( + async with client.as_agent( name="GaiaAgent", instructions="Solve tasks to your best ability. 
Use Web Search to find " "information and Code Interpreter to perform calculations and data analysis.", diff --git a/python/packages/lab/lightning/README.md b/python/packages/lab/lightning/README.md index 4219713b77..e9fd3ec91d 100644 --- a/python/packages/lab/lightning/README.md +++ b/python/packages/lab/lightning/README.md @@ -50,7 +50,7 @@ async def math_agent(task: TaskType, llm: LLM) -> float: async with ( MCPStdioTool(name="calculator", command="uvx", args=["mcp-server-calculator"]) as mcp_server, Agent( - chat_client=OpenAIChatClient( + client=OpenAIChatClient( model_id=llm.model, api_key="your-api-key", base_url=llm.endpoint, diff --git a/python/packages/lab/lightning/samples/train_math_agent.py b/python/packages/lab/lightning/samples/train_math_agent.py index d9164adf42..f702b5a631 100644 --- a/python/packages/lab/lightning/samples/train_math_agent.py +++ b/python/packages/lab/lightning/samples/train_math_agent.py @@ -167,7 +167,7 @@ async def math_agent(task: MathProblem, llm: LLM) -> float: async with ( MCPStdioTool(name="calculator", command="uvx", args=["mcp-server-calculator"]) as mcp_server, Agent( - chat_client=OpenAIChatClient( + client=OpenAIChatClient( model_id=llm.model, # This is the model being trained api_key=os.getenv("OPENAI_API_KEY") or "dummy", # Can be dummy when connecting to training LLM base_url=llm.endpoint, # vLLM server endpoint provided by agent-lightning diff --git a/python/packages/lab/lightning/tests/test_lightning.py b/python/packages/lab/lightning/tests/test_lightning.py index d302d71353..76e6b98506 100644 --- a/python/packages/lab/lightning/tests/test_lightning.py +++ b/python/packages/lab/lightning/tests/test_lightning.py @@ -81,13 +81,13 @@ def workflow_two_agents(): ): # Create the two agents analyzer_agent = Agent( - chat_client=first_chat_client, + client=first_chat_client, name="DataAnalyzer", instructions="You are a data analyst. 
Analyze the given data and provide insights.", ) advisor_agent = Agent( - chat_client=second_chat_client, + client=second_chat_client, name="InvestmentAdvisor", instructions="You are an investment advisor. Based on analysis results, provide recommendations.", ) diff --git a/python/packages/lab/tau2/README.md b/python/packages/lab/tau2/README.md index 66da215b47..083fd05a9d 100644 --- a/python/packages/lab/tau2/README.md +++ b/python/packages/lab/tau2/README.md @@ -144,7 +144,7 @@ class CustomTaskRunner(TaskRunner): def assistant_agent(self, assistant_chat_client): # Override to customize the assistant agent return Agent( - chat_client=assistant_chat_client, + client=assistant_chat_client, instructions="Your custom system prompt here", # Add custom tools, temperature, etc. ) @@ -152,7 +152,7 @@ class CustomTaskRunner(TaskRunner): def user_simulator(self, user_chat_client, task): # Override to customize the user simulator return Agent( - chat_client=user_chat_client, + client=user_chat_client, instructions="Custom user simulator prompt", ) ``` diff --git a/python/packages/lab/tau2/agent_framework_lab_tau2/runner.py b/python/packages/lab/tau2/agent_framework_lab_tau2/runner.py index 7cb161ca66..68205c880e 100644 --- a/python/packages/lab/tau2/agent_framework_lab_tau2/runner.py +++ b/python/packages/lab/tau2/agent_framework_lab_tau2/runner.py @@ -197,7 +197,7 @@ def assistant_agent(self, assistant_chat_client: SupportsChatGetResponse) -> Age # - Sliding window memory to handle long conversations within token limits # - Temperature-controlled response generation return Agent( - chat_client=assistant_chat_client, + client=assistant_chat_client, instructions=assistant_system_prompt, tools=tools, temperature=self.assistant_sampling_temperature, @@ -231,7 +231,7 @@ def user_simulator(self, user_simuator_chat_client: SupportsChatGetResponse, tas """ return Agent( - chat_client=user_simuator_chat_client, + client=user_simuator_chat_client, instructions=user_sim_system_prompt, 
temperature=0.0, # No sliding window for user simulator to maintain full conversation context diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py b/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py index 86830ec95e..65db808b83 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py @@ -302,7 +302,7 @@ def _clone_chat_agent(self, agent: Agent) -> Agent: } return Agent( - chat_client=agent.chat_client, + client=agent.client, id=agent.id, name=agent.name, description=agent.description, diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_magentic.py b/python/packages/orchestrations/agent_framework_orchestrations/_magentic.py index 4ba5e0ebff..433261083c 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_magentic.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_magentic.py @@ -1796,7 +1796,7 @@ def with_manager( # Configure manager agent with specific options and instructions manager_agent = Agent( name="Coordinator", - chat_client=OpenAIChatClient(model_id="gpt-4o"), + client=OpenAIChatClient(model_id="gpt-4o"), options=ChatOptions(temperature=0.3, seed=42), instructions="Be concise and focus on accuracy", ) diff --git a/python/packages/orchestrations/tests/test_group_chat.py b/python/packages/orchestrations/tests/test_group_chat.py index 174e7364b4..5c4bce8741 100644 --- a/python/packages/orchestrations/tests/test_group_chat.py +++ b/python/packages/orchestrations/tests/test_group_chat.py @@ -71,7 +71,7 @@ async def get_response( class StubManagerAgent(Agent): def __init__(self) -> None: - super().__init__(chat_client=MockChatClient(), name="manager_agent", description="Stub manager") + super().__init__(client=MockChatClient(), name="manager_agent", description="Stub manager") self._call_count = 0 async def run( @@ -899,7 +899,7 
@@ class DynamicManagerAgent(Agent): """Manager agent that dynamically selects from available participants.""" def __init__(self) -> None: - super().__init__(chat_client=MockChatClient(), name="dynamic_manager", description="Dynamic manager") + super().__init__(client=MockChatClient(), name="dynamic_manager", description="Dynamic manager") self._call_count = 0 async def run( diff --git a/python/packages/orchestrations/tests/test_handoff.py b/python/packages/orchestrations/tests/test_handoff.py index 2df03fff2d..38ff6ea49a 100644 --- a/python/packages/orchestrations/tests/test_handoff.py +++ b/python/packages/orchestrations/tests/test_handoff.py @@ -121,7 +121,7 @@ def __init__( handoff_to: The name of the agent to hand off to, or None for no handoff. This is hardcoded for testing purposes so that the agent always attempts to hand off. """ - super().__init__(chat_client=MockChatClient(name=name, handoff_to=handoff_to), name=name, id=name) + super().__init__(client=MockChatClient(name=name, handoff_to=handoff_to), name=name, id=name) async def _drain(stream: AsyncIterable[WorkflowEvent]) -> list[WorkflowEvent]: @@ -290,7 +290,7 @@ async def mock_get_response(messages: Any, options: dict[str, Any] | None = None # Create agent with specific tool_choice configuration via default_options agent = Agent( - chat_client=mock_client, + client=mock_client, name="test_agent", default_options={"tool_choice": {"mode": "required"}}, # type: ignore ) @@ -325,7 +325,7 @@ async def invoking(self, messages: Sequence[Message], **kwargs: Any) -> Context: # Create agent with context provider using proper constructor agent = Agent( - chat_client=mock_client, + client=mock_client, name="test_agent", id="test_agent", context_provider=context_provider, diff --git a/python/packages/orchestrations/tests/test_magentic.py b/python/packages/orchestrations/tests/test_magentic.py index 62305702d9..b24284f9c3 100644 --- a/python/packages/orchestrations/tests/test_magentic.py +++ 
b/python/packages/orchestrations/tests/test_magentic.py @@ -543,11 +543,11 @@ class StubAssistantsClient: class StubAssistantsAgent(BaseAgent): - chat_client: object | None = None # allow assignment via Pydantic field + client: object | None = None # allow assignment via Pydantic field def __init__(self) -> None: super().__init__(name="agentA") - self.chat_client = StubAssistantsClient() # type name contains 'AssistantsClient' + self.client = StubAssistantsClient() # type name contains 'AssistantsClient' def run(self, messages=None, *, stream: bool = False, thread=None, **kwargs): # type: ignore[override] if stream: diff --git a/python/packages/purview/README.md b/python/packages/purview/README.md index 650cbd2d7f..f23da59457 100644 --- a/python/packages/purview/README.md +++ b/python/packages/purview/README.md @@ -59,7 +59,7 @@ from agent_framework.microsoft import PurviewPolicyMiddleware, PurviewSettings from azure.identity import InteractiveBrowserCredential async def main(): - chat_client = AzureOpenAIChatClient() # uses environment for endpoint + deployment + client = AzureOpenAIChatClient() # uses environment for endpoint + deployment purview_middleware = PurviewPolicyMiddleware( credential=InteractiveBrowserCredential(), @@ -67,7 +67,7 @@ async def main(): ) agent = Agent( - chat_client=chat_client, + client=client, instructions="You are a helpful assistant.", middleware=[purview_middleware] ) @@ -227,7 +227,7 @@ credential = DefaultAzureCredential() client = AzureOpenAIChatClient() agent = Agent( - chat_client=client, + client=client, instructions="You are helpful.", middleware=[PurviewPolicyMiddleware(credential, PurviewSettings(app_name="My App"))] ) @@ -244,7 +244,7 @@ from azure.identity import DefaultAzureCredential credential = DefaultAzureCredential() -chat_client = AzureOpenAIChatClient( +client = AzureOpenAIChatClient( deployment_name=os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"], endpoint=os.environ["AZURE_OPENAI_ENDPOINT"], credential=credential, @@ 
-253,7 +253,7 @@ chat_client = AzureOpenAIChatClient( ], ) -agent = Agent(chat_client=chat_client, instructions="You are helpful.") +agent = Agent(client=client, instructions="You are helpful.") ``` The policy logic is identical; the difference is only the hook point in the pipeline. diff --git a/python/packages/purview/agent_framework_purview/_middleware.py b/python/packages/purview/agent_framework_purview/_middleware.py index e50793f79d..083e44ad99 100644 --- a/python/packages/purview/agent_framework_purview/_middleware.py +++ b/python/packages/purview/agent_framework_purview/_middleware.py @@ -31,7 +31,7 @@ class PurviewPolicyMiddleware(AgentMiddleware): credential = ... # TokenCredential or AsyncTokenCredential settings = PurviewSettings(app_name="My App") agent = Agent( - chat_client=client, instructions="...", middleware=[PurviewPolicyMiddleware(credential, settings)] + client=client, instructions="...", middleware=[PurviewPolicyMiddleware(credential, settings)] ) """ diff --git a/python/packages/purview/tests/test_chat_middleware.py b/python/packages/purview/tests/test_chat_middleware.py index dac88ace57..0301f4496c 100644 --- a/python/packages/purview/tests/test_chat_middleware.py +++ b/python/packages/purview/tests/test_chat_middleware.py @@ -34,10 +34,10 @@ def middleware(self, mock_credential: AsyncMock, settings: PurviewSettings) -> P @pytest.fixture def chat_context(self) -> ChatContext: - chat_client = DummyChatClient() + client = DummyChatClient() chat_options = MagicMock() chat_options.model = "test-model" - return ChatContext(chat_client=chat_client, messages=[Message(role="user", text="Hello")], options=chat_options) + return ChatContext(client=client, messages=[Message(role="user", text="Hello")], options=chat_options) async def test_initialization(self, middleware: PurviewChatPolicyMiddleware) -> None: assert middleware._client is not None @@ -103,11 +103,11 @@ def __init__(self): assert "blocked" in first_msg.text.lower() async def 
test_streaming_skips_post_check(self, middleware: PurviewChatPolicyMiddleware) -> None: - chat_client = DummyChatClient() + client = DummyChatClient() chat_options = MagicMock() chat_options.model = "test-model" streaming_context = ChatContext( - chat_client=chat_client, + client=client, messages=[Message(role="user", text="Hello")], options=chat_options, stream=True, @@ -184,11 +184,11 @@ async def test_chat_middleware_handles_payment_required_pre_check(self, mock_cre settings = PurviewSettings(app_name="Test App", ignore_payment_required=False) middleware = PurviewChatPolicyMiddleware(mock_credential, settings) - chat_client = DummyChatClient() + client = DummyChatClient() chat_options = MagicMock() chat_options.model = "test-model" context = ChatContext( - chat_client=chat_client, messages=[Message(role="user", text="Hello")], options=chat_options + client=client, messages=[Message(role="user", text="Hello")], options=chat_options ) async def mock_process_messages(*args, **kwargs): @@ -210,11 +210,11 @@ async def test_chat_middleware_handles_payment_required_post_check(self, mock_cr settings = PurviewSettings(app_name="Test App", ignore_payment_required=False) middleware = PurviewChatPolicyMiddleware(mock_credential, settings) - chat_client = DummyChatClient() + client = DummyChatClient() chat_options = MagicMock() chat_options.model = "test-model" context = ChatContext( - chat_client=chat_client, messages=[Message(role="user", text="Hello")], options=chat_options + client=client, messages=[Message(role="user", text="Hello")], options=chat_options ) call_count = 0 @@ -243,11 +243,11 @@ async def test_chat_middleware_ignores_payment_required_when_configured(self, mo settings = PurviewSettings(app_name="Test App", ignore_payment_required=True) middleware = PurviewChatPolicyMiddleware(mock_credential, settings) - chat_client = DummyChatClient() + client = DummyChatClient() chat_options = MagicMock() chat_options.model = "test-model" context = ChatContext( - 
chat_client=chat_client, messages=[Message(role="user", text="Hello")], options=chat_options + client=client, messages=[Message(role="user", text="Hello")], options=chat_options ) async def mock_process_messages(*args, **kwargs): @@ -285,11 +285,11 @@ async def test_chat_middleware_with_ignore_exceptions(self, mock_credential: Asy settings = PurviewSettings(app_name="Test App", ignore_exceptions=True) middleware = PurviewChatPolicyMiddleware(mock_credential, settings) - chat_client = DummyChatClient() + client = DummyChatClient() chat_options = MagicMock() chat_options.model = "test-model" context = ChatContext( - chat_client=chat_client, messages=[Message(role="user", text="Hello")], options=chat_options + client=client, messages=[Message(role="user", text="Hello")], options=chat_options ) async def mock_process_messages(*args, **kwargs): @@ -314,11 +314,11 @@ async def test_chat_middleware_raises_on_pre_check_exception_when_ignore_excepti settings = PurviewSettings(app_name="Test App", ignore_exceptions=False) middleware = PurviewChatPolicyMiddleware(mock_credential, settings) - chat_client = DummyChatClient() + client = DummyChatClient() chat_options = MagicMock() chat_options.model = "test-model" context = ChatContext( - chat_client=chat_client, messages=[Message(role="user", text="Hello")], options=chat_options + client=client, messages=[Message(role="user", text="Hello")], options=chat_options ) with patch.object(middleware._processor, "process_messages", side_effect=ValueError("boom")): @@ -336,11 +336,11 @@ async def test_chat_middleware_raises_on_post_check_exception_when_ignore_except settings = PurviewSettings(app_name="Test App", ignore_exceptions=False) middleware = PurviewChatPolicyMiddleware(mock_credential, settings) - chat_client = DummyChatClient() + client = DummyChatClient() chat_options = MagicMock() chat_options.model = "test-model" context = ChatContext( - chat_client=chat_client, messages=[Message(role="user", text="Hello")], 
options=chat_options + client=client, messages=[Message(role="user", text="Hello")], options=chat_options ) call_count = 0 diff --git a/python/samples/README.md b/python/samples/README.md index fc64dced52..0b50473588 100644 --- a/python/samples/README.md +++ b/python/samples/README.md @@ -95,7 +95,7 @@ This directory contains samples demonstrating the capabilities of Microsoft Agen | File | Description | |------|-------------| | [`getting_started/agents/custom/custom_agent.py`](./getting_started/agents/custom/custom_agent.py) | Custom Agent Implementation Example | -| [`getting_started/chat_client/custom_chat_client.py`](./getting_started/chat_client/custom_chat_client.py) | Custom Chat Client Implementation Example | +| [`getting_started/client/custom_chat_client.py`](./getting_started/client/custom_chat_client.py) | Custom Chat Client Implementation Example | ### Ollama @@ -145,14 +145,14 @@ The recommended way to use Ollama is via the native `OllamaChatClient` from the | File | Description | |------|-------------| -| [`getting_started/chat_client/azure_ai_chat_client.py`](./getting_started/chat_client/azure_ai_chat_client.py) | Azure AI Chat Client Direct Usage Example | -| [`getting_started/chat_client/azure_assistants_client.py`](./getting_started/chat_client/azure_assistants_client.py) | Azure OpenAI Assistants Client Direct Usage Example | -| [`getting_started/chat_client/azure_chat_client.py`](./getting_started/chat_client/azure_chat_client.py) | Azure Chat Client Direct Usage Example | -| [`getting_started/chat_client/azure_responses_client.py`](./getting_started/chat_client/azure_responses_client.py) | Azure OpenAI Responses Client Direct Usage Example | -| [`getting_started/chat_client/chat_response_cancellation.py`](./getting_started/chat_client/chat_response_cancellation.py) | Chat Response Cancellation Example | -| [`getting_started/chat_client/openai_assistants_client.py`](./getting_started/chat_client/openai_assistants_client.py) | OpenAI Assistants 
Client Direct Usage Example | -| [`getting_started/chat_client/openai_chat_client.py`](./getting_started/chat_client/openai_chat_client.py) | OpenAI Chat Client Direct Usage Example | -| [`getting_started/chat_client/openai_responses_client.py`](./getting_started/chat_client/openai_responses_client.py) | OpenAI Responses Client Direct Usage Example | +| [`getting_started/client/azure_ai_chat_client.py`](./getting_started/client/azure_ai_chat_client.py) | Azure AI Chat Client Direct Usage Example | +| [`getting_started/client/azure_assistants_client.py`](./getting_started/client/azure_assistants_client.py) | Azure OpenAI Assistants Client Direct Usage Example | +| [`getting_started/client/azure_chat_client.py`](./getting_started/client/azure_chat_client.py) | Azure Chat Client Direct Usage Example | +| [`getting_started/client/azure_responses_client.py`](./getting_started/client/azure_responses_client.py) | Azure OpenAI Responses Client Direct Usage Example | +| [`getting_started/client/chat_response_cancellation.py`](./getting_started/client/chat_response_cancellation.py) | Chat Response Cancellation Example | +| [`getting_started/client/openai_assistants_client.py`](./getting_started/client/openai_assistants_client.py) | OpenAI Assistants Client Direct Usage Example | +| [`getting_started/client/openai_chat_client.py`](./getting_started/client/openai_chat_client.py) | OpenAI Chat Client Direct Usage Example | +| [`getting_started/client/openai_responses_client.py`](./getting_started/client/openai_responses_client.py) | OpenAI Responses Client Direct Usage Example | ## Context Providers diff --git a/python/samples/concepts/response_stream.py b/python/samples/concepts/response_stream.py index 5cedc6711a..1b26ac5e90 100644 --- a/python/samples/concepts/response_stream.py +++ b/python/samples/concepts/response_stream.py @@ -124,7 +124,7 @@ ```python # Agent does something like this internally: -chat_stream = chat_client.get_response(messages, stream=True) +chat_stream 
= client.get_response(messages, stream=True) agent_stream = ( chat_stream .map(_to_agent_update, _to_agent_response) diff --git a/python/samples/concepts/tools/README.md b/python/samples/concepts/tools/README.md index b8a7704abc..91c481842d 100644 --- a/python/samples/concepts/tools/README.md +++ b/python/samples/concepts/tools/README.md @@ -46,14 +46,14 @@ sequenceDiagram alt Non-Streaming (stream=False) RawAgent->>RawAgent: _prepare_run_context() [async] Note right of RawAgent: Builds: thread_messages, chat_options, tools - RawAgent->>CML: chat_client.get_response(stream=False) + RawAgent->>CML: client.get_response(stream=False) else Streaming (stream=True) RawAgent->>RawAgent: ResponseStream.from_awaitable() Note right of RawAgent: Defers async prep to stream consumption RawAgent-->>User: Returns ResponseStream immediately Note over RawAgent,CML: Async work happens on iteration RawAgent->>RawAgent: _prepare_run_context() [deferred] - RawAgent->>CML: chat_client.get_response(stream=True) + RawAgent->>CML: client.get_response(stream=True) end Note over CML,CMP: Chat Middleware Layer @@ -142,7 +142,7 @@ sequenceDiagram **Key Operations:** 1. `categorize_middleware()` separates middleware by type (agent, chat, function) -2. Chat and function middleware are forwarded to `chat_client` +2. Chat and function middleware are forwarded to `client` 3. `AgentMiddlewarePipeline.execute()` runs the agent middleware chain 4. Final handler calls `RawAgent.run()` @@ -154,13 +154,13 @@ sequenceDiagram ### 2. 
Chat Middleware Layer (`ChatMiddlewareLayer`) -**Entry Point:** `chat_client.get_response(messages, options)` +**Entry Point:** `client.get_response(messages, options)` **Context Object:** `ChatContext` | Field | Type | Description | |-------|------|-------------| -| `chat_client` | `SupportsChatGetResponse` | The chat client | +| `client` | `SupportsChatGetResponse` | The chat client | | `messages` | `Sequence[Message]` | Messages to send | | `options` | `Mapping[str, Any]` | Chat options | | `stream` | `bool` | Whether streaming | @@ -463,7 +463,7 @@ Returns `Awaitable[AgentResponse]`: ```python async def _run_non_streaming(): ctx = await self._prepare_run_context(...) # Async preparation - response = await self.chat_client.get_response(stream=False, ...) + response = await self.client.get_response(stream=False, ...) await self._finalize_response_and_update_thread(...) return AgentResponse(...) ``` @@ -476,7 +476,7 @@ Returns `ResponseStream[AgentResponseUpdate, AgentResponse]` **synchronously**: # Async preparation is deferred using ResponseStream.from_awaitable() async def _get_stream(): ctx = await self._prepare_run_context(...) # Deferred until iteration - return self.chat_client.get_response(stream=True, ...) + return self.client.get_response(stream=True, ...) return ( ResponseStream.from_awaitable(_get_stream()) diff --git a/python/samples/concepts/typed_options.py b/python/samples/concepts/typed_options.py index 85ba00ac20..e111222601 100644 --- a/python/samples/concepts/typed_options.py +++ b/python/samples/concepts/typed_options.py @@ -56,7 +56,7 @@ async def demo_anthropic_agent() -> None: # Create a typed agent for Anthropic - IDE knows Anthropic-specific options! agent = Agent( - chat_client=client, + client=client, name="claude-assistant", instructions="You are a helpful assistant powered by Claude. 
Be concise.", default_options={ @@ -140,7 +140,7 @@ async def demo_openai_agent() -> None: # or on the client when constructing the client instance: # client = OpenAIChatClient[OpenAIReasoningChatOptions]() agent = Agent[OpenAIReasoningChatOptions]( - chat_client=OpenAIChatClient(), + client=OpenAIChatClient(), name="weather-assistant", instructions="You are a helpful assistant. Answer concisely.", # Options can be set at construction time diff --git a/python/samples/demos/chatkit-integration/app.py b/python/samples/demos/chatkit-integration/app.py index 543b508339..832e2ce8c1 100644 --- a/python/samples/demos/chatkit-integration/app.py +++ b/python/samples/demos/chatkit-integration/app.py @@ -218,7 +218,7 @@ def __init__(self, data_store: SQLiteStore, attachment_store: FileBasedAttachmen # For authentication, run `az login` command in terminal try: self.weather_agent = Agent( - chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), + client=AzureOpenAIChatClient(credential=AzureCliCredential()), instructions=( "You are a helpful weather assistant with image analysis capabilities. 
" "You can provide weather information for any location, tell the current time, " @@ -301,7 +301,7 @@ async def _update_thread_title( ] # Use the chat client directly for a quick, lightweight call - response = await self.weather_agent.chat_client.get_response( + response = await self.weather_agent.client.get_response( messages=title_prompt, options={ "temperature": 0.3, diff --git a/python/samples/demos/workflow_evaluation/create_workflow.py b/python/samples/demos/workflow_evaluation/create_workflow.py index b335ad274d..2b8840c1e0 100644 --- a/python/samples/demos/workflow_evaluation/create_workflow.py +++ b/python/samples/demos/workflow_evaluation/create_workflow.py @@ -73,9 +73,9 @@ async def start_executor(input: str, ctx: WorkflowContext[list[Message]]) -> Non class ResearchLead(Executor): """Aggregates and summarizes travel planning findings from all specialized agents.""" - def __init__(self, chat_client: AzureAIClient, id: str = "travel-planning-coordinator"): + def __init__(self, client: AzureAIClient, id: str = "travel-planning-coordinator"): # store=True to preserve conversation history for evaluation - self.agent = chat_client.as_agent( + self.agent = client.as_agent( id="travel-planning-coordinator", instructions=( "You are the final coordinator. You will receive responses from multiple agents: " @@ -142,17 +142,17 @@ def _extract_agent_findings(self, responses: list[AgentExecutorResponse]) -> lis return agent_findings -async def run_workflow_with_response_tracking(query: str, chat_client: AzureAIClient | None = None) -> dict: +async def run_workflow_with_response_tracking(query: str, client: AzureAIClient | None = None) -> dict: """Run multi-agent workflow and track conversation IDs, response IDs, and interaction sequence. 
Args: query: The user query to process through the multi-agent workflow - chat_client: Optional AzureAIClient instance + client: Optional AzureAIClient instance Returns: Dictionary containing interaction sequence, conversation/response IDs, and conversation analysis """ - if chat_client is None: + if client is None: try: async with DefaultAzureCredential() as credential: # Create AIProjectClient with the correct API version for V2 prompt agents @@ -171,10 +171,10 @@ async def run_workflow_with_response_tracking(query: str, chat_client: AzureAICl print(f"Error during workflow execution: {e}") raise else: - return await _run_workflow_with_client(query, chat_client) + return await _run_workflow_with_client(query, client) -async def _run_workflow_with_client(query: str, chat_client: AzureAIClient) -> dict: +async def _run_workflow_with_client(query: str, client: AzureAIClient) -> dict: """Execute workflow with given client and track all interactions.""" # Initialize tracking variables - use lists to track multiple responses per agent @@ -184,7 +184,7 @@ async def _run_workflow_with_client(query: str, chat_client: AzureAIClient) -> d # Create workflow components and keep agent references # Pass project_client and credential to create separate client instances per agent - workflow, agent_map = await _create_workflow(chat_client.project_client, chat_client.credential) + workflow, agent_map = await _create_workflow(client.project_client, client.credential) # Process workflow events events = workflow.run(query, stream=True) @@ -210,7 +210,7 @@ async def _create_workflow(project_client, credential): final_coordinator_client = AzureAIClient( project_client=project_client, credential=credential, agent_name="final-coordinator" ) - final_coordinator = ResearchLead(chat_client=final_coordinator_client, id="final-coordinator") + final_coordinator = ResearchLead(client=final_coordinator_client, id="final-coordinator") # Agent 1: Travel Request Handler (initial coordinator) # Create 
separate client with unique agent_name diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_application_endpoint.py b/python/samples/getting_started/agents/azure_ai/azure_ai_with_application_endpoint.py index c26b521c46..db1c80a597 100644 --- a/python/samples/getting_started/agents/azure_ai/azure_ai_with_application_endpoint.py +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_with_application_endpoint.py @@ -24,7 +24,7 @@ async def main() -> None: # /api/projects//applications//protocols AIProjectClient(endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"], credential=credential) as project_client, Agent( - chat_client=AzureAIClient( + client=AzureAIClient( project_client=project_client, ), ) as agent, diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_download.py b/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_download.py index 7a9d7cdf40..f7ac089528 100644 --- a/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_download.py +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_download.py @@ -63,7 +63,7 @@ async def download_container_files( print(f"\nDownloading {len(file_contents)} container file(s) to {output_dir.absolute()}...") # Access the OpenAI client from AzureAIClient - openai_client = agent.chat_client.client + openai_client = agent.client.client downloaded_files: list[Path] = [] diff --git a/python/samples/getting_started/agents/azure_openai/azure_assistants_with_code_interpreter.py b/python/samples/getting_started/agents/azure_openai/azure_assistants_with_code_interpreter.py index 8f3a96e321..5bf4f5b2d2 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_assistants_with_code_interpreter.py +++ b/python/samples/getting_started/agents/azure_openai/azure_assistants_with_code_interpreter.py @@ -47,7 +47,7 @@ async def main() -> None: # For authentication, run 
`az login` command in terminal or replace AzureCliCredential with preferred # authentication option. async with Agent( - chat_client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), + client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), instructions="You are a helpful assistant that can write and execute Python code to solve problems.", tools=HostedCodeInterpreterTool(), ) as agent: diff --git a/python/samples/getting_started/agents/azure_openai/azure_assistants_with_existing_assistant.py b/python/samples/getting_started/agents/azure_openai/azure_assistants_with_existing_assistant.py index b79c85258a..b1812463c4 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_assistants_with_existing_assistant.py +++ b/python/samples/getting_started/agents/azure_openai/azure_assistants_with_existing_assistant.py @@ -47,7 +47,7 @@ async def main() -> None: try: async with Agent( - chat_client=AzureOpenAIAssistantsClient(async_client=client, assistant_id=created_assistant.id), + client=AzureOpenAIAssistantsClient(async_client=client, assistant_id=created_assistant.id), instructions="You are a helpful weather agent.", tools=get_weather, ) as agent: diff --git a/python/samples/getting_started/agents/azure_openai/azure_assistants_with_function_tools.py b/python/samples/getting_started/agents/azure_openai/azure_assistants_with_function_tools.py index dc6393684f..8f3db478c9 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_assistants_with_function_tools.py +++ b/python/samples/getting_started/agents/azure_openai/azure_assistants_with_function_tools.py @@ -44,7 +44,7 @@ async def tools_on_agent_level() -> None: # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. 
async with Agent( - chat_client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), + client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), instructions="You are a helpful assistant that can provide weather and time information.", tools=[get_weather, get_time], # Tools defined at agent creation ) as agent: @@ -75,7 +75,7 @@ async def tools_on_run_level() -> None: # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. async with Agent( - chat_client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), + client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), instructions="You are a helpful assistant.", # No tools defined here ) as agent: @@ -106,7 +106,7 @@ async def mixed_tools_example() -> None: # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. async with Agent( - chat_client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), + client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), instructions="You are a comprehensive assistant that can help with various information requests.", tools=[get_weather], # Base tool available for all queries ) as agent: diff --git a/python/samples/getting_started/agents/azure_openai/azure_assistants_with_thread.py b/python/samples/getting_started/agents/azure_openai/azure_assistants_with_thread.py index cdc8a17877..d868adf514 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_assistants_with_thread.py +++ b/python/samples/getting_started/agents/azure_openai/azure_assistants_with_thread.py @@ -34,7 +34,7 @@ async def example_with_automatic_thread_creation() -> None: # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. 
async with Agent( - chat_client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), + client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), instructions="You are a helpful weather agent.", tools=get_weather, ) as agent: @@ -60,7 +60,7 @@ async def example_with_thread_persistence() -> None: # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. async with Agent( - chat_client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), + client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), instructions="You are a helpful weather agent.", tools=get_weather, ) as agent: @@ -98,7 +98,7 @@ async def example_with_existing_thread_id() -> None: # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. async with Agent( - chat_client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), + client=AzureOpenAIAssistantsClient(credential=AzureCliCredential()), instructions="You are a helpful weather agent.", tools=get_weather, ) as agent: @@ -118,7 +118,7 @@ async def example_with_existing_thread_id() -> None: # Create a new agent instance but use the existing thread ID async with Agent( - chat_client=AzureOpenAIAssistantsClient(thread_id=existing_thread_id, credential=AzureCliCredential()), + client=AzureOpenAIAssistantsClient(thread_id=existing_thread_id, credential=AzureCliCredential()), instructions="You are a helpful weather agent.", tools=get_weather, ) as agent: diff --git a/python/samples/getting_started/agents/azure_openai/azure_chat_client_with_function_tools.py b/python/samples/getting_started/agents/azure_openai/azure_chat_client_with_function_tools.py index 90ff87e507..07dc88dd6d 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_chat_client_with_function_tools.py +++ 
b/python/samples/getting_started/agents/azure_openai/azure_chat_client_with_function_tools.py @@ -44,7 +44,7 @@ async def tools_on_agent_level() -> None: # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. agent = Agent( - chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), + client=AzureOpenAIChatClient(credential=AzureCliCredential()), instructions="You are a helpful assistant that can provide weather and time information.", tools=[get_weather, get_time], # Tools defined at agent creation ) @@ -76,7 +76,7 @@ async def tools_on_run_level() -> None: # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. agent = Agent( - chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), + client=AzureOpenAIChatClient(credential=AzureCliCredential()), instructions="You are a helpful assistant.", # No tools defined here ) @@ -108,7 +108,7 @@ async def mixed_tools_example() -> None: # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. 
agent = Agent( - chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), + client=AzureOpenAIChatClient(credential=AzureCliCredential()), instructions="You are a comprehensive assistant that can help with various information requests.", tools=[get_weather], # Base tool available for all queries ) diff --git a/python/samples/getting_started/agents/azure_openai/azure_chat_client_with_thread.py b/python/samples/getting_started/agents/azure_openai/azure_chat_client_with_thread.py index ab34eb548d..8e262d5999 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_chat_client_with_thread.py +++ b/python/samples/getting_started/agents/azure_openai/azure_chat_client_with_thread.py @@ -34,7 +34,7 @@ async def example_with_automatic_thread_creation() -> None: # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. agent = Agent( - chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), + client=AzureOpenAIChatClient(credential=AzureCliCredential()), instructions="You are a helpful weather agent.", tools=get_weather, ) @@ -61,7 +61,7 @@ async def example_with_thread_persistence() -> None: # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. agent = Agent( - chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), + client=AzureOpenAIChatClient(credential=AzureCliCredential()), instructions="You are a helpful weather agent.", tools=get_weather, ) @@ -96,7 +96,7 @@ async def example_with_existing_thread_messages() -> None: # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. 
agent = Agent( - chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), + client=AzureOpenAIChatClient(credential=AzureCliCredential()), instructions="You are a helpful weather agent.", tools=get_weather, ) @@ -118,7 +118,7 @@ async def example_with_existing_thread_messages() -> None: # Create a new agent instance but use the existing thread with its message history new_agent = Agent( - chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), + client=AzureOpenAIChatClient(credential=AzureCliCredential()), instructions="You are a helpful weather agent.", tools=get_weather, ) diff --git a/python/samples/getting_started/agents/azure_openai/azure_responses_client_code_interpreter_files.py b/python/samples/getting_started/agents/azure_openai/azure_responses_client_code_interpreter_files.py index e3a69d0792..6ef31b02f8 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_responses_client_code_interpreter_files.py +++ b/python/samples/getting_started/agents/azure_openai/azure_responses_client_code_interpreter_files.py @@ -77,7 +77,7 @@ async def get_token(): # Create agent using Azure OpenAI Responses client agent = Agent( - chat_client=AzureOpenAIResponsesClient(credential=credential), + client=AzureOpenAIResponsesClient(credential=credential), instructions="You are a helpful assistant that can analyze data files using Python code.", tools=HostedCodeInterpreterTool(inputs=[{"file_id": file_id}]), ) diff --git a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_code_interpreter.py b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_code_interpreter.py index 6e8d474198..7e21853f49 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_code_interpreter.py +++ b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_code_interpreter.py @@ -23,7 +23,7 @@ async def main() -> None: # For authentication, run `az login` 
command in terminal or replace AzureCliCredential with preferred # authentication option. agent = Agent( - chat_client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), + client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), instructions="You are a helpful assistant that can write and execute Python code to solve problems.", tools=HostedCodeInterpreterTool(), ) diff --git a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_file_search.py b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_file_search.py index 6423041b6e..0710f09664 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_file_search.py +++ b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_file_search.py @@ -54,7 +54,7 @@ async def main() -> None: file_id, vector_store = await create_vector_store(client) agent = Agent( - chat_client=client, + client=client, instructions="You are a helpful assistant that can search through files to find information.", tools=[HostedFileSearchTool(inputs=vector_store)], ) diff --git a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_function_tools.py b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_function_tools.py index e2bcef4250..e7ea434fb5 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_function_tools.py +++ b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_function_tools.py @@ -44,7 +44,7 @@ async def tools_on_agent_level() -> None: # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. 
agent = Agent( - chat_client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), + client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), instructions="You are a helpful assistant that can provide weather and time information.", tools=[get_weather, get_time], # Tools defined at agent creation ) @@ -76,7 +76,7 @@ async def tools_on_run_level() -> None: # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. agent = Agent( - chat_client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), + client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), instructions="You are a helpful assistant.", # No tools defined here ) @@ -108,7 +108,7 @@ async def mixed_tools_example() -> None: # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. agent = Agent( - chat_client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), + client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), instructions="You are a comprehensive assistant that can help with various information requests.", tools=[get_weather], # Base tool available for all queries ) diff --git a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_hosted_mcp.py b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_hosted_mcp.py index ed735d2290..dce8d96c0b 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_hosted_mcp.py +++ b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_hosted_mcp.py @@ -97,7 +97,7 @@ async def run_hosted_mcp_without_thread_and_specific_approval() -> None: # Tools are provided when creating the agent # The agent can use these tools for any query during its lifetime async with Agent( - chat_client=AzureOpenAIResponsesClient( + client=AzureOpenAIResponsesClient( credential=credential, 
), name="DocsAgent", @@ -130,7 +130,7 @@ async def run_hosted_mcp_without_approval() -> None: # Tools are provided when creating the agent # The agent can use these tools for any query during its lifetime async with Agent( - chat_client=AzureOpenAIResponsesClient( + client=AzureOpenAIResponsesClient( credential=credential, ), name="DocsAgent", @@ -164,7 +164,7 @@ async def run_hosted_mcp_with_thread() -> None: # Tools are provided when creating the agent # The agent can use these tools for any query during its lifetime async with Agent( - chat_client=AzureOpenAIResponsesClient( + client=AzureOpenAIResponsesClient( credential=credential, ), name="DocsAgent", @@ -197,7 +197,7 @@ async def run_hosted_mcp_with_thread_streaming() -> None: # Tools are provided when creating the agent # The agent can use these tools for any query during its lifetime async with Agent( - chat_client=AzureOpenAIResponsesClient( + client=AzureOpenAIResponsesClient( credential=credential, ), name="DocsAgent", diff --git a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_thread.py b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_thread.py index cfcc297d84..44f90d2ca2 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_thread.py +++ b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_thread.py @@ -34,7 +34,7 @@ async def example_with_automatic_thread_creation() -> None: # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. 
agent = Agent( - chat_client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), + client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), instructions="You are a helpful weather agent.", tools=get_weather, ) @@ -63,7 +63,7 @@ async def example_with_thread_persistence_in_memory() -> None: # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. agent = Agent( - chat_client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), + client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), instructions="You are a helpful weather agent.", tools=get_weather, ) @@ -104,7 +104,7 @@ async def example_with_existing_thread_id() -> None: # For authentication, run `az login` command in terminal or replace AzureCliCredential with preferred # authentication option. agent = Agent( - chat_client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), + client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), instructions="You are a helpful weather agent.", tools=get_weather, ) @@ -126,7 +126,7 @@ async def example_with_existing_thread_id() -> None: print("\n--- Continuing with the same thread ID in a new agent instance ---") agent = Agent( - chat_client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), + client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), instructions="You are a helpful weather agent.", tools=get_weather, ) diff --git a/python/samples/getting_started/agents/custom/README.md b/python/samples/getting_started/agents/custom/README.md index f8921b1f24..52b9e9853d 100644 --- a/python/samples/getting_started/agents/custom/README.md +++ b/python/samples/getting_started/agents/custom/README.md @@ -7,7 +7,7 @@ This folder contains examples demonstrating how to implement custom agents and c | File | Description | |------|-------------| | [`custom_agent.py`](custom_agent.py) | Shows how to create custom agents by extending the 
`BaseAgent` class. Demonstrates the `EchoAgent` implementation with both streaming and non-streaming responses, proper thread management, and message history handling. | -| [`custom_chat_client.py`](../../chat_client/custom_chat_client.py) | Demonstrates how to create custom chat clients by extending the `BaseChatClient` class. Shows a `EchoingChatClient` implementation and how to integrate it with `Agent` using the `as_agent()` method. | +| [`custom_chat_client.py`](../../client/custom_chat_client.py) | Demonstrates how to create custom chat clients by extending the `BaseChatClient` class. Shows an `EchoingChatClient` implementation and how to integrate it with `Agent` using the `as_agent()` method. | ## Key Takeaways diff --git a/python/samples/getting_started/agents/openai/openai_chat_client_with_function_tools.py b/python/samples/getting_started/agents/openai/openai_chat_client_with_function_tools.py index 3e43061654..6149695128 100644 --- a/python/samples/getting_started/agents/openai/openai_chat_client_with_function_tools.py +++ b/python/samples/getting_started/agents/openai/openai_chat_client_with_function_tools.py @@ -41,7 +41,7 @@ async def tools_on_agent_level() -> None: # Tools are provided when creating the agent # The agent can use these tools for any query during its lifetime agent = Agent( - chat_client=OpenAIChatClient(), + client=OpenAIChatClient(), instructions="You are a helpful assistant that can provide weather and time information.", tools=[get_weather, get_time], # Tools defined at agent creation ) @@ -71,7 +71,7 @@ async def tools_on_run_level() -> None: # Agent created without tools agent = Agent( - chat_client=OpenAIChatClient(), + client=OpenAIChatClient(), instructions="You are a helpful assistant.", # No tools defined here ) @@ -101,7 +101,7 @@ async def mixed_tools_example() -> None: # Agent created with some base tools agent = Agent( - chat_client=OpenAIChatClient(), + client=OpenAIChatClient(), instructions="You are a comprehensive 
assistant that can help with various information requests.", tools=[get_weather], # Base tool available for all queries ) diff --git a/python/samples/getting_started/agents/openai/openai_chat_client_with_local_mcp.py b/python/samples/getting_started/agents/openai/openai_chat_client_with_local_mcp.py index e7cb84ec25..d741a1f6b8 100644 --- a/python/samples/getting_started/agents/openai/openai_chat_client_with_local_mcp.py +++ b/python/samples/getting_started/agents/openai/openai_chat_client_with_local_mcp.py @@ -30,7 +30,7 @@ async def mcp_tools_on_run_level() -> None: url="https://learn.microsoft.com/api/mcp", ) as mcp_server, Agent( - chat_client=OpenAIChatClient(), + client=OpenAIChatClient(), name="DocsAgent", instructions="You are a helpful assistant that can help with microsoft documentation questions.", ) as agent, diff --git a/python/samples/getting_started/agents/openai/openai_chat_client_with_thread.py b/python/samples/getting_started/agents/openai/openai_chat_client_with_thread.py index 5103d580b7..98cf81091e 100644 --- a/python/samples/getting_started/agents/openai/openai_chat_client_with_thread.py +++ b/python/samples/getting_started/agents/openai/openai_chat_client_with_thread.py @@ -31,7 +31,7 @@ async def example_with_automatic_thread_creation() -> None: print("=== Automatic Thread Creation Example ===") agent = Agent( - chat_client=OpenAIChatClient(), + client=OpenAIChatClient(), instructions="You are a helpful weather agent.", tools=get_weather, ) @@ -56,7 +56,7 @@ async def example_with_thread_persistence() -> None: print("Using the same thread across multiple conversations to maintain context.\n") agent = Agent( - chat_client=OpenAIChatClient(), + client=OpenAIChatClient(), instructions="You are a helpful weather agent.", tools=get_weather, ) @@ -89,7 +89,7 @@ async def example_with_existing_thread_messages() -> None: print("=== Existing Thread Messages Example ===") agent = Agent( - chat_client=OpenAIChatClient(), + client=OpenAIChatClient(), 
instructions="You are a helpful weather agent.", tools=get_weather, ) @@ -111,7 +111,7 @@ async def example_with_existing_thread_messages() -> None: # Create a new agent instance but use the existing thread with its message history new_agent = Agent( - chat_client=OpenAIChatClient(), + client=OpenAIChatClient(), instructions="You are a helpful weather agent.", tools=get_weather, ) diff --git a/python/samples/getting_started/agents/openai/openai_chat_client_with_web_search.py b/python/samples/getting_started/agents/openai/openai_chat_client_with_web_search.py index 244e66a92b..977af9713d 100644 --- a/python/samples/getting_started/agents/openai/openai_chat_client_with_web_search.py +++ b/python/samples/getting_started/agents/openai/openai_chat_client_with_web_search.py @@ -23,7 +23,7 @@ async def main() -> None: } agent = Agent( - chat_client=OpenAIChatClient(model_id="gpt-4o-search-preview"), + client=OpenAIChatClient(model_id="gpt-4o-search-preview"), instructions="You are a helpful assistant that can search the web for current information.", tools=[HostedWebSearchTool(additional_properties=additional_properties)], ) diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_basic.py b/python/samples/getting_started/agents/openai/openai_responses_client_basic.py index bdc907ba7a..70241f2cd5 100644 --- a/python/samples/getting_started/agents/openai/openai_responses_client_basic.py +++ b/python/samples/getting_started/agents/openai/openai_responses_client_basic.py @@ -73,7 +73,7 @@ async def non_streaming_example() -> None: print("=== Non-streaming Response Example ===") agent = Agent( - chat_client=OpenAIResponsesClient(), + client=OpenAIResponsesClient(), instructions="You are a helpful weather agent.", tools=get_weather, ) @@ -89,7 +89,7 @@ async def streaming_example() -> None: print("=== Streaming Response Example ===") agent = Agent( - chat_client=OpenAIResponsesClient( + client=OpenAIResponsesClient( 
middleware=[security_and_override_middleware], ), instructions="You are a helpful weather agent.", diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_with_code_interpreter.py b/python/samples/getting_started/agents/openai/openai_responses_client_with_code_interpreter.py index e80e9bde0f..86a95bbe02 100644 --- a/python/samples/getting_started/agents/openai/openai_responses_client_with_code_interpreter.py +++ b/python/samples/getting_started/agents/openai/openai_responses_client_with_code_interpreter.py @@ -21,7 +21,7 @@ async def main() -> None: print("=== OpenAI Responses Agent with Code Interpreter Example ===") agent = Agent( - chat_client=OpenAIResponsesClient(), + client=OpenAIResponsesClient(), instructions="You are a helpful assistant that can write and execute Python code to solve problems.", tools=HostedCodeInterpreterTool(), ) diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_with_code_interpreter_files.py b/python/samples/getting_started/agents/openai/openai_responses_client_with_code_interpreter_files.py index 3f9320db82..425a428bda 100644 --- a/python/samples/getting_started/agents/openai/openai_responses_client_with_code_interpreter_files.py +++ b/python/samples/getting_started/agents/openai/openai_responses_client_with_code_interpreter_files.py @@ -67,7 +67,7 @@ async def main() -> None: # Create agent using OpenAI Responses client agent = Agent( - chat_client=OpenAIResponsesClient(), + client=OpenAIResponsesClient(), instructions="You are a helpful assistant that can analyze data files using Python code.", tools=HostedCodeInterpreterTool(inputs=[{"file_id": file_id}]), ) diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_with_file_search.py b/python/samples/getting_started/agents/openai/openai_responses_client_with_file_search.py index 431187f64b..1bca129314 100644 --- a/python/samples/getting_started/agents/openai/openai_responses_client_with_file_search.py 
+++ b/python/samples/getting_started/agents/openai/openai_responses_client_with_file_search.py @@ -48,7 +48,7 @@ async def main() -> None: file_id, vector_store = await create_vector_store(client) agent = Agent( - chat_client=client, + client=client, instructions="You are a helpful assistant that can search through files to find information.", tools=[HostedFileSearchTool(inputs=vector_store)], ) diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_with_function_tools.py b/python/samples/getting_started/agents/openai/openai_responses_client_with_function_tools.py index c173a5d0f7..ca448134ec 100644 --- a/python/samples/getting_started/agents/openai/openai_responses_client_with_function_tools.py +++ b/python/samples/getting_started/agents/openai/openai_responses_client_with_function_tools.py @@ -41,7 +41,7 @@ async def tools_on_agent_level() -> None: # Tools are provided when creating the agent # The agent can use these tools for any query during its lifetime agent = Agent( - chat_client=OpenAIResponsesClient(), + client=OpenAIResponsesClient(), instructions="You are a helpful assistant that can provide weather and time information.", tools=[get_weather, get_time], # Tools defined at agent creation ) @@ -71,7 +71,7 @@ async def tools_on_run_level() -> None: # Agent created without tools agent = Agent( - chat_client=OpenAIResponsesClient(), + client=OpenAIResponsesClient(), instructions="You are a helpful assistant.", # No tools defined here ) @@ -101,7 +101,7 @@ async def mixed_tools_example() -> None: # Agent created with some base tools agent = Agent( - chat_client=OpenAIResponsesClient(), + client=OpenAIResponsesClient(), instructions="You are a comprehensive assistant that can help with various information requests.", tools=[get_weather], # Base tool available for all queries ) diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_with_hosted_mcp.py 
b/python/samples/getting_started/agents/openai/openai_responses_client_with_hosted_mcp.py index 192a18ea49..3aa5174387 100644 --- a/python/samples/getting_started/agents/openai/openai_responses_client_with_hosted_mcp.py +++ b/python/samples/getting_started/agents/openai/openai_responses_client_with_hosted_mcp.py @@ -96,7 +96,7 @@ async def run_hosted_mcp_without_thread_and_specific_approval() -> None: # Tools are provided when creating the agent # The agent can use these tools for any query during its lifetime async with Agent( - chat_client=OpenAIResponsesClient(), + client=OpenAIResponsesClient(), name="DocsAgent", instructions="You are a helpful assistant that can help with microsoft documentation questions.", tools=HostedMCPTool( @@ -127,7 +127,7 @@ async def run_hosted_mcp_without_approval() -> None: # Tools are provided when creating the agent # The agent can use these tools for any query during its lifetime async with Agent( - chat_client=OpenAIResponsesClient(), + client=OpenAIResponsesClient(), name="DocsAgent", instructions="You are a helpful assistant that can help with microsoft documentation questions.", tools=HostedMCPTool( @@ -159,7 +159,7 @@ async def run_hosted_mcp_with_thread() -> None: # Tools are provided when creating the agent # The agent can use these tools for any query during its lifetime async with Agent( - chat_client=OpenAIResponsesClient(), + client=OpenAIResponsesClient(), name="DocsAgent", instructions="You are a helpful assistant that can help with microsoft documentation questions.", tools=HostedMCPTool( @@ -190,7 +190,7 @@ async def run_hosted_mcp_with_thread_streaming() -> None: # Tools are provided when creating the agent # The agent can use these tools for any query during its lifetime async with Agent( - chat_client=OpenAIResponsesClient(), + client=OpenAIResponsesClient(), name="DocsAgent", instructions="You are a helpful assistant that can help with microsoft documentation questions.", tools=HostedMCPTool( diff --git 
a/python/samples/getting_started/agents/openai/openai_responses_client_with_local_mcp.py b/python/samples/getting_started/agents/openai/openai_responses_client_with_local_mcp.py index c6a620f2bc..1b1e55c28d 100644 --- a/python/samples/getting_started/agents/openai/openai_responses_client_with_local_mcp.py +++ b/python/samples/getting_started/agents/openai/openai_responses_client_with_local_mcp.py @@ -23,7 +23,7 @@ async def streaming_with_mcp(show_raw_stream: bool = False) -> None: # Tools are provided when creating the agent # The agent can use these tools for any query during its lifetime async with Agent( - chat_client=OpenAIResponsesClient(), + client=OpenAIResponsesClient(), name="DocsAgent", instructions="You are a helpful assistant that can help with microsoft documentation questions.", tools=MCPStreamableHTTPTool( # Tools defined at agent creation @@ -61,7 +61,7 @@ async def run_with_mcp() -> None: # Tools are provided when creating the agent # The agent can use these tools for any query during its lifetime async with Agent( - chat_client=OpenAIResponsesClient(), + client=OpenAIResponsesClient(), name="DocsAgent", instructions="You are a helpful assistant that can help with microsoft documentation questions.", tools=MCPStreamableHTTPTool( # Tools defined at agent creation diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_with_thread.py b/python/samples/getting_started/agents/openai/openai_responses_client_with_thread.py index ce158f9e9a..d9ce68dc42 100644 --- a/python/samples/getting_started/agents/openai/openai_responses_client_with_thread.py +++ b/python/samples/getting_started/agents/openai/openai_responses_client_with_thread.py @@ -31,7 +31,7 @@ async def example_with_automatic_thread_creation() -> None: print("=== Automatic Thread Creation Example ===") agent = Agent( - chat_client=OpenAIResponsesClient(), + client=OpenAIResponsesClient(), instructions="You are a helpful weather agent.", tools=get_weather, ) @@ -58,7 
+58,7 @@ async def example_with_thread_persistence_in_memory() -> None: print("=== Thread Persistence Example (In-Memory) ===") agent = Agent( - chat_client=OpenAIResponsesClient(), + client=OpenAIResponsesClient(), instructions="You are a helpful weather agent.", tools=get_weather, ) @@ -97,7 +97,7 @@ async def example_with_existing_thread_id() -> None: existing_thread_id = None agent = Agent( - chat_client=OpenAIResponsesClient(), + client=OpenAIResponsesClient(), instructions="You are a helpful weather agent.", tools=get_weather, ) @@ -118,7 +118,7 @@ async def example_with_existing_thread_id() -> None: print("\n--- Continuing with the same thread ID in a new agent instance ---") agent = Agent( - chat_client=OpenAIResponsesClient(), + client=OpenAIResponsesClient(), instructions="You are a helpful weather agent.", tools=get_weather, ) diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_with_web_search.py b/python/samples/getting_started/agents/openai/openai_responses_client_with_web_search.py index 1cc27737b0..d35ceb5e60 100644 --- a/python/samples/getting_started/agents/openai/openai_responses_client_with_web_search.py +++ b/python/samples/getting_started/agents/openai/openai_responses_client_with_web_search.py @@ -23,7 +23,7 @@ async def main() -> None: } agent = Agent( - chat_client=OpenAIResponsesClient(), + client=OpenAIResponsesClient(), instructions="You are a helpful assistant that can search the web for current information.", tools=[HostedWebSearchTool(additional_properties=additional_properties)], ) diff --git a/python/samples/getting_started/azure_functions/02_multi_agent/function_app.py b/python/samples/getting_started/azure_functions/02_multi_agent/function_app.py index 6a3f396bcb..15e034dd22 100644 --- a/python/samples/getting_started/azure_functions/02_multi_agent/function_app.py +++ b/python/samples/getting_started/azure_functions/02_multi_agent/function_app.py @@ -56,15 +56,15 @@ def calculate_tip(bill_amount: 
float, tip_percentage: float = 15.0) -> dict[str, # 1. Create multiple agents, each with its own instruction set and tools. -chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) +client = AzureOpenAIChatClient(credential=AzureCliCredential()) -weather_agent = chat_client.as_agent( +weather_agent = client.as_agent( name="WeatherAgent", instructions="You are a helpful weather assistant. Provide current weather information.", tools=[get_weather], ) -math_agent = chat_client.as_agent( +math_agent = client.as_agent( name="MathAgent", instructions="You are a helpful math assistant. Help users with calculations like tip calculations.", tools=[calculate_tip], diff --git a/python/samples/getting_started/azure_functions/05_multi_agent_orchestration_concurrency/function_app.py b/python/samples/getting_started/azure_functions/05_multi_agent_orchestration_concurrency/function_app.py index aad945288c..0be448295d 100644 --- a/python/samples/getting_started/azure_functions/05_multi_agent_orchestration_concurrency/function_app.py +++ b/python/samples/getting_started/azure_functions/05_multi_agent_orchestration_concurrency/function_app.py @@ -30,14 +30,14 @@ # 2. Instantiate both agents that the orchestration will run concurrently. def _create_agents() -> list[Any]: - chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIChatClient(credential=AzureCliCredential()) - physicist = chat_client.as_agent( + physicist = client.as_agent( name=PHYSICIST_AGENT_NAME, instructions="You are an expert in physics. You answer questions from a physics perspective.", ) - chemist = chat_client.as_agent( + chemist = client.as_agent( name=CHEMIST_AGENT_NAME, instructions="You are an expert in chemistry. 
You answer questions from a chemistry perspective.", ) diff --git a/python/samples/getting_started/azure_functions/06_multi_agent_orchestration_conditionals/function_app.py b/python/samples/getting_started/azure_functions/06_multi_agent_orchestration_conditionals/function_app.py index 54728332f0..0dbfeefd5c 100644 --- a/python/samples/getting_started/azure_functions/06_multi_agent_orchestration_conditionals/function_app.py +++ b/python/samples/getting_started/azure_functions/06_multi_agent_orchestration_conditionals/function_app.py @@ -45,14 +45,14 @@ class EmailPayload(BaseModel): # 2. Instantiate both agents so they can be registered with AgentFunctionApp. def _create_agents() -> list[Any]: - chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIChatClient(credential=AzureCliCredential()) - spam_agent = chat_client.as_agent( + spam_agent = client.as_agent( name=SPAM_AGENT_NAME, instructions="You are a spam detection assistant that identifies spam emails.", ) - email_agent = chat_client.as_agent( + email_agent = client.as_agent( name=EMAIL_AGENT_NAME, instructions="You are an email assistant that helps users draft responses to emails with professionalism.", ) diff --git a/python/samples/getting_started/azure_functions/08_mcp_server/README.md b/python/samples/getting_started/azure_functions/08_mcp_server/README.md index 02fcbbb957..a475823a1a 100644 --- a/python/samples/getting_started/azure_functions/08_mcp_server/README.md +++ b/python/samples/getting_started/azure_functions/08_mcp_server/README.md @@ -142,20 +142,20 @@ The sample shows how to enable MCP tool triggers with flexible agent configurati from agent_framework.azure import AgentFunctionApp, AzureOpenAIChatClient # Create Azure OpenAI Chat Client -chat_client = AzureOpenAIChatClient() +client = AzureOpenAIChatClient() # Define agents with different roles -joker_agent = chat_client.as_agent( +joker_agent = client.as_agent( name="Joker", instructions="You are good at 
telling jokes.", ) -stock_agent = chat_client.as_agent( +stock_agent = client.as_agent( name="StockAdvisor", instructions="Check stock prices.", ) -plant_agent = chat_client.as_agent( +plant_agent = client.as_agent( name="PlantAdvisor", instructions="Recommend plants.", description="Get plant recommendations.", diff --git a/python/samples/getting_started/azure_functions/08_mcp_server/function_app.py b/python/samples/getting_started/azure_functions/08_mcp_server/function_app.py index 2d67ddec81..b34361d10e 100644 --- a/python/samples/getting_started/azure_functions/08_mcp_server/function_app.py +++ b/python/samples/getting_started/azure_functions/08_mcp_server/function_app.py @@ -28,23 +28,23 @@ # Create Azure OpenAI Chat Client # This uses AzureCliCredential for authentication (requires 'az login') -chat_client = AzureOpenAIChatClient() +client = AzureOpenAIChatClient() # Define three AI agents with different roles # Agent 1: Joker - HTTP trigger only (default) -agent1 = chat_client.as_agent( +agent1 = client.as_agent( name="Joker", instructions="You are good at telling jokes.", ) # Agent 2: StockAdvisor - MCP tool trigger only -agent2 = chat_client.as_agent( +agent2 = client.as_agent( name="StockAdvisor", instructions="Check stock prices.", ) # Agent 3: PlantAdvisor - Both HTTP and MCP tool triggers -agent3 = chat_client.as_agent( +agent3 = client.as_agent( name="PlantAdvisor", instructions="Recommend plants.", description="Get plant recommendations.", diff --git a/python/samples/getting_started/chat_client/chat_response_cancellation.py b/python/samples/getting_started/chat_client/chat_response_cancellation.py index 6ed214808d..3435363512 100644 --- a/python/samples/getting_started/chat_client/chat_response_cancellation.py +++ b/python/samples/getting_started/chat_client/chat_response_cancellation.py @@ -21,10 +21,10 @@ async def main() -> None: - OpenAI model ID: Use "model_id" parameter or "OPENAI_CHAT_MODEL_ID" environment variable - OpenAI API key: Use 
"api_key" parameter or "OPENAI_API_KEY" environment variable """ - chat_client = OpenAIChatClient() + client = OpenAIChatClient() try: - task = asyncio.create_task(chat_client.get_response(messages=["Tell me a fantasy story."])) + task = asyncio.create_task(client.get_response(messages=["Tell me a fantasy story."])) await asyncio.sleep(1) task.cancel() await task diff --git a/python/samples/getting_started/context_providers/aggregate_context_provider.py b/python/samples/getting_started/context_providers/aggregate_context_provider.py index 2d2be413c5..3278577c92 100644 --- a/python/samples/getting_started/context_providers/aggregate_context_provider.py +++ b/python/samples/getting_started/context_providers/aggregate_context_provider.py @@ -58,7 +58,7 @@ class AggregateContextProvider(ContextProvider): aggregate = AggregateContextProvider([provider1, provider2, provider3]) # Pass the aggregate to the agent - agent = Agent(chat_client=client, name="assistant", context_provider=aggregate) + agent = Agent(client=client, name="assistant", context_provider=aggregate) # You can also add more providers later provider4 = CustomContextProvider4() @@ -230,7 +230,7 @@ async def invoked( async def main(): """Demonstrate using AggregateContextProvider to combine multiple providers.""" async with AzureCliCredential() as credential: - chat_client = AzureAIClient(credential=credential) + client = AzureAIClient(credential=credential) # Create individual context providers time_provider = TimeContextProvider() @@ -246,7 +246,7 @@ async def main(): # Create the agent with the aggregate provider async with Agent( - chat_client=chat_client, + client=client, instructions="You are a helpful assistant.", context_provider=aggregate_provider, ) as agent: diff --git a/python/samples/getting_started/context_providers/azure_ai_search/README.md b/python/samples/getting_started/context_providers/azure_ai_search/README.md index 71c48f2732..ecb00f68b4 100644 --- 
a/python/samples/getting_started/context_providers/azure_ai_search/README.md +++ b/python/samples/getting_started/context_providers/azure_ai_search/README.md @@ -142,7 +142,7 @@ search_provider = AzureAISearchContextProvider( # Create agent with search context async with AzureAIAgentClient(credential=DefaultAzureCredential()) as client: async with Agent( - chat_client=client, + client=client, model=model_deployment, context_provider=search_provider, ) as agent: @@ -167,7 +167,7 @@ search_provider = AzureAISearchContextProvider( # Use with agent (same as semantic mode) async with Agent( - chat_client=client, + client=client, model=model_deployment, context_provider=search_provider, ) as agent: diff --git a/python/samples/getting_started/context_providers/azure_ai_search/azure_ai_with_search_context_agentic.py b/python/samples/getting_started/context_providers/azure_ai_search/azure_ai_with_search_context_agentic.py index 4574587330..7b68265885 100644 --- a/python/samples/getting_started/context_providers/azure_ai_search/azure_ai_with_search_context_agentic.py +++ b/python/samples/getting_started/context_providers/azure_ai_search/azure_ai_with_search_context_agentic.py @@ -113,7 +113,7 @@ async def main() -> None: credential=AzureCliCredential(), ) as client, Agent( - chat_client=client, + client=client, name="SearchAgent", instructions=( "You are a helpful assistant with advanced reasoning capabilities. 
" diff --git a/python/samples/getting_started/context_providers/azure_ai_search/azure_ai_with_search_context_semantic.py b/python/samples/getting_started/context_providers/azure_ai_search/azure_ai_with_search_context_semantic.py index a44f3ba149..04e26e535e 100644 --- a/python/samples/getting_started/context_providers/azure_ai_search/azure_ai_with_search_context_semantic.py +++ b/python/samples/getting_started/context_providers/azure_ai_search/azure_ai_with_search_context_semantic.py @@ -70,7 +70,7 @@ async def main() -> None: credential=AzureCliCredential(), ) as client, Agent( - chat_client=client, + client=client, name="SearchAgent", instructions=( "You are a helpful assistant. Use the provided context from the " diff --git a/python/samples/getting_started/context_providers/simple_context_provider.py b/python/samples/getting_started/context_providers/simple_context_provider.py index fdb35c48c2..15892e286d 100644 --- a/python/samples/getting_started/context_providers/simple_context_provider.py +++ b/python/samples/getting_started/context_providers/simple_context_provider.py @@ -16,13 +16,13 @@ class UserInfo(BaseModel): class UserInfoMemory(ContextProvider): - def __init__(self, chat_client: SupportsChatGetResponse, user_info: UserInfo | None = None, **kwargs: Any): + def __init__(self, client: SupportsChatGetResponse, user_info: UserInfo | None = None, **kwargs: Any): """Create the memory. If you pass in kwargs, they will be attempted to be used to create a UserInfo object. 
""" - self._chat_client = chat_client + self._chat_client = client if user_info: self.user_info = user_info elif kwargs: @@ -92,14 +92,14 @@ def serialize(self) -> str: async def main(): async with AzureCliCredential() as credential: - chat_client = AzureAIClient(credential=credential) + client = AzureAIClient(credential=credential) # Create the memory provider - memory_provider = UserInfoMemory(chat_client) + memory_provider = UserInfoMemory(client) # Create the agent with memory async with Agent( - chat_client=chat_client, + client=client, instructions="You are a friendly assistant. Always address the user by their name.", context_provider=memory_provider, ) as agent: diff --git a/python/samples/getting_started/declarative/get_weather_agent.py b/python/samples/getting_started/declarative/get_weather_agent.py index 4e54af2461..af44382c00 100644 --- a/python/samples/getting_started/declarative/get_weather_agent.py +++ b/python/samples/getting_started/declarative/get_weather_agent.py @@ -26,7 +26,7 @@ async def main(): # create the AgentFactory with a chat client and bindings agent_factory = AgentFactory( - chat_client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), + client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), bindings={"get_weather": get_weather}, ) # create the agent from the yaml diff --git a/python/samples/getting_started/devui/README.md b/python/samples/getting_started/devui/README.md index 2a70f16cd9..5c16e1de71 100644 --- a/python/samples/getting_started/devui/README.md +++ b/python/samples/getting_started/devui/README.md @@ -106,7 +106,7 @@ from agent_framework.openai import OpenAIChatClient agent = Agent( name="MyAgent", description="My custom agent", - chat_client=OpenAIChatClient(), + client=OpenAIChatClient(), # ... 
your configuration ) ``` diff --git a/python/samples/getting_started/devui/azure_responses_agent/agent.py b/python/samples/getting_started/devui/azure_responses_agent/agent.py index 3405fce5c0..bf167f55c2 100644 --- a/python/samples/getting_started/devui/azure_responses_agent/agent.py +++ b/python/samples/getting_started/devui/azure_responses_agent/agent.py @@ -85,7 +85,7 @@ def extract_key_points( For PDFs, you can read and understand the text, tables, and structure. For images, you can describe what you see and extract any text. """, - chat_client=AzureOpenAIResponsesClient( + client=AzureOpenAIResponsesClient( deployment_name=_deployment_name, endpoint=_endpoint, api_version="2025-03-01-preview", # Required for Responses API diff --git a/python/samples/getting_started/devui/foundry_agent/agent.py b/python/samples/getting_started/devui/foundry_agent/agent.py index 002eb1af02..01a033689b 100644 --- a/python/samples/getting_started/devui/foundry_agent/agent.py +++ b/python/samples/getting_started/devui/foundry_agent/agent.py @@ -45,7 +45,7 @@ def get_forecast( # Agent instance following Agent Framework conventions agent = Agent( name="FoundryWeatherAgent", - chat_client=AzureAIAgentClient( + client=AzureAIAgentClient( project_endpoint=os.environ.get("AZURE_AI_PROJECT_ENDPOINT"), model_deployment_name=os.environ.get("FOUNDRY_MODEL_DEPLOYMENT_NAME"), credential=AzureCliCredential(), diff --git a/python/samples/getting_started/devui/in_memory_mode.py b/python/samples/getting_started/devui/in_memory_mode.py index 0fce3976c5..5d32861740 100644 --- a/python/samples/getting_started/devui/in_memory_mode.py +++ b/python/samples/getting_started/devui/in_memory_mode.py @@ -68,7 +68,7 @@ def main(): logger = logging.getLogger(__name__) # Create Azure OpenAI chat client - chat_client = AzureOpenAIChatClient( + client = AzureOpenAIChatClient( api_key=os.environ.get("AZURE_OPENAI_API_KEY"), azure_endpoint=os.environ.get("AZURE_OPENAI_ENDPOINT"), 
api_version=os.environ.get("AZURE_OPENAI_API_VERSION", "2024-10-21"), @@ -83,7 +83,7 @@ def main(): "You are a helpful weather and time assistant. Use the available tools to " "provide accurate weather information and current time for any location." ), - chat_client=chat_client, + client=client, tools=[get_weather, get_time], ) @@ -91,7 +91,7 @@ def main(): name="general-assistant", description="A simple conversational agent", instructions="You are a helpful assistant.", - chat_client=chat_client, + client=client, ) # Create a basic workflow: Input -> UpperCase -> AddExclamation -> Output diff --git a/python/samples/getting_started/devui/weather_agent_azure/agent.py b/python/samples/getting_started/devui/weather_agent_azure/agent.py index 94b787c7e8..65fa8d0b61 100644 --- a/python/samples/getting_started/devui/weather_agent_azure/agent.py +++ b/python/samples/getting_started/devui/weather_agent_azure/agent.py @@ -145,7 +145,7 @@ def send_email( and forecasts for any location. Always be helpful and provide detailed weather information when asked. """, - chat_client=AzureOpenAIChatClient( + client=AzureOpenAIChatClient( api_key=os.environ.get("AZURE_OPENAI_API_KEY", ""), ), tools=[get_weather, get_forecast, send_email], diff --git a/python/samples/getting_started/devui/workflow_agents/workflow.py b/python/samples/getting_started/devui/workflow_agents/workflow.py index 288c9d5279..4331650bf1 100644 --- a/python/samples/getting_started/devui/workflow_agents/workflow.py +++ b/python/samples/getting_started/devui/workflow_agents/workflow.py @@ -59,10 +59,10 @@ def is_approved(message: Any) -> bool: # Create Azure OpenAI chat client -chat_client = AzureOpenAIChatClient(api_key=os.environ.get("AZURE_OPENAI_API_KEY", "")) +client = AzureOpenAIChatClient(api_key=os.environ.get("AZURE_OPENAI_API_KEY", "")) # Create Writer agent - generates content -writer = chat_client.as_agent( +writer = client.as_agent( name="Writer", instructions=( "You are an excellent content writer. 
" @@ -72,7 +72,7 @@ def is_approved(message: Any) -> bool: ) # Create Reviewer agent - evaluates and provides structured feedback -reviewer = chat_client.as_agent( +reviewer = client.as_agent( name="Reviewer", instructions=( "You are an expert content reviewer. " @@ -90,7 +90,7 @@ def is_approved(message: Any) -> bool: ) # Create Editor agent - improves content based on feedback -editor = chat_client.as_agent( +editor = client.as_agent( name="Editor", instructions=( "You are a skilled editor. " @@ -101,7 +101,7 @@ def is_approved(message: Any) -> bool: ) # Create Publisher agent - formats content for publication -publisher = chat_client.as_agent( +publisher = client.as_agent( name="Publisher", instructions=( "You are a publishing agent. " @@ -111,7 +111,7 @@ def is_approved(message: Any) -> bool: ) # Create Summarizer agent - creates final publication report -summarizer = chat_client.as_agent( +summarizer = client.as_agent( name="Summarizer", instructions=( "You are a summarizer agent. " diff --git a/python/samples/getting_started/mcp/mcp_api_key_auth.py b/python/samples/getting_started/mcp/mcp_api_key_auth.py index d049e2a6cb..5790580116 100644 --- a/python/samples/getting_started/mcp/mcp_api_key_auth.py +++ b/python/samples/getting_started/mcp/mcp_api_key_auth.py @@ -44,7 +44,7 @@ async def api_key_auth_example() -> None: http_client=http_client, # Pass HTTP client with authentication headers ) as mcp_tool, Agent( - chat_client=OpenAIResponsesClient(), + client=OpenAIResponsesClient(), name="Agent", instructions="You are a helpful assistant.", tools=mcp_tool, diff --git a/python/samples/getting_started/mcp/mcp_github_pat.py b/python/samples/getting_started/mcp/mcp_github_pat.py index 84fef2f032..f814946283 100644 --- a/python/samples/getting_started/mcp/mcp_github_pat.py +++ b/python/samples/getting_started/mcp/mcp_github_pat.py @@ -55,7 +55,7 @@ async def github_mcp_example() -> None: # 5. 
Create agent with the GitHub MCP tool async with Agent( - chat_client=OpenAIResponsesClient(), + client=OpenAIResponsesClient(), name="GitHubAgent", instructions=( "You are a helpful assistant that can help users interact with GitHub. " diff --git a/python/samples/getting_started/observability/agent_observability.py b/python/samples/getting_started/observability/agent_observability.py index bd852b8107..606b633a1c 100644 --- a/python/samples/getting_started/observability/agent_observability.py +++ b/python/samples/getting_started/observability/agent_observability.py @@ -40,7 +40,7 @@ async def main(): print(f"Trace ID: {format_trace_id(current_span.get_span_context().trace_id)}") agent = Agent( - chat_client=OpenAIChatClient(), + client=OpenAIChatClient(), tools=get_weather, name="WeatherAgent", instructions="You are a weather assistant.", diff --git a/python/samples/getting_started/observability/agent_with_foundry_tracing.py b/python/samples/getting_started/observability/agent_with_foundry_tracing.py index 1892fb85e5..2b67ba9ea6 100644 --- a/python/samples/getting_started/observability/agent_with_foundry_tracing.py +++ b/python/samples/getting_started/observability/agent_with_foundry_tracing.py @@ -86,7 +86,7 @@ async def main(): print(f"Trace ID: {format_trace_id(current_span.get_span_context().trace_id)}") agent = Agent( - chat_client=OpenAIResponsesClient(), + client=OpenAIResponsesClient(), tools=get_weather, name="WeatherAgent", instructions="You are a weather assistant.", diff --git a/python/samples/getting_started/observability/azure_ai_agent_observability.py b/python/samples/getting_started/observability/azure_ai_agent_observability.py index fedd85d455..e7036cd9e4 100644 --- a/python/samples/getting_started/observability/azure_ai_agent_observability.py +++ b/python/samples/getting_started/observability/azure_ai_agent_observability.py @@ -57,7 +57,7 @@ async def main(): print(f"Trace ID: {format_trace_id(current_span.get_span_context().trace_id)}") agent = 
Agent( - chat_client=client, + client=client, tools=get_weather, name="WeatherAgent", instructions="You are a weather assistant.", diff --git a/python/samples/getting_started/observability/configure_otel_providers_with_env_var.py b/python/samples/getting_started/observability/configure_otel_providers_with_env_var.py index b21bd5bb67..379f5c95f6 100644 --- a/python/samples/getting_started/observability/configure_otel_providers_with_env_var.py +++ b/python/samples/getting_started/observability/configure_otel_providers_with_env_var.py @@ -28,7 +28,7 @@ """ # Define the scenarios that can be run to show the telemetry data collected by the SDK -SCENARIOS = ["chat_client", "chat_client_stream", "tool", "all"] +SCENARIOS = ["client", "client_stream", "tool", "all"] # NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
@@ -97,7 +97,7 @@ async def run_tool() -> None: print(f"Weather in Amsterdam:\n{weather}") -async def main(scenario: Literal["chat_client", "chat_client_stream", "tool", "all"] = "all"): +async def main(scenario: Literal["client", "client_stream", "tool", "all"] = "all"): """Run the selected scenario(s).""" # This will enable tracing and create the necessary tracing, logging and metrics providers @@ -113,10 +113,10 @@ async def main(scenario: Literal["chat_client", "chat_client_stream", "tool", "a if scenario == "tool" or scenario == "all": with suppress(Exception): await run_tool() - if scenario == "chat_client_stream" or scenario == "all": + if scenario == "client_stream" or scenario == "all": with suppress(Exception): await run_chat_client(client, stream=True) - if scenario == "chat_client" or scenario == "all": + if scenario == "client" or scenario == "all": with suppress(Exception): await run_chat_client(client, stream=False) diff --git a/python/samples/getting_started/observability/configure_otel_providers_with_parameters.py b/python/samples/getting_started/observability/configure_otel_providers_with_parameters.py index 252e836b82..f04bd2cd22 100644 --- a/python/samples/getting_started/observability/configure_otel_providers_with_parameters.py +++ b/python/samples/getting_started/observability/configure_otel_providers_with_parameters.py @@ -28,7 +28,7 @@ """ # Define the scenarios that can be run to show the telemetry data collected by the SDK -SCENARIOS = ["chat_client", "chat_client_stream", "tool", "all"] +SCENARIOS = ["client", "client_stream", "tool", "all"] # NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
@@ -97,7 +97,7 @@ async def run_tool() -> None: print(f"Weather in Amsterdam:\n{weather}") -async def main(scenario: Literal["chat_client", "chat_client_stream", "tool", "all"] = "all"): +async def main(scenario: Literal["client", "client_stream", "tool", "all"] = "all"): """Run the selected scenario(s).""" # Setup the logging with the more complete format @@ -148,10 +148,10 @@ async def main(scenario: Literal["chat_client", "chat_client_stream", "tool", "a if scenario == "tool" or scenario == "all": with suppress(Exception): await run_tool() - if scenario == "chat_client_stream" or scenario == "all": + if scenario == "client_stream" or scenario == "all": with suppress(Exception): await run_chat_client(client, stream=True) - if scenario == "chat_client" or scenario == "all": + if scenario == "client" or scenario == "all": with suppress(Exception): await run_chat_client(client, stream=False) diff --git a/python/samples/getting_started/orchestrations/README.md b/python/samples/getting_started/orchestrations/README.md index 0965ff2178..8ecf961d16 100644 --- a/python/samples/getting_started/orchestrations/README.md +++ b/python/samples/getting_started/orchestrations/README.md @@ -63,7 +63,7 @@ These may appear in event streams (executor_invoked/executor_completed). They're ## Environment Variables -- **AzureOpenAIChatClient**: Set Azure OpenAI environment variables as documented [here](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/chat_client/README.md#environment-variables). +- **AzureOpenAIChatClient**: Set Azure OpenAI environment variables as documented [here](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/client/README.md#environment-variables). 
- **OpenAI** (used in some orchestration samples): - [OpenAIChatClient env vars](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/agents/openai_chat_client/README.md) diff --git a/python/samples/getting_started/orchestrations/concurrent_agents.py b/python/samples/getting_started/orchestrations/concurrent_agents.py index 19ff4ebed4..2d216a131b 100644 --- a/python/samples/getting_started/orchestrations/concurrent_agents.py +++ b/python/samples/getting_started/orchestrations/concurrent_agents.py @@ -29,9 +29,9 @@ async def main() -> None: # 1) Create three domain agents using AzureOpenAIChatClient - chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIChatClient(credential=AzureCliCredential()) - researcher = chat_client.as_agent( + researcher = client.as_agent( instructions=( "You're an expert market and product researcher. Given a prompt, provide concise, factual insights," " opportunities, and risks." @@ -39,7 +39,7 @@ async def main() -> None: name="researcher", ) - marketer = chat_client.as_agent( + marketer = client.as_agent( instructions=( "You're a creative marketing strategist. Craft compelling value propositions and target messaging" " aligned to the prompt." @@ -47,7 +47,7 @@ async def main() -> None: name="marketer", ) - legal = chat_client.as_agent( + legal = client.as_agent( instructions=( "You're a cautious legal/compliance reviewer. Highlight constraints, disclaimers, and policy concerns" " based on the prompt." 
diff --git a/python/samples/getting_started/orchestrations/concurrent_custom_agent_executors.py b/python/samples/getting_started/orchestrations/concurrent_custom_agent_executors.py index 8682f94117..008fa4755f 100644 --- a/python/samples/getting_started/orchestrations/concurrent_custom_agent_executors.py +++ b/python/samples/getting_started/orchestrations/concurrent_custom_agent_executors.py @@ -39,8 +39,8 @@ class ResearcherExec(Executor): agent: Agent - def __init__(self, chat_client: AzureOpenAIChatClient, id: str = "researcher"): - self.agent = chat_client.as_agent( + def __init__(self, client: AzureOpenAIChatClient, id: str = "researcher"): + self.agent = client.as_agent( instructions=( "You're an expert market and product researcher. Given a prompt, provide concise, factual insights," " opportunities, and risks." @@ -59,8 +59,8 @@ async def run(self, request: AgentExecutorRequest, ctx: WorkflowContext[AgentExe class MarketerExec(Executor): agent: Agent - def __init__(self, chat_client: AzureOpenAIChatClient, id: str = "marketer"): - self.agent = chat_client.as_agent( + def __init__(self, client: AzureOpenAIChatClient, id: str = "marketer"): + self.agent = client.as_agent( instructions=( "You're a creative marketing strategist. Craft compelling value propositions and target messaging" " aligned to the prompt." @@ -79,8 +79,8 @@ async def run(self, request: AgentExecutorRequest, ctx: WorkflowContext[AgentExe class LegalExec(Executor): agent: Agent - def __init__(self, chat_client: AzureOpenAIChatClient, id: str = "legal"): - self.agent = chat_client.as_agent( + def __init__(self, client: AzureOpenAIChatClient, id: str = "legal"): + self.agent = client.as_agent( instructions=( "You're a cautious legal/compliance reviewer. Highlight constraints, disclaimers, and policy concerns" " based on the prompt." 
@@ -97,11 +97,11 @@ async def run(self, request: AgentExecutorRequest, ctx: WorkflowContext[AgentExe async def main() -> None: - chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIChatClient(credential=AzureCliCredential()) - researcher = ResearcherExec(chat_client) - marketer = MarketerExec(chat_client) - legal = LegalExec(chat_client) + researcher = ResearcherExec(client) + marketer = MarketerExec(client) + legal = LegalExec(client) workflow = ConcurrentBuilder().participants([researcher, marketer, legal]).build() diff --git a/python/samples/getting_started/orchestrations/concurrent_custom_aggregator.py b/python/samples/getting_started/orchestrations/concurrent_custom_aggregator.py index b11bcc7c29..17b1496e0b 100644 --- a/python/samples/getting_started/orchestrations/concurrent_custom_aggregator.py +++ b/python/samples/getting_started/orchestrations/concurrent_custom_aggregator.py @@ -20,7 +20,7 @@ Demonstrates: - ConcurrentBuilder(participants=[...]).with_aggregator(callback) - Fan-out to agents and fan-in at an aggregator -- Aggregation implemented via an LLM call (chat_client.get_response) +- Aggregation implemented via an LLM call (client.get_response) - Workflow output yielded with the synthesized summary string Prerequisites: @@ -29,23 +29,23 @@ async def main() -> None: - chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIChatClient(credential=AzureCliCredential()) - researcher = chat_client.as_agent( + researcher = client.as_agent( instructions=( "You're an expert market and product researcher. Given a prompt, provide concise, factual insights," " opportunities, and risks." ), name="researcher", ) - marketer = chat_client.as_agent( + marketer = client.as_agent( instructions=( "You're a creative marketing strategist. Craft compelling value propositions and target messaging" " aligned to the prompt." 
), name="marketer", ) - legal = chat_client.as_agent( + legal = client.as_agent( instructions=( "You're a cautious legal/compliance reviewer. Highlight constraints, disclaimers, and policy concerns" " based on the prompt." @@ -75,7 +75,7 @@ async def summarize_results(results: list[Any]) -> str: ) user_msg = Message("user", text="\n\n".join(expert_sections)) - response = await chat_client.get_response([system_msg, user_msg]) + response = await client.get_response([system_msg, user_msg]) # Return the model's final assistant text as the completion result return response.messages[-1].text if response.messages else "" diff --git a/python/samples/getting_started/orchestrations/group_chat_agent_manager.py b/python/samples/getting_started/orchestrations/group_chat_agent_manager.py index 07b0bb3f54..5338a77714 100644 --- a/python/samples/getting_started/orchestrations/group_chat_agent_manager.py +++ b/python/samples/getting_started/orchestrations/group_chat_agent_manager.py @@ -36,7 +36,7 @@ async def main() -> None: # Create a chat client using Azure OpenAI and Azure CLI credentials for all agents - chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIChatClient(credential=AzureCliCredential()) # Orchestrator agent that manages the conversation # Note: This agent (and the underlying chat client) must support structured outputs. 
@@ -46,7 +46,7 @@ async def main() -> None: name="Orchestrator", description="Coordinates multi-agent collaboration by selecting speakers", instructions=ORCHESTRATOR_AGENT_INSTRUCTIONS, - chat_client=chat_client, + client=client, ) # Participant agents @@ -54,14 +54,14 @@ async def main() -> None: name="Researcher", description="Collects relevant background information", instructions="Gather concise facts that help a teammate answer the question.", - chat_client=chat_client, + client=client, ) writer = Agent( name="Writer", description="Synthesizes polished answers from gathered information", instructions="Compose clear and structured answers using any notes provided.", - chat_client=chat_client, + client=client, ) # Build the group chat workflow diff --git a/python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py b/python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py index e26e9ad8cf..d4dc65e735 100644 --- a/python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py +++ b/python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py @@ -75,7 +75,7 @@ async def main() -> None: In your final_message, provide a brief synthesis highlighting key themes that emerged. 
""", - chat_client=_get_chat_client(), + client=_get_chat_client(), ) farmer = Agent( @@ -91,7 +91,7 @@ async def main() -> None: - Use concrete examples from your experience - Keep responses thoughtful but concise (2-4 sentences) """, - chat_client=_get_chat_client(), + client=_get_chat_client(), ) developer = Agent( @@ -107,7 +107,7 @@ async def main() -> None: - Use concrete examples from your experience - Keep responses thoughtful but concise (2-4 sentences) """, - chat_client=_get_chat_client(), + client=_get_chat_client(), ) teacher = Agent( @@ -124,7 +124,7 @@ async def main() -> None: - Use concrete examples from history or your teaching experience - Keep responses thoughtful but concise (2-4 sentences) """, - chat_client=_get_chat_client(), + client=_get_chat_client(), ) activist = Agent( @@ -140,7 +140,7 @@ async def main() -> None: - Use concrete examples from your activism - Keep responses thoughtful but concise (2-4 sentences) """, - chat_client=_get_chat_client(), + client=_get_chat_client(), ) spiritual_leader = Agent( @@ -156,7 +156,7 @@ async def main() -> None: - Use examples from spiritual teachings or community work - Keep responses thoughtful but concise (2-4 sentences) """, - chat_client=_get_chat_client(), + client=_get_chat_client(), ) artist = Agent( @@ -172,7 +172,7 @@ async def main() -> None: - Use examples from your art or cultural traditions - Keep responses thoughtful but concise (2-4 sentences) """, - chat_client=_get_chat_client(), + client=_get_chat_client(), ) immigrant = Agent( @@ -188,7 +188,7 @@ async def main() -> None: - Use examples from your immigrant and entrepreneurial journey - Keep responses thoughtful but concise (2-4 sentences) """, - chat_client=_get_chat_client(), + client=_get_chat_client(), ) doctor = Agent( @@ -204,7 +204,7 @@ async def main() -> None: - Use examples from healthcare and societal systems - Keep responses thoughtful but concise (2-4 sentences) """, - chat_client=_get_chat_client(), + 
client=_get_chat_client(), ) # termination_condition: stop after 10 assistant messages diff --git a/python/samples/getting_started/orchestrations/group_chat_simple_selector.py b/python/samples/getting_started/orchestrations/group_chat_simple_selector.py index b9f22237df..270333c8d5 100644 --- a/python/samples/getting_started/orchestrations/group_chat_simple_selector.py +++ b/python/samples/getting_started/orchestrations/group_chat_simple_selector.py @@ -33,7 +33,7 @@ def round_robin_selector(state: GroupChatState) -> str: async def main() -> None: # Create a chat client using Azure OpenAI and Azure CLI credentials for all agents - chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIChatClient(credential=AzureCliCredential()) # Participant agents expert = Agent( @@ -43,7 +43,7 @@ async def main() -> None: "Your job is to answer Python related questions and refine your answer " "based on feedback from all the other participants." ), - chat_client=chat_client, + client=client, ) verifier = Agent( @@ -54,7 +54,7 @@ async def main() -> None: "out statements that are technically true but practically dangerous." "If there is nothing woth pointing out, respond with 'The answer looks good to me.'" ), - chat_client=chat_client, + client=client, ) clarifier = Agent( @@ -65,7 +65,7 @@ async def main() -> None: "out jargons or complex terms that may be difficult for a beginner to understand." "If there is nothing worth pointing out, respond with 'The answer looks clear to me.'" ), - chat_client=chat_client, + client=client, ) skeptic = Agent( @@ -76,7 +76,7 @@ async def main() -> None: "out caveats, exceptions, and alternative perspectives." 
"If there is nothing worth pointing out, respond with 'I have no further questions.'" ), - chat_client=chat_client, + client=client, ) # Build the group chat workflow diff --git a/python/samples/getting_started/orchestrations/handoff_autonomous.py b/python/samples/getting_started/orchestrations/handoff_autonomous.py index 54eae77526..de08ba7854 100644 --- a/python/samples/getting_started/orchestrations/handoff_autonomous.py +++ b/python/samples/getting_started/orchestrations/handoff_autonomous.py @@ -37,10 +37,10 @@ def create_agents( - chat_client: AzureOpenAIChatClient, + client: AzureOpenAIChatClient, ) -> tuple[Agent, Agent, Agent]: """Create coordinator and specialists for autonomous iteration.""" - coordinator = chat_client.as_agent( + coordinator = client.as_agent( instructions=( "You are a coordinator. You break down a user query into a research task and a summary task. " "Assign the two tasks to the appropriate specialists, one after the other." @@ -48,7 +48,7 @@ def create_agents( name="coordinator", ) - research_agent = chat_client.as_agent( + research_agent = client.as_agent( instructions=( "You are a research specialist that explores topics thoroughly using web search. " "When given a research task, break it down into multiple aspects and explore each one. " @@ -60,7 +60,7 @@ def create_agents( name="research_agent", ) - summary_agent = chat_client.as_agent( + summary_agent = client.as_agent( instructions=( "You summarize research findings. Provide a concise, well-organized summary. When done, return " "control to the coordinator." 
@@ -73,8 +73,8 @@ def create_agents( async def main() -> None: """Run an autonomous handoff workflow with specialist iteration enabled.""" - chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) - coordinator, research_agent, summary_agent = create_agents(chat_client) + client = AzureOpenAIChatClient(credential=AzureCliCredential()) + coordinator, research_agent, summary_agent = create_agents(client) # Build the workflow with autonomous mode # In autonomous mode, agents continue iterating until they invoke a handoff tool diff --git a/python/samples/getting_started/orchestrations/handoff_simple.py b/python/samples/getting_started/orchestrations/handoff_simple.py index d79819436a..30f46cd940 100644 --- a/python/samples/getting_started/orchestrations/handoff_simple.py +++ b/python/samples/getting_started/orchestrations/handoff_simple.py @@ -54,17 +54,17 @@ def process_return(order_number: Annotated[str, "Order number to process return return f"Return initiated successfully for order {order_number}. You will receive return instructions via email." -def create_agents(chat_client: AzureOpenAIChatClient) -> tuple[Agent, Agent, Agent, Agent]: +def create_agents(client: AzureOpenAIChatClient) -> tuple[Agent, Agent, Agent, Agent]: """Create and configure the triage and specialist agents. Args: - chat_client: The AzureOpenAIChatClient to use for creating agents. + client: The AzureOpenAIChatClient to use for creating agents. Returns: Tuple of (triage_agent, refund_agent, order_agent, return_agent) """ # Triage agent: Acts as the frontline dispatcher - triage_agent = chat_client.as_agent( + triage_agent = client.as_agent( instructions=( "You are frontline support triage. Route customer issues to the appropriate specialist agents " "based on the problem described." 
@@ -73,7 +73,7 @@ def create_agents(chat_client: AzureOpenAIChatClient) -> tuple[Agent, Agent, Age ) # Refund specialist: Handles refund requests - refund_agent = chat_client.as_agent( + refund_agent = client.as_agent( instructions="You process refund requests.", name="refund_agent", # In a real application, an agent can have multiple tools; here we keep it simple @@ -81,7 +81,7 @@ def create_agents(chat_client: AzureOpenAIChatClient) -> tuple[Agent, Agent, Age ) # Order/shipping specialist: Resolves delivery issues - order_agent = chat_client.as_agent( + order_agent = client.as_agent( instructions="You handle order and shipping inquiries.", name="order_agent", # In a real application, an agent can have multiple tools; here we keep it simple @@ -89,7 +89,7 @@ def create_agents(chat_client: AzureOpenAIChatClient) -> tuple[Agent, Agent, Age ) # Return specialist: Handles return requests - return_agent = chat_client.as_agent( + return_agent = client.as_agent( instructions="You manage product return requests.", name="return_agent", # In a real application, an agent can have multiple tools; here we keep it simple @@ -189,10 +189,10 @@ async def main() -> None: replace the scripted_responses with actual user input collection. 
""" # Initialize the Azure OpenAI chat client - chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIChatClient(credential=AzureCliCredential()) # Create all agents: triage + specialists - triage, refund, order, support = create_agents(chat_client) + triage, refund, order, support = create_agents(client) # Build the handoff workflow # - participants: All agents that can participate in the workflow diff --git a/python/samples/getting_started/orchestrations/magentic.py b/python/samples/getting_started/orchestrations/magentic.py index 4fac876111..32bf11f5dc 100644 --- a/python/samples/getting_started/orchestrations/magentic.py +++ b/python/samples/getting_started/orchestrations/magentic.py @@ -51,14 +51,14 @@ async def main() -> None: "You are a Researcher. You find information without additional computation or quantitative analysis." ), # This agent requires the gpt-4o-search-preview model to perform web searches. - chat_client=OpenAIChatClient(model_id="gpt-4o-search-preview"), + client=OpenAIChatClient(model_id="gpt-4o-search-preview"), ) coder_agent = Agent( name="CoderAgent", description="A helpful assistant that writes and executes code to process and analyze data.", instructions="You solve questions using code. 
Please provide detailed analysis and computation process.", - chat_client=OpenAIResponsesClient(), + client=OpenAIResponsesClient(), tools=HostedCodeInterpreterTool(), ) @@ -67,7 +67,7 @@ async def main() -> None: name="MagenticManager", description="Orchestrator that coordinates the research and coding workflow", instructions="You coordinate a team to complete complex tasks efficiently.", - chat_client=OpenAIChatClient(), + client=OpenAIChatClient(), ) print("\nBuilding Magentic Workflow...") diff --git a/python/samples/getting_started/orchestrations/magentic_checkpoint.py b/python/samples/getting_started/orchestrations/magentic_checkpoint.py index 410687a0fb..4e138c0212 100644 --- a/python/samples/getting_started/orchestrations/magentic_checkpoint.py +++ b/python/samples/getting_started/orchestrations/magentic_checkpoint.py @@ -56,14 +56,14 @@ def build_workflow(checkpoint_storage: FileCheckpointStorage): name="ResearcherAgent", description="Collects background facts and references for the project.", instructions=("You are the research lead. 
Gather crisp bullet points the team should know."), - chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), + client=AzureOpenAIChatClient(credential=AzureCliCredential()), ) writer = Agent( name="WriterAgent", description="Synthesizes the final brief for stakeholders.", instructions=("You convert the research notes into a structured brief with milestones and risks."), - chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), + client=AzureOpenAIChatClient(credential=AzureCliCredential()), ) # Create a manager agent for orchestration @@ -71,7 +71,7 @@ def build_workflow(checkpoint_storage: FileCheckpointStorage): name="MagenticManager", description="Orchestrator that coordinates the research and writing workflow", instructions="You coordinate a team to complete complex tasks efficiently.", - chat_client=AzureOpenAIChatClient(credential=AzureCliCredential()), + client=AzureOpenAIChatClient(credential=AzureCliCredential()), ) # The builder wires in the Magentic orchestrator, sets the plan review path, and diff --git a/python/samples/getting_started/orchestrations/magentic_human_plan_review.py b/python/samples/getting_started/orchestrations/magentic_human_plan_review.py index 2ed71d29bf..b44ad4c35c 100644 --- a/python/samples/getting_started/orchestrations/magentic_human_plan_review.py +++ b/python/samples/getting_started/orchestrations/magentic_human_plan_review.py @@ -96,21 +96,21 @@ async def main() -> None: name="ResearcherAgent", description="Specialist in research and information gathering", instructions="You are a Researcher. You find information and gather facts.", - chat_client=OpenAIChatClient(model_id="gpt-4o"), + client=OpenAIChatClient(model_id="gpt-4o"), ) analyst_agent = Agent( name="AnalystAgent", description="Data analyst who processes and summarizes research findings", instructions="You are an Analyst. 
You analyze findings and create summaries.", - chat_client=OpenAIChatClient(model_id="gpt-4o"), + client=OpenAIChatClient(model_id="gpt-4o"), ) manager_agent = Agent( name="MagenticManager", description="Orchestrator that coordinates the workflow", instructions="You coordinate a team to complete tasks efficiently.", - chat_client=OpenAIChatClient(model_id="gpt-4o"), + client=OpenAIChatClient(model_id="gpt-4o"), ) print("\nBuilding Magentic Workflow with Human Plan Review...") diff --git a/python/samples/getting_started/orchestrations/sequential_agents.py b/python/samples/getting_started/orchestrations/sequential_agents.py index 7de09651cc..7d77ef35c6 100644 --- a/python/samples/getting_started/orchestrations/sequential_agents.py +++ b/python/samples/getting_started/orchestrations/sequential_agents.py @@ -30,14 +30,14 @@ async def main() -> None: # 1) Create agents - chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIChatClient(credential=AzureCliCredential()) - writer = chat_client.as_agent( + writer = client.as_agent( instructions=("You are a concise copywriter. Provide a single, punchy marketing sentence based on the prompt."), name="writer", ) - reviewer = chat_client.as_agent( + reviewer = client.as_agent( instructions=("You are a thoughtful reviewer. 
Give brief feedback on the previous assistant message."), name="reviewer", ) diff --git a/python/samples/getting_started/orchestrations/sequential_custom_executors.py b/python/samples/getting_started/orchestrations/sequential_custom_executors.py index 06cbd89764..4de13cc4d3 100644 --- a/python/samples/getting_started/orchestrations/sequential_custom_executors.py +++ b/python/samples/getting_started/orchestrations/sequential_custom_executors.py @@ -58,8 +58,8 @@ async def summarize(self, agent_response: AgentExecutorResponse, ctx: WorkflowCo async def main() -> None: # 1) Create a content agent - chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) - content = chat_client.as_agent( + client = AzureOpenAIChatClient(credential=AzureCliCredential()) + content = client.as_agent( instructions="Produce a concise paragraph answering the user's request.", name="content", ) diff --git a/python/samples/getting_started/purview_agent/README.md b/python/samples/getting_started/purview_agent/README.md index 1e8e671644..175839e9d3 100644 --- a/python/samples/getting_started/purview_agent/README.md +++ b/python/samples/getting_started/purview_agent/README.md @@ -100,7 +100,7 @@ Prompt blocks set a system-level message: `Prompt blocked by policy` and termina ```python agent = Agent( - chat_client=chat_client, + client=client, instructions="You are good at telling jokes.", name="Joker", middleware=[ diff --git a/python/samples/getting_started/purview_agent/sample_purview_agent.py b/python/samples/getting_started/purview_agent/sample_purview_agent.py index 7d759a49b5..7ad4eec87e 100644 --- a/python/samples/getting_started/purview_agent/sample_purview_agent.py +++ b/python/samples/getting_started/purview_agent/sample_purview_agent.py @@ -141,7 +141,7 @@ async def run_with_agent_middleware() -> None: deployment = os.environ.get("AZURE_OPENAI_DEPLOYMENT_NAME", "gpt-4o-mini") user_id = os.environ.get("PURVIEW_DEFAULT_USER_ID") - chat_client = 
AzureOpenAIChatClient(deployment_name=deployment, endpoint=endpoint, credential=AzureCliCredential()) + client = AzureOpenAIChatClient(deployment_name=deployment, endpoint=endpoint, credential=AzureCliCredential()) purview_agent_middleware = PurviewPolicyMiddleware( build_credential(), @@ -151,7 +151,7 @@ async def run_with_agent_middleware() -> None: ) agent = Agent( - chat_client=chat_client, + client=client, instructions=JOKER_INSTRUCTIONS, name=JOKER_NAME, middleware=[purview_agent_middleware], @@ -180,7 +180,7 @@ async def run_with_chat_middleware() -> None: deployment = os.environ.get("AZURE_OPENAI_DEPLOYMENT_NAME", default="gpt-4o-mini") user_id = os.environ.get("PURVIEW_DEFAULT_USER_ID") - chat_client = AzureOpenAIChatClient( + client = AzureOpenAIChatClient( deployment_name=deployment, endpoint=endpoint, credential=AzureCliCredential(), @@ -195,7 +195,7 @@ async def run_with_chat_middleware() -> None: ) agent = Agent( - chat_client=chat_client, + client=client, instructions=JOKER_INSTRUCTIONS, name=JOKER_NAME, ) @@ -229,7 +229,7 @@ async def run_with_custom_cache_provider() -> None: deployment = os.environ.get("AZURE_OPENAI_DEPLOYMENT_NAME", "gpt-4o-mini") user_id = os.environ.get("PURVIEW_DEFAULT_USER_ID") - chat_client = AzureOpenAIChatClient(deployment_name=deployment, endpoint=endpoint, credential=AzureCliCredential()) + client = AzureOpenAIChatClient(deployment_name=deployment, endpoint=endpoint, credential=AzureCliCredential()) custom_cache = SimpleDictCacheProvider() @@ -242,7 +242,7 @@ async def run_with_custom_cache_provider() -> None: ) agent = Agent( - chat_client=chat_client, + client=client, instructions=JOKER_INSTRUCTIONS, name=JOKER_NAME, middleware=[purview_agent_middleware], @@ -271,7 +271,7 @@ async def run_with_custom_cache_provider() -> None: deployment = os.environ.get("AZURE_OPENAI_DEPLOYMENT_NAME", "gpt-4o-mini") user_id = os.environ.get("PURVIEW_DEFAULT_USER_ID") - chat_client = AzureOpenAIChatClient(deployment_name=deployment, 
endpoint=endpoint, credential=AzureCliCredential()) + client = AzureOpenAIChatClient(deployment_name=deployment, endpoint=endpoint, credential=AzureCliCredential()) # No cache_provider specified - uses default InMemoryCacheProvider purview_agent_middleware = PurviewPolicyMiddleware( @@ -284,7 +284,7 @@ async def run_with_custom_cache_provider() -> None: ) agent = Agent( - chat_client=chat_client, + client=client, instructions=JOKER_INSTRUCTIONS, name=JOKER_NAME, middleware=[purview_agent_middleware], diff --git a/python/samples/getting_started/tools/function_tool_with_approval.py b/python/samples/getting_started/tools/function_tool_with_approval.py index 855ee20719..b9f1a84975 100644 --- a/python/samples/getting_started/tools/function_tool_with_approval.py +++ b/python/samples/getting_started/tools/function_tool_with_approval.py @@ -128,7 +128,7 @@ async def run_weather_agent_with_approval(stream: bool) -> None: print(f"\n=== Weather Agent with Approval Required ({'Streaming' if stream else 'Non-Streaming'}) ===\n") async with Agent( - chat_client=OpenAIResponsesClient(), + client=OpenAIResponsesClient(), name="WeatherAgent", instructions=("You are a helpful weather assistant. 
Use the get_weather tool to provide weather information."), tools=[get_weather, get_weather_detail], diff --git a/python/samples/getting_started/tools/function_tool_with_approval_and_threads.py b/python/samples/getting_started/tools/function_tool_with_approval_and_threads.py index 614c71e936..e3f442ecee 100644 --- a/python/samples/getting_started/tools/function_tool_with_approval_and_threads.py +++ b/python/samples/getting_started/tools/function_tool_with_approval_and_threads.py @@ -29,7 +29,7 @@ async def approval_example() -> None: print("=== Tool Approval with Thread ===\n") agent = Agent( - chat_client=AzureOpenAIChatClient(), + client=AzureOpenAIChatClient(), name="CalendarAgent", instructions="You are a helpful calendar assistant.", tools=[add_to_calendar], @@ -65,7 +65,7 @@ async def rejection_example() -> None: print("=== Tool Rejection with Thread ===\n") agent = Agent( - chat_client=AzureOpenAIChatClient(), + client=AzureOpenAIChatClient(), name="CalendarAgent", instructions="You are a helpful calendar assistant.", tools=[add_to_calendar], diff --git a/python/samples/getting_started/workflows/README.md b/python/samples/getting_started/workflows/README.md index ce4aee4172..393047fb2f 100644 --- a/python/samples/getting_started/workflows/README.md +++ b/python/samples/getting_started/workflows/README.md @@ -169,7 +169,7 @@ Sequential orchestration uses a few small adapter nodes for plumbing: ### Environment Variables -- **AzureOpenAIChatClient**: Set Azure OpenAI environment variables as documented [here](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/chat_client/README.md#environment-variables). +- **AzureOpenAIChatClient**: Set Azure OpenAI environment variables as documented [here](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/client/README.md#environment-variables). 
These variables are required for samples that construct `AzureOpenAIChatClient` - **OpenAI** (used in orchestration samples): diff --git a/python/samples/getting_started/workflows/_start-here/step2_agents_in_a_workflow.py b/python/samples/getting_started/workflows/_start-here/step2_agents_in_a_workflow.py index aa6378c433..8a8ac369e4 100644 --- a/python/samples/getting_started/workflows/_start-here/step2_agents_in_a_workflow.py +++ b/python/samples/getting_started/workflows/_start-here/step2_agents_in_a_workflow.py @@ -27,15 +27,15 @@ async def main(): """Build and run a simple two node agent workflow: Writer then Reviewer.""" # Create the Azure chat client. AzureCliCredential uses your current az login. - chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) - writer_agent = chat_client.as_agent( + client = AzureOpenAIChatClient(credential=AzureCliCredential()) + writer_agent = client.as_agent( instructions=( "You are an excellent content writer. You create new content and edit contents based on the feedback." ), name="writer", ) - reviewer_agent = chat_client.as_agent( + reviewer_agent = client.as_agent( instructions=( "You are an excellent content reviewer." "Provide actionable feedback to the writer about the provided content." diff --git a/python/samples/getting_started/workflows/_start-here/step3_streaming.py b/python/samples/getting_started/workflows/_start-here/step3_streaming.py index c69f569756..7c5a7c86a7 100644 --- a/python/samples/getting_started/workflows/_start-here/step3_streaming.py +++ b/python/samples/getting_started/workflows/_start-here/step3_streaming.py @@ -26,15 +26,15 @@ async def main(): """Build the two node workflow and run it with streaming to observe events.""" # Create the Azure chat client. AzureCliCredential uses your current az login. 
- chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) - writer_agent = chat_client.as_agent( + client = AzureOpenAIChatClient(credential=AzureCliCredential()) + writer_agent = client.as_agent( instructions=( "You are an excellent content writer. You create new content and edit contents based on the feedback." ), name="writer", ) - reviewer_agent = chat_client.as_agent( + reviewer_agent = client.as_agent( instructions=( "You are an excellent content reviewer." "Provide actionable feedback to the writer about the provided content." diff --git a/python/samples/getting_started/workflows/agents/concurrent_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/concurrent_workflow_as_agent.py index 7c10455eaa..42202aec5f 100644 --- a/python/samples/getting_started/workflows/agents/concurrent_workflow_as_agent.py +++ b/python/samples/getting_started/workflows/agents/concurrent_workflow_as_agent.py @@ -38,9 +38,9 @@ def clear_and_redraw(buffers: dict[str, str], agent_order: list[str]) -> None: async def main() -> None: # 1) Create three domain agents using AzureOpenAIChatClient - chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIChatClient(credential=AzureCliCredential()) - researcher = chat_client.as_agent( + researcher = client.as_agent( instructions=( "You're an expert market and product researcher. Given a prompt, provide concise, factual insights," " opportunities, and risks." @@ -48,7 +48,7 @@ async def main() -> None: name="researcher", ) - marketer = chat_client.as_agent( + marketer = client.as_agent( instructions=( "You're a creative marketing strategist. Craft compelling value propositions and target messaging" " aligned to the prompt." @@ -56,7 +56,7 @@ async def main() -> None: name="marketer", ) - legal = chat_client.as_agent( + legal = client.as_agent( instructions=( "You're a cautious legal/compliance reviewer. 
Highlight constraints, disclaimers, and policy concerns" " based on the prompt." diff --git a/python/samples/getting_started/workflows/agents/group_chat_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/group_chat_workflow_as_agent.py index b259865824..9bf24c82e1 100644 --- a/python/samples/getting_started/workflows/agents/group_chat_workflow_as_agent.py +++ b/python/samples/getting_started/workflows/agents/group_chat_workflow_as_agent.py @@ -23,14 +23,14 @@ async def main() -> None: name="Researcher", description="Collects relevant background information.", instructions="Gather concise facts that help a teammate answer the question.", - chat_client=OpenAIChatClient(model_id="gpt-4o-mini"), + client=OpenAIChatClient(model_id="gpt-4o-mini"), ) writer = Agent( name="Writer", description="Synthesizes a polished answer using the gathered notes.", instructions="Compose clear and structured answers using any notes provided.", - chat_client=OpenAIResponsesClient(), + client=OpenAIResponsesClient(), ) # intermediate_outputs=True: Enable intermediate outputs to observe the conversation as it unfolds diff --git a/python/samples/getting_started/workflows/agents/handoff_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/handoff_workflow_as_agent.py index 15ac42987b..a03cf4aec2 100644 --- a/python/samples/getting_started/workflows/agents/handoff_workflow_as_agent.py +++ b/python/samples/getting_started/workflows/agents/handoff_workflow_as_agent.py @@ -57,17 +57,17 @@ def process_return(order_number: Annotated[str, "Order number to process return return f"Return initiated successfully for order {order_number}. You will receive return instructions via email." -def create_agents(chat_client: AzureOpenAIChatClient) -> tuple[Agent, Agent, Agent, Agent]: +def create_agents(client: AzureOpenAIChatClient) -> tuple[Agent, Agent, Agent, Agent]: """Create and configure the triage and specialist agents. 
Args: - chat_client: The AzureOpenAIChatClient to use for creating agents. + client: The AzureOpenAIChatClient to use for creating agents. Returns: Tuple of (triage_agent, refund_agent, order_agent, return_agent) """ # Triage agent: Acts as the frontline dispatcher - triage_agent = chat_client.as_agent( + triage_agent = client.as_agent( instructions=( "You are frontline support triage. Route customer issues to the appropriate specialist agents " "based on the problem described." @@ -76,7 +76,7 @@ def create_agents(chat_client: AzureOpenAIChatClient) -> tuple[Agent, Agent, Age ) # Refund specialist: Handles refund requests - refund_agent = chat_client.as_agent( + refund_agent = client.as_agent( instructions="You process refund requests.", name="refund_agent", # In a real application, an agent can have multiple tools; here we keep it simple @@ -84,7 +84,7 @@ def create_agents(chat_client: AzureOpenAIChatClient) -> tuple[Agent, Agent, Age ) # Order/shipping specialist: Resolves delivery issues - order_agent = chat_client.as_agent( + order_agent = client.as_agent( instructions="You handle order and shipping inquiries.", name="order_agent", # In a real application, an agent can have multiple tools; here we keep it simple @@ -92,7 +92,7 @@ def create_agents(chat_client: AzureOpenAIChatClient) -> tuple[Agent, Agent, Age ) # Return specialist: Handles return requests - return_agent = chat_client.as_agent( + return_agent = client.as_agent( instructions="You manage product return requests.", name="return_agent", # In a real application, an agent can have multiple tools; here we keep it simple @@ -147,10 +147,10 @@ async def main() -> None: replace the scripted_responses with actual user input collection. 
""" # Initialize the Azure OpenAI chat client - chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIChatClient(credential=AzureCliCredential()) # Create all agents: triage + specialists - triage, refund, order, support = create_agents(chat_client) + triage, refund, order, support = create_agents(client) # Build the handoff workflow # - participants: All agents that can participate in the workflow diff --git a/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py index f6078cd494..8951294606 100644 --- a/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py +++ b/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py @@ -29,14 +29,14 @@ async def main() -> None: "You are a Researcher. You find information without additional computation or quantitative analysis." ), # This agent requires the gpt-4o-search-preview model to perform web searches. - chat_client=OpenAIChatClient(model_id="gpt-4o-search-preview"), + client=OpenAIChatClient(model_id="gpt-4o-search-preview"), ) coder_agent = Agent( name="CoderAgent", description="A helpful assistant that writes and executes code to process and analyze data.", instructions="You solve questions using code. 
Please provide detailed analysis and computation process.", - chat_client=OpenAIResponsesClient(), + client=OpenAIResponsesClient(), tools=HostedCodeInterpreterTool(), ) @@ -45,7 +45,7 @@ async def main() -> None: name="MagenticManager", description="Orchestrator that coordinates the research and coding workflow", instructions="You coordinate a team to complete complex tasks efficiently.", - chat_client=OpenAIChatClient(), + client=OpenAIChatClient(), ) print("\nBuilding Magentic Workflow...") diff --git a/python/samples/getting_started/workflows/agents/sequential_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/sequential_workflow_as_agent.py index 7fc1720cbc..73e8cbb2c7 100644 --- a/python/samples/getting_started/workflows/agents/sequential_workflow_as_agent.py +++ b/python/samples/getting_started/workflows/agents/sequential_workflow_as_agent.py @@ -27,14 +27,14 @@ async def main() -> None: # 1) Create agents - chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIChatClient(credential=AzureCliCredential()) - writer = chat_client.as_agent( + writer = client.as_agent( instructions=("You are a concise copywriter. Provide a single, punchy marketing sentence based on the prompt."), name="writer", ) - reviewer = chat_client.as_agent( + reviewer = client.as_agent( instructions=("You are a thoughtful reviewer. 
Give brief feedback on the previous assistant message."), name="reviewer", ) diff --git a/python/samples/getting_started/workflows/agents/workflow_as_agent_human_in_the_loop.py b/python/samples/getting_started/workflows/agents/workflow_as_agent_human_in_the_loop.py index d0cf835037..30c1d78a3e 100644 --- a/python/samples/getting_started/workflows/agents/workflow_as_agent_human_in_the_loop.py +++ b/python/samples/getting_started/workflows/agents/workflow_as_agent_human_in_the_loop.py @@ -16,9 +16,9 @@ sys.path.insert(0, str(_SAMPLES_ROOT)) from agent_framework import ( # noqa: E402 - Message, Content, Executor, + Message, WorkflowAgent, WorkflowBuilder, WorkflowContext, diff --git a/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py b/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py index aefcf9b1e5..a41ede52d1 100644 --- a/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py +++ b/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py @@ -80,10 +80,10 @@ async def main() -> None: print("=" * 70) # Create chat client - chat_client = OpenAIChatClient() + client = OpenAIChatClient() # Create agent with tools that use kwargs - agent = chat_client.as_agent( + agent = client.as_agent( name="assistant", instructions=( "You are a helpful assistant. Use the available tools to help users. 
" diff --git a/python/samples/getting_started/workflows/agents/workflow_as_agent_reflection_pattern.py b/python/samples/getting_started/workflows/agents/workflow_as_agent_reflection_pattern.py index bd8baabcae..d2aa65c9a2 100644 --- a/python/samples/getting_started/workflows/agents/workflow_as_agent_reflection_pattern.py +++ b/python/samples/getting_started/workflows/agents/workflow_as_agent_reflection_pattern.py @@ -6,9 +6,9 @@ from agent_framework import ( AgentResponse, - SupportsChatGetResponse, - Message, Executor, + Message, + SupportsChatGetResponse, WorkflowBuilder, WorkflowContext, handler, @@ -60,9 +60,9 @@ class ReviewResponse: class Reviewer(Executor): """Executor that reviews agent responses and provides structured feedback.""" - def __init__(self, id: str, chat_client: SupportsChatGetResponse) -> None: + def __init__(self, id: str, client: SupportsChatGetResponse) -> None: super().__init__(id=id) - self._chat_client = chat_client + self._chat_client = client @handler async def review(self, request: ReviewRequest, ctx: WorkflowContext[ReviewResponse]) -> None: @@ -112,9 +112,9 @@ class _Response(BaseModel): class Worker(Executor): """Executor that generates responses and incorporates feedback when necessary.""" - def __init__(self, id: str, chat_client: SupportsChatGetResponse) -> None: + def __init__(self, id: str, client: SupportsChatGetResponse) -> None: super().__init__(id=id) - self._chat_client = chat_client + self._chat_client = client self._pending_requests: dict[str, tuple[ReviewRequest, list[Message]]] = {} @handler diff --git a/python/samples/getting_started/workflows/agents/workflow_as_agent_with_thread.py b/python/samples/getting_started/workflows/agents/workflow_as_agent_with_thread.py index 3b5044e05e..1165deac55 100644 --- a/python/samples/getting_started/workflows/agents/workflow_as_agent_with_thread.py +++ b/python/samples/getting_started/workflows/agents/workflow_as_agent_with_thread.py @@ -37,7 +37,7 @@ async def main() -> None: # 
Create a chat client - chat_client = OpenAIChatClient() + client = OpenAIChatClient() assistant = chat_client.as_agent( name="assistant", @@ -119,7 +119,7 @@ async def demonstrate_thread_serialization() -> None: This shows how conversation history can be persisted and restored, enabling long-running conversational workflows. """ - chat_client = OpenAIChatClient() + client = OpenAIChatClient() memory_assistant = chat_client.as_agent( name="memory_assistant", diff --git a/python/samples/getting_started/workflows/checkpoint/workflow_as_agent_checkpoint.py b/python/samples/getting_started/workflows/checkpoint/workflow_as_agent_checkpoint.py index a18ab83bf7..4fc980e008 100644 --- a/python/samples/getting_started/workflows/checkpoint/workflow_as_agent_checkpoint.py +++ b/python/samples/getting_started/workflows/checkpoint/workflow_as_agent_checkpoint.py @@ -40,14 +40,14 @@ async def basic_checkpointing() -> None: print("Basic Checkpointing with Workflow as Agent") print("=" * 60) - chat_client = OpenAIChatClient() + client = OpenAIChatClient() - assistant = chat_client.as_agent( + assistant = client.as_agent( name="assistant", instructions="You are a helpful assistant. Keep responses brief.", ) - reviewer = chat_client.as_agent( + reviewer = client.as_agent( name="reviewer", instructions="You are a reviewer. Provide a one-sentence summary of the assistant's response.", ) @@ -81,9 +81,9 @@ async def checkpointing_with_thread() -> None: print("Checkpointing with Thread Conversation History") print("=" * 60) - chat_client = OpenAIChatClient() + client = OpenAIChatClient() - assistant = chat_client.as_agent( + assistant = client.as_agent( name="memory_assistant", instructions="You are a helpful assistant with good memory. 
Reference previous conversation when relevant.", ) @@ -124,9 +124,9 @@ async def streaming_with_checkpoints() -> None: print("Streaming with Checkpointing") print("=" * 60) - chat_client = OpenAIChatClient() + client = OpenAIChatClient() - assistant = chat_client.as_agent( + assistant = client.as_agent( name="streaming_assistant", instructions="You are a helpful assistant.", ) diff --git a/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py b/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py index 879d4c3222..af6ed4d61a 100644 --- a/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py +++ b/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py @@ -74,10 +74,10 @@ async def main() -> None: print("=" * 70) # Create chat client - chat_client = OpenAIChatClient() + client = OpenAIChatClient() # Create an agent with tools that use kwargs - inner_agent = chat_client.as_agent( + inner_agent = client.as_agent( name="data_agent", instructions=( "You are a data access agent. Use the available tools to help users. 
" diff --git a/python/samples/getting_started/workflows/declarative/customer_support/main.py b/python/samples/getting_started/workflows/declarative/customer_support/main.py index b06633524f..7b47fa2930 100644 --- a/python/samples/getting_started/workflows/declarative/customer_support/main.py +++ b/python/samples/getting_started/workflows/declarative/customer_support/main.py @@ -164,43 +164,43 @@ async def main() -> None: plugin = TicketingPlugin() # Create Azure OpenAI client - chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIChatClient(credential=AzureCliCredential()) # Create agents with structured outputs - self_service_agent = chat_client.as_agent( + self_service_agent = client.as_agent( name="SelfServiceAgent", instructions=SELF_SERVICE_INSTRUCTIONS, default_options={"response_format": SelfServiceResponse}, ) - ticketing_agent = chat_client.as_agent( + ticketing_agent = client.as_agent( name="TicketingAgent", instructions=TICKETING_INSTRUCTIONS, tools=plugin.get_functions(), default_options={"response_format": TicketingResponse}, ) - routing_agent = chat_client.as_agent( + routing_agent = client.as_agent( name="TicketRoutingAgent", instructions=TICKET_ROUTING_INSTRUCTIONS, tools=[plugin.get_ticket], default_options={"response_format": RoutingResponse}, ) - windows_support_agent = chat_client.as_agent( + windows_support_agent = client.as_agent( name="WindowsSupportAgent", instructions=WINDOWS_SUPPORT_INSTRUCTIONS, tools=[plugin.get_ticket], default_options={"response_format": SupportResponse}, ) - resolution_agent = chat_client.as_agent( + resolution_agent = client.as_agent( name="TicketResolutionAgent", instructions=RESOLUTION_INSTRUCTIONS, tools=[plugin.resolve_ticket], ) - escalation_agent = chat_client.as_agent( + escalation_agent = client.as_agent( name="TicketEscalationAgent", instructions=ESCALATION_INSTRUCTIONS, tools=[plugin.get_ticket, plugin.send_notification], diff --git 
a/python/samples/getting_started/workflows/declarative/deep_research/main.py b/python/samples/getting_started/workflows/declarative/deep_research/main.py index 3e4ecf7d19..d949a210f9 100644 --- a/python/samples/getting_started/workflows/declarative/deep_research/main.py +++ b/python/samples/getting_started/workflows/declarative/deep_research/main.py @@ -122,41 +122,41 @@ class ManagerResponse(BaseModel): async def main() -> None: """Run the deep research workflow.""" # Create Azure OpenAI client - chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIChatClient(credential=AzureCliCredential()) # Create agents - research_agent = chat_client.as_agent( + research_agent = client.as_agent( name="ResearchAgent", instructions=RESEARCH_INSTRUCTIONS, ) - planner_agent = chat_client.as_agent( + planner_agent = client.as_agent( name="PlannerAgent", instructions=PLANNER_INSTRUCTIONS, ) - manager_agent = chat_client.as_agent( + manager_agent = client.as_agent( name="ManagerAgent", instructions=MANAGER_INSTRUCTIONS, default_options={"response_format": ManagerResponse}, ) - summary_agent = chat_client.as_agent( + summary_agent = client.as_agent( name="SummaryAgent", instructions=SUMMARY_INSTRUCTIONS, ) - knowledge_agent = chat_client.as_agent( + knowledge_agent = client.as_agent( name="KnowledgeAgent", instructions=KNOWLEDGE_INSTRUCTIONS, ) - coder_agent = chat_client.as_agent( + coder_agent = client.as_agent( name="CoderAgent", instructions=CODER_INSTRUCTIONS, ) - weather_agent = chat_client.as_agent( + weather_agent = client.as_agent( name="WeatherAgent", instructions=WEATHER_INSTRUCTIONS, ) diff --git a/python/samples/getting_started/workflows/declarative/function_tools/README.md b/python/samples/getting_started/workflows/declarative/function_tools/README.md index 42f3dc6497..78e7cf361e 100644 --- a/python/samples/getting_started/workflows/declarative/function_tools/README.md +++ 
b/python/samples/getting_started/workflows/declarative/function_tools/README.md @@ -72,8 +72,8 @@ Session Complete ```python # Create the agent with tools -chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) -menu_agent = chat_client.as_agent( +client = AzureOpenAIChatClient(credential=AzureCliCredential()) +menu_agent = client.as_agent( name="MenuAgent", instructions="You are a helpful restaurant menu assistant...", tools=[get_menu, get_specials, get_item_price], diff --git a/python/samples/getting_started/workflows/declarative/function_tools/main.py b/python/samples/getting_started/workflows/declarative/function_tools/main.py index 6e4b3f272c..056cf419a4 100644 --- a/python/samples/getting_started/workflows/declarative/function_tools/main.py +++ b/python/samples/getting_started/workflows/declarative/function_tools/main.py @@ -62,8 +62,8 @@ def get_item_price(name: Annotated[str, Field(description="Menu item name")]) -> async def main(): # Create agent with tools - chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) - menu_agent = chat_client.as_agent( + client = AzureOpenAIChatClient(credential=AzureCliCredential()) + menu_agent = client.as_agent( name="MenuAgent", instructions="Answer questions about menu items, specials, and prices.", tools=[get_menu, get_specials, get_item_price], diff --git a/python/samples/getting_started/workflows/declarative/marketing/main.py b/python/samples/getting_started/workflows/declarative/marketing/main.py index 2f5e999aa7..7e5b5ec7c2 100644 --- a/python/samples/getting_started/workflows/declarative/marketing/main.py +++ b/python/samples/getting_started/workflows/declarative/marketing/main.py @@ -49,17 +49,17 @@ async def main() -> None: """Run the marketing workflow with real Azure AI agents.""" - chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIChatClient(credential=AzureCliCredential()) - analyst_agent = chat_client.as_agent( + analyst_agent = 
client.as_agent( name="AnalystAgent", instructions=ANALYST_INSTRUCTIONS, ) - writer_agent = chat_client.as_agent( + writer_agent = client.as_agent( name="WriterAgent", instructions=WRITER_INSTRUCTIONS, ) - editor_agent = chat_client.as_agent( + editor_agent = client.as_agent( name="EditorAgent", instructions=EDITOR_INSTRUCTIONS, ) diff --git a/python/samples/getting_started/workflows/declarative/student_teacher/main.py b/python/samples/getting_started/workflows/declarative/student_teacher/main.py index ec06c4fc7d..28c9ab0446 100644 --- a/python/samples/getting_started/workflows/declarative/student_teacher/main.py +++ b/python/samples/getting_started/workflows/declarative/student_teacher/main.py @@ -51,15 +51,15 @@ async def main() -> None: """Run the student-teacher workflow with real Azure AI agents.""" # Create chat client - chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIChatClient(credential=AzureCliCredential()) # Create student and teacher agents - student_agent = chat_client.as_agent( + student_agent = client.as_agent( name="StudentAgent", instructions=STUDENT_INSTRUCTIONS, ) - teacher_agent = chat_client.as_agent( + teacher_agent = client.as_agent( name="TeacherAgent", instructions=TEACHER_INSTRUCTIONS, ) diff --git a/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py b/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py index 5a61d8cf4e..85417a0f91 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py @@ -91,10 +91,10 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str async def main() -> None: - chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIChatClient(credential=AzureCliCredential()) # Create agents for a group discussion - optimist = 
chat_client.as_agent( + optimist = client.as_agent( name="optimist", instructions=( "You are an optimistic team member. You see opportunities and potential " @@ -103,7 +103,7 @@ async def main() -> None: ), ) - pragmatist = chat_client.as_agent( + pragmatist = client.as_agent( name="pragmatist", instructions=( "You are a pragmatic team member. You focus on practical implementation " @@ -112,7 +112,7 @@ async def main() -> None: ), ) - creative = chat_client.as_agent( + creative = client.as_agent( name="creative", instructions=( "You are a creative team member. You propose innovative solutions and " @@ -122,7 +122,7 @@ async def main() -> None: ) # Orchestrator coordinates the discussion - orchestrator = chat_client.as_agent( + orchestrator = client.as_agent( name="orchestrator", instructions=( "You are a discussion manager coordinating a team conversation between participants. " diff --git a/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py b/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py index f42c476086..eb3578c6b0 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py @@ -88,15 +88,15 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str async def main() -> None: - chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIChatClient(credential=AzureCliCredential()) # Create agents for a sequential document review workflow - drafter = chat_client.as_agent( + drafter = client.as_agent( name="drafter", instructions=("You are a document drafter. When given a topic, create a brief draft (2-3 sentences)."), ) - editor = chat_client.as_agent( + editor = client.as_agent( name="editor", instructions=( "You are an editor. Review the draft and make improvements. 
" @@ -104,7 +104,7 @@ async def main() -> None: ), ) - finalizer = chat_client.as_agent( + finalizer = client.as_agent( name="finalizer", instructions=( "You are a finalizer. Take the edited content and create a polished final version. " diff --git a/python/samples/getting_started/workflows/state-management/workflow_kwargs.py b/python/samples/getting_started/workflows/state-management/workflow_kwargs.py index d574c85dfb..5125464a1a 100644 --- a/python/samples/getting_started/workflows/state-management/workflow_kwargs.py +++ b/python/samples/getting_started/workflows/state-management/workflow_kwargs.py @@ -74,10 +74,10 @@ async def main() -> None: print("=" * 70) # Create chat client - chat_client = OpenAIChatClient() + client = OpenAIChatClient() # Create agent with tools that use kwargs - agent = chat_client.as_agent( + agent = client.as_agent( name="assistant", instructions=( "You are a helpful assistant. Use the available tools to help users. " diff --git a/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py index a8da54f17e..eb0375551e 100644 --- a/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py +++ b/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py @@ -126,9 +126,9 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str async def main() -> None: # 3. Create two agents focused on different stocks but with the same tool sets - chat_client = OpenAIChatClient() + client = OpenAIChatClient() - microsoft_agent = chat_client.as_agent( + microsoft_agent = client.as_agent( name="MicrosoftAgent", instructions=( "You are a personal trading assistant focused on Microsoft (MSFT). 
" @@ -137,7 +137,7 @@ async def main() -> None: tools=[get_stock_price, get_market_sentiment, get_portfolio_balance, execute_trade], ) - google_agent = chat_client.as_agent( + google_agent = client.as_agent( name="GoogleAgent", instructions=( "You are a personal trading assistant focused on Google (GOOGL). " diff --git a/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py index ef8a900cda..9eb5ac667d 100644 --- a/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py +++ b/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py @@ -126,9 +126,9 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str async def main() -> None: # 3. Create specialized agents - chat_client = OpenAIChatClient() + client = OpenAIChatClient() - qa_engineer = chat_client.as_agent( + qa_engineer = client.as_agent( name="QAEngineer", instructions=( "You are a QA engineer responsible for running tests before deployment. " @@ -137,7 +137,7 @@ async def main() -> None: tools=[run_tests], ) - devops_engineer = chat_client.as_agent( + devops_engineer = client.as_agent( name="DevOpsEngineer", instructions=( "You are a DevOps engineer responsible for deployments. First check staging " diff --git a/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py index 359fc4bb00..20a778c745 100644 --- a/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py +++ b/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py @@ -99,8 +99,8 @@ async def process_event_stream(stream: AsyncIterable[WorkflowEvent]) -> dict[str async def main() -> None: # 2. 
Create the agent with tools (approval mode is set per-tool via decorator) - chat_client = OpenAIChatClient() - database_agent = chat_client.as_agent( + client = OpenAIChatClient() + database_agent = client.as_agent( name="DatabaseAgent", instructions=( "You are a database assistant. You can view the database schema and execute " diff --git a/python/samples/semantic-kernel-migration/openai_responses/01_basic_responses_agent.py b/python/samples/semantic-kernel-migration/openai_responses/01_basic_responses_agent.py index 2cea118570..fce6ecb6ad 100644 --- a/python/samples/semantic-kernel-migration/openai_responses/01_basic_responses_agent.py +++ b/python/samples/semantic-kernel-migration/openai_responses/01_basic_responses_agent.py @@ -40,7 +40,7 @@ async def run_agent_framework() -> None: # AF Agent can swap in an OpenAIResponsesClient directly. chat_agent = Agent( - chat_client=OpenAIResponsesClient(), + client=OpenAIResponsesClient(), instructions="Answer in one concise sentence.", name="Expert", ) diff --git a/python/samples/semantic-kernel-migration/openai_responses/02_responses_agent_with_tool.py b/python/samples/semantic-kernel-migration/openai_responses/02_responses_agent_with_tool.py index b2e57f073b..599367f9c5 100644 --- a/python/samples/semantic-kernel-migration/openai_responses/02_responses_agent_with_tool.py +++ b/python/samples/semantic-kernel-migration/openai_responses/02_responses_agent_with_tool.py @@ -51,7 +51,7 @@ async def add(a: float, b: float) -> float: return a + b chat_agent = Agent( - chat_client=OpenAIResponsesClient(), + client=OpenAIResponsesClient(), instructions="Use the add tool when math is required.", name="MathExpert", # AF registers the async function as a tool at construction. 
diff --git a/python/samples/semantic-kernel-migration/openai_responses/03_responses_agent_structured_output.py b/python/samples/semantic-kernel-migration/openai_responses/03_responses_agent_structured_output.py index 4526657ffe..07d9d0b4c7 100644 --- a/python/samples/semantic-kernel-migration/openai_responses/03_responses_agent_structured_output.py +++ b/python/samples/semantic-kernel-migration/openai_responses/03_responses_agent_structured_output.py @@ -51,7 +51,7 @@ async def run_agent_framework() -> None: from agent_framework.openai import OpenAIResponsesClient chat_agent = Agent( - chat_client=OpenAIResponsesClient(), + client=OpenAIResponsesClient(), instructions="Return launch briefs as structured JSON.", name="ProductMarketer", ) diff --git a/python/samples/semantic-kernel-migration/orchestrations/concurrent_basic.py b/python/samples/semantic-kernel-migration/orchestrations/concurrent_basic.py index e98ac967e9..1a02767a5a 100644 --- a/python/samples/semantic-kernel-migration/orchestrations/concurrent_basic.py +++ b/python/samples/semantic-kernel-migration/orchestrations/concurrent_basic.py @@ -86,14 +86,14 @@ def _print_semantic_kernel_outputs(outputs: Sequence[ChatMessageContent]) -> Non async def run_agent_framework_example(prompt: str) -> Sequence[list[Message]]: - chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIChatClient(credential=AzureCliCredential()) - physics = chat_client.as_agent( + physics = client.as_agent( instructions=("You are an expert in physics. Answer questions from a physics perspective."), name="physics", ) - chemistry = chat_client.as_agent( + chemistry = client.as_agent( instructions=("You are an expert in chemistry. 
Answer questions from a chemistry perspective."), name="chemistry", ) diff --git a/python/samples/semantic-kernel-migration/orchestrations/group_chat.py b/python/samples/semantic-kernel-migration/orchestrations/group_chat.py index 235f21e61a..d7fd6bdbb7 100644 --- a/python/samples/semantic-kernel-migration/orchestrations/group_chat.py +++ b/python/samples/semantic-kernel-migration/orchestrations/group_chat.py @@ -230,14 +230,14 @@ async def run_agent_framework_example(task: str) -> str: "Gather concise facts or considerations that help plan a community hackathon. " "Keep your responses factual and scannable." ), - chat_client=AzureOpenAIChatClient(credential=credential), + client=AzureOpenAIChatClient(credential=credential), ) planner = Agent( name="Planner", description="Turns the collected notes into a concrete action plan.", instructions=("Propose a structured action plan that accounts for logistics, roles, and timeline."), - chat_client=AzureOpenAIResponsesClient(credential=credential), + client=AzureOpenAIResponsesClient(credential=credential), ) workflow = ( diff --git a/python/samples/semantic-kernel-migration/orchestrations/magentic.py b/python/samples/semantic-kernel-migration/orchestrations/magentic.py index 263b1a1b5d..a249f61b9b 100644 --- a/python/samples/semantic-kernel-migration/orchestrations/magentic.py +++ b/python/samples/semantic-kernel-migration/orchestrations/magentic.py @@ -133,14 +133,14 @@ async def run_agent_framework_example(prompt: str) -> str | None: instructions=( "You are a Researcher. You find information without additional computation or quantitative analysis." ), - chat_client=OpenAIChatClient(ai_model_id="gpt-4o-search-preview"), + client=OpenAIChatClient(ai_model_id="gpt-4o-search-preview"), ) coder = Agent( name="CoderAgent", description="A helpful assistant that writes and executes code to process and analyze data.", instructions="You solve questions using code. 
Please provide detailed analysis and computation process.", - chat_client=OpenAIResponsesClient(), + client=OpenAIResponsesClient(), tools=HostedCodeInterpreterTool(), ) @@ -149,7 +149,7 @@ async def run_agent_framework_example(prompt: str) -> str | None: name="MagenticManager", description="Orchestrator that coordinates the research and coding workflow", instructions="You coordinate a team to complete complex tasks efficiently.", - chat_client=OpenAIChatClient(), + client=OpenAIChatClient(), ) workflow = MagenticBuilder().participants([researcher, coder]).with_manager(agent=manager_agent).build() diff --git a/python/samples/semantic-kernel-migration/orchestrations/sequential.py b/python/samples/semantic-kernel-migration/orchestrations/sequential.py index d0d21343cd..c678bc22b8 100644 --- a/python/samples/semantic-kernel-migration/orchestrations/sequential.py +++ b/python/samples/semantic-kernel-migration/orchestrations/sequential.py @@ -71,14 +71,14 @@ async def sk_agent_response_callback( async def run_agent_framework_example(prompt: str) -> list[Message]: - chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) + client = AzureOpenAIChatClient(credential=AzureCliCredential()) - writer = chat_client.as_agent( + writer = client.as_agent( instructions=("You are a concise copywriter. Provide a single, punchy marketing sentence based on the prompt."), name="writer", ) - reviewer = chat_client.as_agent( + reviewer = client.as_agent( instructions=("You are a thoughtful reviewer. 
Give brief feedback on the previous assistant message."), name="reviewer", ) diff --git a/python/tests/samples/getting_started/test_chat_client_samples.py b/python/tests/samples/getting_started/test_chat_client_samples.py index 0a699c5908..df3c18b6d5 100644 --- a/python/tests/samples/getting_started/test_chat_client_samples.py +++ b/python/tests/samples/getting_started/test_chat_client_samples.py @@ -8,28 +8,28 @@ import pytest from pytest import MonkeyPatch, mark, param -from samples.getting_started.chat_client.azure_ai_chat_client import ( +from samples.getting_started.client.azure_ai_chat_client import ( main as azure_ai_chat_client, ) -from samples.getting_started.chat_client.azure_assistants_client import ( +from samples.getting_started.client.azure_assistants_client import ( main as azure_assistants_client, ) -from samples.getting_started.chat_client.azure_chat_client import ( +from samples.getting_started.client.azure_chat_client import ( main as azure_chat_client, ) -from samples.getting_started.chat_client.azure_responses_client import ( +from samples.getting_started.client.azure_responses_client import ( main as azure_responses_client, ) -from samples.getting_started.chat_client.chat_response_cancellation import ( +from samples.getting_started.client.chat_response_cancellation import ( main as chat_response_cancellation, ) -from samples.getting_started.chat_client.openai_assistants_client import ( +from samples.getting_started.client.openai_assistants_client import ( main as openai_assistants_client, ) -from samples.getting_started.chat_client.openai_chat_client import ( +from samples.getting_started.client.openai_chat_client import ( main as openai_chat_client, ) -from samples.getting_started.chat_client.openai_responses_client import ( +from samples.getting_started.client.openai_responses_client import ( main as openai_responses_client, ) From 44ff311be04b23cc32b340c88b84bf101804865f Mon Sep 17 00:00:00 2001 From: Eduard van Valkenburg Date: Sat, 7 Feb 
2026 20:58:00 +0100 Subject: [PATCH 03/16] Fix rebase issues: WorkflowMessage references and broken markdown links --- python/README.md | 2 +- python/packages/core/README.md | 2 +- .../agent_framework/_workflows/_executor.py | 8 +- .../core/tests/core/test_observability.py | 12 +-- .../packages/core/tests/workflow/test_edge.py | 82 +++++++++---------- .../core/tests/workflow/test_executor.py | 31 +++---- .../tests/workflow/test_function_executor.py | 62 +++++++------- .../core/tests/workflow/test_runner.py | 4 +- .../core/tests/workflow/test_workflow.py | 11 +-- .../workflow/test_workflow_observability.py | 4 +- python/samples/README.md | 18 ++-- .../getting_started/agents/custom/README.md | 2 +- .../getting_started/orchestrations/README.md | 2 +- .../getting_started/workflows/README.md | 2 +- 14 files changed, 122 insertions(+), 120 deletions(-) diff --git a/python/README.md b/python/README.md index 4c3d68bba1..160a7affb0 100644 --- a/python/README.md +++ b/python/README.md @@ -238,7 +238,7 @@ For more advanced orchestration patterns including Sequential, Concurrent, Group ## More Examples & Samples - [Getting Started with Agents](https://github.com/microsoft/agent-framework/tree/main/python/samples/getting_started/agents): Basic agent creation and tool usage -- [Chat Client Examples](https://github.com/microsoft/agent-framework/tree/main/python/samples/getting_started/client): Direct chat client usage patterns +- [Chat Client Examples](https://github.com/microsoft/agent-framework/tree/main/python/samples/getting_started/chat_client): Direct chat client usage patterns - [Azure AI Integration](https://github.com/microsoft/agent-framework/tree/main/python/packages/azure-ai): Azure AI integration - [Workflow Samples](https://github.com/microsoft/agent-framework/tree/main/python/samples/getting_started/workflows): Advanced multi-agent patterns diff --git a/python/packages/core/README.md b/python/packages/core/README.md index 5ec3822c79..b919b229e4 100644 --- 
a/python/packages/core/README.md +++ b/python/packages/core/README.md @@ -218,7 +218,7 @@ if __name__ == "__main__": ## More Examples & Samples - [Getting Started with Agents](https://github.com/microsoft/agent-framework/tree/main/python/samples/getting_started/agents): Basic agent creation and tool usage -- [Chat Client Examples](https://github.com/microsoft/agent-framework/tree/main/python/samples/getting_started/client): Direct chat client usage patterns +- [Chat Client Examples](https://github.com/microsoft/agent-framework/tree/main/python/samples/getting_started/chat_client): Direct chat client usage patterns - [Azure AI Integration](https://github.com/microsoft/agent-framework/tree/main/python/packages/azure-ai): Azure AI integration - [.NET Workflows Samples](https://github.com/microsoft/agent-framework/tree/main/dotnet/samples/GettingStarted/Workflows): Advanced multi-agent patterns (.NET) diff --git a/python/packages/core/agent_framework/_workflows/_executor.py b/python/packages/core/agent_framework/_workflows/_executor.py index 2bd4be229c..f219c0c28f 100644 --- a/python/packages/core/agent_framework/_workflows/_executor.py +++ b/python/packages/core/agent_framework/_workflows/_executor.py @@ -245,12 +245,12 @@ async def execute( self.id, self.__class__.__name__, str(MessageType.STANDARD if not isinstance(message, WorkflowMessage) else message.type), - type(WorkflowMessage).__name__, + type(message).__name__, source_trace_contexts=trace_contexts, source_span_ids=source_span_ids, ): # Find the handler and handler spec that matches the message type. 
- handler = self._find_handler(WorkflowMessage) + handler = self._find_handler(message) original_message = message if isinstance(message, WorkflowMessage): @@ -272,7 +272,7 @@ async def execute( # Invoke the handler with the message and context # Use deepcopy to capture original input state before handler can mutate it with _framework_event_origin(): - invoke_event = WorkflowEvent.executor_invoked(self.id, copy.deepcopy(WorkflowMessage)) + invoke_event = WorkflowEvent.executor_invoked(self.id, copy.deepcopy(message)) await context.add_event(invoke_event) try: await handler(message, context) @@ -487,7 +487,7 @@ def _find_handler(self, message: Any) -> Callable[[Any, WorkflowContext[Any, Any for message_type in self._handlers: if is_instance_of(message, message_type): return self._handlers[message_type] - raise RuntimeError(f"Executor {self.__class__.__name__} cannot handle message of type {type(WorkflowMessage)}.") + raise RuntimeError(f"Executor {self.__class__.__name__} cannot handle message of type {type(message)}.") async def on_checkpoint_save(self) -> dict[str, Any]: """Hook called when the workflow is being saved to a checkpoint. 
diff --git a/python/packages/core/tests/core/test_observability.py b/python/packages/core/tests/core/test_observability.py index b2c35c386c..120b027f6f 100644 --- a/python/packages/core/tests/core/test_observability.py +++ b/python/packages/core/tests/core/test_observability.py @@ -27,7 +27,7 @@ from agent_framework.observability import ( ROLE_EVENT_MAP, AgentTelemetryLayer, - ChatMessageListTimestampFilter, + MessageListTimestampFilter, ChatTelemetryLayer, OtelAttr, get_function_span, @@ -54,12 +54,12 @@ def test_enum_values(): assert OtelAttr.AGENT_INVOKE_OPERATION == "invoke_agent" -# region Test ChatMessageListTimestampFilter +# region Test MessageListTimestampFilter def test_filter_without_index_key(): """Test filter method when record doesn't have INDEX_KEY.""" - log_filter = ChatMessageListTimestampFilter() + log_filter = MessageListTimestampFilter() record = logging.LogRecord( name="test", level=logging.INFO, pathname="", lineno=0, msg="test message", args=(), exc_info=None ) @@ -73,14 +73,14 @@ def test_filter_without_index_key(): def test_filter_with_index_key(): """Test filter method when record has INDEX_KEY.""" - log_filter = ChatMessageListTimestampFilter() + log_filter = MessageListTimestampFilter() record = logging.LogRecord( name="test", level=logging.INFO, pathname="", lineno=0, msg="test message", args=(), exc_info=None ) original_created = record.created # Add the index key - setattr(record, ChatMessageListTimestampFilter.INDEX_KEY, 5) + setattr(record, MessageListTimestampFilter.INDEX_KEY, 5) result = log_filter.filter(record) @@ -91,7 +91,7 @@ def test_filter_with_index_key(): def test_index_key_constant(): """Test that INDEX_KEY constant is correctly defined.""" - assert ChatMessageListTimestampFilter.INDEX_KEY == "chat_message_index" + assert MessageListTimestampFilter.INDEX_KEY == "chat_message_index" # region Test get_function_span diff --git a/python/packages/core/tests/workflow/test_edge.py 
b/python/packages/core/tests/workflow/test_edge.py index 42ff6e5d36..308e503560 100644 --- a/python/packages/core/tests/workflow/test_edge.py +++ b/python/packages/core/tests/workflow/test_edge.py @@ -9,7 +9,7 @@ from agent_framework import ( Executor, InProcRunnerContext, - Message, + WorkflowMessage, WorkflowContext, handler, ) @@ -193,7 +193,7 @@ async def test_single_edge_group_send_message() -> None: ctx = InProcRunnerContext() data = MockMessage(data="test") - message = Message(data=data, source_id=source.id) + message = WorkflowMessage(data=data, source_id=source.id) success = await edge_runner.send_message(message, state, ctx) assert success is True @@ -212,7 +212,7 @@ async def test_single_edge_group_send_message_with_target() -> None: ctx = InProcRunnerContext() data = MockMessage(data="test") - message = Message(data=data, source_id=source.id, target_id=target.id) + message = WorkflowMessage(data=data, source_id=source.id, target_id=target.id) success = await edge_runner.send_message(message, state, ctx) assert success is True @@ -231,7 +231,7 @@ async def test_single_edge_group_send_message_with_invalid_target() -> None: ctx = InProcRunnerContext() data = MockMessage(data="test") - message = Message(data=data, source_id=source.id, target_id="invalid_target") + message = WorkflowMessage(data=data, source_id=source.id, target_id="invalid_target") success = await edge_runner.send_message(message, state, ctx) assert success is False @@ -250,7 +250,7 @@ async def test_single_edge_group_send_message_with_invalid_data() -> None: ctx = InProcRunnerContext() data = "invalid_data" - message = Message(data=data, source_id=source.id) + message = WorkflowMessage(data=data, source_id=source.id) success = await edge_runner.send_message(message, state, ctx) assert success is False @@ -270,7 +270,7 @@ async def test_single_edge_group_send_message_with_condition_pass() -> None: ctx = InProcRunnerContext() data = MockMessage(data="test") - message = Message(data=data, 
source_id=source.id) + message = WorkflowMessage(data=data, source_id=source.id) success = await edge_runner.send_message(message, state, ctx) assert success is True @@ -292,7 +292,7 @@ async def test_single_edge_group_send_message_with_condition_fail() -> None: ctx = InProcRunnerContext() data = MockMessage(data="different") - message = Message(data=data, source_id=source.id) + message = WorkflowMessage(data=data, source_id=source.id) success = await edge_runner.send_message(message, state, ctx) # Should return True because message was processed, but condition failed @@ -318,7 +318,7 @@ async def test_single_edge_group_tracing_success(span_exporter) -> None: source_span_ids = ["00f067aa0ba902b7"] data = MockMessage(data="test") - message = Message(data=data, source_id=source.id, trace_contexts=trace_contexts, source_span_ids=source_span_ids) + message = WorkflowMessage(data=data, source_id=source.id, trace_contexts=trace_contexts, source_span_ids=source_span_ids) # Clear any build spans span_exporter.clear() @@ -363,7 +363,7 @@ async def test_single_edge_group_tracing_condition_failure(span_exporter) -> Non ctx = InProcRunnerContext() data = MockMessage(data="fail") - message = Message(data=data, source_id=source.id) + message = WorkflowMessage(data=data, source_id=source.id) # Clear any build spans span_exporter.clear() @@ -398,7 +398,7 @@ async def test_single_edge_group_tracing_type_mismatch(span_exporter) -> None: # Send incompatible data type data = "invalid_data" - message = Message(data=data, source_id=source.id) + message = WorkflowMessage(data=data, source_id=source.id) # Clear any build spans span_exporter.clear() @@ -432,7 +432,7 @@ async def test_single_edge_group_tracing_target_mismatch(span_exporter) -> None: ctx = InProcRunnerContext() data = MockMessage(data="test") - message = Message(data=data, source_id=source.id, target_id="wrong_target") + message = WorkflowMessage(data=data, source_id=source.id, target_id="wrong_target") # Clear any build 
spans span_exporter.clear() @@ -500,7 +500,7 @@ async def test_source_edge_group_send_message() -> None: ctx = InProcRunnerContext() data = MockMessage(data="test") - message = Message(data=data, source_id=source.id) + message = WorkflowMessage(data=data, source_id=source.id) success = await edge_runner.send_message(message, state, ctx) @@ -523,7 +523,7 @@ async def test_source_edge_group_send_message_with_target() -> None: ctx = InProcRunnerContext() data = MockMessage(data="test") - message = Message(data=data, source_id=source.id, target_id=target1.id) + message = WorkflowMessage(data=data, source_id=source.id, target_id=target1.id) success = await edge_runner.send_message(message, state, ctx) @@ -546,7 +546,7 @@ async def test_source_edge_group_send_message_with_invalid_target() -> None: ctx = InProcRunnerContext() data = MockMessage(data="test") - message = Message(data=data, source_id=source.id, target_id="invalid_target") + message = WorkflowMessage(data=data, source_id=source.id, target_id="invalid_target") success = await edge_runner.send_message(message, state, ctx) assert success is False @@ -566,7 +566,7 @@ async def test_source_edge_group_send_message_with_invalid_data() -> None: ctx = InProcRunnerContext() data = "invalid_data" - message = Message(data=data, source_id=source.id) + message = WorkflowMessage(data=data, source_id=source.id) success = await edge_runner.send_message(message, state, ctx) assert success is False @@ -586,7 +586,7 @@ async def test_source_edge_group_send_message_only_one_successful_send() -> None ctx = InProcRunnerContext() data = MockMessage(data="test") - message = Message(data=data, source_id=source.id) + message = WorkflowMessage(data=data, source_id=source.id) success = await edge_runner.send_message(message, state, ctx) @@ -635,7 +635,7 @@ async def test_source_edge_group_with_selection_func_send_message() -> None: ctx = InProcRunnerContext() data = MockMessage(data="test") - message = Message(data=data, 
source_id=source.id) + message = WorkflowMessage(data=data, source_id=source.id) with patch("agent_framework._workflows._edge_runner.EdgeRunner._execute_on_target") as mock_send: success = await edge_runner.send_message(message, state, ctx) @@ -663,7 +663,7 @@ async def test_source_edge_group_with_selection_func_send_message_with_invalid_s ctx = InProcRunnerContext() data = MockMessage(data="test") - message = Message(data=data, source_id=source.id) + message = WorkflowMessage(data=data, source_id=source.id) with pytest.raises(RuntimeError): await edge_runner.send_message(message, state, ctx) @@ -688,7 +688,7 @@ async def test_source_edge_group_with_selection_func_send_message_with_target() ctx = InProcRunnerContext() data = MockMessage(data="test") - message = Message(data=data, source_id=source.id, target_id=target1.id) + message = WorkflowMessage(data=data, source_id=source.id, target_id=target1.id) with patch("agent_framework._workflows._edge_runner.EdgeRunner._execute_on_target") as mock_send: success = await edge_runner.send_message(message, state, ctx) @@ -717,7 +717,7 @@ async def test_source_edge_group_with_selection_func_send_message_with_target_no ctx = InProcRunnerContext() data = MockMessage(data="test") - message = Message(data=data, source_id=source.id, target_id=target2.id) + message = WorkflowMessage(data=data, source_id=source.id, target_id=target2.id) success = await edge_runner.send_message(message, state, ctx) assert success is False @@ -742,7 +742,7 @@ async def test_source_edge_group_with_selection_func_send_message_with_invalid_d ctx = InProcRunnerContext() data = "invalid_data" - message = Message(data=data, source_id=source.id) + message = WorkflowMessage(data=data, source_id=source.id) success = await edge_runner.send_message(message, state, ctx) assert success is False @@ -767,7 +767,7 @@ async def test_source_edge_group_with_selection_func_send_message_with_target_in ctx = InProcRunnerContext() data = "invalid_data" - message = 
Message(data=data, source_id=source.id, target_id=target1.id) + message = WorkflowMessage(data=data, source_id=source.id, target_id=target1.id) success = await edge_runner.send_message(message, state, ctx) assert success is False @@ -791,7 +791,7 @@ async def test_fan_out_edge_group_tracing_success(span_exporter) -> None: source_span_ids = ["00f067aa0ba902b7"] data = MockMessage(data="test") - message = Message(data=data, source_id=source.id, trace_contexts=trace_contexts, source_span_ids=source_span_ids) + message = WorkflowMessage(data=data, source_id=source.id, trace_contexts=trace_contexts, source_span_ids=source_span_ids) # Clear any build spans span_exporter.clear() @@ -841,7 +841,7 @@ async def test_fan_out_edge_group_tracing_with_target(span_exporter) -> None: source_span_ids = ["00f067aa0ba902b7"] data = MockMessage(data="test") - message = Message( + message = WorkflowMessage( data=data, source_id=source.id, target_id=target1.id, @@ -927,7 +927,7 @@ async def test_target_edge_group_send_message_buffer() -> None: with patch("agent_framework._workflows._edge_runner.EdgeRunner._execute_on_target") as mock_send: success = await edge_runner.send_message( - Message(data=data, source_id=source1.id), + WorkflowMessage(data=data, source_id=source1.id), state, ctx, ) @@ -937,7 +937,7 @@ async def test_target_edge_group_send_message_buffer() -> None: assert len(edge_runner._buffer[source1.id]) == 1 # type: ignore success = await edge_runner.send_message( - Message(data=data, source_id=source2.id), + WorkflowMessage(data=data, source_id=source2.id), state, ctx, ) @@ -963,7 +963,7 @@ async def test_target_edge_group_send_message_with_invalid_target() -> None: ctx = InProcRunnerContext() data = MockMessage(data="test") - message = Message(data=data, source_id=source1.id, target_id="invalid_target") + message = WorkflowMessage(data=data, source_id=source1.id, target_id="invalid_target") success = await edge_runner.send_message(message, state, ctx) assert success is 
False @@ -984,7 +984,7 @@ async def test_target_edge_group_send_message_with_invalid_data() -> None: ctx = InProcRunnerContext() data = "invalid_data" - message = Message(data=data, source_id=source1.id) + message = WorkflowMessage(data=data, source_id=source1.id) success = await edge_runner.send_message(message, state, ctx) assert success is False @@ -1017,7 +1017,7 @@ async def test_fan_in_edge_group_tracing_buffered(span_exporter) -> None: # Send first message (should be buffered) success = await edge_runner.send_message( - Message(data=data, source_id=source1.id, trace_contexts=trace_contexts1, source_span_ids=source_span_ids1), + WorkflowMessage(data=data, source_id=source1.id, trace_contexts=trace_contexts1, source_span_ids=source_span_ids1), state, ctx, ) @@ -1049,7 +1049,7 @@ async def test_fan_in_edge_group_tracing_buffered(span_exporter) -> None: span_exporter.clear() success = await edge_runner.send_message( - Message(data=data, source_id=source2.id, trace_contexts=trace_contexts2, source_span_ids=source_span_ids2), + WorkflowMessage(data=data, source_id=source2.id, trace_contexts=trace_contexts2, source_span_ids=source_span_ids2), state, ctx, ) @@ -1093,7 +1093,7 @@ async def test_fan_in_edge_group_tracing_type_mismatch(span_exporter) -> None: # Send incompatible data type data = "invalid_data" - message = Message(data=data, source_id=source1.id) + message = WorkflowMessage(data=data, source_id=source1.id) # Clear any build spans span_exporter.clear() @@ -1130,7 +1130,7 @@ async def test_fan_in_edge_group_with_multiple_message_types() -> None: data = MockMessage(data="test") success = await edge_runner.send_message( - Message(data=data, source_id=source1.id), + WorkflowMessage(data=data, source_id=source1.id), state, ctx, ) @@ -1138,7 +1138,7 @@ async def test_fan_in_edge_group_with_multiple_message_types() -> None: data2 = MockMessageSecondary(data="test") success = await edge_runner.send_message( - Message(data=data2, source_id=source2.id), + 
WorkflowMessage(data=data2, source_id=source2.id), state, ctx, ) @@ -1161,7 +1161,7 @@ async def test_fan_in_edge_group_with_multiple_message_types_failed() -> None: data = MockMessage(data="test") success = await edge_runner.send_message( - Message(data=data, source_id=source1.id), + WorkflowMessage(data=data, source_id=source1.id), state, ctx, ) @@ -1175,7 +1175,7 @@ async def test_fan_in_edge_group_with_multiple_message_types_failed() -> None: # source executors as a union. data2 = MockMessageSecondary(data="test") _ = await edge_runner.send_message( - Message(data=data2, source_id=source2.id), + WorkflowMessage(data=data2, source_id=source2.id), state, ctx, ) @@ -1275,7 +1275,7 @@ async def test_switch_case_edge_group_send_message() -> None: ctx = InProcRunnerContext() data = MockMessage(data=-1) - message = Message(data=data, source_id=source.id) + message = WorkflowMessage(data=data, source_id=source.id) with patch("agent_framework._workflows._edge_runner.EdgeRunner._execute_on_target") as mock_send: success = await edge_runner.send_message(message, state, ctx) @@ -1285,7 +1285,7 @@ async def test_switch_case_edge_group_send_message() -> None: # Default condition should data = MockMessage(data=1) - message = Message(data=data, source_id=source.id) + message = WorkflowMessage(data=data, source_id=source.id) with patch("agent_framework._workflows._edge_runner.EdgeRunner._execute_on_target") as mock_send: success = await edge_runner.send_message(message, state, ctx) @@ -1314,7 +1314,7 @@ async def test_switch_case_edge_group_send_message_with_invalid_target() -> None ctx = InProcRunnerContext() data = MockMessage(data=-1) - message = Message(data=data, source_id=source.id, target_id="invalid_target") + message = WorkflowMessage(data=data, source_id=source.id, target_id="invalid_target") success = await edge_runner.send_message(message, state, ctx) assert success is False @@ -1341,13 +1341,13 @@ async def 
test_switch_case_edge_group_send_message_with_valid_target() -> None: ctx = InProcRunnerContext() data = MockMessage(data=1) # Condition will fail - message = Message(data=data, source_id=source.id, target_id=target1.id) + message = WorkflowMessage(data=data, source_id=source.id, target_id=target1.id) success = await edge_runner.send_message(message, state, ctx) assert success is False data = MockMessage(data=-1) # Condition will pass - message = Message(data=data, source_id=source.id, target_id=target1.id) + message = WorkflowMessage(data=data, source_id=source.id, target_id=target1.id) success = await edge_runner.send_message(message, state, ctx) assert success is True @@ -1373,7 +1373,7 @@ async def test_switch_case_edge_group_send_message_with_invalid_data() -> None: ctx = InProcRunnerContext() data = "invalid_data" - message = Message(data=data, source_id=source.id) + message = WorkflowMessage(data=data, source_id=source.id) success = await edge_runner.send_message(message, state, ctx) assert success is False diff --git a/python/packages/core/tests/workflow/test_executor.py b/python/packages/core/tests/workflow/test_executor.py index c089fb30f3..f974b07272 100644 --- a/python/packages/core/tests/workflow/test_executor.py +++ b/python/packages/core/tests/workflow/test_executor.py @@ -8,6 +8,7 @@ from agent_framework import ( Executor, Message, + WorkflowMessage, WorkflowBuilder, WorkflowContext, WorkflowEvent, @@ -97,9 +98,9 @@ async def handle_number(self, number: int, ctx: WorkflowContext) -> None: # typ executor = MockExecutorWithValidHandlers(id="test") assert executor.id is not None assert len(executor._handlers) == 2 # type: ignore - assert executor.can_handle(Message(data="text", source_id="mock")) is True - assert executor.can_handle(Message(data=42, source_id="mock")) is True - assert executor.can_handle(Message(data=3.14, source_id="mock")) is False + assert executor.can_handle(WorkflowMessage(data="text", source_id="mock")) is True + assert 
executor.can_handle(WorkflowMessage(data=42, source_id="mock")) is True + assert executor.can_handle(WorkflowMessage(data=3.14, source_id="mock")) is False def test_executor_handlers_with_output_types(): @@ -580,9 +581,9 @@ async def handle(self, message: Any, ctx: WorkflowContext) -> None: assert len(exec_instance._handlers) == 1 # Can handle str messages - assert exec_instance.can_handle(Message(data="hello", source_id="mock")) + assert exec_instance.can_handle(WorkflowMessage(data="hello", source_id="mock")) # Cannot handle int messages (since explicit type is str) - assert not exec_instance.can_handle(Message(data=42, source_id="mock")) + assert not exec_instance.can_handle(WorkflowMessage(data=42, source_id="mock")) def test_handler_with_explicit_output_type(self): """Test that explicit output works when input is also specified.""" @@ -622,8 +623,8 @@ async def handle(self, message: Any, ctx: WorkflowContext) -> None: assert handler_func._handler_spec["output_types"] == [list] # Verify can_handle - assert exec_instance.can_handle(Message(data={"key": "value"}, source_id="mock")) - assert not exec_instance.can_handle(Message(data="string", source_id="mock")) + assert exec_instance.can_handle(WorkflowMessage(data={"key": "value"}, source_id="mock")) + assert not exec_instance.can_handle(WorkflowMessage(data="string", source_id="mock")) def test_handler_with_explicit_union_input_type(self): """Test that explicit union input_type is handled correctly.""" @@ -641,10 +642,10 @@ async def handle(self, message: Any, ctx: WorkflowContext) -> None: assert len(exec_instance._handlers) == 1 # Can handle both str and int messages - assert exec_instance.can_handle(Message(data="hello", source_id="mock")) - assert exec_instance.can_handle(Message(data=42, source_id="mock")) + assert exec_instance.can_handle(WorkflowMessage(data="hello", source_id="mock")) + assert exec_instance.can_handle(WorkflowMessage(data=42, source_id="mock")) # Cannot handle float - assert not 
exec_instance.can_handle(Message(data=3.14, source_id="mock")) + assert not exec_instance.can_handle(WorkflowMessage(data=3.14, source_id="mock")) def test_handler_with_explicit_union_output_type(self): """Test that explicit union output is normalized to a list.""" @@ -735,7 +736,7 @@ async def handle(self, message, ctx: WorkflowContext) -> None: # type: ignore[n # Should work with explicit input_type assert str in exec_instance._handlers - assert exec_instance.can_handle(Message(data="hello", source_id="mock")) + assert exec_instance.can_handle(WorkflowMessage(data="hello", source_id="mock")) def test_handler_multiple_handlers_mixed_explicit_and_introspected(self): """Test executor with multiple handlers, some with explicit types and some introspected.""" @@ -772,7 +773,7 @@ async def handle(self, message, ctx: WorkflowContext) -> None: # type: ignore[n # Should resolve the string to the actual type assert ForwardRefMessage in exec_instance._handlers - assert exec_instance.can_handle(Message(data=ForwardRefMessage("hello"), source_id="mock")) + assert exec_instance.can_handle(WorkflowMessage(data=ForwardRefMessage("hello"), source_id="mock")) def test_handler_with_string_forward_reference_union(self): """Test that string forward references work with union types.""" @@ -785,8 +786,8 @@ async def handle(self, message, ctx: WorkflowContext) -> None: # type: ignore[n exec_instance = StringUnionExecutor(id="string_union") # Should handle both types - assert exec_instance.can_handle(Message(data=ForwardRefTypeA("hello"), source_id="mock")) - assert exec_instance.can_handle(Message(data=ForwardRefTypeB(42), source_id="mock")) + assert exec_instance.can_handle(WorkflowMessage(data=ForwardRefTypeA("hello"), source_id="mock")) + assert exec_instance.can_handle(WorkflowMessage(data=ForwardRefTypeB(42), source_id="mock")) def test_handler_with_string_forward_reference_output_type(self): """Test that string forward references work for output_type.""" @@ -850,7 +851,7 @@ async 
def handle(self, message: Any, ctx: WorkflowContext) -> None: # Check input type assert str in exec_instance._handlers - assert exec_instance.can_handle(Message(data="hello", source_id="mock")) + assert exec_instance.can_handle(WorkflowMessage(data="hello", source_id="mock")) # Check output_type assert int in exec_instance.output_types diff --git a/python/packages/core/tests/workflow/test_function_executor.py b/python/packages/core/tests/workflow/test_function_executor.py index 3d274f8cd7..f9194a212a 100644 --- a/python/packages/core/tests/workflow/test_function_executor.py +++ b/python/packages/core/tests/workflow/test_function_executor.py @@ -8,7 +8,7 @@ from agent_framework import ( FunctionExecutor, - Message, + WorkflowMessage, WorkflowBuilder, WorkflowContext, executor, @@ -253,9 +253,9 @@ def test_can_handle_method(self): async def string_processor(text: str, ctx: WorkflowContext[str]) -> None: await ctx.send_message(text) - assert string_processor.can_handle(Message(data="hello", source_id="Mock")) - assert not string_processor.can_handle(Message(data=123, source_id="Mock")) - assert not string_processor.can_handle(Message(data=[], source_id="Mock")) + assert string_processor.can_handle(WorkflowMessage(data="hello", source_id="Mock")) + assert not string_processor.can_handle(WorkflowMessage(data=123, source_id="Mock")) + assert not string_processor.can_handle(WorkflowMessage(data=[], source_id="Mock")) def test_duplicate_handler_registration(self): """Test that registering duplicate handlers raises an error.""" @@ -332,9 +332,9 @@ def test_single_parameter_can_handle(self): async def int_processor(value: int): return value * 2 - assert int_processor.can_handle(Message(data=42, source_id="mock")) - assert not int_processor.can_handle(Message(data="hello", source_id="mock")) - assert not int_processor.can_handle(Message(data=[], source_id="mock")) + assert int_processor.can_handle(WorkflowMessage(data=42, source_id="mock")) + assert not 
int_processor.can_handle(WorkflowMessage(data="hello", source_id="mock")) + assert not int_processor.can_handle(WorkflowMessage(data=[], source_id="mock")) async def test_single_parameter_execution(self): """Test that single-parameter functions can be executed properly.""" @@ -348,7 +348,7 @@ async def double_value(value: int): WorkflowBuilder(start_executor=double_value).build() # For testing purposes, we can check that the handler is registered correctly - assert double_value.can_handle(Message(data=5, source_id="mock")) + assert double_value.can_handle(WorkflowMessage(data=5, source_id="mock")) assert int in double_value._handlers def test_sync_function_basic(self): @@ -392,9 +392,9 @@ def test_sync_function_can_handle(self): def string_handler(text: str): return text.strip() - assert string_handler.can_handle(Message(data="hello", source_id="mock")) - assert not string_handler.can_handle(Message(data=123, source_id="mock")) - assert not string_handler.can_handle(Message(data=[], source_id="mock")) + assert string_handler.can_handle(WorkflowMessage(data="hello", source_id="mock")) + assert not string_handler.can_handle(WorkflowMessage(data=123, source_id="mock")) + assert not string_handler.can_handle(WorkflowMessage(data=[], source_id="mock")) def test_sync_function_validation(self): """Test validation for synchronous functions.""" @@ -436,8 +436,8 @@ async def async_func(data: str): assert isinstance(async_func, FunctionExecutor) # Both should handle strings - assert sync_func.can_handle(Message(data="test", source_id="mock")) - assert async_func.can_handle(Message(data="test", source_id="mock")) + assert sync_func.can_handle(WorkflowMessage(data="test", source_id="mock")) + assert async_func.can_handle(WorkflowMessage(data="test", source_id="mock")) # Both should be different instances assert sync_func is not async_func @@ -466,8 +466,8 @@ async def reverse_async(text: str, ctx: WorkflowContext[Any, str]): assert async_spec["workflow_output_types"] == [str] # 
Second parameter is str # Verify the executors can handle their input types - assert to_upper_sync.can_handle(Message(data="hello", source_id="mock")) - assert reverse_async.can_handle(Message(data="HELLO", source_id="mock")) + assert to_upper_sync.can_handle(WorkflowMessage(data="hello", source_id="mock")) + assert reverse_async.can_handle(WorkflowMessage(data="HELLO", source_id="mock")) # For integration testing, we mainly verify that the handlers are properly registered # and the functions are wrapped correctly @@ -574,9 +574,9 @@ async def process(message, ctx: WorkflowContext) -> None: # type: ignore[no-unt assert len(process._handlers) == 1 # Can handle str messages - assert process.can_handle(Message(data="hello", source_id="mock")) + assert process.can_handle(WorkflowMessage(data="hello", source_id="mock")) # Cannot handle int messages - assert not process.can_handle(Message(data=42, source_id="mock")) + assert not process.can_handle(WorkflowMessage(data=42, source_id="mock")) def test_executor_with_explicit_output_type(self): """Test that explicit output_type takes precedence over introspection.""" @@ -609,8 +609,8 @@ async def process(message, ctx: WorkflowContext) -> None: # type: ignore[no-unt assert spec["output_types"] == [list] # Verify can_handle - assert process.can_handle(Message(data={"key": "value"}, source_id="mock")) - assert not process.can_handle(Message(data="string", source_id="mock")) + assert process.can_handle(WorkflowMessage(data={"key": "value"}, source_id="mock")) + assert not process.can_handle(WorkflowMessage(data="string", source_id="mock")) def test_executor_with_explicit_union_input_type(self): """Test that explicit union input_type is handled correctly.""" @@ -623,10 +623,10 @@ async def process(message, ctx: WorkflowContext) -> None: # type: ignore[no-unt assert len(process._handlers) == 1 # Can handle both str and int messages - assert process.can_handle(Message(data="hello", source_id="mock")) - assert 
process.can_handle(Message(data=42, source_id="mock")) + assert process.can_handle(WorkflowMessage(data="hello", source_id="mock")) + assert process.can_handle(WorkflowMessage(data=42, source_id="mock")) # Cannot handle float - assert not process.can_handle(Message(data=3.14, source_id="mock")) + assert not process.can_handle(WorkflowMessage(data=3.14, source_id="mock")) def test_executor_with_explicit_union_output_type(self): """Test that explicit union output_type is normalized to a list.""" @@ -695,7 +695,7 @@ async def process(message, ctx: WorkflowContext) -> None: # type: ignore[no-unt # Should work with explicit input_type assert str in process._handlers - assert process.can_handle(Message(data="hello", source_id="mock")) + assert process.can_handle(WorkflowMessage(data="hello", source_id="mock")) def test_executor_explicit_types_with_id(self): """Test that explicit types work together with id parameter.""" @@ -717,8 +717,8 @@ async def process(message): # type: ignore[no-untyped-def] # Should work with explicit input_type assert str in process._handlers - assert process.can_handle(Message(data="hello", source_id="mock")) - assert not process.can_handle(Message(data=42, source_id="mock")) + assert process.can_handle(WorkflowMessage(data="hello", source_id="mock")) + assert not process.can_handle(WorkflowMessage(data=42, source_id="mock")) def test_executor_explicit_types_with_sync_function(self): """Test that explicit types work with synchronous functions.""" @@ -752,8 +752,8 @@ async def process(message, ctx: WorkflowContext) -> None: # type: ignore[no-unt pass # Can handle both str and int - assert process.can_handle(Message(data="hello", source_id="mock")) - assert process.can_handle(Message(data=42, source_id="mock")) + assert process.can_handle(WorkflowMessage(data="hello", source_id="mock")) + assert process.can_handle(WorkflowMessage(data=42, source_id="mock")) # Output types should include both assert set(process.output_types) == {bool, float} @@ 
-767,7 +767,7 @@ async def process(message, ctx: WorkflowContext) -> None: # type: ignore[no-unt # Should resolve the string to the actual type assert FuncExecForwardRefMessage in process._handlers - assert process.can_handle(Message(data=FuncExecForwardRefMessage("hello"), source_id="mock")) + assert process.can_handle(WorkflowMessage(data=FuncExecForwardRefMessage("hello"), source_id="mock")) def test_executor_with_string_forward_reference_union(self): """Test that string forward references work with union types.""" @@ -777,8 +777,8 @@ async def process(message, ctx: WorkflowContext) -> None: # type: ignore[no-unt pass # Should handle both types - assert process.can_handle(Message(data=FuncExecForwardRefTypeA("hello"), source_id="mock")) - assert process.can_handle(Message(data=FuncExecForwardRefTypeB(42), source_id="mock")) + assert process.can_handle(WorkflowMessage(data=FuncExecForwardRefTypeA("hello"), source_id="mock")) + assert process.can_handle(WorkflowMessage(data=FuncExecForwardRefTypeB(42), source_id="mock")) def test_executor_with_string_forward_reference_output_type(self): """Test that string forward references work for output_type.""" @@ -827,7 +827,7 @@ async def process(message: Any, ctx: WorkflowContext) -> None: # Check input type assert str in process._handlers - assert process.can_handle(Message(data="hello", source_id="mock")) + assert process.can_handle(WorkflowMessage(data="hello", source_id="mock")) # Check output_type assert int in process.output_types diff --git a/python/packages/core/tests/workflow/test_runner.py b/python/packages/core/tests/workflow/test_runner.py index 7af722e45a..e527ba13fa 100644 --- a/python/packages/core/tests/workflow/test_runner.py +++ b/python/packages/core/tests/workflow/test_runner.py @@ -20,8 +20,8 @@ from agent_framework._workflows._runner import Runner from agent_framework._workflows._runner_context import ( InProcRunnerContext, - Message, RunnerContext, + WorkflowMessage, ) from 
agent_framework._workflows._state import State @@ -179,7 +179,7 @@ async def test_runner_emits_runner_completion_for_agent_response_without_targets runner = Runner([], {}, State(), ctx) await ctx.send_message( - Message( + WorkflowMessage( data=AgentExecutorResponse("agent", AgentResponse()), source_id="agent", ) diff --git a/python/packages/core/tests/workflow/test_workflow.py b/python/packages/core/tests/workflow/test_workflow.py index c8923f4774..6728bcfcb1 100644 --- a/python/packages/core/tests/workflow/test_workflow.py +++ b/python/packages/core/tests/workflow/test_workflow.py @@ -25,6 +25,7 @@ WorkflowContext, WorkflowConvergenceException, WorkflowEvent, + WorkflowMessage, WorkflowRunState, handler, response_handler, @@ -274,7 +275,7 @@ async def test_workflow_with_checkpointing_enabled(simple_executor: Executor): ) # Verify workflow was created and can run - test_message = Message(data="test message", source_id="test", target_id=None) + test_message = WorkflowMessage(data="test message", source_id="test", target_id=None) result = await workflow.run(test_message) assert result is not None @@ -535,7 +536,7 @@ async def test_workflow_checkpoint_runtime_only_configuration( workflow = WorkflowBuilder(start_executor=simple_executor).add_edge(simple_executor, simple_executor).build() # Run with runtime checkpoint storage - should create checkpoints - test_message = Message(data="runtime checkpoint test", source_id="test", target_id=None) + test_message = WorkflowMessage(data="runtime checkpoint test", source_id="test", target_id=None) result = await workflow.run(test_message, checkpoint_storage=storage) assert result is not None assert result.get_final_state() == WorkflowRunState.IDLE @@ -586,7 +587,7 @@ async def test_workflow_checkpoint_runtime_overrides_buildtime( ) # Run with runtime checkpoint storage override - test_message = Message(data="override test", source_id="test", target_id=None) + test_message = WorkflowMessage(data="override test", 
source_id="test", target_id=None) result = await workflow.run(test_message, checkpoint_storage=runtime_storage) assert result is not None @@ -910,7 +911,7 @@ async def test_workflow_run_parameter_validation(simple_executor: Executor) -> N """Test that stream properly validate parameter combinations.""" workflow = WorkflowBuilder(start_executor=simple_executor).add_edge(simple_executor, simple_executor).build() - test_message = Message(data="test", source_id="test", target_id=None) + test_message = WorkflowMessage(data="test", source_id="test", target_id=None) # Valid: message only (new run) result = await workflow.run(test_message) @@ -941,7 +942,7 @@ async def test_workflow_run_stream_parameter_validation( """Test stream=True specific parameter validation scenarios.""" workflow = WorkflowBuilder(start_executor=simple_executor).add_edge(simple_executor, simple_executor).build() - test_message = Message(data="test", source_id="test", target_id=None) + test_message = WorkflowMessage(data="test", source_id="test", target_id=None) # Valid: message only (new run) events: list[WorkflowEvent] = [] diff --git a/python/packages/core/tests/workflow/test_workflow_observability.py b/python/packages/core/tests/workflow/test_workflow_observability.py index 6dcad66a88..d5c20ad429 100644 --- a/python/packages/core/tests/workflow/test_workflow_observability.py +++ b/python/packages/core/tests/workflow/test_workflow_observability.py @@ -8,7 +8,7 @@ from agent_framework import InMemoryCheckpointStorage, WorkflowBuilder from agent_framework._workflows._executor import Executor, handler -from agent_framework._workflows._runner_context import InProcRunnerContext, Message, MessageType +from agent_framework._workflows._runner_context import InProcRunnerContext, MessageType, WorkflowMessage from agent_framework._workflows._state import State from agent_framework._workflows._workflow import Workflow from agent_framework._workflows._workflow_context import WorkflowContext @@ -440,7 +440,7 @@ 
async def test_message_trace_context_serialization(span_exporter: InMemorySpanEx ctx = InProcRunnerContext(InMemoryCheckpointStorage()) # Create message with trace context - message = Message( + message = WorkflowMessage( data="test", source_id="source", target_id="target", diff --git a/python/samples/README.md b/python/samples/README.md index 0b50473588..fc64dced52 100644 --- a/python/samples/README.md +++ b/python/samples/README.md @@ -95,7 +95,7 @@ This directory contains samples demonstrating the capabilities of Microsoft Agen | File | Description | |------|-------------| | [`getting_started/agents/custom/custom_agent.py`](./getting_started/agents/custom/custom_agent.py) | Custom Agent Implementation Example | -| [`getting_started/client/custom_chat_client.py`](./getting_started/client/custom_chat_client.py) | Custom Chat Client Implementation Example | +| [`getting_started/chat_client/custom_chat_client.py`](./getting_started/chat_client/custom_chat_client.py) | Custom Chat Client Implementation Example | ### Ollama @@ -145,14 +145,14 @@ The recommended way to use Ollama is via the native `OllamaChatClient` from the | File | Description | |------|-------------| -| [`getting_started/client/azure_ai_chat_client.py`](./getting_started/client/azure_ai_chat_client.py) | Azure AI Chat Client Direct Usage Example | -| [`getting_started/client/azure_assistants_client.py`](./getting_started/client/azure_assistants_client.py) | Azure OpenAI Assistants Client Direct Usage Example | -| [`getting_started/client/azure_chat_client.py`](./getting_started/client/azure_chat_client.py) | Azure Chat Client Direct Usage Example | -| [`getting_started/client/azure_responses_client.py`](./getting_started/client/azure_responses_client.py) | Azure OpenAI Responses Client Direct Usage Example | -| [`getting_started/client/chat_response_cancellation.py`](./getting_started/client/chat_response_cancellation.py) | Chat Response Cancellation Example | -| 
[`getting_started/client/openai_assistants_client.py`](./getting_started/client/openai_assistants_client.py) | OpenAI Assistants Client Direct Usage Example | -| [`getting_started/client/openai_chat_client.py`](./getting_started/client/openai_chat_client.py) | OpenAI Chat Client Direct Usage Example | -| [`getting_started/client/openai_responses_client.py`](./getting_started/client/openai_responses_client.py) | OpenAI Responses Client Direct Usage Example | +| [`getting_started/chat_client/azure_ai_chat_client.py`](./getting_started/chat_client/azure_ai_chat_client.py) | Azure AI Chat Client Direct Usage Example | +| [`getting_started/chat_client/azure_assistants_client.py`](./getting_started/chat_client/azure_assistants_client.py) | Azure OpenAI Assistants Client Direct Usage Example | +| [`getting_started/chat_client/azure_chat_client.py`](./getting_started/chat_client/azure_chat_client.py) | Azure Chat Client Direct Usage Example | +| [`getting_started/chat_client/azure_responses_client.py`](./getting_started/chat_client/azure_responses_client.py) | Azure OpenAI Responses Client Direct Usage Example | +| [`getting_started/chat_client/chat_response_cancellation.py`](./getting_started/chat_client/chat_response_cancellation.py) | Chat Response Cancellation Example | +| [`getting_started/chat_client/openai_assistants_client.py`](./getting_started/chat_client/openai_assistants_client.py) | OpenAI Assistants Client Direct Usage Example | +| [`getting_started/chat_client/openai_chat_client.py`](./getting_started/chat_client/openai_chat_client.py) | OpenAI Chat Client Direct Usage Example | +| [`getting_started/chat_client/openai_responses_client.py`](./getting_started/chat_client/openai_responses_client.py) | OpenAI Responses Client Direct Usage Example | ## Context Providers diff --git a/python/samples/getting_started/agents/custom/README.md b/python/samples/getting_started/agents/custom/README.md index 52b9e9853d..f8921b1f24 100644 --- 
a/python/samples/getting_started/agents/custom/README.md +++ b/python/samples/getting_started/agents/custom/README.md @@ -7,7 +7,7 @@ This folder contains examples demonstrating how to implement custom agents and c | File | Description | |------|-------------| | [`custom_agent.py`](custom_agent.py) | Shows how to create custom agents by extending the `BaseAgent` class. Demonstrates the `EchoAgent` implementation with both streaming and non-streaming responses, proper thread management, and message history handling. | -| [`custom_chat_client.py`](../../client/custom_chat_client.py) | Demonstrates how to create custom chat clients by extending the `BaseChatClient` class. Shows a `EchoingChatClient` implementation and how to integrate it with `Agent` using the `as_agent()` method. | +| [`custom_chat_client.py`](../../chat_client/custom_chat_client.py) | Demonstrates how to create custom chat clients by extending the `BaseChatClient` class. Shows a `EchoingChatClient` implementation and how to integrate it with `Agent` using the `as_agent()` method. | ## Key Takeaways diff --git a/python/samples/getting_started/orchestrations/README.md b/python/samples/getting_started/orchestrations/README.md index 8ecf961d16..0965ff2178 100644 --- a/python/samples/getting_started/orchestrations/README.md +++ b/python/samples/getting_started/orchestrations/README.md @@ -63,7 +63,7 @@ These may appear in event streams (executor_invoked/executor_completed). They're ## Environment Variables -- **AzureOpenAIChatClient**: Set Azure OpenAI environment variables as documented [here](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/client/README.md#environment-variables). +- **AzureOpenAIChatClient**: Set Azure OpenAI environment variables as documented [here](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/chat_client/README.md#environment-variables). 
- **OpenAI** (used in some orchestration samples): - [OpenAIChatClient env vars](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/agents/openai_chat_client/README.md) diff --git a/python/samples/getting_started/workflows/README.md b/python/samples/getting_started/workflows/README.md index 393047fb2f..ce4aee4172 100644 --- a/python/samples/getting_started/workflows/README.md +++ b/python/samples/getting_started/workflows/README.md @@ -169,7 +169,7 @@ Sequential orchestration uses a few small adapter nodes for plumbing: ### Environment Variables -- **AzureOpenAIChatClient**: Set Azure OpenAI environment variables as documented [here](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/client/README.md#environment-variables). +- **AzureOpenAIChatClient**: Set Azure OpenAI environment variables as documented [here](https://github.com/microsoft/agent-framework/blob/main/python/samples/getting_started/chat_client/README.md#environment-variables). 
These variables are required for samples that construct `AzureOpenAIChatClient` - **OpenAI** (used in orchestration samples): From 7665782ad2af553c84975efd1434450f4bf1184f Mon Sep 17 00:00:00 2001 From: Eduard van Valkenburg Date: Sat, 7 Feb 2026 21:54:43 +0100 Subject: [PATCH 04/16] Fix formatting and lint issues from code quality checks --- .../ag-ui/agent_framework_ag_ui/_run.py | 4 +--- .../core/tests/core/test_observability.py | 2 +- .../openai/test_openai_assistants_client.py | 4 +--- .../packages/core/tests/workflow/test_edge.py | 18 ++++++++++---- .../core/tests/workflow/test_executor.py | 2 +- .../tests/workflow/test_function_executor.py | 2 +- .../devui/agent_framework_devui/_utils.py | 6 +---- .../agent_framework_purview/_middleware.py | 4 +--- .../purview/tests/test_chat_middleware.py | 24 +++++-------------- 9 files changed, 26 insertions(+), 40 deletions(-) diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_run.py b/python/packages/ag-ui/agent_framework_ag_ui/_run.py index 54233dc78b..853127e630 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_run.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_run.py @@ -606,9 +606,7 @@ async def _resolve_approval_responses( # Execute approved tool calls if approved_responses and tools: client = getattr(agent, "client", None) - config = normalize_function_invocation_configuration( - getattr(client, "function_invocation_configuration", None) - ) + config = normalize_function_invocation_configuration(getattr(client, "function_invocation_configuration", None)) middleware_pipeline = FunctionMiddlewarePipeline( *getattr(client, "function_middleware", ()), *run_kwargs.get("middleware", ()), diff --git a/python/packages/core/tests/core/test_observability.py b/python/packages/core/tests/core/test_observability.py index 120b027f6f..77b88a873e 100644 --- a/python/packages/core/tests/core/test_observability.py +++ b/python/packages/core/tests/core/test_observability.py @@ -27,8 +27,8 @@ from 
agent_framework.observability import ( ROLE_EVENT_MAP, AgentTelemetryLayer, - MessageListTimestampFilter, ChatTelemetryLayer, + MessageListTimestampFilter, OtelAttr, get_function_span, ) diff --git a/python/packages/core/tests/openai/test_openai_assistants_client.py b/python/packages/core/tests/openai/test_openai_assistants_client.py index d7967b18b3..9514062ecc 100644 --- a/python/packages/core/tests/openai/test_openai_assistants_client.py +++ b/python/packages/core/tests/openai/test_openai_assistants_client.py @@ -204,9 +204,7 @@ async def test_get_assistant_id_or_create_create_new( mock_async_openai: MagicMock, ) -> None: """Test _get_assistant_id_or_create when creating a new assistant.""" - client = create_test_openai_assistants_client( - mock_async_openai, model_id="gpt-4", assistant_name="TestAssistant" - ) + client = create_test_openai_assistants_client(mock_async_openai, model_id="gpt-4", assistant_name="TestAssistant") assistant_id = await client._get_assistant_id_or_create() # type: ignore diff --git a/python/packages/core/tests/workflow/test_edge.py b/python/packages/core/tests/workflow/test_edge.py index 308e503560..f63cf9b45b 100644 --- a/python/packages/core/tests/workflow/test_edge.py +++ b/python/packages/core/tests/workflow/test_edge.py @@ -9,8 +9,8 @@ from agent_framework import ( Executor, InProcRunnerContext, - WorkflowMessage, WorkflowContext, + WorkflowMessage, handler, ) from agent_framework._workflows._edge import ( @@ -318,7 +318,9 @@ async def test_single_edge_group_tracing_success(span_exporter) -> None: source_span_ids = ["00f067aa0ba902b7"] data = MockMessage(data="test") - message = WorkflowMessage(data=data, source_id=source.id, trace_contexts=trace_contexts, source_span_ids=source_span_ids) + message = WorkflowMessage( + data=data, source_id=source.id, trace_contexts=trace_contexts, source_span_ids=source_span_ids + ) # Clear any build spans span_exporter.clear() @@ -791,7 +793,9 @@ async def 
test_fan_out_edge_group_tracing_success(span_exporter) -> None: source_span_ids = ["00f067aa0ba902b7"] data = MockMessage(data="test") - message = WorkflowMessage(data=data, source_id=source.id, trace_contexts=trace_contexts, source_span_ids=source_span_ids) + message = WorkflowMessage( + data=data, source_id=source.id, trace_contexts=trace_contexts, source_span_ids=source_span_ids + ) # Clear any build spans span_exporter.clear() @@ -1017,7 +1021,9 @@ async def test_fan_in_edge_group_tracing_buffered(span_exporter) -> None: # Send first message (should be buffered) success = await edge_runner.send_message( - WorkflowMessage(data=data, source_id=source1.id, trace_contexts=trace_contexts1, source_span_ids=source_span_ids1), + WorkflowMessage( + data=data, source_id=source1.id, trace_contexts=trace_contexts1, source_span_ids=source_span_ids1 + ), state, ctx, ) @@ -1049,7 +1055,9 @@ async def test_fan_in_edge_group_tracing_buffered(span_exporter) -> None: span_exporter.clear() success = await edge_runner.send_message( - WorkflowMessage(data=data, source_id=source2.id, trace_contexts=trace_contexts2, source_span_ids=source_span_ids2), + WorkflowMessage( + data=data, source_id=source2.id, trace_contexts=trace_contexts2, source_span_ids=source_span_ids2 + ), state, ctx, ) diff --git a/python/packages/core/tests/workflow/test_executor.py b/python/packages/core/tests/workflow/test_executor.py index f974b07272..06d027f19d 100644 --- a/python/packages/core/tests/workflow/test_executor.py +++ b/python/packages/core/tests/workflow/test_executor.py @@ -8,10 +8,10 @@ from agent_framework import ( Executor, Message, - WorkflowMessage, WorkflowBuilder, WorkflowContext, WorkflowEvent, + WorkflowMessage, executor, handler, response_handler, diff --git a/python/packages/core/tests/workflow/test_function_executor.py b/python/packages/core/tests/workflow/test_function_executor.py index f9194a212a..c0b73156ff 100644 --- a/python/packages/core/tests/workflow/test_function_executor.py +++ 
b/python/packages/core/tests/workflow/test_function_executor.py @@ -8,9 +8,9 @@ from agent_framework import ( FunctionExecutor, - WorkflowMessage, WorkflowBuilder, WorkflowContext, + WorkflowMessage, executor, ) diff --git a/python/packages/devui/agent_framework_devui/_utils.py b/python/packages/devui/agent_framework_devui/_utils.py index a6a89e5cac..66886b8ea7 100644 --- a/python/packages/devui/agent_framework_devui/_utils.py +++ b/python/packages/devui/agent_framework_devui/_utils.py @@ -53,11 +53,7 @@ def extract_agent_metadata(entity_object: Any) -> dict[str, Any]: metadata["model"] = chat_opts.get("model_id") elif hasattr(chat_opts, "model_id") and chat_opts.model_id: metadata["model"] = chat_opts.model_id - if ( - metadata["model"] is None - and hasattr(entity_object, "client") - and hasattr(entity_object.client, "model_id") - ): + if metadata["model"] is None and hasattr(entity_object, "client") and hasattr(entity_object.client, "model_id"): metadata["model"] = entity_object.client.model_id # Try to get chat client type diff --git a/python/packages/purview/agent_framework_purview/_middleware.py b/python/packages/purview/agent_framework_purview/_middleware.py index 083e44ad99..10e0443b0b 100644 --- a/python/packages/purview/agent_framework_purview/_middleware.py +++ b/python/packages/purview/agent_framework_purview/_middleware.py @@ -30,9 +30,7 @@ class PurviewPolicyMiddleware(AgentMiddleware): credential = ... 
# TokenCredential or AsyncTokenCredential settings = PurviewSettings(app_name="My App") - agent = Agent( - client=client, instructions="...", middleware=[PurviewPolicyMiddleware(credential, settings)] - ) + agent = Agent(client=client, instructions="...", middleware=[PurviewPolicyMiddleware(credential, settings)]) """ def __init__( diff --git a/python/packages/purview/tests/test_chat_middleware.py b/python/packages/purview/tests/test_chat_middleware.py index 0301f4496c..bf7ace1dbf 100644 --- a/python/packages/purview/tests/test_chat_middleware.py +++ b/python/packages/purview/tests/test_chat_middleware.py @@ -187,9 +187,7 @@ async def test_chat_middleware_handles_payment_required_pre_check(self, mock_cre client = DummyChatClient() chat_options = MagicMock() chat_options.model = "test-model" - context = ChatContext( - client=client, messages=[Message(role="user", text="Hello")], options=chat_options - ) + context = ChatContext(client=client, messages=[Message(role="user", text="Hello")], options=chat_options) async def mock_process_messages(*args, **kwargs): raise PurviewPaymentRequiredError("Payment required") @@ -213,9 +211,7 @@ async def test_chat_middleware_handles_payment_required_post_check(self, mock_cr client = DummyChatClient() chat_options = MagicMock() chat_options.model = "test-model" - context = ChatContext( - client=client, messages=[Message(role="user", text="Hello")], options=chat_options - ) + context = ChatContext(client=client, messages=[Message(role="user", text="Hello")], options=chat_options) call_count = 0 @@ -246,9 +242,7 @@ async def test_chat_middleware_ignores_payment_required_when_configured(self, mo client = DummyChatClient() chat_options = MagicMock() chat_options.model = "test-model" - context = ChatContext( - client=client, messages=[Message(role="user", text="Hello")], options=chat_options - ) + context = ChatContext(client=client, messages=[Message(role="user", text="Hello")], options=chat_options) async def 
mock_process_messages(*args, **kwargs): raise PurviewPaymentRequiredError("Payment required") @@ -288,9 +282,7 @@ async def test_chat_middleware_with_ignore_exceptions(self, mock_credential: Asy client = DummyChatClient() chat_options = MagicMock() chat_options.model = "test-model" - context = ChatContext( - client=client, messages=[Message(role="user", text="Hello")], options=chat_options - ) + context = ChatContext(client=client, messages=[Message(role="user", text="Hello")], options=chat_options) async def mock_process_messages(*args, **kwargs): raise ValueError("Some error") @@ -317,9 +309,7 @@ async def test_chat_middleware_raises_on_pre_check_exception_when_ignore_excepti client = DummyChatClient() chat_options = MagicMock() chat_options.model = "test-model" - context = ChatContext( - client=client, messages=[Message(role="user", text="Hello")], options=chat_options - ) + context = ChatContext(client=client, messages=[Message(role="user", text="Hello")], options=chat_options) with patch.object(middleware._processor, "process_messages", side_effect=ValueError("boom")): @@ -339,9 +329,7 @@ async def test_chat_middleware_raises_on_post_check_exception_when_ignore_except client = DummyChatClient() chat_options = MagicMock() chat_options.model = "test-model" - context = ChatContext( - client=client, messages=[Message(role="user", text="Hello")], options=chat_options - ) + context = ChatContext(client=client, messages=[Message(role="user", text="Hello")], options=chat_options) call_count = 0 From bdf58e1b47783adbf0baf71b65cc141a83dd919a Mon Sep 17 00:00:00 2001 From: Eduard van Valkenburg Date: Sat, 7 Feb 2026 21:59:48 +0100 Subject: [PATCH 05/16] Fix import ordering in workflow sample files --- .../human-in-the-loop/guessing_game_with_human_input.py | 2 +- .../workflows/state-management/state_with_agents.py | 2 +- .../workflows/tool-approval/concurrent_builder_tool_approval.py | 2 +- .../workflows/tool-approval/group_chat_builder_tool_approval.py | 2 +- 
.../workflows/tool-approval/sequential_builder_tool_approval.py | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py b/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py index babd9d54e9..d6b8161f98 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py @@ -8,8 +8,8 @@ AgentExecutorRequest, AgentExecutorResponse, AgentResponseUpdate, - Message, Executor, + Message, WorkflowBuilder, WorkflowContext, WorkflowEvent, diff --git a/python/samples/getting_started/workflows/state-management/state_with_agents.py b/python/samples/getting_started/workflows/state-management/state_with_agents.py index 068ac70eb4..97b9fab240 100644 --- a/python/samples/getting_started/workflows/state-management/state_with_agents.py +++ b/python/samples/getting_started/workflows/state-management/state_with_agents.py @@ -7,9 +7,9 @@ from uuid import uuid4 from agent_framework import ( + Agent, AgentExecutorRequest, AgentExecutorResponse, - Agent, Message, WorkflowBuilder, WorkflowContext, diff --git a/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py index eb0375551e..34d59b62d7 100644 --- a/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py +++ b/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py @@ -5,8 +5,8 @@ from typing import Annotated from agent_framework import ( - Message, Content, + Message, WorkflowEvent, tool, ) diff --git a/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py 
b/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py index 9eb5ac667d..159299b9b8 100644 --- a/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py +++ b/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py @@ -5,8 +5,8 @@ from typing import Annotated, cast from agent_framework import ( - Message, Content, + Message, WorkflowEvent, tool, ) diff --git a/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py index 20a778c745..2f7ecea0ac 100644 --- a/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py +++ b/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py @@ -5,8 +5,8 @@ from typing import Annotated, cast from agent_framework import ( - Message, Content, + Message, WorkflowEvent, tool, ) From 5247d89302c12d5029fb0ada3857ef1241823982 Mon Sep 17 00:00:00 2001 From: Eduard van Valkenburg Date: Sat, 7 Feb 2026 22:28:30 +0100 Subject: [PATCH 06/16] fixed rebase --- .../core/tests/workflow/test_executor.py | 31 +- .../tests/workflow/test_function_executor.py | 62 ++-- .../core/tests/workflow/test_workflow.py | 11 +- .../workflow/test_workflow_observability.py | 4 +- .../devui/agent_framework_devui/_discovery.py | 2 +- .../_concurrent.py | 57 +-- .../_handoff.py | 17 +- .../_magentic.py | 345 +++--------------- .../_sequential.py | 46 +-- .../orchestrations/tests/test_group_chat.py | 263 +++++-------- ..._ai_with_code_interpreter_file_download.py | 59 +-- .../concurrent_custom_agent_executors.py | 4 +- .../guessing_game_with_human_input.py | 2 +- .../state-management/state_with_agents.py | 2 +- .../concurrent_builder_tool_approval.py | 2 +- .../group_chat_builder_tool_approval.py | 2 +- .../sequential_builder_tool_approval.py | 2 +- 
.../orchestrations/concurrent_basic.py | 3 +- .../orchestrations/group_chat.py | 15 +- .../orchestrations/magentic.py | 4 +- 20 files changed, 295 insertions(+), 638 deletions(-) diff --git a/python/packages/core/tests/workflow/test_executor.py b/python/packages/core/tests/workflow/test_executor.py index 06d027f19d..c089fb30f3 100644 --- a/python/packages/core/tests/workflow/test_executor.py +++ b/python/packages/core/tests/workflow/test_executor.py @@ -11,7 +11,6 @@ WorkflowBuilder, WorkflowContext, WorkflowEvent, - WorkflowMessage, executor, handler, response_handler, @@ -98,9 +97,9 @@ async def handle_number(self, number: int, ctx: WorkflowContext) -> None: # typ executor = MockExecutorWithValidHandlers(id="test") assert executor.id is not None assert len(executor._handlers) == 2 # type: ignore - assert executor.can_handle(WorkflowMessage(data="text", source_id="mock")) is True - assert executor.can_handle(WorkflowMessage(data=42, source_id="mock")) is True - assert executor.can_handle(WorkflowMessage(data=3.14, source_id="mock")) is False + assert executor.can_handle(Message(data="text", source_id="mock")) is True + assert executor.can_handle(Message(data=42, source_id="mock")) is True + assert executor.can_handle(Message(data=3.14, source_id="mock")) is False def test_executor_handlers_with_output_types(): @@ -581,9 +580,9 @@ async def handle(self, message: Any, ctx: WorkflowContext) -> None: assert len(exec_instance._handlers) == 1 # Can handle str messages - assert exec_instance.can_handle(WorkflowMessage(data="hello", source_id="mock")) + assert exec_instance.can_handle(Message(data="hello", source_id="mock")) # Cannot handle int messages (since explicit type is str) - assert not exec_instance.can_handle(WorkflowMessage(data=42, source_id="mock")) + assert not exec_instance.can_handle(Message(data=42, source_id="mock")) def test_handler_with_explicit_output_type(self): """Test that explicit output works when input is also specified.""" @@ -623,8 +622,8 @@ 
async def handle(self, message: Any, ctx: WorkflowContext) -> None: assert handler_func._handler_spec["output_types"] == [list] # Verify can_handle - assert exec_instance.can_handle(WorkflowMessage(data={"key": "value"}, source_id="mock")) - assert not exec_instance.can_handle(WorkflowMessage(data="string", source_id="mock")) + assert exec_instance.can_handle(Message(data={"key": "value"}, source_id="mock")) + assert not exec_instance.can_handle(Message(data="string", source_id="mock")) def test_handler_with_explicit_union_input_type(self): """Test that explicit union input_type is handled correctly.""" @@ -642,10 +641,10 @@ async def handle(self, message: Any, ctx: WorkflowContext) -> None: assert len(exec_instance._handlers) == 1 # Can handle both str and int messages - assert exec_instance.can_handle(WorkflowMessage(data="hello", source_id="mock")) - assert exec_instance.can_handle(WorkflowMessage(data=42, source_id="mock")) + assert exec_instance.can_handle(Message(data="hello", source_id="mock")) + assert exec_instance.can_handle(Message(data=42, source_id="mock")) # Cannot handle float - assert not exec_instance.can_handle(WorkflowMessage(data=3.14, source_id="mock")) + assert not exec_instance.can_handle(Message(data=3.14, source_id="mock")) def test_handler_with_explicit_union_output_type(self): """Test that explicit union output is normalized to a list.""" @@ -736,7 +735,7 @@ async def handle(self, message, ctx: WorkflowContext) -> None: # type: ignore[n # Should work with explicit input_type assert str in exec_instance._handlers - assert exec_instance.can_handle(WorkflowMessage(data="hello", source_id="mock")) + assert exec_instance.can_handle(Message(data="hello", source_id="mock")) def test_handler_multiple_handlers_mixed_explicit_and_introspected(self): """Test executor with multiple handlers, some with explicit types and some introspected.""" @@ -773,7 +772,7 @@ async def handle(self, message, ctx: WorkflowContext) -> None: # type: ignore[n # Should 
resolve the string to the actual type assert ForwardRefMessage in exec_instance._handlers - assert exec_instance.can_handle(WorkflowMessage(data=ForwardRefMessage("hello"), source_id="mock")) + assert exec_instance.can_handle(Message(data=ForwardRefMessage("hello"), source_id="mock")) def test_handler_with_string_forward_reference_union(self): """Test that string forward references work with union types.""" @@ -786,8 +785,8 @@ async def handle(self, message, ctx: WorkflowContext) -> None: # type: ignore[n exec_instance = StringUnionExecutor(id="string_union") # Should handle both types - assert exec_instance.can_handle(WorkflowMessage(data=ForwardRefTypeA("hello"), source_id="mock")) - assert exec_instance.can_handle(WorkflowMessage(data=ForwardRefTypeB(42), source_id="mock")) + assert exec_instance.can_handle(Message(data=ForwardRefTypeA("hello"), source_id="mock")) + assert exec_instance.can_handle(Message(data=ForwardRefTypeB(42), source_id="mock")) def test_handler_with_string_forward_reference_output_type(self): """Test that string forward references work for output_type.""" @@ -851,7 +850,7 @@ async def handle(self, message: Any, ctx: WorkflowContext) -> None: # Check input type assert str in exec_instance._handlers - assert exec_instance.can_handle(WorkflowMessage(data="hello", source_id="mock")) + assert exec_instance.can_handle(Message(data="hello", source_id="mock")) # Check output_type assert int in exec_instance.output_types diff --git a/python/packages/core/tests/workflow/test_function_executor.py b/python/packages/core/tests/workflow/test_function_executor.py index c0b73156ff..3d274f8cd7 100644 --- a/python/packages/core/tests/workflow/test_function_executor.py +++ b/python/packages/core/tests/workflow/test_function_executor.py @@ -8,9 +8,9 @@ from agent_framework import ( FunctionExecutor, + Message, WorkflowBuilder, WorkflowContext, - WorkflowMessage, executor, ) @@ -253,9 +253,9 @@ def test_can_handle_method(self): async def string_processor(text: 
str, ctx: WorkflowContext[str]) -> None: await ctx.send_message(text) - assert string_processor.can_handle(WorkflowMessage(data="hello", source_id="Mock")) - assert not string_processor.can_handle(WorkflowMessage(data=123, source_id="Mock")) - assert not string_processor.can_handle(WorkflowMessage(data=[], source_id="Mock")) + assert string_processor.can_handle(Message(data="hello", source_id="Mock")) + assert not string_processor.can_handle(Message(data=123, source_id="Mock")) + assert not string_processor.can_handle(Message(data=[], source_id="Mock")) def test_duplicate_handler_registration(self): """Test that registering duplicate handlers raises an error.""" @@ -332,9 +332,9 @@ def test_single_parameter_can_handle(self): async def int_processor(value: int): return value * 2 - assert int_processor.can_handle(WorkflowMessage(data=42, source_id="mock")) - assert not int_processor.can_handle(WorkflowMessage(data="hello", source_id="mock")) - assert not int_processor.can_handle(WorkflowMessage(data=[], source_id="mock")) + assert int_processor.can_handle(Message(data=42, source_id="mock")) + assert not int_processor.can_handle(Message(data="hello", source_id="mock")) + assert not int_processor.can_handle(Message(data=[], source_id="mock")) async def test_single_parameter_execution(self): """Test that single-parameter functions can be executed properly.""" @@ -348,7 +348,7 @@ async def double_value(value: int): WorkflowBuilder(start_executor=double_value).build() # For testing purposes, we can check that the handler is registered correctly - assert double_value.can_handle(WorkflowMessage(data=5, source_id="mock")) + assert double_value.can_handle(Message(data=5, source_id="mock")) assert int in double_value._handlers def test_sync_function_basic(self): @@ -392,9 +392,9 @@ def test_sync_function_can_handle(self): def string_handler(text: str): return text.strip() - assert string_handler.can_handle(WorkflowMessage(data="hello", source_id="mock")) - assert not 
string_handler.can_handle(WorkflowMessage(data=123, source_id="mock")) - assert not string_handler.can_handle(WorkflowMessage(data=[], source_id="mock")) + assert string_handler.can_handle(Message(data="hello", source_id="mock")) + assert not string_handler.can_handle(Message(data=123, source_id="mock")) + assert not string_handler.can_handle(Message(data=[], source_id="mock")) def test_sync_function_validation(self): """Test validation for synchronous functions.""" @@ -436,8 +436,8 @@ async def async_func(data: str): assert isinstance(async_func, FunctionExecutor) # Both should handle strings - assert sync_func.can_handle(WorkflowMessage(data="test", source_id="mock")) - assert async_func.can_handle(WorkflowMessage(data="test", source_id="mock")) + assert sync_func.can_handle(Message(data="test", source_id="mock")) + assert async_func.can_handle(Message(data="test", source_id="mock")) # Both should be different instances assert sync_func is not async_func @@ -466,8 +466,8 @@ async def reverse_async(text: str, ctx: WorkflowContext[Any, str]): assert async_spec["workflow_output_types"] == [str] # Second parameter is str # Verify the executors can handle their input types - assert to_upper_sync.can_handle(WorkflowMessage(data="hello", source_id="mock")) - assert reverse_async.can_handle(WorkflowMessage(data="HELLO", source_id="mock")) + assert to_upper_sync.can_handle(Message(data="hello", source_id="mock")) + assert reverse_async.can_handle(Message(data="HELLO", source_id="mock")) # For integration testing, we mainly verify that the handlers are properly registered # and the functions are wrapped correctly @@ -574,9 +574,9 @@ async def process(message, ctx: WorkflowContext) -> None: # type: ignore[no-unt assert len(process._handlers) == 1 # Can handle str messages - assert process.can_handle(WorkflowMessage(data="hello", source_id="mock")) + assert process.can_handle(Message(data="hello", source_id="mock")) # Cannot handle int messages - assert not 
process.can_handle(WorkflowMessage(data=42, source_id="mock")) + assert not process.can_handle(Message(data=42, source_id="mock")) def test_executor_with_explicit_output_type(self): """Test that explicit output_type takes precedence over introspection.""" @@ -609,8 +609,8 @@ async def process(message, ctx: WorkflowContext) -> None: # type: ignore[no-unt assert spec["output_types"] == [list] # Verify can_handle - assert process.can_handle(WorkflowMessage(data={"key": "value"}, source_id="mock")) - assert not process.can_handle(WorkflowMessage(data="string", source_id="mock")) + assert process.can_handle(Message(data={"key": "value"}, source_id="mock")) + assert not process.can_handle(Message(data="string", source_id="mock")) def test_executor_with_explicit_union_input_type(self): """Test that explicit union input_type is handled correctly.""" @@ -623,10 +623,10 @@ async def process(message, ctx: WorkflowContext) -> None: # type: ignore[no-unt assert len(process._handlers) == 1 # Can handle both str and int messages - assert process.can_handle(WorkflowMessage(data="hello", source_id="mock")) - assert process.can_handle(WorkflowMessage(data=42, source_id="mock")) + assert process.can_handle(Message(data="hello", source_id="mock")) + assert process.can_handle(Message(data=42, source_id="mock")) # Cannot handle float - assert not process.can_handle(WorkflowMessage(data=3.14, source_id="mock")) + assert not process.can_handle(Message(data=3.14, source_id="mock")) def test_executor_with_explicit_union_output_type(self): """Test that explicit union output_type is normalized to a list.""" @@ -695,7 +695,7 @@ async def process(message, ctx: WorkflowContext) -> None: # type: ignore[no-unt # Should work with explicit input_type assert str in process._handlers - assert process.can_handle(WorkflowMessage(data="hello", source_id="mock")) + assert process.can_handle(Message(data="hello", source_id="mock")) def test_executor_explicit_types_with_id(self): """Test that explicit types 
work together with id parameter.""" @@ -717,8 +717,8 @@ async def process(message): # type: ignore[no-untyped-def] # Should work with explicit input_type assert str in process._handlers - assert process.can_handle(WorkflowMessage(data="hello", source_id="mock")) - assert not process.can_handle(WorkflowMessage(data=42, source_id="mock")) + assert process.can_handle(Message(data="hello", source_id="mock")) + assert not process.can_handle(Message(data=42, source_id="mock")) def test_executor_explicit_types_with_sync_function(self): """Test that explicit types work with synchronous functions.""" @@ -752,8 +752,8 @@ async def process(message, ctx: WorkflowContext) -> None: # type: ignore[no-unt pass # Can handle both str and int - assert process.can_handle(WorkflowMessage(data="hello", source_id="mock")) - assert process.can_handle(WorkflowMessage(data=42, source_id="mock")) + assert process.can_handle(Message(data="hello", source_id="mock")) + assert process.can_handle(Message(data=42, source_id="mock")) # Output types should include both assert set(process.output_types) == {bool, float} @@ -767,7 +767,7 @@ async def process(message, ctx: WorkflowContext) -> None: # type: ignore[no-unt # Should resolve the string to the actual type assert FuncExecForwardRefMessage in process._handlers - assert process.can_handle(WorkflowMessage(data=FuncExecForwardRefMessage("hello"), source_id="mock")) + assert process.can_handle(Message(data=FuncExecForwardRefMessage("hello"), source_id="mock")) def test_executor_with_string_forward_reference_union(self): """Test that string forward references work with union types.""" @@ -777,8 +777,8 @@ async def process(message, ctx: WorkflowContext) -> None: # type: ignore[no-unt pass # Should handle both types - assert process.can_handle(WorkflowMessage(data=FuncExecForwardRefTypeA("hello"), source_id="mock")) - assert process.can_handle(WorkflowMessage(data=FuncExecForwardRefTypeB(42), source_id="mock")) + assert 
process.can_handle(Message(data=FuncExecForwardRefTypeA("hello"), source_id="mock")) + assert process.can_handle(Message(data=FuncExecForwardRefTypeB(42), source_id="mock")) def test_executor_with_string_forward_reference_output_type(self): """Test that string forward references work for output_type.""" @@ -827,7 +827,7 @@ async def process(message: Any, ctx: WorkflowContext) -> None: # Check input type assert str in process._handlers - assert process.can_handle(WorkflowMessage(data="hello", source_id="mock")) + assert process.can_handle(Message(data="hello", source_id="mock")) # Check output_type assert int in process.output_types diff --git a/python/packages/core/tests/workflow/test_workflow.py b/python/packages/core/tests/workflow/test_workflow.py index 6728bcfcb1..c8923f4774 100644 --- a/python/packages/core/tests/workflow/test_workflow.py +++ b/python/packages/core/tests/workflow/test_workflow.py @@ -25,7 +25,6 @@ WorkflowContext, WorkflowConvergenceException, WorkflowEvent, - WorkflowMessage, WorkflowRunState, handler, response_handler, @@ -275,7 +274,7 @@ async def test_workflow_with_checkpointing_enabled(simple_executor: Executor): ) # Verify workflow was created and can run - test_message = WorkflowMessage(data="test message", source_id="test", target_id=None) + test_message = Message(data="test message", source_id="test", target_id=None) result = await workflow.run(test_message) assert result is not None @@ -536,7 +535,7 @@ async def test_workflow_checkpoint_runtime_only_configuration( workflow = WorkflowBuilder(start_executor=simple_executor).add_edge(simple_executor, simple_executor).build() # Run with runtime checkpoint storage - should create checkpoints - test_message = WorkflowMessage(data="runtime checkpoint test", source_id="test", target_id=None) + test_message = Message(data="runtime checkpoint test", source_id="test", target_id=None) result = await workflow.run(test_message, checkpoint_storage=storage) assert result is not None assert 
result.get_final_state() == WorkflowRunState.IDLE @@ -587,7 +586,7 @@ async def test_workflow_checkpoint_runtime_overrides_buildtime( ) # Run with runtime checkpoint storage override - test_message = WorkflowMessage(data="override test", source_id="test", target_id=None) + test_message = Message(data="override test", source_id="test", target_id=None) result = await workflow.run(test_message, checkpoint_storage=runtime_storage) assert result is not None @@ -911,7 +910,7 @@ async def test_workflow_run_parameter_validation(simple_executor: Executor) -> N """Test that stream properly validate parameter combinations.""" workflow = WorkflowBuilder(start_executor=simple_executor).add_edge(simple_executor, simple_executor).build() - test_message = WorkflowMessage(data="test", source_id="test", target_id=None) + test_message = Message(data="test", source_id="test", target_id=None) # Valid: message only (new run) result = await workflow.run(test_message) @@ -942,7 +941,7 @@ async def test_workflow_run_stream_parameter_validation( """Test stream=True specific parameter validation scenarios.""" workflow = WorkflowBuilder(start_executor=simple_executor).add_edge(simple_executor, simple_executor).build() - test_message = WorkflowMessage(data="test", source_id="test", target_id=None) + test_message = Message(data="test", source_id="test", target_id=None) # Valid: message only (new run) events: list[WorkflowEvent] = [] diff --git a/python/packages/core/tests/workflow/test_workflow_observability.py b/python/packages/core/tests/workflow/test_workflow_observability.py index d5c20ad429..6dcad66a88 100644 --- a/python/packages/core/tests/workflow/test_workflow_observability.py +++ b/python/packages/core/tests/workflow/test_workflow_observability.py @@ -8,7 +8,7 @@ from agent_framework import InMemoryCheckpointStorage, WorkflowBuilder from agent_framework._workflows._executor import Executor, handler -from agent_framework._workflows._runner_context import InProcRunnerContext, 
MessageType, WorkflowMessage +from agent_framework._workflows._runner_context import InProcRunnerContext, Message, MessageType from agent_framework._workflows._state import State from agent_framework._workflows._workflow import Workflow from agent_framework._workflows._workflow_context import WorkflowContext @@ -440,7 +440,7 @@ async def test_message_trace_context_serialization(span_exporter: InMemorySpanEx ctx = InProcRunnerContext(InMemoryCheckpointStorage()) # Create message with trace context - message = WorkflowMessage( + message = Message( data="test", source_id="source", target_id="target", diff --git a/python/packages/devui/agent_framework_devui/_discovery.py b/python/packages/devui/agent_framework_devui/_discovery.py index 83bfde898e..a5fada1ba9 100644 --- a/python/packages/devui/agent_framework_devui/_discovery.py +++ b/python/packages/devui/agent_framework_devui/_discovery.py @@ -542,7 +542,7 @@ def _has_entity_exports(self, file_path: Path) -> bool: This safely checks for module-level assignments like: - agent = Agent(...) - - workflow = WorkflowBuilder()... + - workflow = WorkflowBuilder(start_executor=...)... Args: file_path: Python file to check diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_concurrent.py b/python/packages/orchestrations/agent_framework_orchestrations/_concurrent.py index 9b3cf84d35..062e87806c 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_concurrent.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_concurrent.py @@ -194,7 +194,7 @@ class ConcurrentBuilder: from agent_framework_orchestrations import ConcurrentBuilder # Minimal: use default aggregator (returns list[Message]) - workflow = ConcurrentBuilder().participants([agent1, agent2, agent3]).build() + workflow = ConcurrentBuilder(participants=[agent1, agent2, agent3]).build() # Custom aggregator via callback (sync or async). 
The callback receives @@ -203,14 +203,14 @@ def summarize(results: list[AgentExecutorResponse]) -> str: return " | ".join(r.agent_response.messages[-1].text for r in results) - workflow = ConcurrentBuilder().participants([agent1, agent2, agent3]).with_aggregator(summarize).build() + workflow = ConcurrentBuilder(participants=[agent1, agent2, agent3]).with_aggregator(summarize).build() # Enable checkpoint persistence so runs can resume - workflow = ConcurrentBuilder().participants([agent1, agent2, agent3]).with_checkpointing(storage).build() + workflow = ConcurrentBuilder(participants=[agent1, agent2, agent3], checkpoint_storage=storage).build() # Enable request info before aggregation - workflow = ConcurrentBuilder().participants([agent1, agent2]).with_request_info().build() + workflow = ConcurrentBuilder(participants=[agent1, agent2]).with_request_info().build() """ def __init__( @@ -233,14 +233,14 @@ def __init__( self._checkpoint_storage: CheckpointStorage | None = checkpoint_storage self._request_info_enabled: bool = False self._request_info_filter: set[str] | None = None - self._intermediate_outputs: bool = False + self._intermediate_outputs: bool = intermediate_outputs self._set_participants(participants) def _set_participants(self, participants: Sequence[SupportsAgentRun | Executor]) -> None: """Set participants (internal).""" if self._participants: - raise ValueError("participants() has already been called on this builder instance.") + raise ValueError("participants already set.") if not participants: raise ValueError("participants cannot be empty") @@ -262,7 +262,6 @@ def _set_participants(self, participants: Sequence[SupportsAgentRun | Executor]) raise TypeError(f"participants must be SupportsAgentRun or Executor instances; got {type(p).__name__}") self._participants = list(participants) - return self def with_aggregator( self, @@ -291,7 +290,7 @@ async def aggregate(self, results: list[AgentExecutorResponse], ctx: WorkflowCon await ctx.yield_output(" | 
".join(r.agent_response.messages[-1].text for r in results)) - wf = ConcurrentBuilder().participants([a1, a2, a3]).with_aggregator(CustomAggregator()).build() + wf = ConcurrentBuilder(participants=[a1, a2, a3]).with_aggregator(CustomAggregator()).build() # Callback-based aggregator (string result) @@ -299,7 +298,7 @@ async def summarize(results: list[AgentExecutorResponse]) -> str: return " | ".join(r.agent_response.messages[-1].text for r in results) - wf = ConcurrentBuilder().participants([a1, a2, a3]).with_aggregator(summarize).build() + wf = ConcurrentBuilder(participants=[a1, a2, a3]).with_aggregator(summarize).build() # Callback-based aggregator (yield result) @@ -307,7 +306,7 @@ async def summarize(results: list[AgentExecutorResponse], ctx: WorkflowContext[N await ctx.yield_output(" | ".join(r.agent_response.messages[-1].text for r in results)) - wf = ConcurrentBuilder().participants([a1, a2, a3]).with_aggregator(summarize).build() + wf = ConcurrentBuilder(participants=[a1, a2, a3]).with_aggregator(summarize).build() """ if self._aggregator is not None: raise ValueError("with_aggregator() has already been called on this builder instance.") @@ -321,15 +320,6 @@ async def summarize(results: list[AgentExecutorResponse], ctx: WorkflowContext[N return self - def with_checkpointing(self, checkpoint_storage: CheckpointStorage) -> "ConcurrentBuilder": - """Enable checkpoint persistence using the provided storage backend. - - Args: - checkpoint_storage: CheckpointStorage instance for persisting workflow state - """ - self._checkpoint_storage = checkpoint_storage - return self - def with_request_info( self, *, @@ -363,19 +353,6 @@ def with_request_info( return self - def with_intermediate_outputs(self) -> "ConcurrentBuilder": - """Enable intermediate outputs from agent participants before aggregation. - - When enabled, the workflow returns each agent participant's response or yields - streaming updates as they become available. 
The output of the aggregator will - always be available as the final output of the workflow. - - Returns: - Self for fluent chaining - """ - self._intermediate_outputs = True - return self - def _resolve_participants(self) -> list[Executor]: """Resolve participant instances into Executor objects.""" if not self._participants: @@ -422,7 +399,7 @@ def build(self) -> Workflow: .. code-block:: python - workflow = ConcurrentBuilder().participants([agent1, agent2]).build() + workflow = ConcurrentBuilder(participants=[agent1, agent2]).build() """ # Internal nodes dispatcher = _DispatchToAllParticipants(id="dispatcher") @@ -431,18 +408,14 @@ def build(self) -> Workflow: # Resolve participants and participant factories to executors participants: list[Executor] = self._resolve_participants() - builder = WorkflowBuilder() - builder.set_start_executor(dispatcher) + builder = WorkflowBuilder( + start_executor=dispatcher, + checkpoint_storage=self._checkpoint_storage, + output_executors=[aggregator] if not self._intermediate_outputs else None, + ) # Fan-out for parallel execution builder.add_fan_out_edges(dispatcher, participants) # Direct fan-in to aggregator builder.add_fan_in_edges(participants, aggregator) - if not self._intermediate_outputs: - # Constrain output to aggregator only - builder = builder.with_output_from([aggregator]) - - if self._checkpoint_storage is not None: - builder = builder.with_checkpointing(self._checkpoint_storage) - return builder.build() diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py b/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py index 65db808b83..37f499d763 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py @@ -574,6 +574,8 @@ def __init__( name: str | None = None, participants: Sequence[SupportsAgentRun] | None = None, description: str | None = None, + 
checkpoint_storage: CheckpointStorage | None = None, + termination_condition: TerminationCondition | None = None, ) -> None: r"""Initialize a HandoffBuilder for creating conversational handoff workflows. @@ -592,6 +594,9 @@ def __init__( unique identifier (`.name` is preferred if set, otherwise `.id` is used). description: Optional human-readable description explaining the workflow's purpose. Useful for documentation and observability. + checkpoint_storage: Optional checkpoint storage for enabling workflow state persistence. + termination_condition: Optional callable that receives the full conversation and returns True + (or awaitable True) if the workflow should terminate. """ self._name = name self._description = description @@ -607,7 +612,7 @@ def __init__( self._handoff_config: dict[str, set[HandoffConfiguration]] = {} # Checkpoint related members - self._checkpoint_storage: CheckpointStorage | None = None + self._checkpoint_storage: CheckpointStorage | None = checkpoint_storage # Autonomous mode related self._autonomous_mode: bool = False @@ -616,7 +621,7 @@ def __init__( self._autonomous_mode_enabled_agents: list[str] = [] # Termination related members - self._termination_condition: Callable[[list[Message]], bool | Awaitable[bool]] | None = None + self._termination_condition: Callable[[list[Message]], bool | Awaitable[bool]] | None = termination_condition def participants(self, participants: Sequence[SupportsAgentRun]) -> "HandoffBuilder": """Register the agents that will participate in the handoff workflow. @@ -920,7 +925,9 @@ def build(self) -> Workflow: builder = WorkflowBuilder( name=self._name, description=self._description, - ).set_start_executor(start_executor) + start_executor=start_executor, + checkpoint_storage=self._checkpoint_storage, + ) # Add the appropriate edges # In handoff workflows, all executors are connected, making a fully connected graph. 
@@ -936,10 +943,6 @@ def build(self) -> Workflow: elif len(targets) == 1: builder = builder.add_edge(executor, targets[0]) - # Configure checkpointing if enabled - if self._checkpoint_storage: - builder.with_checkpointing(self._checkpoint_storage) - return builder.build() # region Internal Helper Methods diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_magentic.py b/python/packages/orchestrations/agent_framework_orchestrations/_magentic.py index 433261083c..eec597cdda 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_magentic.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_magentic.py @@ -10,7 +10,7 @@ from collections.abc import Callable, Sequence from dataclasses import dataclass, field from enum import Enum -from typing import Any, ClassVar, TypeVar, cast, overload +from typing import Any, ClassVar, TypeVar, cast from agent_framework import ( AgentResponse, @@ -41,10 +41,6 @@ from typing import override # type: ignore # pragma: no cover else: from typing_extensions import override # type: ignore # pragma: no cover -if sys.version_info >= (3, 11): - from typing import Self # type: ignore # pragma: no cover -else: - from typing_extensions import Self # type: ignore # pragma: no cover logger = logging.getLogger(__name__) @@ -1364,7 +1360,7 @@ class MagenticBuilder: Human-in-the-loop Support: Magentic provides specialized HITL mechanisms via: - - `.with_plan_review()` - Review and approve/revise plans before execution + - `enable_plan_review=True` - Review and approve/revise plans before execution - `.with_human_input_on_stall()` - Intervene when workflow stalls - Tool approval via `function_approval_request` - Approve individual tool calls @@ -1429,12 +1425,12 @@ def __init__( self._manager_factory: Callable[[], MagenticManagerBase] | None = None self._manager_agent_factory: Callable[[], SupportsAgentRun] | None = None self._standard_manager_options: dict[str, Any] = {} - 
self._enable_plan_review: bool = False + self._enable_plan_review: bool = enable_plan_review - self._checkpoint_storage: CheckpointStorage | None = None + self._checkpoint_storage: CheckpointStorage | None = checkpoint_storage # Intermediate outputs - self._intermediate_outputs = False + self._intermediate_outputs = intermediate_outputs self._set_participants(participants) @@ -1461,7 +1457,7 @@ def __init__( def _set_participants(self, participants: Sequence[SupportsAgentRun | Executor]) -> None: """Set participants (internal).""" if self._participants: - raise ValueError("participants have already been set. Call participants(...) at most once.") + raise ValueError("participants already set.") if not participants: raise ValueError("participants cannot be empty.") @@ -1487,8 +1483,6 @@ def _set_participants(self, participants: Sequence[SupportsAgentRun | Executor]) self._participants = named - return self - def with_plan_review(self, enable: bool = True) -> "MagenticBuilder": """Enable or disable human-in-the-loop plan review before task execution. @@ -1514,9 +1508,7 @@ def with_plan_review(self, enable: bool = True) -> "MagenticBuilder": .. 
code-block:: python workflow = ( - MagenticBuilder() - .participants(agent1=agent1) - .with_manager(agent=manager_agent) + MagenticBuilder(participants=[agent1], manager_agent=manager_agent) .with_plan_review(enable=True) .build() ) @@ -1561,11 +1553,7 @@ def with_checkpointing(self, checkpoint_storage: CheckpointStorage) -> "Magentic storage = InMemoryCheckpointStorage() workflow = ( - MagenticBuilder() - .participants([agent1]) - .with_manager(agent=manager_agent) - .with_checkpointing(storage) - .build() + MagenticBuilder(participants=[agent1], manager_agent=manager_agent).with_checkpointing(storage).build() ) # First run @@ -1585,144 +1573,14 @@ def with_checkpointing(self, checkpoint_storage: CheckpointStorage) -> "Magentic self._checkpoint_storage = checkpoint_storage return self - @overload - def with_manager(self, *, manager: MagenticManagerBase) -> Self: - """Configure the workflow with a pre-defined Magentic manager instance. - - Args: - manager: A custom manager instance (subclass of MagenticManagerBase) - - Returns: - Self for method chaining - """ - ... - - @overload - def with_manager(self, *, manager_factory: Callable[[], MagenticManagerBase]) -> Self: - """Configure the workflow with a factory for creating custom Magentic manager instances. - - Args: - manager_factory: Callable that returns a new MagenticManagerBase instance - - Returns: - Self for method chaining - """ - ... 
- - @overload - def with_manager( - self, - *, - agent: SupportsAgentRun, - task_ledger: _MagenticTaskLedger | None = None, - # Prompt overrides - task_ledger_facts_prompt: str | None = None, - task_ledger_plan_prompt: str | None = None, - task_ledger_full_prompt: str | None = None, - task_ledger_facts_update_prompt: str | None = None, - task_ledger_plan_update_prompt: str | None = None, - progress_ledger_prompt: str | None = None, - final_answer_prompt: str | None = None, - # Limits - max_stall_count: int = 3, - max_reset_count: int | None = None, - max_round_count: int | None = None, - ) -> Self: - """Configure the workflow with an agent for creating a standard manager. - - This will create a StandardMagenticManager using the provided agent. - - Args: - agent: SupportsAgentRun instance for the standard magentic manager - (`StandardMagenticManager`) - task_ledger: Optional custom task ledger implementation for specialized - prompting or structured output requirements - task_ledger_facts_prompt: Custom prompt template for extracting facts from - task description - task_ledger_plan_prompt: Custom prompt template for generating initial plan - task_ledger_full_prompt: Custom prompt template for complete task ledger - (facts + plan combined) - task_ledger_facts_update_prompt: Custom prompt template for updating facts - based on agent progress - task_ledger_plan_update_prompt: Custom prompt template for replanning when - needed - progress_ledger_prompt: Custom prompt template for assessing progress and - determining next actions - final_answer_prompt: Custom prompt template for synthesizing final response - when task is complete - max_stall_count: Maximum consecutive rounds without progress before triggering - replan (default 3). Set to 0 to disable stall detection. - max_reset_count: Maximum number of complete resets allowed before failing. - None means unlimited resets. - max_round_count: Maximum total coordination rounds before stopping with - partial result. 
None means unlimited rounds. - - Returns: - Self for method chaining - """ - ... - - @overload - def with_manager( - self, - *, - agent_factory: Callable[[], SupportsAgentRun], - task_ledger: _MagenticTaskLedger | None = None, - # Prompt overrides - task_ledger_facts_prompt: str | None = None, - task_ledger_plan_prompt: str | None = None, - task_ledger_full_prompt: str | None = None, - task_ledger_facts_update_prompt: str | None = None, - task_ledger_plan_update_prompt: str | None = None, - progress_ledger_prompt: str | None = None, - final_answer_prompt: str | None = None, - # Limits - max_stall_count: int = 3, - max_reset_count: int | None = None, - max_round_count: int | None = None, - ) -> Self: - """Configure the workflow with a factory for creating the manager agent. - - This will create a StandardMagenticManager using the provided agent factory. - - Args: - agent_factory: Callable that returns a new SupportsAgentRun instance for the standard - magentic manager (`StandardMagenticManager`) - task_ledger: Optional custom task ledger implementation for specialized - prompting or structured output requirements - task_ledger_facts_prompt: Custom prompt template for extracting facts from - task description - task_ledger_plan_prompt: Custom prompt template for generating initial plan - task_ledger_full_prompt: Custom prompt template for complete task ledger - (facts + plan combined) - task_ledger_facts_update_prompt: Custom prompt template for updating facts - based on agent progress - task_ledger_plan_update_prompt: Custom prompt template for replanning when - needed - progress_ledger_prompt: Custom prompt template for assessing progress and - determining next actions - final_answer_prompt: Custom prompt template for synthesizing final response - when task is complete - max_stall_count: Maximum consecutive rounds without progress before triggering - replan (default 3). Set to 0 to disable stall detection. 
- max_reset_count: Maximum number of complete resets allowed before failing. - None means unlimited resets. - max_round_count: Maximum total coordination rounds before stopping with - partial result. None means unlimited rounds. - - Returns: - Self for method chaining - """ - ... - - def with_manager( + def _set_manager( self, *, manager: MagenticManagerBase | None = None, manager_factory: Callable[[], MagenticManagerBase] | None = None, - agent_factory: Callable[[], SupportsAgentRun] | None = None, + manager_agent: SupportsAgentRun | None = None, + manager_agent_factory: Callable[[], SupportsAgentRun] | None = None, # Constructor args for StandardMagenticManager when manager is not provided - agent: SupportsAgentRun | None = None, task_ledger: _MagenticTaskLedger | None = None, # Prompt overrides task_ledger_facts_prompt: str | None = None, @@ -1736,123 +1594,37 @@ def with_manager( max_stall_count: int = 3, max_reset_count: int | None = None, max_round_count: int | None = None, - ) -> Self: - """Configure the workflow manager for task planning and agent coordination. - - The manager is responsible for creating plans, selecting agents, tracking progress, - and deciding when to replan or complete. This method supports four usage patterns: - - 1. **Provide existing manager**: Pass a pre-configured manager instance (custom - or standard) for full control over behavior - 2. **Factory for custom manager**: Pass a callable that returns a new manager - instance for more advanced scenarios so that the builder can be reused - 3. **Factory for agent**: Pass a callable that returns a new agent instance to - automatically create a `StandardMagenticManager` - 4. **Auto-create with agent**: Pass an agent to automatically create a `StandardMagenticManager` + ) -> None: + """Configure the workflow manager for task planning and agent coordination (internal). Args: - manager: Pre-configured manager instance (`StandardMagenticManager` or custom - `MagenticManagerBase` subclass). 
If provided, all other arguments are ignored. + manager: Pre-configured manager instance. manager_factory: Callable that returns a new manager instance. - agent_factory: Callable that returns a new agent instance. - agent: Agent instance for generating plans and decisions. The agent's - configured instructions and options (temperature, seed, etc.) will be - applied. - task_ledger: Optional custom task ledger implementation for specialized - prompting or structured output requirements - task_ledger_facts_prompt: Custom prompt template for extracting facts from - task description - task_ledger_plan_prompt: Custom prompt template for generating initial plan - task_ledger_full_prompt: Custom prompt template for complete task ledger - (facts + plan combined) - task_ledger_facts_update_prompt: Custom prompt template for updating facts - based on agent progress - task_ledger_plan_update_prompt: Custom prompt template for replanning when - needed - progress_ledger_prompt: Custom prompt template for assessing progress and - determining next actions - final_answer_prompt: Custom prompt template for synthesizing final response - when task is complete - max_stall_count: Maximum consecutive rounds without progress before triggering - replan (default 3). Set to 0 to disable stall detection. - max_reset_count: Maximum number of complete resets allowed before failing. - None means unlimited resets. - max_round_count: Maximum total coordination rounds before stopping with - partial result. None means unlimited rounds. - - Returns: - Self for method chaining + manager_agent: Agent instance for creating a StandardMagenticManager. + manager_agent_factory: Callable that returns a new agent instance for creating a StandardMagenticManager. + task_ledger: Optional custom task ledger implementation. + task_ledger_facts_prompt: Custom prompt for extracting facts. + task_ledger_plan_prompt: Custom prompt for generating initial plan. 
+ task_ledger_full_prompt: Custom prompt for complete task ledger. + task_ledger_facts_update_prompt: Custom prompt for updating facts. + task_ledger_plan_update_prompt: Custom prompt for replanning. + progress_ledger_prompt: Custom prompt for assessing progress. + final_answer_prompt: Custom prompt for synthesizing final response. + max_stall_count: Max consecutive rounds without progress before replan (default 3). + max_reset_count: Max number of resets allowed. None means unlimited. + max_round_count: Max total coordination rounds. None means unlimited. Raises: - ValueError: If manager is None and agent is not provided. - - Usage with agent (recommended): - - .. code-block:: python - - from agent_framework import Agent, ChatOptions - from agent_framework.openai import OpenAIChatClient - - # Configure manager agent with specific options and instructions - manager_agent = Agent( - name="Coordinator", - client=OpenAIChatClient(model_id="gpt-4o"), - options=ChatOptions(temperature=0.3, seed=42), - instructions="Be concise and focus on accuracy", - ) - - workflow = ( - MagenticBuilder() - .participants(agent1=agent1, agent2=agent2) - .with_manager( - agent=manager_agent, - max_round_count=20, - max_stall_count=3, - ) - .build() - ) - - Usage with custom manager: - - .. code-block:: python - - class MyManager(MagenticManagerBase): - async def plan(self, context: MagenticContext) -> Message: - # Custom planning logic - return Message(role="assistant", text="...") - - - manager = MyManager() - workflow = MagenticBuilder().participants(agent1=agent1).with_manager(manager).build() - - Usage with prompt customization: - - .. 
code-block:: python - - workflow = ( - MagenticBuilder() - .participants(coder=coder_agent, reviewer=reviewer_agent) - .with_manager( - agent=manager_agent, - task_ledger_plan_prompt="Create a detailed step-by-step plan...", - progress_ledger_prompt="Assess progress and decide next action...", - max_stall_count=2, - ) - .build() - ) - - Notes: - - StandardMagenticManager uses structured LLM calls for all decisions - - Custom managers can implement alternative selection strategies - - Prompt templates support Jinja2-style variable substitution - - Stall detection helps prevent infinite loops in stuck scenarios - - The agent's instructions are used as system instructions for all manager prompts + ValueError: If a manager has already been set or if none or multiple + of the primary parameters are provided. """ if any([self._manager, self._manager_factory, self._manager_agent_factory]): - raise ValueError("with_manager() has already been called on this builder instance.") + raise ValueError("Manager has already been configured. Set manager config once only.") - if sum(x is not None for x in [manager, agent, manager_factory, agent_factory]) != 1: - raise ValueError("Exactly one of manager, agent, manager_factory, or agent_factory must be provided.") + if sum(x is not None for x in [manager, manager_agent, manager_factory, manager_agent_factory]) != 1: + raise ValueError( + "Exactly one of manager, manager_agent, manager_factory, or manager_agent_factory must be provided." 
+ ) def _log_warning_if_constructor_args_provided() -> None: if any( @@ -1871,14 +1643,14 @@ def _log_warning_if_constructor_args_provided() -> None: max_round_count, ] ): - logger.warning("Customer manager provided; all other with_manager() arguments will be ignored.") + logger.warning("Custom manager provided; all other manager arguments will be ignored.") if manager is not None: self._manager = manager _log_warning_if_constructor_args_provided() - elif agent is not None: + elif manager_agent is not None: self._manager = StandardMagenticManager( - agent=agent, + agent=manager_agent, task_ledger=task_ledger, task_ledger_facts_prompt=task_ledger_facts_prompt, task_ledger_plan_prompt=task_ledger_plan_prompt, @@ -1894,8 +1666,8 @@ def _log_warning_if_constructor_args_provided() -> None: elif manager_factory is not None: self._manager_factory = manager_factory _log_warning_if_constructor_args_provided() - elif agent_factory is not None: - self._manager_agent_factory = agent_factory + elif manager_agent_factory is not None: + self._manager_agent_factory = manager_agent_factory self._standard_manager_options = { "task_ledger": task_ledger, "task_ledger_facts_prompt": task_ledger_facts_prompt, @@ -1910,21 +1682,6 @@ def _log_warning_if_constructor_args_provided() -> None: "max_round_count": max_round_count, } - return self - - def with_intermediate_outputs(self) -> Self: - """Enable intermediate outputs from agent participants before aggregation. - - When enabled, the workflow returns each agent participant's response or yields - streaming updates as they become available. The output of the orchestrator will - always be available as the final output of the workflow. - - Returns: - Self for fluent chaining - """ - self._intermediate_outputs = True - return self - def _resolve_orchestrator(self, participants: Sequence[Executor]) -> Executor: """Determine the orchestrator to use for the workflow. 
@@ -1932,8 +1689,11 @@ def _resolve_orchestrator(self, participants: Sequence[Executor]) -> Executor: participants: List of resolved participant executors """ if all(x is None for x in [self._manager, self._manager_factory, self._manager_agent_factory]): - raise ValueError("No manager configured. Call with_manager(...) before building the orchestrator.") - # We don't need to check if multiple are set since that is handled in with_orchestrator() + raise ValueError( + "No manager configured. " + "Pass manager, manager_factory, manager_agent, or manager_agent_factory to the constructor." + ) + # We don't need to check if multiple are set since that is handled in _set_manager() if self._manager: manager = self._manager @@ -1947,7 +1707,10 @@ def _resolve_orchestrator(self, participants: Sequence[Executor]) -> Executor: ) else: # This should never be reached due to the checks above - raise RuntimeError("Manager could not be resolved. Please set the manager properly with with_manager().") + raise RuntimeError( + "Manager could not be resolved. " + "Pass manager, manager_factory, manager_agent, or manager_agent_factory to the constructor." 
+ ) return MagenticOrchestrator( manager=manager, @@ -1983,17 +1746,15 @@ def build(self) -> Workflow: orchestrator: Executor = self._resolve_orchestrator(participants) # Build workflow graph - workflow_builder = WorkflowBuilder().set_start_executor(orchestrator) + workflow_builder = WorkflowBuilder( + start_executor=orchestrator, + checkpoint_storage=self._checkpoint_storage, + output_executors=[orchestrator] if not self._intermediate_outputs else None, + ) for participant in participants: # Orchestrator and participant bi-directional edges workflow_builder = workflow_builder.add_edge(orchestrator, participant) workflow_builder = workflow_builder.add_edge(participant, orchestrator) - if self._checkpoint_storage is not None: - workflow_builder = workflow_builder.with_checkpointing(self._checkpoint_storage) - - if not self._intermediate_outputs: - # Constrain output to orchestrator only - workflow_builder = workflow_builder.with_output_from([orchestrator]) return workflow_builder.build() diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_sequential.py b/python/packages/orchestrations/agent_framework_orchestrations/_sequential.py index 2779242592..5ef4f7fe8c 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_sequential.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_sequential.py @@ -122,18 +122,17 @@ class SequentialBuilder: from agent_framework_orchestrations import SequentialBuilder # With agent instances - workflow = SequentialBuilder().participants([agent1, agent2, summarizer_exec]).build() + workflow = SequentialBuilder(participants=[agent1, agent2, summarizer_exec]).build() # Enable checkpoint persistence - workflow = SequentialBuilder().participants([agent1, agent2]).with_checkpointing(storage).build() + workflow = SequentialBuilder(participants=[agent1, agent2], checkpoint_storage=storage).build() # Enable request info for mid-workflow feedback (pauses before each agent) - workflow = 
SequentialBuilder().participants([agent1, agent2]).with_request_info().build() + workflow = SequentialBuilder(participants=[agent1, agent2]).with_request_info().build() # Enable request info only for specific agents workflow = ( - SequentialBuilder() - .participants([agent1, agent2, agent3]) + SequentialBuilder(participants=[agent1, agent2, agent3]) .with_request_info(agents=[agent2]) # Only pause before agent2 .build() ) @@ -157,14 +156,14 @@ def __init__( self._checkpoint_storage: CheckpointStorage | None = checkpoint_storage self._request_info_enabled: bool = False self._request_info_filter: set[str] | None = None - self._intermediate_outputs: bool = False + self._intermediate_outputs: bool = intermediate_outputs self._set_participants(participants) def _set_participants(self, participants: Sequence[SupportsAgentRun | Executor]) -> None: """Set participants (internal).""" if self._participants: - raise ValueError("participants() has already been called on this builder instance.") + raise ValueError("participants already set.") if not participants: raise ValueError("participants cannot be empty") @@ -185,12 +184,6 @@ def _set_participants(self, participants: Sequence[SupportsAgentRun | Executor]) seen_agent_ids.add(pid) self._participants = list(participants) - return self - - def with_checkpointing(self, checkpoint_storage: CheckpointStorage) -> "SequentialBuilder": - """Enable checkpointing for the built workflow using the provided storage.""" - self._checkpoint_storage = checkpoint_storage - return self def with_request_info( self, @@ -225,19 +218,6 @@ def with_request_info( return self - def with_intermediate_outputs(self) -> "SequentialBuilder": - """Enable intermediate outputs from agent participants. - - When enabled, the workflow returns each agent participant's response or yields - streaming updates as they become available. The output of the last participant - will always be available as the final output of the workflow. 
- - Returns: - Self for fluent chaining - """ - self._intermediate_outputs = True - return self - def _resolve_participants(self) -> list[Executor]: """Resolve participant instances into Executor objects.""" if not self._participants: @@ -281,8 +261,11 @@ def build(self) -> Workflow: # Resolve participants and participant factories to executors participants: list[Executor] = self._resolve_participants() - builder = WorkflowBuilder() - builder.set_start_executor(input_conv) + builder = WorkflowBuilder( + start_executor=input_conv, + checkpoint_storage=self._checkpoint_storage, + output_executors=[end] if not self._intermediate_outputs else None, + ) # Start of the chain is the input normalizer prior: Executor | SupportsAgentRun = input_conv @@ -292,11 +275,4 @@ def build(self) -> Workflow: # Terminate with the final conversation builder.add_edge(prior, end) - if not self._intermediate_outputs: - # Constrain output to end only - builder = builder.with_output_from([end]) - - if self._checkpoint_storage is not None: - builder = builder.with_checkpointing(self._checkpoint_storage) - return builder.build() diff --git a/python/packages/orchestrations/tests/test_group_chat.py b/python/packages/orchestrations/tests/test_group_chat.py index 5c4bce8741..01a1b6c7ac 100644 --- a/python/packages/orchestrations/tests/test_group_chat.py +++ b/python/packages/orchestrations/tests/test_group_chat.py @@ -178,13 +178,12 @@ async def test_group_chat_builder_basic_flow() -> None: alpha = StubAgent("alpha", "ack from alpha") beta = StubAgent("beta", "ack from beta") - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector, orchestrator_name="manager") - .participants([alpha, beta]) - .with_max_rounds(2) # Limit rounds to prevent infinite loop - .build() - ) + workflow = GroupChatBuilder( + participants=[alpha, beta], + max_rounds=2, # Limit rounds to prevent infinite loop + selection_func=selector, + orchestrator_name="manager", + ).build() outputs: 
list[list[Message]] = [] async for event in workflow.run("coordinate task", stream=True): @@ -205,13 +204,12 @@ async def test_group_chat_as_agent_accepts_conversation() -> None: alpha = StubAgent("alpha", "ack from alpha") beta = StubAgent("beta", "ack from beta") - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector, orchestrator_name="manager") - .participants([alpha, beta]) - .with_max_rounds(2) # Limit rounds to prevent infinite loop - .build() - ) + workflow = GroupChatBuilder( + participants=[alpha, beta], + max_rounds=2, # Limit rounds to prevent infinite loop + selection_func=selector, + orchestrator_name="manager", + ).build() agent = workflow.as_agent(name="group-chat-agent") conversation = [ @@ -233,10 +231,11 @@ def test_build_without_manager_raises_error(self) -> None: """Test that building without a manager raises ValueError.""" agent = StubAgent("test", "response") - builder = GroupChatBuilder().participants([agent]) + builder = GroupChatBuilder(participants=[agent]) with pytest.raises( - ValueError, match=r"No orchestrator has been configured\. Call with_orchestrator\(\) to set one\." + ValueError, + match=r"No orchestrator has been configured\.", ): builder.build() @@ -256,47 +255,35 @@ def selector(state: GroupChatState) -> str: with pytest.raises( ValueError, - match=r"No participants provided\. 
Call \.participants\(\) or \.register_participants\(\) first\.", + match=r"Either participants or participant_factories must be provided\.", ): - builder.build() + GroupChatBuilder() def test_duplicate_manager_configuration_raises_error(self) -> None: - """Test that configuring multiple managers raises ValueError.""" + """Test that configuring multiple orchestrator options raises ValueError.""" + agent = StubAgent("test", "response") def selector(state: GroupChatState) -> str: return "agent" - builder = GroupChatBuilder().with_orchestrator(selection_func=selector) - with pytest.raises( ValueError, - match=r"A selection function has already been configured\. Call with_orchestrator\(\.\.\.\) once only\.", + match=r"Exactly one of", ): - builder.with_orchestrator(selection_func=selector) + GroupChatBuilder(participants=[agent], selection_func=selector, orchestrator_agent=StubManagerAgent()) def test_empty_participants_raises_error(self) -> None: """Test that empty participants list raises ValueError.""" - - def selector(state: GroupChatState) -> str: - return "agent" - - builder = GroupChatBuilder().with_orchestrator(selection_func=selector) - with pytest.raises(ValueError, match="participants cannot be empty"): - builder.participants([]) + GroupChatBuilder(participants=[]) def test_duplicate_participant_names_raises_error(self) -> None: """Test that duplicate participant names raise ValueError.""" agent1 = StubAgent("test", "response1") agent2 = StubAgent("test", "response2") - def selector(state: GroupChatState) -> str: - return "agent" - - builder = GroupChatBuilder().with_orchestrator(selection_func=selector) - with pytest.raises(ValueError, match="Duplicate participant name 'test'"): - builder.participants([agent1, agent2]) + GroupChatBuilder(participants=[agent1, agent2]) def test_agent_without_name_raises_error(self) -> None: """Test that agent without name attribute raises ValueError.""" @@ -321,25 +308,15 @@ async def _run_impl(self) -> AgentResponse: agent = 
AgentWithoutName() - def selector(state: GroupChatState) -> str: - return "agent" - - builder = GroupChatBuilder().with_orchestrator(selection_func=selector) - with pytest.raises(ValueError, match="SupportsAgentRun participants must have a non-empty name"): - builder.participants([agent]) + GroupChatBuilder(participants=[agent]) def test_empty_participant_name_raises_error(self) -> None: """Test that empty participant name raises ValueError.""" agent = StubAgent("", "response") # Agent with empty name - def selector(state: GroupChatState) -> str: - return "agent" - - builder = GroupChatBuilder().with_orchestrator(selection_func=selector) - with pytest.raises(ValueError, match="SupportsAgentRun participants must have a non-empty name"): - builder.participants([agent]) + GroupChatBuilder(participants=[agent]) class TestGroupChatWorkflow: @@ -356,13 +333,11 @@ def selector(state: GroupChatState) -> str: agent = StubAgent("agent", "response") - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector) - .participants([agent]) - .with_max_rounds(2) # Limit to 2 rounds - .build() - ) + workflow = GroupChatBuilder( + participants=[agent], + max_rounds=2, # Limit to 2 rounds + selection_func=selector, + ).build() outputs: list[list[Message]] = [] async for event in workflow.run("test task", stream=True): @@ -391,13 +366,11 @@ def termination_condition(conversation: list[Message]) -> bool: agent = StubAgent("agent", "response") - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector) - .participants([agent]) - .with_termination_condition(termination_condition) - .build() - ) + workflow = GroupChatBuilder( + participants=[agent], + termination_condition=termination_condition, + selection_func=selector, + ).build() outputs: list[list[Message]] = [] async for event in workflow.run("test task", stream=True): @@ -419,13 +392,11 @@ async def test_termination_condition_agent_manager_finalizes(self) -> None: manager = 
StubManagerAgent() worker = StubAgent("agent", "response") - workflow = ( - GroupChatBuilder() - .with_orchestrator(agent=manager) - .participants([worker]) - .with_termination_condition(lambda conv: any(msg.author_name == "agent" for msg in conv)) - .build() - ) + workflow = GroupChatBuilder( + participants=[worker], + termination_condition=lambda conv: any(msg.author_name == "agent" for msg in conv), + orchestrator_agent=manager, + ).build() outputs: list[list[Message]] = [] async for event in workflow.run("test task", stream=True): @@ -447,7 +418,7 @@ def selector(state: GroupChatState) -> str: agent = StubAgent("agent", "response") - workflow = GroupChatBuilder().with_orchestrator(selection_func=selector).participants([agent]).build() + workflow = GroupChatBuilder(participants=[agent], selection_func=selector).build() with pytest.raises(RuntimeError, match="Selection function returned unknown participant 'unknown_agent'"): async for _ in workflow.run("test task", stream=True): @@ -466,14 +437,12 @@ def selector(state: GroupChatState) -> str: agent = StubAgent("agent", "response") storage = InMemoryCheckpointStorage() - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector) - .participants([agent]) - .with_max_rounds(1) - .with_checkpointing(storage) - .build() - ) + workflow = GroupChatBuilder( + participants=[agent], + max_rounds=1, + checkpoint_storage=storage, + selection_func=selector, + ).build() outputs: list[list[Message]] = [] async for event in workflow.run("test task", stream=True): @@ -496,13 +465,7 @@ def selector(state: GroupChatState) -> str: agent = StubAgent("agent", "response") - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector) - .participants([agent]) - .with_max_rounds(1) - .build() - ) + workflow = GroupChatBuilder(participants=[agent], max_rounds=1, selection_func=selector).build() with pytest.raises(ValueError, match="At least one Message is required to start the group chat 
workflow."): async for _ in workflow.run([], stream=True): @@ -520,13 +483,7 @@ def selector(state: GroupChatState) -> str: agent = StubAgent("agent", "response") - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector) - .participants([agent]) - .with_max_rounds(1) - .build() - ) + workflow = GroupChatBuilder(participants=[agent], max_rounds=1, selection_func=selector).build() outputs: list[list[Message]] = [] async for event in workflow.run("test string", stream=True): @@ -549,13 +506,7 @@ def selector(state: GroupChatState) -> str: agent = StubAgent("agent", "response") - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector) - .participants([agent]) - .with_max_rounds(1) - .build() - ) + workflow = GroupChatBuilder(participants=[agent], max_rounds=1, selection_func=selector).build() outputs: list[list[Message]] = [] async for event in workflow.run(task_message, stream=True): @@ -581,13 +532,7 @@ def selector(state: GroupChatState) -> str: agent = StubAgent("agent", "response") - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector) - .participants([agent]) - .with_max_rounds(1) - .build() - ) + workflow = GroupChatBuilder(participants=[agent], max_rounds=1, selection_func=selector).build() outputs: list[list[Message]] = [] async for event in workflow.run(conversation, stream=True): @@ -613,13 +558,11 @@ def selector(state: GroupChatState) -> str: agent = StubAgent("agent", "response") - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector) - .participants([agent]) - .with_max_rounds(1) # Very low limit - .build() - ) + workflow = GroupChatBuilder( + participants=[agent], + max_rounds=1, # Very low limit + selection_func=selector, + ).build() outputs: list[list[Message]] = [] async for event in workflow.run("test", stream=True): @@ -648,13 +591,11 @@ def selector(state: GroupChatState) -> str: agent = StubAgent("agent", "response from agent") - workflow = ( - 
GroupChatBuilder() - .with_orchestrator(selection_func=selector) - .participants([agent]) - .with_max_rounds(1) # Hit limit after first response - .build() - ) + workflow = GroupChatBuilder( + participants=[agent], + max_rounds=1, # Hit limit after first response + selection_func=selector, + ).build() outputs: list[list[Message]] = [] async for event in workflow.run("test", stream=True): @@ -680,13 +621,7 @@ async def test_group_chat_checkpoint_runtime_only() -> None: agent_b = StubAgent("agentB", "Reply from B") selector = make_sequence_selector() - wf = ( - GroupChatBuilder() - .participants([agent_a, agent_b]) - .with_orchestrator(selection_func=selector) - .with_max_rounds(2) - .build() - ) + wf = GroupChatBuilder(participants=[agent_a, agent_b], max_rounds=2, selection_func=selector).build() baseline_output: list[Message] | None = None async for ev in wf.run("runtime checkpoint test", checkpoint_storage=storage, stream=True): @@ -718,14 +653,12 @@ async def test_group_chat_checkpoint_runtime_overrides_buildtime() -> None: agent_b = StubAgent("agentB", "Reply from B") selector = make_sequence_selector() - wf = ( - GroupChatBuilder() - .participants([agent_a, agent_b]) - .with_orchestrator(selection_func=selector) - .with_max_rounds(2) - .with_checkpointing(buildtime_storage) - .build() - ) + wf = GroupChatBuilder( + participants=[agent_a, agent_b], + max_rounds=2, + checkpoint_storage=buildtime_storage, + selection_func=selector, + ).build() baseline_output: list[Message] | None = None async for ev in wf.run("override test", checkpoint_storage=runtime_storage, stream=True): if ev.type == "output": @@ -765,10 +698,12 @@ async def selector(state: GroupChatState) -> str: return "alpha" workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector, orchestrator_name="manager") - .participants([alpha, beta]) - .with_max_rounds(2) + GroupChatBuilder( + participants=[alpha, beta], + max_rounds=2, + selection_func=selector, + 
orchestrator_name="manager", + ) .with_request_info(agents=["beta"]) # Only pause before beta runs .build() ) @@ -817,10 +752,12 @@ async def selector(state: GroupChatState) -> str: return "alpha" workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector, orchestrator_name="manager") - .participants([alpha]) - .with_max_rounds(1) + GroupChatBuilder( + participants=[alpha], + max_rounds=1, + selection_func=selector, + orchestrator_name="manager", + ) .with_request_info() # No filter - pause for all .build() ) @@ -839,12 +776,13 @@ async def selector(state: GroupChatState) -> str: def test_group_chat_builder_with_request_info_returns_self(): """Test that with_request_info() returns self for method chaining.""" - builder = GroupChatBuilder() + agent = StubAgent("test", "response") + builder = GroupChatBuilder(participants=[agent]) result = builder.with_request_info() assert result is builder # Also test with agents parameter - builder2 = GroupChatBuilder() + builder2 = GroupChatBuilder(participants=[agent]) result2 = builder2.with_request_info(agents=["test"]) assert result2 is builder2 @@ -861,16 +799,15 @@ def selector(state: GroupChatState) -> str: def agent_factory() -> Agent: return cast(Agent, StubManagerAgent()) - builder = GroupChatBuilder().with_orchestrator(selection_func=selector) + agent = StubAgent("test", "response") - # Already has a selection_func, should fail on second call - with pytest.raises(ValueError, match=r"A selection function has already been configured"): - builder.with_orchestrator(selection_func=selector) + # Both selection_func and orchestrator_agent provided simultaneously - should fail + with pytest.raises(ValueError, match=r"Exactly one of"): + GroupChatBuilder(participants=[agent], selection_func=selector, orchestrator_agent=StubManagerAgent()) - # Test with agent_factory - builder2 = GroupChatBuilder().with_orchestrator(agent=agent_factory) - with pytest.raises(ValueError, match=r"A factory has already been 
configured"): - builder2.with_orchestrator(agent=agent_factory) + # Test with agent_factory - already has factory, should fail with second config + with pytest.raises(ValueError, match=r"Exactly one of"): + GroupChatBuilder(participants=[agent], orchestrator_agent=agent_factory, selection_func=selector) def test_group_chat_builder_requires_exactly_one_orchestrator_option(): @@ -882,13 +819,15 @@ def selector(state: GroupChatState) -> str: def agent_factory() -> Agent: return cast(Agent, StubManagerAgent()) - # No options provided - with pytest.raises(ValueError, match="Exactly one of"): - GroupChatBuilder().with_orchestrator() # type: ignore + agent = StubAgent("test", "response") + + # No orchestrator options provided - only fails at build() time + with pytest.raises(ValueError, match="No orchestrator has been configured"): + GroupChatBuilder(participants=[agent]).build() # Multiple options provided with pytest.raises(ValueError, match="Exactly one of"): - GroupChatBuilder().with_orchestrator(selection_func=selector, agent=agent_factory) # type: ignore + GroupChatBuilder(participants=[agent], selection_func=selector, orchestrator_agent=agent_factory) async def test_group_chat_with_orchestrator_factory_returning_chat_agent(): @@ -959,7 +898,7 @@ def agent_factory() -> Agent: alpha = StubAgent("alpha", "reply from alpha") beta = StubAgent("beta", "reply from beta") - workflow = GroupChatBuilder().participants([alpha, beta]).with_orchestrator(agent=agent_factory).build() + workflow = GroupChatBuilder(participants=[alpha, beta], orchestrator_agent=agent_factory).build() # Factory should be called during build assert factory_call_count == 1 @@ -1003,7 +942,7 @@ def orchestrator_factory() -> BaseGroupChatOrchestrator: alpha = StubAgent("alpha", "reply from alpha") - workflow = GroupChatBuilder().participants([alpha]).with_orchestrator(orchestrator=orchestrator_factory).build() + workflow = GroupChatBuilder(participants=[alpha], orchestrator=orchestrator_factory).build() 
# Factory should be called during build assert factory_call_count == 1 @@ -1023,7 +962,7 @@ def agent_factory() -> Agent: alpha = StubAgent("alpha", "reply from alpha") beta = StubAgent("beta", "reply from beta") - builder = GroupChatBuilder().participants([alpha, beta]).with_orchestrator(agent=agent_factory) + builder = GroupChatBuilder(participants=[alpha, beta], orchestrator_agent=agent_factory) # Build first workflow wf1 = builder.build() @@ -1049,13 +988,13 @@ def invalid_factory() -> Any: TypeError, match=r"Orchestrator factory must return Agent or BaseGroupChatOrchestrator instance", ): - (GroupChatBuilder().participants([alpha]).with_orchestrator(orchestrator=invalid_factory).build()) + GroupChatBuilder(participants=[alpha], orchestrator=invalid_factory).build() with pytest.raises( TypeError, match=r"Orchestrator factory must return Agent or BaseGroupChatOrchestrator instance", ): - (GroupChatBuilder().participants([alpha]).with_orchestrator(agent=invalid_factory).build()) + GroupChatBuilder(participants=[alpha], orchestrator_agent=invalid_factory).build() # endregion diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_download.py b/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_download.py index f7ac089528..157e0c3397 100644 --- a/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_download.py +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_download.py @@ -6,10 +6,10 @@ from agent_framework import ( AgentResponseUpdate, + Annotation, Agent, - CitationAnnotation, + Content, HostedCodeInterpreterTool, - HostedFileContent, ) from agent_framework.azure import AzureAIProjectAgentProvider from azure.identity.aio import AzureCliCredential @@ -33,19 +33,17 @@ ) -async def download_container_files( - file_contents: list[CitationAnnotation | HostedFileContent], agent: Agent -) -> list[Path]: +async def 
download_container_files(file_contents: list[Annotation | Content], agent: Agent) -> list[Path]: """Download container files using the OpenAI containers API. Code interpreter generates files in containers, which require both file_id and container_id to download. The container_id is stored in additional_properties. - This function works for both streaming (HostedFileContent) and non-streaming - (CitationAnnotation) responses. + This function works for both streaming (Content with type="hosted_file") and non-streaming + (Annotation) responses. Args: - file_contents: List of CitationAnnotation or HostedFileContent objects + file_contents: List of Annotation or Content objects containing file_id and container_id. agent: The Agent instance with access to the AzureAIClient. @@ -63,28 +61,36 @@ async def download_container_files( print(f"\nDownloading {len(file_contents)} container file(s) to {output_dir.absolute()}...") # Access the OpenAI client from AzureAIClient - openai_client = agent.client.client + openai_client = agent.client.client # type: ignore[attr-defined] downloaded_files: list[Path] = [] for content in file_contents: - file_id = content.file_id + # Handle both Annotation (TypedDict) and Content objects + if isinstance(content, dict): # Annotation TypedDict + file_id = content.get("file_id") + additional_props = content.get("additional_properties", {}) + url = content.get("url") + else: # Content object + file_id = content.file_id + additional_props = content.additional_properties or {} + url = content.uri # Extract container_id from additional_properties - if not content.additional_properties or "container_id" not in content.additional_properties: + if not additional_props or "container_id" not in additional_props: print(f" File {file_id}: ✗ Missing container_id") continue - container_id = content.additional_properties["container_id"] + container_id = additional_props["container_id"] # Extract filename based on content type - if isinstance(content, 
CitationAnnotation): - filename = content.url or f"{file_id}.txt" + if isinstance(content, dict): # Annotation TypedDict + filename = url or f"{file_id}.txt" # Extract filename from sandbox URL if present (e.g., sandbox:/mnt/data/sample.txt) if filename.startswith("sandbox:"): filename = filename.split("/")[-1] - else: # HostedFileContent - filename = content.additional_properties.get("filename") or f"{file_id}.txt" + else: # Content + filename = additional_props.get("filename") or f"{file_id}.txt" output_path = output_dir / filename @@ -132,17 +138,18 @@ async def non_streaming_example() -> None: print(f"Agent: {result.text}\n") # Check for annotations in the response - annotations_found: list[CitationAnnotation] = [] + annotations_found: list[Annotation] = [] # AgentResponse has messages property, which contains Message objects for message in result.messages: for content in message.contents: if content.type == "text" and content.annotations: for annotation in content.annotations: - if isinstance(annotation, CitationAnnotation) and annotation.file_id: + if annotation.get("file_id"): annotations_found.append(annotation) - print(f"Found file annotation: file_id={annotation.file_id}") - if annotation.additional_properties and "container_id" in annotation.additional_properties: - print(f" container_id={annotation.additional_properties['container_id']}") + print(f"Found file annotation: file_id={annotation['file_id']}") + additional_props = annotation.get("additional_properties", {}) + if additional_props and "container_id" in additional_props: + print(f" container_id={additional_props['container_id']}") if annotations_found: print(f"SUCCESS: Found {len(annotations_found)} file annotation(s)") @@ -173,7 +180,7 @@ async def streaming_example() -> None: ) print(f"User: {QUERY}\n") - file_contents_found: list[HostedFileContent] = [] + file_contents_found: list[Content] = [] text_chunks: list[str] = [] async for update in agent.run(QUERY, stream=True): @@ -184,11 +191,11 
@@ async def streaming_example() -> None: text_chunks.append(content.text) if content.annotations: for annotation in content.annotations: - if isinstance(annotation, CitationAnnotation) and annotation.file_id: - print(f"Found streaming CitationAnnotation: file_id={annotation.file_id}") - elif isinstance(content, HostedFileContent): + if annotation.get("file_id"): + print(f"Found streaming annotation: file_id={annotation['file_id']}") + elif content.type == "hosted_file": file_contents_found.append(content) - print(f"Found streaming HostedFileContent: file_id={content.file_id}") + print(f"Found streaming hosted_file: file_id={content.file_id}") if content.additional_properties and "container_id" in content.additional_properties: print(f" container_id={content.additional_properties['container_id']}") diff --git a/python/samples/getting_started/orchestrations/concurrent_custom_agent_executors.py b/python/samples/getting_started/orchestrations/concurrent_custom_agent_executors.py index 008fa4755f..eedcce198e 100644 --- a/python/samples/getting_started/orchestrations/concurrent_custom_agent_executors.py +++ b/python/samples/getting_started/orchestrations/concurrent_custom_agent_executors.py @@ -27,7 +27,7 @@ Demonstrates: - Executors that create their Agent in __init__ (via AzureOpenAIChatClient) - A @handler that converts AgentExecutorRequest -> AgentExecutorResponse -- ConcurrentBuilder().participants([...]) to build fan-out/fan-in +- ConcurrentBuilder(participants=[...]) to build fan-out/fan-in - Default aggregator returning list[Message] (one user + one assistant per agent) - Workflow completion when all participants become idle @@ -103,7 +103,7 @@ async def main() -> None: marketer = MarketerExec(client) legal = LegalExec(client) - workflow = ConcurrentBuilder().participants([researcher, marketer, legal]).build() + workflow = ConcurrentBuilder(participants=[researcher, marketer, legal]).build() events = await workflow.run("We are launching a new budget-friendly 
electric bike for urban commuters.") outputs = events.get_outputs() diff --git a/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py b/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py index d6b8161f98..babd9d54e9 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py @@ -8,8 +8,8 @@ AgentExecutorRequest, AgentExecutorResponse, AgentResponseUpdate, - Executor, Message, + Executor, WorkflowBuilder, WorkflowContext, WorkflowEvent, diff --git a/python/samples/getting_started/workflows/state-management/state_with_agents.py b/python/samples/getting_started/workflows/state-management/state_with_agents.py index 97b9fab240..068ac70eb4 100644 --- a/python/samples/getting_started/workflows/state-management/state_with_agents.py +++ b/python/samples/getting_started/workflows/state-management/state_with_agents.py @@ -7,9 +7,9 @@ from uuid import uuid4 from agent_framework import ( - Agent, AgentExecutorRequest, AgentExecutorResponse, + Agent, Message, WorkflowBuilder, WorkflowContext, diff --git a/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py index 34d59b62d7..eb0375551e 100644 --- a/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py +++ b/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py @@ -5,8 +5,8 @@ from typing import Annotated from agent_framework import ( - Content, Message, + Content, WorkflowEvent, tool, ) diff --git a/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py index 
159299b9b8..9eb5ac667d 100644 --- a/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py +++ b/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py @@ -5,8 +5,8 @@ from typing import Annotated, cast from agent_framework import ( - Content, Message, + Content, WorkflowEvent, tool, ) diff --git a/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py index 2f7ecea0ac..20a778c745 100644 --- a/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py +++ b/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py @@ -5,8 +5,8 @@ from typing import Annotated, cast from agent_framework import ( - Content, Message, + Content, WorkflowEvent, tool, ) diff --git a/python/samples/semantic-kernel-migration/orchestrations/concurrent_basic.py b/python/samples/semantic-kernel-migration/orchestrations/concurrent_basic.py index 1a02767a5a..7a107d31ec 100644 --- a/python/samples/semantic-kernel-migration/orchestrations/concurrent_basic.py +++ b/python/samples/semantic-kernel-migration/orchestrations/concurrent_basic.py @@ -19,7 +19,6 @@ from agent_framework.azure import AzureOpenAIChatClient from agent_framework.orchestrations import ConcurrentBuilder from azure.identity import AzureCliCredential -from semantic_kernel.agents import Agent as SKAgent from semantic_kernel.agents import ChatCompletionAgent, ConcurrentOrchestration from semantic_kernel.agents.runtime import InProcessRuntime from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion @@ -33,7 +32,7 @@ ###################################################################### -def build_semantic_kernel_agents() -> list[SKAgent]: +def build_semantic_kernel_agents() -> list[Agent]: credential = AzureCliCredential() physics_agent = ChatCompletionAgent( 
diff --git a/python/samples/semantic-kernel-migration/orchestrations/group_chat.py b/python/samples/semantic-kernel-migration/orchestrations/group_chat.py index d7fd6bdbb7..e244bd0c01 100644 --- a/python/samples/semantic-kernel-migration/orchestrations/group_chat.py +++ b/python/samples/semantic-kernel-migration/orchestrations/group_chat.py @@ -16,10 +16,11 @@ from collections.abc import Sequence from typing import Any, cast -from agent_framework import Agent, GroupChatBuilder, Message +from agent_framework import Agent, Message from agent_framework.azure import AzureOpenAIChatClient, AzureOpenAIResponsesClient +from agent_framework.orchestrations import GroupChatBuilder from azure.identity import AzureCliCredential -from semantic_kernel.agents import ChatCompletionAgent, GroupChatOrchestration +from semantic_kernel.agents import Agent, ChatCompletionAgent, GroupChatOrchestration from semantic_kernel.agents.orchestration.group_chat import ( BooleanResult, GroupChatManager, @@ -240,12 +241,10 @@ async def run_agent_framework_example(task: str) -> str: client=AzureOpenAIResponsesClient(credential=credential), ) - workflow = ( - GroupChatBuilder() - .with_orchestrator(agent=AzureOpenAIChatClient(credential=credential).as_agent()) - .participants([researcher, planner]) - .build() - ) + workflow = GroupChatBuilder( + participants=[researcher, planner], + orchestrator_agent=AzureOpenAIChatClient(credential=credential).as_agent(), + ).build() final_response = "" async for event in workflow.run(task, stream=True): diff --git a/python/samples/semantic-kernel-migration/orchestrations/magentic.py b/python/samples/semantic-kernel-migration/orchestrations/magentic.py index a249f61b9b..d0633c02d8 100644 --- a/python/samples/semantic-kernel-migration/orchestrations/magentic.py +++ b/python/samples/semantic-kernel-migration/orchestrations/magentic.py @@ -17,7 +17,9 @@ from agent_framework import Agent, HostedCodeInterpreterTool from agent_framework.openai import OpenAIChatClient, 
OpenAIResponsesClient +from agent_framework.orchestrations import MagenticBuilder from semantic_kernel.agents import ( + Agent, ChatCompletionAgent, MagenticOrchestration, OpenAIAssistantAgent, @@ -152,7 +154,7 @@ async def run_agent_framework_example(prompt: str) -> str | None: client=OpenAIChatClient(), ) - workflow = MagenticBuilder().participants([researcher, coder]).with_manager(agent=manager_agent).build() + workflow = MagenticBuilder(participants=[researcher, coder], manager_agent=manager_agent).build() final_text: str | None = None async for event in workflow.run(prompt, stream=True): From be3d913555b5d2dced1ba1e08e54c60025ee5ef9 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Mon, 9 Feb 2026 16:34:23 +0100 Subject: [PATCH 07/16] =?UTF-8?q?Fix=20test=20failures:=20use=20WorkflowMe?= =?UTF-8?q?ssage=20and=20A2AMessage=20after=20ChatMessage=E2=86=92Message?= =?UTF-8?q?=20rename?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Replace Message(data=..., source_id=...) with WorkflowMessage(...) in workflow tests - Fix isinstance check in A2A agent to use A2AMessage instead of Message - Fix import in test_workflow_observability.py (Message→WorkflowMessage) --- .../a2a/agent_framework_a2a/_agent.py | 3 +- .../core/tests/workflow/test_executor.py | 31 +++++----- .../tests/workflow/test_function_executor.py | 62 +++++++++---------- .../core/tests/workflow/test_workflow.py | 11 ++-- .../workflow/test_workflow_observability.py | 4 +- 5 files changed, 57 insertions(+), 54 deletions(-) diff --git a/python/packages/a2a/agent_framework_a2a/_agent.py b/python/packages/a2a/agent_framework_a2a/_agent.py index b93ca1516b..71d3e3f487 100644 --- a/python/packages/a2a/agent_framework_a2a/_agent.py +++ b/python/packages/a2a/agent_framework_a2a/_agent.py @@ -290,7 +290,8 @@ async def _map_a2a_stream( When True, they are yielded with a continuation token. 
""" async for item in a2a_stream: - if isinstance(item, Message): + if isinstance(item, A2AMessage): + # Process A2A Message contents = self._parse_contents_from_a2a(item.parts) yield AgentResponseUpdate( contents=contents, diff --git a/python/packages/core/tests/workflow/test_executor.py b/python/packages/core/tests/workflow/test_executor.py index c089fb30f3..06d027f19d 100644 --- a/python/packages/core/tests/workflow/test_executor.py +++ b/python/packages/core/tests/workflow/test_executor.py @@ -11,6 +11,7 @@ WorkflowBuilder, WorkflowContext, WorkflowEvent, + WorkflowMessage, executor, handler, response_handler, @@ -97,9 +98,9 @@ async def handle_number(self, number: int, ctx: WorkflowContext) -> None: # typ executor = MockExecutorWithValidHandlers(id="test") assert executor.id is not None assert len(executor._handlers) == 2 # type: ignore - assert executor.can_handle(Message(data="text", source_id="mock")) is True - assert executor.can_handle(Message(data=42, source_id="mock")) is True - assert executor.can_handle(Message(data=3.14, source_id="mock")) is False + assert executor.can_handle(WorkflowMessage(data="text", source_id="mock")) is True + assert executor.can_handle(WorkflowMessage(data=42, source_id="mock")) is True + assert executor.can_handle(WorkflowMessage(data=3.14, source_id="mock")) is False def test_executor_handlers_with_output_types(): @@ -580,9 +581,9 @@ async def handle(self, message: Any, ctx: WorkflowContext) -> None: assert len(exec_instance._handlers) == 1 # Can handle str messages - assert exec_instance.can_handle(Message(data="hello", source_id="mock")) + assert exec_instance.can_handle(WorkflowMessage(data="hello", source_id="mock")) # Cannot handle int messages (since explicit type is str) - assert not exec_instance.can_handle(Message(data=42, source_id="mock")) + assert not exec_instance.can_handle(WorkflowMessage(data=42, source_id="mock")) def test_handler_with_explicit_output_type(self): """Test that explicit output works when 
input is also specified.""" @@ -622,8 +623,8 @@ async def handle(self, message: Any, ctx: WorkflowContext) -> None: assert handler_func._handler_spec["output_types"] == [list] # Verify can_handle - assert exec_instance.can_handle(Message(data={"key": "value"}, source_id="mock")) - assert not exec_instance.can_handle(Message(data="string", source_id="mock")) + assert exec_instance.can_handle(WorkflowMessage(data={"key": "value"}, source_id="mock")) + assert not exec_instance.can_handle(WorkflowMessage(data="string", source_id="mock")) def test_handler_with_explicit_union_input_type(self): """Test that explicit union input_type is handled correctly.""" @@ -641,10 +642,10 @@ async def handle(self, message: Any, ctx: WorkflowContext) -> None: assert len(exec_instance._handlers) == 1 # Can handle both str and int messages - assert exec_instance.can_handle(Message(data="hello", source_id="mock")) - assert exec_instance.can_handle(Message(data=42, source_id="mock")) + assert exec_instance.can_handle(WorkflowMessage(data="hello", source_id="mock")) + assert exec_instance.can_handle(WorkflowMessage(data=42, source_id="mock")) # Cannot handle float - assert not exec_instance.can_handle(Message(data=3.14, source_id="mock")) + assert not exec_instance.can_handle(WorkflowMessage(data=3.14, source_id="mock")) def test_handler_with_explicit_union_output_type(self): """Test that explicit union output is normalized to a list.""" @@ -735,7 +736,7 @@ async def handle(self, message, ctx: WorkflowContext) -> None: # type: ignore[n # Should work with explicit input_type assert str in exec_instance._handlers - assert exec_instance.can_handle(Message(data="hello", source_id="mock")) + assert exec_instance.can_handle(WorkflowMessage(data="hello", source_id="mock")) def test_handler_multiple_handlers_mixed_explicit_and_introspected(self): """Test executor with multiple handlers, some with explicit types and some introspected.""" @@ -772,7 +773,7 @@ async def handle(self, message, ctx: 
WorkflowContext) -> None: # type: ignore[n # Should resolve the string to the actual type assert ForwardRefMessage in exec_instance._handlers - assert exec_instance.can_handle(Message(data=ForwardRefMessage("hello"), source_id="mock")) + assert exec_instance.can_handle(WorkflowMessage(data=ForwardRefMessage("hello"), source_id="mock")) def test_handler_with_string_forward_reference_union(self): """Test that string forward references work with union types.""" @@ -785,8 +786,8 @@ async def handle(self, message, ctx: WorkflowContext) -> None: # type: ignore[n exec_instance = StringUnionExecutor(id="string_union") # Should handle both types - assert exec_instance.can_handle(Message(data=ForwardRefTypeA("hello"), source_id="mock")) - assert exec_instance.can_handle(Message(data=ForwardRefTypeB(42), source_id="mock")) + assert exec_instance.can_handle(WorkflowMessage(data=ForwardRefTypeA("hello"), source_id="mock")) + assert exec_instance.can_handle(WorkflowMessage(data=ForwardRefTypeB(42), source_id="mock")) def test_handler_with_string_forward_reference_output_type(self): """Test that string forward references work for output_type.""" @@ -850,7 +851,7 @@ async def handle(self, message: Any, ctx: WorkflowContext) -> None: # Check input type assert str in exec_instance._handlers - assert exec_instance.can_handle(Message(data="hello", source_id="mock")) + assert exec_instance.can_handle(WorkflowMessage(data="hello", source_id="mock")) # Check output_type assert int in exec_instance.output_types diff --git a/python/packages/core/tests/workflow/test_function_executor.py b/python/packages/core/tests/workflow/test_function_executor.py index 3d274f8cd7..c0b73156ff 100644 --- a/python/packages/core/tests/workflow/test_function_executor.py +++ b/python/packages/core/tests/workflow/test_function_executor.py @@ -8,9 +8,9 @@ from agent_framework import ( FunctionExecutor, - Message, WorkflowBuilder, WorkflowContext, + WorkflowMessage, executor, ) @@ -253,9 +253,9 @@ def 
test_can_handle_method(self): async def string_processor(text: str, ctx: WorkflowContext[str]) -> None: await ctx.send_message(text) - assert string_processor.can_handle(Message(data="hello", source_id="Mock")) - assert not string_processor.can_handle(Message(data=123, source_id="Mock")) - assert not string_processor.can_handle(Message(data=[], source_id="Mock")) + assert string_processor.can_handle(WorkflowMessage(data="hello", source_id="Mock")) + assert not string_processor.can_handle(WorkflowMessage(data=123, source_id="Mock")) + assert not string_processor.can_handle(WorkflowMessage(data=[], source_id="Mock")) def test_duplicate_handler_registration(self): """Test that registering duplicate handlers raises an error.""" @@ -332,9 +332,9 @@ def test_single_parameter_can_handle(self): async def int_processor(value: int): return value * 2 - assert int_processor.can_handle(Message(data=42, source_id="mock")) - assert not int_processor.can_handle(Message(data="hello", source_id="mock")) - assert not int_processor.can_handle(Message(data=[], source_id="mock")) + assert int_processor.can_handle(WorkflowMessage(data=42, source_id="mock")) + assert not int_processor.can_handle(WorkflowMessage(data="hello", source_id="mock")) + assert not int_processor.can_handle(WorkflowMessage(data=[], source_id="mock")) async def test_single_parameter_execution(self): """Test that single-parameter functions can be executed properly.""" @@ -348,7 +348,7 @@ async def double_value(value: int): WorkflowBuilder(start_executor=double_value).build() # For testing purposes, we can check that the handler is registered correctly - assert double_value.can_handle(Message(data=5, source_id="mock")) + assert double_value.can_handle(WorkflowMessage(data=5, source_id="mock")) assert int in double_value._handlers def test_sync_function_basic(self): @@ -392,9 +392,9 @@ def test_sync_function_can_handle(self): def string_handler(text: str): return text.strip() - assert 
string_handler.can_handle(Message(data="hello", source_id="mock")) - assert not string_handler.can_handle(Message(data=123, source_id="mock")) - assert not string_handler.can_handle(Message(data=[], source_id="mock")) + assert string_handler.can_handle(WorkflowMessage(data="hello", source_id="mock")) + assert not string_handler.can_handle(WorkflowMessage(data=123, source_id="mock")) + assert not string_handler.can_handle(WorkflowMessage(data=[], source_id="mock")) def test_sync_function_validation(self): """Test validation for synchronous functions.""" @@ -436,8 +436,8 @@ async def async_func(data: str): assert isinstance(async_func, FunctionExecutor) # Both should handle strings - assert sync_func.can_handle(Message(data="test", source_id="mock")) - assert async_func.can_handle(Message(data="test", source_id="mock")) + assert sync_func.can_handle(WorkflowMessage(data="test", source_id="mock")) + assert async_func.can_handle(WorkflowMessage(data="test", source_id="mock")) # Both should be different instances assert sync_func is not async_func @@ -466,8 +466,8 @@ async def reverse_async(text: str, ctx: WorkflowContext[Any, str]): assert async_spec["workflow_output_types"] == [str] # Second parameter is str # Verify the executors can handle their input types - assert to_upper_sync.can_handle(Message(data="hello", source_id="mock")) - assert reverse_async.can_handle(Message(data="HELLO", source_id="mock")) + assert to_upper_sync.can_handle(WorkflowMessage(data="hello", source_id="mock")) + assert reverse_async.can_handle(WorkflowMessage(data="HELLO", source_id="mock")) # For integration testing, we mainly verify that the handlers are properly registered # and the functions are wrapped correctly @@ -574,9 +574,9 @@ async def process(message, ctx: WorkflowContext) -> None: # type: ignore[no-unt assert len(process._handlers) == 1 # Can handle str messages - assert process.can_handle(Message(data="hello", source_id="mock")) + assert 
process.can_handle(WorkflowMessage(data="hello", source_id="mock")) # Cannot handle int messages - assert not process.can_handle(Message(data=42, source_id="mock")) + assert not process.can_handle(WorkflowMessage(data=42, source_id="mock")) def test_executor_with_explicit_output_type(self): """Test that explicit output_type takes precedence over introspection.""" @@ -609,8 +609,8 @@ async def process(message, ctx: WorkflowContext) -> None: # type: ignore[no-unt assert spec["output_types"] == [list] # Verify can_handle - assert process.can_handle(Message(data={"key": "value"}, source_id="mock")) - assert not process.can_handle(Message(data="string", source_id="mock")) + assert process.can_handle(WorkflowMessage(data={"key": "value"}, source_id="mock")) + assert not process.can_handle(WorkflowMessage(data="string", source_id="mock")) def test_executor_with_explicit_union_input_type(self): """Test that explicit union input_type is handled correctly.""" @@ -623,10 +623,10 @@ async def process(message, ctx: WorkflowContext) -> None: # type: ignore[no-unt assert len(process._handlers) == 1 # Can handle both str and int messages - assert process.can_handle(Message(data="hello", source_id="mock")) - assert process.can_handle(Message(data=42, source_id="mock")) + assert process.can_handle(WorkflowMessage(data="hello", source_id="mock")) + assert process.can_handle(WorkflowMessage(data=42, source_id="mock")) # Cannot handle float - assert not process.can_handle(Message(data=3.14, source_id="mock")) + assert not process.can_handle(WorkflowMessage(data=3.14, source_id="mock")) def test_executor_with_explicit_union_output_type(self): """Test that explicit union output_type is normalized to a list.""" @@ -695,7 +695,7 @@ async def process(message, ctx: WorkflowContext) -> None: # type: ignore[no-unt # Should work with explicit input_type assert str in process._handlers - assert process.can_handle(Message(data="hello", source_id="mock")) + assert 
process.can_handle(WorkflowMessage(data="hello", source_id="mock")) def test_executor_explicit_types_with_id(self): """Test that explicit types work together with id parameter.""" @@ -717,8 +717,8 @@ async def process(message): # type: ignore[no-untyped-def] # Should work with explicit input_type assert str in process._handlers - assert process.can_handle(Message(data="hello", source_id="mock")) - assert not process.can_handle(Message(data=42, source_id="mock")) + assert process.can_handle(WorkflowMessage(data="hello", source_id="mock")) + assert not process.can_handle(WorkflowMessage(data=42, source_id="mock")) def test_executor_explicit_types_with_sync_function(self): """Test that explicit types work with synchronous functions.""" @@ -752,8 +752,8 @@ async def process(message, ctx: WorkflowContext) -> None: # type: ignore[no-unt pass # Can handle both str and int - assert process.can_handle(Message(data="hello", source_id="mock")) - assert process.can_handle(Message(data=42, source_id="mock")) + assert process.can_handle(WorkflowMessage(data="hello", source_id="mock")) + assert process.can_handle(WorkflowMessage(data=42, source_id="mock")) # Output types should include both assert set(process.output_types) == {bool, float} @@ -767,7 +767,7 @@ async def process(message, ctx: WorkflowContext) -> None: # type: ignore[no-unt # Should resolve the string to the actual type assert FuncExecForwardRefMessage in process._handlers - assert process.can_handle(Message(data=FuncExecForwardRefMessage("hello"), source_id="mock")) + assert process.can_handle(WorkflowMessage(data=FuncExecForwardRefMessage("hello"), source_id="mock")) def test_executor_with_string_forward_reference_union(self): """Test that string forward references work with union types.""" @@ -777,8 +777,8 @@ async def process(message, ctx: WorkflowContext) -> None: # type: ignore[no-unt pass # Should handle both types - assert process.can_handle(Message(data=FuncExecForwardRefTypeA("hello"), source_id="mock")) - 
assert process.can_handle(Message(data=FuncExecForwardRefTypeB(42), source_id="mock")) + assert process.can_handle(WorkflowMessage(data=FuncExecForwardRefTypeA("hello"), source_id="mock")) + assert process.can_handle(WorkflowMessage(data=FuncExecForwardRefTypeB(42), source_id="mock")) def test_executor_with_string_forward_reference_output_type(self): """Test that string forward references work for output_type.""" @@ -827,7 +827,7 @@ async def process(message: Any, ctx: WorkflowContext) -> None: # Check input type assert str in process._handlers - assert process.can_handle(Message(data="hello", source_id="mock")) + assert process.can_handle(WorkflowMessage(data="hello", source_id="mock")) # Check output_type assert int in process.output_types diff --git a/python/packages/core/tests/workflow/test_workflow.py b/python/packages/core/tests/workflow/test_workflow.py index c8923f4774..6728bcfcb1 100644 --- a/python/packages/core/tests/workflow/test_workflow.py +++ b/python/packages/core/tests/workflow/test_workflow.py @@ -25,6 +25,7 @@ WorkflowContext, WorkflowConvergenceException, WorkflowEvent, + WorkflowMessage, WorkflowRunState, handler, response_handler, @@ -274,7 +275,7 @@ async def test_workflow_with_checkpointing_enabled(simple_executor: Executor): ) # Verify workflow was created and can run - test_message = Message(data="test message", source_id="test", target_id=None) + test_message = WorkflowMessage(data="test message", source_id="test", target_id=None) result = await workflow.run(test_message) assert result is not None @@ -535,7 +536,7 @@ async def test_workflow_checkpoint_runtime_only_configuration( workflow = WorkflowBuilder(start_executor=simple_executor).add_edge(simple_executor, simple_executor).build() # Run with runtime checkpoint storage - should create checkpoints - test_message = Message(data="runtime checkpoint test", source_id="test", target_id=None) + test_message = WorkflowMessage(data="runtime checkpoint test", source_id="test", target_id=None) 
result = await workflow.run(test_message, checkpoint_storage=storage) assert result is not None assert result.get_final_state() == WorkflowRunState.IDLE @@ -586,7 +587,7 @@ async def test_workflow_checkpoint_runtime_overrides_buildtime( ) # Run with runtime checkpoint storage override - test_message = Message(data="override test", source_id="test", target_id=None) + test_message = WorkflowMessage(data="override test", source_id="test", target_id=None) result = await workflow.run(test_message, checkpoint_storage=runtime_storage) assert result is not None @@ -910,7 +911,7 @@ async def test_workflow_run_parameter_validation(simple_executor: Executor) -> N """Test that stream properly validate parameter combinations.""" workflow = WorkflowBuilder(start_executor=simple_executor).add_edge(simple_executor, simple_executor).build() - test_message = Message(data="test", source_id="test", target_id=None) + test_message = WorkflowMessage(data="test", source_id="test", target_id=None) # Valid: message only (new run) result = await workflow.run(test_message) @@ -941,7 +942,7 @@ async def test_workflow_run_stream_parameter_validation( """Test stream=True specific parameter validation scenarios.""" workflow = WorkflowBuilder(start_executor=simple_executor).add_edge(simple_executor, simple_executor).build() - test_message = Message(data="test", source_id="test", target_id=None) + test_message = WorkflowMessage(data="test", source_id="test", target_id=None) # Valid: message only (new run) events: list[WorkflowEvent] = [] diff --git a/python/packages/core/tests/workflow/test_workflow_observability.py b/python/packages/core/tests/workflow/test_workflow_observability.py index 6dcad66a88..d5c20ad429 100644 --- a/python/packages/core/tests/workflow/test_workflow_observability.py +++ b/python/packages/core/tests/workflow/test_workflow_observability.py @@ -8,7 +8,7 @@ from agent_framework import InMemoryCheckpointStorage, WorkflowBuilder from agent_framework._workflows._executor import 
Executor, handler -from agent_framework._workflows._runner_context import InProcRunnerContext, Message, MessageType +from agent_framework._workflows._runner_context import InProcRunnerContext, MessageType, WorkflowMessage from agent_framework._workflows._state import State from agent_framework._workflows._workflow import Workflow from agent_framework._workflows._workflow_context import WorkflowContext @@ -440,7 +440,7 @@ async def test_message_trace_context_serialization(span_exporter: InMemorySpanEx ctx = InProcRunnerContext(InMemoryCheckpointStorage()) # Create message with trace context - message = Message( + message = WorkflowMessage( data="test", source_id="source", target_id="target", From 50974dd3ec367d7cc4796e006e8e609424cd4395 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Mon, 9 Feb 2026 17:13:37 +0100 Subject: [PATCH 08/16] =?UTF-8?q?Fix=20lint,=20fmt,=20and=20sample=20error?= =?UTF-8?q?s=20after=20ChatMessage=E2=86=92Message=20rename?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Auto-fix 70+ ruff lint issues across samples (ChatMessage→Message refs) - Fix HostedVectorStoreContent→Content.from_hosted_vector_store in file search sample - Fix _normalize_messages→normalize_messages in custom agent sample - Fix context.terminate→raise MiddlewareTermination in middleware samples - Fix with_update_hook→with_transform_hook in override middleware sample - Add TOptions_co import back to custom_chat_client sample - Add noqa for FastAPI File() default in chatkit sample - Fix B023 loop variable capture in weather agent sample --- .../samples/demos/chatkit-integration/app.py | 4 ++-- .../agent_with_text_search_rag/main.py | 2 +- .../workflow_evaluation/create_workflow.py | 2 +- ..._ai_with_code_interpreter_file_download.py | 2 +- .../azure_ai/azure_ai_with_hosted_mcp.py | 2 +- .../azure_assistants_with_code_interpreter.py | 2 +- .../azure_assistants_with_thread.py | 2 +- .../azure_chat_client_with_thread.py | 2 +- 
.../azure_responses_client_image_analysis.py | 2 +- ...azure_responses_client_with_file_search.py | 6 +++--- .../azure_responses_client_with_thread.py | 2 +- .../agents/custom/custom_agent.py | 7 ++++--- .../agents/ollama/ollama_chat_multimodal.py | 2 +- .../openai/openai_chat_client_with_thread.py | 2 +- .../openai/openai_responses_client_basic.py | 16 ++++++++++---- .../openai_responses_client_image_analysis.py | 2 +- .../openai_responses_client_with_thread.py | 2 +- .../aggregate_context_provider.py | 2 +- .../simple_context_provider.py | 2 +- .../devui/weather_agent_azure/agent.py | 21 +++++++++++-------- .../03_single_agent_streaming/worker.py | 2 +- .../worker.py | 2 +- .../worker.py | 2 +- .../worker.py | 2 +- .../worker.py | 2 +- .../middleware/chat_middleware.py | 2 +- .../middleware/class_based_middleware.py | 2 +- .../middleware/middleware_termination.py | 8 +++---- .../override_result_with_middleware.py | 8 +++---- .../multimodal_input/azure_chat_multimodal.py | 2 +- .../azure_responses_multimodal.py | 2 +- .../openai_chat_multimodal.py | 2 +- .../concurrent_custom_agent_executors.py | 4 ++-- .../group_chat_agent_manager.py | 2 +- .../group_chat_philosophical_debate.py | 2 +- .../group_chat_simple_selector.py | 2 +- .../orchestrations/handoff_autonomous.py | 2 +- .../orchestrations/handoff_simple.py | 2 +- .../handoff_with_code_interpreter_file.py | 4 ++-- .../orchestrations/magentic.py | 4 ++-- .../orchestrations/magentic_checkpoint.py | 2 +- .../magentic_human_plan_review.py | 2 +- .../sequential_custom_executors.py | 2 +- .../purview_agent/sample_purview_agent.py | 2 +- .../custom_chat_message_store_thread.py | 2 +- .../tools/function_tool_with_approval.py | 2 +- ...re_chat_agents_tool_calls_with_feedback.py | 3 +-- .../agents/custom_agent_executors.py | 2 +- .../agents/handoff_workflow_as_agent.py | 4 ++-- .../checkpoint_with_human_in_the_loop.py | 2 +- ...ff_with_tool_approval_checkpoint_resume.py | 4 ++-- 
.../workflows/control-flow/edge_condition.py | 1 - .../multi_selection_edge_group.py | 1 - .../workflows/control-flow/simple_loop.py | 3 +-- .../control-flow/switch_case_edge_group.py | 3 +-- .../human-in-the-loop/agents_with_HITL.py | 2 +- .../guessing_game_with_human_input.py | 2 +- .../state-management/state_with_agents.py | 2 +- .../concurrent_builder_tool_approval.py | 2 +- .../group_chat_builder_tool_approval.py | 2 +- .../sequential_builder_tool_approval.py | 2 +- 61 files changed, 98 insertions(+), 91 deletions(-) diff --git a/python/samples/demos/chatkit-integration/app.py b/python/samples/demos/chatkit-integration/app.py index 832e2ce8c1..8167bb74b6 100644 --- a/python/samples/demos/chatkit-integration/app.py +++ b/python/samples/demos/chatkit-integration/app.py @@ -28,7 +28,7 @@ import uvicorn # Agent Framework imports -from agent_framework import AgentResponseUpdate, Agent, Message, FunctionResultContent, Role, tool +from agent_framework import Agent, AgentResponseUpdate, FunctionResultContent, Message, Role, tool from agent_framework.azure import AzureOpenAIChatClient # Agent Framework ChatKit integration @@ -573,7 +573,7 @@ async def chatkit_endpoint(request: Request): @app.post("/upload/{attachment_id}") -async def upload_file(attachment_id: str, file: UploadFile = File(...)): +async def upload_file(attachment_id: str, file: UploadFile = File(...)): # noqa: B008 """Handle file upload for two-phase upload. 
The client POSTs the file bytes here after creating the attachment diff --git a/python/samples/demos/hosted_agents/agent_with_text_search_rag/main.py b/python/samples/demos/hosted_agents/agent_with_text_search_rag/main.py index b777fbfd22..8e6c77d712 100644 --- a/python/samples/demos/hosted_agents/agent_with_text_search_rag/main.py +++ b/python/samples/demos/hosted_agents/agent_with_text_search_rag/main.py @@ -6,7 +6,7 @@ from dataclasses import dataclass from typing import Any -from agent_framework import Message, Context, ContextProvider +from agent_framework import Context, ContextProvider, Message from agent_framework.azure import AzureOpenAIChatClient from azure.ai.agentserver.agentframework import from_agent_framework # pyright: ignore[reportUnknownVariableType] from azure.identity import DefaultAzureCredential diff --git a/python/samples/demos/workflow_evaluation/create_workflow.py b/python/samples/demos/workflow_evaluation/create_workflow.py index 2b8840c1e0..d1f679b778 100644 --- a/python/samples/demos/workflow_evaluation/create_workflow.py +++ b/python/samples/demos/workflow_evaluation/create_workflow.py @@ -48,8 +48,8 @@ from agent_framework import ( AgentExecutorResponse, AgentResponseUpdate, - Message, Executor, + Message, WorkflowBuilder, WorkflowContext, executor, diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_download.py b/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_download.py index 157e0c3397..657820446e 100644 --- a/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_download.py +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_download.py @@ -5,9 +5,9 @@ from pathlib import Path from agent_framework import ( + Agent, AgentResponseUpdate, Annotation, - Agent, Content, HostedCodeInterpreterTool, ) diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_hosted_mcp.py 
b/python/samples/getting_started/agents/azure_ai/azure_ai_with_hosted_mcp.py index b9e24bd5d4..9f150ae003 100644 --- a/python/samples/getting_started/agents/azure_ai/azure_ai_with_hosted_mcp.py +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_with_hosted_mcp.py @@ -3,7 +3,7 @@ import asyncio from typing import Any -from agent_framework import SupportsAgentRun, AgentResponse, AgentThread, Message, HostedMCPTool +from agent_framework import AgentResponse, AgentThread, HostedMCPTool, Message, SupportsAgentRun from agent_framework.azure import AzureAIProjectAgentProvider from azure.identity.aio import AzureCliCredential diff --git a/python/samples/getting_started/agents/azure_openai/azure_assistants_with_code_interpreter.py b/python/samples/getting_started/agents/azure_openai/azure_assistants_with_code_interpreter.py index 5bf4f5b2d2..777aad463b 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_assistants_with_code_interpreter.py +++ b/python/samples/getting_started/agents/azure_openai/azure_assistants_with_code_interpreter.py @@ -2,7 +2,7 @@ import asyncio -from agent_framework import AgentResponseUpdate, Agent, ChatResponseUpdate, HostedCodeInterpreterTool +from agent_framework import Agent, AgentResponseUpdate, ChatResponseUpdate, HostedCodeInterpreterTool from agent_framework.azure import AzureOpenAIAssistantsClient from azure.identity import AzureCliCredential from openai.types.beta.threads.runs import ( diff --git a/python/samples/getting_started/agents/azure_openai/azure_assistants_with_thread.py b/python/samples/getting_started/agents/azure_openai/azure_assistants_with_thread.py index d868adf514..f5f8ce21e2 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_assistants_with_thread.py +++ b/python/samples/getting_started/agents/azure_openai/azure_assistants_with_thread.py @@ -4,7 +4,7 @@ from random import randint from typing import Annotated -from agent_framework import AgentThread, Agent, tool +from 
agent_framework import Agent, AgentThread, tool from agent_framework.azure import AzureOpenAIAssistantsClient from azure.identity import AzureCliCredential from pydantic import Field diff --git a/python/samples/getting_started/agents/azure_openai/azure_chat_client_with_thread.py b/python/samples/getting_started/agents/azure_openai/azure_chat_client_with_thread.py index 8e262d5999..ded509a6d1 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_chat_client_with_thread.py +++ b/python/samples/getting_started/agents/azure_openai/azure_chat_client_with_thread.py @@ -4,7 +4,7 @@ from random import randint from typing import Annotated -from agent_framework import AgentThread, Agent, ChatMessageStore, tool +from agent_framework import Agent, AgentThread, ChatMessageStore, tool from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential from pydantic import Field diff --git a/python/samples/getting_started/agents/azure_openai/azure_responses_client_image_analysis.py b/python/samples/getting_started/agents/azure_openai/azure_responses_client_image_analysis.py index 6d91aca7a4..d144745d16 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_responses_client_image_analysis.py +++ b/python/samples/getting_started/agents/azure_openai/azure_responses_client_image_analysis.py @@ -2,7 +2,7 @@ import asyncio -from agent_framework import Message, Content +from agent_framework import Content, Message from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import AzureCliCredential diff --git a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_file_search.py b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_file_search.py index 0710f09664..975f8a378d 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_file_search.py +++ 
b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_file_search.py @@ -2,7 +2,7 @@ import asyncio -from agent_framework import Agent, HostedFileSearchTool, HostedVectorStoreContent +from agent_framework import Agent, Content, HostedFileSearchTool from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import AzureCliCredential @@ -22,7 +22,7 @@ # Helper functions -async def create_vector_store(client: AzureOpenAIResponsesClient) -> tuple[str, HostedVectorStoreContent]: +async def create_vector_store(client: AzureOpenAIResponsesClient) -> tuple[str, Content]: """Create a vector store with sample documents.""" file = await client.client.files.create( file=("todays_weather.txt", b"The weather today is sunny with a high of 75F."), purpose="assistants" @@ -35,7 +35,7 @@ async def create_vector_store(client: AzureOpenAIResponsesClient) -> tuple[str, if result.last_error is not None: raise Exception(f"Vector store file processing failed with status: {result.last_error.message}") - return file.id, HostedVectorStoreContent(vector_store_id=vector_store.id) + return file.id, Content.from_hosted_vector_store(vector_store_id=vector_store.id) async def delete_vector_store(client: AzureOpenAIResponsesClient, file_id: str, vector_store_id: str) -> None: diff --git a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_thread.py b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_thread.py index 44f90d2ca2..aded345bee 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_thread.py +++ b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_thread.py @@ -4,7 +4,7 @@ from random import randint from typing import Annotated -from agent_framework import AgentThread, Agent, tool +from agent_framework import Agent, AgentThread, tool from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import 
AzureCliCredential from pydantic import Field diff --git a/python/samples/getting_started/agents/custom/custom_agent.py b/python/samples/getting_started/agents/custom/custom_agent.py index a4b2ed6c35..51fb2452c8 100644 --- a/python/samples/getting_started/agents/custom/custom_agent.py +++ b/python/samples/getting_started/agents/custom/custom_agent.py @@ -9,9 +9,10 @@ AgentResponseUpdate, AgentThread, BaseAgent, - Message, Content, + Message, Role, + normalize_messages, ) """ @@ -87,7 +88,7 @@ async def _run( ) -> AgentResponse: """Non-streaming implementation.""" # Normalize input messages to a list - normalized_messages = self._normalize_messages(messages) + normalized_messages = normalize_messages(messages) if not normalized_messages: response_message = Message( @@ -119,7 +120,7 @@ async def _run_stream( ) -> AsyncIterable[AgentResponseUpdate]: """Streaming implementation.""" # Normalize input messages to a list - normalized_messages = self._normalize_messages(messages) + normalized_messages = normalize_messages(messages) if not normalized_messages: response_text = "Hello! I'm a custom echo agent. Send me a message and I'll echo it back." 
diff --git a/python/samples/getting_started/agents/ollama/ollama_chat_multimodal.py b/python/samples/getting_started/agents/ollama/ollama_chat_multimodal.py index cbee158337..68c1246ad2 100644 --- a/python/samples/getting_started/agents/ollama/ollama_chat_multimodal.py +++ b/python/samples/getting_started/agents/ollama/ollama_chat_multimodal.py @@ -2,7 +2,7 @@ import asyncio -from agent_framework import Message, Content +from agent_framework import Content, Message from agent_framework.ollama import OllamaChatClient """ diff --git a/python/samples/getting_started/agents/openai/openai_chat_client_with_thread.py b/python/samples/getting_started/agents/openai/openai_chat_client_with_thread.py index 98cf81091e..a9482d9664 100644 --- a/python/samples/getting_started/agents/openai/openai_chat_client_with_thread.py +++ b/python/samples/getting_started/agents/openai/openai_chat_client_with_thread.py @@ -4,7 +4,7 @@ from random import randint from typing import Annotated -from agent_framework import AgentThread, Agent, ChatMessageStore, tool +from agent_framework import Agent, AgentThread, ChatMessageStore, tool from agent_framework.openai import OpenAIChatClient from pydantic import Field diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_basic.py b/python/samples/getting_started/agents/openai/openai_responses_client_basic.py index 70241f2cd5..b6ab9fb42c 100644 --- a/python/samples/getting_started/agents/openai/openai_responses_client_basic.py +++ b/python/samples/getting_started/agents/openai/openai_responses_client_basic.py @@ -5,7 +5,16 @@ from random import randint from typing import Annotated -from agent_framework import Agent, ChatContext, Message, ChatResponse, Role, chat_middleware, tool +from agent_framework import ( + Agent, + ChatContext, + ChatResponse, + Message, + MiddlewareTermination, + Role, + chat_middleware, + tool, +) from agent_framework.openai import OpenAIResponsesClient from pydantic import Field @@ -47,9 +56,8 @@ 
async def security_and_override_middleware( ] ) - # Set terminate flag to stop execution - context.terminate = True - return + # Terminate middleware execution with the blocked response + raise MiddlewareTermination(result=context.result) # Continue to next middleware or AI execution await call_next(context) diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_image_analysis.py b/python/samples/getting_started/agents/openai/openai_responses_client_image_analysis.py index 3db8ddf54f..a1064ff93d 100644 --- a/python/samples/getting_started/agents/openai/openai_responses_client_image_analysis.py +++ b/python/samples/getting_started/agents/openai/openai_responses_client_image_analysis.py @@ -2,7 +2,7 @@ import asyncio -from agent_framework import Message, Content +from agent_framework import Content, Message from agent_framework.openai import OpenAIResponsesClient """ diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_with_thread.py b/python/samples/getting_started/agents/openai/openai_responses_client_with_thread.py index d9ce68dc42..2b2ec80d87 100644 --- a/python/samples/getting_started/agents/openai/openai_responses_client_with_thread.py +++ b/python/samples/getting_started/agents/openai/openai_responses_client_with_thread.py @@ -4,7 +4,7 @@ from random import randint from typing import Annotated -from agent_framework import AgentThread, Agent, tool +from agent_framework import Agent, AgentThread, tool from agent_framework.openai import OpenAIResponsesClient from pydantic import Field diff --git a/python/samples/getting_started/context_providers/aggregate_context_provider.py b/python/samples/getting_started/context_providers/aggregate_context_provider.py index 3278577c92..d3c7f324b4 100644 --- a/python/samples/getting_started/context_providers/aggregate_context_provider.py +++ b/python/samples/getting_started/context_providers/aggregate_context_provider.py @@ -17,7 +17,7 @@ from types import TracebackType 
from typing import TYPE_CHECKING, Any, cast -from agent_framework import Agent, Message, Context, ContextProvider +from agent_framework import Agent, Context, ContextProvider, Message from agent_framework.azure import AzureAIClient from azure.identity.aio import AzureCliCredential diff --git a/python/samples/getting_started/context_providers/simple_context_provider.py b/python/samples/getting_started/context_providers/simple_context_provider.py index 15892e286d..e151651199 100644 --- a/python/samples/getting_started/context_providers/simple_context_provider.py +++ b/python/samples/getting_started/context_providers/simple_context_provider.py @@ -4,7 +4,7 @@ from collections.abc import MutableSequence, Sequence from typing import Any -from agent_framework import Agent, SupportsChatGetResponse, Message, Context, ContextProvider +from agent_framework import Agent, Context, ContextProvider, Message, SupportsChatGetResponse from agent_framework.azure import AzureAIClient from azure.identity.aio import AzureCliCredential from pydantic import BaseModel diff --git a/python/samples/getting_started/devui/weather_agent_azure/agent.py b/python/samples/getting_started/devui/weather_agent_azure/agent.py index 65fa8d0b61..dca5b69bbc 100644 --- a/python/samples/getting_started/devui/weather_agent_azure/agent.py +++ b/python/samples/getting_started/devui/weather_agent_azure/agent.py @@ -9,11 +9,13 @@ from agent_framework import ( Agent, ChatContext, - Message, ChatResponse, ChatResponseUpdate, Content, FunctionInvocationContext, + Message, + MiddlewareTermination, + ResponseStream, Role, chat_middleware, function_middleware, @@ -54,14 +56,17 @@ async def security_filter_middleware( ) if context.stream: - # Streaming mode: return async generator - async def blocked_stream() -> AsyncIterable[ChatResponseUpdate]: + # Streaming mode: wrap in ResponseStream + async def blocked_stream(msg: str = error_message) -> AsyncIterable[ChatResponseUpdate]: yield ChatResponseUpdate( - 
contents=[Content.from_text(text=error_message)], + contents=[Content.from_text(text=msg)], role=Role.ASSISTANT, ) - context.result = blocked_stream() + response = ChatResponse( + messages=[Message(role=Role.ASSISTANT, text=error_message)] + ) + context.result = ResponseStream(blocked_stream(), finalizer=lambda _, r=response: r) else: # Non-streaming mode: return complete response context.result = ChatResponse( @@ -73,8 +78,7 @@ async def blocked_stream() -> AsyncIterable[ChatResponseUpdate]: ] ) - context.terminate = True - return + raise MiddlewareTermination(result=context.result) await call_next(context) @@ -92,8 +96,7 @@ async def atlantis_location_filter_middleware( "Blocked! Hold up right there!! Tell the user that " "'Atlantis is a special place, we must never ask about the weather there!!'" ) - context.terminate = True - return + raise MiddlewareTermination(result=context.result) await call_next(context) diff --git a/python/samples/getting_started/durabletask/03_single_agent_streaming/worker.py b/python/samples/getting_started/durabletask/03_single_agent_streaming/worker.py index 769b24888c..320c008cde 100644 --- a/python/samples/getting_started/durabletask/03_single_agent_streaming/worker.py +++ b/python/samples/getting_started/durabletask/03_single_agent_streaming/worker.py @@ -18,7 +18,7 @@ from datetime import timedelta import redis.asyncio as aioredis -from agent_framework import AgentResponseUpdate, Agent +from agent_framework import Agent, AgentResponseUpdate from agent_framework.azure import ( AgentCallbackContext, AgentResponseCallbackProtocol, diff --git a/python/samples/getting_started/durabletask/04_single_agent_orchestration_chaining/worker.py b/python/samples/getting_started/durabletask/04_single_agent_orchestration_chaining/worker.py index 18a7e7fdc4..581c95a06a 100644 --- a/python/samples/getting_started/durabletask/04_single_agent_orchestration_chaining/worker.py +++ 
b/python/samples/getting_started/durabletask/04_single_agent_orchestration_chaining/worker.py @@ -17,7 +17,7 @@ import os from collections.abc import Generator -from agent_framework import AgentResponse, Agent +from agent_framework import Agent, AgentResponse from agent_framework.azure import AzureOpenAIChatClient, DurableAIAgentOrchestrationContext, DurableAIAgentWorker from azure.identity import AzureCliCredential, DefaultAzureCredential from durabletask.azuremanaged.worker import DurableTaskSchedulerWorker diff --git a/python/samples/getting_started/durabletask/05_multi_agent_orchestration_concurrency/worker.py b/python/samples/getting_started/durabletask/05_multi_agent_orchestration_concurrency/worker.py index 76b7913770..67861cc8c9 100644 --- a/python/samples/getting_started/durabletask/05_multi_agent_orchestration_concurrency/worker.py +++ b/python/samples/getting_started/durabletask/05_multi_agent_orchestration_concurrency/worker.py @@ -18,7 +18,7 @@ from collections.abc import Generator from typing import Any -from agent_framework import AgentResponse, Agent +from agent_framework import Agent, AgentResponse from agent_framework.azure import AzureOpenAIChatClient, DurableAIAgentOrchestrationContext, DurableAIAgentWorker from azure.identity import AzureCliCredential, DefaultAzureCredential from durabletask.azuremanaged.worker import DurableTaskSchedulerWorker diff --git a/python/samples/getting_started/durabletask/06_multi_agent_orchestration_conditionals/worker.py b/python/samples/getting_started/durabletask/06_multi_agent_orchestration_conditionals/worker.py index 6c7ef5c33b..0016627cdc 100644 --- a/python/samples/getting_started/durabletask/06_multi_agent_orchestration_conditionals/worker.py +++ b/python/samples/getting_started/durabletask/06_multi_agent_orchestration_conditionals/worker.py @@ -18,7 +18,7 @@ from collections.abc import Generator from typing import Any, cast -from agent_framework import AgentResponse, Agent +from agent_framework import 
Agent, AgentResponse from agent_framework.azure import AzureOpenAIChatClient, DurableAIAgentOrchestrationContext, DurableAIAgentWorker from azure.identity import AzureCliCredential, DefaultAzureCredential from durabletask.azuremanaged.worker import DurableTaskSchedulerWorker diff --git a/python/samples/getting_started/durabletask/07_single_agent_orchestration_hitl/worker.py b/python/samples/getting_started/durabletask/07_single_agent_orchestration_hitl/worker.py index aed1849bd7..da86d869a0 100644 --- a/python/samples/getting_started/durabletask/07_single_agent_orchestration_hitl/worker.py +++ b/python/samples/getting_started/durabletask/07_single_agent_orchestration_hitl/worker.py @@ -19,7 +19,7 @@ from datetime import timedelta from typing import Any, cast -from agent_framework import AgentResponse, Agent +from agent_framework import Agent, AgentResponse from agent_framework.azure import AzureOpenAIChatClient, DurableAIAgentOrchestrationContext, DurableAIAgentWorker from azure.identity import AzureCliCredential, DefaultAzureCredential from durabletask.azuremanaged.worker import DurableTaskSchedulerWorker diff --git a/python/samples/getting_started/middleware/chat_middleware.py b/python/samples/getting_started/middleware/chat_middleware.py index 092350d6a4..424db96457 100644 --- a/python/samples/getting_started/middleware/chat_middleware.py +++ b/python/samples/getting_started/middleware/chat_middleware.py @@ -7,9 +7,9 @@ from agent_framework import ( ChatContext, - Message, ChatMiddleware, ChatResponse, + Message, MiddlewareTermination, chat_middleware, tool, diff --git a/python/samples/getting_started/middleware/class_based_middleware.py b/python/samples/getting_started/middleware/class_based_middleware.py index 858e031e97..208dddc96d 100644 --- a/python/samples/getting_started/middleware/class_based_middleware.py +++ b/python/samples/getting_started/middleware/class_based_middleware.py @@ -10,9 +10,9 @@ AgentContext, AgentMiddleware, AgentResponse, - Message, 
FunctionInvocationContext, FunctionMiddleware, + Message, tool, ) from agent_framework.azure import AzureAIAgentClient diff --git a/python/samples/getting_started/middleware/middleware_termination.py b/python/samples/getting_started/middleware/middleware_termination.py index 4bab79ed99..9f48e662c5 100644 --- a/python/samples/getting_started/middleware/middleware_termination.py +++ b/python/samples/getting_started/middleware/middleware_termination.py @@ -10,6 +10,7 @@ AgentMiddleware, AgentResponse, Message, + MiddlewareTermination, tool, ) from agent_framework.azure import AzureAIAgentClient @@ -71,9 +72,8 @@ async def process( ] ) - # Set terminate flag to prevent further processing - context.terminate = True - break + # Terminate to prevent further processing + raise MiddlewareTermination(result=context.result) await call_next(context) @@ -98,7 +98,7 @@ async def process( f"[PostTerminationMiddleware] Maximum responses ({self.max_responses}) reached. " "Terminating further processing." 
) - context.terminate = True + raise MiddlewareTermination # Allow the agent to process normally await call_next(context) diff --git a/python/samples/getting_started/middleware/override_result_with_middleware.py b/python/samples/getting_started/middleware/override_result_with_middleware.py index 9520b27a0d..2239136c3c 100644 --- a/python/samples/getting_started/middleware/override_result_with_middleware.py +++ b/python/samples/getting_started/middleware/override_result_with_middleware.py @@ -11,9 +11,9 @@ AgentResponse, AgentResponseUpdate, ChatContext, - Message, ChatResponse, ChatResponseUpdate, + Message, ResponseStream, Role, tool, @@ -76,10 +76,10 @@ def _update_hook(update: ChatResponseUpdate) -> ChatResponseUpdate: index["value"] += 1 return update - context.result.with_update_hook(_update_hook) + context.result.with_transform_hook(_update_hook) else: # For non-streaming: just replace with a new message - current_text = context.result.text or "" + current_text = context.result.text if isinstance(context.result, ChatResponse) else "" custom_message = f"Weather Advisory: [0] {''.join(chunks)} Original message was: {current_text}" context.result = ChatResponse(messages=[Message(role=Role.ASSISTANT, text=custom_message)]) @@ -172,7 +172,7 @@ def _clean_update(update: AgentResponseUpdate) -> AgentResponseUpdate: content.text = text return update - context.result.with_update_hook(_clean_update) + context.result.with_transform_hook(_clean_update) context.result.with_finalizer(_sanitize) elif isinstance(context.result, AgentResponse): context.result = _sanitize(context.result) diff --git a/python/samples/getting_started/multimodal_input/azure_chat_multimodal.py b/python/samples/getting_started/multimodal_input/azure_chat_multimodal.py index d7ff61b800..369221ac36 100644 --- a/python/samples/getting_started/multimodal_input/azure_chat_multimodal.py +++ b/python/samples/getting_started/multimodal_input/azure_chat_multimodal.py @@ -2,7 +2,7 @@ import asyncio -from 
agent_framework import Message, Content +from agent_framework import Content, Message from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential diff --git a/python/samples/getting_started/multimodal_input/azure_responses_multimodal.py b/python/samples/getting_started/multimodal_input/azure_responses_multimodal.py index cc22439872..decf27aefe 100644 --- a/python/samples/getting_started/multimodal_input/azure_responses_multimodal.py +++ b/python/samples/getting_started/multimodal_input/azure_responses_multimodal.py @@ -3,7 +3,7 @@ import asyncio from pathlib import Path -from agent_framework import Message, Content +from agent_framework import Content, Message from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import AzureCliCredential diff --git a/python/samples/getting_started/multimodal_input/openai_chat_multimodal.py b/python/samples/getting_started/multimodal_input/openai_chat_multimodal.py index 3fcd01585f..f34576c00f 100644 --- a/python/samples/getting_started/multimodal_input/openai_chat_multimodal.py +++ b/python/samples/getting_started/multimodal_input/openai_chat_multimodal.py @@ -5,7 +5,7 @@ import struct from pathlib import Path -from agent_framework import Message, Content +from agent_framework import Content, Message from agent_framework.openai import OpenAIChatClient ASSETS_DIR = Path(__file__).resolve().parent.parent / "sample_assets" diff --git a/python/samples/getting_started/orchestrations/concurrent_custom_agent_executors.py b/python/samples/getting_started/orchestrations/concurrent_custom_agent_executors.py index eedcce198e..bd3b8b93a5 100644 --- a/python/samples/getting_started/orchestrations/concurrent_custom_agent_executors.py +++ b/python/samples/getting_started/orchestrations/concurrent_custom_agent_executors.py @@ -4,11 +4,11 @@ from typing import Any from agent_framework import ( + Agent, AgentExecutorRequest, AgentExecutorResponse, - Agent, - Message, Executor, + 
Message, WorkflowContext, handler, ) diff --git a/python/samples/getting_started/orchestrations/group_chat_agent_manager.py b/python/samples/getting_started/orchestrations/group_chat_agent_manager.py index 5338a77714..78eb8535ae 100644 --- a/python/samples/getting_started/orchestrations/group_chat_agent_manager.py +++ b/python/samples/getting_started/orchestrations/group_chat_agent_manager.py @@ -4,8 +4,8 @@ from typing import cast from agent_framework import ( - AgentResponseUpdate, Agent, + AgentResponseUpdate, Message, ) from agent_framework.azure import AzureOpenAIChatClient diff --git a/python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py b/python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py index d4dc65e735..e4723c01e0 100644 --- a/python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py +++ b/python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py @@ -5,8 +5,8 @@ from typing import cast from agent_framework import ( - AgentResponseUpdate, Agent, + AgentResponseUpdate, Message, ) from agent_framework.azure import AzureOpenAIChatClient diff --git a/python/samples/getting_started/orchestrations/group_chat_simple_selector.py b/python/samples/getting_started/orchestrations/group_chat_simple_selector.py index 270333c8d5..13cd3d3e5a 100644 --- a/python/samples/getting_started/orchestrations/group_chat_simple_selector.py +++ b/python/samples/getting_started/orchestrations/group_chat_simple_selector.py @@ -4,8 +4,8 @@ from typing import cast from agent_framework import ( - AgentResponseUpdate, Agent, + AgentResponseUpdate, Message, ) from agent_framework.azure import AzureOpenAIChatClient diff --git a/python/samples/getting_started/orchestrations/handoff_autonomous.py b/python/samples/getting_started/orchestrations/handoff_autonomous.py index de08ba7854..524898590e 100644 --- a/python/samples/getting_started/orchestrations/handoff_autonomous.py +++ 
b/python/samples/getting_started/orchestrations/handoff_autonomous.py @@ -5,8 +5,8 @@ from typing import cast from agent_framework import ( - AgentResponseUpdate, Agent, + AgentResponseUpdate, Message, resolve_agent_id, ) diff --git a/python/samples/getting_started/orchestrations/handoff_simple.py b/python/samples/getting_started/orchestrations/handoff_simple.py index 30f46cd940..b2f40f438f 100644 --- a/python/samples/getting_started/orchestrations/handoff_simple.py +++ b/python/samples/getting_started/orchestrations/handoff_simple.py @@ -4,8 +4,8 @@ from typing import Annotated, cast from agent_framework import ( - AgentResponse, Agent, + AgentResponse, Message, WorkflowEvent, WorkflowRunState, diff --git a/python/samples/getting_started/orchestrations/handoff_with_code_interpreter_file.py b/python/samples/getting_started/orchestrations/handoff_with_code_interpreter_file.py index 223c7f946c..c855a2bf21 100644 --- a/python/samples/getting_started/orchestrations/handoff_with_code_interpreter_file.py +++ b/python/samples/getting_started/orchestrations/handoff_with_code_interpreter_file.py @@ -31,10 +31,10 @@ from typing import cast from agent_framework import ( - AgentResponseUpdate, Agent, - Message, + AgentResponseUpdate, HostedCodeInterpreterTool, + Message, WorkflowEvent, WorkflowRunState, ) diff --git a/python/samples/getting_started/orchestrations/magentic.py b/python/samples/getting_started/orchestrations/magentic.py index 32bf11f5dc..61292ae0cb 100644 --- a/python/samples/getting_started/orchestrations/magentic.py +++ b/python/samples/getting_started/orchestrations/magentic.py @@ -6,10 +6,10 @@ from typing import cast from agent_framework import ( - AgentResponseUpdate, Agent, - Message, + AgentResponseUpdate, HostedCodeInterpreterTool, + Message, WorkflowEvent, ) from agent_framework.openai import OpenAIChatClient, OpenAIResponsesClient diff --git a/python/samples/getting_started/orchestrations/magentic_checkpoint.py 
b/python/samples/getting_started/orchestrations/magentic_checkpoint.py index 4e138c0212..05437a8601 100644 --- a/python/samples/getting_started/orchestrations/magentic_checkpoint.py +++ b/python/samples/getting_started/orchestrations/magentic_checkpoint.py @@ -7,8 +7,8 @@ from agent_framework import ( Agent, - Message, FileCheckpointStorage, + Message, WorkflowCheckpoint, WorkflowEvent, WorkflowRunState, diff --git a/python/samples/getting_started/orchestrations/magentic_human_plan_review.py b/python/samples/getting_started/orchestrations/magentic_human_plan_review.py index b44ad4c35c..95f8de5f46 100644 --- a/python/samples/getting_started/orchestrations/magentic_human_plan_review.py +++ b/python/samples/getting_started/orchestrations/magentic_human_plan_review.py @@ -6,8 +6,8 @@ from typing import cast from agent_framework import ( - AgentResponseUpdate, Agent, + AgentResponseUpdate, Message, WorkflowEvent, ) diff --git a/python/samples/getting_started/orchestrations/sequential_custom_executors.py b/python/samples/getting_started/orchestrations/sequential_custom_executors.py index 4de13cc4d3..7f3e61fe2e 100644 --- a/python/samples/getting_started/orchestrations/sequential_custom_executors.py +++ b/python/samples/getting_started/orchestrations/sequential_custom_executors.py @@ -5,8 +5,8 @@ from agent_framework import ( AgentExecutorResponse, - Message, Executor, + Message, WorkflowContext, handler, ) diff --git a/python/samples/getting_started/purview_agent/sample_purview_agent.py b/python/samples/getting_started/purview_agent/sample_purview_agent.py index 7ad4eec87e..0a5e251ae4 100644 --- a/python/samples/getting_started/purview_agent/sample_purview_agent.py +++ b/python/samples/getting_started/purview_agent/sample_purview_agent.py @@ -25,7 +25,7 @@ import os from typing import Any -from agent_framework import AgentResponse, Agent, Message +from agent_framework import Agent, AgentResponse, Message from agent_framework.azure import AzureOpenAIChatClient from 
agent_framework.microsoft import ( PurviewChatPolicyMiddleware, diff --git a/python/samples/getting_started/threads/custom_chat_message_store_thread.py b/python/samples/getting_started/threads/custom_chat_message_store_thread.py index 96b49cf0ec..b5ab03bbcb 100644 --- a/python/samples/getting_started/threads/custom_chat_message_store_thread.py +++ b/python/samples/getting_started/threads/custom_chat_message_store_thread.py @@ -4,7 +4,7 @@ from collections.abc import Collection from typing import Any -from agent_framework import Message, ChatMessageStoreProtocol +from agent_framework import ChatMessageStoreProtocol, Message from agent_framework._threads import ChatMessageStoreState from agent_framework.openai import OpenAIChatClient diff --git a/python/samples/getting_started/tools/function_tool_with_approval.py b/python/samples/getting_started/tools/function_tool_with_approval.py index b9f1a84975..e149289091 100644 --- a/python/samples/getting_started/tools/function_tool_with_approval.py +++ b/python/samples/getting_started/tools/function_tool_with_approval.py @@ -4,7 +4,7 @@ from random import randrange from typing import TYPE_CHECKING, Annotated, Any -from agent_framework import AgentResponse, Agent, Message, tool +from agent_framework import Agent, AgentResponse, Message, tool from agent_framework.openai import OpenAIResponsesClient if TYPE_CHECKING: diff --git a/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py b/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py index 1e7bde49c6..4cc4e62e5b 100644 --- a/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py +++ b/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py @@ -11,9 +11,8 @@ AgentExecutorResponse, AgentResponse, AgentResponseUpdate, - Agent, - Message, Executor, + Message, WorkflowBuilder, WorkflowContext, WorkflowEvent, diff --git 
a/python/samples/getting_started/workflows/agents/custom_agent_executors.py b/python/samples/getting_started/workflows/agents/custom_agent_executors.py index ee597fc5a0..a44aff4f09 100644 --- a/python/samples/getting_started/workflows/agents/custom_agent_executors.py +++ b/python/samples/getting_started/workflows/agents/custom_agent_executors.py @@ -4,8 +4,8 @@ from agent_framework import ( Agent, - Message, Executor, + Message, WorkflowBuilder, WorkflowContext, handler, diff --git a/python/samples/getting_started/workflows/agents/handoff_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/handoff_workflow_as_agent.py index a03cf4aec2..955446ca80 100644 --- a/python/samples/getting_started/workflows/agents/handoff_workflow_as_agent.py +++ b/python/samples/getting_started/workflows/agents/handoff_workflow_as_agent.py @@ -4,10 +4,10 @@ from typing import Annotated from agent_framework import ( - AgentResponse, Agent, - Message, + AgentResponse, Content, + Message, WorkflowAgent, tool, ) diff --git a/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py b/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py index b6794dba59..ec194d0fa3 100644 --- a/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py +++ b/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py @@ -20,9 +20,9 @@ AgentExecutor, AgentExecutorRequest, AgentExecutorResponse, - Message, Executor, FileCheckpointStorage, + Message, Workflow, WorkflowBuilder, WorkflowCheckpoint, diff --git a/python/samples/getting_started/workflows/checkpoint/handoff_with_tool_approval_checkpoint_resume.py b/python/samples/getting_started/workflows/checkpoint/handoff_with_tool_approval_checkpoint_resume.py index b9c89e54f9..f39c997457 100644 --- a/python/samples/getting_started/workflows/checkpoint/handoff_with_tool_approval_checkpoint_resume.py +++ 
b/python/samples/getting_started/workflows/checkpoint/handoff_with_tool_approval_checkpoint_resume.py @@ -7,11 +7,11 @@ from typing import cast from agent_framework import ( - AgentResponse, Agent, - Message, + AgentResponse, Content, FileCheckpointStorage, + Message, Workflow, WorkflowEvent, tool, diff --git a/python/samples/getting_started/workflows/control-flow/edge_condition.py b/python/samples/getting_started/workflows/control-flow/edge_condition.py index ee91fbf69e..36872a4cd8 100644 --- a/python/samples/getting_started/workflows/control-flow/edge_condition.py +++ b/python/samples/getting_started/workflows/control-flow/edge_condition.py @@ -8,7 +8,6 @@ AgentExecutor, AgentExecutorRequest, # Input message bundle for an AgentExecutor AgentExecutorResponse, - Agent, # Output from an AgentExecutor Message, WorkflowBuilder, # Fluent builder for wiring executors and edges WorkflowContext, # Per-run context and event bus diff --git a/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py b/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py index 0cf8c560e3..1a804c3ada 100644 --- a/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py +++ b/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py @@ -12,7 +12,6 @@ AgentExecutor, AgentExecutorRequest, AgentExecutorResponse, - Agent, Message, WorkflowBuilder, WorkflowContext, diff --git a/python/samples/getting_started/workflows/control-flow/simple_loop.py b/python/samples/getting_started/workflows/control-flow/simple_loop.py index c26ac48942..239ebd2a86 100644 --- a/python/samples/getting_started/workflows/control-flow/simple_loop.py +++ b/python/samples/getting_started/workflows/control-flow/simple_loop.py @@ -7,9 +7,8 @@ AgentExecutor, AgentExecutorRequest, AgentExecutorResponse, - Agent, - Message, Executor, + Message, WorkflowBuilder, WorkflowContext, handler, diff --git 
a/python/samples/getting_started/workflows/control-flow/switch_case_edge_group.py b/python/samples/getting_started/workflows/control-flow/switch_case_edge_group.py index 78fb3c5c80..49a98e3291 100644 --- a/python/samples/getting_started/workflows/control-flow/switch_case_edge_group.py +++ b/python/samples/getting_started/workflows/control-flow/switch_case_edge_group.py @@ -11,9 +11,8 @@ AgentExecutorRequest, # Message bundle sent to an AgentExecutor AgentExecutorResponse, # Result returned by an AgentExecutor Case, - Agent, # Case entry for a switch-case edge group - Message, Default, # Default branch when no cases match + Message, WorkflowBuilder, # Fluent builder for assembling the graph WorkflowContext, # Per-run context and event bus executor, # Decorator to turn a function into a workflow executor diff --git a/python/samples/getting_started/workflows/human-in-the-loop/agents_with_HITL.py b/python/samples/getting_started/workflows/human-in-the-loop/agents_with_HITL.py index 56430de9bd..6cf292ce4f 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/agents_with_HITL.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/agents_with_HITL.py @@ -9,8 +9,8 @@ AgentExecutorResponse, AgentResponse, AgentResponseUpdate, - Message, Executor, + Message, WorkflowBuilder, WorkflowContext, WorkflowEvent, diff --git a/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py b/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py index babd9d54e9..d6b8161f98 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py @@ -8,8 +8,8 @@ AgentExecutorRequest, AgentExecutorResponse, AgentResponseUpdate, - Message, Executor, + Message, WorkflowBuilder, WorkflowContext, WorkflowEvent, diff --git 
a/python/samples/getting_started/workflows/state-management/state_with_agents.py b/python/samples/getting_started/workflows/state-management/state_with_agents.py index 068ac70eb4..97b9fab240 100644 --- a/python/samples/getting_started/workflows/state-management/state_with_agents.py +++ b/python/samples/getting_started/workflows/state-management/state_with_agents.py @@ -7,9 +7,9 @@ from uuid import uuid4 from agent_framework import ( + Agent, AgentExecutorRequest, AgentExecutorResponse, - Agent, Message, WorkflowBuilder, WorkflowContext, diff --git a/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py index eb0375551e..34d59b62d7 100644 --- a/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py +++ b/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py @@ -5,8 +5,8 @@ from typing import Annotated from agent_framework import ( - Message, Content, + Message, WorkflowEvent, tool, ) diff --git a/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py index 9eb5ac667d..159299b9b8 100644 --- a/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py +++ b/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py @@ -5,8 +5,8 @@ from typing import Annotated, cast from agent_framework import ( - Message, Content, + Message, WorkflowEvent, tool, ) diff --git a/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py index 20a778c745..2f7ecea0ac 100644 --- a/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py +++ 
b/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py @@ -5,8 +5,8 @@ from typing import Annotated, cast from agent_framework import ( - Message, Content, + Message, WorkflowEvent, tool, ) From 277402dc27a1ac98790b3f506af352c3b7c2d0e0 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Tue, 10 Feb 2026 14:09:06 +0100 Subject: [PATCH 09/16] fix: update Agent constructor calls from chat_client to client in declaration-only tool tests --- .../core/tests/workflow/test_agent_executor_tool_calls.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py b/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py index 41faca0c9a..3bb51d2224 100644 --- a/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py +++ b/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py @@ -484,7 +484,7 @@ async def _stream_response(self) -> AsyncIterable[ChatResponseUpdate]: async def test_agent_executor_declaration_only_tool_emits_request_info() -> None: """Test that AgentExecutor emits request_info when agent calls a declaration-only tool.""" agent = Agent( - chat_client=DeclarationOnlyMockChatClient(), + client=DeclarationOnlyMockChatClient(), name="DeclarationOnlyAgent", tools=[declaration_only_tool], ) @@ -520,7 +520,7 @@ async def test_agent_executor_declaration_only_tool_emits_request_info() -> None async def test_agent_executor_declaration_only_tool_emits_request_info_streaming() -> None: """Test that AgentExecutor emits request_info for declaration-only tools in streaming mode.""" agent = Agent( - chat_client=DeclarationOnlyMockChatClient(), + client=DeclarationOnlyMockChatClient(), name="DeclarationOnlyAgent", tools=[declaration_only_tool], ) @@ -559,7 +559,7 @@ async def test_agent_executor_declaration_only_tool_emits_request_info_streaming async def 
test_agent_executor_parallel_declaration_only_tool_emits_request_info() -> None: """Test that AgentExecutor emits request_info for parallel declaration-only tool calls.""" agent = Agent( - chat_client=DeclarationOnlyMockChatClient(parallel_request=True), + client=DeclarationOnlyMockChatClient(parallel_request=True), name="DeclarationOnlyAgent", tools=[declaration_only_tool], ) From cae5c67cd458bf60cd90ca97dc62240fb56a6541 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Tue, 10 Feb 2026 14:17:52 +0100 Subject: [PATCH 10/16] fix: add register_cleanup to devui lazy-loading proxy and type stub --- python/packages/core/agent_framework/devui/__init__.py | 1 + python/packages/core/agent_framework/devui/__init__.pyi | 2 ++ 2 files changed, 3 insertions(+) diff --git a/python/packages/core/agent_framework/devui/__init__.py b/python/packages/core/agent_framework/devui/__init__.py index 3e3312f10c..cd18b0c5da 100644 --- a/python/packages/core/agent_framework/devui/__init__.py +++ b/python/packages/core/agent_framework/devui/__init__.py @@ -14,6 +14,7 @@ "OpenAIResponse", "ResponseStreamEvent", "main", + "register_cleanup", "serve", "__version__", ] diff --git a/python/packages/core/agent_framework/devui/__init__.pyi b/python/packages/core/agent_framework/devui/__init__.pyi index 3c1cac827f..9396af54bb 100644 --- a/python/packages/core/agent_framework/devui/__init__.pyi +++ b/python/packages/core/agent_framework/devui/__init__.pyi @@ -10,6 +10,7 @@ from agent_framework_devui import ( ResponseStreamEvent, __version__, main, + register_cleanup, serve, ) @@ -23,5 +24,6 @@ __all__ = [ "ResponseStreamEvent", "__version__", "main", + "register_cleanup", "serve", ] From ecc35ed6285dfe970eb31cac5f00c6c1e3340468 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Tue, 10 Feb 2026 21:04:19 +0100 Subject: [PATCH 11/16] fixed tests and updated new pieces --- .../skills/python-development/SKILL.md | 6 +- .../core/agent_framework/_sessions.py | 32 +++---- 
.../packages/core/tests/core/test_sessions.py | 92 +++++++++---------- .../purview/tests/{ => purview}/conftest.py | 0 .../purview/tests/{ => purview}/test_cache.py | 0 .../{ => purview}/test_chat_middleware.py | 18 ++-- .../tests/{ => purview}/test_exceptions.py | 0 .../tests/{ => purview}/test_middleware.py | 0 .../tests/{ => purview}/test_processor.py | 0 .../{ => purview}/test_purview_client.py | 0 .../{ => purview}/test_purview_models.py | 0 .../tests/{ => purview}/test_settings.py | 0 12 files changed, 74 insertions(+), 74 deletions(-) rename python/packages/purview/tests/{ => purview}/conftest.py (100%) rename python/packages/purview/tests/{ => purview}/test_cache.py (100%) rename python/packages/purview/tests/{ => purview}/test_chat_middleware.py (96%) rename python/packages/purview/tests/{ => purview}/test_exceptions.py (100%) rename python/packages/purview/tests/{ => purview}/test_middleware.py (100%) rename python/packages/purview/tests/{ => purview}/test_processor.py (100%) rename python/packages/purview/tests/{ => purview}/test_purview_client.py (100%) rename python/packages/purview/tests/{ => purview}/test_purview_models.py (100%) rename python/packages/purview/tests/{ => purview}/test_settings.py (100%) diff --git a/python/.github/skills/python-development/SKILL.md b/python/.github/skills/python-development/SKILL.md index 7b119d13d8..c19f273588 100644 --- a/python/.github/skills/python-development/SKILL.md +++ b/python/.github/skills/python-development/SKILL.md @@ -69,7 +69,7 @@ def equal(arg1: str, arg2: str) -> bool: ```python # Core -from agent_framework import ChatAgent, ChatMessage, tool +from agent_framework import ChatAgent, Message, tool # Components from agent_framework.observability import enable_instrumentation @@ -84,10 +84,10 @@ from agent_framework.azure import AzureOpenAIChatClient Define `__all__` in each module. 
Avoid `from module import *` in `__init__.py` files: ```python -__all__ = ["ChatAgent", "ChatMessage", "ChatResponse"] +__all__ = ["ChatAgent", "Message", "ChatResponse"] from ._agents import ChatAgent -from ._types import ChatMessage, ChatResponse +from ._types import Message, ChatResponse ``` ## Performance Guidelines diff --git a/python/packages/core/agent_framework/_sessions.py b/python/packages/core/agent_framework/_sessions.py index 770c35a9a9..a1786775ce 100644 --- a/python/packages/core/agent_framework/_sessions.py +++ b/python/packages/core/agent_framework/_sessions.py @@ -19,7 +19,7 @@ from typing import TYPE_CHECKING, Any from ._tools import ToolProtocol -from ._types import AgentResponse, ChatMessage +from ._types import AgentResponse, Message if TYPE_CHECKING: from ._agents import SupportsAgentRun @@ -80,7 +80,7 @@ def _deserialize_state(state: dict[str, Any]) -> dict[str, Any]: # Register known types -_register_state_type(ChatMessage) +_register_state_type(Message) class SessionContext: @@ -107,8 +107,8 @@ def __init__( *, session_id: str | None = None, service_session_id: str | None = None, - input_messages: list[ChatMessage], - context_messages: dict[str, list[ChatMessage]] | None = None, + input_messages: list[Message], + context_messages: dict[str, list[Message]] | None = None, instructions: list[str] | None = None, tools: list[ToolProtocol] | None = None, options: dict[str, Any] | None = None, @@ -129,7 +129,7 @@ def __init__( self.session_id = session_id self.service_session_id = service_session_id self.input_messages = input_messages - self.context_messages: dict[str, list[ChatMessage]] = context_messages or {} + self.context_messages: dict[str, list[Message]] = context_messages or {} self.instructions: list[str] = instructions or [] self.tools: list[ToolProtocol] = tools or [] self._response: AgentResponse | None = None @@ -141,7 +141,7 @@ def response(self) -> AgentResponse | None: """The agent's response. 
Set by the framework after invocation, read-only for providers.""" return self._response - def extend_messages(self, source: str | object, messages: Sequence[ChatMessage]) -> None: + def extend_messages(self, source: str | object, messages: Sequence[Message]) -> None: """Add context messages from a specific source. Messages are copied before attribution is added, so the caller's @@ -164,7 +164,7 @@ def extend_messages(self, source: str | object, messages: Sequence[ChatMessage]) source_id = source.source_id # type: ignore[attr-defined] attribution = {"source_id": source_id, "source_type": type(source).__name__} - copied: list[ChatMessage] = [] + copied: list[Message] = [] for message in messages: msg_copy = copy.copy(message) msg_copy.additional_properties = dict(message.additional_properties) @@ -206,7 +206,7 @@ def get_messages( exclude_sources: set[str] | None = None, include_input: bool = False, include_response: bool = False, - ) -> list[ChatMessage]: + ) -> list[Message]: """Get context messages, optionally filtered and including input/response. Returns messages in provider execution order (dict insertion order), @@ -221,7 +221,7 @@ def get_messages( Returns: Flattened list of messages in conversation order. """ - result: list[ChatMessage] = [] + result: list[Message] = [] for source_id, messages in self.context_messages.items(): if sources is not None and source_id not in sources: continue @@ -353,7 +353,7 @@ def __init__( self.store_outputs = store_outputs @abstractmethod - async def get_messages(self, session_id: str | None, **kwargs: Any) -> list[ChatMessage]: + async def get_messages(self, session_id: str | None, **kwargs: Any) -> list[Message]: """Retrieve stored messages for this session. Args: @@ -366,7 +366,7 @@ async def get_messages(self, session_id: str | None, **kwargs: Any) -> list[Chat ... 
@abstractmethod - async def save_messages(self, session_id: str | None, messages: Sequence[ChatMessage], **kwargs: Any) -> None: + async def save_messages(self, session_id: str | None, messages: Sequence[Message], **kwargs: Any) -> None: """Persist messages for this session. Args: @@ -376,7 +376,7 @@ async def save_messages(self, session_id: str | None, messages: Sequence[ChatMes """ ... - def _get_context_messages_to_store(self, context: SessionContext) -> list[ChatMessage]: + def _get_context_messages_to_store(self, context: SessionContext) -> list[Message]: """Get context messages that should be stored based on configuration.""" if not self.store_context_messages: return [] @@ -405,7 +405,7 @@ async def after_run( state: dict[str, Any], ) -> None: """Store messages based on configuration.""" - messages_to_store: list[ChatMessage] = [] + messages_to_store: list[Message] = [] messages_to_store.extend(self._get_context_messages_to_store(context)) if self.store_inputs: messages_to_store.extend(context.input_messages) @@ -487,7 +487,7 @@ class InMemoryHistoryProvider(BaseHistoryProvider): """Built-in history provider that stores messages in session.state. Messages are stored in ``state[source_id]["messages"]`` as a list of - ``ChatMessage`` objects. Serialization to/from dicts is handled by + ``Message`` objects. Serialization to/from dicts is handled by ``AgentSession.to_dict()``/``from_dict()`` using ``SerializationProtocol``. 
This provider holds no instance state — all data lives in the session's @@ -499,7 +499,7 @@ class InMemoryHistoryProvider(BaseHistoryProvider): async def get_messages( self, session_id: str | None, *, state: dict[str, Any] | None = None, **kwargs: Any - ) -> list[ChatMessage]: + ) -> list[Message]: """Retrieve messages from session state.""" if state is None: return [] @@ -509,7 +509,7 @@ async def get_messages( async def save_messages( self, session_id: str | None, - messages: Sequence[ChatMessage], + messages: Sequence[Message], *, state: dict[str, Any] | None = None, **kwargs: Any, diff --git a/python/packages/core/tests/core/test_sessions.py b/python/packages/core/tests/core/test_sessions.py index 8b64a8ca72..bd3a22e70d 100644 --- a/python/packages/core/tests/core/test_sessions.py +++ b/python/packages/core/tests/core/test_sessions.py @@ -3,7 +3,7 @@ import json from collections.abc import Sequence -from agent_framework import ChatMessage +from agent_framework import Message from agent_framework._sessions import ( AgentSession, BaseContextProvider, @@ -32,7 +32,7 @@ def test_init_defaults(self) -> None: def test_extend_messages_creates_key(self) -> None: ctx = SessionContext(input_messages=[]) - msg = ChatMessage(role="user", contents=["hello"]) + msg = Message(role="user", contents=["hello"]) ctx.extend_messages("rag", [msg]) assert "rag" in ctx.context_messages assert len(ctx.context_messages["rag"]) == 1 @@ -40,22 +40,22 @@ def test_extend_messages_creates_key(self) -> None: def test_extend_messages_appends_to_existing(self) -> None: ctx = SessionContext(input_messages=[]) - msg1 = ChatMessage(role="user", contents=["first"]) - msg2 = ChatMessage(role="user", contents=["second"]) + msg1 = Message(role="user", contents=["first"]) + msg2 = Message(role="user", contents=["second"]) ctx.extend_messages("src", [msg1]) ctx.extend_messages("src", [msg2]) assert len(ctx.context_messages["src"]) == 2 def test_extend_messages_preserves_source_order(self) -> None: ctx 
= SessionContext(input_messages=[]) - ctx.extend_messages("a", [ChatMessage(role="user", contents=["a"])]) - ctx.extend_messages("b", [ChatMessage(role="user", contents=["b"])]) - ctx.extend_messages("c", [ChatMessage(role="user", contents=["c"])]) + ctx.extend_messages("a", [Message(role="user", contents=["a"])]) + ctx.extend_messages("b", [Message(role="user", contents=["b"])]) + ctx.extend_messages("c", [Message(role="user", contents=["c"])]) assert list(ctx.context_messages.keys()) == ["a", "b", "c"] def test_extend_messages_sets_attribution(self) -> None: ctx = SessionContext(input_messages=[]) - msg = ChatMessage(role="system", contents=["context"]) + msg = Message(role="system", contents=["context"]) ctx.extend_messages("rag", [msg]) stored = ctx.context_messages["rag"][0] assert stored.additional_properties["_attribution"] == {"source_id": "rag"} @@ -64,7 +64,7 @@ def test_extend_messages_sets_attribution(self) -> None: def test_extend_messages_does_not_overwrite_existing_attribution(self) -> None: ctx = SessionContext(input_messages=[]) - msg = ChatMessage( + msg = Message( role="system", contents=["context"], additional_properties={"_attribution": {"source_id": "custom"}} ) ctx.extend_messages("rag", [msg]) @@ -73,7 +73,7 @@ def test_extend_messages_does_not_overwrite_existing_attribution(self) -> None: def test_extend_messages_copies_messages(self) -> None: ctx = SessionContext(input_messages=[]) - msg = ChatMessage(role="user", contents=["hello"]) + msg = Message(role="user", contents=["hello"]) ctx.extend_messages("src", [msg]) stored = ctx.context_messages["src"][0] assert stored is not msg @@ -87,7 +87,7 @@ class MyProvider: source_id = "rag" ctx = SessionContext(input_messages=[]) - msg = ChatMessage(role="system", contents=["ctx"]) + msg = Message(role="system", contents=["ctx"]) ctx.extend_messages(MyProvider(), [msg]) stored = ctx.context_messages["rag"][0] assert stored.additional_properties["_attribution"] == {"source_id": "rag", "source_type": 
"MyProvider"} @@ -104,8 +104,8 @@ def test_extend_instructions_sequence(self) -> None: def test_get_messages_all(self) -> None: ctx = SessionContext(input_messages=[]) - ctx.extend_messages("a", [ChatMessage(role="user", contents=["a"])]) - ctx.extend_messages("b", [ChatMessage(role="user", contents=["b"])]) + ctx.extend_messages("a", [Message(role="user", contents=["a"])]) + ctx.extend_messages("b", [Message(role="user", contents=["b"])]) result = ctx.get_messages() assert len(result) == 2 assert result[0].text == "a" @@ -113,24 +113,24 @@ def test_get_messages_all(self) -> None: def test_get_messages_filter_sources(self) -> None: ctx = SessionContext(input_messages=[]) - ctx.extend_messages("a", [ChatMessage(role="user", contents=["a"])]) - ctx.extend_messages("b", [ChatMessage(role="user", contents=["b"])]) + ctx.extend_messages("a", [Message(role="user", contents=["a"])]) + ctx.extend_messages("b", [Message(role="user", contents=["b"])]) result = ctx.get_messages(sources=["a"]) assert len(result) == 1 assert result[0].text == "a" def test_get_messages_exclude_sources(self) -> None: ctx = SessionContext(input_messages=[]) - ctx.extend_messages("a", [ChatMessage(role="user", contents=["a"])]) - ctx.extend_messages("b", [ChatMessage(role="user", contents=["b"])]) + ctx.extend_messages("a", [Message(role="user", contents=["a"])]) + ctx.extend_messages("b", [Message(role="user", contents=["b"])]) result = ctx.get_messages(exclude_sources=["a"]) assert len(result) == 1 assert result[0].text == "b" def test_get_messages_include_input(self) -> None: - input_msg = ChatMessage(role="user", contents=["input"]) + input_msg = Message(role="user", contents=["input"]) ctx = SessionContext(input_messages=[input_msg]) - ctx.extend_messages("a", [ChatMessage(role="user", contents=["context"])]) + ctx.extend_messages("a", [Message(role="user", contents=["context"])]) result = ctx.get_messages(include_input=True) assert len(result) == 2 assert result[1].text == "input" @@ -139,7 
+139,7 @@ def test_get_messages_include_response(self) -> None: from agent_framework import AgentResponse ctx = SessionContext(input_messages=[]) - ctx._response = AgentResponse(messages=[ChatMessage(role="assistant", contents=["reply"])]) + ctx._response = AgentResponse(messages=[Message(role="assistant", contents=["reply"])]) result = ctx.get_messages(include_response=True) assert len(result) == 1 assert result[0].text == "reply" @@ -187,15 +187,15 @@ async def test_after_run_is_noop(self) -> None: class ConcreteHistoryProvider(BaseHistoryProvider): """Concrete test implementation.""" - def __init__(self, source_id: str, stored_messages: list[ChatMessage] | None = None, **kwargs) -> None: + def __init__(self, source_id: str, stored_messages: list[Message] | None = None, **kwargs) -> None: super().__init__(source_id, **kwargs) - self.stored: list[ChatMessage] = [] + self.stored: list[Message] = [] self._stored_messages = stored_messages or [] - async def get_messages(self, session_id: str | None, **kwargs) -> list[ChatMessage]: + async def get_messages(self, session_id: str | None, **kwargs) -> list[Message]: return list(self._stored_messages) - async def save_messages(self, session_id: str | None, messages: Sequence[ChatMessage], **kwargs) -> None: + async def save_messages(self, session_id: str | None, messages: Sequence[Message], **kwargs) -> None: self.stored.extend(messages) @@ -222,7 +222,7 @@ def test_custom_flags(self) -> None: assert provider.store_context_from == {"rag"} async def test_before_run_loads_messages(self) -> None: - msgs = [ChatMessage(role="user", contents=["history"])] + msgs = [Message(role="user", contents=["history"])] provider = ConcreteHistoryProvider("mem", stored_messages=msgs) session = AgentSession() ctx = SessionContext(session_id="s1", input_messages=[]) @@ -235,8 +235,8 @@ async def test_after_run_stores_inputs_and_responses(self) -> None: provider = ConcreteHistoryProvider("mem") session = AgentSession() - input_msg = 
ChatMessage(role="user", contents=["hello"]) - resp_msg = ChatMessage(role="assistant", contents=["hi"]) + input_msg = Message(role="user", contents=["hello"]) + resp_msg = Message(role="assistant", contents=["hi"]) ctx = SessionContext(session_id="s1", input_messages=[input_msg]) ctx._response = AgentResponse(messages=[resp_msg]) await provider.after_run(agent=None, session=session, context=ctx, state={}) # type: ignore[arg-type] @@ -248,8 +248,8 @@ async def test_after_run_skips_inputs_when_disabled(self) -> None: from agent_framework import AgentResponse provider = ConcreteHistoryProvider("mem", store_inputs=False) - ctx = SessionContext(session_id="s1", input_messages=[ChatMessage(role="user", contents=["hello"])]) - ctx._response = AgentResponse(messages=[ChatMessage(role="assistant", contents=["hi"])]) + ctx = SessionContext(session_id="s1", input_messages=[Message(role="user", contents=["hello"])]) + ctx._response = AgentResponse(messages=[Message(role="assistant", contents=["hi"])]) await provider.after_run(agent=None, session=AgentSession(), context=ctx, state={}) # type: ignore[arg-type] assert len(provider.stored) == 1 assert provider.stored[0].text == "hi" @@ -258,8 +258,8 @@ async def test_after_run_skips_responses_when_disabled(self) -> None: from agent_framework import AgentResponse provider = ConcreteHistoryProvider("mem", store_outputs=False) - ctx = SessionContext(session_id="s1", input_messages=[ChatMessage(role="user", contents=["hello"])]) - ctx._response = AgentResponse(messages=[ChatMessage(role="assistant", contents=["hi"])]) + ctx = SessionContext(session_id="s1", input_messages=[Message(role="user", contents=["hello"])]) + ctx._response = AgentResponse(messages=[Message(role="assistant", contents=["hi"])]) await provider.after_run(agent=None, session=AgentSession(), context=ctx, state={}) # type: ignore[arg-type] assert len(provider.stored) == 1 assert provider.stored[0].text == "hello" @@ -268,9 +268,9 @@ async def 
test_after_run_stores_context_messages(self) -> None: from agent_framework import AgentResponse provider = ConcreteHistoryProvider("audit", load_messages=False, store_context_messages=True) - ctx = SessionContext(session_id="s1", input_messages=[ChatMessage(role="user", contents=["hello"])]) - ctx.extend_messages("rag", [ChatMessage(role="system", contents=["context"])]) - ctx._response = AgentResponse(messages=[ChatMessage(role="assistant", contents=["hi"])]) + ctx = SessionContext(session_id="s1", input_messages=[Message(role="user", contents=["hello"])]) + ctx.extend_messages("rag", [Message(role="system", contents=["context"])]) + ctx._response = AgentResponse(messages=[Message(role="assistant", contents=["hi"])]) await provider.after_run(agent=None, session=AgentSession(), context=ctx, state={}) # type: ignore[arg-type] # Should store: context from rag + input + response texts = [m.text for m in provider.stored] @@ -285,8 +285,8 @@ async def test_after_run_stores_context_from_specific_sources(self) -> None: "audit", load_messages=False, store_context_messages=True, store_context_from={"rag"} ) ctx = SessionContext(session_id="s1", input_messages=[]) - ctx.extend_messages("rag", [ChatMessage(role="system", contents=["rag-context"])]) - ctx.extend_messages("other", [ChatMessage(role="system", contents=["other-context"])]) + ctx.extend_messages("rag", [Message(role="system", contents=["rag-context"])]) + ctx.extend_messages("other", [Message(role="system", contents=["other-context"])]) ctx._response = AgentResponse(messages=[]) await provider.after_run(agent=None, session=AgentSession(), context=ctx, state={}) # type: ignore[arg-type] texts = [m.text for m in provider.stored] @@ -372,15 +372,15 @@ async def test_stores_and_loads_messages(self) -> None: session = AgentSession() # First run: send input, get response - input_msg = ChatMessage(role="user", contents=["hello"]) - resp_msg = ChatMessage(role="assistant", contents=["hi there"]) + input_msg = 
Message(role="user", contents=["hello"]) + resp_msg = Message(role="assistant", contents=["hi there"]) ctx1 = SessionContext(session_id="s1", input_messages=[input_msg]) await provider.before_run(agent=None, session=session, context=ctx1, state=session.state) # type: ignore[arg-type] ctx1._response = AgentResponse(messages=[resp_msg]) await provider.after_run(agent=None, session=session, context=ctx1, state=session.state) # type: ignore[arg-type] # Second run: should load previous messages - ctx2 = SessionContext(session_id="s1", input_messages=[ChatMessage(role="user", contents=["again"])]) + ctx2 = SessionContext(session_id="s1", input_messages=[Message(role="user", contents=["again"])]) await provider.before_run(agent=None, session=session, context=ctx2, state=session.state) # type: ignore[arg-type] loaded = ctx2.context_messages.get("memory", []) assert len(loaded) == 2 @@ -393,23 +393,23 @@ async def test_state_is_serializable(self) -> None: provider = InMemoryHistoryProvider("memory") session = AgentSession() - input_msg = ChatMessage(role="user", contents=["test"]) + input_msg = Message(role="user", contents=["test"]) ctx = SessionContext(session_id="s1", input_messages=[input_msg]) await provider.before_run(agent=None, session=session, context=ctx, state=session.state) # type: ignore[arg-type] - ctx._response = AgentResponse(messages=[ChatMessage(role="assistant", contents=["reply"])]) + ctx._response = AgentResponse(messages=[Message(role="assistant", contents=["reply"])]) await provider.after_run(agent=None, session=session, context=ctx, state=session.state) # type: ignore[arg-type] - # State contains ChatMessage objects (not dicts) - assert isinstance(session.state["memory"]["messages"][0], ChatMessage) + # State contains Message objects (not dicts) + assert isinstance(session.state["memory"]["messages"][0], Message) # to_dict() serializes them via SerializationProtocol session_dict = session.to_dict() json_str = json.dumps(session_dict) assert json_str 
# no error - # Round-trip through session serialization restores ChatMessage objects + # Round-trip through session serialization restores Message objects restored = AgentSession.from_dict(json.loads(json_str)) - assert isinstance(restored.state["memory"]["messages"][0], ChatMessage) + assert isinstance(restored.state["memory"]["messages"][0], Message) assert restored.state["memory"]["messages"][0].text == "test" assert restored.state["memory"]["messages"][1].text == "reply" @@ -417,5 +417,5 @@ async def test_source_id_attribution(self) -> None: provider = InMemoryHistoryProvider("custom-source") assert provider.source_id == "custom-source" ctx = SessionContext(session_id="s1", input_messages=[]) - ctx.extend_messages("custom-source", [ChatMessage(role="user", contents=["test"])]) + ctx.extend_messages("custom-source", [Message(role="user", contents=["test"])]) assert "custom-source" in ctx.context_messages diff --git a/python/packages/purview/tests/conftest.py b/python/packages/purview/tests/purview/conftest.py similarity index 100% rename from python/packages/purview/tests/conftest.py rename to python/packages/purview/tests/purview/conftest.py diff --git a/python/packages/purview/tests/test_cache.py b/python/packages/purview/tests/purview/test_cache.py similarity index 100% rename from python/packages/purview/tests/test_cache.py rename to python/packages/purview/tests/purview/test_cache.py diff --git a/python/packages/purview/tests/test_chat_middleware.py b/python/packages/purview/tests/purview/test_chat_middleware.py similarity index 96% rename from python/packages/purview/tests/test_chat_middleware.py rename to python/packages/purview/tests/purview/test_chat_middleware.py index bf7ace1dbf..677e3e277b 100644 --- a/python/packages/purview/tests/test_chat_middleware.py +++ b/python/packages/purview/tests/purview/test_chat_middleware.py @@ -355,15 +355,15 @@ async def test_chat_middleware_uses_conversation_id_from_options( ) -> None: """Test that session_id is 
extracted from context.options['conversation_id'].""" chat_client = DummyChatClient() - messages = [ChatMessage(role="user", text="Hello")] + messages = [Message(role="user", text="Hello")] options = {"conversation_id": "conv-123", "model": "test-model"} - context = ChatContext(chat_client=chat_client, messages=messages, options=options) + context = ChatContext(client=chat_client, messages=messages, options=options) with patch.object(middleware._processor, "process_messages", return_value=(False, "user-123")) as mock_proc: async def mock_next(ctx: ChatContext) -> None: result = MagicMock() - result.messages = [ChatMessage(role="assistant", text="Hi")] + result.messages = [Message(role="assistant", text="Hi")] ctx.result = result await middleware.process(context, mock_next) @@ -377,14 +377,14 @@ async def test_chat_middleware_passes_none_session_id_when_options_missing( ) -> None: """Test that session_id is None when options don't contain conversation_id.""" chat_client = DummyChatClient() - messages = [ChatMessage(role="user", text="Hello")] - context = ChatContext(chat_client=chat_client, messages=messages, options=None) + messages = [Message(role="user", text="Hello")] + context = ChatContext(client=chat_client, messages=messages, options=None) with patch.object(middleware._processor, "process_messages", return_value=(False, "user-123")) as mock_proc: async def mock_next(ctx: ChatContext) -> None: result = MagicMock() - result.messages = [ChatMessage(role="assistant", text="Hi")] + result.messages = [Message(role="assistant", text="Hi")] ctx.result = result await middleware.process(context, mock_next) @@ -395,15 +395,15 @@ async def mock_next(ctx: ChatContext) -> None: async def test_chat_middleware_session_id_used_in_post_check(self, middleware: PurviewChatPolicyMiddleware) -> None: """Test that session_id is passed to post-check process_messages call.""" chat_client = DummyChatClient() - messages = [ChatMessage(role="user", text="Hello")] + messages = 
[Message(role="user", text="Hello")] options = {"conversation_id": "conv-999"} - context = ChatContext(chat_client=chat_client, messages=messages, options=options) + context = ChatContext(client=chat_client, messages=messages, options=options) with patch.object(middleware._processor, "process_messages", return_value=(False, "user-123")) as mock_proc: async def mock_next(ctx: ChatContext) -> None: result = MagicMock() - result.messages = [ChatMessage(role="assistant", text="Response")] + result.messages = [Message(role="assistant", text="Response")] ctx.result = result await middleware.process(context, mock_next) diff --git a/python/packages/purview/tests/test_exceptions.py b/python/packages/purview/tests/purview/test_exceptions.py similarity index 100% rename from python/packages/purview/tests/test_exceptions.py rename to python/packages/purview/tests/purview/test_exceptions.py diff --git a/python/packages/purview/tests/test_middleware.py b/python/packages/purview/tests/purview/test_middleware.py similarity index 100% rename from python/packages/purview/tests/test_middleware.py rename to python/packages/purview/tests/purview/test_middleware.py diff --git a/python/packages/purview/tests/test_processor.py b/python/packages/purview/tests/purview/test_processor.py similarity index 100% rename from python/packages/purview/tests/test_processor.py rename to python/packages/purview/tests/purview/test_processor.py diff --git a/python/packages/purview/tests/test_purview_client.py b/python/packages/purview/tests/purview/test_purview_client.py similarity index 100% rename from python/packages/purview/tests/test_purview_client.py rename to python/packages/purview/tests/purview/test_purview_client.py diff --git a/python/packages/purview/tests/test_purview_models.py b/python/packages/purview/tests/purview/test_purview_models.py similarity index 100% rename from python/packages/purview/tests/test_purview_models.py rename to 
python/packages/purview/tests/purview/test_purview_models.py diff --git a/python/packages/purview/tests/test_settings.py b/python/packages/purview/tests/purview/test_settings.py similarity index 100% rename from python/packages/purview/tests/test_settings.py rename to python/packages/purview/tests/purview/test_settings.py From 16e2a794a6c821213879b9775cbbea523b5c4467 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Tue, 10 Feb 2026 21:27:56 +0100 Subject: [PATCH 12/16] fix agui typevar --- .../agent_framework_ag_ui_examples/agents/ui_generator_agent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/ui_generator_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/ui_generator_agent.py index 20cf76a891..961f276603 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/ui_generator_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/ui_generator_agent.py @@ -168,7 +168,7 @@ OptionsT = TypeVar("OptionsT", bound=TypedDict, default="ChatOptions") # type: ignore[valid-type] -def ui_generator_agent(client: SupportsChatGetResponse[TOptions]) -> AgentFrameworkAgent: +def ui_generator_agent(client: SupportsChatGetResponse[OptionsT]) -> AgentFrameworkAgent: """Create a UI generator agent with custom React component rendering. 
Args: From d7c7ec7178e72195846433bdec6147b49fa45086 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Tue, 10 Feb 2026 22:14:47 +0100 Subject: [PATCH 13/16] fix merge errors --- python/packages/a2a/agent_framework_a2a/_agent.py | 8 ++++---- .../core/tests/openai/test_openai_responses_client.py | 8 ++++---- python/samples/concepts/background_responses.py | 6 +++--- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/python/packages/a2a/agent_framework_a2a/_agent.py b/python/packages/a2a/agent_framework_a2a/_agent.py index 71d3e3f487..a938128f10 100644 --- a/python/packages/a2a/agent_framework_a2a/_agent.py +++ b/python/packages/a2a/agent_framework_a2a/_agent.py @@ -82,7 +82,7 @@ class A2AAgent(AgentTelemetryLayer, BaseAgent): """Agent2Agent (A2A) protocol implementation. Wraps an A2A Client to connect the Agent Framework with external A2A-compliant agents - via HTTP/JSON-RPC. Converts framework ChatMessages to A2A Messages on send, and converts + via HTTP/JSON-RPC. Converts framework Messages to A2A Messages on send, and converts A2A responses (Messages/Tasks) back to framework types. Inherits BaseAgent capabilities while managing the underlying A2A protocol communication. @@ -377,8 +377,8 @@ async def poll_task(self, continuation_token: A2AContinuationToken) -> AgentResp return AgentResponse.from_updates(updates) return AgentResponse(messages=[], response_id=task.id, raw_representation=task) - def _prepare_message_for_a2a(self, message: ChatMessage) -> A2AMessage: - """Prepare a ChatMessage for the A2A protocol. + def _prepare_message_for_a2a(self, message: Message) -> A2AMessage: + """Prepare a Message for the A2A protocol. 
Transforms Agent Framework Message objects into A2A protocol Messages by: - Converting all message contents to appropriate A2A Part types @@ -512,7 +512,7 @@ def _parse_contents_from_a2a(self, parts: Sequence[A2APart]) -> list[Content]: return contents def _parse_messages_from_task(self, task: Task) -> list[Message]: - """Parse A2A Task artifacts into ChatMessages with ASSISTANT role.""" + """Parse A2A Task artifacts into Messages with ASSISTANT role.""" messages: list[Message] = [] if task.artifacts is not None: diff --git a/python/packages/core/tests/openai/test_openai_responses_client.py b/python/packages/core/tests/openai/test_openai_responses_client.py index 6bc07bbd0a..cd985a097c 100644 --- a/python/packages/core/tests/openai/test_openai_responses_client.py +++ b/python/packages/core/tests/openai/test_openai_responses_client.py @@ -2458,7 +2458,7 @@ def test_chat_response_with_continuation_token() -> None: token = OpenAIContinuationToken(response_id="resp_123") response = ChatResponse( - messages=ChatMessage(role="assistant", contents=[Content.from_text(text="Hello")]), + messages=Message(role="assistant", contents=[Content.from_text(text="Hello")]), response_id="resp_123", continuation_token=token, ) @@ -2469,7 +2469,7 @@ def test_chat_response_with_continuation_token() -> None: def test_chat_response_without_continuation_token() -> None: """Test that ChatResponse defaults continuation_token to None.""" response = ChatResponse( - messages=ChatMessage(role="assistant", contents=[Content.from_text(text="Hello")]), + messages=Message(role="assistant", contents=[Content.from_text(text="Hello")]), ) assert response.continuation_token is None @@ -2495,7 +2495,7 @@ def test_agent_response_with_continuation_token() -> None: token = OpenAIContinuationToken(response_id="resp_789") response = AgentResponse( - messages=ChatMessage(role="assistant", contents=[Content.from_text(text="done")]), + messages=Message(role="assistant", 
contents=[Content.from_text(text="done")]), continuation_token=token, ) assert response.continuation_token is not None @@ -2679,7 +2679,7 @@ async def test_prepare_options_excludes_continuation_token() -> None: """Test that _prepare_options does not pass continuation_token to OpenAI API.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") - messages = [ChatMessage(role="user", contents=[Content.from_text(text="Hello")])] + messages = [Message(role="user", contents=[Content.from_text(text="Hello")])] options: dict[str, Any] = { "model_id": "test-model", "continuation_token": {"response_id": "resp_123"}, diff --git a/python/samples/concepts/background_responses.py b/python/samples/concepts/background_responses.py index 11776b8b68..674c2439eb 100644 --- a/python/samples/concepts/background_responses.py +++ b/python/samples/concepts/background_responses.py @@ -2,7 +2,7 @@ import asyncio -from agent_framework import ChatAgent +from agent_framework import Agent from agent_framework.openai import OpenAIResponsesClient """Background Responses Sample. @@ -22,10 +22,10 @@ # 1. Create the agent with an OpenAI Responses client. -agent = ChatAgent( +agent = Agent( name="researcher", instructions="You are a helpful research assistant. 
Be concise.", - chat_client=OpenAIResponsesClient(model_id="o3"), + client=OpenAIResponsesClient(model_id="o3"), ) From 58c4f9b771b269ef4edadabfba3cd7032932c53d Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Tue, 10 Feb 2026 23:00:57 +0100 Subject: [PATCH 14/16] fix merge conflicts --- .../_context_provider.py | 8 ++-- .../test_aisearch_new_context_provider.py | 14 +++---- .../agent_framework_mem0/_context_provider.py | 6 +-- .../tests/test_mem0_new_context_provider.py | 40 +++++++++---------- .../_context_provider.py | 6 +-- .../_history_provider.py | 27 ++++++------- .../redis/tests/test_new_providers.py | 36 ++++++++--------- 7 files changed, 67 insertions(+), 70 deletions(-) diff --git a/python/packages/azure-ai-search/agent_framework_azure_ai_search/_context_provider.py b/python/packages/azure-ai-search/agent_framework_azure_ai_search/_context_provider.py index de0a1b2adc..bad955d57e 100644 --- a/python/packages/azure-ai-search/agent_framework_azure_ai_search/_context_provider.py +++ b/python/packages/azure-ai-search/agent_framework_azure_ai_search/_context_provider.py @@ -13,7 +13,7 @@ from collections.abc import Awaitable, Callable from typing import TYPE_CHECKING, Any, ClassVar, Literal -from agent_framework import AGENT_FRAMEWORK_USER_AGENT, ChatMessage +from agent_framework import AGENT_FRAMEWORK_USER_AGENT, Message from agent_framework._logging import get_logger from agent_framework._sessions import AgentSession, BaseContextProvider, SessionContext from agent_framework.exceptions import ServiceInitializationError @@ -343,8 +343,8 @@ def get_role_value(role: str | Any) -> str: if not search_result_parts: return - context_messages = [ChatMessage(role="user", text=self.context_prompt)] - context_messages.extend([ChatMessage(role="user", text=part) for part in search_result_parts]) + context_messages = [Message(role="user", text=self.context_prompt)] + context_messages.extend([Message(role="user", text=part) for part in search_result_parts]) 
context.extend_messages(self.source_id, context_messages) # -- Internal methods (ported from AzureAISearchContextProvider) ----------- @@ -546,7 +546,7 @@ async def _ensure_knowledge_base(self) -> None: user_agent=AGENT_FRAMEWORK_USER_AGENT, ) - async def _agentic_search(self, messages: list[ChatMessage]) -> list[str]: + async def _agentic_search(self, messages: list[Message]) -> list[str]: """Perform agentic retrieval with multi-hop reasoning.""" await self._ensure_knowledge_base() diff --git a/python/packages/azure-ai-search/tests/test_aisearch_new_context_provider.py b/python/packages/azure-ai-search/tests/test_aisearch_new_context_provider.py index 1bd4a8eb94..e9af893273 100644 --- a/python/packages/azure-ai-search/tests/test_aisearch_new_context_provider.py +++ b/python/packages/azure-ai-search/tests/test_aisearch_new_context_provider.py @@ -5,7 +5,7 @@ from unittest.mock import AsyncMock, patch import pytest -from agent_framework import ChatMessage +from agent_framework import Message from agent_framework._sessions import AgentSession, SessionContext from agent_framework.exceptions import ServiceInitializationError @@ -179,7 +179,7 @@ async def test_results_added_to_context(self, mock_search_client: AsyncMock) -> session = AgentSession(session_id="test-session") ctx = SessionContext( - input_messages=[ChatMessage(role="user", contents=["test query"])], + input_messages=[Message(role="user", contents=["test query"])], session_id="s1", ) await provider.before_run(agent=None, session=session, context=ctx, state=session.state) # type: ignore[arg-type] @@ -206,7 +206,7 @@ async def test_no_results_no_messages(self, mock_search_client_empty: AsyncMock) session = AgentSession(session_id="test-session") ctx = SessionContext( - input_messages=[ChatMessage(role="user", contents=["test query"])], + input_messages=[Message(role="user", contents=["test query"])], session_id="s1", ) await provider.before_run(agent=None, session=session, context=ctx, state=session.state) # 
type: ignore[arg-type] @@ -221,7 +221,7 @@ async def test_context_prompt_prepended(self, mock_search_client: AsyncMock) -> session = AgentSession(session_id="test-session") ctx = SessionContext( - input_messages=[ChatMessage(role="user", contents=["test query"])], + input_messages=[Message(role="user", contents=["test query"])], session_id="s1", ) await provider.before_run(agent=None, session=session, context=ctx, state=session.state) # type: ignore[arg-type] @@ -243,8 +243,8 @@ async def test_filters_non_user_assistant(self, mock_search_client: AsyncMock) - session = AgentSession(session_id="test-session") ctx = SessionContext( input_messages=[ - ChatMessage(role="system", contents=["system prompt"]), - ChatMessage(role="user", contents=["actual question"]), + Message(role="system", contents=["system prompt"]), + Message(role="user", contents=["actual question"]), ], session_id="s1", ) @@ -262,7 +262,7 @@ async def test_only_system_messages_no_search(self, mock_search_client: AsyncMoc session = AgentSession(session_id="test-session") ctx = SessionContext( - input_messages=[ChatMessage(role="system", contents=["system prompt"])], + input_messages=[Message(role="system", contents=["system prompt"])], session_id="s1", ) await provider.before_run(agent=None, session=session, context=ctx, state=session.state) # type: ignore[arg-type] diff --git a/python/packages/mem0/agent_framework_mem0/_context_provider.py b/python/packages/mem0/agent_framework_mem0/_context_provider.py index 04befe294b..6a09887b72 100644 --- a/python/packages/mem0/agent_framework_mem0/_context_provider.py +++ b/python/packages/mem0/agent_framework_mem0/_context_provider.py @@ -13,7 +13,7 @@ from contextlib import AbstractAsyncContextManager from typing import TYPE_CHECKING, Any -from agent_framework import ChatMessage +from agent_framework import Message from agent_framework._sessions import AgentSession, BaseContextProvider, SessionContext from agent_framework.exceptions import 
ServiceInitializationError from mem0 import AsyncMemory, AsyncMemoryClient @@ -131,7 +131,7 @@ async def before_run( if line_separated_memories: context.extend_messages( self.source_id, - [ChatMessage(role="user", text=f"{self.context_prompt}\n{line_separated_memories}")], + [Message(role="user", text=f"{self.context_prompt}\n{line_separated_memories}")], ) async def after_run( @@ -145,7 +145,7 @@ async def after_run( """Store request/response messages to Mem0 for future retrieval.""" self._validate_filters() - messages_to_store: list[ChatMessage] = list(context.input_messages) + messages_to_store: list[Message] = list(context.input_messages) if context.response and context.response.messages: messages_to_store.extend(context.response.messages) diff --git a/python/packages/mem0/tests/test_mem0_new_context_provider.py b/python/packages/mem0/tests/test_mem0_new_context_provider.py index 51d814efaf..a56e427e68 100644 --- a/python/packages/mem0/tests/test_mem0_new_context_provider.py +++ b/python/packages/mem0/tests/test_mem0_new_context_provider.py @@ -6,7 +6,7 @@ from unittest.mock import AsyncMock, patch import pytest -from agent_framework import AgentResponse, ChatMessage +from agent_framework import AgentResponse, Message from agent_framework._sessions import AgentSession, SessionContext from agent_framework.exceptions import ServiceInitializationError @@ -84,7 +84,7 @@ async def test_memories_added_to_context(self, mock_mem0_client: AsyncMock) -> N ] provider = _Mem0ContextProvider(source_id="mem0", mem0_client=mock_mem0_client, user_id="u1") session = AgentSession(session_id="test-session") - ctx = SessionContext(input_messages=[ChatMessage(role="user", text="Hello")], session_id="s1") + ctx = SessionContext(input_messages=[Message(role="user", text="Hello")], session_id="s1") await provider.before_run(agent=None, session=session, context=ctx, state=session.state) # type: ignore[arg-type] @@ -100,7 +100,7 @@ async def test_empty_input_skips_search(self, 
mock_mem0_client: AsyncMock) -> No """Empty input messages → no search performed.""" provider = _Mem0ContextProvider(source_id="mem0", mem0_client=mock_mem0_client, user_id="u1") session = AgentSession(session_id="test-session") - ctx = SessionContext(input_messages=[ChatMessage(role="user", text="")], session_id="s1") + ctx = SessionContext(input_messages=[Message(role="user", text="")], session_id="s1") await provider.before_run(agent=None, session=session, context=ctx, state=session.state) # type: ignore[arg-type] @@ -112,7 +112,7 @@ async def test_empty_search_results_no_messages(self, mock_mem0_client: AsyncMoc mock_mem0_client.search.return_value = [] provider = _Mem0ContextProvider(source_id="mem0", mem0_client=mock_mem0_client, user_id="u1") session = AgentSession(session_id="test-session") - ctx = SessionContext(input_messages=[ChatMessage(role="user", text="test")], session_id="s1") + ctx = SessionContext(input_messages=[Message(role="user", text="test")], session_id="s1") await provider.before_run(agent=None, session=session, context=ctx, state=session.state) # type: ignore[arg-type] @@ -122,7 +122,7 @@ async def test_validates_filters_before_search(self, mock_mem0_client: AsyncMock """Raises ServiceInitializationError when no filters.""" provider = _Mem0ContextProvider(source_id="mem0", mem0_client=mock_mem0_client) session = AgentSession(session_id="test-session") - ctx = SessionContext(input_messages=[ChatMessage(role="user", text="test")], session_id="s1") + ctx = SessionContext(input_messages=[Message(role="user", text="test")], session_id="s1") with pytest.raises(ServiceInitializationError, match="At least one of the filters"): await provider.before_run(agent=None, session=session, context=ctx, state=session.state) # type: ignore[arg-type] @@ -132,7 +132,7 @@ async def test_v1_1_response_format(self, mock_mem0_client: AsyncMock) -> None: mock_mem0_client.search.return_value = {"results": [{"memory": "remembered fact"}]} provider = 
_Mem0ContextProvider(source_id="mem0", mem0_client=mock_mem0_client, user_id="u1") session = AgentSession(session_id="test-session") - ctx = SessionContext(input_messages=[ChatMessage(role="user", text="test")], session_id="s1") + ctx = SessionContext(input_messages=[Message(role="user", text="test")], session_id="s1") await provider.before_run(agent=None, session=session, context=ctx, state=session.state) # type: ignore[arg-type] @@ -146,8 +146,8 @@ async def test_search_query_combines_input_messages(self, mock_mem0_client: Asyn session = AgentSession(session_id="test-session") ctx = SessionContext( input_messages=[ - ChatMessage(role="user", text="Hello"), - ChatMessage(role="user", text="World"), + Message(role="user", text="Hello"), + Message(role="user", text="World"), ], session_id="s1", ) @@ -168,8 +168,8 @@ async def test_stores_input_and_response(self, mock_mem0_client: AsyncMock) -> N """Stores input+response messages to mem0 via client.add.""" provider = _Mem0ContextProvider(source_id="mem0", mem0_client=mock_mem0_client, user_id="u1") session = AgentSession(session_id="test-session") - ctx = SessionContext(input_messages=[ChatMessage(role="user", text="question")], session_id="s1") - ctx._response = AgentResponse(messages=[ChatMessage(role="assistant", text="answer")]) + ctx = SessionContext(input_messages=[Message(role="user", text="question")], session_id="s1") + ctx._response = AgentResponse(messages=[Message(role="assistant", text="answer")]) await provider.after_run(agent=None, session=session, context=ctx, state=session.state) # type: ignore[arg-type] @@ -188,12 +188,12 @@ async def test_only_stores_user_assistant_system(self, mock_mem0_client: AsyncMo session = AgentSession(session_id="test-session") ctx = SessionContext( input_messages=[ - ChatMessage(role="user", text="hello"), - ChatMessage(role="tool", text="tool output"), + Message(role="user", text="hello"), + Message(role="tool", text="tool output"), ], session_id="s1", ) - ctx._response = 
AgentResponse(messages=[ChatMessage(role="assistant", text="reply")]) + ctx._response = AgentResponse(messages=[Message(role="assistant", text="reply")]) await provider.after_run(agent=None, session=session, context=ctx, state=session.state) # type: ignore[arg-type] @@ -208,8 +208,8 @@ async def test_skips_empty_messages(self, mock_mem0_client: AsyncMock) -> None: session = AgentSession(session_id="test-session") ctx = SessionContext( input_messages=[ - ChatMessage(role="user", text=""), - ChatMessage(role="user", text=" "), + Message(role="user", text=""), + Message(role="user", text=" "), ], session_id="s1", ) @@ -223,8 +223,8 @@ async def test_uses_session_id_as_run_id(self, mock_mem0_client: AsyncMock) -> N """Uses session_id as run_id.""" provider = _Mem0ContextProvider(source_id="mem0", mem0_client=mock_mem0_client, user_id="u1") session = AgentSession(session_id="test-session") - ctx = SessionContext(input_messages=[ChatMessage(role="user", text="hi")], session_id="my-session") - ctx._response = AgentResponse(messages=[ChatMessage(role="assistant", text="hey")]) + ctx = SessionContext(input_messages=[Message(role="user", text="hi")], session_id="my-session") + ctx._response = AgentResponse(messages=[Message(role="assistant", text="hey")]) await provider.after_run(agent=None, session=session, context=ctx, state=session.state) # type: ignore[arg-type] @@ -234,8 +234,8 @@ async def test_validates_filters(self, mock_mem0_client: AsyncMock) -> None: """Raises ServiceInitializationError when no filters.""" provider = _Mem0ContextProvider(source_id="mem0", mem0_client=mock_mem0_client) session = AgentSession(session_id="test-session") - ctx = SessionContext(input_messages=[ChatMessage(role="user", text="hi")], session_id="s1") - ctx._response = AgentResponse(messages=[ChatMessage(role="assistant", text="hey")]) + ctx = SessionContext(input_messages=[Message(role="user", text="hi")], session_id="s1") + ctx._response = 
AgentResponse(messages=[Message(role="assistant", text="hey")]) with pytest.raises(ServiceInitializationError, match="At least one of the filters"): await provider.after_run(agent=None, session=session, context=ctx, state=session.state) # type: ignore[arg-type] @@ -246,7 +246,7 @@ async def test_stores_with_application_id_metadata(self, mock_mem0_client: Async source_id="mem0", mem0_client=mock_mem0_client, user_id="u1", application_id="app1" ) session = AgentSession(session_id="test-session") - ctx = SessionContext(input_messages=[ChatMessage(role="user", text="hi")], session_id="s1") + ctx = SessionContext(input_messages=[Message(role="user", text="hi")], session_id="s1") ctx._response = AgentResponse(messages=[]) await provider.after_run(agent=None, session=session, context=ctx, state=session.state) # type: ignore[arg-type] diff --git a/python/packages/redis/agent_framework_redis/_context_provider.py b/python/packages/redis/agent_framework_redis/_context_provider.py index c0532b8051..f4e44a8677 100644 --- a/python/packages/redis/agent_framework_redis/_context_provider.py +++ b/python/packages/redis/agent_framework_redis/_context_provider.py @@ -16,7 +16,7 @@ from typing import TYPE_CHECKING, Any, Literal, cast import numpy as np -from agent_framework import ChatMessage +from agent_framework import Message from agent_framework._sessions import AgentSession, BaseContextProvider, SessionContext from agent_framework.exceptions import ( AgentException, @@ -142,7 +142,7 @@ async def before_run( if line_separated_memories: context.extend_messages( self.source_id, - [ChatMessage(role="user", text=f"{self.context_prompt}\n{line_separated_memories}")], + [Message(role="user", text=f"{self.context_prompt}\n{line_separated_memories}")], ) @override @@ -157,7 +157,7 @@ async def after_run( """Store request/response messages to Redis for future retrieval.""" self._validate_filters() - messages_to_store: list[ChatMessage] = list(context.input_messages) + messages_to_store: 
list[Message] = list(context.input_messages) if context.response and context.response.messages: messages_to_store.extend(context.response.messages) diff --git a/python/packages/redis/agent_framework_redis/_history_provider.py b/python/packages/redis/agent_framework_redis/_history_provider.py index a4954058d9..54d1ec5f81 100644 --- a/python/packages/redis/agent_framework_redis/_history_provider.py +++ b/python/packages/redis/agent_framework_redis/_history_provider.py @@ -3,34 +3,31 @@ """New-pattern Redis history provider using BaseHistoryProvider. This module provides ``_RedisHistoryProvider``, a side-by-side implementation of -:class:`RedisChatMessageStore` built on the new :class:`BaseHistoryProvider` hooks pattern. +:class:`RedisMessageStore` built on the new :class:`BaseHistoryProvider` hooks pattern. It will be renamed to ``RedisHistoryProvider`` in PR2 when the old class is removed. """ from __future__ import annotations from collections.abc import Sequence -from typing import TYPE_CHECKING, Any +from typing import Any import redis.asyncio as redis -from agent_framework import ChatMessage +from agent_framework import Message from agent_framework._sessions import BaseHistoryProvider from redis.credentials import CredentialProvider -if TYPE_CHECKING: - pass - class _RedisHistoryProvider(BaseHistoryProvider): """Redis-backed history provider using the new BaseHistoryProvider hooks pattern. Stores conversation history in Redis Lists, with each session isolated by a unique Redis key. This is the new-pattern equivalent of - :class:`RedisChatMessageStore`. + :class:`RedisMessageStore`. Note: This class uses a temporary ``_`` prefix to coexist with the existing - :class:`RedisChatMessageStore`. It will be renamed to ``RedisHistoryProvider`` + :class:`RedisMessageStore`. It will be renamed to ``RedisHistoryProvider`` in PR2. 
""" @@ -115,7 +112,7 @@ def _redis_key(self, session_id: str | None) -> str: """Get the Redis key for a given session's messages.""" return f"{self.key_prefix}:{session_id or 'default'}" - async def get_messages(self, session_id: str | None, **kwargs: Any) -> list[ChatMessage]: + async def get_messages(self, session_id: str | None, **kwargs: Any) -> list[Message]: """Retrieve stored messages for this session from Redis. Args: @@ -123,17 +120,17 @@ async def get_messages(self, session_id: str | None, **kwargs: Any) -> list[Chat **kwargs: Additional arguments (unused). Returns: - List of stored ChatMessage objects in chronological order. + List of stored Message objects in chronological order. """ key = self._redis_key(session_id) redis_messages = await self._redis_client.lrange(key, 0, -1) # type: ignore[misc] - messages: list[ChatMessage] = [] + messages: list[Message] = [] if redis_messages: for serialized in redis_messages: - messages.append(ChatMessage.from_dict(self._deserialize_json(serialized))) + messages.append(Message.from_dict(self._deserialize_json(serialized))) return messages - async def save_messages(self, session_id: str | None, messages: Sequence[ChatMessage], **kwargs: Any) -> None: + async def save_messages(self, session_id: str | None, messages: Sequence[Message], **kwargs: Any) -> None: """Persist messages for this session to Redis. 
Args: @@ -158,8 +155,8 @@ async def save_messages(self, session_id: str | None, messages: Sequence[ChatMes await self._redis_client.ltrim(key, -self.max_messages, -1) # type: ignore[misc] @staticmethod - def _serialize_json(message: ChatMessage) -> str: - """Serialize a ChatMessage to a JSON string for Redis storage.""" + def _serialize_json(message: Message) -> str: + """Serialize a Message to a JSON string for Redis storage.""" import json return json.dumps(message.to_dict()) diff --git a/python/packages/redis/tests/test_new_providers.py b/python/packages/redis/tests/test_new_providers.py index 0a350c0bea..3540386873 100644 --- a/python/packages/redis/tests/test_new_providers.py +++ b/python/packages/redis/tests/test_new_providers.py @@ -8,7 +8,7 @@ from unittest.mock import AsyncMock, MagicMock, patch import pytest -from agent_framework import AgentResponse, ChatMessage +from agent_framework import AgentResponse, Message from agent_framework._sessions import AgentSession, SessionContext from agent_framework.exceptions import ServiceInitializationError @@ -142,7 +142,7 @@ async def test_search_results_added_to_context( mock_index.query = AsyncMock(return_value=[{"content": "Memory A"}, {"content": "Memory B"}]) provider = _RedisContextProvider(source_id="ctx", user_id="u1") session = AgentSession(session_id="test-session") - ctx = SessionContext(input_messages=[ChatMessage(role="user", contents=["test query"])], session_id="s1") + ctx = SessionContext(input_messages=[Message(role="user", contents=["test query"])], session_id="s1") await provider.before_run(agent=None, session=session, context=ctx, state=session.state) # type: ignore[arg-type] @@ -159,7 +159,7 @@ async def test_empty_input_no_search( ): provider = _RedisContextProvider(source_id="ctx", user_id="u1") session = AgentSession(session_id="test-session") - ctx = SessionContext(input_messages=[ChatMessage(role="user", contents=[" "])], session_id="s1") + ctx = 
SessionContext(input_messages=[Message(role="user", contents=[" "])], session_id="s1") await provider.before_run(agent=None, session=session, context=ctx, state=session.state) # type: ignore[arg-type] @@ -174,7 +174,7 @@ async def test_empty_results_no_messages( mock_index.query = AsyncMock(return_value=[]) provider = _RedisContextProvider(source_id="ctx", user_id="u1") session = AgentSession(session_id="test-session") - ctx = SessionContext(input_messages=[ChatMessage(role="user", contents=["hello"])], session_id="s1") + ctx = SessionContext(input_messages=[Message(role="user", contents=["hello"])], session_id="s1") await provider.before_run(agent=None, session=session, context=ctx, state=session.state) # type: ignore[arg-type] @@ -189,8 +189,8 @@ async def test_stores_messages( ): provider = _RedisContextProvider(source_id="ctx", user_id="u1") session = AgentSession(session_id="test-session") - response = AgentResponse(messages=[ChatMessage(role="assistant", contents=["response text"])]) - ctx = SessionContext(input_messages=[ChatMessage(role="user", contents=["user input"])], session_id="s1") + response = AgentResponse(messages=[Message(role="assistant", contents=["response text"])]) + ctx = SessionContext(input_messages=[Message(role="user", contents=["user input"])], session_id="s1") ctx._response = response await provider.after_run(agent=None, session=session, context=ctx, state=session.state) # type: ignore[arg-type] @@ -208,7 +208,7 @@ async def test_skips_empty_conversations( ): provider = _RedisContextProvider(source_id="ctx", user_id="u1") session = AgentSession(session_id="test-session") - ctx = SessionContext(input_messages=[ChatMessage(role="user", contents=[" "])], session_id="s1") + ctx = SessionContext(input_messages=[Message(role="user", contents=[" "])], session_id="s1") await provider.after_run(agent=None, session=session, context=ctx, state=session.state) # type: ignore[arg-type] @@ -221,7 +221,7 @@ async def test_stores_partition_fields( ): 
provider = _RedisContextProvider(source_id="ctx", application_id="app", agent_id="ag", user_id="u1") session = AgentSession(session_id="test-session") - ctx = SessionContext(input_messages=[ChatMessage(role="user", contents=["hello"])], session_id="s1") + ctx = SessionContext(input_messages=[Message(role="user", contents=["hello"])], session_id="s1") await provider.after_run(agent=None, session=session, context=ctx, state=session.state) # type: ignore[arg-type] @@ -325,8 +325,8 @@ def test_key_format(self, mock_redis_client: MagicMock): class TestRedisHistoryProviderGetMessages: async def test_returns_deserialized_messages(self, mock_redis_client: MagicMock): - msg1 = ChatMessage(role="user", contents=["Hello"]) - msg2 = ChatMessage(role="assistant", contents=["Hi!"]) + msg1 = Message(role="user", contents=["Hello"]) + msg2 = Message(role="assistant", contents=["Hi!"]) mock_redis_client.lrange = AsyncMock(return_value=[json.dumps(msg1.to_dict()), json.dumps(msg2.to_dict())]) with patch("agent_framework_redis._history_provider.redis.from_url") as mock_from_url: @@ -357,7 +357,7 @@ async def test_saves_serialized_messages(self, mock_redis_client: MagicMock): mock_from_url.return_value = mock_redis_client provider = _RedisHistoryProvider("mem", redis_url="redis://localhost:6379") - msgs = [ChatMessage(role="user", contents=["Hello"]), ChatMessage(role="assistant", contents=["Hi"])] + msgs = [Message(role="user", contents=["Hello"]), Message(role="assistant", contents=["Hi"])] await provider.save_messages("s1", msgs) pipeline = mock_redis_client.pipeline.return_value.__aenter__.return_value @@ -379,7 +379,7 @@ async def test_max_messages_trimming(self, mock_redis_client: MagicMock): mock_from_url.return_value = mock_redis_client provider = _RedisHistoryProvider("mem", redis_url="redis://localhost:6379", max_messages=10) - await provider.save_messages("s1", [ChatMessage(role="user", contents=["msg"])]) + await provider.save_messages("s1", [Message(role="user", 
contents=["msg"])]) mock_redis_client.ltrim.assert_called_once_with("chat_messages:s1", -10, -1) @@ -390,7 +390,7 @@ async def test_no_trim_when_under_limit(self, mock_redis_client: MagicMock): mock_from_url.return_value = mock_redis_client provider = _RedisHistoryProvider("mem", redis_url="redis://localhost:6379", max_messages=10) - await provider.save_messages("s1", [ChatMessage(role="user", contents=["msg"])]) + await provider.save_messages("s1", [Message(role="user", contents=["msg"])]) mock_redis_client.ltrim.assert_not_called() @@ -409,7 +409,7 @@ class TestRedisHistoryProviderBeforeAfterRun: """Test before_run/after_run integration via BaseHistoryProvider defaults.""" async def test_before_run_loads_history(self, mock_redis_client: MagicMock): - msg = ChatMessage(role="user", contents=["old msg"]) + msg = Message(role="user", contents=["old msg"]) mock_redis_client.lrange = AsyncMock(return_value=[json.dumps(msg.to_dict())]) with patch("agent_framework_redis._history_provider.redis.from_url") as mock_from_url: @@ -417,7 +417,7 @@ async def test_before_run_loads_history(self, mock_redis_client: MagicMock): provider = _RedisHistoryProvider("mem", redis_url="redis://localhost:6379") session = AgentSession(session_id="test") - ctx = SessionContext(input_messages=[ChatMessage(role="user", contents=["new msg"])], session_id="s1") + ctx = SessionContext(input_messages=[Message(role="user", contents=["new msg"])], session_id="s1") await provider.before_run(agent=None, session=session, context=ctx, state=session.state) # type: ignore[arg-type] @@ -431,8 +431,8 @@ async def test_after_run_stores_input_and_response(self, mock_redis_client: Magi provider = _RedisHistoryProvider("mem", redis_url="redis://localhost:6379") session = AgentSession(session_id="test") - ctx = SessionContext(input_messages=[ChatMessage(role="user", contents=["hi"])], session_id="s1") - ctx._response = AgentResponse(messages=[ChatMessage(role="assistant", contents=["hello"])]) + ctx = 
SessionContext(input_messages=[Message(role="user", contents=["hi"])], session_id="s1") + ctx._response = AgentResponse(messages=[Message(role="assistant", contents=["hello"])]) await provider.after_run(agent=None, session=session, context=ctx, state=session.state) # type: ignore[arg-type] @@ -448,7 +448,7 @@ async def test_after_run_skips_when_no_messages(self, mock_redis_client: MagicMo ) session = AgentSession(session_id="test") - ctx = SessionContext(input_messages=[ChatMessage(role="user", contents=["hi"])], session_id="s1") + ctx = SessionContext(input_messages=[Message(role="user", contents=["hi"])], session_id="s1") await provider.after_run(agent=None, session=session, context=ctx, state=session.state) # type: ignore[arg-type] From a06676306b27eb5829a2093d7f95c1bd82dafb16 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Tue, 10 Feb 2026 23:46:02 +0100 Subject: [PATCH 15/16] fix merge --- .../tests/workflow/test_workflow_agent.py | 6 +- .../_group_chat.py | 242 +++++++----------- .../orchestrations/tests/test_group_chat.py | 15 -- ...re_chat_agents_tool_calls_with_feedback.py | 1 + .../agents/workflow_as_agent_with_thread.py | 6 +- .../workflows/control-flow/edge_condition.py | 1 + .../multi_selection_edge_group.py | 1 + .../workflows/control-flow/simple_loop.py | 1 + .../control-flow/switch_case_edge_group.py | 1 + 9 files changed, 106 insertions(+), 168 deletions(-) diff --git a/python/packages/core/tests/workflow/test_workflow_agent.py b/python/packages/core/tests/workflow/test_workflow_agent.py index 2013cd77e2..1ccc400f92 100644 --- a/python/packages/core/tests/workflow/test_workflow_agent.py +++ b/python/packages/core/tests/workflow/test_workflow_agent.py @@ -12,10 +12,10 @@ AgentResponse, AgentResponseUpdate, AgentThread, + ChatMessageStore, Content, Executor, Message, - MessageStore, ResponseStream, SupportsAgentRun, UsageDetails, @@ -527,7 +527,7 @@ async def test_thread_conversation_history_included_in_workflow_run(self) -> Non
Message(role="user", text="Previous user message"), Message(role="assistant", text="Previous assistant response"), ] - message_store = MessageStore(messages=history_messages) + message_store = ChatMessageStore(messages=history_messages) thread = AgentThread(message_store=message_store) # Run the agent with the thread and a new message @@ -558,7 +558,7 @@ async def test_thread_conversation_history_included_in_workflow_stream(self) -> Message(role="user", text="Hello"), Message("assistant", ["Hi there!"]), ] - message_store = MessageStore(messages=history_messages) + message_store = ChatMessageStore(messages=history_messages) thread = AgentThread(message_store=message_store) # Stream from the agent with the thread and a new message diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_group_chat.py b/python/packages/orchestrations/agent_framework_orchestrations/_group_chat.py index f53628c74e..d5ead8d9e7 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_group_chat.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_group_chat.py @@ -26,11 +26,9 @@ from collections import OrderedDict from collections.abc import Awaitable, Callable, Sequence from dataclasses import dataclass -from typing import Any, ClassVar, cast, overload +from typing import Any, ClassVar, cast -from agent_framework import Agent, SupportsAgentRun -from agent_framework._threads import AgentThread -from agent_framework._types import Message +from agent_framework import Agent, AgentThread, Message, SupportsAgentRun from agent_framework._workflows._agent_executor import AgentExecutor, AgentExecutorRequest, AgentExecutorResponse from agent_framework._workflows._agent_utils import resolve_agent_id from agent_framework._workflows._checkpoint import CheckpointStorage @@ -526,7 +524,8 @@ class GroupChatBuilder: def __init__( self, *, - participants: Sequence[SupportsAgentRun | Executor], + participants: Sequence[SupportsAgentRun | Executor] | 
None = None, + participant_factories: Sequence[Callable[[], SupportsAgentRun | Executor]] | None = None, # Orchestrator config (exactly one required) orchestrator_agent: Agent | Callable[[], Agent] | None = None, orchestrator: BaseGroupChatOrchestrator | Callable[[], BaseGroupChatOrchestrator] | None = None, @@ -541,8 +540,9 @@ def __init__( """Initialize the GroupChatBuilder. Args: - participants: Sequence of agent or executor instances for the group chat. - orchestrator_agent: An instance of ChatAgent or a callable that produces one to manage the group chat. + participants: Optional sequence of agent or executor instances for the group chat. + participant_factories: Optional sequence of callables returning agent or executor instances. + orchestrator_agent: An instance of Agent or a callable that produces one to manage the group chat. orchestrator: An instance of BaseGroupChatOrchestrator or a callable that produces one to manage the group chat. selection_func: Callable that receives the current GroupChatState and returns the name of the next @@ -555,104 +555,56 @@ def __init__( intermediate_outputs: If True, enables intermediate outputs from agent participants. 
""" self._participants: dict[str, SupportsAgentRun | Executor] = {} + self._participant_factories: list[Callable[[], SupportsAgentRun | Executor]] = [] # Orchestrator related members self._orchestrator: BaseGroupChatOrchestrator | None = None self._orchestrator_factory: Callable[[], Agent | BaseGroupChatOrchestrator] | None = None self._selection_func: GroupChatSelectionFunction | None = None self._agent_orchestrator: Agent | None = None - self._termination_condition: TerminationCondition | None = None - self._max_rounds: int | None = None + self._termination_condition: TerminationCondition | None = termination_condition + self._max_rounds: int | None = max_rounds self._orchestrator_name: str | None = None # Checkpoint related members - self._checkpoint_storage: CheckpointStorage | None = None + self._checkpoint_storage: CheckpointStorage | None = checkpoint_storage # Request info related members self._request_info_enabled: bool = False self._request_info_filter: set[str] = set() # Intermediate outputs - self._intermediate_outputs = False - - self._set_participants(participants) - - @overload - def with_orchestrator(self, *, agent: Agent | Callable[[], Agent]) -> GroupChatBuilder: - """Set the orchestrator for this group chat workflow using a Agent. - - Args: - agent: An instance of Agent or a callable that produces one to manage the group chat. - - Returns: - Self for fluent chaining. - """ - ... - - @overload - def with_orchestrator( - self, *, orchestrator: BaseGroupChatOrchestrator | Callable[[], BaseGroupChatOrchestrator] - ) -> GroupChatBuilder: - """Set the orchestrator for this group chat workflow using a custom orchestrator. - - Args: - orchestrator: An instance of BaseGroupChatOrchestrator or a callable that produces one to - manage the group chat. - - Returns: - Self for fluent chaining. 
- - Note: - When using a custom orchestrator that implements `BaseGroupChatOrchestrator`, setting - `termination_condition` and `max_rounds` on the builder will have no effect since the - orchestrator is already fully defined. - """ - ... - - @overload - def with_orchestrator( - self, - *, - selection_func: GroupChatSelectionFunction, - orchestrator_name: str | None = None, - ) -> GroupChatBuilder: - """Set the orchestrator for this group chat workflow using a selection function. - - Args: - selection_func: Callable that receives the current GroupChatState and returns - the name of the next participant to speak, or None to finish. - orchestrator_name: Optional display name for the orchestrator in the workflow. - If not provided, defaults to `GroupChatBuilder.DEFAULT_ORCHESTRATOR_ID`. - - Returns: - Self for fluent chaining. - """ - ... + self._intermediate_outputs = intermediate_outputs + + if participants is None and participant_factories is None: + raise ValueError("Either participants or participant_factories must be provided.") + + if participant_factories is not None: + self._set_participant_factories(participant_factories) + if participants is not None: + self._set_participants(participants) + + # Set orchestrator if provided + if any(x is not None for x in [orchestrator_agent, orchestrator, selection_func]): + self._set_orchestrator( + orchestrator_agent=orchestrator_agent, + orchestrator=orchestrator, + selection_func=selection_func, + orchestrator_name=orchestrator_name, + ) - def with_orchestrator( + def _set_orchestrator( self, *, - agent: Agent | Callable[[], Agent] | None = None, + orchestrator_agent: Agent | Callable[[], Agent] | None = None, orchestrator: BaseGroupChatOrchestrator | Callable[[], BaseGroupChatOrchestrator] | None = None, selection_func: GroupChatSelectionFunction | None = None, orchestrator_name: str | None = None, - ) -> GroupChatBuilder: - """Set the orchestrator for this group chat workflow. 
- - An group chat orchestrator is responsible for managing the flow of conversation, making - sure all participants are synced and picking the next speaker according to the defined logic - until the termination conditions are met. - - There are a few ways to configure the orchestrator: - 1. Provide a Agent instance or a factory function that produces one to use an agent-based orchestrator - 2. Provide a BaseGroupChatOrchestrator instance or a factory function that produces one to use a custom - orchestrator - 3. Provide a selection function to use that picks the next speaker based on the function logic - - You can only use one of the above methods to configure the orchestrator. + ) -> None: + """Set the orchestrator for this group chat workflow (internal). Args: - agent: An instance of Agent or a callable that produces one to manage the group chat. + orchestrator_agent: An instance of Agent or a callable that produces one to manage the group chat. orchestrator: An instance of BaseGroupChatOrchestrator or a callable that produces one to manage the group chat. selection_func: Callable that receives the current GroupChatState and returns @@ -662,61 +614,58 @@ def with_orchestrator( `GroupChatBuilder.DEFAULT_ORCHESTRATOR_ID`. This parameter is ignored if using an agent or custom orchestrator. - Returns: - Self for fluent chaining. - Raises: ValueError: If an orchestrator has already been set or if none or multiple of the parameters are provided. - - Note: - When using a custom orchestrator that implements `BaseGroupChatOrchestrator`, either - via the `orchestrator` or `orchestrator_factory` parameters, setting `termination_condition` - and `max_rounds` on the builder will have no effect since the orchestrator is already - fully defined. - - Example: - .. code-block:: python - - from agent_framework_orchestrations import GroupChatBuilder - - - orchestrator = CustomGroupChatOrchestrator(...) 
- workflow = GroupChatBuilder().with_orchestrator(orchestrator).participants([agent1, agent2]).build() """ if self._agent_orchestrator is not None: - raise ValueError( - "An agent orchestrator has already been configured. Call with_orchestrator(...) once only." - ) + raise ValueError("An agent orchestrator has already been configured. Set orchestrator config once only.") if self._orchestrator is not None: - raise ValueError("An orchestrator has already been configured. Call with_orchestrator(...) once only.") + raise ValueError("An orchestrator has already been configured. Set orchestrator config once only.") if self._orchestrator_factory is not None: - raise ValueError("A factory has already been configured. Call with_orchestrator(...) once only.") + raise ValueError("A factory has already been configured. Set orchestrator config once only.") if self._selection_func is not None: - raise ValueError("A selection function has already been configured. Call with_orchestrator(...) once only.") + raise ValueError("A selection function has already been configured. 
Set orchestrator config once only.") - if sum(x is not None for x in [agent, orchestrator, selection_func]) != 1: - raise ValueError("Exactly one of agent, orchestrator, or selection_func must be provided.") + if sum(x is not None for x in [orchestrator_agent, orchestrator, selection_func]) != 1: + raise ValueError("Exactly one of orchestrator_agent, orchestrator, or selection_func must be provided.") - if agent is not None and isinstance(agent, Agent): - self._agent_orchestrator = agent + if orchestrator_agent is not None and isinstance(orchestrator_agent, Agent): + self._agent_orchestrator = orchestrator_agent elif orchestrator is not None and isinstance(orchestrator, BaseGroupChatOrchestrator): self._orchestrator = orchestrator elif selection_func is not None: self._selection_func = selection_func self._orchestrator_name = orchestrator_name else: - self._orchestrator_factory = agent or orchestrator + self._orchestrator_factory = orchestrator_agent or orchestrator - return self + def _set_participant_factories( + self, + participant_factories: Sequence[Callable[[], SupportsAgentRun | Executor]], + ) -> None: + """Set participant factories (internal).""" + if self._participants: + raise ValueError("Cannot provide both participants and participant_factories.") + + if self._participant_factories: + raise ValueError("participant_factories already set.") + + if not participant_factories: + raise ValueError("participant_factories cannot be empty") + + self._participant_factories = list(participant_factories) def _set_participants(self, participants: Sequence[SupportsAgentRun | Executor]) -> None: """Set participants (internal).""" + if self._participant_factories: + raise ValueError("Cannot provide both participants and participant_factories.") + if self._participants: - raise ValueError("participants have already been set. 
Call participants() at most once.") + raise ValueError("participants already set.") if not participants: raise ValueError("participants cannot be empty.") @@ -767,9 +716,10 @@ def stop_after_two_calls(conversation: list[Message]) -> bool: specialist_agent = ... workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=my_selection_function) - .participants([agent1, specialist_agent]) + GroupChatBuilder( + participants=[agent1, specialist_agent], + selection_func=my_selection_function, + ) .with_termination_condition(stop_after_two_calls) .build() ) @@ -821,9 +771,10 @@ def with_checkpointing(self, checkpoint_storage: CheckpointStorage) -> GroupChat storage = MemoryCheckpointStorage() workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=my_selection_function) - .participants([agent1, agent2]) + GroupChatBuilder( + participants=[agent1, agent2], + selection_func=my_selection_function, + ) .with_checkpointing(storage) .build() ) @@ -860,19 +811,6 @@ def with_request_info(self, *, agents: Sequence[str | SupportsAgentRun] | None = return self - def with_intermediate_outputs(self) -> GroupChatBuilder: - """Enable intermediate outputs from agent participants. - - When enabled, the workflow returns each agent participant's response or yields - streaming updates as they become available. The output of the orchestrator will - always be available as the final output of the workflow. - - Returns: - Self for fluent chaining - """ - self._intermediate_outputs = True - return self - def _resolve_orchestrator(self, participants: Sequence[Executor]) -> Executor: """Determine the orchestrator to use for the workflow. @@ -883,8 +821,11 @@ def _resolve_orchestrator(self, participants: Sequence[Executor]) -> Executor: x is None for x in [self._agent_orchestrator, self._selection_func, self._orchestrator, self._orchestrator_factory] ): - raise ValueError("No orchestrator has been configured. 
Call with_orchestrator() to set one.") - # We don't need to check if multiple are set since that is handled in with_orchestrator() + raise ValueError( + "No orchestrator has been configured. " + "Pass orchestrator_agent, orchestrator, or selection_func to the constructor." + ) + # We don't need to check if multiple are set since that is handled in _set_orchestrator() if self._agent_orchestrator: return AgentBasedGroupChatOrchestrator( @@ -924,14 +865,24 @@ def _resolve_orchestrator(self, participants: Sequence[Executor]) -> Executor: ) # This should never be reached due to the checks above - raise RuntimeError("Orchestrator could not be resolved. Please provide one via with_orchestrator()") + raise RuntimeError( + "Orchestrator could not be resolved. " + "Pass orchestrator_agent, orchestrator, or selection_func to the constructor." + ) def _resolve_participants(self) -> list[Executor]: """Resolve participant instances into Executor objects.""" - if not self._participants: - raise ValueError("No participants provided. Pass participants to the constructor.") - - participants: list[Executor | SupportsAgentRun] = list(self._participants.values()) + if not self._participants and not self._participant_factories: + raise ValueError("No participants provided. 
Pass participants or participant_factories to the constructor.") + # We don't need to check if both are set since that is handled in the respective methods + + participants: list[Executor | SupportsAgentRun] = [] + if self._participant_factories: + for factory in self._participant_factories: + participant = factory() + participants.append(participant) + else: + participants = list(self._participants.values()) executors: list[Executor] = [] for participant in participants: @@ -967,19 +918,16 @@ def build(self) -> Workflow: orchestrator: Executor = self._resolve_orchestrator(participants) # Build workflow graph - workflow_builder = WorkflowBuilder().set_start_executor(orchestrator) + workflow_builder = WorkflowBuilder( + start_executor=orchestrator, + checkpoint_storage=self._checkpoint_storage, + output_executors=[orchestrator] if not self._intermediate_outputs else None, + ) for participant in participants: # Orchestrator and participant bi-directional edges workflow_builder = workflow_builder.add_edge(orchestrator, participant) workflow_builder = workflow_builder.add_edge(participant, orchestrator) - if not self._intermediate_outputs: - # Constrain output to orchestrator only - workflow_builder = workflow_builder.with_output_from([orchestrator]) - - if self._checkpoint_storage is not None: - workflow_builder = workflow_builder.with_checkpointing(self._checkpoint_storage) - return workflow_builder.build() diff --git a/python/packages/orchestrations/tests/test_group_chat.py b/python/packages/orchestrations/tests/test_group_chat.py index 01a1b6c7ac..9eb94b19d4 100644 --- a/python/packages/orchestrations/tests/test_group_chat.py +++ b/python/packages/orchestrations/tests/test_group_chat.py @@ -244,21 +244,6 @@ def test_build_without_participants_raises_error(self) -> None: with pytest.raises(ValueError): GroupChatBuilder(participants=[]) - def test_duplicate_manager_configuration_raises_error(self) -> None: - """Test that configuring multiple orchestrator options 
raises ValueError.""" - agent = StubAgent("test", "response") - - def selector(state: GroupChatState) -> str: - return "agent" - - builder = GroupChatBuilder().with_orchestrator(selection_func=selector) - - with pytest.raises( - ValueError, - match=r"Either participants or participant_factories must be provided\.", - ): - GroupChatBuilder() - def test_duplicate_manager_configuration_raises_error(self) -> None: """Test that configuring multiple orchestrator options raises ValueError.""" agent = StubAgent("test", "response") diff --git a/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py b/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py index 4cc4e62e5b..cacaa2b493 100644 --- a/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py +++ b/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py @@ -6,6 +6,7 @@ from typing import Annotated from agent_framework import ( + Agent, AgentExecutor, AgentExecutorRequest, AgentExecutorResponse, diff --git a/python/samples/getting_started/workflows/agents/workflow_as_agent_with_thread.py b/python/samples/getting_started/workflows/agents/workflow_as_agent_with_thread.py index 1165deac55..0e84b10821 100644 --- a/python/samples/getting_started/workflows/agents/workflow_as_agent_with_thread.py +++ b/python/samples/getting_started/workflows/agents/workflow_as_agent_with_thread.py @@ -39,7 +39,7 @@ async def main() -> None: # Create a chat client client = OpenAIChatClient() - assistant = chat_client.as_agent( + assistant = client.as_agent( name="assistant", instructions=( "You are a helpful assistant. Answer questions based on the conversation " @@ -47,7 +47,7 @@ async def main() -> None: ), ) - summarizer = chat_client.as_agent( + summarizer = client.as_agent( name="summarizer", instructions=( "You are a summarizer. 
After the assistant responds, provide a brief " @@ -121,7 +121,7 @@ async def demonstrate_thread_serialization() -> None: """ client = OpenAIChatClient() - memory_assistant = chat_client.as_agent( + memory_assistant = client.as_agent( name="memory_assistant", instructions="You are a helpful assistant with good memory. Remember details from our conversation.", ) diff --git a/python/samples/getting_started/workflows/control-flow/edge_condition.py b/python/samples/getting_started/workflows/control-flow/edge_condition.py index 36872a4cd8..c7d8cbeb2d 100644 --- a/python/samples/getting_started/workflows/control-flow/edge_condition.py +++ b/python/samples/getting_started/workflows/control-flow/edge_condition.py @@ -5,6 +5,7 @@ from typing import Any from agent_framework import ( # Core chat primitives used to build requests + Agent, AgentExecutor, AgentExecutorRequest, # Input message bundle for an AgentExecutor AgentExecutorResponse, diff --git a/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py b/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py index 1a804c3ada..f6c32c7882 100644 --- a/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py +++ b/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py @@ -9,6 +9,7 @@ from uuid import uuid4 from agent_framework import ( + Agent, AgentExecutor, AgentExecutorRequest, AgentExecutorResponse, diff --git a/python/samples/getting_started/workflows/control-flow/simple_loop.py b/python/samples/getting_started/workflows/control-flow/simple_loop.py index 239ebd2a86..f0232863bc 100644 --- a/python/samples/getting_started/workflows/control-flow/simple_loop.py +++ b/python/samples/getting_started/workflows/control-flow/simple_loop.py @@ -4,6 +4,7 @@ from enum import Enum from agent_framework import ( + Agent, AgentExecutor, AgentExecutorRequest, AgentExecutorResponse, diff --git 
a/python/samples/getting_started/workflows/control-flow/switch_case_edge_group.py b/python/samples/getting_started/workflows/control-flow/switch_case_edge_group.py index 49a98e3291..43c5a2354d 100644 --- a/python/samples/getting_started/workflows/control-flow/switch_case_edge_group.py +++ b/python/samples/getting_started/workflows/control-flow/switch_case_edge_group.py @@ -7,6 +7,7 @@ from uuid import uuid4 from agent_framework import ( # Core chat primitives used to form LLM requests + Agent, AgentExecutor, AgentExecutorRequest, # Message bundle sent to an AgentExecutor AgentExecutorResponse, # Result returned by an AgentExecutor From 4dcd4e8ada04436f203ea981dbe64defd5be021a Mon Sep 17 00:00:00 2001 From: Evan Mattson Date: Wed, 11 Feb 2026 07:57:48 +0900 Subject: [PATCH 16/16] Remove unused links --- python/samples/getting_started/orchestrations/README.md | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/python/samples/getting_started/orchestrations/README.md b/python/samples/getting_started/orchestrations/README.md index 0965ff2178..9b603eda34 100644 --- a/python/samples/getting_started/orchestrations/README.md +++ b/python/samples/getting_started/orchestrations/README.md @@ -30,23 +30,20 @@ from agent_framework.orchestrations import ( | Sample | File | Concepts | | ------------------------------------------------- | ------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------- | -| Concurrent Orchestration (Default Aggregator) | [concurrent_agents.py](./concurrent_agents.py) | Fan-out to multiple agents; fan-in with default aggregator returning combined ChatMessages | +| Concurrent Orchestration (Default Aggregator) | [concurrent_agents.py](./concurrent_agents.py) | Fan-out to multiple agents; fan-in with default aggregator returning combined Messages | | Concurrent Orchestration (Custom Aggregator) | 
[concurrent_custom_aggregator.py](./concurrent_custom_aggregator.py) | Override aggregator via callback; summarize results with an LLM | -| Concurrent Orchestration (Custom Agent Executors) | [concurrent_custom_agent_executors.py](./concurrent_custom_agent_executors.py) | Child executors own ChatAgents; concurrent fan-out/fan-in via ConcurrentBuilder | -| Concurrent Orchestration (Participant Factory) | [concurrent_participant_factory.py](./concurrent_participant_factory.py) | Use participant factories for state isolation between workflow instances | +| Concurrent Orchestration (Custom Agent Executors) | [concurrent_custom_agent_executors.py](./concurrent_custom_agent_executors.py) | Child executors own Agents; concurrent fan-out/fan-in via ConcurrentBuilder | | Group Chat with Agent Manager | [group_chat_agent_manager.py](./group_chat_agent_manager.py) | Agent-based manager using `with_orchestrator(agent=)` to select next speaker | | Group Chat Philosophical Debate | [group_chat_philosophical_debate.py](./group_chat_philosophical_debate.py) | Agent manager moderates long-form, multi-round debate across diverse participants | | Group Chat with Simple Function Selector | [group_chat_simple_selector.py](./group_chat_simple_selector.py) | Group chat with a simple function selector for next speaker | | Handoff (Simple) | [handoff_simple.py](./handoff_simple.py) | Single-tier routing: triage agent routes to specialists, control returns to user after each specialist response | | Handoff (Autonomous) | [handoff_autonomous.py](./handoff_autonomous.py) | Autonomous mode: specialists iterate independently until invoking a handoff tool using `.with_autonomous_mode()` | -| Handoff (Participant Factory) | [handoff_participant_factory.py](./handoff_participant_factory.py) | Use participant factories for state isolation between workflow instances | | Handoff with Code Interpreter | [handoff_with_code_interpreter_file.py](./handoff_with_code_interpreter_file.py) | Retrieve file 
IDs from code interpreter output in handoff workflow | | Magentic Workflow (Multi-Agent) | [magentic.py](./magentic.py) | Orchestrate multiple agents with Magentic manager and streaming | | Magentic + Human Plan Review | [magentic_human_plan_review.py](./magentic_human_plan_review.py) | Human reviews/updates the plan before execution | | Magentic + Checkpoint Resume | [magentic_checkpoint.py](./magentic_checkpoint.py) | Resume Magentic orchestration from saved checkpoints | | Sequential Orchestration (Agents) | [sequential_agents.py](./sequential_agents.py) | Chain agents sequentially with shared conversation context | | Sequential Orchestration (Custom Executor) | [sequential_custom_executors.py](./sequential_custom_executors.py) | Mix agents with a summarizer that appends a compact summary | -| Sequential Orchestration (Participant Factories) | [sequential_participant_factory.py](./sequential_participant_factory.py) | Use participant factories for state isolation between workflow instances | ## Tips