diff --git a/.github/workflows/python-merge-tests.yml b/.github/workflows/python-merge-tests.yml index 0dafc1266f..f6ed0063cc 100644 --- a/.github/workflows/python-merge-tests.yml +++ b/.github/workflows/python-merge-tests.yml @@ -97,7 +97,7 @@ jobs: id: azure-functions-setup - name: Test with pytest timeout-minutes: 10 - run: uv run poe all-tests -n logical --dist loadfile --dist worksteal --timeout 600 --retries 3 --retry-delay 10 + run: uv run poe all-tests -n logical --dist loadfile --dist worksteal --timeout 900 --retries 3 --retry-delay 10 working-directory: ./python - name: Test core samples timeout-minutes: 10 diff --git a/python/CODING_STANDARD.md b/python/CODING_STANDARD.md index 6858f79f43..1b7b2726b8 100644 --- a/python/CODING_STANDARD.md +++ b/python/CODING_STANDARD.md @@ -92,7 +92,7 @@ The package follows a flat import structure: - **Core**: Import directly from `agent_framework` ```python - from agent_framework import ChatAgent, ai_function + from agent_framework import ChatAgent, tool ``` - **Components**: Import from `agent_framework.` @@ -336,7 +336,7 @@ Think about caching where appropriate. 
Cache the results of expensive operations ```python # ✅ Preferred - cache expensive computations -class AIFunction: +class FunctionTool: def __init__(self, ...): self._cached_parameters: dict[str, Any] | None = None diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_client.py b/python/packages/ag-ui/agent_framework_ag_ui/_client.py index 7a03949b66..7c0f7b8cf9 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_client.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_client.py @@ -12,12 +12,12 @@ import httpx from agent_framework import ( - AIFunction, BaseChatClient, ChatMessage, ChatResponse, ChatResponseUpdate, Content, + FunctionTool, use_chat_middleware, use_function_invocation, ) @@ -239,7 +239,7 @@ def _register_server_tool_placeholder(self, tool_name: str) -> None: if any(getattr(tool, "name", None) == tool_name for tool in config.additional_tools): return - placeholder: AIFunction[Any, Any] = AIFunction( + placeholder: FunctionTool[Any, Any] = FunctionTool( name=tool_name, description="Server-managed tool placeholder (AG-UI)", func=None, diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_utils.py b/python/packages/ag-ui/agent_framework_ag_ui/_utils.py index 967653fff8..f7f01261f5 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_utils.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_utils.py @@ -10,7 +10,7 @@ from datetime import date, datetime from typing import Any -from agent_framework import AgentResponseUpdate, AIFunction, ChatResponseUpdate, Role, ToolProtocol +from agent_framework import AgentResponseUpdate, ChatResponseUpdate, FunctionTool, Role, ToolProtocol # Role mapping constants AGUI_TO_FRAMEWORK_ROLE: dict[str, Role] = { @@ -160,10 +160,10 @@ def make_json_safe(obj: Any) -> Any: # noqa: ANN401 def convert_agui_tools_to_agent_framework( agui_tools: list[dict[str, Any]] | None, -) -> list[AIFunction[Any, Any]] | None: - """Convert AG-UI tool definitions to Agent Framework AIFunction declarations. 
+) -> list[FunctionTool[Any, Any]] | None: + """Convert AG-UI tool definitions to Agent Framework FunctionTool declarations. - Creates declaration-only AIFunction instances (no executable implementation). + Creates declaration-only FunctionTool instances (no executable implementation). These are used to tell the LLM about available tools. The actual execution happens on the client side via @use_function_invocation. @@ -174,18 +174,18 @@ def convert_agui_tools_to_agent_framework( agui_tools: List of AG-UI tool definitions with name, description, parameters Returns: - List of AIFunction declarations, or None if no tools provided + List of FunctionTool declarations, or None if no tools provided """ if not agui_tools: return None - result: list[AIFunction[Any, Any]] = [] + result: list[FunctionTool[Any, Any]] = [] for tool_def in agui_tools: - # Create declaration-only AIFunction (func=None means no implementation) + # Create declaration-only FunctionTool (func=None means no implementation) # When func=None, the declaration_only property returns True, # which tells @use_function_invocation to return the function call # without executing it (so it can be sent back to the client) - func: AIFunction[Any, Any] = AIFunction( + func: FunctionTool[Any, Any] = FunctionTool( name=tool_def.get("name", ""), description=tool_def.get("description", ""), func=None, # CRITICAL: Makes declaration_only=True @@ -229,24 +229,24 @@ def convert_tools_to_agui_format( results: list[dict[str, Any]] = [] - for tool in tool_list: - if isinstance(tool, dict): + for tool_item in tool_list: + if isinstance(tool_item, dict): # Already in dict format, pass through - results.append(tool) # type: ignore[arg-type] - elif isinstance(tool, AIFunction): - # Convert AIFunction to AG-UI tool format + results.append(tool_item) # type: ignore[arg-type] + elif isinstance(tool_item, FunctionTool): + # Convert FunctionTool to AG-UI tool format results.append( { - "name": tool.name, - "description": 
tool.description, - "parameters": tool.parameters(), + "name": tool_item.name, + "description": tool_item.description, + "parameters": tool_item.parameters(), } ) - elif callable(tool): - # Convert callable to AIFunction first, then to AG-UI format - from agent_framework import ai_function + elif callable(tool_item): + # Convert callable to FunctionTool first, then to AG-UI format + from agent_framework import tool - ai_func = ai_function(tool) + ai_func = tool(tool_item) results.append( { "name": ai_func.name, @@ -254,11 +254,11 @@ def convert_tools_to_agui_format( "parameters": ai_func.parameters(), } ) - elif isinstance(tool, ToolProtocol): + elif isinstance(tool_item, ToolProtocol): # Handle other ToolProtocol implementations - # For now, we'll skip non-AIFunction tools as they may not have + # For now, we'll skip non-FunctionTool instances as they may not have # the parameters() method. This matches .NET behavior which only - # converts AIFunctionDeclaration instances. + # converts FunctionToolDeclaration instances. 
continue return results if results else None diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/README.md b/python/packages/ag-ui/agent_framework_ag_ui_examples/README.md index f22969f883..df07cff85d 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/README.md +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/README.md @@ -187,11 +187,11 @@ The package uses a clean, orchestrator-based architecture: You can create your own agent factories following the same pattern as the examples: ```python -from agent_framework import ChatAgent, ai_function +from agent_framework import ChatAgent, tool from agent_framework import ChatClientProtocol from agent_framework.ag_ui import AgentFrameworkAgent -@ai_function +@tool def my_tool(param: str) -> str: """My custom tool.""" return f"Result: {param}" @@ -294,9 +294,9 @@ wrapped_agent = AgentFrameworkAgent( Human-in-the-loop is automatically handled when tools are marked for approval: ```python -from agent_framework import ai_function +from agent_framework import tool -@ai_function(approval_mode="always_require") +@tool(approval_mode="always_require") def sensitive_action(param: str) -> str: """This action requires user approval.""" return f"Executed with {param}" diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/document_writer_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/document_writer_agent.py index 34ade05032..221b167fa8 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/document_writer_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/document_writer_agent.py @@ -2,11 +2,11 @@ """Example agent demonstrating predictive state updates with document writing.""" -from agent_framework import ChatAgent, ChatClientProtocol, ai_function +from agent_framework import ChatAgent, ChatClientProtocol, tool from agent_framework.ag_ui import AgentFrameworkAgent -@ai_function(approval_mode="always_require") 
+@tool(approval_mode="always_require") def write_document(document: str) -> str: """Write a document. Use markdown formatting to format the document. diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/human_in_the_loop_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/human_in_the_loop_agent.py index dbfdab5272..368c4e47ed 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/human_in_the_loop_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/human_in_the_loop_agent.py @@ -5,7 +5,7 @@ from enum import Enum from typing import Any -from agent_framework import ChatAgent, ChatClientProtocol, ai_function +from agent_framework import ChatAgent, ChatClientProtocol, tool from pydantic import BaseModel, Field @@ -23,7 +23,7 @@ class TaskStep(BaseModel): status: StepStatus = Field(default=StepStatus.ENABLED, description="Whether the step is enabled or disabled") -@ai_function( +@tool( name="generate_task_steps", description="Generate execution steps for a task", approval_mode="always_require", diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/recipe_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/recipe_agent.py index 2d38e612aa..39f3803f9a 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/recipe_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/recipe_agent.py @@ -5,7 +5,7 @@ from enum import Enum from typing import Any -from agent_framework import ChatAgent, ChatClientProtocol, ai_function +from agent_framework import ChatAgent, ChatClientProtocol, tool from agent_framework.ag_ui import AgentFrameworkAgent from pydantic import BaseModel, Field @@ -49,7 +49,7 @@ class Recipe(BaseModel): instructions: list[str] = Field(..., description="Step-by-step cooking instructions") -@ai_function +@tool def update_recipe(recipe: Recipe) -> str: """Update the recipe with new or modified content. 
diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/research_assistant_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/research_assistant_agent.py index 52515bc0a4..b92874421a 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/research_assistant_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/research_assistant_agent.py @@ -5,11 +5,11 @@ import asyncio from typing import Any -from agent_framework import ChatAgent, ChatClientProtocol, ai_function +from agent_framework import ChatAgent, ChatClientProtocol, tool from agent_framework.ag_ui import AgentFrameworkAgent -@ai_function +@tool async def research_topic(topic: str) -> str: """Research a topic and generate a comprehensive report. @@ -35,7 +35,7 @@ async def research_topic(topic: str) -> str: return f"Research report on '{topic}':\n" + "\n".join(results) -@ai_function +@tool async def create_presentation(title: str, num_slides: int) -> str: """Create a presentation with multiple slides. @@ -55,7 +55,7 @@ async def create_presentation(title: str, num_slides: int) -> str: return f"Created presentation '{title}' with {num_slides} slides:\n" + "\n".join(slides) -@ai_function +@tool async def analyze_data(dataset: str) -> str: """Analyze a dataset and produce insights. 
diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_planner_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_planner_agent.py index 442c9e6182..57e14bb6c3 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_planner_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_planner_agent.py @@ -4,11 +4,11 @@ from typing import Any -from agent_framework import ChatAgent, ChatClientProtocol, ai_function +from agent_framework import ChatAgent, ChatClientProtocol, tool from agent_framework.ag_ui import AgentFrameworkAgent -@ai_function(approval_mode="always_require") +@tool(approval_mode="always_require") def create_calendar_event(title: str, date: str, time: str) -> str: """Create a calendar event. @@ -23,7 +23,7 @@ def create_calendar_event(title: str, date: str, time: str) -> str: return f"Calendar event '{title}' created for {date} at {time}" -@ai_function(approval_mode="always_require") +@tool(approval_mode="always_require") def send_email(to: str, subject: str, body: str) -> str: """Send an email. @@ -38,7 +38,7 @@ def send_email(to: str, subject: str, body: str) -> str: return f"Email sent to {to} with subject '{subject}'" -@ai_function(approval_mode="always_require") +@tool(approval_mode="always_require") def book_meeting_room(room_name: str, date: str, start_time: str, end_time: str) -> str: """Book a meeting room. 
diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_steps_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_steps_agent.py index 9a4acf4319..645b1b4822 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_steps_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_steps_agent.py @@ -18,7 +18,7 @@ TextMessageStartEvent, ToolCallStartEvent, ) -from agent_framework import ChatAgent, ChatClientProtocol, ChatMessage, Content, ai_function +from agent_framework import ChatAgent, ChatClientProtocol, ChatMessage, Content, tool from agent_framework.ag_ui import AgentFrameworkAgent from pydantic import BaseModel, Field @@ -39,7 +39,7 @@ class TaskStep(BaseModel): status: StepStatus = Field(default=StepStatus.PENDING, description="The status of the step") -@ai_function +@tool def generate_task_steps(steps: list[TaskStep]) -> str: """Generate a list of task steps for completing a task. diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/ui_generator_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/ui_generator_agent.py index db1788fd25..c158dd7749 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/ui_generator_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/ui_generator_agent.py @@ -3,9 +3,9 @@ """Example agent demonstrating Tool-based Generative UI (Feature 5).""" import sys -from typing import Any, TypedDict +from typing import TYPE_CHECKING, Any, TypedDict -from agent_framework import AIFunction, ChatAgent, ChatClientProtocol, ChatOptions +from agent_framework import ChatAgent, ChatClientProtocol, FunctionTool from agent_framework.ag_ui import AgentFrameworkAgent if sys.version_info >= (3, 13): @@ -13,8 +13,11 @@ else: from typing_extensions import TypeVar # type: ignore # pragma: no cover +if TYPE_CHECKING: + from agent_framework import ChatOptions + # Declaration-only tools 
(func=None) - actual rendering happens on the client side -generate_haiku = AIFunction[Any, str]( +generate_haiku = FunctionTool[Any, str]( name="generate_haiku", description="""Generate a haiku with image and gradient background (FRONTEND_RENDER). @@ -62,7 +65,7 @@ }, ) -create_chart = AIFunction[Any, str]( +create_chart = FunctionTool[Any, str]( name="create_chart", description="""Create an interactive chart (FRONTEND_RENDER). @@ -90,7 +93,7 @@ }, ) -display_timeline = AIFunction[Any, str]( +display_timeline = FunctionTool[Any, str]( name="display_timeline", description="""Display an interactive timeline (FRONTEND_RENDER). @@ -118,7 +121,7 @@ }, ) -show_comparison_table = AIFunction[Any, str]( +show_comparison_table = FunctionTool[Any, str]( name="show_comparison_table", description="""Show a comparison table (FRONTEND_RENDER). diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/weather_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/weather_agent.py index 5ebdc10d73..269a732e92 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/weather_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/weather_agent.py @@ -4,10 +4,10 @@ from typing import Any -from agent_framework import ChatAgent, ChatClientProtocol, ai_function +from agent_framework import ChatAgent, ChatClientProtocol, tool -@ai_function +@tool def get_weather(location: str) -> dict[str, Any]: """Get the current weather for a location. @@ -39,7 +39,7 @@ def get_weather(location: str) -> dict[str, Any]: } -@ai_function +@tool def get_forecast(location: str, days: int = 3) -> str: """Get the weather forecast for a location. 
diff --git a/python/packages/ag-ui/getting_started/client_advanced.py b/python/packages/ag-ui/getting_started/client_advanced.py index 3c7ae6a334..87a5e66378 100644 --- a/python/packages/ag-ui/getting_started/client_advanced.py +++ b/python/packages/ag-ui/getting_started/client_advanced.py @@ -12,11 +12,11 @@ import asyncio import os -from agent_framework import ai_function +from agent_framework import tool from agent_framework.ag_ui import AGUIChatClient -@ai_function +@tool def get_weather(location: str) -> str: """Get the current weather for a location. @@ -33,7 +33,7 @@ def get_weather(location: str) -> str: return weather_data.get(location.lower(), f"Weather data not available for {location}") -@ai_function +@tool def calculate(a: float, b: float, operation: str) -> str: """Perform basic arithmetic operations. diff --git a/python/packages/ag-ui/getting_started/client_with_agent.py b/python/packages/ag-ui/getting_started/client_with_agent.py index 63a89b4344..be23404583 100644 --- a/python/packages/ag-ui/getting_started/client_with_agent.py +++ b/python/packages/ag-ui/getting_started/client_with_agent.py @@ -22,7 +22,7 @@ import logging import os -from agent_framework import ChatAgent, ai_function +from agent_framework import ChatAgent, tool from agent_framework.ag_ui import AGUIChatClient # Enable debug logging @@ -33,7 +33,7 @@ logger = logging.getLogger(__name__) -@ai_function(description="Get the current weather for a location.") +@tool(description="Get the current weather for a location.") def get_weather(location: str) -> str: """Get the current weather for a location. 
diff --git a/python/packages/ag-ui/getting_started/server.py b/python/packages/ag-ui/getting_started/server.py index c8889126e9..2cbd612c42 100644 --- a/python/packages/ag-ui/getting_started/server.py +++ b/python/packages/ag-ui/getting_started/server.py @@ -5,7 +5,7 @@ import logging import os -from agent_framework import ChatAgent, ai_function +from agent_framework import ChatAgent, tool from agent_framework.ag_ui import add_agent_framework_fastapi_endpoint from agent_framework.azure import AzureOpenAIChatClient from dotenv import load_dotenv @@ -87,7 +87,7 @@ async def verify_api_key(api_key: str | None = Security(API_KEY_HEADER)) -> None # Server-side tool (executes on server) -@ai_function(description="Get the time zone for a location.") +@tool(description="Get the time zone for a location.") def get_time_zone(location: str) -> str: """Get the time zone for a location. diff --git a/python/packages/ag-ui/tests/test_ag_ui_client.py b/python/packages/ag-ui/tests/test_ag_ui_client.py index b05810972e..af9c7fb916 100644 --- a/python/packages/ag-ui/tests/test_ag_ui_client.py +++ b/python/packages/ag-ui/tests/test_ag_ui_client.py @@ -13,7 +13,7 @@ ChatResponseUpdate, Content, Role, - ai_function, + tool, ) from pytest import MonkeyPatch @@ -231,9 +231,9 @@ async def test_tool_handling(self, monkeypatch: MonkeyPatch) -> None: When server requests a client function, @use_function_invocation decorator intercepts and executes it locally. This matches .NET AG-UI implementation. 
""" - from agent_framework import ai_function + from agent_framework import tool - @ai_function + @tool def test_tool(param: str) -> str: """Test tool.""" return "result" @@ -299,7 +299,7 @@ async def mock_post_run(*args: object, **kwargs: Any) -> AsyncGenerator[dict[str async def test_server_tool_calls_not_executed_locally(self, monkeypatch: MonkeyPatch) -> None: """Server tools should not trigger local function invocation even when client tools exist.""" - @ai_function + @tool def client_tool() -> str: """Client tool stub.""" return "client" diff --git a/python/packages/ag-ui/tests/test_agent_wrapper_comprehensive.py b/python/packages/ag-ui/tests/test_agent_wrapper_comprehensive.py index 8acd56a094..0955aee554 100644 --- a/python/packages/ag-ui/tests/test_agent_wrapper_comprehensive.py +++ b/python/packages/ag-ui/tests/test_agent_wrapper_comprehensive.py @@ -680,12 +680,12 @@ async def stream_fn( async def test_function_approval_mode_executes_tool(): """Test that function approval with approval_mode='always_require' sends the correct messages.""" - from agent_framework import ai_function + from agent_framework import tool from agent_framework.ag_ui import AgentFrameworkAgent messages_received: list[Any] = [] - @ai_function( + @tool( name="get_datetime", description="Get the current date and time", approval_mode="always_require", @@ -771,12 +771,12 @@ async def stream_fn( async def test_function_approval_mode_rejection(): """Test that function approval rejection creates a rejection response.""" - from agent_framework import ai_function + from agent_framework import tool from agent_framework.ag_ui import AgentFrameworkAgent messages_received: list[Any] = [] - @ai_function( + @tool( name="delete_all_data", description="Delete all user data", approval_mode="always_require", diff --git a/python/packages/ag-ui/tests/test_tooling.py b/python/packages/ag-ui/tests/test_tooling.py index b8c9700cd4..36a912ee3b 100644 --- a/python/packages/ag-ui/tests/test_tooling.py +++ 
b/python/packages/ag-ui/tests/test_tooling.py @@ -2,7 +2,7 @@ from unittest.mock import MagicMock -from agent_framework import ChatAgent, ai_function +from agent_framework import ChatAgent, tool from agent_framework_ag_ui._orchestration._tooling import ( collect_server_tools, @@ -25,7 +25,7 @@ def __init__(self, functions: list[DummyTool], is_connected: bool = True) -> Non self.is_connected = is_connected -@ai_function +@tool def regular_tool() -> str: """Regular tool for testing.""" return "result" @@ -35,7 +35,7 @@ def _create_chat_agent_with_tool(tool_name: str = "regular_tool") -> ChatAgent: """Create a ChatAgent with a mocked chat client and a simple tool. Note: tool_name parameter is kept for API compatibility but the tool - will always be named 'regular_tool' since ai_function uses the function name. + will always be named 'regular_tool' since tool uses the function name. """ mock_chat_client = MagicMock() return ChatAgent(chat_client=mock_chat_client, tools=[regular_tool]) diff --git a/python/packages/ag-ui/tests/test_utils.py b/python/packages/ag-ui/tests/test_utils.py index 5d956d1ec6..7f1de812c4 100644 --- a/python/packages/ag-ui/tests/test_utils.py +++ b/python/packages/ag-ui/tests/test_utils.py @@ -252,13 +252,13 @@ class ContainerDataclass: assert json_str is not None -def test_convert_tools_to_agui_format_with_ai_function(): - """Test converting AIFunction to AG-UI format.""" - from agent_framework import ai_function +def test_convert_tools_to_agui_format_with_tool(): + """Test converting FunctionTool to AG-UI format.""" + from agent_framework import tool from agent_framework_ag_ui._utils import convert_tools_to_agui_format - @ai_function + @tool def test_func(param: str, count: int = 5) -> str: """Test function.""" return f"{param} {count}" @@ -318,11 +318,11 @@ def test_convert_tools_to_agui_format_with_none(): def test_convert_tools_to_agui_format_with_single_tool(): """Test converting single tool (not in list).""" - from agent_framework import 
ai_function + from agent_framework import tool from agent_framework_ag_ui._utils import convert_tools_to_agui_format - @ai_function + @tool def single_tool(arg: str) -> str: """Single tool.""" return arg @@ -336,16 +336,16 @@ def single_tool(arg: str) -> str: def test_convert_tools_to_agui_format_with_multiple_tools(): """Test converting multiple tools.""" - from agent_framework import ai_function + from agent_framework import tool from agent_framework_ag_ui._utils import convert_tools_to_agui_format - @ai_function + @tool def tool1(x: int) -> int: """Tool 1.""" return x - @ai_function + @tool def tool2(y: str) -> str: """Tool 2.""" return y diff --git a/python/packages/anthropic/agent_framework_anthropic/_chat_client.py b/python/packages/anthropic/agent_framework_anthropic/_chat_client.py index 40066b4779..0413e8ab3c 100644 --- a/python/packages/anthropic/agent_framework_anthropic/_chat_client.py +++ b/python/packages/anthropic/agent_framework_anthropic/_chat_client.py @@ -6,7 +6,6 @@ from agent_framework import ( AGENT_FRAMEWORK_USER_AGENT, - AIFunction, Annotation, BaseChatClient, ChatMessage, @@ -15,6 +14,7 @@ ChatResponseUpdate, Content, FinishReason, + FunctionTool, HostedCodeInterpreterTool, HostedMCPTool, HostedWebSearchTool, @@ -583,7 +583,7 @@ def _prepare_tools_for_anthropic(self, options: dict[str, Any]) -> dict[str, Any match tool: case MutableMapping(): tool_list.append(tool) - case AIFunction(): + case FunctionTool(): tool_list.append({ "type": "custom", "name": tool.name, diff --git a/python/packages/anthropic/tests/test_anthropic_client.py b/python/packages/anthropic/tests/test_anthropic_client.py index 826dcf1870..6b06843b73 100644 --- a/python/packages/anthropic/tests/test_anthropic_client.py +++ b/python/packages/anthropic/tests/test_anthropic_client.py @@ -16,7 +16,7 @@ HostedMCPTool, HostedWebSearchTool, Role, - ai_function, + tool, ) from agent_framework.exceptions import ServiceInitializationError from anthropic.types.beta import ( @@ 
-259,11 +259,11 @@ def test_prepare_messages_for_anthropic_without_system(mock_anthropic_client: Ma # Tool Conversion Tests -def test_prepare_tools_for_anthropic_ai_function(mock_anthropic_client: MagicMock) -> None: - """Test converting AIFunction to Anthropic format.""" +def test_prepare_tools_for_anthropic_tool(mock_anthropic_client: MagicMock) -> None: + """Test converting FunctionTool to Anthropic format.""" chat_client = create_test_anthropic_client(mock_anthropic_client) - @ai_function + @tool(approval_mode="never_require") def get_weather(location: Annotated[str, Field(description="Location to get weather for")]) -> str: """Get weather for a location.""" return f"Weather for {location}" @@ -443,7 +443,7 @@ async def test_prepare_options_with_tools(mock_anthropic_client: MagicMock) -> N """Test _prepare_options with tools.""" chat_client = create_test_anthropic_client(mock_anthropic_client) - @ai_function + @tool(approval_mode="never_require") def get_weather(location: str) -> str: """Get weather for a location.""" return f"Weather for {location}" @@ -709,7 +709,7 @@ async def mock_stream(): # Integration Tests -@ai_function +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/packages/azure-ai/agent_framework_azure_ai/_agent_provider.py b/python/packages/azure-ai/agent_framework_azure_ai/_agent_provider.py index 6ed8853977..e5e9410ecf 100644 --- a/python/packages/azure-ai/agent_framework_azure_ai/_agent_provider.py +++ b/python/packages/azure-ai/agent_framework_azure_ai/_agent_provider.py @@ -6,9 +6,9 @@ from agent_framework import ( AGENT_FRAMEWORK_USER_AGENT, - AIFunction, ChatAgent, ContextProvider, + FunctionTool, Middleware, ToolProtocol, normalize_tools, @@ -445,9 +445,9 @@ def _merge_tools( # Add user-provided function tools and MCP tools if provided_tools: for provided_tool in provided_tools: - # AIFunction - has implementation for 
function calling + # FunctionTool - has implementation for function calling # MCPTool - ChatAgent handles MCP connection and tool discovery at runtime - if isinstance(provided_tool, (AIFunction, MCPTool)): + if isinstance(provided_tool, (FunctionTool, MCPTool)): merged.append(provided_tool) # type: ignore[reportUnknownArgumentType] return merged @@ -488,7 +488,7 @@ def _validate_function_tools( provided_names: set[str] = set() if provided_tools: for tool in provided_tools: - if isinstance(tool, AIFunction): + if isinstance(tool, FunctionTool): provided_names.add(tool.name) # Check for missing implementations diff --git a/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py b/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py index 4bb646da19..5626ead9a6 100644 --- a/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py +++ b/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py @@ -10,7 +10,6 @@ from agent_framework import ( AGENT_FRAMEWORK_USER_AGENT, - AIFunction, Annotation, BaseChatClient, ChatAgent, @@ -21,6 +20,7 @@ ChatResponseUpdate, Content, ContextProvider, + FunctionTool, HostedCodeInterpreterTool, HostedFileSearchTool, HostedMCPTool, @@ -1117,7 +1117,7 @@ async def _prepare_tools_for_azure_ai( tool_definitions: list[ToolDefinition | dict[str, Any]] = [] for tool in tools: match tool: - case AIFunction(): + case FunctionTool(): tool_definitions.append(tool.to_json_schema_spec()) # type: ignore[reportUnknownArgumentType] case HostedWebSearchTool(): additional_props = tool.additional_properties or {} diff --git a/python/packages/azure-ai/agent_framework_azure_ai/_project_provider.py b/python/packages/azure-ai/agent_framework_azure_ai/_project_provider.py index 0cbb37b854..dc0d8ea279 100644 --- a/python/packages/azure-ai/agent_framework_azure_ai/_project_provider.py +++ b/python/packages/azure-ai/agent_framework_azure_ai/_project_provider.py @@ -6,9 +6,9 @@ from agent_framework import ( 
AGENT_FRAMEWORK_USER_AGENT, - AIFunction, ChatAgent, ContextProvider, + FunctionTool, Middleware, ToolProtocol, get_logger, @@ -20,10 +20,12 @@ from azure.ai.projects.models import ( AgentReference, AgentVersionDetails, - FunctionTool, PromptAgentDefinition, PromptAgentDefinitionText, ) +from azure.ai.projects.models import ( + FunctionTool as AzureFunctionTool, +) from azure.core.credentials_async import AsyncTokenCredential from pydantic import ValidationError @@ -224,7 +226,7 @@ async def create_agent( # Connect MCP tools and discover their functions BEFORE creating the agent # This is required because Azure AI Responses API doesn't accept tools at request time - mcp_discovered_functions: list[AIFunction[Any, Any]] = [] + mcp_discovered_functions: list[FunctionTool] = [] for mcp_tool in mcp_tools: if not mcp_tool.is_connected: await mcp_tool.connect() @@ -433,9 +435,9 @@ def _merge_tools( # Add user-provided function tools and MCP tools if provided_tools: for provided_tool in provided_tools: - # AIFunction - has implementation for function calling + # FunctionTool - has implementation for function calling # MCPTool - ChatAgent handles MCP connection and tool discovery at runtime - if isinstance(provided_tool, (AIFunction, MCPTool)): + if isinstance(provided_tool, (FunctionTool, MCPTool)): merged.append(provided_tool) # type: ignore[reportUnknownArgumentType] return merged @@ -452,12 +454,14 @@ def _validate_function_tools( """Validate that required function tools are provided.""" # Normalize and validate function tools normalized_tools = normalize_tools(provided_tools) - tool_names = {tool.name for tool in normalized_tools if isinstance(tool, AIFunction)} + tool_names = {tool.name for tool in normalized_tools if isinstance(tool, FunctionTool)} # If function tools exist in agent definition but were not provided, # we need to raise an error, as it won't be possible to invoke the function. 
missing_tools = [ - tool.name for tool in (agent_tools or []) if isinstance(tool, FunctionTool) and tool.name not in tool_names + tool.name + for tool in (agent_tools or []) + if isinstance(tool, AzureFunctionTool) and tool.name not in tool_names ] if missing_tools: diff --git a/python/packages/azure-ai/agent_framework_azure_ai/_shared.py b/python/packages/azure-ai/agent_framework_azure_ai/_shared.py index aa5d114ba5..1cf33b24d8 100644 --- a/python/packages/azure-ai/agent_framework_azure_ai/_shared.py +++ b/python/packages/azure-ai/agent_framework_azure_ai/_shared.py @@ -5,8 +5,8 @@ from typing import Any, ClassVar, Literal, cast from agent_framework import ( - AIFunction, Content, + FunctionTool, HostedCodeInterpreterTool, HostedFileSearchTool, HostedImageGenerationTool, @@ -29,7 +29,6 @@ ApproximateLocation, CodeInterpreterTool, CodeInterpreterToolAuto, - FunctionTool, ImageGenTool, ImageGenToolInputImageMask, MCPTool, @@ -42,6 +41,9 @@ from azure.ai.projects.models import ( FileSearchTool as ProjectsFileSearchTool, ) +from azure.ai.projects.models import ( + FunctionTool as AzureFunctionTool, +) from pydantic import BaseModel logger = get_logger("agent_framework.azure") @@ -141,7 +143,7 @@ def to_azure_ai_agent_tools( tool_definitions: list[ToolDefinition | dict[str, Any]] = [] for tool in tools: match tool: - case AIFunction(): + case FunctionTool(): tool_definitions.append(tool.to_json_schema_spec()) # type: ignore[reportUnknownArgumentType] case HostedWebSearchTool(): additional_props = tool.additional_properties or {} @@ -439,11 +441,11 @@ def to_azure_ai_tools( container = CodeInterpreterToolAuto(file_ids=file_ids if file_ids else None) ci_tool: CodeInterpreterTool = CodeInterpreterTool(container=container) azure_tools.append(ci_tool) - case AIFunction(): + case FunctionTool(): params = tool.parameters() params["additionalProperties"] = False azure_tools.append( - FunctionTool( + AzureFunctionTool( name=tool.name, parameters=params, strict=False, diff --git 
a/python/packages/azure-ai/tests/test_agent_provider.py b/python/packages/azure-ai/tests/test_agent_provider.py index edfd749f4c..7b50a4f7c7 100644 --- a/python/packages/azure-ai/tests/test_agent_provider.py +++ b/python/packages/azure-ai/tests/test_agent_provider.py @@ -12,7 +12,7 @@ HostedFileSearchTool, HostedMCPTool, HostedWebSearchTool, - ai_function, + tool, ) from agent_framework.exceptions import ServiceInitializationError from azure.ai.agents.models import ( @@ -222,7 +222,7 @@ async def test_create_agent_with_tools( provider = AzureAIAgentsProvider(agents_client=mock_agents_client) - @ai_function + @tool(approval_mode="never_require") def get_weather(city: str) -> str: """Get weather for a city.""" return f"Weather in {city}" @@ -366,7 +366,7 @@ async def test_get_agent_with_provided_function_tools( mock_agent.tools = [mock_function_tool] mock_agents_client.get_agent = AsyncMock(return_value=mock_agent) - @ai_function + @tool(approval_mode="never_require") def get_weather(city: str) -> str: """Get weather for a city.""" return f"Weather in {city}" @@ -483,9 +483,9 @@ def test_to_azure_ai_agent_tools_empty() -> None: def test_to_azure_ai_agent_tools_function() -> None: - """Test converting AIFunction to Azure tool definition.""" + """Test converting FunctionTool to Azure tool definition.""" - @ai_function + @tool(approval_mode="never_require") def get_weather(city: str) -> str: """Get weather for a city.""" return f"Weather in {city}" diff --git a/python/packages/azure-ai/tests/test_azure_ai_agent_client.py b/python/packages/azure-ai/tests/test_azure_ai_agent_client.py index 7b20caea7d..0ade997ce7 100644 --- a/python/packages/azure-ai/tests/test_azure_ai_agent_client.py +++ b/python/packages/azure-ai/tests/test_azure_ai_agent_client.py @@ -23,6 +23,7 @@ HostedMCPTool, HostedWebSearchTool, Role, + tool, ) from agent_framework._serialization import SerializationMixin from agent_framework.exceptions import ServiceInitializationError @@ -940,7 +941,7 @@ async 
def test_azure_ai_chat_client_service_url(mock_agents_client: MagicMock) - assert result == "https://test-endpoint.com/" -async def test_azure_ai_chat_client_prepare_tool_outputs_for_azure_ai_function_result( +async def test_azure_ai_chat_client_prepare_tool_outputs_for_azure_tool_result( mock_agents_client: MagicMock, ) -> None: """Test _prepare_tool_outputs_for_azure_ai with FunctionResultContent.""" @@ -1336,6 +1337,7 @@ def test_azure_ai_chat_client_extract_file_path_contents_empty_annotations( assert len(file_contents) == 0 +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/packages/azure-ai/tests/test_azure_ai_client.py b/python/packages/azure-ai/tests/test_azure_ai_client.py index aba45b3f1b..56221a4f1d 100644 --- a/python/packages/azure-ai/tests/test_azure_ai_client.py +++ b/python/packages/azure-ai/tests/test_azure_ai_client.py @@ -22,6 +22,7 @@ HostedMCPTool, HostedWebSearchTool, Role, + tool, ) from agent_framework.exceptions import ServiceInitializationError from azure.ai.projects.aio import AIProjectClient @@ -1025,6 +1026,7 @@ def test_from_azure_ai_tools() -> None: # region Integration Tests +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/packages/azure-ai/tests/test_provider.py b/python/packages/azure-ai/tests/test_provider.py index 2a9808db9c..32cd738bc3 100644 --- a/python/packages/azure-ai/tests/test_provider.py +++ b/python/packages/azure-ai/tests/test_provider.py @@ -4,16 +4,18 @@ from unittest.mock import AsyncMock, MagicMock, patch import pytest -from agent_framework import AIFunction, ChatAgent +from agent_framework import ChatAgent, FunctionTool from agent_framework._mcp import MCPTool from agent_framework.exceptions import ServiceInitializationError from azure.ai.projects.aio import AIProjectClient from 
azure.ai.projects.models import ( AgentReference, AgentVersionDetails, - FunctionTool, PromptAgentDefinition, ) +from azure.ai.projects.models import ( + FunctionTool as AzureFunctionTool, +) from azure.identity.aio import AzureCliCredential from agent_framework_azure_ai import AzureAIProjectAgentProvider @@ -373,7 +375,7 @@ async def test_provider_get_agent_missing_function_tools(mock_project_client: Ma mock_agent_version.description = None mock_agent_version.definition = MagicMock(spec=PromptAgentDefinition) mock_agent_version.definition.tools = [ - FunctionTool(name="test_tool", parameters=[], strict=True, description="Test tool") + AzureFunctionTool(name="test_tool", parameters=[], strict=True, description="Test tool") ] mock_agent_object = MagicMock() @@ -494,7 +496,7 @@ class MockMCPTool(MCPTool): # pyright: ignore[reportGeneralTypeIssues] unit testing. We only need isinstance(obj, MCPTool) to return True. """ - def __init__(self, functions: list[AIFunction] | None = None) -> None: + def __init__(self, functions: list[FunctionTool] | None = None) -> None: self.name = "MockMCPTool" self.description = "A mock MCP tool for testing" self.is_connected = False @@ -502,7 +504,7 @@ def __init__(self, functions: list[AIFunction] | None = None) -> None: self._connect_called = False @property - def functions(self) -> list[AIFunction]: + def functions(self) -> list[FunctionTool]: return self._mock_functions async def connect(self, *, reset: bool = False) -> None: @@ -520,13 +522,13 @@ def mock_mcp_tool() -> MockMCPTool: return MockMCPTool(functions=mock_functions) -def create_mock_ai_function(name: str, description: str = "A mock function") -> AIFunction: - """Create a real AIFunction for testing.""" +def create_mock_ai_function(name: str, description: str = "A mock function") -> FunctionTool: + """Create a real FunctionTool for testing.""" def mock_func(arg: str) -> str: return f"Result from {name}: {arg}" - return AIFunction(func=mock_func, name=name, 
description=description) + return FunctionTool(func=mock_func, name=name, description=description, approval_mode="never_require") async def test_provider_create_agent_with_mcp_tool( @@ -593,8 +595,8 @@ async def test_provider_create_agent_with_mcp_and_regular_tools( azure_ai_unit_test_env: dict[str, str], mock_mcp_tool: "MockMCPTool", ) -> None: - """Test that create_agent handles both MCP tools and regular AIFunctions.""" - # Create a regular AIFunction + """Test that create_agent handles both MCP tools and regular FunctionTools.""" + # Create a regular FunctionTool regular_function = create_mock_ai_function("regular_function", "A regular function") # Patch normalize_tools to return tools as-is in a list (avoids callable check) @@ -637,7 +639,7 @@ def mock_normalize_tools(tools): ) # Verify to_azure_ai_tools was called with: - # - The regular AIFunction (1) + # - The regular FunctionTool (1) # - The 2 discovered MCP functions mock_to_azure_tools.assert_called_once() tools_passed = mock_to_azure_tools.call_args[0][0] diff --git a/python/packages/bedrock/agent_framework_bedrock/_chat_client.py b/python/packages/bedrock/agent_framework_bedrock/_chat_client.py index a6325a6603..a58d68e077 100644 --- a/python/packages/bedrock/agent_framework_bedrock/_chat_client.py +++ b/python/packages/bedrock/agent_framework_bedrock/_chat_client.py @@ -10,7 +10,6 @@ from agent_framework import ( AGENT_FRAMEWORK_USER_AGENT, - AIFunction, BaseChatClient, ChatMessage, ChatOptions, @@ -18,6 +17,7 @@ ChatResponseUpdate, Content, FinishReason, + FunctionTool, Role, ToolProtocol, UsageDetails, @@ -548,7 +548,7 @@ def _prepare_tools(self, tools: list[ToolProtocol | MutableMapping[str, Any]] | if isinstance(tool, MutableMapping): converted.append(dict(tool)) continue - if isinstance(tool, AIFunction): + if isinstance(tool, FunctionTool): converted.append({ "toolSpec": { "name": tool.name, diff --git a/python/packages/bedrock/samples/bedrock_sample.py 
b/python/packages/bedrock/samples/bedrock_sample.py index 57901f49e8..15a347997d 100644 --- a/python/packages/bedrock/samples/bedrock_sample.py +++ b/python/packages/bedrock/samples/bedrock_sample.py @@ -2,22 +2,13 @@ import asyncio import logging -from collections.abc import Sequence - -from agent_framework import ( - AgentResponse, - ChatAgent, - FunctionCallContent, - FunctionResultContent, - Role, - TextContent, - ai_function, -) + +from agent_framework import ChatAgent, tool from agent_framework_bedrock import BedrockChatClient -@ai_function +@tool(approval_mode="never_require") def get_weather(city: str) -> dict[str, str]: """Return a mock forecast for the requested city.""" normalized = city.strip() or "New York" @@ -36,27 +27,18 @@ async def main() -> None: response = await agent.run("Use the weather tool to check the forecast for new york.") logging.info("\nAssistant reply:", response.text or "") - _log_response(response) - - -def _log_response(response: AgentResponse) -> None: logging.info("\nConversation transcript:") - for idx, message in enumerate(response.messages, start=1): - tag = f"{idx}. {message.role.value if isinstance(message.role, Role) else message.role}" - _log_contents(tag, message.contents) - - -def _log_contents(tag: str, contents: Sequence[object]) -> None: - logging.info(f"[{tag}] {len(contents)} content blocks") - for idx, content in enumerate(contents, start=1): - if isinstance(content, TextContent): - logging.info(f" {idx}. text -> {content.text}") - elif isinstance(content, FunctionCallContent): - logging.info(f" {idx}. tool_call ({content.name}) -> {content.arguments}") - elif isinstance(content, FunctionResultContent): - logging.info(f" {idx}. tool_result ({content.call_id}) -> {content.result}") - else: # pragma: no cover - defensive - logging.info(f" {idx}. {content.type}") + for message in response.messages: + for idx, content in enumerate(message.contents, start=1): + match content.type: + case "text": + logging.info(f" {idx}. 
text -> {content.text}") + case "function_call": + logging.info(f" {idx}. function_call ({content.name}) -> {content.arguments}") + case "function_result": + logging.info(f" {idx}. function_result ({content.call_id}) -> {content.result}") + case _: + logging.info(f" {idx}. {content.type}") if __name__ == "__main__": diff --git a/python/packages/bedrock/tests/test_bedrock_settings.py b/python/packages/bedrock/tests/test_bedrock_settings.py index 07898303de..d98cf00817 100644 --- a/python/packages/bedrock/tests/test_bedrock_settings.py +++ b/python/packages/bedrock/tests/test_bedrock_settings.py @@ -6,10 +6,10 @@ import pytest from agent_framework import ( - AIFunction, ChatMessage, ChatOptions, Content, + FunctionTool, Role, ) from pydantic import BaseModel @@ -42,7 +42,7 @@ def test_settings_load_from_environment(monkeypatch: pytest.MonkeyPatch) -> None def test_build_request_includes_tool_config() -> None: client = _build_client() - tool = AIFunction(name="get_weather", description="desc", func=_dummy_weather, input_model=_WeatherArgs) + tool = FunctionTool(name="get_weather", description="desc", func=_dummy_weather, input_model=_WeatherArgs) options = { "tools": [tool], "tool_choice": {"mode": "required", "required_function_name": "get_weather"}, diff --git a/python/packages/core/agent_framework/_agents.py b/python/packages/core/agent_framework/_agents.py index 2092ebcb32..f33b26d053 100644 --- a/python/packages/core/agent_framework/_agents.py +++ b/python/packages/core/agent_framework/_agents.py @@ -31,7 +31,7 @@ from ._middleware import Middleware, use_agent_middleware from ._serialization import SerializationMixin from ._threads import AgentThread, ChatMessageStoreProtocol -from ._tools import FUNCTION_INVOKING_CHAT_CLIENT_MARKER, AIFunction, ToolProtocol +from ._tools import FUNCTION_INVOKING_CHAT_CLIENT_MARKER, FunctionTool, ToolProtocol from ._types import ( AgentResponse, AgentResponseUpdate, @@ -417,8 +417,8 @@ def as_tool( stream_callback: 
Callable[[AgentResponseUpdate], None] | Callable[[AgentResponseUpdate], Awaitable[None]] | None = None, - ) -> AIFunction[BaseModel, str]: - """Create an AIFunction tool that wraps this agent. + ) -> FunctionTool[BaseModel, str]: + """Create a FunctionTool that wraps this agent. Keyword Args: name: The name for the tool. If None, uses the agent's name. @@ -429,7 +429,7 @@ def as_tool( stream_callback: Optional callback for streaming responses. If provided, uses run_stream. Returns: - An AIFunction that can be used as a tool by other agents. + A FunctionTool that can be used as a tool by other agents. Raises: TypeError: If the agent does not implement AgentProtocol. @@ -491,11 +491,12 @@ async def agent_wrapper(**kwargs: Any) -> str: # Create final text from accumulated updates return AgentResponse.from_agent_run_response_updates(response_updates).text - agent_tool: AIFunction[BaseModel, str] = AIFunction( + agent_tool: FunctionTool[BaseModel, str] = FunctionTool( name=tool_name, description=tool_description, func=agent_wrapper, input_model=input_model, # type: ignore + approval_mode="never_require", ) agent_tool._forward_runtime_kwargs = True # type: ignore return agent_tool @@ -1213,7 +1214,19 @@ async def _prepare_thread_and_messages( Raises: AgentExecutionException: If the conversation IDs on the thread and agent don't match. 
""" - chat_options = deepcopy(self.default_options) if self.default_options else {} + # Create a shallow copy of options and deep copy non-tool values + # Tools containing HTTP clients or other non-copyable objects cannot be deep copied + if self.default_options: + chat_options: dict[str, Any] = {} + for key, value in self.default_options.items(): + if key == "tools": + # Keep tool references as-is (don't deep copy) + chat_options[key] = list(value) if value else [] + else: + # Deep copy other options to prevent mutation + chat_options[key] = deepcopy(value) + else: + chat_options = {} thread = thread or self.get_new_thread() if thread.service_thread_id and thread.context_provider: await thread.context_provider.thread_created(thread.service_thread_id) @@ -1237,7 +1250,7 @@ async def _prepare_thread_and_messages( if context.instructions: chat_options["instructions"] = ( context.instructions - if not chat_options.get("instructions") + if "instructions" not in chat_options else f"{chat_options['instructions']}\n{context.instructions}" ) thread_messages.extend(input_messages or []) diff --git a/python/packages/core/agent_framework/_mcp.py b/python/packages/core/agent_framework/_mcp.py index 333af611c1..51116b71ae 100644 --- a/python/packages/core/agent_framework/_mcp.py +++ b/python/packages/core/agent_framework/_mcp.py @@ -25,7 +25,7 @@ from pydantic import BaseModel, create_model from ._tools import ( - AIFunction, + FunctionTool, HostedMCPSpecificApproval, _build_pydantic_model_from_json_schema, ) @@ -356,7 +356,7 @@ def __init__( self.session = session self.request_timeout = request_timeout self.chat_client = chat_client - self._functions: list[AIFunction[Any, Any]] = [] + self._functions: list[FunctionTool[Any, Any]] = [] self.is_connected: bool = False self._tools_loaded: bool = False self._prompts_loaded: bool = False @@ -365,7 +365,7 @@ def __str__(self) -> str: return f"MCPTool(name={self.name}, description={self.description})" @property - def functions(self) 
-> list[AIFunction[Any, Any]]: + def functions(self) -> list[FunctionTool[Any, Any]]: """Get the list of functions that are allowed.""" if not self.allowed_tools: return self._functions @@ -609,7 +609,7 @@ async def load_prompts(self) -> None: """Load prompts from the MCP server. Retrieves available prompts from the connected MCP server and converts - them into AIFunction instances. Handles pagination automatically. + them into FunctionTool instances. Handles pagination automatically. Raises: ToolExecutionException: If the MCP server is not connected. @@ -633,7 +633,7 @@ async def load_prompts(self) -> None: input_model = _get_input_model_from_mcp_prompt(prompt) approval_mode = self._determine_approval_mode(local_name) - func: AIFunction[BaseModel, list[ChatMessage] | Any | types.GetPromptResult] = AIFunction( + func: FunctionTool[BaseModel, list[ChatMessage] | Any | types.GetPromptResult] = FunctionTool( func=partial(self.get_prompt, prompt.name), name=local_name, description=prompt.description or "", @@ -652,7 +652,7 @@ async def load_tools(self) -> None: """Load tools from the MCP server. Retrieves available tools from the connected MCP server and converts - them into AIFunction instances. Handles pagination automatically. + them into FunctionTool instances. Handles pagination automatically. Raises: ToolExecutionException: If the MCP server is not connected. 
@@ -676,8 +676,8 @@ async def load_tools(self) -> None: input_model = _get_input_model_from_mcp_tool(tool) approval_mode = self._determine_approval_mode(local_name) - # Create AIFunctions out of each tool - func: AIFunction[BaseModel, list[Content] | Any | types.CallToolResult] = AIFunction( + # Create FunctionTools out of each tool + func: FunctionTool[BaseModel, list[Content] | Any | types.CallToolResult] = FunctionTool( func=partial(self.call_tool, tool.name), name=local_name, description=tool.description or "", diff --git a/python/packages/core/agent_framework/_middleware.py b/python/packages/core/agent_framework/_middleware.py index aeafd91ac6..4ba922c464 100644 --- a/python/packages/core/agent_framework/_middleware.py +++ b/python/packages/core/agent_framework/_middleware.py @@ -17,7 +17,7 @@ from ._agents import AgentProtocol from ._clients import ChatClientProtocol from ._threads import AgentThread - from ._tools import AIFunction + from ._tools import FunctionTool from ._types import ChatResponse, ChatResponseUpdate @@ -172,7 +172,7 @@ async def process(self, context: FunctionInvocationContext, next): def __init__( self, - function: "AIFunction[Any, Any]", + function: "FunctionTool[Any, Any]", arguments: "BaseModel", metadata: dict[str, Any] | None = None, result: Any = None, diff --git a/python/packages/core/agent_framework/_serialization.py b/python/packages/core/agent_framework/_serialization.py index 8aa9b6adcf..e4866c12d6 100644 --- a/python/packages/core/agent_framework/_serialization.py +++ b/python/packages/core/agent_framework/_serialization.py @@ -444,11 +444,11 @@ def from_dict( chat_client = OpenAIChatClient.from_dict(client_data, dependencies=dependencies) # Now ready to make API calls with the injected client - **Function Injection for Tools** - AIFunction runtime dependency: + **Function Injection for Tools** - FunctionTool runtime dependency: .. 
code-block:: python - from agent_framework import AIFunction + from agent_framework import FunctionTool from typing import Annotated @@ -458,19 +458,19 @@ async def get_current_weather(location: Annotated[str, "The city name"]) -> str: return f"Current weather in {location}: 72°F and sunny" - # AIFunction has INJECTABLE = {"func"} + # FunctionTool has INJECTABLE = {"func"} function_data = { - "type": "ai_function", + "type": "function_tool", "name": "get_weather", "description": "Get current weather for a location", # func is excluded from serialization } # Inject the actual function implementation during deserialization - dependencies = {"ai_function": {"func": get_current_weather}} + dependencies = {"function_tool": {"func": get_current_weather}} - # Reconstruct the AIFunction with the callable injected - weather_func = AIFunction.from_dict(function_data, dependencies=dependencies) + # Reconstruct the FunctionTool with the callable injected + weather_func = FunctionTool.from_dict(function_data, dependencies=dependencies) # The function is now callable and ready for agent use **Middleware Context Injection** - Agent execution context: diff --git a/python/packages/core/agent_framework/_tools.py b/python/packages/core/agent_framework/_tools.py index 60d8783b08..8ef899a5f7 100644 --- a/python/packages/core/agent_framework/_tools.py +++ b/python/packages/core/agent_framework/_tools.py @@ -33,7 +33,7 @@ runtime_checkable, ) -from opentelemetry.metrics import Histogram +from opentelemetry.metrics import Histogram, NoOpHistogram from pydantic import AnyUrl, BaseModel, Field, ValidationError, create_model from ._logging import get_logger @@ -64,12 +64,20 @@ else: from typing_extensions import override # type: ignore[import] # pragma: no cover +# TypeVar with defaults support for Python < 3.13 +if sys.version_info >= (3, 13): + from typing import TypeVar as TypeVarWithDefaults # type: ignore # pragma: no cover +else: + from typing_extensions import ( + TypeVar as 
TypeVarWithDefaults, # type: ignore[import] # pragma: no cover + ) + logger = get_logger() __all__ = [ "FUNCTION_INVOKING_CHAT_CLIENT_MARKER", - "AIFunction", "FunctionInvocationConfiguration", + "FunctionTool", "HostedCodeInterpreterTool", "HostedFileSearchTool", "HostedImageGenerationTool", @@ -77,7 +85,7 @@ "HostedMCPTool", "HostedWebSearchTool", "ToolProtocol", - "ai_function", + "tool", "use_function_invocation", ] @@ -89,16 +97,8 @@ TChatClient = TypeVar("TChatClient", bound="ChatClientProtocol[Any]") # region Helpers -ArgsT = TypeVar("ArgsT", bound=BaseModel) -ReturnT = TypeVar("ReturnT") - - -class _NoOpHistogram: - def record(self, *args: Any, **kwargs: Any) -> None: # pragma: no cover - trivial - return None - - -_NOOP_HISTOGRAM = _NoOpHistogram() +ArgsT = TypeVarWithDefaults("ArgsT", bound=BaseModel, default=BaseModel) +ReturnT = TypeVarWithDefaults("ReturnT", default=Any) def _parse_inputs( @@ -163,7 +163,7 @@ class ToolProtocol(Protocol): This protocol defines the interface that all tools must implement to be compatible with the agent framework. It is implemented by various tool classes such as HostedMCPTool, - HostedWebSearchTool, and AIFunction's. A AIFunction is usually created by the `ai_function` decorator. + HostedWebSearchTool, and FunctionTool's. A FunctionTool is usually created by the `tool` decorator. Since each connector needs to parse tools differently, users can pass a dict to specify a service-specific tool when no abstraction is available. @@ -190,7 +190,7 @@ class BaseTool(SerializationMixin): """Base class for AI tools, providing common attributes and methods. Used as the base class for the various tools in the agent framework, such as HostedMCPTool, - HostedWebSearchTool, and AIFunction. + HostedWebSearchTool, and FunctionTool. Since each connector needs to parse tools differently, this class is not exposed directly to end users. In most cases, users can pass a dict to specify a service-specific tool when no abstraction is available. 
@@ -543,7 +543,10 @@ def _default_histogram() -> Histogram: from .observability import OBSERVABILITY_SETTINGS # local import to avoid circulars if not OBSERVABILITY_SETTINGS.ENABLED: # type: ignore[name-defined] - return _NOOP_HISTOGRAM # type: ignore[return-value] + return NoOpHistogram( + name=OtelAttr.MEASUREMENT_FUNCTION_INVOCATION_DURATION, + unit=OtelAttr.DURATION_UNIT, + ) meter = get_meter() try: return meter.create_histogram( @@ -567,7 +570,7 @@ class EmptyInputModel(BaseModel): """An empty input model for functions with no parameters.""" -class AIFunction(BaseTool, Generic[ArgsT, ReturnT]): +class FunctionTool(BaseTool, Generic[ArgsT, ReturnT]): """A tool that wraps a Python function to make it callable by AI models. This class wraps a Python function to make it callable by AI models with automatic @@ -578,11 +581,11 @@ class AIFunction(BaseTool, Generic[ArgsT, ReturnT]): from typing import Annotated from pydantic import BaseModel, Field - from agent_framework import AIFunction, ai_function + from agent_framework import FunctionTool, tool # Using the decorator with string annotations - @ai_function + @tool(approval_mode="never_require") def get_weather( location: Annotated[str, "The city name"], unit: Annotated[str, "Temperature unit"] = "celsius", @@ -597,7 +600,7 @@ class WeatherArgs(BaseModel): unit: Annotated[str, Field(description="Temperature unit")] = "celsius" - weather_func = AIFunction( + weather_func = FunctionTool( name="get_weather", description="Get the weather for a location", func=lambda location, unit="celsius": f"Weather in {location}: 22°{unit[0].upper()}", @@ -625,13 +628,13 @@ def __init__( input_model: type[ArgsT] | Mapping[str, Any] | None = None, **kwargs: Any, ) -> None: - """Initialize the AIFunction. + """Initialize the FunctionTool. Keyword Args: name: The name of the function. description: A description of the function. approval_mode: Whether or not approval is required to run this tool. 
- Default is that approval is not needed. + Default is that approval is required. max_invocations: The maximum number of times this function can be invoked. If None, there is no limit. Should be at least 1. max_invocation_exceptions: The maximum number of exceptions allowed during invocations. @@ -652,7 +655,7 @@ def __init__( self.func = func self._instance = None # Store the instance for bound methods self.input_model = self._resolve_input_model(input_model) - self._cached_parameters: dict[str, Any] | None = None # Cache for model_json_schema() + self._cached_parameters: dict[str, Any] | None = None self.approval_mode = approval_mode or "never_require" if max_invocations is not None and max_invocations < 1: raise ValueError("max_invocations must be at least 1 or None.") @@ -663,7 +666,7 @@ def __init__( self.max_invocation_exceptions = max_invocation_exceptions self.invocation_exception_count = 0 self._invocation_duration_histogram = _default_histogram() - self.type: Literal["ai_function"] = "ai_function" + self.type: Literal["function_tool"] = "function_tool" self._forward_runtime_kwargs: bool = False if self.func: sig = inspect.signature(self.func) @@ -680,10 +683,10 @@ def declaration_only(self) -> bool: return True return self.func is None - def __get__(self, obj: Any, objtype: type | None = None) -> "AIFunction[ArgsT, ReturnT]": + def __get__(self, obj: Any, objtype: type | None = None) -> "FunctionTool[ArgsT, ReturnT]": """Implement the descriptor protocol to support bound methods. - When an AIFunction is accessed as an attribute of a class instance, + When a FunctionTool is accessed as an attribute of a class instance, this method is called to bind the instance to the function. Args: @@ -691,7 +694,7 @@ def __get__(self, obj: Any, objtype: type | None = None) -> "AIFunction[ArgsT, R objtype: The type that owns the descriptor. Returns: - A new AIFunction with the instance bound to the wrapped function. 
+ A new FunctionTool with the instance bound to the wrapped function. """ if obj is None: # Accessed from the class, not an instance @@ -702,7 +705,7 @@ def __get__(self, obj: Any, objtype: type | None = None) -> "AIFunction[ArgsT, R sig = inspect.signature(self.func) params = list(sig.parameters.keys()) if params and params[0] in {"self", "cls"}: - # Create a new AIFunction with the bound method + # Create a new FunctionTool with the bound method import copy bound_func = copy.copy(self) @@ -849,7 +852,7 @@ def parameters(self) -> dict[str, Any]: return self._cached_parameters def to_json_schema_spec(self) -> dict[str, Any]: - """Convert a AIFunction to the JSON Schema function specification format. + """Convert a FunctionTool to the JSON Schema function specification format. Returns: A dictionary containing the function specification in JSON Schema format. @@ -892,29 +895,29 @@ def _tools_to_dict( if not tools: return None if not isinstance(tools, list): - if isinstance(tools, AIFunction): + if isinstance(tools, FunctionTool): return [tools.to_json_schema_spec()] if isinstance(tools, SerializationMixin): return [tools.to_dict()] if isinstance(tools, dict): return [tools] if callable(tools): - return [ai_function(tools).to_json_schema_spec()] + return [tool(tools).to_json_schema_spec()] logger.warning("Can't parse tool.") return None results: list[str | dict[str, Any]] = [] - for tool in tools: - if isinstance(tool, AIFunction): - results.append(tool.to_json_schema_spec()) + for tool_item in tools: + if isinstance(tool_item, FunctionTool): + results.append(tool_item.to_json_schema_spec()) continue - if isinstance(tool, SerializationMixin): - results.append(tool.to_dict()) + if isinstance(tool_item, SerializationMixin): + results.append(tool_item.to_dict()) continue - if isinstance(tool, dict): - results.append(tool) + if isinstance(tool_item, dict): + results.append(tool_item) continue - if callable(tool): - results.append(ai_function(tool).to_json_schema_spec()) + 
if callable(tool_item): + results.append(tool(tool_item).to_json_schema_spec()) continue logger.warning("Can't parse tool.") return results @@ -958,10 +961,8 @@ def _parse_annotation(annotation: Any) -> Any: def _create_input_model_from_func(func: Callable[..., Any], name: str) -> type[BaseModel]: """Create a Pydantic model from a function's signature.""" - # Unwrap AIFunction objects to get the underlying function - from agent_framework._tools import AIFunction - - if isinstance(func, AIFunction): + # Unwrap FunctionTool objects to get the underlying function + if isinstance(func, FunctionTool): func = func.func # type: ignore[assignment] sig = inspect.signature(func) @@ -1212,7 +1213,7 @@ def _create_model_from_json_schema(tool_name: str, schema_json: Mapping[str, Any @overload -def ai_function( +def tool( func: Callable[..., ReturnT | Awaitable[ReturnT]], *, name: str | None = None, @@ -1221,11 +1222,11 @@ def ai_function( max_invocations: int | None = None, max_invocation_exceptions: int | None = None, additional_properties: dict[str, Any] | None = None, -) -> AIFunction[Any, ReturnT]: ... +) -> FunctionTool[Any, ReturnT]: ... @overload -def ai_function( +def tool( func: None = None, *, name: str | None = None, @@ -1234,10 +1235,10 @@ def ai_function( max_invocations: int | None = None, max_invocation_exceptions: int | None = None, additional_properties: dict[str, Any] | None = None, -) -> Callable[[Callable[..., ReturnT | Awaitable[ReturnT]]], AIFunction[Any, ReturnT]]: ... +) -> Callable[[Callable[..., ReturnT | Awaitable[ReturnT]]], FunctionTool[Any, ReturnT]]: ... 
-def ai_function( +def tool( func: Callable[..., ReturnT | Awaitable[ReturnT]] | None = None, *, name: str | None = None, @@ -1246,8 +1247,8 @@ def ai_function( max_invocations: int | None = None, max_invocation_exceptions: int | None = None, additional_properties: dict[str, Any] | None = None, -) -> AIFunction[Any, ReturnT] | Callable[[Callable[..., ReturnT | Awaitable[ReturnT]]], AIFunction[Any, ReturnT]]: - """Decorate a function to turn it into a AIFunction that can be passed to models and executed automatically. +) -> FunctionTool[Any, ReturnT] | Callable[[Callable[..., ReturnT | Awaitable[ReturnT]]], FunctionTool[Any, ReturnT]]: + """Decorate a function to turn it into a FunctionTool that can be passed to models and executed automatically. This decorator creates a Pydantic model from the function's signature, which will be used to validate the arguments passed to the function @@ -1266,7 +1267,7 @@ def ai_function( description: A description of the function. If not provided, the function's docstring will be used. approval_mode: Whether or not approval is required to run this tool. - Default is that approval is not needed. + Default is that approval is required. max_invocations: The maximum number of times this function can be invoked. If None, there is no limit, should be at least 1. max_invocation_exceptions: The maximum number of exceptions allowed during invocations. @@ -1283,12 +1284,12 @@ def ai_function( .. 
code-block:: python - from agent_framework import ai_function + from agent_framework import tool from typing import Annotated - @ai_function - def ai_function_example( + @tool(approval_mode="never_require") + def tool_example( arg1: Annotated[str, "The first argument"], arg2: Annotated[int, "The second argument"], ) -> str: @@ -1297,8 +1298,8 @@ def ai_function_example( # the same function but with approval required to run - @ai_function(approval_mode="always_require") - def ai_function_example( + @tool(approval_mode="always_require") + def tool_example( arg1: Annotated[str, "The first argument"], arg2: Annotated[int, "The second argument"], ) -> str: @@ -1307,13 +1308,13 @@ def ai_function_example( # With custom name and description - @ai_function(name="custom_weather", description="Custom weather function") + @tool(name="custom_weather", description="Custom weather function") def another_weather_func(location: str) -> str: return f"Weather in {location}" # Async functions are also supported - @ai_function + @tool(approval_mode="never_require") async def async_get_weather(location: str) -> str: '''Get weather asynchronously.''' # Simulate async operation @@ -1321,12 +1322,12 @@ async def async_get_weather(location: str) -> str: """ - def decorator(func: Callable[..., ReturnT | Awaitable[ReturnT]]) -> AIFunction[Any, ReturnT]: + def decorator(func: Callable[..., ReturnT | Awaitable[ReturnT]]) -> FunctionTool[Any, ReturnT]: @wraps(func) - def wrapper(f: Callable[..., ReturnT | Awaitable[ReturnT]]) -> AIFunction[Any, ReturnT]: + def wrapper(f: Callable[..., ReturnT | Awaitable[ReturnT]]) -> FunctionTool[Any, ReturnT]: tool_name: str = name or getattr(f, "__name__", "unknown_function") # type: ignore[assignment] tool_desc: str = description or (f.__doc__ or "") - return AIFunction[Any, ReturnT]( + return FunctionTool[Any, ReturnT]( name=tool_name, description=tool_desc, approval_mode=approval_mode, @@ -1490,7 +1491,7 @@ async def _auto_invoke_function( custom_args: 
dict[str, Any] | None = None, *, config: FunctionInvocationConfiguration, - tool_map: dict[str, AIFunction[BaseModel, Any]], + tool_map: dict[str, FunctionTool[BaseModel, Any]], sequence_index: int | None = None, request_index: int | None = None, middleware_pipeline: Any = None, # Optional MiddlewarePipeline @@ -1503,7 +1504,7 @@ async def _auto_invoke_function( Keyword Args: config: The function invocation configuration. - tool_map: A mapping of tool names to AIFunction instances. + tool_map: A mapping of tool names to FunctionTool instances. sequence_index: The index of the function call in the sequence. request_index: The index of the request iteration. middleware_pipeline: Optional middleware pipeline to apply during execution. @@ -1522,7 +1523,7 @@ async def _auto_invoke_function( # this function is called. This function only handles the actual execution of approved, # non-declaration-only functions. - tool: AIFunction[BaseModel, Any] | None = None + tool: FunctionTool[BaseModel, Any] | None = None if function_call_content.type == "function_call": tool = tool_map.get(function_call_content.name) # type: ignore[arg-type] # Tool should exist because _try_execute_function_calls validates this @@ -1645,17 +1646,17 @@ def _get_tool_map( | Callable[..., Any] \ | MutableMapping[str, Any] \ | Sequence[ToolProtocol | Callable[..., Any] | MutableMapping[str, Any]]", -) -> dict[str, AIFunction[Any, Any]]: - ai_function_list: dict[str, AIFunction[Any, Any]] = {} - for tool in tools if isinstance(tools, list) else [tools]: - if isinstance(tool, AIFunction): - ai_function_list[tool.name] = tool +) -> dict[str, FunctionTool[Any, Any]]: + tool_list: dict[str, FunctionTool[Any, Any]] = {} + for tool_item in tools if isinstance(tools, list) else [tools]: + if isinstance(tool_item, FunctionTool): + tool_list[tool_item.name] = tool_item continue - if callable(tool): + if callable(tool_item): # Convert to AITool if it's a function or callable - ai_tool = ai_function(tool) - 
ai_function_list[ai_tool.name] = ai_tool - return ai_function_list + ai_tool = tool(tool_item) + tool_list[ai_tool.name] = ai_tool + return tool_list async def _try_execute_function_calls( diff --git a/python/packages/core/agent_framework/_types.py b/python/packages/core/agent_framework/_types.py index d8c34c769e..3517fcfb41 100644 --- a/python/packages/core/agent_framework/_types.py +++ b/python/packages/core/agent_framework/_types.py @@ -18,7 +18,7 @@ from ._logging import get_logger from ._serialization import SerializationMixin -from ._tools import ToolProtocol, ai_function +from ._tools import ToolProtocol, tool from .exceptions import AdditionItemMismatch, ContentError __all__ = [ @@ -2854,7 +2854,7 @@ def normalize_tools( ) -> list[ToolProtocol | MutableMapping[str, Any]]: """Normalize tools into a list. - Converts callables to AIFunction objects and ensures all tools are either + Converts callables to FunctionTool objects and ensures all tools are either ToolProtocol instances or MutableMappings. Args: @@ -2866,10 +2866,10 @@ def normalize_tools( Examples: .. 
code-block:: python - from agent_framework import normalize_tools, ai_function + from agent_framework import normalize_tools, tool - @ai_function + @tool def my_tool(x: int) -> int: return x * 2 @@ -2886,14 +2886,14 @@ def my_tool(x: int) -> int: if not isinstance(tools, Sequence) or isinstance(tools, (str, MutableMapping)): # Single tool (not a sequence, or is a mapping which shouldn't be treated as sequence) if not isinstance(tools, (ToolProtocol, MutableMapping)): - return [ai_function(tools)] + return [tool(tools)] return [tools] - for tool in tools: - if isinstance(tool, (ToolProtocol, MutableMapping)): - final_tools.append(tool) + for tool_item in tools: + if isinstance(tool_item, (ToolProtocol, MutableMapping)): + final_tools.append(tool_item) else: - # Convert callable to AIFunction - final_tools.append(ai_function(tool)) + # Convert callable to FunctionTool + final_tools.append(tool(tool_item)) return final_tools @@ -2908,7 +2908,7 @@ async def validate_tools( ) -> list[ToolProtocol | MutableMapping[str, Any]]: """Validate and normalize tools into a list. - Converts callables to AIFunction objects, expands MCP tools to their constituent + Converts callables to FunctionTool objects, expands MCP tools to their constituent functions (connecting them if needed), and ensures all tools are either ToolProtocol instances or MutableMappings. @@ -2921,10 +2921,10 @@ async def validate_tools( Examples: .. 
code-block:: python - from agent_framework import validate_tools, ai_function + from agent_framework import validate_tools, tool - @ai_function + @tool def my_tool(x: int) -> int: return x * 2 @@ -2935,22 +2935,22 @@ def my_tool(x: int) -> int: # List of tools tools = await validate_tools([my_tool, another_tool]) """ - # Use normalize_tools for common sync logic (converts callables to AIFunction) + # Use normalize_tools for common sync logic (converts callables to FunctionTool) normalized = normalize_tools(tools) # Handle MCP tool expansion (async-only) final_tools: list[ToolProtocol | MutableMapping[str, Any]] = [] - for tool in normalized: + for tool_ in normalized: # Import MCPTool here to avoid circular imports from ._mcp import MCPTool - if isinstance(tool, MCPTool): + if isinstance(tool_, MCPTool): # Expand MCP tools to their constituent functions - if not tool.is_connected: - await tool.connect() - final_tools.extend(tool.functions) # type: ignore + if not tool_.is_connected: + await tool_.connect() + final_tools.extend(tool_.functions) # type: ignore else: - final_tools.append(tool) + final_tools.append(tool_) return final_tools diff --git a/python/packages/core/agent_framework/_workflows/_agent.py b/python/packages/core/agent_framework/_workflows/_agent.py index 345e120c1f..6b82823be1 100644 --- a/python/packages/core/agent_framework/_workflows/_agent.py +++ b/python/packages/core/agent_framework/_workflows/_agent.py @@ -138,7 +138,7 @@ async def run( used to load and restore the checkpoint. When provided without checkpoint_id, enables checkpointing for this run. **kwargs: Additional keyword arguments passed through to underlying workflow - and ai_function tools. + and tool functions. Returns: The final workflow response as an AgentResponse. @@ -185,7 +185,7 @@ async def run_stream( used to load and restore the checkpoint. When provided without checkpoint_id, enables checkpointing for this run. 
**kwargs: Additional keyword arguments passed through to underlying workflow - and ai_function tools. + and tool functions. Yields: AgentResponseUpdate objects representing the workflow execution progress. @@ -225,7 +225,7 @@ async def _run_stream_impl( checkpoint_id: ID of checkpoint to restore from. checkpoint_storage: Runtime checkpoint storage. **kwargs: Additional keyword arguments passed through to the underlying - workflow and ai_function tools. + workflow and tool functions. Yields: AgentResponseUpdate objects representing the workflow execution progress. diff --git a/python/packages/core/agent_framework/_workflows/_const.py b/python/packages/core/agent_framework/_workflows/_const.py index 34bde1da47..4d27c609b1 100644 --- a/python/packages/core/agent_framework/_workflows/_const.py +++ b/python/packages/core/agent_framework/_workflows/_const.py @@ -11,7 +11,7 @@ # SharedState key for storing run kwargs that should be passed to agent invocations. # Used by all orchestration patterns (Sequential, Concurrent, GroupChat, Handoff, Magentic) -# to pass kwargs from workflow.run_stream() through to agent.run_stream() and @ai_function tools. +# to pass kwargs from workflow.run_stream() through to agent.run_stream() and @tool functions. 
WORKFLOW_RUN_KWARGS_KEY = "_workflow_run_kwargs" diff --git a/python/packages/core/agent_framework/_workflows/_handoff.py b/python/packages/core/agent_framework/_workflows/_handoff.py index 8d329b618d..00aa36dd99 100644 --- a/python/packages/core/agent_framework/_workflows/_handoff.py +++ b/python/packages/core/agent_framework/_workflows/_handoff.py @@ -41,7 +41,7 @@ from .._agents import AgentProtocol, ChatAgent from .._middleware import FunctionInvocationContext, FunctionMiddleware from .._threads import AgentThread -from .._tools import AIFunction, ai_function +from .._tools import FunctionTool, tool from .._types import AgentResponse, ChatMessage, Role from ._agent_executor import AgentExecutor, AgentExecutorRequest, AgentExecutorResponse from ._agent_utils import resolve_agent_id @@ -331,7 +331,7 @@ def _apply_auto_tools(self, agent: ChatAgent, targets: Sequence[HandoffConfigura existing_tools = list(default_options.get("tools") or []) existing_names = {getattr(tool, "name", "") for tool in existing_tools if hasattr(tool, "name")} - new_tools: list[AIFunction[Any, Any]] = [] + new_tools: list[FunctionTool[Any, Any]] = [] for target in targets: tool = self._create_handoff_tool(target.target_id, target.description) if tool.name in existing_names: @@ -347,17 +347,17 @@ def _apply_auto_tools(self, agent: ChatAgent, targets: Sequence[HandoffConfigura else: default_options["tools"] = existing_tools - def _create_handoff_tool(self, target_id: str, description: str | None = None) -> AIFunction[Any, Any]: + def _create_handoff_tool(self, target_id: str, description: str | None = None) -> FunctionTool[Any, Any]: """Construct the synthetic handoff tool that signals routing to `target_id`.""" tool_name = get_handoff_tool_name(target_id) doc = description or f"Handoff to the {target_id} agent." - # Note: approval_mode is intentionally NOT set for handoff tools. - # Handoff tools are framework-internal signals that trigger routing logic, - # not actual function executions. 
They are automatically intercepted by + # Note: approval_mode is set to "never_require" for handoff tools because + # they are framework-internal signals that trigger routing logic, not + # actual function executions. They are automatically intercepted by # _AutoHandoffMiddleware which short-circuits execution and provides synthetic # results, so the function body never actually runs in practice. - @ai_function(name=tool_name, description=doc) + @tool(name=tool_name, description=doc, approval_mode="never_require") def _handoff_tool(context: str | None = None) -> str: """Return a deterministic acknowledgement that encodes the target alias.""" return f"Handoff to {target_id}" diff --git a/python/packages/core/agent_framework/_workflows/_workflow.py b/python/packages/core/agent_framework/_workflows/_workflow.py index d6c612bff6..e7b744265d 100644 --- a/python/packages/core/agent_framework/_workflows/_workflow.py +++ b/python/packages/core/agent_framework/_workflows/_workflow.py @@ -456,7 +456,7 @@ async def run_stream( - Without checkpoint_id: Enables checkpointing for this run, overriding build-time configuration **kwargs: Additional keyword arguments to pass through to agent invocations. - These are stored in SharedState and accessible in @ai_function tools + These are stored in SharedState and accessible in @tool functions via the **kwargs parameter. Yields: @@ -476,7 +476,7 @@ async def run_stream( async for event in workflow.run_stream("start message"): process(event) - With custom context for ai_functions: + With custom context for tools: .. code-block:: python @@ -590,7 +590,7 @@ async def run( build-time configuration include_status_events: Whether to include WorkflowStatusEvent instances in the result list. **kwargs: Additional keyword arguments to pass through to agent invocations. - These are stored in SharedState and accessible in @ai_function tools + These are stored in SharedState and accessible in @tool functions via the **kwargs parameter. 
Returns: @@ -610,7 +610,7 @@ async def run( result = await workflow.run("start message") outputs = result.get_outputs() - With custom context for ai_functions: + With custom context for tools: .. code-block:: python diff --git a/python/packages/core/agent_framework/observability.py b/python/packages/core/agent_framework/observability.py index 51f2f09e60..2d294daddd 100644 --- a/python/packages/core/agent_framework/observability.py +++ b/python/packages/core/agent_framework/observability.py @@ -33,7 +33,7 @@ from ._agents import AgentProtocol from ._clients import ChatClientProtocol from ._threads import AgentThread - from ._tools import AIFunction + from ._tools import FunctionTool from ._types import ( AgentResponse, AgentResponseUpdate, @@ -1546,7 +1546,7 @@ def decorator(agent: type[TAgent]) -> type[TAgent]: # region Otel Helpers -def get_function_span_attributes(function: "AIFunction[Any, Any]", tool_call_id: str | None = None) -> dict[str, str]: +def get_function_span_attributes(function: "FunctionTool[Any, Any]", tool_call_id: str | None = None) -> dict[str, str]: """Get the span attributes for the given function. Args: diff --git a/python/packages/core/agent_framework/openai/_assistant_provider.py b/python/packages/core/agent_framework/openai/_assistant_provider.py index 336fe40c72..73acd2d05e 100644 --- a/python/packages/core/agent_framework/openai/_assistant_provider.py +++ b/python/packages/core/agent_framework/openai/_assistant_provider.py @@ -11,7 +11,7 @@ from .._agents import ChatAgent from .._memory import ContextProvider from .._middleware import Middleware -from .._tools import AIFunction, ToolProtocol +from .._tools import FunctionTool, ToolProtocol from .._types import normalize_tools from ..exceptions import ServiceInitializationError from ._assistants_client import OpenAIAssistantsClient @@ -215,7 +215,7 @@ async def create_agent( instructions: System instructions for the assistant. description: A description of the assistant. 
tools: Tools available to the assistant. Can include: - - AIFunction instances or callables decorated with @ai_function + - FunctionTool instances or callables decorated with @tool - HostedCodeInterpreterTool for code execution - HostedFileSearchTool for vector store search - Raw tool dictionaries @@ -467,7 +467,7 @@ def _validate_function_tools( if provided_tools is not None: normalized = normalize_tools(provided_tools) for tool in normalized: - if isinstance(tool, AIFunction): + if isinstance(tool, FunctionTool): provided_functions.add(tool.name) elif isinstance(tool, MutableMapping) and "function" in tool: func_spec = tool.get("function", {}) diff --git a/python/packages/core/agent_framework/openai/_assistants_client.py b/python/packages/core/agent_framework/openai/_assistants_client.py index 12ad1b5797..afb98f1088 100644 --- a/python/packages/core/agent_framework/openai/_assistants_client.py +++ b/python/packages/core/agent_framework/openai/_assistants_client.py @@ -36,7 +36,7 @@ from .._middleware import Middleware, use_chat_middleware from .._threads import ChatMessageStoreProtocol from .._tools import ( - AIFunction, + FunctionTool, HostedCodeInterpreterTool, HostedFileSearchTool, ToolProtocol, @@ -626,7 +626,7 @@ def _prepare_options( tool_definitions: list[MutableMapping[str, Any]] = [] if tool_mode["mode"] != "none" and tools is not None: for tool in tools: - if isinstance(tool, AIFunction): + if isinstance(tool, FunctionTool): tool_definitions.append(tool.to_json_schema_spec()) # type: ignore[reportUnknownArgumentType] elif isinstance(tool, HostedCodeInterpreterTool): tool_definitions.append({"type": "code_interpreter"}) diff --git a/python/packages/core/agent_framework/openai/_chat_client.py b/python/packages/core/agent_framework/openai/_chat_client.py index 2b4023e85a..a1bc1f846a 100644 --- a/python/packages/core/agent_framework/openai/_chat_client.py +++ b/python/packages/core/agent_framework/openai/_chat_client.py @@ -19,7 +19,7 @@ from .._clients 
import BaseChatClient from .._logging import get_logger from .._middleware import use_chat_middleware -from .._tools import AIFunction, HostedWebSearchTool, ToolProtocol, use_function_invocation +from .._tools import FunctionTool, HostedWebSearchTool, ToolProtocol, use_function_invocation from .._types import ( ChatMessage, ChatOptions, @@ -198,7 +198,7 @@ def _prepare_tools_for_openai(self, tools: Sequence[ToolProtocol | MutableMappin for tool in tools: if isinstance(tool, ToolProtocol): match tool: - case AIFunction(): + case FunctionTool(): chat_tools.append(tool.to_json_schema_spec()) case HostedWebSearchTool(): web_search_options = ( diff --git a/python/packages/core/agent_framework/openai/_responses_client.py b/python/packages/core/agent_framework/openai/_responses_client.py index 3d023110cf..9c12357e0a 100644 --- a/python/packages/core/agent_framework/openai/_responses_client.py +++ b/python/packages/core/agent_framework/openai/_responses_client.py @@ -38,7 +38,7 @@ from .._logging import get_logger from .._middleware import use_chat_middleware from .._tools import ( - AIFunction, + FunctionTool, HostedCodeInterpreterTool, HostedFileSearchTool, HostedImageGenerationTool, @@ -384,7 +384,7 @@ def _prepare_tools_for_openai( container=tool_args, ) ) - case AIFunction(): + case FunctionTool(): params = tool.parameters() params["additionalProperties"] = False response_tools.append( diff --git a/python/packages/core/agent_framework/openai/_shared.py b/python/packages/core/agent_framework/openai/_shared.py index 1eef3624b0..256c114a60 100644 --- a/python/packages/core/agent_framework/openai/_shared.py +++ b/python/packages/core/agent_framework/openai/_shared.py @@ -24,7 +24,7 @@ from .._pydantic import AFBaseSettings from .._serialization import SerializationMixin from .._telemetry import APP_INFO, USER_AGENT_KEY, prepend_agent_framework_to_user_agent -from .._tools import AIFunction, HostedCodeInterpreterTool, HostedFileSearchTool, ToolProtocol +from .._tools 
import FunctionTool, HostedCodeInterpreterTool, HostedFileSearchTool, ToolProtocol from ..exceptions import ServiceInitializationError logger: logging.Logger = get_logger("agent_framework.openai") @@ -295,7 +295,7 @@ def to_assistant_tools( tool_definitions: list[dict[str, Any]] = [] for tool in tools: - if isinstance(tool, AIFunction): + if isinstance(tool, FunctionTool): tool_definitions.append(tool.to_json_schema_spec()) elif isinstance(tool, HostedCodeInterpreterTool): tool_definitions.append({"type": "code_interpreter"}) diff --git a/python/packages/core/tests/azure/test_azure_assistants_client.py b/python/packages/core/tests/azure/test_azure_assistants_client.py index 776951a9ea..32f1b13252 100644 --- a/python/packages/core/tests/azure/test_azure_assistants_client.py +++ b/python/packages/core/tests/azure/test_azure_assistants_client.py @@ -18,6 +18,7 @@ ChatResponse, ChatResponseUpdate, HostedCodeInterpreterTool, + tool, ) from agent_framework.azure import AzureOpenAIAssistantsClient from agent_framework.exceptions import ServiceInitializationError @@ -253,6 +254,7 @@ def test_azure_assistants_client_serialize(azure_openai_unit_test_env: dict[str, assert "User-Agent" not in dumped_settings["default_headers"] +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/packages/core/tests/azure/test_azure_chat_client.py b/python/packages/core/tests/azure/test_azure_chat_client.py index 84d7d897ff..caba327dc7 100644 --- a/python/packages/core/tests/azure/test_azure_chat_client.py +++ b/python/packages/core/tests/azure/test_azure_chat_client.py @@ -25,7 +25,7 @@ ChatMessage, ChatResponse, ChatResponseUpdate, - ai_function, + tool, ) from agent_framework._telemetry import USER_AGENT_KEY from agent_framework.azure import AzureOpenAIChatClient @@ -631,7 +631,7 @@ async def test_streaming_with_none_delta( assert any(msg.contents for msg in results) 
-@ai_function +@tool(approval_mode="never_require") def get_story_text() -> str: """Returns a story about Emily and David.""" return ( @@ -642,7 +642,7 @@ def get_story_text() -> str: ) -@ai_function +@tool(approval_mode="never_require") def get_weather(location: str) -> str: """Get the current weather for a location.""" return f"The weather in {location} is sunny and 72°F." diff --git a/python/packages/core/tests/azure/test_azure_responses_client.py b/python/packages/core/tests/azure/test_azure_responses_client.py index b2d4a59ab7..35d92c7b98 100644 --- a/python/packages/core/tests/azure/test_azure_responses_client.py +++ b/python/packages/core/tests/azure/test_azure_responses_client.py @@ -20,7 +20,7 @@ HostedFileSearchTool, HostedMCPTool, HostedWebSearchTool, - ai_function, + tool, ) from agent_framework.azure import AzureOpenAIResponsesClient from agent_framework.exceptions import ServiceInitializationError @@ -41,7 +41,7 @@ class OutputStruct(BaseModel): weather: str -@ai_function +@tool(approval_mode="never_require") async def get_weather(location: Annotated[str, "The location as a city name"]) -> str: """Get the current weather in a given location.""" # Implementation of the tool to get weather diff --git a/python/packages/core/tests/core/conftest.py b/python/packages/core/tests/core/conftest.py index f2f6059b91..ed8de28c11 100644 --- a/python/packages/core/tests/core/conftest.py +++ b/python/packages/core/tests/core/conftest.py @@ -23,7 +23,7 @@ Content, Role, ToolProtocol, - ai_function, + tool, use_chat_middleware, use_function_invocation, ) @@ -65,10 +65,10 @@ def parameters(self) -> dict[str, Any]: @fixture -def ai_function_tool() -> ToolProtocol: +def tool_tool() -> ToolProtocol: """Returns a executable ToolProtocol.""" - @ai_function + @tool(approval_mode="never_require") def simple_function(x: int, y: int) -> int: """A simple function that adds two numbers.""" return x + y diff --git a/python/packages/core/tests/core/test_agents.py 
b/python/packages/core/tests/core/test_agents.py index a331f6f75c..9b6962784e 100644 --- a/python/packages/core/tests/core/test_agents.py +++ b/python/packages/core/tests/core/test_agents.py @@ -23,7 +23,7 @@ ContextProvider, HostedCodeInterpreterTool, Role, - ai_function, + tool, ) from agent_framework._mcp import MCPTool from agent_framework.exceptions import AgentExecutionException @@ -423,7 +423,7 @@ async def test_chat_agent_as_tool_no_name(chat_client: ChatClientProtocol) -> No async def test_chat_agent_as_tool_function_execution(chat_client: ChatClientProtocol) -> None: - """Test that the generated AIFunction can be executed.""" + """Test that the generated FunctionTool can be executed.""" agent = ChatAgent(chat_client=chat_client, name="TestAgent", description="Test agent") tool = agent.as_tool() @@ -564,7 +564,7 @@ async def test_agent_tool_receives_thread_in_kwargs(chat_client_base: Any) -> No captured: dict[str, Any] = {} - @ai_function(name="echo_thread_info") + @tool(name="echo_thread_info", approval_mode="never_require") def echo_thread_info(text: str, **kwargs: Any) -> str: # type: ignore[reportUnknownParameterType] thread = kwargs.get("thread") captured["has_thread"] = thread is not None @@ -596,9 +596,7 @@ def echo_thread_info(text: str, **kwargs: Any) -> str: # type: ignore[reportUnk assert captured.get("has_message_store") is True -async def test_chat_agent_tool_choice_run_level_overrides_agent_level( - chat_client_base: Any, ai_function_tool: Any -) -> None: +async def test_chat_agent_tool_choice_run_level_overrides_agent_level(chat_client_base: Any, tool_tool: Any) -> None: """Verify that tool_choice passed to run() overrides agent-level tool_choice.""" captured_options: list[dict[str, Any]] = [] @@ -617,7 +615,7 @@ async def capturing_inner( # Create agent with agent-level tool_choice="auto" and a tool (tools required for tool_choice to be meaningful) agent = ChatAgent( chat_client=chat_client_base, - tools=[ai_function_tool], + 
tools=[tool_tool], options={"tool_choice": "auto"}, ) @@ -630,7 +628,7 @@ async def capturing_inner( async def test_chat_agent_tool_choice_agent_level_used_when_run_level_not_specified( - chat_client_base: Any, ai_function_tool: Any + chat_client_base: Any, tool_tool: Any ) -> None: """Verify that agent-level tool_choice is used when run() doesn't specify one.""" from agent_framework import ChatOptions @@ -650,7 +648,7 @@ async def capturing_inner( # Create agent with agent-level tool_choice="required" and a tool agent = ChatAgent( chat_client=chat_client_base, - tools=[ai_function_tool], + tools=[tool_tool], default_options={"tool_choice": "required"}, ) @@ -664,9 +662,7 @@ async def capturing_inner( assert captured_options[0]["tool_choice"] == "required" -async def test_chat_agent_tool_choice_none_at_run_preserves_agent_level( - chat_client_base: Any, ai_function_tool: Any -) -> None: +async def test_chat_agent_tool_choice_none_at_run_preserves_agent_level(chat_client_base: Any, tool_tool: Any) -> None: """Verify that tool_choice=None at run() uses agent-level default.""" from agent_framework import ChatOptions @@ -685,7 +681,7 @@ async def capturing_inner( # Create agent with agent-level tool_choice="auto" and a tool agent = ChatAgent( chat_client=chat_client_base, - tools=[ai_function_tool], + tools=[tool_tool], default_options={"tool_choice": "auto"}, ) diff --git a/python/packages/core/tests/core/test_chat_agent_integration.py b/python/packages/core/tests/core/test_chat_agent_integration.py deleted file mode 100644 index 574c02fd61..0000000000 --- a/python/packages/core/tests/core/test_chat_agent_integration.py +++ /dev/null @@ -1,433 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. 
- -import json -import os -from typing import Annotated - -import pytest -from pydantic import BaseModel - -from agent_framework import ( - AgentResponse, - AgentResponseUpdate, - AgentThread, - ChatAgent, - HostedCodeInterpreterTool, - HostedImageGenerationTool, - HostedMCPTool, - MCPStreamableHTTPTool, - ai_function, -) -from agent_framework.openai import OpenAIResponsesClient - -skip_if_openai_integration_tests_disabled = pytest.mark.skipif( - os.getenv("RUN_INTEGRATION_TESTS", "false").lower() != "true" - or os.getenv("OPENAI_API_KEY", "") in ("", "test-dummy-key"), - reason="No real OPENAI_API_KEY provided; skipping integration tests." - if os.getenv("RUN_INTEGRATION_TESTS", "false").lower() == "true" - else "Integration tests are disabled.", -) - - -@ai_function -async def get_weather(location: Annotated[str, "The location as a city name"]) -> str: - """Get the current weather in a given location.""" - # Implementation of the tool to get weather - return f"The current weather in {location} is sunny." 
- - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_basic_run_streaming(): - """Test OpenAI Responses Client agent basic streaming functionality with OpenAIResponsesClient.""" - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - ) as agent: - # Test streaming run - full_text = "" - async for chunk in agent.run_stream("Please respond with exactly: 'This is a streaming response test.'"): - assert isinstance(chunk, AgentResponseUpdate) - if chunk.text: - full_text += chunk.text - - assert len(full_text) > 0 - assert "streaming response test" in full_text.lower() - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_thread_persistence(): - """Test OpenAI Responses Client agent thread persistence across runs with OpenAIResponsesClient.""" - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant with good memory.", - ) as agent: - # Create a new thread that will be reused - thread = agent.get_new_thread() - - # First interaction - first_response = await agent.run("My favorite programming language is Python. 
Remember this.", thread=thread) - - assert isinstance(first_response, AgentResponse) - assert first_response.text is not None - - # Second interaction - test memory - second_response = await agent.run("What is my favorite programming language?", thread=thread) - - assert isinstance(second_response, AgentResponse) - assert second_response.text is not None - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_thread_storage_with_store_true(): - """Test OpenAI Responses Client agent with store=True to verify service_thread_id is returned.""" - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant.", - ) as agent: - # Create a new thread - thread = AgentThread() - - # Initially, service_thread_id should be None - assert thread.service_thread_id is None - - # Run with store=True to store messages on OpenAI side - response = await agent.run( - "Hello! Please remember that my name is Alex.", - thread=thread, - options={"store": True}, - ) - - # Validate response - assert isinstance(response, AgentResponse) - assert response.text is not None - assert len(response.text) > 0 - - # After store=True, service_thread_id should be populated - assert thread.service_thread_id is not None - assert isinstance(thread.service_thread_id, str) - assert len(thread.service_thread_id) > 0 - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_existing_thread(): - """Test OpenAI Responses Client agent with existing thread to continue conversations across agent instances.""" - # First conversation - capture the thread - preserved_thread = None - - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant with good memory.", - ) as first_agent: - # Start a conversation and capture the thread - thread = first_agent.get_new_thread() - first_response = await first_agent.run("My hobby 
is photography. Remember this.", thread=thread) - - assert isinstance(first_response, AgentResponse) - assert first_response.text is not None - - # Preserve the thread for reuse - preserved_thread = thread - - # Second conversation - reuse the thread in a new agent instance - if preserved_thread: - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant with good memory.", - ) as second_agent: - # Reuse the preserved thread - second_response = await second_agent.run("What is my hobby?", thread=preserved_thread) - - assert isinstance(second_response, AgentResponse) - assert second_response.text is not None - assert "photography" in second_response.text.lower() - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_hosted_code_interpreter_tool(): - """Test OpenAI Responses Client agent with HostedCodeInterpreterTool through OpenAIResponsesClient.""" - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant that can execute Python code.", - tools=[HostedCodeInterpreterTool()], - ) as agent: - # Test code interpreter functionality - response = await agent.run("Calculate the sum of numbers from 1 to 10 using Python code.") - - assert isinstance(response, AgentResponse) - assert response.text is not None - assert len(response.text) > 0 - # Should contain calculation result (sum of 1-10 = 55) or code execution content - contains_relevant_content = any( - term in response.text.lower() for term in ["55", "sum", "code", "python", "calculate", "10"] - ) - assert contains_relevant_content or len(response.text.strip()) > 10 - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_image_generation_tool(): - """Test OpenAI Responses Client agent with raw image_generation tool through OpenAIResponsesClient.""" - async with ChatAgent( - 
chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant that can generate images.", - tools=HostedImageGenerationTool(options={"image_size": "1024x1024", "media_type": "png"}), - ) as agent: - # Test image generation functionality - response = await agent.run("Generate an image of a cute red panda sitting on a tree branch in a forest.") - - assert isinstance(response, AgentResponse) - assert response.messages - - # Verify we got image content - look for ImageGenerationToolResultContent - image_content_found = False - for message in response.messages: - for content in message.contents: - if content.type == "image_generation_tool_result" and content.outputs: - image_content_found = True - break - if image_content_found: - break - - # The test passes if we got image content - assert image_content_found, "Expected to find image content in response" - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_level_tool_persistence(): - """Test that agent-level tools persist across multiple runs with OpenAI Responses Client.""" - - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant that uses available tools.", - tools=[get_weather], # Agent-level tool - ) as agent: - # First run - agent-level tool should be available - first_response = await agent.run("What's the weather like in Chicago?") - - assert isinstance(first_response, AgentResponse) - assert first_response.text is not None - # Should use the agent-level weather tool - assert any(term in first_response.text.lower() for term in ["chicago", "sunny", "72"]) - - # Second run - agent-level tool should still be available (persistence test) - second_response = await agent.run("What's the weather in Miami?") - - assert isinstance(second_response, AgentResponse) - assert second_response.text is not None - # Should use the agent-level weather tool again - assert any(term in 
second_response.text.lower() for term in ["miami", "sunny", "72"]) - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_run_level_tool_isolation(): - """Test that run-level tools are isolated to specific runs and don't persist with OpenAI Responses Client.""" - # Counter to track how many times the weather tool is called - call_count = 0 - - @ai_function - async def get_weather_with_counter( - location: Annotated[str, "The location as a city name"], - ) -> str: - """Get the current weather in a given location.""" - nonlocal call_count - call_count += 1 - return f"The weather in {location} is sunny and 72°F." - - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant.", - ) as agent: - # First run - use run-level tool - first_response = await agent.run( - "What's the weather like in Chicago?", - tools=[get_weather_with_counter], # Run-level tool - ) - - assert isinstance(first_response, AgentResponse) - assert first_response.text is not None - # Should use the run-level weather tool (call count should be 1) - assert call_count == 1 - assert any(term in first_response.text.lower() for term in ["chicago", "sunny", "72"]) - - # Second run - run-level tool should NOT persist (key isolation test) - second_response = await agent.run("What's the weather like in Miami?") - - assert isinstance(second_response, AgentResponse) - assert second_response.text is not None - # Should NOT use the weather tool since it was only run-level in previous call - # Call count should still be 1 (no additional calls) - assert call_count == 1 - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_chat_options_agent_level() -> None: - """Integration test for comprehensive ChatOptions parameter coverage with OpenAI Response Agent.""" - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful 
assistant.", - tools=[get_weather], - default_options={ - "max_tokens": 100, - "temperature": 0.7, - "top_p": 0.9, - "seed": 123, - "user": "comprehensive-test-user", - "tool_choice": "auto", - }, - ) as agent: - response = await agent.run( - "Provide a brief, helpful response.", - ) - - assert isinstance(response, AgentResponse) - assert response.text is not None - assert len(response.text) > 0 - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_hosted_mcp_tool() -> None: - """Integration test for HostedMCPTool with OpenAI Response Agent using Microsoft Learn MCP.""" - - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant that can help with microsoft documentation questions.", - tools=HostedMCPTool( - name="Microsoft Learn MCP", - url="https://learn.microsoft.com/api/mcp", - description="A Microsoft Learn MCP server for documentation questions", - approval_mode="never_require", - ), - ) as agent: - response = await agent.run( - "How to create an Azure storage account using az cli?", - # this needs to be high enough to handle the full MCP tool response. 
- options={"max_tokens": 5000}, - ) - - assert isinstance(response, AgentResponse) - assert response.text - # Should contain Azure-related content since it's asking about Azure CLI - assert any(term in response.text.lower() for term in ["azure", "storage", "account", "cli"]) - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_local_mcp_tool() -> None: - """Integration test for MCPStreamableHTTPTool with OpenAI Response Agent using Microsoft Learn MCP.""" - - mcp_tool = MCPStreamableHTTPTool( - name="Microsoft Learn MCP", - url="https://learn.microsoft.com/api/mcp", - ) - - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant that can help with microsoft documentation questions.", - tools=[mcp_tool], - ) as agent: - response = await agent.run( - "How to create an Azure storage account using az cli?", - options={"max_tokens": 200}, - ) - - assert isinstance(response, AgentResponse) - assert response.text is not None - assert len(response.text) > 0 - # Should contain Azure-related content since it's asking about Azure CLI - assert any(term in response.text.lower() for term in ["azure", "storage", "account", "cli"]) - - -class ReleaseBrief(BaseModel): - """Structured output model for release brief testing.""" - - title: str - summary: str - highlights: list[str] - model_config = {"extra": "forbid"} - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_with_response_format_pydantic() -> None: - """Integration test for response_format with Pydantic model using OpenAI Responses Client.""" - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant that returns structured JSON responses.", - ) as agent: - response = await agent.run( - "Summarize the following release notes into a ReleaseBrief:\n\n" - "Version 2.0 Release Notes:\n" - "- Added new streaming 
API for real-time responses\n" - "- Improved error handling with detailed messages\n" - "- Performance boost of 50% in batch processing\n" - "- Fixed memory leak in connection pooling", - options={ - "response_format": ReleaseBrief, - }, - ) - - # Validate response - assert isinstance(response, AgentResponse) - assert response.value is not None - assert isinstance(response.value, ReleaseBrief) - - # Validate structured output fields - brief = response.value - assert len(brief.title) > 0 - assert len(brief.summary) > 0 - assert len(brief.highlights) > 0 - - -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_with_runtime_json_schema() -> None: - """Integration test for response_format with runtime JSON schema using OpenAI Responses Client.""" - runtime_schema = { - "title": "WeatherDigest", - "type": "object", - "properties": { - "location": {"type": "string"}, - "conditions": {"type": "string"}, - "temperature_c": {"type": "number"}, - "advisory": {"type": "string"}, - }, - "required": ["location", "conditions", "temperature_c", "advisory"], - "additionalProperties": False, - } - - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="Return only JSON that matches the provided schema. 
Do not add commentary.", - ) as agent: - response = await agent.run( - "Give a brief weather digest for Seattle.", - options={ - "response_format": { - "type": "json_schema", - "json_schema": { - "name": runtime_schema["title"], - "strict": True, - "schema": runtime_schema, - }, - }, - }, - ) - - # Validate response - assert isinstance(response, AgentResponse) - assert response.text is not None - - # Parse JSON and validate structure - parsed = json.loads(response.text) - assert "location" in parsed - assert "conditions" in parsed - assert "temperature_c" in parsed - assert "advisory" in parsed diff --git a/python/packages/core/tests/core/test_function_invocation_logic.py b/python/packages/core/tests/core/test_function_invocation_logic.py index b2de663d03..720d5a31d7 100644 --- a/python/packages/core/tests/core/test_function_invocation_logic.py +++ b/python/packages/core/tests/core/test_function_invocation_logic.py @@ -1,5 +1,6 @@ # Copyright (c) Microsoft. All rights reserved. +import asyncio from collections.abc import Awaitable, Callable from typing import Any @@ -13,7 +14,7 @@ ChatResponseUpdate, Content, Role, - ai_function, + tool, ) from agent_framework._middleware import FunctionInvocationContext, FunctionMiddleware @@ -21,7 +22,7 @@ async def test_base_client_with_function_calling(chat_client_base: ChatClientProtocol): exec_counter = 0 - @ai_function(name="test_function") + @tool(name="test_function", approval_mode="never_require") def ai_func(arg1: str) -> str: nonlocal exec_counter exec_counter += 1 @@ -57,7 +58,7 @@ def ai_func(arg1: str) -> str: async def test_base_client_with_function_calling_resets(chat_client_base: ChatClientProtocol): exec_counter = 0 - @ai_function(name="test_function") + @tool(name="test_function", approval_mode="never_require") def ai_func(arg1: str) -> str: nonlocal exec_counter exec_counter += 1 @@ -99,7 +100,7 @@ def ai_func(arg1: str) -> str: async def test_base_client_with_streaming_function_calling(chat_client_base: 
ChatClientProtocol): exec_counter = 0 - @ai_function(name="test_function") + @tool(name="test_function", approval_mode="never_require") def ai_func(arg1: str) -> str: nonlocal exec_counter exec_counter += 1 @@ -142,7 +143,7 @@ async def test_function_invocation_inside_aiohttp_server(chat_client_base: ChatC exec_counter = 0 - @ai_function(name="start_todo_investigation") + @tool(name="start_todo_investigation", approval_mode="never_require") def ai_func(user_query: str) -> str: nonlocal exec_counter exec_counter += 1 @@ -199,7 +200,7 @@ async def test_function_invocation_in_threaded_aiohttp_app(chat_client_base: Cha exec_counter = 0 - @ai_function(name="start_threaded_investigation") + @tool(name="start_threaded_investigation", approval_mode="never_require") def ai_func(user_query: str) -> str: nonlocal exec_counter exec_counter += 1 @@ -319,13 +320,13 @@ async def test_function_invocation_scenarios( # Simulate a service-side thread with conversation_id conversation_id = "test-thread-123" - @ai_function(name="no_approval_func") + @tool(name="no_approval_func", approval_mode="never_require") def func_no_approval(arg1: str) -> str: nonlocal exec_counter exec_counter += 1 return f"Processed {arg1}" - @ai_function(name="approval_func", approval_mode="always_require") + @tool(name="approval_func", approval_mode="always_require") def func_with_approval(arg1: str) -> str: nonlocal exec_counter exec_counter += 1 @@ -473,13 +474,13 @@ async def test_rejected_approval(chat_client_base: ChatClientProtocol): exec_counter_approved = 0 exec_counter_rejected = 0 - @ai_function(name="approved_func", approval_mode="always_require") + @tool(name="approved_func", approval_mode="always_require") def func_approved(arg1: str) -> str: nonlocal exec_counter_approved exec_counter_approved += 1 return f"Approved {arg1}" - @ai_function(name="rejected_func", approval_mode="always_require") + @tool(name="rejected_func", approval_mode="always_require") def func_rejected(arg1: str) -> str: 
nonlocal exec_counter_rejected exec_counter_rejected += 1 @@ -569,7 +570,7 @@ async def test_approval_requests_in_assistant_message(chat_client_base: ChatClie """Approval requests should be added to the assistant message that contains the function call.""" exec_counter = 0 - @ai_function(name="test_func", approval_mode="always_require") + @tool(name="test_func", approval_mode="always_require") def func_with_approval(arg1: str) -> str: nonlocal exec_counter exec_counter += 1 @@ -604,7 +605,7 @@ async def test_persisted_approval_messages_replay_correctly(chat_client_base: Ch exec_counter = 0 - @ai_function(name="test_func", approval_mode="always_require") + @tool(name="test_func", approval_mode="always_require") def func_with_approval(arg1: str) -> str: nonlocal exec_counter exec_counter += 1 @@ -656,7 +657,7 @@ def func_with_approval(arg1: str) -> str: async def test_no_duplicate_function_calls_after_approval_processing(chat_client_base: ChatClientProtocol): """Processing approval should not create duplicate function calls in messages.""" - @ai_function(name="test_func", approval_mode="always_require") + @tool(name="test_func", approval_mode="always_require") def func_with_approval(arg1: str) -> str: return f"Result {arg1}" @@ -700,7 +701,7 @@ def func_with_approval(arg1: str) -> str: async def test_rejection_result_uses_function_call_id(chat_client_base: ChatClientProtocol): """Rejection error result should use the function call's call_id, not the approval's id.""" - @ai_function(name="test_func", approval_mode="always_require") + @tool(name="test_func", approval_mode="always_require") def func_with_approval(arg1: str) -> str: return f"Result {arg1}" @@ -745,7 +746,7 @@ async def test_max_iterations_limit(chat_client_base: ChatClientProtocol): """Test that MAX_ITERATIONS in additional_properties limits function call loops.""" exec_counter = 0 - @ai_function(name="test_function") + @tool(name="test_function", approval_mode="never_require") def ai_func(arg1: str) -> 
str: nonlocal exec_counter exec_counter += 1 @@ -790,7 +791,7 @@ async def test_function_invocation_config_enabled_false(chat_client_base: ChatCl """Test that setting enabled=False disables function invocation.""" exec_counter = 0 - @ai_function(name="test_function") + @tool(name="test_function") def ai_func(arg1: str) -> str: nonlocal exec_counter exec_counter += 1 @@ -814,7 +815,7 @@ def ai_func(arg1: str) -> str: async def test_function_invocation_config_max_consecutive_errors(chat_client_base: ChatClientProtocol): """Test that max_consecutive_errors_per_request limits error retries.""" - @ai_function(name="error_function") + @tool(name="error_function", approval_mode="never_require") def error_func(arg1: str) -> str: raise ValueError("Function error") @@ -882,7 +883,7 @@ async def test_function_invocation_config_terminate_on_unknown_calls_false(chat_ """Test that terminate_on_unknown_calls=False returns error message for unknown functions.""" exec_counter = 0 - @ai_function(name="known_function") + @tool(name="known_function") def known_func(arg1: str) -> str: nonlocal exec_counter exec_counter += 1 @@ -917,7 +918,7 @@ async def test_function_invocation_config_terminate_on_unknown_calls_true(chat_c """Test that terminate_on_unknown_calls=True stops execution on unknown functions.""" exec_counter = 0 - @ai_function(name="known_function") + @tool(name="known_function") def known_func(arg1: str) -> str: nonlocal exec_counter exec_counter += 1 @@ -949,13 +950,13 @@ async def test_function_invocation_config_additional_tools(chat_client_base: Cha exec_counter_visible = 0 exec_counter_hidden = 0 - @ai_function(name="visible_function") + @tool(name="visible_function") def visible_func(arg1: str) -> str: nonlocal exec_counter_visible exec_counter_visible += 1 return f"Visible {arg1}" - @ai_function(name="hidden_function") + @tool(name="hidden_function") def hidden_func(arg1: str) -> str: nonlocal exec_counter_hidden exec_counter_hidden += 1 @@ -996,7 +997,7 @@ def 
hidden_func(arg1: str) -> str: async def test_function_invocation_config_include_detailed_errors_false(chat_client_base: ChatClientProtocol): """Test that include_detailed_errors=False returns generic error messages.""" - @ai_function(name="error_function") + @tool(name="error_function", approval_mode="never_require") def error_func(arg1: str) -> str: raise ValueError("Specific error message that should not appear") @@ -1030,7 +1031,7 @@ def error_func(arg1: str) -> str: async def test_function_invocation_config_include_detailed_errors_true(chat_client_base: ChatClientProtocol): """Test that include_detailed_errors=True returns detailed error information.""" - @ai_function(name="error_function") + @tool(name="error_function", approval_mode="never_require") def error_func(arg1: str) -> str: raise ValueError("Specific error message that should appear") @@ -1100,7 +1101,7 @@ async def test_function_invocation_config_validation_max_consecutive_errors(): async def test_argument_validation_error_with_detailed_errors(chat_client_base: ChatClientProtocol): """Test that argument validation errors include details when include_detailed_errors=True.""" - @ai_function(name="typed_function") + @tool(name="typed_function", approval_mode="never_require") def typed_func(arg1: int) -> str: # Expects int, not str return f"Got {arg1}" @@ -1134,7 +1135,7 @@ def typed_func(arg1: int) -> str: # Expects int, not str async def test_argument_validation_error_without_detailed_errors(chat_client_base: ChatClientProtocol): """Test that argument validation errors are generic when include_detailed_errors=False.""" - @ai_function(name="typed_function") + @tool(name="typed_function", approval_mode="never_require") def typed_func(arg1: int) -> str: # Expects int, not str return f"Got {arg1}" @@ -1168,7 +1169,7 @@ def typed_func(arg1: int) -> str: # Expects int, not str async def test_hosted_tool_approval_response(chat_client_base: ChatClientProtocol): """Test handling of approval responses for 
hosted tools (tools not in tool_map).""" - @ai_function(name="local_function") + @tool(name="local_function") def local_func(arg1: str) -> str: return f"Local {arg1}" @@ -1201,7 +1202,7 @@ def local_func(arg1: str) -> str: async def test_unapproved_tool_execution_raises_exception(chat_client_base: ChatClientProtocol): """Test that attempting to execute an unapproved tool raises ToolException.""" - @ai_function(name="test_function", approval_mode="always_require") + @tool(name="test_function", approval_mode="always_require") def test_func(arg1: str) -> str: return f"Result {arg1}" @@ -1256,7 +1257,7 @@ async def test_approved_function_call_with_error_without_detailed_errors(chat_cl exec_counter = 0 - @ai_function(name="error_func", approval_mode="always_require") + @tool(name="error_func", approval_mode="always_require") def error_func(arg1: str) -> str: nonlocal exec_counter exec_counter += 1 @@ -1319,7 +1320,7 @@ async def test_approved_function_call_with_error_with_detailed_errors(chat_clien exec_counter = 0 - @ai_function(name="error_func", approval_mode="always_require") + @tool(name="error_func", approval_mode="always_require") def error_func(arg1: str) -> str: nonlocal exec_counter exec_counter += 1 @@ -1380,7 +1381,7 @@ async def test_approved_function_call_with_validation_error(chat_client_base: Ch exec_counter = 0 - @ai_function(name="typed_func", approval_mode="always_require") + @tool(name="typed_func", approval_mode="always_require") def typed_func(arg1: int) -> str: # Expects int, not str nonlocal exec_counter exec_counter += 1 @@ -1441,7 +1442,7 @@ async def test_approved_function_call_successful_execution(chat_client_base: Cha exec_counter = 0 - @ai_function(name="success_func", approval_mode="always_require") + @tool(name="success_func", approval_mode="always_require") def success_func(arg1: str) -> str: nonlocal exec_counter exec_counter += 1 @@ -1493,10 +1494,10 @@ def success_func(arg1: str) -> str: async def 
test_declaration_only_tool(chat_client_base: ChatClientProtocol): """Test that declaration_only tools without implementation (func=None) are not executed.""" - from agent_framework import AIFunction + from agent_framework import FunctionTool # Create a truly declaration-only function with no implementation - declaration_func = AIFunction( + declaration_func = FunctionTool( name="declaration_func", func=None, description="A declaration-only function for testing", @@ -1547,14 +1548,14 @@ async def test_multiple_function_calls_parallel_execution(chat_client_base: Chat exec_order = [] - @ai_function(name="func1") + @tool(name="func1", approval_mode="never_require") async def func1(arg1: str) -> str: exec_order.append("func1_start") await asyncio.sleep(0.01) # Small delay exec_order.append("func1_end") return f"Result1 {arg1}" - @ai_function(name="func2") + @tool(name="func2", approval_mode="never_require") async def func2(arg1: str) -> str: exec_order.append("func2_start") await asyncio.sleep(0.01) # Small delay @@ -1587,10 +1588,11 @@ async def func2(arg1: str) -> str: assert len(results) == 2 -async def test_callable_function_converted_to_ai_function(chat_client_base: ChatClientProtocol): - """Test that plain callable functions are converted to AIFunction.""" +async def test_callable_function_converted_to_tool(chat_client_base: ChatClientProtocol): + """Test that plain callable functions are converted to FunctionTool.""" exec_counter = 0 + @tool(approval_mode="never_require") def plain_function(arg1: str) -> str: """A plain function without decorator.""" nonlocal exec_counter @@ -1621,7 +1623,7 @@ def plain_function(arg1: str) -> str: async def test_conversation_id_handling(chat_client_base: ChatClientProtocol): """Test that conversation_id is properly handled and messages are cleared.""" - @ai_function(name="test_function") + @tool(name="test_function", approval_mode="never_require") def test_func(arg1: str) -> str: return f"Result {arg1}" @@ -1653,7 +1655,7 @@ def 
test_func(arg1: str) -> str: async def test_function_result_appended_to_existing_assistant_message(chat_client_base: ChatClientProtocol): """Test that function results are appended to existing assistant message when appropriate.""" - @ai_function(name="test_function") + @tool(name="test_function", approval_mode="never_require") def test_func(arg1: str) -> str: return f"Result {arg1}" @@ -1685,7 +1687,7 @@ async def test_error_recovery_resets_counter(chat_client_base: ChatClientProtoco call_count = 0 - @ai_function(name="sometimes_fails") + @tool(name="sometimes_fails", approval_mode="never_require") def sometimes_fails(arg1: str) -> str: nonlocal call_count call_count += 1 @@ -1741,7 +1743,7 @@ async def test_streaming_approval_request_generated(chat_client_base: ChatClient """Test that approval requests are generated correctly in streaming mode.""" exec_counter = 0 - @ai_function(name="test_func", approval_mode="always_require") + @tool(name="test_func", approval_mode="always_require") def func_with_approval(arg1: str) -> str: nonlocal exec_counter exec_counter += 1 @@ -1777,7 +1779,7 @@ async def test_streaming_max_iterations_limit(chat_client_base: ChatClientProtoc """Test that MAX_ITERATIONS in streaming mode limits function call loops.""" exec_counter = 0 - @ai_function(name="test_function") + @tool(name="test_function", approval_mode="never_require") def ai_func(arg1: str) -> str: nonlocal exec_counter exec_counter += 1 @@ -1829,7 +1831,7 @@ async def test_streaming_function_invocation_config_enabled_false(chat_client_ba """Test that setting enabled=False disables function invocation in streaming mode.""" exec_counter = 0 - @ai_function(name="test_function") + @tool(name="test_function", approval_mode="never_require") def ai_func(arg1: str) -> str: nonlocal exec_counter exec_counter += 1 @@ -1857,7 +1859,7 @@ def ai_func(arg1: str) -> str: async def test_streaming_function_invocation_config_max_consecutive_errors(chat_client_base: ChatClientProtocol): """Test 
that max_consecutive_errors_per_request limits error retries in streaming mode.""" - @ai_function(name="error_function") + @tool(name="error_function", approval_mode="never_require") def error_func(arg1: str) -> str: raise ValueError("Function error") @@ -1920,7 +1922,7 @@ async def test_streaming_function_invocation_config_terminate_on_unknown_calls_f """Test that terminate_on_unknown_calls=False returns error message for unknown functions in streaming mode.""" exec_counter = 0 - @ai_function(name="known_function") + @tool(name="known_function", approval_mode="never_require") def known_func(arg1: str) -> str: nonlocal exec_counter exec_counter += 1 @@ -1963,7 +1965,7 @@ async def test_streaming_function_invocation_config_terminate_on_unknown_calls_t """Test that terminate_on_unknown_calls=True stops execution on unknown functions in streaming mode.""" exec_counter = 0 - @ai_function(name="known_function") + @tool(name="known_function", approval_mode="never_require") def known_func(arg1: str) -> str: nonlocal exec_counter exec_counter += 1 @@ -1996,7 +1998,7 @@ def known_func(arg1: str) -> str: async def test_streaming_function_invocation_config_include_detailed_errors_true(chat_client_base: ChatClientProtocol): """Test that include_detailed_errors=True returns detailed error information in streaming mode.""" - @ai_function(name="error_function") + @tool(name="error_function", approval_mode="never_require") def error_func(arg1: str) -> str: raise ValueError("Specific error message that should appear") @@ -2036,7 +2038,7 @@ async def test_streaming_function_invocation_config_include_detailed_errors_fals ): """Test that include_detailed_errors=False returns generic error messages in streaming mode.""" - @ai_function(name="error_function") + @tool(name="error_function", approval_mode="never_require") def error_func(arg1: str) -> str: raise ValueError("Specific error message that should not appear") @@ -2074,7 +2076,7 @@ def error_func(arg1: str) -> str: async def 
test_streaming_argument_validation_error_with_detailed_errors(chat_client_base: ChatClientProtocol): """Test that argument validation errors include details when include_detailed_errors=True in streaming mode.""" - @ai_function(name="typed_function") + @tool(name="typed_function", approval_mode="never_require") def typed_func(arg1: int) -> str: # Expects int, not str return f"Got {arg1}" @@ -2112,7 +2114,7 @@ def typed_func(arg1: int) -> str: # Expects int, not str async def test_streaming_argument_validation_error_without_detailed_errors(chat_client_base: ChatClientProtocol): """Test that argument validation errors are generic when include_detailed_errors=False in streaming mode.""" - @ai_function(name="typed_function") + @tool(name="typed_function", approval_mode="never_require") def typed_func(arg1: int) -> str: # Expects int, not str return f"Got {arg1}" @@ -2149,18 +2151,17 @@ def typed_func(arg1: int) -> str: # Expects int, not str async def test_streaming_multiple_function_calls_parallel_execution(chat_client_base: ChatClientProtocol): """Test that multiple function calls are executed in parallel in streaming mode.""" - import asyncio exec_order = [] - @ai_function(name="func1") + @tool(name="func1", approval_mode="never_require") async def func1(arg1: str) -> str: exec_order.append("func1_start") await asyncio.sleep(0.01) # Small delay exec_order.append("func1_end") return f"Result1 {arg1}" - @ai_function(name="func2") + @tool(name="func2", approval_mode="never_require") async def func2(arg1: str) -> str: exec_order.append("func2_start") await asyncio.sleep(0.01) # Small delay @@ -2202,7 +2203,7 @@ async def test_streaming_approval_requests_in_assistant_message(chat_client_base """Approval requests should be added to assistant updates in streaming mode.""" exec_counter = 0 - @ai_function(name="test_func", approval_mode="always_require") + @tool(name="test_func", approval_mode="always_require") def func_with_approval(arg1: str) -> str: nonlocal exec_counter 
exec_counter += 1 @@ -2238,7 +2239,7 @@ async def test_streaming_error_recovery_resets_counter(chat_client_base: ChatCli call_count = 0 - @ai_function(name="sometimes_fails") + @tool(name="sometimes_fails", approval_mode="never_require") def sometimes_fails(arg1: str) -> str: nonlocal call_count call_count += 1 @@ -2306,7 +2307,7 @@ async def test_terminate_loop_single_function_call(chat_client_base: ChatClientP """Test that terminate_loop=True exits the function calling loop after single function call.""" exec_counter = 0 - @ai_function(name="test_function") + @tool(name="test_function", approval_mode="never_require") def ai_func(arg1: str) -> str: nonlocal exec_counter exec_counter += 1 @@ -2367,13 +2368,13 @@ async def test_terminate_loop_multiple_function_calls_one_terminates(chat_client normal_call_count = 0 terminating_call_count = 0 - @ai_function(name="normal_function") + @tool(name="normal_function", approval_mode="never_require") def normal_func(arg1: str) -> str: nonlocal normal_call_count normal_call_count += 1 return f"Normal {arg1}" - @ai_function(name="terminating_function") + @tool(name="terminating_function", approval_mode="never_require") def terminating_func(arg1: str) -> str: nonlocal terminating_call_count terminating_call_count += 1 @@ -2423,7 +2424,7 @@ async def test_terminate_loop_streaming_single_function_call(chat_client_base: C """Test that terminate_loop=True exits the streaming function calling loop.""" exec_counter = 0 - @ai_function(name="test_function") + @tool(name="test_function", approval_mode="never_require") def ai_func(arg1: str) -> str: nonlocal exec_counter exec_counter += 1 diff --git a/python/packages/core/tests/core/test_kwargs_propagation_to_ai_function.py b/python/packages/core/tests/core/test_kwargs_propagation_to_ai_function.py index 1a206d9646..34798a4a16 100644 --- a/python/packages/core/tests/core/test_kwargs_propagation_to_ai_function.py +++ 
b/python/packages/core/tests/core/test_kwargs_propagation_to_ai_function.py @@ -1,6 +1,6 @@ # Copyright (c) Microsoft. All rights reserved. -"""Tests for kwargs propagation from get_response() to @ai_function tools.""" +"""Tests for kwargs propagation from get_response() to @tool functions.""" from typing import Any @@ -9,19 +9,19 @@ ChatResponse, ChatResponseUpdate, Content, - ai_function, + tool, ) from agent_framework._tools import _handle_function_calls_response, _handle_function_calls_streaming_response -class TestKwargsPropagationToAIFunction: - """Test cases for kwargs flowing from get_response() to @ai_function tools.""" +class TestKwargsPropagationToFunctionTool: + """Test cases for kwargs flowing from get_response() to @tool functions.""" - async def test_kwargs_propagate_to_ai_function_with_kwargs(self) -> None: - """Test that kwargs passed to get_response() are available in @ai_function **kwargs.""" + async def test_kwargs_propagate_to_tool_with_kwargs(self) -> None: + """Test that kwargs passed to get_response() are available in @tool **kwargs.""" captured_kwargs: dict[str, Any] = {} - @ai_function + @tool(approval_mode="never_require") def capture_kwargs_tool(x: int, **kwargs: Any) -> str: """A tool that captures kwargs for testing.""" captured_kwargs.update(kwargs) @@ -75,10 +75,10 @@ async def mock_get_response(self, messages, **kwargs): # Verify result assert result.messages[-1].text == "Done!" 
- async def test_kwargs_not_forwarded_to_ai_function_without_kwargs(self) -> None: - """Test that kwargs are NOT forwarded to @ai_function that doesn't accept **kwargs.""" + async def test_kwargs_not_forwarded_to_tool_without_kwargs(self) -> None: + """Test that kwargs are NOT forwarded to @tool that doesn't accept **kwargs.""" - @ai_function + @tool(approval_mode="never_require") def simple_tool(x: int) -> str: """A simple tool without **kwargs.""" # This should not receive any extra kwargs @@ -120,7 +120,7 @@ async def test_kwargs_isolated_between_function_calls(self) -> None: """Test that kwargs don't leak between different function call invocations.""" invocation_kwargs: list[dict[str, Any]] = [] - @ai_function + @tool(approval_mode="never_require") def tracking_tool(name: str, **kwargs: Any) -> str: """A tool that tracks kwargs from each invocation.""" invocation_kwargs.append(dict(kwargs)) @@ -170,10 +170,10 @@ async def mock_get_response(self, messages, **kwargs): assert result.messages[-1].text == "All done!" 
async def test_streaming_response_kwargs_propagation(self) -> None: - """Test that kwargs propagate to @ai_function in streaming mode.""" + """Test that kwargs propagate to @tool in streaming mode.""" captured_kwargs: dict[str, Any] = {} - @ai_function + @tool(approval_mode="never_require") def streaming_capture_tool(value: str, **kwargs: Any) -> str: """A tool that captures kwargs during streaming.""" captured_kwargs.update(kwargs) diff --git a/python/packages/core/tests/core/test_mcp.py b/python/packages/core/tests/core/test_mcp.py index 28f9286294..67bf94acaf 100644 --- a/python/packages/core/tests/core/test_mcp.py +++ b/python/packages/core/tests/core/test_mcp.py @@ -1228,7 +1228,7 @@ async def test_streamable_http_integration(): if not url.startswith("http"): pytest.skip("LOCAL_MCP_URL is not an HTTP URL") - tool = MCPStreamableHTTPTool(name="integration_test", url=url) + tool = MCPStreamableHTTPTool(name="integration_test", url=url, approval_mode="never_require") async with tool: # Test that we can connect and load tools @@ -1260,7 +1260,7 @@ async def test_mcp_connection_reset_integration(): """ url = os.environ.get("LOCAL_MCP_URL") - tool = MCPStreamableHTTPTool(name="integration_test", url=url) + tool = MCPStreamableHTTPTool(name="integration_test", url=url, approval_mode="never_require") async with tool: # Verify initial connection @@ -1620,7 +1620,7 @@ def test_mcp_websocket_tool_get_mcp_client_with_kwargs(): async def test_mcp_tool_deduplication(): """Test that MCP tools are not duplicated in MCPTool""" from agent_framework._mcp import MCPTool - from agent_framework._tools import AIFunction + from agent_framework._tools import FunctionTool # Create MCPStreamableHTTPTool instance tool = MCPTool(name="test_mcp_tool") @@ -1629,12 +1629,12 @@ async def test_mcp_tool_deduplication(): tool._functions = [] # Add initial functions - func1 = AIFunction( + func1 = FunctionTool( func=lambda x: f"Result: {x}", name="analyze_content", description="Analyzes content", 
) - func2 = AIFunction( + func2 = FunctionTool( func=lambda x: f"Extract: {x}", name="extract_info", description="Extracts information", @@ -1662,7 +1662,7 @@ async def test_mcp_tool_deduplication(): if tool_name in existing_names: continue # Skip duplicates - new_func = AIFunction(func=lambda x: f"Process: {x}", name=tool_name, description=description) + new_func = FunctionTool(func=lambda x: f"Process: {x}", name=tool_name, description=description) tool._functions.append(new_func) existing_names.add(tool_name) added_count += 1 diff --git a/python/packages/core/tests/core/test_middleware.py b/python/packages/core/tests/core/test_middleware.py index 441896f92b..a62cca2c76 100644 --- a/python/packages/core/tests/core/test_middleware.py +++ b/python/packages/core/tests/core/test_middleware.py @@ -28,7 +28,7 @@ FunctionMiddleware, FunctionMiddlewarePipeline, ) -from agent_framework._tools import AIFunction +from agent_framework._tools import FunctionTool class TestAgentRunContext: @@ -73,7 +73,7 @@ def test_init_with_thread(self, mock_agent: AgentProtocol) -> None: class TestFunctionInvocationContext: """Test cases for FunctionInvocationContext.""" - def test_init_with_defaults(self, mock_function: AIFunction[Any, Any]) -> None: + def test_init_with_defaults(self, mock_function: FunctionTool[Any, Any]) -> None: """Test FunctionInvocationContext initialization with default values.""" arguments = FunctionTestArgs(name="test") context = FunctionInvocationContext(function=mock_function, arguments=arguments) @@ -82,7 +82,7 @@ def test_init_with_defaults(self, mock_function: AIFunction[Any, Any]) -> None: assert context.arguments == arguments assert context.metadata == {} - def test_init_with_custom_metadata(self, mock_function: AIFunction[Any, Any]) -> None: + def test_init_with_custom_metadata(self, mock_function: FunctionTool[Any, Any]) -> None: """Test FunctionInvocationContext initialization with custom metadata.""" arguments = FunctionTestArgs(name="test") metadata = 
{"key": "value"} @@ -419,7 +419,7 @@ async def process(self, context: FunctionInvocationContext, next: Any) -> None: await next(context) context.terminate = True - async def test_execute_with_pre_next_termination(self, mock_function: AIFunction[Any, Any]) -> None: + async def test_execute_with_pre_next_termination(self, mock_function: FunctionTool[Any, Any]) -> None: """Test pipeline execution with termination before next().""" middleware = self.PreNextTerminateFunctionMiddleware() pipeline = FunctionMiddlewarePipeline([middleware]) @@ -438,7 +438,7 @@ async def final_handler(ctx: FunctionInvocationContext) -> str: # Handler should not be called when terminated before next() assert execution_order == [] - async def test_execute_with_post_next_termination(self, mock_function: AIFunction[Any, Any]) -> None: + async def test_execute_with_post_next_termination(self, mock_function: FunctionTool[Any, Any]) -> None: """Test pipeline execution with termination after next().""" middleware = self.PostNextTerminateFunctionMiddleware() pipeline = FunctionMiddlewarePipeline([middleware]) @@ -477,7 +477,7 @@ async def test_middleware( pipeline = FunctionMiddlewarePipeline([test_middleware]) assert pipeline.has_middlewares - async def test_execute_no_middleware(self, mock_function: AIFunction[Any, Any]) -> None: + async def test_execute_no_middleware(self, mock_function: FunctionTool[Any, Any]) -> None: """Test pipeline execution with no middleware.""" pipeline = FunctionMiddlewarePipeline() arguments = FunctionTestArgs(name="test") @@ -491,7 +491,7 @@ async def final_handler(ctx: FunctionInvocationContext) -> str: result = await pipeline.execute(mock_function, arguments, context, final_handler) assert result == expected_result - async def test_execute_with_middleware(self, mock_function: AIFunction[Any, Any]) -> None: + async def test_execute_with_middleware(self, mock_function: FunctionTool[Any, Any]) -> None: """Test pipeline execution with middleware.""" execution_order: 
list[str] = [] @@ -778,7 +778,7 @@ async def final_handler(ctx: AgentRunContext) -> AgentResponse: assert context.metadata["after"] is True assert metadata_updates == ["before", "handler", "after"] - async def test_function_middleware_execution(self, mock_function: AIFunction[Any, Any]) -> None: + async def test_function_middleware_execution(self, mock_function: FunctionTool[Any, Any]) -> None: """Test class-based function middleware execution.""" metadata_updates: list[str] = [] @@ -840,7 +840,7 @@ async def final_handler(ctx: AgentRunContext) -> AgentResponse: assert context.metadata["function_middleware"] is True assert execution_order == ["function_before", "handler", "function_after"] - async def test_function_function_middleware(self, mock_function: AIFunction[Any, Any]) -> None: + async def test_function_function_middleware(self, mock_function: FunctionTool[Any, Any]) -> None: """Test function-based function middleware.""" execution_order: list[str] = [] @@ -902,7 +902,7 @@ async def final_handler(ctx: AgentRunContext) -> AgentResponse: assert result is not None assert execution_order == ["class_before", "function_before", "handler", "function_after", "class_after"] - async def test_mixed_function_middleware(self, mock_function: AIFunction[Any, Any]) -> None: + async def test_mixed_function_middleware(self, mock_function: FunctionTool[Any, Any]) -> None: """Test mixed class and function-based function middleware.""" execution_order: list[str] = [] @@ -1022,7 +1022,7 @@ async def final_handler(ctx: AgentRunContext) -> AgentResponse: ] assert execution_order == expected_order - async def test_function_middleware_execution_order(self, mock_function: AIFunction[Any, Any]) -> None: + async def test_function_middleware_execution_order(self, mock_function: FunctionTool[Any, Any]) -> None: """Test that multiple function middleware execute in registration order.""" execution_order: list[str] = [] @@ -1150,7 +1150,7 @@ async def final_handler(ctx: AgentRunContext) -> 
AgentResponse: result = await pipeline.execute(mock_agent, messages, context, final_handler) assert result is not None - async def test_function_context_validation(self, mock_function: AIFunction[Any, Any]) -> None: + async def test_function_context_validation(self, mock_function: FunctionTool[Any, Any]) -> None: """Test that function context contains expected data.""" class ContextValidationMiddleware(FunctionMiddleware): @@ -1498,7 +1498,7 @@ async def final_handler(ctx: AgentRunContext) -> AsyncIterable[AgentResponseUpda assert not handler_called assert context.result is None - async def test_function_middleware_no_next_no_execution(self, mock_function: AIFunction[Any, Any]) -> None: + async def test_function_middleware_no_next_no_execution(self, mock_function: FunctionTool[Any, Any]) -> None: """Test that when function middleware doesn't call next(), no execution happens.""" class FunctionTestArgs(BaseModel): @@ -1672,9 +1672,9 @@ def mock_agent() -> AgentProtocol: @pytest.fixture -def mock_function() -> AIFunction[Any, Any]: +def mock_function() -> FunctionTool[Any, Any]: """Mock function for testing.""" - function = MagicMock(spec=AIFunction[Any, Any]) + function = MagicMock(spec=FunctionTool[Any, Any]) function.name = "test_function" return function diff --git a/python/packages/core/tests/core/test_middleware_context_result.py b/python/packages/core/tests/core/test_middleware_context_result.py index f939a0f409..0f3b506fab 100644 --- a/python/packages/core/tests/core/test_middleware_context_result.py +++ b/python/packages/core/tests/core/test_middleware_context_result.py @@ -24,7 +24,7 @@ FunctionMiddleware, FunctionMiddlewarePipeline, ) -from agent_framework._tools import AIFunction +from agent_framework._tools import FunctionTool from .conftest import MockChatClient @@ -103,7 +103,7 @@ async def final_handler(ctx: AgentRunContext) -> AsyncIterable[AgentResponseUpda assert updates[0].text == "overridden" assert updates[1].text == " stream" - async def 
test_function_middleware_result_override(self, mock_function: AIFunction[Any, Any]) -> None: + async def test_function_middleware_result_override(self, mock_function: FunctionTool[Any, Any]) -> None: """Test that function middleware can override result.""" override_result = "overridden function result" @@ -260,7 +260,7 @@ async def final_handler(ctx: AgentRunContext) -> AgentResponse: assert execute_result.messages[0].text == "executed response" assert handler_called - async def test_function_middleware_conditional_no_next(self, mock_function: AIFunction[Any, Any]) -> None: + async def test_function_middleware_conditional_no_next(self, mock_function: FunctionTool[Any, Any]) -> None: """Test that when function middleware conditionally doesn't call next(), no execution happens.""" class ConditionalNoNextFunctionMiddleware(FunctionMiddleware): @@ -345,7 +345,7 @@ async def final_handler(ctx: AgentRunContext) -> AgentResponse: assert observed_responses[0].messages[0].text == "executed response" assert result == observed_responses[0] - async def test_function_middleware_result_observability(self, mock_function: AIFunction[Any, Any]) -> None: + async def test_function_middleware_result_observability(self, mock_function: FunctionTool[Any, Any]) -> None: """Test that middleware can observe function result after execution.""" observed_results: list[str] = [] @@ -414,7 +414,7 @@ async def final_handler(ctx: AgentRunContext) -> AgentResponse: assert result is not None assert result.messages[0].text == "modified after execution" - async def test_function_middleware_post_execution_override(self, mock_function: AIFunction[Any, Any]) -> None: + async def test_function_middleware_post_execution_override(self, mock_function: FunctionTool[Any, Any]) -> None: """Test that middleware can override function result after observing execution.""" class PostExecutionOverrideMiddleware(FunctionMiddleware): @@ -456,8 +456,8 @@ def mock_agent() -> AgentProtocol: @pytest.fixture -def 
mock_function() -> AIFunction[Any, Any]: +def mock_function() -> FunctionTool[Any, Any]: """Mock function for testing.""" - function = MagicMock(spec=AIFunction[Any, Any]) + function = MagicMock(spec=FunctionTool[Any, Any]) function.name = "test_function" return function diff --git a/python/packages/core/tests/core/test_middleware_with_agent.py b/python/packages/core/tests/core/test_middleware_with_agent.py index 445f13596a..a9f410b609 100644 --- a/python/packages/core/tests/core/test_middleware_with_agent.py +++ b/python/packages/core/tests/core/test_middleware_with_agent.py @@ -14,6 +14,7 @@ ChatResponse, ChatResponseUpdate, Content, + FunctionTool, Role, agent_middleware, chat_middleware, @@ -214,9 +215,13 @@ def test_function(text: str) -> str: execution_order.append("function_called") return "test_result" + test_function_tool = FunctionTool( + func=test_function, name="test_function", description="Test function", approval_mode="never_require" + ) + # Create ChatAgent with function middleware and test function middleware = PreTerminationFunctionMiddleware() - agent = ChatAgent(chat_client=chat_client, middleware=[middleware], tools=[test_function]) + agent = ChatAgent(chat_client=chat_client, middleware=[middleware], tools=[test_function_tool]) # Execute the agent await agent.run(messages) @@ -271,9 +276,13 @@ def test_function(text: str) -> str: execution_order.append("function_called") return "test_result" + test_function_tool = FunctionTool( + func=test_function, name="test_function", description="Test function", approval_mode="never_require" + ) + # Create ChatAgent with function middleware and test function middleware = PostTerminationFunctionMiddleware() - agent = ChatAgent(chat_client=chat_client, middleware=[middleware], tools=[test_function]) + agent = ChatAgent(chat_client=chat_client, middleware=[middleware], tools=[test_function_tool]) # Execute the agent response = await agent.run(messages) @@ -518,11 +527,19 @@ async def 
function_function_middleware( # region Tool Functions for Testing -def sample_tool_function(location: str) -> str: +def _sample_tool_function_impl(location: str) -> str: """A simple tool function for middleware testing.""" return f"Weather in {location}: sunny" +sample_tool_function = FunctionTool( + func=_sample_tool_function_impl, + name="sample_tool_function", + description="A simple tool function for middleware testing.", + approval_mode="never_require", +) + + # region ChatAgent Function Middleware Tests with Tools @@ -1157,6 +1174,10 @@ def custom_tool(message: str) -> str: execution_log.append("tool_executed") return f"Tool response: {message}" + custom_tool_wrapped = FunctionTool( + func=custom_tool, name="custom_tool", description="Custom tool", approval_mode="never_require" + ) + # Set up mock to return a function call first, then a regular response function_call_response = ChatResponse( messages=[ @@ -1179,7 +1200,7 @@ def custom_tool(message: str) -> str: agent = ChatAgent( chat_client=chat_client, middleware=[AgentLevelAgentMiddleware(), AgentLevelFunctionMiddleware()], - tools=[custom_tool], + tools=[custom_tool_wrapped], ) # Execute with run-level middleware @@ -1246,6 +1267,10 @@ def custom_tool(message: str) -> str: execution_order.append("tool_executed") return f"Tool response: {message}" + custom_tool_wrapped = FunctionTool( + func=custom_tool, name="custom_tool", description="Custom tool", approval_mode="never_require" + ) + # Set up mock to return a function call first, then a regular response function_call_response = ChatResponse( messages=[ @@ -1268,7 +1293,7 @@ def custom_tool(message: str) -> str: agent = ChatAgent( chat_client=chat_client, middleware=[matching_agent_middleware, matching_function_middleware], - tools=[custom_tool], + tools=[custom_tool_wrapped], ) response = await agent.run([ChatMessage(role=Role.USER, text="test")]) @@ -1313,6 +1338,10 @@ def custom_tool(message: str) -> str: execution_order.append("tool_executed") return 
f"Tool response: {message}" + custom_tool_wrapped = FunctionTool( + func=custom_tool, name="custom_tool", description="Custom tool", approval_mode="never_require" + ) + # Set up mock to return a function call first, then a regular response function_call_response = ChatResponse( messages=[ @@ -1333,7 +1362,9 @@ def custom_tool(message: str) -> str: # Should work - relies on decorator agent = ChatAgent( - chat_client=chat_client, middleware=[decorator_only_agent, decorator_only_function], tools=[custom_tool] + chat_client=chat_client, + middleware=[decorator_only_agent, decorator_only_function], + tools=[custom_tool_wrapped], ) response = await agent.run([ChatMessage(role=Role.USER, text="test")]) @@ -1363,6 +1394,10 @@ def custom_tool(message: str) -> str: execution_order.append("tool_executed") return f"Tool response: {message}" + custom_tool_wrapped = FunctionTool( + func=custom_tool, name="custom_tool", description="Custom tool", approval_mode="never_require" + ) + # Set up mock to return a function call first, then a regular response function_call_response = ChatResponse( messages=[ @@ -1383,7 +1418,7 @@ def custom_tool(message: str) -> str: # Should work - relies on type annotations agent = ChatAgent( - chat_client=chat_client, middleware=[type_only_agent, type_only_function], tools=[custom_tool] + chat_client=chat_client, middleware=[type_only_agent, type_only_function], tools=[custom_tool_wrapped] ) response = await agent.run([ChatMessage(role=Role.USER, text="test")]) diff --git a/python/packages/core/tests/core/test_middleware_with_chat.py b/python/packages/core/tests/core/test_middleware_with_chat.py index a24d0e8037..ef2f6f3c09 100644 --- a/python/packages/core/tests/core/test_middleware_with_chat.py +++ b/python/packages/core/tests/core/test_middleware_with_chat.py @@ -11,6 +11,7 @@ ChatResponse, Content, FunctionInvocationContext, + FunctionTool, Role, chat_middleware, function_middleware, @@ -337,6 +338,13 @@ def sample_tool(location: str) -> str: 
"""Get weather for a location.""" return f"Weather in {location}: sunny" + sample_tool_wrapped = FunctionTool( + func=sample_tool, + name="sample_tool", + description="Get weather for a location", + approval_mode="never_require", + ) + # Create function-invocation enabled chat client chat_client = use_chat_middleware(use_function_invocation(MockBaseChatClient))() @@ -366,7 +374,7 @@ def sample_tool(location: str) -> str: # Execute the chat client directly with tools - this should trigger function invocation and middleware messages = [ChatMessage(role=Role.USER, text="What's the weather in San Francisco?")] - response = await chat_client.get_response(messages, options={"tools": [sample_tool]}) + response = await chat_client.get_response(messages, options={"tools": [sample_tool_wrapped]}) # Verify response assert response is not None @@ -396,6 +404,13 @@ def sample_tool(location: str) -> str: """Get weather for a location.""" return f"Weather in {location}: sunny" + sample_tool_wrapped = FunctionTool( + func=sample_tool, + name="sample_tool", + description="Get weather for a location", + approval_mode="never_require", + ) + # Create function-invocation enabled chat client chat_client = use_function_invocation(MockBaseChatClient)() @@ -423,7 +438,7 @@ def sample_tool(location: str) -> str: # Execute the chat client directly with run-level middleware and tools messages = [ChatMessage(role=Role.USER, text="What's the weather in New York?")] response = await chat_client.get_response( - messages, options={"tools": [sample_tool]}, middleware=[run_level_function_middleware] + messages, options={"tools": [sample_tool_wrapped]}, middleware=[run_level_function_middleware] ) # Verify response diff --git a/python/packages/core/tests/core/test_observability.py b/python/packages/core/tests/core/test_observability.py index 88245cfa52..18609d78f6 100644 --- a/python/packages/core/tests/core/test_observability.py +++ b/python/packages/core/tests/core/test_observability.py @@ -21,8 
+21,8 @@ ChatResponseUpdate, Role, UsageDetails, - ai_function, prepend_agent_framework_to_user_agent, + tool, ) from agent_framework.exceptions import AgentInitializationError, ChatClientInitializationError from agent_framework.observability import ( @@ -606,7 +606,7 @@ async def test_function_call_with_error_handling(span_exporter: InMemorySpanExpo """Test that function call errors are properly captured in telemetry.""" # Create a function that raises an error using the decorator - @ai_function(name="failing_function", description="A function that fails") + @tool(name="failing_function", description="A function that fails") async def failing_function(param: str) -> str: raise ValueError("Function execution failed") diff --git a/python/packages/core/tests/core/test_tools.py b/python/packages/core/tests/core/test_tools.py index ee6103a613..0f380ff06f 100644 --- a/python/packages/core/tests/core/test_tools.py +++ b/python/packages/core/tests/core/test_tools.py @@ -8,13 +8,13 @@ from pydantic import BaseModel, ValidationError from agent_framework import ( - AIFunction, Content, + FunctionTool, HostedCodeInterpreterTool, HostedImageGenerationTool, HostedMCPTool, ToolProtocol, - ai_function, + tool, ) from agent_framework._tools import ( _build_pydantic_model_from_json_schema, @@ -24,19 +24,19 @@ from agent_framework.exceptions import ToolException from agent_framework.observability import OtelAttr -# region AIFunction and ai_function decorator tests +# region FunctionTool and tool decorator tests -def test_ai_function_decorator(): - """Test the ai_function decorator.""" +def test_tool_decorator(): + """Test the tool decorator.""" - @ai_function(name="test_tool", description="A test tool") + @tool(name="test_tool", description="A test tool") def test_tool(x: int, y: int) -> int: """A simple function that adds two numbers.""" return x + y assert isinstance(test_tool, ToolProtocol) - assert isinstance(test_tool, AIFunction) + assert isinstance(test_tool, FunctionTool) 
assert test_tool.name == "test_tool" assert test_tool.description == "A test tool" assert test_tool.parameters() == { @@ -48,16 +48,16 @@ def test_tool(x: int, y: int) -> int: assert test_tool(1, 2) == 3 -def test_ai_function_decorator_without_args(): - """Test the ai_function decorator.""" +def test_tool_decorator_without_args(): + """Test the tool decorator.""" - @ai_function + @tool def test_tool(x: int, y: int) -> int: """A simple function that adds two numbers.""" return x + y assert isinstance(test_tool, ToolProtocol) - assert isinstance(test_tool, AIFunction) + assert isinstance(test_tool, FunctionTool) assert test_tool.name == "test_tool" assert test_tool.description == "A simple function that adds two numbers." assert test_tool.parameters() == { @@ -67,18 +67,19 @@ def test_tool(x: int, y: int) -> int: "type": "object", } assert test_tool(1, 2) == 3 + assert test_tool.approval_mode == "never_require" -def test_ai_function_without_args(): - """Test the ai_function decorator.""" +def test_tool_without_args(): + """Test the tool decorator.""" - @ai_function + @tool def test_tool() -> int: """A simple function that adds two numbers.""" return 1 + 2 assert isinstance(test_tool, ToolProtocol) - assert isinstance(test_tool, AIFunction) + assert isinstance(test_tool, FunctionTool) assert test_tool.name == "test_tool" assert test_tool.description == "A simple function that adds two numbers." 
assert test_tool.parameters() == { @@ -89,16 +90,16 @@ def test_tool() -> int: assert test_tool() == 3 -async def test_ai_function_decorator_with_async(): - """Test the ai_function decorator with an async function.""" +async def test_tool_decorator_with_async(): + """Test the tool decorator with an async function.""" - @ai_function(name="async_test_tool", description="An async test tool") + @tool(name="async_test_tool", description="An async test tool") async def async_test_tool(x: int, y: int) -> int: """An async function that adds two numbers.""" return x + y assert isinstance(async_test_tool, ToolProtocol) - assert isinstance(async_test_tool, AIFunction) + assert isinstance(async_test_tool, FunctionTool) assert async_test_tool.name == "async_test_tool" assert async_test_tool.description == "An async test tool" assert async_test_tool.parameters() == { @@ -110,11 +111,11 @@ async def async_test_tool(x: int, y: int) -> int: assert (await async_test_tool(1, 2)) == 3 -def test_ai_function_decorator_in_class(): - """Test the ai_function decorator.""" +def test_tool_decorator_in_class(): + """Test the tool decorator.""" class my_tools: - @ai_function(name="test_tool", description="A test tool") + @tool(name="test_tool", description="A test tool") def test_tool(self, x: int, y: int) -> int: """A simple function that adds two numbers.""" return x + y @@ -122,7 +123,7 @@ def test_tool(self, x: int, y: int) -> int: test_tool = my_tools().test_tool assert isinstance(test_tool, ToolProtocol) - assert isinstance(test_tool, AIFunction) + assert isinstance(test_tool, FunctionTool) assert test_tool.name == "test_tool" assert test_tool.description == "A test tool" assert test_tool.parameters() == { @@ -134,15 +135,15 @@ def test_tool(self, x: int, y: int) -> int: assert test_tool(1, 2) == 3 -def test_ai_function_with_literal_type_parameter(): - """Test ai_function decorator with Literal type parameter (issue #2891).""" +def test_tool_with_literal_type_parameter(): + """Test tool 
decorator with Literal type parameter (issue #2891).""" - @ai_function + @tool def search_flows(category: Literal["Data", "Security", "Network"], issue: str) -> str: """Search flows by category.""" return f"{category}: {issue}" - assert isinstance(search_flows, AIFunction) + assert isinstance(search_flows, FunctionTool) schema = search_flows.parameters() assert schema == { "properties": { @@ -157,18 +158,18 @@ def search_flows(category: Literal["Data", "Security", "Network"], issue: str) - assert search_flows("Data", "test issue") == "Data: test issue" -def test_ai_function_with_literal_type_in_class_method(): - """Test ai_function decorator with Literal type parameter in a class method (issue #2891).""" +def test_tool_with_literal_type_in_class_method(): + """Test tool decorator with Literal type parameter in a class method (issue #2891).""" class MyTools: - @ai_function + @tool def search_flows(self, category: Literal["Data", "Security", "Network"], issue: str) -> str: """Search flows by category.""" return f"{category}: {issue}" tools = MyTools() search_tool = tools.search_flows - assert isinstance(search_tool, AIFunction) + assert isinstance(search_tool, FunctionTool) schema = search_tool.parameters() assert schema == { "properties": { @@ -183,15 +184,15 @@ def search_flows(self, category: Literal["Data", "Security", "Network"], issue: assert search_tool("Security", "test issue") == "Security: test issue" -def test_ai_function_with_literal_int_type(): - """Test ai_function decorator with Literal int type parameter.""" +def test_tool_with_literal_int_type(): + """Test tool decorator with Literal int type parameter.""" - @ai_function + @tool def set_priority(priority: Literal[1, 2, 3], task: str) -> str: """Set priority for a task.""" return f"Priority {priority}: {task}" - assert isinstance(set_priority, AIFunction) + assert isinstance(set_priority, FunctionTool) schema = set_priority.parameters() assert schema == { "properties": { @@ -205,10 +206,10 @@ def 
set_priority(priority: Literal[1, 2, 3], task: str) -> str: assert set_priority(1, "important task") == "Priority 1: important task" -def test_ai_function_with_literal_and_annotated(): - """Test ai_function decorator with Literal type combined with Annotated for description.""" +def test_tool_with_literal_and_annotated(): + """Test tool decorator with Literal type combined with Annotated for description.""" - @ai_function + @tool def categorize( category: Annotated[Literal["A", "B", "C"], "The category to assign"], name: str, @@ -216,14 +217,14 @@ def categorize( """Categorize an item.""" return f"{category}: {name}" - assert isinstance(categorize, AIFunction) + assert isinstance(categorize, FunctionTool) schema = categorize.parameters() # Literal type inside Annotated should preserve enum values assert schema["properties"]["category"]["enum"] == ["A", "B", "C"] assert categorize("A", "test") == "A: test" -async def test_ai_function_decorator_shared_state(): +async def test_tool_decorator_shared_state(): """Test that decorated methods maintain shared state across multiple calls and tool usage.""" class StatefulCounter: @@ -233,20 +234,20 @@ def __init__(self, initial_value: int = 0): self.counter = initial_value self.operation_log: list[str] = [] - @ai_function(name="increment", description="Increment the counter") + @tool(name="increment", description="Increment the counter") def increment(self, amount: int) -> str: """Increment the counter by the given amount.""" self.counter += amount self.operation_log.append(f"increment({amount})") return f"Counter incremented by {amount}. 
New value: {self.counter}" - @ai_function(name="get_value", description="Get the current counter value") + @tool(name="get_value", description="Get the current counter value") def get_value(self) -> str: """Get the current counter value.""" self.operation_log.append("get_value()") return f"Current counter value: {self.counter}" - @ai_function(name="multiply", description="Multiply the counter") + @tool(name="multiply", description="Multiply the counter") def multiply(self, factor: int) -> str: """Multiply the counter by the given factor.""" self.counter *= factor @@ -261,10 +262,10 @@ def multiply(self, factor: int) -> str: get_value_tool = counter_instance.get_value multiply_tool = counter_instance.multiply - # Verify they are AIFunction instances - assert isinstance(increment_tool, AIFunction) - assert isinstance(get_value_tool, AIFunction) - assert isinstance(multiply_tool, AIFunction) + # Verify they are FunctionTool instances + assert isinstance(increment_tool, FunctionTool) + assert isinstance(get_value_tool, FunctionTool) + assert isinstance(multiply_tool, FunctionTool) # Tool 1 (increment) is used result1 = increment_tool(5) @@ -329,10 +330,10 @@ def multiply(self, factor: int) -> str: assert counter_instance.counter == 60 -async def test_ai_function_invoke_telemetry_enabled(span_exporter: InMemorySpanExporter): - """Test the ai_function invoke method with telemetry enabled.""" +async def test_tool_invoke_telemetry_enabled(span_exporter: InMemorySpanExporter): + """Test the tool invoke method with telemetry enabled.""" - @ai_function( + @tool( name="telemetry_test_tool", description="A test tool for telemetry", ) @@ -373,10 +374,10 @@ def telemetry_test_tool(x: int, y: int) -> int: @pytest.mark.parametrize("enable_sensitive_data", [False], indirect=True) -async def test_ai_function_invoke_telemetry_sensitive_disabled(span_exporter: InMemorySpanExporter): - """Test the ai_function invoke method with telemetry enabled.""" +async def 
test_tool_invoke_telemetry_sensitive_disabled(span_exporter: InMemorySpanExporter): + """Test the tool invoke method with telemetry enabled.""" - @ai_function( + @tool( name="telemetry_test_tool", description="A test tool for telemetry", ) @@ -416,10 +417,10 @@ def telemetry_test_tool(x: int, y: int) -> int: assert attributes[OtelAttr.TOOL_CALL_ID] == "test_call_id" -async def test_ai_function_invoke_ignores_additional_kwargs() -> None: - """Ensure ai_function tools drop unknown kwargs when invoked with validated arguments.""" +async def test_tool_invoke_ignores_additional_kwargs() -> None: + """Ensure tools drop unknown kwargs when invoked with validated arguments.""" - @ai_function + @tool async def simple_tool(message: str) -> str: """Echo tool.""" return message.upper() @@ -436,10 +437,10 @@ async def simple_tool(message: str) -> str: assert result == "HELLO WORLD" -async def test_ai_function_invoke_telemetry_with_pydantic_args(span_exporter: InMemorySpanExporter): - """Test the ai_function invoke method with Pydantic model arguments.""" +async def test_tool_invoke_telemetry_with_pydantic_args(span_exporter: InMemorySpanExporter): + """Test the tool invoke method with Pydantic model arguments.""" - @ai_function( + @tool( name="pydantic_test_tool", description="A test tool with Pydantic args", ) @@ -470,10 +471,10 @@ def pydantic_test_tool(x: int, y: int) -> int: assert span.attributes[OtelAttr.TOOL_ARGUMENTS] == '{"x":5,"y":10}' -async def test_ai_function_invoke_telemetry_with_exception(span_exporter: InMemorySpanExporter): - """Test the ai_function invoke method with telemetry when an exception occurs.""" +async def test_tool_invoke_telemetry_with_exception(span_exporter: InMemorySpanExporter): + """Test the tool invoke method with telemetry when an exception occurs.""" - @ai_function( + @tool( name="exception_test_tool", description="A test tool that raises an exception", ) @@ -507,10 +508,10 @@ def exception_test_tool(x: int, y: int) -> int: assert 
attributes[OtelAttr.ERROR_TYPE] == ValueError.__name__ -async def test_ai_function_invoke_telemetry_async_function(span_exporter: InMemorySpanExporter): - """Test the ai_function invoke method with telemetry on async function.""" +async def test_tool_invoke_telemetry_async_function(span_exporter: InMemorySpanExporter): + """Test the tool invoke method with telemetry on async function.""" - @ai_function( + @tool( name="async_telemetry_test", description="An async test tool for telemetry", ) @@ -544,10 +545,10 @@ async def async_telemetry_test(x: int, y: int) -> int: assert attributes[OtelAttr.MEASUREMENT_FUNCTION_TAG_NAME] == "async_telemetry_test" -async def test_ai_function_invoke_invalid_pydantic_args(): - """Test the ai_function invoke method with invalid Pydantic model arguments.""" +async def test_tool_invoke_invalid_pydantic_args(): + """Test the tool invoke method with invalid Pydantic model arguments.""" - @ai_function(name="invalid_args_test", description="A test tool for invalid args") + @tool(name="invalid_args_test", description="A test tool for invalid args") def invalid_args_test(x: int, y: int) -> int: """A function for testing invalid Pydantic args.""" return x + y @@ -564,20 +565,18 @@ class WrongModel(BaseModel): await invalid_args_test.invoke(arguments=wrong_args) -def test_ai_function_serialization(): - """Test AIFunction serialization and deserialization.""" +def test_tool_serialization(): + """Test FunctionTool serialization and deserialization.""" def serialize_test(x: int, y: int) -> int: """A function for testing serialization.""" return x - y - serialize_test_ai_function = ai_function(name="serialize_test", description="A test tool for serialization")( - serialize_test - ) + serialize_test_tool = tool(name="serialize_test", description="A test tool for serialization")(serialize_test) # Serialize to dict - tool_dict = serialize_test_ai_function.to_dict() - assert tool_dict["type"] == "ai_function" + tool_dict = serialize_test_tool.to_dict() 
+ assert tool_dict["type"] == "function_tool" assert tool_dict["name"] == "serialize_test" assert tool_dict["description"] == "A test tool for serialization" assert tool_dict["input_model"] == { @@ -588,21 +587,21 @@ def serialize_test(x: int, y: int) -> int: } # Deserialize from dict - restored_tool = AIFunction.from_dict(tool_dict, dependencies={"ai_function": {"func": serialize_test}}) - assert isinstance(restored_tool, AIFunction) + restored_tool = FunctionTool.from_dict(tool_dict, dependencies={"function_tool": {"func": serialize_test}}) + assert isinstance(restored_tool, FunctionTool) assert restored_tool.name == "serialize_test" assert restored_tool.description == "A test tool for serialization" - assert restored_tool.parameters() == serialize_test_ai_function.parameters() + assert restored_tool.parameters() == serialize_test_tool.parameters() assert restored_tool(10, 4) == 6 # Deserialize from dict with instance name - restored_tool_2 = AIFunction.from_dict( - tool_dict, dependencies={"ai_function": {"name:serialize_test": {"func": serialize_test}}} + restored_tool_2 = FunctionTool.from_dict( + tool_dict, dependencies={"function_tool": {"name:serialize_test": {"func": serialize_test}}} ) - assert isinstance(restored_tool_2, AIFunction) + assert isinstance(restored_tool_2, FunctionTool) assert restored_tool_2.name == "serialize_test" assert restored_tool_2.description == "A test tool for serialization" - assert restored_tool_2.parameters() == serialize_test_ai_function.parameters() + assert restored_tool_2.parameters() == serialize_test_tool.parameters() assert restored_tool_2(10, 4) == 6 @@ -979,13 +978,17 @@ async def get_streaming_response(self, messages, **kwargs): return MockChatClient() -@ai_function(name="no_approval_tool", description="Tool that doesn't require approval") +@tool( + name="no_approval_tool", + description="Tool that doesn't require approval", + approval_mode="never_require", +) def no_approval_tool(x: int) -> int: """A tool that 
doesn't require approval.""" return x * 2 -@ai_function( +@tool( name="requires_approval_tool", description="Tool that requires approval", approval_mode="always_require", @@ -1451,10 +1454,10 @@ async def mock_get_streaming_response(self, messages, **kwargs): assert all(c.type == "function_approval_request" for c in updates[2].contents) -async def test_ai_function_with_kwargs_injection(): - """Test that ai_function correctly handles kwargs injection and hides them from schema.""" +async def test_tool_with_kwargs_injection(): + """Test that tool correctly handles kwargs injection and hides them from schema.""" - @ai_function + @tool def tool_with_kwargs(x: int, **kwargs: Any) -> str: """A tool that accepts kwargs.""" user_id = kwargs.get("user_id", "unknown") @@ -1647,11 +1650,11 @@ def test_build_pydantic_model_from_json_schema_array_of_objects_issue(): # CRITICAL: Validate using the same methods that actual chat clients use # This is what would actually be sent to the LLM - # Create an AIFunction wrapper to access the client-facing APIs + # Create a FunctionTool wrapper to access the client-facing APIs def dummy_func(**kwargs): return kwargs - test_func = AIFunction( + test_func = FunctionTool( func=dummy_func, name="create_sales_order", description="Create a sales order", diff --git a/python/packages/core/tests/core/test_types.py b/python/packages/core/tests/core/test_types.py index 3e5317fdae..90cb912b3d 100644 --- a/python/packages/core/tests/core/test_types.py +++ b/python/packages/core/tests/core/test_types.py @@ -24,10 +24,10 @@ ToolMode, ToolProtocol, UsageDetails, - ai_function, detect_media_type_from_base64, merge_chat_options, prepare_function_call_results, + tool, ) from agent_framework.exceptions import ContentError @@ -51,10 +51,10 @@ def parameters(self) -> dict[str, Any]: @fixture -def ai_function_tool() -> ToolProtocol: +def tool_tool() -> ToolProtocol: """Returns a executable ToolProtocol.""" - @ai_function + @tool def simple_function(x: int, y: 
int) -> int: """A simple function that adds two numbers.""" return x + y @@ -1017,13 +1017,13 @@ def test_chat_options_tool_choice_validation(): validate_tool_mode({"mode": "auto", "required_function_name": "should_not_be_here"}) -def test_chat_options_merge(ai_function_tool, ai_tool) -> None: +def test_chat_options_merge(tool_tool, ai_tool) -> None: """Test merge_chat_options utility function.""" from agent_framework import merge_chat_options options1: ChatOptions = { "model_id": "gpt-4o", - "tools": [ai_function_tool], + "tools": [tool_tool], "logit_bias": {"x": 1}, "metadata": {"a": "b"}, } @@ -1034,7 +1034,7 @@ def test_chat_options_merge(ai_function_tool, ai_tool) -> None: options3 = merge_chat_options(options1, options2) assert options3.get("model_id") == "gpt-4.1" - assert options3.get("tools") == [ai_function_tool, ai_tool] # tools are combined + assert options3.get("tools") == [tool_tool, ai_tool] # tools are combined assert options3.get("logit_bias") == {"x": 1} # base value preserved assert options3.get("metadata") == {"a": "b"} # base value preserved diff --git a/python/packages/core/tests/openai/test_assistant_provider.py b/python/packages/core/tests/openai/test_assistant_provider.py index bb29691a07..90b077c941 100644 --- a/python/packages/core/tests/openai/test_assistant_provider.py +++ b/python/packages/core/tests/openai/test_assistant_provider.py @@ -8,7 +8,7 @@ from openai.types.beta.assistant import Assistant from pydantic import BaseModel, Field -from agent_framework import ChatAgent, HostedCodeInterpreterTool, HostedFileSearchTool, ai_function, normalize_tools +from agent_framework import ChatAgent, HostedCodeInterpreterTool, HostedFileSearchTool, normalize_tools, tool from agent_framework.exceptions import ServiceInitializationError from agent_framework.openai import OpenAIAssistantProvider from agent_framework.openai._shared import from_assistant_tools, to_assistant_tools @@ -244,11 +244,11 @@ async def 
test_create_agent_with_function_tools(self, mock_async_openai: MagicMo assert call_kwargs["tools"][0]["type"] == "function" assert call_kwargs["tools"][0]["function"]["name"] == "get_weather" - async def test_create_agent_with_ai_function(self, mock_async_openai: MagicMock) -> None: - """Test assistant creation with AIFunction.""" + async def test_create_agent_with_tool(self, mock_async_openai: MagicMock) -> None: + """Test assistant creation with FunctionTool.""" provider = OpenAIAssistantProvider(mock_async_openai) - @ai_function + @tool def my_function(x: int) -> int: """Double a number.""" return x * 2 @@ -537,10 +537,10 @@ def test_as_agent_hosted_tools_not_required(self, mock_async_openai: MagicMock) class TestToolConversion: """Tests for tool conversion utilities (shared functions).""" - def test_to_assistant_tools_ai_function(self) -> None: - """Test AIFunction conversion to API format.""" + def test_to_assistant_tools_tool(self) -> None: + """Test FunctionTool conversion to API format.""" - @ai_function + @tool def test_func(x: int) -> int: """Test function.""" return x @@ -555,7 +555,7 @@ def test_func(x: int) -> int: def test_to_assistant_tools_callable(self) -> None: """Test raw callable conversion via normalize_tools.""" - # normalize_tools converts callables to AIFunction + # normalize_tools converts callables to FunctionTool normalized = normalize_tools([get_weather]) api_tools = to_assistant_tools(normalized) @@ -666,12 +666,12 @@ def test_validate_hosted_tools_not_required(self, mock_async_openai: MagicMock) # Should not raise provider._validate_function_tools(assistant_tools, None) # type: ignore[reportPrivateUsage] - def test_validate_with_ai_function(self, mock_async_openai: MagicMock) -> None: - """Test validation with AIFunction.""" + def test_validate_with_tool(self, mock_async_openai: MagicMock) -> None: + """Test validation with FunctionTool.""" provider = OpenAIAssistantProvider(mock_async_openai) assistant_tools = 
[create_function_tool("get_weather")] - wrapped = ai_function(get_weather) + wrapped = tool(get_weather) # Should not raise provider._validate_function_tools(assistant_tools, [wrapped]) # type: ignore[reportPrivateUsage] @@ -789,6 +789,7 @@ async def test_create_agent_with_function_tools_integration(self) -> None: """Integration test with function tools.""" provider = OpenAIAssistantProvider() + @tool(approval_mode="never_require") def get_current_time() -> str: """Get the current time.""" from datetime import datetime diff --git a/python/packages/core/tests/openai/test_openai_assistants_client.py b/python/packages/core/tests/openai/test_openai_assistants_client.py index a35c554e1d..331dea2579 100644 --- a/python/packages/core/tests/openai/test_openai_assistants_client.py +++ b/python/packages/core/tests/openai/test_openai_assistants_client.py @@ -23,7 +23,7 @@ HostedCodeInterpreterTool, HostedFileSearchTool, Role, - ai_function, + tool, ) from agent_framework.exceptions import ServiceInitializationError from agent_framework.openai import OpenAIAssistantsClient @@ -709,13 +709,13 @@ def test_prepare_options_basic(mock_async_openai: MagicMock) -> None: assert tool_results is None -def test_prepare_options_with_ai_function_tool(mock_async_openai: MagicMock) -> None: - """Test _prepare_options with AIFunction tool.""" +def test_prepare_options_with_tool_tool(mock_async_openai: MagicMock) -> None: + """Test _prepare_options with a FunctionTool.""" chat_client = create_test_openai_assistants_client(mock_async_openai) # Create a simple function for testing and decorate it - @ai_function + @tool(approval_mode="never_require") def test_function(query: str) -> str: """A test function.""" return f"Result for {query}" @@ -998,6 +998,7 @@ def test_update_agent_name_and_description_none(mock_async_openai: MagicMock) -> assert chat_client.assistant_name is None +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get 
the weather for.")], ) -> str: diff --git a/python/packages/core/tests/openai/test_openai_chat_client.py b/python/packages/core/tests/openai/test_openai_chat_client.py index a4ffbde03d..44e9884471 100644 --- a/python/packages/core/tests/openai/test_openai_chat_client.py +++ b/python/packages/core/tests/openai/test_openai_chat_client.py @@ -19,8 +19,8 @@ Content, HostedWebSearchTool, ToolProtocol, - ai_function, prepare_function_call_results, + tool, ) from agent_framework.exceptions import ServiceInitializationError, ServiceResponseException from agent_framework.openai import OpenAIChatClient @@ -175,7 +175,7 @@ def test_unsupported_tool_handling(openai_unit_test_env: dict[str, str]) -> None """Test that unsupported tool types are handled correctly.""" client = OpenAIChatClient() - # Create a mock ToolProtocol that's not an AIFunction + # Create a mock ToolProtocol that's not a FunctionTool unsupported_tool = MagicMock(spec=ToolProtocol) unsupported_tool.__class__.__name__ = "UnsupportedAITool" @@ -189,7 +189,7 @@ def test_unsupported_tool_handling(openai_unit_test_env: dict[str, str]) -> None assert result["tools"] == [dict_tool] -@ai_function +@tool(approval_mode="never_require") def get_story_text() -> str: """Returns a story about Emily and David.""" return ( @@ -200,7 +200,7 @@ def get_story_text() -> str: ) -@ai_function +@tool(approval_mode="never_require") def get_weather(location: str) -> str: """Get the current weather for a location.""" return f"The weather in {location} is sunny and 72°F." 
diff --git a/python/packages/core/tests/openai/test_openai_responses_client.py b/python/packages/core/tests/openai/test_openai_responses_client.py index 834af317c8..a5bc8ac45e 100644 --- a/python/packages/core/tests/openai/test_openai_responses_client.py +++ b/python/packages/core/tests/openai/test_openai_responses_client.py @@ -40,7 +40,7 @@ HostedMCPTool, HostedWebSearchTool, Role, - ai_function, + tool, ) from agent_framework.exceptions import ( ServiceInitializationError, @@ -96,7 +96,7 @@ async def delete_vector_store(client: OpenAIResponsesClient, file_id: str, vecto await client.client.files.delete(file_id=file_id) -@ai_function +@tool(approval_mode="never_require") async def get_weather(location: Annotated[str, "The location as a city name"]) -> str: """Get the current weather in a given location.""" # Implementation of the tool to get weather @@ -642,7 +642,7 @@ def test_response_content_creation_with_function_call() -> None: assert function_call.arguments == '{"location": "Seattle"}' -def test_prepare_content_for_openai_function_approval_response() -> None: +def test_prepare_content_for_opentool_approval_response() -> None: """Test _prepare_content_for_openai with function approval response content.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") diff --git a/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py b/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py index ac861d34b2..874f73fa5b 100644 --- a/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py +++ b/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py @@ -25,8 +25,8 @@ WorkflowBuilder, WorkflowContext, WorkflowOutputEvent, - ai_function, executor, + tool, use_function_invocation, ) @@ -132,7 +132,7 @@ async def test_agent_executor_emits_tool_calls_in_streaming_mode() -> None: assert "sunny" in events[3].data.contents[0].text -@ai_function(approval_mode="always_require") 
+@tool(approval_mode="always_require") def mock_tool_requiring_approval(query: str) -> str: """Mock tool that requires approval before execution.""" return f"Executed tool with query: {query}" diff --git a/python/packages/core/tests/workflow/test_workflow_kwargs.py b/python/packages/core/tests/workflow/test_workflow_kwargs.py index 14ec9f43ec..16a384bea3 100644 --- a/python/packages/core/tests/workflow/test_workflow_kwargs.py +++ b/python/packages/core/tests/workflow/test_workflow_kwargs.py @@ -20,7 +20,7 @@ SequentialBuilder, WorkflowRunState, WorkflowStatusEvent, - ai_function, + tool, ) from agent_framework._workflows._const import WORKFLOW_RUN_KWARGS_KEY @@ -28,7 +28,7 @@ _received_kwargs: list[dict[str, Any]] = [] -@ai_function +@tool(approval_mode="never_require") def tool_with_kwargs( action: Annotated[str, "The action to perform"], **kwargs: Any, diff --git a/python/packages/declarative/agent_framework_declarative/_loader.py b/python/packages/declarative/agent_framework_declarative/_loader.py index 6181be5f62..c7c06d2f0a 100644 --- a/python/packages/declarative/agent_framework_declarative/_loader.py +++ b/python/packages/declarative/agent_framework_declarative/_loader.py @@ -6,7 +6,6 @@ import yaml from agent_framework import ( - AIFunction, ChatAgent, ChatClientProtocol, Content, @@ -17,6 +16,9 @@ HostedWebSearchTool, ToolProtocol, ) +from agent_framework import ( + FunctionTool as AFFunctionTool, +) from agent_framework._tools import _create_model_from_json_schema # type: ignore from agent_framework.exceptions import AgentFrameworkException from dotenv import load_dotenv @@ -719,7 +721,7 @@ def _parse_tool(self, tool_resource: Tool) -> ToolProtocol: for binding in tool_resource.bindings: if binding.name and (func := self.bindings.get(binding.name)): break - return AIFunction( # type: ignore + return AFFunctionTool( # type: ignore name=tool_resource.name, # type: ignore description=tool_resource.description, # type: ignore 
input_model=tool_resource.parameters.to_json_schema() if tool_resource.parameters else None, diff --git a/python/packages/github_copilot/agent_framework_github_copilot/_agent.py b/python/packages/github_copilot/agent_framework_github_copilot/_agent.py index d38e50fe6b..933cddcfb3 100644 --- a/python/packages/github_copilot/agent_framework_github_copilot/_agent.py +++ b/python/packages/github_copilot/agent_framework_github_copilot/_agent.py @@ -19,7 +19,7 @@ Role, normalize_messages, ) -from agent_framework._tools import AIFunction, ToolProtocol +from agent_framework._tools import FunctionTool, ToolProtocol from agent_framework._types import normalize_tools from agent_framework.exceptions import ServiceException, ServiceInitializationError from copilot import CopilotClient, CopilotSession @@ -417,8 +417,8 @@ def _prepare_tools( for tool in tools: if isinstance(tool, ToolProtocol): match tool: - case AIFunction(): - copilot_tools.append(self._ai_function_to_copilot_tool(tool)) # type: ignore + case FunctionTool(): + copilot_tools.append(self._tool_to_copilot_tool(tool)) # type: ignore case _: logger.debug(f"Unsupported tool type: {type(tool)}") elif isinstance(tool, CopilotTool): @@ -426,8 +426,8 @@ def _prepare_tools( return copilot_tools - def _ai_function_to_copilot_tool(self, ai_func: AIFunction[Any, Any]) -> CopilotTool: - """Convert an AIFunction to a Copilot SDK tool.""" + def _tool_to_copilot_tool(self, ai_func: FunctionTool[Any, Any]) -> CopilotTool: + """Convert an FunctionTool to a Copilot SDK tool.""" async def handler(invocation: ToolInvocation) -> ToolResult: args = invocation.get("arguments", {}) diff --git a/python/packages/github_copilot/tests/test_github_copilot_agent.py b/python/packages/github_copilot/tests/test_github_copilot_agent.py index 22df440482..e0c8bc39c9 100644 --- a/python/packages/github_copilot/tests/test_github_copilot_agent.py +++ b/python/packages/github_copilot/tests/test_github_copilot_agent.py @@ -7,7 +7,14 @@ from uuid import 
uuid4 import pytest -from agent_framework import AgentResponse, AgentResponseUpdate, AgentThread, ChatMessage, Content, Role +from agent_framework import ( + AgentResponse, + AgentResponseUpdate, + AgentThread, + ChatMessage, + Content, + Role, +) from agent_framework.exceptions import ServiceException from copilot.generated.session_events import Data, SessionEvent, SessionEventType @@ -733,10 +740,10 @@ def test_mixed_tools_conversion( mock_client: MagicMock, ) -> None: """Test that mixed tool types are handled correctly.""" - from agent_framework._tools import ai_function + from agent_framework import tool from copilot.types import Tool as CopilotTool - @ai_function + @tool(approval_mode="never_require") def my_function(arg: str) -> str: """A function tool.""" return arg @@ -754,7 +761,7 @@ async def tool_handler(invocation: Any) -> Any: result = agent._prepare_tools([my_function, copilot_tool]) # type: ignore assert len(result) == 2 - # First tool is converted AIFunction + # First tool is converted FunctionTool assert result[0].name == "my_function" # Second tool is CopilotTool passthrough assert result[1] == copilot_tool diff --git a/python/packages/lab/tau2/agent_framework_lab_tau2/_tau2_utils.py b/python/packages/lab/tau2/agent_framework_lab_tau2/_tau2_utils.py index 6c64bb44be..647dd8884a 100644 --- a/python/packages/lab/tau2/agent_framework_lab_tau2/_tau2_utils.py +++ b/python/packages/lab/tau2/agent_framework_lab_tau2/_tau2_utils.py @@ -6,7 +6,7 @@ from typing import Any import numpy as np -from agent_framework._tools import AIFunction +from agent_framework._tools import FunctionTool from agent_framework._types import ChatMessage from loguru import logger from pydantic import BaseModel @@ -25,8 +25,8 @@ _original_set_state = Environment.set_state -def convert_tau2_tool_to_ai_function(tau2_tool: Tool) -> AIFunction[Any, Any]: - """Convert a tau2 Tool to an AIFunction for agent framework compatibility. 
+def convert_tau2_tool_to_function_tool(tau2_tool: Tool) -> FunctionTool[Any, Any]: + """Convert a tau2 Tool to a FunctionTool for agent framework compatibility. Creates a wrapper that preserves the tool's interface while ensuring results are deep-copied to prevent unintended mutations. @@ -37,7 +37,7 @@ def wrapped_func(**kwargs: Any) -> Any: # Deep copy to prevent mutations of returned data return result.model_copy(deep=True) if isinstance(result, BaseModel) else deepcopy(result) - return AIFunction( + return FunctionTool( name=tau2_tool.name, description=tau2_tool._get_description(), func=wrapped_func, diff --git a/python/packages/lab/tau2/agent_framework_lab_tau2/runner.py b/python/packages/lab/tau2/agent_framework_lab_tau2/runner.py index b514ebd60c..dddf7088b5 100644 --- a/python/packages/lab/tau2/agent_framework_lab_tau2/runner.py +++ b/python/packages/lab/tau2/agent_framework_lab_tau2/runner.py @@ -32,7 +32,7 @@ from ._message_utils import flip_messages, log_messages from ._sliding_window import SlidingWindowChatMessageStore -from ._tau2_utils import convert_agent_framework_messages_to_tau2_messages, convert_tau2_tool_to_ai_function +from ._tau2_utils import convert_agent_framework_messages_to_tau2_messages, convert_tau2_tool_to_function_tool __all__ = ["ASSISTANT_AGENT_ID", "ORCHESTRATOR_ID", "USER_SIMULATOR_ID", "TaskRunner"] @@ -179,9 +179,9 @@ def assistant_agent(self, assistant_chat_client: ChatClientProtocol) -> ChatAgen f"Environment has {len(env.get_tools())} tools: {', '.join([tool.name for tool in env.get_tools()])}" ) - # Convert tau2 tools to agent framework AIFunction format + # Convert tau2 tools to agent framework FunctionTool format # This bridges the gap between tau2's tool system and agent framework's expectations - ai_functions = [convert_tau2_tool_to_ai_function(tool) for tool in tools] + tools = [convert_tau2_tool_to_function_tool(tool) for tool in tools] # Combines general customer service behavior with specific policy guidelines 
assistant_system_prompt = f""" @@ -198,7 +198,7 @@ def assistant_agent(self, assistant_chat_client: ChatClientProtocol) -> ChatAgen return ChatAgent( chat_client=assistant_chat_client, instructions=assistant_system_prompt, - tools=ai_functions, + tools=tools, temperature=self.assistant_sampling_temperature, chat_message_store_factory=lambda: SlidingWindowChatMessageStore( system_message=assistant_system_prompt, diff --git a/python/packages/lab/tau2/tests/test_tau2_utils.py b/python/packages/lab/tau2/tests/test_tau2_utils.py index 252646081b..8811952bbe 100644 --- a/python/packages/lab/tau2/tests/test_tau2_utils.py +++ b/python/packages/lab/tau2/tests/test_tau2_utils.py @@ -6,11 +6,10 @@ from pathlib import Path import pytest -from agent_framework._tools import AIFunction -from agent_framework._types import ChatMessage, Content, Role +from agent_framework import ChatMessage, Content, FunctionTool, Role from agent_framework_lab_tau2._tau2_utils import ( convert_agent_framework_messages_to_tau2_messages, - convert_tau2_tool_to_ai_function, + convert_tau2_tool_to_function_tool, ) from tau2.data_model.message import AssistantMessage, SystemMessage, ToolCall, ToolMessage, UserMessage from tau2.domains.airline.data_model import FlightDB @@ -51,8 +50,8 @@ def tau2_airline_environment() -> Environment: ) -def test_convert_tau2_tool_to_ai_function_basic(tau2_airline_environment): - """Test basic conversion from tau2 tool to AIFunction.""" +def test_convert_tau2_tool_to_function_tool_basic(tau2_airline_environment): + """Test basic conversion from tau2 tool to FunctionTool.""" # Get real tools from tau2 environment tools = tau2_airline_environment.get_tools() @@ -61,33 +60,33 @@ def test_convert_tau2_tool_to_ai_function_basic(tau2_airline_environment): tau2_tool = tools[0] # Convert the tool - ai_function = convert_tau2_tool_to_ai_function(tau2_tool) + tool = convert_tau2_tool_to_function_tool(tau2_tool) # Verify the conversion - assert isinstance(ai_function, AIFunction) - 
assert ai_function.name == tau2_tool.name - assert ai_function.description == tau2_tool._get_description() - assert ai_function.input_model == tau2_tool.params + assert isinstance(tool, FunctionTool) + assert tool.name == tau2_tool.name + assert tool.description == tau2_tool._get_description() + assert tool.input_model == tau2_tool.params # Test that the function is callable (we won't call it with real params to avoid side effects) - assert callable(ai_function.func) + assert callable(tool.func) -def test_convert_tau2_tool_to_ai_function_multiple_tools(tau2_airline_environment): +def test_convert_tau2_tool_to_function_tool_multiple_tools(tau2_airline_environment): """Test conversion with multiple tau2 tools.""" # Get real tools from tau2 environment tools = tau2_airline_environment.get_tools() # Convert multiple tools - ai_functions = [convert_tau2_tool_to_ai_function(tool) for tool in tools[:3]] # Test first 3 tools + function_tools = [convert_tau2_tool_to_function_tool(tool) for tool in tools[:3]] # Test first 3 tools # Verify all conversions - for ai_function, tau2_tool in zip(ai_functions, tools[:3], strict=False): - assert isinstance(ai_function, AIFunction) - assert ai_function.name == tau2_tool.name - assert ai_function.description == tau2_tool._get_description() - assert ai_function.input_model == tau2_tool.params - assert callable(ai_function.func) + for tool, tau2_tool in zip(function_tools, tools[:3], strict=False): + assert isinstance(tool, FunctionTool) + assert tool.name == tau2_tool.name + assert tool.description == tau2_tool._get_description() + assert tool.input_model == tau2_tool.params + assert callable(tool.func) def test_convert_agent_framework_messages_to_tau2_messages_system(): diff --git a/python/packages/ollama/agent_framework_ollama/_chat_client.py b/python/packages/ollama/agent_framework_ollama/_chat_client.py index ff2f5b6a37..058aeecb4b 100644 --- a/python/packages/ollama/agent_framework_ollama/_chat_client.py +++ 
b/python/packages/ollama/agent_framework_ollama/_chat_client.py @@ -14,13 +14,13 @@ from typing import Any, ClassVar, Generic, TypedDict from agent_framework import ( - AIFunction, BaseChatClient, ChatMessage, ChatOptions, ChatResponse, ChatResponseUpdate, Content, + FunctionTool, Role, ToolProtocol, UsageDetails, @@ -545,13 +545,13 @@ def _prepare_tools_for_ollama(self, tools: list[ToolProtocol | MutableMapping[st for tool in tools: if isinstance(tool, ToolProtocol): match tool: - case AIFunction(): + case FunctionTool(): chat_tools.append(tool.to_json_schema_spec()) case _: raise ServiceInvalidRequestError( "Unsupported tool type '" f"{type(tool).__name__}" - "' for Ollama client. Supported tool types: AIFunction." + "' for Ollama client. Supported tool types: FunctionTool." ) else: chat_tools.append(tool if isinstance(tool, dict) else dict(tool)) diff --git a/python/packages/ollama/tests/test_ollama_chat_client.py b/python/packages/ollama/tests/test_ollama_chat_client.py index ed517fae20..9658ba7c6e 100644 --- a/python/packages/ollama/tests/test_ollama_chat_client.py +++ b/python/packages/ollama/tests/test_ollama_chat_client.py @@ -11,8 +11,8 @@ ChatResponseUpdate, Content, HostedWebSearchTool, - ai_function, chat_middleware, + tool, ) from agent_framework.exceptions import ( ServiceInitializationError, @@ -109,7 +109,7 @@ def mock_chat_completion_tool_call() -> OllamaChatResponse: ) -@ai_function +@tool(approval_mode="never_require") def hello_world(arg1: str) -> str: return "Hello World" diff --git a/python/samples/README.md b/python/samples/README.md index 169ebbc001..a2c539be02 100644 --- a/python/samples/README.md +++ b/python/samples/README.md @@ -292,16 +292,22 @@ These samples demonstrate durable agent hosting using the Durable Task Scheduler ## Tools +Note: Many tool samples set `approval_mode="never_require"` to keep the examples concise. 
For production scenarios, +keep `approval_mode="always_require"` unless you are confident in the tool behavior and approval flow. See +`getting_started/tools/function_tool_with_approval.py` and +`getting_started/tools/function_tool_with_approval_and_threads.py`, plus the workflow approval samples in +`getting_started/workflows/tool-approval/`, for end-to-end approval handling. + | File | Description | |------|-------------| -| [`getting_started/tools/ai_function_declaration_only.py`](./getting_started/tools/ai_function_declaration_only.py) | Function declarations without implementations for testing agent reasoning | -| [`getting_started/tools/ai_function_from_dict_with_dependency_injection.py`](./getting_started/tools/ai_function_from_dict_with_dependency_injection.py) | Creating AI functions from dictionary definitions using dependency injection | -| [`getting_started/tools/ai_function_recover_from_failures.py`](./getting_started/tools/ai_function_recover_from_failures.py) | Graceful error handling when tools raise exceptions | -| [`getting_started/tools/ai_function_with_approval.py`](./getting_started/tools/ai_function_with_approval.py) | User approval workflows for function calls without threads | -| [`getting_started/tools/ai_function_with_approval_and_threads.py`](./getting_started/tools/ai_function_with_approval_and_threads.py) | Tool approval workflows using threads for conversation history management | -| [`getting_started/tools/ai_function_with_max_exceptions.py`](./getting_started/tools/ai_function_with_max_exceptions.py) | Limiting tool failure exceptions using max_invocation_exceptions | -| [`getting_started/tools/ai_function_with_max_invocations.py`](./getting_started/tools/ai_function_with_max_invocations.py) | Limiting total tool invocations using max_invocations | -| [`getting_started/tools/ai_functions_in_class.py`](./getting_started/tools/ai_functions_in_class.py) | Using ai_function decorator with class methods for stateful tools | +| 
[`getting_started/tools/function_tool_declaration_only.py`](./getting_started/tools/function_tool_declaration_only.py) | Function declarations without implementations for testing agent reasoning | +| [`getting_started/tools/function_tool_from_dict_with_dependency_injection.py`](./getting_started/tools/function_tool_from_dict_with_dependency_injection.py) | Creating local tools from dictionary definitions using dependency injection | +| [`getting_started/tools/function_tool_recover_from_failures.py`](./getting_started/tools/function_tool_recover_from_failures.py) | Graceful error handling when tools raise exceptions | +| [`getting_started/tools/function_tool_with_approval.py`](./getting_started/tools/function_tool_with_approval.py) | User approval workflows for function calls without threads | +| [`getting_started/tools/function_tool_with_approval_and_threads.py`](./getting_started/tools/function_tool_with_approval_and_threads.py) | Tool approval workflows using threads for conversation history management | +| [`getting_started/tools/function_tool_with_max_exceptions.py`](./getting_started/tools/function_tool_with_max_exceptions.py) | Limiting tool failure exceptions using max_invocation_exceptions | +| [`getting_started/tools/function_tool_with_max_invocations.py`](./getting_started/tools/function_tool_with_max_invocations.py) | Limiting total tool invocations using max_invocations | +| [`getting_started/tools/tool_in_class.py`](./getting_started/tools/tool_in_class.py) | Using the tool decorator with class methods for stateful tools | ## Workflows diff --git a/python/samples/amazon/bedrock_sample.py b/python/samples/amazon/bedrock_sample.py deleted file mode 100644 index 42feb98ebd..0000000000 --- a/python/samples/amazon/bedrock_sample.py +++ /dev/null @@ -1 +0,0 @@ -"""This sample has moved to python/packages/bedrock/samples/bedrock_sample.py.""" diff --git a/python/samples/autogen-migration/README.md b/python/samples/autogen-migration/README.md index 
c2ddacee70..616d3c345e 100644 --- a/python/samples/autogen-migration/README.md +++ b/python/samples/autogen-migration/README.md @@ -53,7 +53,7 @@ python samples/autogen-migration/orchestrations/04_magentic_one.py - **Default behavior differences**: AutoGen's `AssistantAgent` is single-turn by default (`max_tool_iterations=1`), while AF's `ChatAgent` is multi-turn and continues tool execution automatically. - **Thread management**: AF agents are stateless by default. Use `agent.get_new_thread()` and pass it to `run()`/`run_stream()` to maintain conversation state, similar to AutoGen's conversation context. -- **Tools**: AutoGen uses `FunctionTool` wrappers; AF uses `@ai_function` decorators with automatic schema inference. +- **Tools**: AutoGen uses `FunctionTool` wrappers; AF uses `@tool` decorators with automatic schema inference. - **Orchestration patterns**: - `RoundRobinGroupChat` → `SequentialBuilder` or `WorkflowBuilder` - `SelectorGroupChat` → `GroupChatBuilder` with LLM-based speaker selection diff --git a/python/samples/autogen-migration/orchestrations/01_round_robin_group_chat.py b/python/samples/autogen-migration/orchestrations/01_round_robin_group_chat.py index 38df1424db..39d360b1e1 100644 --- a/python/samples/autogen-migration/orchestrations/01_round_robin_group_chat.py +++ b/python/samples/autogen-migration/orchestrations/01_round_robin_group_chat.py @@ -103,6 +103,7 @@ async def run_agent_framework_with_cycle() -> None: WorkflowContext, WorkflowOutputEvent, executor, + tool, ) from agent_framework.openai import OpenAIChatClient diff --git a/python/samples/autogen-migration/orchestrations/03_swarm.py b/python/samples/autogen-migration/orchestrations/03_swarm.py index 76b5fc9ca5..3fa9f7a04d 100644 --- a/python/samples/autogen-migration/orchestrations/03_swarm.py +++ b/python/samples/autogen-migration/orchestrations/03_swarm.py @@ -102,6 +102,7 @@ async def run_agent_framework() -> None: RequestInfoEvent, WorkflowRunState, WorkflowStatusEvent, + tool, 
) from agent_framework.openai import OpenAIChatClient diff --git a/python/samples/autogen-migration/orchestrations/04_magentic_one.py b/python/samples/autogen-migration/orchestrations/04_magentic_one.py index 48de809a95..d8c99dbb4e 100644 --- a/python/samples/autogen-migration/orchestrations/04_magentic_one.py +++ b/python/samples/autogen-migration/orchestrations/04_magentic_one.py @@ -63,6 +63,7 @@ async def run_agent_framework() -> None: MagenticBuilder, MagenticFinalResultEvent, MagenticOrchestratorMessageEvent, + tool, ) from agent_framework.openai import OpenAIChatClient diff --git a/python/samples/autogen-migration/single_agent/02_assistant_agent_with_tool.py b/python/samples/autogen-migration/single_agent/02_assistant_agent_with_tool.py index 00b82fec9c..be251a272e 100644 --- a/python/samples/autogen-migration/single_agent/02_assistant_agent_with_tool.py +++ b/python/samples/autogen-migration/single_agent/02_assistant_agent_with_tool.py @@ -46,12 +46,13 @@ def get_weather(location: str) -> str: async def run_agent_framework() -> None: - """Agent Framework agent with @ai_function decorator.""" - from agent_framework import ai_function + """Agent Framework agent with @tool decorator.""" + from agent_framework import tool from agent_framework.openai import OpenAIChatClient - # Define tool with @ai_function decorator (automatic schema inference) - @ai_function + # Define tool with @tool decorator (automatic schema inference) + # NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. + @tool(approval_mode="never_require") def get_weather(location: str) -> str: """Get the weather for a location. 
diff --git a/python/samples/demos/chatkit-integration/app.py b/python/samples/demos/chatkit-integration/app.py index c215b64290..4e11e4948c 100644 --- a/python/samples/demos/chatkit-integration/app.py +++ b/python/samples/demos/chatkit-integration/app.py @@ -19,6 +19,7 @@ # Agent Framework imports from agent_framework import AgentResponseUpdate, ChatAgent, ChatMessage, FunctionResultContent, Role +from agent_framework import tool from agent_framework.azure import AzureOpenAIChatClient # Agent Framework ChatKit integration @@ -130,7 +131,8 @@ async def stream_widget( yield ThreadItemDoneEvent(type="thread.item.done", item=widget_item) - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: @@ -168,14 +170,14 @@ def get_weather( ) return WeatherResponse(text, weather_data) - +@tool(approval_mode="never_require") def get_time() -> str: """Get the current UTC time.""" current_time = datetime.now(timezone.utc) logger.info("Getting current UTC time") return f"Current UTC time: {current_time.strftime('%Y-%m-%d %H:%M:%S')} UTC" - +@tool(approval_mode="never_require") def show_city_selector() -> str: """Show an interactive city selector widget to the user. 
diff --git a/python/samples/demos/m365-agent/m365_agent_demo/app.py b/python/samples/demos/m365-agent/m365_agent_demo/app.py index 3d29c2c07d..9e11780614 100644 --- a/python/samples/demos/m365-agent/m365_agent_demo/app.py +++ b/python/samples/demos/m365-agent/m365_agent_demo/app.py @@ -17,6 +17,7 @@ from typing import Annotated from agent_framework import ChatAgent +from agent_framework import tool from agent_framework.openai import OpenAIChatClient from aiohttp import web from aiohttp.web_middlewares import middleware @@ -76,7 +77,8 @@ def load_app_config() -> AppConfig: port = 3978 return AppConfig(use_anonymous_mode=use_anonymous_mode, port=port, agents_sdk_config=agents_sdk_config) - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/demos/workflow_evaluation/_tools.py b/python/samples/demos/workflow_evaluation/_tools.py index eca03544b2..12be0f4094 100644 --- a/python/samples/demos/workflow_evaluation/_tools.py +++ b/python/samples/demos/workflow_evaluation/_tools.py @@ -4,7 +4,7 @@ from datetime import datetime from typing import Annotated -from agent_framework import ai_function +from agent_framework import tool from pydantic import Field # --- Travel Planning Tools --- @@ -13,7 +13,7 @@ # Mock hotel search tool -@ai_function(name="search_hotels", description="Search for available hotels based on location and dates.") +@tool(name="search_hotels", description="Search for available hotels based on location and dates.") def search_hotels( location: Annotated[str, Field(description="City or region to search for hotels.")], check_in: Annotated[str, Field(description="Check-in date (e.g., 'December 15, 
2025').")], @@ -83,7 +83,7 @@ def search_hotels( # Mock hotel details tool -@ai_function(name="get_hotel_details", description="Get detailed information about a specific hotel.") +@tool(name="get_hotel_details", description="Get detailed information about a specific hotel.") def get_hotel_details( hotel_name: Annotated[str, Field(description="Name of the hotel to get details for.")], ) -> str: @@ -158,7 +158,7 @@ def get_hotel_details( # Mock flight search tool -@ai_function(name="search_flights", description="Search for available flights between two locations.") +@tool(name="search_flights", description="Search for available flights between two locations.") def search_flights( origin: Annotated[str, Field(description="Departure airport or city (e.g., 'JFK' or 'New York').")], destination: Annotated[str, Field(description="Arrival airport or city (e.g., 'CDG' or 'Paris').")], @@ -284,7 +284,7 @@ def search_flights( # Mock flight details tool -@ai_function(name="get_flight_details", description="Get detailed information about a specific flight.") +@tool(name="get_flight_details", description="Get detailed information about a specific flight.") def get_flight_details( flight_number: Annotated[str, Field(description="Flight number (e.g., 'AF007' or 'DL264').")], ) -> str: @@ -324,7 +324,7 @@ def get_flight_details( # Mock activity search tool -@ai_function(name="search_activities", description="Search for available activities and attractions at a destination.") +@tool(name="search_activities", description="Search for available activities and attractions at a destination.") def search_activities( location: Annotated[str, Field(description="City or region to search for activities.")], date: Annotated[str | None, Field(description="Date for the activity (e.g., 'December 16, 2025').")] = None, @@ -468,7 +468,7 @@ def search_activities( # Mock activity details tool -@ai_function(name="get_activity_details", description="Get detailed information about a specific activity.") 
+@tool(name="get_activity_details", description="Get detailed information about a specific activity.") def get_activity_details( activity_name: Annotated[str, Field(description="Name of the activity to get details for.")], ) -> str: @@ -545,7 +545,7 @@ def get_activity_details( # Mock booking confirmation tool -@ai_function(name="confirm_booking", description="Confirm a booking reservation.") +@tool(name="confirm_booking", description="Confirm a booking reservation.") def confirm_booking( booking_type: Annotated[str, Field(description="Type of booking (e.g., 'hotel', 'flight', 'activity').")], booking_id: Annotated[str, Field(description="Unique booking identifier.")], @@ -579,7 +579,7 @@ def confirm_booking( # Mock hotel availability check tool -@ai_function(name="check_hotel_availability", description="Check availability for hotel rooms.") +@tool(name="check_hotel_availability", description="Check availability for hotel rooms.") def check_hotel_availability( hotel_name: Annotated[str, Field(description="Name of the hotel to check availability for.")], check_in: Annotated[str, Field(description="Check-in date (e.g., 'December 15, 2025').")], @@ -614,7 +614,7 @@ def check_hotel_availability( # Mock flight availability check tool -@ai_function(name="check_flight_availability", description="Check availability for flight seats.") +@tool(name="check_flight_availability", description="Check availability for flight seats.") def check_flight_availability( flight_number: Annotated[str, Field(description="Flight number to check availability for.")], date: Annotated[str, Field(description="Flight date (e.g., 'December 15, 2025').")], @@ -647,7 +647,7 @@ def check_flight_availability( # Mock activity availability check tool -@ai_function(name="check_activity_availability", description="Check availability for activity bookings.") +@tool(name="check_activity_availability", description="Check availability for activity bookings.") def check_activity_availability( activity_name: 
Annotated[str, Field(description="Name of the activity to check availability for.")], date: Annotated[str, Field(description="Activity date (e.g., 'December 16, 2025').")], @@ -680,7 +680,7 @@ def check_activity_availability( # Mock payment processing tool -@ai_function(name="process_payment", description="Process payment for a booking.") +@tool(name="process_payment", description="Process payment for a booking.") def process_payment( amount: Annotated[float, Field(description="Payment amount.")], currency: Annotated[str, Field(description="Currency code (e.g., 'USD', 'EUR').")], @@ -714,7 +714,7 @@ def process_payment( # Mock payment validation tool -@ai_function(name="validate_payment_method", description="Validate a payment method before processing.") +@tool(name="validate_payment_method", description="Validate a payment method before processing.") def validate_payment_method( payment_method: Annotated[dict, Field(description="Payment method to validate (type, number, expiry, cvv).")], ) -> str: diff --git a/python/samples/demos/workflow_evaluation/create_workflow.py b/python/samples/demos/workflow_evaluation/create_workflow.py index a762fed628..dc1e920b69 100644 --- a/python/samples/demos/workflow_evaluation/create_workflow.py +++ b/python/samples/demos/workflow_evaluation/create_workflow.py @@ -57,6 +57,7 @@ WorkflowOutputEvent, executor, handler, + tool, ) from agent_framework.azure import AzureAIClient from azure.ai.projects.aio import AIProjectClient diff --git a/python/samples/getting_started/agents/anthropic/anthropic_basic.py b/python/samples/getting_started/agents/anthropic/anthropic_basic.py index c5bb497bec..41fbb3b7e6 100644 --- a/python/samples/getting_started/agents/anthropic/anthropic_basic.py +++ b/python/samples/getting_started/agents/anthropic/anthropic_basic.py @@ -5,6 +5,7 @@ from typing import Annotated from agent_framework.anthropic import AnthropicClient +from agent_framework import tool """ Anthropic Chat Agent Example @@ -12,7 +13,8 @@ 
This sample demonstrates using Anthropic with an agent and a single custom tool. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, "The location to get the weather for."], ) -> str: diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_basic.py b/python/samples/getting_started/agents/azure_ai/azure_ai_basic.py index 6cf5144bb7..f6bf9802e0 100644 --- a/python/samples/getting_started/agents/azure_ai/azure_ai_basic.py +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_basic.py @@ -7,6 +7,7 @@ from agent_framework.azure import AzureAIProjectAgentProvider from azure.identity.aio import AzureCliCredential from pydantic import Field +from agent_framework import tool """ Azure AI Agent Basic Example @@ -15,7 +16,8 @@ Shows both streaming and non-streaming responses with function tools. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_provider_methods.py b/python/samples/getting_started/agents/azure_ai/azure_ai_provider_methods.py index 557d7f4990..266cfbdfdd 100644 --- a/python/samples/getting_started/agents/azure_ai/azure_ai_provider_methods.py +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_provider_methods.py @@ -10,6 +10,7 @@ from azure.ai.projects.models import AgentReference, PromptAgentDefinition from azure.identity.aio import AzureCliCredential from pydantic import Field +from agent_framework import tool """ Azure AI Project Agent Provider Methods Example @@ -25,7 +26,8 @@ Each method returns a ChatAgent that can be used for conversations. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_use_latest_version.py b/python/samples/getting_started/agents/azure_ai/azure_ai_use_latest_version.py index 025e78813e..7106bb1f31 100644 --- a/python/samples/getting_started/agents/azure_ai/azure_ai_use_latest_version.py +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_use_latest_version.py @@ -7,6 +7,7 @@ from agent_framework.azure import AzureAIProjectAgentProvider from azure.identity.aio import AzureCliCredential from pydantic import Field +from agent_framework import tool """ Azure AI Agent Latest Version Example @@ -16,7 +17,8 @@ while subsequent calls with `get_agent()` reuse the latest agent version. 
""" - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_download.py b/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_download.py index 50ce0037a4..ba3f72c1ce 100644 --- a/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_download.py +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_download.py @@ -11,6 +11,7 @@ HostedCodeInterpreterTool, HostedFileContent, TextContent, + tool, ) from agent_framework.azure import AzureAIProjectAgentProvider from azure.identity.aio import AzureCliCredential diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_generation.py b/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_generation.py index 3e2b520ede..9e61d2486c 100644 --- a/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_generation.py +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_generation.py @@ -5,6 +5,7 @@ from agent_framework import ( AgentResponseUpdate, HostedCodeInterpreterTool, + tool, ) from agent_framework.azure import AzureAIProjectAgentProvider from azure.identity.aio import AzureCliCredential diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_existing_conversation.py b/python/samples/getting_started/agents/azure_ai/azure_ai_with_existing_conversation.py index 099c5ad5aa..8438abcf67 100644 --- 
a/python/samples/getting_started/agents/azure_ai/azure_ai_with_existing_conversation.py +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_with_existing_conversation.py @@ -8,6 +8,7 @@ from azure.ai.projects.aio import AIProjectClient from azure.identity.aio import AzureCliCredential from pydantic import Field +from agent_framework import tool """ Azure AI Agent Existing Conversation Example @@ -15,7 +16,8 @@ This sample demonstrates usage of AzureAIProjectAgentProvider with existing conversation created on service side. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_explicit_settings.py b/python/samples/getting_started/agents/azure_ai/azure_ai_with_explicit_settings.py index a3e3e24fe1..ba131817d1 100644 --- a/python/samples/getting_started/agents/azure_ai/azure_ai_with_explicit_settings.py +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_with_explicit_settings.py @@ -8,6 +8,7 @@ from agent_framework.azure import AzureAIProjectAgentProvider from azure.identity.aio import AzureCliCredential from pydantic import Field +from agent_framework import tool """ Azure AI Agent with Explicit Settings Example @@ -16,7 +17,8 @@ settings rather than relying on environment variable defaults. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_thread.py b/python/samples/getting_started/agents/azure_ai/azure_ai_with_thread.py index f4e69e02ca..766ee5fa51 100644 --- a/python/samples/getting_started/agents/azure_ai/azure_ai_with_thread.py +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_with_thread.py @@ -7,6 +7,7 @@ from agent_framework.azure import AzureAIProjectAgentProvider from azure.identity.aio import AzureCliCredential from pydantic import Field +from agent_framework import tool """ Azure AI Agent with Thread Management Example @@ -15,7 +16,8 @@ persistent conversation capabilities using service-managed threads as well as storing messages in-memory. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_basic.py b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_basic.py index 64f0996184..787b1f317b 100644 --- a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_basic.py +++ b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_basic.py @@ -7,6 +7,7 @@ from agent_framework.azure import AzureAIAgentsProvider from azure.identity.aio import AzureCliCredential from pydantic import Field +from agent_framework import tool """ Azure AI Agent Basic Example @@ -15,7 +16,8 @@ lifecycle management. Shows both streaming and non-streaming responses with function tools. """ - +# NOTE: approval_mode="never_require" is for sample brevity. 
Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_provider_methods.py b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_provider_methods.py index 0a07cc5c35..adb6386797 100644 --- a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_provider_methods.py +++ b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_provider_methods.py @@ -9,6 +9,7 @@ from azure.ai.agents.aio import AgentsClient from azure.identity.aio import AzureCliCredential from pydantic import Field +from agent_framework import tool """ Azure AI Agent Provider Methods Example @@ -19,7 +20,8 @@ - as_agent(): Wrap an SDK Agent object without making HTTP calls """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_code_interpreter_file_generation.py b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_code_interpreter_file_generation.py index 665c707adc..44554af05a 100644 --- a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_code_interpreter_file_generation.py +++ b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_code_interpreter_file_generation.py @@ -7,6 +7,7 @@ AgentResponseUpdate, HostedCodeInterpreterTool, HostedFileContent, + tool, ) from agent_framework.azure import AzureAIAgentsProvider from azure.ai.agents.aio import AgentsClient diff --git a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_existing_thread.py b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_existing_thread.py index a05aca5eba..4852ba15b7 100644 --- a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_existing_thread.py +++ b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_existing_thread.py @@ -9,6 +9,7 @@ from azure.ai.agents.aio import AgentsClient from azure.identity.aio import AzureCliCredential from pydantic import Field +from agent_framework import tool """ Azure AI Agent with Existing Thread Example @@ -17,7 +18,8 @@ by providing thread IDs for thread reuse patterns. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_explicit_settings.py b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_explicit_settings.py index bb0405cd6f..85b4d55b95 100644 --- a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_explicit_settings.py +++ b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_explicit_settings.py @@ -8,6 +8,7 @@ from agent_framework.azure import AzureAIAgentsProvider from azure.identity.aio import AzureCliCredential from pydantic import Field +from agent_framework import tool """ Azure AI Agent with Explicit Settings Example @@ -16,7 +17,8 @@ settings rather than relying on environment variable defaults. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_function_tools.py b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_function_tools.py index 1e2e0b618b..7da870c42a 100644 --- a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_function_tools.py +++ b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_function_tools.py @@ -8,6 +8,7 @@ from agent_framework.azure import AzureAIAgentsProvider from azure.identity.aio import AzureCliCredential from pydantic import Field +from agent_framework import tool """ Azure AI Agent with Function Tools Example @@ -16,7 +17,8 @@ showing both agent-level and query-level tool configuration patterns. 
""" - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: @@ -24,7 +26,7 @@ def get_weather( conditions = ["sunny", "cloudy", "rainy", "stormy"] return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C." - +@tool(approval_mode="never_require") def get_time() -> str: """Get the current UTC time.""" current_time = datetime.now(timezone.utc) diff --git a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_multiple_tools.py b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_multiple_tools.py index e3c28118be..244c57aa50 100644 --- a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_multiple_tools.py +++ b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_multiple_tools.py @@ -9,6 +9,7 @@ AgentThread, HostedMCPTool, HostedWebSearchTool, + tool, ) from agent_framework.azure import AzureAIAgentsProvider from azure.identity.aio import AzureCliCredential @@ -33,6 +34,8 @@ 4. Copy the connection ID and set it as the BING_CONNECTION_ID environment variable """ +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") def get_time() -> str: """Get the current UTC time.""" diff --git a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_thread.py b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_thread.py index db1911fcad..04128c80a1 100644 --- a/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_thread.py +++ b/python/samples/getting_started/agents/azure_ai_agent/azure_ai_with_thread.py @@ -5,6 +5,7 @@ from typing import Annotated from agent_framework import AgentThread +from agent_framework import tool from agent_framework.azure import AzureAIAgentsProvider from azure.identity.aio import AzureCliCredential from pydantic import Field @@ -16,7 +17,8 @@ automatic thread creation with explicit thread management for persistent context. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/agents/azure_openai/azure_assistants_basic.py b/python/samples/getting_started/agents/azure_openai/azure_assistants_basic.py index e0fb4a3aaf..7613eb62dc 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_assistants_basic.py +++ b/python/samples/getting_started/agents/azure_openai/azure_assistants_basic.py @@ -7,6 +7,7 @@ from agent_framework.azure import AzureOpenAIAssistantsClient from azure.identity import AzureCliCredential from pydantic import Field +from agent_framework import tool """ Azure OpenAI Assistants Basic Example @@ -15,7 +16,8 @@ assistant lifecycle management, showing both streaming and non-streaming responses. """ - +# NOTE: approval_mode="never_require" is for sample brevity. 
Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/agents/azure_openai/azure_assistants_with_existing_assistant.py b/python/samples/getting_started/agents/azure_openai/azure_assistants_with_existing_assistant.py index 1211ab709a..70cd79b41a 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_assistants_with_existing_assistant.py +++ b/python/samples/getting_started/agents/azure_openai/azure_assistants_with_existing_assistant.py @@ -6,6 +6,7 @@ from typing import Annotated from agent_framework import ChatAgent +from agent_framework import tool from agent_framework.azure import AzureOpenAIAssistantsClient from azure.identity import AzureCliCredential, get_bearer_token_provider from openai import AsyncAzureOpenAI @@ -18,7 +19,8 @@ using existing assistant IDs rather than creating new ones. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/agents/azure_openai/azure_assistants_with_explicit_settings.py b/python/samples/getting_started/agents/azure_openai/azure_assistants_with_explicit_settings.py index 7abd2d7507..581c447240 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_assistants_with_explicit_settings.py +++ b/python/samples/getting_started/agents/azure_openai/azure_assistants_with_explicit_settings.py @@ -8,6 +8,7 @@ from agent_framework.azure import AzureOpenAIAssistantsClient from azure.identity import AzureCliCredential from pydantic import Field +from agent_framework import tool """ Azure OpenAI Assistants with Explicit Settings Example @@ -16,7 +17,8 @@ settings rather than relying on environment variable defaults. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/agents/azure_openai/azure_assistants_with_function_tools.py b/python/samples/getting_started/agents/azure_openai/azure_assistants_with_function_tools.py index d9ce57e3f1..6256681fce 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_assistants_with_function_tools.py +++ b/python/samples/getting_started/agents/azure_openai/azure_assistants_with_function_tools.py @@ -6,6 +6,7 @@ from typing import Annotated from agent_framework import ChatAgent +from agent_framework import tool from agent_framework.azure import AzureOpenAIAssistantsClient from azure.identity import AzureCliCredential from pydantic import Field @@ -17,7 +18,8 @@ showing both agent-level and query-level tool configuration patterns. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: @@ -25,7 +27,7 @@ def get_weather( conditions = ["sunny", "cloudy", "rainy", "stormy"] return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C." 
- +@tool(approval_mode="never_require") def get_time() -> str: """Get the current UTC time.""" current_time = datetime.now(timezone.utc) diff --git a/python/samples/getting_started/agents/azure_openai/azure_assistants_with_thread.py b/python/samples/getting_started/agents/azure_openai/azure_assistants_with_thread.py index e60909d718..a791604744 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_assistants_with_thread.py +++ b/python/samples/getting_started/agents/azure_openai/azure_assistants_with_thread.py @@ -5,6 +5,7 @@ from typing import Annotated from agent_framework import AgentThread, ChatAgent +from agent_framework import tool from agent_framework.azure import AzureOpenAIAssistantsClient from azure.identity import AzureCliCredential from pydantic import Field @@ -16,7 +17,8 @@ automatic thread creation with explicit thread management for persistent context. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/agents/azure_openai/azure_chat_client_basic.py b/python/samples/getting_started/agents/azure_openai/azure_chat_client_basic.py index bc647685b8..25b0cc5bd3 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_chat_client_basic.py +++ b/python/samples/getting_started/agents/azure_openai/azure_chat_client_basic.py @@ -7,6 +7,7 @@ from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential from pydantic import Field +from agent_framework import tool """ Azure OpenAI Chat Client Basic Example @@ -15,7 +16,8 @@ interactions, showing both streaming and non-streaming responses. 
""" - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/agents/azure_openai/azure_chat_client_with_explicit_settings.py b/python/samples/getting_started/agents/azure_openai/azure_chat_client_with_explicit_settings.py index 8d39b4b035..db97390aa8 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_chat_client_with_explicit_settings.py +++ b/python/samples/getting_started/agents/azure_openai/azure_chat_client_with_explicit_settings.py @@ -8,6 +8,7 @@ from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential from pydantic import Field +from agent_framework import tool """ Azure OpenAI Chat Client with Explicit Settings Example @@ -16,7 +17,8 @@ settings rather than relying on environment variable defaults. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/agents/azure_openai/azure_chat_client_with_function_tools.py b/python/samples/getting_started/agents/azure_openai/azure_chat_client_with_function_tools.py index 3a0f6077be..33b8ffe577 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_chat_client_with_function_tools.py +++ b/python/samples/getting_started/agents/azure_openai/azure_chat_client_with_function_tools.py @@ -6,6 +6,7 @@ from typing import Annotated from agent_framework import ChatAgent +from agent_framework import tool from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential from pydantic import Field @@ -17,7 +18,8 @@ showing both agent-level and query-level tool configuration patterns. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: @@ -25,7 +27,7 @@ def get_weather( conditions = ["sunny", "cloudy", "rainy", "stormy"] return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C." 
- +@tool(approval_mode="never_require") def get_time() -> str: """Get the current UTC time.""" current_time = datetime.now(timezone.utc) diff --git a/python/samples/getting_started/agents/azure_openai/azure_chat_client_with_thread.py b/python/samples/getting_started/agents/azure_openai/azure_chat_client_with_thread.py index a1a841dec8..16fee4226e 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_chat_client_with_thread.py +++ b/python/samples/getting_started/agents/azure_openai/azure_chat_client_with_thread.py @@ -5,6 +5,7 @@ from typing import Annotated from agent_framework import AgentThread, ChatAgent, ChatMessageStore +from agent_framework import tool from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential from pydantic import Field @@ -16,7 +17,8 @@ automatic thread creation with explicit thread management for persistent context. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/agents/azure_openai/azure_responses_client_basic.py b/python/samples/getting_started/agents/azure_openai/azure_responses_client_basic.py index 9d91039658..921ee76634 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_responses_client_basic.py +++ b/python/samples/getting_started/agents/azure_openai/azure_responses_client_basic.py @@ -7,6 +7,7 @@ from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import AzureCliCredential from pydantic import Field +from agent_framework import tool """ Azure OpenAI Responses Client Basic Example @@ -15,7 +16,8 @@ response generation, showing both streaming and non-streaming responses. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_explicit_settings.py b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_explicit_settings.py index 16960401bb..5a38798ef0 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_explicit_settings.py +++ b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_explicit_settings.py @@ -8,6 +8,7 @@ from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import AzureCliCredential from pydantic import Field +from agent_framework import tool """ Azure OpenAI Responses Client with Explicit Settings Example @@ -16,7 +17,8 @@ settings rather than relying on environment variable defaults. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_function_tools.py b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_function_tools.py index 943319a030..1799f88560 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_function_tools.py +++ b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_function_tools.py @@ -6,6 +6,7 @@ from typing import Annotated from agent_framework import ChatAgent +from agent_framework import tool from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import AzureCliCredential from pydantic import Field @@ -17,7 +18,8 @@ showing both agent-level and query-level tool configuration patterns. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: @@ -25,7 +27,7 @@ def get_weather( conditions = ["sunny", "cloudy", "rainy", "stormy"] return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C." 
- +@tool(approval_mode="never_require") def get_time() -> str: """Get the current UTC time.""" current_time = datetime.now(timezone.utc) diff --git a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_thread.py b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_thread.py index c73c9bede9..817ac69ef2 100644 --- a/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_thread.py +++ b/python/samples/getting_started/agents/azure_openai/azure_responses_client_with_thread.py @@ -5,6 +5,7 @@ from typing import Annotated from agent_framework import AgentThread, ChatAgent +from agent_framework import tool from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import AzureCliCredential from pydantic import Field @@ -16,7 +17,8 @@ automatic thread creation with explicit thread management for persistent context. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/agents/custom/custom_agent.py b/python/samples/getting_started/agents/custom/custom_agent.py index 5dc050a1b5..8408f88fd0 100644 --- a/python/samples/getting_started/agents/custom/custom_agent.py +++ b/python/samples/getting_started/agents/custom/custom_agent.py @@ -12,6 +12,7 @@ ChatMessage, Role, TextContent, + tool, ) """ diff --git a/python/samples/getting_started/agents/custom/custom_chat_client.py b/python/samples/getting_started/agents/custom/custom_chat_client.py index 9a4d544dbe..00078d14c3 100644 --- a/python/samples/getting_started/agents/custom/custom_chat_client.py +++ b/python/samples/getting_started/agents/custom/custom_chat_client.py @@ -15,6 +15,7 @@ TextContent, use_chat_middleware, use_function_invocation, + tool, ) from agent_framework._clients import TOptions_co diff --git a/python/samples/getting_started/agents/github_copilot/github_copilot_basic.py b/python/samples/getting_started/agents/github_copilot/github_copilot_basic.py index afa9dbb7d6..117d9fb300 100644 --- a/python/samples/getting_started/agents/github_copilot/github_copilot_basic.py +++ b/python/samples/getting_started/agents/github_copilot/github_copilot_basic.py @@ -19,8 +19,10 @@ from agent_framework.github import GithubCopilotAgent, GithubCopilotOptions from pydantic import Field +from agent_framework import tool - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/agents/github_copilot/github_copilot_with_session.py b/python/samples/getting_started/agents/github_copilot/github_copilot_with_session.py index 2d5f027874..b1762e87c5 100644 --- a/python/samples/getting_started/agents/github_copilot/github_copilot_with_session.py +++ b/python/samples/getting_started/agents/github_copilot/github_copilot_with_session.py @@ -14,8 +14,10 @@ from agent_framework.github import GithubCopilotAgent, GithubCopilotOptions from pydantic import Field +from agent_framework import tool - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/agents/ollama/ollama_agent_basic.py b/python/samples/getting_started/agents/ollama/ollama_agent_basic.py index 0a89d04a85..afe6700083 100644 --- a/python/samples/getting_started/agents/ollama/ollama_agent_basic.py +++ b/python/samples/getting_started/agents/ollama/ollama_agent_basic.py @@ -4,6 +4,7 @@ from datetime import datetime from agent_framework.ollama import OllamaChatClient +from agent_framework import tool """ Ollama Agent Basic Example @@ -17,7 +18,8 @@ """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") def get_time(location: str) -> str: """Get the current time.""" return f"The current time in {location} is {datetime.now().strftime('%I:%M %p')}." diff --git a/python/samples/getting_started/agents/ollama/ollama_chat_client.py b/python/samples/getting_started/agents/ollama/ollama_chat_client.py index 336a79c721..d22fd737f7 100644 --- a/python/samples/getting_started/agents/ollama/ollama_chat_client.py +++ b/python/samples/getting_started/agents/ollama/ollama_chat_client.py @@ -4,6 +4,7 @@ from datetime import datetime from agent_framework.ollama import OllamaChatClient +from agent_framework import tool """ Ollama Chat Client Example @@ -17,7 +18,8 @@ """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_time(): """Get the current time.""" return f"The current time is {datetime.now().strftime('%I:%M %p')}." diff --git a/python/samples/getting_started/agents/ollama/ollama_with_openai_chat_client.py b/python/samples/getting_started/agents/ollama/ollama_with_openai_chat_client.py index 48d9bca154..47f58cd6e7 100644 --- a/python/samples/getting_started/agents/ollama/ollama_with_openai_chat_client.py +++ b/python/samples/getting_started/agents/ollama/ollama_with_openai_chat_client.py @@ -6,6 +6,7 @@ from typing import Annotated from agent_framework.openai import OpenAIChatClient +from agent_framework import tool """ Ollama with OpenAI Chat Client Example @@ -19,7 +20,8 @@ - OLLAMA_MODEL: The model name to use (e.g., "mistral", "llama3.2", "phi3") """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") def get_weather( location: Annotated[str, "The location to get the weather for."], ) -> str: diff --git a/python/samples/getting_started/agents/openai/openai_assistants_basic.py b/python/samples/getting_started/agents/openai/openai_assistants_basic.py index 4dee6f4672..bf52405218 100644 --- a/python/samples/getting_started/agents/openai/openai_assistants_basic.py +++ b/python/samples/getting_started/agents/openai/openai_assistants_basic.py @@ -8,6 +8,7 @@ from agent_framework.openai import OpenAIAssistantProvider from openai import AsyncOpenAI from pydantic import Field +from agent_framework import tool """ OpenAI Assistants Basic Example @@ -16,7 +17,8 @@ assistant lifecycle management, showing both streaming and non-streaming responses. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/agents/openai/openai_assistants_provider_methods.py b/python/samples/getting_started/agents/openai/openai_assistants_provider_methods.py index ca7133cc3d..55e1110075 100644 --- a/python/samples/getting_started/agents/openai/openai_assistants_provider_methods.py +++ b/python/samples/getting_started/agents/openai/openai_assistants_provider_methods.py @@ -8,6 +8,7 @@ from agent_framework.openai import OpenAIAssistantProvider from openai import AsyncOpenAI from pydantic import Field +from agent_framework import tool """ OpenAI Assistant Provider Methods Example @@ -18,7 +19,8 @@ - as_agent(): Wrap an SDK Assistant object without making HTTP calls """ - +# NOTE: approval_mode="never_require" is for sample brevity. 
Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/agents/openai/openai_assistants_with_existing_assistant.py b/python/samples/getting_started/agents/openai/openai_assistants_with_existing_assistant.py index a0e9497d3e..827d8c412c 100644 --- a/python/samples/getting_started/agents/openai/openai_assistants_with_existing_assistant.py +++ b/python/samples/getting_started/agents/openai/openai_assistants_with_existing_assistant.py @@ -8,6 +8,7 @@ from agent_framework.openai import OpenAIAssistantProvider from openai import AsyncOpenAI from pydantic import Field +from agent_framework import tool """ OpenAI Assistants with Existing Assistant Example @@ -16,7 +17,8 @@ using the provider's get_agent() and as_agent() methods. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/agents/openai/openai_assistants_with_explicit_settings.py b/python/samples/getting_started/agents/openai/openai_assistants_with_explicit_settings.py index af99a0a8f9..53afefa5e9 100644 --- a/python/samples/getting_started/agents/openai/openai_assistants_with_explicit_settings.py +++ b/python/samples/getting_started/agents/openai/openai_assistants_with_explicit_settings.py @@ -8,6 +8,7 @@ from agent_framework.openai import OpenAIAssistantProvider from openai import AsyncOpenAI from pydantic import Field +from agent_framework import tool """ OpenAI Assistants with Explicit Settings Example @@ -16,7 +17,8 @@ settings rather than relying on environment variable defaults. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/agents/openai/openai_assistants_with_function_tools.py b/python/samples/getting_started/agents/openai/openai_assistants_with_function_tools.py index 2e3e3f0b07..bf75affc55 100644 --- a/python/samples/getting_started/agents/openai/openai_assistants_with_function_tools.py +++ b/python/samples/getting_started/agents/openai/openai_assistants_with_function_tools.py @@ -9,6 +9,7 @@ from agent_framework.openai import OpenAIAssistantProvider from openai import AsyncOpenAI from pydantic import Field +from agent_framework import tool """ OpenAI Assistants with Function Tools Example @@ -17,7 +18,8 @@ showing both agent-level and query-level tool configuration patterns. 
""" - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: @@ -25,7 +27,7 @@ def get_weather( conditions = ["sunny", "cloudy", "rainy", "stormy"] return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}C." - +@tool(approval_mode="never_require") def get_time() -> str: """Get the current UTC time.""" current_time = datetime.now(timezone.utc) diff --git a/python/samples/getting_started/agents/openai/openai_assistants_with_thread.py b/python/samples/getting_started/agents/openai/openai_assistants_with_thread.py index 7adb4c61cd..d3b167ebdd 100644 --- a/python/samples/getting_started/agents/openai/openai_assistants_with_thread.py +++ b/python/samples/getting_started/agents/openai/openai_assistants_with_thread.py @@ -6,6 +6,7 @@ from typing import Annotated from agent_framework import AgentThread +from agent_framework import tool from agent_framework.openai import OpenAIAssistantProvider from openai import AsyncOpenAI from pydantic import Field @@ -17,7 +18,8 @@ persistent conversation threads and context preservation across interactions. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/agents/openai/openai_chat_client_basic.py b/python/samples/getting_started/agents/openai/openai_chat_client_basic.py index 8e4e29f007..6c1a94760d 100644 --- a/python/samples/getting_started/agents/openai/openai_chat_client_basic.py +++ b/python/samples/getting_started/agents/openai/openai_chat_client_basic.py @@ -5,6 +5,7 @@ from typing import Annotated from agent_framework.openai import OpenAIChatClient +from agent_framework import tool """ OpenAI Chat Client Basic Example @@ -13,7 +14,8 @@ interactions, showing both streaming and non-streaming responses. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, "The location to get the weather for."], ) -> str: diff --git a/python/samples/getting_started/agents/openai/openai_chat_client_with_explicit_settings.py b/python/samples/getting_started/agents/openai/openai_chat_client_with_explicit_settings.py index 0497ca4d89..1302841ecf 100644 --- a/python/samples/getting_started/agents/openai/openai_chat_client_with_explicit_settings.py +++ b/python/samples/getting_started/agents/openai/openai_chat_client_with_explicit_settings.py @@ -7,6 +7,7 @@ from agent_framework.openai import OpenAIChatClient from pydantic import Field +from agent_framework import tool """ OpenAI Chat Client with Explicit Settings Example @@ -15,7 +16,8 @@ settings rather than relying on environment variable defaults. """ - +# NOTE: approval_mode="never_require" is for sample brevity. 
Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/agents/openai/openai_chat_client_with_function_tools.py b/python/samples/getting_started/agents/openai/openai_chat_client_with_function_tools.py index fdc6f896e4..3fa7fd9e8a 100644 --- a/python/samples/getting_started/agents/openai/openai_chat_client_with_function_tools.py +++ b/python/samples/getting_started/agents/openai/openai_chat_client_with_function_tools.py @@ -6,6 +6,7 @@ from typing import Annotated from agent_framework import ChatAgent +from agent_framework import tool from agent_framework.openai import OpenAIChatClient from pydantic import Field @@ -16,7 +17,8 @@ showing both agent-level and query-level tool configuration patterns. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: @@ -24,7 +26,7 @@ def get_weather( conditions = ["sunny", "cloudy", "rainy", "stormy"] return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C." 
- +@tool(approval_mode="never_require") def get_time() -> str: """Get the current UTC time.""" current_time = datetime.now(timezone.utc) diff --git a/python/samples/getting_started/agents/openai/openai_chat_client_with_thread.py b/python/samples/getting_started/agents/openai/openai_chat_client_with_thread.py index 262630c6a0..0c6595ca16 100644 --- a/python/samples/getting_started/agents/openai/openai_chat_client_with_thread.py +++ b/python/samples/getting_started/agents/openai/openai_chat_client_with_thread.py @@ -5,6 +5,7 @@ from typing import Annotated from agent_framework import AgentThread, ChatAgent, ChatMessageStore +from agent_framework import tool from agent_framework.openai import OpenAIChatClient from pydantic import Field @@ -15,7 +16,8 @@ conversation threads and message history preservation across interactions. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_basic.py b/python/samples/getting_started/agents/openai/openai_responses_client_basic.py index adf7378125..c09a4c816a 100644 --- a/python/samples/getting_started/agents/openai/openai_responses_client_basic.py +++ b/python/samples/getting_started/agents/openai/openai_responses_client_basic.py @@ -5,6 +5,7 @@ from typing import Annotated from agent_framework import ChatAgent +from agent_framework import tool from agent_framework.openai import OpenAIResponsesClient from pydantic import Field @@ -15,7 +16,8 @@ response generation, showing both streaming and non-streaming responses. """ - +# NOTE: approval_mode="never_require" is for sample brevity. 
Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_with_code_interpreter.py b/python/samples/getting_started/agents/openai/openai_responses_client_with_code_interpreter.py index 8f55bdfbf9..5e8e9565ac 100644 --- a/python/samples/getting_started/agents/openai/openai_responses_client_with_code_interpreter.py +++ b/python/samples/getting_started/agents/openai/openai_responses_client_with_code_interpreter.py @@ -8,6 +8,7 @@ CodeInterpreterToolResultContent, HostedCodeInterpreterTool, TextContent, + tool, ) from agent_framework.openai import OpenAIResponsesClient diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_with_explicit_settings.py b/python/samples/getting_started/agents/openai/openai_responses_client_with_explicit_settings.py index ed541dd0ff..fa5583f296 100644 --- a/python/samples/getting_started/agents/openai/openai_responses_client_with_explicit_settings.py +++ b/python/samples/getting_started/agents/openai/openai_responses_client_with_explicit_settings.py @@ -7,6 +7,7 @@ from agent_framework.openai import OpenAIResponsesClient from pydantic import Field +from agent_framework import tool """ OpenAI Responses Client with Explicit Settings Example @@ -15,7 +16,8 @@ settings rather than relying on environment variable defaults. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_with_function_tools.py b/python/samples/getting_started/agents/openai/openai_responses_client_with_function_tools.py index b074214ef5..d18a522406 100644 --- a/python/samples/getting_started/agents/openai/openai_responses_client_with_function_tools.py +++ b/python/samples/getting_started/agents/openai/openai_responses_client_with_function_tools.py @@ -6,6 +6,7 @@ from typing import Annotated from agent_framework import ChatAgent +from agent_framework import tool from agent_framework.openai import OpenAIResponsesClient from pydantic import Field @@ -16,7 +17,8 @@ showing both agent-level and query-level tool configuration patterns. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: @@ -24,7 +26,7 @@ def get_weather( conditions = ["sunny", "cloudy", "rainy", "stormy"] return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C." 
- +@tool(approval_mode="never_require") def get_time() -> str: """Get the current UTC time.""" current_time = datetime.now(timezone.utc) diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_with_thread.py b/python/samples/getting_started/agents/openai/openai_responses_client_with_thread.py index ca52b4f074..6a7fc71efc 100644 --- a/python/samples/getting_started/agents/openai/openai_responses_client_with_thread.py +++ b/python/samples/getting_started/agents/openai/openai_responses_client_with_thread.py @@ -5,6 +5,7 @@ from typing import Annotated from agent_framework import AgentThread, ChatAgent +from agent_framework import tool from agent_framework.openai import OpenAIResponsesClient from pydantic import Field @@ -15,7 +16,8 @@ persistent conversation context and simplified response handling. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/azure_functions/02_multi_agent/function_app.py b/python/samples/getting_started/azure_functions/02_multi_agent/function_app.py index 2ebbc75caa..f7181cb4b1 100644 --- a/python/samples/getting_started/azure_functions/02_multi_agent/function_app.py +++ b/python/samples/getting_started/azure_functions/02_multi_agent/function_app.py @@ -13,10 +13,12 @@ from agent_framework.azure import AgentFunctionApp, AzureOpenAIChatClient from azure.identity import AzureCliCredential +from agent_framework import tool logger = logging.getLogger(__name__) - +# NOTE: approval_mode="never_require" is for sample brevity. 
Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather(location: str) -> dict[str, Any]: """Get current weather for a location.""" @@ -30,7 +32,7 @@ def get_weather(location: str) -> dict[str, Any]: logger.info(f"✓ [TOOL RESULT] {result}") return result - +@tool(approval_mode="never_require") def calculate_tip(bill_amount: float, tip_percentage: float = 15.0) -> dict[str, Any]: """Calculate tip amount and total bill.""" diff --git a/python/samples/getting_started/chat_client/azure_ai_chat_client.py b/python/samples/getting_started/chat_client/azure_ai_chat_client.py index 22b1324565..ab502b8f35 100644 --- a/python/samples/getting_started/chat_client/azure_ai_chat_client.py +++ b/python/samples/getting_started/chat_client/azure_ai_chat_client.py @@ -7,6 +7,7 @@ from agent_framework.azure import AzureAIAgentClient from azure.identity.aio import AzureCliCredential from pydantic import Field +from agent_framework import tool """ Azure AI Chat Client Direct Usage Example @@ -15,7 +16,8 @@ Shows function calling capabilities with custom business logic. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/chat_client/azure_assistants_client.py b/python/samples/getting_started/chat_client/azure_assistants_client.py index 7682bc1f90..1a40696bd5 100644 --- a/python/samples/getting_started/chat_client/azure_assistants_client.py +++ b/python/samples/getting_started/chat_client/azure_assistants_client.py @@ -7,6 +7,7 @@ from agent_framework.azure import AzureOpenAIAssistantsClient from azure.identity import AzureCliCredential from pydantic import Field +from agent_framework import tool """ Azure Assistants Client Direct Usage Example @@ -15,7 +16,8 @@ Shows function calling capabilities and automatic assistant creation. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/chat_client/azure_chat_client.py b/python/samples/getting_started/chat_client/azure_chat_client.py index cec17e59e9..211fc6d869 100644 --- a/python/samples/getting_started/chat_client/azure_chat_client.py +++ b/python/samples/getting_started/chat_client/azure_chat_client.py @@ -7,6 +7,7 @@ from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential from pydantic import Field +from agent_framework import tool """ Azure Chat Client Direct Usage Example @@ -15,7 +16,8 @@ Shows function calling capabilities with custom business logic. """ - +# NOTE: approval_mode="never_require" is for sample brevity. 
Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/chat_client/azure_responses_client.py b/python/samples/getting_started/chat_client/azure_responses_client.py index 756b295d7e..050225e559 100644 --- a/python/samples/getting_started/chat_client/azure_responses_client.py +++ b/python/samples/getting_started/chat_client/azure_responses_client.py @@ -5,6 +5,7 @@ from typing import Annotated from agent_framework import ChatResponse +from agent_framework import tool from agent_framework.azure import AzureOpenAIResponsesClient from azure.identity import AzureCliCredential from pydantic import BaseModel, Field @@ -16,7 +17,8 @@ Shows function calling capabilities with custom business logic. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/chat_client/openai_assistants_client.py b/python/samples/getting_started/chat_client/openai_assistants_client.py index bd3075cd90..b4dc03ea71 100644 --- a/python/samples/getting_started/chat_client/openai_assistants_client.py +++ b/python/samples/getting_started/chat_client/openai_assistants_client.py @@ -6,6 +6,7 @@ from agent_framework.openai import OpenAIAssistantsClient from pydantic import Field +from agent_framework import tool """ OpenAI Assistants Client Direct Usage Example @@ -15,7 +16,8 @@ """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/chat_client/openai_chat_client.py b/python/samples/getting_started/chat_client/openai_chat_client.py index 1a18fc24b8..f45f17d71f 100644 --- a/python/samples/getting_started/chat_client/openai_chat_client.py +++ b/python/samples/getting_started/chat_client/openai_chat_client.py @@ -6,6 +6,7 @@ from agent_framework.openai import OpenAIChatClient from pydantic import Field +from agent_framework import tool """ OpenAI Chat Client Direct Usage Example @@ -15,7 +16,8 @@ """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/chat_client/openai_responses_client.py b/python/samples/getting_started/chat_client/openai_responses_client.py index c626f530f3..2c5f3953e9 100644 --- a/python/samples/getting_started/chat_client/openai_responses_client.py +++ b/python/samples/getting_started/chat_client/openai_responses_client.py @@ -6,6 +6,7 @@ from agent_framework.openai import OpenAIResponsesClient from pydantic import Field +from agent_framework import tool """ OpenAI Responses Client Direct Usage Example @@ -15,7 +16,8 @@ """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/context_providers/mem0/mem0_basic.py b/python/samples/getting_started/context_providers/mem0/mem0_basic.py index e754d16821..a163b000e8 100644 --- a/python/samples/getting_started/context_providers/mem0/mem0_basic.py +++ b/python/samples/getting_started/context_providers/mem0/mem0_basic.py @@ -6,8 +6,10 @@ from agent_framework.azure import AzureAIAgentClient from agent_framework.mem0 import Mem0Provider from azure.identity.aio import AzureCliCredential +from agent_framework import tool - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") def retrieve_company_report(company_code: str, detailed: bool) -> str: if company_code != "CNTS": raise ValueError("Company code not found") diff --git a/python/samples/getting_started/context_providers/mem0/mem0_oss.py b/python/samples/getting_started/context_providers/mem0/mem0_oss.py index 03750b0d02..1f5591c004 100644 --- a/python/samples/getting_started/context_providers/mem0/mem0_oss.py +++ b/python/samples/getting_started/context_providers/mem0/mem0_oss.py @@ -7,8 +7,10 @@ from agent_framework.mem0 import Mem0Provider from azure.identity.aio import AzureCliCredential from mem0 import AsyncMemory +from agent_framework import tool - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def retrieve_company_report(company_code: str, detailed: bool) -> str: if company_code != "CNTS": raise ValueError("Company code not found") diff --git a/python/samples/getting_started/context_providers/mem0/mem0_threads.py b/python/samples/getting_started/context_providers/mem0/mem0_threads.py index c331666058..ea0375495a 100644 --- a/python/samples/getting_started/context_providers/mem0/mem0_threads.py +++ b/python/samples/getting_started/context_providers/mem0/mem0_threads.py @@ -6,8 +6,10 @@ from agent_framework.azure import AzureAIAgentClient from agent_framework.mem0 import Mem0Provider from azure.identity.aio import AzureCliCredential +from agent_framework import tool - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") def get_user_preferences(user_id: str) -> str: """Mock function to get user preferences.""" preferences = { diff --git a/python/samples/getting_started/context_providers/redis/redis_basics.py b/python/samples/getting_started/context_providers/redis/redis_basics.py index 043af246c2..afbe835cc0 100644 --- a/python/samples/getting_started/context_providers/redis/redis_basics.py +++ b/python/samples/getting_started/context_providers/redis/redis_basics.py @@ -31,12 +31,14 @@ import os from agent_framework import ChatMessage, Role +from agent_framework import tool from agent_framework.openai import OpenAIChatClient from agent_framework_redis._provider import RedisProvider from redisvl.extensions.cache.embeddings import EmbeddingsCache from redisvl.utils.vectorize import OpenAITextVectorizer - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def search_flights(origin_airport_code: str, destination_airport_code: str, detailed: bool = False) -> str: """Simulated flight-search tool to demonstrate tool memory. 
diff --git a/python/samples/getting_started/devui/azure_responses_agent/agent.py b/python/samples/getting_started/devui/azure_responses_agent/agent.py index a2a8dbf054..b2fbe9c995 100644 --- a/python/samples/getting_started/devui/azure_responses_agent/agent.py +++ b/python/samples/getting_started/devui/azure_responses_agent/agent.py @@ -21,7 +21,7 @@ import os from typing import Annotated -from agent_framework import ChatAgent, ai_function +from agent_framework import ChatAgent, tool from agent_framework.azure import AzureOpenAIResponsesClient logger = logging.getLogger(__name__) @@ -50,7 +50,8 @@ def analyze_content( return f"Analyzing content for: {query}" -@ai_function +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def summarize_document( length: Annotated[str, "Desired summary length: 'brief', 'medium', or 'detailed'"] = "medium", ) -> str: @@ -58,7 +59,7 @@ def summarize_document( return f"Generating {length} summary of the document..." 
-@ai_function +@tool(approval_mode="never_require") def extract_key_points( max_points: Annotated[int, "Maximum number of key points to extract"] = 5, ) -> str: diff --git a/python/samples/getting_started/devui/fanout_workflow/workflow.py b/python/samples/getting_started/devui/fanout_workflow/workflow.py index fa9d4edd92..bb84c28db7 100644 --- a/python/samples/getting_started/devui/fanout_workflow/workflow.py +++ b/python/samples/getting_started/devui/fanout_workflow/workflow.py @@ -25,6 +25,7 @@ WorkflowBuilder, WorkflowContext, handler, + tool, ) from pydantic import BaseModel, Field from typing_extensions import Never diff --git a/python/samples/getting_started/devui/foundry_agent/agent.py b/python/samples/getting_started/devui/foundry_agent/agent.py index 58f5fd41f5..9091bb2d7e 100644 --- a/python/samples/getting_started/devui/foundry_agent/agent.py +++ b/python/samples/getting_started/devui/foundry_agent/agent.py @@ -9,11 +9,13 @@ from typing import Annotated from agent_framework import ChatAgent +from agent_framework import tool from agent_framework.azure import AzureAIAgentClient from azure.identity.aio import AzureCliCredential from pydantic import Field - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: @@ -22,7 +24,7 @@ def get_weather( temperature = 22 return f"The weather in {location} is {conditions[0]} with a high of {temperature}°C." 
-
+@tool(approval_mode="never_require")
 def get_forecast(
     location: Annotated[str, Field(description="The location to get the forecast for.")],
     days: Annotated[int, Field(description="Number of days for forecast")] = 3,
diff --git a/python/samples/getting_started/devui/in_memory_mode.py b/python/samples/getting_started/devui/in_memory_mode.py
index 12bb63864c..597f9babf3 100644
--- a/python/samples/getting_started/devui/in_memory_mode.py
+++ b/python/samples/getting_started/devui/in_memory_mode.py
@@ -11,12 +11,15 @@
 from typing import Annotated
 
 from agent_framework import ChatAgent, Executor, WorkflowBuilder, WorkflowContext, handler
+from agent_framework import tool
 from agent_framework.azure import AzureOpenAIChatClient
 from agent_framework.devui import serve
 from typing_extensions import Never
 
 
+# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py.
 # Tool functions for the agent
+@tool(approval_mode="never_require")
 def get_weather(
     location: Annotated[str, "The location to get the weather for."],
 ) -> str:
@@ -25,7 +28,7 @@ def get_weather(
     conditions = ["sunny", "cloudy", "rainy", "stormy"]
     temperature = 53
     return f"The weather in {location} is {conditions[0]} with a high of {temperature}°C."
- +@tool(approval_mode="never_require") def get_time( timezone: Annotated[str, "The timezone to get time for."] = "UTC", ) -> str: diff --git a/python/samples/getting_started/devui/spam_workflow/workflow.py b/python/samples/getting_started/devui/spam_workflow/workflow.py index 73be349cc6..54cf6265ca 100644 --- a/python/samples/getting_started/devui/spam_workflow/workflow.py +++ b/python/samples/getting_started/devui/spam_workflow/workflow.py @@ -27,6 +27,7 @@ WorkflowContext, handler, response_handler, + tool, ) from pydantic import BaseModel, Field from typing_extensions import Never diff --git a/python/samples/getting_started/devui/weather_agent_azure/agent.py b/python/samples/getting_started/devui/weather_agent_azure/agent.py index 4616b4971a..56ba546135 100644 --- a/python/samples/getting_started/devui/weather_agent_azure/agent.py +++ b/python/samples/getting_started/devui/weather_agent_azure/agent.py @@ -15,7 +15,7 @@ FunctionInvocationContext, Role, TextContent, - ai_function, + tool, chat_middleware, function_middleware, ) @@ -98,6 +98,8 @@ async def atlantis_location_filter_middleware( await next(context) +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, "The location to get the weather for."], ) -> str: @@ -107,6 +109,7 @@ def get_weather( return f"The weather in {location} is {conditions[0]} with a high of {temperature}°C." 
+@tool(approval_mode="never_require") def get_forecast( location: Annotated[str, "The location to get the forecast for."], days: Annotated[int, "Number of days for forecast"] = 3, @@ -123,7 +126,7 @@ def get_forecast( return f"Weather forecast for {location}:\n" + "\n".join(forecast) -@ai_function(approval_mode="always_require") +@tool(approval_mode="always_require") def send_email( recipient: Annotated[str, "The email address of the recipient."], subject: Annotated[str, "The subject of the email."], diff --git a/python/samples/getting_started/mcp/agent_as_mcp_server.py b/python/samples/getting_started/mcp/agent_as_mcp_server.py index 5efa6885ca..4171bf41bc 100644 --- a/python/samples/getting_started/mcp/agent_as_mcp_server.py +++ b/python/samples/getting_started/mcp/agent_as_mcp_server.py @@ -4,6 +4,7 @@ import anyio from agent_framework.openai import OpenAIResponsesClient +from agent_framework import tool """ This sample demonstrates how to expose an Agent as an MCP server. @@ -30,7 +31,8 @@ ``` """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") def get_specials() -> Annotated[str, "Returns the specials from the menu."]: return """ Special Soup: Clam Chowder @@ -38,7 +40,7 @@ def get_specials() -> Annotated[str, "Returns the specials from the menu."]: Special Drink: Chai Tea """ - +@tool(approval_mode="never_require") def get_item_price( menu_item: Annotated[str, "The name of the menu item."], ) -> Annotated[str, "Returns the price of the menu item."]: diff --git a/python/samples/getting_started/middleware/agent_and_run_level_middleware.py b/python/samples/getting_started/middleware/agent_and_run_level_middleware.py index a77fa43f34..3732a8fbc2 100644 --- a/python/samples/getting_started/middleware/agent_and_run_level_middleware.py +++ b/python/samples/getting_started/middleware/agent_and_run_level_middleware.py @@ -11,6 +11,7 @@ AgentResponse, AgentRunContext, FunctionInvocationContext, + tool, ) from agent_framework.azure import AzureAIAgentClient from azure.identity.aio import AzureCliCredential @@ -33,6 +34,8 @@ Execution order: Agent middleware (outermost) -> Run middleware (innermost) -> Agent execution """ +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], diff --git a/python/samples/getting_started/middleware/chat_middleware.py b/python/samples/getting_started/middleware/chat_middleware.py index 5686072504..8c26957e96 100644 --- a/python/samples/getting_started/middleware/chat_middleware.py +++ b/python/samples/getting_started/middleware/chat_middleware.py @@ -12,6 +12,7 @@ ChatResponse, Role, chat_middleware, + tool, ) from agent_framework.azure import AzureAIAgentClient from azure.identity.aio import AzureCliCredential @@ -35,6 +36,8 @@ - Middleware registration at run level (applies to specific run only) """ +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], diff --git a/python/samples/getting_started/middleware/class_based_middleware.py b/python/samples/getting_started/middleware/class_based_middleware.py index 13febc83dc..59af506e74 100644 --- a/python/samples/getting_started/middleware/class_based_middleware.py +++ b/python/samples/getting_started/middleware/class_based_middleware.py @@ -14,6 +14,7 @@ FunctionInvocationContext, FunctionMiddleware, Role, + tool, ) from agent_framework.azure import AzureAIAgentClient from azure.identity.aio import AzureCliCredential @@ -33,6 +34,8 @@ from object-oriented design patterns. """ +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], diff --git a/python/samples/getting_started/middleware/decorator_middleware.py b/python/samples/getting_started/middleware/decorator_middleware.py index ca87a943c6..99683fad42 100644 --- a/python/samples/getting_started/middleware/decorator_middleware.py +++ b/python/samples/getting_started/middleware/decorator_middleware.py @@ -6,6 +6,7 @@ from agent_framework import ( agent_middleware, function_middleware, + tool, ) from agent_framework.azure import AzureAIAgentClient from azure.identity.aio import AzureCliCredential @@ -40,6 +41,8 @@ - Prevents type mismatches """ +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_current_time() -> str: """Get the current time.""" diff --git a/python/samples/getting_started/middleware/exception_handling_with_middleware.py b/python/samples/getting_started/middleware/exception_handling_with_middleware.py index 61cc254b9e..4bd102c4ff 100644 --- a/python/samples/getting_started/middleware/exception_handling_with_middleware.py +++ b/python/samples/getting_started/middleware/exception_handling_with_middleware.py @@ -5,6 +5,7 @@ from typing import Annotated from agent_framework import FunctionInvocationContext +from agent_framework import tool from agent_framework.azure import AzureAIAgentClient from azure.identity.aio import AzureCliCredential from pydantic import Field @@ -23,7 +24,8 @@ a helpful message for the user, preventing raw exceptions from reaching the end user. """ - +# NOTE: approval_mode="never_require" is for sample brevity. 
Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def unstable_data_service( query: Annotated[str, Field(description="The data query to execute.")], ) -> str: diff --git a/python/samples/getting_started/middleware/function_based_middleware.py b/python/samples/getting_started/middleware/function_based_middleware.py index 24defa5e10..83cc9eead6 100644 --- a/python/samples/getting_started/middleware/function_based_middleware.py +++ b/python/samples/getting_started/middleware/function_based_middleware.py @@ -9,6 +9,7 @@ from agent_framework import ( AgentRunContext, FunctionInvocationContext, + tool, ) from agent_framework.azure import AzureAIAgentClient from azure.identity.aio import AzureCliCredential @@ -29,6 +30,8 @@ can be implemented as async functions that accept context and next parameters. """ +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], diff --git a/python/samples/getting_started/middleware/middleware_termination.py b/python/samples/getting_started/middleware/middleware_termination.py index ddd4a699bb..8b8c771ff3 100644 --- a/python/samples/getting_started/middleware/middleware_termination.py +++ b/python/samples/getting_started/middleware/middleware_termination.py @@ -11,6 +11,7 @@ AgentRunContext, ChatMessage, Role, + tool, ) from agent_framework.azure import AzureAIAgentClient from azure.identity.aio import AzureCliCredential @@ -28,6 +29,8 @@ This is useful for implementing security checks, rate limiting, or early exit conditions. 
""" +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], diff --git a/python/samples/getting_started/middleware/override_result_with_middleware.py b/python/samples/getting_started/middleware/override_result_with_middleware.py index 5738a0669e..e364eac279 100644 --- a/python/samples/getting_started/middleware/override_result_with_middleware.py +++ b/python/samples/getting_started/middleware/override_result_with_middleware.py @@ -12,6 +12,7 @@ ChatMessage, Role, TextContent, + tool, ) from agent_framework.azure import AzureAIAgentClient from azure.identity.aio import AzureCliCredential @@ -34,6 +35,8 @@ it creates a custom async generator that yields the override message in chunks. """ +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], diff --git a/python/samples/getting_started/middleware/runtime_context_delegation.py b/python/samples/getting_started/middleware/runtime_context_delegation.py index abc6340324..300d6cdb22 100644 --- a/python/samples/getting_started/middleware/runtime_context_delegation.py +++ b/python/samples/getting_started/middleware/runtime_context_delegation.py @@ -4,7 +4,7 @@ from collections.abc import Awaitable, Callable from typing import Annotated -from agent_framework import FunctionInvocationContext, ai_function, function_middleware +from agent_framework import FunctionInvocationContext, tool, function_middleware from agent_framework.openai import OpenAIChatClient from pydantic import Field @@ -81,7 +81,8 @@ async def inject_context_middleware( runtime_context = SessionContextContainer() -@ai_function +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") async def send_email( to: Annotated[str, Field(description="Recipient email address")], subject: Annotated[str, Field(description="Email subject line")], @@ -112,7 +113,7 @@ async def send_email( return f"Email sent to {to} from user {user_id} (tenant: {tenant}). 
Subject: '{subject}'" -@ai_function +@tool(approval_mode="never_require") async def send_notification( message: Annotated[str, Field(description="Notification message to send")], priority: Annotated[str, Field(description="Priority level: low, medium, high")] = "medium", @@ -241,7 +242,7 @@ async def pattern_1_single_agent_with_closure() -> None: # Create tools for sub-agents (these will use kwargs propagation) -@ai_function +@tool(approval_mode="never_require") async def send_email_v2( to: Annotated[str, Field(description="Recipient email")], subject: Annotated[str, Field(description="Subject")], @@ -253,7 +254,7 @@ async def send_email_v2( return f"Email sent to {to} with subject '{subject}'" -@ai_function +@tool(approval_mode="never_require") async def send_sms( phone: Annotated[str, Field(description="Phone number")], message: Annotated[str, Field(description="SMS message")], @@ -377,7 +378,7 @@ async def validate_and_track( await next(context) -@ai_function +@tool(approval_mode="never_require") async def protected_operation(operation: Annotated[str, Field(description="Operation to perform")]) -> str: """Protected operation that requires authentication.""" return f"Executed protected operation: {operation}" diff --git a/python/samples/getting_started/middleware/shared_state_middleware.py b/python/samples/getting_started/middleware/shared_state_middleware.py index eb22d11fcb..9b568a2ff6 100644 --- a/python/samples/getting_started/middleware/shared_state_middleware.py +++ b/python/samples/getting_started/middleware/shared_state_middleware.py @@ -7,6 +7,7 @@ from agent_framework import ( FunctionInvocationContext, + tool, ) from agent_framework.azure import AzureAIAgentClient from azure.identity.aio import AzureCliCredential @@ -25,6 +26,8 @@ This approach shows how middleware can work together by sharing state within the same class instance. """ +# NOTE: approval_mode="never_require" is for sample brevity. 
Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], @@ -33,6 +36,7 @@ def get_weather( conditions = ["sunny", "cloudy", "rainy", "stormy"] return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C." +@tool(approval_mode="never_require") def get_time( timezone: Annotated[str, Field(description="The timezone to get the time for.")] = "UTC", diff --git a/python/samples/getting_started/middleware/thread_behavior_middleware.py b/python/samples/getting_started/middleware/thread_behavior_middleware.py index a0b1b3d2dd..d7723812c9 100644 --- a/python/samples/getting_started/middleware/thread_behavior_middleware.py +++ b/python/samples/getting_started/middleware/thread_behavior_middleware.py @@ -7,6 +7,7 @@ from agent_framework import ( AgentRunContext, ChatMessageStore, + tool, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential @@ -30,6 +31,8 @@ 4. After next(): thread contains full conversation history (all previous + current messages) """ +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], diff --git a/python/samples/getting_started/minimal_sample.py b/python/samples/getting_started/minimal_sample.py index f312786cfd..ec28486282 100644 --- a/python/samples/getting_started/minimal_sample.py +++ b/python/samples/getting_started/minimal_sample.py @@ -5,8 +5,10 @@ from typing import Annotated from agent_framework.openai import OpenAIChatClient +from agent_framework import tool - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, "The location to get the weather for."], ) -> str: diff --git a/python/samples/getting_started/observability/advanced_manual_setup_console_output.py b/python/samples/getting_started/observability/advanced_manual_setup_console_output.py index 53c369c3ed..411d0ed2a6 100644 --- a/python/samples/getting_started/observability/advanced_manual_setup_console_output.py +++ b/python/samples/getting_started/observability/advanced_manual_setup_console_output.py @@ -19,6 +19,7 @@ from opentelemetry.semconv._incubating.attributes.service_attributes import SERVICE_NAME from opentelemetry.trace import set_tracer_provider from pydantic import Field +from agent_framework import tool """ This sample shows how to manually configure to send traces, logs, and metrics to the console, @@ -64,7 +65,8 @@ def setup_metrics(): # Sets the global default meter provider set_meter_provider(meter_provider) - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") async def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/observability/advanced_zero_code.py b/python/samples/getting_started/observability/advanced_zero_code.py index 91e3703dac..d6dcef3b76 100644 --- a/python/samples/getting_started/observability/advanced_zero_code.py +++ b/python/samples/getting_started/observability/advanced_zero_code.py @@ -9,6 +9,7 @@ from opentelemetry.trace import SpanKind from opentelemetry.trace.span import format_trace_id from pydantic import Field +from agent_framework import tool if TYPE_CHECKING: from agent_framework import ChatClientProtocol @@ -38,7 +39,8 @@ """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") async def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/observability/agent_observability.py b/python/samples/getting_started/observability/agent_observability.py index cd1b505194..bdfa3fdcd3 100644 --- a/python/samples/getting_started/observability/agent_observability.py +++ b/python/samples/getting_started/observability/agent_observability.py @@ -5,6 +5,7 @@ from typing import Annotated from agent_framework import ChatAgent +from agent_framework import tool from agent_framework.observability import configure_otel_providers, get_tracer from agent_framework.openai import OpenAIChatClient from opentelemetry.trace import SpanKind @@ -16,7 +17,8 @@ same observability setup function. """ - +# NOTE: approval_mode="never_require" is for sample brevity. 
Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") async def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/observability/agent_with_foundry_tracing.py b/python/samples/getting_started/observability/agent_with_foundry_tracing.py index 9bce1f1b4a..30921b26ba 100644 --- a/python/samples/getting_started/observability/agent_with_foundry_tracing.py +++ b/python/samples/getting_started/observability/agent_with_foundry_tracing.py @@ -8,6 +8,7 @@ import dotenv from agent_framework import ChatAgent +from agent_framework import tool from agent_framework.observability import create_resource, enable_instrumentation, get_tracer from agent_framework.openai import OpenAIResponsesClient from azure.ai.projects.aio import AIProjectClient @@ -31,7 +32,8 @@ logger = logging.getLogger(__name__) - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") async def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/observability/azure_ai_agent_observability.py b/python/samples/getting_started/observability/azure_ai_agent_observability.py index f5804f4cfd..c9827cb382 100644 --- a/python/samples/getting_started/observability/azure_ai_agent_observability.py +++ b/python/samples/getting_started/observability/azure_ai_agent_observability.py @@ -7,6 +7,7 @@ import dotenv from agent_framework import ChatAgent +from agent_framework import tool from agent_framework.azure import AzureAIClient from agent_framework.observability import get_tracer from azure.ai.projects.aio import AIProjectClient @@ -28,7 +29,8 @@ # For loading the `AZURE_AI_PROJECT_ENDPOINT` environment variable dotenv.load_dotenv() - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") async def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/samples/getting_started/observability/configure_otel_providers_with_env_var.py b/python/samples/getting_started/observability/configure_otel_providers_with_env_var.py index 57bb3ab302..f900b8cf6e 100644 --- a/python/samples/getting_started/observability/configure_otel_providers_with_env_var.py +++ b/python/samples/getting_started/observability/configure_otel_providers_with_env_var.py @@ -6,7 +6,7 @@ from random import randint from typing import TYPE_CHECKING, Annotated, Literal -from agent_framework import ai_function +from agent_framework import tool from agent_framework.observability import configure_otel_providers, get_tracer from agent_framework.openai import OpenAIResponsesClient from opentelemetry import trace @@ -28,9 +28,11 @@ """ # Define the scenarios that can be run to show the telemetry data collected by the SDK -SCENARIOS = ["chat_client", "chat_client_stream", "ai_function", "all"] +SCENARIOS = ["chat_client", "chat_client_stream", "tool", "all"] +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") async def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: @@ -78,7 +80,7 @@ async def run_chat_client(client: "ChatClientProtocol", stream: bool = False) -> print(f"Assistant: {response}") -async def run_ai_function() -> None: +async def run_tool() -> None: """Run a AI function. This function runs a AI function and prints the output. 
@@ -90,12 +92,12 @@ async def run_ai_function() -> None: """ with get_tracer().start_as_current_span("Scenario: AI Function", kind=trace.SpanKind.CLIENT): print("Running scenario: AI Function") - func = ai_function(get_weather) + func = get_weather  # already a FunctionTool thanks to the @tool decorator above weather = await func.invoke(location="Amsterdam") print(f"Weather in Amsterdam:\n{weather}") -async def main(scenario: Literal["chat_client", "chat_client_stream", "ai_function", "all"] = "all"): +async def main(scenario: Literal["chat_client", "chat_client_stream", "tool", "all"] = "all"): """Run the selected scenario(s).""" # This will enable tracing and create the necessary tracing, logging and metrics providers @@ -108,9 +110,9 @@ async def main(scenario: Literal["chat_client", "chat_client_stream", "ai_functi client = OpenAIResponsesClient() # Scenarios where telemetry is collected in the SDK, from the most basic to the most complex. - if scenario == "ai_function" or scenario == "all": + if scenario == "tool" or scenario == "all": with suppress(Exception): - await run_ai_function() + await run_tool() if scenario == "chat_client_stream" or scenario == "all": with suppress(Exception): await run_chat_client(client, stream=True) diff --git a/python/samples/getting_started/observability/configure_otel_providers_with_parameters.py b/python/samples/getting_started/observability/configure_otel_providers_with_parameters.py index 50fadbeed5..a69dfe76ec 100644 --- a/python/samples/getting_started/observability/configure_otel_providers_with_parameters.py +++ b/python/samples/getting_started/observability/configure_otel_providers_with_parameters.py @@ -6,7 +6,7 @@ from random import randint from typing import TYPE_CHECKING, Annotated, Literal -from agent_framework import ai_function, setup_logging +from agent_framework import tool, setup_logging from agent_framework.observability import configure_otel_providers, get_tracer from agent_framework.openai import OpenAIResponsesClient from opentelemetry import trace @@ -28,9
+28,11 @@ """ # Define the scenarios that can be run to show the telemetry data collected by the SDK -SCENARIOS = ["chat_client", "chat_client_stream", "ai_function", "all"] +SCENARIOS = ["chat_client", "chat_client_stream", "tool", "all"] +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") async def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: @@ -78,7 +80,7 @@ async def run_chat_client(client: "ChatClientProtocol", stream: bool = False) -> print(f"Assistant: {response}") -async def run_ai_function() -> None: +async def run_tool() -> None: """Run a AI function. This function runs a AI function and prints the output. @@ -90,12 +92,12 @@ async def run_ai_function() -> None: """ with get_tracer().start_as_current_span("Scenario: AI Function", kind=trace.SpanKind.CLIENT): print("Running scenario: AI Function") - func = ai_function(get_weather) + func = get_weather  # already a FunctionTool thanks to the @tool decorator above weather = await func.invoke(location="Amsterdam") print(f"Weather in Amsterdam:\n{weather}") -async def main(scenario: Literal["chat_client", "chat_client_stream", "ai_function", "all"] = "all"): +async def main(scenario: Literal["chat_client", "chat_client_stream", "tool", "all"] = "all"): """Run the selected scenario(s).""" # Setup the logging with the more complete format @@ -137,9 +139,9 @@ async def main(scenario: Literal["chat_client", "chat_client_stream", "ai_functi client = OpenAIResponsesClient() # Scenarios where telemetry is collected in the SDK, from the most basic to the most complex.
- if scenario == "ai_function" or scenario == "all": + if scenario == "tool" or scenario == "all": with suppress(Exception): - await run_ai_function() + await run_tool() if scenario == "chat_client_stream" or scenario == "all": with suppress(Exception): await run_chat_client(client, stream=True) diff --git a/python/samples/getting_started/observability/workflow_observability.py b/python/samples/getting_started/observability/workflow_observability.py index 7cd5174025..57e636fd68 100644 --- a/python/samples/getting_started/observability/workflow_observability.py +++ b/python/samples/getting_started/observability/workflow_observability.py @@ -8,6 +8,7 @@ WorkflowContext, WorkflowOutputEvent, handler, + tool, ) from agent_framework.observability import configure_otel_providers, get_tracer from opentelemetry.trace import SpanKind diff --git a/python/samples/getting_started/tools/README.md b/python/samples/getting_started/tools/README.md index 7c2d09cee9..e732784dbb 100644 --- a/python/samples/getting_started/tools/README.md +++ b/python/samples/getting_started/tools/README.md @@ -1,25 +1,29 @@ # Tools Examples -This folder contains examples demonstrating how to use AI functions (tools) with the Agent Framework. AI functions allow agents to interact with external systems, perform computations, and execute custom logic. +This folder contains examples demonstrating how to use local tools with the Agent Framework. Local tools allow agents to interact with external systems, perform computations, and execute custom logic. + +Note: Several examples set `approval_mode="never_require"` to keep the samples concise. For production scenarios, +keep `approval_mode="always_require"` unless you are confident in the tool behavior and approval flow. See +`function_tool_with_approval.py` and `function_tool_with_approval_and_threads.py` for end-to-end approval handling. 
## Examples | File | Description | |------|-------------| -| [`ai_function_declaration_only.py`](ai_function_declaration_only.py) | Demonstrates how to create function declarations without implementations. Useful for testing agent reasoning about tool usage or when tools are defined elsewhere. Shows how agents request tool calls even when the tool won't be executed. | -| [`ai_function_from_dict_with_dependency_injection.py`](ai_function_from_dict_with_dependency_injection.py) | Shows how to create AI functions from dictionary definitions using dependency injection. The function implementation is injected at runtime during deserialization, enabling dynamic tool creation and configuration. Note: This serialization/deserialization feature is in active development. | -| [`ai_function_recover_from_failures.py`](ai_function_recover_from_failures.py) | Demonstrates graceful error handling when tools raise exceptions. Shows how agents receive error information and can recover from failures, deciding whether to retry or respond differently based on the exception. | -| [`ai_function_with_approval.py`](ai_function_with_approval.py) | Shows how to implement user approval workflows for function calls without using threads. Demonstrates both streaming and non-streaming approval patterns where users can approve or reject function executions before they run. | -| [`ai_function_with_approval_and_threads.py`](ai_function_with_approval_and_threads.py) | Demonstrates tool approval workflows using threads for automatic conversation history management. Shows how threads simplify approval workflows by automatically storing and retrieving conversation context. Includes both approval and rejection examples. | -| [`ai_function_with_kwargs.py`](ai_function_with_kwargs.py) | Demonstrates how to inject custom arguments (context) into an AI function from the agent's run method. Useful for passing runtime information like access tokens or user IDs that the tool needs but the model shouldn't see. 
| -| [`ai_function_with_thread_injection.py`](ai_function_with_thread_injection.py) | Shows how to access the current `thread` object inside an AI function via `**kwargs`. | -| [`ai_function_with_max_exceptions.py`](ai_function_with_max_exceptions.py) | Shows how to limit the number of times a tool can fail with exceptions using `max_invocation_exceptions`. Useful for preventing expensive tools from being called repeatedly when they keep failing. | -| [`ai_function_with_max_invocations.py`](ai_function_with_max_invocations.py) | Demonstrates limiting the total number of times a tool can be invoked using `max_invocations`. Useful for rate-limiting expensive operations or ensuring tools are only called a specific number of times per conversation. | -| [`ai_functions_in_class.py`](ai_functions_in_class.py) | Shows how to use `ai_function` decorator with class methods to create stateful tools. Demonstrates how class state can control tool behavior dynamically, allowing you to adjust tool functionality at runtime by modifying class properties. | +| [`function_tool_declaration_only.py`](function_tool_declaration_only.py) | Demonstrates how to create function declarations without implementations. Useful for testing agent reasoning about tool usage or when tools are defined elsewhere. Shows how agents request tool calls even when the tool won't be executed. | +| [`function_tool_from_dict_with_dependency_injection.py`](function_tool_from_dict_with_dependency_injection.py) | Shows how to create local tools from dictionary definitions using dependency injection. The function implementation is injected at runtime during deserialization, enabling dynamic tool creation and configuration. Note: This serialization/deserialization feature is in active development. | +| [`function_tool_recover_from_failures.py`](function_tool_recover_from_failures.py) | Demonstrates graceful error handling when tools raise exceptions. 
Shows how agents receive error information and can recover from failures, deciding whether to retry or respond differently based on the exception. | +| [`function_tool_with_approval.py`](function_tool_with_approval.py) | Shows how to implement user approval workflows for function calls without using threads. Demonstrates both streaming and non-streaming approval patterns where users can approve or reject function executions before they run. | +| [`function_tool_with_approval_and_threads.py`](function_tool_with_approval_and_threads.py) | Demonstrates tool approval workflows using threads for automatic conversation history management. Shows how threads simplify approval workflows by automatically storing and retrieving conversation context. Includes both approval and rejection examples. | +| [`function_tool_with_kwargs.py`](function_tool_with_kwargs.py) | Demonstrates how to inject custom arguments (context) into a local tool from the agent's run method. Useful for passing runtime information like access tokens or user IDs that the tool needs but the model shouldn't see. | +| [`function_tool_with_thread_injection.py`](function_tool_with_thread_injection.py) | Shows how to access the current `thread` object inside a local tool via `**kwargs`. | +| [`function_tool_with_max_exceptions.py`](function_tool_with_max_exceptions.py) | Shows how to limit the number of times a tool can fail with exceptions using `max_invocation_exceptions`. Useful for preventing expensive tools from being called repeatedly when they keep failing. | +| [`function_tool_with_max_invocations.py`](function_tool_with_max_invocations.py) | Demonstrates limiting the total number of times a tool can be invoked using `max_invocations`. Useful for rate-limiting expensive operations or ensuring tools are only called a specific number of times per conversation. | +| [`tool_in_class.py`](tool_in_class.py) | Shows how to use the `tool` decorator with class methods to create stateful tools. 
Demonstrates how class state can control tool behavior dynamically, allowing you to adjust tool functionality at runtime by modifying class properties. | ## Key Concepts -### AI Function Features +### Local Tool Features - **Function Declarations**: Define tool schemas without implementations for testing or external tools - **Dependency Injection**: Create tools from configurations with runtime-injected implementations @@ -33,10 +37,10 @@ This folder contains examples demonstrating how to use AI functions (tools) with #### Basic Tool Definition ```python -from agent_framework import ai_function +from agent_framework import tool from typing import Annotated -@ai_function +@tool(approval_mode="never_require") def my_tool(param: Annotated[str, "Description"]) -> str: """Tool description for the AI.""" return f"Result: {param}" @@ -45,7 +49,7 @@ def my_tool(param: Annotated[str, "Description"]) -> str: #### Tool with Approval ```python -@ai_function(approval_mode="always_require") +@tool(approval_mode="always_require") def sensitive_operation(data: Annotated[str, "Data to process"]) -> str: """This requires user approval before execution.""" return f"Processed: {data}" @@ -54,12 +58,12 @@ def sensitive_operation(data: Annotated[str, "Data to process"]) -> str: #### Tool with Invocation Limits ```python -@ai_function(max_invocations=3) +@tool(max_invocations=3) def limited_tool() -> str: """Can only be called 3 times total.""" return "Result" -@ai_function(max_invocation_exceptions=2) +@tool(max_invocation_exceptions=2) def fragile_tool() -> str: """Can only fail 2 times before being disabled.""" return "Result" @@ -115,7 +119,7 @@ Two approaches for handling approvals: Each example is a standalone Python script that can be run directly: ```bash -uv run python ai_function_with_approval.py +uv run python function_tool_with_approval.py ``` Make sure you have the necessary environment variables configured (like `OPENAI_API_KEY` or Azure credentials) before running the 
examples. diff --git a/python/samples/getting_started/tools/function_invocation_configuration.py b/python/samples/getting_started/tools/function_invocation_configuration.py index c1966d4627..c53eab01a3 100644 --- a/python/samples/getting_started/tools/function_invocation_configuration.py +++ b/python/samples/getting_started/tools/function_invocation_configuration.py @@ -4,15 +4,17 @@ from typing import Annotated from agent_framework.openai import OpenAIResponsesClient +from agent_framework import tool """ This sample demonstrates how to configure function invocation settings -for an client and use a simple ai_function as a tool in an agent. +for an client and use a simple tool as a tool in an agent. This behavior is the same for all chat client types. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def add( x: Annotated[int, "First number"], y: Annotated[int, "Second number"], diff --git a/python/samples/getting_started/tools/ai_function_declaration_only.py b/python/samples/getting_started/tools/function_tool_declaration_only.py similarity index 92% rename from python/samples/getting_started/tools/ai_function_declaration_only.py rename to python/samples/getting_started/tools/function_tool_declaration_only.py index 320d62f727..c82f04a371 100644 --- a/python/samples/getting_started/tools/ai_function_declaration_only.py +++ b/python/samples/getting_started/tools/function_tool_declaration_only.py @@ -1,6 +1,6 @@ # Copyright (c) Microsoft. All rights reserved. 
-from agent_framework import AIFunction +from agent_framework import FunctionTool from agent_framework.openai import OpenAIResponsesClient """ @@ -8,13 +8,13 @@ This is useful when you want the agent to use tools that are defined elsewhere or when you want to test the agent's ability to reason about tool usage without executing them. -The only difference is that you provide an AIFunction without a function. +The only difference is that you provide a FunctionTool without a function. If you need a input_model, you can still provide that as well. """ async def main(): - function_declaration = AIFunction[None, None]( + function_declaration = FunctionTool( name="get_current_time", description="Get the current time in ISO 8601 format.", ) diff --git a/python/samples/getting_started/tools/ai_function_from_dict_with_dependency_injection.py b/python/samples/getting_started/tools/function_tool_from_dict_with_dependency_injection.py similarity index 76% rename from python/samples/getting_started/tools/ai_function_from_dict_with_dependency_injection.py rename to python/samples/getting_started/tools/function_tool_from_dict_with_dependency_injection.py index a445255abf..126d937f43 100644 --- a/python/samples/getting_started/tools/ai_function_from_dict_with_dependency_injection.py +++ b/python/samples/getting_started/tools/function_tool_from_dict_with_dependency_injection.py @@ -1,31 +1,31 @@ # Copyright (c) Microsoft. All rights reserved. # type: ignore """ -AIFunction Tool with Dependency Injection Example +Local Tool with Dependency Injection Example -This example demonstrates how to create an AIFunction tool using the agent framework's +This example demonstrates how to create a FunctionTool using the agent framework's dependency injection system. Instead of providing the function at initialization time, the actual callable function is injected during deserialization from a dictionary definition. 
Note: The serialization and deserialization feature used in this example is currently - in active development. The API may change in future versions as we continue + in active development. The API may change in future versions as we continue to improve and extend its functionality. Please refer to the latest documentation for any updates to the dependency injection patterns. Usage: - Run this script to see how an AIFunction tool can be created from a dictionary + Run this script to see how a FunctionTool can be created from a dictionary definition with the function injected at runtime. The agent will use this tool to perform arithmetic operations. """ import asyncio -from agent_framework import AIFunction +from agent_framework import FunctionTool from agent_framework.openai import OpenAIResponsesClient definition = { - "type": "ai_function", + "type": "function_tool", "name": "add_numbers", "description": "Add two numbers together.", "input_model": { @@ -47,15 +47,15 @@ def func(a, b) -> int: """Add two numbers together.""" return a + b - # Create the AIFunction tool using dependency injection + # Create the FunctionTool using dependency injection # The 'definition' dictionary contains the serialized tool configuration, # while the actual function implementation is provided via dependencies. 
# - # Dependency structure: {"ai_function": {"name:add_numbers": {"func": func}}} - # - "ai_function": matches the tool type identifier + # Dependency structure: {"function_tool": {"name:add_numbers": {"func": func}}} + # - "function_tool": matches the tool type identifier # - "name:add_numbers": instance-specific injection targeting tools with name="add_numbers" # - "func": the parameter name that will receive the injected function - tool = AIFunction.from_dict(definition, dependencies={"ai_function": {"name:add_numbers": {"func": func}}}) + tool = FunctionTool.from_dict(definition, dependencies={"function_tool": {"name:add_numbers": {"func": func}}}) agent = OpenAIResponsesClient().as_agent( name="FunctionToolAgent", instructions="You are a helpful assistant.", tools=tool diff --git a/python/samples/getting_started/tools/ai_function_recover_from_failures.py b/python/samples/getting_started/tools/function_tool_recover_from_failures.py similarity index 91% rename from python/samples/getting_started/tools/ai_function_recover_from_failures.py rename to python/samples/getting_started/tools/function_tool_recover_from_failures.py index ed6d0fe136..1349421b24 100644 --- a/python/samples/getting_started/tools/ai_function_recover_from_failures.py +++ b/python/samples/getting_started/tools/function_tool_recover_from_failures.py @@ -4,6 +4,7 @@ from typing import Annotated from agent_framework import FunctionCallContent, FunctionResultContent +from agent_framework import tool from agent_framework.openai import OpenAIResponsesClient """ @@ -13,13 +14,16 @@ The LLM decides whether to retry the call or to respond with something else, based on the exception. """ - +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") def greet(name: Annotated[str, "Name to greet"]) -> str: """Greet someone.""" return f"Hello, {name}!" # we trick the AI into calling this function with 0 as denominator to trigger the exception +@tool(approval_mode="never_require") def safe_divide( a: Annotated[int, "Numerator"], b: Annotated[int, "Denominator"], diff --git a/python/samples/getting_started/tools/ai_function_with_approval.py b/python/samples/getting_started/tools/function_tool_with_approval.py similarity index 94% rename from python/samples/getting_started/tools/ai_function_with_approval.py rename to python/samples/getting_started/tools/function_tool_with_approval.py index a74e1aed3f..9c026b8bc6 100644 --- a/python/samples/getting_started/tools/ai_function_with_approval.py +++ b/python/samples/getting_started/tools/function_tool_with_approval.py @@ -4,7 +4,7 @@ from random import randrange from typing import TYPE_CHECKING, Annotated, Any -from agent_framework import AgentResponse, ChatAgent, ChatMessage, ai_function +from agent_framework import AgentResponse, ChatAgent, ChatMessage, tool from agent_framework.openai import OpenAIResponsesClient if TYPE_CHECKING: @@ -20,7 +20,8 @@ conditions = ["sunny", "cloudy", "raining", "snowing", "clear"] -@ai_function +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see the get_weather_detail example below and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather(location: Annotated[str, "The city and state, e.g. San Francisco, CA"]) -> str: """Get the current weather for a given location.""" # Simulate weather data @@ -28,7 +29,7 @@ def get_weather(location: Annotated[str, "The city and state, e.g.
San Francisco # Define a simple weather tool that requires approval -@ai_function(approval_mode="always_require") +@tool(approval_mode="always_require") def get_weather_detail(location: Annotated[str, "The city and state, e.g. San Francisco, CA"]) -> str: """Get the current weather for a given location.""" # Simulate weather data diff --git a/python/samples/getting_started/tools/ai_function_with_approval_and_threads.py b/python/samples/getting_started/tools/function_tool_with_approval_and_threads.py similarity index 96% rename from python/samples/getting_started/tools/ai_function_with_approval_and_threads.py rename to python/samples/getting_started/tools/function_tool_with_approval_and_threads.py index 2da16a2101..80940efc1f 100644 --- a/python/samples/getting_started/tools/ai_function_with_approval_and_threads.py +++ b/python/samples/getting_started/tools/function_tool_with_approval_and_threads.py @@ -3,7 +3,7 @@ import asyncio from typing import Annotated -from agent_framework import ChatAgent, ChatMessage, ai_function +from agent_framework import ChatAgent, ChatMessage, tool from agent_framework.azure import AzureOpenAIChatClient """ @@ -15,7 +15,7 @@ """ -@ai_function(approval_mode="always_require") +@tool(approval_mode="always_require") def add_to_calendar( event_name: Annotated[str, "Name of the event"], date: Annotated[str, "Date of the event"] ) -> str: diff --git a/python/samples/getting_started/tools/ai_function_with_kwargs.py b/python/samples/getting_started/tools/function_tool_with_kwargs.py similarity index 83% rename from python/samples/getting_started/tools/ai_function_with_kwargs.py rename to python/samples/getting_started/tools/function_tool_with_kwargs.py index abd4784a74..59225c0832 100644 --- a/python/samples/getting_started/tools/ai_function_with_kwargs.py +++ b/python/samples/getting_started/tools/function_tool_with_kwargs.py @@ -3,7 +3,7 @@ import asyncio from typing import Annotated, Any -from agent_framework import ai_function +from 
agent_framework import tool from agent_framework.openai import OpenAIResponsesClient from pydantic import Field @@ -20,7 +20,8 @@ # Define the function tool with **kwargs to accept injected arguments -@ai_function +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], **kwargs: Any, diff --git a/python/samples/getting_started/tools/ai_function_with_max_exceptions.py b/python/samples/getting_started/tools/function_tool_with_max_exceptions.py similarity index 99% rename from python/samples/getting_started/tools/ai_function_with_max_exceptions.py rename to python/samples/getting_started/tools/function_tool_with_max_exceptions.py index 7ffc246462..7b83ead248 100644 --- a/python/samples/getting_started/tools/ai_function_with_max_exceptions.py +++ b/python/samples/getting_started/tools/function_tool_with_max_exceptions.py @@ -3,7 +3,7 @@ import asyncio from typing import Annotated -from agent_framework import FunctionCallContent, FunctionResultContent, ai_function +from agent_framework import FunctionCallContent, FunctionResultContent, tool from agent_framework.openai import OpenAIResponsesClient """ @@ -14,7 +14,7 @@ # we trick the AI into calling this function with 0 as denominator to trigger the exception -@ai_function(max_invocation_exceptions=1) +@tool(max_invocation_exceptions=1) def safe_divide( a: Annotated[int, "Numerator"], b: Annotated[int, "Denominator"], diff --git a/python/samples/getting_started/tools/ai_function_with_max_invocations.py b/python/samples/getting_started/tools/function_tool_with_max_invocations.py similarity index 98% rename from python/samples/getting_started/tools/ai_function_with_max_invocations.py rename to 
python/samples/getting_started/tools/function_tool_with_max_invocations.py index 3fa49e25b4..0b13d1cb3b 100644 --- a/python/samples/getting_started/tools/ai_function_with_max_invocations.py +++ b/python/samples/getting_started/tools/function_tool_with_max_invocations.py @@ -3,7 +3,7 @@ import asyncio from typing import Annotated -from agent_framework import FunctionCallContent, FunctionResultContent, ai_function +from agent_framework import FunctionCallContent, FunctionResultContent, tool from agent_framework.openai import OpenAIResponsesClient """ @@ -12,7 +12,7 @@ """ -@ai_function(max_invocations=1) +@tool(max_invocations=1) def unicorn_function(times: Annotated[int, "The number of unicorns to return."]) -> str: """This function returns precious unicorns!""" return f"{'🦄' * times}✨" diff --git a/python/samples/getting_started/tools/ai_function_with_thread_injection.py b/python/samples/getting_started/tools/function_tool_with_thread_injection.py similarity index 83% rename from python/samples/getting_started/tools/ai_function_with_thread_injection.py rename to python/samples/getting_started/tools/function_tool_with_thread_injection.py index 2d34b4169f..0a02ef09d7 100644 --- a/python/samples/getting_started/tools/ai_function_with_thread_injection.py +++ b/python/samples/getting_started/tools/function_tool_with_thread_injection.py @@ -3,7 +3,7 @@ import asyncio from typing import Annotated, Any -from agent_framework import AgentThread, ai_function +from agent_framework import AgentThread, tool from agent_framework.openai import OpenAIChatClient from pydantic import Field @@ -16,7 +16,8 @@ # Define the function tool with **kwargs -@ai_function +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") async def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], **kwargs: Any, diff --git a/python/samples/getting_started/tools/ai_functions_in_class.py b/python/samples/getting_started/tools/tool_in_class.py similarity index 88% rename from python/samples/getting_started/tools/ai_functions_in_class.py rename to python/samples/getting_started/tools/tool_in_class.py index d589fa2da2..e4fa7ca015 100644 --- a/python/samples/getting_started/tools/ai_functions_in_class.py +++ b/python/samples/getting_started/tools/tool_in_class.py @@ -3,20 +3,20 @@ import asyncio from typing import Annotated -from agent_framework import ai_function +from agent_framework import tool from agent_framework.openai import OpenAIResponsesClient """ -This sample demonstrates using ai_function within a class, +This sample demonstrates using tool within a class, showing how to manage state within the class that affects tool behavior. -And how to use ai_function-decorated methods as tools in an agent in order to adjust the behavior of a tool. +And how to use tool-decorated methods as tools in an agent in order to adjust the behavior of a tool. """ class MyFunctionClass: def __init__(self, safe: bool = False) -> None: - """Simple class with two ai_functions: divide and add. + """Simple class with two tools: divide and add. The safe parameter controls whether divide raises on division by zero or returns `infinity` for divide by zero. 
""" @@ -42,8 +42,8 @@ def add( async def main(): # Creating my function class with safe division enabled tools = MyFunctionClass(safe=True) - # Applying the ai_function decorator to one of the methods of the class - add_function = ai_function(description="Add two numbers.")(tools.add) + # Applying the tool decorator to one of the methods of the class + add_function = tool(description="Add two numbers.")(tools.add) agent = OpenAIResponsesClient().as_agent( name="ToolAgent", diff --git a/python/samples/getting_started/workflows/README.md b/python/samples/getting_started/workflows/README.md index 8ca5e0f4bc..5ffcfe3dd0 100644 --- a/python/samples/getting_started/workflows/README.md +++ b/python/samples/getting_started/workflows/README.md @@ -45,7 +45,7 @@ Once comfortable with these, explore the rest of the samples below. | Workflow as Agent (Reflection Pattern) | [agents/workflow_as_agent_reflection_pattern.py](./agents/workflow_as_agent_reflection_pattern.py) | Wrap a workflow so it can behave like an agent (reflection pattern) | | Workflow as Agent + HITL | [agents/workflow_as_agent_human_in_the_loop.py](./agents/workflow_as_agent_human_in_the_loop.py) | Extend workflow-as-agent with human-in-the-loop capability | | Workflow as Agent with Thread | [agents/workflow_as_agent_with_thread.py](./agents/workflow_as_agent_with_thread.py) | Use AgentThread to maintain conversation history across workflow-as-agent invocations | -| Workflow as Agent kwargs | [agents/workflow_as_agent_kwargs.py](./agents/workflow_as_agent_kwargs.py) | Pass custom context (data, user tokens) via kwargs through workflow.as_agent() to @ai_function tools | +| Workflow as Agent kwargs | [agents/workflow_as_agent_kwargs.py](./agents/workflow_as_agent_kwargs.py) | Pass custom context (data, user tokens) via kwargs through workflow.as_agent() to @tool functions | | Handoff Workflow as Agent | [agents/handoff_workflow_as_agent.py](./agents/handoff_workflow_as_agent.py) | Use a HandoffBuilder workflow 
as an agent with HITL via FunctionCallContent/FunctionResultContent | ### checkpoint @@ -91,7 +91,7 @@ Once comfortable with these, explore the rest of the samples below. ### tool-approval -Tool approval samples demonstrate using `@ai_function(approval_mode="always_require")` to gate sensitive tool executions with human approval. These work with the high-level builder APIs. +Tool approval samples demonstrate using `@tool(approval_mode="always_require")` to gate sensitive tool executions with human approval. These work with the high-level builder APIs. | Sample | File | Concepts | |---|---|---| @@ -148,7 +148,7 @@ to configure which agents can route to which others with a fluent, type-safe API | Sample | File | Concepts | |---|---|---| | Shared States | [state-management/shared_states_with_agents.py](./state-management/shared_states_with_agents.py) | Store in shared state once and later reuse across agents | -| Workflow Kwargs (Custom Context) | [state-management/workflow_kwargs.py](./state-management/workflow_kwargs.py) | Pass custom context (data, user tokens) via kwargs to `@ai_function` tools | +| Workflow Kwargs (Custom Context) | [state-management/workflow_kwargs.py](./state-management/workflow_kwargs.py) | Pass custom context (data, user tokens) via kwargs to `@tool` tools | ### visualization diff --git a/python/samples/getting_started/workflows/_start-here/step1_executors_and_edges.py b/python/samples/getting_started/workflows/_start-here/step1_executors_and_edges.py index b5c80062dd..fe031ab548 100644 --- a/python/samples/getting_started/workflows/_start-here/step1_executors_and_edges.py +++ b/python/samples/getting_started/workflows/_start-here/step1_executors_and_edges.py @@ -8,6 +8,7 @@ WorkflowContext, executor, handler, + tool, ) from typing_extensions import Never diff --git a/python/samples/getting_started/workflows/_start-here/step3_streaming.py b/python/samples/getting_started/workflows/_start-here/step3_streaming.py index e7da7efd7c..ffd3e9323d 
100644 --- a/python/samples/getting_started/workflows/_start-here/step3_streaming.py +++ b/python/samples/getting_started/workflows/_start-here/step3_streaming.py @@ -13,6 +13,7 @@ WorkflowRunState, WorkflowStatusEvent, handler, + tool, ) from agent_framework._workflows._events import WorkflowOutputEvent from agent_framework.azure import AzureOpenAIChatClient diff --git a/python/samples/getting_started/workflows/_start-here/step4_using_factories.py b/python/samples/getting_started/workflows/_start-here/step4_using_factories.py index a7b9918991..f9d4f2b971 100644 --- a/python/samples/getting_started/workflows/_start-here/step4_using_factories.py +++ b/python/samples/getting_started/workflows/_start-here/step4_using_factories.py @@ -11,6 +11,7 @@ WorkflowOutputEvent, executor, handler, + tool, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential diff --git a/python/samples/getting_started/workflows/agents/azure_chat_agents_function_bridge.py b/python/samples/getting_started/workflows/agents/azure_chat_agents_function_bridge.py index a459d9e9d9..11bac9f2c9 100644 --- a/python/samples/getting_started/workflows/agents/azure_chat_agents_function_bridge.py +++ b/python/samples/getting_started/workflows/agents/azure_chat_agents_function_bridge.py @@ -14,6 +14,7 @@ WorkflowContext, WorkflowOutputEvent, executor, + tool, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential diff --git a/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py b/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py index 3981be2356..1b97677374 100644 --- a/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py +++ b/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py @@ -22,6 +22,7 @@ WorkflowOutputEvent, handler, response_handler, + 
tool, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential @@ -49,6 +50,8 @@ - Authentication via azure-identity. Run `az login` before executing. """ +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def fetch_product_brief( product_name: Annotated[str, Field(description="Product name to look up.")], @@ -65,6 +68,7 @@ def fetch_product_brief( } return briefs.get(product_name.lower(), f"No stored brief for '{product_name}'.") +@tool(approval_mode="never_require") def get_brand_voice_profile( voice_name: Annotated[str, Field(description="Brand or campaign voice to emulate.")], diff --git a/python/samples/getting_started/workflows/agents/custom_agent_executors.py b/python/samples/getting_started/workflows/agents/custom_agent_executors.py index 66b9f2df46..3f95aab0e4 100644 --- a/python/samples/getting_started/workflows/agents/custom_agent_executors.py +++ b/python/samples/getting_started/workflows/agents/custom_agent_executors.py @@ -9,6 +9,7 @@ WorkflowBuilder, WorkflowContext, handler, + tool, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential diff --git a/python/samples/getting_started/workflows/agents/handoff_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/handoff_workflow_as_agent.py index 2373984586..3b820fe969 100644 --- a/python/samples/getting_started/workflows/agents/handoff_workflow_as_agent.py +++ b/python/samples/getting_started/workflows/agents/handoff_workflow_as_agent.py @@ -13,7 +13,7 @@ HandoffBuilder, Role, WorkflowAgent, - ai_function, + tool, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential @@ -38,19 +38,20 @@ """ -@ai_function +# NOTE: 
approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def process_refund(order_number: Annotated[str, "Order number to process refund for"]) -> str: """Simulated function to process a refund for a given order number.""" return f"Refund processed successfully for order {order_number}." -@ai_function +@tool(approval_mode="never_require") def check_order_status(order_number: Annotated[str, "Order number to check status for"]) -> str: """Simulated function to check the status of a given order number.""" return f"Order {order_number} is currently being processed and will ship in 2 business days." -@ai_function +@tool(approval_mode="never_require") def process_return(order_number: Annotated[str, "Order number to process return for"]) -> str: """Simulated function to process a return for a given order number.""" return f"Return initiated successfully for order {order_number}. You will receive return instructions via email." 
diff --git a/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py index f4e5b38e86..9820b7b6c6 100644 --- a/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py +++ b/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py @@ -6,6 +6,7 @@ ChatAgent, HostedCodeInterpreterTool, MagenticBuilder, + tool, ) from agent_framework.openai import OpenAIChatClient, OpenAIResponsesClient diff --git a/python/samples/getting_started/workflows/agents/mixed_agents_and_executors.py b/python/samples/getting_started/workflows/agents/mixed_agents_and_executors.py index 3ec8d0f530..ab36cf3962 100644 --- a/python/samples/getting_started/workflows/agents/mixed_agents_and_executors.py +++ b/python/samples/getting_started/workflows/agents/mixed_agents_and_executors.py @@ -11,6 +11,7 @@ WorkflowBuilder, WorkflowContext, handler, + tool, ) from agent_framework.azure import AzureAIAgentClient from azure.identity.aio import AzureCliCredential diff --git a/python/samples/getting_started/workflows/agents/workflow_as_agent_human_in_the_loop.py b/python/samples/getting_started/workflows/agents/workflow_as_agent_human_in_the_loop.py index 3850cf74e7..118800765d 100644 --- a/python/samples/getting_started/workflows/agents/workflow_as_agent_human_in_the_loop.py +++ b/python/samples/getting_started/workflows/agents/workflow_as_agent_human_in_the_loop.py @@ -26,6 +26,7 @@ WorkflowContext, handler, response_handler, + tool, ) from getting_started.workflows.agents.workflow_as_agent_reflection_pattern import ( # noqa: E402 ReviewRequest, diff --git a/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py b/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py index 0c86b72ff3..56b8c6de77 100644 --- a/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py +++ 
b/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py @@ -4,22 +4,22 @@ import json from typing import Annotated, Any -from agent_framework import SequentialBuilder, ai_function +from agent_framework import SequentialBuilder, tool from agent_framework.openai import OpenAIChatClient from pydantic import Field """ -Sample: Workflow as Agent with kwargs Propagation to @ai_function Tools +Sample: Workflow as Agent with kwargs Propagation to @tool Tools This sample demonstrates how to flow custom context (skill data, user tokens, etc.) -through a workflow exposed via .as_agent() to @ai_function tools using the **kwargs pattern. +through a workflow exposed via .as_agent() to @tool functions using the **kwargs pattern. Key Concepts: - Build a workflow using SequentialBuilder (or any builder pattern) - Expose the workflow as a reusable agent via workflow.as_agent() - Pass custom context as kwargs when invoking workflow_agent.run() or run_stream() - kwargs are stored in SharedState and propagated to all agent invocations -- @ai_function tools receive kwargs via **kwargs parameter +- @tool functions receive kwargs via **kwargs parameter When to use workflow.as_agent(): - To treat an entire workflow orchestration as a single agent @@ -32,7 +32,8 @@ # Define tools that accept custom context via **kwargs -@ai_function +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") def get_user_data( query: Annotated[str, Field(description="What user data to retrieve")], **kwargs: Any, @@ -49,7 +50,7 @@ def get_user_data( return f"Retrieved data for user {user_name} with {access_level} access: {query}" -@ai_function +@tool(approval_mode="never_require") def call_api( endpoint_name: Annotated[str, Field(description="Name of the API endpoint to call")], **kwargs: Any, @@ -95,7 +96,7 @@ async def main() -> None: # Expose the workflow as an agent using .as_agent() workflow_agent = workflow.as_agent(name="WorkflowAgent") - # Define custom context that will flow to ai_functions via kwargs + # Define custom context that will flow to tools via kwargs custom_data = { "api_config": { "base_url": "https://api.example.com", @@ -119,7 +120,7 @@ async def main() -> None: print("Workflow Agent Execution (watch for [tool_name] logs showing kwargs received):") print("-" * 70) - # Run workflow agent with kwargs - these will flow through to ai_functions + # Run workflow agent with kwargs - these will flow through to tools # Note: kwargs are passed to workflow_agent.run_stream() just like workflow.run_stream() print("\n===== Streaming Response =====") async for update in workflow_agent.run_stream( diff --git a/python/samples/getting_started/workflows/agents/workflow_as_agent_reflection_pattern.py b/python/samples/getting_started/workflows/agents/workflow_as_agent_reflection_pattern.py index 0320d02a1f..9aa98f7b96 100644 --- a/python/samples/getting_started/workflows/agents/workflow_as_agent_reflection_pattern.py +++ b/python/samples/getting_started/workflows/agents/workflow_as_agent_reflection_pattern.py @@ -15,6 +15,7 @@ WorkflowBuilder, WorkflowContext, handler, + tool, ) from agent_framework.openai import OpenAIChatClient from pydantic import BaseModel diff --git a/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py 
b/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py index 694fc759db..a2628592ea 100644 --- a/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py +++ b/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py @@ -26,6 +26,7 @@ get_checkpoint_summary, handler, response_handler, + tool, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential diff --git a/python/samples/getting_started/workflows/checkpoint/checkpoint_with_resume.py b/python/samples/getting_started/workflows/checkpoint/checkpoint_with_resume.py index a6f0a2431b..bfa2484d63 100644 --- a/python/samples/getting_started/workflows/checkpoint/checkpoint_with_resume.py +++ b/python/samples/getting_started/workflows/checkpoint/checkpoint_with_resume.py @@ -37,6 +37,7 @@ WorkflowContext, WorkflowOutputEvent, handler, + tool, ) diff --git a/python/samples/getting_started/workflows/checkpoint/handoff_with_tool_approval_checkpoint_resume.py b/python/samples/getting_started/workflows/checkpoint/handoff_with_tool_approval_checkpoint_resume.py index 7ee4d2cf14..0d60f6ca22 100644 --- a/python/samples/getting_started/workflows/checkpoint/handoff_with_tool_approval_checkpoint_resume.py +++ b/python/samples/getting_started/workflows/checkpoint/handoff_with_tool_approval_checkpoint_resume.py @@ -17,7 +17,7 @@ Workflow, WorkflowOutputEvent, WorkflowStatusEvent, - ai_function, + tool, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential @@ -51,7 +51,7 @@ CHECKPOINT_DIR.mkdir(parents=True, exist_ok=True) -@ai_function(approval_mode="always_require") +@tool(approval_mode="always_require") def submit_refund(refund_description: str, amount: str, order_id: str) -> str: """Capture a refund request for manual review before processing.""" return f"refund recorded for order {order_id} (amount: {amount}) with details: 
{refund_description}" diff --git a/python/samples/getting_started/workflows/checkpoint/sub_workflow_checkpoint.py b/python/samples/getting_started/workflows/checkpoint/sub_workflow_checkpoint.py index 24dec9fb3e..d35fd5e41f 100644 --- a/python/samples/getting_started/workflows/checkpoint/sub_workflow_checkpoint.py +++ b/python/samples/getting_started/workflows/checkpoint/sub_workflow_checkpoint.py @@ -24,6 +24,7 @@ WorkflowStatusEvent, handler, response_handler, + tool, ) CHECKPOINT_DIR = Path(__file__).with_suffix("").parent / "tmp" / "sub_workflow_checkpoints" diff --git a/python/samples/getting_started/workflows/checkpoint/workflow_as_agent_checkpoint.py b/python/samples/getting_started/workflows/checkpoint/workflow_as_agent_checkpoint.py index 1c4df767c5..c0647c72f7 100644 --- a/python/samples/getting_started/workflows/checkpoint/workflow_as_agent_checkpoint.py +++ b/python/samples/getting_started/workflows/checkpoint/workflow_as_agent_checkpoint.py @@ -31,6 +31,7 @@ ChatMessageStore, InMemoryCheckpointStorage, SequentialBuilder, + tool, ) from agent_framework.openai import OpenAIChatClient diff --git a/python/samples/getting_started/workflows/composition/sub_workflow_basics.py b/python/samples/getting_started/workflows/composition/sub_workflow_basics.py index 9189e70d29..cb789850c4 100644 --- a/python/samples/getting_started/workflows/composition/sub_workflow_basics.py +++ b/python/samples/getting_started/workflows/composition/sub_workflow_basics.py @@ -10,6 +10,7 @@ WorkflowContext, WorkflowExecutor, handler, + tool, ) from typing_extensions import Never diff --git a/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py b/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py index b2e43b72c7..07e0f67d9d 100644 --- a/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py +++ b/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py @@ -9,7 +9,7 @@ SequentialBuilder, 
WorkflowExecutor, WorkflowOutputEvent, - ai_function, + tool, ) from agent_framework.openai import OpenAIChatClient @@ -32,7 +32,8 @@ # Define tools that access custom context via **kwargs -@ai_function +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_authenticated_data( resource: Annotated[str, "The resource to fetch"], **kwargs: Any, @@ -48,7 +49,7 @@ def get_authenticated_data( return f"Fetched '{resource}' for user {user_name} ({access_level} access)" -@ai_function +@tool(approval_mode="never_require") def call_configured_service( service_name: Annotated[str, "Name of the service to call"], **kwargs: Any, diff --git a/python/samples/getting_started/workflows/composition/sub_workflow_parallel_requests.py b/python/samples/getting_started/workflows/composition/sub_workflow_parallel_requests.py index 0959f591f0..dadb4325d9 100644 --- a/python/samples/getting_started/workflows/composition/sub_workflow_parallel_requests.py +++ b/python/samples/getting_started/workflows/composition/sub_workflow_parallel_requests.py @@ -16,6 +16,7 @@ WorkflowExecutor, handler, response_handler, + tool, ) from typing_extensions import Never diff --git a/python/samples/getting_started/workflows/composition/sub_workflow_request_interception.py b/python/samples/getting_started/workflows/composition/sub_workflow_request_interception.py index 167ae2e950..e21c74039a 100644 --- a/python/samples/getting_started/workflows/composition/sub_workflow_request_interception.py +++ b/python/samples/getting_started/workflows/composition/sub_workflow_request_interception.py @@ -14,6 +14,7 @@ WorkflowOutputEvent, handler, response_handler, + tool, ) from typing_extensions import Never diff --git a/python/samples/getting_started/workflows/control-flow/edge_condition.py 
b/python/samples/getting_started/workflows/control-flow/edge_condition.py index f55fba008d..6d1a8ffb0f 100644 --- a/python/samples/getting_started/workflows/control-flow/edge_condition.py +++ b/python/samples/getting_started/workflows/control-flow/edge_condition.py @@ -13,6 +13,7 @@ WorkflowBuilder, # Fluent builder for wiring executors and edges WorkflowContext, # Per-run context and event bus executor, # Decorator to declare a Python function as a workflow executor + tool, ) from agent_framework.azure import AzureOpenAIChatClient # Thin client wrapper for Azure OpenAI chat models from azure.identity import AzureCliCredential # Uses your az CLI login for credentials diff --git a/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py b/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py index e0dee175de..44385bffca 100644 --- a/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py +++ b/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py @@ -19,6 +19,7 @@ WorkflowEvent, WorkflowOutputEvent, executor, + tool, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential diff --git a/python/samples/getting_started/workflows/control-flow/sequential_executors.py b/python/samples/getting_started/workflows/control-flow/sequential_executors.py index e422009766..0fedfcf1cd 100644 --- a/python/samples/getting_started/workflows/control-flow/sequential_executors.py +++ b/python/samples/getting_started/workflows/control-flow/sequential_executors.py @@ -9,6 +9,7 @@ WorkflowContext, WorkflowOutputEvent, handler, + tool, ) from typing_extensions import Never diff --git a/python/samples/getting_started/workflows/control-flow/simple_loop.py b/python/samples/getting_started/workflows/control-flow/simple_loop.py index 2db8d93104..d458589123 100644 --- a/python/samples/getting_started/workflows/control-flow/simple_loop.py 
+++ b/python/samples/getting_started/workflows/control-flow/simple_loop.py @@ -14,6 +14,7 @@ WorkflowBuilder, WorkflowContext, handler, + tool, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential diff --git a/python/samples/getting_started/workflows/control-flow/switch_case_edge_group.py b/python/samples/getting_started/workflows/control-flow/switch_case_edge_group.py index 597ba2ef89..f2090e4acc 100644 --- a/python/samples/getting_started/workflows/control-flow/switch_case_edge_group.py +++ b/python/samples/getting_started/workflows/control-flow/switch_case_edge_group.py @@ -17,6 +17,7 @@ WorkflowBuilder, # Fluent builder for assembling the graph WorkflowContext, # Per-run context and event bus executor, # Decorator to turn a function into a workflow executor + tool, ) from agent_framework.azure import AzureOpenAIChatClient # Thin client for Azure OpenAI chat models from azure.identity import AzureCliCredential # Uses your az CLI login for credentials diff --git a/python/samples/getting_started/workflows/declarative/function_tools/main.py b/python/samples/getting_started/workflows/declarative/function_tools/main.py index a6680b74ac..ea647e7f21 100644 --- a/python/samples/getting_started/workflows/declarative/function_tools/main.py +++ b/python/samples/getting_started/workflows/declarative/function_tools/main.py @@ -11,6 +11,7 @@ from typing import Annotated, Any from agent_framework import FileCheckpointStorage, RequestInfoEvent, WorkflowOutputEvent +from agent_framework import tool from agent_framework.azure import AzureOpenAIChatClient from agent_framework_declarative import ExternalInputRequest, ExternalInputResponse, WorkflowFactory from azure.identity import AzureCliCredential @@ -37,17 +38,18 @@ class MenuItem: MenuItem(category="Drink", name="Soda", price=1.95, is_special=False), ] - +# NOTE: approval_mode="never_require" is for sample brevity. 
Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_menu() -> list[dict[str, Any]]: """Get all menu items.""" return [{"category": i.category, "name": i.name, "price": i.price} for i in MENU_ITEMS] - +@tool(approval_mode="never_require") def get_specials() -> list[dict[str, Any]]: """Get today's specials.""" return [{"category": i.category, "name": i.name, "price": i.price} for i in MENU_ITEMS if i.is_special] - +@tool(approval_mode="never_require") def get_item_price(name: Annotated[str, Field(description="Menu item name")]) -> str: """Get price of a menu item.""" for item in MENU_ITEMS: diff --git a/python/samples/getting_started/workflows/human-in-the-loop/agents_with_approval_requests.py b/python/samples/getting_started/workflows/human-in-the-loop/agents_with_approval_requests.py index b2b5d3ff80..b724f2876c 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/agents_with_approval_requests.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/agents_with_approval_requests.py @@ -14,7 +14,7 @@ FunctionApprovalResponseContent, WorkflowBuilder, WorkflowContext, - ai_function, + tool, executor, handler, ) @@ -53,14 +53,15 @@ """ -@ai_function +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_current_date() -> str: """Get the current date in YYYY-MM-DD format.""" # For demonstration purposes, we return a fixed date. 
return "2025-11-07" -@ai_function +@tool(approval_mode="never_require") def get_team_members_email_addresses() -> list[dict[str, str]]: """Get the email addresses of team members.""" # In a real implementation, this might query a database or directory service. @@ -92,7 +93,7 @@ def get_team_members_email_addresses() -> list[dict[str, str]]: ] -@ai_function +@tool(approval_mode="never_require") def get_my_information() -> dict[str, str]: """Get my personal information.""" return { @@ -103,7 +104,7 @@ def get_my_information() -> dict[str, str]: } -@ai_function(approval_mode="always_require") +@tool(approval_mode="always_require") async def read_historical_email_data( email_address: Annotated[str, "The email address to read historical data from"], start_date: Annotated[str, "The start date in YYYY-MM-DD format"], @@ -165,7 +166,7 @@ async def read_historical_email_data( return [email for email in emails if start_date <= email["date"] <= end_date] -@ai_function(approval_mode="always_require") +@tool(approval_mode="always_require") async def send_email( to: Annotated[str, "The recipient email address"], subject: Annotated[str, "The email subject"], diff --git a/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py b/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py index fb2508c4b4..5aca9f8848 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py @@ -33,6 +33,7 @@ WorkflowOutputEvent, WorkflowRunState, WorkflowStatusEvent, + tool, ) from agent_framework._workflows._agent_executor import AgentExecutorResponse from agent_framework.azure import AzureOpenAIChatClient diff --git a/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py b/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py index 
c2cbb7dd00..0f0f4e9789 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py @@ -35,6 +35,7 @@ WorkflowOutputEvent, WorkflowRunState, WorkflowStatusEvent, + tool, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential diff --git a/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py b/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py index 3534e94183..52a9d72901 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py @@ -18,6 +18,7 @@ WorkflowStatusEvent, # Event emitted on run state changes handler, response_handler, # Decorator to expose an Executor method as a step + tool, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential diff --git a/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py b/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py index 609dfc2ee7..401c24b5dd 100644 --- a/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py +++ b/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py @@ -32,6 +32,7 @@ WorkflowOutputEvent, WorkflowRunState, WorkflowStatusEvent, + tool, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential diff --git a/python/samples/getting_started/workflows/observability/executor_io_observation.py b/python/samples/getting_started/workflows/observability/executor_io_observation.py index 0237f294f2..54645f237d 100644 --- 
a/python/samples/getting_started/workflows/observability/executor_io_observation.py +++ b/python/samples/getting_started/workflows/observability/executor_io_observation.py @@ -11,6 +11,7 @@ WorkflowContext, WorkflowOutputEvent, handler, + tool, ) from typing_extensions import Never diff --git a/python/samples/getting_started/workflows/orchestration/concurrent_custom_agent_executors.py b/python/samples/getting_started/workflows/orchestration/concurrent_custom_agent_executors.py index 76203dba63..caf97c7f8f 100644 --- a/python/samples/getting_started/workflows/orchestration/concurrent_custom_agent_executors.py +++ b/python/samples/getting_started/workflows/orchestration/concurrent_custom_agent_executors.py @@ -12,6 +12,7 @@ Executor, WorkflowContext, handler, + tool, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential diff --git a/python/samples/getting_started/workflows/orchestration/concurrent_participant_factory.py b/python/samples/getting_started/workflows/orchestration/concurrent_participant_factory.py index 113e096ca6..aaa05a37a9 100644 --- a/python/samples/getting_started/workflows/orchestration/concurrent_participant_factory.py +++ b/python/samples/getting_started/workflows/orchestration/concurrent_participant_factory.py @@ -12,6 +12,7 @@ Workflow, WorkflowContext, handler, + tool, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential diff --git a/python/samples/getting_started/workflows/orchestration/group_chat_agent_manager.py b/python/samples/getting_started/workflows/orchestration/group_chat_agent_manager.py index 12475205d3..236438347a 100644 --- a/python/samples/getting_started/workflows/orchestration/group_chat_agent_manager.py +++ b/python/samples/getting_started/workflows/orchestration/group_chat_agent_manager.py @@ -9,6 +9,7 @@ GroupChatBuilder, Role, WorkflowOutputEvent, + tool, ) from agent_framework.azure import AzureOpenAIChatClient from 
azure.identity import AzureCliCredential diff --git a/python/samples/getting_started/workflows/orchestration/group_chat_philosophical_debate.py b/python/samples/getting_started/workflows/orchestration/group_chat_philosophical_debate.py index a26b9df4d0..cf0b8b8767 100644 --- a/python/samples/getting_started/workflows/orchestration/group_chat_philosophical_debate.py +++ b/python/samples/getting_started/workflows/orchestration/group_chat_philosophical_debate.py @@ -11,6 +11,7 @@ GroupChatBuilder, Role, WorkflowOutputEvent, + tool, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential diff --git a/python/samples/getting_started/workflows/orchestration/group_chat_simple_selector.py b/python/samples/getting_started/workflows/orchestration/group_chat_simple_selector.py index 517ae313f3..f0899a8ddb 100644 --- a/python/samples/getting_started/workflows/orchestration/group_chat_simple_selector.py +++ b/python/samples/getting_started/workflows/orchestration/group_chat_simple_selector.py @@ -9,6 +9,7 @@ GroupChatBuilder, GroupChatState, WorkflowOutputEvent, + tool, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential diff --git a/python/samples/getting_started/workflows/orchestration/handoff_autonomous.py b/python/samples/getting_started/workflows/orchestration/handoff_autonomous.py index 758043d9b9..edab013700 100644 --- a/python/samples/getting_started/workflows/orchestration/handoff_autonomous.py +++ b/python/samples/getting_started/workflows/orchestration/handoff_autonomous.py @@ -14,6 +14,7 @@ WorkflowEvent, WorkflowOutputEvent, resolve_agent_id, + tool, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential diff --git a/python/samples/getting_started/workflows/orchestration/handoff_participant_factory.py b/python/samples/getting_started/workflows/orchestration/handoff_participant_factory.py index d95871d8f0..dd4e4054c8 
100644 --- a/python/samples/getting_started/workflows/orchestration/handoff_participant_factory.py +++ b/python/samples/getting_started/workflows/orchestration/handoff_participant_factory.py @@ -18,7 +18,7 @@ WorkflowOutputEvent, WorkflowRunState, WorkflowStatusEvent, - ai_function, + tool, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential @@ -47,19 +47,20 @@ """ -@ai_function +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def process_refund(order_number: Annotated[str, "Order number to process refund for"]) -> str: """Simulated function to process a refund for a given order number.""" return f"Refund processed successfully for order {order_number}." -@ai_function +@tool(approval_mode="never_require") def check_order_status(order_number: Annotated[str, "Order number to check status for"]) -> str: """Simulated function to check the status of a given order number.""" return f"Order {order_number} is currently being processed and will ship in 2 business days." -@ai_function +@tool(approval_mode="never_require") def process_return(order_number: Annotated[str, "Order number to process return for"]) -> str: """Simulated function to process a return for a given order number.""" return f"Return initiated successfully for order {order_number}. You will receive return instructions via email." 
diff --git a/python/samples/getting_started/workflows/orchestration/handoff_simple.py b/python/samples/getting_started/workflows/orchestration/handoff_simple.py index 3fd88c5a86..72ea035a4f 100644 --- a/python/samples/getting_started/workflows/orchestration/handoff_simple.py +++ b/python/samples/getting_started/workflows/orchestration/handoff_simple.py @@ -16,7 +16,7 @@ WorkflowOutputEvent, WorkflowRunState, WorkflowStatusEvent, - ai_function, + tool, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential @@ -38,19 +38,20 @@ """ -@ai_function +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def process_refund(order_number: Annotated[str, "Order number to process refund for"]) -> str: """Simulated function to process a refund for a given order number.""" return f"Refund processed successfully for order {order_number}." -@ai_function +@tool(approval_mode="never_require") def check_order_status(order_number: Annotated[str, "Order number to check status for"]) -> str: """Simulated function to check the status of a given order number.""" return f"Order {order_number} is currently being processed and will ship in 2 business days." -@ai_function +@tool(approval_mode="never_require") def process_return(order_number: Annotated[str, "Order number to process return for"]) -> str: """Simulated function to process a return for a given order number.""" return f"Return initiated successfully for order {order_number}. You will receive return instructions via email." 
diff --git a/python/samples/getting_started/workflows/orchestration/handoff_with_code_interpreter_file.py b/python/samples/getting_started/workflows/orchestration/handoff_with_code_interpreter_file.py index fa6575300a..b1d6f394b7 100644 --- a/python/samples/getting_started/workflows/orchestration/handoff_with_code_interpreter_file.py +++ b/python/samples/getting_started/workflows/orchestration/handoff_with_code_interpreter_file.py @@ -41,6 +41,7 @@ WorkflowEvent, WorkflowRunState, WorkflowStatusEvent, + tool, ) from azure.identity.aio import AzureCliCredential diff --git a/python/samples/getting_started/workflows/orchestration/magentic.py b/python/samples/getting_started/workflows/orchestration/magentic.py index 8e71d09a42..90ec01a7d3 100644 --- a/python/samples/getting_started/workflows/orchestration/magentic.py +++ b/python/samples/getting_started/workflows/orchestration/magentic.py @@ -15,6 +15,7 @@ MagenticOrchestratorEvent, MagenticProgressLedger, WorkflowOutputEvent, + tool, ) from agent_framework.openai import OpenAIChatClient, OpenAIResponsesClient diff --git a/python/samples/getting_started/workflows/orchestration/magentic_checkpoint.py b/python/samples/getting_started/workflows/orchestration/magentic_checkpoint.py index 6fc284a9ab..7acd566f39 100644 --- a/python/samples/getting_started/workflows/orchestration/magentic_checkpoint.py +++ b/python/samples/getting_started/workflows/orchestration/magentic_checkpoint.py @@ -16,6 +16,7 @@ WorkflowOutputEvent, WorkflowRunState, WorkflowStatusEvent, + tool, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity._credentials import AzureCliCredential diff --git a/python/samples/getting_started/workflows/orchestration/magentic_human_plan_review.py b/python/samples/getting_started/workflows/orchestration/magentic_human_plan_review.py index 37a53020e7..eece67f4bf 100644 --- a/python/samples/getting_started/workflows/orchestration/magentic_human_plan_review.py +++ 
b/python/samples/getting_started/workflows/orchestration/magentic_human_plan_review.py @@ -12,6 +12,7 @@ MagenticPlanReviewRequest, RequestInfoEvent, WorkflowOutputEvent, + tool, ) from agent_framework.openai import OpenAIChatClient diff --git a/python/samples/getting_started/workflows/orchestration/sequential_custom_executors.py b/python/samples/getting_started/workflows/orchestration/sequential_custom_executors.py index db60b3486a..b29cec6d83 100644 --- a/python/samples/getting_started/workflows/orchestration/sequential_custom_executors.py +++ b/python/samples/getting_started/workflows/orchestration/sequential_custom_executors.py @@ -11,6 +11,7 @@ SequentialBuilder, WorkflowContext, handler, + tool, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential diff --git a/python/samples/getting_started/workflows/orchestration/sequential_participant_factory.py b/python/samples/getting_started/workflows/orchestration/sequential_participant_factory.py index d155d1c9b1..6cf87bf21c 100644 --- a/python/samples/getting_started/workflows/orchestration/sequential_participant_factory.py +++ b/python/samples/getting_started/workflows/orchestration/sequential_participant_factory.py @@ -11,6 +11,7 @@ Workflow, WorkflowContext, handler, + tool, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential diff --git a/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py b/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py index f2e3fffeaa..36c2ca24f6 100644 --- a/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py +++ b/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py @@ -16,6 +16,7 @@ WorkflowContext, # Per run context and event bus WorkflowOutputEvent, # Event emitted when workflow yields output handler, # Decorator to mark an Executor method as invokable + tool, ) from 
agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential # Uses your az CLI login for credentials diff --git a/python/samples/getting_started/workflows/parallelism/map_reduce_and_visualization.py b/python/samples/getting_started/workflows/parallelism/map_reduce_and_visualization.py index e443df0354..d98c6cb78b 100644 --- a/python/samples/getting_started/workflows/parallelism/map_reduce_and_visualization.py +++ b/python/samples/getting_started/workflows/parallelism/map_reduce_and_visualization.py @@ -14,6 +14,7 @@ WorkflowOutputEvent, # Event emitted when workflow yields output WorkflowViz, # Utility to visualize a workflow graph handler, # Decorator to expose an Executor method as a step + tool, ) from typing_extensions import Never diff --git a/python/samples/getting_started/workflows/state-management/shared_states_with_agents.py b/python/samples/getting_started/workflows/state-management/shared_states_with_agents.py index e0a95f949a..700dcb1b95 100644 --- a/python/samples/getting_started/workflows/state-management/shared_states_with_agents.py +++ b/python/samples/getting_started/workflows/state-management/shared_states_with_agents.py @@ -15,6 +15,7 @@ WorkflowBuilder, WorkflowContext, executor, + tool, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential diff --git a/python/samples/getting_started/workflows/state-management/workflow_kwargs.py b/python/samples/getting_started/workflows/state-management/workflow_kwargs.py index 96dd8e0a38..bf7320f834 100644 --- a/python/samples/getting_started/workflows/state-management/workflow_kwargs.py +++ b/python/samples/getting_started/workflows/state-management/workflow_kwargs.py @@ -4,20 +4,20 @@ import json from typing import Annotated, Any -from agent_framework import ChatMessage, SequentialBuilder, WorkflowOutputEvent, ai_function +from agent_framework import ChatMessage, SequentialBuilder, WorkflowOutputEvent, tool from 
agent_framework.openai import OpenAIChatClient from pydantic import Field """ -Sample: Workflow kwargs Flow to @ai_function Tools +Sample: Workflow kwargs Flow to @tool Tools This sample demonstrates how to flow custom context (skill data, user tokens, etc.) -through any workflow pattern to @ai_function tools using the **kwargs pattern. +through any workflow pattern to @tool functions using the **kwargs pattern. Key Concepts: - Pass custom context as kwargs when invoking workflow.run_stream() or workflow.run() - kwargs are stored in SharedState and passed to all agent invocations -- @ai_function tools receive kwargs via **kwargs parameter +- @tool functions receive kwargs via **kwargs parameter - Works with Sequential, Concurrent, GroupChat, Handoff, and Magentic patterns Prerequisites: @@ -26,7 +26,8 @@ # Define tools that accept custom context via **kwargs -@ai_function +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. 
+@tool(approval_mode="never_require") def get_user_data( query: Annotated[str, Field(description="What user data to retrieve")], **kwargs: Any, @@ -43,7 +44,7 @@ def get_user_data( return f"Retrieved data for user {user_name} with {access_level} access: {query}" -@ai_function +@tool(approval_mode="never_require") def call_api( endpoint_name: Annotated[str, Field(description="Name of the API endpoint to call")], **kwargs: Any, @@ -86,7 +87,7 @@ async def main() -> None: # Build a simple sequential workflow workflow = SequentialBuilder().participants([agent]).build() - # Define custom context that will flow to ai_functions via kwargs + # Define custom context that will flow to tools via kwargs custom_data = { "api_config": { "base_url": "https://api.example.com", @@ -110,7 +111,7 @@ async def main() -> None: print("Workflow Execution (watch for [tool_name] logs showing kwargs received):") print("-" * 70) - # Run workflow with kwargs - these will flow through to ai_functions + # Run workflow with kwargs - these will flow through to tools async for event in workflow.run_stream( "Please get my user data and then call the users API endpoint.", custom_data=custom_data, diff --git a/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py index ce8d0d5977..83e6175a72 100644 --- a/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py +++ b/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py @@ -10,7 +10,7 @@ FunctionApprovalResponseContent, RequestInfoEvent, WorkflowOutputEvent, - ai_function, + tool, ) from agent_framework.openai import OpenAIChatClient @@ -45,7 +45,8 @@ # 1. Define market data tools (no approval required) -@ai_function +# NOTE: approval_mode="never_require" is for sample brevity. 
Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_stock_price(symbol: Annotated[str, "The stock ticker symbol"]) -> str: """Get the current stock price for a given symbol.""" # Mock data for demonstration @@ -54,7 +55,7 @@ def get_stock_price(symbol: Annotated[str, "The stock ticker symbol"]) -> str: return f"{symbol.upper()}: ${price:.2f}" -@ai_function +@tool(approval_mode="never_require") def get_market_sentiment(symbol: Annotated[str, "The stock ticker symbol"]) -> str: """Get market sentiment analysis for a stock.""" # Mock sentiment data @@ -68,7 +69,7 @@ def get_market_sentiment(symbol: Annotated[str, "The stock ticker symbol"]) -> s # 2. Define trading tools (approval required) -@ai_function(approval_mode="always_require") +@tool(approval_mode="always_require") def execute_trade( symbol: Annotated[str, "The stock ticker symbol"], action: Annotated[str, "Either 'buy' or 'sell'"], @@ -78,7 +79,7 @@ def execute_trade( return f"Trade executed: {action.upper()} {quantity} shares of {symbol.upper()}" -@ai_function +@tool(approval_mode="never_require") def get_portfolio_balance() -> str: """Get current portfolio balance and available funds.""" return "Portfolio: $50,000 invested, $10,000 cash available. Holdings: AAPL, GOOGL, MSFT." 
diff --git a/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py index ae893e05ae..60679c70c6 100644 --- a/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py +++ b/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py @@ -10,7 +10,7 @@ GroupChatRequestSentEvent, GroupChatState, RequestInfoEvent, - ai_function, + tool, ) from agent_framework.openai import OpenAIChatClient @@ -44,19 +44,20 @@ # 1. Define tools for different agents -@ai_function +# NOTE: approval_mode="never_require" is for sample brevity. Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def run_tests(test_suite: Annotated[str, "Name of the test suite to run"]) -> str: """Run automated tests for the application.""" return f"Test suite '{test_suite}' completed: 47 passed, 0 failed, 0 skipped" -@ai_function +@tool(approval_mode="never_require") def check_staging_status() -> str: """Check the current status of the staging environment.""" return "Staging environment: Healthy, Version 2.3.0 deployed, All services running" -@ai_function(approval_mode="always_require") +@tool(approval_mode="always_require") def deploy_to_production( version: Annotated[str, "The version to deploy"], components: Annotated[str, "Comma-separated list of components to deploy"], @@ -65,7 +66,7 @@ def deploy_to_production( return f"Production deployment complete: Version {version}, Components: {components}" -@ai_function +@tool(approval_mode="never_require") def create_rollback_plan(version: Annotated[str, "The version being deployed"]) -> str: """Create a rollback plan for the deployment.""" return ( diff --git 
a/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py index 042020b0b7..1397ce31a1 100644 --- a/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py +++ b/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py @@ -9,7 +9,7 @@ RequestInfoEvent, SequentialBuilder, WorkflowOutputEvent, - ai_function, + tool, ) from agent_framework.openai import OpenAIChatClient @@ -17,7 +17,7 @@ Sample: Sequential Workflow with Tool Approval Requests This sample demonstrates how to use SequentialBuilder with tools that require human -approval before execution. The approval flow uses the existing @ai_function decorator +approval before execution. The approval flow uses the existing @tool decorator with approval_mode="always_require" to trigger human-in-the-loop interactions. This sample works as follows: @@ -33,7 +33,7 @@ requiring any additional builder configuration. Demonstrate: -- Using @ai_function(approval_mode="always_require") for sensitive operations. +- Using @tool(approval_mode="always_require") for sensitive operations. - Handling RequestInfoEvent with FunctionApprovalRequestContent in sequential workflows. - Resuming workflow execution after approval via send_responses_streaming. @@ -44,7 +44,7 @@ # 1. Define tools - one requiring approval, one that doesn't -@ai_function(approval_mode="always_require") +@tool(approval_mode="always_require") def execute_database_query( query: Annotated[str, "The SQL query to execute against the production database"], ) -> str: @@ -53,7 +53,8 @@ def execute_database_query( return f"Query executed successfully. Results: 3 rows affected by '{query}'" -@ai_function +# NOTE: approval_mode="never_require" is for sample brevity. 
Use "always_require" in production; see samples/getting_started/tools/function_tool_with_approval.py and samples/getting_started/tools/function_tool_with_approval_and_threads.py. +@tool(approval_mode="never_require") def get_database_schema() -> str: """Get the current database schema. Does not require approval.""" return """ diff --git a/python/samples/getting_started/workflows/visualization/concurrent_with_visualization.py b/python/samples/getting_started/workflows/visualization/concurrent_with_visualization.py index e606ae0229..877bb13038 100644 --- a/python/samples/getting_started/workflows/visualization/concurrent_with_visualization.py +++ b/python/samples/getting_started/workflows/visualization/concurrent_with_visualization.py @@ -14,6 +14,7 @@ WorkflowContext, WorkflowViz, handler, + tool, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential diff --git a/python/samples/semantic-kernel-migration/README.md b/python/samples/semantic-kernel-migration/README.md index 4e5e04a345..64c9d80aa5 100644 --- a/python/samples/semantic-kernel-migration/README.md +++ b/python/samples/semantic-kernel-migration/README.md @@ -71,5 +71,5 @@ Swap the script path for any other workflow or process sample. Deactivate the sa ## Tips for Migration - Keep the original SK sample open while iterating on the AF equivalent; the code is intentionally formatted so you can copy/paste across SDKs. - Threads/conversation state are explicit in AF. When porting SK code that relies on implicit thread reuse, call `agent.get_new_thread()` and pass it into each `run`/`run_stream` call. -- Tools map cleanly: SK `@kernel_function` plugins translate to AF `@ai_function` callables. Hosted tools (code interpreter, web search, MCP) are available only in AF—introduce them once parity is achieved. +- Tools map cleanly: SK `@kernel_function` plugins translate to AF `@tool` callables. 
Hosted tools (code interpreter, web search, MCP) are available only in AF—introduce them once parity is achieved. - For multi-agent orchestration, AF workflows expose checkpoints and resume capabilities that SK Process/Team abstractions do not. Use the workflow samples as a blueprint when modernizing complex agent graphs. diff --git a/python/samples/semantic-kernel-migration/chat_completion/02_chat_completion_with_tool.py b/python/samples/semantic-kernel-migration/chat_completion/02_chat_completion_with_tool.py index e6b5aef386..363cdaec53 100644 --- a/python/samples/semantic-kernel-migration/chat_completion/02_chat_completion_with_tool.py +++ b/python/samples/semantic-kernel-migration/chat_completion/02_chat_completion_with_tool.py @@ -34,10 +34,10 @@ def specials(self) -> str: async def run_agent_framework() -> None: - from agent_framework._tools import ai_function + from agent_framework._tools import tool from agent_framework.openai import OpenAIChatClient - @ai_function(name="specials", description="List daily specials") + @tool(name="specials", description="List daily specials") async def specials() -> str: return "Clam chowder, Cobb salad, Chai tea" diff --git a/python/samples/semantic-kernel-migration/openai_assistant/03_openai_assistant_function_tool.py b/python/samples/semantic-kernel-migration/openai_assistant/03_openai_assistant_function_tool.py index fb9cf991d8..e84bc1b171 100644 --- a/python/samples/semantic-kernel-migration/openai_assistant/03_openai_assistant_function_tool.py +++ b/python/samples/semantic-kernel-migration/openai_assistant/03_openai_assistant_function_tool.py @@ -55,10 +55,10 @@ async def fake_weather_lookup(city: str, day: str) -> dict[str, Any]: async def run_agent_framework() -> None: - from agent_framework._tools import ai_function + from agent_framework._tools import tool from agent_framework.openai import OpenAIAssistantsClient - @ai_function( + @tool( name="get_forecast", description="Look up the forecast for a city and day.", 
) diff --git a/python/samples/semantic-kernel-migration/openai_responses/02_responses_agent_with_tool.py b/python/samples/semantic-kernel-migration/openai_responses/02_responses_agent_with_tool.py index 8a89871505..fb18708ddf 100644 --- a/python/samples/semantic-kernel-migration/openai_responses/02_responses_agent_with_tool.py +++ b/python/samples/semantic-kernel-migration/openai_responses/02_responses_agent_with_tool.py @@ -34,10 +34,10 @@ def add(self, a: float, b: float) -> float: async def run_agent_framework() -> None: from agent_framework import ChatAgent - from agent_framework._tools import ai_function + from agent_framework._tools import tool from agent_framework.openai import OpenAIResponsesClient - @ai_function(name="add", description="Add two numbers") + @tool(name="add", description="Add two numbers") async def add(a: float, b: float) -> float: return a + b diff --git a/python/samples/semantic-kernel-migration/orchestrations/handoff.py b/python/samples/semantic-kernel-migration/orchestrations/handoff.py index 087a28afce..bd4cfccec4 100644 --- a/python/samples/semantic-kernel-migration/orchestrations/handoff.py +++ b/python/samples/semantic-kernel-migration/orchestrations/handoff.py @@ -13,6 +13,7 @@ RequestInfoEvent, WorkflowEvent, WorkflowOutputEvent, + tool, ) from agent_framework.azure import AzureOpenAIChatClient from azure.identity import AzureCliCredential diff --git a/python/samples/semantic-kernel-migration/processes/nested_process.py b/python/samples/semantic-kernel-migration/processes/nested_process.py index 884ee6f4b0..e649103703 100644 --- a/python/samples/semantic-kernel-migration/processes/nested_process.py +++ b/python/samples/semantic-kernel-migration/processes/nested_process.py @@ -19,6 +19,7 @@ WorkflowExecutor, WorkflowOutputEvent, handler, + tool, ) from pydantic import BaseModel, Field