From db3f51b83a0e546585c2d0edf476407cd4b900ce Mon Sep 17 00:00:00 2001 From: M-Hietala <78813398+M-Hietala@users.noreply.github.com> Date: Mon, 2 Feb 2026 15:17:21 -0600 Subject: [PATCH 01/10] update operation name and add support for message attributes --- .../telemetry/_responses_instrumentor.py | 134 +++- .../azure/ai/projects/telemetry/_utils.py | 29 + .../telemetry/test_responses_instrumentor.py | 582 ++++++++++++------ .../test_responses_instrumentor_async.py | 47 +- ...sponses_instrumentor_browser_automation.py | 10 +- ...s_instrumentor_browser_automation_async.py | 10 +- ...responses_instrumentor_code_interpreter.py | 10 +- ...ses_instrumentor_code_interpreter_async.py | 10 +- ...test_responses_instrumentor_file_search.py | 10 +- ...esponses_instrumentor_file_search_async.py | 10 +- .../test_responses_instrumentor_mcp.py | 18 +- .../test_responses_instrumentor_mcp_async.py | 18 +- .../test_responses_instrumentor_workflow.py | 10 +- ...t_responses_instrumentor_workflow_async.py | 10 +- 14 files changed, 629 insertions(+), 279 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_responses_instrumentor.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_responses_instrumentor.py index 37a14d1f682b..58618596f724 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_responses_instrumentor.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_responses_instrumentor.py @@ -33,9 +33,11 @@ GEN_AI_CONVERSATION_ITEM_EVENT, GEN_AI_CONVERSATION_ITEM_ID, GEN_AI_EVENT_CONTENT, + GEN_AI_INPUT_MESSAGES, GEN_AI_OPENAI_RESPONSE_SERVICE_TIER, GEN_AI_OPENAI_RESPONSE_SYSTEM_FINGERPRINT, GEN_AI_OPERATION_NAME, + GEN_AI_OUTPUT_MESSAGES, GEN_AI_PROVIDER_NAME, GEN_AI_REQUEST_MODEL, GEN_AI_REQUEST_TOOLS, @@ -48,14 +50,19 @@ GEN_AI_USAGE_OUTPUT_TOKENS, GEN_AI_USER_MESSAGE_EVENT, GEN_AI_WORKFLOW_ACTION_EVENT, + OPERATION_NAME_CHAT, + OPERATION_NAME_INVOKE_AGENT, OperationName, SERVER_ADDRESS, SERVER_PORT, SPAN_NAME_CHAT, 
SPAN_NAME_INVOKE_AGENT, + get_use_message_events, start_span, ) + + _Unset: Any = object() logger = logging.getLogger(__name__) @@ -534,6 +541,35 @@ def _create_event_attributes( # attrs[GEN_AI_MESSAGE_ROLE] = message_role return attrs + def _append_to_message_attribute( + self, + span: "AbstractSpan", + attribute_name: str, + new_messages: List[Dict[str, Any]], + ) -> None: + """Helper to append messages to an existing attribute, combining with previous messages.""" + # Get existing attribute value + existing_value = span.span_instance.attributes.get(attribute_name) if span.span_instance.attributes else None + + if existing_value: + # Parse existing JSON array + try: + existing_messages = json.loads(existing_value) + if not isinstance(existing_messages, list): + existing_messages = [] + except (json.JSONDecodeError, TypeError): + existing_messages = [] + + # Append new messages + combined_messages = existing_messages + new_messages + else: + # No existing value, just use new messages + combined_messages = new_messages + + # Set the combined value + combined_json = json.dumps(combined_messages, ensure_ascii=False) + span.span_instance.set_attribute(attribute_name, combined_json) + def _add_message_event( self, span: "AbstractSpan", @@ -542,7 +578,7 @@ def _add_message_event( conversation_id: Optional[str] = None, finish_reason: Optional[str] = None, ) -> None: - """Add a message event to the span.""" + """Add a message event or attribute to the span based on configuration.""" content_array: List[Dict[str, Any]] = [] # Always include role and finish_reason, only include actual content if tracing is enabled @@ -567,23 +603,38 @@ def _add_message_event( content_array.append(role_obj) - attributes = self._create_event_attributes( - conversation_id=conversation_id, - message_role=role, - ) - # Store as JSON array directly without outer wrapper - attributes[GEN_AI_EVENT_CONTENT] = json.dumps(content_array, ensure_ascii=False) + # Serialize the content array to JSON + 
json_content = json.dumps(content_array, ensure_ascii=False) + + if get_use_message_events(): + # Original event-based implementation + attributes = self._create_event_attributes( + conversation_id=conversation_id, + message_role=role, + ) + # Store as JSON array directly without outer wrapper + attributes[GEN_AI_EVENT_CONTENT] = json_content + + # Map role to appropriate event name constant + if role == "user": + event_name = GEN_AI_USER_MESSAGE_EVENT + elif role == "assistant": + event_name = GEN_AI_ASSISTANT_MESSAGE_EVENT + else: + # Fallback for any other roles (shouldn't happen in practice) + event_name = f"gen_ai.{role}.message" - # Map role to appropriate event name constant - if role == "user": - event_name = GEN_AI_USER_MESSAGE_EVENT - elif role == "assistant": - event_name = GEN_AI_ASSISTANT_MESSAGE_EVENT + span.span_instance.add_event(name=event_name, attributes=attributes) else: - # Fallback for any other roles (shouldn't happen in practice) - event_name = f"gen_ai.{role}.message" + # New attribute-based implementation + # Append messages to the appropriate attribute (accumulating multiple messages) + if role in ("user", "tool"): + # User and tool messages go to input.messages + self._append_to_message_attribute(span, GEN_AI_INPUT_MESSAGES, content_array) + elif role == "assistant": + # Assistant messages go to output.messages + self._append_to_message_attribute(span, GEN_AI_OUTPUT_MESSAGES, content_array) - span.span_instance.add_event(name=event_name, attributes=attributes) def _add_tool_message_events( # pylint: disable=too-many-branches self, @@ -975,17 +1026,24 @@ def _emit_tool_call_event( tool_call: Dict[str, Any], conversation_id: Optional[str] = None, ) -> None: - """Helper to emit a single tool call event.""" + """Helper to emit a single tool call event or attribute.""" # Wrap tool call in parts array parts = [{"type": "tool_call", "content": tool_call}] content_array = [{"role": "assistant", "parts": parts}] - attributes = 
self._create_event_attributes( - conversation_id=conversation_id, - message_role="assistant", - ) - # Store as JSON array directly without outer wrapper - attributes[GEN_AI_EVENT_CONTENT] = json.dumps(content_array, ensure_ascii=False) - span.span_instance.add_event(name=GEN_AI_ASSISTANT_MESSAGE_EVENT, attributes=attributes) + + if get_use_message_events(): + # Original event-based implementation + json_content = json.dumps(content_array, ensure_ascii=False) + attributes = self._create_event_attributes( + conversation_id=conversation_id, + message_role="assistant", + ) + # Store as JSON array directly without outer wrapper + attributes[GEN_AI_EVENT_CONTENT] = json_content + span.span_instance.add_event(name=GEN_AI_ASSISTANT_MESSAGE_EVENT, attributes=attributes) + else: + # New attribute-based implementation - tool calls are output messages + self._append_to_message_attribute(span, GEN_AI_OUTPUT_MESSAGES, content_array) def _emit_tool_output_event( self, @@ -993,19 +1051,26 @@ def _emit_tool_output_event( tool_output: Dict[str, Any], conversation_id: Optional[str] = None, ) -> None: - """Helper to emit a single tool output event.""" + """Helper to emit a single tool output event or attribute.""" # Wrap tool output in parts array # Tool outputs are inputs TO the model (from tool execution), so use role "tool" parts = [{"type": "tool_call_output", "content": tool_output}] content_array = [{"role": "tool", "parts": parts}] - attributes = self._create_event_attributes( - conversation_id=conversation_id, - message_role="tool", - ) - # Store as JSON array directly without outer wrapper - attributes[GEN_AI_EVENT_CONTENT] = json.dumps(content_array, ensure_ascii=False) - # Tool outputs are inputs to the model, so use input.messages event - span.span_instance.add_event(name=GEN_AI_USER_MESSAGE_EVENT, attributes=attributes) + + if get_use_message_events(): + # Original event-based implementation + json_content = json.dumps(content_array, ensure_ascii=False) + attributes = 
self._create_event_attributes( + conversation_id=conversation_id, + message_role="tool", + ) + # Store as JSON array directly without outer wrapper + attributes[GEN_AI_EVENT_CONTENT] = json_content + # Tool outputs are inputs to the model, so use input.messages event + span.span_instance.add_event(name=GEN_AI_USER_MESSAGE_EVENT, attributes=attributes) + else: + # New attribute-based implementation - tool outputs are input messages + self._append_to_message_attribute(span, GEN_AI_INPUT_MESSAGES, content_array) def _add_tool_call_events( # pylint: disable=too-many-branches self, @@ -1486,10 +1551,13 @@ def start_responses_span( # Build span name: agent case uses "invoke_agent", non-agent case uses "chat" if assistant_name: span_name = f"{SPAN_NAME_INVOKE_AGENT} {assistant_name}" + operation_name_value = OPERATION_NAME_INVOKE_AGENT elif model: span_name = f"{SPAN_NAME_CHAT} {model}" + operation_name_value = OPERATION_NAME_CHAT else: span_name = OperationName.RESPONSES.value + operation_name_value = OperationName.RESPONSES.value span = start_span( operation_name=OperationName.RESPONSES, @@ -1504,7 +1572,7 @@ def start_responses_span( # Set operation name attribute (start_span doesn't set this automatically) self._set_attributes( span, - (GEN_AI_OPERATION_NAME, OperationName.RESPONSES.value), + (GEN_AI_OPERATION_NAME, operation_name_value), ) # Set response-specific attributes that start_span doesn't handle diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_utils.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_utils.py index 050268a6e133..20f2411b5aff 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_utils.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_utils.py @@ -91,6 +91,10 @@ GEN_AI_SYSTEM_INSTRUCTION_EVENT = "gen_ai.system.instructions" GEN_AI_AGENT_WORKFLOW_EVENT = "gen_ai.agent.workflow" +# Attribute names for messages (when USE_MESSAGE_EVENTS = False) +GEN_AI_INPUT_MESSAGES = "gen_ai.input.messages" 
+GEN_AI_OUTPUT_MESSAGES = "gen_ai.output.messages" + # Metric names GEN_AI_CLIENT_OPERATION_DURATION = "gen_ai.client.operation.duration" GEN_AI_CLIENT_TOKEN_USAGE = "gen_ai.client.token.usage" @@ -107,6 +111,31 @@ SPAN_NAME_INVOKE_AGENT = "invoke_agent" SPAN_NAME_CHAT = "chat" +# Operation names for gen_ai.operation.name attribute +OPERATION_NAME_INVOKE_AGENT = "invoke_agent" +OPERATION_NAME_CHAT = "chat" + +# Configuration: Controls whether input/output messages are emitted as events or attributes +# Can be set at runtime for testing purposes (internal use only) +# Set to True for event-based, False for attribute-based (default) +_use_message_events = False + + +def get_use_message_events() -> bool: + """Get the current message tracing mode (events vs attributes).""" + return _use_message_events + + +def set_use_message_events(use_events: bool) -> None: + """ + Set the message tracing mode at runtime. + + Args: + use_events: True to use events, False to use attributes (default) + """ + global _use_message_events + _use_message_events = use_events + class OperationName(Enum): CREATE_AGENT = "create_agent" diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor.py index 8bf3b5077780..c95a0bf698bf 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor.py @@ -9,7 +9,13 @@ import pytest from typing import Optional, Tuple from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils -from azure.ai.projects.telemetry._utils import SPAN_NAME_CHAT, SPAN_NAME_INVOKE_AGENT +from azure.ai.projects.telemetry._utils import ( + OPERATION_NAME_CHAT, + OPERATION_NAME_INVOKE_AGENT, + SPAN_NAME_CHAT, + SPAN_NAME_INVOKE_AGENT, + set_use_message_events, +) from azure.core.settings import settings from gen_ai_trace_verifier import 
GenAiTraceVerifier from openai import OpenAI @@ -187,12 +193,10 @@ def set_env_var(var_name, value): finally: self.cleanup() - @pytest.mark.usefixtures("instrument_with_content") - @servicePreparer() - @recorded_by_proxy(RecordedTransport.HTTPX) - def test_sync_non_streaming_with_content_recording(self, **kwargs): - """Test synchronous non-streaming responses with content recording enabled.""" + def _test_sync_non_streaming_with_content_recording_impl(self, use_events, **kwargs): + """Implementation for testing synchronous non-streaming responses with content recording enabled.""" self.cleanup() + set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -232,7 +236,7 @@ def test_sync_non_streaming_with_content_recording(self, **kwargs): # Check span attributes expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), @@ -245,34 +249,68 @@ def test_sync_non_streaming_with_content_recording(self, **kwargs): attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) assert attributes_match == True - # Check span events - expected_events = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "user", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "Write a short poem about AI"}]}]', + # Check span events or attributes based on mode + if use_events: + # Check span events + expected_events = [ + { + "name": "gen_ai.input.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "Write a short poem about AI"}]}]', + }, }, - }, - { - "name": 
"gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', + { + "name": "gen_ai.output.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', + }, }, - }, - ] - events_match = GenAiTraceVerifier().check_span_events(span, expected_events) - assert events_match == True + ] + events_match = GenAiTraceVerifier().check_span_events(span, expected_events) + assert events_match == True + else: + # Check span attributes for message content + assert "gen_ai.input.messages" in span.attributes # type: ignore + assert "gen_ai.output.messages" in span.attributes # type: ignore + + # Verify input messages attribute + input_messages = json.loads(span.attributes["gen_ai.input.messages"]) # type: ignore + assert len(input_messages) == 1 + assert input_messages[0]["role"] == "user" + assert len(input_messages[0]["parts"]) == 1 + assert input_messages[0]["parts"][0]["type"] == "text" + assert input_messages[0]["parts"][0]["content"] == "Write a short poem about AI" + + # Verify output messages attribute + output_messages = json.loads(span.attributes["gen_ai.output.messages"]) # type: ignore + assert len(output_messages) == 1 + assert output_messages[0]["role"] == "assistant" + assert len(output_messages[0]["parts"]) == 1 + assert output_messages[0]["parts"][0]["type"] == "text" + assert "content" in output_messages[0]["parts"][0] + assert len(output_messages[0]["parts"][0]["content"]) > 0 + assert "finish_reason" in output_messages[0] - @pytest.mark.usefixtures("instrument_without_content") + @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.HTTPX) - def 
test_sync_non_streaming_without_content_recording(self, **kwargs): - """Test synchronous non-streaming responses with content recording disabled.""" + def test_sync_non_streaming_with_content_recording_events(self, **kwargs): + """Test synchronous non-streaming responses with content recording enabled (event mode).""" + self._test_sync_non_streaming_with_content_recording_impl(True, **kwargs) + + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.HTTPX) + def test_sync_non_streaming_with_content_recording_attributes(self, **kwargs): + """Test synchronous non-streaming responses with content recording enabled (attribute mode).""" + self._test_sync_non_streaming_with_content_recording_impl(False, **kwargs) + + def _test_sync_non_streaming_without_content_recording_impl(self, use_events, **kwargs): + """Implementation for testing synchronous non-streaming responses with content recording disabled.""" self.cleanup() + set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -312,7 +350,7 @@ def test_sync_non_streaming_without_content_recording(self, **kwargs): # Check span attributes expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), @@ -325,36 +363,69 @@ def test_sync_non_streaming_without_content_recording(self, **kwargs): attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) assert attributes_match == True - # Check span events (should not contain content, but should have role and finish_reason) - expected_events = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "user", # Commented out - now in event content - "gen_ai.event.content": 
'[{"role": "user", "parts": [{"type": "text"}]}]', + # Check span events or attributes based on mode (no content) + if use_events: + # Check span events (should not contain content, but should have role and finish_reason) + expected_events = [ + { + "name": "gen_ai.input.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}]}]', + }, }, - }, - { - "name": "gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', + { + "name": "gen_ai.output.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', + }, }, - }, - ] - events_match = GenAiTraceVerifier().check_span_events(span, expected_events) - assert events_match == True + ] + events_match = GenAiTraceVerifier().check_span_events(span, expected_events) + assert events_match == True + else: + # Check span attributes for message content (without actual content) + assert "gen_ai.input.messages" in span.attributes # type: ignore + assert "gen_ai.output.messages" in span.attributes # type: ignore + + # Verify input messages attribute (no content) + input_messages = json.loads(span.attributes["gen_ai.input.messages"]) # type: ignore + assert len(input_messages) == 1 + assert input_messages[0]["role"] == "user" + assert len(input_messages[0]["parts"]) == 1 + assert input_messages[0]["parts"][0]["type"] == "text" + assert "content" not in input_messages[0]["parts"][0] # Content should be omitted + + # Verify output messages attribute (no content) + output_messages = json.loads(span.attributes["gen_ai.output.messages"]) # type: ignore + assert len(output_messages) == 1 + assert output_messages[0]["role"] == 
"assistant" + assert len(output_messages[0]["parts"]) == 1 + assert output_messages[0]["parts"][0]["type"] == "text" + assert "content" not in output_messages[0]["parts"][0] # Content should be omitted + assert "finish_reason" in output_messages[0] - @pytest.mark.usefixtures("instrument_with_content") + @pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.HTTPX) + def test_sync_non_streaming_without_content_recording_events(self, **kwargs): + """Test synchronous non-streaming responses with content recording disabled (event mode).""" + self._test_sync_non_streaming_without_content_recording_impl(True, **kwargs) + + @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.HTTPX) - def test_sync_streaming_with_content_recording(self, **kwargs): - """Test synchronous streaming responses with content recording enabled.""" + def test_sync_non_streaming_without_content_recording_attributes(self, **kwargs): + """Test synchronous non-streaming responses with content recording disabled (attribute mode).""" + self._test_sync_non_streaming_without_content_recording_impl(False, **kwargs) + + def _test_sync_streaming_with_content_recording_impl(self, use_events, **kwargs): + """Implementation for testing synchronous streaming responses with content recording enabled.""" from openai.types.responses.response_input_param import FunctionCallOutput self.cleanup() + set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -402,7 +473,7 @@ def test_sync_streaming_with_content_recording(self, **kwargs): # Check span attributes expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), @@ -415,27 +486,90 @@ def 
test_sync_streaming_with_content_recording(self, **kwargs): attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) assert attributes_match == True - # Check span events (should include assistant message for streaming) - expected_events = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "user", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "Write a short poem about AI"}]}]', + # Check span events or attributes based on mode (should include assistant message for streaming) + if use_events: + expected_events = [ + { + "name": "gen_ai.input.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "Write a short poem about AI"}]}]', + }, }, - }, - { - "name": "gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', + { + "name": "gen_ai.output.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', + }, }, - }, - ] - events_match = GenAiTraceVerifier().check_span_events(span, expected_events) - assert events_match == True + ] + events_match = GenAiTraceVerifier().check_span_events(span, expected_events) + assert events_match == True + else: + # Check span attributes for message content + assert "gen_ai.input.messages" in span.attributes # type: ignore + assert "gen_ai.output.messages" in span.attributes # type: ignore + + # Verify input messages attribute + input_messages = json.loads(span.attributes["gen_ai.input.messages"]) # type: 
ignore + assert len(input_messages) == 1 + assert input_messages[0]["role"] == "user" + assert len(input_messages[0]["parts"]) == 1 + assert input_messages[0]["parts"][0]["type"] == "text" + assert input_messages[0]["parts"][0]["content"] == "Write a short poem about AI" + + # Verify output messages attribute + output_messages = json.loads(span.attributes["gen_ai.output.messages"]) # type: ignore + assert len(output_messages) == 1 + assert output_messages[0]["role"] == "assistant" + assert len(output_messages[0]["parts"]) == 1 + assert output_messages[0]["parts"][0]["type"] == "text" + assert "content" in output_messages[0]["parts"][0] + assert len(output_messages[0]["parts"][0]["content"]) > 0 + assert "finish_reason" in output_messages[0] + + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) + def test_sync_function_tool_with_content_recording_non_streaming_events(self, **kwargs): + """Test synchronous function tool usage with content recording enabled, non-streaming (event mode).""" + self._test_sync_function_tool_with_content_recording_non_streaming_impl(True, **kwargs) + + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) + def test_sync_function_tool_with_content_recording_non_streaming_attributes(self, **kwargs): + """Test synchronous function tool usage with content recording enabled, non-streaming (attribute mode).""" + self._test_sync_function_tool_with_content_recording_non_streaming_impl(False, **kwargs) + + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.HTTPX) + def test_sync_non_streaming_without_conversation_events(self, **kwargs): + """Test synchronous non-streaming responses without conversation parameter (event mode).""" + self._test_sync_non_streaming_without_conversation_impl(True, 
**kwargs) + + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.HTTPX) + def test_sync_non_streaming_without_conversation_attributes(self, **kwargs): + """Test synchronous non-streaming responses without conversation parameter (attribute mode).""" + self._test_sync_non_streaming_without_conversation_impl(False, **kwargs) + + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.HTTPX) + def test_sync_streaming_with_content_recording_events(self, **kwargs): + """Test synchronous streaming responses with content recording enabled (event mode).""" + self._test_sync_streaming_with_content_recording_impl(True, **kwargs) + + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.HTTPX) + def test_sync_streaming_with_content_recording_attributes(self, **kwargs): + """Test synchronous streaming responses with content recording enabled (attribute mode).""" + self._test_sync_streaming_with_content_recording_impl(False, **kwargs) @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @@ -487,6 +621,7 @@ def test_sync_conversations_create(self, **kwargs): def test_sync_list_conversation_items_with_content_recording(self, **kwargs): """Test synchronous list_conversation_items with content recording enabled.""" self.cleanup() + set_use_message_events(True) # Use event-based mode for this test os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -563,6 +698,7 @@ def test_sync_list_conversation_items_with_content_recording(self, **kwargs): def test_sync_list_conversation_items_without_content_recording(self, **kwargs): """Test synchronous list_conversation_items with content recording disabled.""" self.cleanup() + set_use_message_events(True) # Use event-based mode for this test os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -669,12 +805,10 @@ def 
test_no_instrumentation_no_spans(self): exporter.shutdown() trace._TRACER_PROVIDER = None - @pytest.mark.usefixtures("instrument_with_content") - @servicePreparer() - @recorded_by_proxy(RecordedTransport.HTTPX) - def test_sync_non_streaming_without_conversation(self, **kwargs): - """Test synchronous non-streaming responses without conversation parameter.""" + def _test_sync_non_streaming_without_conversation_impl(self, use_events, **kwargs): + """Implementation for testing synchronous non-streaming responses without conversation parameter.""" self.cleanup() + set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -708,7 +842,7 @@ def test_sync_non_streaming_without_conversation(self, **kwargs): # Check span attributes - should NOT have conversation.id expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), @@ -720,36 +854,55 @@ def test_sync_non_streaming_without_conversation(self, **kwargs): attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) assert attributes_match == True - # Check span events - expected_events = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "user", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "Write a short poem about AI"}]}]', + # Check span events or attributes based on mode + if use_events: + expected_events = [ + { + "name": "gen_ai.input.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "Write a short poem about AI"}]}]', + }, }, - }, - { - "name": "gen_ai.output.messages", - "attributes": 
{ - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', + { + "name": "gen_ai.output.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', + }, }, - }, - ] - events_match = GenAiTraceVerifier().check_span_events(span, expected_events) - assert events_match == True - - @pytest.mark.usefixtures("instrument_with_content") - @servicePreparer() - @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - def test_sync_function_tool_with_content_recording_non_streaming(self, **kwargs): - """Test synchronous function tool usage with content recording enabled (non-streaming).""" + ] + events_match = GenAiTraceVerifier().check_span_events(span, expected_events) + assert events_match == True + else: + # Check span attributes for message content + assert "gen_ai.input.messages" in span.attributes # type: ignore + assert "gen_ai.output.messages" in span.attributes # type: ignore + + # Verify input messages attribute + input_messages = json.loads(span.attributes["gen_ai.input.messages"]) # type: ignore + assert len(input_messages) == 1 + assert input_messages[0]["role"] == "user" + assert len(input_messages[0]["parts"]) == 1 + assert input_messages[0]["parts"][0]["type"] == "text" + assert input_messages[0]["parts"][0]["content"] == "Write a short poem about AI" + + # Verify output messages attribute + output_messages = json.loads(span.attributes["gen_ai.output.messages"]) # type: ignore + assert len(output_messages) == 1 + assert output_messages[0]["role"] == "assistant" + assert len(output_messages[0]["parts"]) == 1 + assert output_messages[0]["parts"][0]["type"] == "text" + assert "content" in output_messages[0]["parts"][0] + assert 
len(output_messages[0]["parts"][0]["content"]) > 0 + assert "finish_reason" in output_messages[0] + + def _test_sync_function_tool_with_content_recording_non_streaming_impl(self, use_events, **kwargs): + """Implementation for testing synchronous function tool usage with content recording (non-streaming).""" from openai.types.responses.response_input_param import FunctionCallOutput self.cleanup() + set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -841,7 +994,7 @@ def test_sync_function_tool_with_content_recording_non_streaming(self, **kwargs) span1 = spans[0] expected_attributes_1 = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.agent.name", agent.name), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), @@ -854,33 +1007,53 @@ def test_sync_function_tool_with_content_recording_non_streaming(self, **kwargs) attributes_match = GenAiTraceVerifier().check_span_attributes(span1, expected_attributes_1) assert attributes_match == True - # Check events for first span - user message and assistant tool call - expected_events_1 = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "user", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "What\'s the weather in Seattle?"}]}]', + # Check events or attributes for first span - user message and assistant tool call + if use_events: + expected_events_1 = [ + { + "name": "gen_ai.input.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "What\'s the weather in Seattle?"}]}]', + }, }, - }, - { - "name": "gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": 
"assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "tool_call", "content": {"type": "function_call", "id": "*", "function": {"name": "get_weather", "arguments": "*"}}}]}]', + { + "name": "gen_ai.output.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "tool_call", "content": {"type": "function_call", "id": "*", "function": {"name": "get_weather", "arguments": "*"}}}]}]', + }, }, - }, - ] - events_match = GenAiTraceVerifier().check_span_events(span1, expected_events_1) - assert events_match == True + ] + events_match = GenAiTraceVerifier().check_span_events(span1, expected_events_1) + assert events_match == True + else: + # Check span attributes + assert "gen_ai.input.messages" in span1.attributes # type: ignore + assert "gen_ai.output.messages" in span1.attributes # type: ignore + + # Verify input messages - user question + input_messages = json.loads(span1.attributes["gen_ai.input.messages"]) # type: ignore + assert len(input_messages) == 1 + assert input_messages[0]["role"] == "user" + assert input_messages[0]["parts"][0]["type"] == "text" + assert input_messages[0]["parts"][0]["content"] == "What's the weather in Seattle?" 
+ + # Verify output messages - assistant tool call + output_messages = json.loads(span1.attributes["gen_ai.output.messages"]) # type: ignore + assert len(output_messages) == 1 + assert output_messages[0]["role"] == "assistant" + assert output_messages[0]["parts"][0]["type"] == "tool_call" + assert output_messages[0]["parts"][0]["content"]["type"] == "function_call" + assert "id" in output_messages[0]["parts"][0]["content"] + assert output_messages[0]["parts"][0]["content"]["function"]["name"] == "get_weather" + assert "arguments" in output_messages[0]["parts"][0]["content"]["function"] # Validate second span (tool output + final response) span2 = spans[1] expected_attributes_2 = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.agent.name", agent.name), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), @@ -893,36 +1066,57 @@ def test_sync_function_tool_with_content_recording_non_streaming(self, **kwargs) attributes_match = GenAiTraceVerifier().check_span_attributes(span2, expected_attributes_2) assert attributes_match == True - # Check events for second span - tool output and assistant response - expected_events_2 = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "tool", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "tool", "parts": [{"type": "tool_call_output", "content": {"type": "function_call_output", "id": "*", "output": {"temperature": "72°F", "condition": "sunny"}}}]}]', + # Check events or attributes for second span - tool output and assistant response + if use_events: + expected_events_2 = [ + { + "name": "gen_ai.input.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + "gen_ai.event.content": '[{"role": "tool", "parts": [{"type": "tool_call_output", "content": {"type": "function_call_output", "id": "*", 
"output": {"temperature": "72°F", "condition": "sunny"}}}]}]', + }, }, - }, - { - "name": "gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', + { + "name": "gen_ai.output.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', + }, }, - }, - ] - events_match = GenAiTraceVerifier().check_span_events(span2, expected_events_2) - assert events_match == True - - @pytest.mark.usefixtures("instrument_with_content") - @servicePreparer() - @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - def test_sync_function_tool_with_content_recording_streaming(self, **kwargs): - """Test synchronous function tool usage with content recording enabled (streaming).""" + ] + events_match = GenAiTraceVerifier().check_span_events(span2, expected_events_2) + assert events_match == True + else: + # Check span attributes + assert "gen_ai.input.messages" in span2.attributes # type: ignore + assert "gen_ai.output.messages" in span2.attributes # type: ignore + + # Verify input messages - tool output + input_messages = json.loads(span2.attributes["gen_ai.input.messages"]) # type: ignore + assert len(input_messages) == 1 + assert input_messages[0]["role"] == "tool" + assert input_messages[0]["parts"][0]["type"] == "tool_call_output" + assert input_messages[0]["parts"][0]["content"]["type"] == "function_call_output" + assert "id" in input_messages[0]["parts"][0]["content"] + output_data = json.loads(input_messages[0]["parts"][0]["content"]["output"]) + assert output_data["temperature"] == "72°F" + assert output_data["condition"] == "sunny" + + # Verify output messages - assistant final response + output_messages = 
json.loads(span2.attributes["gen_ai.output.messages"]) # type: ignore + assert len(output_messages) == 1 + assert output_messages[0]["role"] == "assistant" + assert output_messages[0]["parts"][0]["type"] == "text" + assert "content" in output_messages[0]["parts"][0] + assert len(output_messages[0]["parts"][0]["content"]) > 0 + assert "finish_reason" in output_messages[0] + + def _test_sync_function_tool_with_content_recording_streaming_impl(self, use_events, **kwargs): + """Implementation for testing synchronous function tool usage with content recording (streaming).""" from openai.types.responses.response_input_param import FunctionCallOutput self.cleanup() + set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -1037,7 +1231,7 @@ def test_sync_function_tool_with_content_recording_streaming(self, **kwargs): span1 = spans[0] expected_attributes_1 = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.agent.name", agent.name), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), @@ -1076,7 +1270,7 @@ def test_sync_function_tool_with_content_recording_streaming(self, **kwargs): span2 = spans[1] expected_attributes_2 = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.agent.name", agent.name), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), @@ -1111,14 +1305,26 @@ def test_sync_function_tool_with_content_recording_streaming(self, **kwargs): events_match = GenAiTraceVerifier().check_span_events(span2, expected_events_2) assert events_match == True - @pytest.mark.usefixtures("instrument_without_content") + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) + def 
test_sync_function_tool_with_content_recording_streaming_events(self, **kwargs): + """Test synchronous function tool usage with content recording enabled, streaming (event mode).""" + self._test_sync_function_tool_with_content_recording_streaming_impl(True, **kwargs) + + @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - def test_sync_function_tool_without_content_recording_non_streaming(self, **kwargs): - """Test synchronous function tool usage without content recording (non-streaming).""" + def test_sync_function_tool_with_content_recording_streaming_attributes(self, **kwargs): + """Test synchronous function tool usage with content recording enabled, streaming (attribute mode).""" + self._test_sync_function_tool_with_content_recording_streaming_impl(False, **kwargs) + + def _test_sync_function_tool_without_content_recording_non_streaming_impl(self, use_events, **kwargs): + """Implementation for testing synchronous function tool usage without content recording (non-streaming).""" from openai.types.responses.response_input_param import FunctionCallOutput self.cleanup() + set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -1209,7 +1415,7 @@ def test_sync_function_tool_without_content_recording_non_streaming(self, **kwar span1 = spans[0] expected_attributes_1 = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.agent.name", agent.name), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), @@ -1248,7 +1454,7 @@ def test_sync_function_tool_without_content_recording_non_streaming(self, **kwar span2 = spans[1] expected_attributes_2 = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.agent.name", 
agent.name), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), @@ -1283,14 +1489,12 @@ def test_sync_function_tool_without_content_recording_non_streaming(self, **kwar events_match = GenAiTraceVerifier().check_span_events(span2, expected_events_2) assert events_match == True - @pytest.mark.usefixtures("instrument_without_content") - @servicePreparer() - @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - def test_sync_function_tool_without_content_recording_streaming(self, **kwargs): - """Test synchronous function tool usage without content recording (streaming).""" + def _test_sync_function_tool_without_content_recording_streaming_impl(self, use_events, **kwargs): + """Implementation for testing synchronous function tool usage without content recording (streaming).""" from openai.types.responses.response_input_param import FunctionCallOutput self.cleanup() + set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -1399,7 +1603,7 @@ def test_sync_function_tool_without_content_recording_streaming(self, **kwargs): span1 = spans[0] expected_attributes_1 = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.agent.name", agent.name), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), @@ -1438,7 +1642,7 @@ def test_sync_function_tool_without_content_recording_streaming(self, **kwargs): span2 = spans[1] expected_attributes_2 = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.agent.name", agent.name), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), @@ -1473,6 +1677,34 @@ def test_sync_function_tool_without_content_recording_streaming(self, **kwargs): events_match = GenAiTraceVerifier().check_span_events(span2, expected_events_2) assert 
events_match == True + @pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) + def test_sync_function_tool_without_content_recording_non_streaming_events(self, **kwargs): + """Test synchronous function tool usage without content recording, non-streaming (event mode).""" + self._test_sync_function_tool_without_content_recording_non_streaming_impl(True, **kwargs) + + @pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) + def test_sync_function_tool_without_content_recording_non_streaming_attributes(self, **kwargs): + """Test synchronous function tool usage without content recording, non-streaming (attribute mode).""" + self._test_sync_function_tool_without_content_recording_non_streaming_impl(False, **kwargs) + + @pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) + def test_sync_function_tool_without_content_recording_streaming_events(self, **kwargs): + """Test synchronous function tool usage without content recording, streaming (event mode).""" + self._test_sync_function_tool_without_content_recording_streaming_impl(True, **kwargs) + + @pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) + def test_sync_function_tool_without_content_recording_streaming_attributes(self, **kwargs): + """Test synchronous function tool usage without content recording, streaming (attribute mode).""" + self._test_sync_function_tool_without_content_recording_streaming_impl(False, **kwargs) + @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) @@ -1481,6 +1713,7 @@ def 
test_sync_function_tool_list_conversation_items_with_content_recording(self, from openai.types.responses.response_input_param import FunctionCallOutput self.cleanup() + set_use_message_events(True) # Use event-based mode for this test os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -1630,6 +1863,7 @@ def test_sync_function_tool_list_conversation_items_without_content_recording(se from openai.types.responses.response_input_param import FunctionCallOutput self.cleanup() + set_use_message_events(True) # Use event-based mode for this test os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -1777,6 +2011,7 @@ def test_sync_function_tool_list_conversation_items_without_content_recording(se def test_sync_multiple_text_inputs_with_content_recording_non_streaming(self, **kwargs): """Test synchronous non-streaming responses with multiple text inputs and content recording enabled.""" self.cleanup() + set_use_message_events(True) # Use event-based mode for this test os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -1824,7 +2059,7 @@ def test_sync_multiple_text_inputs_with_content_recording_non_streaming(self, ** # Check span attributes expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), @@ -1873,6 +2108,7 @@ def test_sync_multiple_text_inputs_with_content_recording_non_streaming(self, ** def test_sync_multiple_text_inputs_with_content_recording_streaming(self, **kwargs): """Test synchronous streaming responses with multiple text inputs and content recording enabled.""" self.cleanup() + set_use_message_events(True) # Use event-based mode for this test os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -1928,7 +2164,7 @@ def test_sync_multiple_text_inputs_with_content_recording_streaming(self, **kwar # Check span 
attributes expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), @@ -1977,6 +2213,7 @@ def test_sync_multiple_text_inputs_with_content_recording_streaming(self, **kwar def test_sync_multiple_text_inputs_without_content_recording_non_streaming(self, **kwargs): """Test synchronous non-streaming responses with multiple text inputs and content recording disabled.""" self.cleanup() + set_use_message_events(True) # Use event-based mode for this test os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -2024,7 +2261,7 @@ def test_sync_multiple_text_inputs_without_content_recording_non_streaming(self, # Check span attributes expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), @@ -2073,6 +2310,7 @@ def test_sync_multiple_text_inputs_without_content_recording_non_streaming(self, def test_sync_multiple_text_inputs_without_content_recording_streaming(self, **kwargs): """Test synchronous streaming responses with multiple text inputs and content recording disabled.""" self.cleanup() + set_use_message_events(True) # Use event-based mode for this test os.environ.update({CONTENT_TRACING_ENV_VARIABLE: "False"}) self.setup_telemetry() assert False == AIProjectInstrumentor().is_content_recording_enabled() @@ -2123,7 +2361,7 @@ def test_sync_multiple_text_inputs_without_content_recording_streaming(self, **k # Check span attributes expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), 
("gen_ai.provider.name", "azure.openai"), ("server.address", ""), @@ -3423,7 +3661,7 @@ def test_responses_stream_method_with_content_recording(self, **kwargs): # Check span attributes expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), @@ -3496,7 +3734,7 @@ def test_responses_stream_method_without_content_recording(self, **kwargs): # Check span attributes expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), @@ -3620,7 +3858,7 @@ def test_responses_stream_method_with_tools_with_content_recording(self, **kwarg span1 = spans[0] expected_attributes_1 = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), @@ -4516,7 +4754,7 @@ def test_prompt_agent_with_responses_non_streaming(self, **kwargs): span = spans[0] expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.agent.name", agent.name), ("gen_ai.agent.id", agent.id), ("gen_ai.provider.name", "azure.openai"), @@ -4593,7 +4831,7 @@ def test_prompt_agent_with_responses_streaming(self, **kwargs): span = spans[0] expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.agent.name", agent.name), ("gen_ai.agent.id", agent.id), 
("gen_ai.provider.name", "azure.openai"), diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_async.py index 171ac7cc53dc..e1aa0597485f 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_async.py @@ -8,7 +8,13 @@ import json import pytest from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils -from azure.ai.projects.telemetry._utils import SPAN_NAME_CHAT, SPAN_NAME_INVOKE_AGENT +from azure.ai.projects.telemetry._utils import ( + OPERATION_NAME_CHAT, + OPERATION_NAME_INVOKE_AGENT, + SPAN_NAME_CHAT, + SPAN_NAME_INVOKE_AGENT, + set_use_message_events, +) from azure.ai.projects.models import FunctionTool, PromptAgentDefinition from azure.core.settings import settings from gen_ai_trace_verifier import GenAiTraceVerifier @@ -38,6 +44,7 @@ class TestResponsesInstrumentor(TestAiAgentsInstrumentorBase): async def test_async_non_streaming_with_content_recording(self, **kwargs): """Test asynchronous non-streaming responses with content recording enabled.""" self.cleanup() + set_use_message_events(True) # Use event-based mode for this test os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -79,7 +86,7 @@ async def test_async_non_streaming_with_content_recording(self, **kwargs): # Check span attributes expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), @@ -169,7 +176,7 @@ async def test_async_streaming_with_content_recording(self, **kwargs): # Check span attributes expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + 
("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), @@ -257,6 +264,7 @@ async def test_async_conversations_create(self, **kwargs): async def test_async_list_conversation_items_with_content_recording(self, **kwargs): """Test asynchronous list_conversation_items with content recording enabled.""" self.cleanup() + set_use_message_events(True) # Use event-based mode for this test os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -339,6 +347,7 @@ async def test_async_function_tool_with_content_recording_streaming(self, **kwar from openai.types.responses.response_input_param import FunctionCallOutput self.cleanup() + set_use_message_events(True) # Use event-based mode for this test os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -455,7 +464,7 @@ async def test_async_function_tool_with_content_recording_streaming(self, **kwar span1 = spans[0] expected_attributes_1 = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.agent.name", agent.name), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), @@ -494,7 +503,7 @@ async def test_async_function_tool_with_content_recording_streaming(self, **kwar span2 = spans[1] expected_attributes_2 = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.agent.name", agent.name), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), @@ -537,6 +546,7 @@ async def test_async_function_tool_without_content_recording_streaming(self, **k from openai.types.responses.response_input_param import FunctionCallOutput self.cleanup() + set_use_message_events(True) # Use event-based mode for this test os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -647,7 +657,7 @@ async def 
test_async_function_tool_without_content_recording_streaming(self, **k span1 = spans[0] expected_attributes_1 = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.agent.name", agent.name), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), @@ -686,7 +696,7 @@ async def test_async_function_tool_without_content_recording_streaming(self, **k span2 = spans[1] expected_attributes_2 = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.agent.name", agent.name), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), @@ -727,6 +737,7 @@ async def test_async_function_tool_without_content_recording_streaming(self, **k async def test_async_multiple_text_inputs_with_content_recording_non_streaming(self, **kwargs): """Test asynchronous non-streaming responses with multiple text inputs and content recording enabled.""" self.cleanup() + set_use_message_events(True) # Use event-based mode for this test os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -774,7 +785,7 @@ async def test_async_multiple_text_inputs_with_content_recording_non_streaming(s # Check span attributes expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), @@ -823,6 +834,7 @@ async def test_async_multiple_text_inputs_with_content_recording_non_streaming(s async def test_async_multiple_text_inputs_with_content_recording_streaming(self, **kwargs): """Test asynchronous streaming responses with multiple text inputs and content recording enabled.""" self.cleanup() + set_use_message_events(True) # Use event-based mode for this test os.environ.update( { 
CONTENT_TRACING_ENV_VARIABLE: "True", @@ -878,7 +890,7 @@ async def test_async_multiple_text_inputs_with_content_recording_streaming(self, # Check span attributes expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), @@ -927,6 +939,7 @@ async def test_async_multiple_text_inputs_with_content_recording_streaming(self, async def test_async_multiple_text_inputs_without_content_recording_non_streaming(self, **kwargs): """Test asynchronous non-streaming responses with multiple text inputs and content recording disabled.""" self.cleanup() + set_use_message_events(True) # Use event-based mode for this test os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -974,7 +987,7 @@ async def test_async_multiple_text_inputs_without_content_recording_non_streamin # Check span attributes expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), @@ -2313,7 +2326,7 @@ async def test_async_multiple_text_inputs_without_content_recording_streaming(se # Check span attributes expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), @@ -2366,6 +2379,7 @@ async def test_async_multiple_text_inputs_without_content_recording_streaming(se async def test_async_responses_stream_method_with_content_recording(self, **kwargs): """Test async responses.stream() method with content recording enabled.""" self.cleanup() + set_use_message_events(True) # 
Use event-based mode for this test os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -2405,7 +2419,7 @@ async def test_async_responses_stream_method_with_content_recording(self, **kwar # Check span attributes expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), @@ -2485,7 +2499,7 @@ async def test_async_responses_stream_method_without_content_recording(self, **k # Check span attributes expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), @@ -2528,6 +2542,7 @@ async def test_async_responses_stream_method_with_tools_with_content_recording(s from openai.types.responses.response_input_param import FunctionCallOutput self.cleanup() + set_use_message_events(True) # Use event-based mode for this test os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -2616,7 +2631,7 @@ async def test_async_responses_stream_method_with_tools_with_content_recording(s span1 = spans[0] expected_attributes_1 = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), @@ -3361,7 +3376,7 @@ async def test_async_prompt_agent_with_responses_non_streaming(self, **kwargs): span = spans[0] expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.agent.name", agent.name), ("gen_ai.agent.id", agent.id), ("gen_ai.provider.name", 
"azure.openai"), @@ -3440,7 +3455,7 @@ async def test_async_prompt_agent_with_responses_streaming(self, **kwargs): span = spans[0] expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.agent.name", agent.name), ("gen_ai.agent.id", agent.id), ("gen_ai.provider.name", "azure.openai"), diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation.py index 5dbcb4566c87..31ce3d059f9e 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation.py @@ -9,7 +9,7 @@ import os import pytest from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils -from azure.ai.projects.telemetry._utils import SPAN_NAME_INVOKE_AGENT +from azure.ai.projects.telemetry._utils import OPERATION_NAME_INVOKE_AGENT, SPAN_NAME_INVOKE_AGENT from azure.core.settings import settings from gen_ai_trace_verifier import GenAiTraceVerifier from devtools_testutils import recorded_by_proxy, RecordedTransport @@ -105,7 +105,7 @@ def test_sync_browser_automation_non_streaming_with_content_recording(self, **kw expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -236,7 +236,7 @@ def test_sync_browser_automation_non_streaming_without_content_recording(self, * expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", 
"azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -365,7 +365,7 @@ def test_sync_browser_automation_streaming_with_content_recording(self, **kwargs expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -490,7 +490,7 @@ def test_sync_browser_automation_streaming_without_content_recording(self, **kwa expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation_async.py index 0631495ff0b2..e1b5aa9b9817 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation_async.py @@ -9,7 +9,7 @@ import os import pytest from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils -from azure.ai.projects.telemetry._utils import SPAN_NAME_INVOKE_AGENT +from azure.ai.projects.telemetry._utils import OPERATION_NAME_INVOKE_AGENT, SPAN_NAME_INVOKE_AGENT from azure.core.settings import settings from gen_ai_trace_verifier import GenAiTraceVerifier from devtools_testutils.aio import recorded_by_proxy_async @@ -108,7 +108,7 @@ async def test_async_browser_automation_non_streaming_with_content_recording(sel expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + 
("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -235,7 +235,7 @@ async def test_async_browser_automation_non_streaming_without_content_recording( expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -360,7 +360,7 @@ async def test_async_browser_automation_streaming_with_content_recording(self, * expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -482,7 +482,7 @@ async def test_async_browser_automation_streaming_without_content_recording(self expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter.py index 13b0d8342186..a431a3f0e79b 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter.py @@ -10,7 +10,7 @@ import pytest from io import BytesIO from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils -from azure.ai.projects.telemetry._utils import SPAN_NAME_INVOKE_AGENT +from 
azure.ai.projects.telemetry._utils import OPERATION_NAME_INVOKE_AGENT, SPAN_NAME_INVOKE_AGENT from azure.core.settings import settings from gen_ai_trace_verifier import GenAiTraceVerifier from devtools_testutils import recorded_by_proxy, RecordedTransport @@ -118,7 +118,7 @@ def test_sync_code_interpreter_non_streaming_with_content_recording(self, **kwar expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -304,7 +304,7 @@ def test_sync_code_interpreter_non_streaming_without_content_recording(self, **k expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -499,7 +499,7 @@ def test_sync_code_interpreter_streaming_with_content_recording(self, **kwargs): expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -689,7 +689,7 @@ def test_sync_code_interpreter_streaming_without_content_recording(self, **kwarg expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter_async.py 
b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter_async.py index 5f2eb03e3fbe..25676aadb2e6 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter_async.py @@ -10,7 +10,7 @@ import pytest from io import BytesIO from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils -from azure.ai.projects.telemetry._utils import SPAN_NAME_INVOKE_AGENT +from azure.ai.projects.telemetry._utils import OPERATION_NAME_INVOKE_AGENT, SPAN_NAME_INVOKE_AGENT from azure.core.settings import settings from gen_ai_trace_verifier import GenAiTraceVerifier from devtools_testutils.aio import recorded_by_proxy_async @@ -118,7 +118,7 @@ async def test_async_code_interpreter_non_streaming_with_content_recording(self, expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -304,7 +304,7 @@ async def test_async_code_interpreter_non_streaming_without_content_recording(se expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -499,7 +499,7 @@ async def test_async_code_interpreter_streaming_with_content_recording(self, **k expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -689,7 +689,7 @@ async def 
test_async_code_interpreter_streaming_without_content_recording(self, expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search.py index 92902d6fa908..3a447d2e99a0 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search.py @@ -10,7 +10,7 @@ import pytest from io import BytesIO from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils -from azure.ai.projects.telemetry._utils import SPAN_NAME_INVOKE_AGENT +from azure.ai.projects.telemetry._utils import OPERATION_NAME_INVOKE_AGENT, SPAN_NAME_INVOKE_AGENT from azure.core.settings import settings from gen_ai_trace_verifier import GenAiTraceVerifier from devtools_testutils import recorded_by_proxy, RecordedTransport @@ -118,7 +118,7 @@ def test_sync_file_search_non_streaming_with_content_recording(self, **kwargs): span = spans[0] expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -326,7 +326,7 @@ def test_sync_file_search_non_streaming_without_content_recording(self, **kwargs span = spans[0] expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), 
("gen_ai.conversation.id", conversation.id), @@ -538,7 +538,7 @@ def test_sync_file_search_streaming_with_content_recording(self, **kwargs): expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -742,7 +742,7 @@ def test_sync_file_search_streaming_without_content_recording(self, **kwargs): expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search_async.py index f215ee1072f2..e1b47fe0df30 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search_async.py @@ -10,7 +10,7 @@ import pytest from io import BytesIO from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils -from azure.ai.projects.telemetry._utils import SPAN_NAME_INVOKE_AGENT +from azure.ai.projects.telemetry._utils import OPERATION_NAME_INVOKE_AGENT, SPAN_NAME_INVOKE_AGENT from azure.core.settings import settings from gen_ai_trace_verifier import GenAiTraceVerifier from devtools_testutils.aio import recorded_by_proxy_async @@ -119,7 +119,7 @@ async def test_async_file_search_non_streaming_with_content_recording(self, **kw span = spans[0] expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", 
OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -327,7 +327,7 @@ async def test_async_file_search_non_streaming_without_content_recording(self, * span = spans[0] expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -539,7 +539,7 @@ async def test_async_file_search_streaming_with_content_recording(self, **kwargs expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -743,7 +743,7 @@ async def test_async_file_search_streaming_without_content_recording(self, **kwa expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py index 9152d4b244f2..ca1c5957853d 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py @@ -9,7 +9,7 @@ import os import pytest from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils -from azure.ai.projects.telemetry._utils import SPAN_NAME_INVOKE_AGENT +from azure.ai.projects.telemetry._utils import OPERATION_NAME_INVOKE_AGENT, SPAN_NAME_INVOKE_AGENT from 
azure.core.settings import settings from gen_ai_trace_verifier import GenAiTraceVerifier from devtools_testutils import recorded_by_proxy, RecordedTransport @@ -120,7 +120,7 @@ def test_sync_mcp_non_streaming_with_content_recording(self, **kwargs): span1 = spans[0] expected_attributes_1 = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -197,7 +197,7 @@ def test_sync_mcp_non_streaming_with_content_recording(self, **kwargs): span2 = spans[1] expected_attributes_2 = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -408,7 +408,7 @@ def test_sync_mcp_non_streaming_without_content_recording(self, **kwargs): span1 = spans[0] expected_attributes_1 = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -480,7 +480,7 @@ def test_sync_mcp_non_streaming_without_content_recording(self, **kwargs): span2 = spans[1] expected_attributes_2 = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -695,7 +695,7 @@ def test_sync_mcp_streaming_with_content_recording(self, **kwargs): expected_attributes_1 = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", 
OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -761,7 +761,7 @@ def test_sync_mcp_streaming_with_content_recording(self, **kwargs): expected_attributes_2 = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -934,7 +934,7 @@ def test_sync_mcp_streaming_without_content_recording(self, **kwargs): expected_attributes_1 = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -998,7 +998,7 @@ def test_sync_mcp_streaming_without_content_recording(self, **kwargs): expected_attributes_2 = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py index c0fd327890da..fe3a86d55e72 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py @@ -9,7 +9,7 @@ import os import pytest from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils -from azure.ai.projects.telemetry._utils import SPAN_NAME_INVOKE_AGENT +from azure.ai.projects.telemetry._utils import OPERATION_NAME_INVOKE_AGENT, SPAN_NAME_INVOKE_AGENT from azure.core.settings 
import settings from gen_ai_trace_verifier import GenAiTraceVerifier from devtools_testutils.aio import recorded_by_proxy_async @@ -121,7 +121,7 @@ async def test_async_mcp_non_streaming_with_content_recording(self, **kwargs): span1 = spans[0] expected_attributes_1 = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -198,7 +198,7 @@ async def test_async_mcp_non_streaming_with_content_recording(self, **kwargs): span2 = spans[1] expected_attributes_2 = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -409,7 +409,7 @@ async def test_async_mcp_non_streaming_without_content_recording(self, **kwargs) span1 = spans[0] expected_attributes_1 = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -481,7 +481,7 @@ async def test_async_mcp_non_streaming_without_content_recording(self, **kwargs) span2 = spans[1] expected_attributes_2 = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -696,7 +696,7 @@ async def test_async_mcp_streaming_with_content_recording(self, **kwargs): expected_attributes_1 = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", 
OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -762,7 +762,7 @@ async def test_async_mcp_streaming_with_content_recording(self, **kwargs): expected_attributes_2 = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -935,7 +935,7 @@ async def test_async_mcp_streaming_without_content_recording(self, **kwargs): expected_attributes_1 = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -999,7 +999,7 @@ async def test_async_mcp_streaming_without_content_recording(self, **kwargs): expected_attributes_2 = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow.py index dca9a911fd75..1ba8723af323 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow.py @@ -9,7 +9,7 @@ import os import pytest from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils -from azure.ai.projects.telemetry._utils import SPAN_NAME_INVOKE_AGENT +from azure.ai.projects.telemetry._utils import OPERATION_NAME_INVOKE_AGENT, SPAN_NAME_INVOKE_AGENT from 
azure.core.settings import settings from gen_ai_trace_verifier import GenAiTraceVerifier from devtools_testutils import recorded_by_proxy, RecordedTransport @@ -264,7 +264,7 @@ def test_sync_workflow_non_streaming_with_content_recording(self, **kwargs): # Check span attributes expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -425,7 +425,7 @@ def test_sync_workflow_non_streaming_without_content_recording(self, **kwargs): # Check span attributes expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -596,7 +596,7 @@ def test_sync_workflow_streaming_with_content_recording(self, **kwargs): # Check span attributes expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -763,7 +763,7 @@ def test_sync_workflow_streaming_without_content_recording(self, **kwargs): # Check span attributes expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py index 
fcb55113a0eb..392e9c1d62a1 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py @@ -9,7 +9,7 @@ import os import pytest from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils -from azure.ai.projects.telemetry._utils import SPAN_NAME_INVOKE_AGENT +from azure.ai.projects.telemetry._utils import OPERATION_NAME_INVOKE_AGENT, SPAN_NAME_INVOKE_AGENT from azure.core.settings import settings from gen_ai_trace_verifier import GenAiTraceVerifier from devtools_testutils.aio import recorded_by_proxy_async @@ -260,7 +260,7 @@ async def test_async_workflow_non_streaming_with_content_recording(self, **kwarg # Check span attributes expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -420,7 +420,7 @@ async def test_async_workflow_non_streaming_without_content_recording(self, **kw # Check span attributes expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -589,7 +589,7 @@ async def test_async_workflow_streaming_with_content_recording(self, **kwargs): # Check span attributes expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), @@ -754,7 +754,7 @@ async def test_async_workflow_streaming_without_content_recording(self, **kwargs # 
Check span attributes expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), - ("gen_ai.operation.name", "responses"), + ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), From b27dcecbec82d67ca5856e6b09ac20d2fb7fd7e2 Mon Sep 17 00:00:00 2001 From: M-Hietala <78813398+M-Hietala@users.noreply.github.com> Date: Tue, 3 Feb 2026 12:30:31 -0600 Subject: [PATCH 02/10] adding support of input and output message attributes for responses trace --- sdk/ai/azure-ai-projects/CHANGELOG.md | 2 + .../telemetry/_responses_instrumentor.py | 109 +- .../azure/ai/projects/telemetry/_utils.py | 20 +- .../telemetry/test_responses_instrumentor.py | 1610 +++++++++++------ .../test_responses_instrumentor_async.py | 389 ++-- ...sponses_instrumentor_browser_automation.py | 10 +- ...s_instrumentor_browser_automation_async.py | 10 +- ...responses_instrumentor_code_interpreter.py | 10 +- ...ses_instrumentor_code_interpreter_async.py | 10 +- ...test_responses_instrumentor_file_search.py | 10 +- ...esponses_instrumentor_file_search_async.py | 10 +- .../test_responses_instrumentor_mcp.py | 607 ++++--- .../test_responses_instrumentor_mcp_async.py | 1144 +++++++----- .../test_responses_instrumentor_workflow.py | 10 +- ...t_responses_instrumentor_workflow_async.py | 10 +- 15 files changed, 2546 insertions(+), 1415 deletions(-) diff --git a/sdk/ai/azure-ai-projects/CHANGELOG.md b/sdk/ai/azure-ai-projects/CHANGELOG.md index 0cac8cb15095..cd112dbed28b 100644 --- a/sdk/ai/azure-ai-projects/CHANGELOG.md +++ b/sdk/ai/azure-ai-projects/CHANGELOG.md @@ -23,6 +23,8 @@ * Rename class `ItemParam` to `InputItem`. * Tracing: workflow actions in conversation item listings are now emitted as "gen_ai.conversation.item" events (with role="workflow") instead of "gen_ai.workflow.action" events in the list_conversation_items span. 
* Tracing: response generation span names changed from "responses {model_name}" to "chat {model_name}" for model calls and from "responses {agent_name}" to "invoke_agent {agent_name}" for agent calls. +* Tracing: response generation operation names changed from "responses" to "chat" for model calls and from "responses" to "invoke_agent" for agent calls. +* Tracing: response generation uses gen_ai.input.messages and gen_ai.output.messages attributes directly under the span instead of events. ## 2.0.0b3 (2026-01-06) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_responses_instrumentor.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_responses_instrumentor.py index 58618596f724..20cc36165c35 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_responses_instrumentor.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_responses_instrumentor.py @@ -57,12 +57,11 @@ SERVER_PORT, SPAN_NAME_CHAT, SPAN_NAME_INVOKE_AGENT, - get_use_message_events, + _get_use_message_events, start_span, ) - _Unset: Any = object() logger = logging.getLogger(__name__) @@ -550,7 +549,7 @@ def _append_to_message_attribute( """Helper to append messages to an existing attribute, combining with previous messages.""" # Get existing attribute value existing_value = span.span_instance.attributes.get(attribute_name) if span.span_instance.attributes else None - + if existing_value: # Parse existing JSON array try: @@ -559,16 +558,16 @@ def _append_to_message_attribute( existing_messages = [] except (json.JSONDecodeError, TypeError): existing_messages = [] - + # Append new messages combined_messages = existing_messages + new_messages else: # No existing value, just use new messages combined_messages = new_messages - + # Set the combined value combined_json = json.dumps(combined_messages, ensure_ascii=False) - span.span_instance.set_attribute(attribute_name, combined_json) + span.add_attribute(attribute_name, combined_json) def _add_message_event( self, 
@@ -606,7 +605,7 @@ def _add_message_event( # Serialize the content array to JSON json_content = json.dumps(content_array, ensure_ascii=False) - if get_use_message_events(): + if _get_use_message_events(): # Original event-based implementation attributes = self._create_event_attributes( conversation_id=conversation_id, @@ -635,7 +634,6 @@ def _add_message_event( # Assistant messages go to output.messages self._append_to_message_attribute(span, GEN_AI_OUTPUT_MESSAGES, content_array) - def _add_tool_message_events( # pylint: disable=too-many-branches self, span: "AbstractSpan", @@ -722,15 +720,20 @@ def _add_tool_message_events( # pylint: disable=too-many-branches # Always include parts array with type and id, even when content recording is disabled content_array = [{"role": "tool", "parts": parts}] if parts else [] - attributes = self._create_event_attributes( - conversation_id=conversation_id, - message_role="tool", - ) - # Store as JSON array directly without outer wrapper - attributes[GEN_AI_EVENT_CONTENT] = json.dumps(content_array, ensure_ascii=False) + if _get_use_message_events(): + # Event-based mode: add events + attributes = self._create_event_attributes( + conversation_id=conversation_id, + message_role="tool", + ) + # Store as JSON array directly without outer wrapper + attributes[GEN_AI_EVENT_CONTENT] = json.dumps(content_array, ensure_ascii=False) - # Use "tool" for the event name: gen_ai.tool.message - span.span_instance.add_event(name=GEN_AI_TOOL_MESSAGE_EVENT, attributes=attributes) + # Use "tool" for the event name: gen_ai.tool.message + span.span_instance.add_event(name=GEN_AI_TOOL_MESSAGE_EVENT, attributes=attributes) + else: + # Attribute-based mode: append to input messages (tool outputs are inputs to the model) + self._append_to_message_attribute(span, GEN_AI_INPUT_MESSAGES, content_array) def _add_mcp_response_events( self, @@ -799,15 +802,20 @@ def _add_mcp_response_events( # Always include parts array with type and id, even when content 
recording is disabled content_array = [{"role": "user", "parts": parts}] if parts else [] - attributes = self._create_event_attributes( - conversation_id=conversation_id, - message_role="user", - ) - # Store as JSON array directly without outer wrapper - attributes[GEN_AI_EVENT_CONTENT] = json.dumps(content_array, ensure_ascii=False) + if _get_use_message_events(): + # Event-based mode: add events + attributes = self._create_event_attributes( + conversation_id=conversation_id, + message_role="user", + ) + # Store as JSON array directly without outer wrapper + attributes[GEN_AI_EVENT_CONTENT] = json.dumps(content_array, ensure_ascii=False) - # Use user message event name since MCP responses are user inputs - span.span_instance.add_event(name=GEN_AI_USER_MESSAGE_EVENT, attributes=attributes) + # Use user message event name since MCP responses are user inputs + span.span_instance.add_event(name=GEN_AI_USER_MESSAGE_EVENT, attributes=attributes) + else: + # Attribute-based mode: append to input messages (MCP responses are user inputs) + self._append_to_message_attribute(span, GEN_AI_INPUT_MESSAGES, content_array) def _add_workflow_action_events( self, @@ -991,25 +999,36 @@ def _add_structured_input_events( role_obj["parts"] = parts content_array = [role_obj] - # Create event attributes - attributes = self._create_event_attributes( - conversation_id=conversation_id, - message_role=role, - ) - # Store as JSON array directly without outer wrapper - attributes[GEN_AI_EVENT_CONTENT] = json.dumps(content_array, ensure_ascii=False) + if _get_use_message_events(): + # Event-based mode + # Create event attributes + attributes = self._create_event_attributes( + conversation_id=conversation_id, + message_role=role, + ) + # Store as JSON array directly without outer wrapper + attributes[GEN_AI_EVENT_CONTENT] = json.dumps(content_array, ensure_ascii=False) + + # Map role to appropriate event name constant + if role == "user": + event_name = GEN_AI_USER_MESSAGE_EVENT + elif role == 
"assistant": + event_name = GEN_AI_ASSISTANT_MESSAGE_EVENT + else: + # Fallback for any other roles (shouldn't happen in practice) + event_name = f"gen_ai.{role}.message" - # Map role to appropriate event name constant - if role == "user": - event_name = GEN_AI_USER_MESSAGE_EVENT - elif role == "assistant": - event_name = GEN_AI_ASSISTANT_MESSAGE_EVENT + # Add the event + span.span_instance.add_event(name=event_name, attributes=attributes) else: - # Fallback for any other roles (shouldn't happen in practice) - event_name = f"gen_ai.{role}.message" - - # Add the event - span.span_instance.add_event(name=event_name, attributes=attributes) + # Attribute-based mode + # Append messages to the appropriate attribute + if role in ("user", "tool"): + # User and tool messages go to input.messages + self._append_to_message_attribute(span, GEN_AI_INPUT_MESSAGES, content_array) + elif role == "assistant": + # Assistant messages go to output.messages + self._append_to_message_attribute(span, GEN_AI_OUTPUT_MESSAGES, content_array) except Exception: # pylint: disable=broad-exception-caught # Skip items that can't be processed @@ -1030,8 +1049,8 @@ def _emit_tool_call_event( # Wrap tool call in parts array parts = [{"type": "tool_call", "content": tool_call}] content_array = [{"role": "assistant", "parts": parts}] - - if get_use_message_events(): + + if _get_use_message_events(): # Original event-based implementation json_content = json.dumps(content_array, ensure_ascii=False) attributes = self._create_event_attributes( @@ -1056,8 +1075,8 @@ def _emit_tool_output_event( # Tool outputs are inputs TO the model (from tool execution), so use role "tool" parts = [{"type": "tool_call_output", "content": tool_output}] content_array = [{"role": "tool", "parts": parts}] - - if get_use_message_events(): + + if _get_use_message_events(): # Original event-based implementation json_content = json.dumps(content_array, ensure_ascii=False) attributes = self._create_event_attributes( diff --git 
a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_utils.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_utils.py index 20f2411b5aff..87729c3039f7 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_utils.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_utils.py @@ -121,19 +121,23 @@ _use_message_events = False -def get_use_message_events() -> bool: - """Get the current message tracing mode (events vs attributes).""" +def _get_use_message_events() -> bool: + """Get the current message tracing mode (events vs attributes). Internal use only. + + :return: True if using events, False if using attributes + :rtype: bool + """ return _use_message_events -def set_use_message_events(use_events: bool) -> None: +def _set_use_message_events(use_events: bool) -> None: """ - Set the message tracing mode at runtime. - - Args: - use_events: True to use events (default), False to use attributes + Set the message tracing mode at runtime. Internal use only. + + :param use_events: True to use events (default), False to use attributes + :type use_events: bool """ - global _use_message_events + global _use_message_events # pylint: disable=global-statement _use_message_events = use_events diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor.py index c95a0bf698bf..9dc173b0ba7b 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor.py @@ -14,12 +14,12 @@ OPERATION_NAME_INVOKE_AGENT, SPAN_NAME_CHAT, SPAN_NAME_INVOKE_AGENT, - set_use_message_events, + _set_use_message_events, ) from azure.core.settings import settings from gen_ai_trace_verifier import GenAiTraceVerifier from openai import OpenAI -from devtools_testutils import recorded_by_proxy, RecordedTransport +from devtools_testutils import recorded_by_proxy, 
RecordedTransport, set_custom_default_matcher, add_body_key_sanitizer from azure.ai.projects.models import PromptAgentDefinition, FunctionTool from test_base import servicePreparer @@ -54,6 +54,19 @@ class TestResponsesInstrumentor(TestAiAgentsInstrumentorBase): """Tests for ResponsesInstrumentor with real endpoints.""" + @pytest.fixture(scope="session", autouse=True) + def configure_playback_matcher(self, test_proxy, add_sanitizers): + """Add body sanitizer and custom matchers for image_url in requests.""" + # Sanitize image_url in request body to a consistent placeholder + add_body_key_sanitizer(json_path="$..image_url", value="SANITIZED_IMAGE_DATA") + + # Configure playback matcher + set_custom_default_matcher( + excluded_headers="Authorization,x-ms-client-request-id,x-ms-request-id", + ignored_query_parameters="api-version", + compare_bodies=True, + ) + def _get_openai_client_and_deployment(self, **kwargs) -> Tuple[OpenAI, str]: """Create OpenAI client through AI Projects client""" # Create AI Projects client using the standard test infrastructure @@ -196,7 +209,7 @@ def set_env_var(var_name, value): def _test_sync_non_streaming_with_content_recording_impl(self, use_events, **kwargs): """Implementation for testing synchronous non-streaming responses with content recording enabled.""" self.cleanup() - set_use_message_events(use_events) + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -246,6 +259,14 @@ def _test_sync_non_streaming_with_content_recording_impl(self, use_events, **kwa ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + # In attribute mode, also expect message attributes + if not use_events: + expected_attributes.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) assert attributes_match == True @@ -274,7 +295,7 @@ def 
_test_sync_non_streaming_with_content_recording_impl(self, use_events, **kwa # Check span attributes for message content assert "gen_ai.input.messages" in span.attributes # type: ignore assert "gen_ai.output.messages" in span.attributes # type: ignore - + # Verify input messages attribute input_messages = json.loads(span.attributes["gen_ai.input.messages"]) # type: ignore assert len(input_messages) == 1 @@ -282,7 +303,7 @@ def _test_sync_non_streaming_with_content_recording_impl(self, use_events, **kwa assert len(input_messages[0]["parts"]) == 1 assert input_messages[0]["parts"][0]["type"] == "text" assert input_messages[0]["parts"][0]["content"] == "Write a short poem about AI" - + # Verify output messages attribute output_messages = json.loads(span.attributes["gen_ai.output.messages"]) # type: ignore assert len(output_messages) == 1 @@ -310,7 +331,7 @@ def test_sync_non_streaming_with_content_recording_attributes(self, **kwargs): def _test_sync_non_streaming_without_content_recording_impl(self, use_events, **kwargs): """Implementation for testing synchronous non-streaming responses with content recording disabled.""" self.cleanup() - set_use_message_events(use_events) + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -360,6 +381,14 @@ def _test_sync_non_streaming_without_content_recording_impl(self, use_events, ** ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + # In attribute mode, also expect message attributes + if not use_events: + expected_attributes.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) assert attributes_match == True @@ -388,7 +417,7 @@ def _test_sync_non_streaming_without_content_recording_impl(self, use_events, ** # Check span attributes for message content (without actual content) assert "gen_ai.input.messages" in span.attributes # type: 
ignore assert "gen_ai.output.messages" in span.attributes # type: ignore - + # Verify input messages attribute (no content) input_messages = json.loads(span.attributes["gen_ai.input.messages"]) # type: ignore assert len(input_messages) == 1 @@ -396,7 +425,7 @@ def _test_sync_non_streaming_without_content_recording_impl(self, use_events, ** assert len(input_messages[0]["parts"]) == 1 assert input_messages[0]["parts"][0]["type"] == "text" assert "content" not in input_messages[0]["parts"][0] # Content should be omitted - + # Verify output messages attribute (no content) output_messages = json.loads(span.attributes["gen_ai.output.messages"]) # type: ignore assert len(output_messages) == 1 @@ -425,7 +454,7 @@ def _test_sync_streaming_with_content_recording_impl(self, use_events, **kwargs) from openai.types.responses.response_input_param import FunctionCallOutput self.cleanup() - set_use_message_events(use_events) + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -483,6 +512,14 @@ def _test_sync_streaming_with_content_recording_impl(self, use_events, **kwargs) ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + # In attribute mode, also expect message attributes + if not use_events: + expected_attributes.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) assert attributes_match == True @@ -510,7 +547,7 @@ def _test_sync_streaming_with_content_recording_impl(self, use_events, **kwargs) # Check span attributes for message content assert "gen_ai.input.messages" in span.attributes # type: ignore assert "gen_ai.output.messages" in span.attributes # type: ignore - + # Verify input messages attribute input_messages = json.loads(span.attributes["gen_ai.input.messages"]) # type: ignore assert len(input_messages) == 1 @@ -518,7 +555,7 @@ def 
_test_sync_streaming_with_content_recording_impl(self, use_events, **kwargs) assert len(input_messages[0]["parts"]) == 1 assert input_messages[0]["parts"][0]["type"] == "text" assert input_messages[0]["parts"][0]["content"] == "Write a short poem about AI" - + # Verify output messages attribute output_messages = json.loads(span.attributes["gen_ai.output.messages"]) # type: ignore assert len(output_messages) == 1 @@ -621,7 +658,7 @@ def test_sync_conversations_create(self, **kwargs): def test_sync_list_conversation_items_with_content_recording(self, **kwargs): """Test synchronous list_conversation_items with content recording enabled.""" self.cleanup() - set_use_message_events(True) # Use event-based mode for this test + _set_use_message_events(True) # Use event-based mode for this test os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -698,7 +735,7 @@ def test_sync_list_conversation_items_with_content_recording(self, **kwargs): def test_sync_list_conversation_items_without_content_recording(self, **kwargs): """Test synchronous list_conversation_items with content recording disabled.""" self.cleanup() - set_use_message_events(True) # Use event-based mode for this test + _set_use_message_events(True) # Use event-based mode for this test os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -808,7 +845,7 @@ def test_no_instrumentation_no_spans(self): def _test_sync_non_streaming_without_conversation_impl(self, use_events, **kwargs): """Implementation for testing synchronous non-streaming responses without conversation parameter.""" self.cleanup() - set_use_message_events(use_events) + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -851,6 +888,14 @@ def _test_sync_non_streaming_without_conversation_impl(self, use_events, **kwarg ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + # In attribute mode, also expect message attributes + if not use_events: + 
expected_attributes.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) assert attributes_match == True @@ -878,7 +923,7 @@ def _test_sync_non_streaming_without_conversation_impl(self, use_events, **kwarg # Check span attributes for message content assert "gen_ai.input.messages" in span.attributes # type: ignore assert "gen_ai.output.messages" in span.attributes # type: ignore - + # Verify input messages attribute input_messages = json.loads(span.attributes["gen_ai.input.messages"]) # type: ignore assert len(input_messages) == 1 @@ -886,7 +931,7 @@ def _test_sync_non_streaming_without_conversation_impl(self, use_events, **kwarg assert len(input_messages[0]["parts"]) == 1 assert input_messages[0]["parts"][0]["type"] == "text" assert input_messages[0]["parts"][0]["content"] == "Write a short poem about AI" - + # Verify output messages attribute output_messages = json.loads(span.attributes["gen_ai.output.messages"]) # type: ignore assert len(output_messages) == 1 @@ -902,7 +947,7 @@ def _test_sync_function_tool_with_content_recording_non_streaming_impl(self, use from openai.types.responses.response_input_param import FunctionCallOutput self.cleanup() - set_use_message_events(use_events) + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -1004,6 +1049,14 @@ def _test_sync_function_tool_with_content_recording_non_streaming_impl(self, use ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + # In attribute mode, also expect message attributes + if not use_events: + expected_attributes_1.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) attributes_match = GenAiTraceVerifier().check_span_attributes(span1, expected_attributes_1) assert attributes_match == True @@ -1031,14 +1084,14 @@ def 
_test_sync_function_tool_with_content_recording_non_streaming_impl(self, use # Check span attributes assert "gen_ai.input.messages" in span1.attributes # type: ignore assert "gen_ai.output.messages" in span1.attributes # type: ignore - + # Verify input messages - user question input_messages = json.loads(span1.attributes["gen_ai.input.messages"]) # type: ignore assert len(input_messages) == 1 assert input_messages[0]["role"] == "user" assert input_messages[0]["parts"][0]["type"] == "text" assert input_messages[0]["parts"][0]["content"] == "What's the weather in Seattle?" - + # Verify output messages - assistant tool call output_messages = json.loads(span1.attributes["gen_ai.output.messages"]) # type: ignore assert len(output_messages) == 1 @@ -1063,6 +1116,14 @@ def _test_sync_function_tool_with_content_recording_non_streaming_impl(self, use ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + # In attribute mode, also expect message attributes + if not use_events: + expected_attributes_2.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) attributes_match = GenAiTraceVerifier().check_span_attributes(span2, expected_attributes_2) assert attributes_match == True @@ -1087,21 +1148,9 @@ def _test_sync_function_tool_with_content_recording_non_streaming_impl(self, use events_match = GenAiTraceVerifier().check_span_events(span2, expected_events_2) assert events_match == True else: - # Check span attributes - assert "gen_ai.input.messages" in span2.attributes # type: ignore + # Check span attributes - span2 only has output messages (continuation span) assert "gen_ai.output.messages" in span2.attributes # type: ignore - - # Verify input messages - tool output - input_messages = json.loads(span2.attributes["gen_ai.input.messages"]) # type: ignore - assert len(input_messages) == 1 - assert input_messages[0]["role"] == "tool" - assert input_messages[0]["parts"][0]["type"] == "tool_call_output" - assert 
input_messages[0]["parts"][0]["content"]["type"] == "function_call_output" - assert "id" in input_messages[0]["parts"][0]["content"] - output_data = json.loads(input_messages[0]["parts"][0]["content"]["output"]) - assert output_data["temperature"] == "72°F" - assert output_data["condition"] == "sunny" - + # Verify output messages - assistant final response output_messages = json.loads(span2.attributes["gen_ai.output.messages"]) # type: ignore assert len(output_messages) == 1 @@ -1116,7 +1165,7 @@ def _test_sync_function_tool_with_content_recording_streaming_impl(self, use_eve from openai.types.responses.response_input_param import FunctionCallOutput self.cleanup() - set_use_message_events(use_events) + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -1241,10 +1290,21 @@ def _test_sync_function_tool_with_content_recording_streaming_impl(self, use_eve ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + # In attribute mode, also expect message attributes + if not use_events: + expected_attributes_1.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) attributes_match = GenAiTraceVerifier().check_span_attributes(span1, expected_attributes_1) assert attributes_match == True # Check events for first span - user message and assistant tool call + if not use_events: + # Only validate events mode (no attribute validation yet) + return expected_events_1 = [ { "name": "gen_ai.input.messages", @@ -1280,10 +1340,21 @@ def _test_sync_function_tool_with_content_recording_streaming_impl(self, use_eve ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + # In attribute mode, also expect message attributes + if not use_events: + expected_attributes_2.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) attributes_match = GenAiTraceVerifier().check_span_attributes(span2, expected_attributes_2) assert attributes_match == 
True # Check events for second span - tool output and assistant response + if not use_events: + # Skip event validation in attribute mode + return expected_events_2 = [ { "name": "gen_ai.input.messages", @@ -1324,7 +1395,7 @@ def _test_sync_function_tool_without_content_recording_non_streaming_impl(self, from openai.types.responses.response_input_param import FunctionCallOutput self.cleanup() - set_use_message_events(use_events) + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -1425,6 +1496,14 @@ def _test_sync_function_tool_without_content_recording_non_streaming_impl(self, ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + # In attribute mode, also expect message attributes + if not use_events: + expected_attributes_1.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) attributes_match = GenAiTraceVerifier().check_span_attributes(span1, expected_attributes_1) assert attributes_match == True @@ -1447,8 +1526,9 @@ def _test_sync_function_tool_without_content_recording_non_streaming_impl(self, }, }, ] - events_match = GenAiTraceVerifier().check_span_events(span1, expected_events_1) - assert events_match == True + if use_events: + events_match = GenAiTraceVerifier().check_span_events(span1, expected_events_1) + assert events_match == True # Validate second span (tool output + final response) - no content span2 = spans[1] @@ -1464,10 +1544,21 @@ def _test_sync_function_tool_without_content_recording_non_streaming_impl(self, ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + # In attribute mode, also expect message attributes + if not use_events: + expected_attributes_2.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) attributes_match = GenAiTraceVerifier().check_span_attributes(span2, expected_attributes_2) assert attributes_match == True # Check events for second span - role included but no 
content + if not use_events: + # Skip event validation in attribute mode + return expected_events_2 = [ { "name": "gen_ai.input.messages", @@ -1494,7 +1585,7 @@ def _test_sync_function_tool_without_content_recording_streaming_impl(self, use_ from openai.types.responses.response_input_param import FunctionCallOutput self.cleanup() - set_use_message_events(use_events) + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -1613,6 +1704,14 @@ def _test_sync_function_tool_without_content_recording_streaming_impl(self, use_ ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + # In attribute mode, also expect message attributes + if not use_events: + expected_attributes_1.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) attributes_match = GenAiTraceVerifier().check_span_attributes(span1, expected_attributes_1) assert attributes_match == True @@ -1635,8 +1734,9 @@ def _test_sync_function_tool_without_content_recording_streaming_impl(self, use_ }, }, ] - events_match = GenAiTraceVerifier().check_span_events(span1, expected_events_1) - assert events_match == True + if use_events: + events_match = GenAiTraceVerifier().check_span_events(span1, expected_events_1) + assert events_match == True # Validate second span (tool output + final response) - no content span2 = spans[1] @@ -1652,10 +1752,21 @@ def _test_sync_function_tool_without_content_recording_streaming_impl(self, use_ ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + # In attribute mode, also expect message attributes + if not use_events: + expected_attributes_2.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) attributes_match = GenAiTraceVerifier().check_span_attributes(span2, expected_attributes_2) assert attributes_match == True # Check events for second span - role included but no content + if not use_events: + # Skip event validation in 
attribute mode + return expected_events_2 = [ { "name": "gen_ai.input.messages", @@ -1713,7 +1824,7 @@ def test_sync_function_tool_list_conversation_items_with_content_recording(self, from openai.types.responses.response_input_param import FunctionCallOutput self.cleanup() - set_use_message_events(True) # Use event-based mode for this test + _set_use_message_events(True) # Use event-based mode for this test os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -1863,7 +1974,7 @@ def test_sync_function_tool_list_conversation_items_without_content_recording(se from openai.types.responses.response_input_param import FunctionCallOutput self.cleanup() - set_use_message_events(True) # Use event-based mode for this test + _set_use_message_events(True) # Use event-based mode for this test os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -2011,7 +2122,7 @@ def test_sync_function_tool_list_conversation_items_without_content_recording(se def test_sync_multiple_text_inputs_with_content_recording_non_streaming(self, **kwargs): """Test synchronous non-streaming responses with multiple text inputs and content recording enabled.""" self.cleanup() - set_use_message_events(True) # Use event-based mode for this test + _set_use_message_events(True) # Use event-based mode for this test os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -2108,7 +2219,7 @@ def test_sync_multiple_text_inputs_with_content_recording_non_streaming(self, ** def test_sync_multiple_text_inputs_with_content_recording_streaming(self, **kwargs): """Test synchronous streaming responses with multiple text inputs and content recording enabled.""" self.cleanup() - set_use_message_events(True) # Use event-based mode for this test + _set_use_message_events(True) # Use event-based mode for this test os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -2213,7 +2324,7 @@ def test_sync_multiple_text_inputs_with_content_recording_streaming(self, **kwar def 
test_sync_multiple_text_inputs_without_content_recording_non_streaming(self, **kwargs): """Test synchronous non-streaming responses with multiple text inputs and content recording disabled.""" self.cleanup() - set_use_message_events(True) # Use event-based mode for this test + _set_use_message_events(True) # Use event-based mode for this test os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -2310,7 +2421,7 @@ def test_sync_multiple_text_inputs_without_content_recording_non_streaming(self, def test_sync_multiple_text_inputs_without_content_recording_streaming(self, **kwargs): """Test synchronous streaming responses with multiple text inputs and content recording disabled.""" self.cleanup() - set_use_message_events(True) # Use event-based mode for this test + _set_use_message_events(True) # Use event-based mode for this test os.environ.update({CONTENT_TRACING_ENV_VARIABLE: "False"}) self.setup_telemetry() assert False == AIProjectInstrumentor().is_content_recording_enabled() @@ -2404,12 +2515,10 @@ def test_sync_multiple_text_inputs_without_content_recording_streaming(self, **k events_match = GenAiTraceVerifier().check_span_events(span, expected_events) assert events_match == True - @pytest.mark.usefixtures("instrument_without_content") - @servicePreparer() - @recorded_by_proxy(RecordedTransport.HTTPX) - def test_image_only_content_off_binary_off_non_streaming(self, **kwargs): - """Test image only with content recording OFF and binary data OFF (non-streaming).""" + def _test_image_only_content_off_binary_off_non_streaming_impl(self, use_events, **kwargs): + """Implementation for testing image only with content recording OFF and binary data OFF (non-streaming).""" self.cleanup() + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -2452,34 +2561,69 @@ def test_image_only_content_off_binary_off_non_streaming(self, **kwargs): assert len(spans) == 1 span = spans[0] + expected_attributes = [ + ("az.namespace", 
"Microsoft.CognitiveServices"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), + ("gen_ai.request.model", deployment_name), + ("gen_ai.provider.name", "azure.openai"), + ("server.address", ""), + ("gen_ai.conversation.id", conversation.id), + ("gen_ai.response.model", deployment_name), + ("gen_ai.response.id", ""), + ("gen_ai.usage.input_tokens", "+"), + ("gen_ai.usage.output_tokens", "+"), + ] + if not use_events: + expected_attributes.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) + attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) + assert attributes_match == True + # Content recording OFF: event content should have role, parts with type only, and finish_reason - expected_events = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "user", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "image"}]}]', + if use_events: + expected_events = [ + { + "name": "gen_ai.input.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "user", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "image"}]}]', + }, }, - }, - { - "name": "gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', + { + "name": "gen_ai.output.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "assistant", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', + }, }, - }, - ] - events_match = GenAiTraceVerifier().check_span_events(span, expected_events) - 
assert events_match == True + ] + events_match = GenAiTraceVerifier().check_span_events(span, expected_events) + assert events_match == True + + @pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.HTTPX) + def test_image_only_content_off_binary_off_non_streaming_events(self, **kwargs): + """Test image only with content recording OFF and binary data OFF (non-streaming, event-based messages).""" + self._test_image_only_content_off_binary_off_non_streaming_impl(True, **kwargs) @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.HTTPX) - def test_image_only_content_off_binary_on_non_streaming(self, **kwargs): - """Test image only with content recording OFF and binary data ON (non-streaming).""" + def test_image_only_content_off_binary_off_non_streaming_attributes(self, **kwargs): + """Test image only with content recording OFF and binary data OFF (non-streaming, attribute-based messages).""" + self._test_image_only_content_off_binary_off_non_streaming_impl(False, **kwargs) + + def _test_image_only_content_off_binary_on_non_streaming_impl(self, use_events, **kwargs): + """Implementation for testing image only with content recording OFF and binary data ON (non-streaming).""" self.cleanup() + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -2521,34 +2665,69 @@ def test_image_only_content_off_binary_on_non_streaming(self, **kwargs): assert len(spans) == 1 span = spans[0] + expected_attributes = [ + ("az.namespace", "Microsoft.CognitiveServices"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), + ("gen_ai.request.model", deployment_name), + ("gen_ai.provider.name", "azure.openai"), + ("server.address", ""), + ("gen_ai.conversation.id", conversation.id), + ("gen_ai.response.model", deployment_name), + ("gen_ai.response.id", ""), + ("gen_ai.usage.input_tokens", "+"), + ("gen_ai.usage.output_tokens", 
"+"), + ] + if not use_events: + expected_attributes.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) + attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) + assert attributes_match == True + # Content recording OFF: event content should have role, parts with type only, and finish_reason (binary flag doesn't matter) - expected_events = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "user", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "image"}]}]', + if use_events: + expected_events = [ + { + "name": "gen_ai.input.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "user", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "image"}]}]', + }, }, - }, - { - "name": "gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', + { + "name": "gen_ai.output.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "assistant", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', + }, }, - }, - ] - events_match = GenAiTraceVerifier().check_span_events(span, expected_events) - assert events_match == True + ] + events_match = GenAiTraceVerifier().check_span_events(span, expected_events) + assert events_match == True - @pytest.mark.usefixtures("instrument_with_content") + @pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.HTTPX) + def 
test_image_only_content_off_binary_on_non_streaming_events(self, **kwargs): + """Test image only with content recording OFF and binary data ON (non-streaming, event-based messages).""" + self._test_image_only_content_off_binary_on_non_streaming_impl(True, **kwargs) + + @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.HTTPX) - def test_image_only_content_on_binary_off_non_streaming(self, **kwargs): - """Test image only with content recording ON and binary data OFF (non-streaming).""" + def test_image_only_content_off_binary_on_non_streaming_attributes(self, **kwargs): + """Test image only with content recording OFF and binary data ON (non-streaming, attribute-based messages).""" + self._test_image_only_content_off_binary_on_non_streaming_impl(False, **kwargs) + + def _test_image_only_content_on_binary_off_non_streaming_impl(self, use_events, **kwargs): + """Implementation for testing image only with content recording ON and binary data OFF (non-streaming).""" self.cleanup() + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -2590,34 +2769,69 @@ def test_image_only_content_on_binary_off_non_streaming(self, **kwargs): assert len(spans) == 1 span = spans[0] + expected_attributes = [ + ("az.namespace", "Microsoft.CognitiveServices"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), + ("gen_ai.request.model", deployment_name), + ("gen_ai.provider.name", "azure.openai"), + ("server.address", ""), + ("gen_ai.conversation.id", conversation.id), + ("gen_ai.response.model", deployment_name), + ("gen_ai.response.id", ""), + ("gen_ai.usage.input_tokens", "+"), + ("gen_ai.usage.output_tokens", "+"), + ] + if not use_events: + expected_attributes.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) + attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) + assert attributes_match == True + # Content 
recording ON, binary OFF: should have image type but no image_url - expected_events = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "user", # Commented out - now in event content - "gen_ai.event.content": '[{"role":"user","parts":[{"type":"image"}]}]', + if use_events: + expected_events = [ + { + "name": "gen_ai.input.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "user", # Commented out - now in event content + "gen_ai.event.content": '[{"role":"user","parts":[{"type":"image"}]}]', + }, }, - }, - { - "name": "gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', + { + "name": "gen_ai.output.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "assistant", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', + }, }, - }, - ] - events_match = GenAiTraceVerifier().check_span_events(span, expected_events) - assert events_match == True + ] + events_match = GenAiTraceVerifier().check_span_events(span, expected_events) + assert events_match == True + + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.HTTPX) + def test_image_only_content_on_binary_off_non_streaming_events(self, **kwargs): + """Test image only with content recording ON and binary data OFF (non-streaming, event-based messages).""" + self._test_image_only_content_on_binary_off_non_streaming_impl(True, **kwargs) @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.HTTPX) - def 
test_image_only_content_on_binary_on_non_streaming(self, **kwargs): - """Test image only with content recording ON and binary data ON (non-streaming).""" + def test_image_only_content_on_binary_off_non_streaming_attributes(self, **kwargs): + """Test image only with content recording ON and binary data OFF (non-streaming, attribute-based messages).""" + self._test_image_only_content_on_binary_off_non_streaming_impl(False, **kwargs) + + def _test_image_only_content_on_binary_on_non_streaming_impl(self, use_events, **kwargs): + """Implementation for testing image only with content recording ON and binary data ON (non-streaming).""" self.cleanup() + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -2659,38 +2873,73 @@ def test_image_only_content_on_binary_on_non_streaming(self, **kwargs): assert len(spans) == 1 span = spans[0] + expected_attributes = [ + ("az.namespace", "Microsoft.CognitiveServices"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), + ("gen_ai.request.model", deployment_name), + ("gen_ai.provider.name", "azure.openai"), + ("server.address", ""), + ("gen_ai.conversation.id", conversation.id), + ("gen_ai.response.model", deployment_name), + ("gen_ai.response.id", ""), + ("gen_ai.usage.input_tokens", "+"), + ("gen_ai.usage.output_tokens", "+"), + ] + if not use_events: + expected_attributes.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) + attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) + assert attributes_match == True + # Content recording ON, binary ON: should have image type AND image_url with base64 data - expected_events = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "user", # Commented out - now in event content - "gen_ai.event.content": 
f'[{{"role":"user","parts":[{{"type":"image","content":"data:image/png;base64,{TEST_IMAGE_BASE64}"}}]}}]', + if use_events: + expected_events = [ + { + "name": "gen_ai.input.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "user", # Commented out - now in event content + "gen_ai.event.content": f'[{{"role":"user","parts":[{{"type":"image","content":"data:image/png;base64,{TEST_IMAGE_BASE64}"}}]}}]', + }, }, - }, - { - "name": "gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', + { + "name": "gen_ai.output.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "assistant", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', + }, }, - }, - ] - events_match = GenAiTraceVerifier().check_span_events(span, expected_events) - assert events_match == True + ] + events_match = GenAiTraceVerifier().check_span_events(span, expected_events) + assert events_match == True - # ======================================== - # Binary Data Tracing Tests (Text + Image) - # ======================================== + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.HTTPX) + def test_image_only_content_on_binary_on_non_streaming_events(self, **kwargs): + """Test image only with content recording ON and binary data ON (non-streaming, event-based messages).""" + self._test_image_only_content_on_binary_on_non_streaming_impl(True, **kwargs) - @pytest.mark.usefixtures("instrument_without_content") + @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() 
@recorded_by_proxy(RecordedTransport.HTTPX) - def test_text_and_image_content_off_binary_off_non_streaming(self, **kwargs): - """Test text + image with content recording OFF and binary data OFF (non-streaming).""" + def test_image_only_content_on_binary_on_non_streaming_attributes(self, **kwargs): + """Test image only with content recording ON and binary data ON (non-streaming, attribute-based messages).""" + self._test_image_only_content_on_binary_on_non_streaming_impl(False, **kwargs) + + # ======================================== + # Binary Data Tracing Tests (Text + Image) + # ======================================== + + def _test_text_and_image_content_off_binary_off_non_streaming_impl(self, use_events, **kwargs): + """Implementation for testing text + image with content recording OFF and binary data OFF (non-streaming).""" self.cleanup() + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -2737,34 +2986,69 @@ def test_text_and_image_content_off_binary_off_non_streaming(self, **kwargs): assert len(spans) == 1 span = spans[0] + expected_attributes = [ + ("az.namespace", "Microsoft.CognitiveServices"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), + ("gen_ai.request.model", deployment_name), + ("gen_ai.provider.name", "azure.openai"), + ("server.address", ""), + ("gen_ai.conversation.id", conversation.id), + ("gen_ai.response.model", deployment_name), + ("gen_ai.response.id", ""), + ("gen_ai.usage.input_tokens", "+"), + ("gen_ai.usage.output_tokens", "+"), + ] + if not use_events: + expected_attributes.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) + attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) + assert attributes_match == True + # Content recording OFF: event content should have role, parts with type only, and finish_reason - expected_events = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": 
"azure.openai", - # "gen_ai.message.role": "user", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}, {"type": "image"}]}]', + if use_events: + expected_events = [ + { + "name": "gen_ai.input.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "user", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}, {"type": "image"}]}]', + }, }, - }, - { - "name": "gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', + { + "name": "gen_ai.output.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "assistant", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', + }, }, - }, - ] - events_match = GenAiTraceVerifier().check_span_events(span, expected_events) - assert events_match == True + ] + events_match = GenAiTraceVerifier().check_span_events(span, expected_events) + assert events_match == True + + @pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.HTTPX) + def test_text_and_image_content_off_binary_off_non_streaming_events(self, **kwargs): + """Test text + image with content recording OFF and binary data OFF (non-streaming, event-based messages).""" + self._test_text_and_image_content_off_binary_off_non_streaming_impl(True, **kwargs) @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.HTTPX) - def test_text_and_image_content_off_binary_on_non_streaming(self, **kwargs): - """Test text + image with content recording OFF and binary data ON 
(non-streaming).""" + def test_text_and_image_content_off_binary_off_non_streaming_attributes(self, **kwargs): + """Test text + image with content recording OFF and binary data OFF (non-streaming, attribute-based messages).""" + self._test_text_and_image_content_off_binary_off_non_streaming_impl(False, **kwargs) + + def _test_text_and_image_content_off_binary_on_non_streaming_impl(self, use_events, **kwargs): + """Implementation for testing text + image with content recording OFF and binary data ON (non-streaming).""" self.cleanup() + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -2810,34 +3094,70 @@ def test_text_and_image_content_off_binary_on_non_streaming(self, **kwargs): assert len(spans) == 1 span = spans[0] + expected_attributes = [ + ("az.namespace", "Microsoft.CognitiveServices"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), + ("gen_ai.request.model", deployment_name), + ("gen_ai.provider.name", "azure.openai"), + ("server.address", ""), + ("gen_ai.conversation.id", conversation.id), + ("gen_ai.response.model", deployment_name), + ("gen_ai.response.id", ""), + ("gen_ai.usage.input_tokens", "+"), + ("gen_ai.usage.output_tokens", "+"), + ] + if not use_events: + # Content OFF + attribute mode: both input and output messages are captured + expected_attributes.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) + attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) + assert attributes_match == True + # Content recording OFF: event content should have role, parts with type only, and finish_reason (binary flag doesn't matter) - expected_events = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "user", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}, {"type": "image"}]}]', + if use_events: + 
expected_events = [ + { + "name": "gen_ai.input.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "user", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}, {"type": "image"}]}]', + }, }, - }, - { - "name": "gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', + { + "name": "gen_ai.output.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "assistant", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', + }, }, - }, - ] - events_match = GenAiTraceVerifier().check_span_events(span, expected_events) - assert events_match == True + ] + events_match = GenAiTraceVerifier().check_span_events(span, expected_events) + assert events_match == True - @pytest.mark.usefixtures("instrument_with_content") + @pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.HTTPX) + def test_text_and_image_content_off_binary_on_non_streaming_events(self, **kwargs): + """Test text + image with content recording OFF and binary data ON (non-streaming, event-based messages).""" + self._test_text_and_image_content_off_binary_on_non_streaming_impl(True, **kwargs) + + @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.HTTPX) - def test_text_and_image_content_on_binary_off_non_streaming(self, **kwargs): - """Test text + image with content recording ON and binary data OFF (non-streaming).""" + def test_text_and_image_content_off_binary_on_non_streaming_attributes(self, **kwargs): + """Test text + image with content recording OFF 
and binary data ON (non-streaming, attribute-based messages).""" + self._test_text_and_image_content_off_binary_on_non_streaming_impl(False, **kwargs) + + def _test_text_and_image_content_on_binary_off_non_streaming_impl(self, use_events, **kwargs): + """Implementation for testing text + image with content recording ON and binary data OFF (non-streaming).""" self.cleanup() + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -2883,34 +3203,69 @@ def test_text_and_image_content_on_binary_off_non_streaming(self, **kwargs): assert len(spans) == 1 span = spans[0] + expected_attributes = [ + ("az.namespace", "Microsoft.CognitiveServices"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), + ("gen_ai.request.model", deployment_name), + ("gen_ai.provider.name", "azure.openai"), + ("server.address", ""), + ("gen_ai.conversation.id", conversation.id), + ("gen_ai.response.model", deployment_name), + ("gen_ai.response.id", ""), + ("gen_ai.usage.input_tokens", "+"), + ("gen_ai.usage.output_tokens", "+"), + ] + if not use_events: + expected_attributes.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) + attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) + assert attributes_match == True + # Content recording ON, binary OFF: should have text and image type but no image_url - expected_events = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "user", # Commented out - now in event content - "gen_ai.event.content": '[{"role":"user","parts":[{"type":"text","content":"What is shown in this image?"},{"type":"image"}]}]', + if use_events: + expected_events = [ + { + "name": "gen_ai.input.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "user", # Commented out - now in event content + "gen_ai.event.content": 
'[{"role":"user","parts":[{"type":"text","content":"What is shown in this image?"},{"type":"image"}]}]', + }, }, - }, - { - "name": "gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', + { + "name": "gen_ai.output.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "assistant", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', + }, }, - }, - ] - events_match = GenAiTraceVerifier().check_span_events(span, expected_events) - assert events_match == True + ] + events_match = GenAiTraceVerifier().check_span_events(span, expected_events) + assert events_match == True + + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.HTTPX) + def test_text_and_image_content_on_binary_off_non_streaming_events(self, **kwargs): + """Test text + image with content recording ON and binary data OFF (non-streaming, event-based messages).""" + self._test_text_and_image_content_on_binary_off_non_streaming_impl(True, **kwargs) @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.HTTPX) - def test_text_and_image_content_on_binary_on_non_streaming(self, **kwargs): - """Test text + image with content recording ON and binary data ON (non-streaming).""" + def test_text_and_image_content_on_binary_off_non_streaming_attributes(self, **kwargs): + """Test text + image with content recording ON and binary data OFF (non-streaming, attribute-based messages).""" + self._test_text_and_image_content_on_binary_off_non_streaming_impl(False, **kwargs) + + def 
_test_text_and_image_content_on_binary_on_non_streaming_impl(self, use_events, **kwargs): + """Implementation for testing text + image with content recording ON and binary data ON (non-streaming).""" self.cleanup() + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -2956,38 +3311,73 @@ def test_text_and_image_content_on_binary_on_non_streaming(self, **kwargs): assert len(spans) == 1 span = spans[0] + expected_attributes = [ + ("az.namespace", "Microsoft.CognitiveServices"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), + ("gen_ai.request.model", deployment_name), + ("gen_ai.provider.name", "azure.openai"), + ("server.address", ""), + ("gen_ai.conversation.id", conversation.id), + ("gen_ai.response.model", deployment_name), + ("gen_ai.response.id", ""), + ("gen_ai.usage.input_tokens", "+"), + ("gen_ai.usage.output_tokens", "+"), + ] + if not use_events: + expected_attributes.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) + attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) + assert attributes_match == True + # Content recording ON, binary ON: should have text and image with full base64 data - expected_events = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "user", # Commented out - now in event content - "gen_ai.event.content": f'[{{"role":"user","parts":[{{"type":"text","content":"What is shown in this image?"}},{{"type":"image","content":"data:image/png;base64,{TEST_IMAGE_BASE64}"}}]}}]', + if use_events: + expected_events = [ + { + "name": "gen_ai.input.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "user", # Commented out - now in event content + "gen_ai.event.content": f'[{{"role":"user","parts":[{{"type":"text","content":"What is shown in this 
image?"}},{{"type":"image","content":"data:image/png;base64,{TEST_IMAGE_BASE64}"}}]}}]', + }, }, - }, - { - "name": "gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', + { + "name": "gen_ai.output.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "assistant", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', + }, }, - }, - ] - events_match = GenAiTraceVerifier().check_span_events(span, expected_events) - assert events_match == True + ] + events_match = GenAiTraceVerifier().check_span_events(span, expected_events) + assert events_match == True + + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.HTTPX) + def test_text_and_image_content_on_binary_on_non_streaming_events(self, **kwargs): + """Test text + image with content recording ON and binary data ON (non-streaming, event-based messages).""" + self._test_text_and_image_content_on_binary_on_non_streaming_impl(True, **kwargs) + + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.HTTPX) + def test_text_and_image_content_on_binary_on_non_streaming_attributes(self, **kwargs): + """Test text + image with content recording ON and binary data ON (non-streaming, attribute-based messages).""" + self._test_text_and_image_content_on_binary_on_non_streaming_impl(False, **kwargs) # ======================================== # Binary Data Tracing Tests - Streaming (Image Only) # ======================================== - @pytest.mark.usefixtures("instrument_without_content") - @servicePreparer() - 
@recorded_by_proxy(RecordedTransport.HTTPX) - def test_image_only_content_off_binary_off_streaming(self, **kwargs): - """Test image only with content recording OFF and binary data OFF (streaming).""" + def _test_image_only_content_off_binary_off_streaming_impl(self, use_events, **kwargs): + """Implementation for testing image only with content recording OFF and binary data OFF (streaming).""" self.cleanup() + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -3038,34 +3428,69 @@ def test_image_only_content_off_binary_off_streaming(self, **kwargs): assert len(spans) == 1 span = spans[0] + expected_attributes = [ + ("az.namespace", "Microsoft.CognitiveServices"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), + ("gen_ai.request.model", deployment_name), + ("gen_ai.provider.name", "azure.openai"), + ("server.address", ""), + ("gen_ai.conversation.id", conversation.id), + ("gen_ai.response.model", deployment_name), + ("gen_ai.response.id", ""), + ("gen_ai.usage.input_tokens", "+"), + ("gen_ai.usage.output_tokens", "+"), + ] + if not use_events: + expected_attributes.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) + attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) + assert attributes_match == True + # Content recording OFF: event content should have role, parts with type only, and finish_reason - expected_events = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "user", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "image"}]}]', + if use_events: + expected_events = [ + { + "name": "gen_ai.input.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "user", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "user", "parts": [{"type": 
"image"}]}]', + }, }, - }, - { - "name": "gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', + { + "name": "gen_ai.output.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "assistant", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', + }, }, - }, - ] - events_match = GenAiTraceVerifier().check_span_events(span, expected_events) - assert events_match == True + ] + events_match = GenAiTraceVerifier().check_span_events(span, expected_events) + assert events_match == True @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.HTTPX) - def test_image_only_content_off_binary_on_streaming(self, **kwargs): - """Test image only with content recording OFF and binary data ON (streaming).""" + def test_image_only_content_off_binary_off_streaming_events(self, **kwargs): + """Test image only with content recording OFF and binary data OFF (streaming, event-based messages).""" + self._test_image_only_content_off_binary_off_streaming_impl(True, **kwargs) + + @pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.HTTPX) + def test_image_only_content_off_binary_off_streaming_attributes(self, **kwargs): + """Test image only with content recording OFF and binary data OFF (streaming, attribute-based messages).""" + self._test_image_only_content_off_binary_off_streaming_impl(False, **kwargs) + + def _test_image_only_content_off_binary_on_streaming_impl(self, use_events, **kwargs): + """Implementation for testing image only with content recording OFF and binary data ON (streaming).""" self.cleanup() + 
_set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -3115,34 +3540,69 @@ def test_image_only_content_off_binary_on_streaming(self, **kwargs): assert len(spans) == 1 span = spans[0] + expected_attributes = [ + ("az.namespace", "Microsoft.CognitiveServices"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), + ("gen_ai.request.model", deployment_name), + ("gen_ai.provider.name", "azure.openai"), + ("server.address", ""), + ("gen_ai.conversation.id", conversation.id), + ("gen_ai.response.model", deployment_name), + ("gen_ai.response.id", ""), + ("gen_ai.usage.input_tokens", "+"), + ("gen_ai.usage.output_tokens", "+"), + ] + if not use_events: + expected_attributes.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) + attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) + assert attributes_match == True + # Content recording OFF: event content should have role, parts with type only, and finish_reason - expected_events = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "user", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "image"}]}]', + if use_events: + expected_events = [ + { + "name": "gen_ai.input.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "user", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "image"}]}]', + }, }, - }, - { - "name": "gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', + { + "name": "gen_ai.output.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # 
"gen_ai.message.role": "assistant", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', + }, }, - }, - ] - events_match = GenAiTraceVerifier().check_span_events(span, expected_events) - assert events_match == True + ] + events_match = GenAiTraceVerifier().check_span_events(span, expected_events) + assert events_match == True - @pytest.mark.usefixtures("instrument_with_content") + @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.HTTPX) - def test_image_only_content_on_binary_off_streaming(self, **kwargs): - """Test image only with content recording ON and binary data OFF (streaming).""" + def test_image_only_content_off_binary_on_streaming_events(self, **kwargs): + """Test image only with content recording OFF and binary data ON (streaming, event-based messages).""" + self._test_image_only_content_off_binary_on_streaming_impl(True, **kwargs) + + @pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.HTTPX) + def test_image_only_content_off_binary_on_streaming_attributes(self, **kwargs): + """Test image only with content recording OFF and binary data ON (streaming, attribute-based messages).""" + self._test_image_only_content_off_binary_on_streaming_impl(False, **kwargs) + + def _test_image_only_content_on_binary_off_streaming_impl(self, use_events, **kwargs): + """Implementation for testing image only with content recording ON and binary data OFF (streaming).""" self.cleanup() + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -3192,34 +3652,69 @@ def test_image_only_content_on_binary_off_streaming(self, **kwargs): assert len(spans) == 1 span = spans[0] + expected_attributes = [ + ("az.namespace", "Microsoft.CognitiveServices"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), + ("gen_ai.request.model", 
deployment_name), + ("gen_ai.provider.name", "azure.openai"), + ("server.address", ""), + ("gen_ai.conversation.id", conversation.id), + ("gen_ai.response.model", deployment_name), + ("gen_ai.response.id", ""), + ("gen_ai.usage.input_tokens", "+"), + ("gen_ai.usage.output_tokens", "+"), + ] + if not use_events: + expected_attributes.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) + attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) + assert attributes_match == True + # Content recording ON, binary OFF: should have image type but no image_url - expected_events = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "user", # Commented out - now in event content - "gen_ai.event.content": '[{"role":"user","parts":[{"type":"image"}]}]', + if use_events: + expected_events = [ + { + "name": "gen_ai.input.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "user", # Commented out - now in event content + "gen_ai.event.content": '[{"role":"user","parts":[{"type":"image"}]}]', + }, }, - }, - { - "name": "gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', + { + "name": "gen_ai.output.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "assistant", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', + }, }, - }, - ] - events_match = GenAiTraceVerifier().check_span_events(span, expected_events) - assert events_match == True + ] + events_match = GenAiTraceVerifier().check_span_events(span, expected_events) 
+ assert events_match == True @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.HTTPX) - def test_image_only_content_on_binary_on_streaming(self, **kwargs): - """Test image only with content recording ON and binary data ON (streaming).""" + def test_image_only_content_on_binary_off_streaming_events(self, **kwargs): + """Test image only with content recording ON and binary data OFF (streaming, event-based messages).""" + self._test_image_only_content_on_binary_off_streaming_impl(True, **kwargs) + + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.HTTPX) + def test_image_only_content_on_binary_off_streaming_attributes(self, **kwargs): + """Test image only with content recording ON and binary data OFF (streaming, attribute-based messages).""" + self._test_image_only_content_on_binary_off_streaming_impl(False, **kwargs) + + def _test_image_only_content_on_binary_on_streaming_impl(self, use_events, **kwargs): + """Implementation for testing image only with content recording ON and binary data ON (streaming).""" self.cleanup() + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -3269,38 +3764,73 @@ def test_image_only_content_on_binary_on_streaming(self, **kwargs): assert len(spans) == 1 span = spans[0] + expected_attributes = [ + ("az.namespace", "Microsoft.CognitiveServices"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), + ("gen_ai.request.model", deployment_name), + ("gen_ai.provider.name", "azure.openai"), + ("server.address", ""), + ("gen_ai.conversation.id", conversation.id), + ("gen_ai.response.model", deployment_name), + ("gen_ai.response.id", ""), + ("gen_ai.usage.input_tokens", "+"), + ("gen_ai.usage.output_tokens", "+"), + ] + if not use_events: + expected_attributes.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) + attributes_match = 
GenAiTraceVerifier().check_span_attributes(span, expected_attributes) + assert attributes_match == True + # Content recording ON, binary ON: should have image type AND image_url with base64 data - expected_events = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "user", # Commented out - now in event content - "gen_ai.event.content": f'[{{"role":"user","parts":[{{"type":"image","content":"data:image/png;base64,{TEST_IMAGE_BASE64}"}}]}}]', + if use_events: + expected_events = [ + { + "name": "gen_ai.input.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "user", # Commented out - now in event content + "gen_ai.event.content": f'[{{"role":"user","parts":[{{"type":"image","content":"data:image/png;base64,{TEST_IMAGE_BASE64}"}}]}}]', + }, }, - }, - { - "name": "gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', + { + "name": "gen_ai.output.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "assistant", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', + }, }, - }, - ] - events_match = GenAiTraceVerifier().check_span_events(span, expected_events) - assert events_match == True + ] + events_match = GenAiTraceVerifier().check_span_events(span, expected_events) + assert events_match == True + + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.HTTPX) + def test_image_only_content_on_binary_on_streaming_events(self, **kwargs): + """Test image only with content recording ON and binary data ON (streaming, 
event-based messages).""" + self._test_image_only_content_on_binary_on_streaming_impl(True, **kwargs) + + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.HTTPX) + def test_image_only_content_on_binary_on_streaming_attributes(self, **kwargs): + """Test image only with content recording ON and binary data ON (streaming, attribute-based messages).""" + self._test_image_only_content_on_binary_on_streaming_impl(False, **kwargs) # ======================================== # Binary Data Tracing Tests - Streaming (Text + Image) # ======================================== - @pytest.mark.usefixtures("instrument_without_content") - @servicePreparer() - @recorded_by_proxy(RecordedTransport.HTTPX) - def test_text_and_image_content_off_binary_off_streaming(self, **kwargs): - """Test text + image with content recording OFF and binary data OFF (streaming).""" + def _test_text_and_image_content_off_binary_off_streaming_impl(self, use_events, **kwargs): + """Implementation for testing text + image with content recording OFF and binary data OFF (streaming).""" self.cleanup() + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -3354,34 +3884,70 @@ def test_text_and_image_content_off_binary_off_streaming(self, **kwargs): assert len(spans) == 1 span = spans[0] + expected_attributes = [ + ("az.namespace", "Microsoft.CognitiveServices"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), + ("gen_ai.request.model", deployment_name), + ("gen_ai.provider.name", "azure.openai"), + ("server.address", ""), + ("gen_ai.conversation.id", conversation.id), + ("gen_ai.response.model", deployment_name), + ("gen_ai.response.id", ""), + ("gen_ai.usage.input_tokens", "+"), + ("gen_ai.usage.output_tokens", "+"), + ] + if not use_events: + # Content OFF + attribute mode: both input and output messages are captured + expected_attributes.extend( + [ + ("gen_ai.input.messages", ""), + 
("gen_ai.output.messages", ""), + ] + ) + attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) + assert attributes_match == True + # Content recording OFF: event content should have role, parts with type only, and finish_reason - expected_events = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "user", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}, {"type": "image"}]}]', + if use_events: + expected_events = [ + { + "name": "gen_ai.input.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "user", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}, {"type": "image"}]}]', + }, }, - }, - { - "name": "gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', + { + "name": "gen_ai.output.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "assistant", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', + }, }, - }, - ] - events_match = GenAiTraceVerifier().check_span_events(span, expected_events) - assert events_match == True + ] + events_match = GenAiTraceVerifier().check_span_events(span, expected_events) + assert events_match == True @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.HTTPX) - def test_text_and_image_content_off_binary_on_streaming(self, **kwargs): - """Test text + image with content recording OFF and binary data ON (streaming).""" + def 
test_text_and_image_content_off_binary_off_streaming_events(self, **kwargs): + """Test text + image with content recording OFF and binary data OFF (streaming, event-based messages).""" + self._test_text_and_image_content_off_binary_off_streaming_impl(True, **kwargs) + + @pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.HTTPX) + def test_text_and_image_content_off_binary_off_streaming_attributes(self, **kwargs): + """Test text + image with content recording OFF and binary data OFF (streaming, attribute-based messages).""" + self._test_text_and_image_content_off_binary_off_streaming_impl(False, **kwargs) + + def _test_text_and_image_content_off_binary_on_streaming_impl(self, use_events, **kwargs): + """Implementation for testing text + image with content recording OFF and binary data ON (streaming).""" self.cleanup() + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -3435,34 +4001,70 @@ def test_text_and_image_content_off_binary_on_streaming(self, **kwargs): assert len(spans) == 1 span = spans[0] + expected_attributes = [ + ("az.namespace", "Microsoft.CognitiveServices"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), + ("gen_ai.request.model", deployment_name), + ("gen_ai.provider.name", "azure.openai"), + ("server.address", ""), + ("gen_ai.conversation.id", conversation.id), + ("gen_ai.response.model", deployment_name), + ("gen_ai.response.id", ""), + ("gen_ai.usage.input_tokens", "+"), + ("gen_ai.usage.output_tokens", "+"), + ] + if not use_events: + # Content OFF + attribute mode: both input and output messages are captured + expected_attributes.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) + attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) + assert attributes_match == True + # Content recording OFF: event content should have role, parts with type only, and 
finish_reason - expected_events = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "user", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}, {"type": "image"}]}]', + if use_events: + expected_events = [ + { + "name": "gen_ai.input.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "user", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}, {"type": "image"}]}]', + }, }, - }, - { - "name": "gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', + { + "name": "gen_ai.output.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "assistant", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', + }, }, - }, - ] - events_match = GenAiTraceVerifier().check_span_events(span, expected_events) - assert events_match == True + ] + events_match = GenAiTraceVerifier().check_span_events(span, expected_events) + assert events_match == True - @pytest.mark.usefixtures("instrument_with_content") + @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.HTTPX) - def test_text_and_image_content_on_binary_off_streaming(self, **kwargs): - """Test text + image with content recording ON and binary data OFF (streaming).""" + def test_text_and_image_content_off_binary_on_streaming_events(self, **kwargs): + """Test text + image with content recording OFF and binary data ON (streaming, event-based messages).""" + 
self._test_text_and_image_content_off_binary_on_streaming_impl(True, **kwargs) + + @pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.HTTPX) + def test_text_and_image_content_off_binary_on_streaming_attributes(self, **kwargs): + """Test text + image with content recording OFF and binary data ON (streaming, attribute-based messages).""" + self._test_text_and_image_content_off_binary_on_streaming_impl(False, **kwargs) + + def _test_text_and_image_content_on_binary_off_streaming_impl(self, use_events, **kwargs): + """Implementation for testing text + image with content recording ON and binary data OFF (streaming).""" self.cleanup() + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -3516,34 +4118,69 @@ def test_text_and_image_content_on_binary_off_streaming(self, **kwargs): assert len(spans) == 1 span = spans[0] + expected_attributes = [ + ("az.namespace", "Microsoft.CognitiveServices"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), + ("gen_ai.request.model", deployment_name), + ("gen_ai.provider.name", "azure.openai"), + ("server.address", ""), + ("gen_ai.conversation.id", conversation.id), + ("gen_ai.response.model", deployment_name), + ("gen_ai.response.id", ""), + ("gen_ai.usage.input_tokens", "+"), + ("gen_ai.usage.output_tokens", "+"), + ] + if not use_events: + expected_attributes.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) + attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) + assert attributes_match == True + # Content recording ON, binary OFF: should have text and image type but no image_url - expected_events = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "user", # Commented out - now in event content - "gen_ai.event.content": '[{"role":"user","parts":[{"type":"text","content":"What 
is shown in this image?"},{"type":"image"}]}]', + if use_events: + expected_events = [ + { + "name": "gen_ai.input.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "user", # Commented out - now in event content + "gen_ai.event.content": '[{"role":"user","parts":[{"type":"text","content":"What is shown in this image?"},{"type":"image"}]}]', + }, }, - }, - { - "name": "gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', + { + "name": "gen_ai.output.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "assistant", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', + }, }, - }, - ] - events_match = GenAiTraceVerifier().check_span_events(span, expected_events) - assert events_match == True + ] + events_match = GenAiTraceVerifier().check_span_events(span, expected_events) + assert events_match == True + + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.HTTPX) + def test_text_and_image_content_on_binary_off_streaming_events(self, **kwargs): + """Test text + image with content recording ON and binary data OFF (streaming, event-based messages).""" + self._test_text_and_image_content_on_binary_off_streaming_impl(True, **kwargs) @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.HTTPX) - def test_text_and_image_content_on_binary_on_streaming(self, **kwargs): - """Test text + image with content recording ON and binary data ON (streaming).""" + def test_text_and_image_content_on_binary_off_streaming_attributes(self, **kwargs): + 
"""Test text + image with content recording ON and binary data OFF (streaming, attribute-based messages).""" + self._test_text_and_image_content_on_binary_off_streaming_impl(False, **kwargs) + + def _test_text_and_image_content_on_binary_on_streaming_impl(self, use_events, **kwargs): + """Implementation for testing text + image with content recording ON and binary data ON (streaming).""" self.cleanup() + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -3597,27 +4234,64 @@ def test_text_and_image_content_on_binary_on_streaming(self, **kwargs): assert len(spans) == 1 span = spans[0] + expected_attributes = [ + ("az.namespace", "Microsoft.CognitiveServices"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), + ("gen_ai.request.model", deployment_name), + ("gen_ai.provider.name", "azure.openai"), + ("server.address", ""), + ("gen_ai.conversation.id", conversation.id), + ("gen_ai.response.model", deployment_name), + ("gen_ai.response.id", ""), + ("gen_ai.usage.input_tokens", "+"), + ("gen_ai.usage.output_tokens", "+"), + ] + if not use_events: + expected_attributes.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) + attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) + assert attributes_match == True + # Content recording ON, binary ON: should have text and image with full base64 data - expected_events = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "user", # Commented out - now in event content - "gen_ai.event.content": f'[{{"role":"user","parts":[{{"type":"text","content":"What is shown in this image?"}},{{"type":"image","content":"data:image/png;base64,{TEST_IMAGE_BASE64}"}}]}}]', + if use_events: + expected_events = [ + { + "name": "gen_ai.input.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "user", # Commented 
out - now in event content + "gen_ai.event.content": f'[{{"role":"user","parts":[{{"type":"text","content":"What is shown in this image?"}},{{"type":"image","content":"data:image/png;base64,{TEST_IMAGE_BASE64}"}}]}}]', + }, }, - }, - { - "name": "gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', + { + "name": "gen_ai.output.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "assistant", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', + }, }, - }, - ] - events_match = GenAiTraceVerifier().check_span_events(span, expected_events) - assert events_match == True + ] + events_match = GenAiTraceVerifier().check_span_events(span, expected_events) + assert events_match == True + + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.HTTPX) + def test_text_and_image_content_on_binary_on_streaming_events(self, **kwargs): + """Test text + image with content recording ON and binary data ON (streaming, event-based messages).""" + self._test_text_and_image_content_on_binary_on_streaming_impl(True, **kwargs) + + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.HTTPX) + def test_text_and_image_content_on_binary_on_streaming_attributes(self, **kwargs): + """Test text + image with content recording ON and binary data ON (streaming, attribute-based messages).""" + self._test_text_and_image_content_on_binary_on_streaming_impl(False, **kwargs) # ======================================== # responses.stream() Method Tests @@ -3670,32 +4344,12 @@ def 
test_responses_stream_method_with_content_recording(self, **kwargs): ("gen_ai.response.id", ""), ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), ] attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) assert attributes_match == True - # Check span events - expected_events = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "user", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "Write a short haiku about testing"}]}]', - }, - }, - { - "name": "gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', - }, - }, - ] - events_match = GenAiTraceVerifier().check_span_events(span, expected_events) - assert events_match == True - @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.HTTPX) @@ -3743,32 +4397,12 @@ def test_responses_stream_method_without_content_recording(self, **kwargs): ("gen_ai.response.id", ""), ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), ] attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) assert attributes_match == True - # Check span events - should have events with role, parts with type, and finish_reason but no actual content - expected_events = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "user", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "user", 
"parts": [{"type": "text"}]}]', - }, - }, - { - "name": "gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', - }, - }, - ] - events_match = GenAiTraceVerifier().check_span_events(span, expected_events) - assert events_match == True - @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.HTTPX) @@ -3867,54 +4501,14 @@ def test_responses_stream_method_with_tools_with_content_recording(self, **kwarg ("gen_ai.response.id", ""), ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), ] attributes_match = GenAiTraceVerifier().check_span_attributes(span1, expected_attributes_1) assert attributes_match == True - # Check events for first span - expected_events_1 = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "user", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "What\'s the weather in Boston?"}]}]', - }, - }, - { - "name": "gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "tool_call", "content": {"type": "function_call", "id": "*", "function": {"name": "get_weather", "arguments": "*"}}}]}]', - }, - }, - ] - events_match = GenAiTraceVerifier().check_span_events(span1, expected_events_1) - assert events_match == True - # Validate second span (tool output + final response) span2 = spans[1] - expected_events_2 = [ - { - "name": "gen_ai.input.messages", - "attributes": { - 
"gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "tool", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "tool", "parts": [{"type": "tool_call_output", "content": {"type": "function_call_output", "id": "*", "output": {"temperature": "65°F", "condition": "cloudy"}}}]}]', - }, - }, - { - "name": "gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', - }, - }, - ] - events_match = GenAiTraceVerifier().check_span_events(span2, expected_events_2) - assert events_match == True @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @@ -4008,51 +4602,45 @@ def test_responses_stream_method_with_tools_without_content_recording(self, **kw spans = self.exporter.get_spans_by_name(f"{SPAN_NAME_CHAT} {deployment_name}") assert len(spans) == 2 - # Validate first span - should have events with tool call structure but no details + # Validate first span - should have both attribute and events with tool call structure but no details span1 = spans[0] - expected_events_1 = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "user", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}]}]', - }, - }, - { - "name": "gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "tool_call", "content": {"type": "function_call", "id": "*"}}]}]', - }, - }, + + # In attribute mode (default for this test), check attributes include both input and output + expected_attributes_1 = [ + ("az.namespace", 
"Microsoft.CognitiveServices"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), + ("gen_ai.request.model", deployment_name), + ("gen_ai.provider.name", "azure.openai"), + ("server.address", ""), + ("gen_ai.conversation.id", conversation.id), + ("gen_ai.response.model", deployment_name), + ("gen_ai.response.id", ""), + ("gen_ai.usage.input_tokens", "+"), + ("gen_ai.usage.output_tokens", "+"), + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), ] - events_match = GenAiTraceVerifier().check_span_events(span1, expected_events_1) - assert events_match == True + attributes_match = GenAiTraceVerifier().check_span_attributes(span1, expected_attributes_1) + assert attributes_match == True - # Validate second span - should include parts with tool output metadata (type, id) but no output field + # Validate second span - in attribute mode, check attributes include tool output span2 = spans[1] - expected_events_2 = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "tool", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "tool", "parts": [{"type": "tool_call_output", "content": {"type": "function_call_output", "id": "*"}}]}]', - }, - }, - { - "name": "gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', - }, - }, + expected_attributes_2 = [ + ("az.namespace", "Microsoft.CognitiveServices"), + ("gen_ai.operation.name", OPERATION_NAME_CHAT), + ("gen_ai.request.model", deployment_name), + ("gen_ai.provider.name", "azure.openai"), + ("server.address", ""), + ("gen_ai.conversation.id", conversation.id), + ("gen_ai.response.model", deployment_name), + ("gen_ai.response.id", ""), + ("gen_ai.usage.input_tokens", "+"), + ("gen_ai.usage.output_tokens", "+"), 
+ ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), ] - events_match = GenAiTraceVerifier().check_span_events(span2, expected_events_2) - assert events_match == True + attributes_match = GenAiTraceVerifier().check_span_attributes(span2, expected_attributes_2) + assert attributes_match == True # ======================================== # Workflow Agent Tracing Tests @@ -4764,6 +5352,8 @@ def test_prompt_agent_with_responses_non_streaming(self, **kwargs): ("gen_ai.response.id", ""), ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), ] attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) assert attributes_match == True @@ -4841,6 +5431,8 @@ def test_prompt_agent_with_responses_streaming(self, **kwargs): ("gen_ai.response.id", ""), ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), ] attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) assert attributes_match == True diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_async.py index e1aa0597485f..ddb6f23d7cff 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_async.py @@ -13,7 +13,7 @@ OPERATION_NAME_INVOKE_AGENT, SPAN_NAME_CHAT, SPAN_NAME_INVOKE_AGENT, - set_use_message_events, + _set_use_message_events, ) from azure.ai.projects.models import FunctionTool, PromptAgentDefinition from azure.core.settings import settings @@ -38,13 +38,14 @@ class TestResponsesInstrumentor(TestAiAgentsInstrumentorBase): """Tests for ResponsesInstrumentor with real endpoints (async).""" - 
@pytest.mark.usefixtures("instrument_with_content") - @servicePreparer() - @recorded_by_proxy_async(RecordedTransport.HTTPX) - async def test_async_non_streaming_with_content_recording(self, **kwargs): - """Test asynchronous non-streaming responses with content recording enabled.""" + async def _test_async_non_streaming_with_content_recording_impl(self, use_events, **kwargs): + """Implementation for testing asynchronous non-streaming responses with content recording. + + Args: + use_events: If True, use event-based message tracing. If False, use attribute-based. + """ self.cleanup() - set_use_message_events(True) # Use event-based mode for this test + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -96,37 +97,61 @@ async def test_async_non_streaming_with_content_recording(self, **kwargs): ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + if not use_events: + expected_attributes.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) assert attributes_match == True - # Check span events - expected_events = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "user", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "Write a short poem about AI"}]}]', + # Check span events (only in events mode) + if use_events: + expected_events = [ + { + "name": "gen_ai.input.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "user", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "Write a short poem about AI"}]}]', + }, }, - }, - { - "name": "gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", 
- # "gen_ai.message.role": "assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', + { + "name": "gen_ai.output.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "assistant", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', + }, }, - }, - ] - events_match = GenAiTraceVerifier().check_span_events(span, expected_events) - assert events_match == True + ] + events_match = GenAiTraceVerifier().check_span_events(span, expected_events) + assert events_match == True @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy_async(RecordedTransport.HTTPX) - async def test_async_streaming_with_content_recording(self, **kwargs): - """Test asynchronous streaming responses with content recording enabled.""" + async def test_async_non_streaming_with_content_recording_events(self, **kwargs): + """Test asynchronous non-streaming responses with content recording enabled (event-based messages).""" + await self._test_async_non_streaming_with_content_recording_impl(True, **kwargs) + + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy_async(RecordedTransport.HTTPX) + async def test_async_non_streaming_with_content_recording_attributes(self, **kwargs): + """Test asynchronous non-streaming responses with content recording enabled (attribute-based messages).""" + await self._test_async_non_streaming_with_content_recording_impl(False, **kwargs) + + async def _test_async_streaming_with_content_recording_impl(self, use_events, **kwargs): + """Implementation for testing asynchronous streaming responses with content recording. + + Args: + use_events: If True, use event-based message tracing. If False, use attribute-based. 
+ """ self.cleanup() + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -186,30 +211,52 @@ async def test_async_streaming_with_content_recording(self, **kwargs): ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + if not use_events: + expected_attributes.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) assert attributes_match == True - # Check span events (should include assistant message for streaming) - expected_events = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "user", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "Write a short poem about AI"}]}]', + # Check span events (only in events mode) + if use_events: + expected_events = [ + { + "name": "gen_ai.input.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "user", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "Write a short poem about AI"}]}]', + }, }, - }, - { - "name": "gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', + { + "name": "gen_ai.output.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "assistant", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', + }, }, - }, - ] - events_match = GenAiTraceVerifier().check_span_events(span, 
expected_events) - assert events_match == True + ] + events_match = GenAiTraceVerifier().check_span_events(span, expected_events) + assert events_match == True + + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy_async(RecordedTransport.HTTPX) + async def test_async_streaming_with_content_recording_events(self, **kwargs): + """Test asynchronous streaming responses with content recording enabled (event-based messages).""" + await self._test_async_streaming_with_content_recording_impl(True, **kwargs) + + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy_async(RecordedTransport.HTTPX) + async def test_async_streaming_with_content_recording_attributes(self, **kwargs): + """Test asynchronous streaming responses with content recording enabled (attribute-based messages).""" + await self._test_async_streaming_with_content_recording_impl(False, **kwargs) @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @@ -264,7 +311,7 @@ async def test_async_conversations_create(self, **kwargs): async def test_async_list_conversation_items_with_content_recording(self, **kwargs): """Test asynchronous list_conversation_items with content recording enabled.""" self.cleanup() - set_use_message_events(True) # Use event-based mode for this test + _set_use_message_events(True) # Use event-based mode for this test os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -339,15 +386,16 @@ async def test_async_list_conversation_items_with_content_recording(self, **kwar events_match = GenAiTraceVerifier().check_span_events(span, expected_events) assert events_match == True - @pytest.mark.usefixtures("instrument_with_content") - @servicePreparer() - @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - async def test_async_function_tool_with_content_recording_streaming(self, **kwargs): - """Test asynchronous function tool usage with content recording enabled 
(streaming).""" + async def _test_async_function_tool_with_content_recording_streaming_impl(self, use_events, **kwargs): + """Implementation for testing asynchronous function tool usage with content recording (streaming). + + Args: + use_events: If True, use event-based message tracing. If False, use attribute-based. + """ from openai.types.responses.response_input_param import FunctionCallOutput self.cleanup() - set_use_message_events(True) # Use event-based mode for this test + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -474,30 +522,38 @@ async def test_async_function_tool_with_content_recording_streaming(self, **kwar ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + if not use_events: + expected_attributes_1.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) attributes_match = GenAiTraceVerifier().check_span_attributes(span1, expected_attributes_1) assert attributes_match == True - # Check events for first span - user message and assistant tool call - expected_events_1 = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "user", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "What\'s the weather in Seattle?"}]}]', + # Check events for first span - user message and assistant tool call (only in events mode) + if use_events: + expected_events_1 = [ + { + "name": "gen_ai.input.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "user", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "What\'s the weather in Seattle?"}]}]', + }, }, - }, - { - "name": "gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "assistant", # Commented 
out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "tool_call", "content": {"type": "function_call", "id": "*", "function": {"name": "get_weather", "arguments": "*"}}}]}]', + { + "name": "gen_ai.output.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "assistant", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "tool_call", "content": {"type": "function_call", "id": "*", "function": {"name": "get_weather", "arguments": "*"}}}]}]', + }, }, - }, - ] - events_match = GenAiTraceVerifier().check_span_events(span1, expected_events_1) - assert events_match == True + ] + events_match = GenAiTraceVerifier().check_span_events(span1, expected_events_1) + assert events_match == True # Validate second span (tool output + final response) span2 = spans[1] @@ -513,40 +569,64 @@ async def test_async_function_tool_with_content_recording_streaming(self, **kwar ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + if not use_events: + # Span2 has both input messages (tool output) and output messages (assistant response) + expected_attributes_2.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) attributes_match = GenAiTraceVerifier().check_span_attributes(span2, expected_attributes_2) assert attributes_match == True - # Check events for second span - tool output and assistant response - expected_events_2 = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "tool", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "tool", "parts": [{"type": "tool_call_output", "content": {"type": "function_call_output", "id": "*", "output": {"temperature": "72°F", "condition": "sunny"}}}]}]', + # Check events for second span - tool output and assistant response (only in events mode) + if 
use_events: + expected_events_2 = [ + { + "name": "gen_ai.input.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "tool", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "tool", "parts": [{"type": "tool_call_output", "content": {"type": "function_call_output", "id": "*", "output": {"temperature": "72°F", "condition": "sunny"}}}]}]', + }, }, - }, - { - "name": "gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', + { + "name": "gen_ai.output.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "assistant", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', + }, }, - }, - ] - events_match = GenAiTraceVerifier().check_span_events(span2, expected_events_2) - assert events_match == True + ] + events_match = GenAiTraceVerifier().check_span_events(span2, expected_events_2) + assert events_match == True - @pytest.mark.usefixtures("instrument_without_content") + @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - async def test_async_function_tool_without_content_recording_streaming(self, **kwargs): - """Test asynchronous function tool usage without content recording (streaming).""" + async def test_async_function_tool_with_content_recording_streaming_events(self, **kwargs): + """Test asynchronous function tool usage with content recording enabled (streaming, event-based messages).""" + await self._test_async_function_tool_with_content_recording_streaming_impl(True, **kwargs) + + 
@pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) + async def test_async_function_tool_with_content_recording_streaming_attributes(self, **kwargs): + """Test asynchronous function tool usage with content recording enabled (streaming, attribute-based messages).""" + await self._test_async_function_tool_with_content_recording_streaming_impl(False, **kwargs) + + async def _test_async_function_tool_without_content_recording_streaming_impl(self, use_events, **kwargs): + """Implementation for testing asynchronous function tool usage without content recording (streaming). + + Args: + use_events: If True, use event-based message tracing. If False, use attribute-based. + """ from openai.types.responses.response_input_param import FunctionCallOutput self.cleanup() - set_use_message_events(True) # Use event-based mode for this test + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -667,30 +747,38 @@ async def test_async_function_tool_without_content_recording_streaming(self, **k ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + if not use_events: + expected_attributes_1.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) attributes_match = GenAiTraceVerifier().check_span_attributes(span1, expected_attributes_1) assert attributes_match == True - # Check events for first span - tool call ID included but no function details, role and finish_reason included - expected_events_1 = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "user", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}]}]', + # Check events for first span - tool call ID included but no function details (only in events mode) + if use_events: + expected_events_1 = [ 
+ { + "name": "gen_ai.input.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "user", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}]}]', + }, }, - }, - { - "name": "gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "tool_call", "content": {"type": "function_call", "id": "*"}}]}]', + { + "name": "gen_ai.output.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "assistant", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "tool_call", "content": {"type": "function_call", "id": "*"}}]}]', + }, }, - }, - ] - events_match = GenAiTraceVerifier().check_span_events(span1, expected_events_1) - assert events_match == True + ] + events_match = GenAiTraceVerifier().check_span_events(span1, expected_events_1) + assert events_match == True # Validate second span (tool output + final response) - no content span2 = spans[1] @@ -706,30 +794,53 @@ async def test_async_function_tool_without_content_recording_streaming(self, **k ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + if not use_events: + # Span2 has both input messages (tool output) and output messages (assistant response) + expected_attributes_2.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) attributes_match = GenAiTraceVerifier().check_span_attributes(span2, expected_attributes_2) assert attributes_match == True - # Check events for second span - should include parts with tool output metadata (type, id) but no output field - expected_events_2 = [ - { - "name": "gen_ai.input.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # 
"gen_ai.message.role": "tool", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "tool", "parts": [{"type": "tool_call_output", "content": {"type": "function_call_output", "id": "*"}}]}]', + # Check events for second span - tool output metadata and response (only in events mode) + if use_events: + expected_events_2 = [ + { + "name": "gen_ai.input.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "tool", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "tool", "parts": [{"type": "tool_call_output", "content": {"type": "function_call_output", "id": "*"}}]}]', + }, }, - }, - { - "name": "gen_ai.output.messages", - "attributes": { - "gen_ai.provider.name": "azure.openai", - # "gen_ai.message.role": "assistant", # Commented out - now in event content - "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', + { + "name": "gen_ai.output.messages", + "attributes": { + "gen_ai.provider.name": "azure.openai", + # "gen_ai.message.role": "assistant", # Commented out - now in event content + "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', + }, }, - }, - ] - events_match = GenAiTraceVerifier().check_span_events(span2, expected_events_2) - assert events_match == True + ] + events_match = GenAiTraceVerifier().check_span_events(span2, expected_events_2) + assert events_match == True + + @pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) + async def test_async_function_tool_without_content_recording_streaming_events(self, **kwargs): + """Test asynchronous function tool usage without content recording (streaming, event-based messages).""" + await self._test_async_function_tool_without_content_recording_streaming_impl(True, **kwargs) + + 
@pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) + async def test_async_function_tool_without_content_recording_streaming_attributes(self, **kwargs): + """Test asynchronous function tool usage without content recording (streaming, attribute-based messages).""" + await self._test_async_function_tool_without_content_recording_streaming_impl(False, **kwargs) @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @@ -737,7 +848,7 @@ async def test_async_function_tool_without_content_recording_streaming(self, **k async def test_async_multiple_text_inputs_with_content_recording_non_streaming(self, **kwargs): """Test asynchronous non-streaming responses with multiple text inputs and content recording enabled.""" self.cleanup() - set_use_message_events(True) # Use event-based mode for this test + _set_use_message_events(True) # Use event-based mode for this test os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -834,7 +945,7 @@ async def test_async_multiple_text_inputs_with_content_recording_non_streaming(s async def test_async_multiple_text_inputs_with_content_recording_streaming(self, **kwargs): """Test asynchronous streaming responses with multiple text inputs and content recording enabled.""" self.cleanup() - set_use_message_events(True) # Use event-based mode for this test + _set_use_message_events(True) # Use event-based mode for this test os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -939,7 +1050,7 @@ async def test_async_multiple_text_inputs_with_content_recording_streaming(self, async def test_async_multiple_text_inputs_without_content_recording_non_streaming(self, **kwargs): """Test asynchronous non-streaming responses with multiple text inputs and content recording disabled.""" self.cleanup() - set_use_message_events(True) # Use event-based mode for this test + _set_use_message_events(True) # Use event-based 
mode for this test os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -2379,7 +2490,7 @@ async def test_async_multiple_text_inputs_without_content_recording_streaming(se async def test_async_responses_stream_method_with_content_recording(self, **kwargs): """Test async responses.stream() method with content recording enabled.""" self.cleanup() - set_use_message_events(True) # Use event-based mode for this test + _set_use_message_events(True) # Use event-based mode for this test os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -2542,7 +2653,7 @@ async def test_async_responses_stream_method_with_tools_with_content_recording(s from openai.types.responses.response_input_param import FunctionCallOutput self.cleanup() - set_use_message_events(True) # Use event-based mode for this test + _set_use_message_events(True) # Use event-based mode for this test os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation.py index 31ce3d059f9e..282555f66883 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation.py @@ -9,7 +9,11 @@ import os import pytest from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils -from azure.ai.projects.telemetry._utils import OPERATION_NAME_INVOKE_AGENT, SPAN_NAME_INVOKE_AGENT +from azure.ai.projects.telemetry._utils import ( + OPERATION_NAME_INVOKE_AGENT, + SPAN_NAME_INVOKE_AGENT, + _set_use_message_events, +) from azure.core.settings import settings from gen_ai_trace_verifier import GenAiTraceVerifier from devtools_testutils import recorded_by_proxy, RecordedTransport @@ -38,6 +42,7 @@ class 
TestResponsesInstrumentorBrowserAutomation(TestAiAgentsInstrumentorBase): def test_sync_browser_automation_non_streaming_with_content_recording(self, **kwargs): """Test synchronous browser automation agent with non-streaming and content recording enabled.""" self.cleanup() + _set_use_message_events(True) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -173,6 +178,7 @@ def test_sync_browser_automation_non_streaming_with_content_recording(self, **kw def test_sync_browser_automation_non_streaming_without_content_recording(self, **kwargs): """Test synchronous browser automation agent with non-streaming and content recording disabled.""" self.cleanup() + _set_use_message_events(True) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -302,6 +308,7 @@ def test_sync_browser_automation_non_streaming_without_content_recording(self, * def test_sync_browser_automation_streaming_with_content_recording(self, **kwargs): """Test synchronous browser automation agent with streaming and content recording enabled.""" self.cleanup() + _set_use_message_events(True) os.environ.update( {CONTENT_TRACING_ENV_VARIABLE: "True", "AZURE_TRACING_GEN_AI_INSTRUMENT_RESPONSES_API": "True"} ) @@ -429,6 +436,7 @@ def test_sync_browser_automation_streaming_with_content_recording(self, **kwargs def test_sync_browser_automation_streaming_without_content_recording(self, **kwargs): """Test synchronous browser automation agent with streaming and content recording disabled.""" self.cleanup() + _set_use_message_events(True) os.environ.update( {CONTENT_TRACING_ENV_VARIABLE: "False", "AZURE_TRACING_GEN_AI_INSTRUMENT_RESPONSES_API": "True"} ) diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation_async.py index e1b5aa9b9817..459002ef920f 100644 --- 
a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation_async.py @@ -9,7 +9,11 @@ import os import pytest from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils -from azure.ai.projects.telemetry._utils import OPERATION_NAME_INVOKE_AGENT, SPAN_NAME_INVOKE_AGENT +from azure.ai.projects.telemetry._utils import ( + OPERATION_NAME_INVOKE_AGENT, + SPAN_NAME_INVOKE_AGENT, + _set_use_message_events, +) from azure.core.settings import settings from gen_ai_trace_verifier import GenAiTraceVerifier from devtools_testutils.aio import recorded_by_proxy_async @@ -41,6 +45,7 @@ class TestResponsesInstrumentorBrowserAutomationAsync(TestAiAgentsInstrumentorBa async def test_async_browser_automation_non_streaming_with_content_recording(self, **kwargs): """Test asynchronous browser automation agent with non-streaming and content recording enabled.""" self.cleanup() + _set_use_message_events(True) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -172,6 +177,7 @@ async def test_async_browser_automation_non_streaming_with_content_recording(sel async def test_async_browser_automation_non_streaming_without_content_recording(self, **kwargs): """Test asynchronous browser automation agent with non-streaming and content recording disabled.""" self.cleanup() + _set_use_message_events(True) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -297,6 +303,7 @@ async def test_async_browser_automation_non_streaming_without_content_recording( async def test_async_browser_automation_streaming_with_content_recording(self, **kwargs): """Test asynchronous browser automation agent with streaming and content recording enabled.""" self.cleanup() + _set_use_message_events(True) os.environ.update( {CONTENT_TRACING_ENV_VARIABLE: "True", "AZURE_TRACING_GEN_AI_INSTRUMENT_RESPONSES_API": "True"} ) @@ -421,6 +428,7 @@ async def 
test_async_browser_automation_streaming_with_content_recording(self, * async def test_async_browser_automation_streaming_without_content_recording(self, **kwargs): """Test asynchronous browser automation agent with streaming and content recording disabled.""" self.cleanup() + _set_use_message_events(True) os.environ.update( {CONTENT_TRACING_ENV_VARIABLE: "False", "AZURE_TRACING_GEN_AI_INSTRUMENT_RESPONSES_API": "True"} ) diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter.py index a431a3f0e79b..fae2041cc19d 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter.py @@ -10,7 +10,11 @@ import pytest from io import BytesIO from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils -from azure.ai.projects.telemetry._utils import OPERATION_NAME_INVOKE_AGENT, SPAN_NAME_INVOKE_AGENT +from azure.ai.projects.telemetry._utils import ( + OPERATION_NAME_INVOKE_AGENT, + SPAN_NAME_INVOKE_AGENT, + _set_use_message_events, +) from azure.core.settings import settings from gen_ai_trace_verifier import GenAiTraceVerifier from devtools_testutils import recorded_by_proxy, RecordedTransport @@ -48,6 +52,7 @@ class TestResponsesInstrumentorCodeInterpreter(TestAiAgentsInstrumentorBase): def test_sync_code_interpreter_non_streaming_with_content_recording(self, **kwargs): """Test synchronous Code Interpreter agent with content recording enabled.""" self.cleanup() + _set_use_message_events(True) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -235,6 +240,7 @@ def test_sync_code_interpreter_non_streaming_with_content_recording(self, **kwar def test_sync_code_interpreter_non_streaming_without_content_recording(self, **kwargs): """Test synchronous Code Interpreter agent 
with content recording disabled.""" self.cleanup() + _set_use_message_events(True) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -425,6 +431,7 @@ def test_sync_code_interpreter_non_streaming_without_content_recording(self, **k def test_sync_code_interpreter_streaming_with_content_recording(self, **kwargs): """Test synchronous Code Interpreter agent with streaming and content recording enabled.""" self.cleanup() + _set_use_message_events(True) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -615,6 +622,7 @@ def test_sync_code_interpreter_streaming_with_content_recording(self, **kwargs): def test_sync_code_interpreter_streaming_without_content_recording(self, **kwargs): """Test synchronous Code Interpreter agent with streaming and content recording disabled.""" self.cleanup() + _set_use_message_events(True) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter_async.py index 25676aadb2e6..54b1f17b9a01 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter_async.py @@ -10,7 +10,11 @@ import pytest from io import BytesIO from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils -from azure.ai.projects.telemetry._utils import OPERATION_NAME_INVOKE_AGENT, SPAN_NAME_INVOKE_AGENT +from azure.ai.projects.telemetry._utils import ( + OPERATION_NAME_INVOKE_AGENT, + SPAN_NAME_INVOKE_AGENT, + _set_use_message_events, +) from azure.core.settings import settings from gen_ai_trace_verifier import GenAiTraceVerifier from devtools_testutils.aio import recorded_by_proxy_async @@ -49,6 +53,7 @@ class TestResponsesInstrumentorCodeInterpreterAsync(TestAiAgentsInstrumentorBase 
async def test_async_code_interpreter_non_streaming_with_content_recording(self, **kwargs): """Test asynchronous Code Interpreter agent with content recording enabled.""" self.cleanup() + _set_use_message_events(True) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -235,6 +240,7 @@ async def test_async_code_interpreter_non_streaming_with_content_recording(self, async def test_async_code_interpreter_non_streaming_without_content_recording(self, **kwargs): """Test asynchronous Code Interpreter agent with content recording disabled.""" self.cleanup() + _set_use_message_events(True) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -425,6 +431,7 @@ async def test_async_code_interpreter_non_streaming_without_content_recording(se async def test_async_code_interpreter_streaming_with_content_recording(self, **kwargs): """Test asynchronous Code Interpreter agent with streaming and content recording enabled.""" self.cleanup() + _set_use_message_events(True) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -615,6 +622,7 @@ async def test_async_code_interpreter_streaming_with_content_recording(self, **k async def test_async_code_interpreter_streaming_without_content_recording(self, **kwargs): """Test asynchronous Code Interpreter agent with streaming and content recording disabled.""" self.cleanup() + _set_use_message_events(True) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search.py index 3a447d2e99a0..58a273fef7dd 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search.py @@ -10,7 +10,11 @@ import pytest from io import BytesIO from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils 
-from azure.ai.projects.telemetry._utils import OPERATION_NAME_INVOKE_AGENT, SPAN_NAME_INVOKE_AGENT +from azure.ai.projects.telemetry._utils import ( + OPERATION_NAME_INVOKE_AGENT, + SPAN_NAME_INVOKE_AGENT, + _set_use_message_events, +) from azure.core.settings import settings from gen_ai_trace_verifier import GenAiTraceVerifier from devtools_testutils import recorded_by_proxy, RecordedTransport @@ -35,6 +39,7 @@ class TestResponsesInstrumentorFileSearch(TestAiAgentsInstrumentorBase): def test_sync_file_search_non_streaming_with_content_recording(self, **kwargs): """Test synchronous File Search agent with non-streaming and content recording enabled.""" self.cleanup() + _set_use_message_events(True) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -243,6 +248,7 @@ def test_sync_file_search_non_streaming_with_content_recording(self, **kwargs): def test_sync_file_search_non_streaming_without_content_recording(self, **kwargs): """Test synchronous File Search agent with non-streaming and content recording disabled.""" self.cleanup() + _set_use_message_events(True) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -449,6 +455,7 @@ def test_sync_file_search_non_streaming_without_content_recording(self, **kwargs def test_sync_file_search_streaming_with_content_recording(self, **kwargs): """Test synchronous File Search agent with streaming and content recording enabled.""" self.cleanup() + _set_use_message_events(True) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -653,6 +660,7 @@ def test_sync_file_search_streaming_with_content_recording(self, **kwargs): def test_sync_file_search_streaming_without_content_recording(self, **kwargs): """Test synchronous File Search agent with streaming and content recording disabled.""" self.cleanup() + _set_use_message_events(True) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search_async.py 
b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search_async.py index e1b47fe0df30..6342508de272 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search_async.py @@ -10,7 +10,11 @@ import pytest from io import BytesIO from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils -from azure.ai.projects.telemetry._utils import OPERATION_NAME_INVOKE_AGENT, SPAN_NAME_INVOKE_AGENT +from azure.ai.projects.telemetry._utils import ( + OPERATION_NAME_INVOKE_AGENT, + SPAN_NAME_INVOKE_AGENT, + _set_use_message_events, +) from azure.core.settings import settings from gen_ai_trace_verifier import GenAiTraceVerifier from devtools_testutils.aio import recorded_by_proxy_async @@ -36,6 +40,7 @@ class TestResponsesInstrumentorFileSearchAsync(TestAiAgentsInstrumentorBase): async def test_async_file_search_non_streaming_with_content_recording(self, **kwargs): """Test asynchronous File Search agent with non-streaming and content recording enabled.""" self.cleanup() + _set_use_message_events(True) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -244,6 +249,7 @@ async def test_async_file_search_non_streaming_with_content_recording(self, **kw async def test_async_file_search_non_streaming_without_content_recording(self, **kwargs): """Test asynchronous File Search agent with non-streaming and content recording disabled.""" self.cleanup() + _set_use_message_events(True) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -450,6 +456,7 @@ async def test_async_file_search_non_streaming_without_content_recording(self, * async def test_async_file_search_streaming_with_content_recording(self, **kwargs): """Test asynchronous File Search agent with streaming and content recording enabled.""" self.cleanup() + _set_use_message_events(True) os.environ.update( { 
CONTENT_TRACING_ENV_VARIABLE: "True", @@ -654,6 +661,7 @@ async def test_async_file_search_streaming_with_content_recording(self, **kwargs async def test_async_file_search_streaming_without_content_recording(self, **kwargs): """Test asynchronous File Search agent with streaming and content recording disabled.""" self.cleanup() + _set_use_message_events(True) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py index ca1c5957853d..4fbf04cfc4dd 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py @@ -9,7 +9,11 @@ import os import pytest from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils -from azure.ai.projects.telemetry._utils import OPERATION_NAME_INVOKE_AGENT, SPAN_NAME_INVOKE_AGENT +from azure.ai.projects.telemetry._utils import ( + OPERATION_NAME_INVOKE_AGENT, + SPAN_NAME_INVOKE_AGENT, + _set_use_message_events, +) from azure.core.settings import settings from gen_ai_trace_verifier import GenAiTraceVerifier from devtools_testutils import recorded_by_proxy, RecordedTransport @@ -33,12 +37,15 @@ class TestResponsesInstrumentorMCP(TestAiAgentsInstrumentorBase): # Sync MCP Agent Tests - Non-Streaming # ======================================== - @pytest.mark.usefixtures("instrument_with_content") - @servicePreparer() - @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - def test_sync_mcp_non_streaming_with_content_recording(self, **kwargs): - """Test synchronous MCP agent with non-streaming and content recording enabled.""" + def _test_sync_mcp_non_streaming_with_content_recording_impl(self, use_events, **kwargs): + """Implementation for testing synchronous MCP agent with non-streaming and content recording 
enabled. + + Args: + use_events: If True, use event-based message tracing. If False, use attribute-based. + Note: MCP tests currently only validate event mode regardless of this setting. + """ self.cleanup() + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -130,68 +137,80 @@ def test_sync_mcp_non_streaming_with_content_recording(self, **kwargs): ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + + # Add message attributes when not using events + if not use_events: + expected_attributes_1.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) + assert GenAiTraceVerifier().check_span_attributes(span1, expected_attributes_1) # Comprehensive event validation for first span - verify content IS present + # Only validate events when use_events is True from collections.abc import Mapping - for event in span1.events: - if event.name == "gen_ai.input.messages": - attrs = event.attributes - assert attrs is not None and isinstance(attrs, Mapping) - content = attrs.get("gen_ai.event.content") - assert isinstance(content, str) and content.strip() != "" - import json - - data = json.loads(content) - assert isinstance(data, list) and len(data) > 0 - # Validate content fields ARE present - for entry in data: - if entry.get("role") == "user": - parts = entry.get("parts") - assert isinstance(parts, list) and len(parts) > 0 - for part in parts: - if part.get("type") == "text": - assert ( - "content" in part - and isinstance(part["content"], str) - and part["content"].strip() != "" - ), "Text content should be present when content recording is enabled" - elif event.name == "gen_ai.output.messages": - attrs = event.attributes - assert attrs is not None and isinstance(attrs, Mapping) - content = attrs.get("gen_ai.event.content") - assert isinstance(content, str) and content.strip() != "" - import json - - data = json.loads(content) - assert isinstance(data, list) and len(data) > 0 
- first = data[0] - assert first.get("role") in ("assistant", "tool") - parts = first.get("parts") - assert isinstance(parts, list) and len(parts) > 0 - # Check for MCP-specific content - for part in parts: - if part.get("type") == "tool_call": - tool_content = part.get("content") - assert isinstance(tool_content, dict) - tool_type = tool_content.get("type") - if tool_type in ("mcp_list_tools", "mcp_approval_request"): - assert "id" in tool_content - if tool_type == "mcp_list_tools": - assert ( - "server_label" in tool_content - ), "server_label should be present for mcp_list_tools when content recording is enabled" - elif tool_type == "mcp_approval_request": - assert ( - "name" in tool_content - ), "name should be present for mcp_approval_request when content recording is enabled" - assert ( - "server_label" in tool_content - ), "server_label should be present for mcp_approval_request when content recording is enabled" - assert ( - "arguments" in tool_content - ), "arguments should be present for mcp_approval_request when content recording is enabled" + if use_events: + for event in span1.events: + if event.name == "gen_ai.input.messages": + attrs = event.attributes + assert attrs is not None and isinstance(attrs, Mapping) + content = attrs.get("gen_ai.event.content") + assert isinstance(content, str) and content.strip() != "" + import json + + data = json.loads(content) + assert isinstance(data, list) and len(data) > 0 + # Validate content fields ARE present + for entry in data: + if entry.get("role") == "user": + parts = entry.get("parts") + assert isinstance(parts, list) and len(parts) > 0 + for part in parts: + if part.get("type") == "text": + assert ( + "content" in part + and isinstance(part["content"], str) + and part["content"].strip() != "" + ), "Text content should be present when content recording is enabled" + elif event.name == "gen_ai.output.messages": + attrs = event.attributes + assert attrs is not None and isinstance(attrs, Mapping) + content = 
attrs.get("gen_ai.event.content") + assert isinstance(content, str) and content.strip() != "" + import json + + data = json.loads(content) + assert isinstance(data, list) and len(data) > 0 + first = data[0] + assert first.get("role") in ("assistant", "tool") + parts = first.get("parts") + assert isinstance(parts, list) and len(parts) > 0 + # Check for MCP-specific content + for part in parts: + if part.get("type") == "tool_call": + tool_content = part.get("content") + assert isinstance(tool_content, dict) + tool_type = tool_content.get("type") + if tool_type in ("mcp_list_tools", "mcp_approval_request"): + assert "id" in tool_content + if tool_type == "mcp_list_tools": + assert ( + "server_label" in tool_content + ), "server_label should be present for mcp_list_tools when content recording is enabled" + elif tool_type == "mcp_approval_request": + assert ( + "name" in tool_content + ), "name should be present for mcp_approval_request when content recording is enabled" + assert ( + "server_label" in tool_content + ), "server_label should be present for mcp_approval_request when content recording is enabled" + assert ( + "arguments" in tool_content + ), "arguments should be present for mcp_approval_request when content recording is enabled" # Validate second response span (approval response) span2 = spans[1] @@ -207,113 +226,126 @@ def test_sync_mcp_non_streaming_with_content_recording(self, **kwargs): ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + + # Add message attributes when not using events + if not use_events: + expected_attributes_2.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) + assert GenAiTraceVerifier().check_span_attributes(span2, expected_attributes_2) # Validate MCP approval response and call in second span - for event in span2.events: - if event.name == "gen_ai.input.messages": - attrs = event.attributes - assert attrs is not None and isinstance(attrs, Mapping) - content = 
attrs.get("gen_ai.event.content") - assert isinstance(content, str) and content.strip() != "" - import json - - data = json.loads(content) - # Check for MCP approval response content - for entry in data: - if entry.get("role") == "user": + # Only validate events when use_events is True + if use_events: + for event in span2.events: + if event.name == "gen_ai.input.messages": + attrs = event.attributes + assert attrs is not None and isinstance(attrs, Mapping) + content = attrs.get("gen_ai.event.content") + assert isinstance(content, str) and content.strip() != "" + import json + + data = json.loads(content) + # Check for MCP approval response content + for entry in data: + if entry.get("role") == "user": + parts = entry.get("parts") + for part in parts: + if part.get("type") == "mcp": + mcp_content = part.get("content") + assert isinstance(mcp_content, dict) + if mcp_content.get("type") == "mcp_approval_response": + assert "id" in mcp_content + assert ( + "approval_request_id" in mcp_content + ), "approval_request_id should be present when content recording is enabled" + elif event.name == "gen_ai.output.messages": + attrs = event.attributes + assert attrs is not None and isinstance(attrs, Mapping) + content = attrs.get("gen_ai.event.content") + assert isinstance(content, str) and content.strip() != "" + import json + + data = json.loads(content) + # Check for MCP call content + for entry in data: parts = entry.get("parts") - for part in parts: - if part.get("type") == "mcp": - mcp_content = part.get("content") - assert isinstance(mcp_content, dict) - if mcp_content.get("type") == "mcp_approval_response": - assert "id" in mcp_content - assert ( - "approval_request_id" in mcp_content - ), "approval_request_id should be present when content recording is enabled" - elif event.name == "gen_ai.output.messages": - attrs = event.attributes - assert attrs is not None and isinstance(attrs, Mapping) - content = attrs.get("gen_ai.event.content") - assert isinstance(content, 
str) and content.strip() != "" - import json - - data = json.loads(content) - # Check for MCP call content - for entry in data: - parts = entry.get("parts") - if parts: - for part in parts: - if part.get("type") == "tool_call": - tool_content = part.get("content") - if tool_content and tool_content.get("type") == "mcp_call": - assert "id" in tool_content - assert ( - "name" in tool_content - ), "name should be present for mcp_call when content recording is enabled" - assert ( - "arguments" in tool_content - ), "arguments should be present for mcp_call when content recording is enabled" + if parts: + for part in parts: + if part.get("type") == "tool_call": + tool_content = part.get("content") + if tool_content and tool_content.get("type") == "mcp_call": + assert "id" in tool_content + assert ( + "name" in tool_content + ), "name should be present for mcp_call when content recording is enabled" + assert ( + "arguments" in tool_content + ), "arguments should be present for mcp_call when content recording is enabled" + assert ( + "server_label" in tool_content + ), "server_label should be present for mcp_call when content recording is enabled" + elif part.get("type") == "text": assert ( - "server_label" in tool_content - ), "server_label should be present for mcp_call when content recording is enabled" - elif part.get("type") == "text": - assert ( - "content" in part - ), "text content should be present when content recording is enabled" + "content" in part + ), "text content should be present when content recording is enabled" # Check list_conversation_items span list_spans = self.exporter.get_spans_by_name("list_conversation_items") assert len(list_spans) == 1, "Should have one list_conversation_items span" list_span = list_spans[0] - for event in list_span.events: - if event.name == "gen_ai.conversation.item": - attrs = event.attributes - assert attrs is not None and isinstance(attrs, Mapping) - content = attrs.get("gen_ai.event.content") - assert 
isinstance(content, str) and content.strip() != "" - import json - - data = json.loads(content) - # Validate MCP content in conversation items - for entry in data: - if entry.get("role") == "user": - parts = entry.get("parts") - for part in parts: - if part.get("type") == "text": - assert ( - "content" in part - ), "text content should be present in conversation items when content recording is enabled" - elif part.get("type") == "mcp": - mcp_content = part.get("content") - if mcp_content and mcp_content.get("type") == "mcp_approval_response": + if use_events: + for event in list_span.events: + if event.name == "gen_ai.conversation.item": + attrs = event.attributes + assert attrs is not None and isinstance(attrs, Mapping) + content = attrs.get("gen_ai.event.content") + assert isinstance(content, str) and content.strip() != "" + import json + + data = json.loads(content) + # Validate MCP content in conversation items + for entry in data: + if entry.get("role") == "user": + parts = entry.get("parts") + for part in parts: + if part.get("type") == "text": assert ( - "approval_request_id" in mcp_content - ), "approval_request_id should be present when content recording is enabled" - elif entry.get("role") == "assistant": - parts = entry.get("parts") - for part in parts: - if part.get("type") == "text": - assert ( - "content" in part - ), "text content should be present in conversation items when content recording is enabled" - elif part.get("type") == "mcp": - mcp_content = part.get("content") - if mcp_content: - mcp_type = mcp_content.get("type") - if mcp_type in ("mcp_list_tools", "mcp_call", "mcp_approval_request"): - assert "id" in mcp_content - if mcp_type == "mcp_call": - assert ( - "name" in mcp_content - ), "name should be present for mcp_call in conversation items" - assert ( - "server_label" in mcp_content - ), "server_label should be present for mcp_call in conversation items" - else: - assert False, f"Unexpected event name in list_conversation_items span: 
{event.name}" + "content" in part + ), "text content should be present in conversation items when content recording is enabled" + elif part.get("type") == "mcp": + mcp_content = part.get("content") + if mcp_content and mcp_content.get("type") == "mcp_approval_response": + assert ( + "approval_request_id" in mcp_content + ), "approval_request_id should be present when content recording is enabled" + elif entry.get("role") == "assistant": + parts = entry.get("parts") + for part in parts: + if part.get("type") == "text": + assert ( + "content" in part + ), "text content should be present in conversation items when content recording is enabled" + elif part.get("type") == "mcp": + mcp_content = part.get("content") + if mcp_content: + mcp_type = mcp_content.get("type") + if mcp_type in ("mcp_list_tools", "mcp_call", "mcp_approval_request"): + assert "id" in mcp_content + if mcp_type == "mcp_call": + assert ( + "name" in mcp_content + ), "name should be present for mcp_call in conversation items" + assert ( + "server_label" in mcp_content + ), "server_label should be present for mcp_call in conversation items" + else: + assert False, f"Unexpected event name in list_conversation_items span: {event.name}" # Cleanup openai_client.conversations.delete(conversation_id=conversation.id) @@ -321,12 +353,29 @@ def test_sync_mcp_non_streaming_with_content_recording(self, **kwargs): finally: project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version) - @pytest.mark.usefixtures("instrument_without_content") + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) + def test_sync_mcp_non_streaming_with_content_recording_events(self, **kwargs): + """Test synchronous MCP agent with non-streaming and content recording enabled (event-based messages).""" + self._test_sync_mcp_non_streaming_with_content_recording_impl(True, **kwargs) + + 
@pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - def test_sync_mcp_non_streaming_without_content_recording(self, **kwargs): - """Test synchronous MCP agent with non-streaming and content recording disabled.""" + def test_sync_mcp_non_streaming_with_content_recording_attributes(self, **kwargs): + """Test synchronous MCP agent with non-streaming and content recording enabled (attribute-based messages).""" + self._test_sync_mcp_non_streaming_with_content_recording_impl(False, **kwargs) + + def _test_sync_mcp_non_streaming_without_content_recording_impl(self, use_events, **kwargs): + """Implementation for testing synchronous MCP agent with non-streaming and content recording disabled. + + Args: + use_events: If True, use event-based message tracing. If False, use attribute-based. + Note: MCP tests currently only validate event mode regardless of this setting. + """ self.cleanup() + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -407,6 +456,7 @@ def test_sync_mcp_non_streaming_without_content_recording(self, **kwargs): # Validate first response span (MCP tool trigger) span1 = spans[0] expected_attributes_1 = [ + ("az.namespace", "Microsoft.CognitiveServices"), ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), @@ -418,67 +468,80 @@ def test_sync_mcp_non_streaming_without_content_recording(self, **kwargs): ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + + # Add message attributes when not using events + if not use_events: + expected_attributes_1.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) + assert GenAiTraceVerifier().check_span_attributes(span1, expected_attributes_1) # Comprehensive event validation for first span - verify content is NOT present + # Only 
validate events when use_events is True from collections.abc import Mapping - for event in span1.events: - if event.name == "gen_ai.input.messages": - attrs = event.attributes - assert attrs is not None and isinstance(attrs, Mapping) - content = attrs.get("gen_ai.event.content") - assert isinstance(content, str) and content.strip() != "" - import json - - data = json.loads(content) - assert isinstance(data, list) and len(data) > 0 - # Validate content fields are NOT present - for entry in data: - if entry.get("role") == "user": - parts = entry.get("parts") - assert isinstance(parts, list) and len(parts) > 0 - for part in parts: - if part.get("type") == "text": - assert ( - "content" not in part - ), "Text content should NOT be present when content recording is disabled" - elif event.name == "gen_ai.output.messages": - attrs = event.attributes - assert attrs is not None and isinstance(attrs, Mapping) - content = attrs.get("gen_ai.event.content") - assert isinstance(content, str) and content.strip() != "" - import json - - data = json.loads(content) - assert isinstance(data, list) and len(data) > 0 - first = data[0] - assert first.get("role") in ("assistant", "tool") - parts = first.get("parts") - assert isinstance(parts, list) and len(parts) > 0 - # Check for MCP-specific content - should have type and id but not detailed fields - for part in parts: - if part.get("type") == "tool_call": - tool_content = part.get("content") - assert isinstance(tool_content, dict) - tool_type = tool_content.get("type") - if tool_type in ("mcp_list_tools", "mcp_approval_request"): - assert "id" in tool_content - if tool_type == "mcp_list_tools": - # server_label might be present but other details should not - pass - elif tool_type == "mcp_approval_request": - # Should not have name, arguments when content recording is disabled - assert ( - "name" not in tool_content - ), "name should NOT be present for mcp_approval_request when content recording is disabled" - assert ( - "arguments" 
not in tool_content - ), "arguments should NOT be present for mcp_approval_request when content recording is disabled" + if use_events: + for event in span1.events: + if event.name == "gen_ai.input.messages": + attrs = event.attributes + assert attrs is not None and isinstance(attrs, Mapping) + content = attrs.get("gen_ai.event.content") + assert isinstance(content, str) and content.strip() != "" + import json + + data = json.loads(content) + assert isinstance(data, list) and len(data) > 0 + # Validate content fields are NOT present + for entry in data: + if entry.get("role") == "user": + parts = entry.get("parts") + assert isinstance(parts, list) and len(parts) > 0 + for part in parts: + if part.get("type") == "text": + assert ( + "content" not in part + ), "Text content should NOT be present when content recording is disabled" + elif event.name == "gen_ai.output.messages": + attrs = event.attributes + assert attrs is not None and isinstance(attrs, Mapping) + content = attrs.get("gen_ai.event.content") + assert isinstance(content, str) and content.strip() != "" + import json + + data = json.loads(content) + assert isinstance(data, list) and len(data) > 0 + first = data[0] + assert first.get("role") in ("assistant", "tool") + parts = first.get("parts") + assert isinstance(parts, list) and len(parts) > 0 + # Check for MCP-specific content - should have type and id but not detailed fields + for part in parts: + if part.get("type") == "tool_call": + tool_content = part.get("content") + assert isinstance(tool_content, dict) + tool_type = tool_content.get("type") + if tool_type in ("mcp_list_tools", "mcp_approval_request"): + assert "id" in tool_content + if tool_type == "mcp_list_tools": + # server_label might be present but other details should not + pass + elif tool_type == "mcp_approval_request": + # Should not have name, arguments when content recording is disabled + assert ( + "name" not in tool_content + ), "name should NOT be present for mcp_approval_request 
when content recording is disabled" + assert ( + "arguments" not in tool_content + ), "arguments should NOT be present for mcp_approval_request when content recording is disabled" # Validate second response span (approval response) span2 = spans[1] expected_attributes_2 = [ + ("az.namespace", "Microsoft.CognitiveServices"), ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), @@ -490,6 +553,16 @@ def test_sync_mcp_non_streaming_without_content_recording(self, **kwargs): ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + + # Add message attributes when not using events + if not use_events: + expected_attributes_2.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) + assert GenAiTraceVerifier().check_span_attributes(span2, expected_attributes_2) # Validate MCP approval response and call in second span - content should be minimal @@ -595,16 +668,33 @@ def test_sync_mcp_non_streaming_without_content_recording(self, **kwargs): finally: project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version) + @pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) + def test_sync_mcp_non_streaming_without_content_recording_events(self, **kwargs): + """Test synchronous MCP agent with non-streaming and content recording disabled (event-based messages).""" + self._test_sync_mcp_non_streaming_without_content_recording_impl(True, **kwargs) + + @pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) + def test_sync_mcp_non_streaming_without_content_recording_attributes(self, **kwargs): + """Test synchronous MCP agent with non-streaming and content recording disabled (attribute-based messages).""" + 
self._test_sync_mcp_non_streaming_without_content_recording_impl(False, **kwargs) + # ======================================== # Sync MCP Agent Tests - Streaming # ======================================== - @pytest.mark.usefixtures("instrument_with_content") - @servicePreparer() - @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - def test_sync_mcp_streaming_with_content_recording(self, **kwargs): - """Test synchronous MCP agent with streaming and content recording enabled.""" + def _test_sync_mcp_streaming_with_content_recording_impl(self, use_events, **kwargs): + """Implementation for testing synchronous MCP agent with streaming and content recording enabled. + + Args: + use_events: If True, use event-based message tracing. If False, use attribute-based. + Note: MCP tests currently only validate event mode regardless of this setting. + """ self.cleanup() + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -694,6 +784,7 @@ def test_sync_mcp_streaming_with_content_recording(self, **kwargs): assert response_id_1 is not None expected_attributes_1 = [ + ("az.namespace", "Microsoft.CognitiveServices"), ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), @@ -705,6 +796,16 @@ def test_sync_mcp_streaming_with_content_recording(self, **kwargs): ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + + # Add message attributes when not using events + if not use_events: + expected_attributes_1.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) + assert GenAiTraceVerifier().check_span_attributes(span1, expected_attributes_1) # Comprehensive event validation - verify content IS present @@ -760,6 +861,7 @@ def test_sync_mcp_streaming_with_content_recording(self, **kwargs): assert response_id_2 is not None expected_attributes_2 = [ + ("az.namespace", 
"Microsoft.CognitiveServices"), ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), @@ -771,6 +873,16 @@ def test_sync_mcp_streaming_with_content_recording(self, **kwargs): ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + + # Add message attributes when not using events + if not use_events: + expected_attributes_2.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) + assert GenAiTraceVerifier().check_span_attributes(span2, expected_attributes_2) # Validate second span events @@ -838,12 +950,29 @@ def test_sync_mcp_streaming_with_content_recording(self, **kwargs): finally: project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version) - @pytest.mark.usefixtures("instrument_without_content") + @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - def test_sync_mcp_streaming_without_content_recording(self, **kwargs): - """Test synchronous MCP agent with streaming and content recording disabled.""" + def test_sync_mcp_streaming_with_content_recording_events(self, **kwargs): + """Test synchronous MCP agent with streaming and content recording enabled (event-based messages).""" + self._test_sync_mcp_streaming_with_content_recording_impl(True, **kwargs) + + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) + def test_sync_mcp_streaming_with_content_recording_attributes(self, **kwargs): + """Test synchronous MCP agent with streaming and content recording enabled (attribute-based messages).""" + self._test_sync_mcp_streaming_with_content_recording_impl(False, **kwargs) + + def _test_sync_mcp_streaming_without_content_recording_impl(self, use_events, **kwargs): + """Implementation for testing synchronous 
MCP agent with streaming and content recording disabled. + + Args: + use_events: If True, use event-based message tracing. If False, use attribute-based. + Note: MCP tests currently only validate event mode regardless of this setting. + """ self.cleanup() + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -933,6 +1062,7 @@ def test_sync_mcp_streaming_without_content_recording(self, **kwargs): assert response_id_1 is not None expected_attributes_1 = [ + ("az.namespace", "Microsoft.CognitiveServices"), ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), @@ -944,6 +1074,16 @@ def test_sync_mcp_streaming_without_content_recording(self, **kwargs): ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + + # Add message attributes when not using events + if not use_events: + expected_attributes_1.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) + assert GenAiTraceVerifier().check_span_attributes(span1, expected_attributes_1) # Comprehensive event validation - verify content is NOT present @@ -997,6 +1137,7 @@ def test_sync_mcp_streaming_without_content_recording(self, **kwargs): assert response_id_2 is not None expected_attributes_2 = [ + ("az.namespace", "Microsoft.CognitiveServices"), ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), @@ -1008,6 +1149,16 @@ def test_sync_mcp_streaming_without_content_recording(self, **kwargs): ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + + # Add message attributes when not using events + if not use_events: + expected_attributes_2.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) + assert GenAiTraceVerifier().check_span_attributes(span2, expected_attributes_2) # Validate 
second span events - content should be minimal @@ -1074,3 +1225,17 @@ def test_sync_mcp_streaming_without_content_recording(self, **kwargs): finally: project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version) + + @pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) + def test_sync_mcp_streaming_without_content_recording_events(self, **kwargs): + """Test synchronous MCP agent with streaming and content recording disabled (event-based messages).""" + self._test_sync_mcp_streaming_without_content_recording_impl(True, **kwargs) + + @pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) + def test_sync_mcp_streaming_without_content_recording_attributes(self, **kwargs): + """Test synchronous MCP agent with streaming and content recording disabled (attribute-based messages).""" + self._test_sync_mcp_streaming_without_content_recording_impl(False, **kwargs) diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py index fe3a86d55e72..d007365b9a5c 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py @@ -9,7 +9,11 @@ import os import pytest from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils -from azure.ai.projects.telemetry._utils import OPERATION_NAME_INVOKE_AGENT, SPAN_NAME_INVOKE_AGENT +from azure.ai.projects.telemetry._utils import ( + OPERATION_NAME_INVOKE_AGENT, + SPAN_NAME_INVOKE_AGENT, + _set_use_message_events, +) from azure.core.settings import settings from gen_ai_trace_verifier import GenAiTraceVerifier from devtools_testutils.aio import 
recorded_by_proxy_async @@ -34,12 +38,15 @@ class TestResponsesInstrumentorMCPAsync(TestAiAgentsInstrumentorBase): # Async MCP Agent Tests - Non-Streaming # ======================================== - @pytest.mark.usefixtures("instrument_with_content") - @servicePreparer() - @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - async def test_async_mcp_non_streaming_with_content_recording(self, **kwargs): - """Test asynchronous MCP agent with non-streaming and content recording enabled.""" + async def _test_async_mcp_non_streaming_with_content_recording_impl(self, use_events, **kwargs): + """Implementation for testing asynchronous MCP agent with non-streaming and content recording enabled. + + Args: + use_events: If True, use event-based message tracing. If False, use attribute-based. + Note: MCP tests currently only validate event mode regardless of this setting. + """ self.cleanup() + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -120,6 +127,7 @@ async def test_async_mcp_non_streaming_with_content_recording(self, **kwargs): # Validate first response span (MCP tool trigger) span1 = spans[0] expected_attributes_1 = [ + ("az.namespace", "Microsoft.CognitiveServices"), ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), @@ -131,72 +139,84 @@ async def test_async_mcp_non_streaming_with_content_recording(self, **kwargs): ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + + # Add message attributes when not using events + if not use_events: + expected_attributes_1.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) + assert GenAiTraceVerifier().check_span_attributes(span1, expected_attributes_1) # Comprehensive event validation for first span - verify content IS present from collections.abc import Mapping - for event in span1.events: - if 
event.name == "gen_ai.input.messages": - attrs = event.attributes - assert attrs is not None and isinstance(attrs, Mapping) - content = attrs.get("gen_ai.event.content") - assert isinstance(content, str) and content.strip() != "" - import json - - data = json.loads(content) - assert isinstance(data, list) and len(data) > 0 - # Validate content fields ARE present - for entry in data: - if entry.get("role") == "user": - parts = entry.get("parts") - assert isinstance(parts, list) and len(parts) > 0 - for part in parts: - if part.get("type") == "text": - assert ( - "content" in part - and isinstance(part["content"], str) - and part["content"].strip() != "" - ), "Text content should be present when content recording is enabled" - elif event.name == "gen_ai.output.messages": - attrs = event.attributes - assert attrs is not None and isinstance(attrs, Mapping) - content = attrs.get("gen_ai.event.content") - assert isinstance(content, str) and content.strip() != "" - import json - - data = json.loads(content) - assert isinstance(data, list) and len(data) > 0 - first = data[0] - assert first.get("role") in ("assistant", "tool") - parts = first.get("parts") - assert isinstance(parts, list) and len(parts) > 0 - # Check for MCP-specific content - for part in parts: - if part.get("type") == "tool_call": - tool_content = part.get("content") - assert isinstance(tool_content, dict) - tool_type = tool_content.get("type") - if tool_type in ("mcp_list_tools", "mcp_approval_request"): - assert "id" in tool_content - if tool_type == "mcp_list_tools": - assert ( - "server_label" in tool_content - ), "server_label should be present for mcp_list_tools when content recording is enabled" - elif tool_type == "mcp_approval_request": - assert ( - "name" in tool_content - ), "name should be present for mcp_approval_request when content recording is enabled" - assert ( - "server_label" in tool_content - ), "server_label should be present for mcp_approval_request when content recording is enabled" 
- assert ( - "arguments" in tool_content - ), "arguments should be present for mcp_approval_request when content recording is enabled" + if use_events: + for event in span1.events: + if event.name == "gen_ai.input.messages": + attrs = event.attributes + assert attrs is not None and isinstance(attrs, Mapping) + content = attrs.get("gen_ai.event.content") + assert isinstance(content, str) and content.strip() != "" + import json + + data = json.loads(content) + assert isinstance(data, list) and len(data) > 0 + # Validate content fields ARE present + for entry in data: + if entry.get("role") == "user": + parts = entry.get("parts") + assert isinstance(parts, list) and len(parts) > 0 + for part in parts: + if part.get("type") == "text": + assert ( + "content" in part + and isinstance(part["content"], str) + and part["content"].strip() != "" + ), "Text content should be present when content recording is enabled" + elif event.name == "gen_ai.output.messages": + attrs = event.attributes + assert attrs is not None and isinstance(attrs, Mapping) + content = attrs.get("gen_ai.event.content") + assert isinstance(content, str) and content.strip() != "" + import json + + data = json.loads(content) + assert isinstance(data, list) and len(data) > 0 + first = data[0] + assert first.get("role") in ("assistant", "tool") + parts = first.get("parts") + assert isinstance(parts, list) and len(parts) > 0 + # Check for MCP-specific content + for part in parts: + if part.get("type") == "tool_call": + tool_content = part.get("content") + assert isinstance(tool_content, dict) + tool_type = tool_content.get("type") + if tool_type in ("mcp_list_tools", "mcp_approval_request"): + assert "id" in tool_content + if tool_type == "mcp_list_tools": + assert ( + "server_label" in tool_content + ), "server_label should be present for mcp_list_tools when content recording is enabled" + elif tool_type == "mcp_approval_request": + assert ( + "name" in tool_content + ), "name should be present for 
mcp_approval_request when content recording is enabled" + assert ( + "server_label" in tool_content + ), "server_label should be present for mcp_approval_request when content recording is enabled" + assert ( + "arguments" in tool_content + ), "arguments should be present for mcp_approval_request when content recording is enabled" # Validate second response span (approval response) span2 = spans[1] expected_attributes_2 = [ + ("az.namespace", "Microsoft.CognitiveServices"), ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), @@ -208,113 +228,126 @@ async def test_async_mcp_non_streaming_with_content_recording(self, **kwargs): ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + + # Add message attributes when not using events + if not use_events: + expected_attributes_2.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) + assert GenAiTraceVerifier().check_span_attributes(span2, expected_attributes_2) # Validate MCP approval response and call in second span - for event in span2.events: - if event.name == "gen_ai.input.messages": - attrs = event.attributes - assert attrs is not None and isinstance(attrs, Mapping) - content = attrs.get("gen_ai.event.content") - assert isinstance(content, str) and content.strip() != "" - import json - - data = json.loads(content) - # Check for MCP approval response content - for entry in data: - if entry.get("role") == "user": + # Only validate events when use_events is True + if use_events: + for event in span2.events: + if event.name == "gen_ai.input.messages": + attrs = event.attributes + assert attrs is not None and isinstance(attrs, Mapping) + content = attrs.get("gen_ai.event.content") + assert isinstance(content, str) and content.strip() != "" + import json + + data = json.loads(content) + # Check for MCP approval response content + for entry in data: + if entry.get("role") == 
"user": + parts = entry.get("parts") + for part in parts: + if part.get("type") == "mcp": + mcp_content = part.get("content") + assert isinstance(mcp_content, dict) + if mcp_content.get("type") == "mcp_approval_response": + assert "id" in mcp_content + assert ( + "approval_request_id" in mcp_content + ), "approval_request_id should be present when content recording is enabled" + elif event.name == "gen_ai.output.messages": + attrs = event.attributes + assert attrs is not None and isinstance(attrs, Mapping) + content = attrs.get("gen_ai.event.content") + assert isinstance(content, str) and content.strip() != "" + import json + + data = json.loads(content) + # Check for MCP call content + for entry in data: parts = entry.get("parts") - for part in parts: - if part.get("type") == "mcp": - mcp_content = part.get("content") - assert isinstance(mcp_content, dict) - if mcp_content.get("type") == "mcp_approval_response": - assert "id" in mcp_content - assert ( - "approval_request_id" in mcp_content - ), "approval_request_id should be present when content recording is enabled" - elif event.name == "gen_ai.output.messages": - attrs = event.attributes - assert attrs is not None and isinstance(attrs, Mapping) - content = attrs.get("gen_ai.event.content") - assert isinstance(content, str) and content.strip() != "" - import json - - data = json.loads(content) - # Check for MCP call content - for entry in data: - parts = entry.get("parts") - if parts: - for part in parts: - if part.get("type") == "tool_call": - tool_content = part.get("content") - if tool_content and tool_content.get("type") == "mcp_call": - assert "id" in tool_content - assert ( - "name" in tool_content - ), "name should be present for mcp_call when content recording is enabled" - assert ( - "arguments" in tool_content - ), "arguments should be present for mcp_call when content recording is enabled" + if parts: + for part in parts: + if part.get("type") == "tool_call": + tool_content = part.get("content") + if 
tool_content and tool_content.get("type") == "mcp_call": + assert "id" in tool_content + assert ( + "name" in tool_content + ), "name should be present for mcp_call when content recording is enabled" + assert ( + "arguments" in tool_content + ), "arguments should be present for mcp_call when content recording is enabled" + assert ( + "server_label" in tool_content + ), "server_label should be present for mcp_call when content recording is enabled" + elif part.get("type") == "text": assert ( - "server_label" in tool_content - ), "server_label should be present for mcp_call when content recording is enabled" - elif part.get("type") == "text": - assert ( - "content" in part - ), "text content should be present when content recording is enabled" + "content" in part + ), "text content should be present when content recording is enabled" # Check list_conversation_items span list_spans = self.exporter.get_spans_by_name("list_conversation_items") assert len(list_spans) == 1, "Should have one list_conversation_items span" list_span = list_spans[0] - for event in list_span.events: - if event.name == "gen_ai.conversation.item": - attrs = event.attributes - assert attrs is not None and isinstance(attrs, Mapping) - content = attrs.get("gen_ai.event.content") - assert isinstance(content, str) and content.strip() != "" - import json - - data = json.loads(content) - # Validate MCP content in conversation items - for entry in data: - if entry.get("role") == "user": - parts = entry.get("parts") - for part in parts: - if part.get("type") == "text": - assert ( - "content" in part - ), "text content should be present in conversation items when content recording is enabled" - elif part.get("type") == "mcp": - mcp_content = part.get("content") - if mcp_content and mcp_content.get("type") == "mcp_approval_response": + if use_events: + for event in list_span.events: + if event.name == "gen_ai.conversation.item": + attrs = event.attributes + assert attrs is not None and isinstance(attrs, 
Mapping) + content = attrs.get("gen_ai.event.content") + assert isinstance(content, str) and content.strip() != "" + import json + + data = json.loads(content) + # Validate MCP content in conversation items + for entry in data: + if entry.get("role") == "user": + parts = entry.get("parts") + for part in parts: + if part.get("type") == "text": assert ( - "approval_request_id" in mcp_content - ), "approval_request_id should be present when content recording is enabled" - elif entry.get("role") == "assistant": - parts = entry.get("parts") - for part in parts: - if part.get("type") == "text": - assert ( - "content" in part - ), "text content should be present in conversation items when content recording is enabled" - elif part.get("type") == "mcp": - mcp_content = part.get("content") - if mcp_content: - mcp_type = mcp_content.get("type") - if mcp_type in ("mcp_list_tools", "mcp_call", "mcp_approval_request"): - assert "id" in mcp_content - if mcp_type == "mcp_call": - assert ( - "name" in mcp_content - ), "name should be present for mcp_call in conversation items" - assert ( - "server_label" in mcp_content - ), "server_label should be present for mcp_call in conversation items" - else: - assert False, f"Unexpected event name in list_conversation_items span: {event.name}" + "content" in part + ), "text content should be present in conversation items when content recording is enabled" + elif part.get("type") == "mcp": + mcp_content = part.get("content") + if mcp_content and mcp_content.get("type") == "mcp_approval_response": + assert ( + "approval_request_id" in mcp_content + ), "approval_request_id should be present when content recording is enabled" + elif entry.get("role") == "assistant": + parts = entry.get("parts") + for part in parts: + if part.get("type") == "text": + assert ( + "content" in part + ), "text content should be present in conversation items when content recording is enabled" + elif part.get("type") == "mcp": + mcp_content = part.get("content") + if 
mcp_content: + mcp_type = mcp_content.get("type") + if mcp_type in ("mcp_list_tools", "mcp_call", "mcp_approval_request"): + assert "id" in mcp_content + if mcp_type == "mcp_call": + assert ( + "name" in mcp_content + ), "name should be present for mcp_call in conversation items" + assert ( + "server_label" in mcp_content + ), "server_label should be present for mcp_call in conversation items" + else: + assert False, f"Unexpected event name in list_conversation_items span: {event.name}" # Cleanup await openai_client.conversations.delete(conversation_id=conversation.id) @@ -322,12 +355,29 @@ async def test_async_mcp_non_streaming_with_content_recording(self, **kwargs): finally: await project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version) - @pytest.mark.usefixtures("instrument_without_content") + @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - async def test_async_mcp_non_streaming_without_content_recording(self, **kwargs): - """Test asynchronous MCP agent with non-streaming and content recording disabled.""" + async def test_async_mcp_non_streaming_with_content_recording_events(self, **kwargs): + """Test asynchronous MCP agent with non-streaming and content recording enabled (event-based messages).""" + await self._test_async_mcp_non_streaming_with_content_recording_impl(True, **kwargs) + + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) + async def test_async_mcp_non_streaming_with_content_recording_attributes(self, **kwargs): + """Test asynchronous MCP agent with non-streaming and content recording enabled (attribute-based messages).""" + await self._test_async_mcp_non_streaming_with_content_recording_impl(False, **kwargs) + + async def _test_async_mcp_non_streaming_without_content_recording_impl(self, use_events, 
**kwargs): + """Implementation for testing asynchronous MCP agent with non-streaming and content recording disabled. + + Args: + use_events: If True, use event-based message tracing. If False, use attribute-based. + Note: MCP tests currently only validate event mode regardless of this setting. + """ self.cleanup() + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -408,6 +458,7 @@ async def test_async_mcp_non_streaming_without_content_recording(self, **kwargs) # Validate first response span (MCP tool trigger) span1 = spans[0] expected_attributes_1 = [ + ("az.namespace", "Microsoft.CognitiveServices"), ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), @@ -419,67 +470,79 @@ async def test_async_mcp_non_streaming_without_content_recording(self, **kwargs) ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + + # Add message attributes when not using events + if not use_events: + expected_attributes_1.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) + assert GenAiTraceVerifier().check_span_attributes(span1, expected_attributes_1) # Comprehensive event validation for first span - verify content is NOT present from collections.abc import Mapping - for event in span1.events: - if event.name == "gen_ai.input.messages": - attrs = event.attributes - assert attrs is not None and isinstance(attrs, Mapping) - content = attrs.get("gen_ai.event.content") - assert isinstance(content, str) and content.strip() != "" - import json - - data = json.loads(content) - assert isinstance(data, list) and len(data) > 0 - # Validate content fields are NOT present - for entry in data: - if entry.get("role") == "user": - parts = entry.get("parts") - assert isinstance(parts, list) and len(parts) > 0 - for part in parts: - if part.get("type") == "text": - assert ( - "content" not in part - ), 
"Text content should NOT be present when content recording is disabled" - elif event.name == "gen_ai.output.messages": - attrs = event.attributes - assert attrs is not None and isinstance(attrs, Mapping) - content = attrs.get("gen_ai.event.content") - assert isinstance(content, str) and content.strip() != "" - import json - - data = json.loads(content) - assert isinstance(data, list) and len(data) > 0 - first = data[0] - assert first.get("role") in ("assistant", "tool") - parts = first.get("parts") - assert isinstance(parts, list) and len(parts) > 0 - # Check for MCP-specific content - should have type and id but not detailed fields - for part in parts: - if part.get("type") == "tool_call": - tool_content = part.get("content") - assert isinstance(tool_content, dict) - tool_type = tool_content.get("type") - if tool_type in ("mcp_list_tools", "mcp_approval_request"): - assert "id" in tool_content - if tool_type == "mcp_list_tools": - # server_label might be present but other details should not - pass - elif tool_type == "mcp_approval_request": - # Should not have name, arguments when content recording is disabled - assert ( - "name" not in tool_content - ), "name should NOT be present for mcp_approval_request when content recording is disabled" - assert ( - "arguments" not in tool_content - ), "arguments should NOT be present for mcp_approval_request when content recording is disabled" + if use_events: + for event in span1.events: + if event.name == "gen_ai.input.messages": + attrs = event.attributes + assert attrs is not None and isinstance(attrs, Mapping) + content = attrs.get("gen_ai.event.content") + assert isinstance(content, str) and content.strip() != "" + import json + + data = json.loads(content) + assert isinstance(data, list) and len(data) > 0 + # Validate content fields are NOT present + for entry in data: + if entry.get("role") == "user": + parts = entry.get("parts") + assert isinstance(parts, list) and len(parts) > 0 + for part in parts: + if 
part.get("type") == "text": + assert ( + "content" not in part + ), "Text content should NOT be present when content recording is disabled" + elif event.name == "gen_ai.output.messages": + attrs = event.attributes + assert attrs is not None and isinstance(attrs, Mapping) + content = attrs.get("gen_ai.event.content") + assert isinstance(content, str) and content.strip() != "" + import json + + data = json.loads(content) + assert isinstance(data, list) and len(data) > 0 + first = data[0] + assert first.get("role") in ("assistant", "tool") + parts = first.get("parts") + assert isinstance(parts, list) and len(parts) > 0 + # Check for MCP-specific content - should have type and id but not detailed fields + for part in parts: + if part.get("type") == "tool_call": + tool_content = part.get("content") + assert isinstance(tool_content, dict) + tool_type = tool_content.get("type") + if tool_type in ("mcp_list_tools", "mcp_approval_request"): + assert "id" in tool_content + if tool_type == "mcp_list_tools": + # server_label might be present but other details should not + pass + elif tool_type == "mcp_approval_request": + # Should not have name, arguments when content recording is disabled + assert ( + "name" not in tool_content + ), "name should NOT be present for mcp_approval_request when content recording is disabled" + assert ( + "arguments" not in tool_content + ), "arguments should NOT be present for mcp_approval_request when content recording is disabled" # Validate second response span (approval response) span2 = spans[1] expected_attributes_2 = [ + ("az.namespace", "Microsoft.CognitiveServices"), ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), @@ -491,104 +554,117 @@ async def test_async_mcp_non_streaming_without_content_recording(self, **kwargs) ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + + # Add message attributes when not using events 
+ if not use_events: + expected_attributes_2.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) + assert GenAiTraceVerifier().check_span_attributes(span2, expected_attributes_2) # Validate MCP approval response and call in second span - content should be minimal - for event in span2.events: - if event.name == "gen_ai.input.messages": - attrs = event.attributes - assert attrs is not None and isinstance(attrs, Mapping) - content = attrs.get("gen_ai.event.content") - assert isinstance(content, str) and content.strip() != "" - import json - - data = json.loads(content) - # Check for MCP approval response content - should be minimal - for entry in data: - if entry.get("role") == "user": + # Only validate events when use_events is True + if use_events: + for event in span2.events: + if event.name == "gen_ai.input.messages": + attrs = event.attributes + assert attrs is not None and isinstance(attrs, Mapping) + content = attrs.get("gen_ai.event.content") + assert isinstance(content, str) and content.strip() != "" + import json + + data = json.loads(content) + # Check for MCP approval response content - should be minimal + for entry in data: + if entry.get("role") == "user": + parts = entry.get("parts") + for part in parts: + if part.get("type") == "mcp": + mcp_content = part.get("content") + assert isinstance(mcp_content, dict) + if mcp_content.get("type") == "mcp_approval_response": + assert "id" in mcp_content + # approval_request_id might not be present when content recording is disabled + elif event.name == "gen_ai.output.messages": + attrs = event.attributes + assert attrs is not None and isinstance(attrs, Mapping) + content = attrs.get("gen_ai.event.content") + assert isinstance(content, str) and content.strip() != "" + import json + + data = json.loads(content) + # Check for MCP call content - should be minimal + for entry in data: parts = entry.get("parts") - for part in parts: - if part.get("type") == "mcp": - mcp_content = 
part.get("content") - assert isinstance(mcp_content, dict) - if mcp_content.get("type") == "mcp_approval_response": - assert "id" in mcp_content - # approval_request_id might not be present when content recording is disabled - elif event.name == "gen_ai.output.messages": - attrs = event.attributes - assert attrs is not None and isinstance(attrs, Mapping) - content = attrs.get("gen_ai.event.content") - assert isinstance(content, str) and content.strip() != "" - import json - - data = json.loads(content) - # Check for MCP call content - should be minimal - for entry in data: - parts = entry.get("parts") - if parts: - for part in parts: - if part.get("type") == "tool_call": - tool_content = part.get("content") - if tool_content and tool_content.get("type") == "mcp_call": - assert "id" in tool_content - assert ( - "name" not in tool_content - ), "name should NOT be present for mcp_call when content recording is disabled" + if parts: + for part in parts: + if part.get("type") == "tool_call": + tool_content = part.get("content") + if tool_content and tool_content.get("type") == "mcp_call": + assert "id" in tool_content + assert ( + "name" not in tool_content + ), "name should NOT be present for mcp_call when content recording is disabled" + assert ( + "arguments" not in tool_content + ), "arguments should NOT be present for mcp_call when content recording is disabled" + elif part.get("type") == "text": assert ( - "arguments" not in tool_content - ), "arguments should NOT be present for mcp_call when content recording is disabled" - elif part.get("type") == "text": - assert ( - "content" not in part - ), "text content should NOT be present when content recording is disabled" + "content" not in part + ), "text content should NOT be present when content recording is disabled" # Check list_conversation_items span list_spans = self.exporter.get_spans_by_name("list_conversation_items") assert len(list_spans) == 1, "Should have one list_conversation_items span" list_span = 
list_spans[0] - for event in list_span.events: - if event.name == "gen_ai.conversation.item": - attrs = event.attributes - assert attrs is not None and isinstance(attrs, Mapping) - content = attrs.get("gen_ai.event.content") - assert isinstance(content, str) and content.strip() != "" - import json - - data = json.loads(content) - # Validate MCP content in conversation items - should be minimal - for entry in data: - if entry.get("role") == "user": - parts = entry.get("parts") - for part in parts: - if part.get("type") == "text": - assert ( - "content" not in part - ), "text content should NOT be present in conversation items when content recording is disabled" - elif part.get("type") == "mcp": - mcp_content = part.get("content") - if mcp_content and mcp_content.get("type") == "mcp_approval_response": - # Should have id but might not have other details - assert "id" in mcp_content - elif entry.get("role") == "assistant": - parts = entry.get("parts") - for part in parts: - if part.get("type") == "text": - assert ( - "content" not in part - ), "text content should NOT be present in conversation items when content recording is disabled" - elif part.get("type") == "mcp": - mcp_content = part.get("content") - if mcp_content: - mcp_type = mcp_content.get("type") - if mcp_type == "mcp_call": + if use_events: + for event in list_span.events: + if event.name == "gen_ai.conversation.item": + attrs = event.attributes + assert attrs is not None and isinstance(attrs, Mapping) + content = attrs.get("gen_ai.event.content") + assert isinstance(content, str) and content.strip() != "" + import json + + data = json.loads(content) + # Validate MCP content in conversation items - should be minimal + for entry in data: + if entry.get("role") == "user": + parts = entry.get("parts") + for part in parts: + if part.get("type") == "text": + assert ( + "content" not in part + ), "text content should NOT be present in conversation items when content recording is disabled" + elif 
part.get("type") == "mcp": + mcp_content = part.get("content") + if mcp_content and mcp_content.get("type") == "mcp_approval_response": + # Should have id but might not have other details assert "id" in mcp_content - # Should not have name, server_label, arguments when content recording is disabled - assert ( - "name" not in mcp_content - ), "name should NOT be present for mcp_call in conversation items when content recording is disabled" - else: - assert False, f"Unexpected event name in list_conversation_items span: {event.name}" + elif entry.get("role") == "assistant": + parts = entry.get("parts") + for part in parts: + if part.get("type") == "text": + assert ( + "content" not in part + ), "text content should NOT be present in conversation items when content recording is disabled" + elif part.get("type") == "mcp": + mcp_content = part.get("content") + if mcp_content: + mcp_type = mcp_content.get("type") + if mcp_type == "mcp_call": + assert "id" in mcp_content + # Should not have name, server_label, arguments when content recording is disabled + assert ( + "name" not in mcp_content + ), "name should NOT be present for mcp_call in conversation items when content recording is disabled" + else: + assert False, f"Unexpected event name in list_conversation_items span: {event.name}" # Cleanup await openai_client.conversations.delete(conversation_id=conversation.id) @@ -596,16 +672,33 @@ async def test_async_mcp_non_streaming_without_content_recording(self, **kwargs) finally: await project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version) + @pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) + async def test_async_mcp_non_streaming_without_content_recording_events(self, **kwargs): + """Test asynchronous MCP agent with non-streaming and content recording disabled (event-based messages).""" + await 
self._test_async_mcp_non_streaming_without_content_recording_impl(True, **kwargs) + + @pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) + async def test_async_mcp_non_streaming_without_content_recording_attributes(self, **kwargs): + """Test asynchronous MCP agent with non-streaming and content recording disabled (attribute-based messages).""" + await self._test_async_mcp_non_streaming_without_content_recording_impl(False, **kwargs) + # ======================================== # Async MCP Agent Tests - Streaming # ======================================== - @pytest.mark.usefixtures("instrument_with_content") - @servicePreparer() - @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - async def test_async_mcp_streaming_with_content_recording(self, **kwargs): - """Test asynchronous MCP agent with streaming and content recording enabled.""" + async def _test_async_mcp_streaming_with_content_recording_impl(self, use_events, **kwargs): + """Implementation for testing asynchronous MCP agent with streaming and content recording enabled. + + Args: + use_events: If True, use event-based message tracing. If False, use attribute-based. + Note: MCP tests currently only validate event mode regardless of this setting. 
+ """ self.cleanup() + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -695,6 +788,7 @@ async def test_async_mcp_streaming_with_content_recording(self, **kwargs): assert response_id_1 is not None expected_attributes_1 = [ + ("az.namespace", "Microsoft.CognitiveServices"), ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), @@ -706,53 +800,64 @@ async def test_async_mcp_streaming_with_content_recording(self, **kwargs): ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + + # Add message attributes when not using events + if not use_events: + expected_attributes_1.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) + assert GenAiTraceVerifier().check_span_attributes(span1, expected_attributes_1) # Comprehensive event validation - verify content IS present from collections.abc import Mapping - for event in span1.events: - if event.name == "gen_ai.input.messages": - attrs = event.attributes - assert attrs is not None and isinstance(attrs, Mapping) - content = attrs.get("gen_ai.event.content") - assert isinstance(content, str) and content.strip() != "" - import json - - data = json.loads(content) - for entry in data: - if entry.get("role") == "user": + if use_events: + for event in span1.events: + if event.name == "gen_ai.input.messages": + attrs = event.attributes + assert attrs is not None and isinstance(attrs, Mapping) + content = attrs.get("gen_ai.event.content") + assert isinstance(content, str) and content.strip() != "" + import json + + data = json.loads(content) + for entry in data: + if entry.get("role") == "user": + parts = entry.get("parts") + for part in parts: + if part.get("type") == "text": + assert ( + "content" in part + and isinstance(part["content"], str) + and part["content"].strip() != "" + ), "Text content should be present when content recording 
is enabled" + elif event.name == "gen_ai.output.messages": + attrs = event.attributes + assert attrs is not None and isinstance(attrs, Mapping) + content = attrs.get("gen_ai.event.content") + assert isinstance(content, str) and content.strip() != "" + import json + + data = json.loads(content) + for entry in data: parts = entry.get("parts") - for part in parts: - if part.get("type") == "text": - assert ( - "content" in part - and isinstance(part["content"], str) - and part["content"].strip() != "" - ), "Text content should be present when content recording is enabled" - elif event.name == "gen_ai.output.messages": - attrs = event.attributes - assert attrs is not None and isinstance(attrs, Mapping) - content = attrs.get("gen_ai.event.content") - assert isinstance(content, str) and content.strip() != "" - import json - - data = json.loads(content) - for entry in data: - parts = entry.get("parts") - if parts: - for part in parts: - if part.get("type") == "tool_call": - tool_content = part.get("content") - if tool_content: - tool_type = tool_content.get("type") - if tool_type == "mcp_approval_request": - assert ( - "name" in tool_content - ), "name should be present for mcp_approval_request when content recording is enabled" - assert ( - "arguments" in tool_content - ), "arguments should be present for mcp_approval_request when content recording is enabled" + if parts: + for part in parts: + if part.get("type") == "tool_call": + tool_content = part.get("content") + if tool_content: + tool_type = tool_content.get("type") + if tool_type == "mcp_approval_request": + assert ( + "name" in tool_content + ), "name should be present for mcp_approval_request when content recording is enabled" + assert ( + "arguments" in tool_content + ), "arguments should be present for mcp_approval_request when content recording is enabled" # Validate second response span span2 = spans[1] @@ -761,6 +866,7 @@ async def test_async_mcp_streaming_with_content_recording(self, **kwargs): assert 
response_id_2 is not None expected_attributes_2 = [ + ("az.namespace", "Microsoft.CognitiveServices"), ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), @@ -772,66 +878,78 @@ async def test_async_mcp_streaming_with_content_recording(self, **kwargs): ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + + # Add message attributes when not using events + if not use_events: + expected_attributes_2.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) + assert GenAiTraceVerifier().check_span_attributes(span2, expected_attributes_2) # Validate second span events - for event in span2.events: - if event.name == "gen_ai.output.messages": - attrs = event.attributes - assert attrs is not None and isinstance(attrs, Mapping) - content = attrs.get("gen_ai.event.content") - assert isinstance(content, str) and content.strip() != "" - import json - - data = json.loads(content) - for entry in data: - parts = entry.get("parts") - if parts: - for part in parts: - if part.get("type") == "tool_call": - tool_content = part.get("content") - if tool_content and tool_content.get("type") == "mcp_call": - assert ( - "name" in tool_content - ), "name should be present for mcp_call when content recording is enabled" + if use_events: + for event in span2.events: + if event.name == "gen_ai.output.messages": + attrs = event.attributes + assert attrs is not None and isinstance(attrs, Mapping) + content = attrs.get("gen_ai.event.content") + assert isinstance(content, str) and content.strip() != "" + import json + + data = json.loads(content) + for entry in data: + parts = entry.get("parts") + if parts: + for part in parts: + if part.get("type") == "tool_call": + tool_content = part.get("content") + if tool_content and tool_content.get("type") == "mcp_call": + assert ( + "name" in tool_content + ), "name should be present for mcp_call when content 
recording is enabled" + assert ( + "arguments" in tool_content + ), "arguments should be present for mcp_call when content recording is enabled" + elif part.get("type") == "text": assert ( - "arguments" in tool_content - ), "arguments should be present for mcp_call when content recording is enabled" - elif part.get("type") == "text": - assert ( - "content" in part - ), "text content should be present when content recording is enabled" + "content" in part + ), "text content should be present when content recording is enabled" # Check list_conversation_items span list_spans = self.exporter.get_spans_by_name("list_conversation_items") assert len(list_spans) == 1 list_span = list_spans[0] - for event in list_span.events: - if event.name == "gen_ai.conversation.item": - attrs = event.attributes - assert attrs is not None and isinstance(attrs, Mapping) - content = attrs.get("gen_ai.event.content") - assert isinstance(content, str) and content.strip() != "" - import json - - data = json.loads(content) - for entry in data: - parts = entry.get("parts") - if parts: - for part in parts: - if part.get("type") == "text": - assert ( - "content" in part - ), "text content should be present in conversation items when content recording is enabled" - elif part.get("type") == "mcp": - mcp_content = part.get("content") - if mcp_content and mcp_content.get("type") == "mcp_call": + if use_events: + for event in list_span.events: + if event.name == "gen_ai.conversation.item": + attrs = event.attributes + assert attrs is not None and isinstance(attrs, Mapping) + content = attrs.get("gen_ai.event.content") + assert isinstance(content, str) and content.strip() != "" + import json + + data = json.loads(content) + for entry in data: + parts = entry.get("parts") + if parts: + for part in parts: + if part.get("type") == "text": assert ( - "name" in mcp_content - ), "name should be present for mcp_call in conversation items when content recording is enabled" - else: - assert False, f"Unexpected 
event name in list_conversation_items span: {event.name}" + "content" in part + ), "text content should be present in conversation items when content recording is enabled" + elif part.get("type") == "mcp": + mcp_content = part.get("content") + if mcp_content and mcp_content.get("type") == "mcp_call": + assert ( + "name" in mcp_content + ), "name should be present for mcp_call in conversation items when content recording is enabled" + else: + assert False, f"Unexpected event name in list_conversation_items span: {event.name}" # Cleanup await openai_client.conversations.delete(conversation_id=conversation.id) @@ -839,12 +957,29 @@ async def test_async_mcp_streaming_with_content_recording(self, **kwargs): finally: await project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version) - @pytest.mark.usefixtures("instrument_without_content") + @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) - async def test_async_mcp_streaming_without_content_recording(self, **kwargs): - """Test asynchronous MCP agent with streaming and content recording disabled.""" + async def test_async_mcp_streaming_with_content_recording_events(self, **kwargs): + """Test asynchronous MCP agent with streaming and content recording enabled (event-based messages).""" + await self._test_async_mcp_streaming_with_content_recording_impl(True, **kwargs) + + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) + async def test_async_mcp_streaming_with_content_recording_attributes(self, **kwargs): + """Test asynchronous MCP agent with streaming and content recording enabled (attribute-based messages).""" + await self._test_async_mcp_streaming_with_content_recording_impl(False, **kwargs) + + async def _test_async_mcp_streaming_without_content_recording_impl(self, use_events, 
**kwargs): + """Implementation for testing asynchronous MCP agent with streaming and content recording disabled. + + Args: + use_events: If True, use event-based message tracing. If False, use attribute-based. + Note: MCP tests currently only validate event mode regardless of this setting. + """ self.cleanup() + _set_use_message_events(use_events) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -934,6 +1069,7 @@ async def test_async_mcp_streaming_without_content_recording(self, **kwargs): assert response_id_1 is not None expected_attributes_1 = [ + ("az.namespace", "Microsoft.CognitiveServices"), ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), @@ -945,51 +1081,62 @@ async def test_async_mcp_streaming_without_content_recording(self, **kwargs): ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + + # Add message attributes when not using events + if not use_events: + expected_attributes_1.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) + assert GenAiTraceVerifier().check_span_attributes(span1, expected_attributes_1) # Comprehensive event validation - verify content is NOT present from collections.abc import Mapping - for event in span1.events: - if event.name == "gen_ai.input.messages": - attrs = event.attributes - assert attrs is not None and isinstance(attrs, Mapping) - content = attrs.get("gen_ai.event.content") - assert isinstance(content, str) and content.strip() != "" - import json - - data = json.loads(content) - for entry in data: - if entry.get("role") == "user": + if use_events: + for event in span1.events: + if event.name == "gen_ai.input.messages": + attrs = event.attributes + assert attrs is not None and isinstance(attrs, Mapping) + content = attrs.get("gen_ai.event.content") + assert isinstance(content, str) and content.strip() != "" + import json + + data = json.loads(content) + 
for entry in data: + if entry.get("role") == "user": + parts = entry.get("parts") + for part in parts: + if part.get("type") == "text": + assert ( + "content" not in part + ), "Text content should NOT be present when content recording is disabled" + elif event.name == "gen_ai.output.messages": + attrs = event.attributes + assert attrs is not None and isinstance(attrs, Mapping) + content = attrs.get("gen_ai.event.content") + assert isinstance(content, str) and content.strip() != "" + import json + + data = json.loads(content) + for entry in data: parts = entry.get("parts") - for part in parts: - if part.get("type") == "text": - assert ( - "content" not in part - ), "Text content should NOT be present when content recording is disabled" - elif event.name == "gen_ai.output.messages": - attrs = event.attributes - assert attrs is not None and isinstance(attrs, Mapping) - content = attrs.get("gen_ai.event.content") - assert isinstance(content, str) and content.strip() != "" - import json - - data = json.loads(content) - for entry in data: - parts = entry.get("parts") - if parts: - for part in parts: - if part.get("type") == "tool_call": - tool_content = part.get("content") - if tool_content: - tool_type = tool_content.get("type") - if tool_type == "mcp_approval_request": - assert ( - "name" not in tool_content - ), "name should NOT be present for mcp_approval_request when content recording is disabled" - assert ( - "arguments" not in tool_content - ), "arguments should NOT be present for mcp_approval_request when content recording is disabled" + if parts: + for part in parts: + if part.get("type") == "tool_call": + tool_content = part.get("content") + if tool_content: + tool_type = tool_content.get("type") + if tool_type == "mcp_approval_request": + assert ( + "name" not in tool_content + ), "name should NOT be present for mcp_approval_request when content recording is disabled" + assert ( + "arguments" not in tool_content + ), "arguments should NOT be present for 
mcp_approval_request when content recording is disabled" # Validate second response span span2 = spans[1] @@ -998,6 +1145,7 @@ async def test_async_mcp_streaming_without_content_recording(self, **kwargs): assert response_id_2 is not None expected_attributes_2 = [ + ("az.namespace", "Microsoft.CognitiveServices"), ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), @@ -1009,69 +1157,95 @@ async def test_async_mcp_streaming_without_content_recording(self, **kwargs): ("gen_ai.usage.input_tokens", "+"), ("gen_ai.usage.output_tokens", "+"), ] + + # Add message attributes when not using events + if not use_events: + expected_attributes_2.extend( + [ + ("gen_ai.input.messages", ""), + ("gen_ai.output.messages", ""), + ] + ) + assert GenAiTraceVerifier().check_span_attributes(span2, expected_attributes_2) # Validate second span events - content should be minimal - for event in span2.events: - if event.name == "gen_ai.output.messages": - attrs = event.attributes - assert attrs is not None and isinstance(attrs, Mapping) - content = attrs.get("gen_ai.event.content") - assert isinstance(content, str) and content.strip() != "" - import json - - data = json.loads(content) - for entry in data: - parts = entry.get("parts") - if parts: - for part in parts: - if part.get("type") == "tool_call": - tool_content = part.get("content") - if tool_content and tool_content.get("type") == "mcp_call": - assert ( - "name" not in tool_content - ), "name should NOT be present for mcp_call when content recording is disabled" + if use_events: + for event in span2.events: + if event.name == "gen_ai.output.messages": + attrs = event.attributes + assert attrs is not None and isinstance(attrs, Mapping) + content = attrs.get("gen_ai.event.content") + assert isinstance(content, str) and content.strip() != "" + import json + + data = json.loads(content) + for entry in data: + parts = entry.get("parts") + if 
parts: + for part in parts: + if part.get("type") == "tool_call": + tool_content = part.get("content") + if tool_content and tool_content.get("type") == "mcp_call": + assert ( + "name" not in tool_content + ), "name should NOT be present for mcp_call when content recording is disabled" + assert ( + "arguments" not in tool_content + ), "arguments should NOT be present for mcp_call when content recording is disabled" + elif part.get("type") == "text": assert ( - "arguments" not in tool_content - ), "arguments should NOT be present for mcp_call when content recording is disabled" - elif part.get("type") == "text": - assert ( - "content" not in part - ), "text content should NOT be present when content recording is disabled" + "content" not in part + ), "text content should NOT be present when content recording is disabled" # Check list_conversation_items span list_spans = self.exporter.get_spans_by_name("list_conversation_items") assert len(list_spans) == 1 list_span = list_spans[0] - for event in list_span.events: - if event.name == "gen_ai.conversation.item": - attrs = event.attributes - assert attrs is not None and isinstance(attrs, Mapping) - content = attrs.get("gen_ai.event.content") - assert isinstance(content, str) and content.strip() != "" - import json - - data = json.loads(content) - for entry in data: - parts = entry.get("parts") - if parts: - for part in parts: - if part.get("type") == "text": - assert ( - "content" not in part - ), "text content should NOT be present in conversation items when content recording is disabled" - elif part.get("type") == "mcp": - mcp_content = part.get("content") - if mcp_content and mcp_content.get("type") == "mcp_call": + if use_events: + for event in list_span.events: + if event.name == "gen_ai.conversation.item": + attrs = event.attributes + assert attrs is not None and isinstance(attrs, Mapping) + content = attrs.get("gen_ai.event.content") + assert isinstance(content, str) and content.strip() != "" + import json + + 
data = json.loads(content) + for entry in data: + parts = entry.get("parts") + if parts: + for part in parts: + if part.get("type") == "text": assert ( - "name" not in mcp_content - ), "name should NOT be present for mcp_call in conversation items when content recording is disabled" - else: - assert False, f"Unexpected event name in list_conversation_items span: {event.name}" + "content" not in part + ), "text content should NOT be present in conversation items when content recording is disabled" + elif part.get("type") == "mcp": + mcp_content = part.get("content") + if mcp_content and mcp_content.get("type") == "mcp_call": + assert ( + "name" not in mcp_content + ), "name should NOT be present for mcp_call in conversation items when content recording is disabled" + else: + assert False, f"Unexpected event name in list_conversation_items span: {event.name}" # Cleanup await openai_client.conversations.delete(conversation_id=conversation.id) finally: await project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version) + + @pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) + async def test_async_mcp_streaming_without_content_recording_events(self, **kwargs): + """Test asynchronous MCP agent with streaming and content recording disabled (event-based messages).""" + await self._test_async_mcp_streaming_without_content_recording_impl(True, **kwargs) + + @pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy_async(RecordedTransport.AZURE_CORE, RecordedTransport.HTTPX) + async def test_async_mcp_streaming_without_content_recording_attributes(self, **kwargs): + """Test asynchronous MCP agent with streaming and content recording disabled (attribute-based messages).""" + await self._test_async_mcp_streaming_without_content_recording_impl(False, **kwargs) diff --git 
a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow.py index 1ba8723af323..769e03b68a91 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow.py @@ -9,7 +9,11 @@ import os import pytest from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils -from azure.ai.projects.telemetry._utils import OPERATION_NAME_INVOKE_AGENT, SPAN_NAME_INVOKE_AGENT +from azure.ai.projects.telemetry._utils import ( + OPERATION_NAME_INVOKE_AGENT, + SPAN_NAME_INVOKE_AGENT, + _set_use_message_events, +) from azure.core.settings import settings from gen_ai_trace_verifier import GenAiTraceVerifier from devtools_testutils import recorded_by_proxy, RecordedTransport @@ -189,6 +193,7 @@ def _create_student_teacher_workflow(self, project_client, student_agent, teache def test_sync_workflow_non_streaming_with_content_recording(self, **kwargs): """Test synchronous workflow agent with non-streaming and content recording enabled.""" self.cleanup() + _set_use_message_events(True) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -353,6 +358,7 @@ def test_sync_workflow_non_streaming_with_content_recording(self, **kwargs): def test_sync_workflow_non_streaming_without_content_recording(self, **kwargs): """Test synchronous workflow agent with non-streaming and content recording disabled.""" self.cleanup() + _set_use_message_events(True) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -519,6 +525,7 @@ def test_sync_workflow_non_streaming_without_content_recording(self, **kwargs): def test_sync_workflow_streaming_with_content_recording(self, **kwargs): """Test synchronous workflow agent with streaming and content recording enabled.""" self.cleanup() + _set_use_message_events(True) os.environ.update( { 
CONTENT_TRACING_ENV_VARIABLE: "True", @@ -686,6 +693,7 @@ def test_sync_workflow_streaming_with_content_recording(self, **kwargs): def test_sync_workflow_streaming_without_content_recording(self, **kwargs): """Test synchronous workflow agent with streaming and content recording disabled.""" self.cleanup() + _set_use_message_events(True) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py index 392e9c1d62a1..d4b987418bf4 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py @@ -9,7 +9,11 @@ import os import pytest from azure.ai.projects.telemetry import AIProjectInstrumentor, _utils -from azure.ai.projects.telemetry._utils import OPERATION_NAME_INVOKE_AGENT, SPAN_NAME_INVOKE_AGENT +from azure.ai.projects.telemetry._utils import ( + OPERATION_NAME_INVOKE_AGENT, + SPAN_NAME_INVOKE_AGENT, + _set_use_message_events, +) from azure.core.settings import settings from gen_ai_trace_verifier import GenAiTraceVerifier from devtools_testutils.aio import recorded_by_proxy_async @@ -188,6 +192,7 @@ async def _create_student_teacher_workflow(self, project_client, student_agent, async def test_async_workflow_non_streaming_with_content_recording(self, **kwargs): """Test asynchronous workflow agent with non-streaming and content recording enabled.""" self.cleanup() + _set_use_message_events(True) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -348,6 +353,7 @@ async def test_async_workflow_non_streaming_with_content_recording(self, **kwarg async def test_async_workflow_non_streaming_without_content_recording(self, **kwargs): """Test asynchronous workflow agent with non-streaming and content recording disabled.""" 
self.cleanup() + _set_use_message_events(True) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", @@ -512,6 +518,7 @@ async def test_async_workflow_non_streaming_without_content_recording(self, **kw async def test_async_workflow_streaming_with_content_recording(self, **kwargs): """Test asynchronous workflow agent with streaming and content recording enabled.""" self.cleanup() + _set_use_message_events(True) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "True", @@ -677,6 +684,7 @@ async def test_async_workflow_streaming_with_content_recording(self, **kwargs): async def test_async_workflow_streaming_without_content_recording(self, **kwargs): """Test asynchronous workflow agent with streaming and content recording disabled.""" self.cleanup() + _set_use_message_events(True) os.environ.update( { CONTENT_TRACING_ENV_VARIABLE: "False", From 80343387fd746238037d33f0d4193674293d17fd Mon Sep 17 00:00:00 2001 From: M-Hietala <78813398+M-Hietala@users.noreply.github.com> Date: Tue, 3 Feb 2026 12:31:15 -0600 Subject: [PATCH 03/10] updating assets.json --- sdk/ai/azure-ai-projects/assets.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/ai/azure-ai-projects/assets.json b/sdk/ai/azure-ai-projects/assets.json index cf8a8fe386d2..b08a3a7726d6 100644 --- a/sdk/ai/azure-ai-projects/assets.json +++ b/sdk/ai/azure-ai-projects/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "python", "TagPrefix": "python/ai/azure-ai-projects", - "Tag": "python/ai/azure-ai-projects_7cddb7d06f" + "Tag": "python/ai/azure-ai-projects_4fb2407dfd" } From 6826247a13db5a192d8a4003c48160de018f2480 Mon Sep 17 00:00:00 2001 From: M-Hietala <78813398+M-Hietala@users.noreply.github.com> Date: Wed, 4 Feb 2026 09:53:57 -0600 Subject: [PATCH 04/10] change agent creation system instructions from event to attribute --- sdk/ai/azure-ai-projects/CHANGELOG.md | 1 + .../telemetry/_ai_project_instrumentor.py | 88 +- 
.../azure/ai/projects/telemetry/_utils.py | 2 +- .../telemetry/test_ai_agents_instrumentor.py | 789 +++++++++-------- .../test_ai_agents_instrumentor_async.py | 797 +++++++++--------- 5 files changed, 899 insertions(+), 778 deletions(-) diff --git a/sdk/ai/azure-ai-projects/CHANGELOG.md b/sdk/ai/azure-ai-projects/CHANGELOG.md index cd112dbed28b..9f73891461c9 100644 --- a/sdk/ai/azure-ai-projects/CHANGELOG.md +++ b/sdk/ai/azure-ai-projects/CHANGELOG.md @@ -25,6 +25,7 @@ * Tracing: response generation span names changed from "responses {model_name}" to "chat {model_name}" for model calls and from "responses {agent_name}" to "invoke_agent {agent_name}" for agent calls. * Tracing: response generation operation names changed from "responses" to "chat" for model calls and from "responses" to "invoke_agent" for agent calls. * Tracing: response generation uses gen_ai.input.messages and gen_ai.output.messages attributes directly under the span instead of events. +* Tracing: agent creation uses gen_ai.system.instructions attribute directly under the span instead of an event. 
## 2.0.0b3 (2026-01-06) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_ai_project_instrumentor.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_ai_project_instrumentor.py index ba9a5da65c3a..164868fd8548 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_ai_project_instrumentor.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_ai_project_instrumentor.py @@ -27,9 +27,11 @@ GEN_AI_AGENT_ID, GEN_AI_AGENT_NAME, GEN_AI_EVENT_CONTENT, + GEN_AI_INPUT_MESSAGES, GEN_AI_MESSAGE_ID, GEN_AI_MESSAGE_STATUS, GEN_AI_OPERATION_NAME, + GEN_AI_OUTPUT_MESSAGES, GEN_AI_PROVIDER_NAME, GEN_AI_SYSTEM_MESSAGE, GEN_AI_THREAD_ID, @@ -53,6 +55,7 @@ ERROR_MESSAGE, OperationName, start_span, + _get_use_message_events, ) from ._responses_instrumentor import _ResponsesInstrumentorPreview @@ -423,30 +426,52 @@ def _add_message_event( # No metadata, use content array directly as the event data event_data = content_array if isinstance(content_array, list) else {} - attributes = self._create_event_attributes( - thread_id=thread_id, - agent_id=agent_id, - thread_run_id=thread_run_id, - message_id=message_id, - message_status=message_status, - usage=usage, - ) - # Store as JSON - either array or object depending on metadata - if not metadata and content_array: - attributes[GEN_AI_EVENT_CONTENT] = json.dumps(content_array, ensure_ascii=False) - else: - attributes[GEN_AI_EVENT_CONTENT] = json.dumps(event_data, ensure_ascii=False) + use_events = _get_use_message_events() + + if use_events: + # Use events for message tracing + attributes = self._create_event_attributes( + thread_id=thread_id, + agent_id=agent_id, + thread_run_id=thread_run_id, + message_id=message_id, + message_status=message_status, + usage=usage, + ) + # Store as JSON - either array or object depending on metadata + if not metadata and content_array: + attributes[GEN_AI_EVENT_CONTENT] = json.dumps(content_array, ensure_ascii=False) + else: + attributes[GEN_AI_EVENT_CONTENT] = 
json.dumps(event_data, ensure_ascii=False) + + event_name = None + if role == "user": + event_name = "gen_ai.input.message" + elif role == "system": + event_name = "gen_ai.system_instruction" + else: + event_name = "gen_ai.input.message" - event_name = None - if role == "user": - event_name = "gen_ai.input.message" - elif role == "system": - event_name = "gen_ai.system_instruction" + span.span_instance.add_event(name=event_name, attributes=attributes) else: - event_name = "gen_ai.input.message" + # Use attributes for message tracing + # Prepare message content as JSON + message_json = json.dumps( + [{"role": role, "parts": content_array}] if content_array else [{"role": role}], ensure_ascii=False + ) + + # Determine which attribute to use based on role + if role == "user": + attribute_name = GEN_AI_INPUT_MESSAGES + elif role == "assistant": + attribute_name = GEN_AI_OUTPUT_MESSAGES + else: + # Default to input messages for other roles (including system) + attribute_name = GEN_AI_INPUT_MESSAGES - # span.span_instance.add_event(name=f"gen_ai.{role}.message", attributes=attributes) - span.span_instance.add_event(name=event_name, attributes=attributes) + # Set the attribute on the span + if span and span.span_instance.is_recording: + span.add_attribute(attribute_name, message_json) def _get_field(self, obj: Any, field: str) -> Any: if not obj: @@ -502,10 +527,23 @@ def _add_instructions_event( content_array.append({"type": "response_schema", "content": schema_str}) - attributes = self._create_event_attributes(agent_id=agent_id, thread_id=thread_id) - # Store as JSON array directly without outer wrapper - attributes[GEN_AI_EVENT_CONTENT] = json.dumps(content_array, ensure_ascii=False) - span.span_instance.add_event(name=GEN_AI_SYSTEM_MESSAGE, attributes=attributes) + use_events = _get_use_message_events() + + if use_events: + # Use events for instructions tracing + attributes = self._create_event_attributes(agent_id=agent_id, thread_id=thread_id) + # Store as JSON array 
directly without outer wrapper + attributes[GEN_AI_EVENT_CONTENT] = json.dumps(content_array, ensure_ascii=False) + span.span_instance.add_event(name=GEN_AI_SYSTEM_MESSAGE, attributes=attributes) + else: + # Use attributes for instructions tracing + # System instructions use the same attribute name as the event + message_json = json.dumps( + [{"role": "system", "parts": content_array}] if content_array else [{"role": "system"}], + ensure_ascii=False, + ) + if span and span.span_instance.is_recording: + span.add_attribute(GEN_AI_SYSTEM_MESSAGE, message_json) def _status_to_string(self, status: Any) -> str: return status.value if hasattr(status, "value") else status diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_utils.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_utils.py index 87729c3039f7..4f5e713227bb 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_utils.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_utils.py @@ -123,7 +123,7 @@ def _get_use_message_events() -> bool: """Get the current message tracing mode (events vs attributes). Internal use only. 
- + :return: True if using events, False if using attributes :rtype: bool """ diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor.py index fe1f1ae1c27b..9fcade517890 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor.py @@ -54,6 +54,7 @@ AZURE_AI_AGENTS_PROVIDER, AGENT_TYPE_PROMPT, AGENT_TYPE_WORKFLOW, + _set_use_message_events, ) settings.tracing_implementation = "OpenTelemetry" @@ -182,11 +183,14 @@ def set_env_var(var_name, value): self.cleanup() # This also undefines CONTENT_TRACING_ENV_VARIABLE os.environ.pop(OLD_CONTENT_TRACING_ENV_VARIABLE, None) - @pytest.mark.usefixtures("instrument_with_content") - @servicePreparer() - @recorded_by_proxy - def test_agent_creation_with_tracing_content_recording_enabled(self, **kwargs): + def _test_agent_creation_with_tracing_content_recording_enabled_impl(self, use_events: bool, **kwargs): + """Implementation for agent creation with content recording enabled test. + + :param use_events: If True, use events for messages. If False, use attributes. + :type use_events: bool + """ self.cleanup() + _set_use_message_events(use_events) os.environ.update({CONTENT_TRACING_ENV_VARIABLE: "True"}) self.setup_telemetry() assert True == AIProjectInstrumentor().is_content_recording_enabled() @@ -202,36 +206,6 @@ def test_agent_creation_with_tracing_content_recording_enabled(self, **kwargs): model=model, # Optional parameters instructions="You are a helpful AI assistant. 
Be polite and provide accurate information.", - # temperature=0.7, - # top_p=0.9, - # # Reasoning configuration - # reasoning=Reasoning( - # effort="medium", - # summary="auto", - # ), - # # Tools that the model can use - # tools=[ - # # Function tool for custom functions - # FunctionTool( - # name="get_weather", - # description="Get the current weather for a location", - # parameters={ - # "type": "object", - # "properties": { - # "location": {"type": "string", "description": "The city and state, e.g. San Francisco, CA"}, - # "unit": { - # "type": "string", - # "enum": ["celsius", "fahrenheit"], - # "description": "The temperature unit", - # }, - # }, - # "required": ["location"], - # }, - # strict=True, # Enforce strict parameter validation - # ), - # ], - # # Text response configuration - # text=PromptAgentDefinitionText(format=ResponseTextFormatConfigurationText()), ) agent = project_client.agents.create_version(agent_name="myagent", definition=agent_definition) @@ -251,36 +225,94 @@ def test_agent_creation_with_tracing_content_recording_enabled(self, **kwargs): (GEN_AI_OPERATION_NAME, "create_agent"), (SERVER_ADDRESS, ""), (GEN_AI_REQUEST_MODEL, model), - # ("gen_ai.request.temperature", "0.7"), - # ("gen_ai.request.top_p", "0.9"), - # ("gen_ai.request.response_format", "text"), - # ("gen_ai.request.reasoning.effort", "medium"), - # ("gen_ai.request.reasoning.summary", "auto"), (GEN_AI_AGENT_NAME, "myagent"), (GEN_AI_AGENT_ID, "myagent:" + str(version)), (GEN_AI_AGENT_VERSION, str(version)), (GEN_AI_AGENT_TYPE, AGENT_TYPE_PROMPT), ] + + # When using attributes, add the system instructions attribute to expected list + if not use_events: + from azure.ai.projects.telemetry._utils import GEN_AI_SYSTEM_MESSAGE + import json + + expected_system_message = json.dumps( + [ + { + "role": "system", + "parts": [ + { + "type": "text", + "content": "You are a helpful AI assistant. 
Be polite and provide accurate information.", + } + ], + } + ], + ensure_ascii=False, + ) + expected_attributes.append((GEN_AI_SYSTEM_MESSAGE, expected_system_message)) + attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) assert attributes_match == True - expected_events = [ - { - "name": GEN_AI_SYSTEM_INSTRUCTION_EVENT, - "attributes": { - GEN_AI_PROVIDER_NAME: AZURE_AI_AGENTS_PROVIDER, - GEN_AI_EVENT_CONTENT: '[{"type": "text", "content": "You are a helpful AI assistant. Be polite and provide accurate information."}]', - }, - } - ] - events_match = GenAiTraceVerifier().check_span_events(span, expected_events) - assert events_match == True + if use_events: + # When using events, check for events + expected_events = [ + { + "name": GEN_AI_SYSTEM_INSTRUCTION_EVENT, + "attributes": { + GEN_AI_PROVIDER_NAME: AZURE_AI_AGENTS_PROVIDER, + GEN_AI_EVENT_CONTENT: '[{"type": "text", "content": "You are a helpful AI assistant. Be polite and provide accurate information."}]', + }, + } + ] + events_match = GenAiTraceVerifier().check_span_events(span, expected_events) + assert events_match == True + else: + # When using attributes, check for gen_ai.system.instructions attribute + from azure.ai.projects.telemetry._utils import GEN_AI_SYSTEM_MESSAGE + import json + + assert span.attributes is not None + assert GEN_AI_SYSTEM_MESSAGE in span.attributes + + system_message_json = span.attributes[GEN_AI_SYSTEM_MESSAGE] + system_message = json.loads(system_message_json) + + # Verify structure + assert isinstance(system_message, list) + assert len(system_message) == 1 + assert system_message[0]["role"] == "system" + assert "parts" in system_message[0] + assert len(system_message[0]["parts"]) == 1 + assert system_message[0]["parts"][0]["type"] == "text" + assert ( + system_message[0]["parts"][0]["content"] + == "You are a helpful AI assistant. Be polite and provide accurate information." 
+ ) - @pytest.mark.usefixtures("instrument_without_content") + @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy - def test_agent_creation_with_tracing_content_recording_disabled(self, **kwargs): + def test_agent_creation_with_tracing_content_recording_enabled(self, **kwargs): + """Test agent creation with content recording enabled using events.""" + self._test_agent_creation_with_tracing_content_recording_enabled_impl(use_events=True, **kwargs) + + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy + def test_agent_creation_with_tracing_content_recording_enabled_with_attributes(self, **kwargs): + """Test agent creation with content recording enabled using attributes.""" + self._test_agent_creation_with_tracing_content_recording_enabled_impl(use_events=False, **kwargs) + + def _test_agent_creation_with_tracing_content_recording_disabled_impl(self, use_events: bool, **kwargs): + """Implementation for agent creation with content recording disabled test. + + :param use_events: If True, use events for messages. If False, use attributes. + :type use_events: bool + """ self.cleanup() + _set_use_message_events(use_events) os.environ.update({CONTENT_TRACING_ENV_VARIABLE: "False"}) self.setup_telemetry() assert False == AIProjectInstrumentor().is_content_recording_enabled() @@ -290,46 +322,13 @@ def test_agent_creation_with_tracing_content_recording_disabled(self, **kwargs): model = kwargs.get("azure_ai_model_deployment_name") agent_definition = PromptAgentDefinition( - # Required parameter model=model, - # Optional parameters instructions="You are a helpful AI assistant. 
Always be polite and provide accurate information.", - # temperature=0.7, - # top_p=0.9, - # # Reasoning configuration - # reasoning=Reasoning( - # effort="medium", - # summary="auto", - # ), - # # Tools that the model can use - # tools=[ - # # Function tool for custom functions - # FunctionTool( - # name="get_weather", - # description="Get the current weather for a location", - # parameters={ - # "type": "object", - # "properties": { - # "location": {"type": "string", "description": "The city and state, e.g. San Francisco, CA"}, - # "unit": { - # "type": "string", - # "enum": ["celsius", "fahrenheit"], - # "description": "The temperature unit", - # }, - # }, - # "required": ["location"], - # }, - # strict=True, # Enforce strict parameter validation - # ), - # ], - # Text response configuration - # text=PromptAgentDefinitionText(format=ResponseTextFormatConfigurationText()), ) agent = project_client.agents.create_version(agent_name="myagent", definition=agent_definition) version = agent.version - # delete agent and close client project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version) print("Deleted agent") @@ -343,115 +342,87 @@ def test_agent_creation_with_tracing_content_recording_disabled(self, **kwargs): (GEN_AI_OPERATION_NAME, "create_agent"), (SERVER_ADDRESS, ""), (GEN_AI_REQUEST_MODEL, model), - # ("gen_ai.request.temperature", "0.7"), - # ("gen_ai.request.top_p", "0.9"), - # ("gen_ai.request.response_format", "text"), - # ("gen_ai.request.reasoning.effort", "medium"), - # ("gen_ai.request.reasoning.summary", "auto"), (GEN_AI_AGENT_NAME, "myagent"), (GEN_AI_AGENT_ID, "myagent:" + str(version)), (GEN_AI_AGENT_VERSION, str(version)), (GEN_AI_AGENT_TYPE, AGENT_TYPE_PROMPT), ] - attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) - assert attributes_match == True - expected_events = [ - { - "name": GEN_AI_SYSTEM_INSTRUCTION_EVENT, - "attributes": { - GEN_AI_PROVIDER_NAME: 
AZURE_AI_AGENTS_PROVIDER, - GEN_AI_EVENT_CONTENT: "[]", - }, - } - ] - events_match = GenAiTraceVerifier().check_span_events(span, expected_events) - assert events_match == True + # When using attributes (regardless of content recording), add system message attribute + # When content recording is disabled, it will have empty content (just role) + if not use_events: + from azure.ai.projects.telemetry._utils import GEN_AI_SYSTEM_MESSAGE + import json - @pytest.mark.usefixtures("instrument_with_content") - @servicePreparer() - @recorded_by_proxy - def test_workflow_agent_creation_with_tracing_content_recording_enabled(self, **kwargs): - """Test workflow agent creation with content recording enabled.""" - self.cleanup() - os.environ.update({CONTENT_TRACING_ENV_VARIABLE: "True"}) - self.setup_telemetry() - assert True == AIProjectInstrumentor().is_content_recording_enabled() - assert True == AIProjectInstrumentor().is_instrumented() - - from azure.ai.projects.models import WorkflowAgentDefinition - - with self.create_client(operation_group="tracing", **kwargs) as project_client: + # Empty system message (no parts, just role) + expected_system_message = json.dumps([{"role": "system"}], ensure_ascii=False) + expected_attributes.append((GEN_AI_SYSTEM_MESSAGE, expected_system_message)) - workflow_yaml = """ -kind: workflow -trigger: - kind: OnConversationStart - id: test_workflow - actions: - - kind: SetVariable - id: set_variable - variable: Local.TestVar - value: "test" -""" - - agent = project_client.agents.create_version( - agent_name="test-workflow-agent", - definition=WorkflowAgentDefinition(workflow=workflow_yaml), - ) - version = agent.version - - # delete agent - project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version) - print("Deleted workflow agent") - - # ------------------------- Validate "create_agent" span --------------------------------- - self.exporter.force_flush() - spans = self.exporter.get_spans_by_name("create_agent 
test-workflow-agent") - assert len(spans) == 1 - span = spans[0] - expected_attributes = [ - (GEN_AI_PROVIDER_NAME, AZURE_AI_AGENTS_PROVIDER), - (GEN_AI_OPERATION_NAME, "create_agent"), - (SERVER_ADDRESS, ""), - (GEN_AI_AGENT_NAME, "test-workflow-agent"), - (GEN_AI_AGENT_ID, "test-workflow-agent:" + str(version)), - (GEN_AI_AGENT_VERSION, str(version)), - (GEN_AI_AGENT_TYPE, AGENT_TYPE_WORKFLOW), - ] attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) assert attributes_match == True - # Verify workflow event with standard content format - events = span.events - assert len(events) == 1 - workflow_event = events[0] - assert workflow_event.name == GEN_AI_AGENT_WORKFLOW_EVENT + if use_events: + expected_events = [ + { + "name": GEN_AI_SYSTEM_INSTRUCTION_EVENT, + "attributes": { + GEN_AI_PROVIDER_NAME: AZURE_AI_AGENTS_PROVIDER, + GEN_AI_EVENT_CONTENT: "[]", + }, + } + ] + events_match = GenAiTraceVerifier().check_span_events(span, expected_events) + assert events_match == True + else: + # When using attributes and content recording disabled, verify empty structure + from azure.ai.projects.telemetry._utils import GEN_AI_SYSTEM_MESSAGE + import json + + assert span.attributes is not None + assert GEN_AI_SYSTEM_MESSAGE in span.attributes + + system_message_json = span.attributes[GEN_AI_SYSTEM_MESSAGE] + system_message = json.loads(system_message_json) + # Should have empty content (just role, no parts) + assert isinstance(system_message, list) + assert len(system_message) == 1 + assert system_message[0]["role"] == "system" + # No parts field when content recording is disabled + assert "parts" not in system_message[0] - import json - - event_content = json.loads(workflow_event.attributes[GEN_AI_EVENT_CONTENT]) - # New optimized format: direct array with "content" field for workflow YAML - assert isinstance(event_content, list) - assert len(event_content) == 1 - assert event_content[0]["type"] == "workflow" - assert "content" in 
event_content[0] - assert "kind: workflow" in event_content[0]["content"] + @pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy + def test_agent_creation_with_tracing_content_recording_disabled(self, **kwargs): + """Test agent creation with content recording disabled using events.""" + self._test_agent_creation_with_tracing_content_recording_disabled_impl(use_events=True, **kwargs) @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy - def test_workflow_agent_creation_with_tracing_content_recording_disabled(self, **kwargs): - """Test workflow agent creation with content recording disabled.""" + def test_agent_creation_with_tracing_content_recording_disabled_with_attributes(self, **kwargs): + """Test agent creation with content recording disabled using attributes.""" + self._test_agent_creation_with_tracing_content_recording_disabled_impl(use_events=False, **kwargs) + + def _test_workflow_agent_creation_impl(self, use_events: bool, content_recording_enabled: bool, **kwargs): + """Implementation for workflow agent creation test. + + :param use_events: If True, use events for messages. If False, use attributes. + :type use_events: bool + :param content_recording_enabled: Whether content recording is enabled. 
+ :type content_recording_enabled: bool + """ self.cleanup() - os.environ.update({CONTENT_TRACING_ENV_VARIABLE: "False"}) + _set_use_message_events(use_events) + os.environ.update({CONTENT_TRACING_ENV_VARIABLE: "True" if content_recording_enabled else "False"}) self.setup_telemetry() - assert False == AIProjectInstrumentor().is_content_recording_enabled() + assert content_recording_enabled == AIProjectInstrumentor().is_content_recording_enabled() assert True == AIProjectInstrumentor().is_instrumented() from azure.ai.projects.models import WorkflowAgentDefinition - with self.create_client(operation_group="agents", **kwargs) as project_client: + operation_group = "tracing" if content_recording_enabled else "agents" + with self.create_client(operation_group=operation_group, **kwargs) as project_client: workflow_yaml = """ kind: workflow @@ -471,7 +442,6 @@ def test_workflow_agent_creation_with_tracing_content_recording_disabled(self, * ) version = agent.version - # delete agent project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version) print("Deleted workflow agent") @@ -492,120 +462,84 @@ def test_workflow_agent_creation_with_tracing_content_recording_disabled(self, * attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) assert attributes_match == True - # Verify workflow event is present but content is empty when content recording is disabled - events = span.events - assert len(events) == 1 - workflow_event = events[0] - assert workflow_event.name == GEN_AI_AGENT_WORKFLOW_EVENT + if use_events: + # Verify workflow event + events = span.events + assert len(events) == 1 + workflow_event = events[0] + assert workflow_event.name == GEN_AI_AGENT_WORKFLOW_EVENT - import json + import json - event_content = json.loads(workflow_event.attributes[GEN_AI_EVENT_CONTENT]) - # When content recording is disabled, event should be an empty array - assert isinstance(event_content, list) - assert len(event_content) == 0 
+ event_content = json.loads(workflow_event.attributes[GEN_AI_EVENT_CONTENT]) + assert isinstance(event_content, list) + + if content_recording_enabled: + assert len(event_content) == 1 + assert event_content[0]["type"] == "workflow" + assert "content" in event_content[0] + assert "kind: workflow" in event_content[0]["content"] + else: + # When content recording is disabled, event should be empty + assert len(event_content) == 0 + else: + # When using attributes, workflow events are still sent as events (not attributes) + # So we still validate events, but this is mainly for consistency + events = span.events + assert len(events) == 1 + workflow_event = events[0] + assert workflow_event.name == GEN_AI_AGENT_WORKFLOW_EVENT @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy - def test_agent_with_structured_output_with_instructions_content_recording_enabled(self, **kwargs): - """Test agent creation with structured output and instructions, content recording enabled.""" - self.cleanup() - os.environ.update({CONTENT_TRACING_ENV_VARIABLE: "True"}) - self.setup_telemetry() - assert True == AIProjectInstrumentor().is_content_recording_enabled() - assert True == AIProjectInstrumentor().is_instrumented() - - from azure.ai.projects.models import TextResponseFormatJsonSchema - - with self.create_client(operation_group="tracing", **kwargs) as project_client: - - model = kwargs.get("azure_ai_model_deployment_name") - - # Define a JSON schema for structured output - test_schema = { - "type": "object", - "properties": { - "name": {"type": "string"}, - "age": {"type": "number"}, - }, - "required": ["name", "age"], - } - - agent_definition = PromptAgentDefinition( - model=model, - instructions="You are a helpful assistant that extracts person information.", - text=PromptAgentDefinitionText( - format=TextResponseFormatJsonSchema( - name="PersonInfo", - schema=test_schema, - ) - ), - ) - - agent = 
project_client.agents.create_version(agent_name="structured-agent", definition=agent_definition) - version = agent.version - - project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version) - - # Validate span - self.exporter.force_flush() - spans = self.exporter.get_spans_by_name("create_agent structured-agent") - assert len(spans) == 1 - span = spans[0] - - expected_attributes = [ - (GEN_AI_PROVIDER_NAME, AZURE_AI_AGENTS_PROVIDER), - (GEN_AI_OPERATION_NAME, "create_agent"), - (SERVER_ADDRESS, ""), - (GEN_AI_REQUEST_MODEL, model), - ("gen_ai.request.response_format", "json_schema"), - (GEN_AI_AGENT_NAME, "structured-agent"), - (GEN_AI_AGENT_ID, "structured-agent:" + str(version)), - (GEN_AI_AGENT_VERSION, str(version)), - (GEN_AI_AGENT_TYPE, AGENT_TYPE_PROMPT), - ] - attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) - assert attributes_match == True - - # Verify event contains both instructions and schema - events = span.events - assert len(events) == 1 - instruction_event = events[0] - assert instruction_event.name == GEN_AI_SYSTEM_INSTRUCTION_EVENT - - import json - - event_content = json.loads(instruction_event.attributes[GEN_AI_EVENT_CONTENT]) - assert isinstance(event_content, list) - assert len(event_content) == 2 # Both instructions and schema + def test_workflow_agent_creation_with_tracing_content_recording_enabled(self, **kwargs): + """Test workflow agent creation with content recording enabled using events.""" + self._test_workflow_agent_creation_impl(use_events=True, content_recording_enabled=True, **kwargs) - # Check instructions content - assert event_content[0]["type"] == "text" - assert "helpful assistant" in event_content[0]["content"] + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy + def test_workflow_agent_creation_with_tracing_content_recording_enabled_with_attributes(self, **kwargs): + """Test workflow agent creation with content 
recording enabled using attributes.""" + self._test_workflow_agent_creation_impl(use_events=False, content_recording_enabled=True, **kwargs) - # Check schema content - assert event_content[1]["type"] == "response_schema" - schema_str = event_content[1]["content"] - schema_obj = json.loads(schema_str) - assert schema_obj["type"] == "object" - assert "name" in schema_obj["properties"] - assert "age" in schema_obj["properties"] + @pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy + def test_workflow_agent_creation_with_tracing_content_recording_disabled(self, **kwargs): + """Test workflow agent creation with content recording disabled using events.""" + self._test_workflow_agent_creation_impl(use_events=True, content_recording_enabled=False, **kwargs) @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy - def test_agent_with_structured_output_with_instructions_content_recording_disabled(self, **kwargs): - """Test agent creation with structured output and instructions, content recording disabled.""" + def test_workflow_agent_creation_with_tracing_content_recording_disabled_with_attributes(self, **kwargs): + """Test workflow agent creation with content recording disabled using attributes.""" + self._test_workflow_agent_creation_impl(use_events=False, content_recording_enabled=False, **kwargs) + + def _test_agent_with_structured_output_with_instructions_impl( + self, use_events: bool, content_recording_enabled: bool, **kwargs + ): + """Implementation for agent with structured output and instructions test. + + :param use_events: If True, use events for messages. If False, use attributes. + :type use_events: bool + :param content_recording_enabled: Whether content recording is enabled. 
+ :type content_recording_enabled: bool + """ self.cleanup() - os.environ.update({CONTENT_TRACING_ENV_VARIABLE: "False"}) + _set_use_message_events(use_events) + os.environ.update({CONTENT_TRACING_ENV_VARIABLE: "True" if content_recording_enabled else "False"}) self.setup_telemetry() - assert False == AIProjectInstrumentor().is_content_recording_enabled() + assert content_recording_enabled == AIProjectInstrumentor().is_content_recording_enabled() assert True == AIProjectInstrumentor().is_instrumented() from azure.ai.projects.models import TextResponseFormatJsonSchema + import json - with self.create_client(operation_group="agents", **kwargs) as project_client: + operation_group = "tracing" if content_recording_enabled else "agents" + with self.create_client(operation_group=operation_group, **kwargs) as project_client: model = kwargs.get("azure_ai_model_deployment_name") @@ -651,35 +585,145 @@ def test_agent_with_structured_output_with_instructions_content_recording_disabl (GEN_AI_AGENT_VERSION, str(version)), (GEN_AI_AGENT_TYPE, AGENT_TYPE_PROMPT), ] + + # When using attributes, add system message attribute (with or without content based on content_recording_enabled) + if not use_events: + from azure.ai.projects.telemetry._utils import GEN_AI_SYSTEM_MESSAGE + + if content_recording_enabled: + expected_system_msg = json.dumps( + [ + { + "role": "system", + "parts": [ + { + "type": "text", + "content": "You are a helpful assistant that extracts person information.", + }, + {"type": "response_schema", "content": json.dumps(test_schema)}, + ], + } + ], + ensure_ascii=False, + ) + else: + # When content recording disabled, attribute has empty structure + expected_system_msg = json.dumps([{"role": "system"}], ensure_ascii=False) + expected_attributes.append((GEN_AI_SYSTEM_MESSAGE, expected_system_msg)) + attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) assert attributes_match == True - # When content recording is disabled, event 
should be empty - events = span.events - assert len(events) == 1 - instruction_event = events[0] - assert instruction_event.name == GEN_AI_SYSTEM_INSTRUCTION_EVENT - - import json + if use_events: + events = span.events + assert len(events) == 1 + instruction_event = events[0] + assert instruction_event.name == GEN_AI_SYSTEM_INSTRUCTION_EVENT + + event_content = json.loads(instruction_event.attributes[GEN_AI_EVENT_CONTENT]) + assert isinstance(event_content, list) + + if content_recording_enabled: + assert len(event_content) == 2 # Both instructions and schema + assert event_content[0]["type"] == "text" + assert "helpful assistant" in event_content[0]["content"] + assert event_content[1]["type"] == "response_schema" + schema_str = event_content[1]["content"] + schema_obj = json.loads(schema_str) + assert schema_obj["type"] == "object" + assert "name" in schema_obj["properties"] + assert "age" in schema_obj["properties"] + else: + assert len(event_content) == 0 # Empty when content recording disabled + else: + # When using attributes, verify attribute + from azure.ai.projects.telemetry._utils import GEN_AI_SYSTEM_MESSAGE + + assert span.attributes is not None + assert GEN_AI_SYSTEM_MESSAGE in span.attributes + + system_message_json = span.attributes[GEN_AI_SYSTEM_MESSAGE] + system_message = json.loads(system_message_json) + + assert isinstance(system_message, list) + assert len(system_message) == 1 + assert system_message[0]["role"] == "system" + + if content_recording_enabled: + assert "parts" in system_message[0] + assert len(system_message[0]["parts"]) == 2 + + # Check instruction part + assert system_message[0]["parts"][0]["type"] == "text" + assert "helpful assistant" in system_message[0]["parts"][0]["content"] + + # Check schema part + assert system_message[0]["parts"][1]["type"] == "response_schema" + schema_obj = json.loads(system_message[0]["parts"][1]["content"]) + assert schema_obj["type"] == "object" + assert "name" in schema_obj["properties"] + else: + 
# When content recording disabled, no parts field + assert "parts" not in system_message[0] - event_content = json.loads(instruction_event.attributes[GEN_AI_EVENT_CONTENT]) - assert isinstance(event_content, list) - assert len(event_content) == 0 # Empty when content recording disabled + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy + def test_agent_with_structured_output_with_instructions_content_recording_enabled(self, **kwargs): + """Test agent creation with structured output and instructions, content recording enabled using events.""" + self._test_agent_with_structured_output_with_instructions_impl( + use_events=True, content_recording_enabled=True, **kwargs + ) @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy - def test_agent_with_structured_output_without_instructions_content_recording_enabled(self, **kwargs): - """Test agent creation with structured output but NO instructions, content recording enabled.""" + def test_agent_with_structured_output_with_instructions_content_recording_enabled_with_attributes(self, **kwargs): + """Test agent creation with structured output and instructions, content recording enabled using attributes.""" + self._test_agent_with_structured_output_with_instructions_impl( + use_events=False, content_recording_enabled=True, **kwargs + ) + + @pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy + def test_agent_with_structured_output_with_instructions_content_recording_disabled(self, **kwargs): + """Test agent creation with structured output and instructions, content recording disabled using events.""" + self._test_agent_with_structured_output_with_instructions_impl( + use_events=True, content_recording_enabled=False, **kwargs + ) + + @pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy + def 
test_agent_with_structured_output_with_instructions_content_recording_disabled_with_attributes(self, **kwargs): + """Test agent creation with structured output and instructions, content recording disabled using attributes.""" + self._test_agent_with_structured_output_with_instructions_impl( + use_events=False, content_recording_enabled=False, **kwargs + ) + + def _test_agent_with_structured_output_without_instructions_impl( + self, use_events: bool, content_recording_enabled: bool, **kwargs + ): + """Implementation for agent with structured output but NO instructions test. + + :param use_events: If True, use events for messages. If False, use attributes. + :type use_events: bool + :param content_recording_enabled: Whether content recording is enabled. + :type content_recording_enabled: bool + """ self.cleanup() - os.environ.update({CONTENT_TRACING_ENV_VARIABLE: "True"}) + _set_use_message_events(use_events) + os.environ.update({CONTENT_TRACING_ENV_VARIABLE: "True" if content_recording_enabled else "False"}) self.setup_telemetry() - assert True == AIProjectInstrumentor().is_content_recording_enabled() + assert content_recording_enabled == AIProjectInstrumentor().is_content_recording_enabled() assert True == AIProjectInstrumentor().is_instrumented() from azure.ai.projects.models import TextResponseFormatJsonSchema + import json - with self.create_client(operation_group="tracing", **kwargs) as project_client: + operation_group = "tracing" if content_recording_enabled else "agents" + with self.create_client(operation_group=operation_group, **kwargs) as project_client: model = kwargs.get("azure_ai_model_deployment_name") @@ -693,7 +737,6 @@ def test_agent_with_structured_output_without_instructions_content_recording_ena agent_definition = PromptAgentDefinition( model=model, - # No instructions provided text=PromptAgentDefinitionText( format=TextResponseFormatJsonSchema( name="Result", @@ -726,99 +769,105 @@ def 
test_agent_with_structured_output_without_instructions_content_recording_ena (GEN_AI_AGENT_VERSION, str(version)), (GEN_AI_AGENT_TYPE, AGENT_TYPE_PROMPT), ] + + # When using attributes, add system message attribute (with or without content based on content_recording_enabled) + if not use_events: + from azure.ai.projects.telemetry._utils import GEN_AI_SYSTEM_MESSAGE + + if content_recording_enabled: + expected_system_msg = json.dumps( + [{"role": "system", "parts": [{"type": "response_schema", "content": json.dumps(test_schema)}]}], + ensure_ascii=False, + ) + else: + # When content recording disabled, attribute has empty structure + expected_system_msg = json.dumps([{"role": "system"}], ensure_ascii=False) + expected_attributes.append((GEN_AI_SYSTEM_MESSAGE, expected_system_msg)) + attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) assert attributes_match == True - # Event should be created with just the schema (no instructions) - events = span.events - assert len(events) == 1 - instruction_event = events[0] - assert instruction_event.name == GEN_AI_SYSTEM_INSTRUCTION_EVENT - - import json + if use_events: + events = span.events + assert len(events) == 1 + instruction_event = events[0] + assert instruction_event.name == GEN_AI_SYSTEM_INSTRUCTION_EVENT + + event_content = json.loads(instruction_event.attributes[GEN_AI_EVENT_CONTENT]) + assert isinstance(event_content, list) + + if content_recording_enabled: + assert len(event_content) == 1 # Only schema, no instructions + assert event_content[0]["type"] == "response_schema" + schema_str = event_content[0]["content"] + schema_obj = json.loads(schema_str) + assert schema_obj["type"] == "object" + assert "result" in schema_obj["properties"] + else: + assert len(event_content) == 0 # Empty when content recording disabled + else: + # When using attributes, verify attribute + from azure.ai.projects.telemetry._utils import GEN_AI_SYSTEM_MESSAGE + + assert span.attributes is not None + 
assert GEN_AI_SYSTEM_MESSAGE in span.attributes + + system_message_json = span.attributes[GEN_AI_SYSTEM_MESSAGE] + system_message = json.loads(system_message_json) + + assert isinstance(system_message, list) + assert len(system_message) == 1 + assert system_message[0]["role"] == "system" + + if content_recording_enabled: + assert "parts" in system_message[0] + assert len(system_message[0]["parts"]) == 1 + + # Check schema part + assert system_message[0]["parts"][0]["type"] == "response_schema" + schema_obj = json.loads(system_message[0]["parts"][0]["content"]) + assert schema_obj["type"] == "object" + assert "result" in schema_obj["properties"] + else: + # When content recording disabled, no parts field + assert "parts" not in system_message[0] - event_content = json.loads(instruction_event.attributes[GEN_AI_EVENT_CONTENT]) - assert isinstance(event_content, list) - assert len(event_content) == 1 # Only schema, no instructions + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy + def test_agent_with_structured_output_without_instructions_content_recording_enabled(self, **kwargs): + """Test agent creation with structured output but NO instructions, content recording enabled using events.""" + self._test_agent_with_structured_output_without_instructions_impl( + use_events=True, content_recording_enabled=True, **kwargs + ) - # Check schema content - assert event_content[0]["type"] == "response_schema" - schema_str = event_content[0]["content"] - schema_obj = json.loads(schema_str) - assert schema_obj["type"] == "object" - assert "result" in schema_obj["properties"] + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy + def test_agent_with_structured_output_without_instructions_content_recording_enabled_with_attributes( + self, **kwargs + ): + """Test agent creation with structured output but NO instructions, content recording enabled using attributes.""" + 
self._test_agent_with_structured_output_without_instructions_impl( + use_events=False, content_recording_enabled=True, **kwargs + ) @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy def test_agent_with_structured_output_without_instructions_content_recording_disabled(self, **kwargs): - """Test agent creation with structured output but NO instructions, content recording disabled.""" - self.cleanup() - os.environ.update({CONTENT_TRACING_ENV_VARIABLE: "False"}) - self.setup_telemetry() - assert False == AIProjectInstrumentor().is_content_recording_enabled() - assert True == AIProjectInstrumentor().is_instrumented() - - from azure.ai.projects.models import TextResponseFormatJsonSchema - - with self.create_client(operation_group="agents", **kwargs) as project_client: - - model = kwargs.get("azure_ai_model_deployment_name") - - test_schema = { - "type": "object", - "properties": { - "result": {"type": "string"}, - }, - "required": ["result"], - } - - agent_definition = PromptAgentDefinition( - model=model, - # No instructions provided - text=PromptAgentDefinitionText( - format=TextResponseFormatJsonSchema( - name="Result", - schema=test_schema, - ) - ), - ) - - agent = project_client.agents.create_version( - agent_name="no-instructions-agent", definition=agent_definition - ) - version = agent.version - - project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version) - - # Validate span - self.exporter.force_flush() - spans = self.exporter.get_spans_by_name("create_agent no-instructions-agent") - assert len(spans) == 1 - span = spans[0] + """Test agent creation with structured output but NO instructions, content recording disabled using events.""" + self._test_agent_with_structured_output_without_instructions_impl( + use_events=True, content_recording_enabled=False, **kwargs + ) - expected_attributes = [ - (GEN_AI_PROVIDER_NAME, AZURE_AI_AGENTS_PROVIDER), - (GEN_AI_OPERATION_NAME, "create_agent"), - 
(SERVER_ADDRESS, ""), - (GEN_AI_REQUEST_MODEL, model), - ("gen_ai.request.response_format", "json_schema"), - (GEN_AI_AGENT_NAME, "no-instructions-agent"), - (GEN_AI_AGENT_ID, "no-instructions-agent:" + str(version)), - (GEN_AI_AGENT_VERSION, str(version)), - (GEN_AI_AGENT_TYPE, AGENT_TYPE_PROMPT), - ] - attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) - assert attributes_match == True - - # Event should be created with empty content due to content recording disabled - events = span.events - assert len(events) == 1 - instruction_event = events[0] - assert instruction_event.name == GEN_AI_SYSTEM_INSTRUCTION_EVENT - - import json - - event_content = json.loads(instruction_event.attributes[GEN_AI_EVENT_CONTENT]) - assert isinstance(event_content, list) - assert len(event_content) == 0 # Empty because content recording is disabled + @pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy + def test_agent_with_structured_output_without_instructions_content_recording_disabled_with_attributes( + self, **kwargs + ): + """Test agent creation with structured output but NO instructions, content recording disabled using attributes.""" + self._test_agent_with_structured_output_without_instructions_impl( + use_events=False, content_recording_enabled=False, **kwargs + ) diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor_async.py index 39c878fd889c..82ac4fa58f18 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor_async.py @@ -51,6 +51,7 @@ AZURE_AI_AGENTS_PROVIDER, AGENT_TYPE_PROMPT, AGENT_TYPE_WORKFLOW, + _set_use_message_events, ) settings.tracing_implementation = "OpenTelemetry" @@ -60,11 +61,14 @@ class 
TestAiAgentsInstrumentor(TestAiAgentsInstrumentorBase): """Tests for AI agents instrumentor.""" - @pytest.mark.usefixtures("instrument_with_content") - @servicePreparer() - @recorded_by_proxy_async - async def test_create_agent_with_tracing_content_recording_enabled(self, **kwargs): + async def _test_create_agent_with_tracing_content_recording_enabled_impl(self, use_events: bool, **kwargs): + """Implementation for agent creation with content recording enabled test (async). + + :param use_events: If True, use events for messages. If False, use attributes. + :type use_events: bool + """ self.cleanup() + _set_use_message_events(use_events) os.environ.update({CONTENT_TRACING_ENV_VARIABLE: "True"}) self.setup_telemetry() assert True == AIProjectInstrumentor().is_content_recording_enabled() @@ -75,43 +79,8 @@ async def test_create_agent_with_tracing_content_recording_enabled(self, **kwarg async with project_client: agent_definition = PromptAgentDefinition( - # Required parameter model=model, - # Optional parameters instructions="You are a helpful AI assistant. Always be polite and provide accurate information.", - # temperature=0.7, - # top_p=0.9, - # # Reasoning configuration - # reasoning=Reasoning( - # effort="medium", - # summary="auto", - # ), - # # Tools that the model can use - # tools=[ - # # Function tool for custom functions - # FunctionTool( - # name="get_weather", - # description="Get the current weather for a location", - # parameters={ - # "type": "object", - # "properties": { - # "location": { - # "type": "string", - # "description": "The city and state, e.g. 
San Francisco, CA", - # }, - # "unit": { - # "type": "string", - # "enum": ["celsius", "fahrenheit"], - # "description": "The temperature unit", - # }, - # }, - # "required": ["location"], - # }, - # strict=True, # Enforce strict parameter validation - # ), - # ], - # # Text response configuration - # text=PromptAgentDefinitionText(format=ResponseTextFormatConfigurationText()), ) agent = await project_client.agents.create_version(agent_name="myagent", definition=agent_definition) @@ -128,36 +97,93 @@ async def test_create_agent_with_tracing_content_recording_enabled(self, **kwarg (GEN_AI_OPERATION_NAME, "create_agent"), (SERVER_ADDRESS, ""), (GEN_AI_REQUEST_MODEL, model), - # ("gen_ai.request.temperature", "0.7"), - # ("gen_ai.request.top_p", "0.9"), - # ("gen_ai.request.response_format", "text"), - # ("gen_ai.request.reasoning.effort", "medium"), - # ("gen_ai.request.reasoning.summary", "auto"), (GEN_AI_AGENT_NAME, "myagent"), (GEN_AI_AGENT_ID, "myagent:" + str(version)), (GEN_AI_AGENT_VERSION, str(version)), (GEN_AI_AGENT_TYPE, AGENT_TYPE_PROMPT), ] + + # When using attributes, add the system instructions attribute to expected list + if not use_events: + from azure.ai.projects.telemetry._utils import GEN_AI_SYSTEM_MESSAGE + import json + + expected_system_message = json.dumps( + [ + { + "role": "system", + "parts": [ + { + "type": "text", + "content": "You are a helpful AI assistant. Always be polite and provide accurate information.", + } + ], + } + ], + ensure_ascii=False, + ) + expected_attributes.append((GEN_AI_SYSTEM_MESSAGE, expected_system_message)) + attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) assert attributes_match == True - expected_events = [ - { - "name": GEN_AI_SYSTEM_INSTRUCTION_EVENT, - "attributes": { - GEN_AI_PROVIDER_NAME: AZURE_AI_AGENTS_PROVIDER, - GEN_AI_EVENT_CONTENT: '[{"type": "text", "content": "You are a helpful AI assistant. 
Always be polite and provide accurate information."}]', - }, - } - ] - events_match = GenAiTraceVerifier().check_span_events(span, expected_events) - assert events_match == True + if use_events: + expected_events = [ + { + "name": GEN_AI_SYSTEM_INSTRUCTION_EVENT, + "attributes": { + GEN_AI_PROVIDER_NAME: AZURE_AI_AGENTS_PROVIDER, + GEN_AI_EVENT_CONTENT: '[{"type": "text", "content": "You are a helpful AI assistant. Always be polite and provide accurate information."}]', + }, + } + ] + events_match = GenAiTraceVerifier().check_span_events(span, expected_events) + assert events_match == True + else: + # When using attributes, check for gen_ai.system.instructions attribute + from azure.ai.projects.telemetry._utils import GEN_AI_SYSTEM_MESSAGE + import json + + assert span.attributes is not None + assert GEN_AI_SYSTEM_MESSAGE in span.attributes + + system_message_json = span.attributes[GEN_AI_SYSTEM_MESSAGE] + system_message = json.loads(system_message_json) + + # Verify structure + assert isinstance(system_message, list) + assert len(system_message) == 1 + assert system_message[0]["role"] == "system" + assert "parts" in system_message[0] + assert len(system_message[0]["parts"]) == 1 + assert system_message[0]["parts"][0]["type"] == "text" + assert ( + system_message[0]["parts"][0]["content"] + == "You are a helpful AI assistant. Always be polite and provide accurate information." 
+ ) - @pytest.mark.usefixtures("instrument_without_content") + @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy_async - async def test_agent_creation_with_tracing_content_recording_disabled(self, **kwargs): + async def test_create_agent_with_tracing_content_recording_enabled(self, **kwargs): + """Test agent creation with content recording enabled using events (async).""" + await self._test_create_agent_with_tracing_content_recording_enabled_impl(use_events=True, **kwargs) + + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy_async + async def test_create_agent_with_tracing_content_recording_enabled_with_attributes(self, **kwargs): + """Test agent creation with content recording enabled using attributes (async).""" + await self._test_create_agent_with_tracing_content_recording_enabled_impl(use_events=False, **kwargs) + + async def _test_agent_creation_with_tracing_content_recording_disabled_impl(self, use_events: bool, **kwargs): + """Implementation for agent creation with content recording disabled test (async). + + :param use_events: If True, use events for messages. If False, use attributes. + :type use_events: bool + """ self.cleanup() + _set_use_message_events(use_events) os.environ.update({CONTENT_TRACING_ENV_VARIABLE: "False"}) self.setup_telemetry() assert False == AIProjectInstrumentor().is_content_recording_enabled() @@ -168,43 +194,8 @@ async def test_agent_creation_with_tracing_content_recording_disabled(self, **kw async with project_client: agent_definition = PromptAgentDefinition( - # Required parameter model=model, - # Optional parameters instructions="You are a helpful AI assistant. 
Always be polite and provide accurate information.", - # temperature=0.7, - # top_p=0.9, - # # Reasoning configuration - # reasoning=Reasoning( - # effort="medium", - # summary="auto", - # ), - # # Tools that the model can use - # tools=[ - # # Function tool for custom functions - # FunctionTool( - # name="get_weather", - # description="Get the current weather for a location", - # parameters={ - # "type": "object", - # "properties": { - # "location": { - # "type": "string", - # "description": "The city and state, e.g. San Francisco, CA", - # }, - # "unit": { - # "type": "string", - # "enum": ["celsius", "fahrenheit"], - # "description": "The temperature unit", - # }, - # }, - # "required": ["location"], - # }, - # strict=True, # Enforce strict parameter validation - # ), - # ], - # # Text response configuration - # text=PromptAgentDefinitionText(format=ResponseTextFormatConfigurationText()), ) agent = await project_client.agents.create_version(agent_name="myagent", definition=agent_definition) @@ -221,115 +212,85 @@ async def test_agent_creation_with_tracing_content_recording_disabled(self, **kw (GEN_AI_OPERATION_NAME, "create_agent"), (SERVER_ADDRESS, ""), (GEN_AI_REQUEST_MODEL, model), - # ("gen_ai.request.temperature", "0.7"), - # ("gen_ai.request.top_p", "0.9"), - # ("gen_ai.request.response_format", "text"), - # ("gen_ai.request.reasoning.effort", "medium"), - # ("gen_ai.request.reasoning.summary", "auto"), (GEN_AI_AGENT_NAME, "myagent"), (GEN_AI_AGENT_ID, "myagent:" + str(version)), (GEN_AI_AGENT_VERSION, str(version)), (GEN_AI_AGENT_TYPE, AGENT_TYPE_PROMPT), ] - attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) - assert attributes_match == True - expected_events = [ - { - "name": GEN_AI_SYSTEM_INSTRUCTION_EVENT, - "attributes": { - GEN_AI_PROVIDER_NAME: AZURE_AI_AGENTS_PROVIDER, - GEN_AI_EVENT_CONTENT: "[]", - }, - } - ] - events_match = GenAiTraceVerifier().check_span_events(span, expected_events) - assert events_match 
== True + # When using attributes, add empty system message attribute + if not use_events: + from azure.ai.projects.telemetry._utils import GEN_AI_SYSTEM_MESSAGE + import json - @pytest.mark.usefixtures("instrument_with_content") - @servicePreparer() - @recorded_by_proxy_async - async def test_workflow_agent_creation_with_tracing_content_recording_enabled(self, **kwargs): - """Test workflow agent creation with content recording enabled (async).""" - self.cleanup() - os.environ.update({CONTENT_TRACING_ENV_VARIABLE: "True"}) - self.setup_telemetry() - assert True == AIProjectInstrumentor().is_content_recording_enabled() - assert True == AIProjectInstrumentor().is_instrumented() + expected_system_message = json.dumps([{"role": "system"}], ensure_ascii=False) + expected_attributes.append((GEN_AI_SYSTEM_MESSAGE, expected_system_message)) - from azure.ai.projects.models import WorkflowAgentDefinition - - project_client = self.create_async_client(operation_group="tracing", **kwargs) - - async with project_client: - workflow_yaml = """ -kind: workflow -trigger: - kind: OnConversationStart - id: test_workflow - actions: - - kind: SetVariable - id: set_variable - variable: Local.TestVar - value: "test" -""" - - agent = await project_client.agents.create_version( - agent_name="test-workflow-agent-async", - definition=WorkflowAgentDefinition(workflow=workflow_yaml), - ) - version = agent.version - - # delete agent - await project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version) - - # ------------------------- Validate "create_agent" span --------------------------------- - self.exporter.force_flush() - spans = self.exporter.get_spans_by_name("create_agent test-workflow-agent-async") - assert len(spans) == 1 - span = spans[0] - expected_attributes = [ - (GEN_AI_PROVIDER_NAME, AZURE_AI_AGENTS_PROVIDER), - (GEN_AI_OPERATION_NAME, "create_agent"), - (SERVER_ADDRESS, ""), - (GEN_AI_AGENT_NAME, "test-workflow-agent-async"), - (GEN_AI_AGENT_ID, 
"test-workflow-agent-async:" + str(version)), - (GEN_AI_AGENT_VERSION, str(version)), - (GEN_AI_AGENT_TYPE, AGENT_TYPE_WORKFLOW), - ] attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) assert attributes_match == True - # Verify workflow event with standard content format - events = span.events - assert len(events) == 1 - workflow_event = events[0] - assert workflow_event.name == GEN_AI_AGENT_WORKFLOW_EVENT + if use_events: + expected_events = [ + { + "name": GEN_AI_SYSTEM_INSTRUCTION_EVENT, + "attributes": { + GEN_AI_PROVIDER_NAME: AZURE_AI_AGENTS_PROVIDER, + GEN_AI_EVENT_CONTENT: "[]", + }, + } + ] + events_match = GenAiTraceVerifier().check_span_events(span, expected_events) + assert events_match == True + else: + # When using attributes and content recording disabled, verify empty structure + from azure.ai.projects.telemetry._utils import GEN_AI_SYSTEM_MESSAGE + import json + + assert span.attributes is not None + assert GEN_AI_SYSTEM_MESSAGE in span.attributes + + system_message_json = span.attributes[GEN_AI_SYSTEM_MESSAGE] + system_message = json.loads(system_message_json) + # Should have empty content (just role, no parts) + assert isinstance(system_message, list) + assert len(system_message) == 1 + assert system_message[0]["role"] == "system" + # No parts field when content recording is disabled + assert "parts" not in system_message[0] - import json - - event_content = json.loads(workflow_event.attributes[GEN_AI_EVENT_CONTENT]) - # New optimized format: direct array with "content" field for workflow YAML - assert isinstance(event_content, list) - assert len(event_content) == 1 - assert event_content[0]["type"] == "workflow" - assert "content" in event_content[0] - assert "kind: workflow" in event_content[0]["content"] + @pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy_async + async def test_agent_creation_with_tracing_content_recording_disabled(self, **kwargs): + """Test 
agent creation with content recording disabled using events (async).""" + await self._test_agent_creation_with_tracing_content_recording_disabled_impl(use_events=True, **kwargs) @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy_async - async def test_workflow_agent_creation_with_tracing_content_recording_disabled(self, **kwargs): - """Test workflow agent creation with content recording disabled (async).""" + async def test_agent_creation_with_tracing_content_recording_disabled_with_attributes(self, **kwargs): + """Test agent creation with content recording disabled using attributes (async).""" + await self._test_agent_creation_with_tracing_content_recording_disabled_impl(use_events=False, **kwargs) + + async def _test_workflow_agent_creation_impl(self, use_events: bool, content_recording_enabled: bool, **kwargs): + """Implementation for workflow agent creation test (async). + + :param use_events: If True, use events for messages. If False, use attributes. + :type use_events: bool + :param content_recording_enabled: Whether content recording is enabled. 
+ :type content_recording_enabled: bool + """ self.cleanup() - os.environ.update({CONTENT_TRACING_ENV_VARIABLE: "False"}) + _set_use_message_events(use_events) + os.environ.update({CONTENT_TRACING_ENV_VARIABLE: "True" if content_recording_enabled else "False"}) self.setup_telemetry() - assert False == AIProjectInstrumentor().is_content_recording_enabled() + assert content_recording_enabled == AIProjectInstrumentor().is_content_recording_enabled() assert True == AIProjectInstrumentor().is_instrumented() from azure.ai.projects.models import WorkflowAgentDefinition - project_client = self.create_async_client(operation_group="agents", **kwargs) + operation_group = "tracing" if content_recording_enabled else "agents" + project_client = self.create_async_client(operation_group=operation_group, **kwargs) async with project_client: workflow_yaml = """ @@ -350,7 +311,6 @@ async def test_workflow_agent_creation_with_tracing_content_recording_disabled(s ) version = agent.version - # delete agent await project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version) # ------------------------- Validate "create_agent" span --------------------------------- @@ -370,123 +330,84 @@ async def test_workflow_agent_creation_with_tracing_content_recording_disabled(s attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) assert attributes_match == True - # Verify workflow event is present but content is empty when content recording is disabled - events = span.events - assert len(events) == 1 - workflow_event = events[0] - assert workflow_event.name == GEN_AI_AGENT_WORKFLOW_EVENT - - import json - - event_content = json.loads(workflow_event.attributes[GEN_AI_EVENT_CONTENT]) - # When content recording is disabled, event should be an empty array - assert isinstance(event_content, list) - assert len(event_content) == 0 + if use_events: + # Verify workflow event + events = span.events + assert len(events) == 1 + workflow_event = 
events[0] + assert workflow_event.name == GEN_AI_AGENT_WORKFLOW_EVENT + + import json + + event_content = json.loads(workflow_event.attributes[GEN_AI_EVENT_CONTENT]) + assert isinstance(event_content, list) + + if content_recording_enabled: + assert len(event_content) == 1 + assert event_content[0]["type"] == "workflow" + assert "content" in event_content[0] + assert "kind: workflow" in event_content[0]["content"] + else: + # When content recording is disabled, event should be empty + assert len(event_content) == 0 + else: + # When using attributes, workflow events are still sent as events (not attributes) + events = span.events + assert len(events) == 1 + workflow_event = events[0] + assert workflow_event.name == GEN_AI_AGENT_WORKFLOW_EVENT @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy_async - async def test_agent_with_structured_output_with_instructions_content_recording_enabled(self, **kwargs): - """Test agent creation with structured output and instructions, content recording enabled (async).""" - self.cleanup() - os.environ.update({CONTENT_TRACING_ENV_VARIABLE: "True"}) - self.setup_telemetry() - assert True == AIProjectInstrumentor().is_content_recording_enabled() - assert True == AIProjectInstrumentor().is_instrumented() - - from azure.ai.projects.models import TextResponseFormatJsonSchema - - project_client = self.create_async_client(operation_group="tracing", **kwargs) - - async with project_client: - model = kwargs.get("azure_ai_model_deployment_name") - - # Define a JSON schema for structured output - test_schema = { - "type": "object", - "properties": { - "name": {"type": "string"}, - "age": {"type": "number"}, - }, - "required": ["name", "age"], - } - - agent_definition = PromptAgentDefinition( - model=model, - instructions="You are a helpful assistant that extracts person information.", - text=PromptAgentDefinitionText( - format=TextResponseFormatJsonSchema( - name="PersonInfo", - schema=test_schema, - ) - 
), - ) - - agent = await project_client.agents.create_version( - agent_name="structured-agent-async", definition=agent_definition - ) - version = agent.version - - await project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version) - - # Validate span - self.exporter.force_flush() - spans = self.exporter.get_spans_by_name("create_agent structured-agent-async") - assert len(spans) == 1 - span = spans[0] - - expected_attributes = [ - (GEN_AI_PROVIDER_NAME, AZURE_AI_AGENTS_PROVIDER), - (GEN_AI_OPERATION_NAME, "create_agent"), - (SERVER_ADDRESS, ""), - (GEN_AI_REQUEST_MODEL, model), - ("gen_ai.request.response_format", "json_schema"), - (GEN_AI_AGENT_NAME, "structured-agent-async"), - (GEN_AI_AGENT_ID, "structured-agent-async:" + str(version)), - (GEN_AI_AGENT_VERSION, str(version)), - (GEN_AI_AGENT_TYPE, AGENT_TYPE_PROMPT), - ] - attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) - assert attributes_match == True - - # Verify event contains both instructions and schema - events = span.events - assert len(events) == 1 - instruction_event = events[0] - assert instruction_event.name == GEN_AI_SYSTEM_INSTRUCTION_EVENT - - import json - - event_content = json.loads(instruction_event.attributes[GEN_AI_EVENT_CONTENT]) - assert isinstance(event_content, list) - assert len(event_content) == 2 # Both instructions and schema + async def test_workflow_agent_creation_with_tracing_content_recording_enabled(self, **kwargs): + """Test workflow agent creation with content recording enabled using events (async).""" + await self._test_workflow_agent_creation_impl(use_events=True, content_recording_enabled=True, **kwargs) - # Check instructions content - assert event_content[0]["type"] == "text" - assert "helpful assistant" in event_content[0]["content"] + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy_async + async def 
test_workflow_agent_creation_with_tracing_content_recording_enabled_with_attributes(self, **kwargs): + """Test workflow agent creation with content recording enabled using attributes (async).""" + await self._test_workflow_agent_creation_impl(use_events=False, content_recording_enabled=True, **kwargs) - # Check schema content - assert event_content[1]["type"] == "response_schema" - schema_str = event_content[1]["content"] - schema_obj = json.loads(schema_str) - assert schema_obj["type"] == "object" - assert "name" in schema_obj["properties"] - assert "age" in schema_obj["properties"] + @pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy_async + async def test_workflow_agent_creation_with_tracing_content_recording_disabled(self, **kwargs): + """Test workflow agent creation with content recording disabled using events (async).""" + await self._test_workflow_agent_creation_impl(use_events=True, content_recording_enabled=False, **kwargs) @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy_async - async def test_agent_with_structured_output_with_instructions_content_recording_disabled(self, **kwargs): - """Test agent creation with structured output and instructions, content recording disabled (async).""" + async def test_workflow_agent_creation_with_tracing_content_recording_disabled_with_attributes(self, **kwargs): + """Test workflow agent creation with content recording disabled using attributes (async).""" + await self._test_workflow_agent_creation_impl(use_events=False, content_recording_enabled=False, **kwargs) + + async def _test_agent_with_structured_output_with_instructions_impl( + self, use_events: bool, content_recording_enabled: bool, **kwargs + ): + """Implementation for structured output with instructions test (async). + + :param use_events: If True, use events for messages. If False, use attributes. 
+ :type use_events: bool + :param content_recording_enabled: Whether content recording is enabled. + :type content_recording_enabled: bool + """ + import json + self.cleanup() - os.environ.update({CONTENT_TRACING_ENV_VARIABLE: "False"}) + _set_use_message_events(use_events) + os.environ.update({CONTENT_TRACING_ENV_VARIABLE: "True" if content_recording_enabled else "False"}) self.setup_telemetry() - assert False == AIProjectInstrumentor().is_content_recording_enabled() + assert content_recording_enabled == AIProjectInstrumentor().is_content_recording_enabled() assert True == AIProjectInstrumentor().is_instrumented() from azure.ai.projects.models import TextResponseFormatJsonSchema - project_client = self.create_async_client(operation_group="agents", **kwargs) + operation_group = "tracing" if content_recording_enabled else "agents" + project_client = self.create_async_client(operation_group=operation_group, **kwargs) async with project_client: model = kwargs.get("azure_ai_model_deployment_name") @@ -535,35 +456,144 @@ async def test_agent_with_structured_output_with_instructions_content_recording_ (GEN_AI_AGENT_VERSION, str(version)), (GEN_AI_AGENT_TYPE, AGENT_TYPE_PROMPT), ] + + # Add attribute expectation when using attributes mode + if not use_events: + from azure.ai.projects.telemetry._utils import GEN_AI_SYSTEM_MESSAGE + + if content_recording_enabled: + expected_system_message = json.dumps( + [ + { + "role": "system", + "parts": [ + { + "type": "text", + "content": "You are a helpful assistant that extracts person information.", + }, + {"type": "response_schema", "content": json.dumps(test_schema)}, + ], + } + ] + ) + else: + expected_system_message = json.dumps([{"role": "system"}]) + expected_attributes.append((GEN_AI_SYSTEM_MESSAGE, expected_system_message)) + attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) assert attributes_match == True - # When content recording is disabled, event should be empty - events = 
span.events - assert len(events) == 1 - instruction_event = events[0] - assert instruction_event.name == GEN_AI_SYSTEM_INSTRUCTION_EVENT + if use_events: + # Verify event + events = span.events + assert len(events) == 1 + instruction_event = events[0] + assert instruction_event.name == GEN_AI_SYSTEM_INSTRUCTION_EVENT + + import json + + event_content = json.loads(instruction_event.attributes[GEN_AI_EVENT_CONTENT]) + assert isinstance(event_content, list) + + if content_recording_enabled: + assert len(event_content) == 2 # Both instructions and schema + assert event_content[0]["type"] == "text" + assert "helpful assistant" in event_content[0]["content"] + assert event_content[1]["type"] == "response_schema" + schema_obj = json.loads(event_content[1]["content"]) + assert schema_obj["type"] == "object" + assert "name" in schema_obj["properties"] + else: + assert len(event_content) == 0 + else: + # Validate attribute + attribute_value = None + for attr_key, attr_val in span.attributes.items(): + if attr_key == GEN_AI_SYSTEM_MESSAGE: + attribute_value = attr_val + break + assert attribute_value is not None + + system_message = json.loads(attribute_value) + assert isinstance(system_message, list) + assert len(system_message) == 1 + assert system_message[0]["role"] == "system" + + if content_recording_enabled: + assert "parts" in system_message[0] + parts = system_message[0]["parts"] + assert len(parts) == 2 + assert parts[0]["type"] == "text" + assert "helpful assistant" in parts[0]["content"] + assert parts[1]["type"] == "response_schema" + schema_obj = json.loads(parts[1]["content"]) + assert schema_obj["type"] == "object" + else: + assert "parts" not in system_message[0] - import json - - event_content = json.loads(instruction_event.attributes[GEN_AI_EVENT_CONTENT]) - assert isinstance(event_content, list) - assert len(event_content) == 0 # Empty when content recording disabled + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + 
@recorded_by_proxy_async + async def test_agent_with_structured_output_with_instructions_content_recording_enabled(self, **kwargs): + """Test agent creation with structured output and instructions, content recording enabled using events (async).""" + await self._test_agent_with_structured_output_with_instructions_impl( + use_events=True, content_recording_enabled=True, **kwargs + ) @pytest.mark.usefixtures("instrument_with_content") @servicePreparer() @recorded_by_proxy_async - async def test_agent_with_structured_output_without_instructions_content_recording_enabled(self, **kwargs): - """Test agent creation with structured output but NO instructions, content recording enabled (async).""" + async def test_agent_with_structured_output_with_instructions_content_recording_enabled_with_attributes( + self, **kwargs + ): + """Test agent creation with structured output and instructions, content recording enabled using attributes (async).""" + await self._test_agent_with_structured_output_with_instructions_impl( + use_events=False, content_recording_enabled=True, **kwargs + ) + + @pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy_async + async def test_agent_with_structured_output_with_instructions_content_recording_disabled(self, **kwargs): + """Test agent creation with structured output and instructions, content recording disabled using events (async).""" + await self._test_agent_with_structured_output_with_instructions_impl( + use_events=True, content_recording_enabled=False, **kwargs + ) + + @pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy_async + async def test_agent_with_structured_output_with_instructions_content_recording_disabled_with_attributes( + self, **kwargs + ): + """Test agent creation with structured output and instructions, content recording disabled using attributes (async).""" + await self._test_agent_with_structured_output_with_instructions_impl( + 
use_events=False, content_recording_enabled=False, **kwargs + ) + + async def _test_agent_with_structured_output_without_instructions_impl( + self, use_events: bool, content_recording_enabled: bool, **kwargs + ): + """Implementation for structured output without instructions test (async). + + :param use_events: If True, use events for messages. If False, use attributes. + :type use_events: bool + :param content_recording_enabled: Whether content recording is enabled. + :type content_recording_enabled: bool + """ + import json + self.cleanup() - os.environ.update({CONTENT_TRACING_ENV_VARIABLE: "True"}) + _set_use_message_events(use_events) + os.environ.update({CONTENT_TRACING_ENV_VARIABLE: "True" if content_recording_enabled else "False"}) self.setup_telemetry() - assert True == AIProjectInstrumentor().is_content_recording_enabled() + assert content_recording_enabled == AIProjectInstrumentor().is_content_recording_enabled() assert True == AIProjectInstrumentor().is_instrumented() from azure.ai.projects.models import TextResponseFormatJsonSchema - project_client = self.create_async_client(operation_group="tracing", **kwargs) + operation_group = "tracing" if content_recording_enabled else "agents" + project_client = self.create_async_client(operation_group=operation_group, **kwargs) async with project_client: model = kwargs.get("azure_ai_model_deployment_name") @@ -611,100 +641,103 @@ async def test_agent_with_structured_output_without_instructions_content_recordi (GEN_AI_AGENT_VERSION, str(version)), (GEN_AI_AGENT_TYPE, AGENT_TYPE_PROMPT), ] + + # Add attribute expectation when using attributes mode + if not use_events: + from azure.ai.projects.telemetry._utils import GEN_AI_SYSTEM_MESSAGE + + if content_recording_enabled: + expected_system_message = json.dumps( + [{"role": "system", "parts": [{"type": "response_schema", "content": json.dumps(test_schema)}]}] + ) + else: + expected_system_message = json.dumps([{"role": "system"}]) + 
expected_attributes.append((GEN_AI_SYSTEM_MESSAGE, expected_system_message)) + attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) assert attributes_match == True - # Event should be created with just the schema (no instructions) - events = span.events - assert len(events) == 1 - instruction_event = events[0] - assert instruction_event.name == GEN_AI_SYSTEM_INSTRUCTION_EVENT - - import json + if use_events: + # Verify event + events = span.events + assert len(events) == 1 + instruction_event = events[0] + assert instruction_event.name == GEN_AI_SYSTEM_INSTRUCTION_EVENT + + import json + + event_content = json.loads(instruction_event.attributes[GEN_AI_EVENT_CONTENT]) + assert isinstance(event_content, list) + + if content_recording_enabled: + assert len(event_content) == 1 # Only schema, no instructions + assert event_content[0]["type"] == "response_schema" + schema_obj = json.loads(event_content[0]["content"]) + assert schema_obj["type"] == "object" + assert "result" in schema_obj["properties"] + else: + assert len(event_content) == 0 + else: + # Validate attribute + attribute_value = None + for attr_key, attr_val in span.attributes.items(): + if attr_key == GEN_AI_SYSTEM_MESSAGE: + attribute_value = attr_val + break + assert attribute_value is not None + + system_message = json.loads(attribute_value) + assert isinstance(system_message, list) + assert len(system_message) == 1 + assert system_message[0]["role"] == "system" + + if content_recording_enabled: + assert "parts" in system_message[0] + parts = system_message[0]["parts"] + assert len(parts) == 1 # Only schema + assert parts[0]["type"] == "response_schema" + schema_obj = json.loads(parts[0]["content"]) + assert schema_obj["type"] == "object" + assert "result" in schema_obj["properties"] + else: + assert "parts" not in system_message[0] - event_content = json.loads(instruction_event.attributes[GEN_AI_EVENT_CONTENT]) - assert isinstance(event_content, list) - assert 
len(event_content) == 1 # Only schema, no instructions + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy_async + async def test_agent_with_structured_output_without_instructions_content_recording_enabled(self, **kwargs): + """Test agent creation with structured output but NO instructions, content recording enabled using events (async).""" + await self._test_agent_with_structured_output_without_instructions_impl( + use_events=True, content_recording_enabled=True, **kwargs + ) - # Check schema content - assert event_content[0]["type"] == "response_schema" - schema_str = event_content[0]["content"] - schema_obj = json.loads(schema_str) - assert schema_obj["type"] == "object" - assert "result" in schema_obj["properties"] + @pytest.mark.usefixtures("instrument_with_content") + @servicePreparer() + @recorded_by_proxy_async + async def test_agent_with_structured_output_without_instructions_content_recording_enabled_with_attributes( + self, **kwargs + ): + """Test agent creation with structured output but NO instructions, content recording enabled using attributes (async).""" + await self._test_agent_with_structured_output_without_instructions_impl( + use_events=False, content_recording_enabled=True, **kwargs + ) @pytest.mark.usefixtures("instrument_without_content") @servicePreparer() @recorded_by_proxy_async async def test_agent_with_structured_output_without_instructions_content_recording_disabled(self, **kwargs): - """Test agent creation with structured output but NO instructions, content recording disabled (async).""" - self.cleanup() - os.environ.update({CONTENT_TRACING_ENV_VARIABLE: "False"}) - self.setup_telemetry() - assert False == AIProjectInstrumentor().is_content_recording_enabled() - assert True == AIProjectInstrumentor().is_instrumented() - - from azure.ai.projects.models import TextResponseFormatJsonSchema - - project_client = self.create_async_client(operation_group="agents", **kwargs) - - async with 
project_client: - model = kwargs.get("azure_ai_model_deployment_name") - - test_schema = { - "type": "object", - "properties": { - "result": {"type": "string"}, - }, - "required": ["result"], - } - - agent_definition = PromptAgentDefinition( - model=model, - # No instructions provided - text=PromptAgentDefinitionText( - format=TextResponseFormatJsonSchema( - name="Result", - schema=test_schema, - ) - ), - ) + """Test agent creation with structured output but NO instructions, content recording disabled using events (async).""" + await self._test_agent_with_structured_output_without_instructions_impl( + use_events=True, content_recording_enabled=False, **kwargs + ) - agent = await project_client.agents.create_version( - agent_name="no-instructions-agent-async", definition=agent_definition - ) - version = agent.version - - await project_client.agents.delete_version(agent_name=agent.name, agent_version=agent.version) - - # Validate span - self.exporter.force_flush() - spans = self.exporter.get_spans_by_name("create_agent no-instructions-agent-async") - assert len(spans) == 1 - span = spans[0] - - expected_attributes = [ - (GEN_AI_PROVIDER_NAME, AZURE_AI_AGENTS_PROVIDER), - (GEN_AI_OPERATION_NAME, "create_agent"), - (SERVER_ADDRESS, ""), - (GEN_AI_REQUEST_MODEL, model), - ("gen_ai.request.response_format", "json_schema"), - (GEN_AI_AGENT_NAME, "no-instructions-agent-async"), - (GEN_AI_AGENT_ID, "no-instructions-agent-async:" + str(version)), - (GEN_AI_AGENT_VERSION, str(version)), - (GEN_AI_AGENT_TYPE, AGENT_TYPE_PROMPT), - ] - attributes_match = GenAiTraceVerifier().check_span_attributes(span, expected_attributes) - assert attributes_match == True - - # Event should be created with empty content due to content recording disabled - events = span.events - assert len(events) == 1 - instruction_event = events[0] - assert instruction_event.name == GEN_AI_SYSTEM_INSTRUCTION_EVENT - - import json - - event_content = 
json.loads(instruction_event.attributes[GEN_AI_EVENT_CONTENT]) - assert isinstance(event_content, list) - assert len(event_content) == 0 # Empty because content recording is disabled + @pytest.mark.usefixtures("instrument_without_content") + @servicePreparer() + @recorded_by_proxy_async + async def test_agent_with_structured_output_without_instructions_content_recording_disabled_with_attributes( + self, **kwargs + ): + """Test agent creation with structured output but NO instructions, content recording disabled using attributes (async).""" + await self._test_agent_with_structured_output_without_instructions_impl( + use_events=False, content_recording_enabled=False, **kwargs + ) From a37151bf024f54f6e98eaeeb450e3c215facca3e Mon Sep 17 00:00:00 2001 From: M-Hietala <78813398+M-Hietala@users.noreply.github.com> Date: Wed, 4 Feb 2026 12:48:25 -0600 Subject: [PATCH 05/10] adding support for opt-in trace context propagation --- sdk/ai/azure-ai-projects/CHANGELOG.md | 1 + sdk/ai/azure-ai-projects/README.md | 20 +- .../telemetry/_ai_project_instrumentor.py | 222 ++++++++++++++++-- 3 files changed, 216 insertions(+), 27 deletions(-) diff --git a/sdk/ai/azure-ai-projects/CHANGELOG.md b/sdk/ai/azure-ai-projects/CHANGELOG.md index 9f73891461c9..ec12a9ef6788 100644 --- a/sdk/ai/azure-ai-projects/CHANGELOG.md +++ b/sdk/ai/azure-ai-projects/CHANGELOG.md @@ -5,6 +5,7 @@ ### Features Added * Tracing: included agent ID in response generation traces when available. +* Tracing: Added support for opt-in trace context propagation. ### Breaking changes diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md index f6c858708b72..3c76ea962be0 100644 --- a/sdk/ai/azure-ai-projects/README.md +++ b/sdk/ai/azure-ai-projects/README.md @@ -1017,11 +1017,29 @@ AIProjectInstrumentor().instrument() See the full sample in file `\agents\telemetry\sample_agent_basic_with_console_tracing.py` in the [Samples][samples] folder. 
+### Enabling trace context propagation + +Trace context propagation allows client-side spans generated by the Projects SDK to be correlated with server-side spans from Azure OpenAI and other Azure services. When enabled, the SDK automatically injects trace context headers into HTTP requests made by OpenAI clients obtained via `get_openai_client()`. + +This feature ensures that all operations within a distributed trace share the same trace ID, providing end-to-end visibility across your application and Azure services in your observability backend (such as Azure Monitor). + +To enable trace context propagation set the `AZURE_TRACING_GEN_AI_ENABLE_TRACE_CONTEXT_PROPAGATION` environment variable to `true` +If the environment variable is not set and no value is provided with the `AIProjectInstrumentor().instrument()` call for the propagation parameter, the trace context propagation defaults to `false` (opt-in). + +**Important Security and Privacy Considerations:** + +- **Trace IDs and Baggage**: When trace context propagation is enabled, trace IDs and OpenTelemetry baggage are sent to Azure OpenAI and other external services. Baggage may contain arbitrary key-value pairs added to the trace context by your application or other components. +- **Sensitive Information**: Ensure that your application does not add sensitive information (such as user identifiers, authentication tokens, or personally identifiable information) to OpenTelemetry baggage, as this data will be propagated to external services. +- **Request Correlation**: Trace IDs allow Azure services to correlate requests from the same session or user across multiple API calls, which may have privacy implications depending on your use case. +- **Opt-in by Design**: This feature is disabled by default to give you explicit control over when trace context is propagated to external services. + +Only enable trace context propagation after carefully reviewing your observability and security requirements. 
+ ### Enabling content recording Content recording controls whether message contents and tool call related details, such as parameters and return values, are captured with the traces. This data may include sensitive user information. -To enable content recording, set the `OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT` environment variable to `true`. If the environment variable is not set, content recording defaults to `false`. +To enable content recording, set the `OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT` environment variable to `true`. If the environment variable is not set and no value is provided with the `AIProjectInstrumentor().instrument()` call for the content recording parameter, content recording defaults to `false`. **Important:** The environment variable only controls content recording for built-in traces. When you use custom tracing decorators on your own functions, all parameters and return values are always traced. diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_ai_project_instrumentor.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_ai_project_instrumentor.py index 164868fd8548..73ebe98c6a47 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_ai_project_instrumentor.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_ai_project_instrumentor.py @@ -79,14 +79,88 @@ "AIProjectInstrumentor", ] -_agents_traces_enabled: bool = False +_projects_traces_enabled: bool = False _trace_agents_content: bool = False +_trace_context_propagation_enabled: bool = False + + +def _inject_trace_context_sync(request): + """Synchronous event hook to inject trace context (traceparent) into outgoing requests. + + :param request: The httpx Request object. 
+ :type request: httpx.Request + """ + try: + from opentelemetry import propagate + + carrier = dict(request.headers) + propagate.inject(carrier) + for key, value in carrier.items(): + if key.lower() in ("traceparent", "tracestate", "baggage"): + if key.lower() not in [h.lower() for h in request.headers.keys()]: + request.headers[key] = value + except Exception as e: # pylint: disable=broad-exception-caught + logger.debug("Failed to inject trace context: %s", e) + + +async def _inject_trace_context_async(request): + """Async event hook to inject trace context (traceparent) into outgoing requests. + + :param request: The httpx Request object. + :type request: httpx.Request + """ + try: + from opentelemetry import propagate + + carrier = dict(request.headers) + propagate.inject(carrier) + for key, value in carrier.items(): + if key.lower() in ("traceparent", "tracestate", "baggage"): + if key.lower() not in [h.lower() for h in request.headers.keys()]: + request.headers[key] = value + except Exception as e: # pylint: disable=broad-exception-caught + logger.debug("Failed to inject trace context: %s", e) + + +def _enable_trace_propagation_for_openai_client(openai_client): + """Enable trace context propagation for an OpenAI client. + + This function hooks into the httpx client used by the OpenAI SDK to inject + trace context headers (traceparent, tracestate) into outgoing HTTP requests. + This ensures that client-side spans and server-side spans share the same trace ID. + + :param openai_client: The OpenAI client instance. 
+ :type openai_client: Any + """ + try: + # Access the underlying httpx client + if hasattr(openai_client, "_client"): + httpx_client = openai_client._client # pylint: disable=protected-access + + # Check if the client has event hooks support + if hasattr(httpx_client, "_event_hooks"): + event_hooks = httpx_client._event_hooks # pylint: disable=protected-access + + # Determine if this is an async client + is_async = hasattr(httpx_client, "__aenter__") + + # Add appropriate hook based on client type + if "request" in event_hooks: + hook_to_add = _inject_trace_context_async if is_async else _inject_trace_context_sync + + # Check if our hook is already registered to avoid duplicates + if hook_to_add not in event_hooks["request"]: + event_hooks["request"].append(hook_to_add) + logger.debug("Enabled trace propagation for %s OpenAI client", "async" if is_async else "sync") + except Exception as e: # pylint: disable=broad-exception-caught + logger.debug("Failed to enable trace propagation for OpenAI client: %s", e) class TraceType(str, Enum, metaclass=CaseInsensitiveEnumMeta): # pylint: disable=C4747 """An enumeration class to represent different types of traces.""" AGENTS = "Agents" + PROJECT = "Project" class AIProjectInstrumentor: @@ -109,7 +183,9 @@ def __init__(self): self._impl = _AIAgentsInstrumentorPreview() self._responses_impl = _ResponsesInstrumentorPreview() - def instrument(self, enable_content_recording: Optional[bool] = None) -> None: + def instrument( + self, enable_content_recording: Optional[bool] = None, enable_trace_context_propagation: Optional[bool] = None + ) -> None: """ Enable trace instrumentation for AIProjectClient. @@ -126,9 +202,18 @@ def instrument(self, enable_content_recording: Optional[bool] = None) -> None: if the environment variable is not found), even if instrument was already previously called without uninstrument being called in between the instrument calls. 
:type enable_content_recording: bool, optional + :param enable_trace_context_propagation: Whether to enable automatic trace context propagation + to OpenAI SDK HTTP requests. When enabled, traceparent headers will be injected into + requests made by OpenAI clients obtained via get_openai_client(), allowing server-side + spans to be correlated with client-side spans. `True` will enable it, `False` will + disable it. If no value is provided, then the value read from environment variable + AZURE_TRACING_GEN_AI_ENABLE_TRACE_CONTEXT_PROPAGATION is used. If the environment + variable is not found, then the value will default to `False`. + Note: Trace context may include trace IDs and baggage that will be sent to Azure OpenAI. + :type enable_trace_context_propagation: bool, optional """ - self._impl.instrument(enable_content_recording) + self._impl.instrument(enable_content_recording, enable_trace_context_propagation) self._responses_impl.instrument(enable_content_recording) def uninstrument(self) -> None: @@ -173,7 +258,9 @@ def _str_to_bool(self, s): return False return str(s).lower() == "true" - def instrument(self, enable_content_recording: Optional[bool] = None): + def instrument( + self, enable_content_recording: Optional[bool] = None, enable_trace_context_propagation: Optional[bool] = None + ): """ Enable trace instrumentation for AI Agents. @@ -190,6 +277,11 @@ def instrument(self, enable_content_recording: Optional[bool] = None): if the environment variable is not found), even if instrument was already previously called without uninstrument being called in between the instrument calls. :type enable_content_recording: bool, optional + :param enable_trace_context_propagation: Whether to enable automatic trace context propagation. + `True` will enable it, `False` will disable it. If no value is provided, then the + value read from environment variable AZURE_TRACING_GEN_AI_ENABLE_TRACE_CONTEXT_PROPAGATION + is used. 
If the environment variable is not found, then the value will default to `False`. + :type enable_trace_context_propagation: bool, optional """ if enable_content_recording is None: @@ -197,24 +289,31 @@ def instrument(self, enable_content_recording: Optional[bool] = None): var_value = os.environ.get("OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT") enable_content_recording = self._str_to_bool(var_value) + if enable_trace_context_propagation is None: + var_value = os.environ.get("AZURE_TRACING_GEN_AI_ENABLE_TRACE_CONTEXT_PROPAGATION") + enable_trace_context_propagation = self._str_to_bool(var_value) + if not self.is_instrumented(): - self._instrument_agents(enable_content_recording) + self._instrument_projects(enable_content_recording, enable_trace_context_propagation) else: self._set_enable_content_recording(enable_content_recording=enable_content_recording) + self._set_enable_trace_context_propagation( + enable_trace_context_propagation=enable_trace_context_propagation + ) def uninstrument(self): """ - Disable trace instrumentation for AI Agents. + Disable trace instrumentation for AI Projects. This method removes any active instrumentation, stopping the tracing - of AI Agents. + of AI Projects. """ if self.is_instrumented(): - self._uninstrument_agents() + self._uninstrument_projects() def is_instrumented(self): """ - Check if trace instrumentation for AI Agents is currently enabled. + Check if trace instrumentation for AI Projects is currently enabled. :return: True if instrumentation is active, False otherwise. :rtype: bool @@ -369,7 +468,7 @@ def add_thread_message_event( usage=usage, ) - def _add_message_event( + def _add_message_event( # pylint: disable=too-many-branches,too-many-statements self, span, role: str, @@ -1144,11 +1243,64 @@ def _agents_apis(self): ) return sync_apis, async_apis + def _project_apis(self): + """Define AIProjectClient APIs to instrument for trace propagation. + + :return: A tuple containing sync and async API tuples. 
+ :rtype: Tuple[Tuple, Tuple] + """ + sync_apis = ( + ( + "azure.ai.projects", + "AIProjectClient", + "get_openai_client", + TraceType.PROJECT, + "get_openai_client", + ), + ) + async_apis = ( + ( + "azure.ai.projects.aio", + "AIProjectClient", + "get_openai_client", + TraceType.PROJECT, + "get_openai_client", + ), + ) + return sync_apis, async_apis + + def _inject_openai_client(self, f, _trace_type, _name): + """Injector for get_openai_client that enables trace context propagation if opted in. + + :return: The wrapped function with trace context propagation enabled. + :rtype: Callable + """ + + @functools.wraps(f) + def wrapper(*args, **kwargs): + openai_client = f(*args, **kwargs) + if _trace_context_propagation_enabled: + _enable_trace_propagation_for_openai_client(openai_client) + return openai_client + + wrapper._original = f # type: ignore # pylint: disable=protected-access + return wrapper + def _agents_api_list(self): sync_apis, async_apis = self._agents_apis() yield sync_apis, self._inject_sync yield async_apis, self._inject_async + def _project_api_list(self): + """Generate project API list with custom injector. + + :return: A generator yielding API tuples with injectors. + :rtype: Generator + """ + sync_apis, async_apis = self._project_apis() + yield sync_apis, self._inject_openai_client + yield async_apis, self._inject_openai_client + def _generate_api_and_injector(self, apis): for api, injector in apis: for module_name, class_name, method_name, trace_type, name in api: @@ -1172,18 +1324,21 @@ def _generate_api_and_injector(self, apis): "An unexpected error occurred: '%s'", str(e) ) - def _available_agents_apis_and_injectors(self): + def _available_projects_apis_and_injectors(self): """ - Generates a sequence of tuples containing Agents API classes, method names, and + Generates a sequence of tuples containing Agents and Project API classes, method names, and corresponding injector functions. :return: A generator yielding tuples. 
:rtype: tuple """ yield from self._generate_api_and_injector(self._agents_api_list()) + yield from self._generate_api_and_injector(self._project_api_list()) - def _instrument_agents(self, enable_content_tracing: bool = False): - """This function modifies the methods of the Agents API classes to + def _instrument_projects( + self, enable_content_tracing: bool = False, enable_trace_context_propagation: bool = False + ): + """This function modifies the methods of the Projects API classes to inject logic before calling the original methods. The original methods are stored as _original attributes of the methods. @@ -1191,48 +1346,54 @@ def _instrument_agents(self, enable_content_tracing: bool = False): This also controls whether function call tool function names, parameter names and parameter values are traced. :type enable_content_tracing: bool + :param enable_trace_context_propagation: Whether to enable automatic trace context propagation. + :type enable_trace_context_propagation: bool """ # pylint: disable=W0603 - global _agents_traces_enabled + global _projects_traces_enabled global _trace_agents_content - if _agents_traces_enabled: + global _trace_context_propagation_enabled + if _projects_traces_enabled: raise RuntimeError("Traces already started for AI Agents") - _agents_traces_enabled = True + _projects_traces_enabled = True _trace_agents_content = enable_content_tracing + _trace_context_propagation_enabled = enable_trace_context_propagation for ( api, method, trace_type, injector, name, - ) in self._available_agents_apis_and_injectors(): + ) in self._available_projects_apis_and_injectors(): # Check if the method of the api class has already been modified if not hasattr(getattr(api, method), "_original"): setattr(api, method, injector(getattr(api, method), trace_type, name)) - def _uninstrument_agents(self): - """This function restores the original methods of the Agents API classes + def _uninstrument_projects(self): + """This function restores the original 
methods of the Projects API classes by assigning them back from the _original attributes of the modified methods. """ # pylint: disable=W0603 - global _agents_traces_enabled + global _projects_traces_enabled global _trace_agents_content + global _trace_context_propagation_enabled _trace_agents_content = False - for api, method, _, _, _ in self._available_agents_apis_and_injectors(): + _trace_context_propagation_enabled = False + for api, method, _, _, _ in self._available_projects_apis_and_injectors(): if hasattr(getattr(api, method), "_original"): setattr(api, method, getattr(getattr(api, method), "_original")) - _agents_traces_enabled = False + _projects_traces_enabled = False def _is_instrumented(self): - """This function returns True if Agents API has already been instrumented + """This function returns True if Projects API has already been instrumented for tracing and False if it has not been instrumented. - :return: A value indicating whether the Agents API is currently instrumented or not. + :return: A value indicating whether the Projects API is currently instrumented or not. :rtype: bool """ - return _agents_traces_enabled + return _projects_traces_enabled def _set_enable_content_recording(self, enable_content_recording: bool = False) -> None: """This function sets the content recording value. @@ -1253,6 +1414,15 @@ def _is_content_recording_enabled(self) -> bool: """ return _trace_agents_content + def _set_enable_trace_context_propagation(self, enable_trace_context_propagation: bool = False) -> None: + """This function sets the trace context propagation value. + + :param enable_trace_context_propagation: Indicates whether automatic trace context propagation should be enabled. 
+ :type enable_trace_context_propagation: bool + """ + global _trace_context_propagation_enabled # pylint: disable=W0603 + _trace_context_propagation_enabled = enable_trace_context_propagation + def record_error(self, span, exc): # Set the span status to error if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] From 25906935161c03110e7d2bd68b56e2271d259dbf Mon Sep 17 00:00:00 2001 From: M-Hietala <78813398+M-Hietala@users.noreply.github.com> Date: Wed, 4 Feb 2026 12:56:15 -0600 Subject: [PATCH 06/10] small update to readme --- sdk/ai/azure-ai-projects/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md index 3c76ea962be0..07bc4d8c3b60 100644 --- a/sdk/ai/azure-ai-projects/README.md +++ b/sdk/ai/azure-ai-projects/README.md @@ -1024,7 +1024,7 @@ Trace context propagation allows client-side spans generated by the Projects SDK This feature ensures that all operations within a distributed trace share the same trace ID, providing end-to-end visibility across your application and Azure services in your observability backend (such as Azure Monitor). To enable trace context propagation set the `AZURE_TRACING_GEN_AI_ENABLE_TRACE_CONTEXT_PROPAGATION` environment variable to `true` -If the environment variable is not set and no value is provided with the `AIProjectInstrumentor().instrument()` call for the propagation parameter, the trace context propagation defaults to `false` (opt-in). +If the environment variable is not set and no value is provided with the `AIProjectInstrumentor().instrument()` call for the propagation parameter, the trace context propagation defaults to `false`. 
**Important Security and Privacy Considerations:** @@ -1033,7 +1033,7 @@ If the environment variable is not set and no value is provided with the `AIProj - **Request Correlation**: Trace IDs allow Azure services to correlate requests from the same session or user across multiple API calls, which may have privacy implications depending on your use case. - **Opt-in by Design**: This feature is disabled by default to give you explicit control over when trace context is propagated to external services. -Only enable trace context propagation after carefully reviewing your observability and security requirements. +Only enable trace context propagation after carefully reviewing your observability, privacy and security requirements. ### Enabling content recording From 6848458afea7445100a1038904493933015be2d8 Mon Sep 17 00:00:00 2001 From: M-Hietala <78813398+M-Hietala@users.noreply.github.com> Date: Wed, 4 Feb 2026 13:56:37 -0600 Subject: [PATCH 07/10] fix review findings --- .../agents/telemetry/test_responses_instrumentor_mcp.py | 6 ------ .../telemetry/test_responses_instrumentor_mcp_async.py | 8 -------- 2 files changed, 14 deletions(-) diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py index 4fbf04cfc4dd..4052a53eb0ec 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py @@ -456,7 +456,6 @@ def _test_sync_mcp_non_streaming_without_content_recording_impl(self, use_events # Validate first response span (MCP tool trigger) span1 = spans[0] expected_attributes_1 = [ - ("az.namespace", "Microsoft.CognitiveServices"), ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), @@ -541,7 +540,6 @@ def 
_test_sync_mcp_non_streaming_without_content_recording_impl(self, use_events # Validate second response span (approval response) span2 = spans[1] expected_attributes_2 = [ - ("az.namespace", "Microsoft.CognitiveServices"), ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), @@ -784,7 +782,6 @@ def _test_sync_mcp_streaming_with_content_recording_impl(self, use_events, **kwa assert response_id_1 is not None expected_attributes_1 = [ - ("az.namespace", "Microsoft.CognitiveServices"), ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), @@ -861,7 +858,6 @@ def _test_sync_mcp_streaming_with_content_recording_impl(self, use_events, **kwa assert response_id_2 is not None expected_attributes_2 = [ - ("az.namespace", "Microsoft.CognitiveServices"), ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), @@ -1062,7 +1058,6 @@ def _test_sync_mcp_streaming_without_content_recording_impl(self, use_events, ** assert response_id_1 is not None expected_attributes_1 = [ - ("az.namespace", "Microsoft.CognitiveServices"), ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), @@ -1137,7 +1132,6 @@ def _test_sync_mcp_streaming_without_content_recording_impl(self, use_events, ** assert response_id_2 is not None expected_attributes_2 = [ - ("az.namespace", "Microsoft.CognitiveServices"), ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py 
index d007365b9a5c..748e42785fe9 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py @@ -127,7 +127,6 @@ async def _test_async_mcp_non_streaming_with_content_recording_impl(self, use_ev # Validate first response span (MCP tool trigger) span1 = spans[0] expected_attributes_1 = [ - ("az.namespace", "Microsoft.CognitiveServices"), ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), @@ -216,7 +215,6 @@ async def _test_async_mcp_non_streaming_with_content_recording_impl(self, use_ev # Validate second response span (approval response) span2 = spans[1] expected_attributes_2 = [ - ("az.namespace", "Microsoft.CognitiveServices"), ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), @@ -458,7 +456,6 @@ async def _test_async_mcp_non_streaming_without_content_recording_impl(self, use # Validate first response span (MCP tool trigger) span1 = spans[0] expected_attributes_1 = [ - ("az.namespace", "Microsoft.CognitiveServices"), ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), @@ -542,7 +539,6 @@ async def _test_async_mcp_non_streaming_without_content_recording_impl(self, use # Validate second response span (approval response) span2 = spans[1] expected_attributes_2 = [ - ("az.namespace", "Microsoft.CognitiveServices"), ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), @@ -788,7 +784,6 @@ async def _test_async_mcp_streaming_with_content_recording_impl(self, use_events assert response_id_1 is not None expected_attributes_1 = [ - ("az.namespace", 
"Microsoft.CognitiveServices"), ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), @@ -866,7 +861,6 @@ async def _test_async_mcp_streaming_with_content_recording_impl(self, use_events assert response_id_2 is not None expected_attributes_2 = [ - ("az.namespace", "Microsoft.CognitiveServices"), ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), @@ -1069,7 +1063,6 @@ async def _test_async_mcp_streaming_without_content_recording_impl(self, use_eve assert response_id_1 is not None expected_attributes_1 = [ - ("az.namespace", "Microsoft.CognitiveServices"), ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), @@ -1145,7 +1138,6 @@ async def _test_async_mcp_streaming_without_content_recording_impl(self, use_eve assert response_id_2 is not None expected_attributes_2 = [ - ("az.namespace", "Microsoft.CognitiveServices"), ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.provider.name", "azure.openai"), From df5fd20de9f28998da41d86e9811dd9c0e4acd8f Mon Sep 17 00:00:00 2001 From: M-Hietala <78813398+M-Hietala@users.noreply.github.com> Date: Wed, 4 Feb 2026 14:25:54 -0600 Subject: [PATCH 08/10] updating test recording --- sdk/ai/azure-ai-projects/assets.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/ai/azure-ai-projects/assets.json b/sdk/ai/azure-ai-projects/assets.json index 24777c3b7249..af8c0b06a8c4 100644 --- a/sdk/ai/azure-ai-projects/assets.json +++ b/sdk/ai/azure-ai-projects/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "python", "TagPrefix": "python/ai/azure-ai-projects", - "Tag": "python/ai/azure-ai-projects_212aab4d9b" + "Tag": 
"python/ai/azure-ai-projects_92b666ecb8" } From 27d2fbac03b2ab2b573ab82d543bc0a2db320863 Mon Sep 17 00:00:00 2001 From: M-Hietala <78813398+M-Hietala@users.noreply.github.com> Date: Wed, 4 Feb 2026 15:08:36 -0600 Subject: [PATCH 09/10] changing provider name to microsoft.foundry --- sdk/ai/azure-ai-projects/CHANGELOG.md | 1 + .../telemetry/_ai_project_instrumentor.py | 4 +- .../telemetry/_responses_instrumentor.py | 20 +- .../azure/ai/projects/telemetry/_utils.py | 7 +- .../telemetry/test_ai_agents_instrumentor.py | 16 +- .../test_ai_agents_instrumentor_async.py | 16 +- .../telemetry/test_responses_instrumentor.py | 249 +++++++++--------- .../test_responses_instrumentor_async.py | 175 ++++++------ ...sponses_instrumentor_browser_automation.py | 9 +- ...s_instrumentor_browser_automation_async.py | 9 +- ...responses_instrumentor_code_interpreter.py | 9 +- ...ses_instrumentor_code_interpreter_async.py | 9 +- ...test_responses_instrumentor_file_search.py | 9 +- ...esponses_instrumentor_file_search_async.py | 9 +- .../test_responses_instrumentor_mcp.py | 17 +- .../test_responses_instrumentor_mcp_async.py | 17 +- .../test_responses_instrumentor_workflow.py | 9 +- ...t_responses_instrumentor_workflow_async.py | 9 +- 18 files changed, 303 insertions(+), 291 deletions(-) diff --git a/sdk/ai/azure-ai-projects/CHANGELOG.md b/sdk/ai/azure-ai-projects/CHANGELOG.md index ec12a9ef6788..9e6b24b66a3f 100644 --- a/sdk/ai/azure-ai-projects/CHANGELOG.md +++ b/sdk/ai/azure-ai-projects/CHANGELOG.md @@ -27,6 +27,7 @@ * Tracing: response generation operation names changed from "responses" to "chat" for model calls and from "responses" to "invoke_agent" for agent calls. * Tracing: response generation uses gen_ai.input.messages and gen_ai.output.messages attributes directly under the span instead of events. * Tracing: agent creation uses gen_ai.system.instructions attribute directly under the span instead of an event. 
+* Tracing: "gen_ai.provider.name" attribute value changed to "microsoft.foundry". ## 2.0.0b3 (2026-01-06) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_ai_project_instrumentor.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_ai_project_instrumentor.py index 73ebe98c6a47..4999adeae9ff 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_ai_project_instrumentor.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_ai_project_instrumentor.py @@ -21,7 +21,7 @@ from azure.core.settings import settings from azure.core.tracing import AbstractSpan from ._utils import ( - AZURE_AI_AGENTS_PROVIDER, + AGENTS_PROVIDER, ERROR_TYPE, GEN_AI_AGENT_DESCRIPTION, GEN_AI_AGENT_ID, @@ -377,7 +377,7 @@ def _create_event_attributes( run_step_last_error: Optional[Any] = None, usage: Optional[Any] = None, ) -> Dict[str, Any]: - attrs: Dict[str, Any] = {GEN_AI_PROVIDER_NAME: AZURE_AI_AGENTS_PROVIDER} + attrs: Dict[str, Any] = {GEN_AI_PROVIDER_NAME: AGENTS_PROVIDER} if thread_id: attrs[GEN_AI_THREAD_ID] = thread_id diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_responses_instrumentor.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_responses_instrumentor.py index 20cc36165c35..c1cf1edd2cad 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_responses_instrumentor.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_responses_instrumentor.py @@ -59,6 +59,7 @@ SPAN_NAME_INVOKE_AGENT, _get_use_message_events, start_span, + RESPONSES_PROVIDER, ) @@ -86,9 +87,6 @@ _trace_responses_content: bool = False _trace_binary_data: bool = False -# Azure OpenAI system identifier for traces -AZURE_OPENAI_SYSTEM = "azure.openai" - # Metrics instruments _operation_duration_histogram = None _token_usage_histogram = None @@ -242,7 +240,7 @@ def _record_operation_duration( attributes = { GEN_AI_OPERATION_NAME: operation_name, - GEN_AI_PROVIDER_NAME: AZURE_OPENAI_SYSTEM, + GEN_AI_PROVIDER_NAME: 
RESPONSES_PROVIDER, } if server_address: @@ -275,7 +273,7 @@ def _record_token_usage( attributes = { GEN_AI_OPERATION_NAME: operation_name, - GEN_AI_PROVIDER_NAME: AZURE_OPENAI_SYSTEM, + GEN_AI_PROVIDER_NAME: RESPONSES_PROVIDER, GEN_AI_TOKEN_TYPE: token_type, } @@ -531,7 +529,7 @@ def _create_event_attributes( conversation_id: Optional[str] = None, # pylint: disable=unused-argument message_role: Optional[str] = None, ) -> Dict[str, Any]: - attrs: Dict[str, Any] = {GEN_AI_PROVIDER_NAME: AZURE_OPENAI_SYSTEM} + attrs: Dict[str, Any] = {GEN_AI_PROVIDER_NAME: RESPONSES_PROVIDER} # Removed conversation_id from event attributes as requested - it's redundant # if conversation_id: # attrs[GEN_AI_CONVERSATION_ID] = conversation_id @@ -844,7 +842,7 @@ def _add_workflow_action_events( # Create event attributes event_attributes = { - GEN_AI_PROVIDER_NAME: AZURE_OPENAI_SYSTEM, + GEN_AI_PROVIDER_NAME: RESPONSES_PROVIDER, } # Build workflow action details object @@ -1584,7 +1582,7 @@ def start_responses_span( port=port, span_name=span_name, model=model, - gen_ai_provider=AZURE_OPENAI_SYSTEM, + gen_ai_provider=RESPONSES_PROVIDER, ) if span and span.span_instance.is_recording: @@ -3353,7 +3351,7 @@ def start_create_conversation_span( server_address=server_address, port=port, span_name=OperationName.CREATE_CONVERSATION.value, - gen_ai_provider=AZURE_OPENAI_SYSTEM, + gen_ai_provider=RESPONSES_PROVIDER, ) if span and span.span_instance.is_recording: @@ -3551,7 +3549,7 @@ def start_list_conversation_items_span( server_address=server_address, port=port, span_name=OperationName.LIST_CONVERSATION_ITEMS.value, - gen_ai_provider=AZURE_OPENAI_SYSTEM, + gen_ai_provider=RESPONSES_PROVIDER, ) if span and span.span_instance.is_recording: @@ -4200,7 +4198,7 @@ def _add_conversation_item_event( # pylint: disable=too-many-branches,too-many- # Create event attributes event_attributes = { - GEN_AI_PROVIDER_NAME: AZURE_OPENAI_SYSTEM, + GEN_AI_PROVIDER_NAME: RESPONSES_PROVIDER, 
GEN_AI_CONVERSATION_ITEM_ID: item_id, } diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_utils.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_utils.py index 4f5e713227bb..f41f2495b3b1 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_utils.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_utils.py @@ -101,7 +101,8 @@ # Constant attribute values AZURE_AI_AGENTS_SYSTEM = "az.ai.agents" -AZURE_AI_AGENTS_PROVIDER = "azure.ai.agents" +AGENTS_PROVIDER = "microsoft.foundry" +RESPONSES_PROVIDER = "microsoft.foundry" AGENT_TYPE_PROMPT = "prompt" AGENT_TYPE_WORKFLOW = "workflow" AGENT_TYPE_HOSTED = "hosted" @@ -176,7 +177,7 @@ def start_span( reasoning_summary: Optional[str] = None, structured_inputs: Optional[str] = None, gen_ai_system: Optional[str] = None, - gen_ai_provider: Optional[str] = AZURE_AI_AGENTS, + gen_ai_provider: Optional[str] = AGENTS_PROVIDER, kind: SpanKind = SpanKind.CLIENT, ) -> "Optional[AbstractSpan]": global _span_impl_type # pylint: disable=global-statement @@ -197,7 +198,7 @@ def start_span( if span and span.span_instance.is_recording: span.add_attribute(AZ_NAMESPACE, AZ_NAMESPACE_VALUE) - span.add_attribute(GEN_AI_PROVIDER_NAME, AZURE_AI_AGENTS) + span.add_attribute(GEN_AI_PROVIDER_NAME, AGENTS_PROVIDER) if gen_ai_provider: span.add_attribute(GEN_AI_PROVIDER_NAME, gen_ai_provider) diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor.py index 9fcade517890..b5e62189aede 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor.py @@ -51,7 +51,7 @@ GEN_AI_AGENT_WORKFLOW_EVENT, GEN_AI_CONVERSATION_ITEM_TYPE, AZURE_AI_AGENTS_SYSTEM, - AZURE_AI_AGENTS_PROVIDER, + AGENTS_PROVIDER, AGENT_TYPE_PROMPT, AGENT_TYPE_WORKFLOW, _set_use_message_events, @@ -221,7 +221,7 @@ def 
_test_agent_creation_with_tracing_content_recording_enabled_impl(self, use_e assert len(spans) == 1 span = spans[0] expected_attributes = [ - (GEN_AI_PROVIDER_NAME, AZURE_AI_AGENTS_PROVIDER), + (GEN_AI_PROVIDER_NAME, AGENTS_PROVIDER), (GEN_AI_OPERATION_NAME, "create_agent"), (SERVER_ADDRESS, ""), (GEN_AI_REQUEST_MODEL, model), @@ -261,7 +261,7 @@ def _test_agent_creation_with_tracing_content_recording_enabled_impl(self, use_e { "name": GEN_AI_SYSTEM_INSTRUCTION_EVENT, "attributes": { - GEN_AI_PROVIDER_NAME: AZURE_AI_AGENTS_PROVIDER, + GEN_AI_PROVIDER_NAME: AGENTS_PROVIDER, GEN_AI_EVENT_CONTENT: '[{"type": "text", "content": "You are a helpful AI assistant. Be polite and provide accurate information."}]', }, } @@ -338,7 +338,7 @@ def _test_agent_creation_with_tracing_content_recording_disabled_impl(self, use_ assert len(spans) == 1 span = spans[0] expected_attributes = [ - (GEN_AI_PROVIDER_NAME, AZURE_AI_AGENTS_PROVIDER), + (GEN_AI_PROVIDER_NAME, AGENTS_PROVIDER), (GEN_AI_OPERATION_NAME, "create_agent"), (SERVER_ADDRESS, ""), (GEN_AI_REQUEST_MODEL, model), @@ -366,7 +366,7 @@ def _test_agent_creation_with_tracing_content_recording_disabled_impl(self, use_ { "name": GEN_AI_SYSTEM_INSTRUCTION_EVENT, "attributes": { - GEN_AI_PROVIDER_NAME: AZURE_AI_AGENTS_PROVIDER, + GEN_AI_PROVIDER_NAME: AGENTS_PROVIDER, GEN_AI_EVENT_CONTENT: "[]", }, } @@ -451,7 +451,7 @@ def _test_workflow_agent_creation_impl(self, use_events: bool, content_recording assert len(spans) == 1 span = spans[0] expected_attributes = [ - (GEN_AI_PROVIDER_NAME, AZURE_AI_AGENTS_PROVIDER), + (GEN_AI_PROVIDER_NAME, AGENTS_PROVIDER), (GEN_AI_OPERATION_NAME, "create_agent"), (SERVER_ADDRESS, ""), (GEN_AI_AGENT_NAME, "test-workflow-agent"), @@ -575,7 +575,7 @@ def _test_agent_with_structured_output_with_instructions_impl( span = spans[0] expected_attributes = [ - (GEN_AI_PROVIDER_NAME, AZURE_AI_AGENTS_PROVIDER), + (GEN_AI_PROVIDER_NAME, AGENTS_PROVIDER), (GEN_AI_OPERATION_NAME, "create_agent"), (SERVER_ADDRESS, 
""), (GEN_AI_REQUEST_MODEL, model), @@ -759,7 +759,7 @@ def _test_agent_with_structured_output_without_instructions_impl( span = spans[0] expected_attributes = [ - (GEN_AI_PROVIDER_NAME, AZURE_AI_AGENTS_PROVIDER), + (GEN_AI_PROVIDER_NAME, AGENTS_PROVIDER), (GEN_AI_OPERATION_NAME, "create_agent"), (SERVER_ADDRESS, ""), (GEN_AI_REQUEST_MODEL, model), diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor_async.py index 82ac4fa58f18..bda4cd2aba04 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_ai_agents_instrumentor_async.py @@ -48,7 +48,7 @@ GEN_AI_AGENT_WORKFLOW_EVENT, GEN_AI_CONVERSATION_ITEM_TYPE, AZURE_AI_AGENTS_SYSTEM, - AZURE_AI_AGENTS_PROVIDER, + AGENTS_PROVIDER, AGENT_TYPE_PROMPT, AGENT_TYPE_WORKFLOW, _set_use_message_events, @@ -93,7 +93,7 @@ async def _test_create_agent_with_tracing_content_recording_enabled_impl(self, u assert len(spans) == 1 span = spans[0] expected_attributes = [ - (GEN_AI_PROVIDER_NAME, AZURE_AI_AGENTS_PROVIDER), + (GEN_AI_PROVIDER_NAME, AGENTS_PROVIDER), (GEN_AI_OPERATION_NAME, "create_agent"), (SERVER_ADDRESS, ""), (GEN_AI_REQUEST_MODEL, model), @@ -132,7 +132,7 @@ async def _test_create_agent_with_tracing_content_recording_enabled_impl(self, u { "name": GEN_AI_SYSTEM_INSTRUCTION_EVENT, "attributes": { - GEN_AI_PROVIDER_NAME: AZURE_AI_AGENTS_PROVIDER, + GEN_AI_PROVIDER_NAME: AGENTS_PROVIDER, GEN_AI_EVENT_CONTENT: '[{"type": "text", "content": "You are a helpful AI assistant. 
Always be polite and provide accurate information."}]', }, } @@ -208,7 +208,7 @@ async def _test_agent_creation_with_tracing_content_recording_disabled_impl(self assert len(spans) == 1 span = spans[0] expected_attributes = [ - (GEN_AI_PROVIDER_NAME, AZURE_AI_AGENTS_PROVIDER), + (GEN_AI_PROVIDER_NAME, AGENTS_PROVIDER), (GEN_AI_OPERATION_NAME, "create_agent"), (SERVER_ADDRESS, ""), (GEN_AI_REQUEST_MODEL, model), @@ -234,7 +234,7 @@ async def _test_agent_creation_with_tracing_content_recording_disabled_impl(self { "name": GEN_AI_SYSTEM_INSTRUCTION_EVENT, "attributes": { - GEN_AI_PROVIDER_NAME: AZURE_AI_AGENTS_PROVIDER, + GEN_AI_PROVIDER_NAME: AGENTS_PROVIDER, GEN_AI_EVENT_CONTENT: "[]", }, } @@ -319,7 +319,7 @@ async def _test_workflow_agent_creation_impl(self, use_events: bool, content_rec assert len(spans) == 1 span = spans[0] expected_attributes = [ - (GEN_AI_PROVIDER_NAME, AZURE_AI_AGENTS_PROVIDER), + (GEN_AI_PROVIDER_NAME, AGENTS_PROVIDER), (GEN_AI_OPERATION_NAME, "create_agent"), (SERVER_ADDRESS, ""), (GEN_AI_AGENT_NAME, "test-workflow-agent-async"), @@ -446,7 +446,7 @@ async def _test_agent_with_structured_output_with_instructions_impl( span = spans[0] expected_attributes = [ - (GEN_AI_PROVIDER_NAME, AZURE_AI_AGENTS_PROVIDER), + (GEN_AI_PROVIDER_NAME, AGENTS_PROVIDER), (GEN_AI_OPERATION_NAME, "create_agent"), (SERVER_ADDRESS, ""), (GEN_AI_REQUEST_MODEL, model), @@ -631,7 +631,7 @@ async def _test_agent_with_structured_output_without_instructions_impl( span = spans[0] expected_attributes = [ - (GEN_AI_PROVIDER_NAME, AZURE_AI_AGENTS_PROVIDER), + (GEN_AI_PROVIDER_NAME, AGENTS_PROVIDER), (GEN_AI_OPERATION_NAME, "create_agent"), (SERVER_ADDRESS, ""), (GEN_AI_REQUEST_MODEL, model), diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor.py index 9dc173b0ba7b..b70c801f7bd7 100644 --- 
a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor.py @@ -15,6 +15,7 @@ SPAN_NAME_CHAT, SPAN_NAME_INVOKE_AGENT, _set_use_message_events, + RESPONSES_PROVIDER, ) from azure.core.settings import settings from gen_ai_trace_verifier import GenAiTraceVerifier @@ -251,7 +252,7 @@ def _test_sync_non_streaming_with_content_recording_impl(self, use_events, **kwa ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -277,14 +278,14 @@ def _test_sync_non_streaming_with_content_recording_impl(self, use_events, **kwa { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "Write a short poem about AI"}]}]', }, }, { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', }, }, @@ -373,7 +374,7 @@ def _test_sync_non_streaming_without_content_recording_impl(self, use_events, ** ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -399,14 +400,14 @@ def _test_sync_non_streaming_without_content_recording_impl(self, use_events, ** { 
"name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}]}]', }, }, { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', }, }, @@ -504,7 +505,7 @@ def _test_sync_streaming_with_content_recording_impl(self, use_events, **kwargs) ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -529,14 +530,14 @@ def _test_sync_streaming_with_content_recording_impl(self, use_events, **kwargs) { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "Write a short poem about AI"}]}]', }, }, { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', }, }, @@ -645,7 +646,7 @@ def test_sync_conversations_create(self, **kwargs): # Check basic span attributes expected_attributes = [ ("gen_ai.operation.name", "create_conversation"), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ] @@ -700,7 +701,7 @@ def test_sync_list_conversation_items_with_content_recording(self, **kwargs): expected_attributes = 
[ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", "list_conversation_items"), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ] @@ -712,7 +713,7 @@ def test_sync_list_conversation_items_with_content_recording(self, **kwargs): { "name": "gen_ai.conversation.item", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, "gen_ai.conversation.item.id": "*", "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}]}]', }, @@ -720,7 +721,7 @@ def test_sync_list_conversation_items_with_content_recording(self, **kwargs): { "name": "gen_ai.conversation.item", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, "gen_ai.conversation.item.id": "*", "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "Hello"}]}]', }, @@ -777,7 +778,7 @@ def test_sync_list_conversation_items_without_content_recording(self, **kwargs): expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", "list_conversation_items"), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ] @@ -789,7 +790,7 @@ def test_sync_list_conversation_items_without_content_recording(self, **kwargs): { "name": "gen_ai.conversation.item", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, "gen_ai.conversation.item.id": "*", "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}]}]', }, @@ -797,7 +798,7 @@ def test_sync_list_conversation_items_without_content_recording(self, **kwargs): { "name": "gen_ai.conversation.item", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": 
RESPONSES_PROVIDER, "gen_ai.conversation.item.id": "*", "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}]}]', }, @@ -881,7 +882,7 @@ def _test_sync_non_streaming_without_conversation_impl(self, use_events, **kwarg ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.response.model", deployment_name), ("gen_ai.response.id", ""), @@ -905,14 +906,14 @@ def _test_sync_non_streaming_without_conversation_impl(self, use_events, **kwarg { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "Write a short poem about AI"}]}]', }, }, { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', }, }, @@ -1041,7 +1042,7 @@ def _test_sync_function_tool_with_content_recording_non_streaming_impl(self, use ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.agent.name", agent.name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -1066,14 +1067,14 @@ def _test_sync_function_tool_with_content_recording_non_streaming_impl(self, use { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "What\'s the weather in 
Seattle?"}]}]', }, }, { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "tool_call", "content": {"type": "function_call", "id": "*", "function": {"name": "get_weather", "arguments": "*"}}}]}]', }, }, @@ -1108,7 +1109,7 @@ def _test_sync_function_tool_with_content_recording_non_streaming_impl(self, use ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.agent.name", agent.name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -1133,14 +1134,14 @@ def _test_sync_function_tool_with_content_recording_non_streaming_impl(self, use { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, "gen_ai.event.content": '[{"role": "tool", "parts": [{"type": "tool_call_output", "content": {"type": "function_call_output", "id": "*", "output": {"temperature": "72°F", "condition": "sunny"}}}]}]', }, }, { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', }, }, @@ -1282,7 +1283,7 @@ def _test_sync_function_tool_with_content_recording_streaming_impl(self, use_eve ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.agent.name", agent.name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -1309,7 +1310,7 @@ def 
_test_sync_function_tool_with_content_recording_streaming_impl(self, use_eve { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "What\'s the weather in Seattle?"}]}]', }, @@ -1317,7 +1318,7 @@ def _test_sync_function_tool_with_content_recording_streaming_impl(self, use_eve { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "tool_call", "content": {"type": "function_call", "id": "*", "function": {"name": "get_weather", "arguments": "*"}}}]}]', }, @@ -1332,7 +1333,7 @@ def _test_sync_function_tool_with_content_recording_streaming_impl(self, use_eve ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.agent.name", agent.name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -1359,7 +1360,7 @@ def _test_sync_function_tool_with_content_recording_streaming_impl(self, use_eve { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "tool", # Commented out - now in event content "gen_ai.event.content": '[{"role": "tool", "parts": [{"type": "tool_call_output", "content": {"type": "function_call_output", "id": "*", "output": {"temperature": "72°F", "condition": "sunny"}}}]}]', }, @@ -1367,7 +1368,7 @@ def _test_sync_function_tool_with_content_recording_streaming_impl(self, use_eve { 
"name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', }, @@ -1488,7 +1489,7 @@ def _test_sync_function_tool_without_content_recording_non_streaming_impl(self, ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.agent.name", agent.name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -1512,7 +1513,7 @@ def _test_sync_function_tool_without_content_recording_non_streaming_impl(self, { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}]}]', }, @@ -1520,7 +1521,7 @@ def _test_sync_function_tool_without_content_recording_non_streaming_impl(self, { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "tool_call", "content": {"type": "function_call", "id": "*"}}]}]', }, @@ -1536,7 +1537,7 @@ def _test_sync_function_tool_without_content_recording_non_streaming_impl(self, ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.agent.name", agent.name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), 
("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -1563,7 +1564,7 @@ def _test_sync_function_tool_without_content_recording_non_streaming_impl(self, { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "tool", # Commented out - now in event content "gen_ai.event.content": '[{"role": "tool", "parts": [{"type": "tool_call_output", "content": {"type": "function_call_output", "id": "*"}}]}]', }, @@ -1571,7 +1572,7 @@ def _test_sync_function_tool_without_content_recording_non_streaming_impl(self, { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', }, @@ -1696,7 +1697,7 @@ def _test_sync_function_tool_without_content_recording_streaming_impl(self, use_ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.agent.name", agent.name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -1720,7 +1721,7 @@ def _test_sync_function_tool_without_content_recording_streaming_impl(self, use_ { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}]}]', }, @@ -1728,7 +1729,7 @@ def _test_sync_function_tool_without_content_recording_streaming_impl(self, use_ { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": 
"azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "tool_call", "content": {"type": "function_call", "id": "*"}}]}]', }, @@ -1744,7 +1745,7 @@ def _test_sync_function_tool_without_content_recording_streaming_impl(self, use_ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.agent.name", agent.name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -1771,7 +1772,7 @@ def _test_sync_function_tool_without_content_recording_streaming_impl(self, use_ { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "tool", # Commented out - now in event content "gen_ai.event.content": '[{"role": "tool", "parts": [{"type": "tool_call_output", "content": {"type": "function_call_output", "id": "*"}}]}]', }, @@ -1779,7 +1780,7 @@ def _test_sync_function_tool_without_content_recording_streaming_impl(self, use_ { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', }, @@ -1920,7 +1921,7 @@ def test_sync_function_tool_list_conversation_items_with_content_recording(self, expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", "list_conversation_items"), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), 
] @@ -1933,7 +1934,7 @@ def test_sync_function_tool_list_conversation_items_with_content_recording(self, { "name": "gen_ai.conversation.item", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, "gen_ai.conversation.item.id": "*", "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}]}]', }, @@ -1941,7 +1942,7 @@ def test_sync_function_tool_list_conversation_items_with_content_recording(self, { "name": "gen_ai.conversation.item", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, "gen_ai.conversation.item.id": "*", "gen_ai.event.content": '[{"role": "tool", "parts": [{"type": "tool_call_output", "content": {"type": "function_call_output", "id": "*", "output": {"temperature": "72°F", "condition": "sunny"}}}]}]', }, @@ -1949,7 +1950,7 @@ def test_sync_function_tool_list_conversation_items_with_content_recording(self, { "name": "gen_ai.conversation.item", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, "gen_ai.conversation.item.id": "*", "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "tool_call", "content": {"type": "function_call", "id": "*", "function": {"name": "get_weather", "arguments": "*"}}}]}]', }, @@ -1957,7 +1958,7 @@ def test_sync_function_tool_list_conversation_items_with_content_recording(self, { "name": "gen_ai.conversation.item", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, "gen_ai.conversation.item.id": "*", "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "What\'s the weather in Seattle?"}]}]', }, @@ -2070,7 +2071,7 @@ def test_sync_function_tool_list_conversation_items_without_content_recording(se expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", "list_conversation_items"), - 
("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ] @@ -2083,7 +2084,7 @@ def test_sync_function_tool_list_conversation_items_without_content_recording(se { "name": "gen_ai.conversation.item", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, "gen_ai.conversation.item.id": "*", "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}]}]', }, @@ -2091,7 +2092,7 @@ def test_sync_function_tool_list_conversation_items_without_content_recording(se { "name": "gen_ai.conversation.item", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, "gen_ai.conversation.item.id": "*", "gen_ai.event.content": '[{"role": "tool", "parts": [{"type": "tool_call_output", "content": {"type": "function_call_output", "id": "*"}}]}]', }, @@ -2099,7 +2100,7 @@ def test_sync_function_tool_list_conversation_items_without_content_recording(se { "name": "gen_ai.conversation.item", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, "gen_ai.conversation.item.id": "*", "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "tool_call", "content": {"type": "function_call", "id": "*"}}]}]', }, @@ -2107,7 +2108,7 @@ def test_sync_function_tool_list_conversation_items_without_content_recording(se { "name": "gen_ai.conversation.item", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, "gen_ai.conversation.item.id": "*", "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}]}]', }, @@ -2172,7 +2173,7 @@ def test_sync_multiple_text_inputs_with_content_recording_non_streaming(self, ** ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", 
"azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -2188,7 +2189,7 @@ def test_sync_multiple_text_inputs_with_content_recording_non_streaming(self, ** { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "Hello"}]}]', }, @@ -2196,7 +2197,7 @@ def test_sync_multiple_text_inputs_with_content_recording_non_streaming(self, ** { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "Write a haiku about Python"}]}]', }, @@ -2204,7 +2205,7 @@ def test_sync_multiple_text_inputs_with_content_recording_non_streaming(self, ** { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', }, @@ -2277,7 +2278,7 @@ def test_sync_multiple_text_inputs_with_content_recording_streaming(self, **kwar ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -2293,7 +2294,7 @@ def 
test_sync_multiple_text_inputs_with_content_recording_streaming(self, **kwar { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "Hello"}]}]', }, @@ -2301,7 +2302,7 @@ def test_sync_multiple_text_inputs_with_content_recording_streaming(self, **kwar { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "Write a haiku about Python"}]}]', }, @@ -2309,7 +2310,7 @@ def test_sync_multiple_text_inputs_with_content_recording_streaming(self, **kwar { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', }, @@ -2374,7 +2375,7 @@ def test_sync_multiple_text_inputs_without_content_recording_non_streaming(self, ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -2390,7 +2391,7 @@ def test_sync_multiple_text_inputs_without_content_recording_non_streaming(self, { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out 
- now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}]}]', }, @@ -2398,7 +2399,7 @@ def test_sync_multiple_text_inputs_without_content_recording_non_streaming(self, { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}]}]', }, @@ -2406,7 +2407,7 @@ def test_sync_multiple_text_inputs_without_content_recording_non_streaming(self, { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', }, @@ -2474,7 +2475,7 @@ def test_sync_multiple_text_inputs_without_content_recording_streaming(self, **k ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -2490,7 +2491,7 @@ def test_sync_multiple_text_inputs_without_content_recording_streaming(self, **k { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}]}]', }, @@ -2498,7 +2499,7 @@ def test_sync_multiple_text_inputs_without_content_recording_streaming(self, **k { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, 
# "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}]}]', }, @@ -2506,7 +2507,7 @@ def test_sync_multiple_text_inputs_without_content_recording_streaming(self, **k { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', }, @@ -2565,7 +2566,7 @@ def _test_image_only_content_off_binary_off_non_streaming_impl(self, use_events, ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -2589,7 +2590,7 @@ def _test_image_only_content_off_binary_off_non_streaming_impl(self, use_events, { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "image"}]}]', }, @@ -2597,7 +2598,7 @@ def _test_image_only_content_off_binary_off_non_streaming_impl(self, use_events, { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', }, @@ -2669,7 +2670,7 @@ def _test_image_only_content_off_binary_on_non_streaming_impl(self, use_events, ("az.namespace", "Microsoft.CognitiveServices"), 
("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -2693,7 +2694,7 @@ def _test_image_only_content_off_binary_on_non_streaming_impl(self, use_events, { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "image"}]}]', }, @@ -2701,7 +2702,7 @@ def _test_image_only_content_off_binary_on_non_streaming_impl(self, use_events, { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', }, @@ -2773,7 +2774,7 @@ def _test_image_only_content_on_binary_off_non_streaming_impl(self, use_events, ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -2797,7 +2798,7 @@ def _test_image_only_content_on_binary_off_non_streaming_impl(self, use_events, { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role":"user","parts":[{"type":"image"}]}]', }, @@ -2805,7 +2806,7 @@ def 
_test_image_only_content_on_binary_off_non_streaming_impl(self, use_events, { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', }, @@ -2877,7 +2878,7 @@ def _test_image_only_content_on_binary_on_non_streaming_impl(self, use_events, * ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -2901,7 +2902,7 @@ def _test_image_only_content_on_binary_on_non_streaming_impl(self, use_events, * { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": f'[{{"role":"user","parts":[{{"type":"image","content":"data:image/png;base64,{TEST_IMAGE_BASE64}"}}]}}]', }, @@ -2909,7 +2910,7 @@ def _test_image_only_content_on_binary_on_non_streaming_impl(self, use_events, * { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', }, @@ -2990,7 +2991,7 @@ def _test_text_and_image_content_off_binary_off_non_streaming_impl(self, use_eve ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - 
("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -3014,7 +3015,7 @@ def _test_text_and_image_content_off_binary_off_non_streaming_impl(self, use_eve { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}, {"type": "image"}]}]', }, @@ -3022,7 +3023,7 @@ def _test_text_and_image_content_off_binary_off_non_streaming_impl(self, use_eve { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', }, @@ -3098,7 +3099,7 @@ def _test_text_and_image_content_off_binary_on_non_streaming_impl(self, use_even ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -3123,7 +3124,7 @@ def _test_text_and_image_content_off_binary_on_non_streaming_impl(self, use_even { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}, {"type": "image"}]}]', }, @@ -3131,7 +3132,7 @@ def _test_text_and_image_content_off_binary_on_non_streaming_impl(self, 
use_even { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', }, @@ -3207,7 +3208,7 @@ def _test_text_and_image_content_on_binary_off_non_streaming_impl(self, use_even ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -3231,7 +3232,7 @@ def _test_text_and_image_content_on_binary_off_non_streaming_impl(self, use_even { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role":"user","parts":[{"type":"text","content":"What is shown in this image?"},{"type":"image"}]}]', }, @@ -3239,7 +3240,7 @@ def _test_text_and_image_content_on_binary_off_non_streaming_impl(self, use_even { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', }, @@ -3315,7 +3316,7 @@ def _test_text_and_image_content_on_binary_on_non_streaming_impl(self, use_event ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", 
RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -3339,7 +3340,7 @@ def _test_text_and_image_content_on_binary_on_non_streaming_impl(self, use_event { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": f'[{{"role":"user","parts":[{{"type":"text","content":"What is shown in this image?"}},{{"type":"image","content":"data:image/png;base64,{TEST_IMAGE_BASE64}"}}]}}]', }, @@ -3347,7 +3348,7 @@ def _test_text_and_image_content_on_binary_on_non_streaming_impl(self, use_event { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', }, @@ -3432,7 +3433,7 @@ def _test_image_only_content_off_binary_off_streaming_impl(self, use_events, **k ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -3456,7 +3457,7 @@ def _test_image_only_content_off_binary_off_streaming_impl(self, use_events, **k { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "image"}]}]', }, @@ -3464,7 +3465,7 @@ def 
_test_image_only_content_off_binary_off_streaming_impl(self, use_events, **k { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', }, @@ -3544,7 +3545,7 @@ def _test_image_only_content_off_binary_on_streaming_impl(self, use_events, **kw ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -3568,7 +3569,7 @@ def _test_image_only_content_off_binary_on_streaming_impl(self, use_events, **kw { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "image"}]}]', }, @@ -3576,7 +3577,7 @@ def _test_image_only_content_off_binary_on_streaming_impl(self, use_events, **kw { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', }, @@ -3656,7 +3657,7 @@ def _test_image_only_content_on_binary_off_streaming_impl(self, use_events, **kw ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), 
("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -3680,7 +3681,7 @@ def _test_image_only_content_on_binary_off_streaming_impl(self, use_events, **kw { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role":"user","parts":[{"type":"image"}]}]', }, @@ -3688,7 +3689,7 @@ def _test_image_only_content_on_binary_off_streaming_impl(self, use_events, **kw { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', }, @@ -3768,7 +3769,7 @@ def _test_image_only_content_on_binary_on_streaming_impl(self, use_events, **kwa ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -3792,7 +3793,7 @@ def _test_image_only_content_on_binary_on_streaming_impl(self, use_events, **kwa { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": f'[{{"role":"user","parts":[{{"type":"image","content":"data:image/png;base64,{TEST_IMAGE_BASE64}"}}]}}]', }, @@ -3800,7 +3801,7 @@ def _test_image_only_content_on_binary_on_streaming_impl(self, use_events, **kwa { "name": "gen_ai.output.messages", "attributes": { - 
"gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', }, @@ -3888,7 +3889,7 @@ def _test_text_and_image_content_off_binary_off_streaming_impl(self, use_events, ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -3913,7 +3914,7 @@ def _test_text_and_image_content_off_binary_off_streaming_impl(self, use_events, { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}, {"type": "image"}]}]', }, @@ -3921,7 +3922,7 @@ def _test_text_and_image_content_off_binary_off_streaming_impl(self, use_events, { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', }, @@ -4005,7 +4006,7 @@ def _test_text_and_image_content_off_binary_on_streaming_impl(self, use_events, ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", 
deployment_name), @@ -4030,7 +4031,7 @@ def _test_text_and_image_content_off_binary_on_streaming_impl(self, use_events, { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}, {"type": "image"}]}]', }, @@ -4038,7 +4039,7 @@ def _test_text_and_image_content_off_binary_on_streaming_impl(self, use_events, { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', }, @@ -4122,7 +4123,7 @@ def _test_text_and_image_content_on_binary_off_streaming_impl(self, use_events, ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -4146,7 +4147,7 @@ def _test_text_and_image_content_on_binary_off_streaming_impl(self, use_events, { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role":"user","parts":[{"type":"text","content":"What is shown in this image?"},{"type":"image"}]}]', }, @@ -4154,7 +4155,7 @@ def _test_text_and_image_content_on_binary_off_streaming_impl(self, use_events, { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # 
"gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', }, @@ -4238,7 +4239,7 @@ def _test_text_and_image_content_on_binary_on_streaming_impl(self, use_events, * ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -4262,7 +4263,7 @@ def _test_text_and_image_content_on_binary_on_streaming_impl(self, use_events, * { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": f'[{{"role":"user","parts":[{{"type":"text","content":"What is shown in this image?"}},{{"type":"image","content":"data:image/png;base64,{TEST_IMAGE_BASE64}"}}]}}]', }, @@ -4270,7 +4271,7 @@ def _test_text_and_image_content_on_binary_on_streaming_impl(self, use_events, * { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', }, @@ -4337,7 +4338,7 @@ def test_responses_stream_method_with_content_recording(self, **kwargs): ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), 
("gen_ai.response.model", deployment_name), @@ -4390,7 +4391,7 @@ def test_responses_stream_method_without_content_recording(self, **kwargs): ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -4494,7 +4495,7 @@ def test_responses_stream_method_with_tools_with_content_recording(self, **kwarg ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -4610,7 +4611,7 @@ def test_responses_stream_method_with_tools_without_content_recording(self, **kw ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -4629,7 +4630,7 @@ def test_responses_stream_method_with_tools_without_content_recording(self, **kw ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -5345,7 +5346,7 @@ def test_prompt_agent_with_responses_non_streaming(self, **kwargs): ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.agent.name", 
agent.name), ("gen_ai.agent.id", agent.id), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -5424,7 +5425,7 @@ def test_prompt_agent_with_responses_streaming(self, **kwargs): ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.agent.name", agent.name), ("gen_ai.agent.id", agent.id), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_async.py index ddb6f23d7cff..be30ffd057bf 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_async.py @@ -14,6 +14,7 @@ SPAN_NAME_CHAT, SPAN_NAME_INVOKE_AGENT, _set_use_message_events, + RESPONSES_PROVIDER, ) from azure.ai.projects.models import FunctionTool, PromptAgentDefinition from azure.core.settings import settings @@ -89,7 +90,7 @@ async def _test_async_non_streaming_with_content_recording_impl(self, use_events ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -113,7 +114,7 @@ async def _test_async_non_streaming_with_content_recording_impl(self, use_events { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": 
"user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "Write a short poem about AI"}]}]', }, @@ -121,7 +122,7 @@ async def _test_async_non_streaming_with_content_recording_impl(self, use_events { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', }, @@ -203,7 +204,7 @@ async def _test_async_streaming_with_content_recording_impl(self, use_events, ** ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -227,7 +228,7 @@ async def _test_async_streaming_with_content_recording_impl(self, use_events, ** { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "Write a short poem about AI"}]}]', }, @@ -235,7 +236,7 @@ async def _test_async_streaming_with_content_recording_impl(self, use_events, ** { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', }, @@ -298,7 +299,7 @@ async def test_async_conversations_create(self, 
**kwargs): expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", "create_conversation"), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ] @@ -357,7 +358,7 @@ async def test_async_list_conversation_items_with_content_recording(self, **kwar expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", "list_conversation_items"), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ] @@ -369,7 +370,7 @@ async def test_async_list_conversation_items_with_content_recording(self, **kwar { "name": "gen_ai.conversation.item", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, "gen_ai.conversation.item.id": "*", "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}]}]', }, @@ -377,7 +378,7 @@ async def test_async_list_conversation_items_with_content_recording(self, **kwar { "name": "gen_ai.conversation.item", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, "gen_ai.conversation.item.id": "*", "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "Hello"}]}]', }, @@ -514,7 +515,7 @@ async def _test_async_function_tool_with_content_recording_streaming_impl(self, ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.agent.name", agent.name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -538,7 +539,7 @@ async def _test_async_function_tool_with_content_recording_streaming_impl(self, { "name": 
"gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "What\'s the weather in Seattle?"}]}]', }, @@ -546,7 +547,7 @@ async def _test_async_function_tool_with_content_recording_streaming_impl(self, { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "tool_call", "content": {"type": "function_call", "id": "*", "function": {"name": "get_weather", "arguments": "*"}}}]}]', }, @@ -561,7 +562,7 @@ async def _test_async_function_tool_with_content_recording_streaming_impl(self, ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.agent.name", agent.name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -586,7 +587,7 @@ async def _test_async_function_tool_with_content_recording_streaming_impl(self, { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "tool", # Commented out - now in event content "gen_ai.event.content": '[{"role": "tool", "parts": [{"type": "tool_call_output", "content": {"type": "function_call_output", "id": "*", "output": {"temperature": "72°F", "condition": "sunny"}}}]}]', }, @@ -594,7 +595,7 @@ async def _test_async_function_tool_with_content_recording_streaming_impl(self, { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + 
"gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', }, @@ -739,7 +740,7 @@ async def _test_async_function_tool_without_content_recording_streaming_impl(sel ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.agent.name", agent.name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -763,7 +764,7 @@ async def _test_async_function_tool_without_content_recording_streaming_impl(sel { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}]}]', }, @@ -771,7 +772,7 @@ async def _test_async_function_tool_without_content_recording_streaming_impl(sel { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "tool_call", "content": {"type": "function_call", "id": "*"}}]}]', }, @@ -786,7 +787,7 @@ async def _test_async_function_tool_without_content_recording_streaming_impl(sel ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.agent.name", agent.name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -811,7 +812,7 @@ 
async def _test_async_function_tool_without_content_recording_streaming_impl(sel { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "tool", # Commented out - now in event content "gen_ai.event.content": '[{"role": "tool", "parts": [{"type": "tool_call_output", "content": {"type": "function_call_output", "id": "*"}}]}]', }, @@ -819,7 +820,7 @@ async def _test_async_function_tool_without_content_recording_streaming_impl(sel { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', }, @@ -898,7 +899,7 @@ async def test_async_multiple_text_inputs_with_content_recording_non_streaming(s ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -914,7 +915,7 @@ async def test_async_multiple_text_inputs_with_content_recording_non_streaming(s { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "Hello"}]}]', }, @@ -922,7 +923,7 @@ async def test_async_multiple_text_inputs_with_content_recording_non_streaming(s { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # 
Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "Write a haiku about Python"}]}]', }, @@ -930,7 +931,7 @@ async def test_async_multiple_text_inputs_with_content_recording_non_streaming(s { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', }, @@ -1003,7 +1004,7 @@ async def test_async_multiple_text_inputs_with_content_recording_streaming(self, ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -1019,7 +1020,7 @@ async def test_async_multiple_text_inputs_with_content_recording_streaming(self, { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "Hello"}]}]', }, @@ -1027,7 +1028,7 @@ async def test_async_multiple_text_inputs_with_content_recording_streaming(self, { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "Write a haiku about Python"}]}]', }, @@ -1035,7 +1036,7 @@ async def test_async_multiple_text_inputs_with_content_recording_streaming(self, { 
"name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', }, @@ -1100,7 +1101,7 @@ async def test_async_multiple_text_inputs_without_content_recording_non_streamin ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -1116,7 +1117,7 @@ async def test_async_multiple_text_inputs_without_content_recording_non_streamin { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}]}]', }, @@ -1124,7 +1125,7 @@ async def test_async_multiple_text_inputs_without_content_recording_non_streamin { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}]}]', }, @@ -1132,7 +1133,7 @@ async def test_async_multiple_text_inputs_without_content_recording_non_streamin { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', }, @@ 
-1199,7 +1200,7 @@ async def test_async_image_only_content_off_binary_off_non_streaming(self, **kwa { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "image"}]}]', }, @@ -1207,7 +1208,7 @@ async def test_async_image_only_content_off_binary_off_non_streaming(self, **kwa { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', }, @@ -1269,7 +1270,7 @@ async def test_async_image_only_content_off_binary_on_non_streaming(self, **kwar { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "image"}]}]', }, @@ -1277,7 +1278,7 @@ async def test_async_image_only_content_off_binary_on_non_streaming(self, **kwar { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', }, @@ -1339,7 +1340,7 @@ async def test_async_image_only_content_on_binary_off_non_streaming(self, **kwar { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": 
'[{"role":"user","parts":[{"type":"image"}]}]', }, @@ -1347,7 +1348,7 @@ async def test_async_image_only_content_on_binary_off_non_streaming(self, **kwar { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', }, @@ -1409,7 +1410,7 @@ async def test_async_image_only_content_on_binary_on_non_streaming(self, **kwarg { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": f'[{{"role":"user","parts":[{{"type":"image","content":"data:image/png;base64,{TEST_IMAGE_BASE64}"}}]}}]', }, @@ -1417,7 +1418,7 @@ async def test_async_image_only_content_on_binary_on_non_streaming(self, **kwarg { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', }, @@ -1488,7 +1489,7 @@ async def test_async_text_and_image_content_off_binary_off_non_streaming(self, * { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}, {"type": "image"}]}]', }, @@ -1496,7 +1497,7 @@ async def test_async_text_and_image_content_off_binary_off_non_streaming(self, * { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + 
"gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', }, @@ -1562,7 +1563,7 @@ async def test_async_text_and_image_content_off_binary_on_non_streaming(self, ** { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}, {"type": "image"}]}]', }, @@ -1570,7 +1571,7 @@ async def test_async_text_and_image_content_off_binary_on_non_streaming(self, ** { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', }, @@ -1636,7 +1637,7 @@ async def test_async_text_and_image_content_on_binary_off_non_streaming(self, ** { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role":"user","parts":[{"type":"text","content":"What is shown in this image?"},{"type":"image"}]}]', }, @@ -1644,7 +1645,7 @@ async def test_async_text_and_image_content_on_binary_off_non_streaming(self, ** { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', }, @@ -1710,7 +1711,7 @@ async def 
test_async_text_and_image_content_on_binary_on_non_streaming(self, **k { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": f'[{{"role":"user","parts":[{{"type":"text","content":"What is shown in this image?"}},{{"type":"image","content":"data:image/png;base64,{TEST_IMAGE_BASE64}"}}]}}]', }, @@ -1718,7 +1719,7 @@ async def test_async_text_and_image_content_on_binary_on_non_streaming(self, **k { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', }, @@ -1793,7 +1794,7 @@ async def test_async_image_only_content_off_binary_off_streaming(self, **kwargs) { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "image"}]}]', }, @@ -1801,7 +1802,7 @@ async def test_async_image_only_content_off_binary_off_streaming(self, **kwargs) { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', }, @@ -1871,7 +1872,7 @@ async def test_async_image_only_content_off_binary_on_streaming(self, **kwargs): { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # 
"gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "image"}]}]', }, @@ -1879,7 +1880,7 @@ async def test_async_image_only_content_off_binary_on_streaming(self, **kwargs): { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', }, @@ -1949,7 +1950,7 @@ async def test_async_image_only_content_on_binary_off_streaming(self, **kwargs): { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role":"user","parts":[{"type":"image"}]}]', }, @@ -1957,7 +1958,7 @@ async def test_async_image_only_content_on_binary_off_streaming(self, **kwargs): { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', }, @@ -2027,7 +2028,7 @@ async def test_async_image_only_content_on_binary_on_streaming(self, **kwargs): { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": f'[{{"role":"user","parts":[{{"type":"image","content":"data:image/png;base64,{TEST_IMAGE_BASE64}"}}]}}]', }, @@ -2035,7 +2036,7 @@ async def test_async_image_only_content_on_binary_on_streaming(self, **kwargs): { "name": "gen_ai.output.messages", 
"attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', }, @@ -2113,7 +2114,7 @@ async def test_async_text_and_image_content_off_binary_off_streaming(self, **kwa { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}, {"type": "image"}]}]', }, @@ -2121,7 +2122,7 @@ async def test_async_text_and_image_content_off_binary_off_streaming(self, **kwa { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', }, @@ -2195,7 +2196,7 @@ async def test_async_text_and_image_content_off_binary_on_streaming(self, **kwar { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}, {"type": "image"}]}]', }, @@ -2203,7 +2204,7 @@ async def test_async_text_and_image_content_off_binary_on_streaming(self, **kwar { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', }, @@ -2277,7 +2278,7 @@ 
async def test_async_text_and_image_content_on_binary_off_streaming(self, **kwar { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role":"user","parts":[{"type":"text","content":"What is shown in this image?"},{"type":"image"}]}]', }, @@ -2285,7 +2286,7 @@ async def test_async_text_and_image_content_on_binary_off_streaming(self, **kwar { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', }, @@ -2359,7 +2360,7 @@ async def test_async_text_and_image_content_on_binary_on_streaming(self, **kwarg { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": f'[{{"role":"user","parts":[{{"type":"text","content":"What is shown in this image?"}},{{"type":"image","content":"data:image/png;base64,{TEST_IMAGE_BASE64}"}}]}}]', }, @@ -2367,7 +2368,7 @@ async def test_async_text_and_image_content_on_binary_on_streaming(self, **kwarg { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', }, @@ -2439,7 +2440,7 @@ async def test_async_multiple_text_inputs_without_content_recording_streaming(se ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", 
OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -2455,7 +2456,7 @@ async def test_async_multiple_text_inputs_without_content_recording_streaming(se { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}]}]', }, @@ -2463,7 +2464,7 @@ async def test_async_multiple_text_inputs_without_content_recording_streaming(se { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}]}]', }, @@ -2471,7 +2472,7 @@ async def test_async_multiple_text_inputs_without_content_recording_streaming(se { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', }, @@ -2532,7 +2533,7 @@ async def test_async_responses_stream_method_with_content_recording(self, **kwar ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -2548,7 +2549,7 @@ async def 
test_async_responses_stream_method_with_content_recording(self, **kwar { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "Write a short haiku about testing"}]}]', }, @@ -2556,7 +2557,7 @@ async def test_async_responses_stream_method_with_content_recording(self, **kwar { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', }, @@ -2612,7 +2613,7 @@ async def test_async_responses_stream_method_without_content_recording(self, **k ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -2628,7 +2629,7 @@ async def test_async_responses_stream_method_without_content_recording(self, **k { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}]}]', }, @@ -2636,7 +2637,7 @@ async def test_async_responses_stream_method_without_content_recording(self, **k { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in 
event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', }, @@ -2744,7 +2745,7 @@ async def test_async_responses_stream_method_with_tools_with_content_recording(s ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_CHAT), ("gen_ai.request.model", deployment_name), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -2760,7 +2761,7 @@ async def test_async_responses_stream_method_with_tools_with_content_recording(s { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text", "content": "What\'s the weather in Boston?"}]}]', }, @@ -2768,7 +2769,7 @@ async def test_async_responses_stream_method_with_tools_with_content_recording(s { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "tool_call", "content": {"type": "function_call", "id": "*", "function": {"name": "get_weather", "arguments": "*"}}}]}]', }, @@ -2783,7 +2784,7 @@ async def test_async_responses_stream_method_with_tools_with_content_recording(s { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "tool", # Commented out - now in event content "gen_ai.event.content": '[{"role": "tool", "parts": [{"type": "tool_call_output", "content": {"type": "function_call_output", "id": "*", "output": {"temperature": 
"65°F", "condition": "cloudy"}}}]}]', }, @@ -2791,7 +2792,7 @@ async def test_async_responses_stream_method_with_tools_with_content_recording(s { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text", "content": "*"}], "finish_reason": "*"}]', }, @@ -2898,7 +2899,7 @@ async def test_async_responses_stream_method_with_tools_without_content_recordin { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "user", # Commented out - now in event content "gen_ai.event.content": '[{"role": "user", "parts": [{"type": "text"}]}]', }, @@ -2906,7 +2907,7 @@ async def test_async_responses_stream_method_with_tools_without_content_recordin { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "tool_call", "content": {"type": "function_call", "id": "*"}}]}]', }, @@ -2921,7 +2922,7 @@ async def test_async_responses_stream_method_with_tools_without_content_recordin { "name": "gen_ai.input.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + "gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "tool", # Commented out - now in event content "gen_ai.event.content": '[{"role": "tool", "parts": [{"type": "tool_call_output", "content": {"type": "function_call_output", "id": "*"}}]}]', }, @@ -2929,7 +2930,7 @@ async def test_async_responses_stream_method_with_tools_without_content_recordin { "name": "gen_ai.output.messages", "attributes": { - "gen_ai.provider.name": "azure.openai", + 
"gen_ai.provider.name": RESPONSES_PROVIDER, # "gen_ai.message.role": "assistant", # Commented out - now in event content "gen_ai.event.content": '[{"role": "assistant", "parts": [{"type": "text"}], "finish_reason": "*"}]', }, @@ -3490,7 +3491,7 @@ async def test_async_prompt_agent_with_responses_non_streaming(self, **kwargs): ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.agent.name", agent.name), ("gen_ai.agent.id", agent.id), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), @@ -3569,7 +3570,7 @@ async def test_async_prompt_agent_with_responses_streaming(self, **kwargs): ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), ("gen_ai.agent.name", agent.name), ("gen_ai.agent.id", agent.id), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.response.model", deployment_name), diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation.py index 282555f66883..323a43dd0c6f 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation.py @@ -13,6 +13,7 @@ OPERATION_NAME_INVOKE_AGENT, SPAN_NAME_INVOKE_AGENT, _set_use_message_events, + RESPONSES_PROVIDER, ) from azure.core.settings import settings from gen_ai_trace_verifier import GenAiTraceVerifier @@ -111,7 +112,7 @@ def test_sync_browser_automation_non_streaming_with_content_recording(self, **kw expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - 
("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), @@ -243,7 +244,7 @@ def test_sync_browser_automation_non_streaming_without_content_recording(self, * expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), @@ -373,7 +374,7 @@ def test_sync_browser_automation_streaming_with_content_recording(self, **kwargs expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), @@ -499,7 +500,7 @@ def test_sync_browser_automation_streaming_without_content_recording(self, **kwa expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation_async.py index 459002ef920f..87b89d8343f6 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_browser_automation_async.py @@ -13,6 +13,7 @@ 
OPERATION_NAME_INVOKE_AGENT, SPAN_NAME_INVOKE_AGENT, _set_use_message_events, + RESPONSES_PROVIDER, ) from azure.core.settings import settings from gen_ai_trace_verifier import GenAiTraceVerifier @@ -114,7 +115,7 @@ async def test_async_browser_automation_non_streaming_with_content_recording(sel expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), @@ -242,7 +243,7 @@ async def test_async_browser_automation_non_streaming_without_content_recording( expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), @@ -368,7 +369,7 @@ async def test_async_browser_automation_streaming_with_content_recording(self, * expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), @@ -491,7 +492,7 @@ async def test_async_browser_automation_streaming_without_content_recording(self expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), diff --git 
a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter.py index fae2041cc19d..df073393af8c 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter.py @@ -14,6 +14,7 @@ OPERATION_NAME_INVOKE_AGENT, SPAN_NAME_INVOKE_AGENT, _set_use_message_events, + RESPONSES_PROVIDER, ) from azure.core.settings import settings from gen_ai_trace_verifier import GenAiTraceVerifier @@ -124,7 +125,7 @@ def test_sync_code_interpreter_non_streaming_with_content_recording(self, **kwar expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), @@ -311,7 +312,7 @@ def test_sync_code_interpreter_non_streaming_without_content_recording(self, **k expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), @@ -507,7 +508,7 @@ def test_sync_code_interpreter_streaming_with_content_recording(self, **kwargs): expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), @@ -698,7 +699,7 @@ def 
test_sync_code_interpreter_streaming_without_content_recording(self, **kwarg expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter_async.py index 54b1f17b9a01..850a2eb6e542 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_code_interpreter_async.py @@ -14,6 +14,7 @@ OPERATION_NAME_INVOKE_AGENT, SPAN_NAME_INVOKE_AGENT, _set_use_message_events, + RESPONSES_PROVIDER, ) from azure.core.settings import settings from gen_ai_trace_verifier import GenAiTraceVerifier @@ -124,7 +125,7 @@ async def test_async_code_interpreter_non_streaming_with_content_recording(self, expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), @@ -311,7 +312,7 @@ async def test_async_code_interpreter_non_streaming_without_content_recording(se expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), @@ -507,7 +508,7 @@ async def 
test_async_code_interpreter_streaming_with_content_recording(self, **k expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), @@ -698,7 +699,7 @@ async def test_async_code_interpreter_streaming_without_content_recording(self, expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search.py index 58a273fef7dd..773f7c60614f 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search.py @@ -14,6 +14,7 @@ OPERATION_NAME_INVOKE_AGENT, SPAN_NAME_INVOKE_AGENT, _set_use_message_events, + RESPONSES_PROVIDER, ) from azure.core.settings import settings from gen_ai_trace_verifier import GenAiTraceVerifier @@ -124,7 +125,7 @@ def test_sync_file_search_non_streaming_with_content_recording(self, **kwargs): expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), @@ -333,7 +334,7 @@ def test_sync_file_search_non_streaming_without_content_recording(self, **kwargs 
expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), @@ -546,7 +547,7 @@ def test_sync_file_search_streaming_with_content_recording(self, **kwargs): expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), @@ -751,7 +752,7 @@ def test_sync_file_search_streaming_without_content_recording(self, **kwargs): expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search_async.py index 6342508de272..4e3e94308413 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_file_search_async.py @@ -14,6 +14,7 @@ OPERATION_NAME_INVOKE_AGENT, SPAN_NAME_INVOKE_AGENT, _set_use_message_events, + RESPONSES_PROVIDER, ) from azure.core.settings import settings from gen_ai_trace_verifier import GenAiTraceVerifier @@ -125,7 +126,7 @@ async def test_async_file_search_non_streaming_with_content_recording(self, **kw expected_attributes = [ ("az.namespace", 
"Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), @@ -334,7 +335,7 @@ async def test_async_file_search_non_streaming_without_content_recording(self, * expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), @@ -547,7 +548,7 @@ async def test_async_file_search_streaming_with_content_recording(self, **kwargs expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), @@ -752,7 +753,7 @@ async def test_async_file_search_streaming_without_content_recording(self, **kwa expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py index 4052a53eb0ec..a0640b93e62d 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp.py @@ -13,6 +13,7 @@ 
OPERATION_NAME_INVOKE_AGENT, SPAN_NAME_INVOKE_AGENT, _set_use_message_events, + RESPONSES_PROVIDER, ) from azure.core.settings import settings from gen_ai_trace_verifier import GenAiTraceVerifier @@ -128,7 +129,7 @@ def _test_sync_mcp_non_streaming_with_content_recording_impl(self, use_events, * expected_attributes_1 = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), @@ -217,7 +218,7 @@ def _test_sync_mcp_non_streaming_with_content_recording_impl(self, use_events, * expected_attributes_2 = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), @@ -458,7 +459,7 @@ def _test_sync_mcp_non_streaming_without_content_recording_impl(self, use_events expected_attributes_1 = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), @@ -542,7 +543,7 @@ def _test_sync_mcp_non_streaming_without_content_recording_impl(self, use_events expected_attributes_2 = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), @@ -784,7 +785,7 @@ def _test_sync_mcp_streaming_with_content_recording_impl(self, 
use_events, **kwa expected_attributes_1 = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), @@ -860,7 +861,7 @@ def _test_sync_mcp_streaming_with_content_recording_impl(self, use_events, **kwa expected_attributes_2 = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), @@ -1060,7 +1061,7 @@ def _test_sync_mcp_streaming_without_content_recording_impl(self, use_events, ** expected_attributes_1 = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), @@ -1134,7 +1135,7 @@ def _test_sync_mcp_streaming_without_content_recording_impl(self, use_events, ** expected_attributes_2 = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py index 748e42785fe9..d0d4fb384984 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py +++ 
b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_mcp_async.py @@ -13,6 +13,7 @@ OPERATION_NAME_INVOKE_AGENT, SPAN_NAME_INVOKE_AGENT, _set_use_message_events, + RESPONSES_PROVIDER, ) from azure.core.settings import settings from gen_ai_trace_verifier import GenAiTraceVerifier @@ -129,7 +130,7 @@ async def _test_async_mcp_non_streaming_with_content_recording_impl(self, use_ev expected_attributes_1 = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), @@ -217,7 +218,7 @@ async def _test_async_mcp_non_streaming_with_content_recording_impl(self, use_ev expected_attributes_2 = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), @@ -458,7 +459,7 @@ async def _test_async_mcp_non_streaming_without_content_recording_impl(self, use expected_attributes_1 = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), @@ -541,7 +542,7 @@ async def _test_async_mcp_non_streaming_without_content_recording_impl(self, use expected_attributes_2 = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), 
("gen_ai.agent.name", agent.name), @@ -786,7 +787,7 @@ async def _test_async_mcp_streaming_with_content_recording_impl(self, use_events expected_attributes_1 = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), @@ -863,7 +864,7 @@ async def _test_async_mcp_streaming_with_content_recording_impl(self, use_events expected_attributes_2 = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), @@ -1065,7 +1066,7 @@ async def _test_async_mcp_streaming_without_content_recording_impl(self, use_eve expected_attributes_1 = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), @@ -1140,7 +1141,7 @@ async def _test_async_mcp_streaming_without_content_recording_impl(self, use_eve expected_attributes_2 = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", agent.name), diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow.py index 769e03b68a91..5d9254d23c80 100644 --- 
a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow.py @@ -13,6 +13,7 @@ OPERATION_NAME_INVOKE_AGENT, SPAN_NAME_INVOKE_AGENT, _set_use_message_events, + RESPONSES_PROVIDER, ) from azure.core.settings import settings from gen_ai_trace_verifier import GenAiTraceVerifier @@ -270,7 +271,7 @@ def test_sync_workflow_non_streaming_with_content_recording(self, **kwargs): expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", workflow.name), @@ -432,7 +433,7 @@ def test_sync_workflow_non_streaming_without_content_recording(self, **kwargs): expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", workflow.name), @@ -604,7 +605,7 @@ def test_sync_workflow_streaming_with_content_recording(self, **kwargs): expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", workflow.name), @@ -772,7 +773,7 @@ def test_sync_workflow_streaming_without_content_recording(self, **kwargs): expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), 
("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", workflow.name), diff --git a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py index d4b987418bf4..494c29e71fd3 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/telemetry/test_responses_instrumentor_workflow_async.py @@ -13,6 +13,7 @@ OPERATION_NAME_INVOKE_AGENT, SPAN_NAME_INVOKE_AGENT, _set_use_message_events, + RESPONSES_PROVIDER, ) from azure.core.settings import settings from gen_ai_trace_verifier import GenAiTraceVerifier @@ -266,7 +267,7 @@ async def test_async_workflow_non_streaming_with_content_recording(self, **kwarg expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", workflow.name), @@ -427,7 +428,7 @@ async def test_async_workflow_non_streaming_without_content_recording(self, **kw expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", workflow.name), @@ -597,7 +598,7 @@ async def test_async_workflow_streaming_with_content_recording(self, **kwargs): expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", 
conversation.id), ("gen_ai.agent.name", workflow.name), @@ -763,7 +764,7 @@ async def test_async_workflow_streaming_without_content_recording(self, **kwargs expected_attributes = [ ("az.namespace", "Microsoft.CognitiveServices"), ("gen_ai.operation.name", OPERATION_NAME_INVOKE_AGENT), - ("gen_ai.provider.name", "azure.openai"), + ("gen_ai.provider.name", RESPONSES_PROVIDER), ("server.address", ""), ("gen_ai.conversation.id", conversation.id), ("gen_ai.agent.name", workflow.name), From 51bd1fd44ba8044976ccfbd6b2bf6f45d2122ffa Mon Sep 17 00:00:00 2001 From: M-Hietala <78813398+M-Hietala@users.noreply.github.com> Date: Wed, 4 Feb 2026 16:29:05 -0600 Subject: [PATCH 10/10] making baggage header inclusion separately configurable for trace propagation --- sdk/ai/azure-ai-projects/README.md | 33 ++++++-- .../telemetry/_ai_project_instrumentor.py | 80 ++++++++++++++++--- 2 files changed, 96 insertions(+), 17 deletions(-) diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md index 07bc4d8c3b60..86e711138dcb 100644 --- a/sdk/ai/azure-ai-projects/README.md +++ b/sdk/ai/azure-ai-projects/README.md @@ -1019,22 +1019,45 @@ See the full sample in file `\agents\telemetry\sample_agent_basic_with_console_t ### Enabling trace context propagation -Trace context propagation allows client-side spans generated by the Projects SDK to be correlated with server-side spans from Azure OpenAI and other Azure services. When enabled, the SDK automatically injects trace context headers into HTTP requests made by OpenAI clients obtained via `get_openai_client()`. +Trace context propagation allows client-side spans generated by the Projects SDK to be correlated with server-side spans from Azure OpenAI and other Azure services. When enabled, the SDK automatically injects W3C Trace Context headers (`traceparent` and `tracestate`) into HTTP requests made by OpenAI clients obtained via `get_openai_client()`. 
This feature ensures that all operations within a distributed trace share the same trace ID, providing end-to-end visibility across your application and Azure services in your observability backend (such as Azure Monitor). -To enable trace context propagation set the `AZURE_TRACING_GEN_AI_ENABLE_TRACE_CONTEXT_PROPAGATION` environment variable to `true` -If the environment variable is not set and no value is provided with the `AIProjectInstrumentor().instrument()` call for the propagation parameter, the trace context propagation defaults to `false`. +To enable trace context propagation, set the `AZURE_TRACING_GEN_AI_ENABLE_TRACE_CONTEXT_PROPAGATION` environment variable to `true`: + +If no value is provided for the `enable_trace_context_propagation` parameter with the `AIProjectInstrumentor.instrument()` call and the environment variable is not set, trace context propagation defaults to `false` (opt-in). **Important Security and Privacy Considerations:** -- **Trace IDs and Baggage**: When trace context propagation is enabled, trace IDs and OpenTelemetry baggage are sent to Azure OpenAI and other external services. Baggage may contain arbitrary key-value pairs added to the trace context by your application or other components. -- **Sensitive Information**: Ensure that your application does not add sensitive information (such as user identifiers, authentication tokens, or personally identifiable information) to OpenTelemetry baggage, as this data will be propagated to external services. +- **Trace IDs**: When trace context propagation is enabled, trace IDs are sent to Azure OpenAI and other external services. - **Request Correlation**: Trace IDs allow Azure services to correlate requests from the same session or user across multiple API calls, which may have privacy implications depending on your use case. - **Opt-in by Design**: This feature is disabled by default to give you explicit control over when trace context is propagated to external services.
Only enable trace context propagation after carefully reviewing your observability, privacy and security requirements. +#### Controlling baggage propagation + +When trace context propagation is enabled, you can separately control whether the baggage header is included. By default, only `traceparent` and `tracestate` headers are propagated. To also include the `baggage` header, set the `AZURE_TRACING_GEN_AI_TRACE_CONTEXT_PROPAGATION_INCLUDE_BAGGAGE` environment variable to `true`: + +If no value is provided for the `enable_baggage_propagation` parameter with the `AIProjectInstrumentor.instrument()` call and the environment variable is not set, the value defaults to `false` and baggage is not included. + +**Why is baggage propagation separate?** + +The baggage header can contain arbitrary key-value pairs added anywhere in your application's trace context. Unlike trace IDs (which are randomly generated identifiers), baggage may contain: +- User identifiers or session information +- Authentication tokens or credentials +- Business-specific data or metadata +- Personally identifiable information (PII) + +Baggage is automatically propagated through your entire application's call chain, meaning data added in one part of your application will be included in requests to Azure OpenAI unless explicitly controlled. + +**Important Security Considerations:** + +- **Review Baggage Contents**: Before enabling baggage propagation, audit what data your application (and any third-party libraries) adds to OpenTelemetry baggage. +- **Sensitive Data Risk**: Baggage is sent to Azure OpenAI and may be logged or processed by Microsoft services. Never add sensitive information to baggage when baggage propagation is enabled. +- **Opt-in by Design**: Baggage propagation is disabled by default (even when trace context propagation is enabled) to prevent accidental exposure of sensitive data. 
+- **Minimal Propagation**: `traceparent` and `tracestate` headers are generally sufficient for distributed tracing. Only enable baggage propagation if your specific observability requirements demand it. + ### Enabling content recording Content recording controls whether message contents and tool call related details, such as parameters and return values, are captured with the traces. This data may include sensitive user information. diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_ai_project_instrumentor.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_ai_project_instrumentor.py index 4999adeae9ff..6dbc434212d4 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_ai_project_instrumentor.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_ai_project_instrumentor.py @@ -82,6 +82,7 @@ _projects_traces_enabled: bool = False _trace_agents_content: bool = False _trace_context_propagation_enabled: bool = False +_trace_context_baggage_propagation_enabled: bool = False def _inject_trace_context_sync(request): @@ -96,8 +97,14 @@ def _inject_trace_context_sync(request): carrier = dict(request.headers) propagate.inject(carrier) for key, value in carrier.items(): - if key.lower() in ("traceparent", "tracestate", "baggage"): - if key.lower() not in [h.lower() for h in request.headers.keys()]: + key_lower = key.lower() + # Always include traceparent and tracestate + # Only include baggage if explicitly enabled + if key_lower in ("traceparent", "tracestate"): + if key_lower not in [h.lower() for h in request.headers.keys()]: + request.headers[key] = value + elif key_lower == "baggage" and _trace_context_baggage_propagation_enabled: + if key_lower not in [h.lower() for h in request.headers.keys()]: request.headers[key] = value except Exception as e: # pylint: disable=broad-exception-caught logger.debug("Failed to inject trace context: %s", e) @@ -115,8 +122,14 @@ async def _inject_trace_context_async(request): carrier = 
dict(request.headers) propagate.inject(carrier) for key, value in carrier.items(): - if key.lower() in ("traceparent", "tracestate", "baggage"): - if key.lower() not in [h.lower() for h in request.headers.keys()]: + key_lower = key.lower() + # Always include traceparent and tracestate + # Only include baggage if explicitly enabled + if key_lower in ("traceparent", "tracestate"): + if key_lower not in [h.lower() for h in request.headers.keys()]: + request.headers[key] = value + elif key_lower == "baggage" and _trace_context_baggage_propagation_enabled: + if key_lower not in [h.lower() for h in request.headers.keys()]: request.headers[key] = value except Exception as e: # pylint: disable=broad-exception-caught logger.debug("Failed to inject trace context: %s", e) @@ -184,7 +197,10 @@ def __init__(self): self._responses_impl = _ResponsesInstrumentorPreview() def instrument( - self, enable_content_recording: Optional[bool] = None, enable_trace_context_propagation: Optional[bool] = None + self, + enable_content_recording: Optional[bool] = None, + enable_trace_context_propagation: Optional[bool] = None, + enable_baggage_propagation: Optional[bool] = None, ) -> None: """ Enable trace instrumentation for AIProjectClient. @@ -203,17 +219,23 @@ def instrument( called without uninstrument being called in between the instrument calls. :type enable_content_recording: bool, optional :param enable_trace_context_propagation: Whether to enable automatic trace context propagation - to OpenAI SDK HTTP requests. When enabled, traceparent headers will be injected into - requests made by OpenAI clients obtained via get_openai_client(), allowing server-side + to OpenAI SDK HTTP requests. When enabled, traceparent and tracestate headers will be injected + into requests made by OpenAI clients obtained via get_openai_client(), allowing server-side spans to be correlated with client-side spans. `True` will enable it, `False` will disable it. 
If no value is provided, then the value read from environment variable AZURE_TRACING_GEN_AI_ENABLE_TRACE_CONTEXT_PROPAGATION is used. If the environment variable is not found, then the value will default to `False`. - Note: Trace context may include trace IDs and baggage that will be sent to Azure OpenAI. :type enable_trace_context_propagation: bool, optional + :param enable_baggage_propagation: Whether to include baggage headers in trace context propagation. + Only applies when enable_trace_context_propagation is True. `True` will enable baggage propagation, + `False` will disable it. If no value is provided, then the value read from environment variable + AZURE_TRACING_GEN_AI_TRACE_CONTEXT_PROPAGATION_INCLUDE_BAGGAGE is used. If the environment + variable is not found, then the value will default to `False`. + Note: Baggage may contain sensitive application data. + :type enable_baggage_propagation: bool, optional """ - self._impl.instrument(enable_content_recording, enable_trace_context_propagation) + self._impl.instrument(enable_content_recording, enable_trace_context_propagation, enable_baggage_propagation) self._responses_impl.instrument(enable_content_recording) def uninstrument(self) -> None: @@ -259,7 +281,10 @@ def _str_to_bool(self, s): return str(s).lower() == "true" def instrument( - self, enable_content_recording: Optional[bool] = None, enable_trace_context_propagation: Optional[bool] = None + self, + enable_content_recording: Optional[bool] = None, + enable_trace_context_propagation: Optional[bool] = None, + enable_baggage_propagation: Optional[bool] = None, ): """ Enable trace instrumentation for AI Agents. @@ -282,6 +307,12 @@ def instrument( value read from environment variable AZURE_TRACING_GEN_AI_ENABLE_TRACE_CONTEXT_PROPAGATION is used. If the environment variable is not found, then the value will default to `False`. 
:type enable_trace_context_propagation: bool, optional + :param enable_baggage_propagation: Whether to include baggage in trace context propagation. + Only applies when enable_trace_context_propagation is True. `True` will enable it, `False` + will disable it. If no value is provided, then the value read from environment variable + AZURE_TRACING_GEN_AI_TRACE_CONTEXT_PROPAGATION_INCLUDE_BAGGAGE is used. If the + environment variable is not found, then the value will default to `False`. + :type enable_baggage_propagation: bool, optional """ if enable_content_recording is None: @@ -293,13 +324,20 @@ def instrument( var_value = os.environ.get("AZURE_TRACING_GEN_AI_ENABLE_TRACE_CONTEXT_PROPAGATION") enable_trace_context_propagation = self._str_to_bool(var_value) + if enable_baggage_propagation is None: + var_value = os.environ.get("AZURE_TRACING_GEN_AI_TRACE_CONTEXT_PROPAGATION_INCLUDE_BAGGAGE") + enable_baggage_propagation = self._str_to_bool(var_value) + if not self.is_instrumented(): - self._instrument_projects(enable_content_recording, enable_trace_context_propagation) + self._instrument_projects( + enable_content_recording, enable_trace_context_propagation, enable_baggage_propagation + ) else: self._set_enable_content_recording(enable_content_recording=enable_content_recording) self._set_enable_trace_context_propagation( enable_trace_context_propagation=enable_trace_context_propagation ) + self._set_enable_baggage_propagation(enable_baggage_propagation=enable_baggage_propagation) def uninstrument(self): """ @@ -1336,7 +1374,10 @@ def _available_projects_apis_and_injectors(self): yield from self._generate_api_and_injector(self._project_api_list()) def _instrument_projects( - self, enable_content_tracing: bool = False, enable_trace_context_propagation: bool = False + self, + enable_content_tracing: bool = False, + enable_trace_context_propagation: bool = False, + enable_baggage_propagation: bool = False, ): """This function modifies the methods of the Projects API 
classes to inject logic before calling the original methods. @@ -1348,17 +1389,21 @@ def _instrument_projects( :type enable_content_tracing: bool :param enable_trace_context_propagation: Whether to enable automatic trace context propagation. :type enable_trace_context_propagation: bool + :param enable_baggage_propagation: Whether to include baggage in trace context propagation. + :type enable_baggage_propagation: bool """ # pylint: disable=W0603 global _projects_traces_enabled global _trace_agents_content global _trace_context_propagation_enabled + global _trace_context_baggage_propagation_enabled if _projects_traces_enabled: raise RuntimeError("Traces already started for AI Agents") _projects_traces_enabled = True _trace_agents_content = enable_content_tracing _trace_context_propagation_enabled = enable_trace_context_propagation + _trace_context_baggage_propagation_enabled = enable_baggage_propagation for ( api, method, @@ -1378,8 +1423,10 @@ def _uninstrument_projects(self): global _projects_traces_enabled global _trace_agents_content global _trace_context_propagation_enabled + global _trace_context_baggage_propagation_enabled _trace_agents_content = False _trace_context_propagation_enabled = False + _trace_context_baggage_propagation_enabled = False for api, method, _, _, _ in self._available_projects_apis_and_injectors(): if hasattr(getattr(api, method), "_original"): setattr(api, method, getattr(getattr(api, method), "_original")) @@ -1423,6 +1470,15 @@ def _set_enable_trace_context_propagation(self, enable_trace_context_propagation global _trace_context_propagation_enabled # pylint: disable=W0603 _trace_context_propagation_enabled = enable_trace_context_propagation + def _set_enable_baggage_propagation(self, enable_baggage_propagation: bool = False) -> None: + """This function sets the baggage propagation value. + + :param enable_baggage_propagation: Indicates whether baggage should be included in trace context propagation. 
+ :type enable_baggage_propagation: bool + """ + global _trace_context_baggage_propagation_enabled # pylint: disable=W0603 + _trace_context_baggage_propagation_enabled = enable_baggage_propagation + def record_error(self, span, exc): # Set the span status to error if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable]