From d75d8a026aa6961ddd8182404ade96a846a61107 Mon Sep 17 00:00:00 2001
From: brucearctor <5032356+brucearctor@users.noreply.github.com>
Date: Fri, 13 Mar 2026 18:14:15 -0700
Subject: [PATCH] fix: export thoughts_token_count to OpenTelemetry trace spans

Add thoughts_token_count as the gen_ai.usage.experimental.reasoning_tokens
span attribute in trace_generate_content_result() and trace_inference_result(),
matching the existing pattern in trace_call_llm().

Fixes #4829
---
 src/google/adk/telemetry/tracing.py     | 16 ++++++++
 tests/unittests/telemetry/test_spans.py | 51 +++++++++++++++++++++++++
 2 files changed, 67 insertions(+)

diff --git a/src/google/adk/telemetry/tracing.py b/src/google/adk/telemetry/tracing.py
index 5c05968d31..5cae128106 100644
--- a/src/google/adk/telemetry/tracing.py
+++ b/src/google/adk/telemetry/tracing.py
@@ -744,6 +744,14 @@ def trace_generate_content_result(span: Span | None, llm_response: LlmResponse):
   span.set_attribute(
       GEN_AI_USAGE_OUTPUT_TOKENS, usage_metadata.candidates_token_count
   )
+  try:
+    if usage_metadata.thoughts_token_count is not None:
+      span.set_attribute(
+          'gen_ai.usage.experimental.reasoning_tokens',
+          usage_metadata.thoughts_token_count,
+      )
+  except AttributeError:
+    pass
 
   otel_logger.emit(
       LogRecord(
@@ -787,6 +795,14 @@ def trace_inference_result(
   span.set_attribute(
       GEN_AI_USAGE_OUTPUT_TOKENS, usage_metadata.candidates_token_count
   )
+  try:
+    if usage_metadata.thoughts_token_count is not None:
+      span.set_attribute(
+          'gen_ai.usage.experimental.reasoning_tokens',
+          usage_metadata.thoughts_token_count,
+      )
+  except AttributeError:
+    pass
 
   if is_experimental_semconv() and isinstance(gc_span, GenerateContentSpan):
     set_operation_details_attributes_from_response(
diff --git a/tests/unittests/telemetry/test_spans.py b/tests/unittests/telemetry/test_spans.py
index c4bd485fba..8c872cd212 100644
--- a/tests/unittests/telemetry/test_spans.py
+++ b/tests/unittests/telemetry/test_spans.py
@@ -1284,3 +1284,54 @@ def test_trace_tool_call_with_standard_error(
       mock.call('error.type', 'ValueError')
       in mock_span_fixture.set_attribute.call_args_list
   )
+
+
+def test_trace_inference_result_with_thinking_tokens(mock_span_fixture):
+  """Test trace_inference_result exports thoughts_token_count."""
+  llm_response = LlmResponse(
+      turn_complete=True,
+      finish_reason=types.FinishReason.STOP,
+      usage_metadata=types.GenerateContentResponseUsageMetadata(
+          total_token_count=110,
+          prompt_token_count=50,
+          candidates_token_count=10,
+          thoughts_token_count=50,
+      ),
+  )
+
+  trace_inference_result(mock_span_fixture, llm_response)
+
+  mock_span_fixture.set_attribute.assert_any_call(GEN_AI_USAGE_INPUT_TOKENS, 50)
+  mock_span_fixture.set_attribute.assert_any_call(
+      GEN_AI_USAGE_OUTPUT_TOKENS, 10
+  )
+  mock_span_fixture.set_attribute.assert_any_call(
+      'gen_ai.usage.experimental.reasoning_tokens', 50
+  )
+
+
+def test_trace_inference_result_without_thinking_tokens(mock_span_fixture):
+  """Test trace_inference_result works when thoughts_token_count is None."""
+  llm_response = LlmResponse(
+      turn_complete=True,
+      finish_reason=types.FinishReason.STOP,
+      usage_metadata=types.GenerateContentResponseUsageMetadata(
+          total_token_count=60,
+          prompt_token_count=50,
+          candidates_token_count=10,
+      ),
+  )
+
+  trace_inference_result(mock_span_fixture, llm_response)
+
+  mock_span_fixture.set_attribute.assert_any_call(GEN_AI_USAGE_INPUT_TOKENS, 50)
+  mock_span_fixture.set_attribute.assert_any_call(
+      GEN_AI_USAGE_OUTPUT_TOKENS, 10
+  )
+  # Verify reasoning_tokens is NOT set when thoughts_token_count is None
+  reasoning_calls = [
+      call
+      for call in mock_span_fixture.set_attribute.call_args_list
+      if call.args[0] == 'gen_ai.usage.experimental.reasoning_tokens'
+  ]
+  assert len(reasoning_calls) == 0
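
For reviewers, a minimal sketch (not part of the patch) of how the new attribute
can be observed end to end on a finished span using OpenTelemetry's in-memory
exporter. It assumes the import paths below match this repository's layout and
that trace_inference_result accepts (span, llm_response) as in the new tests;
adjust to your ADK version if they differ.

    from google.genai import types
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import SimpleSpanProcessor
    from opentelemetry.sdk.trace.export.in_memory_span_exporter import (
        InMemorySpanExporter,
    )

    # Assumed import paths; both modules are touched or exercised by this patch.
    from google.adk.models.llm_response import LlmResponse
    from google.adk.telemetry.tracing import trace_inference_result

    # Collect finished spans in memory instead of sending them to a backend.
    exporter = InMemorySpanExporter()
    provider = TracerProvider()
    provider.add_span_processor(SimpleSpanProcessor(exporter))
    tracer = provider.get_tracer(__name__)

    llm_response = LlmResponse(
        turn_complete=True,
        finish_reason=types.FinishReason.STOP,
        usage_metadata=types.GenerateContentResponseUsageMetadata(
            total_token_count=110,
            prompt_token_count=50,
            candidates_token_count=10,
            thoughts_token_count=50,
        ),
    )

    with tracer.start_as_current_span('call_llm') as span:
      trace_inference_result(span, llm_response)

    [finished] = exporter.get_finished_spans()
    # With the patch applied, reasoning tokens land next to input/output tokens.
    assert finished.attributes['gen_ai.usage.experimental.reasoning_tokens'] == 50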