Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 16 additions & 0 deletions src/google/adk/telemetry/tracing.py
Original file line number Diff line number Diff line change
Expand Up @@ -744,6 +744,14 @@ def trace_generate_content_result(span: Span | None, llm_response: LlmResponse):
span.set_attribute(
GEN_AI_USAGE_OUTPUT_TOKENS, usage_metadata.candidates_token_count
)
try:
if usage_metadata.thoughts_token_count is not None:
span.set_attribute(
'gen_ai.usage.experimental.reasoning_tokens',
usage_metadata.thoughts_token_count,
)
except AttributeError:
pass

otel_logger.emit(
LogRecord(
Expand Down Expand Up @@ -787,6 +795,14 @@ def trace_inference_result(
span.set_attribute(
GEN_AI_USAGE_OUTPUT_TOKENS, usage_metadata.candidates_token_count
)
try:
if usage_metadata.thoughts_token_count is not None:
span.set_attribute(
'gen_ai.usage.experimental.reasoning_tokens',
usage_metadata.thoughts_token_count,
)
except AttributeError:
pass

if is_experimental_semconv() and isinstance(gc_span, GenerateContentSpan):
set_operation_details_attributes_from_response(
Expand Down
51 changes: 51 additions & 0 deletions tests/unittests/telemetry/test_spans.py
Original file line number Diff line number Diff line change
Expand Up @@ -1284,3 +1284,54 @@ def test_trace_tool_call_with_standard_error(
mock.call('error.type', 'ValueError')
in mock_span_fixture.set_attribute.call_args_list
)


def test_trace_inference_result_with_thinking_tokens(mock_span_fixture):
  """Verify that reasoning tokens are exported when thoughts are counted.

  When `usage_metadata.thoughts_token_count` is populated, the span must
  carry the standard input/output token attributes plus the experimental
  `gen_ai.usage.experimental.reasoning_tokens` attribute.
  """
  response = LlmResponse(
      turn_complete=True,
      finish_reason=types.FinishReason.STOP,
      usage_metadata=types.GenerateContentResponseUsageMetadata(
          total_token_count=110,
          prompt_token_count=50,
          candidates_token_count=10,
          thoughts_token_count=50,
      ),
  )

  trace_inference_result(mock_span_fixture, response)

  # Every expected attribute must have been recorded on the span.
  for attr_name, attr_value in (
      (GEN_AI_USAGE_INPUT_TOKENS, 50),
      (GEN_AI_USAGE_OUTPUT_TOKENS, 10),
      ('gen_ai.usage.experimental.reasoning_tokens', 50),
  ):
    mock_span_fixture.set_attribute.assert_any_call(attr_name, attr_value)


def test_trace_inference_result_without_thinking_tokens(mock_span_fixture):
  """Verify no reasoning attribute is emitted when thoughts are absent.

  With `thoughts_token_count` left unset (None), the span still receives
  the input/output token counts, but the experimental
  `gen_ai.usage.experimental.reasoning_tokens` attribute must never be set.
  """
  response = LlmResponse(
      turn_complete=True,
      finish_reason=types.FinishReason.STOP,
      usage_metadata=types.GenerateContentResponseUsageMetadata(
          total_token_count=60,
          prompt_token_count=50,
          candidates_token_count=10,
      ),
  )

  trace_inference_result(mock_span_fixture, response)

  mock_span_fixture.set_attribute.assert_any_call(GEN_AI_USAGE_INPUT_TOKENS, 50)
  mock_span_fixture.set_attribute.assert_any_call(
      GEN_AI_USAGE_OUTPUT_TOKENS, 10
  )
  # Collect every attribute key that was set and confirm the experimental
  # reasoning-token key is not among them.
  recorded_keys = [
      recorded.args[0]
      for recorded in mock_span_fixture.set_attribute.call_args_list
  ]
  assert 'gen_ai.usage.experimental.reasoning_tokens' not in recorded_keys