diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/test.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/test.ts
index c9e112b32241..d8087ebf79fe 100644
--- a/dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/test.ts
+++ b/dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/test.ts
@@ -1,4 +1,15 @@
+import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
 import { expect, it } from 'vitest';
+import {
+  GEN_AI_OPERATION_NAME_ATTRIBUTE,
+  GEN_AI_REQUEST_MODEL_ATTRIBUTE,
+  GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE,
+  GEN_AI_RESPONSE_ID_ATTRIBUTE,
+  GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
+  GEN_AI_SYSTEM_ATTRIBUTE,
+  GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
+} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes';
 import { createRunner } from '../../../runner';
 
 // These tests are not exhaustive because the instrumentation is
@@ -17,16 +28,16 @@ it('traces a basic message creation request', async ({ signal }) => {
       expect.arrayContaining([
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'messages',
-            'sentry.op': 'gen_ai.messages',
-            'sentry.origin': 'auto.ai.anthropic',
-            'gen_ai.system': 'anthropic',
-            'gen_ai.request.model': 'claude-3-haiku-20240307',
-            'gen_ai.request.temperature': 0.7,
-            'gen_ai.response.model': 'claude-3-haiku-20240307',
-            'gen_ai.response.id': 'msg_mock123',
-            'gen_ai.usage.input_tokens': 10,
-            'gen_ai.usage.output_tokens': 15,
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.messages',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+            [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
+            [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+            [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'msg_mock123',
+            [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+            [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
           }),
           description: 'messages claude-3-haiku-20240307',
           op: 'gen_ai.messages',
diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts
index 3c36e832a17a..4579baf7d5de 100644
--- a/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts
+++ b/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts
@@ -1,4 +1,16 @@
+import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
 import { expect, it } from 'vitest';
+import {
+  GEN_AI_OPERATION_NAME_ATTRIBUTE,
+  GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE,
+  GEN_AI_REQUEST_MODEL_ATTRIBUTE,
+  GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE,
+  GEN_AI_REQUEST_TOP_P_ATTRIBUTE,
+  GEN_AI_SYSTEM_ATTRIBUTE,
+  GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE,
+} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes';
 import { createRunner } from '../../../runner';
 
 // These tests are not exhaustive because the instrumentation is
@@ -18,14 +30,14 @@ it('traces Google GenAI chat creation and message sending', async () => {
         // First span - chats.create
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'chat',
-            'sentry.op': 'gen_ai.chat',
-            'sentry.origin': 'auto.ai.google_genai',
-            'gen_ai.system': 'google_genai',
-            'gen_ai.request.model': 'gemini-1.5-pro',
-            'gen_ai.request.temperature': 0.8,
-            'gen_ai.request.top_p': 0.9,
-            'gen_ai.request.max_tokens': 150,
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
+            [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8,
+            [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9,
+            [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 150,
           }),
           description: 'chat gemini-1.5-pro create',
           op: 'gen_ai.chat',
@@ -34,14 +46,14 @@ it('traces Google GenAI chat creation and message sending', async () => {
         // Second span - chat.sendMessage
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'chat',
-            'sentry.op': 'gen_ai.chat',
-            'sentry.origin': 'auto.ai.google_genai',
-            'gen_ai.system': 'google_genai',
-            'gen_ai.request.model': 'gemini-1.5-pro',
-            'gen_ai.usage.input_tokens': 8,
-            'gen_ai.usage.output_tokens': 12,
-            'gen_ai.usage.total_tokens': 20,
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
+            [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
+            [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
+            [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
           }),
           description: 'chat gemini-1.5-pro',
           op: 'gen_ai.chat',
@@ -50,17 +62,17 @@ it('traces Google GenAI chat creation and message sending', async () => {
         // Third span - models.generateContent
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'models',
-            'sentry.op': 'gen_ai.models',
-            'sentry.origin': 'auto.ai.google_genai',
-            'gen_ai.system': 'google_genai',
-            'gen_ai.request.model': 'gemini-1.5-flash',
-            'gen_ai.request.temperature': 0.7,
-            'gen_ai.request.top_p': 0.9,
-            'gen_ai.request.max_tokens': 100,
-            'gen_ai.usage.input_tokens': 8,
-            'gen_ai.usage.output_tokens': 12,
-            'gen_ai.usage.total_tokens': 20,
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash',
+            [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
+            [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9,
+            [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
+            [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
+            [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
+            [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
           }),
          description: 'models gemini-1.5-flash',
           op: 'gen_ai.models',
diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts
index 875b4191b84b..d4abc4ae7220 100644
--- a/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts
+++ b/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts
@@ -1,4 +1,16 @@
+import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
 import { expect, it } from 'vitest';
+import {
+  GEN_AI_OPERATION_NAME_ATTRIBUTE,
+  GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE,
+  GEN_AI_REQUEST_MODEL_ATTRIBUTE,
+  GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE,
+  GEN_AI_SYSTEM_ATTRIBUTE,
+  GEN_AI_TOOL_NAME_ATTRIBUTE,
+  GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE,
+} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes';
 import { createRunner } from '../../../runner';
 
 // These tests are not exhaustive because the instrumentation is
@@ -18,16 +30,16 @@ it('traces langchain chat model, chain, and tool invocations', async ({ signal }
         // Chat model span
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'chat',
-            'sentry.op': 'gen_ai.chat',
-            'sentry.origin': 'auto.ai.langchain',
-            'gen_ai.system': 'anthropic',
-            'gen_ai.request.model': 'claude-3-5-sonnet-20241022',
-            'gen_ai.request.temperature': 0.7,
-            'gen_ai.request.max_tokens': 100,
-            'gen_ai.usage.input_tokens': 10,
-            'gen_ai.usage.output_tokens': 15,
-            'gen_ai.usage.total_tokens': 25,
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022',
+            [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
+            [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
+            [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+            [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
+            [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
           }),
           description: 'chat claude-3-5-sonnet-20241022',
           op: 'gen_ai.chat',
@@ -36,8 +48,8 @@ it('traces langchain chat model, chain, and tool invocations', async ({ signal }
         // Chain span
         expect.objectContaining({
           data: expect.objectContaining({
-            'sentry.origin': 'auto.ai.langchain',
-            'sentry.op': 'gen_ai.invoke_agent',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
             'langchain.chain.name': 'my_test_chain',
           }),
           description: 'chain my_test_chain',
@@ -47,9 +59,9 @@ it('traces langchain chat model, chain, and tool invocations', async ({ signal }
         // Tool span
         expect.objectContaining({
           data: expect.objectContaining({
-            'sentry.origin': 'auto.ai.langchain',
-            'sentry.op': 'gen_ai.execute_tool',
-            'gen_ai.tool.name': 'search_tool',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
+            [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'search_tool',
           }),
           description: 'execute_tool search_tool',
           op: 'gen_ai.execute_tool',
diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/langgraph/test.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/langgraph/test.ts
index 33023b30fa55..da9635952632 100644
--- a/dev-packages/cloudflare-integration-tests/suites/tracing/langgraph/test.ts
+++ b/dev-packages/cloudflare-integration-tests/suites/tracing/langgraph/test.ts
@@ -1,4 +1,16 @@
+import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
 import { expect, it } from 'vitest';
+import {
+  GEN_AI_AGENT_NAME_ATTRIBUTE,
+  GEN_AI_OPERATION_NAME_ATTRIBUTE,
+  GEN_AI_PIPELINE_NAME_ATTRIBUTE,
+  GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE,
+  GEN_AI_REQUEST_MESSAGES_ATTRIBUTE,
+  GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
+  GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE,
+} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes';
 import { createRunner } from '../../../runner';
 
 // These tests are not exhaustive because the instrumentation is
@@ -18,10 +30,10 @@ it('traces langgraph compile and invoke operations', async ({ signal }) => {
       const createAgentSpan = transactionEvent.spans.find((span: any) => span.op === 'gen_ai.create_agent');
       expect(createAgentSpan).toMatchObject({
         data: {
-          'gen_ai.operation.name': 'create_agent',
-          'sentry.op': 'gen_ai.create_agent',
-          'sentry.origin': 'auto.ai.langgraph',
-          'gen_ai.agent.name': 'weather_assistant',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'create_agent',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.create_agent',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph',
+          [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant',
         },
         description: 'create_agent weather_assistant',
         op: 'gen_ai.create_agent',
@@ -32,16 +44,16 @@ it('traces langgraph compile and invoke operations', async ({ signal }) => {
       const invokeAgentSpan = transactionEvent.spans.find((span: any) => span.op === 'gen_ai.invoke_agent');
       expect(invokeAgentSpan).toMatchObject({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'invoke_agent',
-          'sentry.op': 'gen_ai.invoke_agent',
-          'sentry.origin': 'auto.ai.langgraph',
-          'gen_ai.agent.name': 'weather_assistant',
-          'gen_ai.pipeline.name': 'weather_assistant',
-          'gen_ai.request.messages': '[{"role":"user","content":"What is the weather in SF?"}]',
-          'gen_ai.response.model': 'mock-model',
-          'gen_ai.usage.input_tokens': 20,
-          'gen_ai.usage.output_tokens': 10,
-          'gen_ai.usage.total_tokens': 30,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph',
+          [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant',
+          [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'weather_assistant',
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in SF?"}]',
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 20,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 10,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
         }),
         description: 'invoke_agent weather_assistant',
        op: 'gen_ai.invoke_agent',
@@ -49,8 +61,8 @@ it('traces langgraph compile and invoke operations', async ({ signal }) => {
       });
 
       // Verify tools are captured
-      if (invokeAgentSpan.data['gen_ai.request.available_tools']) {
-        expect(invokeAgentSpan.data['gen_ai.request.available_tools']).toMatch(/get_weather/);
+      if (invokeAgentSpan.data[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]) {
+        expect(invokeAgentSpan.data[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]).toMatch(/get_weather/);
       }
     })
     .start(signal);
diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/openai/test.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/openai/test.ts
index eb15fd80fc97..1c057e1a986c 100644
--- a/dev-packages/cloudflare-integration-tests/suites/tracing/openai/test.ts
+++ b/dev-packages/cloudflare-integration-tests/suites/tracing/openai/test.ts
@@ -1,4 +1,17 @@
+import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
 import { expect, it } from 'vitest';
+import {
+  GEN_AI_OPERATION_NAME_ATTRIBUTE,
+  GEN_AI_REQUEST_MODEL_ATTRIBUTE,
+  GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE,
+  GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE,
+  GEN_AI_RESPONSE_ID_ATTRIBUTE,
+  GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
+  GEN_AI_SYSTEM_ATTRIBUTE,
+  GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE,
+} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes';
 import { createRunner } from '../../../runner';
 
 // These tests are not exhaustive because the instrumentation is
@@ -17,18 +30,18 @@ it('traces a basic chat completion request', async ({ signal }) => {
       expect.arrayContaining([
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'chat',
-            'sentry.op': 'gen_ai.chat',
-            'sentry.origin': 'auto.ai.openai',
-            'gen_ai.system': 'openai',
-            'gen_ai.request.model': 'gpt-3.5-turbo',
-            'gen_ai.request.temperature': 0.7,
-            'gen_ai.response.model': 'gpt-3.5-turbo',
-            'gen_ai.response.id': 'chatcmpl-mock123',
-            'gen_ai.usage.input_tokens': 10,
-            'gen_ai.usage.output_tokens': 15,
-            'gen_ai.usage.total_tokens': 25,
-            'gen_ai.response.finish_reasons': '["stop"]',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
+            [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
+            [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
+            [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123',
+            [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+            [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
+            [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
+            [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]',
           }),
           description: 'chat gpt-3.5-turbo',
           op: 'gen_ai.chat',
diff --git a/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts b/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts
index a70e51858113..1a20a3c3dc7b 100644
--- a/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts
@@ -1,4 +1,25 @@
+import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
 import { afterAll, describe, expect } from 'vitest';
+import {
+  ANTHROPIC_AI_RESPONSE_TIMESTAMP_ATTRIBUTE,
+  GEN_AI_OPERATION_NAME_ATTRIBUTE,
+  GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE,
+  GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE,
+  GEN_AI_REQUEST_MESSAGES_ATTRIBUTE,
+  GEN_AI_REQUEST_MODEL_ATTRIBUTE,
+  GEN_AI_REQUEST_STREAM_ATTRIBUTE,
+  GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE,
+  GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE,
+  GEN_AI_RESPONSE_ID_ATTRIBUTE,
+  GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
+  GEN_AI_RESPONSE_STREAMING_ATTRIBUTE,
+  GEN_AI_RESPONSE_TEXT_ATTRIBUTE,
+  GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE,
+  GEN_AI_SYSTEM_ATTRIBUTE,
+  GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE,
+} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes';
 import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../utils/runner';
 
 describe('Anthropic integration', () => {
@@ -12,18 +33,18 @@ describe('Anthropic integration', () => {
         // First span - basic message completion without PII
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'messages',
-            'sentry.op': 'gen_ai.messages',
-            'sentry.origin': 'auto.ai.anthropic',
-            'gen_ai.system': 'anthropic',
-            'gen_ai.request.model': 'claude-3-haiku-20240307',
-            'gen_ai.request.temperature': 0.7,
-            'gen_ai.request.max_tokens': 100,
-            'gen_ai.response.model': 'claude-3-haiku-20240307',
-            'gen_ai.response.id': 'msg_mock123',
-            'gen_ai.usage.input_tokens': 10,
-            'gen_ai.usage.output_tokens': 15,
-            'gen_ai.usage.total_tokens': 25,
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.messages',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+            [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
+            [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
+            [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+            [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'msg_mock123',
+            [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+            [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
+            [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
           }),
           description: 'messages claude-3-haiku-20240307',
           op: 'gen_ai.messages',
@@ -33,11 +54,11 @@ describe('Anthropic integration', () => {
         // Second span - error handling
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'messages',
-            'sentry.op': 'gen_ai.messages',
-            'sentry.origin': 'auto.ai.anthropic',
-            'gen_ai.system': 'anthropic',
-            'gen_ai.request.model': 'error-model',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.messages',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
           }),
           description: 'messages error-model',
           op: 'gen_ai.messages',
@@ -47,11 +68,11 @@ describe('Anthropic integration', () => {
         // Third span - token counting (no response.text because recordOutputs=false by default)
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'messages',
-            'sentry.op': 'gen_ai.messages',
-            'sentry.origin': 'auto.ai.anthropic',
-            'gen_ai.system': 'anthropic',
-            'gen_ai.request.model': 'claude-3-haiku-20240307',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.messages',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
           }),
           description: 'messages claude-3-haiku-20240307',
           op: 'gen_ai.messages',
@@ -61,14 +82,14 @@ describe('Anthropic integration', () => {
         // Fourth span - models.retrieve
         expect.objectContaining({
           data: expect.objectContaining({
-            'anthropic.response.timestamp': '2024-05-08T05:20:00.000Z',
-            'gen_ai.operation.name': 'models',
-            'sentry.op': 'gen_ai.models',
-            'sentry.origin': 'auto.ai.anthropic',
-            'gen_ai.system': 'anthropic',
-            'gen_ai.request.model': 'claude-3-haiku-20240307',
-            'gen_ai.response.id': 'claude-3-haiku-20240307',
-            'gen_ai.response.model': 'claude-3-haiku-20240307',
+            [ANTHROPIC_AI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2024-05-08T05:20:00.000Z',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+            [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'claude-3-haiku-20240307',
+            [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
           }),
           description: 'models claude-3-haiku-20240307',
           op: 'gen_ai.models',
@@ -84,20 +105,20 @@ describe('Anthropic integration', () => {
         // First span - basic message completion with PII
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'messages',
-            'gen_ai.request.max_tokens': 100,
-            'gen_ai.request.messages': '[{"role":"user","content":"What is the capital of France?"}]',
-            'gen_ai.request.model': 'claude-3-haiku-20240307',
-            'gen_ai.request.temperature': 0.7,
-            'gen_ai.response.id': 'msg_mock123',
-            'gen_ai.response.model': 'claude-3-haiku-20240307',
-            'gen_ai.response.text': 'Hello from Anthropic mock!',
-            'gen_ai.system': 'anthropic',
-            'gen_ai.usage.input_tokens': 10,
-            'gen_ai.usage.output_tokens': 15,
-            'gen_ai.usage.total_tokens': 25,
-            'sentry.op': 'gen_ai.messages',
-            'sentry.origin': 'auto.ai.anthropic',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages',
+            [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
+            [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the capital of France?"}]',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+            [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
+            [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'msg_mock123',
+            [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+            [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Hello from Anthropic mock!',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+            [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+            [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
+            [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.messages',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
           }),
           description: 'messages claude-3-haiku-20240307',
           op: 'gen_ai.messages',
@@ -111,8 +132,8 @@ describe('Anthropic integration', () => {
             'http.response.header.content-length': 247,
             'http.response.status_code': 200,
             'otel.kind': 'CLIENT',
-            'sentry.op': 'http.client',
-            'sentry.origin': 'auto.http.otel.node_fetch',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'http.client',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.http.otel.node_fetch',
             'url.path': '/anthropic/v1/messages',
             'url.query': '',
             'url.scheme': 'http',
@@ -125,12 +146,12 @@ describe('Anthropic integration', () => {
         // Second - error handling with PII
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'messages',
-            'gen_ai.request.messages': '[{"role":"user","content":"This will fail"}]',
-            'gen_ai.request.model': 'error-model',
-            'gen_ai.system': 'anthropic',
-            'sentry.op': 'gen_ai.messages',
-            'sentry.origin': 'auto.ai.anthropic',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages',
+            [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"This will fail"}]',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.messages',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
           }),
           description: 'messages error-model',
           op: 'gen_ai.messages',
@@ -144,8 +165,8 @@ describe('Anthropic integration', () => {
             'http.response.header.content-length': 15,
             'http.response.status_code': 404,
             'otel.kind': 'CLIENT',
-            'sentry.op': 'http.client',
-            'sentry.origin': 'auto.http.otel.node_fetch',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'http.client',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.http.otel.node_fetch',
             'url.path': '/anthropic/v1/messages',
             'url.query': '',
             'url.scheme': 'http',
@@ -158,13 +179,13 @@ describe('Anthropic integration', () => {
         // Third - token counting with PII (response.text is present because sendDefaultPii=true enables recordOutputs)
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'messages',
-            'gen_ai.request.messages': '[{"role":"user","content":"What is the capital of France?"}]',
-            'gen_ai.request.model': 'claude-3-haiku-20240307',
-            'gen_ai.response.text': '15',
-            'gen_ai.system': 'anthropic',
-            'sentry.op': 'gen_ai.messages',
-            'sentry.origin': 'auto.ai.anthropic',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages',
+            [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the capital of France?"}]',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+            [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: '15',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.messages',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
           }),
           description: 'messages claude-3-haiku-20240307',
           op: 'gen_ai.messages',
@@ -178,8 +199,8 @@ describe('Anthropic integration', () => {
             'http.response.header.content-length': 19,
             'http.response.status_code': 200,
             'otel.kind': 'CLIENT',
-            'sentry.op': 'http.client',
-            'sentry.origin': 'auto.http.otel.node_fetch',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'http.client',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.http.otel.node_fetch',
             'url.path': '/anthropic/v1/messages/count_tokens',
             'url.query': '',
             'url.scheme': 'http',
@@ -192,14 +213,14 @@ describe('Anthropic integration', () => {
         // Fourth - models.retrieve with PII
         expect.objectContaining({
           data: expect.objectContaining({
-            'anthropic.response.timestamp': '2024-05-08T05:20:00.000Z',
-            'gen_ai.operation.name': 'models',
-            'gen_ai.request.model': 'claude-3-haiku-20240307',
-            'gen_ai.response.id': 'claude-3-haiku-20240307',
-            'gen_ai.response.model': 'claude-3-haiku-20240307',
-            'gen_ai.system': 'anthropic',
-            'sentry.op': 'gen_ai.models',
-            'sentry.origin': 'auto.ai.anthropic',
+            [ANTHROPIC_AI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2024-05-08T05:20:00.000Z',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+            [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'claude-3-haiku-20240307',
+            [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
           }),
           description: 'models claude-3-haiku-20240307',
           op: 'gen_ai.models',
@@ -213,8 +234,8 @@ describe('Anthropic integration', () => {
             'http.response.header.content-length': 123,
             'http.response.status_code': 200,
             'otel.kind': 'CLIENT',
-            'sentry.op': 'http.client',
-            'sentry.origin': 'auto.http.otel.node_fetch',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'http.client',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.http.otel.node_fetch',
             'url.path': '/anthropic/v1/models/claude-3-haiku-20240307',
             'url.query': '',
             'url.scheme': 'http',
@@ -228,20 +249,20 @@ describe('Anthropic integration', () => {
         // Fifth - messages.create with stream: true
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'messages',
-            'gen_ai.request.messages': '[{"role":"user","content":"What is the capital of France?"}]',
-            'gen_ai.request.model': 'claude-3-haiku-20240307',
-            'gen_ai.request.stream': true,
-            'gen_ai.response.id': 'msg_stream123',
-            'gen_ai.response.model': 'claude-3-haiku-20240307',
-            'gen_ai.response.streaming': true,
-            'gen_ai.response.text': 'Hello from stream!',
-            'gen_ai.system': 'anthropic',
-            'gen_ai.usage.input_tokens': 10,
-            'gen_ai.usage.output_tokens': 15,
-            'gen_ai.usage.total_tokens': 25,
-            'sentry.op': 'gen_ai.messages',
-            'sentry.origin': 'auto.ai.anthropic',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages',
+            [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the capital of France?"}]',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+            [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
+            [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'msg_stream123',
+            [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+            [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Hello from stream!',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+            [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+            [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
+            [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.messages',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
           }),
           description: 'messages claude-3-haiku-20240307 stream-response',
           op: 'gen_ai.messages',
@@ -254,8 +275,8 @@ describe('Anthropic integration', () => {
             'http.request.method_original': 'POST',
             'http.response.status_code': 200,
             'otel.kind': 'CLIENT',
-            'sentry.op': 'http.client',
-            'sentry.origin': 'auto.http.otel.node_fetch',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'http.client',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.http.otel.node_fetch',
             'url.path': '/anthropic/v1/messages',
             'url.query': '',
             'url.scheme': 'http',
@@ -269,9 +290,9 @@ describe('Anthropic integration', () => {
         // Sixth - messages.stream
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'messages',
-            'gen_ai.request.model': 'claude-3-haiku-20240307',
-            'gen_ai.request.stream': true,
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+            [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
           }),
           description: 'messages claude-3-haiku-20240307 stream-response',
          op: 'gen_ai.messages',
@@ -287,27 +308,27 @@ describe('Anthropic integration', () => {
         // Check that custom options are respected
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true
-            'gen_ai.response.text': expect.any(String), // Should include response text when recordOutputs: true
+            [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true
+            [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text when recordOutputs: true
           }),
         }),
         // Check token counting with options
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'messages',
-            'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true
-            'gen_ai.response.text': '15', // Present because recordOutputs=true is set in options
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages',
+            [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true
+            [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: '15', // Present because recordOutputs=true is set in options
           }),
           op: 'gen_ai.messages',
         }),
         // Check models.retrieve with options
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'models',
-            'gen_ai.system': 'anthropic',
-            'gen_ai.request.model': 'claude-3-haiku-20240307',
-            'gen_ai.response.id': 'claude-3-haiku-20240307',
-            'gen_ai.response.model': 'claude-3-haiku-20240307',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+            [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'claude-3-haiku-20240307',
+            [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
           }),
           op: 'gen_ai.models',
          description: 'models claude-3-haiku-20240307',
@@ -381,17 +402,17 @@ describe('Anthropic integration', () => {
           description: 'messages claude-3-haiku-20240307 stream-response',
           op: 'gen_ai.messages',
           data: expect.objectContaining({
-            'gen_ai.system': 'anthropic',
-            'gen_ai.operation.name': 'messages',
-            'gen_ai.request.model': 'claude-3-haiku-20240307',
-            'gen_ai.request.stream': true,
-            'gen_ai.response.streaming': true,
-            'gen_ai.response.model': 'claude-3-haiku-20240307',
-            'gen_ai.response.id': 'msg_stream_1',
-            'gen_ai.usage.input_tokens': 10,
-            'gen_ai.usage.output_tokens': 15,
-            'gen_ai.usage.total_tokens': 25,
-            'gen_ai.response.finish_reasons': '["end_turn"]',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+            [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
+            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+            [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+            [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'msg_stream_1',
+            [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+            [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
+            [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
+            [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["end_turn"]',
           }),
         }),
         // messages.stream
@@ -399,15 +420,15 @@ describe('Anthropic integration', () => {
           description: 'messages claude-3-haiku-20240307 stream-response',
           op: 'gen_ai.messages',
           data: expect.objectContaining({
-            'gen_ai.system': 'anthropic',
-            'gen_ai.operation.name': 'messages',
-            'gen_ai.request.model': 'claude-3-haiku-20240307',
-            'gen_ai.response.streaming': true,
-            'gen_ai.response.model': 'claude-3-haiku-20240307',
-            'gen_ai.response.id': 'msg_stream_1',
-            'gen_ai.usage.input_tokens': 10,
-            'gen_ai.usage.output_tokens': 15,
-            'gen_ai.usage.total_tokens': 25,
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+            [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+            [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'msg_stream_1',
+            [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+            [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
+            [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
           }),
         }),
         // messages.stream with redundant stream: true param
@@ -415,16 +436,16 @@ describe('Anthropic integration', () => {
           description: 'messages claude-3-haiku-20240307 stream-response',
           op: 'gen_ai.messages',
           data: expect.objectContaining({
-            'gen_ai.system': 'anthropic',
-            'gen_ai.operation.name': 'messages',
-            'gen_ai.request.model': 'claude-3-haiku-20240307',
-            'gen_ai.request.stream': true,
-            'gen_ai.response.streaming': true,
-            'gen_ai.response.model': 'claude-3-haiku-20240307',
-            'gen_ai.response.id': 'msg_stream_1',
-            'gen_ai.usage.input_tokens': 10,
-            'gen_ai.usage.output_tokens': 15,
-            'gen_ai.usage.total_tokens': 25,
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+            [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
+            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+            [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+            [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'msg_stream_1',
+            [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+            [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
+            [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
           }),
         }),
       ]),
@@ -437,25 +458,25 @@ describe('Anthropic integration', () => {
          description: 'messages claude-3-haiku-20240307 stream-response',
           op: 'gen_ai.messages',
           data: expect.objectContaining({
-            'gen_ai.response.streaming': true,
+            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
             // streamed text concatenated
-            'gen_ai.response.text': 'Hello from stream!',
+            [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Hello from stream!',
           }),
         }),
         expect.objectContaining({
           description: 'messages claude-3-haiku-20240307 stream-response',
           op: 'gen_ai.messages',
           data: expect.objectContaining({
-            'gen_ai.response.streaming': true,
-            'gen_ai.response.text': 'Hello from stream!',
+            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+            [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Hello from stream!',
           }),
         }),
         expect.objectContaining({
           description: 'messages claude-3-haiku-20240307 stream-response',
           op: 'gen_ai.messages',
           data: expect.objectContaining({
-            'gen_ai.response.streaming': true,
-            'gen_ai.response.text': 'Hello from stream!',
+            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+            [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Hello from stream!',
           }),
         }),
       ]),
@@ -488,8 +509,8 @@ describe('Anthropic integration', () => {
         expect.objectContaining({
           op: 'gen_ai.messages',
           data: expect.objectContaining({
-            'gen_ai.request.available_tools': EXPECTED_TOOLS_JSON,
-            'gen_ai.response.tool_calls': EXPECTED_TOOL_CALLS_JSON,
+            [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_TOOLS_JSON,
+            [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: EXPECTED_TOOL_CALLS_JSON,
           }),
         }),
       ]),
@@ -516,8 +537,8 @@ describe('Anthropic integration', () => {
          description: expect.stringContaining('stream-response'),
           op: 'gen_ai.messages',
           data: expect.objectContaining({
-            'gen_ai.request.available_tools': EXPECTED_TOOLS_JSON,
-            'gen_ai.response.tool_calls': EXPECTED_TOOL_CALLS_JSON,
+            [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_TOOLS_JSON,
+            [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: EXPECTED_TOOL_CALLS_JSON,
           }),
         }),
       ]),
@@ -538,8 +559,8 @@ describe('Anthropic integration', () => {
           op: 'gen_ai.messages',
           status: 'internal_error', // Actual status coming from the instrumentation
           data: expect.objectContaining({
-            'gen_ai.request.model': 'error-stream-init',
-            'gen_ai.request.stream': true,
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-stream-init',
+            [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
           }),
         }),
         // Error with messages.stream on stream initialization
@@ -548,7 +569,7 @@ describe('Anthropic integration', () => {
           op: 'gen_ai.messages',
           status: 'internal_error', // Actual status coming from the instrumentation
           data: expect.objectContaining({
-            'gen_ai.request.model': 'error-stream-init',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-stream-init',
           }),
         }),
         // Error midway with messages.create on streaming - note: The stream is started successfully
@@ -558,10 +579,10 @@ describe('Anthropic integration', () => {
           op: 'gen_ai.messages',
           status: 'ok',
           data: expect.objectContaining({
-            'gen_ai.request.model': 'error-stream-midway',
-            'gen_ai.request.stream': true,
-            'gen_ai.response.streaming': true,
-            'gen_ai.response.text': 'This stream will ', // We received some data before error
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-stream-midway',
+            [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
+            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+            [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'This stream will ', // We received some data before error
           }),
         }),
         // Error midway with messages.stream - same behavior, we get a span with the streamed data
@@ -570,9 +591,9 @@ describe('Anthropic integration', () => {
          op: 'gen_ai.messages',
           status: 'ok',
           data: expect.objectContaining({
-            'gen_ai.request.model': 'error-stream-midway',
-            'gen_ai.response.streaming': true,
-            'gen_ai.response.text': 'This stream will ', // We received some data before error
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-stream-midway',
+            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+            [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'This stream will ', // We received some data before error
           }),
         }),
       ]),
@@ -594,7 +615,7 @@ describe('Anthropic integration', () => {
          op: 'gen_ai.messages',
           status: 'internal_error',
           data: expect.objectContaining({
-            'gen_ai.request.model': 'invalid-format',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'invalid-format',
           }),
         }),
         // Model retrieval error
@@ -603,7 +624,7 @@ describe('Anthropic integration', () => {
           op: 'gen_ai.models',
           status: 'internal_error',
           data: expect.objectContaining({
-            'gen_ai.request.model': 'nonexistent-model',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'nonexistent-model',
           }),
         }),
         // Successful tool usage (for comparison)
@@ -612,8 +633,8 @@ describe('Anthropic integration', () => {
          op: 'gen_ai.messages',
           status: 'ok',
           data: expect.objectContaining({
-            'gen_ai.request.model': 'claude-3-haiku-20240307',
-            'gen_ai.response.tool_calls': expect.stringContaining('tool_ok_1'),
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+            [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.stringContaining('tool_ok_1'),
           }),
         }),
       ]),
@@ -640,13 +661,15 @@ describe('Anthropic integration', () => {
         // First call: Last message is large and gets truncated (only C's remain, D's are cropped)
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'messages',
-            'sentry.op': 'gen_ai.messages',
-            'sentry.origin': 'auto.ai.anthropic',
-            'gen_ai.system': 'anthropic',
-            'gen_ai.request.model': 'claude-3-haiku-20240307',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.messages',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
             // Messages should be present (truncation happened) and should be a JSON array
-            'gen_ai.request.messages': expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/),
+            [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringMatching(
+              /^\[\{"role":"user","content":"C+"\}\]$/,
+            ),
           }),
           description: 'messages claude-3-haiku-20240307',
           op: 'gen_ai.messages',
@@ -656,13 +679,13 @@ describe('Anthropic integration', () => {
         // Second call: Last message is small and kept without truncation
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'messages',
-            'sentry.op': 'gen_ai.messages',
-            'sentry.origin': 'auto.ai.anthropic',
-            'gen_ai.system': 'anthropic',
-            'gen_ai.request.model': 'claude-3-haiku-20240307',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.messages',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
             // Small message should be kept intact
-            'gen_ai.request.messages': JSON.stringify([
+            [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: JSON.stringify([
              { role: 'user', content: 'This is a small message that fits within the limit' },
             ]),
           }),
@@ -690,13 +713,13 @@ describe('Anthropic integration', () => {
       spans: expect.arrayContaining([
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'messages',
-            'sentry.op': 'gen_ai.messages',
-            'sentry.origin': 'auto.ai.anthropic',
-            'gen_ai.system': 'anthropic',
-            'gen_ai.request.model': 'claude-3-haiku-20240307',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.messages',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
             // Only the last message (with filtered media) should be kept
-            'gen_ai.request.messages': JSON.stringify([
+            [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: JSON.stringify([
               {
                 role: 'user',
                 content: [
diff --git a/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts b/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts
index d6ff72cde6d8..b61aea2ac7b8 100644
--- a/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts
@@ -1,4 +1,24 @@
+import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
 import { afterAll, describe, expect } from 'vitest';
+import {
+  GEN_AI_OPERATION_NAME_ATTRIBUTE,
+  GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE,
+  GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE,
+  GEN_AI_REQUEST_MESSAGES_ATTRIBUTE,
+  GEN_AI_REQUEST_MODEL_ATTRIBUTE,
+  GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE,
+  GEN_AI_REQUEST_TOP_P_ATTRIBUTE,
+  GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE,
+  GEN_AI_RESPONSE_ID_ATTRIBUTE,
+  GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
+  GEN_AI_RESPONSE_STREAMING_ATTRIBUTE,
+  GEN_AI_RESPONSE_TEXT_ATTRIBUTE,
+  GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE,
+  GEN_AI_SYSTEM_ATTRIBUTE,
+  GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE,
+} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes';
 import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../utils/runner';
 
 describe('Google GenAI integration', () => {
@@ -12,14 +32,14 @@ describe('Google GenAI integration', () => {
         // First span - chats.create
         expect.objectContaining({
           data: {
-            'gen_ai.operation.name': 'chat',
-            'sentry.op': 'gen_ai.chat',
-            'sentry.origin': 'auto.ai.google_genai',
-            'gen_ai.system': 'google_genai',
-            'gen_ai.request.model': 'gemini-1.5-pro',
-            'gen_ai.request.temperature': 0.8,
-            'gen_ai.request.top_p': 0.9,
-            'gen_ai.request.max_tokens': 150,
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
+            [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8,
+            [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9,
+            [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 150,
           },
           description: 'chat gemini-1.5-pro create',
           op: 'gen_ai.chat',
@@ -29,14 +49,14 @@ describe('Google GenAI integration', () => {
         // Second span - chat.sendMessage (should get model from context)
         expect.objectContaining({
           data: {
-            'gen_ai.operation.name': 'chat',
-            'sentry.op': 'gen_ai.chat',
-            'sentry.origin': 'auto.ai.google_genai',
-            'gen_ai.system': 'google_genai',
-            'gen_ai.request.model': 'gemini-1.5-pro', // Should get from chat context
-            'gen_ai.usage.input_tokens': 8,
-            'gen_ai.usage.output_tokens': 12,
-            'gen_ai.usage.total_tokens': 20,
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro', // Should get from chat context
+            [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
+            [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
+            [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
           },
           description: 'chat gemini-1.5-pro',
           op: 'gen_ai.chat',
@@ -46,17 +66,17 @@ describe('Google GenAI integration', () => {
         // Third span - models.generateContent
         expect.objectContaining({
           data: {
-            'gen_ai.operation.name': 'models',
-            'sentry.op': 'gen_ai.models',
-            'sentry.origin': 'auto.ai.google_genai',
-            'gen_ai.system': 'google_genai',
-            'gen_ai.request.model': 'gemini-1.5-flash',
-            'gen_ai.request.temperature': 0.7,
-            'gen_ai.request.top_p': 0.9,
-            'gen_ai.request.max_tokens': 100,
-            'gen_ai.usage.input_tokens': 8,
-            'gen_ai.usage.output_tokens': 12,
-            'gen_ai.usage.total_tokens': 20,
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash',
+            [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
+            [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9,
+            [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
+            [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
+            [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
+            [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
           },
           description: 'models gemini-1.5-flash',
           op: 'gen_ai.models',
@@ -66,11 +86,11 @@ describe('Google GenAI integration', () => {
         // Fourth span - error handling
         expect.objectContaining({
           data: {
-            'gen_ai.operation.name': 'models',
-            'sentry.op': 'gen_ai.models',
-            'sentry.origin': 'auto.ai.google_genai',
-            'gen_ai.system': 'google_genai',
-            'gen_ai.request.model': 'error-model',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
           },
           description: 'models error-model',
           op: 'gen_ai.models',
@@ -86,15 +106,15 @@ describe('Google GenAI integration', () => {
         // First span - chats.create with PII
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'chat',
-            'sentry.op': 'gen_ai.chat',
-            'sentry.origin': 'auto.ai.google_genai',
-            'gen_ai.system': 'google_genai',
-            'gen_ai.request.model': 'gemini-1.5-pro',
-            'gen_ai.request.temperature': 0.8,
-            'gen_ai.request.top_p': 0.9,
-            'gen_ai.request.max_tokens': 150,
-            'gen_ai.request.messages': '[{"role":"user","parts":[{"text":"Hello, how are you?"}]}]',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
+            [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8,
+            [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9,
+            [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 150,
+            [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","parts":[{"text":"Hello, how are you?"}]}]',
           }),
           description: 'chat gemini-1.5-pro create',
           op: 'gen_ai.chat',
@@ -104,16 +124,16 @@ describe('Google GenAI integration', () => {
         // Second span - chat.sendMessage with PII
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'chat',
-            'sentry.op': 'gen_ai.chat',
-            'sentry.origin': 'auto.ai.google_genai',
-            'gen_ai.system': 'google_genai',
-            'gen_ai.request.model': 'gemini-1.5-pro',
-            'gen_ai.request.messages': expect.any(String), // Should include message when recordInputs: true
-            'gen_ai.response.text': expect.any(String), // Should include response when recordOutputs: true
-            'gen_ai.usage.input_tokens': 8,
-            'gen_ai.usage.output_tokens': 12,
-            'gen_ai.usage.total_tokens': 20,
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
+            [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include message when recordInputs: true
+            [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response when recordOutputs: true
+            [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
+            [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
+            [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
           }),
           description: 'chat gemini-1.5-pro',
           op: 'gen_ai.chat',
@@ -123,19 +143,19 @@ describe('Google GenAI integration', () => {
         // Third span - models.generateContent with PII
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'models',
-            'sentry.op': 'gen_ai.models',
-            'sentry.origin': 'auto.ai.google_genai',
-            'gen_ai.system': 'google_genai',
-            'gen_ai.request.model': 'gemini-1.5-flash',
-            'gen_ai.request.temperature': 0.7,
-            'gen_ai.request.top_p': 0.9,
-            'gen_ai.request.max_tokens': 100,
-            'gen_ai.request.messages': expect.any(String), // Should include contents when recordInputs: true
-            'gen_ai.response.text': expect.any(String), // Should include response when recordOutputs: true
-            'gen_ai.usage.input_tokens': 8,
-            'gen_ai.usage.output_tokens': 12,
-            'gen_ai.usage.total_tokens': 20,
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash',
+            [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
+            [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9,
+            [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
+            [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true
+            [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response when recordOutputs: true
+            [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
+            [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
+            [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
           }),
           description: 'models gemini-1.5-flash',
           op: 'gen_ai.models',
@@ -145,12 +165,12 @@ describe('Google GenAI integration', () => {
         // Fourth span - error handling with PII
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'models',
-            'sentry.op': 'gen_ai.models',
-            'sentry.origin': 'auto.ai.google_genai',
-            'gen_ai.system': 'google_genai',
-            'gen_ai.request.model': 'error-model',
-            'gen_ai.request.messages': expect.any(String), // Should include contents when recordInputs: true
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
+            [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true
           }),
          description: 'models error-model',
           op: 'gen_ai.models',
@@ -166,8 +186,8 @@ describe('Google GenAI integration', () => {
         // Check that custom options are respected
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true
-            'gen_ai.response.text': expect.any(String), // Should include response text when recordOutputs: true
+            [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true
+            [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text when recordOutputs: true
           }),
           description: expect.not.stringContaining('stream-response'), // Non-streaming span
         }),
@@ -213,18 +233,18 @@ describe('Google GenAI integration', () => {
         // Non-streaming with tools
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'models',
-            'sentry.op': 'gen_ai.models',
-            'sentry.origin': 'auto.ai.google_genai',
-            'gen_ai.system': 'google_genai',
-            'gen_ai.request.model': 'gemini-2.0-flash-001',
-            'gen_ai.request.available_tools': EXPECTED_AVAILABLE_TOOLS_JSON,
-            'gen_ai.request.messages': expect.any(String), // Should include contents
-            'gen_ai.response.text': expect.any(String), // Should include response text
-            'gen_ai.response.tool_calls': expect.any(String), // Should include tool calls
-            'gen_ai.usage.input_tokens': 15,
-            'gen_ai.usage.output_tokens': 8,
-            'gen_ai.usage.total_tokens': 23,
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-2.0-flash-001',
+            [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_AVAILABLE_TOOLS_JSON,
+            [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents
+            [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text
+            [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), // Should include tool calls
+            [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
+            [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 8,
+            [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 23,
           }),
           description: 'models gemini-2.0-flash-001',
           op: 'gen_ai.models',
@@ -234,21 +254,21 @@ describe('Google GenAI integration', () => {
         // Streaming with tools
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'models',
-            'sentry.op': 'gen_ai.models',
-            'sentry.origin': 'auto.ai.google_genai',
-            'gen_ai.system': 'google_genai',
-            'gen_ai.request.model': 'gemini-2.0-flash-001',
-            'gen_ai.request.available_tools': EXPECTED_AVAILABLE_TOOLS_JSON,
-            'gen_ai.request.messages': expect.any(String), // Should include contents
-            'gen_ai.response.streaming': true,
-            'gen_ai.response.text': expect.any(String), // Should include response text
-            'gen_ai.response.tool_calls': expect.any(String), // Should include tool calls
-            'gen_ai.response.id': 'mock-response-tools-id',
-            'gen_ai.response.model': 'gemini-2.0-flash-001',
-            'gen_ai.usage.input_tokens': 12,
-            'gen_ai.usage.output_tokens': 10,
-            'gen_ai.usage.total_tokens': 22,
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-2.0-flash-001',
+            [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_AVAILABLE_TOOLS_JSON,
+            [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents
+            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+            [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text
+            [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), // Should include tool calls
+            [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-tools-id',
+            [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gemini-2.0-flash-001',
+            [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 12,
+            [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 10,
+            [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 22,
           }),
          description: 'models gemini-2.0-flash-001 stream-response',
           op: 'gen_ai.models',
@@ -258,16 +278,16 @@ describe('Google GenAI integration', () => {
         // Without tools for comparison
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'models',
-            'sentry.op': 'gen_ai.models',
-            'sentry.origin': 'auto.ai.google_genai',
-            'gen_ai.system': 'google_genai',
-            'gen_ai.request.model': 'gemini-2.0-flash-001',
-            'gen_ai.request.messages': expect.any(String), // Should include contents
-            'gen_ai.response.text': expect.any(String), // Should include response text
-            'gen_ai.usage.input_tokens': 8,
-            'gen_ai.usage.output_tokens': 12,
-            'gen_ai.usage.total_tokens': 20,
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-2.0-flash-001',
+            [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents
+            [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text
+            [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
+            [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
+            [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
           }),
           description: 'models gemini-2.0-flash-001',
           op: 'gen_ai.models',
@@ -289,21 +309,21 @@ describe('Google GenAI integration', () => {
         // First span - models.generateContentStream (streaming)
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'models',
-            'sentry.op': 'gen_ai.models',
-            'sentry.origin': 'auto.ai.google_genai',
-            'gen_ai.system': 'google_genai',
-            'gen_ai.request.model': 'gemini-1.5-flash',
-            'gen_ai.request.temperature': 0.7,
-            'gen_ai.request.top_p': 0.9,
-            'gen_ai.request.max_tokens': 100,
-            'gen_ai.response.streaming': true,
-            'gen_ai.response.id': 'mock-response-streaming-id',
-            'gen_ai.response.model': 'gemini-1.5-pro',
-            'gen_ai.response.finish_reasons': '["STOP"]',
-            'gen_ai.usage.input_tokens': 10,
-            'gen_ai.usage.output_tokens': 12,
-            'gen_ai.usage.total_tokens': 22,
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash',
+            [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
+            [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9,
+            [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
+            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+            [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-streaming-id',
+            [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
+            [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["STOP"]',
+            [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+            [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
+            [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 22,
           }),
           description: 'models gemini-1.5-flash stream-response',
           op: 'gen_ai.models',
@@ -313,14 +333,14 @@ describe('Google GenAI integration', () => {
         // Second span - chat.create
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'chat',
-            'sentry.op': 'gen_ai.chat',
-            'sentry.origin': 'auto.ai.google_genai',
-            'gen_ai.system': 'google_genai',
-            'gen_ai.request.model': 'gemini-1.5-pro',
-            'gen_ai.request.temperature': 0.8,
-            'gen_ai.request.top_p': 0.9,
-            'gen_ai.request.max_tokens': 150,
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
+            [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8,
+            [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9,
+            [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 150,
           }),
           description: 'chat gemini-1.5-pro create',
           op: 'gen_ai.chat',
@@ -330,14 +350,14 @@ describe('Google GenAI integration', () => {
         // Third span - chat.sendMessageStream (streaming)
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'chat',
-            'sentry.op': 'gen_ai.chat',
-            'sentry.origin': 'auto.ai.google_genai',
-            'gen_ai.system': 'google_genai',
-            'gen_ai.request.model': 'gemini-1.5-pro',
-            'gen_ai.response.streaming': true,
-            'gen_ai.response.id': 'mock-response-streaming-id',
-            'gen_ai.response.model': 'gemini-1.5-pro',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
+            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+            [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-streaming-id',
+            [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
           }),
          description: 'chat gemini-1.5-pro stream-response',
           op: 'gen_ai.chat',
@@ -347,9 +367,9 @@ describe('Google GenAI integration', () => {
         // Fourth span - blocked content streaming
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'models',
-            'sentry.op': 'gen_ai.models',
-            'sentry.origin': 'auto.ai.google_genai',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
           }),
           description: 'models blocked-model stream-response',
           op: 'gen_ai.models',
@@ -359,9 +379,9 @@ describe('Google GenAI integration', () => {
         // Fifth span - error handling for streaming
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'models',
-            'sentry.op': 'gen_ai.models',
-            'sentry.origin': 'auto.ai.google_genai',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
           }),
           description: 'models error-model stream-response',
           op: 'gen_ai.models',
@@ -377,22 +397,22 @@ describe('Google GenAI integration', () => {
         // First span - models.generateContentStream (streaming) with PII
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'models',
-            'sentry.op': 'gen_ai.models',
-            'sentry.origin': 'auto.ai.google_genai',
-            'gen_ai.system': 'google_genai',
-            'gen_ai.request.model': 'gemini-1.5-flash',
-            'gen_ai.request.temperature': 0.7,
-            'gen_ai.request.top_p': 0.9,
-            'gen_ai.request.max_tokens': 100,
-            'gen_ai.request.messages': expect.any(String), // Should include contents when recordInputs: true
-            'gen_ai.response.streaming': true,
-            'gen_ai.response.id': 'mock-response-streaming-id',
-            'gen_ai.response.model': 'gemini-1.5-pro',
-            'gen_ai.response.finish_reasons': '["STOP"]',
-            'gen_ai.usage.input_tokens': 10,
-            'gen_ai.usage.output_tokens': 12,
-            'gen_ai.usage.total_tokens': 22,
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash',
+            [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
+            [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9,
+            [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
+            [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true
+            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+            [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-streaming-id',
+            [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
+            [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["STOP"]',
+            [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+            [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
+            [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 22,
           }),
          description: 'models gemini-1.5-flash stream-response',
           op: 'gen_ai.models',
@@ -402,14 +422,14 @@ describe('Google GenAI integration', () => {
         // Second span - chat.create
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'chat',
-            'sentry.op': 'gen_ai.chat',
-            'sentry.origin': 'auto.ai.google_genai',
-            'gen_ai.system': 'google_genai',
-            'gen_ai.request.model': 'gemini-1.5-pro',
-            'gen_ai.request.temperature': 0.8,
-            'gen_ai.request.top_p': 0.9,
-            'gen_ai.request.max_tokens': 150,
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
+            [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8,
+            [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9,
+            [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 150,
           }),
           description: 'chat gemini-1.5-pro create',
           op: 'gen_ai.chat',
@@ -419,19 +439,19 @@ describe('Google GenAI integration', () => {
         // Third span - chat.sendMessageStream (streaming) with PII
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'chat',
-            'sentry.op': 'gen_ai.chat',
-            'sentry.origin': 'auto.ai.google_genai',
-            'gen_ai.system': 'google_genai',
-            'gen_ai.request.model': 'gemini-1.5-pro',
-            'gen_ai.request.messages': expect.any(String), // Should include message when recordInputs: true
-            'gen_ai.response.streaming': true,
-            'gen_ai.response.id': 'mock-response-streaming-id',
-            'gen_ai.response.model': 'gemini-1.5-pro',
-            'gen_ai.response.finish_reasons': '["STOP"]',
-            'gen_ai.usage.input_tokens': 10,
-            'gen_ai.usage.output_tokens': 12,
-            'gen_ai.usage.total_tokens': 22,
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
+            [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include message when recordInputs: true
+            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+            [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-streaming-id',
+            [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
+            [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["STOP"]',
+            [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+            [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
+            [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 22,
           }),
           description: 'chat gemini-1.5-pro stream-response',
           op: 'gen_ai.chat',
@@ -441,14 +461,14 @@ describe('Google GenAI integration', () => {
         // Fourth span - blocked content stream with PII
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'models',
-            'sentry.op': 'gen_ai.models',
-            'sentry.origin': 'auto.ai.google_genai',
-            'gen_ai.system': 'google_genai',
-            'gen_ai.request.model': 'blocked-model',
-            'gen_ai.request.temperature': 0.7,
-            'gen_ai.request.messages': expect.any(String), // Should include contents when recordInputs: true
-            'gen_ai.response.streaming': true,
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'blocked-model',
+            [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
+
[GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true + [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, }), description: 'models blocked-model stream-response', op: 'gen_ai.models', @@ -458,13 +478,13 @@ describe('Google GenAI integration', () => { // Fifth span - error handling for streaming with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', - 'sentry.origin': 'auto.ai.google_genai', - 'gen_ai.system': 'google_genai', - 'gen_ai.request.model': 'error-model', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.request.messages': expect.any(String), // Should include contents when recordInputs: true + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true }), description: 'models error-model stream-response', op: 'gen_ai.models', @@ -505,13 +525,13 @@ describe('Google GenAI integration', () => { // First call: Last message is large and gets truncated (only C's remain, D's are cropped) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', - 'sentry.origin': 'auto.ai.google_genai', - 'gen_ai.system': 'google_genai', - 'gen_ai.request.model': 'gemini-1.5-flash', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash', // Messages should be present (truncation happened) and should be a JSON array with parts - 'gen_ai.request.messages': expect.stringMatching( + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringMatching( /^\[\{"role":"user","parts":\[\{"text":"C+"\}\]\}\]$/, ), }), @@ -523,13 +543,13 @@ describe('Google GenAI integration', () => { // Second call: Last message is small and kept without truncation expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', - 'sentry.origin': 'auto.ai.google_genai', - 'gen_ai.system': 'google_genai', - 'gen_ai.request.model': 'gemini-1.5-flash', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash', // Small message should be kept intact - 'gen_ai.request.messages': JSON.stringify([ + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: JSON.stringify([ { role: 'user', parts: [{ text: 'This is a small message that fits within the limit' }], diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts b/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts index 8d8f1d542f70..eb9344a12608 100644 --- a/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts @@ -1,4 +1,22 @@ +import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core'; import { afterAll, describe, expect } from 'vitest'; +import 
{ + GEN_AI_OPERATION_NAME_ATTRIBUTE, + GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE, + GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, + GEN_AI_REQUEST_MODEL_ATTRIBUTE, + GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE, + GEN_AI_REQUEST_TOP_P_ATTRIBUTE, + GEN_AI_RESPONSE_ID_ATTRIBUTE, + GEN_AI_RESPONSE_MODEL_ATTRIBUTE, + GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE, + GEN_AI_RESPONSE_TEXT_ATTRIBUTE, + GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, + GEN_AI_SYSTEM_ATTRIBUTE, + GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, +} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes'; import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../utils/runner'; describe('LangChain integration', () => { @@ -12,19 +30,19 @@ describe('LangChain integration', () => { // First span - chat model with claude-3-5-sonnet expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.request.max_tokens': 100, - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': expect.any(String), - 'gen_ai.response.stop_reason': expect.any(String), + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100, + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: expect.any(String), }), description: 'chat claude-3-5-sonnet-20241022', op: 'gen_ai.chat', @@ -34,20 +52,20 @@ describe('LangChain integration', () => { // Second span - chat model with claude-3-opus expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-opus-20240229', - 'gen_ai.request.temperature': 0.9, - 'gen_ai.request.top_p': 0.95, - 'gen_ai.request.max_tokens': 200, - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': expect.any(String), - 'gen_ai.response.stop_reason': expect.any(String), + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-opus-20240229', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.9, + [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.95, + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 200, + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String), + 
[GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: expect.any(String), }), description: 'chat claude-3-opus-20240229', op: 'gen_ai.chat', @@ -57,11 +75,11 @@ describe('LangChain integration', () => { // Third span - error handling expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'error-model', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', }), description: 'chat error-model', op: 'gen_ai.chat', @@ -77,21 +95,21 @@ describe('LangChain integration', () => { // First span - chat model with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.request.max_tokens': 100, - 'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true - 'gen_ai.response.text': expect.any(String), // Should include response when recordOutputs: true - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': expect.any(String), - 'gen_ai.response.stop_reason': expect.any(String), - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100, + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response when recordOutputs: true + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: expect.any(String), + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, }), description: 'chat claude-3-5-sonnet-20241022', op: 'gen_ai.chat', @@ -101,22 +119,22 @@ describe('LangChain integration', () => { // Second span - chat model with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-opus-20240229', - 'gen_ai.request.temperature': 0.9, - 'gen_ai.request.top_p': 0.95, - 'gen_ai.request.max_tokens': 200, - 'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true - 'gen_ai.response.text': expect.any(String), // Should include response when recordOutputs: true - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': expect.any(String), - 'gen_ai.response.stop_reason': expect.any(String), - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + 
[SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-opus-20240229', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.9, + [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.95, + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 200, + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response when recordOutputs: true + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: expect.any(String), + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, }), description: 'chat claude-3-opus-20240229', op: 'gen_ai.chat', @@ -126,12 +144,12 @@ describe('LangChain integration', () => { // Third span - error handling with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'error-model', - 'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true }), description: 'chat error-model', op: 'gen_ai.chat', @@ -166,20 +184,20 @@ describe('LangChain integration', () => { spans: expect.arrayContaining([ expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.request.max_tokens': 150, - 'gen_ai.usage.input_tokens': 20, - 'gen_ai.usage.output_tokens': 30, - 'gen_ai.usage.total_tokens': 50, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': expect.any(String), - 'gen_ai.response.stop_reason': 'tool_use', - 'gen_ai.response.tool_calls': expect.any(String), + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 150, + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 50, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: 'tool_use', + [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), }), description: 'chat claude-3-5-sonnet-20241022', op: 'gen_ai.chat', @@ -201,13 +219,13 @@ describe('LangChain integration', () => { // First call: String input truncated (only C's remain, D's are cropped) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', 
- 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', // Messages should be present and should include truncated string input (contains only Cs) - 'gen_ai.request.messages': expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), }), description: 'chat claude-3-5-sonnet-20241022', op: 'gen_ai.chat', @@ -217,13 +235,13 @@ describe('LangChain integration', () => { // Second call: Array input, last message truncated (only C's remain, D's are cropped) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', // Messages should be present (truncation happened) and should be a JSON array with a single message (contains only Cs) - 'gen_ai.request.messages': expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), }), description: 'chat claude-3-5-sonnet-20241022', op: 'gen_ai.chat', @@ -233,13 +251,13 @@ describe('LangChain integration', () => { // Third call: Last message is small and kept without truncation expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', // Small message should be kept intact - 'gen_ai.request.messages': JSON.stringify([ + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: JSON.stringify([ { role: 'user', content: 'This is a small message that fits within the limit' }, ]), }), diff --git a/dev-packages/node-integration-tests/suites/tracing/langgraph/test.ts b/dev-packages/node-integration-tests/suites/tracing/langgraph/test.ts index bafcdf49a32c..539bce1a740e 100644 --- a/dev-packages/node-integration-tests/suites/tracing/langgraph/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/langgraph/test.ts @@ -1,4 +1,20 @@ +import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core'; import { afterAll, describe, expect } from 'vitest'; +import { + GEN_AI_AGENT_NAME_ATTRIBUTE, + GEN_AI_CONVERSATION_ID_ATTRIBUTE, + GEN_AI_OPERATION_NAME_ATTRIBUTE, + GEN_AI_PIPELINE_NAME_ATTRIBUTE, + GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, + GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, + GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, + GEN_AI_RESPONSE_MODEL_ATTRIBUTE, + GEN_AI_RESPONSE_TEXT_ATTRIBUTE, +
GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, + GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, +} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes'; import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../utils/runner'; describe('LangGraph integration', () => { @@ -12,10 +28,10 @@ describe('LangGraph integration', () => { // create_agent span expect.objectContaining({ data: { - 'gen_ai.operation.name': 'create_agent', - 'sentry.op': 'gen_ai.create_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'weather_assistant', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'create_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.create_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant', }, description: 'create_agent weather_assistant', op: 'gen_ai.create_agent', @@ -25,11 +41,11 @@ describe('LangGraph integration', () => { // First invoke_agent span expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'weather_assistant', - 'gen_ai.pipeline.name': 'weather_assistant', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant', + [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'weather_assistant', }), description: 'invoke_agent weather_assistant', op: 'gen_ai.invoke_agent', @@ -39,11 +55,11 @@ describe('LangGraph integration', () => { // Second invoke_agent span expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'weather_assistant', - 'gen_ai.pipeline.name': 'weather_assistant', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant', + [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'weather_assistant', }), description: 'invoke_agent weather_assistant', op: 'gen_ai.invoke_agent', @@ -59,10 +75,10 @@ describe('LangGraph integration', () => { // create_agent span (PII enabled doesn't affect this span) expect.objectContaining({ data: { - 'gen_ai.operation.name': 'create_agent', - 'sentry.op': 'gen_ai.create_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'weather_assistant', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'create_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.create_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant', }, description: 'create_agent weather_assistant', op: 'gen_ai.create_agent', @@ -72,12 +88,12 @@ describe('LangGraph integration', () => { // First invoke_agent span with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'weather_assistant', - 'gen_ai.pipeline.name': 'weather_assistant', - 'gen_ai.request.messages': expect.stringContaining('What is the weather today?'), + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 
'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant', + [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'weather_assistant', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringContaining('What is the weather today?'), }), description: 'invoke_agent weather_assistant', op: 'gen_ai.invoke_agent', @@ -87,12 +103,12 @@ describe('LangGraph integration', () => { // Second invoke_agent span with PII and multiple messages expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'weather_assistant', - 'gen_ai.pipeline.name': 'weather_assistant', - 'gen_ai.request.messages': expect.stringContaining('Tell me about the weather'), + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant', + [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'weather_assistant', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringContaining('Tell me about the weather'), }), description: 'invoke_agent weather_assistant', op: 'gen_ai.invoke_agent', @@ -108,10 +124,10 @@ describe('LangGraph integration', () => { // create_agent span for first graph (no tool calls) expect.objectContaining({ data: { - 'gen_ai.operation.name': 'create_agent', - 'sentry.op': 'gen_ai.create_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'tool_agent', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'create_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.create_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'tool_agent', }, description: 'create_agent tool_agent', op: 'gen_ai.create_agent', @@ -121,19 +137,19 @@ describe('LangGraph integration', () => { // invoke_agent span with tools available but not called expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'tool_agent', - 'gen_ai.pipeline.name': 'tool_agent', - 'gen_ai.request.available_tools': expect.stringContaining('get_weather'), - 'gen_ai.request.messages': expect.stringContaining('What is the weather?'), - 'gen_ai.response.model': 'gpt-4-0613', - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.response.text': expect.stringContaining('Response without calling tools'), - 'gen_ai.usage.input_tokens': 25, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'tool_agent', + [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'tool_agent', + [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: expect.stringContaining('get_weather'), + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringContaining('What is the weather?'), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4-0613', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.stringContaining('Response without calling tools'), + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, }), description: 'invoke_agent tool_agent', op: 'gen_ai.invoke_agent', @@ -143,10 
+159,10 @@ describe('LangGraph integration', () => { // create_agent span for second graph (with tool calls) expect.objectContaining({ data: { - 'gen_ai.operation.name': 'create_agent', - 'sentry.op': 'gen_ai.create_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'tool_calling_agent', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'create_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.create_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'tool_calling_agent', }, description: 'create_agent tool_calling_agent', op: 'gen_ai.create_agent', @@ -156,21 +172,21 @@ describe('LangGraph integration', () => { // invoke_agent span with tool calls and execution expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'tool_calling_agent', - 'gen_ai.pipeline.name': 'tool_calling_agent', - 'gen_ai.request.available_tools': expect.stringContaining('get_weather'), - 'gen_ai.request.messages': expect.stringContaining('San Francisco'), - 'gen_ai.response.model': 'gpt-4-0613', - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.response.text': expect.stringMatching(/"role":"tool"/), + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'tool_calling_agent', + [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'tool_calling_agent', + [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: expect.stringContaining('get_weather'), + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringContaining('San Francisco'), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4-0613', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.stringMatching(/"role":"tool"/), // Verify tool_calls are captured - 'gen_ai.response.tool_calls': expect.stringContaining('get_weather'), - 'gen_ai.usage.input_tokens': 80, - 'gen_ai.usage.output_tokens': 40, - 'gen_ai.usage.total_tokens': 120, + [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.stringContaining('get_weather'), + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 80, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 120, }), description: 'invoke_agent tool_calling_agent', op: 'gen_ai.invoke_agent', @@ -213,10 +229,10 @@ describe('LangGraph integration', () => { // create_agent span expect.objectContaining({ data: { - 'gen_ai.operation.name': 'create_agent', - 'sentry.op': 'gen_ai.create_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'thread_test_agent', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'create_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.create_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'thread_test_agent', }, description: 'create_agent thread_test_agent', op: 'gen_ai.create_agent', @@ -226,13 +242,13 @@ describe('LangGraph integration', () => { // First invoke_agent span with thread_id expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'thread_test_agent', - 'gen_ai.pipeline.name': 'thread_test_agent', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 
'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'thread_test_agent', + [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'thread_test_agent', // The thread_id should be captured as conversation.id - 'gen_ai.conversation.id': 'thread_abc123_session_1', + [GEN_AI_CONVERSATION_ID_ATTRIBUTE]: 'thread_abc123_session_1', }), description: 'invoke_agent thread_test_agent', op: 'gen_ai.invoke_agent', @@ -242,13 +258,13 @@ describe('LangGraph integration', () => { // Second invoke_agent span with different thread_id expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'thread_test_agent', - 'gen_ai.pipeline.name': 'thread_test_agent', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'thread_test_agent', + [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'thread_test_agent', // Different thread_id for different conversation - 'gen_ai.conversation.id': 'thread_xyz789_session_2', + [GEN_AI_CONVERSATION_ID_ATTRIBUTE]: 'thread_xyz789_session_2', }), description: 'invoke_agent thread_test_agent', op: 'gen_ai.invoke_agent', @@ -258,7 +274,7 @@ describe('LangGraph integration', () => { // Third invoke_agent span without thread_id (should NOT have gen_ai.conversation.id) expect.objectContaining({ data: expect.not.objectContaining({ - 'gen_ai.conversation.id': expect.anything(), + [GEN_AI_CONVERSATION_ID_ATTRIBUTE]: expect.anything(), }), description: 'invoke_agent thread_test_agent', op: 'gen_ai.invoke_agent', diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/test.ts b/dev-packages/node-integration-tests/suites/tracing/openai/test.ts index bf64d2b92b72..52ddc0837097 100644 --- a/dev-packages/node-integration-tests/suites/tracing/openai/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/openai/test.ts @@ -1,4 +1,31 @@ +import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core'; import { afterAll, describe, expect } from 'vitest'; +import { + GEN_AI_CONVERSATION_ID_ATTRIBUTE, + GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE, + GEN_AI_OPERATION_NAME_ATTRIBUTE, + GEN_AI_REQUEST_DIMENSIONS_ATTRIBUTE, + GEN_AI_REQUEST_ENCODING_FORMAT_ATTRIBUTE, + GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, + GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, + GEN_AI_REQUEST_MODEL_ATTRIBUTE, + GEN_AI_REQUEST_STREAM_ATTRIBUTE, + GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE, + GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, + GEN_AI_RESPONSE_ID_ATTRIBUTE, + GEN_AI_RESPONSE_MODEL_ATTRIBUTE, + GEN_AI_RESPONSE_STREAMING_ATTRIBUTE, + GEN_AI_RESPONSE_TEXT_ATTRIBUTE, + GEN_AI_SYSTEM_ATTRIBUTE, + GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, + OPENAI_RESPONSE_ID_ATTRIBUTE, + OPENAI_RESPONSE_MODEL_ATTRIBUTE, + OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE, + OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, + OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE, +} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes'; import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../utils/runner'; describe('OpenAI integration', () => { @@ -12,23 +39,23 @@ describe('OpenAI integration', () => { // First span - basic chat completion without PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', - 
'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-3.5-turbo', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.response.model': 'gpt-3.5-turbo', - 'gen_ai.response.id': 'chatcmpl-mock123', - 'gen_ai.response.finish_reasons': '["stop"]', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, - 'openai.response.id': 'chatcmpl-mock123', - 'openai.response.model': 'gpt-3.5-turbo', - 'openai.response.timestamp': '2023-03-01T06:31:28.000Z', - 'openai.usage.completion_tokens': 15, - 'openai.usage.prompt_tokens': 10, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, + [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123', + [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:28.000Z', + [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 15, + [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 10, }, description: 'chat gpt-3.5-turbo', op: 'gen_ai.chat', @@ -38,22 +65,22 @@ describe('OpenAI integration', () => { // Second span - responses API expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-3.5-turbo', - 'gen_ai.response.model': 'gpt-3.5-turbo', - 'gen_ai.response.id': 'resp_mock456', - 'gen_ai.response.finish_reasons': '["completed"]', - 'gen_ai.usage.input_tokens': 5, - 'gen_ai.usage.output_tokens': 8, - 'gen_ai.usage.total_tokens': 13, - 'openai.response.id': 'resp_mock456', - 'openai.response.model': 'gpt-3.5-turbo', - 'openai.response.timestamp': '2023-03-01T06:31:30.000Z', - 'openai.usage.completion_tokens': 8, - 'openai.usage.prompt_tokens': 5, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'responses', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.responses', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'resp_mock456', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["completed"]', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 5, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 8, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 13, + [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'resp_mock456', + [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:30.000Z', + [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 8, + [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 5, }, description: 'responses gpt-3.5-turbo', op: 'gen_ai.responses', @@ -63,11 +90,11 @@ describe('OpenAI integration', () => { // Third span - error handling expect.objectContaining({ data: { - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'error-model', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + 
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', }, description: 'chat error-model', op: 'gen_ai.chat', @@ -77,25 +104,25 @@ describe('OpenAI integration', () => { // Fourth span - chat completions streaming expect.objectContaining({ data: { - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-4', - 'gen_ai.request.temperature': 0.8, - 'gen_ai.request.stream': true, - 'gen_ai.response.model': 'gpt-4', - 'gen_ai.response.id': 'chatcmpl-stream-123', - 'gen_ai.response.finish_reasons': '["stop"]', - 'gen_ai.usage.input_tokens': 12, - 'gen_ai.usage.output_tokens': 18, - 'gen_ai.usage.total_tokens': 30, - 'openai.response.id': 'chatcmpl-stream-123', - 'openai.response.model': 'gpt-4', - 'gen_ai.response.streaming': true, - 'openai.response.timestamp': '2023-03-01T06:31:40.000Z', - 'openai.usage.completion_tokens': 18, - 'openai.usage.prompt_tokens': 12, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8, + [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-stream-123', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 12, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 18, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-stream-123', + [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, + [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:40.000Z', + [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 18, + [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 12, }, description: 'chat gpt-4 stream-response', op: 'gen_ai.chat', @@ -105,24 +132,24 @@ describe('OpenAI integration', () => { // Fifth span - responses API streaming expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-4', - 'gen_ai.request.stream': true, - 'gen_ai.response.model': 'gpt-4', - 'gen_ai.response.id': 'resp_stream_456', - 'gen_ai.response.finish_reasons': '["in_progress","completed"]', - 'gen_ai.usage.input_tokens': 6, - 'gen_ai.usage.output_tokens': 10, - 'gen_ai.usage.total_tokens': 16, - 'openai.response.id': 'resp_stream_456', - 'openai.response.model': 'gpt-4', - 'gen_ai.response.streaming': true, - 'openai.response.timestamp': '2023-03-01T06:31:50.000Z', - 'openai.usage.completion_tokens': 10, - 'openai.usage.prompt_tokens': 6, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'responses', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.responses', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'resp_stream_456', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["in_progress","completed"]', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 6, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 16, + [OPENAI_RESPONSE_ID_ATTRIBUTE]: 
'resp_stream_456', + [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, + [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:50.000Z', + [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 10, + [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 6, }, description: 'responses gpt-4 stream-response', op: 'gen_ai.responses', @@ -132,12 +159,12 @@ describe('OpenAI integration', () => { // Sixth span - error handling in streaming context expect.objectContaining({ data: { - 'gen_ai.operation.name': 'chat', - 'gen_ai.request.model': 'error-model', - 'gen_ai.request.stream': true, - 'gen_ai.system': 'openai', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', + [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true, + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', }, description: 'chat error-model stream-response', op: 'gen_ai.chat', @@ -153,26 +180,26 @@ describe('OpenAI integration', () => { // First span - basic chat completion with PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-3.5-turbo', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.request.messages.original_length': 2, - 'gen_ai.request.messages': '[{"role":"user","content":"What is the capital of France?"}]', - 'gen_ai.response.model': 'gpt-3.5-turbo', - 'gen_ai.response.id': 'chatcmpl-mock123', - 'gen_ai.response.finish_reasons': '["stop"]', - 'gen_ai.response.text': '["Hello from OpenAI mock!"]', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, - 'openai.response.id': 'chatcmpl-mock123', - 'openai.response.model': 'gpt-3.5-turbo', - 'openai.response.timestamp': '2023-03-01T06:31:28.000Z', - 'openai.usage.completion_tokens': 15, - 'openai.usage.prompt_tokens': 10, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 2, + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the capital of France?"}]', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]', + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: '["Hello from OpenAI mock!"]', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, + [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123', + [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:28.000Z', + [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 15, + [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 10, }, description: 'chat gpt-3.5-turbo', op: 'gen_ai.chat', @@ -182,24 +209,24 @@ describe('OpenAI integration', () => { // Second span - responses API with PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-3.5-turbo', - 
'gen_ai.request.messages': 'Translate this to French: Hello', - 'gen_ai.response.text': 'Response to: Translate this to French: Hello', - 'gen_ai.response.finish_reasons': '["completed"]', - 'gen_ai.response.model': 'gpt-3.5-turbo', - 'gen_ai.response.id': 'resp_mock456', - 'gen_ai.usage.input_tokens': 5, - 'gen_ai.usage.output_tokens': 8, - 'gen_ai.usage.total_tokens': 13, - 'openai.response.id': 'resp_mock456', - 'openai.response.model': 'gpt-3.5-turbo', - 'openai.response.timestamp': '2023-03-01T06:31:30.000Z', - 'openai.usage.completion_tokens': 8, - 'openai.usage.prompt_tokens': 5, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'responses', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.responses', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: 'Translate this to French: Hello', + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Response to: Translate this to French: Hello', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["completed"]', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'resp_mock456', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 5, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 8, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 13, + [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'resp_mock456', + [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:30.000Z', + [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 8, + [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 5, }, description: 'responses gpt-3.5-turbo', op: 'gen_ai.responses', @@ -209,13 +236,13 @@ describe('OpenAI integration', () => { // Third span - error handling with PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'error-model', - 'gen_ai.request.messages.original_length': 1, - 'gen_ai.request.messages': '[{"role":"user","content":"This will fail"}]', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', + [GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"This will fail"}]', }, description: 'chat error-model', op: 'gen_ai.chat', @@ -225,28 +252,28 @@ describe('OpenAI integration', () => { // Fourth span - chat completions streaming with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-4', - 'gen_ai.request.temperature': 0.8, - 'gen_ai.request.stream': true, - 'gen_ai.request.messages.original_length': 2, - 'gen_ai.request.messages': '[{"role":"user","content":"Tell me about streaming"}]', - 'gen_ai.response.text': 'Hello from OpenAI streaming!', - 'gen_ai.response.finish_reasons': '["stop"]', - 'gen_ai.response.id': 'chatcmpl-stream-123', - 'gen_ai.response.model': 'gpt-4', - 'gen_ai.usage.input_tokens': 12, - 'gen_ai.usage.output_tokens': 18, - 'gen_ai.usage.total_tokens': 30, - 'openai.response.id': 'chatcmpl-stream-123', - 'openai.response.model': 'gpt-4', - 'gen_ai.response.streaming': true, - 'openai.response.timestamp': '2023-03-01T06:31:40.000Z', - 
-          'openai.usage.completion_tokens': 18,
-          'openai.usage.prompt_tokens': 12,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4',
+          [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8,
+          [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
+          [GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 2,
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Tell me about streaming"}]',
+          [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Hello from OpenAI streaming!',
+          [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]',
+          [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-stream-123',
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 12,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 18,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
+          [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-stream-123',
+          [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4',
+          [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+          [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:40.000Z',
+          [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 18,
+          [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 12,
         }),
         description: 'chat gpt-4 stream-response',
         op: 'gen_ai.chat',
@@ -256,26 +283,27 @@ describe('OpenAI integration', () => {
       // Fifth span - responses API streaming with PII
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'responses',
-          'sentry.op': 'gen_ai.responses',
-          'sentry.origin': 'auto.ai.openai',
-          'gen_ai.system': 'openai',
-          'gen_ai.request.model': 'gpt-4',
-          'gen_ai.request.stream': true,
-          'gen_ai.request.messages': 'Test streaming responses API',
-          'gen_ai.response.text': 'Streaming response to: Test streaming responses APITest streaming responses API',
-          'gen_ai.response.finish_reasons': '["in_progress","completed"]',
-          'gen_ai.response.id': 'resp_stream_456',
-          'gen_ai.response.model': 'gpt-4',
-          'gen_ai.usage.input_tokens': 6,
-          'gen_ai.usage.output_tokens': 10,
-          'gen_ai.usage.total_tokens': 16,
-          'openai.response.id': 'resp_stream_456',
-          'openai.response.model': 'gpt-4',
-          'gen_ai.response.streaming': true,
-          'openai.response.timestamp': '2023-03-01T06:31:50.000Z',
-          'openai.usage.completion_tokens': 10,
-          'openai.usage.prompt_tokens': 6,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'responses',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.responses',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4',
+          [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: 'Test streaming responses API',
+          [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]:
+            'Streaming response to: Test streaming responses APITest streaming responses API',
+          [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["in_progress","completed"]',
+          [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'resp_stream_456',
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 6,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 10,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 16,
+          [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'resp_stream_456',
+          [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4',
+          [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+          [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:50.000Z',
+          [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 10,
+          [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 6,
         }),
         description: 'responses gpt-4 stream-response',
         op: 'gen_ai.responses',
@@ -285,14 +313,14 @@ describe('OpenAI integration', () => {
       // Sixth span - error handling in streaming context with PII
       expect.objectContaining({
         data: {
-          'gen_ai.operation.name': 'chat',
-          'gen_ai.request.model': 'error-model',
-          'gen_ai.request.stream': true,
-          'gen_ai.request.messages.original_length': 1,
-          'gen_ai.request.messages': '[{"role":"user","content":"This will fail"}]',
-          'gen_ai.system': 'openai',
-          'sentry.op': 'gen_ai.chat',
-          'sentry.origin': 'auto.ai.openai',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
+          [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
+          [GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"This will fail"}]',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
         },
         description: 'chat error-model stream-response',
         op: 'gen_ai.chat',
@@ -308,16 +336,16 @@ describe('OpenAI integration', () => {
       // Check that custom options are respected
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true
-          'gen_ai.response.text': expect.any(String), // Should include response text when recordOutputs: true
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true
+          [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text when recordOutputs: true
         }),
       }),
       // Check that custom options are respected for streaming
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true
-          'gen_ai.response.text': expect.any(String), // Should include response text when recordOutputs: true
-          'gen_ai.request.stream': true, // Should be marked as stream
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true
+          [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text when recordOutputs: true
+          [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true, // Should be marked as stream
         }),
       }),
     ]),
@@ -359,18 +387,18 @@ describe('OpenAI integration', () => {
       // First span - embeddings API
       expect.objectContaining({
         data: {
-          'gen_ai.operation.name': 'embeddings',
-          'sentry.op': 'gen_ai.embeddings',
-          'sentry.origin': 'auto.ai.openai',
-          'gen_ai.system': 'openai',
-          'gen_ai.request.model': 'text-embedding-3-small',
-          'gen_ai.request.encoding_format': 'float',
-          'gen_ai.request.dimensions': 1536,
-          'gen_ai.response.model': 'text-embedding-3-small',
-          'gen_ai.usage.input_tokens': 10,
-          'gen_ai.usage.total_tokens': 10,
-          'openai.response.model': 'text-embedding-3-small',
-          'openai.usage.prompt_tokens': 10,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'text-embedding-3-small',
+          [GEN_AI_REQUEST_ENCODING_FORMAT_ATTRIBUTE]: 'float',
+          [GEN_AI_REQUEST_DIMENSIONS_ATTRIBUTE]: 1536,
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 10,
+          [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small',
+          [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 10,
         },
         description: 'embeddings text-embedding-3-small',
         op: 'gen_ai.embeddings',
@@ -380,11 +408,11 @@ describe('OpenAI integration', () => {
       // Second span - embeddings API error model
       expect.objectContaining({
         data: {
-          'gen_ai.operation.name': 'embeddings',
-          'sentry.op': 'gen_ai.embeddings',
-          'sentry.origin': 'auto.ai.openai',
-          'gen_ai.system': 'openai',
-          'gen_ai.request.model': 'error-model',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
         },
         description: 'embeddings error-model',
         op: 'gen_ai.embeddings',
@@ -400,19 +428,19 @@ describe('OpenAI integration', () => {
       // First span - embeddings API with PII
       expect.objectContaining({
         data: {
-          'gen_ai.operation.name': 'embeddings',
-          'sentry.op': 'gen_ai.embeddings',
-          'sentry.origin': 'auto.ai.openai',
-          'gen_ai.system': 'openai',
-          'gen_ai.request.model': 'text-embedding-3-small',
-          'gen_ai.request.encoding_format': 'float',
-          'gen_ai.request.dimensions': 1536,
-          'gen_ai.embeddings.input': 'Embedding test!',
-          'gen_ai.response.model': 'text-embedding-3-small',
-          'gen_ai.usage.input_tokens': 10,
-          'gen_ai.usage.total_tokens': 10,
-          'openai.response.model': 'text-embedding-3-small',
-          'openai.usage.prompt_tokens': 10,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'text-embedding-3-small',
+          [GEN_AI_REQUEST_ENCODING_FORMAT_ATTRIBUTE]: 'float',
+          [GEN_AI_REQUEST_DIMENSIONS_ATTRIBUTE]: 1536,
+          [GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]: 'Embedding test!',
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 10,
+          [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small',
+          [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 10,
         },
         description: 'embeddings text-embedding-3-small',
         op: 'gen_ai.embeddings',
@@ -422,12 +450,12 @@ describe('OpenAI integration', () => {
       // Second span - embeddings API error model with PII
       expect.objectContaining({
         data: {
-          'gen_ai.operation.name': 'embeddings',
-          'sentry.op': 'gen_ai.embeddings',
-          'sentry.origin': 'auto.ai.openai',
-          'gen_ai.system': 'openai',
-          'gen_ai.request.model': 'error-model',
-          'gen_ai.embeddings.input': 'Error embedding test!',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
+          [GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]: 'Error embedding test!',
         },
         description: 'embeddings error-model',
         op: 'gen_ai.embeddings',
@@ -437,17 +465,17 @@ describe('OpenAI integration', () => {
       // Third span - embeddings API with multiple inputs (this does not get truncated)
       expect.objectContaining({
         data: {
-          'gen_ai.operation.name': 'embeddings',
-          'sentry.op': 'gen_ai.embeddings',
-          'sentry.origin': 'auto.ai.openai',
-          'gen_ai.system': 'openai',
-          'gen_ai.request.model': 'text-embedding-3-small',
-          'gen_ai.embeddings.input': '["First input text","Second input text","Third input text"]',
-          'gen_ai.response.model': 'text-embedding-3-small',
-          'gen_ai.usage.input_tokens': 10,
-          'gen_ai.usage.total_tokens': 10,
-          'openai.response.model': 'text-embedding-3-small',
-          'openai.usage.prompt_tokens': 10,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'text-embedding-3-small',
+          [GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]: '["First input text","Second input text","Third input text"]',
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 10,
+          [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small',
+          [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 10,
         },
         description: 'embeddings text-embedding-3-small',
         op: 'gen_ai.embeddings',
@@ -493,23 +521,23 @@ describe('OpenAI integration', () => {
         span_id: expect.any(String),
         trace_id: expect.any(String),
         data: {
-          'gen_ai.operation.name': 'chat',
-          'sentry.op': 'gen_ai.chat',
-          'sentry.origin': 'auto.ai.openai',
-          'gen_ai.system': 'openai',
-          'gen_ai.request.model': 'gpt-3.5-turbo',
-          'gen_ai.request.temperature': 0.7,
-          'gen_ai.response.model': 'gpt-3.5-turbo',
-          'gen_ai.response.id': 'chatcmpl-mock123',
-          'gen_ai.response.finish_reasons': '["stop"]',
-          'gen_ai.usage.input_tokens': 10,
-          'gen_ai.usage.output_tokens': 15,
-          'gen_ai.usage.total_tokens': 25,
-          'openai.response.id': 'chatcmpl-mock123',
-          'openai.response.model': 'gpt-3.5-turbo',
-          'openai.response.timestamp': '2023-03-01T06:31:28.000Z',
-          'openai.usage.completion_tokens': 15,
-          'openai.usage.prompt_tokens': 10,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
+          [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
+          [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123',
+          [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
+          [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123',
+          [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
+          [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:28.000Z',
+          [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 15,
+          [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 10,
         },
         op: 'gen_ai.chat',
         origin: 'auto.ai.openai',
@@ -540,23 +568,23 @@ describe('OpenAI integration', () => {
         span_id: expect.any(String),
         trace_id: expect.any(String),
         data: {
-          'gen_ai.operation.name': 'chat',
-          'sentry.op': 'gen_ai.chat',
-          'sentry.origin': 'auto.ai.openai',
-          'gen_ai.system': 'openai',
-          'gen_ai.request.model': 'gpt-3.5-turbo',
-          'gen_ai.request.temperature': 0.7,
-          'gen_ai.response.model': 'gpt-3.5-turbo',
-          'gen_ai.response.id': 'chatcmpl-mock123',
-          'gen_ai.response.finish_reasons': '["stop"]',
-          'gen_ai.usage.input_tokens': 10,
-          'gen_ai.usage.output_tokens': 15,
-          'gen_ai.usage.total_tokens': 25,
-          'openai.response.id': 'chatcmpl-mock123',
-          'openai.response.model': 'gpt-3.5-turbo',
-          'openai.response.timestamp': '2023-03-01T06:31:28.000Z',
-          'openai.usage.completion_tokens': 15,
-          'openai.usage.prompt_tokens': 10,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
+          [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
+          [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123',
+          [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
+          [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123',
+          [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
+          [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:28.000Z',
+          [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 15,
+          [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 10,
         },
         op: 'gen_ai.chat',
         origin: 'auto.ai.openai',
@@ -585,13 +613,15 @@ describe('OpenAI integration', () => {
       // First call: Last message is large and gets truncated (only C's remain, D's are cropped)
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'chat',
-          'sentry.op': 'gen_ai.chat',
-          'sentry.origin': 'auto.ai.openai',
-          'gen_ai.system': 'openai',
-          'gen_ai.request.model': 'gpt-3.5-turbo',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
           // Messages should be present (truncation happened) and should be a JSON array of a single index
-          'gen_ai.request.messages': expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/),
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringMatching(
+            /^\[\{"role":"user","content":"C+"\}\]$/,
+          ),
         }),
         description: 'chat gpt-3.5-turbo',
         op: 'gen_ai.chat',
@@ -601,13 +631,13 @@ describe('OpenAI integration', () => {
       // Second call: Last message is small and kept without truncation
       expect.objectContaining({
        data: expect.objectContaining({
-          'gen_ai.operation.name': 'chat',
-          'sentry.op': 'gen_ai.chat',
-          'sentry.origin': 'auto.ai.openai',
-          'gen_ai.system': 'openai',
-          'gen_ai.request.model': 'gpt-3.5-turbo',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
           // Small message should be kept intact
-          'gen_ai.request.messages': JSON.stringify([
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: JSON.stringify([
            { role: 'user', content: 'This is a small message that fits within the limit' },
           ]),
         }),
@@ -639,13 +669,13 @@ describe('OpenAI integration', () => {
     spans: expect.arrayContaining([
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'responses',
-          'sentry.op': 'gen_ai.responses',
-          'sentry.origin': 'auto.ai.openai',
-          'gen_ai.system': 'openai',
-          'gen_ai.request.model': 'gpt-3.5-turbo',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'responses',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.responses',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
           // Messages should be present and should include truncated string input (contains only As)
-          'gen_ai.request.messages': expect.stringMatching(/^A+$/),
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^A+$/),
         }),
         description: 'responses gpt-3.5-turbo',
         op: 'gen_ai.responses',
@@ -668,12 +698,12 @@ describe('OpenAI integration', () => {
       // First span - conversations.create returns conversation object with id
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'conversations',
-          'sentry.op': 'gen_ai.conversations',
-          'sentry.origin': 'auto.ai.openai',
-          'gen_ai.system': 'openai',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'conversations',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.conversations',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
           // The conversation ID should be captured from the response
-          'gen_ai.conversation.id': 'conv_689667905b048191b4740501625afd940c7533ace33a2dab',
+          [GEN_AI_CONVERSATION_ID_ATTRIBUTE]: 'conv_689667905b048191b4740501625afd940c7533ace33a2dab',
         }),
         description: 'conversations unknown',
         op: 'gen_ai.conversations',
@@ -683,13 +713,13 @@ describe('OpenAI integration', () => {
       // Second span - responses.create with conversation parameter
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'responses',
-          'sentry.op': 'gen_ai.responses',
-          'sentry.origin': 'auto.ai.openai',
-          'gen_ai.system': 'openai',
-          'gen_ai.request.model': 'gpt-4',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'responses',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.responses',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4',
           // The conversation ID should be captured from the request
-          'gen_ai.conversation.id': 'conv_689667905b048191b4740501625afd940c7533ace33a2dab',
+          [GEN_AI_CONVERSATION_ID_ATTRIBUTE]: 'conv_689667905b048191b4740501625afd940c7533ace33a2dab',
         }),
         op: 'gen_ai.responses',
         origin: 'auto.ai.openai',
@@ -698,7 +728,7 @@ describe('OpenAI integration', () => {
       // Third span - responses.create without conversation (first in chain, should NOT have gen_ai.conversation.id)
       expect.objectContaining({
         data: expect.not.objectContaining({
-          'gen_ai.conversation.id': expect.anything(),
+          [GEN_AI_CONVERSATION_ID_ATTRIBUTE]: expect.anything(),
         }),
         op: 'gen_ai.responses',
         origin: 'auto.ai.openai',
@@ -707,13 +737,13 @@ describe('OpenAI integration', () => {
       // Fourth span - responses.create with previous_response_id (chaining)
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'responses',
-          'sentry.op': 'gen_ai.responses',
-          'sentry.origin': 'auto.ai.openai',
-          'gen_ai.system': 'openai',
-          'gen_ai.request.model': 'gpt-4',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'responses',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.responses',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4',
           // The previous_response_id should be captured as conversation.id
-          'gen_ai.conversation.id': 'resp_mock_conv_123',
+          [GEN_AI_CONVERSATION_ID_ATTRIBUTE]: 'resp_mock_conv_123',
         }),
        op: 'gen_ai.responses',
        origin: 'auto.ai.openai',
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
index 8112bcadd5f5..05d209176179 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
@@ -1,5 +1,28 @@
+import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
 import type { Event } from '@sentry/node';
 import { afterAll, describe, expect } from 'vitest';
+import {
+  GEN_AI_OPERATION_NAME_ATTRIBUTE,
+  GEN_AI_PROMPT_ATTRIBUTE,
+  GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE,
+  GEN_AI_REQUEST_MESSAGES_ATTRIBUTE,
+  GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE,
+  GEN_AI_REQUEST_MODEL_ATTRIBUTE,
+  GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE,
+  GEN_AI_RESPONSE_ID_ATTRIBUTE,
+  GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
+  GEN_AI_RESPONSE_TEXT_ATTRIBUTE,
+  GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE,
+  GEN_AI_SYSTEM_ATTRIBUTE,
+  GEN_AI_TOOL_CALL_ID_ATTRIBUTE,
+  GEN_AI_TOOL_INPUT_ATTRIBUTE,
+  GEN_AI_TOOL_NAME_ATTRIBUTE,
+  GEN_AI_TOOL_OUTPUT_ATTRIBUTE,
+  GEN_AI_TOOL_TYPE_ATTRIBUTE,
+  GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE,
+} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes';
 import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../utils/runner';
 
 describe('Vercel AI integration', () => {
@@ -13,14 +36,14 @@ describe('Vercel AI integration', () => {
       // First span - no telemetry config, should enable telemetry but not record inputs/outputs when sendDefaultPii: false
       expect.objectContaining({
         data: {
-          'gen_ai.request.model': 'mock-model-id',
-          'gen_ai.response.model': 'mock-model-id',
-          'gen_ai.usage.input_tokens': 10,
-          'gen_ai.usage.output_tokens': 20,
-          'gen_ai.usage.total_tokens': 30,
-          'gen_ai.operation.name': 'ai.generateText',
-          'sentry.op': 'gen_ai.invoke_agent',
-          'sentry.origin': 'auto.vercelai.otel',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           'vercel.ai.model.provider': 'mock-provider',
           'vercel.ai.operationId': 'ai.generateText',
           'vercel.ai.pipeline.name': 'generateText',
@@ -37,17 +60,17 @@ describe('Vercel AI integration', () => {
       // Second span - explicitly enabled telemetry but recordInputs/recordOutputs not set, should not record when sendDefaultPii: false
       expect.objectContaining({
         data: {
-          'gen_ai.request.model': 'mock-model-id',
-          'gen_ai.response.finish_reasons': ['stop'],
-          'gen_ai.response.id': expect.any(String),
-          'gen_ai.response.model': 'mock-model-id',
-          'gen_ai.system': 'mock-provider',
-          'gen_ai.usage.input_tokens': 10,
-          'gen_ai.usage.output_tokens': 20,
-          'gen_ai.usage.total_tokens': 30,
-          'gen_ai.operation.name': 'ai.generateText.doGenerate',
-          'sentry.op': 'gen_ai.generate_text',
-          'sentry.origin': 'auto.vercelai.otel',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
+          [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText.doGenerate',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           'vercel.ai.model.provider': 'mock-provider',
           'vercel.ai.operationId': 'ai.generateText.doGenerate',
           'vercel.ai.pipeline.name': 'generateText.doGenerate',
@@ -66,18 +89,18 @@ describe('Vercel AI integration', () => {
       // Third span - explicit telemetry enabled, should record inputs/outputs regardless of sendDefaultPii
       expect.objectContaining({
         data: {
-          'gen_ai.prompt': '{"prompt":"Where is the second span?"}',
-          'gen_ai.request.messages.original_length': 1,
-          'gen_ai.request.messages': '[{"role":"user","content":"Where is the second span?"}]',
-          'gen_ai.request.model': 'mock-model-id',
-          'gen_ai.response.model': 'mock-model-id',
-          'gen_ai.response.text': expect.any(String),
-          'gen_ai.usage.input_tokens': 10,
-          'gen_ai.usage.output_tokens': 20,
-          'gen_ai.usage.total_tokens': 30,
-          'gen_ai.operation.name': 'ai.generateText',
-          'sentry.op': 'gen_ai.invoke_agent',
-          'sentry.origin': 'auto.vercelai.otel',
+          [GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the second span?"}',
+          [GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String),
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           'vercel.ai.model.provider': 'mock-provider',
           'vercel.ai.operationId': 'ai.generateText',
           'vercel.ai.pipeline.name': 'generateText',
@@ -95,20 +118,20 @@ describe('Vercel AI integration', () => {
       // Fourth span - doGenerate for explicit telemetry enabled call
       expect.objectContaining({
         data: {
-          'gen_ai.request.messages': expect.any(String),
-          'gen_ai.request.messages.original_length': expect.any(Number),
-          'gen_ai.request.model': 'mock-model-id',
-          'gen_ai.response.finish_reasons': ['stop'],
-          'gen_ai.response.id': expect.any(String),
-          'gen_ai.response.model': 'mock-model-id',
-          'gen_ai.response.text': expect.any(String),
-          'gen_ai.system': 'mock-provider',
-          'gen_ai.usage.input_tokens': 10,
-          'gen_ai.usage.output_tokens': 20,
-          'gen_ai.usage.total_tokens': 30,
-          'gen_ai.operation.name': 'ai.generateText.doGenerate',
-          'sentry.op': 'gen_ai.generate_text',
-          'sentry.origin': 'auto.vercelai.otel',
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String),
+          [GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: expect.any(Number),
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
+          [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String),
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText.doGenerate',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           'vercel.ai.model.provider': 'mock-provider',
           'vercel.ai.operationId': 'ai.generateText.doGenerate',
           'vercel.ai.pipeline.name': 'generateText.doGenerate',
@@ -128,14 +151,14 @@ describe('Vercel AI integration', () => {
       // Fifth span - tool call generateText span
       expect.objectContaining({
         data: {
-          'gen_ai.request.model': 'mock-model-id',
-          'gen_ai.response.model': 'mock-model-id',
-          'gen_ai.usage.input_tokens': 15,
-          'gen_ai.usage.output_tokens': 25,
-          'gen_ai.usage.total_tokens': 40,
-          'gen_ai.operation.name': 'ai.generateText',
-          'sentry.op': 'gen_ai.invoke_agent',
-          'sentry.origin': 'auto.vercelai.otel',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           'vercel.ai.model.provider': 'mock-provider',
           'vercel.ai.operationId': 'ai.generateText',
           'vercel.ai.pipeline.name': 'generateText',
@@ -152,17 +175,17 @@ describe('Vercel AI integration', () => {
       // Sixth span - tool call doGenerate span
       expect.objectContaining({
         data: {
-          'gen_ai.request.model': 'mock-model-id',
-          'gen_ai.response.finish_reasons': ['tool-calls'],
-          'gen_ai.response.id': expect.any(String),
-          'gen_ai.response.model': 'mock-model-id',
-          'gen_ai.system': 'mock-provider',
-          'gen_ai.usage.input_tokens': 15,
-          'gen_ai.usage.output_tokens': 25,
-          'gen_ai.usage.total_tokens': 40,
-          'gen_ai.operation.name': 'ai.generateText.doGenerate',
-          'sentry.op': 'gen_ai.generate_text',
-          'sentry.origin': 'auto.vercelai.otel',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'],
+          [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText.doGenerate',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           'vercel.ai.model.provider': 'mock-provider',
           'vercel.ai.operationId': 'ai.generateText.doGenerate',
           'vercel.ai.pipeline.name': 'generateText.doGenerate',
@@ -181,12 +204,12 @@ describe('Vercel AI integration', () => {
       // Seventh span - tool call execution span
       expect.objectContaining({
         data: {
-          'gen_ai.tool.call.id': 'call-1',
-          'gen_ai.tool.name': 'getWeather',
-          'gen_ai.tool.type': 'function',
-          'gen_ai.operation.name': 'ai.toolCall',
-          'sentry.op': 'gen_ai.execute_tool',
-          'sentry.origin': 'auto.vercelai.otel',
+          [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1',
+          [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
+          [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.toolCall',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           'vercel.ai.operationId': 'ai.toolCall',
         },
         description: 'execute_tool getWeather',
@@ -206,18 +229,18 @@ describe('Vercel AI integration', () => {
       // First span - no telemetry config, should enable telemetry AND record inputs/outputs when sendDefaultPii: true
       expect.objectContaining({
         data: {
-          'gen_ai.prompt': '{"prompt":"Where is the first span?"}',
-          'gen_ai.request.messages.original_length': 1,
-          'gen_ai.request.messages': '[{"role":"user","content":"Where is the first span?"}]',
-          'gen_ai.request.model': 'mock-model-id',
-          'gen_ai.response.model': 'mock-model-id',
-          'gen_ai.response.text': 'First span here!',
-          'gen_ai.usage.input_tokens': 10,
-          'gen_ai.usage.output_tokens': 20,
-          'gen_ai.usage.total_tokens': 30,
-          'gen_ai.operation.name': 'ai.generateText',
-          'sentry.op': 'gen_ai.invoke_agent',
-          'sentry.origin': 'auto.vercelai.otel',
+          [GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the first span?"}',
+          [GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the first span?"}]',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           'vercel.ai.model.provider': 'mock-provider',
           'vercel.ai.operationId': 'ai.generateText',
           'vercel.ai.pipeline.name': 'generateText',
@@ -240,20 +263,21 @@ describe('Vercel AI integration', () => {
       // Second span - doGenerate for first call, should also include input/output fields when sendDefaultPii: true
       expect.objectContaining({
         data: {
-          'gen_ai.request.messages.original_length': 1,
-          'gen_ai.request.messages': '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]',
-          'gen_ai.request.model': 'mock-model-id',
-          'gen_ai.response.finish_reasons': ['stop'],
-          'gen_ai.response.id': expect.any(String),
-          'gen_ai.response.model': 'mock-model-id',
-          'gen_ai.response.text': 'First span here!',
-          'gen_ai.system': 'mock-provider',
-          'gen_ai.usage.input_tokens': 10,
-          'gen_ai.usage.output_tokens': 20,
-          'gen_ai.usage.total_tokens': 30,
-          'gen_ai.operation.name': 'ai.generateText.doGenerate',
-          'sentry.op': 'gen_ai.generate_text',
-          'sentry.origin': 'auto.vercelai.otel',
+          [GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]:
+            '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
+          [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText.doGenerate',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           'vercel.ai.model.provider': 'mock-provider',
           'vercel.ai.operationId': 'ai.generateText.doGenerate',
           'vercel.ai.pipeline.name': 'generateText.doGenerate',
@@ -278,18 +302,18 @@ describe('Vercel AI integration', () => {
       // Third span - explicitly enabled telemetry, should record inputs/outputs regardless of sendDefaultPii
       expect.objectContaining({
         data: {
-          'gen_ai.prompt': '{"prompt":"Where is the second span?"}',
-          'gen_ai.request.messages.original_length': 1,
-          'gen_ai.request.messages': '[{"role":"user","content":"Where is the second span?"}]',
-          'gen_ai.request.model': 'mock-model-id',
-          'gen_ai.response.model': 'mock-model-id',
-          'gen_ai.response.text': expect.any(String),
-          'gen_ai.usage.input_tokens': 10,
-          'gen_ai.usage.output_tokens': 20,
-          'gen_ai.usage.total_tokens': 30,
-          'gen_ai.operation.name': 'ai.generateText',
-          'sentry.op': 'gen_ai.invoke_agent',
-          'sentry.origin': 'auto.vercelai.otel',
+          [GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the second span?"}',
+          [GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String),
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           'vercel.ai.model.provider': 'mock-provider',
           'vercel.ai.operationId': 'ai.generateText',
           'vercel.ai.pipeline.name': 'generateText',
@@ -312,20 +336,20 @@ describe('Vercel AI integration', () => {
       // Fourth span - doGenerate for explicitly enabled telemetry call
       expect.objectContaining({
         data: {
-          'gen_ai.request.messages': expect.any(String),
-          'gen_ai.request.messages.original_length': expect.any(Number),
-          'gen_ai.request.model': 'mock-model-id',
-          'gen_ai.response.finish_reasons': ['stop'],
-          'gen_ai.response.id': expect.any(String),
-          'gen_ai.response.model': 'mock-model-id',
-          'gen_ai.response.text': expect.any(String),
-          'gen_ai.system': 'mock-provider',
-          'gen_ai.usage.input_tokens': 10,
-          'gen_ai.usage.output_tokens': 20,
-          'gen_ai.usage.total_tokens': 30,
-          'gen_ai.operation.name': 'ai.generateText.doGenerate',
-          'sentry.op': 'gen_ai.generate_text',
-          'sentry.origin': 'auto.vercelai.otel',
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String),
+          [GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: expect.any(Number),
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
+          [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String),
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText.doGenerate',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           'vercel.ai.model.provider': 'mock-provider',
           'vercel.ai.operationId': 'ai.generateText.doGenerate',
           'vercel.ai.pipeline.name': 'generateText.doGenerate',
@@ -350,19 +374,19 @@ describe('Vercel AI integration', () => {
       // Fifth span - tool call generateText span (should include prompts when sendDefaultPii: true)
       expect.objectContaining({
         data: {
-          'gen_ai.prompt': '{"prompt":"What is the weather in San Francisco?"}',
-          'gen_ai.request.messages.original_length': 1,
-          'gen_ai.request.messages': '[{"role":"user","content":"What is the weather in San Francisco?"}]',
-          'gen_ai.request.model': 'mock-model-id',
-          'gen_ai.response.model': 'mock-model-id',
-          'gen_ai.response.text': 'Tool call completed!',
-          'gen_ai.response.tool_calls': expect.any(String),
-          'gen_ai.usage.input_tokens': 15,
-          'gen_ai.usage.output_tokens': 25,
-          'gen_ai.usage.total_tokens': 40,
-          'gen_ai.operation.name': 'ai.generateText',
-          'sentry.op': 'gen_ai.invoke_agent',
-          'sentry.origin': 'auto.vercelai.otel',
+          [GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"What is the weather in San Francisco?"}',
+          [GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Tool call completed!',
+          [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String),
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           'vercel.ai.model.provider': 'mock-provider',
           'vercel.ai.operationId': 'ai.generateText',
           'vercel.ai.pipeline.name': 'generateText',
@@ -385,22 +409,22 @@ describe('Vercel AI integration', () => {
       // Sixth span - tool call doGenerate span (should include prompts when sendDefaultPii: true)
       expect.objectContaining({
         data: {
-          'gen_ai.request.available_tools': EXPECTED_AVAILABLE_TOOLS_JSON,
-          'gen_ai.request.messages': expect.any(String),
-          'gen_ai.request.messages.original_length': expect.any(Number),
-          'gen_ai.request.model': 'mock-model-id',
-          'gen_ai.response.finish_reasons': ['tool-calls'],
-          'gen_ai.response.id': expect.any(String),
-          'gen_ai.response.model': 'mock-model-id',
-          'gen_ai.response.text': 'Tool call completed!',
-          'gen_ai.response.tool_calls': expect.any(String),
-          'gen_ai.system': 'mock-provider',
-          'gen_ai.usage.input_tokens': 15,
-          'gen_ai.usage.output_tokens': 25,
-          'gen_ai.usage.total_tokens': 40,
-          'gen_ai.operation.name': 'ai.generateText.doGenerate',
-          'sentry.op': 'gen_ai.generate_text',
-          'sentry.origin': 'auto.vercelai.otel',
+          [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_AVAILABLE_TOOLS_JSON,
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String),
+          [GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: expect.any(Number),
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'],
+          [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Tool call completed!',
+          [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String),
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText.doGenerate',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           'vercel.ai.model.provider': 'mock-provider',
           'vercel.ai.operationId': 'ai.generateText.doGenerate',
           'vercel.ai.pipeline.name': 'generateText.doGenerate',
@@ -426,14 +450,14 @@ describe('Vercel AI integration', () => {
       // Seventh span - tool call execution span
       expect.objectContaining({
         data: {
-          'gen_ai.tool.call.id': 'call-1',
-          'gen_ai.tool.input': expect.any(String),
-          'gen_ai.tool.name': 'getWeather',
-          'gen_ai.tool.output': expect.any(String),
-          'gen_ai.tool.type': 'function',
-          'gen_ai.operation.name': 'ai.toolCall',
-          'sentry.op': 'gen_ai.execute_tool',
-          'sentry.origin': 'auto.vercelai.otel',
+          [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1',
+          [GEN_AI_TOOL_INPUT_ATTRIBUTE]: expect.any(String),
+          [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
+          [GEN_AI_TOOL_OUTPUT_ATTRIBUTE]: expect.any(String),
+          [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.toolCall',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           'vercel.ai.operationId': 'ai.toolCall',
         },
         description: 'execute_tool getWeather',
@@ -468,14 +492,14 @@ describe('Vercel AI integration', () => {
     spans: expect.arrayContaining([
       expect.objectContaining({
         data: {
-          'gen_ai.request.model': 'mock-model-id',
-          'gen_ai.response.model': 'mock-model-id',
-          'gen_ai.usage.input_tokens': 15,
-          'gen_ai.usage.output_tokens': 25,
-          'gen_ai.usage.total_tokens': 40,
-          'gen_ai.operation.name': 'ai.generateText',
-          'sentry.op': 'gen_ai.invoke_agent',
-          'sentry.origin': 'auto.vercelai.otel',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           'vercel.ai.model.provider': 'mock-provider',
           'vercel.ai.operationId': 'ai.generateText',
           'vercel.ai.pipeline.name': 'generateText',
@@ -490,17 +514,17 @@ describe('Vercel AI integration', () => {
      }),
      expect.objectContaining({
        data: {
-          'gen_ai.request.model': 'mock-model-id',
-          'gen_ai.response.finish_reasons': ['tool-calls'],
-          'gen_ai.response.id': expect.any(String),
-          'gen_ai.response.model': 'mock-model-id',
-          'gen_ai.system': 'mock-provider',
-          'gen_ai.usage.input_tokens': 15,
-          'gen_ai.usage.output_tokens': 25,
-          'gen_ai.usage.total_tokens': 40,
-          'gen_ai.operation.name': 'ai.generateText.doGenerate',
-          'sentry.op': 'gen_ai.generate_text',
-          'sentry.origin': 'auto.vercelai.otel',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'],
+          [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText.doGenerate',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           'vercel.ai.model.provider': 'mock-provider',
           'vercel.ai.operationId': 'ai.generateText.doGenerate',
           'vercel.ai.pipeline.name': 'generateText.doGenerate',
@@ -518,12 +542,12 @@ describe('Vercel AI integration', () => {
      }),
      expect.objectContaining({
        data: {
-          'gen_ai.tool.call.id': 'call-1',
-          'gen_ai.tool.name': 'getWeather',
-          'gen_ai.tool.type': 'function',
-          'gen_ai.operation.name': 'ai.toolCall',
-          'sentry.op': 'gen_ai.execute_tool',
-          'sentry.origin': 'auto.vercelai.otel',
+          [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1',
+          [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
+          [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.toolCall',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           'vercel.ai.operationId': 'ai.toolCall',
        },
        description: 'execute_tool getWeather',
@@ -588,14 +612,14 @@ describe('Vercel AI integration', () => {
     spans: expect.arrayContaining([
       expect.objectContaining({
         data: {
-          'gen_ai.request.model': 'mock-model-id',
-          'gen_ai.response.model': 'mock-model-id',
-          'gen_ai.usage.input_tokens': 15,
-          'gen_ai.usage.output_tokens': 25,
-          'gen_ai.usage.total_tokens': 40,
-          'gen_ai.operation.name': 'ai.generateText',
-          'sentry.op': 'gen_ai.invoke_agent',
-          'sentry.origin': 'auto.vercelai.otel',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           'vercel.ai.model.provider': 'mock-provider',
           'vercel.ai.operationId': 'ai.generateText',
           'vercel.ai.pipeline.name': 'generateText',
@@ -610,17 +634,17 @@ describe('Vercel AI integration', () => {
      }),
      expect.objectContaining({
        data: {
-          'gen_ai.request.model': 'mock-model-id',
-          'gen_ai.response.finish_reasons': ['tool-calls'],
-          'gen_ai.response.id': expect.any(String),
-          'gen_ai.response.model': 'mock-model-id',
-          'gen_ai.system': 'mock-provider',
-          'gen_ai.usage.input_tokens': 15,
-          'gen_ai.usage.output_tokens': 25,
-          'gen_ai.usage.total_tokens': 40,
-          'gen_ai.operation.name': 'ai.generateText.doGenerate',
-          'sentry.op': 'gen_ai.generate_text',
-          'sentry.origin': 'auto.vercelai.otel',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'],
+          [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText.doGenerate',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           'vercel.ai.model.provider': 'mock-provider',
           'vercel.ai.operationId': 'ai.generateText.doGenerate',
           'vercel.ai.pipeline.name': 'generateText.doGenerate',
@@ -638,12 +662,12 @@ describe('Vercel AI integration', () => {
      }),
      expect.objectContaining({
        data: {
-          'gen_ai.tool.call.id': 'call-1',
-          'gen_ai.tool.name': 'getWeather',
-          'gen_ai.tool.type': 'function',
-          'gen_ai.operation.name': 'ai.toolCall',
-          'sentry.op': 'gen_ai.execute_tool',
-          'sentry.origin': 'auto.vercelai.otel',
+          [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1',
+          [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
+          [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.toolCall',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           'vercel.ai.operationId': 'ai.toolCall',
        },
        description: 'execute_tool getWeather',
@@ -720,9 +744,9 @@ describe('Vercel AI integration', () => {
         origin: 'auto.vercelai.otel',
         status: 'ok',
         data: expect.objectContaining({
-          'sentry.op': 'gen_ai.invoke_agent',
-          'sentry.origin': 'auto.vercelai.otel',
-          'gen_ai.operation.name': 'ai.generateText',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText',
         }),
       }),
       // The doGenerate span - name stays as 'generateText.doGenerate' since model ID is missing
@@ -732,9 +756,9 @@
         origin: 'auto.vercelai.otel',
         status: 'ok',
         data: expect.objectContaining({
-          'sentry.op': 'gen_ai.generate_text',
-          'sentry.origin': 'auto.vercelai.otel',
-          'gen_ai.operation.name': 'ai.generateText.doGenerate',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText.doGenerate',
         }),
       }),
     ]),
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts
index 179644bbcd73..2e769f98dbda 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts
@@ -1,5 +1,12 @@
 import type { Event } from '@sentry/node';
 import { afterAll, describe, expect } from 'vitest';
+import {
+  GEN_AI_TOOL_CALL_ID_ATTRIBUTE,
+  GEN_AI_TOOL_INPUT_ATTRIBUTE,
+  GEN_AI_TOOL_NAME_ATTRIBUTE,
+  GEN_AI_TOOL_OUTPUT_ATTRIBUTE,
+  GEN_AI_TOOL_TYPE_ATTRIBUTE,
+} from '../../../../../../packages/core/src/tracing/ai/gen-ai-attributes';
 import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../../utils/runner';
 
 describe('Vercel AI integration (V5)', () => {
@@ -178,9 +185,9 @@ describe('Vercel AI integration (V5)', () => {
       expect.objectContaining({
         data: {
           'vercel.ai.operationId': 'ai.toolCall',
-          'gen_ai.tool.call.id': 'call-1',
-          'gen_ai.tool.name': 'getWeather',
-          'gen_ai.tool.type': 'function',
+          [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1',
+          [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
+          [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function',
           'gen_ai.operation.name': 'ai.toolCall',
           'sentry.op': 'gen_ai.execute_tool',
           'sentry.origin': 'auto.vercelai.otel',
@@ -386,11 +393,11 @@ describe('Vercel AI integration (V5)', () => {
       expect.objectContaining({
         data: {
           'vercel.ai.operationId': 'ai.toolCall',
-          'gen_ai.tool.call.id': 'call-1',
-          'gen_ai.tool.name': 'getWeather',
-          'gen_ai.tool.input': expect.any(String),
-          'gen_ai.tool.output': expect.any(String),
-          'gen_ai.tool.type': 'function',
+          [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1',
+          [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
+          [GEN_AI_TOOL_INPUT_ATTRIBUTE]: expect.any(String),
+          [GEN_AI_TOOL_OUTPUT_ATTRIBUTE]: expect.any(String),
+          [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function',
           'gen_ai.operation.name': 'ai.toolCall',
           'sentry.op': 'gen_ai.execute_tool',
           'sentry.origin': 'auto.vercelai.otel',
@@ -496,9 +503,9 @@ describe('Vercel AI integration (V5)', () => {
       expect.objectContaining({
         data: {
           'vercel.ai.operationId': 'ai.toolCall',
-          'gen_ai.tool.call.id': 'call-1',
-          'gen_ai.tool.name': 'getWeather',
-          'gen_ai.tool.type': 'function',
+          [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1',
+          [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
+          [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function',
           'gen_ai.operation.name': 'ai.toolCall',
           'sentry.op': 'gen_ai.execute_tool',
           'sentry.origin': 'auto.vercelai.otel',
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts
index 98a16618d77d..63ca5fcde258 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts
@@ -1,5 +1,12 @@
 import type { Event } from '@sentry/node';
 import { afterAll, describe, expect } from 'vitest';
+import {
+  GEN_AI_TOOL_CALL_ID_ATTRIBUTE,
+  GEN_AI_TOOL_INPUT_ATTRIBUTE,
+  GEN_AI_TOOL_NAME_ATTRIBUTE,
+  GEN_AI_TOOL_OUTPUT_ATTRIBUTE,
+  GEN_AI_TOOL_TYPE_ATTRIBUTE,
+} from '../../../../../../packages/core/src/tracing/ai/gen-ai-attributes';
 import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../../utils/runner';
 
 describe('Vercel AI integration (V6)', () => {
@@ -182,9 +189,9 @@ describe('Vercel AI integration (V6)', () => {
       expect.objectContaining({
         data: expect.objectContaining({
           'vercel.ai.operationId': 'ai.toolCall',
-          'gen_ai.tool.call.id': 'call-1',
-          'gen_ai.tool.name': 'getWeather',
-          'gen_ai.tool.type': 'function',
+          [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1',
+          [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
+          [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function',
           'gen_ai.operation.name': 'ai.toolCall',
           'sentry.op': 'gen_ai.execute_tool',
           'sentry.origin': 'auto.vercelai.otel',
@@ -390,11 +397,11 @@ describe('Vercel AI integration (V6)', () => {
       expect.objectContaining({
         data: expect.objectContaining({
           'vercel.ai.operationId': 'ai.toolCall',
-          'gen_ai.tool.call.id': 'call-1',
-          'gen_ai.tool.name': 'getWeather',
-          'gen_ai.tool.input': expect.any(String),
-          'gen_ai.tool.output': expect.any(String),
-          'gen_ai.tool.type': 'function',
+          [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1',
+          [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
+          [GEN_AI_TOOL_INPUT_ATTRIBUTE]: expect.any(String),
+          [GEN_AI_TOOL_OUTPUT_ATTRIBUTE]: expect.any(String),
+          [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function',
           'gen_ai.operation.name': 'ai.toolCall',
           'sentry.op': 'gen_ai.execute_tool',
           'sentry.origin': 'auto.vercelai.otel',
@@ -502,9 +509,9 @@ describe('Vercel AI integration (V6)', () => {
       expect.objectContaining({
         data: expect.objectContaining({
           'vercel.ai.operationId': 'ai.toolCall',
-          'gen_ai.tool.call.id': 'call-1',
-          'gen_ai.tool.name': 'getWeather',
-          'gen_ai.tool.type': 'function',
+          [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1',
+          [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
+          [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function',
           'gen_ai.operation.name': 'ai.toolCall',
           'sentry.op': 'gen_ai.execute_tool',
           'sentry.origin': 'auto.vercelai.otel',
diff --git a/packages/core/src/tracing/ai/gen-ai-attributes.ts b/packages/core/src/tracing/ai/gen-ai-attributes.ts
index 4fa7274d7281..a52afa58a430 100644
--- a/packages/core/src/tracing/ai/gen-ai-attributes.ts
+++ b/packages/core/src/tracing/ai/gen-ai-attributes.ts
@@ -232,6 +232,31 @@ export const GEN_AI_EMBED_MANY_DO_EMBED_OPERATION_ATTRIBUTE = 'gen_ai.embed_many
  */
 export const GEN_AI_EXECUTE_TOOL_OPERATION_ATTRIBUTE = 'gen_ai.execute_tool';
 
+/**
+ * The tool name for tool call spans
+ */
+export const GEN_AI_TOOL_NAME_ATTRIBUTE = 'gen_ai.tool.name';
+
+/**
+ * The tool call ID
+ */
+export const GEN_AI_TOOL_CALL_ID_ATTRIBUTE = 'gen_ai.tool.call.id';
+
+/**
+ * The tool type (e.g., 'function')
+ */
+export const GEN_AI_TOOL_TYPE_ATTRIBUTE = 'gen_ai.tool.type';
+
+/**
+ * The tool input/arguments
+ */
+export const GEN_AI_TOOL_INPUT_ATTRIBUTE = 'gen_ai.tool.input';
+
+/**
+ * The tool output/result
+ */
+export const GEN_AI_TOOL_OUTPUT_ATTRIBUTE = 'gen_ai.tool.output';
+
 // =============================================================================
 // OPENAI-SPECIFIC ATTRIBUTES
 // =============================================================================
diff --git a/packages/core/src/tracing/langchain/index.ts b/packages/core/src/tracing/langchain/index.ts
index 1930be794be5..e5ad6cc14189 100644
--- a/packages/core/src/tracing/langchain/index.ts
+++ b/packages/core/src/tracing/langchain/index.ts
@@ -3,7 +3,13 @@ import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '
 import { SPAN_STATUS_ERROR } from '../../tracing';
 import { startSpanManual } from '../../tracing/trace';
 import type { Span, SpanAttributeValue } from '../../types-hoist/span';
-import { GEN_AI_OPERATION_NAME_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE } from '../ai/gen-ai-attributes';
+import {
+  GEN_AI_OPERATION_NAME_ATTRIBUTE,
+  GEN_AI_REQUEST_MODEL_ATTRIBUTE,
+  GEN_AI_TOOL_INPUT_ATTRIBUTE,
+  GEN_AI_TOOL_NAME_ATTRIBUTE,
+  GEN_AI_TOOL_OUTPUT_ATTRIBUTE,
+} from '../ai/gen-ai-attributes';
 import { LANGCHAIN_ORIGIN } from './constants';
 import type {
   LangChainCallbackHandler,
@@ -241,12 +247,12 @@ export function createLangChainCallbackHandler(options: LangChainOptions = {}):
       const toolName = tool.name || 'unknown_tool';
       const attributes: Record<string, SpanAttributeValue> = {
         [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: LANGCHAIN_ORIGIN,
-        'gen_ai.tool.name': toolName,
+        [GEN_AI_TOOL_NAME_ATTRIBUTE]: toolName,
       };
 
       // Add input if recordInputs is enabled
       if (recordInputs) {
-        attributes['gen_ai.tool.input'] = input;
+        attributes[GEN_AI_TOOL_INPUT_ATTRIBUTE] = input;
       }
 
       startSpanManual(
@@ -272,7 +278,7 @@ export function createLangChainCallbackHandler(options: LangChainOptions = {}):
       // Add output if recordOutputs is enabled
       if (recordOutputs) {
         span.setAttributes({
-          'gen_ai.tool.output': JSON.stringify(output),
+          [GEN_AI_TOOL_OUTPUT_ATTRIBUTE]: JSON.stringify(output),
         });
       }
       exitSpan(runId);
diff --git a/packages/core/src/tracing/vercel-ai/index.ts b/packages/core/src/tracing/vercel-ai/index.ts
index 9b95e8aa91ad..375e803159be 100644
--- a/packages/core/src/tracing/vercel-ai/index.ts
+++ b/packages/core/src/tracing/vercel-ai/index.ts
@@ -8,10 +8,16 @@ import {
   GEN_AI_REQUEST_MESSAGES_ATTRIBUTE,
   GEN_AI_REQUEST_MODEL_ATTRIBUTE,
   GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
+  GEN_AI_TOOL_CALL_ID_ATTRIBUTE,
+  GEN_AI_TOOL_INPUT_ATTRIBUTE,
+  GEN_AI_TOOL_NAME_ATTRIBUTE,
+  GEN_AI_TOOL_OUTPUT_ATTRIBUTE,
+  GEN_AI_TOOL_TYPE_ATTRIBUTE,
   GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
   GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE_ATTRIBUTE,
   GEN_AI_USAGE_INPUT_TOKENS_CACHED_ATTRIBUTE,
   GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE,
 } from '../ai/gen-ai-attributes';
 import { toolCallSpanMap } from './constants';
 import type { TokenSummary } from './types';
@@ -133,7 +139,7 @@ function processEndedVercelAiSpan(span: SpanJSON): void {
     typeof attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] === 'number' &&
     typeof attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] === 'number'
   ) {
-    attributes['gen_ai.usage.total_tokens'] =
+    attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE] =
       attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] + attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE];
   }
@@ -152,8 +158,8 @@ function processEndedVercelAiSpan(span: SpanJSON): void {
   renameAttributeKey(attributes, AI_RESPONSE_OBJECT_ATTRIBUTE, 'gen_ai.response.object');
   renameAttributeKey(attributes, AI_PROMPT_TOOLS_ATTRIBUTE, 'gen_ai.request.available_tools');
 
-  renameAttributeKey(attributes, AI_TOOL_CALL_ARGS_ATTRIBUTE, 'gen_ai.tool.input');
-  renameAttributeKey(attributes, AI_TOOL_CALL_RESULT_ATTRIBUTE, 'gen_ai.tool.output');
+  renameAttributeKey(attributes, AI_TOOL_CALL_ARGS_ATTRIBUTE, GEN_AI_TOOL_INPUT_ATTRIBUTE);
+  renameAttributeKey(attributes, AI_TOOL_CALL_RESULT_ATTRIBUTE, GEN_AI_TOOL_OUTPUT_ATTRIBUTE);
 
   renameAttributeKey(attributes, AI_SCHEMA_ATTRIBUTE, 'gen_ai.request.schema');
   renameAttributeKey(attributes, AI_MODEL_ID_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE);
@@ -183,22 +189,22 @@ function renameAttributeKey(attributes: Record<string, unknown>, oldKey: string,
 function processToolCallSpan(span: Span, attributes: SpanAttributes): void {
   addOriginToSpan(span, 'auto.vercelai.otel');
   span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.execute_tool');
-  renameAttributeKey(attributes, AI_TOOL_CALL_NAME_ATTRIBUTE, 'gen_ai.tool.name');
-  renameAttributeKey(attributes, AI_TOOL_CALL_ID_ATTRIBUTE, 'gen_ai.tool.call.id');
+  renameAttributeKey(attributes, AI_TOOL_CALL_NAME_ATTRIBUTE, GEN_AI_TOOL_NAME_ATTRIBUTE);
+  renameAttributeKey(attributes, AI_TOOL_CALL_ID_ATTRIBUTE, GEN_AI_TOOL_CALL_ID_ATTRIBUTE);
 
   // Store the span in our global map using the tool call ID
   // This allows us to capture tool errors and link them to the correct span
-  const toolCallId = attributes['gen_ai.tool.call.id'];
+  const toolCallId = attributes[GEN_AI_TOOL_CALL_ID_ATTRIBUTE];
   if (typeof toolCallId === 'string') {
     toolCallSpanMap.set(toolCallId, span);
   }
 
   // https://opentelemetry.io/docs/specs/semconv/registry/attributes/gen-ai/#gen-ai-tool-type
-  if (!attributes['gen_ai.tool.type']) {
-    span.setAttribute('gen_ai.tool.type', 'function');
+  if (!attributes[GEN_AI_TOOL_TYPE_ATTRIBUTE]) {
+    span.setAttribute(GEN_AI_TOOL_TYPE_ATTRIBUTE, 'function');
   }
 
-  const toolName = attributes['gen_ai.tool.name'];
+  const toolName = attributes[GEN_AI_TOOL_NAME_ATTRIBUTE];
   if (toolName) {
     span.updateName(`execute_tool ${toolName}`);
   }
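
Usage note (illustrative, not part of this diff): the sketch below shows how the tool-call constants added in gen-ai-attributes.ts could be used to hand-instrument a tool execution span, mirroring what processToolCallSpan and the LangChain handler do internally. The deep import path matches how the integration tests above reach the constants; `getWeather` and `runGetWeatherTool` are hypothetical names, and whether the constants are re-exported from the public @sentry/core entry point is an assumption.

import * as Sentry from '@sentry/core';
import {
  GEN_AI_TOOL_CALL_ID_ATTRIBUTE,
  GEN_AI_TOOL_INPUT_ATTRIBUTE,
  GEN_AI_TOOL_NAME_ATTRIBUTE,
  GEN_AI_TOOL_OUTPUT_ATTRIBUTE,
  GEN_AI_TOOL_TYPE_ATTRIBUTE,
} from '../../packages/core/src/tracing/ai/gen-ai-attributes';

// Hypothetical tool body; stands in for a real implementation.
async function getWeather(query: string): Promise<{ forecast: string }> {
  return { forecast: `sunny in ${query}` };
}

// Manually create a gen_ai.execute_tool span using the shared constants,
// so the span data matches what the automatic instrumentation emits.
async function runGetWeatherTool(callId: string, input: string): Promise<string> {
  return Sentry.startSpan(
    {
      name: 'execute_tool getWeather',
      op: 'gen_ai.execute_tool',
      attributes: {
        [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: callId,
        [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
        [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function',
        [GEN_AI_TOOL_INPUT_ATTRIBUTE]: input,
      },
    },
    async span => {
      const output = await getWeather(input);
      // Record the serialized result, as the LangChain handler does via JSON.stringify(output).
      span.setAttribute(GEN_AI_TOOL_OUTPUT_ATTRIBUTE, JSON.stringify(output));
      return JSON.stringify(output);
    },
  );
}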